/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/rcupdate.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include "compat.h"
#include "hash.h"
#include "ctree.h"
#include "disk-io.h"
#include "print-tree.h"
#include "transaction.h"
#include "volumes.h"
#include "locking.h"
#include "free-space-cache.h"

/*
 * Control flags for do_chunk_alloc's force field.
 *
 * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk
 * if we really need one.
 *
 * CHUNK_ALLOC_FORCE means it must try to allocate one.
 *
 * CHUNK_ALLOC_LIMITED means to only try and allocate one
 * if we have very few chunks already allocated.  This is
 * used as part of the clustering code to help make sure
 * we have a good pool of storage to cluster in, without
 * filling the FS with empty chunks.
 */
enum {
        CHUNK_ALLOC_NO_FORCE = 0,
        CHUNK_ALLOC_FORCE = 1,
        CHUNK_ALLOC_LIMITED = 2,
};
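
/*
 * Illustrative sketch only (not a real call site): how a caller would
 * pick a force level for do_chunk_alloc().  The trans/root/size values
 * here are placeholders.
 *
 *      ret = do_chunk_alloc(trans, extent_root, alloc_bytes, flags,
 *                           CHUNK_ALLOC_NO_FORCE);   (allocate only if needed)
 *      ret = do_chunk_alloc(trans, extent_root, alloc_bytes, flags,
 *                           CHUNK_ALLOC_LIMITED);    (pre-fill for clustering)
 *      ret = do_chunk_alloc(trans, extent_root, alloc_bytes, flags,
 *                           CHUNK_ALLOC_FORCE);      (always try to allocate)
 */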

/*
 * Control how reservations are dealt with.
 *
 * RESERVE_FREE - freeing a reservation.
 * RESERVE_ALLOC - allocating space and we need to update bytes_may_use for
 *   ENOSPC accounting
 * RESERVE_ALLOC_NO_ACCOUNT - allocating space and we should not update
 *   bytes_may_use as the ENOSPC accounting is done elsewhere
 */
enum {
        RESERVE_FREE = 0,
        RESERVE_ALLOC = 1,
        RESERVE_ALLOC_NO_ACCOUNT = 2,
};
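
/*
 * Illustrative sketch only: the reserve modes pair up around an
 * allocation.  A caller reserves with RESERVE_ALLOC (or
 * RESERVE_ALLOC_NO_ACCOUNT when ENOSPC accounting happens elsewhere)
 * and releases with RESERVE_FREE on the failure/free path:
 *
 *      ret = btrfs_update_reserved_bytes(cache, num_bytes, RESERVE_ALLOC);
 *      if (later_failure)
 *              btrfs_update_reserved_bytes(cache, num_bytes, RESERVE_FREE);
 */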

static int update_block_group(struct btrfs_trans_handle *trans,
                              struct btrfs_root *root,
                              u64 bytenr, u64 num_bytes, int alloc);
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root,
                                u64 bytenr, u64 num_bytes, u64 parent,
                                u64 root_objectid, u64 owner_objectid,
                                u64 owner_offset, int refs_to_drop,
                                struct btrfs_delayed_extent_op *extra_op);
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
                                    struct extent_buffer *leaf,
                                    struct btrfs_extent_item *ei);
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
                                      struct btrfs_root *root,
                                      u64 parent, u64 root_objectid,
                                      u64 flags, u64 owner, u64 offset,
                                      struct btrfs_key *ins, int ref_mod);
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
                                     struct btrfs_root *root,
                                     u64 parent, u64 root_objectid,
                                     u64 flags, struct btrfs_disk_key *key,
                                     int level, struct btrfs_key *ins);
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
                          struct btrfs_root *extent_root, u64 alloc_bytes,
                          u64 flags, int force);
static int find_next_key(struct btrfs_path *path, int level,
                         struct btrfs_key *key);
static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
                            int dump_block_groups);
static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
                                       u64 num_bytes, int reserve);

static noinline int
block_group_cache_done(struct btrfs_block_group_cache *cache)
{
        smp_mb();
        return cache->cached == BTRFS_CACHE_FINISHED;
}

static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
{
        return (cache->flags & bits) == bits;
}

static void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
{
        atomic_inc(&cache->count);
}

void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
{
        if (atomic_dec_and_test(&cache->count)) {
                WARN_ON(cache->pinned > 0);
                WARN_ON(cache->reserved > 0);
                kfree(cache->free_space_ctl);
                kfree(cache);
        }
}
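
/*
 * Sketch of the refcount discipline (illustrative, not a real call site):
 * the lookup helpers below take a reference via btrfs_get_block_group()
 * before returning, so the matching put is the caller's job:
 *
 *      cache = btrfs_lookup_block_group(fs_info, bytenr);
 *      if (cache) {
 *              ... use cache ...
 *              btrfs_put_block_group(cache);
 *      }
 */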

/*
 * this adds the block group to the fs_info rb tree for the block group
 * cache
 */
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
                                struct btrfs_block_group_cache *block_group)
{
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct btrfs_block_group_cache *cache;

        spin_lock(&info->block_group_cache_lock);
        p = &info->block_group_cache_tree.rb_node;

        while (*p) {
                parent = *p;
                cache = rb_entry(parent, struct btrfs_block_group_cache,
                                 cache_node);
                if (block_group->key.objectid < cache->key.objectid) {
                        p = &(*p)->rb_left;
                } else if (block_group->key.objectid > cache->key.objectid) {
                        p = &(*p)->rb_right;
                } else {
                        spin_unlock(&info->block_group_cache_lock);
                        return -EEXIST;
                }
        }

        rb_link_node(&block_group->cache_node, parent, p);
        rb_insert_color(&block_group->cache_node,
                        &info->block_group_cache_tree);
        spin_unlock(&info->block_group_cache_lock);

        return 0;
}

/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains the bytenr
 */
static struct btrfs_block_group_cache *
block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
                              int contains)
{
        struct btrfs_block_group_cache *cache, *ret = NULL;
        struct rb_node *n;
        u64 end, start;

        spin_lock(&info->block_group_cache_lock);
        n = info->block_group_cache_tree.rb_node;

        while (n) {
                cache = rb_entry(n, struct btrfs_block_group_cache,
                                 cache_node);
                end = cache->key.objectid + cache->key.offset - 1;
                start = cache->key.objectid;

                if (bytenr < start) {
                        if (!contains && (!ret || start < ret->key.objectid))
                                ret = cache;
                        n = n->rb_left;
                } else if (bytenr > start) {
                        if (contains && bytenr <= end) {
                                ret = cache;
                                break;
                        }
                        n = n->rb_right;
                } else {
                        ret = cache;
                        break;
                }
        }
        if (ret)
                btrfs_get_block_group(ret);
        spin_unlock(&info->block_group_cache_lock);

        return ret;
}
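
/*
 * Worked example with made-up numbers: given one block group covering
 * [1M, 1M + 256M), searching for bytenr = 1M + 4K with contains = 1
 * returns that group, while contains = 0 skips it (its start is below
 * the bytenr) and returns the next group whose start is >= 1M + 4K,
 * if any.
 */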

static int add_excluded_extent(struct btrfs_root *root,
                               u64 start, u64 num_bytes)
{
        u64 end = start + num_bytes - 1;
        set_extent_bits(&root->fs_info->freed_extents[0],
                        start, end, EXTENT_UPTODATE, GFP_NOFS);
        set_extent_bits(&root->fs_info->freed_extents[1],
                        start, end, EXTENT_UPTODATE, GFP_NOFS);
        return 0;
}

static void free_excluded_extents(struct btrfs_root *root,
                                  struct btrfs_block_group_cache *cache)
{
        u64 start, end;

        start = cache->key.objectid;
        end = start + cache->key.offset - 1;

        clear_extent_bits(&root->fs_info->freed_extents[0],
                          start, end, EXTENT_UPTODATE, GFP_NOFS);
        clear_extent_bits(&root->fs_info->freed_extents[1],
                          start, end, EXTENT_UPTODATE, GFP_NOFS);
}

static int exclude_super_stripes(struct btrfs_root *root,
                                 struct btrfs_block_group_cache *cache)
{
        u64 bytenr;
        u64 *logical;
        int stripe_len;
        int i, nr, ret;

        if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
                stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
                cache->bytes_super += stripe_len;
                ret = add_excluded_extent(root, cache->key.objectid,
                                          stripe_len);
                BUG_ON(ret);
        }

        for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
                bytenr = btrfs_sb_offset(i);
                ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
                                       cache->key.objectid, bytenr,
                                       0, &logical, &nr, &stripe_len);
                BUG_ON(ret);

                while (nr--) {
                        cache->bytes_super += stripe_len;
                        ret = add_excluded_extent(root, logical[nr],
                                                  stripe_len);
                        BUG_ON(ret);
                }

                kfree(logical);
        }
        return 0;
}

static struct btrfs_caching_control *
get_caching_control(struct btrfs_block_group_cache *cache)
{
        struct btrfs_caching_control *ctl;

        spin_lock(&cache->lock);
        if (cache->cached != BTRFS_CACHE_STARTED) {
                spin_unlock(&cache->lock);
                return NULL;
        }

        /* We're loading it the fast way, so we don't have a caching_ctl. */
        if (!cache->caching_ctl) {
                spin_unlock(&cache->lock);
                return NULL;
        }

        ctl = cache->caching_ctl;
        atomic_inc(&ctl->count);
        spin_unlock(&cache->lock);
        return ctl;
}

static void put_caching_control(struct btrfs_caching_control *ctl)
{
        if (atomic_dec_and_test(&ctl->count))
                kfree(ctl);
}

/*
 * This is only called by cache_block_group.  Since we could have freed
 * extents, we need to check pinned_extents for any extents that can't be
 * used yet, because their free space will be released only when the
 * transaction commits.
 */
static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
                              struct btrfs_fs_info *info, u64 start, u64 end)
{
        u64 extent_start, extent_end, size, total_added = 0;
        int ret;

        while (start < end) {
                ret = find_first_extent_bit(info->pinned_extents, start,
                                            &extent_start, &extent_end,
                                            EXTENT_DIRTY | EXTENT_UPTODATE);
                if (ret)
                        break;

                if (extent_start <= start) {
                        start = extent_end + 1;
                } else if (extent_start > start && extent_start < end) {
                        size = extent_start - start;
                        total_added += size;
                        ret = btrfs_add_free_space(block_group, start,
                                                   size);
                        BUG_ON(ret);
                        start = extent_end + 1;
                } else {
                        break;
                }
        }

        if (start < end) {
                size = end - start;
                total_added += size;
                ret = btrfs_add_free_space(block_group, start, size);
                BUG_ON(ret);
        }

        return total_added;
}
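
/*
 * Worked example with made-up numbers: caching [0, 100) while [40, 60]
 * is still pinned.  The loop finds the pinned extent, adds [0, 40) as
 * free space and restarts at 61; the tail then adds [61, 100), so
 * total_added is everything except the pinned bytes.
 */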

static noinline void caching_thread(struct btrfs_work *work)
{
        struct btrfs_block_group_cache *block_group;
        struct btrfs_fs_info *fs_info;
        struct btrfs_caching_control *caching_ctl;
        struct btrfs_root *extent_root;
        struct btrfs_path *path;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        u64 total_found = 0;
        u64 last = 0;
        u32 nritems;
        int ret = 0;

        caching_ctl = container_of(work, struct btrfs_caching_control, work);
        block_group = caching_ctl->block_group;
        fs_info = block_group->fs_info;
        extent_root = fs_info->extent_root;

        path = btrfs_alloc_path();
        if (!path)
                goto out;

        last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);

        /*
         * We don't want to deadlock with somebody trying to allocate a new
         * extent for the extent root while also trying to search the extent
         * root to add free space.  So we skip locking and search the commit
         * root, since it's read-only.
         */
        path->skip_locking = 1;
        path->search_commit_root = 1;
        path->reada = 1;

        key.objectid = last;
        key.offset = 0;
        key.type = BTRFS_EXTENT_ITEM_KEY;
again:
        mutex_lock(&caching_ctl->mutex);
        /* need to make sure the commit_root doesn't disappear */
        down_read(&fs_info->extent_commit_sem);

        ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
        if (ret < 0)
                goto err;

        leaf = path->nodes[0];
        nritems = btrfs_header_nritems(leaf);

        while (1) {
                if (btrfs_fs_closing(fs_info) > 1) {
                        last = (u64)-1;
                        break;
                }

                if (path->slots[0] < nritems) {
                        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                } else {
                        ret = find_next_key(path, 0, &key);
                        if (ret)
                                break;

                        if (need_resched() ||
                            btrfs_next_leaf(extent_root, path)) {
                                caching_ctl->progress = last;
                                btrfs_release_path(path);
                                up_read(&fs_info->extent_commit_sem);
                                mutex_unlock(&caching_ctl->mutex);
                                cond_resched();
                                goto again;
                        }
                        leaf = path->nodes[0];
                        nritems = btrfs_header_nritems(leaf);
                        continue;
                }

                if (key.objectid < block_group->key.objectid) {
                        path->slots[0]++;
                        continue;
                }

                if (key.objectid >= block_group->key.objectid +
                    block_group->key.offset)
                        break;

                if (key.type == BTRFS_EXTENT_ITEM_KEY) {
                        total_found += add_new_free_space(block_group,
                                                          fs_info, last,
                                                          key.objectid);
                        last = key.objectid + key.offset;

                        if (total_found > (1024 * 1024 * 2)) {
                                total_found = 0;
                                wake_up(&caching_ctl->wait);
                        }
                }
                path->slots[0]++;
        }
        ret = 0;

        total_found += add_new_free_space(block_group, fs_info, last,
                                          block_group->key.objectid +
                                          block_group->key.offset);
        caching_ctl->progress = (u64)-1;

        spin_lock(&block_group->lock);
        block_group->caching_ctl = NULL;
        block_group->cached = BTRFS_CACHE_FINISHED;
        spin_unlock(&block_group->lock);

err:
        btrfs_free_path(path);
        up_read(&fs_info->extent_commit_sem);

        free_excluded_extents(extent_root, block_group);

        mutex_unlock(&caching_ctl->mutex);
out:
        wake_up(&caching_ctl->wait);

        put_caching_control(caching_ctl);
        btrfs_put_block_group(block_group);
}

static int cache_block_group(struct btrfs_block_group_cache *cache,
                             struct btrfs_trans_handle *trans,
                             struct btrfs_root *root,
                             int load_cache_only)
{
        DEFINE_WAIT(wait);
        struct btrfs_fs_info *fs_info = cache->fs_info;
        struct btrfs_caching_control *caching_ctl;
        int ret = 0;

        caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
        BUG_ON(!caching_ctl);

        INIT_LIST_HEAD(&caching_ctl->list);
        mutex_init(&caching_ctl->mutex);
        init_waitqueue_head(&caching_ctl->wait);
        caching_ctl->block_group = cache;
        caching_ctl->progress = cache->key.objectid;
        atomic_set(&caching_ctl->count, 1);
        caching_ctl->work.func = caching_thread;

        spin_lock(&cache->lock);
        /*
         * This should be a rare occasion, but it could happen in the case
         * where one thread starts to load the space cache info, and then
         * some other thread starts a transaction commit which tries to do an
         * allocation while the other thread is still loading the space cache
         * info.  The previous loop should have kept us from choosing this block
         * group, but if we've moved to the state where we will wait on caching
         * block groups we need to first check if we're doing a fast load here,
         * so we can wait for it to finish, otherwise we could end up allocating
         * from a block group whose cache gets evicted for one reason or
         * another.
         */
        while (cache->cached == BTRFS_CACHE_FAST) {
                struct btrfs_caching_control *ctl;

                ctl = cache->caching_ctl;
                atomic_inc(&ctl->count);
                prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
                spin_unlock(&cache->lock);

                schedule();

                finish_wait(&ctl->wait, &wait);
                put_caching_control(ctl);
                spin_lock(&cache->lock);
        }

        if (cache->cached != BTRFS_CACHE_NO) {
                spin_unlock(&cache->lock);
                kfree(caching_ctl);
                return 0;
        }
        WARN_ON(cache->caching_ctl);
        cache->caching_ctl = caching_ctl;
        cache->cached = BTRFS_CACHE_FAST;
        spin_unlock(&cache->lock);

        /*
         * We can't do the read from on-disk cache during a commit since we need
         * to have the normal tree locking.  Also if we are currently trying to
         * allocate blocks for the tree root we can't do the fast caching since
         * we likely hold important locks.
         */
        if (trans && (!trans->transaction->in_commit) &&
            (root && root != root->fs_info->tree_root) &&
            btrfs_test_opt(root, SPACE_CACHE)) {
                ret = load_free_space_cache(fs_info, cache);

                spin_lock(&cache->lock);
                if (ret == 1) {
                        cache->caching_ctl = NULL;
                        cache->cached = BTRFS_CACHE_FINISHED;
                        cache->last_byte_to_unpin = (u64)-1;
                } else {
                        if (load_cache_only) {
                                cache->caching_ctl = NULL;
                                cache->cached = BTRFS_CACHE_NO;
                        } else {
                                cache->cached = BTRFS_CACHE_STARTED;
                        }
                }
                spin_unlock(&cache->lock);
                wake_up(&caching_ctl->wait);
                if (ret == 1) {
                        put_caching_control(caching_ctl);
                        free_excluded_extents(fs_info->extent_root, cache);
                        return 0;
                }
        } else {
                /*
                 * We are not going to do the fast caching, set cached to the
                 * appropriate value and wake up any waiters.
                 */
                spin_lock(&cache->lock);
                if (load_cache_only) {
                        cache->caching_ctl = NULL;
                        cache->cached = BTRFS_CACHE_NO;
                } else {
                        cache->cached = BTRFS_CACHE_STARTED;
                }
                spin_unlock(&cache->lock);
                wake_up(&caching_ctl->wait);
        }

        if (load_cache_only) {
                put_caching_control(caching_ctl);
                return 0;
        }

        down_write(&fs_info->extent_commit_sem);
        atomic_inc(&caching_ctl->count);
        list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
        up_write(&fs_info->extent_commit_sem);

        btrfs_get_block_group(cache);

        btrfs_queue_worker(&fs_info->caching_workers, &caching_ctl->work);

        return ret;
}

/*
 * return the block group that starts at or after bytenr
 */
static struct btrfs_block_group_cache *
btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
{
        struct btrfs_block_group_cache *cache;

        cache = block_group_cache_tree_search(info, bytenr, 0);

        return cache;
}

/*
 * return the block group that contains the given bytenr
 */
struct btrfs_block_group_cache *btrfs_lookup_block_group(
                                                 struct btrfs_fs_info *info,
                                                 u64 bytenr)
{
        struct btrfs_block_group_cache *cache;

        cache = block_group_cache_tree_search(info, bytenr, 1);

        return cache;
}

static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
                                                  u64 flags)
{
        struct list_head *head = &info->space_info;
        struct btrfs_space_info *found;

        flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;

        rcu_read_lock();
        list_for_each_entry_rcu(found, head, list) {
                if (found->flags & flags) {
                        rcu_read_unlock();
                        return found;
                }
        }
        rcu_read_unlock();
        return NULL;
}

/*
 * after adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
        struct list_head *head = &info->space_info;
        struct btrfs_space_info *found;

        rcu_read_lock();
        list_for_each_entry_rcu(found, head, list)
                found->full = 0;
        rcu_read_unlock();
}

static u64 div_factor(u64 num, int factor)
{
        if (factor == 10)
                return num;
        num *= factor;
        do_div(num, 10);
        return num;
}

static u64 div_factor_fine(u64 num, int factor)
{
        if (factor == 100)
                return num;
        num *= factor;
        do_div(num, 100);
        return num;
}
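
/*
 * Quick arithmetic check (illustrative): div_factor(1000, 9) == 900 and
 * div_factor_fine(1000, 85) == 850, i.e. factor is tenths in the first
 * helper and percent in the second, with do_div() keeping the 64-bit
 * division safe on 32-bit hosts.
 */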

u64 btrfs_find_block_group(struct btrfs_root *root,
                           u64 search_start, u64 search_hint, int owner)
{
        struct btrfs_block_group_cache *cache;
        u64 used;
        u64 last = max(search_hint, search_start);
        u64 group_start = 0;
        int full_search = 0;
        int factor = 9;
        int wrapped = 0;
again:
        while (1) {
                cache = btrfs_lookup_first_block_group(root->fs_info, last);
                if (!cache)
                        break;

                spin_lock(&cache->lock);
                last = cache->key.objectid + cache->key.offset;
                used = btrfs_block_group_used(&cache->item);

                if ((full_search || !cache->ro) &&
                    block_group_bits(cache, BTRFS_BLOCK_GROUP_METADATA)) {
                        if (used + cache->pinned + cache->reserved <
                            div_factor(cache->key.offset, factor)) {
                                group_start = cache->key.objectid;
                                spin_unlock(&cache->lock);
                                btrfs_put_block_group(cache);
                                goto found;
                        }
                }
                spin_unlock(&cache->lock);
                btrfs_put_block_group(cache);
                cond_resched();
        }
        if (!wrapped) {
                last = search_start;
                wrapped = 1;
                goto again;
        }
        if (!full_search && factor < 10) {
                last = search_start;
                full_search = 1;
                factor = 10;
                goto again;
        }
found:
        return group_start;
}

/* simple helper to search for an existing extent at a given offset */
int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
{
        int ret;
        struct btrfs_key key;
        struct btrfs_path *path;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = start;
        key.offset = len;
        btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
        ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
                                0, 0);
        btrfs_free_path(path);
        return ret;
}
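
/*
 * Usage sketch (illustrative): this is a plain btrfs_search_slot() probe,
 * so ret == 0 means an extent item with exactly (start, len) exists,
 * ret > 0 means it does not, and ret < 0 is an error:
 *
 *      ret = btrfs_lookup_extent(root, bytenr, num_bytes);
 *      if (ret == 0)
 *              extent_exists = 1;
 */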

/*
 * helper function to look up the reference count and flags of an extent.
 *
 * the head node for a delayed ref is used to store the sum of all the
 * reference count modifications queued up in the rbtree. the head
 * node may also store the extent flags to set. This way you can check
 * to see what the reference count and extent flags will be once all of
 * the delayed refs are processed.
 */
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root, u64 bytenr,
                             u64 num_bytes, u64 *refs, u64 *flags)
{
        struct btrfs_delayed_ref_head *head;
        struct btrfs_delayed_ref_root *delayed_refs;
        struct btrfs_path *path;
        struct btrfs_extent_item *ei;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        u32 item_size;
        u64 num_refs;
        u64 extent_flags;
        int ret;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = bytenr;
        key.type = BTRFS_EXTENT_ITEM_KEY;
        key.offset = num_bytes;
        if (!trans) {
                path->skip_locking = 1;
                path->search_commit_root = 1;
        }
again:
        ret = btrfs_search_slot(trans, root->fs_info->extent_root,
                                &key, path, 0, 0);
        if (ret < 0)
                goto out_free;

        if (ret == 0) {
                leaf = path->nodes[0];
                item_size = btrfs_item_size_nr(leaf, path->slots[0]);
                if (item_size >= sizeof(*ei)) {
                        ei = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_extent_item);
                        num_refs = btrfs_extent_refs(leaf, ei);
                        extent_flags = btrfs_extent_flags(leaf, ei);
                } else {
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
                        struct btrfs_extent_item_v0 *ei0;
                        BUG_ON(item_size != sizeof(*ei0));
                        ei0 = btrfs_item_ptr(leaf, path->slots[0],
                                             struct btrfs_extent_item_v0);
                        num_refs = btrfs_extent_refs_v0(leaf, ei0);
                        /* FIXME: this isn't correct for data */
                        extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
#else
                        BUG();
#endif
                }
                BUG_ON(num_refs == 0);
        } else {
                num_refs = 0;
                extent_flags = 0;
                ret = 0;
        }

        if (!trans)
                goto out;

        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);
        head = btrfs_find_delayed_ref_head(trans, bytenr);
        if (head) {
                if (!mutex_trylock(&head->mutex)) {
                        atomic_inc(&head->node.refs);
                        spin_unlock(&delayed_refs->lock);

                        btrfs_release_path(path);

                        /*
                         * Mutex was contended, block until it's released and try
                         * again
                         */
                        mutex_lock(&head->mutex);
                        mutex_unlock(&head->mutex);
                        btrfs_put_delayed_ref(&head->node);
                        goto again;
                }
                if (head->extent_op && head->extent_op->update_flags)
                        extent_flags |= head->extent_op->flags_to_set;
                else
                        BUG_ON(num_refs == 0);

                num_refs += head->node.ref_mod;
                mutex_unlock(&head->mutex);
        }
        spin_unlock(&delayed_refs->lock);
out:
        WARN_ON(num_refs == 0);
        if (refs)
                *refs = num_refs;
        if (flags)
                *flags = extent_flags;
out_free:
        btrfs_free_path(path);
        return ret;
}

/*
 * Back reference rules.  Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance.  This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * There are two kinds of back refs. The implicit back refs are optimized
 * for pointers in non-shared tree blocks. For a given pointer in a block,
 * back refs of this kind provide information about the block's owner tree
 * and the pointer's key. This information allows us to find the block by
 * b-tree searching. The full back refs are for pointers in tree blocks not
 * referenced by their owner trees. The location of the tree block is recorded
 * in the back refs. Actually the full back refs are generic, and can be
 * used in all cases the implicit back refs are used. The major shortcoming
 * of the full back refs is their overhead. Every time a tree block gets
 * COWed, we have to update the back ref entries for all pointers in it.
 *
 * For a newly allocated tree block, we use implicit back refs for
 * pointers in it. This means most tree related operations only involve
 * implicit back refs. For a tree block created in an old transaction, the
 * only way to drop a reference to it is to COW it. So we can detect the
 * event that a tree block loses its owner tree's reference and do the
 * back refs conversion.
 *
 * When a tree block is COW'd through a tree, there are four cases:
 *
 * The reference count of the block is one and the tree is the block's
 * owner tree. Nothing to do in this case.
 *
 * The reference count of the block is one and the tree is not the
 * block's owner tree. In this case, full back refs are used for pointers
 * in the block. Remove these full back refs, add implicit back refs for
 * every pointer in the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * the block's owner tree. In this case, implicit back refs are used for
 * pointers in the block. Add full back refs for every pointer in the
 * block, increase lower level extents' reference counts. The original
 * implicit back refs are inherited by the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * not the block's owner tree. Add implicit back refs for every pointer in
 * the new block, increase lower level extents' reference count.
 *
 * Back Reference Key composing:
 *
 * The key objectid corresponds to the first byte in the extent,
 * The key type is used to differentiate between types of back refs.
 * There are different meanings of the key offset for different types
 * of back refs.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure for the implicit back refs has fields for:
 *
 * - Objectid of the subvolume root
 * - objectid of the file holding the reference
 * - original offset in the file
 * - how many bookend extents
 *
 * The key offset for the implicit back refs is the hash of the first
 * three fields.
 *
 * The extent ref structure for the full back refs has a field for:
 *
 * - number of pointers in the tree leaf
 *
 * The key offset for the full back refs is the first byte of
 * the tree leaf.
 *
 * When a file extent is allocated, the implicit back refs are used.
 * The fields are filled in:
 *
 *     (root_key.objectid, inode objectid, offset in file, 1)
 *
 * When a file extent is removed during file truncation, we find the
 * corresponding implicit back refs and check the following fields:
 *
 *     (btrfs_header_owner(leaf), inode objectid, offset in file)
 *
 * Btree extents can be referenced by:
 *
 * - Different subvolumes
 *
 * Both the implicit back refs and the full back refs for tree blocks
 * only consist of a key. The key offset for the implicit back refs is the
 * objectid of the block's owner tree. The key offset for the full back refs
 * is the first byte of the parent block.
 *
 * When implicit back refs are used, information about the lowest key and
 * level of the tree block is required. This information is stored in
 * the tree block info structure.
 */
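
/*
 * Example key layouts with made-up values (tree 5, inode 257, file
 * offset 0).  An implicit data back ref for an extent is keyed as:
 *
 *      (extent bytenr, BTRFS_EXTENT_DATA_REF_KEY,
 *       hash_extent_data_ref(5, 257, 0))
 *
 * while a full (shared) back ref records the parent block instead:
 *
 *      (extent bytenr, BTRFS_SHARED_DATA_REF_KEY, parent block bytenr)
 *
 * The tree block variants, BTRFS_TREE_BLOCK_REF_KEY and
 * BTRFS_SHARED_BLOCK_REF_KEY, follow the same pattern with the owner
 * tree objectid or the parent block as the key offset.
 */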

#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
                                  struct btrfs_root *root,
                                  struct btrfs_path *path,
                                  u64 owner, u32 extra_size)
{
        struct btrfs_extent_item *item;
        struct btrfs_extent_item_v0 *ei0;
        struct btrfs_extent_ref_v0 *ref0;
        struct btrfs_tree_block_info *bi;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        struct btrfs_key found_key;
        u32 new_size = sizeof(*item);
        u64 refs;
        int ret;

        leaf = path->nodes[0];
        BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));

        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
        ei0 = btrfs_item_ptr(leaf, path->slots[0],
                             struct btrfs_extent_item_v0);
        refs = btrfs_extent_refs_v0(leaf, ei0);

        if (owner == (u64)-1) {
                while (1) {
                        if (path->slots[0] >= btrfs_header_nritems(leaf)) {
                                ret = btrfs_next_leaf(root, path);
                                if (ret < 0)
                                        return ret;
                                BUG_ON(ret > 0);
                                leaf = path->nodes[0];
                        }
                        btrfs_item_key_to_cpu(leaf, &found_key,
                                              path->slots[0]);
                        BUG_ON(key.objectid != found_key.objectid);
                        if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
                                path->slots[0]++;
                                continue;
                        }
                        ref0 = btrfs_item_ptr(leaf, path->slots[0],
                                              struct btrfs_extent_ref_v0);
                        owner = btrfs_ref_objectid_v0(leaf, ref0);
                        break;
                }
        }
        btrfs_release_path(path);

        if (owner < BTRFS_FIRST_FREE_OBJECTID)
                new_size += sizeof(*bi);

        new_size -= sizeof(*ei0);
        ret = btrfs_search_slot(trans, root, &key, path,
                                new_size + extra_size, 1);
        if (ret < 0)
                return ret;
        BUG_ON(ret);

        ret = btrfs_extend_item(trans, root, path, new_size);

        leaf = path->nodes[0];
        item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
        btrfs_set_extent_refs(leaf, item, refs);
        /* FIXME: get real generation */
        btrfs_set_extent_generation(leaf, item, 0);
        if (owner < BTRFS_FIRST_FREE_OBJECTID) {
                btrfs_set_extent_flags(leaf, item,
                                       BTRFS_EXTENT_FLAG_TREE_BLOCK |
                                       BTRFS_BLOCK_FLAG_FULL_BACKREF);
                bi = (struct btrfs_tree_block_info *)(item + 1);
                /* FIXME: get first key of the block */
                memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
                btrfs_set_tree_block_level(leaf, bi, (int)owner);
        } else {
                btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
        }
        btrfs_mark_buffer_dirty(leaf);
        return 0;
}
#endif

static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
{
        u32 high_crc = ~(u32)0;
        u32 low_crc = ~(u32)0;
        __le64 lenum;

        lenum = cpu_to_le64(root_objectid);
        high_crc = crc32c(high_crc, &lenum, sizeof(lenum));
        lenum = cpu_to_le64(owner);
        low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
        lenum = cpu_to_le64(offset);
        low_crc = crc32c(low_crc, &lenum, sizeof(lenum));

        return ((u64)high_crc << 31) ^ (u64)low_crc;
}
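
/*
 * Illustrative use: this hash is what lands in key.offset for implicit
 * data back refs, so lookups recompute it from the same triple:
 *
 *      key.objectid = bytenr;
 *      key.type = BTRFS_EXTENT_DATA_REF_KEY;
 *      key.offset = hash_extent_data_ref(root_objectid, owner, offset);
 *
 * Collisions are possible, which is why lookup_extent_data_ref() below
 * still compares the full (root, objectid, offset) triple and
 * insert_extent_data_ref() probes forward with key.offset++ on -EEXIST.
 */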
1050
1051 static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
1052                                      struct btrfs_extent_data_ref *ref)
1053 {
1054         return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
1055                                     btrfs_extent_data_ref_objectid(leaf, ref),
1056                                     btrfs_extent_data_ref_offset(leaf, ref));
1057 }
1058
1059 static int match_extent_data_ref(struct extent_buffer *leaf,
1060                                  struct btrfs_extent_data_ref *ref,
1061                                  u64 root_objectid, u64 owner, u64 offset)
1062 {
1063         if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
1064             btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
1065             btrfs_extent_data_ref_offset(leaf, ref) != offset)
1066                 return 0;
1067         return 1;
1068 }
1069
1070 static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
1071                                            struct btrfs_root *root,
1072                                            struct btrfs_path *path,
1073                                            u64 bytenr, u64 parent,
1074                                            u64 root_objectid,
1075                                            u64 owner, u64 offset)
1076 {
1077         struct btrfs_key key;
1078         struct btrfs_extent_data_ref *ref;
1079         struct extent_buffer *leaf;
1080         u32 nritems;
1081         int ret;
1082         int recow;
1083         int err = -ENOENT;
1084
1085         key.objectid = bytenr;
1086         if (parent) {
1087                 key.type = BTRFS_SHARED_DATA_REF_KEY;
1088                 key.offset = parent;
1089         } else {
1090                 key.type = BTRFS_EXTENT_DATA_REF_KEY;
1091                 key.offset = hash_extent_data_ref(root_objectid,
1092                                                   owner, offset);
1093         }
1094 again:
1095         recow = 0;
1096         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1097         if (ret < 0) {
1098                 err = ret;
1099                 goto fail;
1100         }
1101
1102         if (parent) {
1103                 if (!ret)
1104                         return 0;
1105 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1106                 key.type = BTRFS_EXTENT_REF_V0_KEY;
1107                 btrfs_release_path(path);
1108                 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1109                 if (ret < 0) {
1110                         err = ret;
1111                         goto fail;
1112                 }
1113                 if (!ret)
1114                         return 0;
1115 #endif
1116                 goto fail;
1117         }
1118
1119         leaf = path->nodes[0];
1120         nritems = btrfs_header_nritems(leaf);
1121         while (1) {
1122                 if (path->slots[0] >= nritems) {
1123                         ret = btrfs_next_leaf(root, path);
1124                         if (ret < 0)
1125                                 err = ret;
1126                         if (ret)
1127                                 goto fail;
1128
1129                         leaf = path->nodes[0];
1130                         nritems = btrfs_header_nritems(leaf);
1131                         recow = 1;
1132                 }
1133
1134                 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1135                 if (key.objectid != bytenr ||
1136                     key.type != BTRFS_EXTENT_DATA_REF_KEY)
1137                         goto fail;
1138
1139                 ref = btrfs_item_ptr(leaf, path->slots[0],
1140                                      struct btrfs_extent_data_ref);
1141
1142                 if (match_extent_data_ref(leaf, ref, root_objectid,
1143                                           owner, offset)) {
1144                         if (recow) {
1145                                 btrfs_release_path(path);
1146                                 goto again;
1147                         }
1148                         err = 0;
1149                         break;
1150                 }
1151                 path->slots[0]++;
1152         }
1153 fail:
1154         return err;
1155 }
1156
1157 static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
1158                                            struct btrfs_root *root,
1159                                            struct btrfs_path *path,
1160                                            u64 bytenr, u64 parent,
1161                                            u64 root_objectid, u64 owner,
1162                                            u64 offset, int refs_to_add)
1163 {
1164         struct btrfs_key key;
1165         struct extent_buffer *leaf;
1166         u32 size;
1167         u32 num_refs;
1168         int ret;
1169
1170         key.objectid = bytenr;
1171         if (parent) {
1172                 key.type = BTRFS_SHARED_DATA_REF_KEY;
1173                 key.offset = parent;
1174                 size = sizeof(struct btrfs_shared_data_ref);
1175         } else {
1176                 key.type = BTRFS_EXTENT_DATA_REF_KEY;
1177                 key.offset = hash_extent_data_ref(root_objectid,
1178                                                   owner, offset);
1179                 size = sizeof(struct btrfs_extent_data_ref);
1180         }
1181
1182         ret = btrfs_insert_empty_item(trans, root, path, &key, size);
1183         if (ret && ret != -EEXIST)
1184                 goto fail;
1185
1186         leaf = path->nodes[0];
1187         if (parent) {
1188                 struct btrfs_shared_data_ref *ref;
1189                 ref = btrfs_item_ptr(leaf, path->slots[0],
1190                                      struct btrfs_shared_data_ref);
1191                 if (ret == 0) {
1192                         btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
1193                 } else {
1194                         num_refs = btrfs_shared_data_ref_count(leaf, ref);
1195                         num_refs += refs_to_add;
1196                         btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
1197                 }
1198         } else {
1199                 struct btrfs_extent_data_ref *ref;
1200                 while (ret == -EEXIST) {
1201                         ref = btrfs_item_ptr(leaf, path->slots[0],
1202                                              struct btrfs_extent_data_ref);
1203                         if (match_extent_data_ref(leaf, ref, root_objectid,
1204                                                   owner, offset))
1205                                 break;
1206                         btrfs_release_path(path);
1207                         key.offset++;
1208                         ret = btrfs_insert_empty_item(trans, root, path, &key,
1209                                                       size);
1210                         if (ret && ret != -EEXIST)
1211                                 goto fail;
1212
1213                         leaf = path->nodes[0];
1214                 }
1215                 ref = btrfs_item_ptr(leaf, path->slots[0],
1216                                      struct btrfs_extent_data_ref);
1217                 if (ret == 0) {
1218                         btrfs_set_extent_data_ref_root(leaf, ref,
1219                                                        root_objectid);
1220                         btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
1221                         btrfs_set_extent_data_ref_offset(leaf, ref, offset);
1222                         btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
1223                 } else {
1224                         num_refs = btrfs_extent_data_ref_count(leaf, ref);
1225                         num_refs += refs_to_add;
1226                         btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
1227                 }
1228         }
1229         btrfs_mark_buffer_dirty(leaf);
1230         ret = 0;
1231 fail:
1232         btrfs_release_path(path);
1233         return ret;
1234 }
1235
1236 static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
1237                                            struct btrfs_root *root,
1238                                            struct btrfs_path *path,
1239                                            int refs_to_drop)
1240 {
1241         struct btrfs_key key;
1242         struct btrfs_extent_data_ref *ref1 = NULL;
1243         struct btrfs_shared_data_ref *ref2 = NULL;
1244         struct extent_buffer *leaf;
1245         u32 num_refs = 0;
1246         int ret = 0;
1247
1248         leaf = path->nodes[0];
1249         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1250
1251         if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
1252                 ref1 = btrfs_item_ptr(leaf, path->slots[0],
1253                                       struct btrfs_extent_data_ref);
1254                 num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1255         } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
1256                 ref2 = btrfs_item_ptr(leaf, path->slots[0],
1257                                       struct btrfs_shared_data_ref);
1258                 num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1259 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1260         } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
1261                 struct btrfs_extent_ref_v0 *ref0;
1262                 ref0 = btrfs_item_ptr(leaf, path->slots[0],
1263                                       struct btrfs_extent_ref_v0);
1264                 num_refs = btrfs_ref_count_v0(leaf, ref0);
1265 #endif
1266         } else {
1267                 BUG();
1268         }
1269
1270         BUG_ON(num_refs < refs_to_drop);
1271         num_refs -= refs_to_drop;
1272
1273         if (num_refs == 0) {
1274                 ret = btrfs_del_item(trans, root, path);
1275         } else {
1276                 if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
1277                         btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
1278                 else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
1279                         btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
1280 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1281                 else {
1282                         struct btrfs_extent_ref_v0 *ref0;
1283                         ref0 = btrfs_item_ptr(leaf, path->slots[0],
1284                                         struct btrfs_extent_ref_v0);
1285                         btrfs_set_ref_count_v0(leaf, ref0, num_refs);
1286                 }
1287 #endif
1288                 btrfs_mark_buffer_dirty(leaf);
1289         }
1290         return ret;
1291 }
1292
1293 static noinline u32 extent_data_ref_count(struct btrfs_root *root,
1294                                           struct btrfs_path *path,
1295                                           struct btrfs_extent_inline_ref *iref)
1296 {
1297         struct btrfs_key key;
1298         struct extent_buffer *leaf;
1299         struct btrfs_extent_data_ref *ref1;
1300         struct btrfs_shared_data_ref *ref2;
1301         u32 num_refs = 0;
1302
1303         leaf = path->nodes[0];
1304         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1305         if (iref) {
1306                 if (btrfs_extent_inline_ref_type(leaf, iref) ==
1307                     BTRFS_EXTENT_DATA_REF_KEY) {
1308                         ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
1309                         num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1310                 } else {
1311                         ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
1312                         num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1313                 }
1314         } else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
1315                 ref1 = btrfs_item_ptr(leaf, path->slots[0],
1316                                       struct btrfs_extent_data_ref);
1317                 num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1318         } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
1319                 ref2 = btrfs_item_ptr(leaf, path->slots[0],
1320                                       struct btrfs_shared_data_ref);
1321                 num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1322 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1323         } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
1324                 struct btrfs_extent_ref_v0 *ref0;
1325                 ref0 = btrfs_item_ptr(leaf, path->slots[0],
1326                                       struct btrfs_extent_ref_v0);
1327                 num_refs = btrfs_ref_count_v0(leaf, ref0);
1328 #endif
1329         } else {
1330                 WARN_ON(1);
1331         }
1332         return num_refs;
1333 }
1334
1335 static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
1336                                           struct btrfs_root *root,
1337                                           struct btrfs_path *path,
1338                                           u64 bytenr, u64 parent,
1339                                           u64 root_objectid)
1340 {
1341         struct btrfs_key key;
1342         int ret;
1343
1344         key.objectid = bytenr;
1345         if (parent) {
1346                 key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1347                 key.offset = parent;
1348         } else {
1349                 key.type = BTRFS_TREE_BLOCK_REF_KEY;
1350                 key.offset = root_objectid;
1351         }
1352
1353         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1354         if (ret > 0)
1355                 ret = -ENOENT;
1356 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1357         if (ret == -ENOENT && parent) {
1358                 btrfs_release_path(path);
1359                 key.type = BTRFS_EXTENT_REF_V0_KEY;
1360                 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1361                 if (ret > 0)
1362                         ret = -ENOENT;
1363         }
1364 #endif
1365         return ret;
1366 }
1367
1368 static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
1369                                           struct btrfs_root *root,
1370                                           struct btrfs_path *path,
1371                                           u64 bytenr, u64 parent,
1372                                           u64 root_objectid)
1373 {
1374         struct btrfs_key key;
1375         int ret;
1376
1377         key.objectid = bytenr;
1378         if (parent) {
1379                 key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1380                 key.offset = parent;
1381         } else {
1382                 key.type = BTRFS_TREE_BLOCK_REF_KEY;
1383                 key.offset = root_objectid;
1384         }
1385
1386         ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
1387         btrfs_release_path(path);
1388         return ret;
1389 }
1390
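/*
 * map an extent's owner/parent pair onto the backref key type used
 * for it:
 *
 *     metadata (owner < BTRFS_FIRST_FREE_OBJECTID):
 *         parent set   -> BTRFS_SHARED_BLOCK_REF_KEY
 *         parent unset -> BTRFS_TREE_BLOCK_REF_KEY
 *     data:
 *         parent set   -> BTRFS_SHARED_DATA_REF_KEY
 *         parent unset -> BTRFS_EXTENT_DATA_REF_KEY
 */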
1391 static inline int extent_ref_type(u64 parent, u64 owner)
1392 {
1393         int type;
1394         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1395                 if (parent > 0)
1396                         type = BTRFS_SHARED_BLOCK_REF_KEY;
1397                 else
1398                         type = BTRFS_TREE_BLOCK_REF_KEY;
1399         } else {
1400                 if (parent > 0)
1401                         type = BTRFS_SHARED_DATA_REF_KEY;
1402                 else
1403                         type = BTRFS_EXTENT_DATA_REF_KEY;
1404         }
1405         return type;
1406 }
1407
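/*
 * return the key that immediately follows the current path position,
 * walking up the path as needed when a node is out of slots.  returns
 * 0 and fills *key on success, 1 if we're at the very end.
 */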
1408 static int find_next_key(struct btrfs_path *path, int level,
1409                          struct btrfs_key *key)
1410
1411 {
1412         for (; level < BTRFS_MAX_LEVEL; level++) {
1413                 if (!path->nodes[level])
1414                         break;
1415                 if (path->slots[level] + 1 >=
1416                     btrfs_header_nritems(path->nodes[level]))
1417                         continue;
1418                 if (level == 0)
1419                         btrfs_item_key_to_cpu(path->nodes[level], key,
1420                                               path->slots[level] + 1);
1421                 else
1422                         btrfs_node_key_to_cpu(path->nodes[level], key,
1423                                               path->slots[level] + 1);
1424                 return 0;
1425         }
1426         return 1;
1427 }
1428
1429 /*
1430  * look for inline back ref. if back ref is found, *ref_ret is set
1431  * to the address of inline back ref, and 0 is returned.
1432  *
1433  * if back ref isn't found, *ref_ret is set to the address where it
1434  * should be inserted, and -ENOENT is returned.
1435  *
1436  * if insert is true and there are too many inline back refs, the path
1437  * points to the extent item, and -EAGAIN is returned.
1438  *
1439  * NOTE: inline back refs are ordered in the same way that back ref
1440  *       items in the tree are ordered.
1441  */
1442 static noinline_for_stack
1443 int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
1444                                  struct btrfs_root *root,
1445                                  struct btrfs_path *path,
1446                                  struct btrfs_extent_inline_ref **ref_ret,
1447                                  u64 bytenr, u64 num_bytes,
1448                                  u64 parent, u64 root_objectid,
1449                                  u64 owner, u64 offset, int insert)
1450 {
1451         struct btrfs_key key;
1452         struct extent_buffer *leaf;
1453         struct btrfs_extent_item *ei;
1454         struct btrfs_extent_inline_ref *iref;
1455         u64 flags;
1456         u64 item_size;
1457         unsigned long ptr;
1458         unsigned long end;
1459         int extra_size;
1460         int type;
1461         int want;
1462         int ret;
1463         int err = 0;
1464
1465         key.objectid = bytenr;
1466         key.type = BTRFS_EXTENT_ITEM_KEY;
1467         key.offset = num_bytes;
1468
1469         want = extent_ref_type(parent, owner);
1470         if (insert) {
1471                 extra_size = btrfs_extent_inline_ref_size(want);
1472                 path->keep_locks = 1;
1473         } else
1474                 extra_size = -1;
1475         ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
1476         if (ret < 0) {
1477                 err = ret;
1478                 goto out;
1479         }
1480         BUG_ON(ret);
1481
1482         leaf = path->nodes[0];
1483         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1484 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1485         if (item_size < sizeof(*ei)) {
1486                 if (!insert) {
1487                         err = -ENOENT;
1488                         goto out;
1489                 }
1490                 ret = convert_extent_item_v0(trans, root, path, owner,
1491                                              extra_size);
1492                 if (ret < 0) {
1493                         err = ret;
1494                         goto out;
1495                 }
1496                 leaf = path->nodes[0];
1497                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1498         }
1499 #endif
1500         BUG_ON(item_size < sizeof(*ei));
1501
1502         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1503         flags = btrfs_extent_flags(leaf, ei);
1504
1505         ptr = (unsigned long)(ei + 1);
1506         end = (unsigned long)ei + item_size;
1507
1508         if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
1509                 ptr += sizeof(struct btrfs_tree_block_info);
1510                 BUG_ON(ptr > end);
1511         } else {
1512                 BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
1513         }
1514
1515         err = -ENOENT;
1516         while (1) {
1517                 if (ptr >= end) {
1518                         WARN_ON(ptr > end);
1519                         break;
1520                 }
1521                 iref = (struct btrfs_extent_inline_ref *)ptr;
1522                 type = btrfs_extent_inline_ref_type(leaf, iref);
1523                 if (want < type)
1524                         break;
1525                 if (want > type) {
1526                         ptr += btrfs_extent_inline_ref_size(type);
1527                         continue;
1528                 }
1529
1530                 if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1531                         struct btrfs_extent_data_ref *dref;
1532                         dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1533                         if (match_extent_data_ref(leaf, dref, root_objectid,
1534                                                   owner, offset)) {
1535                                 err = 0;
1536                                 break;
1537                         }
1538                         if (hash_extent_data_ref_item(leaf, dref) <
1539                             hash_extent_data_ref(root_objectid, owner, offset))
1540                                 break;
1541                 } else {
1542                         u64 ref_offset;
1543                         ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
1544                         if (parent > 0) {
1545                                 if (parent == ref_offset) {
1546                                         err = 0;
1547                                         break;
1548                                 }
1549                                 if (ref_offset < parent)
1550                                         break;
1551                         } else {
1552                                 if (root_objectid == ref_offset) {
1553                                         err = 0;
1554                                         break;
1555                                 }
1556                                 if (ref_offset < root_objectid)
1557                                         break;
1558                         }
1559                 }
1560                 ptr += btrfs_extent_inline_ref_size(type);
1561         }
1562         if (err == -ENOENT && insert) {
1563                 if (item_size + extra_size >=
1564                     BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
1565                         err = -EAGAIN;
1566                         goto out;
1567                 }
1568                 /*
1569                  * To add a new inline back ref, we have to make sure
1570                  * there is no corresponding back ref item.
1571                  * For simplicity, we just do not add a new inline back
1572                  * ref if there is any kind of item for this block
1573                  */
1574                 if (find_next_key(path, 0, &key) == 0 &&
1575                     key.objectid == bytenr &&
1576                     key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
1577                         err = -EAGAIN;
1578                         goto out;
1579                 }
1580         }
1581         *ref_ret = (struct btrfs_extent_inline_ref *)ptr;
1582 out:
1583         if (insert) {
1584                 path->keep_locks = 0;
1585                 btrfs_unlock_up_safe(path, 1);
1586         }
1587         return err;
1588 }
1589
1590 /*
1591  * helper to add a new inline back ref
1592  */
1593 static noinline_for_stack
1594 int setup_inline_extent_backref(struct btrfs_trans_handle *trans,
1595                                 struct btrfs_root *root,
1596                                 struct btrfs_path *path,
1597                                 struct btrfs_extent_inline_ref *iref,
1598                                 u64 parent, u64 root_objectid,
1599                                 u64 owner, u64 offset, int refs_to_add,
1600                                 struct btrfs_delayed_extent_op *extent_op)
1601 {
1602         struct extent_buffer *leaf;
1603         struct btrfs_extent_item *ei;
1604         unsigned long ptr;
1605         unsigned long end;
1606         unsigned long item_offset;
1607         u64 refs;
1608         int size;
1609         int type;
1610         int ret;
1611
1612         leaf = path->nodes[0];
1613         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1614         item_offset = (unsigned long)iref - (unsigned long)ei;
1615
1616         type = extent_ref_type(parent, owner);
1617         size = btrfs_extent_inline_ref_size(type);
1618
1619         ret = btrfs_extend_item(trans, root, path, size);
1620
1621         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1622         refs = btrfs_extent_refs(leaf, ei);
1623         refs += refs_to_add;
1624         btrfs_set_extent_refs(leaf, ei, refs);
1625         if (extent_op)
1626                 __run_delayed_extent_op(extent_op, leaf, ei);
1627
1628         ptr = (unsigned long)ei + item_offset;
1629         end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
1630         if (ptr < end - size)
1631                 memmove_extent_buffer(leaf, ptr + size, ptr,
1632                                       end - size - ptr);
1633
1634         iref = (struct btrfs_extent_inline_ref *)ptr;
1635         btrfs_set_extent_inline_ref_type(leaf, iref, type);
1636         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1637                 struct btrfs_extent_data_ref *dref;
1638                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1639                 btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
1640                 btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
1641                 btrfs_set_extent_data_ref_offset(leaf, dref, offset);
1642                 btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
1643         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1644                 struct btrfs_shared_data_ref *sref;
1645                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1646                 btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
1647                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1648         } else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
1649                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1650         } else {
1651                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
1652         }
1653         btrfs_mark_buffer_dirty(leaf);
1654         return 0;
1655 }
1656
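/*
 * look up a backref for an extent, trying the inline form first.  if
 * an inline ref exists, *ref_ret points at it and 0 is returned.
 * otherwise we fall back to the separate keyed backref item, leaving
 * *ref_ret NULL so callers can tell the two cases apart.
 */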
1657 static int lookup_extent_backref(struct btrfs_trans_handle *trans,
1658                                  struct btrfs_root *root,
1659                                  struct btrfs_path *path,
1660                                  struct btrfs_extent_inline_ref **ref_ret,
1661                                  u64 bytenr, u64 num_bytes, u64 parent,
1662                                  u64 root_objectid, u64 owner, u64 offset)
1663 {
1664         int ret;
1665
1666         ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
1667                                            bytenr, num_bytes, parent,
1668                                            root_objectid, owner, offset, 0);
1669         if (ret != -ENOENT)
1670                 return ret;
1671
1672         btrfs_release_path(path);
1673         *ref_ret = NULL;
1674
1675         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1676                 ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
1677                                             root_objectid);
1678         } else {
1679                 ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
1680                                              root_objectid, owner, offset);
1681         }
1682         return ret;
1683 }
1684
1685 /*
1686  * helper to update/remove inline back ref
1687  */
1688 static noinline_for_stack
1689 int update_inline_extent_backref(struct btrfs_trans_handle *trans,
1690                                  struct btrfs_root *root,
1691                                  struct btrfs_path *path,
1692                                  struct btrfs_extent_inline_ref *iref,
1693                                  int refs_to_mod,
1694                                  struct btrfs_delayed_extent_op *extent_op)
1695 {
1696         struct extent_buffer *leaf;
1697         struct btrfs_extent_item *ei;
1698         struct btrfs_extent_data_ref *dref = NULL;
1699         struct btrfs_shared_data_ref *sref = NULL;
1700         unsigned long ptr;
1701         unsigned long end;
1702         u32 item_size;
1703         int size;
1704         int type;
1705         int ret;
1706         u64 refs;
1707
1708         leaf = path->nodes[0];
1709         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1710         refs = btrfs_extent_refs(leaf, ei);
1711         WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
1712         refs += refs_to_mod;
1713         btrfs_set_extent_refs(leaf, ei, refs);
1714         if (extent_op)
1715                 __run_delayed_extent_op(extent_op, leaf, ei);
1716
1717         type = btrfs_extent_inline_ref_type(leaf, iref);
1718
1719         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1720                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1721                 refs = btrfs_extent_data_ref_count(leaf, dref);
1722         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1723                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1724                 refs = btrfs_shared_data_ref_count(leaf, sref);
1725         } else {
1726                 refs = 1;
1727                 BUG_ON(refs_to_mod != -1);
1728         }
1729
1730         BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
1731         refs += refs_to_mod;
1732
1733         if (refs > 0) {
1734                 if (type == BTRFS_EXTENT_DATA_REF_KEY)
1735                         btrfs_set_extent_data_ref_count(leaf, dref, refs);
1736                 else
1737                         btrfs_set_shared_data_ref_count(leaf, sref, refs);
1738         } else {
1739                 size =  btrfs_extent_inline_ref_size(type);
1740                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1741                 ptr = (unsigned long)iref;
1742                 end = (unsigned long)ei + item_size;
1743                 if (ptr + size < end)
1744                         memmove_extent_buffer(leaf, ptr, ptr + size,
1745                                               end - ptr - size);
1746                 item_size -= size;
1747                 ret = btrfs_truncate_item(trans, root, path, item_size, 1);
1748         }
1749         btrfs_mark_buffer_dirty(leaf);
1750         return 0;
1751 }
1752
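/*
 * add refs_to_add to an existing inline backref, or create a new one
 * if none exists.  lookup_inline_extent_backref leaves the path
 * positioned at the insertion point when it returns -ENOENT, which is
 * exactly where setup_inline_extent_backref needs it.  -EAGAIN from
 * the lookup (no room for another inline ref) is passed back to the
 * caller, who falls back to a keyed backref item.
 */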
1753 static noinline_for_stack
1754 int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
1755                                  struct btrfs_root *root,
1756                                  struct btrfs_path *path,
1757                                  u64 bytenr, u64 num_bytes, u64 parent,
1758                                  u64 root_objectid, u64 owner,
1759                                  u64 offset, int refs_to_add,
1760                                  struct btrfs_delayed_extent_op *extent_op)
1761 {
1762         struct btrfs_extent_inline_ref *iref;
1763         int ret;
1764
1765         ret = lookup_inline_extent_backref(trans, root, path, &iref,
1766                                            bytenr, num_bytes, parent,
1767                                            root_objectid, owner, offset, 1);
1768         if (ret == 0) {
1769                 BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
1770                 ret = update_inline_extent_backref(trans, root, path, iref,
1771                                                    refs_to_add, extent_op);
1772         } else if (ret == -ENOENT) {
1773                 ret = setup_inline_extent_backref(trans, root, path, iref,
1774                                                   parent, root_objectid,
1775                                                   owner, offset, refs_to_add,
1776                                                   extent_op);
1777         }
1778         return ret;
1779 }
1780
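/*
 * insert a keyed (non-inline) backref item.  tree blocks only ever
 * take one ref per backref item, so refs_to_add must be 1 for them;
 * data backrefs carry a ref count.
 */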
1781 static int insert_extent_backref(struct btrfs_trans_handle *trans,
1782                                  struct btrfs_root *root,
1783                                  struct btrfs_path *path,
1784                                  u64 bytenr, u64 parent, u64 root_objectid,
1785                                  u64 owner, u64 offset, int refs_to_add)
1786 {
1787         int ret;
1788         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1789                 BUG_ON(refs_to_add != 1);
1790                 ret = insert_tree_block_ref(trans, root, path, bytenr,
1791                                             parent, root_objectid);
1792         } else {
1793                 ret = insert_extent_data_ref(trans, root, path, bytenr,
1794                                              parent, root_objectid,
1795                                              owner, offset, refs_to_add);
1796         }
1797         return ret;
1798 }
1799
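/*
 * drop refs_to_drop references from a backref.  inline refs are
 * updated (or removed) in place, keyed data refs have their count
 * decremented, and keyed tree block refs are simply deleted.
 */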
1800 static int remove_extent_backref(struct btrfs_trans_handle *trans,
1801                                  struct btrfs_root *root,
1802                                  struct btrfs_path *path,
1803                                  struct btrfs_extent_inline_ref *iref,
1804                                  int refs_to_drop, int is_data)
1805 {
1806         int ret;
1807
1808         BUG_ON(!is_data && refs_to_drop != 1);
1809         if (iref) {
1810                 ret = update_inline_extent_backref(trans, root, path, iref,
1811                                                    -refs_to_drop, NULL);
1812         } else if (is_data) {
1813                 ret = remove_extent_data_ref(trans, root, path, refs_to_drop);
1814         } else {
1815                 ret = btrfs_del_item(trans, root, path);
1816         }
1817         return ret;
1818 }
1819
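/*
 * pass a discard for a byte range down to the block layer; the >> 9
 * shifts convert bytes to 512-byte sectors.
 */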
1820 static int btrfs_issue_discard(struct block_device *bdev,
1821                                 u64 start, u64 len)
1822 {
1823         return blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_NOFS, 0);
1824 }
1825
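/*
 * discard a logical extent: map it to its physical stripes and issue
 * a discard to each device that supports it, adding up how many bytes
 * were actually discarded for the caller via *actual_bytes.
 */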
1826 static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
1827                                 u64 num_bytes, u64 *actual_bytes)
1828 {
1829         int ret;
1830         u64 discarded_bytes = 0;
1831         struct btrfs_bio *bbio = NULL;
1832
1833
1834         /* Tell the block device(s) that the sectors can be discarded */
1835         ret = btrfs_map_block(&root->fs_info->mapping_tree, REQ_DISCARD,
1836                               bytenr, &num_bytes, &bbio, 0);
1837         if (!ret) {
1838                 struct btrfs_bio_stripe *stripe = bbio->stripes;
1839                 int i;
1840
1841
1842                 for (i = 0; i < bbio->num_stripes; i++, stripe++) {
1843                         if (!stripe->dev->can_discard)
1844                                 continue;
1845
1846                         ret = btrfs_issue_discard(stripe->dev->bdev,
1847                                                   stripe->physical,
1848                                                   stripe->length);
1849                         if (!ret)
1850                                 discarded_bytes += stripe->length;
1851                         else if (ret != -EOPNOTSUPP)
1852                                 break;
1853
1854                         /*
1855                          * If we get back EOPNOTSUPP for some reason,
1856                          * ignore the return value so we don't break
1857                          * callers of discard_extent.
1858                          */
1859                         ret = 0;
1860                 }
1861                 kfree(bbio);
1862         }
1863
1864         if (actual_bytes)
1865                 *actual_bytes = discarded_bytes;
1866
1867
1868         return ret;
1869 }
1870
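/*
 * add a reference to an extent.  nothing is changed in the extent
 * tree here; the reference is recorded as a delayed ref (tree or
 * data, depending on the owner) and applied when delayed refs run.
 */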
1871 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1872                          struct btrfs_root *root,
1873                          u64 bytenr, u64 num_bytes, u64 parent,
1874                          u64 root_objectid, u64 owner, u64 offset, int for_cow)
1875 {
1876         int ret;
1877         struct btrfs_fs_info *fs_info = root->fs_info;
1878
1879         BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
1880                root_objectid == BTRFS_TREE_LOG_OBJECTID);
1881
1882         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1883                 ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
1884                                         num_bytes,
1885                                         parent, root_objectid, (int)owner,
1886                                         BTRFS_ADD_DELAYED_REF, NULL, for_cow);
1887         } else {
1888                 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
1889                                         num_bytes,
1890                                         parent, root_objectid, owner, offset,
1891                                         BTRFS_ADD_DELAYED_REF, NULL, for_cow);
1892         }
1893         return ret;
1894 }
1895
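/*
 * apply a delayed ref addition to the extent tree.  the fast path
 * inserts (or updates) an inline backref; if the extent item has no
 * room left, the ref count on the extent item is bumped first and a
 * keyed backref item is inserted separately.
 */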
1896 static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1897                                   struct btrfs_root *root,
1898                                   u64 bytenr, u64 num_bytes,
1899                                   u64 parent, u64 root_objectid,
1900                                   u64 owner, u64 offset, int refs_to_add,
1901                                   struct btrfs_delayed_extent_op *extent_op)
1902 {
1903         struct btrfs_path *path;
1904         struct extent_buffer *leaf;
1905         struct btrfs_extent_item *item;
1906         u64 refs;
1907         int ret;
1908         int err = 0;
1909
1910         path = btrfs_alloc_path();
1911         if (!path)
1912                 return -ENOMEM;
1913
1914         path->reada = 1;
1915         path->leave_spinning = 1;
1916         /* this will set up the path even if it fails to insert the back ref */
1917         ret = insert_inline_extent_backref(trans, root->fs_info->extent_root,
1918                                            path, bytenr, num_bytes, parent,
1919                                            root_objectid, owner, offset,
1920                                            refs_to_add, extent_op);
1921         if (ret == 0)
1922                 goto out;
1923
1924         if (ret != -EAGAIN) {
1925                 err = ret;
1926                 goto out;
1927         }
1928
1929         leaf = path->nodes[0];
1930         item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1931         refs = btrfs_extent_refs(leaf, item);
1932         btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
1933         if (extent_op)
1934                 __run_delayed_extent_op(extent_op, leaf, item);
1935
1936         btrfs_mark_buffer_dirty(leaf);
1937         btrfs_release_path(path);
1938
1939         path->reada = 1;
1940         path->leave_spinning = 1;
1941
1942         /* now insert the actual backref */
1943         ret = insert_extent_backref(trans, root->fs_info->extent_root,
1944                                     path, bytenr, parent, root_objectid,
1945                                     owner, offset, refs_to_add);
1946         BUG_ON(ret);
1947 out:
1948         btrfs_free_path(path);
1949         return err;
1950 }
1951
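/*
 * apply a single delayed data ref to the extent tree.  when
 * insert_reserved is set on an add, the extent item itself still has
 * to be created from the reserved space; otherwise we only add or
 * drop backrefs.
 */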
1952 static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
1953                                 struct btrfs_root *root,
1954                                 struct btrfs_delayed_ref_node *node,
1955                                 struct btrfs_delayed_extent_op *extent_op,
1956                                 int insert_reserved)
1957 {
1958         int ret = 0;
1959         struct btrfs_delayed_data_ref *ref;
1960         struct btrfs_key ins;
1961         u64 parent = 0;
1962         u64 ref_root = 0;
1963         u64 flags = 0;
1964
1965         ins.objectid = node->bytenr;
1966         ins.offset = node->num_bytes;
1967         ins.type = BTRFS_EXTENT_ITEM_KEY;
1968
1969         ref = btrfs_delayed_node_to_data_ref(node);
1970         if (node->type == BTRFS_SHARED_DATA_REF_KEY)
1971                 parent = ref->parent;
1972         else
1973                 ref_root = ref->root;
1974
1975         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
1976                 if (extent_op) {
1977                         BUG_ON(extent_op->update_key);
1978                         flags |= extent_op->flags_to_set;
1979                 }
1980                 ret = alloc_reserved_file_extent(trans, root,
1981                                                  parent, ref_root, flags,
1982                                                  ref->objectid, ref->offset,
1983                                                  &ins, node->ref_mod);
1984         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
1985                 ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
1986                                              node->num_bytes, parent,
1987                                              ref_root, ref->objectid,
1988                                              ref->offset, node->ref_mod,
1989                                              extent_op);
1990         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
1991                 ret = __btrfs_free_extent(trans, root, node->bytenr,
1992                                           node->num_bytes, parent,
1993                                           ref_root, ref->objectid,
1994                                           ref->offset, node->ref_mod,
1995                                           extent_op);
1996         } else {
1997                 BUG();
1998         }
1999         return ret;
2000 }
2001
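/*
 * apply a delayed extent op to an extent item that is already mapped
 * into a leaf: set the flags and/or the tree block key as requested.
 */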
2002 static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
2003                                     struct extent_buffer *leaf,
2004                                     struct btrfs_extent_item *ei)
2005 {
2006         u64 flags = btrfs_extent_flags(leaf, ei);
2007         if (extent_op->update_flags) {
2008                 flags |= extent_op->flags_to_set;
2009                 btrfs_set_extent_flags(leaf, ei, flags);
2010         }
2011
2012         if (extent_op->update_key) {
2013                 struct btrfs_tree_block_info *bi;
2014                 BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
2015                 bi = (struct btrfs_tree_block_info *)(ei + 1);
2016                 btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
2017         }
2018 }
2019
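/*
 * look up the extent item a delayed extent op refers to and apply the
 * op to it.  the item is expected to exist already; not finding it is
 * an -EIO.
 */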
2020 static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
2021                                  struct btrfs_root *root,
2022                                  struct btrfs_delayed_ref_node *node,
2023                                  struct btrfs_delayed_extent_op *extent_op)
2024 {
2025         struct btrfs_key key;
2026         struct btrfs_path *path;
2027         struct btrfs_extent_item *ei;
2028         struct extent_buffer *leaf;
2029         u32 item_size;
2030         int ret;
2031         int err = 0;
2032
2033         path = btrfs_alloc_path();
2034         if (!path)
2035                 return -ENOMEM;
2036
2037         key.objectid = node->bytenr;
2038         key.type = BTRFS_EXTENT_ITEM_KEY;
2039         key.offset = node->num_bytes;
2040
2041         path->reada = 1;
2042         path->leave_spinning = 1;
2043         ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
2044                                 path, 0, 1);
2045         if (ret < 0) {
2046                 err = ret;
2047                 goto out;
2048         }
2049         if (ret > 0) {
2050                 err = -EIO;
2051                 goto out;
2052         }
2053
2054         leaf = path->nodes[0];
2055         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2056 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2057         if (item_size < sizeof(*ei)) {
2058                 ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
2059                                              path, (u64)-1, 0);
2060                 if (ret < 0) {
2061                         err = ret;
2062                         goto out;
2063                 }
2064                 leaf = path->nodes[0];
2065                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2066         }
2067 #endif
2068         BUG_ON(item_size < sizeof(*ei));
2069         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2070         __run_delayed_extent_op(extent_op, leaf, ei);
2071
2072         btrfs_mark_buffer_dirty(leaf);
2073 out:
2074         btrfs_free_path(path);
2075         return err;
2076 }
2077
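/*
 * apply a single delayed tree block ref.  mirrors the data ref case,
 * except a tree block always has a ref_mod of 1 and a newly inserted
 * block takes its key and flags from the extent op.
 */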
2078 static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
2079                                 struct btrfs_root *root,
2080                                 struct btrfs_delayed_ref_node *node,
2081                                 struct btrfs_delayed_extent_op *extent_op,
2082                                 int insert_reserved)
2083 {
2084         int ret = 0;
2085         struct btrfs_delayed_tree_ref *ref;
2086         struct btrfs_key ins;
2087         u64 parent = 0;
2088         u64 ref_root = 0;
2089
2090         ins.objectid = node->bytenr;
2091         ins.offset = node->num_bytes;
2092         ins.type = BTRFS_EXTENT_ITEM_KEY;
2093
2094         ref = btrfs_delayed_node_to_tree_ref(node);
2095         if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2096                 parent = ref->parent;
2097         else
2098                 ref_root = ref->root;
2099
2100         BUG_ON(node->ref_mod != 1);
2101         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2102                 BUG_ON(!extent_op || !extent_op->update_flags ||
2103                        !extent_op->update_key);
2104                 ret = alloc_reserved_tree_block(trans, root,
2105                                                 parent, ref_root,
2106                                                 extent_op->flags_to_set,
2107                                                 &extent_op->key,
2108                                                 ref->level, &ins);
2109         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
2110                 ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
2111                                              node->num_bytes, parent, ref_root,
2112                                              ref->level, 0, 1, extent_op);
2113         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
2114                 ret = __btrfs_free_extent(trans, root, node->bytenr,
2115                                           node->num_bytes, parent, ref_root,
2116                                           ref->level, 0, 1, extent_op);
2117         } else {
2118                 BUG();
2119         }
2120         return ret;
2121 }
2122
2123 /* helper function to actually process a single delayed ref entry */
2124 static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
2125                                struct btrfs_root *root,
2126                                struct btrfs_delayed_ref_node *node,
2127                                struct btrfs_delayed_extent_op *extent_op,
2128                                int insert_reserved)
2129 {
2130         int ret;
2131         if (btrfs_delayed_ref_is_head(node)) {
2132                 struct btrfs_delayed_ref_head *head;
2133                 /*
2134                  * we've hit the end of the chain and we were supposed
2135                  * to insert this extent into the tree.  But it got
2136                  * deleted before we ever needed to insert it, so all
2137                  * we have to do is clean up the accounting
2138                  */
2139                 BUG_ON(extent_op);
2140                 head = btrfs_delayed_node_to_head(node);
2141                 if (insert_reserved) {
2142                         btrfs_pin_extent(root, node->bytenr,
2143                                          node->num_bytes, 1);
2144                         if (head->is_data) {
2145                                 ret = btrfs_del_csums(trans, root,
2146                                                       node->bytenr,
2147                                                       node->num_bytes);
2148                                 BUG_ON(ret);
2149                         }
2150                 }
2151                 mutex_unlock(&head->mutex);
2152                 return 0;
2153         }
2154
2155         if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
2156             node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2157                 ret = run_delayed_tree_ref(trans, root, node, extent_op,
2158                                            insert_reserved);
2159         else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
2160                  node->type == BTRFS_SHARED_DATA_REF_KEY)
2161                 ret = run_delayed_data_ref(trans, root, node, extent_op,
2162                                            insert_reserved);
2163         else
2164                 BUG();
2165         return ret;
2166 }
2167
2168 static noinline struct btrfs_delayed_ref_node *
2169 select_delayed_ref(struct btrfs_delayed_ref_head *head)
2170 {
2171         struct rb_node *node;
2172         struct btrfs_delayed_ref_node *ref;
2173         int action = BTRFS_ADD_DELAYED_REF;
2174 again:
2175         /*
2176          * select delayed refs of type BTRFS_ADD_DELAYED_REF first.
2177          * this prevents the ref count from going down to zero while
2178          * there are still pending delayed refs.
2179          */
2180         node = rb_prev(&head->node.rb_node);
2181         while (1) {
2182                 if (!node)
2183                         break;
2184                 ref = rb_entry(node, struct btrfs_delayed_ref_node,
2185                                 rb_node);
2186                 if (ref->bytenr != head->node.bytenr)
2187                         break;
2188                 if (ref->action == action)
2189                         return ref;
2190                 node = rb_prev(node);
2191         }
2192         if (action == BTRFS_ADD_DELAYED_REF) {
2193                 action = BTRFS_DROP_DELAYED_REF;
2194                 goto again;
2195         }
2196         return NULL;
2197 }
2198
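/*
 * run delayed refs for the heads on the cluster list, taking one head
 * at a time, locking it and applying its refs (adds before drops)
 * until only the head itself is left to fix up the accounting.
 * returns the number of refs that were processed.
 */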
2199 static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
2200                                        struct btrfs_root *root,
2201                                        struct list_head *cluster)
2202 {
2203         struct btrfs_delayed_ref_root *delayed_refs;
2204         struct btrfs_delayed_ref_node *ref;
2205         struct btrfs_delayed_ref_head *locked_ref = NULL;
2206         struct btrfs_delayed_extent_op *extent_op;
2207         int ret;
2208         int count = 0;
2209         int must_insert_reserved = 0;
2210
2211         delayed_refs = &trans->transaction->delayed_refs;
2212         while (1) {
2213                 if (!locked_ref) {
2214                         /* pick a new head ref from the cluster list */
2215                         if (list_empty(cluster))
2216                                 break;
2217
2218                         locked_ref = list_entry(cluster->next,
2219                                      struct btrfs_delayed_ref_head, cluster);
2220
2221                         /* grab the lock that says we are going to process
2222                          * all the refs for this head */
2223                         ret = btrfs_delayed_ref_lock(trans, locked_ref);
2224
2225                         /*
2226                          * we may have dropped the spin lock to get the head
2227                          * mutex lock, and that might have given someone else
2228                          * time to free the head.  If that's true, it has been
2229                          * removed from our list and we can move on.
2230                          */
2231                         if (ret == -EAGAIN) {
2232                                 locked_ref = NULL;
2233                                 count++;
2234                                 continue;
2235                         }
2236                 }
2237
2238                 /*
2239                  * locked_ref is the head node, so we have to go one
2240                  * node back for any delayed ref updates
2241                  */
2242                 ref = select_delayed_ref(locked_ref);
2243
2244                 if (ref && ref->seq &&
2245                     btrfs_check_delayed_seq(delayed_refs, ref->seq)) {
2246                         /*
2247                          * there are still refs with lower seq numbers in the
2248                          * process of being added. Don't run this ref yet.
2249                          */
2250                         list_del_init(&locked_ref->cluster);
2251                         mutex_unlock(&locked_ref->mutex);
2252                         locked_ref = NULL;
2253                         delayed_refs->num_heads_ready++;
2254                         spin_unlock(&delayed_refs->lock);
2255                         cond_resched();
2256                         spin_lock(&delayed_refs->lock);
2257                         continue;
2258                 }
2259
2260                 /*
2261                  * record the must insert reserved flag before we
2262                  * drop the spin lock.
2263                  */
2264                 must_insert_reserved = locked_ref->must_insert_reserved;
2265                 locked_ref->must_insert_reserved = 0;
2266
2267                 extent_op = locked_ref->extent_op;
2268                 locked_ref->extent_op = NULL;
2269
2270                 if (!ref) {
2271                         /* All delayed refs have been processed.  Go ahead
2272                          * and send the head node to run_one_delayed_ref,
2273                          * so that any accounting fixes can happen
2274                          */
2275                         ref = &locked_ref->node;
2276
2277                         if (extent_op && must_insert_reserved) {
2278                                 kfree(extent_op);
2279                                 extent_op = NULL;
2280                         }
2281
2282                         if (extent_op) {
2283                                 spin_unlock(&delayed_refs->lock);
2284
2285                                 ret = run_delayed_extent_op(trans, root,
2286                                                             ref, extent_op);
2287                                 BUG_ON(ret);
2288                                 kfree(extent_op);
2289
2290                                 goto next;
2291                         }
2292
2293                         list_del_init(&locked_ref->cluster);
2294                         locked_ref = NULL;
2295                 }
2296
2297                 ref->in_tree = 0;
2298                 rb_erase(&ref->rb_node, &delayed_refs->root);
2299                 delayed_refs->num_entries--;
2300                 /*
2301                  * we modified num_entries, but as we're currently running
2302                  * delayed refs, skip
2303                  *     wake_up(&delayed_refs->seq_wait);
2304                  * here.
2305                  */
2306                 spin_unlock(&delayed_refs->lock);
2307
2308                 ret = run_one_delayed_ref(trans, root, ref, extent_op,
2309                                           must_insert_reserved);
2310                 BUG_ON(ret);
2311
2312                 btrfs_put_delayed_ref(ref);
2313                 kfree(extent_op);
2314                 count++;
2315 next:
2316                 do_chunk_alloc(trans, root->fs_info->extent_root,
2317                                2 * 1024 * 1024,
2318                                btrfs_get_alloc_profile(root, 0),
2319                                CHUNK_ALLOC_NO_FORCE);
2320                 cond_resched();
2321                 spin_lock(&delayed_refs->lock);
2322         }
2323         return count;
2324 }
2325
2326
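/*
 * called when ref processing looks stuck because everything runnable
 * is blocked behind lower seq numbers.  drop the delayed ref lock and
 * sleep until either the number of delayed refs changes or the first
 * entry on the seq list does, then take the lock back.
 */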
2327 static void wait_for_more_refs(struct btrfs_delayed_ref_root *delayed_refs,
2328                         unsigned long num_refs)
2329 {
2330         struct list_head *first_seq = delayed_refs->seq_head.next;
2331
2332         spin_unlock(&delayed_refs->lock);
2333         pr_debug("waiting for more refs (num %ld, first %p)\n",
2334                  num_refs, first_seq);
2335         wait_event(delayed_refs->seq_wait,
2336                    num_refs != delayed_refs->num_entries ||
2337                    delayed_refs->seq_head.next != first_seq);
2338         pr_debug("done waiting for more refs (num %ld, first %p)\n",
2339                  delayed_refs->num_entries, delayed_refs->seq_head.next);
2340         spin_lock(&delayed_refs->lock);
2341 }
2342
2343 /*
2344  * this starts processing the delayed reference count updates and
2345  * extent insertions we have queued up so far.  count can be
2346  * 0, which means to process everything in the tree at the start
2347  * of the run (but not newly added entries), or it can be some target
2348  * number you'd like to process.
2349  */
2350 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2351                            struct btrfs_root *root, unsigned long count)
2352 {
2353         struct rb_node *node;
2354         struct btrfs_delayed_ref_root *delayed_refs;
2355         struct btrfs_delayed_ref_node *ref;
2356         struct list_head cluster;
2357         int ret;
2358         u64 delayed_start;
2359         int run_all = count == (unsigned long)-1;
2360         int run_most = 0;
2361         unsigned long num_refs = 0;
2362         int consider_waiting;
2363
2364         if (root == root->fs_info->extent_root)
2365                 root = root->fs_info->tree_root;
2366
2367         do_chunk_alloc(trans, root->fs_info->extent_root,
2368                        2 * 1024 * 1024, btrfs_get_alloc_profile(root, 0),
2369                        CHUNK_ALLOC_NO_FORCE);
2370
2371         delayed_refs = &trans->transaction->delayed_refs;
2372         INIT_LIST_HEAD(&cluster);
2373 again:
2374         consider_waiting = 0;
2375         spin_lock(&delayed_refs->lock);
2376         if (count == 0) {
2377                 count = delayed_refs->num_entries * 2;
2378                 run_most = 1;
2379         }
2380         while (1) {
2381                 if (!(run_all || run_most) &&
2382                     delayed_refs->num_heads_ready < 64)
2383                         break;
2384
2385                 /*
2386                  * go find something we can process in the rbtree.  We start at
2387                  * the beginning of the tree, and then build a cluster
2388                  * of refs to process starting at the first one we are able to
2389                  * lock
2390                  */
2391                 delayed_start = delayed_refs->run_delayed_start;
2392                 ret = btrfs_find_ref_cluster(trans, &cluster,
2393                                              delayed_refs->run_delayed_start);
2394                 if (ret)
2395                         break;
2396
2397                 if (delayed_start >= delayed_refs->run_delayed_start) {
2398                         if (consider_waiting == 0) {
2399                                 /*
2400                                  * btrfs_find_ref_cluster looped. let's do one
2401                                  * more cycle. if we don't run any delayed ref
2402                                  * during that cycle (because we can't because
2403                                  * all of them are blocked) and if the number of
2404                                  * refs doesn't change, we avoid busy waiting.
2405                                  */
2406                                 consider_waiting = 1;
2407                                 num_refs = delayed_refs->num_entries;
2408                         } else {
2409                                 wait_for_more_refs(delayed_refs, num_refs);
2410                                 /*
2411                                  * after waiting, things have changed. we
2412                                  * dropped the lock and someone else might have
2413                                  * run some refs, built new clusters and so on.
2414                                  * therefore, we restart staleness detection.
2415                                  */
2416                                 consider_waiting = 0;
2417                         }
2418                 }
2419
2420                 ret = run_clustered_refs(trans, root, &cluster);
2421                 BUG_ON(ret < 0);
2422
2423                 count -= min_t(unsigned long, ret, count);
2424
2425                 if (count == 0)
2426                         break;
2427
2428                 if (ret || delayed_refs->run_delayed_start == 0) {
2429                         /* refs were run, let's reset staleness detection */
2430                         consider_waiting = 0;
2431                 }
2432         }
2433
2434         if (run_all) {
2435                 node = rb_first(&delayed_refs->root);
2436                 if (!node)
2437                         goto out;
2438                 count = (unsigned long)-1;
2439
2440                 while (node) {
2441                         ref = rb_entry(node, struct btrfs_delayed_ref_node,
2442                                        rb_node);
2443                         if (btrfs_delayed_ref_is_head(ref)) {
2444                                 struct btrfs_delayed_ref_head *head;
2445
2446                                 head = btrfs_delayed_node_to_head(ref);
2447                                 atomic_inc(&ref->refs);
2448
2449                                 spin_unlock(&delayed_refs->lock);
2450                                 /*
2451                                  * Mutex was contended, block until it's
2452                                  * released and try again
2453                                  */
2454                                 mutex_lock(&head->mutex);
2455                                 mutex_unlock(&head->mutex);
2456
2457                                 btrfs_put_delayed_ref(ref);
2458                                 cond_resched();
2459                                 goto again;
2460                         }
2461                         node = rb_next(node);
2462                 }
2463                 spin_unlock(&delayed_refs->lock);
2464                 schedule_timeout(1);
2465                 goto again;
2466         }
2467 out:
2468         spin_unlock(&delayed_refs->lock);
2469         return 0;
2470 }
2471
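/*
 * queue a delayed extent op that ORs the given flags into an extent
 * item's flags field the next time the delayed refs are run.
 */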
2472 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
2473                                 struct btrfs_root *root,
2474                                 u64 bytenr, u64 num_bytes, u64 flags,
2475                                 int is_data)
2476 {
2477         struct btrfs_delayed_extent_op *extent_op;
2478         int ret;
2479
2480         extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
2481         if (!extent_op)
2482                 return -ENOMEM;
2483
2484         extent_op->flags_to_set = flags;
2485         extent_op->update_flags = 1;
2486         extent_op->update_key = 0;
2487         extent_op->is_data = is_data ? 1 : 0;
2488
2489         ret = btrfs_add_delayed_extent_op(root->fs_info, trans, bytenr,
2490                                           num_bytes, extent_op);
2491         if (ret)
2492                 kfree(extent_op);
2493         return ret;
2494 }
2495
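/*
 * check the delayed refs for an extent to see if anyone but this
 * root/objectid/offset holds a reference.  returns 0 if the only
 * queued ref is ours, 1 if anything else is queued, -ENOENT if no
 * delayed refs exist, and -EAGAIN if the head mutex was contended and
 * the caller should retry with a fresh path.
 */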
2496 static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
2497                                       struct btrfs_root *root,
2498                                       struct btrfs_path *path,
2499                                       u64 objectid, u64 offset, u64 bytenr)
2500 {
2501         struct btrfs_delayed_ref_head *head;
2502         struct btrfs_delayed_ref_node *ref;
2503         struct btrfs_delayed_data_ref *data_ref;
2504         struct btrfs_delayed_ref_root *delayed_refs;
2505         struct rb_node *node;
2506         int ret = 0;
2507
2508         ret = -ENOENT;
2509         delayed_refs = &trans->transaction->delayed_refs;
2510         spin_lock(&delayed_refs->lock);
2511         head = btrfs_find_delayed_ref_head(trans, bytenr);
2512         if (!head)
2513                 goto out;
2514
2515         if (!mutex_trylock(&head->mutex)) {
2516                 atomic_inc(&head->node.refs);
2517                 spin_unlock(&delayed_refs->lock);
2518
2519                 btrfs_release_path(path);
2520
2521                 /*
2522                  * Mutex was contended, block until it's released and let
2523                  * caller try again
2524                  */
2525                 mutex_lock(&head->mutex);
2526                 mutex_unlock(&head->mutex);
2527                 btrfs_put_delayed_ref(&head->node);
2528                 return -EAGAIN;
2529         }
2530
2531         node = rb_prev(&head->node.rb_node);
2532         if (!node)
2533                 goto out_unlock;
2534
2535         ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2536
2537         if (ref->bytenr != bytenr)
2538                 goto out_unlock;
2539
2540         ret = 1;
2541         if (ref->type != BTRFS_EXTENT_DATA_REF_KEY)
2542                 goto out_unlock;
2543
2544         data_ref = btrfs_delayed_node_to_data_ref(ref);
2545
2546         node = rb_prev(node);
2547         if (node) {
2548                 ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2549                 if (ref->bytenr == bytenr)
2550                         goto out_unlock;
2551         }
2552
2553         if (data_ref->root != root->root_key.objectid ||
2554             data_ref->objectid != objectid || data_ref->offset != offset)
2555                 goto out_unlock;
2556
2557         ret = 0;
2558 out_unlock:
2559         mutex_unlock(&head->mutex);
2560 out:
2561         spin_unlock(&delayed_refs->lock);
2562         return ret;
2563 }
2564
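/*
 * check the committed extent tree to see if anyone but this
 * root/objectid/offset can reference the extent.  returns 0 only if
 * the extent item holds exactly one inline data ref that matches us
 * and the extent is newer than the last snapshot; 1 if it is or may
 * be shared; -ENOENT if no extent item was found.
 */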
2565 static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
2566                                         struct btrfs_root *root,
2567                                         struct btrfs_path *path,
2568                                         u64 objectid, u64 offset, u64 bytenr)
2569 {
2570         struct btrfs_root *extent_root = root->fs_info->extent_root;
2571         struct extent_buffer *leaf;
2572         struct btrfs_extent_data_ref *ref;
2573         struct btrfs_extent_inline_ref *iref;
2574         struct btrfs_extent_item *ei;
2575         struct btrfs_key key;
2576         u32 item_size;
2577         int ret;
2578
2579         key.objectid = bytenr;
2580         key.offset = (u64)-1;
2581         key.type = BTRFS_EXTENT_ITEM_KEY;
2582
2583         ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
2584         if (ret < 0)
2585                 goto out;
2586         BUG_ON(ret == 0);
2587
2588         ret = -ENOENT;
2589         if (path->slots[0] == 0)
2590                 goto out;
2591
2592         path->slots[0]--;
2593         leaf = path->nodes[0];
2594         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2595
2596         if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
2597                 goto out;
2598
2599         ret = 1;
2600         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2601 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2602         if (item_size < sizeof(*ei)) {
2603                 WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
2604                 goto out;
2605         }
2606 #endif
2607         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2608
2609         if (item_size != sizeof(*ei) +
2610             btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
2611                 goto out;
2612
2613         if (btrfs_extent_generation(leaf, ei) <=
2614             btrfs_root_last_snapshot(&root->root_item))
2615                 goto out;
2616
2617         iref = (struct btrfs_extent_inline_ref *)(ei + 1);
2618         if (btrfs_extent_inline_ref_type(leaf, iref) !=
2619             BTRFS_EXTENT_DATA_REF_KEY)
2620                 goto out;
2621
2622         ref = (struct btrfs_extent_data_ref *)(&iref->offset);
2623         if (btrfs_extent_refs(leaf, ei) !=
2624             btrfs_extent_data_ref_count(leaf, ref) ||
2625             btrfs_extent_data_ref_root(leaf, ref) !=
2626             root->root_key.objectid ||
2627             btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
2628             btrfs_extent_data_ref_offset(leaf, ref) != offset)
2629                 goto out;
2630
2631         ret = 0;
2632 out:
2633         return ret;
2634 }
2635
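/*
 * return 0 if this root/objectid/offset is the only possible holder
 * of references to the extent, nonzero otherwise.  both the committed
 * extent tree and the pending delayed refs are checked, retrying when
 * the delayed ref check loses a race on the head mutex.
 */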
2636 int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
2637                           struct btrfs_root *root,
2638                           u64 objectid, u64 offset, u64 bytenr)
2639 {
2640         struct btrfs_path *path;
2641         int ret;
2642         int ret2;
2643
2644         path = btrfs_alloc_path();
2645         if (!path)
2646                 return -ENOENT;
2647
2648         do {
2649                 ret = check_committed_ref(trans, root, path, objectid,
2650                                           offset, bytenr);
2651                 if (ret && ret != -ENOENT)
2652                         goto out;
2653
2654                 ret2 = check_delayed_ref(trans, root, path, objectid,
2655                                          offset, bytenr);
2656         } while (ret2 == -EAGAIN);
2657
2658         if (ret2 && ret2 != -ENOENT) {
2659                 ret = ret2;
2660                 goto out;
2661         }
2662
2663         if (ret != -ENOENT || ret2 != -ENOENT)
2664                 ret = 0;
2665 out:
2666         btrfs_free_path(path);
2667         if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
2668                 WARN_ON(ret > 0);
2669         return ret;
2670 }
2671
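/*
 * add or drop one reference for every extent pointed to by a buffer,
 * used when a tree block is cowed or freed.  inline file extents and
 * holes are skipped; everything else goes through btrfs_inc_extent_ref
 * or btrfs_free_extent depending on inc.
 */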
2672 static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
2673                            struct btrfs_root *root,
2674                            struct extent_buffer *buf,
2675                            int full_backref, int inc, int for_cow)
2676 {
2677         u64 bytenr;
2678         u64 num_bytes;
2679         u64 parent;
2680         u64 ref_root;
2681         u32 nritems;
2682         struct btrfs_key key;
2683         struct btrfs_file_extent_item *fi;
2684         int i;
2685         int level;
2686         int ret = 0;
2687         int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
2688                             u64, u64, u64, u64, u64, u64, int);
2689
2690         ref_root = btrfs_header_owner(buf);
2691         nritems = btrfs_header_nritems(buf);
2692         level = btrfs_header_level(buf);
2693
2694         if (!root->ref_cows && level == 0)
2695                 return 0;
2696
2697         if (inc)
2698                 process_func = btrfs_inc_extent_ref;
2699         else
2700                 process_func = btrfs_free_extent;
2701
2702         if (full_backref)
2703                 parent = buf->start;
2704         else
2705                 parent = 0;
2706
2707         for (i = 0; i < nritems; i++) {
2708                 if (level == 0) {
2709                         btrfs_item_key_to_cpu(buf, &key, i);
2710                         if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
2711                                 continue;
2712                         fi = btrfs_item_ptr(buf, i,
2713                                             struct btrfs_file_extent_item);
2714                         if (btrfs_file_extent_type(buf, fi) ==
2715                             BTRFS_FILE_EXTENT_INLINE)
2716                                 continue;
2717                         bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
2718                         if (bytenr == 0)
2719                                 continue;
2720
2721                         num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
2722                         key.offset -= btrfs_file_extent_offset(buf, fi);
2723                         ret = process_func(trans, root, bytenr, num_bytes,
2724                                            parent, ref_root, key.objectid,
2725                                            key.offset, for_cow);
2726                         if (ret)
2727                                 goto fail;
2728                 } else {
2729                         bytenr = btrfs_node_blockptr(buf, i);
2730                         num_bytes = btrfs_level_size(root, level - 1);
2731                         ret = process_func(trans, root, bytenr, num_bytes,
2732                                            parent, ref_root, level - 1, 0,
2733                                            for_cow);
2734                         if (ret)
2735                                 goto fail;
2736                 }
2737         }
2738         return 0;
2739 fail:
2740         BUG();
2741         return ret;
2742 }
2743
2744 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2745                   struct extent_buffer *buf, int full_backref, int for_cow)
2746 {
2747         return __btrfs_mod_ref(trans, root, buf, full_backref, 1, for_cow);
2748 }
2749
2750 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2751                   struct extent_buffer *buf, int full_backref, int for_cow)
2752 {
2753         return __btrfs_mod_ref(trans, root, buf, full_backref, 0, for_cow);
2754 }
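/*
 * Usage sketch (editor's note, not part of the original file): the main
 * consumer of these two wrappers is the COW machinery in ctree.c.  When a
 * shared block is copied, the copy takes references on everything the old
 * block pointed to and the old block's references are dropped, roughly:
 *
 *	ret = btrfs_inc_ref(trans, root, cow, 1, 1);
 *	if (!ret)
 *		ret = btrfs_dec_ref(trans, root, buf, 1, 1);
 *
 * The exact full_backref/for_cow combinations vary by caller.
 */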
2755
2756 static int write_one_cache_group(struct btrfs_trans_handle *trans,
2757                                  struct btrfs_root *root,
2758                                  struct btrfs_path *path,
2759                                  struct btrfs_block_group_cache *cache)
2760 {
2761         int ret;
2762         struct btrfs_root *extent_root = root->fs_info->extent_root;
2763         unsigned long bi;
2764         struct extent_buffer *leaf;
2765
2766         ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
2767         if (ret < 0)
2768                 goto fail;
2769         BUG_ON(ret);
2770
2771         leaf = path->nodes[0];
2772         bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
2773         write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
2774         btrfs_mark_buffer_dirty(leaf);
2775         btrfs_release_path(path);
2776 fail:
2777         if (ret)
2778                 return ret;
2779         return 0;
2780
2781 }
2782
2783 static struct btrfs_block_group_cache *
2784 next_block_group(struct btrfs_root *root,
2785                  struct btrfs_block_group_cache *cache)
2786 {
2787         struct rb_node *node;
2788         spin_lock(&root->fs_info->block_group_cache_lock);
2789         node = rb_next(&cache->cache_node);
2790         btrfs_put_block_group(cache);
2791         if (node) {
2792                 cache = rb_entry(node, struct btrfs_block_group_cache,
2793                                  cache_node);
2794                 btrfs_get_block_group(cache);
2795         } else
2796                 cache = NULL;
2797         spin_unlock(&root->fs_info->block_group_cache_lock);
2798         return cache;
2799 }
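/*
 * Iteration sketch (editor's note): next_block_group() drops the reference
 * on the group it was handed and returns the next group already referenced,
 * so a caller only puts the last group it stops on.  This is exactly how
 * btrfs_write_dirty_block_groups() below walks the rbtree:
 *
 *	cache = btrfs_lookup_first_block_group(fs_info, 0);
 *	while (cache) {
 *		... look at cache ...
 *		cache = next_block_group(root, cache);
 *	}
 */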
2800
2801 static int cache_save_setup(struct btrfs_block_group_cache *block_group,
2802                             struct btrfs_trans_handle *trans,
2803                             struct btrfs_path *path)
2804 {
2805         struct btrfs_root *root = block_group->fs_info->tree_root;
2806         struct inode *inode = NULL;
2807         u64 alloc_hint = 0;
2808         int dcs = BTRFS_DC_ERROR;
2809         int num_pages = 0;
2810         int retries = 0;
2811         int ret = 0;
2812
2813         /*
2814          * If this block group is smaller than 100 megs, don't bother caching the
2815          * block group.
2816          */
2817         if (block_group->key.offset < (100 * 1024 * 1024)) {
2818                 spin_lock(&block_group->lock);
2819                 block_group->disk_cache_state = BTRFS_DC_WRITTEN;
2820                 spin_unlock(&block_group->lock);
2821                 return 0;
2822         }
2823
2824 again:
2825         inode = lookup_free_space_inode(root, block_group, path);
2826         if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
2827                 ret = PTR_ERR(inode);
2828                 btrfs_release_path(path);
2829                 goto out;
2830         }
2831
2832         if (IS_ERR(inode)) {
2833                 BUG_ON(retries);
2834                 retries++;
2835
2836                 if (block_group->ro)
2837                         goto out_free;
2838
2839                 ret = create_free_space_inode(root, trans, block_group, path);
2840                 if (ret)
2841                         goto out_free;
2842                 goto again;
2843         }
2844
2845         /* We've already set up this transaction, go ahead and exit */
2846         if (block_group->cache_generation == trans->transid &&
2847             i_size_read(inode)) {
2848                 dcs = BTRFS_DC_SETUP;
2849                 goto out_put;
2850         }
2851
2852         /*
2853          * We want to set the generation to 0 so that if anything goes wrong
2854          * from here on out we know not to trust this cache when we load up
2855          * next time.
2856          */
2857         BTRFS_I(inode)->generation = 0;
2858         ret = btrfs_update_inode(trans, root, inode);
2859         WARN_ON(ret);
2860
2861         if (i_size_read(inode) > 0) {
2862                 ret = btrfs_truncate_free_space_cache(root, trans, path,
2863                                                       inode);
2864                 if (ret)
2865                         goto out_put;
2866         }
2867
2868         spin_lock(&block_group->lock);
2869         if (block_group->cached != BTRFS_CACHE_FINISHED) {
2870                 /* We're not cached, don't bother trying to write stuff out */
2871                 dcs = BTRFS_DC_WRITTEN;
2872                 spin_unlock(&block_group->lock);
2873                 goto out_put;
2874         }
2875         spin_unlock(&block_group->lock);
2876
2877         num_pages = (int)div64_u64(block_group->key.offset, 1024 * 1024 * 1024);
2878         if (!num_pages)
2879                 num_pages = 1;
2880
2881         /*
2882          * Just to make absolutely sure we have enough space, we're going to
2883          * preallocate 16 pages worth of space for each block group.  In
2884          * practice we ought to use at most 8, but we need extra space so we can
2885          * add our header and have a terminator between the extents and the
2886          * bitmaps.
2887          */
2888         num_pages *= 16;
2889         num_pages *= PAGE_CACHE_SIZE;
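        /*
         * Worked example (editor's note): a 10GB block group gives
         * num_pages = 10, so we preallocate 10 * 16 = 160 pages, i.e.
         * 640KB with 4KB pages, for the free space cache file.
         */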
2890
2891         ret = btrfs_check_data_free_space(inode, num_pages);
2892         if (ret)
2893                 goto out_put;
2894
2895         ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
2896                                               num_pages, num_pages,
2897                                               &alloc_hint);
2898         if (!ret)
2899                 dcs = BTRFS_DC_SETUP;
2900         btrfs_free_reserved_data_space(inode, num_pages);
2901
2902 out_put:
2903         iput(inode);
2904 out_free:
2905         btrfs_release_path(path);
2906 out:
2907         spin_lock(&block_group->lock);
2908         if (!ret && dcs == BTRFS_DC_SETUP)
2909                 block_group->cache_generation = trans->transid;
2910         block_group->disk_cache_state = dcs;
2911         spin_unlock(&block_group->lock);
2912
2913         return ret;
2914 }
2915
2916 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
2917                                    struct btrfs_root *root)
2918 {
2919         struct btrfs_block_group_cache *cache;
2920         int err = 0;
2921         struct btrfs_path *path;
2922         u64 last = 0;
2923
2924         path = btrfs_alloc_path();
2925         if (!path)
2926                 return -ENOMEM;
2927
2928 again:
2929         while (1) {
2930                 cache = btrfs_lookup_first_block_group(root->fs_info, last);
2931                 while (cache) {
2932                         if (cache->disk_cache_state == BTRFS_DC_CLEAR)
2933                                 break;
2934                         cache = next_block_group(root, cache);
2935                 }
2936                 if (!cache) {
2937                         if (last == 0)
2938                                 break;
2939                         last = 0;
2940                         continue;
2941                 }
2942                 err = cache_save_setup(cache, trans, path);
2943                 last = cache->key.objectid + cache->key.offset;
2944                 btrfs_put_block_group(cache);
2945         }
2946
2947         while (1) {
2948                 if (last == 0) {
2949                         err = btrfs_run_delayed_refs(trans, root,
2950                                                      (unsigned long)-1);
2951                         BUG_ON(err);
2952                 }
2953
2954                 cache = btrfs_lookup_first_block_group(root->fs_info, last);
2955                 while (cache) {
2956                         if (cache->disk_cache_state == BTRFS_DC_CLEAR) {
2957                                 btrfs_put_block_group(cache);
2958                                 goto again;
2959                         }
2960
2961                         if (cache->dirty)
2962                                 break;
2963                         cache = next_block_group(root, cache);
2964                 }
2965                 if (!cache) {
2966                         if (last == 0)
2967                                 break;
2968                         last = 0;
2969                         continue;
2970                 }
2971
2972                 if (cache->disk_cache_state == BTRFS_DC_SETUP)
2973                         cache->disk_cache_state = BTRFS_DC_NEED_WRITE;
2974                 cache->dirty = 0;
2975                 last = cache->key.objectid + cache->key.offset;
2976
2977                 err = write_one_cache_group(trans, root, path, cache);
2978                 BUG_ON(err);
2979                 btrfs_put_block_group(cache);
2980         }
2981
2982         while (1) {
2983                 /*
2984                  * I don't think this is needed since we're just marking our
2985                  * preallocated extent as written, but it can't hurt, so do
2986                  * it just in case.
2987                  */
2988                 if (last == 0) {
2989                         err = btrfs_run_delayed_refs(trans, root,
2990                                                      (unsigned long)-1);
2991                         BUG_ON(err);
2992                 }
2993
2994                 cache = btrfs_lookup_first_block_group(root->fs_info, last);
2995                 while (cache) {
2996                         /*
2997                          * Really this shouldn't happen, but it could if we
2998                          * couldn't write the entire preallocated extent and
2999                          * splitting the extent resulted in a new block.
3000                          */
3001                         if (cache->dirty) {
3002                                 btrfs_put_block_group(cache);
3003                                 goto again;
3004                         }
3005                         if (cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
3006                                 break;
3007                         cache = next_block_group(root, cache);
3008                 }
3009                 if (!cache) {
3010                         if (last == 0)
3011                                 break;
3012                         last = 0;
3013                         continue;
3014                 }
3015
3016                 btrfs_write_out_cache(root, trans, cache, path);
3017
3018                 /*
3019                  * If we didn't have an error then the cache state is still
3020                  * NEED_WRITE, so we can set it to WRITTEN.
3021                  */
3022                 if (cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
3023                         cache->disk_cache_state = BTRFS_DC_WRITTEN;
3024                 last = cache->key.objectid + cache->key.offset;
3025                 btrfs_put_block_group(cache);
3026         }
3027
3028         btrfs_free_path(path);
3029         return 0;
3030 }
3031
3032 int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
3033 {
3034         struct btrfs_block_group_cache *block_group;
3035         int readonly = 0;
3036
3037         block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
3038         if (!block_group || block_group->ro)
3039                 readonly = 1;
3040         if (block_group)
3041                 btrfs_put_block_group(block_group);
3042         return readonly;
3043 }
3044
3045 static int update_space_info(struct btrfs_fs_info *info, u64 flags,
3046                              u64 total_bytes, u64 bytes_used,
3047                              struct btrfs_space_info **space_info)
3048 {
3049         struct btrfs_space_info *found;
3050         int i;
3051         int factor;
3052
3053         if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
3054                      BTRFS_BLOCK_GROUP_RAID10))
3055                 factor = 2;
3056         else
3057                 factor = 1;
3058
3059         found = __find_space_info(info, flags);
3060         if (found) {
3061                 spin_lock(&found->lock);
3062                 found->total_bytes += total_bytes;
3063                 found->disk_total += total_bytes * factor;
3064                 found->bytes_used += bytes_used;
3065                 found->disk_used += bytes_used * factor;
3066                 found->full = 0;
3067                 spin_unlock(&found->lock);
3068                 *space_info = found;
3069                 return 0;
3070         }
3071         found = kzalloc(sizeof(*found), GFP_NOFS);
3072         if (!found)
3073                 return -ENOMEM;
3074
3075         for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
3076                 INIT_LIST_HEAD(&found->block_groups[i]);
3077         init_rwsem(&found->groups_sem);
3078         spin_lock_init(&found->lock);
3079         found->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
3080         found->total_bytes = total_bytes;
3081         found->disk_total = total_bytes * factor;
3082         found->bytes_used = bytes_used;
3083         found->disk_used = bytes_used * factor;
3084         found->bytes_pinned = 0;
3085         found->bytes_reserved = 0;
3086         found->bytes_readonly = 0;
3087         found->bytes_may_use = 0;
3088         found->full = 0;
3089         found->force_alloc = CHUNK_ALLOC_NO_FORCE;
3090         found->chunk_alloc = 0;
3091         found->flush = 0;
3092         init_waitqueue_head(&found->wait);
3093         *space_info = found;
3094         list_add_rcu(&found->list, &info->space_info);
3095         return 0;
3096 }
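/*
 * Worked example (editor's note): adding a 1GB RAID1 block group calls
 * update_space_info() with total_bytes = 1GB.  RAID1, DUP and RAID10 keep
 * two copies, so factor = 2: total_bytes grows by 1GB of usable space
 * while disk_total grows by 2GB of raw disk.
 */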
3097
3098 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
3099 {
3100         u64 extra_flags = flags & BTRFS_BLOCK_GROUP_PROFILE_MASK;
3101
3102         /* chunk -> extended profile */
3103         if (extra_flags == 0)
3104                 extra_flags = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
3105
3106         if (flags & BTRFS_BLOCK_GROUP_DATA)
3107                 fs_info->avail_data_alloc_bits |= extra_flags;
3108         if (flags & BTRFS_BLOCK_GROUP_METADATA)
3109                 fs_info->avail_metadata_alloc_bits |= extra_flags;
3110         if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3111                 fs_info->avail_system_alloc_bits |= extra_flags;
3112 }
3113
3114 /*
3115  * @flags: available profiles in extended format (see ctree.h)
3116  *
3117  * Returns reduced profile in chunk format.  If profile changing is in
3118  * progress (either running or paused) picks the target profile (if it's
3119  * already available), otherwise falls back to plain reducing.
3120  */
3121 u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
3122 {
3123         /*
3124          * we add in the count of missing devices because we want
3125          * to make sure that any RAID levels on a degraded FS
3126          * continue to be honored.
3127          */
3128         u64 num_devices = root->fs_info->fs_devices->rw_devices +
3129                 root->fs_info->fs_devices->missing_devices;
3130
3131         /* pick restriper's target profile if it's available */
3132         spin_lock(&root->fs_info->balance_lock);
3133         if (root->fs_info->balance_ctl) {
3134                 struct btrfs_balance_control *bctl = root->fs_info->balance_ctl;
3135                 u64 tgt = 0;
3136
3137                 if ((flags & BTRFS_BLOCK_GROUP_DATA) &&
3138                     (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3139                     (flags & bctl->data.target)) {
3140                         tgt = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
3141                 } else if ((flags & BTRFS_BLOCK_GROUP_SYSTEM) &&
3142                            (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3143                            (flags & bctl->sys.target)) {
3144                         tgt = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
3145                 } else if ((flags & BTRFS_BLOCK_GROUP_METADATA) &&
3146                            (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3147                            (flags & bctl->meta.target)) {
3148                         tgt = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
3149                 }
3150
3151                 if (tgt) {
3152                         spin_unlock(&root->fs_info->balance_lock);
3153                         flags = tgt;
3154                         goto out;
3155                 }
3156         }
3157         spin_unlock(&root->fs_info->balance_lock);
3158
3159         if (num_devices == 1)
3160                 flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0);
3161         if (num_devices < 4)
3162                 flags &= ~BTRFS_BLOCK_GROUP_RAID10;
3163
3164         if ((flags & BTRFS_BLOCK_GROUP_DUP) &&
3165             (flags & (BTRFS_BLOCK_GROUP_RAID1 |
3166                       BTRFS_BLOCK_GROUP_RAID10))) {
3167                 flags &= ~BTRFS_BLOCK_GROUP_DUP;
3168         }
3169
3170         if ((flags & BTRFS_BLOCK_GROUP_RAID1) &&
3171             (flags & BTRFS_BLOCK_GROUP_RAID10)) {
3172                 flags &= ~BTRFS_BLOCK_GROUP_RAID1;
3173         }
3174
3175         if ((flags & BTRFS_BLOCK_GROUP_RAID0) &&
3176             ((flags & BTRFS_BLOCK_GROUP_RAID1) |
3177              (flags & BTRFS_BLOCK_GROUP_RAID10) |
3178              (flags & BTRFS_BLOCK_GROUP_DUP))) {
3179                 flags &= ~BTRFS_BLOCK_GROUP_RAID0;
3180         }
3181
3182 out:
3183         /* extended -> chunk profile */
3184         flags &= ~BTRFS_AVAIL_ALLOC_BIT_SINGLE;
3185         return flags;
3186 }
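/*
 * Worked example (editor's note): on a single-device filesystem,
 * flags = DATA | RAID0 | RAID1 loses both the striped and mirrored bits
 * (they need more devices), leaving plain DATA; the SINGLE bit is masked
 * off at the end because chunk-format profiles encode "single" as 0.
 */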
3187
3188 static u64 get_alloc_profile(struct btrfs_root *root, u64 flags)
3189 {
3190         if (flags & BTRFS_BLOCK_GROUP_DATA)
3191                 flags |= root->fs_info->avail_data_alloc_bits;
3192         else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3193                 flags |= root->fs_info->avail_system_alloc_bits;
3194         else if (flags & BTRFS_BLOCK_GROUP_METADATA)
3195                 flags |= root->fs_info->avail_metadata_alloc_bits;
3196
3197         return btrfs_reduce_alloc_profile(root, flags);
3198 }
3199
3200 u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
3201 {
3202         u64 flags;
3203
3204         if (data)
3205                 flags = BTRFS_BLOCK_GROUP_DATA;
3206         else if (root == root->fs_info->chunk_root)
3207                 flags = BTRFS_BLOCK_GROUP_SYSTEM;
3208         else
3209                 flags = BTRFS_BLOCK_GROUP_METADATA;
3210
3211         return get_alloc_profile(root, flags);
3212 }
3213
3214 void btrfs_set_inode_space_info(struct btrfs_root *root, struct inode *inode)
3215 {
3216         BTRFS_I(inode)->space_info = __find_space_info(root->fs_info,
3217                                                        BTRFS_BLOCK_GROUP_DATA);
3218 }
3219
3220 /*
3221  * This will check the space that the inode allocates from to make sure we have
3222  * enough space for the requested number of bytes.
3223  */
3224 int btrfs_check_data_free_space(struct inode *inode, u64 bytes)
3225 {
3226         struct btrfs_space_info *data_sinfo;
3227         struct btrfs_root *root = BTRFS_I(inode)->root;
3228         u64 used;
3229         int ret = 0, committed = 0, alloc_chunk = 1;
3230
3231         /* make sure bytes are sectorsize aligned */
3232         bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
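        /*
         * e.g. with a 4K sectorsize, a 5000 byte request rounds up to
         * 8192: (5000 + 4095) & ~4095 == 8192.
         */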
3233
3234         if (root == root->fs_info->tree_root ||
3235             BTRFS_I(inode)->location.objectid == BTRFS_FREE_INO_OBJECTID) {
3236                 alloc_chunk = 0;
3237                 committed = 1;
3238         }
3239
3240         data_sinfo = BTRFS_I(inode)->space_info;
3241         if (!data_sinfo)
3242                 goto alloc;
3243
3244 again:
3245         /* make sure we have enough space to handle the data first */
3246         spin_lock(&data_sinfo->lock);
3247         used = data_sinfo->bytes_used + data_sinfo->bytes_reserved +
3248                 data_sinfo->bytes_pinned + data_sinfo->bytes_readonly +
3249                 data_sinfo->bytes_may_use;
3250
3251         if (used + bytes > data_sinfo->total_bytes) {
3252                 struct btrfs_trans_handle *trans;
3253
3254                 /*
3255                  * if we don't have enough free bytes in this space then we need
3256                  * to alloc a new chunk.
3257                  */
3258                 if (!data_sinfo->full && alloc_chunk) {
3259                         u64 alloc_target;
3260
3261                         data_sinfo->force_alloc = CHUNK_ALLOC_FORCE;
3262                         spin_unlock(&data_sinfo->lock);
3263 alloc:
3264                         alloc_target = btrfs_get_alloc_profile(root, 1);
3265                         trans = btrfs_join_transaction(root);
3266                         if (IS_ERR(trans))
3267                                 return PTR_ERR(trans);
3268
3269                         ret = do_chunk_alloc(trans, root->fs_info->extent_root,
3270                                              bytes + 2 * 1024 * 1024,
3271                                              alloc_target,
3272                                              CHUNK_ALLOC_NO_FORCE);
3273                         btrfs_end_transaction(trans, root);
3274                         if (ret < 0) {
3275                                 if (ret != -ENOSPC)
3276                                         return ret;
3277                                 else
3278                                         goto commit_trans;
3279                         }
3280
3281                         if (!data_sinfo) {
3282                                 btrfs_set_inode_space_info(root, inode);
3283                                 data_sinfo = BTRFS_I(inode)->space_info;
3284                         }
3285                         goto again;
3286                 }
3287
3288                 /*
3289                  * If we have less pinned bytes than we want to allocate then
3290                  * don't bother committing the transaction, it won't help us.
3291                  */
3292                 if (data_sinfo->bytes_pinned < bytes)
3293                         committed = 1;
3294                 spin_unlock(&data_sinfo->lock);
3295
3296                 /* commit the current transaction and try again */
3297 commit_trans:
3298                 if (!committed &&
3299                     !atomic_read(&root->fs_info->open_ioctl_trans)) {
3300                         committed = 1;
3301                         trans = btrfs_join_transaction(root);
3302                         if (IS_ERR(trans))
3303                                 return PTR_ERR(trans);
3304                         ret = btrfs_commit_transaction(trans, root);
3305                         if (ret)
3306                                 return ret;
3307                         goto again;
3308                 }
3309
3310                 return -ENOSPC;
3311         }
3312         data_sinfo->bytes_may_use += bytes;
3313         trace_btrfs_space_reservation(root->fs_info, "space_info",
3314                                       (u64)data_sinfo, bytes, 1);
3315         spin_unlock(&data_sinfo->lock);
3316
3317         return 0;
3318 }
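/*
 * Usage sketch (editor's note): callers pair this with
 * btrfs_free_reserved_data_space() if the reservation ends up unused,
 * just as cache_save_setup() above does around its preallocation:
 *
 *	ret = btrfs_check_data_free_space(inode, num_bytes);
 *	if (ret)
 *		return ret;
 *	ret = do_the_write(inode, num_bytes);	// hypothetical caller work
 *	if (ret)
 *		btrfs_free_reserved_data_space(inode, num_bytes);
 */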
3319
3320 /*
3321  * Called if we need to clear a data reservation for this inode.
3322  */
3323 void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes)
3324 {
3325         struct btrfs_root *root = BTRFS_I(inode)->root;
3326         struct btrfs_space_info *data_sinfo;
3327
3328         /* make sure bytes are sectorsize aligned */
3329         bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
3330
3331         data_sinfo = BTRFS_I(inode)->space_info;
3332         spin_lock(&data_sinfo->lock);
3333         data_sinfo->bytes_may_use -= bytes;
3334         trace_btrfs_space_reservation(root->fs_info, "space_info",
3335                                       (u64)data_sinfo, bytes, 0);
3336         spin_unlock(&data_sinfo->lock);
3337 }
3338
3339 static void force_metadata_allocation(struct btrfs_fs_info *info)
3340 {
3341         struct list_head *head = &info->space_info;
3342         struct btrfs_space_info *found;
3343
3344         rcu_read_lock();
3345         list_for_each_entry_rcu(found, head, list) {
3346                 if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
3347                         found->force_alloc = CHUNK_ALLOC_FORCE;
3348         }
3349         rcu_read_unlock();
3350 }
3351
3352 static int should_alloc_chunk(struct btrfs_root *root,
3353                               struct btrfs_space_info *sinfo, u64 alloc_bytes,
3354                               int force)
3355 {
3356         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
3357         u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;
3358         u64 num_allocated = sinfo->bytes_used + sinfo->bytes_reserved;
3359         u64 thresh;
3360
3361         if (force == CHUNK_ALLOC_FORCE)
3362                 return 1;
3363
3364         /*
3365          * We need to take into account the global rsv because for all intents
3366          * and purposes it's used space.  Don't worry about locking the
3367          * global_rsv, it doesn't change except when the transaction commits.
3368          */
3369         num_allocated += global_rsv->size;
3370
3371         /*
3372          * in limited mode, we want to have some free space up to
3373          * about 1% of the FS size.
3374          */
3375         if (force == CHUNK_ALLOC_LIMITED) {
3376                 thresh = btrfs_super_total_bytes(root->fs_info->super_copy);
3377                 thresh = max_t(u64, 64 * 1024 * 1024,
3378                                div_factor_fine(thresh, 1));
3379
3380                 if (num_bytes - num_allocated < thresh)
3381                         return 1;
3382         }
3383         thresh = btrfs_super_total_bytes(root->fs_info->super_copy);
3384
3385         /* 256MB or 2% of the FS */
3386         thresh = max_t(u64, 256 * 1024 * 1024, div_factor_fine(thresh, 2));
3387
3388         if (num_bytes > thresh && sinfo->bytes_used < div_factor(num_bytes, 8))
3389                 return 0;
3390         return 1;
3391 }
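/*
 * Worked example (editor's note): on a 1TB filesystem, CHUNK_ALLOC_LIMITED
 * allocates while this space_info has less than max(64MB, 1% = ~10GB)
 * unallocated.  Outside limited mode, once the space_info exceeds
 * max(256MB, 2% = ~20GB) and is under 80% used, we return 0 and let the
 * existing chunks fill up first.
 */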
3392
3393 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
3394                           struct btrfs_root *extent_root, u64 alloc_bytes,
3395                           u64 flags, int force)
3396 {
3397         struct btrfs_space_info *space_info;
3398         struct btrfs_fs_info *fs_info = extent_root->fs_info;
3399         int wait_for_alloc = 0;
3400         int ret = 0;
3401
3402         BUG_ON(!profile_is_valid(flags, 0));
3403
3404         space_info = __find_space_info(extent_root->fs_info, flags);
3405         if (!space_info) {
3406                 ret = update_space_info(extent_root->fs_info, flags,
3407                                         0, 0, &space_info);
3408                 BUG_ON(ret);
3409         }
3410         BUG_ON(!space_info);
3411
3412 again:
3413         spin_lock(&space_info->lock);
3414         if (space_info->force_alloc)
3415                 force = space_info->force_alloc;
3416         if (space_info->full) {
3417                 spin_unlock(&space_info->lock);
3418                 return 0;
3419         }
3420
3421         if (!should_alloc_chunk(extent_root, space_info, alloc_bytes, force)) {
3422                 spin_unlock(&space_info->lock);
3423                 return 0;
3424         } else if (space_info->chunk_alloc) {
3425                 wait_for_alloc = 1;
3426         } else {
3427                 space_info->chunk_alloc = 1;
3428         }
3429
3430         spin_unlock(&space_info->lock);
3431
3432         mutex_lock(&fs_info->chunk_mutex);
3433
3434         /*
3435          * The chunk_mutex is held throughout the entirety of a chunk
3436          * allocation, so once we've acquired the chunk_mutex we know that the
3437          * other guy is done and we need to recheck and see if we should
3438          * allocate.
3439          */
3440         if (wait_for_alloc) {
3441                 mutex_unlock(&fs_info->chunk_mutex);
3442                 wait_for_alloc = 0;
3443                 goto again;
3444         }
3445
3446         /*
3447          * If we have mixed data/metadata chunks we want to make sure we keep
3448          * allocating mixed chunks instead of individual chunks.
3449          */
3450         if (btrfs_mixed_space_info(space_info))
3451                 flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);
3452
3453         /*
3454          * if we're doing a data chunk, go ahead and make sure that
3455          * we keep a reasonable number of metadata chunks allocated in the
3456          * FS as well.
3457          */
3458         if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
3459                 fs_info->data_chunk_allocations++;
3460                 if (!(fs_info->data_chunk_allocations %
3461                       fs_info->metadata_ratio))
3462                         force_metadata_allocation(fs_info);
3463         }
3464
3465         ret = btrfs_alloc_chunk(trans, extent_root, flags);
3466         if (ret < 0 && ret != -ENOSPC)
3467                 goto out;
3468
3469         spin_lock(&space_info->lock);
3470         if (ret)
3471                 space_info->full = 1;
3472         else
3473                 ret = 1;
3474
3475         space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
3476         space_info->chunk_alloc = 0;
3477         spin_unlock(&space_info->lock);
3478 out:
3479         mutex_unlock(&extent_root->fs_info->chunk_mutex);
3480         return ret;
3481 }
3482
3483 /*
3484  * shrink metadata reservation for delalloc
3485  */
3486 static int shrink_delalloc(struct btrfs_root *root, u64 to_reclaim,
3487                            bool wait_ordered)
3488 {
3489         struct btrfs_block_rsv *block_rsv;
3490         struct btrfs_space_info *space_info;
3491         struct btrfs_trans_handle *trans;
3492         u64 reserved;
3493         u64 max_reclaim;
3494         u64 reclaimed = 0;
3495         long time_left;
3496         unsigned long nr_pages = (2 * 1024 * 1024) >> PAGE_CACHE_SHIFT;
3497         int loops = 0;
3498         unsigned long progress;
3499
3500         trans = (struct btrfs_trans_handle *)current->journal_info;
3501         block_rsv = &root->fs_info->delalloc_block_rsv;
3502         space_info = block_rsv->space_info;
3503
3504         smp_mb();
3505         reserved = space_info->bytes_may_use;
3506         progress = space_info->reservation_progress;
3507
3508         if (reserved == 0)
3509                 return 0;
3510
3511         smp_mb();
3512         if (root->fs_info->delalloc_bytes == 0) {
3513                 if (trans)
3514                         return 0;
3515                 btrfs_wait_ordered_extents(root, 0, 0);
3516                 return 0;
3517         }
3518
3519         max_reclaim = min(reserved, to_reclaim);
3520         nr_pages = max_t(unsigned long, nr_pages,
3521                          max_reclaim >> PAGE_CACHE_SHIFT);
3522         while (loops < 1024) {
3523                 /* have the flusher threads jump in and do some IO */
3524                 smp_mb();
3525                 nr_pages = min_t(unsigned long, nr_pages,
3526                        root->fs_info->delalloc_bytes >> PAGE_CACHE_SHIFT);
3527                 writeback_inodes_sb_nr_if_idle(root->fs_info->sb, nr_pages,
3528                                                 WB_REASON_FS_FREE_SPACE);
3529
3530                 spin_lock(&space_info->lock);
3531                 if (reserved > space_info->bytes_may_use)
3532                         reclaimed += reserved - space_info->bytes_may_use;
3533                 reserved = space_info->bytes_may_use;
3534                 spin_unlock(&space_info->lock);
3535
3536                 loops++;
3537
3538                 if (reserved == 0 || reclaimed >= max_reclaim)
3539                         break;
3540
3541                 if (trans && trans->transaction->blocked)
3542                         return -EAGAIN;
3543
3544                 if (wait_ordered && !trans) {
3545                         btrfs_wait_ordered_extents(root, 0, 0);
3546                 } else {
3547                         time_left = schedule_timeout_interruptible(1);
3548
3549                         /* We were interrupted, exit */
3550                         if (time_left)
3551                                 break;
3552                 }
3553
3554                 /* we've kicked the IO a few times; if anything has been freed,
3555                  * exit.  There is no sense in looping here for a long time
3556                  * when we really need to commit the transaction, or there are
3557                  * just too many writers without enough free space.
3558                  */
3559
3560                 if (loops > 3) {
3561                         smp_mb();
3562                         if (progress != space_info->reservation_progress)
3563                                 break;
3564                 }
3565
3566         }
3567
3568         return reclaimed >= to_reclaim;
3569 }
3570
3571 /**
3572  * may_commit_transaction - possibly commit the transaction if it's ok to
3573  * @root - the root we're allocating for
3574  * @bytes - the number of bytes we want to reserve
3575  * @force - force the commit
3576  *
3577  * This will check to make sure that committing the transaction will actually
3578  * get us somewhere and then commit the transaction if it does.  Otherwise it
3579  * will return -ENOSPC.
3580  */
3581 static int may_commit_transaction(struct btrfs_root *root,
3582                                   struct btrfs_space_info *space_info,
3583                                   u64 bytes, int force)
3584 {
3585         struct btrfs_block_rsv *delayed_rsv = &root->fs_info->delayed_block_rsv;
3586         struct btrfs_trans_handle *trans;
3587
3588         trans = (struct btrfs_trans_handle *)current->journal_info;
3589         if (trans)
3590                 return -EAGAIN;
3591
3592         if (force)
3593                 goto commit;
3594
3595         /* See if there is enough pinned space to make this reservation */
3596         spin_lock(&space_info->lock);
3597         if (space_info->bytes_pinned >= bytes) {
3598                 spin_unlock(&space_info->lock);
3599                 goto commit;
3600         }
3601         spin_unlock(&space_info->lock);
3602
3603         /*
3604          * See if there is some space in the delayed insertion reservation for
3605          * this reservation.
3606          */
3607         if (space_info != delayed_rsv->space_info)
3608                 return -ENOSPC;
3609
3610         spin_lock(&delayed_rsv->lock);
3611         if (delayed_rsv->size < bytes) {
3612                 spin_unlock(&delayed_rsv->lock);
3613                 return -ENOSPC;
3614         }
3615         spin_unlock(&delayed_rsv->lock);
3616
3617 commit:
3618         trans = btrfs_join_transaction(root);
3619         if (IS_ERR(trans))
3620                 return -ENOSPC;
3621
3622         return btrfs_commit_transaction(trans, root);
3623 }
3624
3625 /**
3626  * reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
3627  * @root - the root we're allocating for
3628  * @block_rsv - the block_rsv we're allocating for
3629  * @orig_bytes - the number of bytes we want
3630  * @flush - whether or not we can flush to make our reservation
3631  *
3632  * This will reserve orig_bytes number of bytes from the space info associated
3633  * with the block_rsv.  If there is not enough space it will make an attempt to
3634  * flush out space to make room.  It will do this by flushing delalloc if
3635  * possible or committing the transaction.  If flush is 0 then no attempts to
3636  * regain reservations will be made and this will fail if there is not enough
3637  * space already.
3638  */
3639 static int reserve_metadata_bytes(struct btrfs_root *root,
3640                                   struct btrfs_block_rsv *block_rsv,
3641                                   u64 orig_bytes, int flush)
3642 {
3643         struct btrfs_space_info *space_info = block_rsv->space_info;
3644         u64 used;
3645         u64 num_bytes = orig_bytes;
3646         int retries = 0;
3647         int ret = 0;
3648         bool committed = false;
3649         bool flushing = false;
3650         bool wait_ordered = false;
3651
3652 again:
3653         ret = 0;
3654         spin_lock(&space_info->lock);
3655         /*
3656          * We only want to wait if somebody other than us is flushing and we are
3657          * actually allowed to flush.
3658          */
3659         while (flush && !flushing && space_info->flush) {
3660                 spin_unlock(&space_info->lock);
3661                 /*
3662                  * If we have a trans handle we can't wait because the flusher
3663                  * may have to commit the transaction, which would mean we would
3664                  * deadlock since we are waiting for the flusher to finish, but
3665                  * hold the current transaction open.
3666                  */
3667                 if (current->journal_info)
3668                         return -EAGAIN;
3669                 ret = wait_event_interruptible(space_info->wait,
3670                                                !space_info->flush);
3671                 /* Must have been interrupted, return */
3672                 if (ret)
3673                         return -EINTR;
3674
3675                 spin_lock(&space_info->lock);
3676         }
3677
3678         ret = -ENOSPC;
3679         used = space_info->bytes_used + space_info->bytes_reserved +
3680                 space_info->bytes_pinned + space_info->bytes_readonly +
3681                 space_info->bytes_may_use;
3682
3683         /*
3684          * The idea here is that if we've not already over-reserved the block
3685          * group then we can go ahead and save our reservation first and then
3686          * start flushing if we need to.  Otherwise, if we've already
3687          * overcommitted, let's start flushing stuff first and then come back
3688          * and try to make our reservation.
3689          */
3690         if (used <= space_info->total_bytes) {
3691                 if (used + orig_bytes <= space_info->total_bytes) {
3692                         space_info->bytes_may_use += orig_bytes;
3693                         trace_btrfs_space_reservation(root->fs_info,
3694                                                       "space_info",
3695                                                       (u64)space_info,
3696                                                       orig_bytes, 1);
3697                         ret = 0;
3698                 } else {
3699                         /*
3700                          * Ok, set num_bytes to orig_bytes since we aren't
3701                          * overcommitted; this way we only try to reclaim what
3702                          * we need.
3703                          */
3704                         num_bytes = orig_bytes;
3705                 }
3706         } else {
3707                 /*
3708                  * Ok, we're overcommitted; set num_bytes to the overcommitted
3709                  * amount plus the amount of bytes that we need for this
3710                  * reservation.
3711                  */
3712                 wait_ordered = true;
3713                 num_bytes = used - space_info->total_bytes +
3714                         (orig_bytes * (retries + 1));
3715         }
3716
3717         if (ret) {
3718                 u64 profile = btrfs_get_alloc_profile(root, 0);
3719                 u64 avail;
3720
3721                 /*
3722                  * If we have a lot of space that's pinned, don't bother doing
3723                  * the overcommit dance yet and just commit the transaction.
3724                  */
3725                 avail = (space_info->total_bytes - space_info->bytes_used) * 8;
3726                 do_div(avail, 10);
3727                 if (space_info->bytes_pinned >= avail && flush && !committed) {
3728                         space_info->flush = 1;
3729                         flushing = true;
3730                         spin_unlock(&space_info->lock);
3731                         ret = may_commit_transaction(root, space_info,
3732                                                      orig_bytes, 1);
3733                         if (ret)
3734                                 goto out;
3735                         committed = true;
3736                         goto again;
3737                 }
3738
3739                 spin_lock(&root->fs_info->free_chunk_lock);
3740                 avail = root->fs_info->free_chunk_space;
3741
3742                 /*
3743                  * If we have dup, raid1 or raid10 then only half of the free
3744                  * space is actually usable.
3745                  */
3746                 if (profile & (BTRFS_BLOCK_GROUP_DUP |
3747                                BTRFS_BLOCK_GROUP_RAID1 |
3748                                BTRFS_BLOCK_GROUP_RAID10))
3749                         avail >>= 1;
3750
3751                 /*
3752                  * If we aren't flushing don't let us overcommit too much, say
3753                  * 1/8th of the space.  If we can flush, let it overcommit up to
3754                  * 1/2 of the space.
3755                  */
3756                 if (flush)
3757                         avail >>= 3;
3758                 else
3759                         avail >>= 1;
3760                 spin_unlock(&root->fs_info->free_chunk_lock);
3761
3762                 if (used + num_bytes < space_info->total_bytes + avail) {
3763                         space_info->bytes_may_use += orig_bytes;
3764                         trace_btrfs_space_reservation(root->fs_info,
3765                                                       "space_info",
3766                                                       (u64)space_info,
3767                                                       orig_bytes, 1);
3768                         ret = 0;
3769                 } else {
3770                         wait_ordered = true;
3771                 }
3772         }
3773
3774         /*
3775          * Couldn't make our reservation; save our place so while we're trying
3776          * to reclaim space we can actually use it instead of somebody else
3777          * stealing it from us.
3778          */
3779         if (ret && flush) {
3780                 flushing = true;
3781                 space_info->flush = 1;
3782         }
3783
3784         spin_unlock(&space_info->lock);
3785
3786         if (!ret || !flush)
3787                 goto out;
3788
3789         /*
3790          * We do synchronous shrinking since we don't actually unreserve
3791          * metadata until after the IO is completed.
3792          */
3793         ret = shrink_delalloc(root, num_bytes, wait_ordered);
3794         if (ret < 0)
3795                 goto out;
3796
3797         ret = 0;
3798
3799         /*
3800          * So if we were overcommitted it's possible that somebody else flushed
3801          * out enough space and we simply didn't have enough space to reclaim,
3802          * so go back around and try again.
3803          */
3804         if (retries < 2) {
3805                 wait_ordered = true;
3806                 retries++;
3807                 goto again;
3808         }
3809
3810         ret = -ENOSPC;
3811         if (committed)
3812                 goto out;
3813
3814         ret = may_commit_transaction(root, space_info, orig_bytes, 0);
3815         if (!ret) {
3816                 committed = true;
3817                 goto again;
3818         }
3819
3820 out:
3821         if (flushing) {
3822                 spin_lock(&space_info->lock);
3823                 space_info->flush = 0;
3824                 wake_up_all(&space_info->wait);
3825                 spin_unlock(&space_info->lock);
3826         }
3827         return ret;
3828 }
3829
3830 static struct btrfs_block_rsv *get_block_rsv(struct btrfs_trans_handle *trans,
3831                                              struct btrfs_root *root)
3832 {
3833         struct btrfs_block_rsv *block_rsv = NULL;
3834
3835         if (root->ref_cows || root == root->fs_info->csum_root)
3836                 block_rsv = trans->block_rsv;
3837
3838         if (!block_rsv)
3839                 block_rsv = root->block_rsv;
3840
3841         if (!block_rsv)
3842                 block_rsv = &root->fs_info->empty_block_rsv;
3843
3844         return block_rsv;
3845 }
3846
3847 static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
3848                                u64 num_bytes)
3849 {
3850         int ret = -ENOSPC;
3851         spin_lock(&block_rsv->lock);
3852         if (block_rsv->reserved >= num_bytes) {
3853                 block_rsv->reserved -= num_bytes;
3854                 if (block_rsv->reserved < block_rsv->size)
3855                         block_rsv->full = 0;
3856                 ret = 0;
3857         }
3858         spin_unlock(&block_rsv->lock);
3859         return ret;
3860 }
3861
3862 static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
3863                                 u64 num_bytes, int update_size)
3864 {
3865         spin_lock(&block_rsv->lock);
3866         block_rsv->reserved += num_bytes;
3867         if (update_size)
3868                 block_rsv->size += num_bytes;
3869         else if (block_rsv->reserved >= block_rsv->size)
3870                 block_rsv->full = 1;
3871         spin_unlock(&block_rsv->lock);
3872 }
3873
3874 static void block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
3875                                     struct btrfs_block_rsv *block_rsv,
3876                                     struct btrfs_block_rsv *dest, u64 num_bytes)
3877 {
3878         struct btrfs_space_info *space_info = block_rsv->space_info;
3879
3880         spin_lock(&block_rsv->lock);
3881         if (num_bytes == (u64)-1)
3882                 num_bytes = block_rsv->size;
3883         block_rsv->size -= num_bytes;
3884         if (block_rsv->reserved >= block_rsv->size) {
3885                 num_bytes = block_rsv->reserved - block_rsv->size;
3886                 block_rsv->reserved = block_rsv->size;
3887                 block_rsv->full = 1;
3888         } else {
3889                 num_bytes = 0;
3890         }
3891         spin_unlock(&block_rsv->lock);
3892
3893         if (num_bytes > 0) {
3894                 if (dest) {
3895                         spin_lock(&dest->lock);
3896                         if (!dest->full) {
3897                                 u64 bytes_to_add;
3898
3899                                 bytes_to_add = dest->size - dest->reserved;
3900                                 bytes_to_add = min(num_bytes, bytes_to_add);
3901                                 dest->reserved += bytes_to_add;
3902                                 if (dest->reserved >= dest->size)
3903                                         dest->full = 1;
3904                                 num_bytes -= bytes_to_add;
3905                         }
3906                         spin_unlock(&dest->lock);
3907                 }
3908                 if (num_bytes) {
3909                         spin_lock(&space_info->lock);
3910                         space_info->bytes_may_use -= num_bytes;
3911                         trace_btrfs_space_reservation(fs_info, "space_info",
3912                                                       (u64)space_info,
3913                                                       num_bytes, 0);
3914                         space_info->reservation_progress++;
3915                         spin_unlock(&space_info->lock);
3916                 }
3917         }
3918 }
3919
3920 static int block_rsv_migrate_bytes(struct btrfs_block_rsv *src,
3921                                    struct btrfs_block_rsv *dst, u64 num_bytes)
3922 {
3923         int ret;
3924
3925         ret = block_rsv_use_bytes(src, num_bytes);
3926         if (ret)
3927                 return ret;
3928
3929         block_rsv_add_bytes(dst, num_bytes, 1);
3930         return 0;
3931 }
3932
3933 void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv)
3934 {
3935         memset(rsv, 0, sizeof(*rsv));
3936         spin_lock_init(&rsv->lock);
3937 }
3938
3939 struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root)
3940 {
3941         struct btrfs_block_rsv *block_rsv;
3942         struct btrfs_fs_info *fs_info = root->fs_info;
3943
3944         block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS);
3945         if (!block_rsv)
3946                 return NULL;
3947
3948         btrfs_init_block_rsv(block_rsv);
3949         block_rsv->space_info = __find_space_info(fs_info,
3950                                                   BTRFS_BLOCK_GROUP_METADATA);
3951         return block_rsv;
3952 }
3953
3954 void btrfs_free_block_rsv(struct btrfs_root *root,
3955                           struct btrfs_block_rsv *rsv)
3956 {
3957         btrfs_block_rsv_release(root, rsv, (u64)-1);
3958         kfree(rsv);
3959 }
3960
3961 static inline int __block_rsv_add(struct btrfs_root *root,
3962                                   struct btrfs_block_rsv *block_rsv,
3963                                   u64 num_bytes, int flush)
3964 {
3965         int ret;
3966
3967         if (num_bytes == 0)
3968                 return 0;
3969
3970         ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
3971         if (!ret) {
3972                 block_rsv_add_bytes(block_rsv, num_bytes, 1);
3973                 return 0;
3974         }
3975
3976         return ret;
3977 }
3978
3979 int btrfs_block_rsv_add(struct btrfs_root *root,
3980                         struct btrfs_block_rsv *block_rsv,
3981                         u64 num_bytes)
3982 {
3983         return __block_rsv_add(root, block_rsv, num_bytes, 1);
3984 }
3985
3986 int btrfs_block_rsv_add_noflush(struct btrfs_root *root,
3987                                 struct btrfs_block_rsv *block_rsv,
3988                                 u64 num_bytes)
3989 {
3990         return __block_rsv_add(root, block_rsv, num_bytes, 0);
3991 }
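/*
 * Usage sketch (editor's note): callers that can safely block use
 * btrfs_block_rsv_add(), letting reserve_metadata_bytes() flush delalloc
 * or commit the transaction to find space.  Callers holding locks the
 * flusher might need use the noflush variant and cope with failure:
 *
 *	ret = btrfs_block_rsv_add_noflush(root, rsv, num_bytes);
 *	if (ret)
 *		return ret;	// nothing was reclaimed on our behalf
 */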
3992
3993 int btrfs_block_rsv_check(struct btrfs_root *root,
3994                           struct btrfs_block_rsv *block_rsv, int min_factor)
3995 {
3996         u64 num_bytes = 0;
3997         int ret = -ENOSPC;
3998
3999         if (!block_rsv)
4000                 return 0;
4001
4002         spin_lock(&block_rsv->lock);
4003         num_bytes = div_factor(block_rsv->size, min_factor);
4004         if (block_rsv->reserved >= num_bytes)
4005                 ret = 0;
4006         spin_unlock(&block_rsv->lock);
4007
4008         return ret;
4009 }
4010
4011 static inline int __btrfs_block_rsv_refill(struct btrfs_root *root,
4012                                            struct btrfs_block_rsv *block_rsv,
4013                                            u64 min_reserved, int flush)
4014 {
4015         u64 num_bytes = 0;
4016         int ret = -ENOSPC;
4017
4018         if (!block_rsv)
4019                 return 0;
4020
4021         spin_lock(&block_rsv->lock);
4022         num_bytes = min_reserved;
4023         if (block_rsv->reserved >= num_bytes)
4024                 ret = 0;
4025         else
4026                 num_bytes -= block_rsv->reserved;
4027         spin_unlock(&block_rsv->lock);
4028
4029         if (!ret)
4030                 return 0;
4031
4032         ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
4033         if (!ret) {
4034                 block_rsv_add_bytes(block_rsv, num_bytes, 0);
4035                 return 0;
4036         }
4037
4038         return ret;
4039 }
4040
4041 int btrfs_block_rsv_refill(struct btrfs_root *root,
4042                            struct btrfs_block_rsv *block_rsv,
4043                            u64 min_reserved)
4044 {
4045         return __btrfs_block_rsv_refill(root, block_rsv, min_reserved, 1);
4046 }
4047
4048 int btrfs_block_rsv_refill_noflush(struct btrfs_root *root,
4049                                    struct btrfs_block_rsv *block_rsv,
4050                                    u64 min_reserved)
4051 {
4052         return __btrfs_block_rsv_refill(root, block_rsv, min_reserved, 0);
4053 }
4054
4055 int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
4056                             struct btrfs_block_rsv *dst_rsv,
4057                             u64 num_bytes)
4058 {
4059         return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
4060 }
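/*
 * Sketch (editor's note): migrating moves already-reserved bytes between
 * two reservations without touching the space_info counters, e.g. carving
 * a per-item reservation out of the transaction's rsv:
 *
 *	ret = btrfs_block_rsv_migrate(trans->block_rsv, dst_rsv, num_bytes);
 *	if (ret)	// the source didn't have num_bytes reserved
 *		return ret;
 */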
4061
4062 void btrfs_block_rsv_release(struct btrfs_root *root,
4063                              struct btrfs_block_rsv *block_rsv,
4064                              u64 num_bytes)
4065 {
4066         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
4067         if (global_rsv->full || global_rsv == block_rsv ||
4068             block_rsv->space_info != global_rsv->space_info)
4069                 global_rsv = NULL;
4070         block_rsv_release_bytes(root->fs_info, block_rsv, global_rsv,
4071                                 num_bytes);
4072 }
4073
4074 /*
4075  * helper to calculate size of global block reservation.
4076  * the desired value is the sum of space used by the extent tree,
4077  * checksum tree and root tree.
4078  */
4079 static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info)
4080 {
4081         struct btrfs_space_info *sinfo;
4082         u64 num_bytes;
4083         u64 meta_used;
4084         u64 data_used;
4085         int csum_size = btrfs_super_csum_size(fs_info->super_copy);
4086
4087         sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
4088         spin_lock(&sinfo->lock);
4089         data_used = sinfo->bytes_used;
4090         spin_unlock(&sinfo->lock);
4091
4092         sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4093         spin_lock(&sinfo->lock);
4094         if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA)
4095                 data_used = 0;
4096         meta_used = sinfo->bytes_used;
4097         spin_unlock(&sinfo->lock);
4098
4099         num_bytes = (data_used >> fs_info->sb->s_blocksize_bits) *
4100                     csum_size * 2;
4101         num_bytes += div64_u64(data_used + meta_used, 50);
4102
4103         if (num_bytes * 3 > meta_used)
4104                 num_bytes = div64_u64(meta_used, 3);
4105
4106         return ALIGN(num_bytes, fs_info->extent_root->leafsize << 10);
4107 }
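
/*
 * Worked example for the calculation above (illustrative numbers, not from
 * the original source): with data_used = 1G, meta_used = 256M, 4K blocks
 * and 4 byte csums, the csum term is (1G >> 12) * 4 * 2 = 2M and the 2%
 * term is (1G + 256M) / 50 ~= 25.6M, so num_bytes ~= 27.6M.  Since
 * 3 * 27.6M is still below meta_used, the meta_used / 3 cap does not kick
 * in, and the total is rounded up to the leafsize << 10 boundary.
 */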
4108
4109 static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
4110 {
4111         struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
4112         struct btrfs_space_info *sinfo = block_rsv->space_info;
4113         u64 num_bytes;
4114
4115         num_bytes = calc_global_metadata_size(fs_info);
4116
4117         spin_lock(&block_rsv->lock);
4118         spin_lock(&sinfo->lock);
4119
4120         block_rsv->size = num_bytes;
4121
4122         num_bytes = sinfo->bytes_used + sinfo->bytes_pinned +
4123                     sinfo->bytes_reserved + sinfo->bytes_readonly +
4124                     sinfo->bytes_may_use;
4125
4126         if (sinfo->total_bytes > num_bytes) {
4127                 num_bytes = sinfo->total_bytes - num_bytes;
4128                 block_rsv->reserved += num_bytes;
4129                 sinfo->bytes_may_use += num_bytes;
4130                 trace_btrfs_space_reservation(fs_info, "space_info",
4131                                               (u64)sinfo, num_bytes, 1);
4132         }
4133
4134         if (block_rsv->reserved >= block_rsv->size) {
4135                 num_bytes = block_rsv->reserved - block_rsv->size;
4136                 sinfo->bytes_may_use -= num_bytes;
4137                 trace_btrfs_space_reservation(fs_info, "space_info",
4138                                               (u64)sinfo, num_bytes, 0);
4139                 sinfo->reservation_progress++;
4140                 block_rsv->reserved = block_rsv->size;
4141                 block_rsv->full = 1;
4142         }
4143
4144         spin_unlock(&sinfo->lock);
4145         spin_unlock(&block_rsv->lock);
4146 }
4147
4148 static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
4149 {
4150         struct btrfs_space_info *space_info;
4151
4152         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
4153         fs_info->chunk_block_rsv.space_info = space_info;
4154
4155         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4156         fs_info->global_block_rsv.space_info = space_info;
4157         fs_info->delalloc_block_rsv.space_info = space_info;
4158         fs_info->trans_block_rsv.space_info = space_info;
4159         fs_info->empty_block_rsv.space_info = space_info;
4160         fs_info->delayed_block_rsv.space_info = space_info;
4161
4162         fs_info->extent_root->block_rsv = &fs_info->global_block_rsv;
4163         fs_info->csum_root->block_rsv = &fs_info->global_block_rsv;
4164         fs_info->dev_root->block_rsv = &fs_info->global_block_rsv;
4165         fs_info->tree_root->block_rsv = &fs_info->global_block_rsv;
4166         fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv;
4167
4168         update_global_block_rsv(fs_info);
4169 }
4170
4171 static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
4172 {
4173         block_rsv_release_bytes(fs_info, &fs_info->global_block_rsv, NULL,
4174                                 (u64)-1);
4175         WARN_ON(fs_info->delalloc_block_rsv.size > 0);
4176         WARN_ON(fs_info->delalloc_block_rsv.reserved > 0);
4177         WARN_ON(fs_info->trans_block_rsv.size > 0);
4178         WARN_ON(fs_info->trans_block_rsv.reserved > 0);
4179         WARN_ON(fs_info->chunk_block_rsv.size > 0);
4180         WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
4181         WARN_ON(fs_info->delayed_block_rsv.size > 0);
4182         WARN_ON(fs_info->delayed_block_rsv.reserved > 0);
4183 }
4184
4185 void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
4186                                   struct btrfs_root *root)
4187 {
4188         if (!trans->bytes_reserved)
4189                 return;
4190
4191         trace_btrfs_space_reservation(root->fs_info, "transaction", (u64)trans,
4192                                       trans->bytes_reserved, 0);
4193         btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
4194         trans->bytes_reserved = 0;
4195 }
4196
4197 int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
4198                                   struct inode *inode)
4199 {
4200         struct btrfs_root *root = BTRFS_I(inode)->root;
4201         struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
4202         struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv;
4203
4204         /*
4205          * We need to hold space in order to delete our orphan item once we've
4206          * added it, so this takes the reservation so we can release it later
4207          * when we are truly done with the orphan item.
4208          */
4209         u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
4210         trace_btrfs_space_reservation(root->fs_info, "orphan",
4211                                       btrfs_ino(inode), num_bytes, 1);
4212         return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
4213 }
4214
4215 void btrfs_orphan_release_metadata(struct inode *inode)
4216 {
4217         struct btrfs_root *root = BTRFS_I(inode)->root;
4218         u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
4219         trace_btrfs_space_reservation(root->fs_info, "orphan",
4220                                       btrfs_ino(inode), num_bytes, 0);
4221         btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
4222 }
4223
4224 int btrfs_snap_reserve_metadata(struct btrfs_trans_handle *trans,
4225                                 struct btrfs_pending_snapshot *pending)
4226 {
4227         struct btrfs_root *root = pending->root;
4228         struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
4229         struct btrfs_block_rsv *dst_rsv = &pending->block_rsv;
4230         /*
4231          * two for root back/forward refs, two for directory entries
4232          * and one for root of the snapshot.
4233          */
4234         u64 num_bytes = btrfs_calc_trans_metadata_size(root, 5);
4235         dst_rsv->space_info = src_rsv->space_info;
4236         return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
4237 }
4238
4239 /**
4240  * drop_outstanding_extent - drop an outstanding extent
4241  * @inode: the inode we're dropping the extent for
4242  *
4243  * This is called when we are freeing up an outstanding extent, either
4244  * after an error or after an extent is written.  This will return the number of
4245  * reserved extents that need to be freed.  This must be called with
4246  * BTRFS_I(inode)->lock held.
4247  */
4248 static unsigned drop_outstanding_extent(struct inode *inode)
4249 {
4250         unsigned drop_inode_space = 0;
4251         unsigned dropped_extents = 0;
4252
4253         BUG_ON(!BTRFS_I(inode)->outstanding_extents);
4254         BTRFS_I(inode)->outstanding_extents--;
4255
4256         if (BTRFS_I(inode)->outstanding_extents == 0 &&
4257             BTRFS_I(inode)->delalloc_meta_reserved) {
4258                 drop_inode_space = 1;
4259                 BTRFS_I(inode)->delalloc_meta_reserved = 0;
4260         }
4261
4262         /*
4263          * If we have at least as many outstanding extents as we have
4264          * reserved, then we need to leave the reserved extents count alone.
4265          */
4266         if (BTRFS_I(inode)->outstanding_extents >=
4267             BTRFS_I(inode)->reserved_extents)
4268                 return drop_inode_space;
4269
4270         dropped_extents = BTRFS_I(inode)->reserved_extents -
4271                 BTRFS_I(inode)->outstanding_extents;
4272         BTRFS_I(inode)->reserved_extents -= dropped_extents;
4273         return dropped_extents + drop_inode_space;
4274 }
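
/*
 * Worked example (illustrative, not from the original source): with
 * outstanding_extents = 1, reserved_extents = 2 and delalloc_meta_reserved
 * set, the decrement leaves no outstanding extents, so the inode update
 * reservation is dropped (drop_inode_space = 1) and dropped_extents is
 * 2 - 0 = 2, for a total of 3 reservations handed back to the caller.
 */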
4275
4276 /**
4277  * calc_csum_metadata_size - return the amount of metadata space that must be
4278  *      reserved/freed for the given bytes.
4279  * @inode: the inode we're manipulating
4280  * @num_bytes: the number of bytes in question
4281  * @reserve: 1 if we are reserving space, 0 if we are freeing space
4282  *
4283  * This adjusts the number of csum_bytes in the inode and then returns the
4284  * correct amount of metadata that must either be reserved or freed.  We
4285  * calculate how many checksums we can fit into one leaf and then divide the
4286  * number of bytes that will need to be checksummed by this value to figure out
4287  * how many checksums will be required.  If we are adding bytes then the number
4288  * may go up and we will return the number of additional bytes that must be
4289  * reserved.  If it is going down we will return the number of bytes that must
4290  * be freed.
4291  *
4292  * This must be called with BTRFS_I(inode)->lock held.
4293  */
4294 static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes,
4295                                    int reserve)
4296 {
4297         struct btrfs_root *root = BTRFS_I(inode)->root;
4298         u64 csum_size;
4299         int num_csums_per_leaf;
4300         int num_csums;
4301         int old_csums;
4302
4303         if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM &&
4304             BTRFS_I(inode)->csum_bytes == 0)
4305                 return 0;
4306
4307         old_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
4308         if (reserve)
4309                 BTRFS_I(inode)->csum_bytes += num_bytes;
4310         else
4311                 BTRFS_I(inode)->csum_bytes -= num_bytes;
4312         csum_size = BTRFS_LEAF_DATA_SIZE(root) - sizeof(struct btrfs_item);
4313         num_csums_per_leaf = (int)div64_u64(csum_size,
4314                                             sizeof(struct btrfs_csum_item) +
4315                                             sizeof(struct btrfs_disk_key));
4316         num_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
4317         num_csums = num_csums + num_csums_per_leaf - 1;
4318         num_csums = num_csums / num_csums_per_leaf;
4319
4320         old_csums = old_csums + num_csums_per_leaf - 1;
4321         old_csums = old_csums / num_csums_per_leaf;
4322
4323         /* No change, no need to reserve more */
4324         if (old_csums == num_csums)
4325                 return 0;
4326
4327         if (reserve)
4328                 return btrfs_calc_trans_metadata_size(root,
4329                                                       num_csums - old_csums);
4330
4331         return btrfs_calc_trans_metadata_size(root, old_csums - num_csums);
4332 }
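
/*
 * Worked example (illustrative numbers, not from the original source):
 * suppose num_csums_per_leaf works out to 100 and csum_bytes covered 300
 * sectors before this call.  Reserving enough bytes to grow that to 450
 * sectors moves the rounded-up leaf count from ceil(300/100) = 3 to
 * ceil(450/100) = 5, so btrfs_calc_trans_metadata_size(root, 2) more
 * bytes are reserved.  Freeing runs the same arithmetic in reverse.
 */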
4333
4334 int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
4335 {
4336         struct btrfs_root *root = BTRFS_I(inode)->root;
4337         struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
4338         u64 to_reserve = 0;
4339         u64 csum_bytes;
4340         unsigned nr_extents = 0;
4341         int extra_reserve = 0;
4342         int flush = 1;
4343         int ret;
4344
4345         /* Need to be holding the i_mutex here if we aren't the free space inode */
4346         if (btrfs_is_free_space_inode(root, inode))
4347                 flush = 0;
4348
4349         if (flush && btrfs_transaction_in_commit(root->fs_info))
4350                 schedule_timeout(1);
4351
4352         mutex_lock(&BTRFS_I(inode)->delalloc_mutex);
4353         num_bytes = ALIGN(num_bytes, root->sectorsize);
4354
4355         spin_lock(&BTRFS_I(inode)->lock);
4356         BTRFS_I(inode)->outstanding_extents++;
4357
4358         if (BTRFS_I(inode)->outstanding_extents >
4359             BTRFS_I(inode)->reserved_extents)
4360                 nr_extents = BTRFS_I(inode)->outstanding_extents -
4361                         BTRFS_I(inode)->reserved_extents;
4362
4363         /*
4364          * Add an item to reserve for updating the inode when we complete the
4365          * delalloc io.
4366          */
4367         if (!BTRFS_I(inode)->delalloc_meta_reserved) {
4368                 nr_extents++;
4369                 extra_reserve = 1;
4370         }
4371
4372         to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
4373         to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
4374         csum_bytes = BTRFS_I(inode)->csum_bytes;
4375         spin_unlock(&BTRFS_I(inode)->lock);
4376
4377         ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush);
4378         if (ret) {
4379                 u64 to_free = 0;
4380                 unsigned dropped;
4381
4382                 spin_lock(&BTRFS_I(inode)->lock);
4383                 dropped = drop_outstanding_extent(inode);
4384                 /*
4385                  * If the inode's csum_bytes is the same as the original
4386                  * csum_bytes then we know we haven't raced with any free()ers,
4387                  * so we can just reduce our inode's csum bytes and carry on.
4388                  * Otherwise we have to do the normal free thing to account for
4389                  * the case that the free side didn't free up its reserve
4390                  * because of this outstanding reservation.
4391                  */
4392                 if (BTRFS_I(inode)->csum_bytes == csum_bytes)
4393                         calc_csum_metadata_size(inode, num_bytes, 0);
4394                 else
4395                         to_free = calc_csum_metadata_size(inode, num_bytes, 0);
4396                 spin_unlock(&BTRFS_I(inode)->lock);
4397                 if (dropped)
4398                         to_free += btrfs_calc_trans_metadata_size(root, dropped);
4399
4400                 if (to_free) {
4401                         btrfs_block_rsv_release(root, block_rsv, to_free);
4402                         trace_btrfs_space_reservation(root->fs_info,
4403                                                       "delalloc",
4404                                                       btrfs_ino(inode),
4405                                                       to_free, 0);
4406                 }
4407                 mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
4408                 return ret;
4409         }
4410
4411         spin_lock(&BTRFS_I(inode)->lock);
4412         if (extra_reserve) {
4413                 BTRFS_I(inode)->delalloc_meta_reserved = 1;
4414                 nr_extents--;
4415         }
4416         BTRFS_I(inode)->reserved_extents += nr_extents;
4417         spin_unlock(&BTRFS_I(inode)->lock);
4418         mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
4419
4420         if (to_reserve)
4421                 trace_btrfs_space_reservation(root->fs_info, "delalloc",
4422                                               btrfs_ino(inode), to_reserve, 1);
4423         block_rsv_add_bytes(block_rsv, to_reserve, 1);
4424
4425         return 0;
4426 }
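
/*
 * Typical pairing for the function above (a sketch with a hypothetical
 * caller, not from the original source): every successful reservation
 * must eventually be balanced by a release once the IO completes or the
 * write is aborted:
 *
 *        ret = btrfs_delalloc_reserve_metadata(inode, len);
 *        if (ret)
 *                return ret;
 *        ... dirty the pages, let the delalloc IO run ...
 *        btrfs_delalloc_release_metadata(inode, len);
 */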
4427
4428 /**
4429  * btrfs_delalloc_release_metadata - release a metadata reservation for an inode
4430  * @inode: the inode to release the reservation for
4431  * @num_bytes: the number of bytes we're releasing
4432  *
4433  * This will release the metadata reservation for an inode.  This can be called
4434  * once we complete IO for a given set of bytes to release their metadata
4435  * reservations.
4436  */
4437 void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
4438 {
4439         struct btrfs_root *root = BTRFS_I(inode)->root;
4440         u64 to_free = 0;
4441         unsigned dropped;
4442
4443         num_bytes = ALIGN(num_bytes, root->sectorsize);
4444         spin_lock(&BTRFS_I(inode)->lock);
4445         dropped = drop_outstanding_extent(inode);
4446
4447         to_free = calc_csum_metadata_size(inode, num_bytes, 0);
4448         spin_unlock(&BTRFS_I(inode)->lock);
4449         if (dropped > 0)
4450                 to_free += btrfs_calc_trans_metadata_size(root, dropped);
4451
4452         trace_btrfs_space_reservation(root->fs_info, "delalloc",
4453                                       btrfs_ino(inode), to_free, 0);
4454         btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
4455                                 to_free);
4456 }
4457
4458 /**
4459  * btrfs_delalloc_reserve_space - reserve data and metadata space for delalloc
4460  * @inode: inode we're writing to
4461  * @num_bytes: the number of bytes we want to allocate
4462  *
4463  * This will do the following things
4464  *
4465  * o reserve space in the data space info for num_bytes
4466  * o reserve space in the metadata space info based on number of outstanding
4467  *   extents and how many csums will be needed
4468  * o add to the inode's ->delalloc_bytes
4469  * o add it to the fs_info's delalloc inodes list.
4470  *
4471  * This will return 0 for success and -ENOSPC if there is no space left.
4472  */
4473 int btrfs_delalloc_reserve_space(struct inode *inode, u64 num_bytes)
4474 {
4475         int ret;
4476
4477         ret = btrfs_check_data_free_space(inode, num_bytes);
4478         if (ret)
4479                 return ret;
4480
4481         ret = btrfs_delalloc_reserve_metadata(inode, num_bytes);
4482         if (ret) {
4483                 btrfs_free_reserved_data_space(inode, num_bytes);
4484                 return ret;
4485         }
4486
4487         return 0;
4488 }
4489
4490 /**
4491  * btrfs_delalloc_release_space - release data and metadata space for delalloc
4492  * @inode: inode we're releasing space for
4493  * @num_bytes: the number of bytes we want to free up
4494  *
4495  * This must be matched with a call to btrfs_delalloc_reserve_space.  This is
4496  * called in the case that we don't need the metadata AND data reservations
4497  * anymore, for example if there is an error or we insert an inline extent.
4498  *
4499  * This function will release the metadata space that was not used and will
4500  * decrement ->delalloc_bytes and remove it from the fs_info delalloc_inodes
4501  * list if there are no delalloc bytes left.
4502  */
4503 void btrfs_delalloc_release_space(struct inode *inode, u64 num_bytes)
4504 {
4505         btrfs_delalloc_release_metadata(inode, num_bytes);
4506         btrfs_free_reserved_data_space(inode, num_bytes);
4507 }
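
/*
 * Sketch of the combined data + metadata flow (hypothetical caller, not
 * from the original source):
 *
 *        ret = btrfs_delalloc_reserve_space(inode, num_bytes);
 *        if (ret)
 *                return ret;        (no data or metadata space left)
 *        ... the write errors out, or becomes an inline extent ...
 *        btrfs_delalloc_release_space(inode, num_bytes);
 *
 * On the normal completion path only the metadata half is given back,
 * via btrfs_delalloc_release_metadata(), since the reserved data space
 * is consumed by the allocation itself.
 */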
4508
4509 static int update_block_group(struct btrfs_trans_handle *trans,
4510                               struct btrfs_root *root,
4511                               u64 bytenr, u64 num_bytes, int alloc)
4512 {
4513         struct btrfs_block_group_cache *cache = NULL;
4514         struct btrfs_fs_info *info = root->fs_info;
4515         u64 total = num_bytes;
4516         u64 old_val;
4517         u64 byte_in_group;
4518         int factor;
4519
4520         /* block accounting for super block */
4521         spin_lock(&info->delalloc_lock);
4522         old_val = btrfs_super_bytes_used(info->super_copy);
4523         if (alloc)
4524                 old_val += num_bytes;
4525         else
4526                 old_val -= num_bytes;
4527         btrfs_set_super_bytes_used(info->super_copy, old_val);
4528         spin_unlock(&info->delalloc_lock);
4529
4530         while (total) {
4531                 cache = btrfs_lookup_block_group(info, bytenr);
4532                 if (!cache)
4533                         return -1;
4534                 if (cache->flags & (BTRFS_BLOCK_GROUP_DUP |
4535                                     BTRFS_BLOCK_GROUP_RAID1 |
4536                                     BTRFS_BLOCK_GROUP_RAID10))
4537                         factor = 2;
4538                 else
4539                         factor = 1;
4540                 /*
4541                  * If this block group has free space cache written out, we
4542                  * need to make sure to load it if we are removing space.  This
4543                  * is because we need the unpinning stage to actually add the
4544                  * space back to the block group, otherwise we will leak space.
4545                  */
4546                 if (!alloc && cache->cached == BTRFS_CACHE_NO)
4547                         cache_block_group(cache, trans, NULL, 1);
4548
4549                 byte_in_group = bytenr - cache->key.objectid;
4550                 WARN_ON(byte_in_group > cache->key.offset);
4551
4552                 spin_lock(&cache->space_info->lock);
4553                 spin_lock(&cache->lock);
4554
4555                 if (btrfs_test_opt(root, SPACE_CACHE) &&
4556                     cache->disk_cache_state < BTRFS_DC_CLEAR)
4557                         cache->disk_cache_state = BTRFS_DC_CLEAR;
4558
4559                 cache->dirty = 1;
4560                 old_val = btrfs_block_group_used(&cache->item);
4561                 num_bytes = min(total, cache->key.offset - byte_in_group);
4562                 if (alloc) {
4563                         old_val += num_bytes;
4564                         btrfs_set_block_group_used(&cache->item, old_val);
4565                         cache->reserved -= num_bytes;
4566                         cache->space_info->bytes_reserved -= num_bytes;
4567                         cache->space_info->bytes_used += num_bytes;
4568                         cache->space_info->disk_used += num_bytes * factor;
4569                         spin_unlock(&cache->lock);
4570                         spin_unlock(&cache->space_info->lock);
4571                 } else {
4572                         old_val -= num_bytes;
4573                         btrfs_set_block_group_used(&cache->item, old_val);
4574                         cache->pinned += num_bytes;
4575                         cache->space_info->bytes_pinned += num_bytes;
4576                         cache->space_info->bytes_used -= num_bytes;
4577                         cache->space_info->disk_used -= num_bytes * factor;
4578                         spin_unlock(&cache->lock);
4579                         spin_unlock(&cache->space_info->lock);
4580
4581                         set_extent_dirty(info->pinned_extents,
4582                                          bytenr, bytenr + num_bytes - 1,
4583                                          GFP_NOFS | __GFP_NOFAIL);
4584                 }
4585                 btrfs_put_block_group(cache);
4586                 total -= num_bytes;
4587                 bytenr += num_bytes;
4588         }
4589         return 0;
4590 }
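
/*
 * Example of the factor logic above (illustrative, not from the original
 * source): freeing a 1M extent from a RAID1 block group (factor = 2)
 * moves 1M from bytes_used to bytes_pinned but drops disk_used by 2M,
 * since the extent occupied two copies on disk.
 */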
4591
4592 static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
4593 {
4594         struct btrfs_block_group_cache *cache;
4595         u64 bytenr;
4596
4597         cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
4598         if (!cache)
4599                 return 0;
4600
4601         bytenr = cache->key.objectid;
4602         btrfs_put_block_group(cache);
4603
4604         return bytenr;
4605 }
4606
4607 static int pin_down_extent(struct btrfs_root *root,
4608                            struct btrfs_block_group_cache *cache,
4609                            u64 bytenr, u64 num_bytes, int reserved)
4610 {
4611         spin_lock(&cache->space_info->lock);
4612         spin_lock(&cache->lock);
4613         cache->pinned += num_bytes;
4614         cache->space_info->bytes_pinned += num_bytes;
4615         if (reserved) {
4616                 cache->reserved -= num_bytes;
4617                 cache->space_info->bytes_reserved -= num_bytes;
4618         }
4619         spin_unlock(&cache->lock);
4620         spin_unlock(&cache->space_info->lock);
4621
4622         set_extent_dirty(root->fs_info->pinned_extents, bytenr,
4623                          bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
4624         return 0;
4625 }
4626
4627 /*
4628  * this function must be called within a transaction
4629  */
4630 int btrfs_pin_extent(struct btrfs_root *root,
4631                      u64 bytenr, u64 num_bytes, int reserved)
4632 {
4633         struct btrfs_block_group_cache *cache;
4634
4635         cache = btrfs_lookup_block_group(root->fs_info, bytenr);
4636         BUG_ON(!cache);
4637
4638         pin_down_extent(root, cache, bytenr, num_bytes, reserved);
4639
4640         btrfs_put_block_group(cache);
4641         return 0;
4642 }
4643
4644 /*
4645  * this function must be called within a transaction
4646  */
4647 int btrfs_pin_extent_for_log_replay(struct btrfs_trans_handle *trans,
4648                                     struct btrfs_root *root,
4649                                     u64 bytenr, u64 num_bytes)
4650 {
4651         struct btrfs_block_group_cache *cache;
4652
4653         cache = btrfs_lookup_block_group(root->fs_info, bytenr);
4654         BUG_ON(!cache);
4655
4656         /*
4657          * pull in the free space cache (if any) so that our pin
4658          * removes the free space from the cache.  We have load_only set
4659          * to one because the slow code to read in the free extents does check
4660          * the pinned extents.
4661          */
4662         cache_block_group(cache, trans, root, 1);
4663
4664         pin_down_extent(root, cache, bytenr, num_bytes, 0);
4665
4666         /* remove us from the free space cache (if we're there at all) */
4667         btrfs_remove_free_space(cache, bytenr, num_bytes);
4668         btrfs_put_block_group(cache);
4669         return 0;
4670 }
4671
4672 /**
4673  * btrfs_update_reserved_bytes - update the block_group and space info counters
4674  * @cache:      The cache we are manipulating
4675  * @num_bytes:  The number of bytes in question
4676  * @reserve:    One of the reservation enums
4677  *
4678  * This is called by the allocator when it reserves space, or by somebody who is
4679  * freeing space that was never actually used on disk.  For example if you
4680  * reserve some space for a new leaf in transaction A and before transaction A
4681  * commits you free that leaf, you call this with RESERVE_FREE in order to
4682  * clear the reservation.
4683  *
4684  * Metadata reservations should be called with RESERVE_ALLOC so we do the proper
4685  * ENOSPC accounting.  For data we handle the reservation through clearing the
4686  * delalloc bits in the io_tree.  We have to do this since we could end up
4687  * allocating less disk space for the amount of data we have reserved in the
4688  * case of compression.
4689  *
4690  * If this is a reservation and the block group has become read only we cannot
4691  * make the reservation and return -EAGAIN, otherwise this function always
4692  * succeeds.
4693  */
4694 static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
4695                                        u64 num_bytes, int reserve)
4696 {
4697         struct btrfs_space_info *space_info = cache->space_info;
4698         int ret = 0;
4699         spin_lock(&space_info->lock);
4700         spin_lock(&cache->lock);
4701         if (reserve != RESERVE_FREE) {
4702                 if (cache->ro) {
4703                         ret = -EAGAIN;
4704                 } else {
4705                         cache->reserved += num_bytes;
4706                         space_info->bytes_reserved += num_bytes;
4707                         if (reserve == RESERVE_ALLOC) {
4708                                 trace_btrfs_space_reservation(cache->fs_info,
4709                                                               "space_info",
4710                                                               (u64)space_info,
4711                                                               num_bytes, 0);
4712                                 space_info->bytes_may_use -= num_bytes;
4713                         }
4714                 }
4715         } else {
4716                 if (cache->ro)
4717                         space_info->bytes_readonly += num_bytes;
4718                 cache->reserved -= num_bytes;
4719                 space_info->bytes_reserved -= num_bytes;
4720                 space_info->reservation_progress++;
4721         }
4722         spin_unlock(&cache->lock);
4723         spin_unlock(&space_info->lock);
4724         return ret;
4725 }
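
/*
 * Worked example (illustrative, not from the original source): reserving
 * a 16K metadata extent with RESERVE_ALLOC grows cache->reserved and
 * space_info->bytes_reserved by 16K while bytes_may_use shrinks by 16K.
 * If that extent is freed before it is ever written, the matching
 * RESERVE_FREE call undoes the bytes_reserved side again.
 */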
4726
4727 int btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
4728                                 struct btrfs_root *root)
4729 {
4730         struct btrfs_fs_info *fs_info = root->fs_info;
4731         struct btrfs_caching_control *next;
4732         struct btrfs_caching_control *caching_ctl;
4733         struct btrfs_block_group_cache *cache;
4734
4735         down_write(&fs_info->extent_commit_sem);
4736
4737         list_for_each_entry_safe(caching_ctl, next,
4738                                  &fs_info->caching_block_groups, list) {
4739                 cache = caching_ctl->block_group;
4740                 if (block_group_cache_done(cache)) {
4741                         cache->last_byte_to_unpin = (u64)-1;
4742                         list_del_init(&caching_ctl->list);
4743                         put_caching_control(caching_ctl);
4744                 } else {
4745                         cache->last_byte_to_unpin = caching_ctl->progress;
4746                 }
4747         }
4748
4749         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
4750                 fs_info->pinned_extents = &fs_info->freed_extents[1];
4751         else
4752                 fs_info->pinned_extents = &fs_info->freed_extents[0];
4753
4754         up_write(&fs_info->extent_commit_sem);
4755
4756         update_global_block_rsv(fs_info);
4757         return 0;
4758 }
4759
4760 static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
4761 {
4762         struct btrfs_fs_info *fs_info = root->fs_info;
4763         struct btrfs_block_group_cache *cache = NULL;
4764         u64 len;
4765
4766         while (start <= end) {
4767                 if (!cache ||
4768                     start >= cache->key.objectid + cache->key.offset) {
4769                         if (cache)
4770                                 btrfs_put_block_group(cache);
4771                         cache = btrfs_lookup_block_group(fs_info, start);
4772                         BUG_ON(!cache);
4773                 }
4774
4775                 len = cache->key.objectid + cache->key.offset - start;
4776                 len = min(len, end + 1 - start);
4777
4778                 if (start < cache->last_byte_to_unpin) {
4779                         len = min(len, cache->last_byte_to_unpin - start);
4780                         btrfs_add_free_space(cache, start, len);
4781                 }
4782
4783                 start += len;
4784
4785                 spin_lock(&cache->space_info->lock);
4786                 spin_lock(&cache->lock);
4787                 cache->pinned -= len;
4788                 cache->space_info->bytes_pinned -= len;
4789                 if (cache->ro)
4790                         cache->space_info->bytes_readonly += len;
4791                 spin_unlock(&cache->lock);
4792                 spin_unlock(&cache->space_info->lock);
4793         }
4794
4795         if (cache)
4796                 btrfs_put_block_group(cache);
4797         return 0;
4798 }
4799
4800 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
4801                                struct btrfs_root *root)
4802 {
4803         struct btrfs_fs_info *fs_info = root->fs_info;
4804         struct extent_io_tree *unpin;
4805         u64 start;
4806         u64 end;
4807         int ret;
4808
4809         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
4810                 unpin = &fs_info->freed_extents[1];
4811         else
4812                 unpin = &fs_info->freed_extents[0];
4813
4814         while (1) {
4815                 ret = find_first_extent_bit(unpin, 0, &start, &end,
4816                                             EXTENT_DIRTY);
4817                 if (ret)
4818                         break;
4819
4820                 if (btrfs_test_opt(root, DISCARD))
4821                         ret = btrfs_discard_extent(root, start,
4822                                                    end + 1 - start, NULL);
4823
4824                 clear_extent_dirty(unpin, start, end, GFP_NOFS);
4825                 unpin_extent_range(root, start, end);
4826                 cond_resched();
4827         }
4828
4829         return 0;
4830 }
4831
4832 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
4833                                 struct btrfs_root *root,
4834                                 u64 bytenr, u64 num_bytes, u64 parent,
4835                                 u64 root_objectid, u64 owner_objectid,
4836                                 u64 owner_offset, int refs_to_drop,
4837                                 struct btrfs_delayed_extent_op *extent_op)
4838 {
4839         struct btrfs_key key;
4840         struct btrfs_path *path;
4841         struct btrfs_fs_info *info = root->fs_info;
4842         struct btrfs_root *extent_root = info->extent_root;
4843         struct extent_buffer *leaf;
4844         struct btrfs_extent_item *ei;
4845         struct btrfs_extent_inline_ref *iref;
4846         int ret;
4847         int is_data;
4848         int extent_slot = 0;
4849         int found_extent = 0;
4850         int num_to_del = 1;
4851         u32 item_size;
4852         u64 refs;
4853
4854         path = btrfs_alloc_path();
4855         if (!path)
4856                 return -ENOMEM;
4857
4858         path->reada = 1;
4859         path->leave_spinning = 1;
4860
4861         is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
4862         BUG_ON(!is_data && refs_to_drop != 1);
4863
4864         ret = lookup_extent_backref(trans, extent_root, path, &iref,
4865                                     bytenr, num_bytes, parent,
4866                                     root_objectid, owner_objectid,
4867                                     owner_offset);
4868         if (ret == 0) {
4869                 extent_slot = path->slots[0];
4870                 while (extent_slot >= 0) {
4871                         btrfs_item_key_to_cpu(path->nodes[0], &key,
4872                                               extent_slot);
4873                         if (key.objectid != bytenr)
4874                                 break;
4875                         if (key.type == BTRFS_EXTENT_ITEM_KEY &&
4876                             key.offset == num_bytes) {
4877                                 found_extent = 1;
4878                                 break;
4879                         }
4880                         if (path->slots[0] - extent_slot > 5)
4881                                 break;
4882                         extent_slot--;
4883                 }
4884 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
4885                 item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
4886                 if (found_extent && item_size < sizeof(*ei))
4887                         found_extent = 0;
4888 #endif
4889                 if (!found_extent) {
4890                         BUG_ON(iref);
4891                         ret = remove_extent_backref(trans, extent_root, path,
4892                                                     NULL, refs_to_drop,
4893                                                     is_data);
4894                         BUG_ON(ret);
4895                         btrfs_release_path(path);
4896                         path->leave_spinning = 1;
4897
4898                         key.objectid = bytenr;
4899                         key.type = BTRFS_EXTENT_ITEM_KEY;
4900                         key.offset = num_bytes;
4901
4902                         ret = btrfs_search_slot(trans, extent_root,
4903                                                 &key, path, -1, 1);
4904                         if (ret) {
4905                                 printk(KERN_ERR "umm, got %d back from search"
4906                                        ", was looking for %llu\n", ret,
4907                                        (unsigned long long)bytenr);
4908                                 if (ret > 0)
4909                                         btrfs_print_leaf(extent_root,
4910                                                          path->nodes[0]);
4911                         }
4912                         BUG_ON(ret);
4913                         extent_slot = path->slots[0];
4914                 }
4915         } else {
4916                 btrfs_print_leaf(extent_root, path->nodes[0]);
4917                 WARN_ON(1);
4918                 printk(KERN_ERR "btrfs unable to find ref byte nr %llu "
4919                        "parent %llu root %llu  owner %llu offset %llu\n",
4920                        (unsigned long long)bytenr,
4921                        (unsigned long long)parent,
4922                        (unsigned long long)root_objectid,
4923                        (unsigned long long)owner_objectid,
4924                        (unsigned long long)owner_offset);
4925         }
4926
4927         leaf = path->nodes[0];
4928         item_size = btrfs_item_size_nr(leaf, extent_slot);
4929 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
4930         if (item_size < sizeof(*ei)) {
4931                 BUG_ON(found_extent || extent_slot != path->slots[0]);
4932                 ret = convert_extent_item_v0(trans, extent_root, path,
4933                                              owner_objectid, 0);
4934                 BUG_ON(ret < 0);
4935
4936                 btrfs_release_path(path);
4937                 path->leave_spinning = 1;
4938
4939                 key.objectid = bytenr;
4940                 key.type = BTRFS_EXTENT_ITEM_KEY;
4941                 key.offset = num_bytes;
4942
4943                 ret = btrfs_search_slot(trans, extent_root, &key, path,
4944                                         -1, 1);
4945                 if (ret) {
4946                         printk(KERN_ERR "umm, got %d back from search"
4947                                ", was looking for %llu\n", ret,
4948                                (unsigned long long)bytenr);
4949                         btrfs_print_leaf(extent_root, path->nodes[0]);
4950                 }
4951                 BUG_ON(ret);
4952                 extent_slot = path->slots[0];
4953                 leaf = path->nodes[0];
4954                 item_size = btrfs_item_size_nr(leaf, extent_slot);
4955         }
4956 #endif
4957         BUG_ON(item_size < sizeof(*ei));
4958         ei = btrfs_item_ptr(leaf, extent_slot,
4959                             struct btrfs_extent_item);
4960         if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID) {
4961                 struct btrfs_tree_block_info *bi;
4962                 BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
4963                 bi = (struct btrfs_tree_block_info *)(ei + 1);
4964                 WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
4965         }
4966
4967         refs = btrfs_extent_refs(leaf, ei);
4968         BUG_ON(refs < refs_to_drop);
4969         refs -= refs_to_drop;
4970
4971         if (refs > 0) {
4972                 if (extent_op)
4973                         __run_delayed_extent_op(extent_op, leaf, ei);
4974                 /*
4975                  * In the case of an inline back ref, the reference count will
4976                  * be updated by remove_extent_backref
4977                  */
4978                 if (iref) {
4979                         BUG_ON(!found_extent);
4980                 } else {
4981                         btrfs_set_extent_refs(leaf, ei, refs);
4982                         btrfs_mark_buffer_dirty(leaf);
4983                 }
4984                 if (found_extent) {
4985                         ret = remove_extent_backref(trans, extent_root, path,
4986                                                     iref, refs_to_drop,
4987                                                     is_data);
4988                         BUG_ON(ret);
4989                 }
4990         } else {
4991                 if (found_extent) {
4992                         BUG_ON(is_data && refs_to_drop !=
4993                                extent_data_ref_count(root, path, iref));
4994                         if (iref) {
4995                                 BUG_ON(path->slots[0] != extent_slot);
4996                         } else {
4997                                 BUG_ON(path->slots[0] != extent_slot + 1);
4998                                 path->slots[0] = extent_slot;
4999                                 num_to_del = 2;
5000                         }
5001                 }
5002
5003                 ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
5004                                       num_to_del);
5005                 BUG_ON(ret);
5006                 btrfs_release_path(path);
5007
5008                 if (is_data) {
5009                         ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
5010                         BUG_ON(ret);
5011                 } else {
5012                         invalidate_mapping_pages(info->btree_inode->i_mapping,
5013                              bytenr >> PAGE_CACHE_SHIFT,
5014                              (bytenr + num_bytes - 1) >> PAGE_CACHE_SHIFT);
5015                 }
5016
5017                 ret = update_block_group(trans, root, bytenr, num_bytes, 0);
5018                 BUG_ON(ret);
5019         }
5020         btrfs_free_path(path);
5021         return ret;
5022 }
5023
5024 /*
5025  * when we free a block, it is possible (and likely) that we free the last
5026  * delayed ref for that extent as well.  This searches the delayed ref tree for
5027  * a given extent, and if there are no other delayed refs to be processed, it
5028  * removes it from the tree.
5029  */
5030 static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
5031                                       struct btrfs_root *root, u64 bytenr)
5032 {
5033         struct btrfs_delayed_ref_head *head;
5034         struct btrfs_delayed_ref_root *delayed_refs;
5035         struct btrfs_delayed_ref_node *ref;
5036         struct rb_node *node;
5037         int ret = 0;
5038
5039         delayed_refs = &trans->transaction->delayed_refs;
5040         spin_lock(&delayed_refs->lock);
5041         head = btrfs_find_delayed_ref_head(trans, bytenr);
5042         if (!head)
5043                 goto out;
5044
5045         node = rb_prev(&head->node.rb_node);
5046         if (!node)
5047                 goto out;
5048
5049         ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
5050
5051         /* there are still entries for this ref, we can't drop it */
5052         if (ref->bytenr == bytenr)
5053                 goto out;
5054
5055         if (head->extent_op) {
5056                 if (!head->must_insert_reserved)
5057                         goto out;
5058                 kfree(head->extent_op);
5059                 head->extent_op = NULL;
5060         }
5061
5062         /*
5063          * waiting for the lock here would deadlock.  If someone else has it
5064          * locked, they are already in the process of dropping it anyway.
5065          */
5066         if (!mutex_trylock(&head->mutex))
5067                 goto out;
5068
5069         /*
5070          * at this point we have a head with no other entries.  Go
5071          * ahead and process it.
5072          */
5073         head->node.in_tree = 0;
5074         rb_erase(&head->node.rb_node, &delayed_refs->root);
5075
5076         delayed_refs->num_entries--;
5077         if (waitqueue_active(&delayed_refs->seq_wait))
5078                 wake_up(&delayed_refs->seq_wait);
5079
5080         /*
5081          * we don't take a ref on the node because we're removing it from the
5082          * tree, so we just steal the ref the tree was holding.
5083          */
5084         delayed_refs->num_heads--;
5085         if (list_empty(&head->cluster))
5086                 delayed_refs->num_heads_ready--;
5087
5088         list_del_init(&head->cluster);
5089         spin_unlock(&delayed_refs->lock);
5090
5091         BUG_ON(head->extent_op);
5092         if (head->must_insert_reserved)
5093                 ret = 1;
5094
5095         mutex_unlock(&head->mutex);
5096         btrfs_put_delayed_ref(&head->node);
5097         return ret;
5098 out:
5099         spin_unlock(&delayed_refs->lock);
5100         return 0;
5101 }
5102
5103 void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
5104                            struct btrfs_root *root,
5105                            struct extent_buffer *buf,
5106                            u64 parent, int last_ref, int for_cow)
5107 {
5108         struct btrfs_block_group_cache *cache = NULL;
5109         int ret;
5110
5111         if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
5112                 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
5113                                         buf->start, buf->len,
5114                                         parent, root->root_key.objectid,
5115                                         btrfs_header_level(buf),
5116                                         BTRFS_DROP_DELAYED_REF, NULL, for_cow);
5117                 BUG_ON(ret);
5118         }
5119
5120         if (!last_ref)
5121                 return;
5122
5123         cache = btrfs_lookup_block_group(root->fs_info, buf->start);
5124
5125         if (btrfs_header_generation(buf) == trans->transid) {
5126                 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
5127                         ret = check_ref_cleanup(trans, root, buf->start);
5128                         if (!ret)
5129                                 goto out;
5130                 }
5131
5132                 if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
5133                         pin_down_extent(root, cache, buf->start, buf->len, 1);
5134                         goto out;
5135                 }
5136
5137                 WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));
5138
5139                 btrfs_add_free_space(cache, buf->start, buf->len);
5140                 btrfs_update_reserved_bytes(cache, buf->len, RESERVE_FREE);
5141         }
5142 out:
5143         /*
5144          * Deleting the buffer, clear the corrupt flag since it doesn't matter
5145          * anymore.
5146          */
5147         clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags);
5148         btrfs_put_block_group(cache);
5149 }
5150
5151 int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root,
5152                       u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
5153                       u64 owner, u64 offset, int for_cow)
5154 {
5155         int ret;
5156         struct btrfs_fs_info *fs_info = root->fs_info;
5157
5158         /*
5159          * tree log blocks never actually go into the extent allocation
5160          * tree, just update pinning info and exit early.
5161          */
5162         if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
5163                 WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
5164                 /* unlocks the pinned mutex */
5165                 btrfs_pin_extent(root, bytenr, num_bytes, 1);
5166                 ret = 0;
5167         } else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
5168                 ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
5169                                         num_bytes,
5170                                         parent, root_objectid, (int)owner,
5171                                         BTRFS_DROP_DELAYED_REF, NULL, for_cow);
5172                 BUG_ON(ret);
5173         } else {
5174                 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
5175                                                 num_bytes,
5176                                                 parent, root_objectid, owner,
5177                                                 offset, BTRFS_DROP_DELAYED_REF,
5178                                                 NULL, for_cow);
5179                 BUG_ON(ret);
5180         }
5181         return ret;
5182 }
5183
5184 static u64 stripe_align(struct btrfs_root *root, u64 val)
5185 {
5186         u64 mask = ((u64)root->stripesize - 1);
5187         u64 ret = (val + mask) & ~mask;
5188         return ret;
5189 }
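
/*
 * Example (illustrative, not from the original source): with a 64K
 * stripesize, mask = 0xffff and stripe_align(root, 0x12345) returns
 * 0x20000, the next stripe boundary at or above the input.
 */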
5190
5191 /*
5192  * when we wait for progress in the block group caching, it's because
5193  * our allocation attempt failed at least once.  So, we must sleep
5194  * and let some progress happen before we try again.
5195  *
5196  * This function will sleep at least once waiting for new free space to
5197  * show up, and then it will check the block group free space numbers
5198  * for our min num_bytes.  Another option is to have it go ahead
5199  * and look in the rbtree for a free extent of a given size, but this
5200  * is a good start.
5201  */
5202 static noinline int
5203 wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
5204                                 u64 num_bytes)
5205 {
5206         struct btrfs_caching_control *caching_ctl;
5207         DEFINE_WAIT(wait);
5208
5209         caching_ctl = get_caching_control(cache);
5210         if (!caching_ctl)
5211                 return 0;
5212
5213         wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
5214                    (cache->free_space_ctl->free_space >= num_bytes));
5215
5216         put_caching_control(caching_ctl);
5217         return 0;
5218 }
5219
5220 static noinline int
5221 wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
5222 {
5223         struct btrfs_caching_control *caching_ctl;
5224         DEFINE_WAIT(wait);
5225
5226         caching_ctl = get_caching_control(cache);
5227         if (!caching_ctl)
5228                 return 0;
5229
5230         wait_event(caching_ctl->wait, block_group_cache_done(cache));
5231
5232         put_caching_control(caching_ctl);
5233         return 0;
5234 }
5235
5236 static int get_block_group_index(struct btrfs_block_group_cache *cache)
5237 {
5238         int index;
5239         if (cache->flags & BTRFS_BLOCK_GROUP_RAID10)
5240                 index = 0;
5241         else if (cache->flags & BTRFS_BLOCK_GROUP_RAID1)
5242                 index = 1;
5243         else if (cache->flags & BTRFS_BLOCK_GROUP_DUP)
5244                 index = 2;
5245         else if (cache->flags & BTRFS_BLOCK_GROUP_RAID0)
5246                 index = 3;
5247         else
5248                 index = 4;
5249         return index;
5250 }
5251
5252 enum btrfs_loop_type {
5253         LOOP_FIND_IDEAL = 0,
5254         LOOP_CACHING_NOWAIT = 1,
5255         LOOP_CACHING_WAIT = 2,
5256         LOOP_ALLOC_CHUNK = 3,
5257         LOOP_NO_EMPTY_SIZE = 4,
5258 };
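
/*
 * A sketch of how find_free_extent() moves through these stages (inferred
 * from the allocator loop, not spelled out in the original source): it
 * starts out only considering block groups that are already cached, then
 * waits on caching progress, then allows allocating a fresh chunk, and
 * finally drops the empty_size/empty_cluster padding before giving up
 * with ENOSPC.
 */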
5259
5260 /*
5261  * walks the btree of allocated extents and finds a hole of a given size.
5262  * The key ins is changed to record the hole:
5263  * ins->objectid == block start
5264  * ins->flags == BTRFS_EXTENT_ITEM_KEY
5265  * ins->offset == number of blocks
5266  * Any available blocks before search_start are skipped.
5267  */
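/*
 * Example of a successful result (illustrative, not from the original
 * source): a request for a 1M hole might come back with ins->objectid
 * set to 0x50000000 and ins->offset set to 0x100000, i.e. 1M of free
 * space starting at that logical byte.
 */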
5268 static noinline int find_free_extent(struct btrfs_trans_handle *trans,
5269                                      struct btrfs_root *orig_root,
5270                                      u64 num_bytes, u64 empty_size,
5271                                      u64 search_start, u64 search_end,
5272                                      u64 hint_byte, struct btrfs_key *ins,
5273                                      u64 data)
5274 {
5275         int ret = 0;
5276         struct btrfs_root *root = orig_root->fs_info->extent_root;
5277         struct btrfs_free_cluster *last_ptr = NULL;
5278         struct btrfs_block_group_cache *block_group = NULL;
5279         struct btrfs_block_group_cache *used_block_group;
5280         int empty_cluster = 2 * 1024 * 1024;
5281         int allowed_chunk_alloc = 0;
5282         int done_chunk_alloc = 0;
5283         struct btrfs_space_info *space_info;
5284         int loop = 0;
5285         int index = 0;
5286         int alloc_type = (data & BTRFS_BLOCK_GROUP_DATA) ?
5287                 RESERVE_ALLOC_NO_ACCOUNT : RESERVE_ALLOC;
5288         bool found_uncached_bg = false;
5289         bool failed_cluster_refill = false;
5290         bool failed_alloc = false;
5291         bool use_cluster = true;
5292         bool have_caching_bg = false;
5293         u64 ideal_cache_percent = 0;
5294         u64 ideal_cache_offset = 0;
5295
5296         WARN_ON(num_bytes < root->sectorsize);
5297         btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
5298         ins->objectid = 0;
5299         ins->offset = 0;
5300
5301         trace_find_free_extent(orig_root, num_bytes, empty_size, data);
5302
5303         space_info = __find_space_info(root->fs_info, data);
5304         if (!space_info) {
5305                 printk(KERN_ERR "No space info for %llu\n", data);
5306                 return -ENOSPC;
5307         }
5308
5309         /*
5310          * If the space info is for both data and metadata it means we have a
5311          * small filesystem and we can't use the clustering stuff.
5312          */
5313         if (btrfs_mixed_space_info(space_info))
5314                 use_cluster = false;
5315
5316         if (orig_root->ref_cows || empty_size)
5317                 allowed_chunk_alloc = 1;
5318
5319         if (data & BTRFS_BLOCK_GROUP_METADATA && use_cluster) {
5320                 last_ptr = &root->fs_info->meta_alloc_cluster;
5321                 if (!btrfs_test_opt(root, SSD))
5322                         empty_cluster = 64 * 1024;
5323         }
5324
5325         if ((data & BTRFS_BLOCK_GROUP_DATA) && use_cluster &&
5326             btrfs_test_opt(root, SSD)) {
5327                 last_ptr = &root->fs_info->data_alloc_cluster;
5328         }
5329
5330         if (last_ptr) {
5331                 spin_lock(&last_ptr->lock);
5332                 if (last_ptr->block_group)
5333                         hint_byte = last_ptr->window_start;
5334                 spin_unlock(&last_ptr->lock);
5335         }
5336
5337         search_start = max(search_start, first_logical_byte(root, 0));
5338         search_start = max(search_start, hint_byte);
5339
5340         if (!last_ptr)
5341                 empty_cluster = 0;
5342
5343         if (search_start == hint_byte) {
5344 ideal_cache:
5345                 block_group = btrfs_lookup_block_group(root->fs_info,
5346                                                        search_start);
5347                 used_block_group = block_group;
5348                 /*
5349                  * we don't want to use the block group if it doesn't match our
5350                  * allocation bits, or if its not cached.
5351                  *
5352                  * However if we are re-searching with an ideal block group
5353                  * picked out then we don't care that the block group is cached.
5354                  */
5355                 if (block_group && block_group_bits(block_group, data) &&
5356                     (block_group->cached != BTRFS_CACHE_NO ||
5357                      search_start == ideal_cache_offset)) {
5358                         down_read(&space_info->groups_sem);
5359                         if (list_empty(&block_group->list) ||
5360                             block_group->ro) {
5361                                 /*
5362                                  * someone is removing this block group,
5363                                  * we can't jump into the have_block_group
5364                                  * target because our list pointers are not
5365                                  * valid
5366                                  */
5367                                 btrfs_put_block_group(block_group);
5368                                 up_read(&space_info->groups_sem);
5369                         } else {
5370                                 index = get_block_group_index(block_group);
5371                                 goto have_block_group;
5372                         }
5373                 } else if (block_group) {
5374                         btrfs_put_block_group(block_group);
5375                 }
5376         }
5377 search:
5378         have_caching_bg = false;
5379         down_read(&space_info->groups_sem);
5380         list_for_each_entry(block_group, &space_info->block_groups[index],
5381                             list) {
5382                 u64 offset;
5383                 int cached;
5384
5385                 used_block_group = block_group;
5386                 btrfs_get_block_group(block_group);
5387                 search_start = block_group->key.objectid;
5388
5389                 /*
5390                  * this can happen if we end up cycling through all the
5391                  * raid types, but we want to make sure we only allocate
5392                  * for the proper type.
5393                  */
5394                 if (!block_group_bits(block_group, data)) {
5395                         u64 extra = BTRFS_BLOCK_GROUP_DUP |
5396                                     BTRFS_BLOCK_GROUP_RAID1 |
5397                                     BTRFS_BLOCK_GROUP_RAID10;
5398
5399                         /*
5400                          * if they asked for extra copies and this block group
5401                          * doesn't provide them, bail.  This does allow us to
5402                          * fill raid0 from raid1.
5403                          */
5404                         if ((data & extra) && !(block_group->flags & extra))
5405                                 goto loop;
5406                 }
5407
5408 have_block_group:
5409                 cached = block_group_cache_done(block_group);
5410                 if (unlikely(!cached)) {
5411                         u64 free_percent;
5412
5413                         found_uncached_bg = true;
5414                         ret = cache_block_group(block_group, trans,
5415                                                 orig_root, 1);
5416                         if (block_group->cached == BTRFS_CACHE_FINISHED)
5417                                 goto alloc;
5418
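                             /*
                              * estimate how much of the group is still free;
                              * e.g. a 1GiB group with 256MiB used comes out
                              * as free_percent == 75 (illustrative numbers)
                              */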
5419                         free_percent = btrfs_block_group_used(&block_group->item);
5420                         free_percent *= 100;
5421                         free_percent = div64_u64(free_percent,
5422                                                  block_group->key.offset);
5423                         free_percent = 100 - free_percent;
5424                         if (free_percent > ideal_cache_percent &&
5425                             likely(!block_group->ro)) {
5426                                 ideal_cache_offset = block_group->key.objectid;
5427                                 ideal_cache_percent = free_percent;
5428                         }
5429
5430                         /*
5431                          * The caching workers are limited to 2 threads, so we
5432                          * can queue as much work as we care to.
5433                          */
5434                         if (loop > LOOP_FIND_IDEAL) {
5435                                 ret = cache_block_group(block_group, trans,
5436                                                         orig_root, 0);
5437                                 BUG_ON(ret);
5438                         }
5439
5440                         /*
5441                          * If this loop pass only wants fully-cached block
5442                          * groups, move on to the next block group.
5443                          */
5444                         if (loop == LOOP_FIND_IDEAL)
5445                                 goto loop;
5446                 }
5447
5448 alloc:
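                     /* read-only block groups can never satisfy an allocation */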
5449                 if (unlikely(block_group->ro))
5450                         goto loop;
5451
5452                 /*
5453                  * Ok, we want to try to use the cluster allocator, so
5454                  * let's look there
5455                  */
5456                 if (last_ptr) {
5457                         /*
5458                          * the refill lock keeps out other
5459                          * people trying to start a new cluster
5460                          */
5461                         spin_lock(&last_ptr->refill_lock);
5462                         used_block_group = last_ptr->block_group;
5463                         if (used_block_group != block_group &&
5464                             (!used_block_group ||
5465                              used_block_group->ro ||
5466                              !block_group_bits(used_block_group, data))) {
5467                                 used_block_group = block_group;
5468                                 goto refill_cluster;
5469                         }
5470
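                             /*
                              * the cluster lives in a different block group
                              * than the one we are iterating, so pin it with
                              * its own reference before allocating from it
                              */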
5471                         if (used_block_group != block_group)
5472                                 btrfs_get_block_group(used_block_group);
5473
5474                         offset = btrfs_alloc_from_cluster(used_block_group,
5475                           last_ptr, num_bytes, used_block_group->key.objectid);
5476                         if (offset) {
5477                                 /* we have a block, we're done */
5478                                 spin_unlock(&last_ptr->refill_lock);
5479                                 trace_btrfs_reserve_extent_cluster(root,
5480                                         block_group, search_start, num_bytes);
5481                                 goto checks;
5482                         }
5483
5484                         WARN_ON(last_ptr->block_group != used_block_group);
5485                         if (used_block_group != block_group) {
5486                                 btrfs_put_block_group(used_block_group);
5487                                 used_block_group = block_group;
5488                         }
5489 refill_cluster:
5490                         BUG_ON(used_block_group != block_group);
5491                         /* If we are on LOOP_NO_EMPTY_SIZE, we can't
5492                          * set up a new cluster, so let's just skip it
5493                          * and let the allocator find whatever block
5494                          * it can find.  If we reach this point, we
5495                          * will have tried the cluster allocator
5496                          * plenty of times and not have found
5497                          * anything, so we are likely way too
5498                          * fragmented for the clustering stuff to find
5499                          * anything.
5500                          *
5501                          * However, if the cluster is taken from the
5502                          * current block group, release the cluster
5503                          * first, so that we stand a better chance of
5504                          * succeeding in the unclustered
5505                          * allocation.  */
5506                         if (loop >= LOOP_NO_EMPTY_SIZE &&
5507                             last_ptr->block_group != block_group) {
5508                                 spin_unlock(&last_ptr->refill_lock);
5509                                 goto unclustered_alloc;
5510                         }
5511
5512                         /*
5513                          * this cluster didn't work out, free it and
5514                          * start over
5515                          */
5516                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
5517
5518                         if (loop >= LOOP_NO_EMPTY_SIZE) {
5519                                 spin_unlock(&last_ptr->refill_lock);
5520                                 goto unclustered_alloc;
5521                         }
5522
5523                         /* allocate a cluster in this block group */
5524                         ret = btrfs_find_space_cluster(trans, root,
5525                                                block_group, last_ptr,
5526                                                search_start, num_bytes,
5527                                                empty_cluster + empty_size);
5528                         if (ret == 0) {
5529                                 /*
5530                                  * now pull our allocation out of this
5531                                  * cluster
5532                                  */
5533                                 offset = btrfs_alloc_from_cluster(block_group,
5534                                                   last_ptr, num_bytes,
5535                                                   search_start);
5536                                 if (offset) {
5537                                         /* we found one, proceed */
5538                                         spin_unlock(&last_ptr->refill_lock);
5539                                         trace_btrfs_reserve_extent_cluster(root,
5540                                                 block_group, search_start,
5541                                                 num_bytes);
5542                                         goto checks;
5543                                 }
5544                         } else if (!cached && loop > LOOP_CACHING_NOWAIT
5545                                    && !failed_cluster_refill) {
5546                                 spin_unlock(&last_ptr->refill_lock);
5547
5548                                 failed_cluster_refill = true;
5549                                 wait_block_group_cache_progress(block_group,
5550                                        num_bytes + empty_cluster + empty_size);
5551                                 goto have_block_group;
5552                         }
5553
5554                         /*
5555                          * at this point we either didn't find a cluster
5556                          * or we weren't able to allocate a block from our
5557                          * cluster.  Free the cluster we've been trying
5558                          * to use, and go to the next block group
5559                          */
5560                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
5561                         spin_unlock(&last_ptr->refill_lock);
5562                         goto loop;
5563                 }
5564
5565 unclustered_alloc:
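                     /*
                      * no usable cluster: fall back to an unclustered search
                      * of this block group.  For fully-cached groups, first
                      * do a cheap check under tree_lock: if the free-space
                      * total can't cover the request (plus cluster slack),
                      * skip the expensive search entirely.
                      */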
5566                 spin_lock(&block_group->free_space_ctl->tree_lock);
5567                 if (cached &&
5568                     block_group->free_space_ctl->free_space <
5569                     num_bytes + empty_cluster + empty_size) {
5570                         spin_unlock(&block_group->free_space_ctl->tree_lock);
5571                         goto loop;
5572                 }
5573                 spin_unlock(&block_group->free_space_ctl->tree_lock);
5574
5575                 offset = btrfs_find_space_for_alloc(block_group, search_start,
5576                                                     num_bytes, empty_size);
5577                 /*
5578                  * If we didn't find a chunk, and we haven't failed on this
5579                  * block group before, and this block group is in the middle of
5580                  * caching and we are ok with waiting, then go ahead and wait
5581                  * for progress to be made, and set failed_alloc to true.
5582                  *
5583                  * If failed_alloc is true then we've already waited on this
5584                  * block group once and should move on to the next block group.
5585                  */
5586                 if (!offset && !failed_alloc && !cached &&
5587                     loop > LOOP_CACHING_NOWAIT) {
5588                         wait_block_group_cache_progress(block_group,
5589                                         num_bytes + empty_size);