Fix btrfs_del_ordered_inode to allow forcing the drop during unlinks
[linux-3.10.git] / fs / btrfs / ordered-data.c
1 /*
2  * Copyright (C) 2007 Oracle.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 02111-1307, USA.
17  */
18
19 #include <linux/gfp.h>
20 #include <linux/slab.h>
21 #include <linux/blkdev.h>
22 #include "ctree.h"
23 #include "transaction.h"
24 #include "btrfs_inode.h"
25
/*
 * One record in a transaction's ordered_inode_tree, keyed by
 * (root_objectid, objectid).  The entry holds a reference on the inode
 * (taken via igrab in btrfs_add_ordered_inode) for as long as it is
 * queued in the tree.
 */
struct tree_entry {
	u64 root_objectid;	/* objectid of the owning btrfs root */
	u64 objectid;		/* inode number within that root */
	struct inode *inode;	/* vfs inode pinned while queued */
	struct rb_node rb_node;	/* link into ordered_inode_tree->tree */
};
32
33 /*
34  * returns > 0 if entry passed (root, objectid) is > entry,
35  * < 0 if (root, objectid) < entry and zero if they are equal
36  */
37 static int comp_entry(struct tree_entry *entry, u64 root_objectid,
38                       u64 objectid)
39 {
40         if (root_objectid < entry->root_objectid)
41                 return -1;
42         if (root_objectid > entry->root_objectid)
43                 return 1;
44         if (objectid < entry->objectid)
45                 return -1;
46         if (objectid > entry->objectid)
47                 return 1;
48         return 0;
49 }
50
51 static struct rb_node *tree_insert(struct rb_root *root, u64 root_objectid,
52                                    u64 objectid, struct rb_node *node)
53 {
54         struct rb_node ** p = &root->rb_node;
55         struct rb_node * parent = NULL;
56         struct tree_entry *entry;
57         int comp;
58
59         while(*p) {
60                 parent = *p;
61                 entry = rb_entry(parent, struct tree_entry, rb_node);
62
63                 comp = comp_entry(entry, root_objectid, objectid);
64                 if (comp < 0)
65                         p = &(*p)->rb_left;
66                 else if (comp > 0)
67                         p = &(*p)->rb_right;
68                 else
69                         return parent;
70         }
71
72         rb_link_node(node, parent, p);
73         rb_insert_color(node, root);
74         return NULL;
75 }
76
/*
 * Walk @root looking for (root_objectid, objectid).
 *
 * Returns the matching node on an exact hit.  On a miss returns NULL
 * and, when @prev_ret is non-NULL, stores there the first node whose
 * key sorts after the search key (or NULL when the key is beyond the
 * last entry in the tree).
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 root_objectid,
                                     u64 objectid, struct rb_node **prev_ret)
{
        struct rb_node * n = root->rb_node;
        struct rb_node *prev = NULL;
        struct tree_entry *entry;
        struct tree_entry *prev_entry = NULL;
        int comp;

        while(n) {
                entry = rb_entry(n, struct tree_entry, rb_node);
                prev = n;
                prev_entry = entry;
                comp = comp_entry(entry, root_objectid, objectid);

                if (comp < 0)
                        n = n->rb_left;
                else if (comp > 0)
                        n = n->rb_right;
                else
                        return n;
        }
        if (!prev_ret)
                return NULL;

        /*
         * prev is the last node visited by the descent; advance it past
         * any entries that sort at or before the search key.  rb_entry
         * on a NULL prev is only pointer arithmetic and the result is
         * never dereferenced, because the loop condition tests prev
         * before calling comp_entry again.
         */
        while(prev && comp_entry(prev_entry, root_objectid, objectid) >= 0) {
                prev = rb_next(prev);
                prev_entry = rb_entry(prev, struct tree_entry, rb_node);
        }
        *prev_ret = prev;
        return NULL;
}
109
110 static inline struct rb_node *tree_search(struct rb_root *root,
111                                           u64 root_objectid, u64 objectid)
112 {
113         struct rb_node *prev;
114         struct rb_node *ret;
115         ret = __tree_search(root, root_objectid, objectid, &prev);
116         if (!ret)
117                 return prev;
118         return ret;
119 }
120
/*
 * Queue @inode in the running transaction's ordered inode tree.
 *
 * Returns 0 both when the inode was queued and when it was already
 * present for this transaction; -ENOMEM if the tree entry cannot be
 * allocated.
 */
int btrfs_add_ordered_inode(struct inode *inode)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;
        u64 root_objectid = root->root_key.objectid;
        u64 transid = root->fs_info->running_transaction->transid;
        struct tree_entry *entry;
        struct rb_node *node;
        struct btrfs_ordered_inode_tree *tree;

        /* fast path: already queued during this transaction */
        if (transid <= BTRFS_I(inode)->ordered_trans)
                return 0;

        tree = &root->fs_info->running_transaction->ordered_inode_tree;

        /*
         * Cheap read-locked probe before allocating.  A concurrent
         * insert after the unlock is harmless: tree_insert below
         * detects the duplicate and we free our entry.
         */
        read_lock(&tree->lock);
        node = __tree_search(&tree->tree, root_objectid, inode->i_ino, NULL);
        read_unlock(&tree->lock);
        if (node) {
                return 0;
        }

        entry = kmalloc(sizeof(*entry), GFP_NOFS);
        if (!entry)
                return -ENOMEM;

        write_lock(&tree->lock);
        entry->objectid = inode->i_ino;
        entry->root_objectid = root_objectid;
        entry->inode = inode;

        node = tree_insert(&tree->tree, root_objectid,
                           inode->i_ino, &entry->rb_node);

        /* recorded even on a duplicate: the inode is queued either way */
        BTRFS_I(inode)->ordered_trans = transid;

        write_unlock(&tree->lock);
        if (node)
                kfree(entry);   /* lost the race; an entry already exists */
        else
                igrab(inode);   /* pin the inode while it sits in the tree */
        return 0;
}
163
/*
 * Find the first inode queued in @tree whose (root, inode number) key
 * is strictly greater than the (*root_objectid, *objectid) cursor.
 *
 * On success returns 1, advances the cursor to the entry found, stores
 * its inode in *inode and takes an extra i_count reference for the
 * caller to drop.  Returns 0 when no later entry exists.  The entry
 * itself stays queued in the tree.
 */
int btrfs_find_first_ordered_inode(struct btrfs_ordered_inode_tree *tree,
                                   u64 *root_objectid, u64 *objectid,
                                   struct inode **inode)
{
        struct tree_entry *entry;
        struct rb_node *node;

        write_lock(&tree->lock);
        node = tree_search(&tree->tree, *root_objectid, *objectid);
        if (!node) {
                write_unlock(&tree->lock);
                return 0;
        }
        entry = rb_entry(node, struct tree_entry, rb_node);

        /* tree_search may return an entry at or before the cursor;
         * walk forward until we are strictly past it */
        while(comp_entry(entry, *root_objectid, *objectid) >= 0) {
                node = rb_next(node);
                if (!node)
                        break;
                entry = rb_entry(node, struct tree_entry, rb_node);
        }
        if (!node) {
                write_unlock(&tree->lock);
                return 0;
        }

        *root_objectid = entry->root_objectid;
        *inode = entry->inode;
        atomic_inc(&entry->inode->i_count);
        *objectid = entry->objectid;
        write_unlock(&tree->lock);
        return 1;
}
197
/*
 * Like btrfs_find_first_ordered_inode, but also removes the entry
 * found from @tree and frees it.
 *
 * On success returns 1, advances the (*root_objectid, *objectid)
 * cursor, stores the inode in *inode and takes an extra i_count
 * reference for the caller.  Returns 0 when no later entry exists.
 *
 * NOTE(review): the reference taken by igrab when the entry was queued
 * is not released here even though the entry is freed — confirm the
 * callers account for that extra reference.
 */
int btrfs_find_del_first_ordered_inode(struct btrfs_ordered_inode_tree *tree,
                                       u64 *root_objectid, u64 *objectid,
                                       struct inode **inode)
{
        struct tree_entry *entry;
        struct rb_node *node;

        write_lock(&tree->lock);
        node = tree_search(&tree->tree, *root_objectid, *objectid);
        if (!node) {
                write_unlock(&tree->lock);
                return 0;
        }

        entry = rb_entry(node, struct tree_entry, rb_node);
        /* tree_search may return an entry at or before the cursor;
         * walk forward until we are strictly past it */
        while(comp_entry(entry, *root_objectid, *objectid) >= 0) {
                node = rb_next(node);
                if (!node)
                        break;
                entry = rb_entry(node, struct tree_entry, rb_node);
        }
        if (!node) {
                write_unlock(&tree->lock);
                return 0;
        }

        *root_objectid = entry->root_objectid;
        *objectid = entry->objectid;
        *inode = entry->inode;
        atomic_inc(&entry->inode->i_count);
        rb_erase(node, &tree->tree);
        write_unlock(&tree->lock);
        kfree(entry);
        return 1;
}
233
234 static void __btrfs_del_ordered_inode(struct btrfs_ordered_inode_tree *tree,
235                                      struct inode *inode,
236                                      u64 root_objectid, u64 objectid)
237 {
238         struct tree_entry *entry;
239         struct rb_node *node;
240         struct rb_node *prev;
241
242         write_lock(&tree->lock);
243         node = __tree_search(&tree->tree, root_objectid, objectid, &prev);
244         if (!node) {
245                 write_unlock(&tree->lock);
246                 return;
247         }
248         rb_erase(node, &tree->tree);
249         BTRFS_I(inode)->ordered_trans = 0;
250         write_unlock(&tree->lock);
251         atomic_dec(&inode->i_count);
252         entry = rb_entry(node, struct tree_entry, rb_node);
253         kfree(entry);
254         return;
255 }
256
/*
 * Drop @inode's entry from the running transaction's ordered inode
 * tree.  With @force set (the unlink path) the entry is removed even
 * while the mapping still has dirty or writeback pages; otherwise such
 * inodes are left queued so they still get flushed.
 */
void btrfs_del_ordered_inode(struct inode *inode, int force)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;
        u64 root_objectid = root->root_key.objectid;

        /* unlocked fast path: never queued, or already removed */
        if (!BTRFS_I(inode)->ordered_trans) {
                return;
        }

        if (!force && (mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY) ||
            mapping_tagged(inode->i_mapping, PAGECACHE_TAG_WRITEBACK)))
                return;

        /* new_trans_lock keeps running_transaction stable while we use it */
        spin_lock(&root->fs_info->new_trans_lock);
        if (root->fs_info->running_transaction) {
                struct btrfs_ordered_inode_tree *tree;
                tree = &root->fs_info->running_transaction->ordered_inode_tree;
                 __btrfs_del_ordered_inode(tree, inode, root_objectid,
                                                inode->i_ino);
        }
        spin_unlock(&root->fs_info->new_trans_lock);
}
279
/*
 * Wait while @inode still has ordered writeback pending.  Loops with a
 * short congestion-based sleep until the inode's ordered_writeback
 * count drops to zero or a new transaction replaces the one that was
 * running when we started.  Always returns 0.
 */
int btrfs_ordered_throttle(struct btrfs_root *root, struct inode *inode)
{
        struct btrfs_transaction *cur = root->fs_info->running_transaction;
        while(cur == root->fs_info->running_transaction &&
              atomic_read(&BTRFS_I(inode)->ordered_writeback)) {
/* congestion_wait() replaced blk_congestion_wait() after 2.6.18 */
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18)
                congestion_wait(WRITE, HZ/20);
#else
                blk_congestion_wait(WRITE, HZ/20);
#endif
        }
        return 0;
}