[linux-2.6.git] / fs / fs_struct.c
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/path.h>
#include <linux/slab.h>
#include <linux/fs_struct.h>
#include "internal.h"

/*
 * An fs_struct holds its root and pwd references for the lifetime of the
 * structure, so take them as long-term references: mark the mount
 * long-term so the vfsmount refcounting accounts for it correctly.
 */
static inline void path_get_longterm(struct path *path)
{
        path_get(path);
        mnt_make_longterm(path->mnt);
}

static inline void path_put_longterm(struct path *path)
{
        mnt_make_shortterm(path->mnt);
        path_put(path);
}

/*
 * Replace fs->root with *path; put the old root.  May block.
 */
void set_fs_root(struct fs_struct *fs, struct path *path)
{
        struct path old_root;

        path_get_longterm(path);
        spin_lock(&fs->lock);
        write_seqcount_begin(&fs->seq);
        old_root = fs->root;
        fs->root = *path;
        write_seqcount_end(&fs->seq);
        spin_unlock(&fs->lock);
        if (old_root.dentry)
                path_put_longterm(&old_root);
}
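
/*
 * Example caller (an illustrative sketch, not part of this file): a
 * chroot-style system call resolves the user-supplied directory and then
 * installs it via set_fs_root().  This follows the shape of the mainline
 * chroot implementation; the permission and capability checks are elided.
 *
 *	SYSCALL_DEFINE1(chroot, const char __user *, filename)
 *	{
 *		struct path path;
 *		int error;
 *
 *		error = user_path_dir(filename, &path);
 *		if (error)
 *			return error;
 *		// permission/capability checks elided
 *		set_fs_root(current->fs, &path);
 *		path_put(&path);	// set_fs_root() took its own reference
 *		return 0;
 *	}
 */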

/*
 * Replace fs->pwd with *path; put the old pwd.  May block.
 */
void set_fs_pwd(struct fs_struct *fs, struct path *path)
{
        struct path old_pwd;

        path_get_longterm(path);
        spin_lock(&fs->lock);
        write_seqcount_begin(&fs->seq);
        old_pwd = fs->pwd;
        fs->pwd = *path;
        write_seqcount_end(&fs->seq);
        spin_unlock(&fs->lock);

        if (old_pwd.dentry)
                path_put_longterm(&old_pwd);
}
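
/*
 * Example caller (illustrative sketch): chdir resolves the target
 * directory and hands it to set_fs_pwd().  This mirrors the shape of the
 * mainline sys_chdir; the lookup flags and permission check are elided.
 *
 *	SYSCALL_DEFINE1(chdir, const char __user *, filename)
 *	{
 *		struct path path;
 *		int error;
 *
 *		error = user_path_dir(filename, &path);
 *		if (error)
 *			return error;
 *		set_fs_pwd(current->fs, &path);
 *		path_put(&path);	// set_fs_pwd() took its own reference
 *		return 0;
 *	}
 */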

/*
 * Swap *p for *new if it currently equals *old.  Called with fs->lock
 * held and the seqcount write section open; returns 1 if a replacement
 * was made so the caller can fix up the reference counts.
 */
static inline int replace_path(struct path *p, const struct path *old, const struct path *new)
{
        if (likely(p->dentry != old->dentry || p->mnt != old->mnt))
                return 0;
        *p = *new;
        return 1;
}

/*
 * Walk every task and redirect any fs->root or fs->pwd still pointing at
 * @old_root to @new_root, fixing up the long-term references as we go.
 */
void chroot_fs_refs(struct path *old_root, struct path *new_root)
{
        struct task_struct *g, *p;
        struct fs_struct *fs;
        int count = 0;

        read_lock(&tasklist_lock);
        do_each_thread(g, p) {
                task_lock(p);
                fs = p->fs;
                if (fs) {
                        int hits = 0;
                        spin_lock(&fs->lock);
                        write_seqcount_begin(&fs->seq);
                        hits += replace_path(&fs->root, old_root, new_root);
                        hits += replace_path(&fs->pwd, old_root, new_root);
                        write_seqcount_end(&fs->seq);
                        while (hits--) {
                                count++;
                                path_get_longterm(new_root);
                        }
                        spin_unlock(&fs->lock);
                }
                task_unlock(p);
        } while_each_thread(g, p);
        read_unlock(&tasklist_lock);
        while (count--)
                path_put_longterm(old_root);
}
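
/*
 * Example caller (illustrative): pivot_root() is the main user of
 * chroot_fs_refs().  After the mount tree has been rearranged it walks
 * all tasks and redirects any root/pwd still pointing at the old root:
 *
 *	// inside sys_pivot_root(), once the mounts have been spliced
 *	chroot_fs_refs(&root, &new);	// root = old root, new = new root
 *
 * Tasks whose root or pwd did not match the old root are left alone.
 */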

void free_fs_struct(struct fs_struct *fs)
{
        path_put_longterm(&fs->root);
        path_put_longterm(&fs->pwd);
        kmem_cache_free(fs_cachep, fs);
}

void exit_fs(struct task_struct *tsk)
{
        struct fs_struct *fs = tsk->fs;

        if (fs) {
                int kill;
                task_lock(tsk);
                spin_lock(&fs->lock);
                tsk->fs = NULL;
                kill = !--fs->users;
                spin_unlock(&fs->lock);
                task_unlock(tsk);
                if (kill)
                        free_fs_struct(fs);
        }
}

struct fs_struct *copy_fs_struct(struct fs_struct *old)
{
        struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
        /* No need to lock the new fs: nobody else can see it yet. */
        if (fs) {
                fs->users = 1;
                fs->in_exec = 0;
                spin_lock_init(&fs->lock);
                seqcount_init(&fs->seq);
                fs->umask = old->umask;

                spin_lock(&old->lock);
                fs->root = old->root;
                path_get_longterm(&fs->root);
                fs->pwd = old->pwd;
                path_get_longterm(&fs->pwd);
                spin_unlock(&old->lock);
        }
        return fs;
}
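
/*
 * Example caller (illustrative sketch of the fork path): clone() either
 * shares the parent's fs_struct (CLONE_FS) or copies it with
 * copy_fs_struct().  This mirrors the shape of copy_fs() in
 * kernel/fork.c; the in_exec check done there is elided.
 *
 *	static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
 *	{
 *		struct fs_struct *fs = current->fs;
 *
 *		if (clone_flags & CLONE_FS) {
 *			spin_lock(&fs->lock);
 *			fs->users++;
 *			spin_unlock(&fs->lock);
 *			tsk->fs = fs;
 *			return 0;
 *		}
 *		tsk->fs = copy_fs_struct(fs);
 *		return tsk->fs ? 0 : -ENOMEM;
 *	}
 */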

/*
 * Give the calling task a private copy of current->fs, dropping its
 * reference on the old (possibly shared) one.
 */
int unshare_fs_struct(void)
{
        struct fs_struct *fs = current->fs;
        struct fs_struct *new_fs = copy_fs_struct(fs);
        int kill;

        if (!new_fs)
                return -ENOMEM;

        task_lock(current);
        spin_lock(&fs->lock);
        kill = !--fs->users;
        current->fs = new_fs;
        spin_unlock(&fs->lock);
        task_unlock(current);

        if (kill)
                free_fs_struct(fs);

        return 0;
}
EXPORT_SYMBOL_GPL(unshare_fs_struct);
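
/*
 * Example use (illustrative; the thread function below is hypothetical):
 * a kernel service thread that needs a private root/cwd unshares first,
 * so later set_fs_root()/set_fs_pwd() calls cannot affect the task it
 * was forked from.  nfsd does this at thread start.
 *
 *	static int my_service_thread(void *arg)
 *	{
 *		int err = unshare_fs_struct();
 *		if (err)
 *			return err;
 *		// root/pwd changes made from here on stay private
 *		...
 *	}
 */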

int current_umask(void)
{
        return current->fs->umask;
}
EXPORT_SYMBOL(current_umask);
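
/*
 * Example use (illustrative): the VFS applies the umask when creating a
 * new object on a filesystem without POSIX ACL support, by masking the
 * requested mode:
 *
 *	if (!IS_POSIXACL(dir))
 *		mode &= ~current_umask();
 */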

/* to be mentioned only in INIT_TASK */
struct fs_struct init_fs = {
        .users          = 1,
        .lock           = __SPIN_LOCK_UNLOCKED(init_fs.lock),
        .seq            = SEQCNT_ZERO,
        .umask          = 0022,
};

/*
 * Switch the current task over to init_fs, dropping its reference on the
 * fs_struct it was using.
 */
void daemonize_fs_struct(void)
{
        struct fs_struct *fs = current->fs;

        if (fs) {
                int kill;

                task_lock(current);

                spin_lock(&init_fs.lock);
                init_fs.users++;
                spin_unlock(&init_fs.lock);

                spin_lock(&fs->lock);
                current->fs = &init_fs;
                kill = !--fs->users;
                spin_unlock(&fs->lock);

                task_unlock(current);
                if (kill)
                        free_fs_struct(fs);
        }
}
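
/*
 * Example caller (illustrative): daemonize() in kernel/exit.c uses this
 * when turning a thread into a kernel daemon, so the daemon stops pinning
 * whatever root and cwd it happened to inherit:
 *
 *	void daemonize(const char *name, ...)
 *	{
 *		...
 *		daemonize_fs_struct();
 *		...
 *	}
 */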