474968fbb5559e4cb4b8cb2b70844c2f9ffc79d7
[linux-3.10.git] / fs / fuse / file.c
1 /*
2   FUSE: Filesystem in Userspace
3   Copyright (C) 2001-2006  Miklos Szeredi <miklos@szeredi.hu>
4
5   This program can be distributed under the terms of the GNU GPL.
6   See the file COPYING.
7 */
8
9 #include "fuse_i.h"
10
11 #include <linux/pagemap.h>
12 #include <linux/slab.h>
13 #include <linux/kernel.h>
14 #include <linux/sched.h>
15
16 static const struct file_operations fuse_direct_io_file_operations;
17
18 static int fuse_send_open(struct inode *inode, struct file *file, int isdir,
19                           struct fuse_open_out *outargp)
20 {
21         struct fuse_conn *fc = get_fuse_conn(inode);
22         struct fuse_open_in inarg;
23         struct fuse_req *req;
24         int err;
25
26         req = fuse_get_req(fc);
27         if (IS_ERR(req))
28                 return PTR_ERR(req);
29
30         memset(&inarg, 0, sizeof(inarg));
31         inarg.flags = file->f_flags & ~(O_CREAT | O_EXCL | O_NOCTTY);
32         if (!fc->atomic_o_trunc)
33                 inarg.flags &= ~O_TRUNC;
34         req->in.h.opcode = isdir ? FUSE_OPENDIR : FUSE_OPEN;
35         req->in.h.nodeid = get_node_id(inode);
36         req->in.numargs = 1;
37         req->in.args[0].size = sizeof(inarg);
38         req->in.args[0].value = &inarg;
39         req->out.numargs = 1;
40         req->out.args[0].size = sizeof(*outargp);
41         req->out.args[0].value = outargp;
42         request_send(fc, req);
43         err = req->out.h.error;
44         fuse_put_request(fc, req);
45
46         return err;
47 }
48
49 struct fuse_file *fuse_file_alloc(void)
50 {
51         struct fuse_file *ff;
52         ff = kmalloc(sizeof(struct fuse_file), GFP_KERNEL);
53         if (ff) {
54                 ff->reserved_req = fuse_request_alloc();
55                 if (!ff->reserved_req) {
56                         kfree(ff);
57                         ff = NULL;
58                 } else {
59                         INIT_LIST_HEAD(&ff->write_entry);
60                         atomic_set(&ff->count, 0);
61                 }
62         }
63         return ff;
64 }
65
/* Free a fuse_file and the RELEASE request reserved at allocation time */
void fuse_file_free(struct fuse_file *ff)
{
	fuse_request_free(ff->reserved_req);
	kfree(ff);
}
71
/* Take a reference on a fuse_file; returns @ff for call chaining */
static struct fuse_file *fuse_file_get(struct fuse_file *ff)
{
	atomic_inc(&ff->count);
	return ff;
}
77
/*
 * Completion callback for the RELEASE request: drop the dentry and
 * vfsmount references pinned in fuse_release_common(), then put the
 * request itself.
 */
static void fuse_release_end(struct fuse_conn *fc, struct fuse_req *req)
{
	dput(req->dentry);
	mntput(req->vfsmount);
	fuse_put_request(fc, req);
}
84
/*
 * Drop a reference on a fuse_file.  On the last put, send the RELEASE
 * request (previously filled by fuse_release_fill()) in the background
 * and free the fuse_file.  The request itself is cleaned up later by
 * fuse_release_end() when the reply arrives, so only @ff is freed here.
 */
static void fuse_file_put(struct fuse_file *ff)
{
	if (atomic_dec_and_test(&ff->count)) {
		struct fuse_req *req = ff->reserved_req;
		/* req->dentry was pinned in fuse_release_common() */
		struct fuse_conn *fc = get_fuse_conn(req->dentry->d_inode);
		req->end = fuse_release_end;
		request_send_background(fc, req);
		kfree(ff);
	}
}
95
/*
 * Apply the server's open reply to the file: honor the FOPEN_* flags
 * and attach the (referenced) fuse_file to file->private_data.
 */
void fuse_finish_open(struct inode *inode, struct file *file,
		      struct fuse_file *ff, struct fuse_open_out *outarg)
{
	/* Server requested direct I/O: bypass the page cache for this file */
	if (outarg->open_flags & FOPEN_DIRECT_IO)
		file->f_op = &fuse_direct_io_file_operations;
	/* Unless the server wants the cache kept, drop stale pages */
	if (!(outarg->open_flags & FOPEN_KEEP_CACHE))
		invalidate_inode_pages2(inode->i_mapping);
	ff->fh = outarg->fh;
	file->private_data = fuse_file_get(ff);
}
106
107 int fuse_open_common(struct inode *inode, struct file *file, int isdir)
108 {
109         struct fuse_open_out outarg;
110         struct fuse_file *ff;
111         int err;
112
113         /* VFS checks this, but only _after_ ->open() */
114         if (file->f_flags & O_DIRECT)
115                 return -EINVAL;
116
117         err = generic_file_open(inode, file);
118         if (err)
119                 return err;
120
121         ff = fuse_file_alloc();
122         if (!ff)
123                 return -ENOMEM;
124
125         err = fuse_send_open(inode, file, isdir, &outarg);
126         if (err)
127                 fuse_file_free(ff);
128         else {
129                 if (isdir)
130                         outarg.open_flags &= ~FOPEN_DIRECT_IO;
131                 fuse_finish_open(inode, file, ff, &outarg);
132         }
133
134         return err;
135 }
136
/*
 * Prepare the RELEASE/RELEASEDIR request stored in the fuse_file's
 * reserved request.  The request is not sent here: that happens in
 * fuse_file_put() when the last reference goes away.
 */
void fuse_release_fill(struct fuse_file *ff, u64 nodeid, int flags, int opcode)
{
	struct fuse_req *req = ff->reserved_req;
	struct fuse_release_in *inarg = &req->misc.release_in;

	inarg->fh = ff->fh;
	inarg->flags = flags;
	req->in.h.opcode = opcode;
	req->in.h.nodeid = nodeid;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(struct fuse_release_in);
	req->in.args[0].value = inarg;
}
150
/*
 * Common release path for files and directories: fill in the RELEASE
 * request, pin the dentry/vfsmount for its lifetime, remove the file
 * from the connection's write list and drop the file reference.
 */
int fuse_release_common(struct inode *inode, struct file *file, int isdir)
{
	struct fuse_file *ff = file->private_data;
	if (ff) {
		struct fuse_conn *fc = get_fuse_conn(inode);

		fuse_release_fill(ff, get_node_id(inode), file->f_flags,
				  isdir ? FUSE_RELEASEDIR : FUSE_RELEASE);

		/* Hold vfsmount and dentry until release is finished;
		   fuse_release_end() drops these references */
		ff->reserved_req->vfsmount = mntget(file->f_path.mnt);
		ff->reserved_req->dentry = dget(file->f_path.dentry);

		spin_lock(&fc->lock);
		list_del(&ff->write_entry);
		spin_unlock(&fc->lock);
		/*
		 * Normally this will send the RELEASE request,
		 * however if some asynchronous READ or WRITE requests
		 * are outstanding, the sending will be delayed
		 */
		fuse_file_put(ff);
	}

	/* Return value is ignored by VFS */
	return 0;
}
178
/* ->open() for regular files */
static int fuse_open(struct inode *inode, struct file *file)
{
	return fuse_open_common(inode, file, 0);
}
183
/* ->release() for regular files */
static int fuse_release(struct inode *inode, struct file *file)
{
	return fuse_release_common(inode, file, 0);
}
188
189 /*
190  * Scramble the ID space with XTEA, so that the value of the files_struct
191  * pointer is not exposed to userspace.
192  */
193 u64 fuse_lock_owner_id(struct fuse_conn *fc, fl_owner_t id)
194 {
195         u32 *k = fc->scramble_key;
196         u64 v = (unsigned long) id;
197         u32 v0 = v;
198         u32 v1 = v >> 32;
199         u32 sum = 0;
200         int i;
201
202         for (i = 0; i < 32; i++) {
203                 v0 += ((v1 << 4 ^ v1 >> 5) + v1) ^ (sum + k[sum & 3]);
204                 sum += 0x9E3779B9;
205                 v1 += ((v0 << 4 ^ v0 >> 5) + v0) ^ (sum + k[sum>>11 & 3]);
206         }
207
208         return (u64) v0 + ((u64) v1 << 32);
209 }
210
/*
 * ->flush(): notify the filesystem that a file descriptor is being
 * closed.  Sends FUSE_FLUSH carrying the scrambled lock owner id.  A
 * filesystem answering -ENOSYS disables further FLUSH requests on
 * this connection.
 */
static int fuse_flush(struct file *file, fl_owner_t id)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_file *ff = file->private_data;
	struct fuse_req *req;
	struct fuse_flush_in inarg;
	int err;

	if (is_bad_inode(inode))
		return -EIO;

	if (fc->no_flush)
		return 0;

	/* NOTE(review): the nofail variant presumably cannot return an
	   error, hence no IS_ERR() check -- confirm in dev.c */
	req = fuse_get_req_nofail(fc, file);
	memset(&inarg, 0, sizeof(inarg));
	inarg.fh = ff->fh;
	inarg.lock_owner = fuse_lock_owner_id(fc, id);
	req->in.h.opcode = FUSE_FLUSH;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->force = 1;
	request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (err == -ENOSYS) {
		/* Filesystem doesn't implement FLUSH: don't ask again */
		fc->no_flush = 1;
		err = 0;
	}
	return err;
}
245
246 int fuse_fsync_common(struct file *file, struct dentry *de, int datasync,
247                       int isdir)
248 {
249         struct inode *inode = de->d_inode;
250         struct fuse_conn *fc = get_fuse_conn(inode);
251         struct fuse_file *ff = file->private_data;
252         struct fuse_req *req;
253         struct fuse_fsync_in inarg;
254         int err;
255
256         if (is_bad_inode(inode))
257                 return -EIO;
258
259         if ((!isdir && fc->no_fsync) || (isdir && fc->no_fsyncdir))
260                 return 0;
261
262         req = fuse_get_req(fc);
263         if (IS_ERR(req))
264                 return PTR_ERR(req);
265
266         memset(&inarg, 0, sizeof(inarg));
267         inarg.fh = ff->fh;
268         inarg.fsync_flags = datasync ? 1 : 0;
269         req->in.h.opcode = isdir ? FUSE_FSYNCDIR : FUSE_FSYNC;
270         req->in.h.nodeid = get_node_id(inode);
271         req->in.numargs = 1;
272         req->in.args[0].size = sizeof(inarg);
273         req->in.args[0].value = &inarg;
274         request_send(fc, req);
275         err = req->out.h.error;
276         fuse_put_request(fc, req);
277         if (err == -ENOSYS) {
278                 if (isdir)
279                         fc->no_fsyncdir = 1;
280                 else
281                         fc->no_fsync = 1;
282                 err = 0;
283         }
284         return err;
285 }
286
/* ->fsync() for regular files */
static int fuse_fsync(struct file *file, struct dentry *de, int datasync)
{
	return fuse_fsync_common(file, de, datasync, 0);
}
291
/*
 * Fill in a read-style request (@opcode, e.g. FUSE_READ) for @count
 * bytes at @pos.  The reply data lands in the request's pages.
 */
void fuse_read_fill(struct fuse_req *req, struct fuse_file *ff,
		    struct inode *inode, loff_t pos, size_t count, int opcode)
{
	struct fuse_read_in *inarg = &req->misc.read_in;

	inarg->fh = ff->fh;
	inarg->offset = pos;
	inarg->size = count;
	req->in.h.opcode = opcode;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(struct fuse_read_in);
	req->in.args[0].value = inarg;
	req->out.argpages = 1;
	/* argvar: presumably lets the reply be shorter than @count;
	   callers read the actual size from out.args[0].size */
	req->out.argvar = 1;
	req->out.numargs = 1;
	req->out.args[0].size = count;
}
310
/*
 * Send a synchronous READ request.  If @owner is non-NULL its
 * scrambled id is passed along so the filesystem can do lock checks.
 * Returns the number of bytes the reply actually carried.
 */
static size_t fuse_send_read(struct fuse_req *req, struct file *file,
			     struct inode *inode, loff_t pos, size_t count,
			     fl_owner_t owner)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_file *ff = file->private_data;

	fuse_read_fill(req, ff, inode, pos, count, FUSE_READ);
	if (owner != NULL) {
		struct fuse_read_in *inarg = &req->misc.read_in;

		inarg->read_flags |= FUSE_READ_LOCKOWNER;
		inarg->lock_owner = fuse_lock_owner_id(fc, owner);
	}
	request_send(fc, req);
	return req->out.args[0].size;
}
328
/*
 * ->readpage(): synchronously read a single page.  The page is
 * unlocked on every path and only marked up to date when the request
 * succeeded.
 */
static int fuse_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	int err;

	err = -EIO;
	if (is_bad_inode(inode))
		goto out;

	req = fuse_get_req(fc);
	err = PTR_ERR(req);
	if (IS_ERR(req))
		goto out;

	/* page_zeroing: presumably zero-fills whatever part of the page
	   a short reply leaves uncovered -- confirm in dev.c */
	req->out.page_zeroing = 1;
	req->num_pages = 1;
	req->pages[0] = page;
	fuse_send_read(req, file, inode, page_offset(page), PAGE_CACHE_SIZE,
		       NULL);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (!err)
		SetPageUptodate(page);
	fuse_invalidate_attr(inode); /* atime changed */
 out:
	unlock_page(page);
	return err;
}
359
/*
 * Completion of a readpages request (sync or async): mark every page
 * up to date or in error, unlock it, and drop the file and request
 * references.
 */
static void fuse_readpages_end(struct fuse_conn *fc, struct fuse_req *req)
{
	int i;

	fuse_invalidate_attr(req->pages[0]->mapping->host); /* atime changed */

	for (i = 0; i < req->num_pages; i++) {
		struct page *page = req->pages[i];
		if (!req->out.h.error)
			SetPageUptodate(page);
		else
			SetPageError(page);
		unlock_page(page);
	}
	/* req->ff is only set on the async path (fuse_send_readpages) */
	if (req->ff)
		fuse_file_put(req->ff);
	fuse_put_request(fc, req);
}
378
/*
 * Send a batched readpages request.  On an async-capable connection
 * the request goes to the background and fuse_readpages_end() runs on
 * completion; otherwise it is sent synchronously and finished here.
 */
static void fuse_send_readpages(struct fuse_req *req, struct fuse_file *ff,
				struct inode *inode)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	loff_t pos = page_offset(req->pages[0]);
	size_t count = req->num_pages << PAGE_CACHE_SHIFT;
	req->out.page_zeroing = 1;
	fuse_read_fill(req, ff, inode, pos, count, FUSE_READ);
	if (fc->async_read) {
		/* Keep the file alive until the request completes */
		req->ff = fuse_file_get(ff);
		req->end = fuse_readpages_end;
		request_send_background(fc, req);
	} else {
		request_send(fc, req);
		fuse_readpages_end(fc, req);
	}
}
396
/* State threaded through fuse_readpages_fill() while batching pages */
struct fuse_fill_data {
	struct fuse_req *req;	/* request currently being filled */
	struct fuse_file *ff;	/* file the reads are done on behalf of */
	struct inode *inode;	/* inode being read */
};
402
403 static int fuse_readpages_fill(void *_data, struct page *page)
404 {
405         struct fuse_fill_data *data = _data;
406         struct fuse_req *req = data->req;
407         struct inode *inode = data->inode;
408         struct fuse_conn *fc = get_fuse_conn(inode);
409
410         if (req->num_pages &&
411             (req->num_pages == FUSE_MAX_PAGES_PER_REQ ||
412              (req->num_pages + 1) * PAGE_CACHE_SIZE > fc->max_read ||
413              req->pages[req->num_pages - 1]->index + 1 != page->index)) {
414                 fuse_send_readpages(req, data->ff, inode);
415                 data->req = req = fuse_get_req(fc);
416                 if (IS_ERR(req)) {
417                         unlock_page(page);
418                         return PTR_ERR(req);
419                 }
420         }
421         req->pages[req->num_pages] = page;
422         req->num_pages ++;
423         return 0;
424 }
425
/*
 * ->readpages(): batch contiguous pages into large READ requests via
 * fuse_readpages_fill(), then send (or discard) the final partial
 * batch.
 */
static int fuse_readpages(struct file *file, struct address_space *mapping,
			  struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_fill_data data;
	int err;

	err = -EIO;
	if (is_bad_inode(inode))
		goto out;

	data.ff = file->private_data;
	data.inode = inode;
	data.req = fuse_get_req(fc);
	err = PTR_ERR(data.req);
	if (IS_ERR(data.req))
		goto out;

	err = read_cache_pages(mapping, pages, fuse_readpages_fill, &data);
	if (!err) {
		/* Flush the last, possibly partial, batch */
		if (data.req->num_pages)
			fuse_send_readpages(data.req, data.ff, inode);
		else
			fuse_put_request(fc, data.req);
	}
out:
	return err;
}
455
456 static ssize_t fuse_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
457                                   unsigned long nr_segs, loff_t pos)
458 {
459         struct inode *inode = iocb->ki_filp->f_mapping->host;
460
461         if (pos + iov_length(iov, nr_segs) > i_size_read(inode)) {
462                 int err;
463                 /*
464                  * If trying to read past EOF, make sure the i_size
465                  * attribute is up-to-date.
466                  */
467                 err = fuse_update_attributes(inode, NULL, iocb->ki_filp, NULL);
468                 if (err)
469                         return err;
470         }
471
472         return generic_file_aio_read(iocb, iov, nr_segs, pos);
473 }
474
/*
 * Fill in a WRITE request for @count bytes at @pos.  The data is
 * passed through the request's pages as the second input argument.
 * Protocol minor versions before 9 use the shorter compat header.
 */
static void fuse_write_fill(struct fuse_req *req, struct fuse_file *ff,
			    struct inode *inode, loff_t pos, size_t count,
			    int writepage)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_write_in *inarg = &req->misc.write.in;
	struct fuse_write_out *outarg = &req->misc.write.out;

	memset(inarg, 0, sizeof(struct fuse_write_in));
	inarg->fh = ff->fh;
	inarg->offset = pos;
	inarg->size = count;
	/* FUSE_WRITE_CACHE: presumably marks page-cache originated
	   writes for the server -- confirm against the protocol header */
	inarg->write_flags = writepage ? FUSE_WRITE_CACHE : 0;
	req->in.h.opcode = FUSE_WRITE;
	req->in.h.nodeid = get_node_id(inode);
	req->in.argpages = 1;
	req->in.numargs = 2;
	if (fc->minor < 9)
		req->in.args[0].size = FUSE_COMPAT_WRITE_IN_SIZE;
	else
		req->in.args[0].size = sizeof(struct fuse_write_in);
	req->in.args[0].value = inarg;
	req->in.args[1].size = count;
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(struct fuse_write_out);
	req->out.args[0].value = outarg;
}
502
/*
 * Send a synchronous WRITE request; when @owner is non-NULL its
 * scrambled id is attached for lock checks.  Returns the byte count
 * the filesystem reports as written.
 */
static size_t fuse_send_write(struct fuse_req *req, struct file *file,
			      struct inode *inode, loff_t pos, size_t count,
			      fl_owner_t owner)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	fuse_write_fill(req, file->private_data, inode, pos, count, 0);
	if (owner != NULL) {
		struct fuse_write_in *inarg = &req->misc.write.in;
		inarg->write_flags |= FUSE_WRITE_LOCKOWNER;
		inarg->lock_owner = fuse_lock_owner_id(fc, owner);
	}
	request_send(fc, req);
	return req->misc.write.out.size;
}
517
/*
 * ->write_begin(): grab and lock the page cache page that the caller
 * will copy the user data into; fuse_write_end() sends it out.
 */
static int fuse_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;

	*pagep = __grab_cache_page(mapping, index);
	if (!*pagep)
		return -ENOMEM;
	return 0;
}
529
/*
 * Synchronously write @count bytes of @page at @pos and push out
 * i_size if the file grew.  Returns the number of bytes written or a
 * negative error; a successful request that wrote nothing becomes
 * -EIO.
 */
static int fuse_buffered_write(struct file *file, struct inode *inode,
			       loff_t pos, unsigned count, struct page *page)
{
	int err;
	size_t nres;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);
	unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
	struct fuse_req *req;

	if (is_bad_inode(inode))
		return -EIO;

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->num_pages = 1;
	req->pages[0] = page;
	req->page_offset = offset;
	nres = fuse_send_write(req, file, inode, pos, count, NULL);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (!err && !nres)
		err = -EIO;
	if (!err) {
		pos += nres;
		/* i_size and attr_version updates go under fc->lock */
		spin_lock(&fc->lock);
		fi->attr_version = ++fc->attr_version;
		if (pos > inode->i_size)
			i_size_write(inode, pos);
		spin_unlock(&fc->lock);

		/* Only a full-page write makes the page up to date */
		if (count == PAGE_CACHE_SIZE)
			SetPageUptodate(page);
	}
	fuse_invalidate_attr(inode);
	return err ? err : nres;
}
569
570 static int fuse_write_end(struct file *file, struct address_space *mapping,
571                         loff_t pos, unsigned len, unsigned copied,
572                         struct page *page, void *fsdata)
573 {
574         struct inode *inode = mapping->host;
575         int res = 0;
576
577         if (copied)
578                 res = fuse_buffered_write(file, inode, pos, copied, page);
579
580         unlock_page(page);
581         page_cache_release(page);
582         return res;
583 }
584
585 static void fuse_release_user_pages(struct fuse_req *req, int write)
586 {
587         unsigned i;
588
589         for (i = 0; i < req->num_pages; i++) {
590                 struct page *page = req->pages[i];
591                 if (write)
592                         set_page_dirty_lock(page);
593                 put_page(page);
594         }
595 }
596
/*
 * Pin the pages backing the user buffer into req->pages for direct
 * I/O.  At most FUSE_MAX_PAGES_PER_REQ pages are taken, so fewer
 * bytes than @nbytes may end up mapped; the caller recomputes the
 * chunk size from num_pages/page_offset.  Fails with -EPERM when the
 * task has no mm.
 */
static int fuse_get_user_pages(struct fuse_req *req, const char __user *buf,
			       unsigned nbytes, int write)
{
	unsigned long user_addr = (unsigned long) buf;
	unsigned offset = user_addr & ~PAGE_MASK;
	int npages;

	/* This doesn't work with nfsd */
	if (!current->mm)
		return -EPERM;

	nbytes = min(nbytes, (unsigned) FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT);
	npages = (nbytes + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
	npages = min(max(npages, 1), FUSE_MAX_PAGES_PER_REQ);
	down_read(&current->mm->mmap_sem);
	npages = get_user_pages(current, current->mm, user_addr, npages, write,
				0, req->pages, NULL);
	up_read(&current->mm->mmap_sem);
	if (npages < 0)
		return npages;

	req->num_pages = npages;
	req->page_offset = offset;
	return 0;
}
622
623 static ssize_t fuse_direct_io(struct file *file, const char __user *buf,
624                               size_t count, loff_t *ppos, int write)
625 {
626         struct inode *inode = file->f_path.dentry->d_inode;
627         struct fuse_conn *fc = get_fuse_conn(inode);
628         size_t nmax = write ? fc->max_write : fc->max_read;
629         loff_t pos = *ppos;
630         ssize_t res = 0;
631         struct fuse_req *req;
632
633         if (is_bad_inode(inode))
634                 return -EIO;
635
636         req = fuse_get_req(fc);
637         if (IS_ERR(req))
638                 return PTR_ERR(req);
639
640         while (count) {
641                 size_t nres;
642                 size_t nbytes = min(count, nmax);
643                 int err = fuse_get_user_pages(req, buf, nbytes, !write);
644                 if (err) {
645                         res = err;
646                         break;
647                 }
648                 nbytes = (req->num_pages << PAGE_SHIFT) - req->page_offset;
649                 nbytes = min(count, nbytes);
650                 if (write)
651                         nres = fuse_send_write(req, file, inode, pos, nbytes,
652                                                current->files);
653                 else
654                         nres = fuse_send_read(req, file, inode, pos, nbytes,
655                                               current->files);
656                 fuse_release_user_pages(req, !write);
657                 if (req->out.h.error) {
658                         if (!res)
659                                 res = req->out.h.error;
660                         break;
661                 } else if (nres > nbytes) {
662                         res = -EIO;
663                         break;
664                 }
665                 count -= nres;
666                 res += nres;
667                 pos += nres;
668                 buf += nres;
669                 if (nres != nbytes)
670                         break;
671                 if (count) {
672                         fuse_put_request(fc, req);
673                         req = fuse_get_req(fc);
674                         if (IS_ERR(req))
675                                 break;
676                 }
677         }
678         fuse_put_request(fc, req);
679         if (res > 0) {
680                 if (write) {
681                         spin_lock(&fc->lock);
682                         if (pos > inode->i_size)
683                                 i_size_write(inode, pos);
684                         spin_unlock(&fc->lock);
685                 }
686                 *ppos = pos;
687         }
688         fuse_invalidate_attr(inode);
689
690         return res;
691 }
692
/* ->read() for FOPEN_DIRECT_IO files: no page cache involved */
static ssize_t fuse_direct_read(struct file *file, char __user *buf,
				     size_t count, loff_t *ppos)
{
	return fuse_direct_io(file, buf, count, ppos, 0);
}
698
/*
 * ->write() for FOPEN_DIRECT_IO files.  i_mutex serializes writers so
 * concurrent writes cannot interleave their chunked requests.
 */
static ssize_t fuse_direct_write(struct file *file, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	ssize_t res;
	/* Don't allow parallel writes to the same file */
	mutex_lock(&inode->i_mutex);
	res = generic_write_checks(file, ppos, &count, 0);
	if (!res)
		res = fuse_direct_io(file, buf, count, ppos, 1);
	mutex_unlock(&inode->i_mutex);
	return res;
}
712
713 static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma)
714 {
715         if ((vma->vm_flags & VM_SHARED)) {
716                 if ((vma->vm_flags & VM_WRITE))
717                         return -ENODEV;
718                 else
719                         vma->vm_flags &= ~VM_MAYWRITE;
720         }
721         return generic_file_mmap(file, vma);
722 }
723
724 static int fuse_set_page_dirty(struct page *page)
725 {
726         printk("fuse_set_page_dirty: should not happen\n");
727         dump_stack();
728         return 0;
729 }
730
731 static int convert_fuse_file_lock(const struct fuse_file_lock *ffl,
732                                   struct file_lock *fl)
733 {
734         switch (ffl->type) {
735         case F_UNLCK:
736                 break;
737
738         case F_RDLCK:
739         case F_WRLCK:
740                 if (ffl->start > OFFSET_MAX || ffl->end > OFFSET_MAX ||
741                     ffl->end < ffl->start)
742                         return -EIO;
743
744                 fl->fl_start = ffl->start;
745                 fl->fl_end = ffl->end;
746                 fl->fl_pid = ffl->pid;
747                 break;
748
749         default:
750                 return -EIO;
751         }
752         fl->fl_type = ffl->type;
753         return 0;
754 }
755
/*
 * Fill in a GETLK/SETLK/SETLKW request from a VFS file_lock.  The
 * owner is scrambled with fuse_lock_owner_id() before being exposed
 * to userspace; flock-style locks are flagged with FUSE_LK_FLOCK.
 */
static void fuse_lk_fill(struct fuse_req *req, struct file *file,
			 const struct file_lock *fl, int opcode, pid_t pid,
			 int flock)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_file *ff = file->private_data;
	struct fuse_lk_in *arg = &req->misc.lk_in;

	arg->fh = ff->fh;
	arg->owner = fuse_lock_owner_id(fc, fl->fl_owner);
	arg->lk.start = fl->fl_start;
	arg->lk.end = fl->fl_end;
	arg->lk.type = fl->fl_type;
	arg->lk.pid = pid;
	if (flock)
		arg->lk_flags |= FUSE_LK_FLOCK;
	req->in.h.opcode = opcode;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(*arg);
	req->in.args[0].value = arg;
}
779
/*
 * F_GETLK: ask the filesystem whether the lock would conflict and
 * translate the reply back into the caller's file_lock.
 */
static int fuse_getlk(struct file *file, struct file_lock *fl)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	struct fuse_lk_out outarg;
	int err;

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	fuse_lk_fill(req, file, fl, FUSE_GETLK, 0, 0);
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(outarg);
	req->out.args[0].value = &outarg;
	request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (!err)
		err = convert_fuse_file_lock(&outarg.lk, fl);

	return err;
}
804
/*
 * Set or clear a lock via FUSE_SETLK (or FUSE_SETLKW when the caller
 * may sleep).  Unlock-on-close is skipped here: that case is conveyed
 * through the FLUSH request's lock owner instead.  An interrupted
 * blocking lock is reported as restartable.
 */
static int fuse_setlk(struct file *file, struct file_lock *fl, int flock)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	int opcode = (fl->fl_flags & FL_SLEEP) ? FUSE_SETLKW : FUSE_SETLK;
	/* Only a real lock carries the locker's tgid; unlock sends 0 */
	pid_t pid = fl->fl_type != F_UNLCK ? current->tgid : 0;
	int err;

	/* Unlock on close is handled by the flush method */
	if (fl->fl_flags & FL_CLOSE)
		return 0;

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	fuse_lk_fill(req, file, fl, opcode, pid, flock);
	request_send(fc, req);
	err = req->out.h.error;
	/* locking is restartable */
	if (err == -EINTR)
		err = -ERESTARTSYS;
	fuse_put_request(fc, req);
	return err;
}
831
832 static int fuse_file_lock(struct file *file, int cmd, struct file_lock *fl)
833 {
834         struct inode *inode = file->f_path.dentry->d_inode;
835         struct fuse_conn *fc = get_fuse_conn(inode);
836         int err;
837
838         if (cmd == F_GETLK) {
839                 if (fc->no_lock) {
840                         posix_test_lock(file, fl);
841                         err = 0;
842                 } else
843                         err = fuse_getlk(file, fl);
844         } else {
845                 if (fc->no_lock)
846                         err = posix_lock_file_wait(file, fl);
847                 else
848                         err = fuse_setlk(file, fl, 0);
849         }
850         return err;
851 }
852
/*
 * ->flock() handler.  Filesystems without lock support get local
 * kernel flock semantics; otherwise flock is emulated over the POSIX
 * lock protocol using the struct file pointer as owner.
 */
static int fuse_file_flock(struct file *file, int cmd, struct file_lock *fl)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err;

	if (fc->no_lock) {
		err = flock_lock_file_wait(file, fl);
	} else {
		/* emulate flock with POSIX locks */
		fl->fl_owner = (fl_owner_t) file;
		err = fuse_setlk(file, fl, 1);
	}

	return err;
}
869
/*
 * ->bmap(): map a file block to a device block via FUSE_BMAP.  Only
 * meaningful for block-device backed filesystems.  Returns 0 on any
 * failure (0 means "unmapped" to the caller) and disables itself on
 * an -ENOSYS reply.
 */
static sector_t fuse_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	struct fuse_bmap_in inarg;
	struct fuse_bmap_out outarg;
	int err;

	if (!inode->i_sb->s_bdev || fc->no_bmap)
		return 0;

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return 0;

	memset(&inarg, 0, sizeof(inarg));
	inarg.block = block;
	inarg.blocksize = inode->i_sb->s_blocksize;
	req->in.h.opcode = FUSE_BMAP;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(outarg);
	req->out.args[0].value = &outarg;
	request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (err == -ENOSYS)
		fc->no_bmap = 1;

	return err ? 0 : outarg.block;
}
905
/* Operations for regular files going through the page cache */
static const struct file_operations fuse_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.aio_read	= fuse_file_aio_read,
	.write		= do_sync_write,
	.aio_write	= generic_file_aio_write,
	.mmap		= fuse_file_mmap,
	.open		= fuse_open,
	.flush		= fuse_flush,
	.release	= fuse_release,
	.fsync		= fuse_fsync,
	.lock		= fuse_file_lock,
	.flock		= fuse_file_flock,
	.splice_read	= generic_file_splice_read,
};
921
/* Operations installed by fuse_finish_open() for FOPEN_DIRECT_IO
   files: reads and writes bypass the page cache entirely */
static const struct file_operations fuse_direct_io_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= fuse_direct_read,
	.write		= fuse_direct_write,
	.open		= fuse_open,
	.flush		= fuse_flush,
	.release	= fuse_release,
	.fsync		= fuse_fsync,
	.lock		= fuse_file_lock,
	.flock		= fuse_file_flock,
	/* no mmap and splice_read */
};
934
/* Address space operations for fuse regular files */
static const struct address_space_operations fuse_file_aops  = {
	.readpage	= fuse_readpage,
	.write_begin	= fuse_write_begin,
	.write_end	= fuse_write_end,
	.readpages	= fuse_readpages,
	.set_page_dirty	= fuse_set_page_dirty,
	.bmap		= fuse_bmap,
};
943
/* Install file and address_space operations on a regular file inode */
void fuse_init_file_inode(struct inode *inode)
{
	inode->i_fop = &fuse_file_operations;
	inode->i_data.a_ops = &fuse_file_aops;
}