ceph: don't truncate dirty pages in invalidate work thread
[linux-2.6.git] fs/ceph/inode.c
1 #include "ceph_debug.h"
2
3 #include <linux/module.h>
4 #include <linux/fs.h>
5 #include <linux/smp_lock.h>
6 #include <linux/slab.h>
7 #include <linux/string.h>
8 #include <linux/uaccess.h>
9 #include <linux/kernel.h>
10 #include <linux/namei.h>
11 #include <linux/writeback.h>
12 #include <linux/vmalloc.h>
13 #include <linux/pagevec.h>
14
15 #include "super.h"
16 #include "decode.h"
17
18 /*
19  * Ceph inode operations
20  *
21  * Implement basic inode helpers (get, alloc) and inode ops (getattr,
22  * setattr, etc.), xattr helpers, and helpers for assimilating
23  * metadata returned by the MDS into our cache.
24  *
25  * Also define helpers for doing asynchronous writeback, invalidation,
26  * and truncation for the benefit of those who can't afford to block
27  * (typically because they are in the message handler path).
28  */
29
30 static const struct inode_operations ceph_symlink_iops;
31
32 static void ceph_invalidate_work(struct work_struct *work);
33 static void ceph_writeback_work(struct work_struct *work);
34 static void ceph_vmtruncate_work(struct work_struct *work);
35
36 /*
37  * find or create an inode, given the ceph ino number
38  */
39 struct inode *ceph_get_inode(struct super_block *sb, struct ceph_vino vino)
40 {
41         struct inode *inode;
42         ino_t t = ceph_vino_to_ino(vino);
43
44         inode = iget5_locked(sb, t, ceph_ino_compare, ceph_set_ino_cb, &vino);
45         if (inode == NULL)
46                 return ERR_PTR(-ENOMEM);
47         if (inode->i_state & I_NEW) {
48                 dout("get_inode created new inode %p %llx.%llx ino %llx\n",
49                      inode, ceph_vinop(inode), (u64)inode->i_ino);
50                 unlock_new_inode(inode);
51         }
52
53         dout("get_inode on %lu=%llx.%llx got %p\n", inode->i_ino, vino.ino,
54              vino.snap, inode);
55         return inode;
56 }
57
58 /*
59  * get/construct snapdir inode for a given directory
60  */
61 struct inode *ceph_get_snapdir(struct inode *parent)
62 {
63         struct ceph_vino vino = {
64                 .ino = ceph_ino(parent),
65                 .snap = CEPH_SNAPDIR,
66         };
67         struct inode *inode = ceph_get_inode(parent->i_sb, vino);
68         struct ceph_inode_info *ci = ceph_inode(inode);
69
70         BUG_ON(!S_ISDIR(parent->i_mode));
71         if (IS_ERR(inode))
72                 return ERR_PTR(PTR_ERR(inode));
73         inode->i_mode = parent->i_mode;
74         inode->i_uid = parent->i_uid;
75         inode->i_gid = parent->i_gid;
76         inode->i_op = &ceph_dir_iops;
77         inode->i_fop = &ceph_dir_fops;
78         ci->i_snap_caps = CEPH_CAP_PIN; /* so we can open */
79         ci->i_rbytes = 0;
80         return inode;
81 }
82
83 const struct inode_operations ceph_file_iops = {
84         .permission = ceph_permission,
85         .setattr = ceph_setattr,
86         .getattr = ceph_getattr,
87         .setxattr = ceph_setxattr,
88         .getxattr = ceph_getxattr,
89         .listxattr = ceph_listxattr,
90         .removexattr = ceph_removexattr,
91 };
92
93
94 /*
95  * We use a 'frag tree' to keep track of the MDS's directory fragments
96  * for a given inode (usually there is just a single fragment).  We
97  * need to know when a child frag is delegated to a new MDS, or when
98  * it is flagged as replicated, so we can direct our requests
99  * accordingly.
100  */
101
102 /*
103  * find/create a frag in the tree
104  */
105 static struct ceph_inode_frag *__get_or_create_frag(struct ceph_inode_info *ci,
106                                                     u32 f)
107 {
108         struct rb_node **p;
109         struct rb_node *parent = NULL;
110         struct ceph_inode_frag *frag;
111         int c;
112
113         p = &ci->i_fragtree.rb_node;
114         while (*p) {
115                 parent = *p;
116                 frag = rb_entry(parent, struct ceph_inode_frag, node);
117                 c = ceph_frag_compare(f, frag->frag);
118                 if (c < 0)
119                         p = &(*p)->rb_left;
120                 else if (c > 0)
121                         p = &(*p)->rb_right;
122                 else
123                         return frag;
124         }
125
126         frag = kmalloc(sizeof(*frag), GFP_NOFS);
127         if (!frag) {
128                 pr_err("__get_or_create_frag ENOMEM on %p %llx.%llx "
129                        "frag %x\n", &ci->vfs_inode,
130                        ceph_vinop(&ci->vfs_inode), f);
131                 return ERR_PTR(-ENOMEM);
132         }
133         frag->frag = f;
134         frag->split_by = 0;
135         frag->mds = -1;
136         frag->ndist = 0;
137
138         rb_link_node(&frag->node, parent, p);
139         rb_insert_color(&frag->node, &ci->i_fragtree);
140
141         dout("get_or_create_frag added %llx.%llx frag %x\n",
142              ceph_vinop(&ci->vfs_inode), f);
143         return frag;
144 }
145
146 /*
147  * find a specific frag @f
148  */
149 struct ceph_inode_frag *__ceph_find_frag(struct ceph_inode_info *ci, u32 f)
150 {
151         struct rb_node *n = ci->i_fragtree.rb_node;
152
153         while (n) {
154                 struct ceph_inode_frag *frag =
155                         rb_entry(n, struct ceph_inode_frag, node);
156                 int c = ceph_frag_compare(f, frag->frag);
157                 if (c < 0)
158                         n = n->rb_left;
159                 else if (c > 0)
160                         n = n->rb_right;
161                 else
162                         return frag;
163         }
164         return NULL;
165 }
166
167 /*
168  * Choose frag containing the given value @v.  If @pfrag is
169  * specified, copy the frag delegation info to the caller if
170  * it is present.
171  */
172 u32 ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
173                      struct ceph_inode_frag *pfrag,
174                      int *found)
175 {
176         u32 t = ceph_frag_make(0, 0);
177         struct ceph_inode_frag *frag;
178         unsigned nway, i;
179         u32 n;
180
181         if (found)
182                 *found = 0;
183
184         mutex_lock(&ci->i_fragtree_mutex);
185         while (1) {
186                 WARN_ON(!ceph_frag_contains_value(t, v));
187                 frag = __ceph_find_frag(ci, t);
188                 if (!frag)
189                         break; /* t is a leaf */
190                 if (frag->split_by == 0) {
191                         if (pfrag)
192                                 memcpy(pfrag, frag, sizeof(*pfrag));
193                         if (found)
194                                 *found = 1;
195                         break;
196                 }
197
198                 /* choose child */
199                 nway = 1 << frag->split_by;
200                 dout("choose_frag(%x) %x splits by %d (%d ways)\n", v, t,
201                      frag->split_by, nway);
202                 for (i = 0; i < nway; i++) {
203                         n = ceph_frag_make_child(t, frag->split_by, i);
204                         if (ceph_frag_contains_value(n, v)) {
205                                 t = n;
206                                 break;
207                         }
208                 }
209                 BUG_ON(i == nway);
210         }
211         dout("choose_frag(%x) = %x\n", v, t);
212
213         mutex_unlock(&ci->i_fragtree_mutex);
214         return t;
215 }
216
217 /*
218  * Process dirfrag (delegation) info from the mds.  Include leaf
219  * fragment in tree ONLY if ndist > 0.  Otherwise, only
220  * branches/splits are included in i_fragtree.
221  */
222 static int ceph_fill_dirfrag(struct inode *inode,
223                              struct ceph_mds_reply_dirfrag *dirinfo)
224 {
225         struct ceph_inode_info *ci = ceph_inode(inode);
226         struct ceph_inode_frag *frag;
227         u32 id = le32_to_cpu(dirinfo->frag);
228         int mds = le32_to_cpu(dirinfo->auth);
229         int ndist = le32_to_cpu(dirinfo->ndist);
230         int i;
231         int err = 0;
232
233         mutex_lock(&ci->i_fragtree_mutex);
234         if (ndist == 0) {
235                 /* no delegation info needed. */
236                 frag = __ceph_find_frag(ci, id);
237                 if (!frag)
238                         goto out;
239                 if (frag->split_by == 0) {
240                         /* tree leaf, remove */
241                         dout("fill_dirfrag removed %llx.%llx frag %x"
242                              " (no ref)\n", ceph_vinop(inode), id);
243                         rb_erase(&frag->node, &ci->i_fragtree);
244                         kfree(frag);
245                 } else {
246                         /* tree branch, keep and clear */
247                         dout("fill_dirfrag cleared %llx.%llx frag %x"
248                              " referral\n", ceph_vinop(inode), id);
249                         frag->mds = -1;
250                         frag->ndist = 0;
251                 }
252                 goto out;
253         }
254
255
256         /* find/add this frag to store mds delegation info */
257         frag = __get_or_create_frag(ci, id);
258         if (IS_ERR(frag)) {
259                 /* this is not the end of the world; we can continue
260                    with bad/inaccurate delegation info */
261                 pr_err("fill_dirfrag ENOMEM on mds ref %llx.%llx fg %x\n",
262                        ceph_vinop(inode), le32_to_cpu(dirinfo->frag));
263                 err = -ENOMEM;
264                 goto out;
265         }
266
267         frag->mds = mds;
268         frag->ndist = min_t(u32, ndist, CEPH_MAX_DIRFRAG_REP);
269         for (i = 0; i < frag->ndist; i++)
270                 frag->dist[i] = le32_to_cpu(dirinfo->dist[i]);
271         dout("fill_dirfrag %llx.%llx frag %x ndist=%d\n",
272              ceph_vinop(inode), frag->frag, frag->ndist);
273
274 out:
275         mutex_unlock(&ci->i_fragtree_mutex);
276         return err;
277 }
278
279
280 /*
281  * initialize a newly allocated inode.
282  */
283 struct inode *ceph_alloc_inode(struct super_block *sb)
284 {
285         struct ceph_inode_info *ci;
286         int i;
287
288         ci = kmem_cache_alloc(ceph_inode_cachep, GFP_NOFS);
289         if (!ci)
290                 return NULL;
291
292         dout("alloc_inode %p\n", &ci->vfs_inode);
293
294         ci->i_version = 0;
295         ci->i_time_warp_seq = 0;
296         ci->i_ceph_flags = 0;
297         ci->i_release_count = 0;
298         ci->i_symlink = NULL;
299
300         ci->i_fragtree = RB_ROOT;
301         mutex_init(&ci->i_fragtree_mutex);
302
303         ci->i_xattrs.blob = NULL;
304         ci->i_xattrs.prealloc_blob = NULL;
305         ci->i_xattrs.dirty = false;
306         ci->i_xattrs.index = RB_ROOT;
307         ci->i_xattrs.count = 0;
308         ci->i_xattrs.names_size = 0;
309         ci->i_xattrs.vals_size = 0;
310         ci->i_xattrs.version = 0;
311         ci->i_xattrs.index_version = 0;
312
313         ci->i_caps = RB_ROOT;
314         ci->i_auth_cap = NULL;
315         ci->i_dirty_caps = 0;
316         ci->i_flushing_caps = 0;
317         INIT_LIST_HEAD(&ci->i_dirty_item);
318         INIT_LIST_HEAD(&ci->i_flushing_item);
319         ci->i_cap_flush_seq = 0;
320         ci->i_cap_flush_last_tid = 0;
321         memset(&ci->i_cap_flush_tid, 0, sizeof(ci->i_cap_flush_tid));
322         init_waitqueue_head(&ci->i_cap_wq);
323         ci->i_hold_caps_min = 0;
324         ci->i_hold_caps_max = 0;
325         INIT_LIST_HEAD(&ci->i_cap_delay_list);
326         ci->i_cap_exporting_mds = 0;
327         ci->i_cap_exporting_mseq = 0;
328         ci->i_cap_exporting_issued = 0;
329         INIT_LIST_HEAD(&ci->i_cap_snaps);
330         ci->i_head_snapc = NULL;
331         ci->i_snap_caps = 0;
332
333         for (i = 0; i < CEPH_FILE_MODE_NUM; i++)
334                 ci->i_nr_by_mode[i] = 0;
335
336         ci->i_truncate_seq = 0;
337         ci->i_truncate_size = 0;
338         ci->i_truncate_pending = 0;
339
340         ci->i_max_size = 0;
341         ci->i_reported_size = 0;
342         ci->i_wanted_max_size = 0;
343         ci->i_requested_max_size = 0;
344
345         ci->i_pin_ref = 0;
346         ci->i_rd_ref = 0;
347         ci->i_rdcache_ref = 0;
348         ci->i_wr_ref = 0;
349         ci->i_wrbuffer_ref = 0;
350         ci->i_wrbuffer_ref_head = 0;
351         ci->i_shared_gen = 0;
352         ci->i_rdcache_gen = 0;
353         ci->i_rdcache_revoking = 0;
354
355         INIT_LIST_HEAD(&ci->i_unsafe_writes);
356         INIT_LIST_HEAD(&ci->i_unsafe_dirops);
357         spin_lock_init(&ci->i_unsafe_lock);
358
359         ci->i_snap_realm = NULL;
360         INIT_LIST_HEAD(&ci->i_snap_realm_item);
361         INIT_LIST_HEAD(&ci->i_snap_flush_item);
362
363         INIT_WORK(&ci->i_wb_work, ceph_writeback_work);
364         INIT_WORK(&ci->i_pg_inv_work, ceph_invalidate_work);
365
366         INIT_WORK(&ci->i_vmtruncate_work, ceph_vmtruncate_work);
367
368         return &ci->vfs_inode;
369 }
370
371 void ceph_destroy_inode(struct inode *inode)
372 {
373         struct ceph_inode_info *ci = ceph_inode(inode);
374         struct ceph_inode_frag *frag;
375         struct rb_node *n;
376
377         dout("destroy_inode %p ino %llx.%llx\n", inode, ceph_vinop(inode));
378
379         ceph_queue_caps_release(inode);
380
381         kfree(ci->i_symlink);
382         while ((n = rb_first(&ci->i_fragtree)) != NULL) {
383                 frag = rb_entry(n, struct ceph_inode_frag, node);
384                 rb_erase(n, &ci->i_fragtree);
385                 kfree(frag);
386         }
387
388         __ceph_destroy_xattrs(ci);
389         if (ci->i_xattrs.blob)
390                 ceph_buffer_put(ci->i_xattrs.blob);
391         if (ci->i_xattrs.prealloc_blob)
392                 ceph_buffer_put(ci->i_xattrs.prealloc_blob);
393
394         kmem_cache_free(ceph_inode_cachep, ci);
395 }
396
397
398 /*
399  * Helpers to fill in size, ctime, mtime, and atime.  We have to be
400  * careful because either the client or MDS may have more up to date
401  * info, depending on which capabilities are held, and whether
402  * time_warp_seq or truncate_seq have increased.  (Ordinarily, mtime
403  * and size are monotonically increasing, except when utimes() or
404  * truncate() increments the corresponding _seq values.)
405  */
406 int ceph_fill_file_size(struct inode *inode, int issued,
407                         u32 truncate_seq, u64 truncate_size, u64 size)
408 {
409         struct ceph_inode_info *ci = ceph_inode(inode);
410         int queue_trunc = 0;
411
412         if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) > 0 ||
413             (truncate_seq == ci->i_truncate_seq && size > inode->i_size)) {
414                 dout("size %lld -> %llu\n", inode->i_size, size);
415                 inode->i_size = size;
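                        /* i_blocks is counted in 512-byte units; round up */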
416                 inode->i_blocks = (size + (1<<9) - 1) >> 9;
417                 ci->i_reported_size = size;
418                 if (truncate_seq != ci->i_truncate_seq) {
419                         dout("truncate_seq %u -> %u\n",
420                              ci->i_truncate_seq, truncate_seq);
421                         ci->i_truncate_seq = truncate_seq;
422                         /*
423                          * If we hold relevant caps, or in the case where we're
424                          * not the only client referencing this file and we
425                          * don't hold those caps, then we need to check whether
426                          * the file is either opened or mmaped
427                          */
428                         if ((issued & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_RD|
429                                       CEPH_CAP_FILE_WR|CEPH_CAP_FILE_BUFFER|
430                                       CEPH_CAP_FILE_EXCL)) ||
431                             mapping_mapped(inode->i_mapping) ||
432                             __ceph_caps_file_wanted(ci)) {
433                                 ci->i_truncate_pending++;
434                                 queue_trunc = 1;
435                         }
436                 }
437         }
438         if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) >= 0 &&
439             ci->i_truncate_size != truncate_size) {
440                 dout("truncate_size %lld -> %llu\n", ci->i_truncate_size,
441                      truncate_size);
442                 ci->i_truncate_size = truncate_size;
443         }
444         return queue_trunc;
445 }
446
447 void ceph_fill_file_time(struct inode *inode, int issued,
448                          u64 time_warp_seq, struct timespec *ctime,
449                          struct timespec *mtime, struct timespec *atime)
450 {
451         struct ceph_inode_info *ci = ceph_inode(inode);
452         int warn = 0;
453
454         if (issued & (CEPH_CAP_FILE_EXCL|
455                       CEPH_CAP_FILE_WR|
456                       CEPH_CAP_FILE_BUFFER)) {
457                 if (timespec_compare(ctime, &inode->i_ctime) > 0) {
458                         dout("ctime %ld.%09ld -> %ld.%09ld inc w/ cap\n",
459                              inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
460                              ctime->tv_sec, ctime->tv_nsec);
461                         inode->i_ctime = *ctime;
462                 }
463                 if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) > 0) {
464                         /* the MDS did a utimes() */
465                         dout("mtime %ld.%09ld -> %ld.%09ld "
466                              "tw %d -> %d\n",
467                              inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
468                              mtime->tv_sec, mtime->tv_nsec,
469                              ci->i_time_warp_seq, (int)time_warp_seq);
470
471                         inode->i_mtime = *mtime;
472                         inode->i_atime = *atime;
473                         ci->i_time_warp_seq = time_warp_seq;
474                 } else if (time_warp_seq == ci->i_time_warp_seq) {
475                         /* nobody did utimes(); take the max */
476                         if (timespec_compare(mtime, &inode->i_mtime) > 0) {
477                                 dout("mtime %ld.%09ld -> %ld.%09ld inc\n",
478                                      inode->i_mtime.tv_sec,
479                                      inode->i_mtime.tv_nsec,
480                                      mtime->tv_sec, mtime->tv_nsec);
481                                 inode->i_mtime = *mtime;
482                         }
483                         if (timespec_compare(atime, &inode->i_atime) > 0) {
484                                 dout("atime %ld.%09ld -> %ld.%09ld inc\n",
485                                      inode->i_atime.tv_sec,
486                                      inode->i_atime.tv_nsec,
487                                      atime->tv_sec, atime->tv_nsec);
488                                 inode->i_atime = *atime;
489                         }
490                 } else if (issued & CEPH_CAP_FILE_EXCL) {
491                         /* we did a utimes(); ignore mds values */
492                 } else {
493                         warn = 1;
494                 }
495         } else {
496                 /* we have no write caps; whatever the MDS says is true */
497                 if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) >= 0) {
498                         inode->i_ctime = *ctime;
499                         inode->i_mtime = *mtime;
500                         inode->i_atime = *atime;
501                         ci->i_time_warp_seq = time_warp_seq;
502                 } else {
503                         warn = 1;
504                 }
505         }
506         if (warn) /* time_warp_seq shouldn't go backwards */
507                 dout("%p mds time_warp_seq %llu < %u\n",
508                      inode, time_warp_seq, ci->i_time_warp_seq);
509 }
510
511 /*
512  * Populate an inode based on info from mds.  May be called on new or
513  * existing inodes.
514  */
515 static int fill_inode(struct inode *inode,
516                       struct ceph_mds_reply_info_in *iinfo,
517                       struct ceph_mds_reply_dirfrag *dirinfo,
518                       struct ceph_mds_session *session,
519                       unsigned long ttl_from, int cap_fmode,
520                       struct ceph_cap_reservation *caps_reservation)
521 {
522         struct ceph_mds_reply_inode *info = iinfo->in;
523         struct ceph_inode_info *ci = ceph_inode(inode);
524         int i;
525         int issued, implemented;
526         struct timespec mtime, atime, ctime;
527         u32 nsplits;
528         struct ceph_buffer *xattr_blob = NULL;
529         int err = 0;
530         int queue_trunc = 0;
531
532         dout("fill_inode %p ino %llx.%llx v %llu had %llu\n",
533              inode, ceph_vinop(inode), le64_to_cpu(info->version),
534              ci->i_version);
535
536         /*
537          * prealloc xattr data, if it looks like we'll need it.  only
538          * if len > 4 (meaning there are actually xattrs; the first 4
539          * bytes are the xattr count).
540          */
541         if (iinfo->xattr_len > 4) {
542                 xattr_blob = ceph_buffer_new(iinfo->xattr_len, GFP_NOFS);
543                 if (!xattr_blob)
544                         pr_err("fill_inode ENOMEM xattr blob %d bytes\n",
545                                iinfo->xattr_len);
546         }
547
548         spin_lock(&inode->i_lock);
549
550         /*
551          * provided version will be odd if inode value is projected,
552          * even if stable.  skip the update if we have newer info
553          * (e.g., due to inode info racing from multiple MDSs), or if
554          * we are getting projected (unstable) inode info.
555          */
556         if (le64_to_cpu(info->version) > 0 &&
557             (ci->i_version & ~1) > le64_to_cpu(info->version))
558                 goto no_change;
559
560         issued = __ceph_caps_issued(ci, &implemented);
561         issued |= implemented | __ceph_caps_dirty(ci);
562
563         /* update inode */
564         ci->i_version = le64_to_cpu(info->version);
565         inode->i_version++;
566         inode->i_rdev = le32_to_cpu(info->rdev);
567
568         if ((issued & CEPH_CAP_AUTH_EXCL) == 0) {
569                 inode->i_mode = le32_to_cpu(info->mode);
570                 inode->i_uid = le32_to_cpu(info->uid);
571                 inode->i_gid = le32_to_cpu(info->gid);
572                 dout("%p mode 0%o uid.gid %d.%d\n", inode, inode->i_mode,
573                      inode->i_uid, inode->i_gid);
574         }
575
576         if ((issued & CEPH_CAP_LINK_EXCL) == 0)
577                 inode->i_nlink = le32_to_cpu(info->nlink);
578
579         /* be careful with mtime, atime, size */
580         ceph_decode_timespec(&atime, &info->atime);
581         ceph_decode_timespec(&mtime, &info->mtime);
582         ceph_decode_timespec(&ctime, &info->ctime);
583         queue_trunc = ceph_fill_file_size(inode, issued,
584                                           le32_to_cpu(info->truncate_seq),
585                                           le64_to_cpu(info->truncate_size),
586                                           le64_to_cpu(info->size));
587         ceph_fill_file_time(inode, issued,
588                             le32_to_cpu(info->time_warp_seq),
589                             &ctime, &mtime, &atime);
590
591         ci->i_max_size = le64_to_cpu(info->max_size);
592         ci->i_layout = info->layout;
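            /* i_blkbits: log2 of the layout stripe unit (fls(x) - 1 == floor(log2(x))) */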
593         inode->i_blkbits = fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1;
594
595         /* xattrs */
596         /* note that if i_xattrs.len <= 4, i_xattrs.data will still be NULL. */
597         if ((issued & CEPH_CAP_XATTR_EXCL) == 0 &&
598             le64_to_cpu(info->xattr_version) > ci->i_xattrs.version) {
599                 if (ci->i_xattrs.blob)
600                         ceph_buffer_put(ci->i_xattrs.blob);
601                 ci->i_xattrs.blob = xattr_blob;
602                 if (xattr_blob)
603                         memcpy(ci->i_xattrs.blob->vec.iov_base,
604                                iinfo->xattr_data, iinfo->xattr_len);
605                 ci->i_xattrs.version = le64_to_cpu(info->xattr_version);
606         }
607
608         inode->i_mapping->a_ops = &ceph_aops;
609         inode->i_mapping->backing_dev_info =
610                 &ceph_client(inode->i_sb)->backing_dev_info;
611
612         switch (inode->i_mode & S_IFMT) {
613         case S_IFIFO:
614         case S_IFBLK:
615         case S_IFCHR:
616         case S_IFSOCK:
617                 init_special_inode(inode, inode->i_mode, inode->i_rdev);
618                 inode->i_op = &ceph_file_iops;
619                 break;
620         case S_IFREG:
621                 inode->i_op = &ceph_file_iops;
622                 inode->i_fop = &ceph_file_fops;
623                 break;
624         case S_IFLNK:
625                 inode->i_op = &ceph_symlink_iops;
626                 if (!ci->i_symlink) {
627                         int symlen = iinfo->symlink_len;
628                         char *sym;
629
630                         BUG_ON(symlen != inode->i_size);
631                         spin_unlock(&inode->i_lock);
632
633                         err = -ENOMEM;
634                         sym = kmalloc(symlen+1, GFP_NOFS);
635                         if (!sym)
636                                 goto out;
637                         memcpy(sym, iinfo->symlink, symlen);
638                         sym[symlen] = 0;
639
640                         spin_lock(&inode->i_lock);
641                         if (!ci->i_symlink)
642                                 ci->i_symlink = sym;
643                         else
644                                 kfree(sym); /* lost a race */
645                 }
646                 break;
647         case S_IFDIR:
648                 inode->i_op = &ceph_dir_iops;
649                 inode->i_fop = &ceph_dir_fops;
650
651                 ci->i_files = le64_to_cpu(info->files);
652                 ci->i_subdirs = le64_to_cpu(info->subdirs);
653                 ci->i_rbytes = le64_to_cpu(info->rbytes);
654                 ci->i_rfiles = le64_to_cpu(info->rfiles);
655                 ci->i_rsubdirs = le64_to_cpu(info->rsubdirs);
656                 ceph_decode_timespec(&ci->i_rctime, &info->rctime);
657
658                 /* set dir completion flag? */
659                 if (ci->i_files == 0 && ci->i_subdirs == 0 &&
660                     ceph_snap(inode) == CEPH_NOSNAP &&
661                     (le32_to_cpu(info->cap.caps) & CEPH_CAP_FILE_SHARED)) {
662                         dout(" marking %p complete (empty)\n", inode);
663                         ci->i_ceph_flags |= CEPH_I_COMPLETE;
664                         ci->i_max_offset = 2;
665                 }
666
667                 /* it may be better to set st_size in getattr instead? */
668                 if (ceph_test_opt(ceph_client(inode->i_sb), RBYTES))
669                         inode->i_size = ci->i_rbytes;
670                 break;
671         default:
672                 pr_err("fill_inode %llx.%llx BAD mode 0%o\n",
673                        ceph_vinop(inode), inode->i_mode);
674         }
675
676 no_change:
677         spin_unlock(&inode->i_lock);
678
679         /* queue truncate if we saw i_size decrease */
680         if (queue_trunc)
681                 ceph_queue_vmtruncate(inode);
682
683         /* populate frag tree */
684         /* FIXME: move me up, if/when version reflects fragtree changes */
685         nsplits = le32_to_cpu(info->fragtree.nsplits);
686         mutex_lock(&ci->i_fragtree_mutex);
687         for (i = 0; i < nsplits; i++) {
688                 u32 id = le32_to_cpu(info->fragtree.splits[i].frag);
689                 struct ceph_inode_frag *frag = __get_or_create_frag(ci, id);
690
691                 if (IS_ERR(frag))
692                         continue;
693                 frag->split_by = le32_to_cpu(info->fragtree.splits[i].by);
694                 dout(" frag %x split by %d\n", frag->frag, frag->split_by);
695         }
696         mutex_unlock(&ci->i_fragtree_mutex);
697
698         /* were we issued a capability? */
699         if (info->cap.caps) {
700                 if (ceph_snap(inode) == CEPH_NOSNAP) {
701                         ceph_add_cap(inode, session,
702                                      le64_to_cpu(info->cap.cap_id),
703                                      cap_fmode,
704                                      le32_to_cpu(info->cap.caps),
705                                      le32_to_cpu(info->cap.wanted),
706                                      le32_to_cpu(info->cap.seq),
707                                      le32_to_cpu(info->cap.mseq),
708                                      le64_to_cpu(info->cap.realm),
709                                      info->cap.flags,
710                                      caps_reservation);
711                 } else {
712                         spin_lock(&inode->i_lock);
713                         dout(" %p got snap_caps %s\n", inode,
714                              ceph_cap_string(le32_to_cpu(info->cap.caps)));
715                         ci->i_snap_caps |= le32_to_cpu(info->cap.caps);
716                         if (cap_fmode >= 0)
717                                 __ceph_get_fmode(ci, cap_fmode);
718                         spin_unlock(&inode->i_lock);
719                 }
720         }
721
722         /* update delegation info? */
723         if (dirinfo)
724                 ceph_fill_dirfrag(inode, dirinfo);
725
726         err = 0;
727
728 out:
729         if (xattr_blob)
730                 ceph_buffer_put(xattr_blob);
731         return err;
732 }
733
734 /*
735  * caller should hold session s_mutex.
736  */
737 static void update_dentry_lease(struct dentry *dentry,
738                                 struct ceph_mds_reply_lease *lease,
739                                 struct ceph_mds_session *session,
740                                 unsigned long from_time)
741 {
742         struct ceph_dentry_info *di = ceph_dentry(dentry);
743         long unsigned duration = le32_to_cpu(lease->duration_ms);
744         long unsigned ttl = from_time + (duration * HZ) / 1000;
745         long unsigned half_ttl = from_time + (duration * HZ / 2) / 1000;
746         struct inode *dir;
747
748         /* only track leases on regular dentries */
749         if (dentry->d_op != &ceph_dentry_ops)
750                 return;
751
752         spin_lock(&dentry->d_lock);
753         dout("update_dentry_lease %p mask %d duration %lu ms ttl %lu\n",
754              dentry, le16_to_cpu(lease->mask), duration, ttl);
755
756         /* make lease_shared_gen match the directory's i_shared_gen */
757         dir = dentry->d_parent->d_inode;
758         di->lease_shared_gen = ceph_inode(dir)->i_shared_gen;
759
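            /* a lease mask of zero means the MDS granted no lease on this dentry */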
760         if (lease->mask == 0)
761                 goto out_unlock;
762
763         if (di->lease_gen == session->s_cap_gen &&
764             time_before(ttl, dentry->d_time))
765                 goto out_unlock;  /* we already have a newer lease. */
766
767         if (di->lease_session && di->lease_session != session)
768                 goto out_unlock;
769
770         ceph_dentry_lru_touch(dentry);
771
772         if (!di->lease_session)
773                 di->lease_session = ceph_get_mds_session(session);
774         di->lease_gen = session->s_cap_gen;
775         di->lease_seq = le32_to_cpu(lease->seq);
776         di->lease_renew_after = half_ttl;
777         di->lease_renew_from = 0;
778         dentry->d_time = ttl;
779 out_unlock:
780         spin_unlock(&dentry->d_lock);
781         return;
782 }
783
784 /*
785  * splice a dentry to an inode.
786  * caller must hold directory i_mutex for this to be safe.
787  *
788  * we will only rehash the resulting dentry if @prehash is
789  * true; @prehash will be set to false (for the benefit of
790  * the caller) if we fail.
791  */
792 static struct dentry *splice_dentry(struct dentry *dn, struct inode *in,
793                                     bool *prehash)
794 {
795         struct dentry *realdn;
796
797         /* dn must be unhashed */
798         if (!d_unhashed(dn))
799                 d_drop(dn);
800         realdn = d_materialise_unique(dn, in);
801         if (IS_ERR(realdn)) {
802                 pr_err("splice_dentry error %p inode %p ino %llx.%llx\n",
803                        dn, in, ceph_vinop(in));
804                 if (prehash)
805                         *prehash = false; /* don't rehash on error */
806                 dn = realdn; /* note realdn contains the error */
807                 goto out;
808         } else if (realdn) {
809                 dout("dn %p (%d) spliced with %p (%d) "
810                      "inode %p ino %llx.%llx\n",
811                      dn, atomic_read(&dn->d_count),
812                      realdn, atomic_read(&realdn->d_count),
813                      realdn->d_inode, ceph_vinop(realdn->d_inode));
814                 dput(dn);
815                 dn = realdn;
816         } else {
817                 BUG_ON(!ceph_dentry(dn));
818
819                 dout("dn %p attached to %p ino %llx.%llx\n",
820                      dn, dn->d_inode, ceph_vinop(dn->d_inode));
821         }
822         if ((!prehash || *prehash) && d_unhashed(dn))
823                 d_rehash(dn);
824 out:
825         return dn;
826 }
827
828 /*
829  * Set dentry's directory position based on the current dir's max, and
830  * order it in d_subdirs, so that dcache_readdir behaves.
831  */
832 static void ceph_set_dentry_offset(struct dentry *dn)
833 {
834         struct dentry *dir = dn->d_parent;
835         struct inode *inode = dn->d_parent->d_inode;
836         struct ceph_dentry_info *di;
837
838         BUG_ON(!inode);
839
840         di = ceph_dentry(dn);
841
842         spin_lock(&inode->i_lock);
843         di->offset = ceph_inode(inode)->i_max_offset++;
844         spin_unlock(&inode->i_lock);
845
846         spin_lock(&dcache_lock);
847         spin_lock(&dn->d_lock);
848         list_move_tail(&dir->d_subdirs, &dn->d_u.d_child);
849         dout("set_dentry_offset %p %lld (%p %p)\n", dn, di->offset,
850              dn->d_u.d_child.prev, dn->d_u.d_child.next);
851         spin_unlock(&dn->d_lock);
852         spin_unlock(&dcache_lock);
853 }
854
855 /*
856  * Incorporate results into the local cache.  This is either just
857  * one inode, or a directory, dentry, and possibly linked-to inode (e.g.,
858  * after a lookup).
859  *
860  * A reply may contain
861  *         a directory inode along with a dentry.
862  *  and/or a target inode
863  *
864  * Called with snap_rwsem (read).
865  */
866 int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
867                     struct ceph_mds_session *session)
868 {
869         struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
870         struct inode *in = NULL;
871         struct ceph_mds_reply_inode *ininfo;
872         struct ceph_vino vino;
873         int i = 0;
874         int err = 0;
875
876         dout("fill_trace %p is_dentry %d is_target %d\n", req,
877              rinfo->head->is_dentry, rinfo->head->is_target);
878
879 #if 0
880         /*
881          * Debugging hook:
882          *
883          * If we resend completed ops to a recovering mds, we get no
884          * trace.  Since that is very rare, pretend this is the case
885          * to ensure the 'no trace' handlers in the callers behave.
886          *
887          * Fill in inodes unconditionally to avoid breaking cap
888          * invariants.
889          */
890         if (rinfo->head->op & CEPH_MDS_OP_WRITE) {
891                 pr_info("fill_trace faking empty trace on %lld %s\n",
892                         req->r_tid, ceph_mds_op_name(rinfo->head->op));
893                 if (rinfo->head->is_dentry) {
894                         rinfo->head->is_dentry = 0;
895                         err = fill_inode(req->r_locked_dir,
896                                          &rinfo->diri, rinfo->dirfrag,
897                                          session, req->r_request_started, -1);
898                 }
899                 if (rinfo->head->is_target) {
900                         rinfo->head->is_target = 0;
901                         ininfo = rinfo->targeti.in;
902                         vino.ino = le64_to_cpu(ininfo->ino);
903                         vino.snap = le64_to_cpu(ininfo->snapid);
904                         in = ceph_get_inode(sb, vino);
905                         err = fill_inode(in, &rinfo->targeti, NULL,
906                                          session, req->r_request_started,
907                                          req->r_fmode);
908                         iput(in);
909                 }
910         }
911 #endif
912
913         if (!rinfo->head->is_target && !rinfo->head->is_dentry) {
914                 dout("fill_trace reply is empty!\n");
915                 if (rinfo->head->result == 0 && req->r_locked_dir) {
916                         struct ceph_inode_info *ci =
917                                 ceph_inode(req->r_locked_dir);
918                         dout(" clearing %p complete (empty trace)\n",
919                              req->r_locked_dir);
920                         ci->i_ceph_flags &= ~CEPH_I_COMPLETE;
921                         ci->i_release_count++;
922                 }
923                 return 0;
924         }
925
926         if (rinfo->head->is_dentry) {
927                 struct inode *dir = req->r_locked_dir;
928
929                 err = fill_inode(dir, &rinfo->diri, rinfo->dirfrag,
930                                  session, req->r_request_started, -1,
931                                  &req->r_caps_reservation);
932                 if (err < 0)
933                         return err;
934         }
935
936         if (rinfo->head->is_dentry && !req->r_aborted) {
937                 /*
938                  * lookup link rename   : null -> possibly existing inode
939                  * mknod symlink mkdir  : null -> new inode
940                  * unlink               : linked -> null
941                  */
942                 struct inode *dir = req->r_locked_dir;
943                 struct dentry *dn = req->r_dentry;
944                 bool have_dir_cap, have_lease;
945
946                 BUG_ON(!dn);
947                 BUG_ON(!dir);
948                 BUG_ON(dn->d_parent->d_inode != dir);
949                 BUG_ON(ceph_ino(dir) !=
950                        le64_to_cpu(rinfo->diri.in->ino));
951                 BUG_ON(ceph_snap(dir) !=
952                        le64_to_cpu(rinfo->diri.in->snapid));
953
954                 /* do we have a lease on the whole dir? */
955                 have_dir_cap =
956                         (le32_to_cpu(rinfo->diri.in->cap.caps) &
957                          CEPH_CAP_FILE_SHARED);
958
959                 /* do we have a dn lease? */
960                 have_lease = have_dir_cap ||
961                         (le16_to_cpu(rinfo->dlease->mask) &
962                          CEPH_LOCK_DN);
963
964                 if (!have_lease)
965                         dout("fill_trace  no dentry lease or dir cap\n");
966
967                 /* rename? */
968                 if (req->r_old_dentry && req->r_op == CEPH_MDS_OP_RENAME) {
969                         dout(" src %p '%.*s' dst %p '%.*s'\n",
970                              req->r_old_dentry,
971                              req->r_old_dentry->d_name.len,
972                              req->r_old_dentry->d_name.name,
973                              dn, dn->d_name.len, dn->d_name.name);
974                         dout("fill_trace doing d_move %p -> %p\n",
975                              req->r_old_dentry, dn);
976                         d_move(req->r_old_dentry, dn);
977                         dout(" src %p '%.*s' dst %p '%.*s'\n",
978                              req->r_old_dentry,
979                              req->r_old_dentry->d_name.len,
980                              req->r_old_dentry->d_name.name,
981                              dn, dn->d_name.len, dn->d_name.name);
982                         /* ensure target dentry is invalidated, despite
983                            rehashing bug in vfs_rename_dir */
984                         dn->d_time = jiffies;
985                         ceph_dentry(dn)->lease_shared_gen = 0;
986                         /* take overwritten dentry's readdir offset */
987                         ceph_dentry(req->r_old_dentry)->offset =
988                                 ceph_dentry(dn)->offset;
989                         dn = req->r_old_dentry;  /* use old_dentry */
990                         in = dn->d_inode;
991                 }
992
993                 /* null dentry? */
994                 if (!rinfo->head->is_target) {
995                         dout("fill_trace null dentry\n");
996                         if (dn->d_inode) {
997                                 dout("d_delete %p\n", dn);
998                                 d_delete(dn);
999                         } else {
1000                                 dout("d_instantiate %p NULL\n", dn);
1001                                 d_instantiate(dn, NULL);
1002                                 if (have_lease && d_unhashed(dn))
1003                                         d_rehash(dn);
1004                                 update_dentry_lease(dn, rinfo->dlease,
1005                                                     session,
1006                                                     req->r_request_started);
1007                         }
1008                         goto done;
1009                 }
1010
1011                 /* attach proper inode */
1012                 ininfo = rinfo->targeti.in;
1013                 vino.ino = le64_to_cpu(ininfo->ino);
1014                 vino.snap = le64_to_cpu(ininfo->snapid);
1015                 if (!dn->d_inode) {
1016                         in = ceph_get_inode(sb, vino);
1017                         if (IS_ERR(in)) {
1018                                 pr_err("fill_trace bad get_inode "
1019                                        "%llx.%llx\n", vino.ino, vino.snap);
1020                                 err = PTR_ERR(in);
1021                                 d_delete(dn);
1022                                 goto done;
1023                         }
1024                         dn = splice_dentry(dn, in, &have_lease);
1025                         if (IS_ERR(dn)) {
1026                                 err = PTR_ERR(dn);
1027                                 goto done;
1028                         }
1029                         req->r_dentry = dn;  /* may have spliced */
1030                         ceph_set_dentry_offset(dn);
1031                         igrab(in);
1032                 } else if (ceph_ino(in) == vino.ino &&
1033                            ceph_snap(in) == vino.snap) {
1034                         igrab(in);
1035                 } else {
1036                         dout(" %p links to %p %llx.%llx, not %llx.%llx\n",
1037                              dn, in, ceph_ino(in), ceph_snap(in),
1038                              vino.ino, vino.snap);
1039                         have_lease = false;
1040                         in = NULL;
1041                 }
1042
1043                 if (have_lease)
1044                         update_dentry_lease(dn, rinfo->dlease, session,
1045                                             req->r_request_started);
1046                 dout(" final dn %p\n", dn);
1047                 i++;
1048         } else if (req->r_op == CEPH_MDS_OP_LOOKUPSNAP ||
1049                    req->r_op == CEPH_MDS_OP_MKSNAP) {
1050                 struct dentry *dn = req->r_dentry;
1051
1052                 /* fill out a snapdir LOOKUPSNAP dentry */
1053                 BUG_ON(!dn);
1054                 BUG_ON(!req->r_locked_dir);
1055                 BUG_ON(ceph_snap(req->r_locked_dir) != CEPH_SNAPDIR);
1056                 ininfo = rinfo->targeti.in;
1057                 vino.ino = le64_to_cpu(ininfo->ino);
1058                 vino.snap = le64_to_cpu(ininfo->snapid);
1059                 in = ceph_get_inode(sb, vino);
1060                 if (IS_ERR(in)) {
1061                         pr_err("fill_inode get_inode badness %llx.%llx\n",
1062                                vino.ino, vino.snap);
1063                         err = PTR_ERR(in);
1064                         d_delete(dn);
1065                         goto done;
1066                 }
1067                 dout(" linking snapped dir %p to dn %p\n", in, dn);
1068                 dn = splice_dentry(dn, in, NULL);
1069                 if (IS_ERR(dn)) {
1070                         err = PTR_ERR(dn);
1071                         goto done;
1072                 }
1073                 ceph_set_dentry_offset(dn);
1074                 req->r_dentry = dn;  /* may have spliced */
1075                 igrab(in);
1076                 rinfo->head->is_dentry = 1;  /* fool notrace handlers */
1077         }
1078
1079         if (rinfo->head->is_target) {
1080                 vino.ino = le64_to_cpu(rinfo->targeti.in->ino);
1081                 vino.snap = le64_to_cpu(rinfo->targeti.in->snapid);
1082
1083                 if (in == NULL || ceph_ino(in) != vino.ino ||
1084                     ceph_snap(in) != vino.snap) {
1085                         in = ceph_get_inode(sb, vino);
1086                         if (IS_ERR(in)) {
1087                                 err = PTR_ERR(in);
1088                                 goto done;
1089                         }
1090                 }
1091                 req->r_target_inode = in;
1092
1093                 err = fill_inode(in,
1094                                  &rinfo->targeti, NULL,
1095                                  session, req->r_request_started,
1096                                  (le32_to_cpu(rinfo->head->result) == 0) ?
1097                                  req->r_fmode : -1,
1098                                  &req->r_caps_reservation);
1099                 if (err < 0) {
1100                         pr_err("fill_inode badness %p %llx.%llx\n",
1101                                in, ceph_vinop(in));
1102                         goto done;
1103                 }
1104         }
1105
1106 done:
1107         dout("fill_trace done err=%d\n", err);
1108         return err;
1109 }
1110
1111 /*
1112  * Prepopulate our cache with readdir results, leases, etc.
1113  */
1114 int ceph_readdir_prepopulate(struct ceph_mds_request *req,
1115                              struct ceph_mds_session *session)
1116 {
1117         struct dentry *parent = req->r_dentry;
1118         struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
1119         struct qstr dname;
1120         struct dentry *dn;
1121         struct inode *in;
1122         int err = 0, i;
1123         struct inode *snapdir = NULL;
1124         struct ceph_mds_request_head *rhead = req->r_request->front.iov_base;
1125         u64 frag = le32_to_cpu(rhead->args.readdir.frag);
1126         struct ceph_dentry_info *di;
1127
1128         if (le32_to_cpu(rinfo->head->op) == CEPH_MDS_OP_LSSNAP) {
1129                 snapdir = ceph_get_snapdir(parent->d_inode);
1130                 parent = d_find_alias(snapdir);
1131                 dout("readdir_prepopulate %d items under SNAPDIR dn %p\n",
1132                      rinfo->dir_nr, parent);
1133         } else {
1134                 dout("readdir_prepopulate %d items under dn %p\n",
1135                      rinfo->dir_nr, parent);
1136                 if (rinfo->dir_dir)
1137                         ceph_fill_dirfrag(parent->d_inode, rinfo->dir_dir);
1138         }
1139
1140         for (i = 0; i < rinfo->dir_nr; i++) {
1141                 struct ceph_vino vino;
1142
1143                 dname.name = rinfo->dir_dname[i];
1144                 dname.len = rinfo->dir_dname_len[i];
1145                 dname.hash = full_name_hash(dname.name, dname.len);
1146
1147                 vino.ino = le64_to_cpu(rinfo->dir_in[i].in->ino);
1148                 vino.snap = le64_to_cpu(rinfo->dir_in[i].in->snapid);
1149
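                /* find or allocate a dentry for this name; start over if an
                 * existing dentry points at a different (stale) inode */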
1150 retry_lookup:
1151                 dn = d_lookup(parent, &dname);
1152                 dout("d_lookup on parent=%p name=%.*s got %p\n",
1153                      parent, dname.len, dname.name, dn);
1154
1155                 if (!dn) {
1156                         dn = d_alloc(parent, &dname);
1157                         dout("d_alloc %p '%.*s' = %p\n", parent,
1158                              dname.len, dname.name, dn);
1159                         if (dn == NULL) {
1160                                 dout("d_alloc badness\n");
1161                                 err = -ENOMEM;
1162                                 goto out;
1163                         }
1164                         err = ceph_init_dentry(dn);
1165                         if (err < 0)
1166                                 goto out;
1167                 } else if (dn->d_inode &&
1168                            (ceph_ino(dn->d_inode) != vino.ino ||
1169                             ceph_snap(dn->d_inode) != vino.snap)) {
1170                         dout(" dn %p points to wrong inode %p\n",
1171                              dn, dn->d_inode);
1172                         d_delete(dn);
1173                         dput(dn);
1174                         goto retry_lookup;
1175                 } else {
1176                         /* reorder parent's d_subdirs */
1177                         spin_lock(&dcache_lock);
1178                         spin_lock(&dn->d_lock);
1179                         list_move(&dn->d_u.d_child, &parent->d_subdirs);
1180                         spin_unlock(&dn->d_lock);
1181                         spin_unlock(&dcache_lock);
1182                 }
1183
1184                 di = dn->d_fsdata;
1185                 di->offset = ceph_make_fpos(frag, i + req->r_readdir_offset);
1186
1187                 /* inode */
1188                 if (dn->d_inode) {
1189                         in = dn->d_inode;
1190                 } else {
1191                         in = ceph_get_inode(parent->d_sb, vino);
1192                         if (in == NULL) {
1193                                 dout("new_inode badness\n");
1194                                 d_delete(dn);
1195                                 dput(dn);
1196                                 err = -ENOMEM;
1197                                 goto out;
1198                         }
1199                         dn = splice_dentry(dn, in, NULL);
1200                 }
1201
1202                 if (fill_inode(in, &rinfo->dir_in[i], NULL, session,
1203                                req->r_request_started, -1,
1204                                &req->r_caps_reservation) < 0) {
1205                         pr_err("fill_inode badness on %p\n", in);
1206                         dput(dn);
1207                         continue;
1208                 }
1209                 update_dentry_lease(dn, rinfo->dir_dlease[i],
1210                                     req->r_session, req->r_request_started);
1211                 dput(dn);
1212         }
1213         req->r_did_prepopulate = true;
1214
1215 out:
1216         if (snapdir) {
1217                 iput(snapdir);
1218                 dput(parent);
1219         }
1220         dout("readdir_prepopulate done\n");
1221         return err;
1222 }
1223
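/*
 * Update i_size locally.  Return nonzero if the new size is at least half
 * of i_max_size while the previously reported size was not, i.e. the
 * caller should report the size change back to the MDS.
 */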
1224 int ceph_inode_set_size(struct inode *inode, loff_t size)
1225 {
1226         struct ceph_inode_info *ci = ceph_inode(inode);
1227         int ret = 0;
1228
1229         spin_lock(&inode->i_lock);
1230         dout("set_size %p %llu -> %llu\n", inode, inode->i_size, size);
1231         inode->i_size = size;
1232         inode->i_blocks = (size + (1 << 9) - 1) >> 9;
1233
1234         /* tell the MDS if we are approaching max_size */
1235         if ((size << 1) >= ci->i_max_size &&
1236             (ci->i_reported_size << 1) < ci->i_max_size)
1237                 ret = 1;
1238
1239         spin_unlock(&inode->i_lock);
1240         return ret;
1241 }
1242
1243 /*
1244  * Write back inode data in a worker thread.  (This can't be done
1245  * in the message handler context.)
1246  */
1247 void ceph_queue_writeback(struct inode *inode)
1248 {
1249         if (queue_work(ceph_inode_to_client(inode)->wb_wq,
1250                        &ceph_inode(inode)->i_wb_work)) {
1251                 dout("ceph_queue_writeback %p\n", inode);
1252                 igrab(inode);
1253         } else {
1254                 dout("ceph_queue_writeback %p failed\n", inode);
1255         }
1256 }
1257
1258 static void ceph_writeback_work(struct work_struct *work)
1259 {
1260         struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
1261                                                   i_wb_work);
1262         struct inode *inode = &ci->vfs_inode;
1263
1264         dout("writeback %p\n", inode);
1265         filemap_fdatawrite(&inode->i_data);
1266         iput(inode);
1267 }
1268
1269 /*
1270  * queue an async invalidation
1271  */
1272 void ceph_queue_invalidate(struct inode *inode)
1273 {
1274         if (queue_work(ceph_inode_to_client(inode)->pg_inv_wq,
1275                        &ceph_inode(inode)->i_pg_inv_work)) {
1276                 dout("ceph_queue_invalidate %p\n", inode);
1277                 igrab(inode);
1278         } else {
1279                 dout("ceph_queue_invalidate %p failed\n", inode);
1280         }
1281 }
1282
1283 /*
1284  * invalidate any pages that are not dirty or under writeback.  this
1285  * includes pages that are clean and mapped.
1286  */
1287 static void ceph_invalidate_nondirty_pages(struct address_space *mapping)
1288 {
1289         struct pagevec pvec;
1290         pgoff_t next = 0;
1291         int i;
1292
1293         pagevec_init(&pvec, 0);
1294         while (pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
1295                 for (i = 0; i < pagevec_count(&pvec); i++) {
1296                         struct page *page = pvec.pages[i];
1297                         pgoff_t index;
1298                         int skip_page =
1299                                 (PageDirty(page) || PageWriteback(page));
1300
1301                         if (!skip_page)
1302                                 skip_page = !trylock_page(page);
1303
1304                         /*
1305                          * We really shouldn't be looking at the ->index of an
1306                          * unlocked page.  But we're not allowed to lock these
1307                          * pages.  So we rely upon nobody altering the ->index
1308                          * of this (pinned-by-us) page.
1309                          */
1310                         index = page->index;
1311                         if (index > next)
1312                                 next = index;
1313                         next++;
1314
1315                         if (skip_page)
1316                                 continue;
1317
1318                         generic_error_remove_page(mapping, page);
1319                         unlock_page(page);
1320                 }
1321                 pagevec_release(&pvec);
1322                 cond_resched();
1323         }
1324 }
1325
1326 /*
1327  * Invalidate inode pages in a worker thread.  (This can't be done
1328  * in the message handler context.)
1329  */
1330 static void ceph_invalidate_work(struct work_struct *work)
1331 {
1332         struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
1333                                                   i_pg_inv_work);
1334         struct inode *inode = &ci->vfs_inode;
1335         u32 orig_gen;
1336         int check = 0;
1337
1338         spin_lock(&inode->i_lock);
1339         dout("invalidate_pages %p gen %d revoking %d\n", inode,
1340              ci->i_rdcache_gen, ci->i_rdcache_revoking);
1341         if (ci->i_rdcache_gen == 0 ||
1342             ci->i_rdcache_revoking != ci->i_rdcache_gen) {
1343                 BUG_ON(ci->i_rdcache_revoking > ci->i_rdcache_gen);
1344                 /* nevermind! */
1345                 ci->i_rdcache_revoking = 0;
1346                 spin_unlock(&inode->i_lock);
1347                 goto out;
1348         }
1349         orig_gen = ci->i_rdcache_gen;
1350         spin_unlock(&inode->i_lock);
1351
1352         ceph_invalidate_nondirty_pages(inode->i_mapping);
1353
1354         spin_lock(&inode->i_lock);
1355         if (orig_gen == ci->i_rdcache_gen) {
1356                 dout("invalidate_pages %p gen %d successful\n", inode,
1357                      ci->i_rdcache_gen);
1358                 ci->i_rdcache_gen = 0;
1359                 ci->i_rdcache_revoking = 0;
1360                 check = 1;
1361         } else {
1362                 dout("invalidate_pages %p gen %d raced, gen now %d\n",
1363                      inode, orig_gen, ci->i_rdcache_gen);
1364         }
1365         spin_unlock(&inode->i_lock);
1366
1367         if (check)
1368                 ceph_check_caps(ci, 0, NULL);
1369 out:
1370         iput(inode);
1371 }
1372
1373
1374 /*
1375  * called by trunc_wq; take i_mutex ourselves
1376  *
1377  * Truncation is also done here, in a separate thread, so that callers need not block.
1378  */
1379 static void ceph_vmtruncate_work(struct work_struct *work)
1380 {
1381         struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
1382                                                   i_vmtruncate_work);
1383         struct inode *inode = &ci->vfs_inode;
1384
1385         dout("vmtruncate_work %p\n", inode);
1386         mutex_lock(&inode->i_mutex);
1387         __ceph_do_pending_vmtruncate(inode);
1388         mutex_unlock(&inode->i_mutex);
1389         iput(inode);
1390 }
1391
1392 /*
1393  * Queue an async vmtruncate.  If we fail to queue work, we will handle
1394  * the truncation the next time we call __ceph_do_pending_vmtruncate.
1395  */
1396 void ceph_queue_vmtruncate(struct inode *inode)
1397 {
1398         struct ceph_inode_info *ci = ceph_inode(inode);
1399
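             /*
              * As with invalidation, hold an inode reference for the queued
              * work; ceph_vmtruncate_work() drops it with iput().
              */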
1400         if (queue_work(ceph_client(inode->i_sb)->trunc_wq,
1401                        &ci->i_vmtruncate_work)) {
1402                 dout("ceph_queue_vmtruncate %p\n", inode);
1403                 igrab(inode);
1404         } else {
1405                 dout("ceph_queue_vmtruncate %p failed, pending=%d\n",
1406                      inode, ci->i_truncate_pending);
1407         }
1408 }
1409
1410 /*
1411  * called with i_mutex held.
1412  *
1413  * Make sure any pending truncation is applied before doing anything
1414  * that may depend on it.
1415  */
1416 void __ceph_do_pending_vmtruncate(struct inode *inode)
1417 {
1418         struct ceph_inode_info *ci = ceph_inode(inode);
1419         u64 to;
1420         int wrbuffer_refs, wake = 0;
1421
1422 retry:
1423         spin_lock(&inode->i_lock);
1424         if (ci->i_truncate_pending == 0) {
1425                 dout("__do_pending_vmtruncate %p none pending\n", inode);
1426                 spin_unlock(&inode->i_lock);
1427                 return;
1428         }
1429
1430         /*
1431          * make sure any dirty snapped pages are flushed before we
1432          * possibly truncate them... so write AND block!
1433          */
1434         if (ci->i_wrbuffer_ref_head < ci->i_wrbuffer_ref) {
1435                 dout("__do_pending_vmtruncate %p flushing snaps first\n",
1436                      inode);
1437                 spin_unlock(&inode->i_lock);
1438                 filemap_write_and_wait_range(&inode->i_data, 0,
1439                                              inode->i_sb->s_maxbytes);
1440                 goto retry;
1441         }
1442
1443         to = ci->i_truncate_size;
1444         wrbuffer_refs = ci->i_wrbuffer_ref;
1445         dout("__do_pending_vmtruncate %p (%d) to %lld\n", inode,
1446              ci->i_truncate_pending, to);
1447         spin_unlock(&inode->i_lock);
1448
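             /* toss any cached pages beyond the truncation point */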
1449         truncate_inode_pages(inode->i_mapping, to);
1450
1451         spin_lock(&inode->i_lock);
1452         ci->i_truncate_pending--;
1453         if (ci->i_truncate_pending == 0)
1454                 wake = 1;
1455         spin_unlock(&inode->i_lock);
1456
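             /* with no dirty buffered data, see if any caps can be released */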
1457         if (wrbuffer_refs == 0)
1458                 ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
1459         if (wake)
1460                 wake_up(&ci->i_cap_wq);
1461 }
1462
1463
1464 /*
1465  * symlinks
1466  */
1467 static void *ceph_sym_follow_link(struct dentry *dentry, struct nameidata *nd)
1468 {
1469         struct ceph_inode_info *ci = ceph_inode(dentry->d_inode);
1470         nd_set_link(nd, ci->i_symlink);
1471         return NULL;
1472 }
1473
1474 static const struct inode_operations ceph_symlink_iops = {
1475         .readlink = generic_readlink,
1476         .follow_link = ceph_sym_follow_link,
1477 };
1478
1479 /*
1480  * setattr
1481  */
1482 int ceph_setattr(struct dentry *dentry, struct iattr *attr)
1483 {
1484         struct inode *inode = dentry->d_inode;
1485         struct ceph_inode_info *ci = ceph_inode(inode);
1486         struct inode *parent_inode = dentry->d_parent->d_inode;
1487         const unsigned int ia_valid = attr->ia_valid;
1488         struct ceph_mds_request *req;
1489         struct ceph_mds_client *mdsc = &ceph_client(dentry->d_sb)->mdsc;
1490         int issued;
1491         int release = 0, dirtied = 0;
1492         int mask = 0;
1493         int err = 0;
1494
1495         if (ceph_snap(inode) != CEPH_NOSNAP)
1496                 return -EROFS;
1497
1498         __ceph_do_pending_vmtruncate(inode);
1499
1500         err = inode_change_ok(inode, attr);
1501         if (err != 0)
1502                 return err;
1503
1504         req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETATTR,
1505                                        USE_AUTH_MDS);
1506         if (IS_ERR(req))
1507                 return PTR_ERR(req);
1508
1509         spin_lock(&inode->i_lock);
1510         issued = __ceph_caps_issued(ci, NULL);
1511         dout("setattr %p issued %s\n", inode, ceph_cap_string(issued));
1512
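             /*
              * For each attribute: if we hold the relevant EXCL cap, apply
              * the change locally and mark that cap dirty; otherwise fill in
              * the MDS request and note which caps to release with it.
              */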
1513         if (ia_valid & ATTR_UID) {
1514                 dout("setattr %p uid %d -> %d\n", inode,
1515                      inode->i_uid, attr->ia_uid);
1516                 if (issued & CEPH_CAP_AUTH_EXCL) {
1517                         inode->i_uid = attr->ia_uid;
1518                         dirtied |= CEPH_CAP_AUTH_EXCL;
1519                 } else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
1520                            attr->ia_uid != inode->i_uid) {
1521                         req->r_args.setattr.uid = cpu_to_le32(attr->ia_uid);
1522                         mask |= CEPH_SETATTR_UID;
1523                         release |= CEPH_CAP_AUTH_SHARED;
1524                 }
1525         }
1526         if (ia_valid & ATTR_GID) {
1527                 dout("setattr %p gid %d -> %d\n", inode,
1528                      inode->i_gid, attr->ia_gid);
1529                 if (issued & CEPH_CAP_AUTH_EXCL) {
1530                         inode->i_gid = attr->ia_gid;
1531                         dirtied |= CEPH_CAP_AUTH_EXCL;
1532                 } else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
1533                            attr->ia_gid != inode->i_gid) {
1534                         req->r_args.setattr.gid = cpu_to_le32(attr->ia_gid);
1535                         mask |= CEPH_SETATTR_GID;
1536                         release |= CEPH_CAP_AUTH_SHARED;
1537                 }
1538         }
1539         if (ia_valid & ATTR_MODE) {
1540                 dout("setattr %p mode 0%o -> 0%o\n", inode, inode->i_mode,
1541                      attr->ia_mode);
1542                 if (issued & CEPH_CAP_AUTH_EXCL) {
1543                         inode->i_mode = attr->ia_mode;
1544                         dirtied |= CEPH_CAP_AUTH_EXCL;
1545                 } else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
1546                            attr->ia_mode != inode->i_mode) {
1547                         req->r_args.setattr.mode = cpu_to_le32(attr->ia_mode);
1548                         mask |= CEPH_SETATTR_MODE;
1549                         release |= CEPH_CAP_AUTH_SHARED;
1550                 }
1551         }
1552
1553         if (ia_valid & ATTR_ATIME) {
1554                 dout("setattr %p atime %ld.%ld -> %ld.%ld\n", inode,
1555                      inode->i_atime.tv_sec, inode->i_atime.tv_nsec,
1556                      attr->ia_atime.tv_sec, attr->ia_atime.tv_nsec);
1557                 if (issued & CEPH_CAP_FILE_EXCL) {
1558                         ci->i_time_warp_seq++;
1559                         inode->i_atime = attr->ia_atime;
1560                         dirtied |= CEPH_CAP_FILE_EXCL;
1561                 } else if ((issued & CEPH_CAP_FILE_WR) &&
1562                            timespec_compare(&inode->i_atime,
1563                                             &attr->ia_atime) < 0) {
1564                         inode->i_atime = attr->ia_atime;
1565                         dirtied |= CEPH_CAP_FILE_WR;
1566                 } else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
1567                            !timespec_equal(&inode->i_atime, &attr->ia_atime)) {
1568                         ceph_encode_timespec(&req->r_args.setattr.atime,
1569                                              &attr->ia_atime);
1570                         mask |= CEPH_SETATTR_ATIME;
1571                         release |= CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_RD |
1572                                 CEPH_CAP_FILE_WR;
1573                 }
1574         }
1575         if (ia_valid & ATTR_MTIME) {
1576                 dout("setattr %p mtime %ld.%ld -> %ld.%ld\n", inode,
1577                      inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
1578                      attr->ia_mtime.tv_sec, attr->ia_mtime.tv_nsec);
1579                 if (issued & CEPH_CAP_FILE_EXCL) {
1580                         ci->i_time_warp_seq++;
1581                         inode->i_mtime = attr->ia_mtime;
1582                         dirtied |= CEPH_CAP_FILE_EXCL;
1583                 } else if ((issued & CEPH_CAP_FILE_WR) &&
1584                            timespec_compare(&inode->i_mtime,
1585                                             &attr->ia_mtime) < 0) {
1586                         inode->i_mtime = attr->ia_mtime;
1587                         dirtied |= CEPH_CAP_FILE_WR;
1588                 } else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
1589                            !timespec_equal(&inode->i_mtime, &attr->ia_mtime)) {
1590                         ceph_encode_timespec(&req->r_args.setattr.mtime,
1591                                              &attr->ia_mtime);
1592                         mask |= CEPH_SETATTR_MTIME;
1593                         release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD |
1594                                 CEPH_CAP_FILE_WR;
1595                 }
1596         }
1597         if (ia_valid & ATTR_SIZE) {
1598                 dout("setattr %p size %lld -> %lld\n", inode,
1599                      inode->i_size, attr->ia_size);
1600                 if (attr->ia_size > inode->i_sb->s_maxbytes) {
1601                         err = -EINVAL;
1602                         goto out;
1603                 }
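                     /*
                      * With FILE_EXCL we can extend the file locally;
                      * shrinking (a real truncate) goes through the MDS.
                      */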
1604                 if ((issued & CEPH_CAP_FILE_EXCL) &&
1605                     attr->ia_size > inode->i_size) {
1606                         inode->i_size = attr->ia_size;
1607                         inode->i_blocks =
1608                                 (attr->ia_size + (1 << 9) - 1) >> 9;
1609                         inode->i_ctime = attr->ia_ctime;
1610                         ci->i_reported_size = attr->ia_size;
1611                         dirtied |= CEPH_CAP_FILE_EXCL;
1612                 } else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
1613                            attr->ia_size != inode->i_size) {
1614                         req->r_args.setattr.size = cpu_to_le64(attr->ia_size);
1615                         req->r_args.setattr.old_size =
1616                                 cpu_to_le64(inode->i_size);
1617                         mask |= CEPH_SETATTR_SIZE;
1618                         release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD |
1619                                 CEPH_CAP_FILE_WR;
1620                 }
1621         }
1622
1623         /* these normally do little: ctime is updated locally, ATTR_FILE is ignored */
1624         if (ia_valid & ATTR_CTIME) {
1625                 bool only = (ia_valid & (ATTR_SIZE|ATTR_MTIME|ATTR_ATIME|
1626                                          ATTR_MODE|ATTR_UID|ATTR_GID)) == 0;
1627                 dout("setattr %p ctime %ld.%ld -> %ld.%ld (%s)\n", inode,
1628                      inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
1629                      attr->ia_ctime.tv_sec, attr->ia_ctime.tv_nsec,
1630                      only ? "ctime only" : "ignored");
1631                 inode->i_ctime = attr->ia_ctime;
1632                 if (only) {
1633                         /*
1634                          * if kernel wants to dirty ctime but nothing else,
1635                          * we need to choose a cap to dirty under, or do
1636                          * a almost-no-op setattr
1637                          */
1638                         if (issued & CEPH_CAP_AUTH_EXCL)
1639                                 dirtied |= CEPH_CAP_AUTH_EXCL;
1640                         else if (issued & CEPH_CAP_FILE_EXCL)
1641                                 dirtied |= CEPH_CAP_FILE_EXCL;
1642                         else if (issued & CEPH_CAP_XATTR_EXCL)
1643                                 dirtied |= CEPH_CAP_XATTR_EXCL;
1644                         else
1645                                 mask |= CEPH_SETATTR_CTIME;
1646                 }
1647         }
1648         if (ia_valid & ATTR_FILE)
1649                 dout("setattr %p ATTR_FILE ... hrm!\n", inode);
1650
1651         if (dirtied) {
1652                 __ceph_mark_dirty_caps(ci, dirtied);
1653                 inode->i_ctime = CURRENT_TIME;
1654         }
1655
1656         release &= issued;
1657         spin_unlock(&inode->i_lock);
1658
1659         if (mask) {
1660                 req->r_inode = igrab(inode);
1661                 req->r_inode_drop = release;
1662                 req->r_args.setattr.mask = cpu_to_le32(mask);
1663                 req->r_num_caps = 1;
1664                 err = ceph_mdsc_do_request(mdsc, parent_inode, req);
1665         }
1666         dout("setattr %p result=%d (%s locally, %d remote)\n", inode, err,
1667              ceph_cap_string(dirtied), mask);
1668
1669         ceph_mdsc_put_request(req);
1670         __ceph_do_pending_vmtruncate(inode);
1671         return err;
1672 out:
1673         spin_unlock(&inode->i_lock);
1674         ceph_mdsc_put_request(req);
1675         return err;
1676 }
1677
1678 /*
1679  * Verify that we have a lease on the given mask.  If not,
1680  * do a getattr against an MDS.
1681  */
1682 int ceph_do_getattr(struct inode *inode, int mask)
1683 {
1684         struct ceph_client *client = ceph_sb_to_client(inode->i_sb);
1685         struct ceph_mds_client *mdsc = &client->mdsc;
1686         struct ceph_mds_request *req;
1687         int err;
1688
1689         if (ceph_snap(inode) == CEPH_SNAPDIR) {
1690                 dout("do_getattr inode %p SNAPDIR\n", inode);
1691                 return 0;
1692         }
1693
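             /*
              * If the caps we hold already cover the requested mask, our
              * cached metadata is valid and we can skip the MDS round trip.
              */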
1694         dout("do_getattr inode %p mask %s\n", inode, ceph_cap_string(mask));
1695         if (ceph_caps_issued_mask(ceph_inode(inode), mask, 1))
1696                 return 0;
1697
1698         req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, USE_ANY_MDS);
1699         if (IS_ERR(req))
1700                 return PTR_ERR(req);
1701         req->r_inode = igrab(inode);
1702         req->r_num_caps = 1;
1703         req->r_args.getattr.mask = cpu_to_le32(mask);
1704         err = ceph_mdsc_do_request(mdsc, NULL, req);
1705         ceph_mdsc_put_request(req);
1706         dout("do_getattr result=%d\n", err);
1707         return err;
1708 }
1709
1710
1711 /*
1712  * Check inode permissions.  We verify we have a valid value for
1713  * the AUTH cap, then call the generic handler.
1714  */
1715 int ceph_permission(struct inode *inode, int mask)
1716 {
1717         int err = ceph_do_getattr(inode, CEPH_CAP_AUTH_SHARED);
1718
1719         if (!err)
1720                 err = generic_permission(inode, mask, NULL);
1721         return err;
1722 }
1723
1724 /*
1725  * Get all attributes.  Hopefully someday we'll have a statlite()
1726  * and can limit the fields we require to be accurate.
1727  */
1728 int ceph_getattr(struct vfsmount *mnt, struct dentry *dentry,
1729                  struct kstat *stat)
1730 {
1731         struct inode *inode = dentry->d_inode;
1732         struct ceph_inode_info *ci = ceph_inode(inode);
1733         int err;
1734
1735         err = ceph_do_getattr(inode, CEPH_STAT_CAP_INODE_ALL);
1736         if (!err) {
1737                 generic_fillattr(inode, stat);
1738                 stat->ino = inode->i_ino;
1739                 if (ceph_snap(inode) != CEPH_NOSNAP)
1740                         stat->dev = ceph_snap(inode);
1741                 else
1742                         stat->dev = 0;
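                     /*
                      * directories report the recursive byte count (rbytes)
                      * maintained by the MDS as their size
                      */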
1743                 if (S_ISDIR(inode->i_mode)) {
1744                         stat->size = ci->i_rbytes;
1745                         stat->blocks = 0;
1746                         stat->blksize = 65536;
1747                 }
1748         }
1749         return err;
1750 }