fs/ceph/inode.c
#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/namei.h>
#include <linux/writeback.h>
#include <linux/vmalloc.h>
#include <linux/pagevec.h>

#include "super.h"
#include "mds_client.h"
#include <linux/ceph/decode.h>

/*
 * Ceph inode operations
 *
 * Implement basic inode helpers (get, alloc) and inode ops (getattr,
 * setattr, etc.), xattr helpers, and helpers for assimilating
 * metadata returned by the MDS into our cache.
 *
 * Also define helpers for doing asynchronous writeback, invalidation,
 * and truncation for the benefit of those who can't afford to block
 * (typically because they are in the message handler path).
 */

static const struct inode_operations ceph_symlink_iops;

static void ceph_invalidate_work(struct work_struct *work);
static void ceph_writeback_work(struct work_struct *work);
static void ceph_vmtruncate_work(struct work_struct *work);

/*
 * find or create an inode, given the ceph ino number
 */
struct inode *ceph_get_inode(struct super_block *sb, struct ceph_vino vino)
{
        struct inode *inode;
        ino_t t = ceph_vino_to_ino(vino);

        inode = iget5_locked(sb, t, ceph_ino_compare, ceph_set_ino_cb, &vino);
        if (inode == NULL)
                return ERR_PTR(-ENOMEM);
        if (inode->i_state & I_NEW) {
                dout("get_inode created new inode %p %llx.%llx ino %llx\n",
                     inode, ceph_vinop(inode), (u64)inode->i_ino);
                unlock_new_inode(inode);
        }

        dout("get_inode on %lu=%llx.%llx got %p\n", inode->i_ino, vino.ino,
             vino.snap, inode);
        return inode;
}
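
/*
 * Illustrative sketch (not part of the original source): a caller
 * holding an ino/snap pair decoded from an MDS reply resolves it to
 * an in-core inode roughly as ceph_fill_trace() does below.  Because
 * ceph_vino carries the snap id alongside the ino, the same ino can
 * be instantiated once per snapshot in the inode cache.
 *
 *      struct ceph_vino vino = {
 *              .ino  = le64_to_cpu(ininfo->ino),
 *              .snap = le64_to_cpu(ininfo->snapid),
 *      };
 *      struct inode *inode = ceph_get_inode(sb, vino);
 *
 *      if (IS_ERR(inode))
 *              return PTR_ERR(inode);    (only -ENOMEM, per above)
 */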

/*
 * get/construct snapdir inode for a given directory
 */
struct inode *ceph_get_snapdir(struct inode *parent)
{
        struct ceph_vino vino = {
                .ino = ceph_ino(parent),
                .snap = CEPH_SNAPDIR,
        };
        struct inode *inode = ceph_get_inode(parent->i_sb, vino);
        struct ceph_inode_info *ci = ceph_inode(inode);

        BUG_ON(!S_ISDIR(parent->i_mode));
        if (IS_ERR(inode))
                return inode;
        inode->i_mode = parent->i_mode;
        inode->i_uid = parent->i_uid;
        inode->i_gid = parent->i_gid;
        inode->i_op = &ceph_dir_iops;
        inode->i_fop = &ceph_dir_fops;
        ci->i_snap_caps = CEPH_CAP_PIN; /* so we can open */
        ci->i_rbytes = 0;
        return inode;
}

const struct inode_operations ceph_file_iops = {
        .permission = ceph_permission,
        .setattr = ceph_setattr,
        .getattr = ceph_getattr,
        .setxattr = ceph_setxattr,
        .getxattr = ceph_getxattr,
        .listxattr = ceph_listxattr,
        .removexattr = ceph_removexattr,
};


/*
 * We use a 'frag tree' to keep track of the MDS's directory fragments
 * for a given inode (usually there is just a single fragment).  We
 * need to know when a child frag is delegated to a new MDS, or when
 * it is flagged as replicated, so we can direct our requests
 * accordingly.
 */
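
/*
 * A short worked example of the frag encoding, for the reader (the
 * ceph_frag_* helpers are the accessors from
 * <linux/ceph/ceph_frag.h>).  A frag packs "bits" (how many times the
 * hash space has been split) into the top 8 bits of a u32, and the
 * matching value into the most significant end of the low 24 bits:
 *
 *      u32 root  = ceph_frag_make(0, 0);              whole hash space
 *      u32 left  = ceph_frag_make_child(root, 1, 0);  first half
 *      u32 right = ceph_frag_make_child(root, 1, 1);  second half
 *
 * ceph_frag_contains_value(root, v) is true for any v, while
 * ceph_frag_contains_value(left, v) is true iff the high bit of v's
 * 24-bit hash (bit 23) is clear.
 */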

/*
 * find/create a frag in the tree
 */
static struct ceph_inode_frag *__get_or_create_frag(struct ceph_inode_info *ci,
                                                    u32 f)
{
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct ceph_inode_frag *frag;
        int c;

        p = &ci->i_fragtree.rb_node;
        while (*p) {
                parent = *p;
                frag = rb_entry(parent, struct ceph_inode_frag, node);
                c = ceph_frag_compare(f, frag->frag);
                if (c < 0)
                        p = &(*p)->rb_left;
                else if (c > 0)
                        p = &(*p)->rb_right;
                else
                        return frag;
        }

        frag = kmalloc(sizeof(*frag), GFP_NOFS);
        if (!frag) {
                pr_err("__get_or_create_frag ENOMEM on %p %llx.%llx "
                       "frag %x\n", &ci->vfs_inode,
                       ceph_vinop(&ci->vfs_inode), f);
                return ERR_PTR(-ENOMEM);
        }
        frag->frag = f;
        frag->split_by = 0;
        frag->mds = -1;
        frag->ndist = 0;

        rb_link_node(&frag->node, parent, p);
        rb_insert_color(&frag->node, &ci->i_fragtree);

        dout("get_or_create_frag added %llx.%llx frag %x\n",
             ceph_vinop(&ci->vfs_inode), f);
        return frag;
}

/*
 * find a specific frag @f
 */
struct ceph_inode_frag *__ceph_find_frag(struct ceph_inode_info *ci, u32 f)
{
        struct rb_node *n = ci->i_fragtree.rb_node;

        while (n) {
                struct ceph_inode_frag *frag =
                        rb_entry(n, struct ceph_inode_frag, node);
                int c = ceph_frag_compare(f, frag->frag);
                if (c < 0)
                        n = n->rb_left;
                else if (c > 0)
                        n = n->rb_right;
                else
                        return frag;
        }
        return NULL;
}

/*
 * Choose frag containing the given value @v.  If @pfrag is
 * specified, copy the frag delegation info to the caller if
 * it is present.
 */
u32 ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
                     struct ceph_inode_frag *pfrag,
                     int *found)
{
        u32 t = ceph_frag_make(0, 0);
        struct ceph_inode_frag *frag;
        unsigned nway, i;
        u32 n;

        if (found)
                *found = 0;

        mutex_lock(&ci->i_fragtree_mutex);
        while (1) {
                WARN_ON(!ceph_frag_contains_value(t, v));
                frag = __ceph_find_frag(ci, t);
                if (!frag)
                        break; /* t is a leaf */
                if (frag->split_by == 0) {
                        if (pfrag)
                                memcpy(pfrag, frag, sizeof(*pfrag));
                        if (found)
                                *found = 1;
                        break;
                }

                /* choose child */
                nway = 1 << frag->split_by;
                dout("choose_frag(%x) %x splits by %d (%d ways)\n", v, t,
                     frag->split_by, nway);
                for (i = 0; i < nway; i++) {
                        n = ceph_frag_make_child(t, frag->split_by, i);
                        if (ceph_frag_contains_value(n, v)) {
                                t = n;
                                break;
                        }
                }
                BUG_ON(i == nway);
        }
        dout("choose_frag(%x) = %x\n", v, t);

        mutex_unlock(&ci->i_fragtree_mutex);
        return t;
}
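
/*
 * Typical use, sketched (the real caller lives in the MDS request
 * path, not in this file, and ceph_str_hash()/dl_dir_hash are
 * assumptions about the shared libceph hash helpers): hash the dentry
 * name, then walk the frag tree to find the leaf covering that hash
 * and, if the MDS delegated it, where to send the request.
 *
 *      struct ceph_inode_frag frag;
 *      int found;
 *      u32 hash = ceph_str_hash(ci->i_dir_layout.dl_dir_hash,
 *                               name, len);
 *      u32 f = ceph_choose_frag(ci, hash, &frag, &found);
 *
 *      if (found && frag.mds >= 0)
 *              ... direct the request at frag.mds ...
 */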

/*
 * Process dirfrag (delegation) info from the mds.  Include leaf
 * fragment in tree ONLY if ndist > 0.  Otherwise, only
 * branches/splits are included in i_fragtree.
 */
static int ceph_fill_dirfrag(struct inode *inode,
                             struct ceph_mds_reply_dirfrag *dirinfo)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_inode_frag *frag;
        u32 id = le32_to_cpu(dirinfo->frag);
        int mds = le32_to_cpu(dirinfo->auth);
        int ndist = le32_to_cpu(dirinfo->ndist);
        int i;
        int err = 0;

        mutex_lock(&ci->i_fragtree_mutex);
        if (ndist == 0) {
                /* no delegation info needed. */
                frag = __ceph_find_frag(ci, id);
                if (!frag)
                        goto out;
                if (frag->split_by == 0) {
                        /* tree leaf, remove */
                        dout("fill_dirfrag removed %llx.%llx frag %x"
                             " (no ref)\n", ceph_vinop(inode), id);
                        rb_erase(&frag->node, &ci->i_fragtree);
                        kfree(frag);
                } else {
                        /* tree branch, keep and clear */
                        dout("fill_dirfrag cleared %llx.%llx frag %x"
                             " referral\n", ceph_vinop(inode), id);
                        frag->mds = -1;
                        frag->ndist = 0;
                }
                goto out;
        }


        /* find/add this frag to store mds delegation info */
        frag = __get_or_create_frag(ci, id);
        if (IS_ERR(frag)) {
                /* this is not the end of the world; we can continue
                   with bad/inaccurate delegation info */
                pr_err("fill_dirfrag ENOMEM on mds ref %llx.%llx fg %x\n",
                       ceph_vinop(inode), le32_to_cpu(dirinfo->frag));
                err = -ENOMEM;
                goto out;
        }

        frag->mds = mds;
        frag->ndist = min_t(u32, ndist, CEPH_MAX_DIRFRAG_REP);
        for (i = 0; i < frag->ndist; i++)
                frag->dist[i] = le32_to_cpu(dirinfo->dist[i]);
        dout("fill_dirfrag %llx.%llx frag %x ndist=%d\n",
             ceph_vinop(inode), frag->frag, frag->ndist);

out:
        mutex_unlock(&ci->i_fragtree_mutex);
        return err;
}


/*
 * initialize a newly allocated inode.
 */
struct inode *ceph_alloc_inode(struct super_block *sb)
{
        struct ceph_inode_info *ci;
        int i;

        ci = kmem_cache_alloc(ceph_inode_cachep, GFP_NOFS);
        if (!ci)
                return NULL;

        dout("alloc_inode %p\n", &ci->vfs_inode);

        ci->i_version = 0;
        ci->i_time_warp_seq = 0;
        ci->i_ceph_flags = 0;
        ci->i_release_count = 0;
        ci->i_symlink = NULL;

        memset(&ci->i_dir_layout, 0, sizeof(ci->i_dir_layout));

        ci->i_fragtree = RB_ROOT;
        mutex_init(&ci->i_fragtree_mutex);

        ci->i_xattrs.blob = NULL;
        ci->i_xattrs.prealloc_blob = NULL;
        ci->i_xattrs.dirty = false;
        ci->i_xattrs.index = RB_ROOT;
        ci->i_xattrs.count = 0;
        ci->i_xattrs.names_size = 0;
        ci->i_xattrs.vals_size = 0;
        ci->i_xattrs.version = 0;
        ci->i_xattrs.index_version = 0;

        ci->i_caps = RB_ROOT;
        ci->i_auth_cap = NULL;
        ci->i_dirty_caps = 0;
        ci->i_flushing_caps = 0;
        INIT_LIST_HEAD(&ci->i_dirty_item);
        INIT_LIST_HEAD(&ci->i_flushing_item);
        ci->i_cap_flush_seq = 0;
        ci->i_cap_flush_last_tid = 0;
        memset(&ci->i_cap_flush_tid, 0, sizeof(ci->i_cap_flush_tid));
        init_waitqueue_head(&ci->i_cap_wq);
        ci->i_hold_caps_min = 0;
        ci->i_hold_caps_max = 0;
        INIT_LIST_HEAD(&ci->i_cap_delay_list);
        ci->i_cap_exporting_mds = 0;
        ci->i_cap_exporting_mseq = 0;
        ci->i_cap_exporting_issued = 0;
        INIT_LIST_HEAD(&ci->i_cap_snaps);
        ci->i_head_snapc = NULL;
        ci->i_snap_caps = 0;

        for (i = 0; i < CEPH_FILE_MODE_NUM; i++)
                ci->i_nr_by_mode[i] = 0;

        ci->i_truncate_seq = 0;
        ci->i_truncate_size = 0;
        ci->i_truncate_pending = 0;

        ci->i_max_size = 0;
        ci->i_reported_size = 0;
        ci->i_wanted_max_size = 0;
        ci->i_requested_max_size = 0;

        ci->i_pin_ref = 0;
        ci->i_rd_ref = 0;
        ci->i_rdcache_ref = 0;
        ci->i_wr_ref = 0;
        ci->i_wrbuffer_ref = 0;
        ci->i_wrbuffer_ref_head = 0;
        ci->i_shared_gen = 0;
        ci->i_rdcache_gen = 0;
        ci->i_rdcache_revoking = 0;

        INIT_LIST_HEAD(&ci->i_unsafe_writes);
        INIT_LIST_HEAD(&ci->i_unsafe_dirops);
        spin_lock_init(&ci->i_unsafe_lock);

        ci->i_snap_realm = NULL;
        INIT_LIST_HEAD(&ci->i_snap_realm_item);
        INIT_LIST_HEAD(&ci->i_snap_flush_item);

        INIT_WORK(&ci->i_wb_work, ceph_writeback_work);
        INIT_WORK(&ci->i_pg_inv_work, ceph_invalidate_work);

        INIT_WORK(&ci->i_vmtruncate_work, ceph_vmtruncate_work);

        return &ci->vfs_inode;
}

static void ceph_i_callback(struct rcu_head *head)
{
        struct inode *inode = container_of(head, struct inode, i_rcu);
        struct ceph_inode_info *ci = ceph_inode(inode);

        INIT_LIST_HEAD(&inode->i_dentry);
        kmem_cache_free(ceph_inode_cachep, ci);
}

void ceph_destroy_inode(struct inode *inode)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_inode_frag *frag;
        struct rb_node *n;

        dout("destroy_inode %p ino %llx.%llx\n", inode, ceph_vinop(inode));

        ceph_queue_caps_release(inode);

        /*
         * we may still have a snap_realm reference if there are stray
         * caps in i_cap_exporting_issued or i_snap_caps.
         */
        if (ci->i_snap_realm) {
                struct ceph_mds_client *mdsc =
                        ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
                struct ceph_snap_realm *realm = ci->i_snap_realm;

                dout(" dropping residual ref to snap realm %p\n", realm);
                spin_lock(&realm->inodes_with_caps_lock);
                list_del_init(&ci->i_snap_realm_item);
                spin_unlock(&realm->inodes_with_caps_lock);
                ceph_put_snap_realm(mdsc, realm);
        }

        kfree(ci->i_symlink);
        while ((n = rb_first(&ci->i_fragtree)) != NULL) {
                frag = rb_entry(n, struct ceph_inode_frag, node);
                rb_erase(n, &ci->i_fragtree);
                kfree(frag);
        }

        __ceph_destroy_xattrs(ci);
        if (ci->i_xattrs.blob)
                ceph_buffer_put(ci->i_xattrs.blob);
        if (ci->i_xattrs.prealloc_blob)
                ceph_buffer_put(ci->i_xattrs.prealloc_blob);

        call_rcu(&inode->i_rcu, ceph_i_callback);
}


/*
 * Helpers to fill in size, ctime, mtime, and atime.  We have to be
 * careful because either the client or MDS may have more up to date
 * info, depending on which capabilities are held, and whether
 * time_warp_seq or truncate_seq have increased.  (Ordinarily, mtime
 * and size are monotonically increasing, except when utimes() or
 * truncate() increments the corresponding _seq values.)
 */
int ceph_fill_file_size(struct inode *inode, int issued,
                        u32 truncate_seq, u64 truncate_size, u64 size)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        int queue_trunc = 0;

        if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) > 0 ||
            (truncate_seq == ci->i_truncate_seq && size > inode->i_size)) {
                dout("size %lld -> %llu\n", inode->i_size, size);
                inode->i_size = size;
                inode->i_blocks = (size + (1<<9) - 1) >> 9;
                ci->i_reported_size = size;
                if (truncate_seq != ci->i_truncate_seq) {
                        dout("truncate_seq %u -> %u\n",
                             ci->i_truncate_seq, truncate_seq);
                        ci->i_truncate_seq = truncate_seq;
                        /*
                         * If we hold the relevant caps, or if we are
                         * not the only client referencing this file
                         * (and do not hold those caps), we need to
                         * check whether the file is currently open
                         * or mmapped.
                         */
                        if ((issued & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_RD|
                                       CEPH_CAP_FILE_WR|CEPH_CAP_FILE_BUFFER|
                                       CEPH_CAP_FILE_EXCL|
                                       CEPH_CAP_FILE_LAZYIO)) ||
                            mapping_mapped(inode->i_mapping) ||
                            __ceph_caps_file_wanted(ci)) {
                                ci->i_truncate_pending++;
                                queue_trunc = 1;
                        }
                }
        }
        if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) >= 0 &&
            ci->i_truncate_size != truncate_size) {
                dout("truncate_size %lld -> %llu\n", ci->i_truncate_size,
                     truncate_size);
                ci->i_truncate_size = truncate_size;
        }
        return queue_trunc;
}
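
/*
 * Note on ceph_seq_cmp(), with a worked example for the reader
 * (assuming the usual signed-difference definition in super.h):
 * sequence numbers are compared by signed 32-bit difference, so the
 * ordering survives wraparound.
 *
 *      ceph_seq_cmp(2, 1)          > 0    2 is newer
 *      ceph_seq_cmp(1, 0xffffffff) > 0    1 is newer even though it is
 *                                         numerically smaller, because
 *                                         the seq wrapped
 */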

void ceph_fill_file_time(struct inode *inode, int issued,
                         u64 time_warp_seq, struct timespec *ctime,
                         struct timespec *mtime, struct timespec *atime)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        int warn = 0;

        if (issued & (CEPH_CAP_FILE_EXCL|
                      CEPH_CAP_FILE_WR|
                      CEPH_CAP_FILE_BUFFER|
                      CEPH_CAP_AUTH_EXCL|
                      CEPH_CAP_XATTR_EXCL)) {
                if (timespec_compare(ctime, &inode->i_ctime) > 0) {
                        dout("ctime %ld.%09ld -> %ld.%09ld inc w/ cap\n",
                             inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
                             ctime->tv_sec, ctime->tv_nsec);
                        inode->i_ctime = *ctime;
                }
                if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) > 0) {
                        /* the MDS did a utimes() */
                        dout("mtime %ld.%09ld -> %ld.%09ld "
                             "tw %d -> %d\n",
                             inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
                             mtime->tv_sec, mtime->tv_nsec,
                             ci->i_time_warp_seq, (int)time_warp_seq);

                        inode->i_mtime = *mtime;
                        inode->i_atime = *atime;
                        ci->i_time_warp_seq = time_warp_seq;
                } else if (time_warp_seq == ci->i_time_warp_seq) {
                        /* nobody did utimes(); take the max */
                        if (timespec_compare(mtime, &inode->i_mtime) > 0) {
                                dout("mtime %ld.%09ld -> %ld.%09ld inc\n",
                                     inode->i_mtime.tv_sec,
                                     inode->i_mtime.tv_nsec,
                                     mtime->tv_sec, mtime->tv_nsec);
                                inode->i_mtime = *mtime;
                        }
                        if (timespec_compare(atime, &inode->i_atime) > 0) {
                                dout("atime %ld.%09ld -> %ld.%09ld inc\n",
                                     inode->i_atime.tv_sec,
                                     inode->i_atime.tv_nsec,
                                     atime->tv_sec, atime->tv_nsec);
                                inode->i_atime = *atime;
                        }
                } else if (issued & CEPH_CAP_FILE_EXCL) {
                        /* we did a utimes(); ignore mds values */
                } else {
                        warn = 1;
                }
        } else {
                /* we have no write|excl caps; whatever the MDS says is true */
                if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) >= 0) {
                        inode->i_ctime = *ctime;
                        inode->i_mtime = *mtime;
                        inode->i_atime = *atime;
                        ci->i_time_warp_seq = time_warp_seq;
                } else {
                        warn = 1;
                }
        }
        if (warn) /* time_warp_seq shouldn't go backwards */
                dout("%p mds time_warp_seq %llu < %u\n",
                     inode, time_warp_seq, ci->i_time_warp_seq);
}

/*
 * Populate an inode based on info from mds.  May be called on new or
 * existing inodes.
 */
static int fill_inode(struct inode *inode,
                      struct ceph_mds_reply_info_in *iinfo,
                      struct ceph_mds_reply_dirfrag *dirinfo,
                      struct ceph_mds_session *session,
                      unsigned long ttl_from, int cap_fmode,
                      struct ceph_cap_reservation *caps_reservation)
{
        struct ceph_mds_reply_inode *info = iinfo->in;
        struct ceph_inode_info *ci = ceph_inode(inode);
        int i;
        int issued, implemented;
        struct timespec mtime, atime, ctime;
        u32 nsplits;
        struct ceph_buffer *xattr_blob = NULL;
        int err = 0;
        int queue_trunc = 0;

        dout("fill_inode %p ino %llx.%llx v %llu had %llu\n",
             inode, ceph_vinop(inode), le64_to_cpu(info->version),
             ci->i_version);

        /*
         * prealloc xattr data, if it looks like we'll need it.  only
         * if len > 4 (meaning there are actually xattrs; the first 4
         * bytes are the xattr count).
         */
        if (iinfo->xattr_len > 4) {
                xattr_blob = ceph_buffer_new(iinfo->xattr_len, GFP_NOFS);
                if (!xattr_blob)
                        pr_err("fill_inode ENOMEM xattr blob %d bytes\n",
                               iinfo->xattr_len);
        }

        spin_lock(&inode->i_lock);

        /*
         * The provided version will be odd if the inode value is
         * projected (unstable), and even if it is stable.  Skip the
         * update if we have newer stable info (ours >= theirs, e.g.
         * due to racing mds replies), unless we are getting projected
         * (unstable) info (in which case the version is odd, and we
         * want ours > theirs).
         *   us   them
         *   2    2     skip
         *   3    2     skip
         *   3    3     update
         */
        if (le64_to_cpu(info->version) > 0 &&
            (ci->i_version & ~1) >= le64_to_cpu(info->version))
                goto no_change;

        issued = __ceph_caps_issued(ci, &implemented);
        issued |= implemented | __ceph_caps_dirty(ci);

        /* update inode */
        ci->i_version = le64_to_cpu(info->version);
        inode->i_version++;
        inode->i_rdev = le32_to_cpu(info->rdev);

        if ((issued & CEPH_CAP_AUTH_EXCL) == 0) {
                inode->i_mode = le32_to_cpu(info->mode);
                inode->i_uid = le32_to_cpu(info->uid);
                inode->i_gid = le32_to_cpu(info->gid);
                dout("%p mode 0%o uid.gid %d.%d\n", inode, inode->i_mode,
                     inode->i_uid, inode->i_gid);
        }

        if ((issued & CEPH_CAP_LINK_EXCL) == 0)
                inode->i_nlink = le32_to_cpu(info->nlink);

        /* be careful with mtime, atime, size */
        ceph_decode_timespec(&atime, &info->atime);
        ceph_decode_timespec(&mtime, &info->mtime);
        ceph_decode_timespec(&ctime, &info->ctime);
        queue_trunc = ceph_fill_file_size(inode, issued,
                                          le32_to_cpu(info->truncate_seq),
                                          le64_to_cpu(info->truncate_size),
                                          le64_to_cpu(info->size));
        ceph_fill_file_time(inode, issued,
                            le32_to_cpu(info->time_warp_seq),
                            &ctime, &mtime, &atime);

        /* only update max_size on auth cap */
        if ((info->cap.flags & CEPH_CAP_FLAG_AUTH) &&
            ci->i_max_size != le64_to_cpu(info->max_size)) {
                dout("max_size %lld -> %llu\n", ci->i_max_size,
                     le64_to_cpu(info->max_size));
                ci->i_max_size = le64_to_cpu(info->max_size);
        }

        ci->i_layout = info->layout;
        inode->i_blkbits = fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1;

        /* xattrs */
        /* note that if i_xattrs.len <= 4, i_xattrs.data will still be NULL. */
        if ((issued & CEPH_CAP_XATTR_EXCL) == 0 &&
            le64_to_cpu(info->xattr_version) > ci->i_xattrs.version) {
                if (ci->i_xattrs.blob)
                        ceph_buffer_put(ci->i_xattrs.blob);
                ci->i_xattrs.blob = xattr_blob;
                if (xattr_blob)
                        memcpy(ci->i_xattrs.blob->vec.iov_base,
                               iinfo->xattr_data, iinfo->xattr_len);
                ci->i_xattrs.version = le64_to_cpu(info->xattr_version);
                xattr_blob = NULL;
        }

        inode->i_mapping->a_ops = &ceph_aops;
        inode->i_mapping->backing_dev_info =
                &ceph_sb_to_client(inode->i_sb)->backing_dev_info;

        switch (inode->i_mode & S_IFMT) {
        case S_IFIFO:
        case S_IFBLK:
        case S_IFCHR:
        case S_IFSOCK:
                init_special_inode(inode, inode->i_mode, inode->i_rdev);
                inode->i_op = &ceph_file_iops;
                break;
        case S_IFREG:
                inode->i_op = &ceph_file_iops;
                inode->i_fop = &ceph_file_fops;
                break;
        case S_IFLNK:
                inode->i_op = &ceph_symlink_iops;
                if (!ci->i_symlink) {
                        int symlen = iinfo->symlink_len;
                        char *sym;

                        BUG_ON(symlen != inode->i_size);
                        spin_unlock(&inode->i_lock);

                        err = -ENOMEM;
                        sym = kmalloc(symlen+1, GFP_NOFS);
                        if (!sym)
                                goto out;
                        memcpy(sym, iinfo->symlink, symlen);
                        sym[symlen] = 0;

                        spin_lock(&inode->i_lock);
                        if (!ci->i_symlink)
                                ci->i_symlink = sym;
                        else
                                kfree(sym); /* lost a race */
                }
                break;
        case S_IFDIR:
                inode->i_op = &ceph_dir_iops;
                inode->i_fop = &ceph_dir_fops;

                ci->i_dir_layout = iinfo->dir_layout;

                ci->i_files = le64_to_cpu(info->files);
                ci->i_subdirs = le64_to_cpu(info->subdirs);
                ci->i_rbytes = le64_to_cpu(info->rbytes);
                ci->i_rfiles = le64_to_cpu(info->rfiles);
                ci->i_rsubdirs = le64_to_cpu(info->rsubdirs);
                ceph_decode_timespec(&ci->i_rctime, &info->rctime);

                /* set dir completion flag? */
                if (ci->i_files == 0 && ci->i_subdirs == 0 &&
                    ceph_snap(inode) == CEPH_NOSNAP &&
                    (le32_to_cpu(info->cap.caps) & CEPH_CAP_FILE_SHARED) &&
                    (issued & CEPH_CAP_FILE_EXCL) == 0 &&
                    (ci->i_ceph_flags & CEPH_I_COMPLETE) == 0) {
                        dout(" marking %p complete (empty)\n", inode);
                        ci->i_ceph_flags |= CEPH_I_COMPLETE;
                        ci->i_max_offset = 2;
                }

                /* it may be better to set st_size in getattr instead? */
                if (ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb), RBYTES))
                        inode->i_size = ci->i_rbytes;
                break;
        default:
                pr_err("fill_inode %llx.%llx BAD mode 0%o\n",
                       ceph_vinop(inode), inode->i_mode);
        }

no_change:
        spin_unlock(&inode->i_lock);

        /* queue truncate if we saw i_size decrease */
        if (queue_trunc)
                ceph_queue_vmtruncate(inode);

        /* populate frag tree */
        /* FIXME: move me up, if/when version reflects fragtree changes */
        nsplits = le32_to_cpu(info->fragtree.nsplits);
        mutex_lock(&ci->i_fragtree_mutex);
        for (i = 0; i < nsplits; i++) {
                u32 id = le32_to_cpu(info->fragtree.splits[i].frag);
                struct ceph_inode_frag *frag = __get_or_create_frag(ci, id);

                if (IS_ERR(frag))
                        continue;
                frag->split_by = le32_to_cpu(info->fragtree.splits[i].by);
                dout(" frag %x split by %d\n", frag->frag, frag->split_by);
        }
        mutex_unlock(&ci->i_fragtree_mutex);

        /* were we issued a capability? */
        if (info->cap.caps) {
                if (ceph_snap(inode) == CEPH_NOSNAP) {
                        ceph_add_cap(inode, session,
                                     le64_to_cpu(info->cap.cap_id),
                                     cap_fmode,
                                     le32_to_cpu(info->cap.caps),
                                     le32_to_cpu(info->cap.wanted),
                                     le32_to_cpu(info->cap.seq),
                                     le32_to_cpu(info->cap.mseq),
                                     le64_to_cpu(info->cap.realm),
                                     info->cap.flags,
                                     caps_reservation);
                } else {
                        spin_lock(&inode->i_lock);
                        dout(" %p got snap_caps %s\n", inode,
                             ceph_cap_string(le32_to_cpu(info->cap.caps)));
                        ci->i_snap_caps |= le32_to_cpu(info->cap.caps);
                        if (cap_fmode >= 0)
                                __ceph_get_fmode(ci, cap_fmode);
                        spin_unlock(&inode->i_lock);
                }
        } else if (cap_fmode >= 0) {
                pr_warning("mds issued no caps on %llx.%llx\n",
                           ceph_vinop(inode));
                __ceph_get_fmode(ci, cap_fmode);
        }

        /* update delegation info? */
        if (dirinfo)
                ceph_fill_dirfrag(inode, dirinfo);

        err = 0;

out:
        if (xattr_blob)
                ceph_buffer_put(xattr_blob);
        return err;
}
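
/*
 * Worked example of the version check in fill_inode() (illustrative):
 * ci->i_version is masked to even with (v & ~1) before comparing, so
 *
 *      local 2 (stable), incoming 2 (stable):       2 >= 2, skip
 *      local 3 (projected), incoming 2 (stable):    (3 & ~1) = 2 >= 2, skip
 *      local 3 (projected), incoming 3 (projected): 2 >= 3 false, update
 *
 * matching the us/them table in the comment above.
 */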

/*
 * caller should hold session s_mutex.
 */
static void update_dentry_lease(struct dentry *dentry,
                                struct ceph_mds_reply_lease *lease,
                                struct ceph_mds_session *session,
                                unsigned long from_time)
{
        struct ceph_dentry_info *di = ceph_dentry(dentry);
        long unsigned duration = le32_to_cpu(lease->duration_ms);
        long unsigned ttl = from_time + (duration * HZ) / 1000;
        long unsigned half_ttl = from_time + (duration * HZ / 2) / 1000;
        struct inode *dir;

        /* only track leases on regular dentries */
        if (dentry->d_op != &ceph_dentry_ops)
                return;

        spin_lock(&dentry->d_lock);
        dout("update_dentry_lease %p mask %d duration %lu ms ttl %lu\n",
             dentry, le16_to_cpu(lease->mask), duration, ttl);

        /* make lease_rdcache_gen match directory */
        dir = dentry->d_parent->d_inode;
        di->lease_shared_gen = ceph_inode(dir)->i_shared_gen;

        if (lease->mask == 0)
                goto out_unlock;

        if (di->lease_gen == session->s_cap_gen &&
            time_before(ttl, dentry->d_time))
                goto out_unlock;  /* we already have a newer lease. */

        if (di->lease_session && di->lease_session != session)
                goto out_unlock;

        ceph_dentry_lru_touch(dentry);

        if (!di->lease_session)
                di->lease_session = ceph_get_mds_session(session);
        di->lease_gen = session->s_cap_gen;
        di->lease_seq = le32_to_cpu(lease->seq);
        di->lease_renew_after = half_ttl;
        di->lease_renew_from = 0;
        dentry->d_time = ttl;
out_unlock:
        spin_unlock(&dentry->d_lock);
        return;
}
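
/*
 * Lease TTL arithmetic, worked through with illustrative numbers: the
 * MDS grants the lease in milliseconds and we convert to jiffies
 * relative to when the request was started.  With HZ = 250 and a 30 s
 * lease (duration_ms = 30000):
 *
 *      ttl      = from_time + 30000 * 250 / 1000     = from_time + 7500
 *      half_ttl = from_time + 30000 * 250 / 2 / 1000 = from_time + 3750
 *
 * so lease_renew_after falls at the halfway point, letting us renew
 * well before the lease lapses.
 */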

/*
 * Set dentry's directory position based on the current dir's max, and
 * order it in d_subdirs, so that dcache_readdir behaves.
 */
static void ceph_set_dentry_offset(struct dentry *dn)
{
        struct dentry *dir = dn->d_parent;
        struct inode *inode = dn->d_parent->d_inode;
        struct ceph_dentry_info *di;

        BUG_ON(!inode);

        di = ceph_dentry(dn);

        spin_lock(&inode->i_lock);
        if ((ceph_inode(inode)->i_ceph_flags & CEPH_I_COMPLETE) == 0) {
                spin_unlock(&inode->i_lock);
                return;
        }
        di->offset = ceph_inode(inode)->i_max_offset++;
        spin_unlock(&inode->i_lock);

        spin_lock(&dir->d_lock);
        spin_lock_nested(&dn->d_lock, DENTRY_D_LOCK_NESTED);
        list_move(&dn->d_u.d_child, &dir->d_subdirs);
        dout("set_dentry_offset %p %lld (%p %p)\n", dn, di->offset,
             dn->d_u.d_child.prev, dn->d_u.d_child.next);
        spin_unlock(&dn->d_lock);
        spin_unlock(&dir->d_lock);
}

/*
 * splice a dentry to an inode.
 * caller must hold directory i_mutex for this to be safe.
 *
 * we will only rehash the resulting dentry if @prehash is
 * true; @prehash will be set to false (for the benefit of
 * the caller) if we fail.
 */
static struct dentry *splice_dentry(struct dentry *dn, struct inode *in,
                                    bool *prehash, bool set_offset)
{
        struct dentry *realdn;

        BUG_ON(dn->d_inode);

        /* dn must be unhashed */
        if (!d_unhashed(dn))
                d_drop(dn);
        realdn = d_materialise_unique(dn, in);
        if (IS_ERR(realdn)) {
                pr_err("splice_dentry error %ld %p inode %p ino %llx.%llx\n",
                       PTR_ERR(realdn), dn, in, ceph_vinop(in));
                if (prehash)
                        *prehash = false; /* don't rehash on error */
                dn = realdn; /* note realdn contains the error */
                goto out;
        } else if (realdn) {
                dout("dn %p (%d) spliced with %p (%d) "
                     "inode %p ino %llx.%llx\n",
                     dn, dn->d_count,
                     realdn, realdn->d_count,
                     realdn->d_inode, ceph_vinop(realdn->d_inode));
                dput(dn);
                dn = realdn;
        } else {
                BUG_ON(!ceph_dentry(dn));
                dout("dn %p attached to %p ino %llx.%llx\n",
                     dn, dn->d_inode, ceph_vinop(dn->d_inode));
        }
        if ((!prehash || *prehash) && d_unhashed(dn))
                d_rehash(dn);
        if (set_offset)
                ceph_set_dentry_offset(dn);
out:
        return dn;
}

/*
 * Incorporate results into the local cache.  This is either just
 * one inode, or a directory, dentry, and possibly linked-to inode (e.g.,
 * after a lookup).
 *
 * A reply may contain
 *         a directory inode along with a dentry,
 *  and/or a target inode.
 *
 * Called with snap_rwsem (read).
 */
int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
                    struct ceph_mds_session *session)
{
        struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
        struct inode *in = NULL;
        struct ceph_mds_reply_inode *ininfo;
        struct ceph_vino vino;
        struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
        int i = 0;
        int err = 0;

        dout("fill_trace %p is_dentry %d is_target %d\n", req,
             rinfo->head->is_dentry, rinfo->head->is_target);

#if 0
        /*
         * Debugging hook:
         *
         * If we resend completed ops to a recovering mds, we get no
         * trace.  Since that is very rare, pretend this is the case
         * to ensure the 'no trace' handlers in the callers behave.
         *
         * Fill in inodes unconditionally to avoid breaking cap
         * invariants.
         */
        if (rinfo->head->op & CEPH_MDS_OP_WRITE) {
                pr_info("fill_trace faking empty trace on %lld %s\n",
                        req->r_tid, ceph_mds_op_name(rinfo->head->op));
                if (rinfo->head->is_dentry) {
                        rinfo->head->is_dentry = 0;
                        err = fill_inode(req->r_locked_dir,
                                         &rinfo->diri, rinfo->dirfrag,
                                         session, req->r_request_started, -1);
                }
                if (rinfo->head->is_target) {
                        rinfo->head->is_target = 0;
                        ininfo = rinfo->targeti.in;
                        vino.ino = le64_to_cpu(ininfo->ino);
                        vino.snap = le64_to_cpu(ininfo->snapid);
                        in = ceph_get_inode(sb, vino);
                        err = fill_inode(in, &rinfo->targeti, NULL,
                                         session, req->r_request_started,
                                         req->r_fmode);
                        iput(in);
                }
        }
#endif

        if (!rinfo->head->is_target && !rinfo->head->is_dentry) {
                dout("fill_trace reply is empty!\n");
                if (rinfo->head->result == 0 && req->r_locked_dir)
                        ceph_invalidate_dir_request(req);
                return 0;
        }

        if (rinfo->head->is_dentry) {
                struct inode *dir = req->r_locked_dir;

                err = fill_inode(dir, &rinfo->diri, rinfo->dirfrag,
                                 session, req->r_request_started, -1,
                                 &req->r_caps_reservation);
                if (err < 0)
                        return err;
        }

        /*
         * ignore null lease/binding on snapdir ENOENT, or else we
         * will have trouble splicing in the virtual snapdir later
         */
        if (rinfo->head->is_dentry && !req->r_aborted &&
            (rinfo->head->is_target || strncmp(req->r_dentry->d_name.name,
                                               fsc->mount_options->snapdir_name,
                                               req->r_dentry->d_name.len))) {
                /*
                 * lookup link rename   : null -> possibly existing inode
                 * mknod symlink mkdir  : null -> new inode
                 * unlink               : linked -> null
                 */
                struct inode *dir = req->r_locked_dir;
                struct dentry *dn = req->r_dentry;
                bool have_dir_cap, have_lease;

                BUG_ON(!dn);
                BUG_ON(!dir);
                BUG_ON(dn->d_parent->d_inode != dir);
                BUG_ON(ceph_ino(dir) !=
                       le64_to_cpu(rinfo->diri.in->ino));
                BUG_ON(ceph_snap(dir) !=
                       le64_to_cpu(rinfo->diri.in->snapid));

                /* do we have a lease on the whole dir? */
                have_dir_cap =
                        (le32_to_cpu(rinfo->diri.in->cap.caps) &
                         CEPH_CAP_FILE_SHARED);

                /* do we have a dn lease? */
                have_lease = have_dir_cap ||
                        (le16_to_cpu(rinfo->dlease->mask) &
                         CEPH_LOCK_DN);

                if (!have_lease)
                        dout("fill_trace  no dentry lease or dir cap\n");

                /* rename? */
                if (req->r_old_dentry && req->r_op == CEPH_MDS_OP_RENAME) {
                        dout(" src %p '%.*s' dst %p '%.*s'\n",
                             req->r_old_dentry,
                             req->r_old_dentry->d_name.len,
                             req->r_old_dentry->d_name.name,
                             dn, dn->d_name.len, dn->d_name.name);
                        dout("fill_trace doing d_move %p -> %p\n",
                             req->r_old_dentry, dn);

                        /* d_move screws up d_subdirs order */
                        ceph_i_clear(dir, CEPH_I_COMPLETE);

                        d_move(req->r_old_dentry, dn);
                        dout(" src %p '%.*s' dst %p '%.*s'\n",
                             req->r_old_dentry,
                             req->r_old_dentry->d_name.len,
                             req->r_old_dentry->d_name.name,
                             dn, dn->d_name.len, dn->d_name.name);

                        /* ensure target dentry is invalidated, despite
                           rehashing bug in vfs_rename_dir */
                        ceph_invalidate_dentry_lease(dn);

                        /* take overwritten dentry's readdir offset */
                        dout("dn %p gets %p offset %lld (old offset %lld)\n",
                             req->r_old_dentry, dn, ceph_dentry(dn)->offset,
                             ceph_dentry(req->r_old_dentry)->offset);
                        ceph_dentry(req->r_old_dentry)->offset =
                                ceph_dentry(dn)->offset;

                        dn = req->r_old_dentry;  /* use old_dentry */
                        in = dn->d_inode;
                }

                /* null dentry? */
                if (!rinfo->head->is_target) {
                        dout("fill_trace null dentry\n");
                        if (dn->d_inode) {
                                dout("d_delete %p\n", dn);
                                d_delete(dn);
                        } else {
                                dout("d_instantiate %p NULL\n", dn);
                                d_instantiate(dn, NULL);
                                if (have_lease && d_unhashed(dn))
                                        d_rehash(dn);
                                update_dentry_lease(dn, rinfo->dlease,
                                                    session,
                                                    req->r_request_started);
                        }
                        goto done;
                }

                /* attach proper inode */
                ininfo = rinfo->targeti.in;
                vino.ino = le64_to_cpu(ininfo->ino);
                vino.snap = le64_to_cpu(ininfo->snapid);
                in = dn->d_inode;
                if (!in) {
                        in = ceph_get_inode(sb, vino);
                        if (IS_ERR(in)) {
                                pr_err("fill_trace bad get_inode "
                                       "%llx.%llx\n", vino.ino, vino.snap);
                                err = PTR_ERR(in);
                                d_delete(dn);
                                goto done;
                        }
                        dn = splice_dentry(dn, in, &have_lease, true);
                        if (IS_ERR(dn)) {
                                err = PTR_ERR(dn);
                                goto done;
                        }
                        req->r_dentry = dn;  /* may have spliced */
                        igrab(in);
                } else if (ceph_ino(in) == vino.ino &&
                           ceph_snap(in) == vino.snap) {
                        igrab(in);
                } else {
                        dout(" %p links to %p %llx.%llx, not %llx.%llx\n",
                             dn, in, ceph_ino(in), ceph_snap(in),
                             vino.ino, vino.snap);
                        have_lease = false;
                        in = NULL;
                }

                if (have_lease)
                        update_dentry_lease(dn, rinfo->dlease, session,
                                            req->r_request_started);
                dout(" final dn %p\n", dn);
                i++;
        } else if (req->r_op == CEPH_MDS_OP_LOOKUPSNAP ||
                   req->r_op == CEPH_MDS_OP_MKSNAP) {
                struct dentry *dn = req->r_dentry;

                /* fill out a snapdir LOOKUPSNAP dentry */
                BUG_ON(!dn);
                BUG_ON(!req->r_locked_dir);
                BUG_ON(ceph_snap(req->r_locked_dir) != CEPH_SNAPDIR);
                ininfo = rinfo->targeti.in;
                vino.ino = le64_to_cpu(ininfo->ino);
                vino.snap = le64_to_cpu(ininfo->snapid);
                in = ceph_get_inode(sb, vino);
                if (IS_ERR(in)) {
                        pr_err("fill_inode get_inode badness %llx.%llx\n",
                               vino.ino, vino.snap);
                        err = PTR_ERR(in);
                        d_delete(dn);
                        goto done;
                }
                dout(" linking snapped dir %p to dn %p\n", in, dn);
                dn = splice_dentry(dn, in, NULL, true);
                if (IS_ERR(dn)) {
                        err = PTR_ERR(dn);
                        goto done;
                }
                req->r_dentry = dn;  /* may have spliced */
                igrab(in);
                rinfo->head->is_dentry = 1;  /* fool notrace handlers */
        }

        if (rinfo->head->is_target) {
                vino.ino = le64_to_cpu(rinfo->targeti.in->ino);
                vino.snap = le64_to_cpu(rinfo->targeti.in->snapid);

                if (in == NULL || ceph_ino(in) != vino.ino ||
                    ceph_snap(in) != vino.snap) {
                        in = ceph_get_inode(sb, vino);
                        if (IS_ERR(in)) {
                                err = PTR_ERR(in);
                                goto done;
                        }
                }
                req->r_target_inode = in;

                err = fill_inode(in,
                                 &rinfo->targeti, NULL,
                                 session, req->r_request_started,
                                 (le32_to_cpu(rinfo->head->result) == 0) ?
                                 req->r_fmode : -1,
                                 &req->r_caps_reservation);
                if (err < 0) {
                        pr_err("fill_inode badness %p %llx.%llx\n",
                               in, ceph_vinop(in));
                        goto done;
                }
        }

done:
        dout("fill_trace done err=%d\n", err);
        return err;
}

/*
 * Prepopulate our cache with readdir results, leases, etc.
 */
int ceph_readdir_prepopulate(struct ceph_mds_request *req,
                             struct ceph_mds_session *session)
{
        struct dentry *parent = req->r_dentry;
        struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
        struct qstr dname;
        struct dentry *dn;
        struct inode *in;
        int err = 0, i;
        struct inode *snapdir = NULL;
        struct ceph_mds_request_head *rhead = req->r_request->front.iov_base;
        u64 frag = le32_to_cpu(rhead->args.readdir.frag);
        struct ceph_dentry_info *di;

        if (le32_to_cpu(rinfo->head->op) == CEPH_MDS_OP_LSSNAP) {
                snapdir = ceph_get_snapdir(parent->d_inode);
                parent = d_find_alias(snapdir);
                dout("readdir_prepopulate %d items under SNAPDIR dn %p\n",
                     rinfo->dir_nr, parent);
        } else {
                dout("readdir_prepopulate %d items under dn %p\n",
                     rinfo->dir_nr, parent);
                if (rinfo->dir_dir)
                        ceph_fill_dirfrag(parent->d_inode, rinfo->dir_dir);
        }

        for (i = 0; i < rinfo->dir_nr; i++) {
                struct ceph_vino vino;

                dname.name = rinfo->dir_dname[i];
                dname.len = rinfo->dir_dname_len[i];
                dname.hash = full_name_hash(dname.name, dname.len);

                vino.ino = le64_to_cpu(rinfo->dir_in[i].in->ino);
                vino.snap = le64_to_cpu(rinfo->dir_in[i].in->snapid);

retry_lookup:
                dn = d_lookup(parent, &dname);
                dout("d_lookup on parent=%p name=%.*s got %p\n",
                     parent, dname.len, dname.name, dn);

                if (!dn) {
                        dn = d_alloc(parent, &dname);
                        dout("d_alloc %p '%.*s' = %p\n", parent,
                             dname.len, dname.name, dn);
                        if (dn == NULL) {
                                dout("d_alloc badness\n");
                                err = -ENOMEM;
                                goto out;
                        }
                        err = ceph_init_dentry(dn);
                        if (err < 0) {
                                dput(dn);
                                goto out;
                        }
                } else if (dn->d_inode &&
                           (ceph_ino(dn->d_inode) != vino.ino ||
                            ceph_snap(dn->d_inode) != vino.snap)) {
                        dout(" dn %p points to wrong inode %p\n",
                             dn, dn->d_inode);
                        d_delete(dn);
                        dput(dn);
                        goto retry_lookup;
                } else {
                        /* reorder parent's d_subdirs */
                        spin_lock(&parent->d_lock);
                        spin_lock_nested(&dn->d_lock, DENTRY_D_LOCK_NESTED);
                        list_move(&dn->d_u.d_child, &parent->d_subdirs);
                        spin_unlock(&dn->d_lock);
                        spin_unlock(&parent->d_lock);
                }

                di = dn->d_fsdata;
                di->offset = ceph_make_fpos(frag, i + req->r_readdir_offset);

                /* inode */
                if (dn->d_inode) {
                        in = dn->d_inode;
                } else {
                        in = ceph_get_inode(parent->d_sb, vino);
                        if (IS_ERR(in)) {
                                dout("new_inode badness\n");
                                d_delete(dn);
                                dput(dn);
                                err = PTR_ERR(in);
                                goto out;
                        }
                        dn = splice_dentry(dn, in, NULL, false);
                        if (IS_ERR(dn))
                                dn = NULL;
                }

                if (fill_inode(in, &rinfo->dir_in[i], NULL, session,
                               req->r_request_started, -1,
                               &req->r_caps_reservation) < 0) {
                        pr_err("fill_inode badness on %p\n", in);
                        goto next_item;
                }
                if (dn)
                        update_dentry_lease(dn, rinfo->dir_dlease[i],
                                            req->r_session,
                                            req->r_request_started);
next_item:
                if (dn)
                        dput(dn);
        }
        req->r_did_prepopulate = true;

out:
        if (snapdir) {
                iput(snapdir);
                dput(parent);
        }
        dout("readdir_prepopulate done\n");
        return err;
}

int ceph_inode_set_size(struct inode *inode, loff_t size)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        int ret = 0;

        spin_lock(&inode->i_lock);
        dout("set_size %p %llu -> %llu\n", inode, inode->i_size, size);
        inode->i_size = size;
        inode->i_blocks = (size + (1 << 9) - 1) >> 9;

        /* tell the MDS if we are approaching max_size */
        if ((size << 1) >= ci->i_max_size &&
            (ci->i_reported_size << 1) < ci->i_max_size)
                ret = 1;

        spin_unlock(&inode->i_lock);
        return ret;
}
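
/*
 * Worked example of the max_size check above (illustrative numbers):
 * (size << 1) >= max_size is just size >= max_size / 2, and the
 * i_reported_size test keeps us from re-reporting.  With
 * max_size = 4 MB:
 *
 *      size 1 MB, reported 1 MB:  1 MB < 2 MB          -> 0
 *      size 2 MB, reported 1 MB:  2 >= 2 and 1 < 2     -> 1 (tell MDS)
 *      size 3 MB, reported 2 MB:  2*2 = 4 is not < 4   -> 0 (already told)
 */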
1317
1318 /*
1319  * Write back inode data in a worker thread.  (This can't be done
1320  * in the message handler context.)
1321  */
1322 void ceph_queue_writeback(struct inode *inode)
1323 {
1324         if (queue_work(ceph_inode_to_client(inode)->wb_wq,
1325                        &ceph_inode(inode)->i_wb_work)) {
1326                 dout("ceph_queue_writeback %p\n", inode);
1327                 igrab(inode);
1328         } else {
1329                 dout("ceph_queue_writeback %p failed\n", inode);
1330         }
1331 }
1332
1333 static void ceph_writeback_work(struct work_struct *work)
1334 {
1335         struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
1336                                                   i_wb_work);
1337         struct inode *inode = &ci->vfs_inode;
1338
1339         dout("writeback %p\n", inode);
1340         filemap_fdatawrite(&inode->i_data);
1341         iput(inode);
1342 }
1343
1344 /*
1345  * queue an async invalidation
1346  */
1347 void ceph_queue_invalidate(struct inode *inode)
1348 {
1349         if (queue_work(ceph_inode_to_client(inode)->pg_inv_wq,
1350                        &ceph_inode(inode)->i_pg_inv_work)) {
1351                 dout("ceph_queue_invalidate %p\n", inode);
1352                 igrab(inode);
1353         } else {
1354                 dout("ceph_queue_invalidate %p failed\n", inode);
1355         }
1356 }
1357
1358 /*
1359  * invalidate any pages that are not dirty or under writeback.  this
1360  * includes pages that are clean and mapped.
1361  */
1362 static void ceph_invalidate_nondirty_pages(struct address_space *mapping)
1363 {
1364         struct pagevec pvec;
1365         pgoff_t next = 0;
1366         int i;
1367
1368         pagevec_init(&pvec, 0);
1369         while (pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
1370                 for (i = 0; i < pagevec_count(&pvec); i++) {
1371                         struct page *page = pvec.pages[i];
1372                         pgoff_t index;
1373                         int skip_page =
1374                                 (PageDirty(page) || PageWriteback(page));
1375
1376                         if (!skip_page)
1377                                 skip_page = !trylock_page(page);
1378
1379                         /*
1380                          * We really shouldn't be looking at the ->index of an
1381                          * unlocked page.  But we're not allowed to lock these
1382                          * pages.  So we rely upon nobody altering the ->index
1383                          * of this (pinned-by-us) page.
1384                          */
1385                         index = page->index;
1386                         if (index > next)
1387                                 next = index;
1388                         next++;
1389
1390                         if (skip_page)
1391                                 continue;
1392
1393                         generic_error_remove_page(mapping, page);
1394                         unlock_page(page);
1395                 }
1396                 pagevec_release(&pvec);
1397                 cond_resched();
1398         }
1399 }
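
/*
 * Illustrative contrast (hypothetical helper, compiled out): the stock
 * invalidate_mapping_pages() skips mapped pages entirely, which is why
 * the custom walker above uses generic_error_remove_page() -- clean
 * pages mapped into user space must be unmapped and dropped too.
 */
#if 0
static void example_insufficient(struct address_space *mapping)
{
        /* leaves clean mapped pages in place; not enough here */
        invalidate_mapping_pages(mapping, 0, -1);
}
#endif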
1400
1401 /*
1402  * Invalidate inode pages in a worker thread.  (This can't be done
1403  * in the message handler context.)
1404  */
1405 static void ceph_invalidate_work(struct work_struct *work)
1406 {
1407         struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
1408                                                   i_pg_inv_work);
1409         struct inode *inode = &ci->vfs_inode;
1410         u32 orig_gen;
1411         int check = 0;
1412
1413         spin_lock(&inode->i_lock);
1414         dout("invalidate_pages %p gen %d revoking %d\n", inode,
1415              ci->i_rdcache_gen, ci->i_rdcache_revoking);
1416         if (ci->i_rdcache_revoking != ci->i_rdcache_gen) {
1417                         /* never mind! */
1418                 spin_unlock(&inode->i_lock);
1419                 goto out;
1420         }
1421         orig_gen = ci->i_rdcache_gen;
1422         spin_unlock(&inode->i_lock);
1423
1424         ceph_invalidate_nondirty_pages(inode->i_mapping);
1425
1426         spin_lock(&inode->i_lock);
1427         if (orig_gen == ci->i_rdcache_gen &&
1428             orig_gen == ci->i_rdcache_revoking) {
1429                 dout("invalidate_pages %p gen %d successful\n", inode,
1430                      ci->i_rdcache_gen);
1431                 ci->i_rdcache_revoking--;
1432                 check = 1;
1433         } else {
1434                 dout("invalidate_pages %p gen %d raced, now %d revoking %d\n",
1435                      inode, orig_gen, ci->i_rdcache_gen,
1436                      ci->i_rdcache_revoking);
1437         }
1438         spin_unlock(&inode->i_lock);
1439
1440         if (check)
1441                 ceph_check_caps(ci, 0, NULL);
1442 out:
1443         iput(inode);
1444 }
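
/*
 * Illustrative sketch (hypothetical helper, compiled out): the caps
 * code arms an invalidation by recording the current generation in
 * i_rdcache_revoking before queueing; the worker above bails out if
 * the generation has moved on before it ran.
 */
#if 0
static void example_arm_invalidate(struct inode *inode)
{
        struct ceph_inode_info *ci = ceph_inode(inode);

        spin_lock(&inode->i_lock);
        ci->i_rdcache_revoking = ci->i_rdcache_gen;
        spin_unlock(&inode->i_lock);

        ceph_queue_invalidate(inode);
}
#endif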
1445
1446
1447 /*
1448  * called by trunc_wq; take i_mutex ourselves
1449  *
1450  * This lets truncation happen asynchronously, in a separate thread.
1451  */
1452 static void ceph_vmtruncate_work(struct work_struct *work)
1453 {
1454         struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
1455                                                   i_vmtruncate_work);
1456         struct inode *inode = &ci->vfs_inode;
1457
1458         dout("vmtruncate_work %p\n", inode);
1459         mutex_lock(&inode->i_mutex);
1460         __ceph_do_pending_vmtruncate(inode);
1461         mutex_unlock(&inode->i_mutex);
1462         iput(inode);
1463 }
1464
1465 /*
1466  * Queue an async vmtruncate.  If we fail to queue work, we will handle
1467  * the truncation the next time we call __ceph_do_pending_vmtruncate.
1468  */
1469 void ceph_queue_vmtruncate(struct inode *inode)
1470 {
1471         struct ceph_inode_info *ci = ceph_inode(inode);
1472
1473         if (queue_work(ceph_sb_to_client(inode->i_sb)->trunc_wq,
1474                        &ci->i_vmtruncate_work)) {
1475                 dout("ceph_queue_vmtruncate %p\n", inode);
1476                 igrab(inode);
1477         } else {
1478                 dout("ceph_queue_vmtruncate %p failed, pending=%d\n",
1479                      inode, ci->i_truncate_pending);
1480         }
1481 }
1482
1483 /*
1484  * called with i_mutex held.
1485  *
1486  * Make sure any pending truncation is applied before doing anything
1487  * that may depend on it.
1488  */
1489 void __ceph_do_pending_vmtruncate(struct inode *inode)
1490 {
1491         struct ceph_inode_info *ci = ceph_inode(inode);
1492         u64 to;
1493         int wrbuffer_refs, wake = 0;
1494
1495 retry:
1496         spin_lock(&inode->i_lock);
1497         if (ci->i_truncate_pending == 0) {
1498                 dout("__do_pending_vmtruncate %p none pending\n", inode);
1499                 spin_unlock(&inode->i_lock);
1500                 return;
1501         }
1502
1503         /*
1504          * make sure any dirty snapped pages are flushed before we
1505          * possibly truncate them: so write AND block!
1506          */
1507         if (ci->i_wrbuffer_ref_head < ci->i_wrbuffer_ref) {
1508                 dout("__do_pending_vmtruncate %p flushing snaps first\n",
1509                      inode);
1510                 spin_unlock(&inode->i_lock);
1511                 filemap_write_and_wait_range(&inode->i_data, 0,
1512                                              inode->i_sb->s_maxbytes);
1513                 goto retry;
1514         }
1515
1516         to = ci->i_truncate_size;
1517         wrbuffer_refs = ci->i_wrbuffer_ref;
1518         dout("__do_pending_vmtruncate %p (%d) to %lld\n", inode,
1519              ci->i_truncate_pending, to);
1520         spin_unlock(&inode->i_lock);
1521
1522         truncate_inode_pages(inode->i_mapping, to);
1523
1524         spin_lock(&inode->i_lock);
1525         ci->i_truncate_pending--;
1526         if (ci->i_truncate_pending == 0)
1527                 wake = 1;
1528         spin_unlock(&inode->i_lock);
1529
1530         if (wrbuffer_refs == 0)
1531                 ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
1532         if (wake)
1533                 wake_up_all(&ci->i_cap_wq);
1534 }
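
/*
 * Illustrative sketch (hypothetical caller, compiled out): any path
 * that already holds i_mutex should apply queued truncation before
 * trusting page cache contents.
 */
#if 0
static void example_sync_path(struct inode *inode)
{
        mutex_lock(&inode->i_mutex);
        __ceph_do_pending_vmtruncate(inode);    /* needs i_mutex held */
        /* ... safe to rely on i_size and the page cache now ... */
        mutex_unlock(&inode->i_mutex);
}
#endif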
1535
1536
1537 /*
1538  * symlinks
1539  */
1540 static void *ceph_sym_follow_link(struct dentry *dentry, struct nameidata *nd)
1541 {
1542         struct ceph_inode_info *ci = ceph_inode(dentry->d_inode);
1543         nd_set_link(nd, ci->i_symlink);
1544         return NULL;
1545 }
1546
1547 static const struct inode_operations ceph_symlink_iops = {
1548         .readlink = generic_readlink,
1549         .follow_link = ceph_sym_follow_link,
1550 };
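
/*
 * Illustrative sketch (hypothetical, compiled out): ->follow_link's
 * return value is an opaque cookie handed to ->put_link.  Ceph returns
 * NULL because i_symlink is cached for the life of the inode; a
 * filesystem that built the target per call would do something like:
 */
#if 0
static void *example_follow_link(struct dentry *dentry, struct nameidata *nd)
{
        char *target = kstrdup(ceph_inode(dentry->d_inode)->i_symlink,
                               GFP_KERNEL);

        if (!target)
                return ERR_PTR(-ENOMEM);
        nd_set_link(nd, target);
        return target;          /* freed by a matching ->put_link */
}
#endif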
1551
1552 /*
1553  * setattr
1554  */
1555 int ceph_setattr(struct dentry *dentry, struct iattr *attr)
1556 {
1557         struct inode *inode = dentry->d_inode;
1558         struct ceph_inode_info *ci = ceph_inode(inode);
1559         struct inode *parent_inode = dentry->d_parent->d_inode;
1560         const unsigned int ia_valid = attr->ia_valid;
1561         struct ceph_mds_request *req;
1562         struct ceph_mds_client *mdsc = ceph_sb_to_client(dentry->d_sb)->mdsc;
1563         int issued;
1564         int release = 0, dirtied = 0;
1565         int mask = 0;
1566         int err = 0;
1567
1568         if (ceph_snap(inode) != CEPH_NOSNAP)
1569                 return -EROFS;
1570
1571         __ceph_do_pending_vmtruncate(inode);
1572
1573         err = inode_change_ok(inode, attr);
1574         if (err != 0)
1575                 return err;
1576
1577         req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETATTR,
1578                                        USE_AUTH_MDS);
1579         if (IS_ERR(req))
1580                 return PTR_ERR(req);
1581
1582         spin_lock(&inode->i_lock);
1583         issued = __ceph_caps_issued(ci, NULL);
1584         dout("setattr %p issued %s\n", inode, ceph_cap_string(issued));
1585
1586         if (ia_valid & ATTR_UID) {
1587                 dout("setattr %p uid %d -> %d\n", inode,
1588                      inode->i_uid, attr->ia_uid);
1589                 if (issued & CEPH_CAP_AUTH_EXCL) {
1590                         inode->i_uid = attr->ia_uid;
1591                         dirtied |= CEPH_CAP_AUTH_EXCL;
1592                 } else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
1593                            attr->ia_uid != inode->i_uid) {
1594                         req->r_args.setattr.uid = cpu_to_le32(attr->ia_uid);
1595                         mask |= CEPH_SETATTR_UID;
1596                         release |= CEPH_CAP_AUTH_SHARED;
1597                 }
1598         }
1599         if (ia_valid & ATTR_GID) {
1600                 dout("setattr %p gid %d -> %d\n", inode,
1601                      inode->i_gid, attr->ia_gid);
1602                 if (issued & CEPH_CAP_AUTH_EXCL) {
1603                         inode->i_gid = attr->ia_gid;
1604                         dirtied |= CEPH_CAP_AUTH_EXCL;
1605                 } else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
1606                            attr->ia_gid != inode->i_gid) {
1607                         req->r_args.setattr.gid = cpu_to_le32(attr->ia_gid);
1608                         mask |= CEPH_SETATTR_GID;
1609                         release |= CEPH_CAP_AUTH_SHARED;
1610                 }
1611         }
1612         if (ia_valid & ATTR_MODE) {
1613                 dout("setattr %p mode 0%o -> 0%o\n", inode, inode->i_mode,
1614                      attr->ia_mode);
1615                 if (issued & CEPH_CAP_AUTH_EXCL) {
1616                         inode->i_mode = attr->ia_mode;
1617                         dirtied |= CEPH_CAP_AUTH_EXCL;
1618                 } else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
1619                            attr->ia_mode != inode->i_mode) {
1620                         req->r_args.setattr.mode = cpu_to_le32(attr->ia_mode);
1621                         mask |= CEPH_SETATTR_MODE;
1622                         release |= CEPH_CAP_AUTH_SHARED;
1623                 }
1624         }
1625
1626         if (ia_valid & ATTR_ATIME) {
1627                 dout("setattr %p atime %ld.%ld -> %ld.%ld\n", inode,
1628                      inode->i_atime.tv_sec, inode->i_atime.tv_nsec,
1629                      attr->ia_atime.tv_sec, attr->ia_atime.tv_nsec);
1630                 if (issued & CEPH_CAP_FILE_EXCL) {
1631                         ci->i_time_warp_seq++;
1632                         inode->i_atime = attr->ia_atime;
1633                         dirtied |= CEPH_CAP_FILE_EXCL;
1634                 } else if ((issued & CEPH_CAP_FILE_WR) &&
1635                            timespec_compare(&inode->i_atime,
1636                                             &attr->ia_atime) < 0) {
1637                         inode->i_atime = attr->ia_atime;
1638                         dirtied |= CEPH_CAP_FILE_WR;
1639                 } else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
1640                            !timespec_equal(&inode->i_atime, &attr->ia_atime)) {
1641                         ceph_encode_timespec(&req->r_args.setattr.atime,
1642                                              &attr->ia_atime);
1643                         mask |= CEPH_SETATTR_ATIME;
1644                         release |= CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_RD |
1645                                 CEPH_CAP_FILE_WR;
1646                 }
1647         }
1648         if (ia_valid & ATTR_MTIME) {
1649                 dout("setattr %p mtime %ld.%ld -> %ld.%ld\n", inode,
1650                      inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
1651                      attr->ia_mtime.tv_sec, attr->ia_mtime.tv_nsec);
1652                 if (issued & CEPH_CAP_FILE_EXCL) {
1653                         ci->i_time_warp_seq++;
1654                         inode->i_mtime = attr->ia_mtime;
1655                         dirtied |= CEPH_CAP_FILE_EXCL;
1656                 } else if ((issued & CEPH_CAP_FILE_WR) &&
1657                            timespec_compare(&inode->i_mtime,
1658                                             &attr->ia_mtime) < 0) {
1659                         inode->i_mtime = attr->ia_mtime;
1660                         dirtied |= CEPH_CAP_FILE_WR;
1661                 } else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
1662                            !timespec_equal(&inode->i_mtime, &attr->ia_mtime)) {
1663                         ceph_encode_timespec(&req->r_args.setattr.mtime,
1664                                              &attr->ia_mtime);
1665                         mask |= CEPH_SETATTR_MTIME;
1666                         release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD |
1667                                 CEPH_CAP_FILE_WR;
1668                 }
1669         }
1670         if (ia_valid & ATTR_SIZE) {
1671                 dout("setattr %p size %lld -> %lld\n", inode,
1672                      inode->i_size, attr->ia_size);
1673                 if (attr->ia_size > inode->i_sb->s_maxbytes) {
1674                         err = -EINVAL;
1675                         goto out;
1676                 }
1677                 if ((issued & CEPH_CAP_FILE_EXCL) &&
1678                     attr->ia_size > inode->i_size) {
1679                         inode->i_size = attr->ia_size;
1680                         inode->i_blocks =
1681                                 (attr->ia_size + (1 << 9) - 1) >> 9;
1682                         inode->i_ctime = attr->ia_ctime;
1683                         ci->i_reported_size = attr->ia_size;
1684                         dirtied |= CEPH_CAP_FILE_EXCL;
1685                 } else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
1686                            attr->ia_size != inode->i_size) {
1687                         req->r_args.setattr.size = cpu_to_le64(attr->ia_size);
1688                         req->r_args.setattr.old_size =
1689                                 cpu_to_le64(inode->i_size);
1690                         mask |= CEPH_SETATTR_SIZE;
1691                         release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD |
1692                                 CEPH_CAP_FILE_WR;
1693                 }
1694         }
1695
1696         /* these do nothing by themselves */
1697         if (ia_valid & ATTR_CTIME) {
1698                 bool only = (ia_valid & (ATTR_SIZE|ATTR_MTIME|ATTR_ATIME|
1699                                          ATTR_MODE|ATTR_UID|ATTR_GID)) == 0;
1700                 dout("setattr %p ctime %ld.%ld -> %ld.%ld (%s)\n", inode,
1701                      inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
1702                      attr->ia_ctime.tv_sec, attr->ia_ctime.tv_nsec,
1703                      only ? "ctime only" : "ignored");
1704                 inode->i_ctime = attr->ia_ctime;
1705                 if (only) {
1706                         /*
1707                          * if the kernel wants to dirty ctime but nothing
1708                          * else, we need to choose a cap to dirty under,
1709                          * or do an almost-no-op setattr
1710                          */
1711                         if (issued & CEPH_CAP_AUTH_EXCL)
1712                                 dirtied |= CEPH_CAP_AUTH_EXCL;
1713                         else if (issued & CEPH_CAP_FILE_EXCL)
1714                                 dirtied |= CEPH_CAP_FILE_EXCL;
1715                         else if (issued & CEPH_CAP_XATTR_EXCL)
1716                                 dirtied |= CEPH_CAP_XATTR_EXCL;
1717                         else
1718                                 mask |= CEPH_SETATTR_CTIME;
1719                 }
1720         }
1721         if (ia_valid & ATTR_FILE)
1722                 dout("setattr %p ATTR_FILE ... hrm!\n", inode);
1723
1724         if (dirtied) {
1725                 __ceph_mark_dirty_caps(ci, dirtied);
1726                 inode->i_ctime = CURRENT_TIME;
1727         }
1728
1729         release &= issued;
1730         spin_unlock(&inode->i_lock);
1731
1732         if (mask) {
1733                 req->r_inode = igrab(inode);
1734                 req->r_inode_drop = release;
1735                 req->r_args.setattr.mask = cpu_to_le32(mask);
1736                 req->r_num_caps = 1;
1737                 err = ceph_mdsc_do_request(mdsc, parent_inode, req);
1738         }
1739         dout("setattr %p result=%d (%s locally, %d remote)\n", inode, err,
1740              ceph_cap_string(dirtied), mask);
1741
1742         ceph_mdsc_put_request(req);
1743         __ceph_do_pending_vmtruncate(inode);
1744         return err;
1745 out:
1746         spin_unlock(&inode->i_lock);
1747         ceph_mdsc_put_request(req);
1748         return err;
1749 }
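
/*
 * Illustrative sketch (hypothetical names, compiled out): the pattern
 * each attribute above follows, distilled.  With the exclusive cap
 * (AUTH_* for uid/gid/mode, FILE_* for times/size) we apply the change
 * locally and mark the cap dirty; otherwise, unless the cached SHARED
 * value already matches, we must send the change to the MDS.
 */
#if 0
enum example_attr_action { EX_LOCAL_DIRTY, EX_SEND_MDS, EX_NOOP };

static enum example_attr_action example_attr_decision(int issued, bool differs)
{
        if (issued & CEPH_CAP_AUTH_EXCL)
                return EX_LOCAL_DIRTY;  /* update inode, mark cap dirty */
        if ((issued & CEPH_CAP_AUTH_SHARED) == 0 || differs)
                return EX_SEND_MDS;     /* fill setattr arg, release SHARED */
        return EX_NOOP;                 /* cached value already matches */
}
#endif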
1750
1751 /*
1752  * Verify that we have a lease on the given mask.  If not,
1753  * do a getattr against the MDS.
1754  */
1755 int ceph_do_getattr(struct inode *inode, int mask)
1756 {
1757         struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
1758         struct ceph_mds_client *mdsc = fsc->mdsc;
1759         struct ceph_mds_request *req;
1760         int err;
1761
1762         if (ceph_snap(inode) == CEPH_SNAPDIR) {
1763                 dout("do_getattr inode %p SNAPDIR\n", inode);
1764                 return 0;
1765         }
1766
1767         dout("do_getattr inode %p mask %s mode 0%o\n",
                  inode, ceph_cap_string(mask), inode->i_mode);
1768         if (ceph_caps_issued_mask(ceph_inode(inode), mask, 1))
1769                 return 0;
1770
1771         req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, USE_ANY_MDS);
1772         if (IS_ERR(req))
1773                 return PTR_ERR(req);
1774         req->r_inode = igrab(inode);
1775         req->r_num_caps = 1;
1776         req->r_args.getattr.mask = cpu_to_le32(mask);
1777         err = ceph_mdsc_do_request(mdsc, NULL, req);
1778         ceph_mdsc_put_request(req);
1779         dout("do_getattr result=%d\n", err);
1780         return err;
1781 }
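
/*
 * Illustrative usage (hypothetical helper, compiled out): refresh just
 * the file size, skipping the MDS round trip when the caps we hold
 * already cover it.  CEPH_STAT_CAP_SIZE comes from ceph_fs.h.
 */
#if 0
static loff_t example_fresh_size(struct inode *inode)
{
        int err = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE);

        return err ? err : i_size_read(inode);
}
#endif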
1782
1783
1784 /*
1785  * Check inode permissions.  We verify we have a valid value for
1786  * the AUTH cap, then call the generic handler.
1787  */
1788 int ceph_permission(struct inode *inode, int mask, unsigned int flags)
1789 {
1790         int err;
1791
1792         if (flags & IPERM_FLAG_RCU)
1793                 return -ECHILD;
1794
1795         err = ceph_do_getattr(inode, CEPH_CAP_AUTH_SHARED);
1796
1797         if (!err)
1798                 err = generic_permission(inode, mask, flags, NULL);
1799         return err;
1800 }
1801
1802 /*
1803  * Get all attributes.  Hopefully someday we'll have a statlite()
1804  * and can limit the fields we require to be accurate.
1805  */
1806 int ceph_getattr(struct vfsmount *mnt, struct dentry *dentry,
1807                  struct kstat *stat)
1808 {
1809         struct inode *inode = dentry->d_inode;
1810         struct ceph_inode_info *ci = ceph_inode(inode);
1811         int err;
1812
1813         err = ceph_do_getattr(inode, CEPH_STAT_CAP_INODE_ALL);
1814         if (!err) {
1815                 generic_fillattr(inode, stat);
1816                 stat->ino = inode->i_ino;
1817                 if (ceph_snap(inode) != CEPH_NOSNAP)
1818                         stat->dev = ceph_snap(inode);
1819                 else
1820                         stat->dev = 0;
1821                 if (S_ISDIR(inode->i_mode)) {
1822                         stat->size = ci->i_rbytes;
1823                         stat->blocks = 0;
1824                         stat->blksize = 65536;
1825                 }
1826         }
1827         return err;
1828 }
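
/*
 * Illustrative note (hypothetical userspace snippet, in a comment):
 * for a directory, the size reported above is the MDS-maintained
 * recursive byte count, and for snapshotted inodes the device field
 * carries the snapshot id:
 *
 *      struct stat st;
 *      if (stat("/mnt/ceph/somedir", &st) == 0) {
 *              // st.st_size == bytes under somedir (i_rbytes)
 *              // st.st_dev  == snap id when inside a snapshot, else 0
 *      }
 */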