fs: don't use igrab() while holding i_lock
[linux-2.6.git] fs/ceph/inode.c
1 #include <linux/ceph/ceph_debug.h>
2
3 #include <linux/module.h>
4 #include <linux/fs.h>
5 #include <linux/slab.h>
6 #include <linux/string.h>
7 #include <linux/uaccess.h>
8 #include <linux/kernel.h>
9 #include <linux/namei.h>
10 #include <linux/writeback.h>
11 #include <linux/vmalloc.h>
12 #include <linux/pagevec.h>
13
14 #include "super.h"
15 #include "mds_client.h"
16 #include <linux/ceph/decode.h>
17
18 /*
19  * Ceph inode operations
20  *
21  * Implement basic inode helpers (get, alloc) and inode ops (getattr,
22  * setattr, etc.), xattr helpers, and helpers for assimilating
23  * metadata returned by the MDS into our cache.
24  *
25  * Also define helpers for doing asynchronous writeback, invalidation,
26  * and truncation for the benefit of those who can't afford to block
27  * (typically because they are in the message handler path).
28  */
29
30 static const struct inode_operations ceph_symlink_iops;
31
32 static void ceph_invalidate_work(struct work_struct *work);
33 static void ceph_writeback_work(struct work_struct *work);
34 static void ceph_vmtruncate_work(struct work_struct *work);
35
36 /*
37  * find or create an inode, given the ceph ino number
38  */
39 static int ceph_set_ino_cb(struct inode *inode, void *data)
40 {
41         ceph_inode(inode)->i_vino = *(struct ceph_vino *)data;
42         inode->i_ino = ceph_vino_to_ino(*(struct ceph_vino *)data);
43         return 0;
44 }
45
46 struct inode *ceph_get_inode(struct super_block *sb, struct ceph_vino vino)
47 {
48         struct inode *inode;
49         ino_t t = ceph_vino_to_ino(vino);
50
51         inode = iget5_locked(sb, t, ceph_ino_compare, ceph_set_ino_cb, &vino);
52         if (inode == NULL)
53                 return ERR_PTR(-ENOMEM);
54         if (inode->i_state & I_NEW) {
55                 dout("get_inode created new inode %p %llx.%llx ino %llx\n",
56                      inode, ceph_vinop(inode), (u64)inode->i_ino);
57                 unlock_new_inode(inode);
58         }
59
60         dout("get_inode on %lu=%llx.%llx got %p\n", inode->i_ino, vino.ino,
61              vino.snap, inode);
62         return inode;
63 }
64
65 /*
66  * get/construct snapdir inode for a given directory
67  */
68 struct inode *ceph_get_snapdir(struct inode *parent)
69 {
70         struct ceph_vino vino = {
71                 .ino = ceph_ino(parent),
72                 .snap = CEPH_SNAPDIR,
73         };
74         struct inode *inode = ceph_get_inode(parent->i_sb, vino);
75         struct ceph_inode_info *ci = ceph_inode(inode);
76
77         BUG_ON(!S_ISDIR(parent->i_mode));
78         if (IS_ERR(inode))
79                 return inode;
80         inode->i_mode = parent->i_mode;
81         inode->i_uid = parent->i_uid;
82         inode->i_gid = parent->i_gid;
83         inode->i_op = &ceph_dir_iops;
84         inode->i_fop = &ceph_dir_fops;
85         ci->i_snap_caps = CEPH_CAP_PIN; /* so we can open */
86         ci->i_rbytes = 0;
87         return inode;
88 }
89
90 const struct inode_operations ceph_file_iops = {
91         .permission = ceph_permission,
92         .setattr = ceph_setattr,
93         .getattr = ceph_getattr,
94         .setxattr = ceph_setxattr,
95         .getxattr = ceph_getxattr,
96         .listxattr = ceph_listxattr,
97         .removexattr = ceph_removexattr,
98 };
99
100
101 /*
102  * We use a 'frag tree' to keep track of the MDS's directory fragments
103  * for a given inode (usually there is just a single fragment).  We
104  * need to know when a child frag is delegated to a new MDS, or when
105  * it is flagged as replicated, so we can direct our requests
106  * accordingly.
107  */
108
109 /*
110  * find/create a frag in the tree
111  */
112 static struct ceph_inode_frag *__get_or_create_frag(struct ceph_inode_info *ci,
113                                                     u32 f)
114 {
115         struct rb_node **p;
116         struct rb_node *parent = NULL;
117         struct ceph_inode_frag *frag;
118         int c;
119
120         p = &ci->i_fragtree.rb_node;
121         while (*p) {
122                 parent = *p;
123                 frag = rb_entry(parent, struct ceph_inode_frag, node);
124                 c = ceph_frag_compare(f, frag->frag);
125                 if (c < 0)
126                         p = &(*p)->rb_left;
127                 else if (c > 0)
128                         p = &(*p)->rb_right;
129                 else
130                         return frag;
131         }
132
133         frag = kmalloc(sizeof(*frag), GFP_NOFS);
134         if (!frag) {
135                 pr_err("__get_or_create_frag ENOMEM on %p %llx.%llx "
136                        "frag %x\n", &ci->vfs_inode,
137                        ceph_vinop(&ci->vfs_inode), f);
138                 return ERR_PTR(-ENOMEM);
139         }
140         frag->frag = f;
141         frag->split_by = 0;
142         frag->mds = -1;
143         frag->ndist = 0;
144
145         rb_link_node(&frag->node, parent, p);
146         rb_insert_color(&frag->node, &ci->i_fragtree);
147
148         dout("get_or_create_frag added %llx.%llx frag %x\n",
149              ceph_vinop(&ci->vfs_inode), f);
150         return frag;
151 }
152
153 /*
154  * find a specific frag @f
155  */
156 struct ceph_inode_frag *__ceph_find_frag(struct ceph_inode_info *ci, u32 f)
157 {
158         struct rb_node *n = ci->i_fragtree.rb_node;
159
160         while (n) {
161                 struct ceph_inode_frag *frag =
162                         rb_entry(n, struct ceph_inode_frag, node);
163                 int c = ceph_frag_compare(f, frag->frag);
164                 if (c < 0)
165                         n = n->rb_left;
166                 else if (c > 0)
167                         n = n->rb_right;
168                 else
169                         return frag;
170         }
171         return NULL;
172 }
173
174 /*
175  * Choose frag containing the given value @v.  If @pfrag is
176  * specified, copy the frag delegation info to the caller if
177  * it is present.
178  */
179 u32 ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
180                      struct ceph_inode_frag *pfrag,
181                      int *found)
182 {
183         u32 t = ceph_frag_make(0, 0);
184         struct ceph_inode_frag *frag;
185         unsigned nway, i;
186         u32 n;
187
188         if (found)
189                 *found = 0;
190
191         mutex_lock(&ci->i_fragtree_mutex);
192         while (1) {
193                 WARN_ON(!ceph_frag_contains_value(t, v));
194                 frag = __ceph_find_frag(ci, t);
195                 if (!frag)
196                         break; /* t is a leaf */
197                 if (frag->split_by == 0) {
198                         if (pfrag)
199                                 memcpy(pfrag, frag, sizeof(*pfrag));
200                         if (found)
201                                 *found = 1;
202                         break;
203                 }
204
205                 /* choose child */
206                 nway = 1 << frag->split_by;
207                 dout("choose_frag(%x) %x splits by %d (%d ways)\n", v, t,
208                      frag->split_by, nway);
209                 for (i = 0; i < nway; i++) {
210                         n = ceph_frag_make_child(t, frag->split_by, i);
211                         if (ceph_frag_contains_value(n, v)) {
212                                 t = n;
213                                 break;
214                         }
215                 }
216                 BUG_ON(i == nway);
217         }
218         dout("choose_frag(%x) = %x\n", v, t);
219
220         mutex_unlock(&ci->i_fragtree_mutex);
221         return t;
222 }
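/*
 * Illustrative sketch (not from the original file): one way a caller could
 * use the frag tree described above, routing a request for a hashed dentry
 * name to the MDS that was delegated the containing dirfrag.  The helper
 * name and its fallback convention are hypothetical; only ceph_choose_frag()
 * and the ceph_inode_frag fields it fills in come from this file.
 */
static int example_pick_dirfrag_mds(struct ceph_inode_info *ci, u32 name_hash)
{
        struct ceph_inode_frag frag;
        int found;
        u32 f;

        /* walk the frag tree down to the leaf containing name_hash */
        f = ceph_choose_frag(ci, name_hash, &frag, &found);
        dout("name hash %x falls in frag %x\n", name_hash, f);

        /* if the leaf carried delegation info, prefer its authoritative mds */
        if (found && frag.mds >= 0)
                return frag.mds;
        return -1;  /* no delegation info; caller uses the inode's auth mds */
}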
223
224 /*
225  * Process dirfrag (delegation) info from the mds.  Include leaf
226  * fragment in tree ONLY if ndist > 0.  Otherwise, only
227  * branches/splits are included in i_fragtree.
228  */
229 static int ceph_fill_dirfrag(struct inode *inode,
230                              struct ceph_mds_reply_dirfrag *dirinfo)
231 {
232         struct ceph_inode_info *ci = ceph_inode(inode);
233         struct ceph_inode_frag *frag;
234         u32 id = le32_to_cpu(dirinfo->frag);
235         int mds = le32_to_cpu(dirinfo->auth);
236         int ndist = le32_to_cpu(dirinfo->ndist);
237         int i;
238         int err = 0;
239
240         mutex_lock(&ci->i_fragtree_mutex);
241         if (ndist == 0) {
242                 /* no delegation info needed. */
243                 frag = __ceph_find_frag(ci, id);
244                 if (!frag)
245                         goto out;
246                 if (frag->split_by == 0) {
247                         /* tree leaf, remove */
248                         dout("fill_dirfrag removed %llx.%llx frag %x"
249                              " (no ref)\n", ceph_vinop(inode), id);
250                         rb_erase(&frag->node, &ci->i_fragtree);
251                         kfree(frag);
252                 } else {
253                         /* tree branch, keep and clear */
254                         dout("fill_dirfrag cleared %llx.%llx frag %x"
255                              " referral\n", ceph_vinop(inode), id);
256                         frag->mds = -1;
257                         frag->ndist = 0;
258                 }
259                 goto out;
260         }
261
262
263         /* find/add this frag to store mds delegation info */
264         frag = __get_or_create_frag(ci, id);
265         if (IS_ERR(frag)) {
266                 /* this is not the end of the world; we can continue
267                    with bad/inaccurate delegation info */
268                 pr_err("fill_dirfrag ENOMEM on mds ref %llx.%llx fg %x\n",
269                        ceph_vinop(inode), le32_to_cpu(dirinfo->frag));
270                 err = -ENOMEM;
271                 goto out;
272         }
273
274         frag->mds = mds;
275         frag->ndist = min_t(u32, ndist, CEPH_MAX_DIRFRAG_REP);
276         for (i = 0; i < frag->ndist; i++)
277                 frag->dist[i] = le32_to_cpu(dirinfo->dist[i]);
278         dout("fill_dirfrag %llx.%llx frag %x ndist=%d\n",
279              ceph_vinop(inode), frag->frag, frag->ndist);
280
281 out:
282         mutex_unlock(&ci->i_fragtree_mutex);
283         return err;
284 }
285
286
287 /*
288  * initialize a newly allocated inode.
289  */
290 struct inode *ceph_alloc_inode(struct super_block *sb)
291 {
292         struct ceph_inode_info *ci;
293         int i;
294
295         ci = kmem_cache_alloc(ceph_inode_cachep, GFP_NOFS);
296         if (!ci)
297                 return NULL;
298
299         dout("alloc_inode %p\n", &ci->vfs_inode);
300
301         ci->i_version = 0;
302         ci->i_time_warp_seq = 0;
303         ci->i_ceph_flags = 0;
304         ci->i_release_count = 0;
305         ci->i_symlink = NULL;
306
307         memset(&ci->i_dir_layout, 0, sizeof(ci->i_dir_layout));
308
309         ci->i_fragtree = RB_ROOT;
310         mutex_init(&ci->i_fragtree_mutex);
311
312         ci->i_xattrs.blob = NULL;
313         ci->i_xattrs.prealloc_blob = NULL;
314         ci->i_xattrs.dirty = false;
315         ci->i_xattrs.index = RB_ROOT;
316         ci->i_xattrs.count = 0;
317         ci->i_xattrs.names_size = 0;
318         ci->i_xattrs.vals_size = 0;
319         ci->i_xattrs.version = 0;
320         ci->i_xattrs.index_version = 0;
321
322         ci->i_caps = RB_ROOT;
323         ci->i_auth_cap = NULL;
324         ci->i_dirty_caps = 0;
325         ci->i_flushing_caps = 0;
326         INIT_LIST_HEAD(&ci->i_dirty_item);
327         INIT_LIST_HEAD(&ci->i_flushing_item);
328         ci->i_cap_flush_seq = 0;
329         ci->i_cap_flush_last_tid = 0;
330         memset(&ci->i_cap_flush_tid, 0, sizeof(ci->i_cap_flush_tid));
331         init_waitqueue_head(&ci->i_cap_wq);
332         ci->i_hold_caps_min = 0;
333         ci->i_hold_caps_max = 0;
334         INIT_LIST_HEAD(&ci->i_cap_delay_list);
335         ci->i_cap_exporting_mds = 0;
336         ci->i_cap_exporting_mseq = 0;
337         ci->i_cap_exporting_issued = 0;
338         INIT_LIST_HEAD(&ci->i_cap_snaps);
339         ci->i_head_snapc = NULL;
340         ci->i_snap_caps = 0;
341
342         for (i = 0; i < CEPH_FILE_MODE_NUM; i++)
343                 ci->i_nr_by_mode[i] = 0;
344
345         ci->i_truncate_seq = 0;
346         ci->i_truncate_size = 0;
347         ci->i_truncate_pending = 0;
348
349         ci->i_max_size = 0;
350         ci->i_reported_size = 0;
351         ci->i_wanted_max_size = 0;
352         ci->i_requested_max_size = 0;
353
354         ci->i_pin_ref = 0;
355         ci->i_rd_ref = 0;
356         ci->i_rdcache_ref = 0;
357         ci->i_wr_ref = 0;
358         ci->i_wrbuffer_ref = 0;
359         ci->i_wrbuffer_ref_head = 0;
360         ci->i_shared_gen = 0;
361         ci->i_rdcache_gen = 0;
362         ci->i_rdcache_revoking = 0;
363
364         INIT_LIST_HEAD(&ci->i_unsafe_writes);
365         INIT_LIST_HEAD(&ci->i_unsafe_dirops);
366         spin_lock_init(&ci->i_unsafe_lock);
367
368         ci->i_snap_realm = NULL;
369         INIT_LIST_HEAD(&ci->i_snap_realm_item);
370         INIT_LIST_HEAD(&ci->i_snap_flush_item);
371
372         INIT_WORK(&ci->i_wb_work, ceph_writeback_work);
373         INIT_WORK(&ci->i_pg_inv_work, ceph_invalidate_work);
374
375         INIT_WORK(&ci->i_vmtruncate_work, ceph_vmtruncate_work);
376
377         return &ci->vfs_inode;
378 }
379
380 static void ceph_i_callback(struct rcu_head *head)
381 {
382         struct inode *inode = container_of(head, struct inode, i_rcu);
383         struct ceph_inode_info *ci = ceph_inode(inode);
384
385         INIT_LIST_HEAD(&inode->i_dentry);
386         kmem_cache_free(ceph_inode_cachep, ci);
387 }
388
389 void ceph_destroy_inode(struct inode *inode)
390 {
391         struct ceph_inode_info *ci = ceph_inode(inode);
392         struct ceph_inode_frag *frag;
393         struct rb_node *n;
394
395         dout("destroy_inode %p ino %llx.%llx\n", inode, ceph_vinop(inode));
396
397         ceph_queue_caps_release(inode);
398
399         /*
400          * we may still have a snap_realm reference if there are stray
401          * caps in i_cap_exporting_issued or i_snap_caps.
402          */
403         if (ci->i_snap_realm) {
404                 struct ceph_mds_client *mdsc =
405                         ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
406                 struct ceph_snap_realm *realm = ci->i_snap_realm;
407
408                 dout(" dropping residual ref to snap realm %p\n", realm);
409                 spin_lock(&realm->inodes_with_caps_lock);
410                 list_del_init(&ci->i_snap_realm_item);
411                 spin_unlock(&realm->inodes_with_caps_lock);
412                 ceph_put_snap_realm(mdsc, realm);
413         }
414
415         kfree(ci->i_symlink);
416         while ((n = rb_first(&ci->i_fragtree)) != NULL) {
417                 frag = rb_entry(n, struct ceph_inode_frag, node);
418                 rb_erase(n, &ci->i_fragtree);
419                 kfree(frag);
420         }
421
422         __ceph_destroy_xattrs(ci);
423         if (ci->i_xattrs.blob)
424                 ceph_buffer_put(ci->i_xattrs.blob);
425         if (ci->i_xattrs.prealloc_blob)
426                 ceph_buffer_put(ci->i_xattrs.prealloc_blob);
427
428         call_rcu(&inode->i_rcu, ceph_i_callback);
429 }
430
431
432 /*
433  * Helpers to fill in size, ctime, mtime, and atime.  We have to be
434  * careful because either the client or MDS may have more up to date
435  * info, depending on which capabilities are held, and whether
436  * time_warp_seq or truncate_seq have increased.  (Ordinarily, mtime
437  * and size are monotonically increasing, except when utimes() or
438  * truncate() increments the corresponding _seq values.)
439  */
440 int ceph_fill_file_size(struct inode *inode, int issued,
441                         u32 truncate_seq, u64 truncate_size, u64 size)
442 {
443         struct ceph_inode_info *ci = ceph_inode(inode);
444         int queue_trunc = 0;
445
446         if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) > 0 ||
447             (truncate_seq == ci->i_truncate_seq && size > inode->i_size)) {
448                 dout("size %lld -> %llu\n", inode->i_size, size);
449                 inode->i_size = size;
450                 inode->i_blocks = (size + (1<<9) - 1) >> 9;
451                 ci->i_reported_size = size;
452                 if (truncate_seq != ci->i_truncate_seq) {
453                         dout("truncate_seq %u -> %u\n",
454                              ci->i_truncate_seq, truncate_seq);
455                         ci->i_truncate_seq = truncate_seq;
456                         /*
457                          * If we hold relevant caps, or if we don't hold
458                          * them but the file is still open or mmapped,
459                          * we need to queue the truncate so that stale
460                          * cached pages get dropped.
461                          */
462                         if ((issued & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_RD|
463                                        CEPH_CAP_FILE_WR|CEPH_CAP_FILE_BUFFER|
464                                        CEPH_CAP_FILE_EXCL|
465                                        CEPH_CAP_FILE_LAZYIO)) ||
466                             mapping_mapped(inode->i_mapping) ||
467                             __ceph_caps_file_wanted(ci)) {
468                                 ci->i_truncate_pending++;
469                                 queue_trunc = 1;
470                         }
471                 }
472         }
473         if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) >= 0 &&
474             ci->i_truncate_size != truncate_size) {
475                 dout("truncate_size %lld -> %llu\n", ci->i_truncate_size,
476                      truncate_size);
477                 ci->i_truncate_size = truncate_size;
478         }
479         return queue_trunc;
480 }
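/*
 * Illustrative note (not from the original file): ceph_seq_cmp(), used
 * above, is defined elsewhere in the ceph code, not in this file; what
 * matters here is that it compares sequence numbers in a wrap-around-safe
 * way, along the (assumed) lines below, so that a freshly wrapped seq
 * still compares as newer, e.g. example_seq_cmp(1, 0xffffffff) > 0.
 */
static inline int example_seq_cmp(u32 a, u32 b)
{
        return (s32)(a - b);    /* > 0 means a is newer than b */
}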
481
482 void ceph_fill_file_time(struct inode *inode, int issued,
483                          u64 time_warp_seq, struct timespec *ctime,
484                          struct timespec *mtime, struct timespec *atime)
485 {
486         struct ceph_inode_info *ci = ceph_inode(inode);
487         int warn = 0;
488
489         if (issued & (CEPH_CAP_FILE_EXCL|
490                       CEPH_CAP_FILE_WR|
491                       CEPH_CAP_FILE_BUFFER|
492                       CEPH_CAP_AUTH_EXCL|
493                       CEPH_CAP_XATTR_EXCL)) {
494                 if (timespec_compare(ctime, &inode->i_ctime) > 0) {
495                         dout("ctime %ld.%09ld -> %ld.%09ld inc w/ cap\n",
496                              inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
497                              ctime->tv_sec, ctime->tv_nsec);
498                         inode->i_ctime = *ctime;
499                 }
500                 if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) > 0) {
501                         /* the MDS did a utimes() */
502                         dout("mtime %ld.%09ld -> %ld.%09ld "
503                              "tw %d -> %d\n",
504                              inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
505                              mtime->tv_sec, mtime->tv_nsec,
506                              ci->i_time_warp_seq, (int)time_warp_seq);
507
508                         inode->i_mtime = *mtime;
509                         inode->i_atime = *atime;
510                         ci->i_time_warp_seq = time_warp_seq;
511                 } else if (time_warp_seq == ci->i_time_warp_seq) {
512                         /* nobody did utimes(); take the max */
513                         if (timespec_compare(mtime, &inode->i_mtime) > 0) {
514                                 dout("mtime %ld.%09ld -> %ld.%09ld inc\n",
515                                      inode->i_mtime.tv_sec,
516                                      inode->i_mtime.tv_nsec,
517                                      mtime->tv_sec, mtime->tv_nsec);
518                                 inode->i_mtime = *mtime;
519                         }
520                         if (timespec_compare(atime, &inode->i_atime) > 0) {
521                                 dout("atime %ld.%09ld -> %ld.%09ld inc\n",
522                                      inode->i_atime.tv_sec,
523                                      inode->i_atime.tv_nsec,
524                                      atime->tv_sec, atime->tv_nsec);
525                                 inode->i_atime = *atime;
526                         }
527                 } else if (issued & CEPH_CAP_FILE_EXCL) {
528                         /* we did a utimes(); ignore mds values */
529                 } else {
530                         warn = 1;
531                 }
532         } else {
533                 /* we have no write|excl caps; whatever the MDS says is true */
534                 if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) >= 0) {
535                         inode->i_ctime = *ctime;
536                         inode->i_mtime = *mtime;
537                         inode->i_atime = *atime;
538                         ci->i_time_warp_seq = time_warp_seq;
539                 } else {
540                         warn = 1;
541                 }
542         }
543         if (warn) /* time_warp_seq shouldn't go backwards */
544                 dout("%p mds time_warp_seq %llu < %u\n",
545                      inode, time_warp_seq, ci->i_time_warp_seq);
546 }
547
548 /*
549  * Populate an inode based on info from mds.  May be called on new or
550  * existing inodes.
551  */
552 static int fill_inode(struct inode *inode,
553                       struct ceph_mds_reply_info_in *iinfo,
554                       struct ceph_mds_reply_dirfrag *dirinfo,
555                       struct ceph_mds_session *session,
556                       unsigned long ttl_from, int cap_fmode,
557                       struct ceph_cap_reservation *caps_reservation)
558 {
559         struct ceph_mds_reply_inode *info = iinfo->in;
560         struct ceph_inode_info *ci = ceph_inode(inode);
561         int i;
562         int issued, implemented;
563         struct timespec mtime, atime, ctime;
564         u32 nsplits;
565         struct ceph_buffer *xattr_blob = NULL;
566         int err = 0;
567         int queue_trunc = 0;
568
569         dout("fill_inode %p ino %llx.%llx v %llu had %llu\n",
570              inode, ceph_vinop(inode), le64_to_cpu(info->version),
571              ci->i_version);
572
573         /*
574          * prealloc xattr data, if it looks like we'll need it.  only
575          * if len > 4 (meaning there are actually xattrs; the first 4
576          * bytes are the xattr count).
577          */
578         if (iinfo->xattr_len > 4) {
579                 xattr_blob = ceph_buffer_new(iinfo->xattr_len, GFP_NOFS);
580                 if (!xattr_blob)
581                         pr_err("fill_inode ENOMEM xattr blob %d bytes\n",
582                                iinfo->xattr_len);
583         }
584
585         spin_lock(&inode->i_lock);
586
587         /*
588          * the provided version will be odd if the inode value is projected
589          * (unstable), and even if it is stable.  skip the update if we have newer stable
590          * info (ours>=theirs, e.g. due to racing mds replies), unless
591          * we are getting projected (unstable) info (in which case the
592          * version is odd, and we want ours>theirs).
593          *   us   them
594          *   2    2     skip
595          *   3    2     skip
596          *   3    3     update
597          */
598         if (le64_to_cpu(info->version) > 0 &&
599             (ci->i_version & ~1) >= le64_to_cpu(info->version))
600                 goto no_change;
601
602         issued = __ceph_caps_issued(ci, &implemented);
603         issued |= implemented | __ceph_caps_dirty(ci);
604
605         /* update inode */
606         ci->i_version = le64_to_cpu(info->version);
607         inode->i_version++;
608         inode->i_rdev = le32_to_cpu(info->rdev);
609
610         if ((issued & CEPH_CAP_AUTH_EXCL) == 0) {
611                 inode->i_mode = le32_to_cpu(info->mode);
612                 inode->i_uid = le32_to_cpu(info->uid);
613                 inode->i_gid = le32_to_cpu(info->gid);
614                 dout("%p mode 0%o uid.gid %d.%d\n", inode, inode->i_mode,
615                      inode->i_uid, inode->i_gid);
616         }
617
618         if ((issued & CEPH_CAP_LINK_EXCL) == 0)
619                 inode->i_nlink = le32_to_cpu(info->nlink);
620
621         /* be careful with mtime, atime, size */
622         ceph_decode_timespec(&atime, &info->atime);
623         ceph_decode_timespec(&mtime, &info->mtime);
624         ceph_decode_timespec(&ctime, &info->ctime);
625         queue_trunc = ceph_fill_file_size(inode, issued,
626                                           le32_to_cpu(info->truncate_seq),
627                                           le64_to_cpu(info->truncate_size),
628                                           le64_to_cpu(info->size));
629         ceph_fill_file_time(inode, issued,
630                             le32_to_cpu(info->time_warp_seq),
631                             &ctime, &mtime, &atime);
632
633         /* only update max_size on auth cap */
634         if ((info->cap.flags & CEPH_CAP_FLAG_AUTH) &&
635             ci->i_max_size != le64_to_cpu(info->max_size)) {
636                 dout("max_size %lld -> %llu\n", ci->i_max_size,
637                      le64_to_cpu(info->max_size));
638                 ci->i_max_size = le64_to_cpu(info->max_size);
639         }
640
641         ci->i_layout = info->layout;
642         inode->i_blkbits = fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1;
643
644         /* xattrs */
645         /* note that if i_xattrs.len <= 4, i_xattrs.data will still be NULL. */
646         if ((issued & CEPH_CAP_XATTR_EXCL) == 0 &&
647             le64_to_cpu(info->xattr_version) > ci->i_xattrs.version) {
648                 if (ci->i_xattrs.blob)
649                         ceph_buffer_put(ci->i_xattrs.blob);
650                 ci->i_xattrs.blob = xattr_blob;
651                 if (xattr_blob)
652                         memcpy(ci->i_xattrs.blob->vec.iov_base,
653                                iinfo->xattr_data, iinfo->xattr_len);
654                 ci->i_xattrs.version = le64_to_cpu(info->xattr_version);
655                 xattr_blob = NULL;
656         }
657
658         inode->i_mapping->a_ops = &ceph_aops;
659         inode->i_mapping->backing_dev_info =
660                 &ceph_sb_to_client(inode->i_sb)->backing_dev_info;
661
662         switch (inode->i_mode & S_IFMT) {
663         case S_IFIFO:
664         case S_IFBLK:
665         case S_IFCHR:
666         case S_IFSOCK:
667                 init_special_inode(inode, inode->i_mode, inode->i_rdev);
668                 inode->i_op = &ceph_file_iops;
669                 break;
670         case S_IFREG:
671                 inode->i_op = &ceph_file_iops;
672                 inode->i_fop = &ceph_file_fops;
673                 break;
674         case S_IFLNK:
675                 inode->i_op = &ceph_symlink_iops;
676                 if (!ci->i_symlink) {
677                         int symlen = iinfo->symlink_len;
678                         char *sym;
679
680                         BUG_ON(symlen != inode->i_size);
681                         spin_unlock(&inode->i_lock);
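                        /*
                         * i_lock is a spinlock, so drop it across the
                         * (possibly sleeping) kmalloc() below; i_symlink
                         * is re-checked after re-taking the lock in case
                         * another thread filled it in the meantime.
                         */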
682
683                         err = -ENOMEM;
684                         sym = kmalloc(symlen+1, GFP_NOFS);
685                         if (!sym)
686                                 goto out;
687                         memcpy(sym, iinfo->symlink, symlen);
688                         sym[symlen] = 0;
689
690                         spin_lock(&inode->i_lock);
691                         if (!ci->i_symlink)
692                                 ci->i_symlink = sym;
693                         else
694                                 kfree(sym); /* lost a race */
695                 }
696                 break;
697         case S_IFDIR:
698                 inode->i_op = &ceph_dir_iops;
699                 inode->i_fop = &ceph_dir_fops;
700
701                 ci->i_dir_layout = iinfo->dir_layout;
702
703                 ci->i_files = le64_to_cpu(info->files);
704                 ci->i_subdirs = le64_to_cpu(info->subdirs);
705                 ci->i_rbytes = le64_to_cpu(info->rbytes);
706                 ci->i_rfiles = le64_to_cpu(info->rfiles);
707                 ci->i_rsubdirs = le64_to_cpu(info->rsubdirs);
708                 ceph_decode_timespec(&ci->i_rctime, &info->rctime);
709
710                 /* set dir completion flag? */
711                 if (ci->i_files == 0 && ci->i_subdirs == 0 &&
712                     ceph_snap(inode) == CEPH_NOSNAP &&
713                     (le32_to_cpu(info->cap.caps) & CEPH_CAP_FILE_SHARED) &&
714                     (issued & CEPH_CAP_FILE_EXCL) == 0 &&
715                     (ci->i_ceph_flags & CEPH_I_COMPLETE) == 0) {
716                         dout(" marking %p complete (empty)\n", inode);
717                         /* ci->i_ceph_flags |= CEPH_I_COMPLETE; */
718                         ci->i_max_offset = 2;
719                 }
720                 break;
721         default:
722                 pr_err("fill_inode %llx.%llx BAD mode 0%o\n",
723                        ceph_vinop(inode), inode->i_mode);
724         }
725
726 no_change:
727         spin_unlock(&inode->i_lock);
728
729         /* queue truncate if we saw i_size decrease */
730         if (queue_trunc)
731                 ceph_queue_vmtruncate(inode);
732
733         /* populate frag tree */
734         /* FIXME: move me up, if/when version reflects fragtree changes */
735         nsplits = le32_to_cpu(info->fragtree.nsplits);
736         mutex_lock(&ci->i_fragtree_mutex);
737         for (i = 0; i < nsplits; i++) {
738                 u32 id = le32_to_cpu(info->fragtree.splits[i].frag);
739                 struct ceph_inode_frag *frag = __get_or_create_frag(ci, id);
740
741                 if (IS_ERR(frag))
742                         continue;
743                 frag->split_by = le32_to_cpu(info->fragtree.splits[i].by);
744                 dout(" frag %x split by %d\n", frag->frag, frag->split_by);
745         }
746         mutex_unlock(&ci->i_fragtree_mutex);
747
748         /* were we issued a capability? */
749         if (info->cap.caps) {
750                 if (ceph_snap(inode) == CEPH_NOSNAP) {
751                         ceph_add_cap(inode, session,
752                                      le64_to_cpu(info->cap.cap_id),
753                                      cap_fmode,
754                                      le32_to_cpu(info->cap.caps),
755                                      le32_to_cpu(info->cap.wanted),
756                                      le32_to_cpu(info->cap.seq),
757                                      le32_to_cpu(info->cap.mseq),
758                                      le64_to_cpu(info->cap.realm),
759                                      info->cap.flags,
760                                      caps_reservation);
761                 } else {
762                         spin_lock(&inode->i_lock);
763                         dout(" %p got snap_caps %s\n", inode,
764                              ceph_cap_string(le32_to_cpu(info->cap.caps)));
765                         ci->i_snap_caps |= le32_to_cpu(info->cap.caps);
766                         if (cap_fmode >= 0)
767                                 __ceph_get_fmode(ci, cap_fmode);
768                         spin_unlock(&inode->i_lock);
769                 }
770         } else if (cap_fmode >= 0) {
771                 pr_warning("mds issued no caps on %llx.%llx\n",
772                            ceph_vinop(inode));
773                 __ceph_get_fmode(ci, cap_fmode);
774         }
775
776         /* update delegation info? */
777         if (dirinfo)
778                 ceph_fill_dirfrag(inode, dirinfo);
779
780         err = 0;
781
782 out:
783         if (xattr_blob)
784                 ceph_buffer_put(xattr_blob);
785         return err;
786 }
787
788 /*
789  * caller should hold session s_mutex.
790  */
791 static void update_dentry_lease(struct dentry *dentry,
792                                 struct ceph_mds_reply_lease *lease,
793                                 struct ceph_mds_session *session,
794                                 unsigned long from_time)
795 {
796         struct ceph_dentry_info *di = ceph_dentry(dentry);
797         long unsigned duration = le32_to_cpu(lease->duration_ms);
798         long unsigned ttl = from_time + (duration * HZ) / 1000;
799         long unsigned half_ttl = from_time + (duration * HZ / 2) / 1000;
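        /*
         * e.g. a 30000 ms lease with HZ=250 gives ttl = from_time + 7500
         * jiffies (30 s) and half_ttl = from_time + 3750 jiffies (15 s);
         * half_ttl is stored in lease_renew_after below.
         */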
800         struct inode *dir;
801
802         /* only track leases on regular dentries */
803         if (dentry->d_op != &ceph_dentry_ops)
804                 return;
805
806         spin_lock(&dentry->d_lock);
807         dout("update_dentry_lease %p mask %d duration %lu ms ttl %lu\n",
808              dentry, le16_to_cpu(lease->mask), duration, ttl);
809
810         /* make lease_shared_gen match directory */
811         dir = dentry->d_parent->d_inode;
812         di->lease_shared_gen = ceph_inode(dir)->i_shared_gen;
813
814         if (lease->mask == 0)
815                 goto out_unlock;
816
817         if (di->lease_gen == session->s_cap_gen &&
818             time_before(ttl, dentry->d_time))
819                 goto out_unlock;  /* we already have a newer lease. */
820
821         if (di->lease_session && di->lease_session != session)
822                 goto out_unlock;
823
824         ceph_dentry_lru_touch(dentry);
825
826         if (!di->lease_session)
827                 di->lease_session = ceph_get_mds_session(session);
828         di->lease_gen = session->s_cap_gen;
829         di->lease_seq = le32_to_cpu(lease->seq);
830         di->lease_renew_after = half_ttl;
831         di->lease_renew_from = 0;
832         dentry->d_time = ttl;
833 out_unlock:
834         spin_unlock(&dentry->d_lock);
835         return;
836 }
837
838 /*
839  * Set dentry's directory position based on the current dir's max, and
840  * order it in d_subdirs, so that dcache_readdir behaves.
841  */
842 static void ceph_set_dentry_offset(struct dentry *dn)
843 {
844         struct dentry *dir = dn->d_parent;
845         struct inode *inode = dn->d_parent->d_inode;
846         struct ceph_dentry_info *di;
847
848         BUG_ON(!inode);
849
850         di = ceph_dentry(dn);
851
852         spin_lock(&inode->i_lock);
853         if ((ceph_inode(inode)->i_ceph_flags & CEPH_I_COMPLETE) == 0) {
854                 spin_unlock(&inode->i_lock);
855                 return;
856         }
857         di->offset = ceph_inode(inode)->i_max_offset++;
858         spin_unlock(&inode->i_lock);
859
860         spin_lock(&dir->d_lock);
861         spin_lock_nested(&dn->d_lock, DENTRY_D_LOCK_NESTED);
862         list_move(&dn->d_u.d_child, &dir->d_subdirs);
863         dout("set_dentry_offset %p %lld (%p %p)\n", dn, di->offset,
864              dn->d_u.d_child.prev, dn->d_u.d_child.next);
865         spin_unlock(&dn->d_lock);
866         spin_unlock(&dir->d_lock);
867 }
868
869 /*
870  * splice a dentry to an inode.
871  * caller must hold directory i_mutex for this to be safe.
872  *
873  * we will only rehash the resulting dentry if @prehash is
874  * true; @prehash will be set to false (for the benefit of
875  * the caller) if we fail.
876  */
877 static struct dentry *splice_dentry(struct dentry *dn, struct inode *in,
878                                     bool *prehash, bool set_offset)
879 {
880         struct dentry *realdn;
881
882         BUG_ON(dn->d_inode);
883
884         /* dn must be unhashed */
885         if (!d_unhashed(dn))
886                 d_drop(dn);
887         realdn = d_materialise_unique(dn, in);
888         if (IS_ERR(realdn)) {
889                 pr_err("splice_dentry error %ld %p inode %p ino %llx.%llx\n",
890                        PTR_ERR(realdn), dn, in, ceph_vinop(in));
891                 if (prehash)
892                         *prehash = false; /* don't rehash on error */
893                 dn = realdn; /* note realdn contains the error */
894                 goto out;
895         } else if (realdn) {
896                 dout("dn %p (%d) spliced with %p (%d) "
897                      "inode %p ino %llx.%llx\n",
898                      dn, dn->d_count,
899                      realdn, realdn->d_count,
900                      realdn->d_inode, ceph_vinop(realdn->d_inode));
901                 dput(dn);
902                 dn = realdn;
903         } else {
904                 BUG_ON(!ceph_dentry(dn));
905                 dout("dn %p attached to %p ino %llx.%llx\n",
906                      dn, dn->d_inode, ceph_vinop(dn->d_inode));
907         }
908         if ((!prehash || *prehash) && d_unhashed(dn))
909                 d_rehash(dn);
910         if (set_offset)
911                 ceph_set_dentry_offset(dn);
912 out:
913         return dn;
914 }
915
916 /*
917  * Incorporate results into the local cache.  This is either just
918  * one inode, or a directory, dentry, and possibly linked-to inode (e.g.,
919  * after a lookup).
920  *
921  * A reply may contain
922  *         a directory inode along with a dentry, and/or
923  *         a target inode.
924  *
925  * Called with snap_rwsem (read).
926  */
927 int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
928                     struct ceph_mds_session *session)
929 {
930         struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
931         struct inode *in = NULL;
932         struct ceph_mds_reply_inode *ininfo;
933         struct ceph_vino vino;
934         struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
935         int i = 0;
936         int err = 0;
937
938         dout("fill_trace %p is_dentry %d is_target %d\n", req,
939              rinfo->head->is_dentry, rinfo->head->is_target);
940
941 #if 0
942         /*
943          * Debugging hook:
944          *
945          * If we resend completed ops to a recovering mds, we get no
946          * trace.  Since that is very rare, pretend this is the case
947          * to ensure the 'no trace' handlers in the callers behave.
948          *
949          * Fill in inodes unconditionally to avoid breaking cap
950          * invariants.
951          */
952         if (rinfo->head->op & CEPH_MDS_OP_WRITE) {
953                 pr_info("fill_trace faking empty trace on %lld %s\n",
954                         req->r_tid, ceph_mds_op_name(rinfo->head->op));
955                 if (rinfo->head->is_dentry) {
956                         rinfo->head->is_dentry = 0;
957                         err = fill_inode(req->r_locked_dir,
958                                          &rinfo->diri, rinfo->dirfrag,
959                                          session, req->r_request_started, -1);
960                 }
961                 if (rinfo->head->is_target) {
962                         rinfo->head->is_target = 0;
963                         ininfo = rinfo->targeti.in;
964                         vino.ino = le64_to_cpu(ininfo->ino);
965                         vino.snap = le64_to_cpu(ininfo->snapid);
966                         in = ceph_get_inode(sb, vino);
967                         err = fill_inode(in, &rinfo->targeti, NULL,
968                                          session, req->r_request_started,
969                                          req->r_fmode);
970                         iput(in);
971                 }
972         }
973 #endif
974
975         if (!rinfo->head->is_target && !rinfo->head->is_dentry) {
976                 dout("fill_trace reply is empty!\n");
977                 if (rinfo->head->result == 0 && req->r_locked_dir)
978                         ceph_invalidate_dir_request(req);
979                 return 0;
980         }
981
982         if (rinfo->head->is_dentry) {
983                 struct inode *dir = req->r_locked_dir;
984
985                 err = fill_inode(dir, &rinfo->diri, rinfo->dirfrag,
986                                  session, req->r_request_started, -1,
987                                  &req->r_caps_reservation);
988                 if (err < 0)
989                         return err;
990         }
991
992         /*
993          * ignore null lease/binding on snapdir ENOENT, or else we
994          * will have trouble splicing in the virtual snapdir later
995          */
996         if (rinfo->head->is_dentry && !req->r_aborted &&
997             (rinfo->head->is_target || strncmp(req->r_dentry->d_name.name,
998                                                fsc->mount_options->snapdir_name,
999                                                req->r_dentry->d_name.len))) {
1000                 /*
1001                  * lookup link rename   : null -> possibly existing inode
1002                  * mknod symlink mkdir  : null -> new inode
1003                  * unlink               : linked -> null
1004                  */
1005                 struct inode *dir = req->r_locked_dir;
1006                 struct dentry *dn = req->r_dentry;
1007                 bool have_dir_cap, have_lease;
1008
1009                 BUG_ON(!dn);
1010                 BUG_ON(!dir);
1011                 BUG_ON(dn->d_parent->d_inode != dir);
1012                 BUG_ON(ceph_ino(dir) !=
1013                        le64_to_cpu(rinfo->diri.in->ino));
1014                 BUG_ON(ceph_snap(dir) !=
1015                        le64_to_cpu(rinfo->diri.in->snapid));
1016
1017                 /* do we have a lease on the whole dir? */
1018                 have_dir_cap =
1019                         (le32_to_cpu(rinfo->diri.in->cap.caps) &
1020                          CEPH_CAP_FILE_SHARED);
1021
1022                 /* do we have a dn lease? */
1023                 have_lease = have_dir_cap ||
1024                         (le16_to_cpu(rinfo->dlease->mask) &
1025                          CEPH_LOCK_DN);
1026
1027                 if (!have_lease)
1028                         dout("fill_trace  no dentry lease or dir cap\n");
1029
1030                 /* rename? */
1031                 if (req->r_old_dentry && req->r_op == CEPH_MDS_OP_RENAME) {
1032                         dout(" src %p '%.*s' dst %p '%.*s'\n",
1033                              req->r_old_dentry,
1034                              req->r_old_dentry->d_name.len,
1035                              req->r_old_dentry->d_name.name,
1036                              dn, dn->d_name.len, dn->d_name.name);
1037                         dout("fill_trace doing d_move %p -> %p\n",
1038                              req->r_old_dentry, dn);
1039
1040                         d_move(req->r_old_dentry, dn);
1041                         dout(" src %p '%.*s' dst %p '%.*s'\n",
1042                              req->r_old_dentry,
1043                              req->r_old_dentry->d_name.len,
1044                              req->r_old_dentry->d_name.name,
1045                              dn, dn->d_name.len, dn->d_name.name);
1046
1047                         /* ensure target dentry is invalidated, despite
1048                            rehashing bug in vfs_rename_dir */
1049                         ceph_invalidate_dentry_lease(dn);
1050
1051                         /*
1052                          * d_move() puts the renamed dentry at the end of
1053                          * d_subdirs.  We need to assign it an appropriate
1054                          * directory offset so we can behave when holding
1055                          * I_COMPLETE.
1056                          */
1057                         ceph_set_dentry_offset(req->r_old_dentry);
1058                         dout("dn %p gets new offset %lld\n", req->r_old_dentry, 
1059                              ceph_dentry(req->r_old_dentry)->offset);
1060
1061                         dn = req->r_old_dentry;  /* use old_dentry */
1062                         in = dn->d_inode;
1063                 }
1064
1065                 /* null dentry? */
1066                 if (!rinfo->head->is_target) {
1067                         dout("fill_trace null dentry\n");
1068                         if (dn->d_inode) {
1069                                 dout("d_delete %p\n", dn);
1070                                 d_delete(dn);
1071                         } else {
1072                                 dout("d_instantiate %p NULL\n", dn);
1073                                 d_instantiate(dn, NULL);
1074                                 if (have_lease && d_unhashed(dn))
1075                                         d_rehash(dn);
1076                                 update_dentry_lease(dn, rinfo->dlease,
1077                                                     session,
1078                                                     req->r_request_started);
1079                         }
1080                         goto done;
1081                 }
1082
1083                 /* attach proper inode */
1084                 ininfo = rinfo->targeti.in;
1085                 vino.ino = le64_to_cpu(ininfo->ino);
1086                 vino.snap = le64_to_cpu(ininfo->snapid);
1087                 in = dn->d_inode;
1088                 if (!in) {
1089                         in = ceph_get_inode(sb, vino);
1090                         if (IS_ERR(in)) {
1091                                 pr_err("fill_trace bad get_inode "
1092                                        "%llx.%llx\n", vino.ino, vino.snap);
1093                                 err = PTR_ERR(in);
1094                                 d_delete(dn);
1095                                 goto done;
1096                         }
1097                         dn = splice_dentry(dn, in, &have_lease, true);
1098                         if (IS_ERR(dn)) {
1099                                 err = PTR_ERR(dn);
1100                                 goto done;
1101                         }
1102                         req->r_dentry = dn;  /* may have spliced */
1103                         igrab(in);
1104                 } else if (ceph_ino(in) == vino.ino &&
1105                            ceph_snap(in) == vino.snap) {
1106                         igrab(in);
1107                 } else {
1108                         dout(" %p links to %p %llx.%llx, not %llx.%llx\n",
1109                              dn, in, ceph_ino(in), ceph_snap(in),
1110                              vino.ino, vino.snap);
1111                         have_lease = false;
1112                         in = NULL;
1113                 }
1114
1115                 if (have_lease)
1116                         update_dentry_lease(dn, rinfo->dlease, session,
1117                                             req->r_request_started);
1118                 dout(" final dn %p\n", dn);
1119                 i++;
1120         } else if (req->r_op == CEPH_MDS_OP_LOOKUPSNAP ||
1121                    req->r_op == CEPH_MDS_OP_MKSNAP) {
1122                 struct dentry *dn = req->r_dentry;
1123
1124                 /* fill out a snapdir LOOKUPSNAP dentry */
1125                 BUG_ON(!dn);
1126                 BUG_ON(!req->r_locked_dir);
1127                 BUG_ON(ceph_snap(req->r_locked_dir) != CEPH_SNAPDIR);
1128                 ininfo = rinfo->targeti.in;
1129                 vino.ino = le64_to_cpu(ininfo->ino);
1130                 vino.snap = le64_to_cpu(ininfo->snapid);
1131                 in = ceph_get_inode(sb, vino);
1132                 if (IS_ERR(in)) {
1133                         pr_err("fill_inode get_inode badness %llx.%llx\n",
1134                                vino.ino, vino.snap);
1135                         err = PTR_ERR(in);
1136                         d_delete(dn);
1137                         goto done;
1138                 }
1139                 dout(" linking snapped dir %p to dn %p\n", in, dn);
1140                 dn = splice_dentry(dn, in, NULL, true);
1141                 if (IS_ERR(dn)) {
1142                         err = PTR_ERR(dn);
1143                         goto done;
1144                 }
1145                 req->r_dentry = dn;  /* may have spliced */
1146                 igrab(in);
1147                 rinfo->head->is_dentry = 1;  /* fool notrace handlers */
1148         }
1149
1150         if (rinfo->head->is_target) {
1151                 vino.ino = le64_to_cpu(rinfo->targeti.in->ino);
1152                 vino.snap = le64_to_cpu(rinfo->targeti.in->snapid);
1153
1154                 if (in == NULL || ceph_ino(in) != vino.ino ||
1155                     ceph_snap(in) != vino.snap) {
1156                         in = ceph_get_inode(sb, vino);
1157                         if (IS_ERR(in)) {
1158                                 err = PTR_ERR(in);
1159                                 goto done;
1160                         }
1161                 }
1162                 req->r_target_inode = in;
1163
1164                 err = fill_inode(in,
1165                                  &rinfo->targeti, NULL,
1166                                  session, req->r_request_started,
1167                                  (le32_to_cpu(rinfo->head->result) == 0) ?
1168                                  req->r_fmode : -1,
1169                                  &req->r_caps_reservation);
1170                 if (err < 0) {
1171                         pr_err("fill_inode badness %p %llx.%llx\n",
1172                                in, ceph_vinop(in));
1173                         goto done;
1174                 }
1175         }
1176
1177 done:
1178         dout("fill_trace done err=%d\n", err);
1179         return err;
1180 }
1181
1182 /*
1183  * Prepopulate our cache with readdir results, leases, etc.
1184  */
1185 int ceph_readdir_prepopulate(struct ceph_mds_request *req,
1186                              struct ceph_mds_session *session)
1187 {
1188         struct dentry *parent = req->r_dentry;
1189         struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
1190         struct qstr dname;
1191         struct dentry *dn;
1192         struct inode *in;
1193         int err = 0, i;
1194         struct inode *snapdir = NULL;
1195         struct ceph_mds_request_head *rhead = req->r_request->front.iov_base;
1196         u64 frag = le32_to_cpu(rhead->args.readdir.frag);
1197         struct ceph_dentry_info *di;
1198
1199         if (le32_to_cpu(rinfo->head->op) == CEPH_MDS_OP_LSSNAP) {
1200                 snapdir = ceph_get_snapdir(parent->d_inode);
1201                 parent = d_find_alias(snapdir);
1202                 dout("readdir_prepopulate %d items under SNAPDIR dn %p\n",
1203                      rinfo->dir_nr, parent);
1204         } else {
1205                 dout("readdir_prepopulate %d items under dn %p\n",
1206                      rinfo->dir_nr, parent);
1207                 if (rinfo->dir_dir)
1208                         ceph_fill_dirfrag(parent->d_inode, rinfo->dir_dir);
1209         }
1210
1211         for (i = 0; i < rinfo->dir_nr; i++) {
1212                 struct ceph_vino vino;
1213
1214                 dname.name = rinfo->dir_dname[i];
1215                 dname.len = rinfo->dir_dname_len[i];
1216                 dname.hash = full_name_hash(dname.name, dname.len);
1217
1218                 vino.ino = le64_to_cpu(rinfo->dir_in[i].in->ino);
1219                 vino.snap = le64_to_cpu(rinfo->dir_in[i].in->snapid);
1220
1221 retry_lookup:
1222                 dn = d_lookup(parent, &dname);
1223                 dout("d_lookup on parent=%p name=%.*s got %p\n",
1224                      parent, dname.len, dname.name, dn);
1225
1226                 if (!dn) {
1227                         dn = d_alloc(parent, &dname);
1228                         dout("d_alloc %p '%.*s' = %p\n", parent,
1229                              dname.len, dname.name, dn);
1230                         if (dn == NULL) {
1231                                 dout("d_alloc badness\n");
1232                                 err = -ENOMEM;
1233                                 goto out;
1234                         }
1235                         err = ceph_init_dentry(dn);
1236                         if (err < 0) {
1237                                 dput(dn);
1238                                 goto out;
1239                         }
1240                 } else if (dn->d_inode &&
1241                            (ceph_ino(dn->d_inode) != vino.ino ||
1242                             ceph_snap(dn->d_inode) != vino.snap)) {
1243                         dout(" dn %p points to wrong inode %p\n",
1244                              dn, dn->d_inode);
1245                         d_delete(dn);
1246                         dput(dn);
1247                         goto retry_lookup;
1248                 } else {
1249                         /* reorder parent's d_subdirs */
1250                         spin_lock(&parent->d_lock);
1251                         spin_lock_nested(&dn->d_lock, DENTRY_D_LOCK_NESTED);
1252                         list_move(&dn->d_u.d_child, &parent->d_subdirs);
1253                         spin_unlock(&dn->d_lock);
1254                         spin_unlock(&parent->d_lock);
1255                 }
1256
1257                 di = dn->d_fsdata;
1258                 di->offset = ceph_make_fpos(frag, i + req->r_readdir_offset);
1259
1260                 /* inode */
1261                 if (dn->d_inode) {
1262                         in = dn->d_inode;
1263                 } else {
1264                         in = ceph_get_inode(parent->d_sb, vino);
1265                         if (IS_ERR(in)) {
1266                                 dout("new_inode badness\n");
1267                                 d_delete(dn);
1268                                 dput(dn);
1269                                 err = PTR_ERR(in);
1270                                 goto out;
1271                         }
1272                         dn = splice_dentry(dn, in, NULL, false);
1273                         if (IS_ERR(dn))
1274                                 dn = NULL;
1275                 }
1276
1277                 if (fill_inode(in, &rinfo->dir_in[i], NULL, session,
1278                                req->r_request_started, -1,
1279                                &req->r_caps_reservation) < 0) {
1280                         pr_err("fill_inode badness on %p\n", in);
1281                         goto next_item;
1282                 }
1283                 if (dn)
1284                         update_dentry_lease(dn, rinfo->dir_dlease[i],
1285                                             req->r_session,
1286                                             req->r_request_started);
1287 next_item:
1288                 if (dn)
1289                         dput(dn);
1290         }
1291         req->r_did_prepopulate = true;
1292
1293 out:
1294         if (snapdir) {
1295                 iput(snapdir);
1296                 dput(parent);
1297         }
1298         dout("readdir_prepopulate done\n");
1299         return err;
1300 }
1301
1302 int ceph_inode_set_size(struct inode *inode, loff_t size)
1303 {
1304         struct ceph_inode_info *ci = ceph_inode(inode);
1305         int ret = 0;
1306
1307         spin_lock(&inode->i_lock);
1308         dout("set_size %p %llu -> %llu\n", inode, inode->i_size, size);
1309         inode->i_size = size;
1310         inode->i_blocks = (size + (1 << 9) - 1) >> 9;
1311
1312         /* tell the MDS if we are approaching max_size */
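        /*
         * i.e. return 1 the first time i_size crosses half of max_size:
         * e.g. with max_size = 4 MB we return 1 once the size passes 2 MB,
         * provided the last size we reported was still below 2 MB.
         */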
1313         if ((size << 1) >= ci->i_max_size &&
1314             (ci->i_reported_size << 1) < ci->i_max_size)
1315                 ret = 1;
1316
1317         spin_unlock(&inode->i_lock);
1318         return ret;
1319 }
1320
1321 /*
1322  * Write back inode data in a worker thread.  (This can't be done
1323  * in the message handler context.)
1324  */
1325 void ceph_queue_writeback(struct inode *inode)
1326 {
1327         if (queue_work(ceph_inode_to_client(inode)->wb_wq,
1328                        &ceph_inode(inode)->i_wb_work)) {
1329                 dout("ceph_queue_writeback %p\n", inode);
1330                 igrab(inode);
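                /* this ref is dropped by iput() in ceph_writeback_work() */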
1331         } else {
1332                 dout("ceph_queue_writeback %p failed\n", inode);
1333         }
1334 }
1335
1336 static void ceph_writeback_work(struct work_struct *work)
1337 {
1338         struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
1339                                                   i_wb_work);
1340         struct inode *inode = &ci->vfs_inode;
1341
1342         dout("writeback %p\n", inode);
1343         filemap_fdatawrite(&inode->i_data);
1344         iput(inode);
1345 }
1346
1347 /*
1348  * queue an async invalidation
1349  */
1350 void ceph_queue_invalidate(struct inode *inode)
1351 {
1352         if (queue_work(ceph_inode_to_client(inode)->pg_inv_wq,
1353                        &ceph_inode(inode)->i_pg_inv_work)) {
1354                 dout("ceph_queue_invalidate %p\n", inode);
1355                 igrab(inode);
1356         } else {
1357                 dout("ceph_queue_invalidate %p failed\n", inode);
1358         }
1359 }
1360
1361 /*
1362  * invalidate any pages that are not dirty or under writeback.  this
1363  * includes pages that are clean and mapped.
1364  */
1365 static void ceph_invalidate_nondirty_pages(struct address_space *mapping)
1366 {
1367         struct pagevec pvec;
1368         pgoff_t next = 0;
1369         int i;
1370
1371         pagevec_init(&pvec, 0);
1372         while (pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
1373                 for (i = 0; i < pagevec_count(&pvec); i++) {
1374                         struct page *page = pvec.pages[i];
1375                         pgoff_t index;
1376                         int skip_page =
1377                                 (PageDirty(page) || PageWriteback(page));
1378
1379                         if (!skip_page)
1380                                 skip_page = !trylock_page(page);
1381
1382                         /*
1383                          * We really shouldn't be looking at the ->index of an
1384                          * unlocked page.  But we're not allowed to lock these
1385                          * pages.  So we rely upon nobody altering the ->index
1386                          * of this (pinned-by-us) page.
1387                          */
1388                         index = page->index;
1389                         if (index > next)
1390                                 next = index;
1391                         next++;
1392
1393                         if (skip_page)
1394                                 continue;
1395
1396                         generic_error_remove_page(mapping, page);
1397                         unlock_page(page);
1398                 }
1399                 pagevec_release(&pvec);
1400                 cond_resched();
1401         }
1402 }
1403
1404 /*
1405  * Invalidate inode pages in a worker thread.  (This can't be done
1406  * in the message handler context.)
1407  */
1408 static void ceph_invalidate_work(struct work_struct *work)
1409 {
1410         struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
1411                                                   i_pg_inv_work);
1412         struct inode *inode = &ci->vfs_inode;
1413         u32 orig_gen;
1414         int check = 0;
1415
1416         spin_lock(&inode->i_lock);
1417         dout("invalidate_pages %p gen %d revoking %d\n", inode,
1418              ci->i_rdcache_gen, ci->i_rdcache_revoking);
1419         if (ci->i_rdcache_revoking != ci->i_rdcache_gen) {
1420                 /* never mind! */
1421                 spin_unlock(&inode->i_lock);
1422                 goto out;
1423         }
1424         orig_gen = ci->i_rdcache_gen;
1425         spin_unlock(&inode->i_lock);
1426
1427         ceph_invalidate_nondirty_pages(inode->i_mapping);
1428
1429         spin_lock(&inode->i_lock);
1430         if (orig_gen == ci->i_rdcache_gen &&
1431             orig_gen == ci->i_rdcache_revoking) {
1432                 dout("invalidate_pages %p gen %d successful\n", inode,
1433                      ci->i_rdcache_gen);
1434                 ci->i_rdcache_revoking--;
1435                 check = 1;
1436         } else {
1437                 dout("invalidate_pages %p gen %d raced, now %d revoking %d\n",
1438                      inode, orig_gen, ci->i_rdcache_gen,
1439                      ci->i_rdcache_revoking);
1440         }
1441         spin_unlock(&inode->i_lock);
1442
1443         if (check)
1444                 ceph_check_caps(ci, 0, NULL);
1445 out:
1446         iput(inode);
1447 }
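/*
 * Editorial note (not part of this file): the i_rdcache_gen /
 * i_rdcache_revoking pair above is a simple race detector.  The worker
 * snapshots the generation under i_lock, drops the lock for the slow
 * page invalidation, then retakes the lock and only counts the revoke
 * as successful if neither counter moved in the meantime.  The pattern,
 * distilled with hypothetical names:
 *
 *	spin_lock(&lock);
 *	orig = gen;
 *	spin_unlock(&lock);
 *
 *	do_slow_work();			// unlocked
 *
 *	spin_lock(&lock);
 *	if (gen == orig)
 *		commit();		// nothing raced with us
 *	else
 *		leave_for_retry();	// state moved on; do not commit
 *	spin_unlock(&lock);
 */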
1448
1449
1450 /*
1451  * called by trunc_wq; take i_mutex ourselves
1452  *
1453  * We also truncate in a separate thread.
1454  */
1455 static void ceph_vmtruncate_work(struct work_struct *work)
1456 {
1457         struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
1458                                                   i_vmtruncate_work);
1459         struct inode *inode = &ci->vfs_inode;
1460
1461         dout("vmtruncate_work %p\n", inode);
1462         mutex_lock(&inode->i_mutex);
1463         __ceph_do_pending_vmtruncate(inode);
1464         mutex_unlock(&inode->i_mutex);
1465         iput(inode);
1466 }
1467
1468 /*
1469  * Queue an async vmtruncate.  If we fail to queue work, we will handle
1470  * the truncation the next time we call __ceph_do_pending_vmtruncate.
1471  */
1472 void ceph_queue_vmtruncate(struct inode *inode)
1473 {
1474         struct ceph_inode_info *ci = ceph_inode(inode);
1475
1476         if (queue_work(ceph_sb_to_client(inode->i_sb)->trunc_wq,
1477                        &ci->i_vmtruncate_work)) {
1478                 dout("ceph_queue_vmtruncate %p\n", inode);
1479                 igrab(inode);
1480         } else {
1481                 dout("ceph_queue_vmtruncate %p failed, pending=%d\n",
1482                      inode, ci->i_truncate_pending);
1483         }
1484 }
1485
1486 /*
1487  * called with i_mutex held.
1488  *
1489  * Make sure any pending truncation is applied before doing anything
1490  * that may depend on it.
1491  */
1492 void __ceph_do_pending_vmtruncate(struct inode *inode)
1493 {
1494         struct ceph_inode_info *ci = ceph_inode(inode);
1495         u64 to;
1496         int wrbuffer_refs, wake = 0;
1497
1498 retry:
1499         spin_lock(&inode->i_lock);
1500         if (ci->i_truncate_pending == 0) {
1501                 dout("__do_pending_vmtruncate %p none pending\n", inode);
1502                 spin_unlock(&inode->i_lock);
1503                 return;
1504         }
1505
1506         /*
1507          * make sure any dirty snapped pages are flushed before we
1508          * possibly truncate them.. so write AND block!
1509          */
1510         if (ci->i_wrbuffer_ref_head < ci->i_wrbuffer_ref) {
1511                 dout("__do_pending_vmtruncate %p flushing snaps first\n",
1512                      inode);
1513                 spin_unlock(&inode->i_lock);
1514                 filemap_write_and_wait_range(&inode->i_data, 0,
1515                                              inode->i_sb->s_maxbytes);
1516                 goto retry;
1517         }
1518
1519         to = ci->i_truncate_size;
1520         wrbuffer_refs = ci->i_wrbuffer_ref;
1521         dout("__do_pending_vmtruncate %p (%d) to %lld\n", inode,
1522              ci->i_truncate_pending, to);
1523         spin_unlock(&inode->i_lock);
1524
1525         truncate_inode_pages(inode->i_mapping, to);
1526
1527         spin_lock(&inode->i_lock);
1528         ci->i_truncate_pending--;
1529         if (ci->i_truncate_pending == 0)
1530                 wake = 1;
1531         spin_unlock(&inode->i_lock);
1532
1533         if (wrbuffer_refs == 0)
1534                 ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
1535         if (wake)
1536                 wake_up_all(&ci->i_cap_wq);
1537 }
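/*
 * Editorial usage note: callers must hold i_mutex, and the helper
 * returns immediately when i_truncate_pending is zero, so it is cheap
 * to call defensively.  ceph_setattr() below invokes it both before and
 * after touching attributes; ceph_vmtruncate_work() above shows the
 * basic calling pattern:
 *
 *	mutex_lock(&inode->i_mutex);
 *	__ceph_do_pending_vmtruncate(inode);	// no-op if none pending
 *	... work that must see the truncated page cache ...
 *	mutex_unlock(&inode->i_mutex);
 */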
1538
1539
1540 /*
1541  * symlinks
1542  */
1543 static void *ceph_sym_follow_link(struct dentry *dentry, struct nameidata *nd)
1544 {
1545         struct ceph_inode_info *ci = ceph_inode(dentry->d_inode);
1546         nd_set_link(nd, ci->i_symlink);
1547         return NULL;
1548 }
1549
1550 static const struct inode_operations ceph_symlink_iops = {
1551         .readlink = generic_readlink,
1552         .follow_link = ceph_sym_follow_link,
1553 };
1554
1555 /*
1556  * setattr
1557  */
1558 int ceph_setattr(struct dentry *dentry, struct iattr *attr)
1559 {
1560         struct inode *inode = dentry->d_inode;
1561         struct ceph_inode_info *ci = ceph_inode(inode);
1562         struct inode *parent_inode = dentry->d_parent->d_inode;
1563         const unsigned int ia_valid = attr->ia_valid;
1564         struct ceph_mds_request *req;
1565         struct ceph_mds_client *mdsc = ceph_sb_to_client(dentry->d_sb)->mdsc;
1566         int issued;
1567         int release = 0, dirtied = 0;
1568         int mask = 0;
1569         int err = 0;
1570
1571         if (ceph_snap(inode) != CEPH_NOSNAP)
1572                 return -EROFS;
1573
1574         __ceph_do_pending_vmtruncate(inode);
1575
1576         err = inode_change_ok(inode, attr);
1577         if (err != 0)
1578                 return err;
1579
1580         req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETATTR,
1581                                        USE_AUTH_MDS);
1582         if (IS_ERR(req))
1583                 return PTR_ERR(req);
1584
1585         spin_lock(&inode->i_lock);
1586         issued = __ceph_caps_issued(ci, NULL);
1587         dout("setattr %p issued %s\n", inode, ceph_cap_string(issued));
1588
1589         if (ia_valid & ATTR_UID) {
1590                 dout("setattr %p uid %d -> %d\n", inode,
1591                      inode->i_uid, attr->ia_uid);
1592                 if (issued & CEPH_CAP_AUTH_EXCL) {
1593                         inode->i_uid = attr->ia_uid;
1594                         dirtied |= CEPH_CAP_AUTH_EXCL;
1595                 } else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
1596                            attr->ia_uid != inode->i_uid) {
1597                         req->r_args.setattr.uid = cpu_to_le32(attr->ia_uid);
1598                         mask |= CEPH_SETATTR_UID;
1599                         release |= CEPH_CAP_AUTH_SHARED;
1600                 }
1601         }
1602         if (ia_valid & ATTR_GID) {
1603                 dout("setattr %p gid %d -> %d\n", inode,
1604                      inode->i_gid, attr->ia_gid);
1605                 if (issued & CEPH_CAP_AUTH_EXCL) {
1606                         inode->i_gid = attr->ia_gid;
1607                         dirtied |= CEPH_CAP_AUTH_EXCL;
1608                 } else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
1609                            attr->ia_gid != inode->i_gid) {
1610                         req->r_args.setattr.gid = cpu_to_le32(attr->ia_gid);
1611                         mask |= CEPH_SETATTR_GID;
1612                         release |= CEPH_CAP_AUTH_SHARED;
1613                 }
1614         }
1615         if (ia_valid & ATTR_MODE) {
1616                 dout("setattr %p mode 0%o -> 0%o\n", inode, inode->i_mode,
1617                      attr->ia_mode);
1618                 if (issued & CEPH_CAP_AUTH_EXCL) {
1619                         inode->i_mode = attr->ia_mode;
1620                         dirtied |= CEPH_CAP_AUTH_EXCL;
1621                 } else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
1622                            attr->ia_mode != inode->i_mode) {
1623                         req->r_args.setattr.mode = cpu_to_le32(attr->ia_mode);
1624                         mask |= CEPH_SETATTR_MODE;
1625                         release |= CEPH_CAP_AUTH_SHARED;
1626                 }
1627         }
1628
1629         if (ia_valid & ATTR_ATIME) {
1630                 dout("setattr %p atime %ld.%ld -> %ld.%ld\n", inode,
1631                      inode->i_atime.tv_sec, inode->i_atime.tv_nsec,
1632                      attr->ia_atime.tv_sec, attr->ia_atime.tv_nsec);
1633                 if (issued & CEPH_CAP_FILE_EXCL) {
1634                         ci->i_time_warp_seq++;
1635                         inode->i_atime = attr->ia_atime;
1636                         dirtied |= CEPH_CAP_FILE_EXCL;
1637                 } else if ((issued & CEPH_CAP_FILE_WR) &&
1638                            timespec_compare(&inode->i_atime,
1639                                             &attr->ia_atime) < 0) {
1640                         inode->i_atime = attr->ia_atime;
1641                         dirtied |= CEPH_CAP_FILE_WR;
1642                 } else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
1643                            !timespec_equal(&inode->i_atime, &attr->ia_atime)) {
1644                         ceph_encode_timespec(&req->r_args.setattr.atime,
1645                                              &attr->ia_atime);
1646                         mask |= CEPH_SETATTR_ATIME;
1647                         release |= CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_RD |
1648                                 CEPH_CAP_FILE_WR;
1649                 }
1650         }
1651         if (ia_valid & ATTR_MTIME) {
1652                 dout("setattr %p mtime %ld.%ld -> %ld.%ld\n", inode,
1653                      inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
1654                      attr->ia_mtime.tv_sec, attr->ia_mtime.tv_nsec);
1655                 if (issued & CEPH_CAP_FILE_EXCL) {
1656                         ci->i_time_warp_seq++;
1657                         inode->i_mtime = attr->ia_mtime;
1658                         dirtied |= CEPH_CAP_FILE_EXCL;
1659                 } else if ((issued & CEPH_CAP_FILE_WR) &&
1660                            timespec_compare(&inode->i_mtime,
1661                                             &attr->ia_mtime) < 0) {
1662                         inode->i_mtime = attr->ia_mtime;
1663                         dirtied |= CEPH_CAP_FILE_WR;
1664                 } else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
1665                            !timespec_equal(&inode->i_mtime, &attr->ia_mtime)) {
1666                         ceph_encode_timespec(&req->r_args.setattr.mtime,
1667                                              &attr->ia_mtime);
1668                         mask |= CEPH_SETATTR_MTIME;
1669                         release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD |
1670                                 CEPH_CAP_FILE_WR;
1671                 }
1672         }
1673         if (ia_valid & ATTR_SIZE) {
1674                 dout("setattr %p size %lld -> %lld\n", inode,
1675                      inode->i_size, attr->ia_size);
1676                 if (attr->ia_size > inode->i_sb->s_maxbytes) {
1677                         err = -EINVAL;
1678                         goto out;
1679                 }
1680                 if ((issued & CEPH_CAP_FILE_EXCL) &&
1681                     attr->ia_size > inode->i_size) {
1682                         inode->i_size = attr->ia_size;
1683                         inode->i_blocks =
1684                                 (attr->ia_size + (1 << 9) - 1) >> 9;
1685                         inode->i_ctime = attr->ia_ctime;
1686                         ci->i_reported_size = attr->ia_size;
1687                         dirtied |= CEPH_CAP_FILE_EXCL;
1688                 } else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
1689                            attr->ia_size != inode->i_size) {
1690                         req->r_args.setattr.size = cpu_to_le64(attr->ia_size);
1691                         req->r_args.setattr.old_size =
1692                                 cpu_to_le64(inode->i_size);
1693                         mask |= CEPH_SETATTR_SIZE;
1694                         release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD |
1695                                 CEPH_CAP_FILE_WR;
1696                 }
1697         }
1698
1699         /* these do nothing */
1700         if (ia_valid & ATTR_CTIME) {
1701                 bool only = (ia_valid & (ATTR_SIZE|ATTR_MTIME|ATTR_ATIME|
1702                                          ATTR_MODE|ATTR_UID|ATTR_GID)) == 0;
1703                 dout("setattr %p ctime %ld.%ld -> %ld.%ld (%s)\n", inode,
1704                      inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
1705                      attr->ia_ctime.tv_sec, attr->ia_ctime.tv_nsec,
1706                      only ? "ctime only" : "ignored");
1707                 inode->i_ctime = attr->ia_ctime;
1708                 if (only) {
1709                         /*
1710                          * if the kernel wants to dirty ctime but nothing else,
1711                          * we need to choose a cap to dirty under, or do
1712                          * an almost-no-op setattr
1713                          */
1714                         if (issued & CEPH_CAP_AUTH_EXCL)
1715                                 dirtied |= CEPH_CAP_AUTH_EXCL;
1716                         else if (issued & CEPH_CAP_FILE_EXCL)
1717                                 dirtied |= CEPH_CAP_FILE_EXCL;
1718                         else if (issued & CEPH_CAP_XATTR_EXCL)
1719                                 dirtied |= CEPH_CAP_XATTR_EXCL;
1720                         else
1721                                 mask |= CEPH_SETATTR_CTIME;
1722                 }
1723         }
1724         if (ia_valid & ATTR_FILE)
1725                 dout("setattr %p ATTR_FILE ... hrm!\n", inode);
1726
1727         if (dirtied) {
1728                 __ceph_mark_dirty_caps(ci, dirtied);
1729                 inode->i_ctime = CURRENT_TIME;
1730         }
1731
1732         release &= issued;
1733         spin_unlock(&inode->i_lock);
1734
1735         if (mask) {
1736                 req->r_inode = igrab(inode);
1737                 req->r_inode_drop = release;
1738                 req->r_args.setattr.mask = cpu_to_le32(mask);
1739                 req->r_num_caps = 1;
1740                 err = ceph_mdsc_do_request(mdsc, parent_inode, req);
1741         }
1742         dout("setattr %p result=%d (%s locally, %d remote)\n", inode, err,
1743              ceph_cap_string(dirtied), mask);
1744
1745         ceph_mdsc_put_request(req);
1746         __ceph_do_pending_vmtruncate(inode);
1747         return err;
1748 out:
1749         spin_unlock(&inode->i_lock);
1750         ceph_mdsc_put_request(req);
1751         return err;
1752 }
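/*
 * Editorial sketch of the per-attribute pattern used above (CAPS names
 * are placeholders, not real identifiers): under i_lock, every
 * attribute takes one of three paths --
 *
 *	if (issued & EXCL_CAP)
 *		apply the change locally and record it in 'dirtied';
 *	else if (!(issued & SHARED_CAP) || the value actually changes)
 *		encode it into req->r_args.setattr, set the bit in
 *		'mask', and add SHARED_CAP to 'release';
 *	else
 *		do nothing -- we hold SHARED and the value is unchanged.
 *
 * A SETATTR request goes to the auth MDS only if 'mask' ends up
 * non-zero; purely local changes are recorded with
 * __ceph_mark_dirty_caps() and flushed back to the MDS later along
 * with other dirty caps.
 */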
1753
1754 /*
1755  * Verify that we have a lease on the given mask.  If not,
1756  * do a getattr against an MDS.
1757  */
1758 int ceph_do_getattr(struct inode *inode, int mask)
1759 {
1760         struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
1761         struct ceph_mds_client *mdsc = fsc->mdsc;
1762         struct ceph_mds_request *req;
1763         int err;
1764
1765         if (ceph_snap(inode) == CEPH_SNAPDIR) {
1766                 dout("do_getattr inode %p SNAPDIR\n", inode);
1767                 return 0;
1768         }
1769
1770         dout("do_getattr inode %p mask %s mode 0%o\n", inode, ceph_cap_string(mask), inode->i_mode);
1771         if (ceph_caps_issued_mask(ceph_inode(inode), mask, 1))
1772                 return 0;
1773
1774         req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, USE_ANY_MDS);
1775         if (IS_ERR(req))
1776                 return PTR_ERR(req);
1777         req->r_inode = igrab(inode);
1778         req->r_num_caps = 1;
1779         req->r_args.getattr.mask = cpu_to_le32(mask);
1780         err = ceph_mdsc_do_request(mdsc, NULL, req);
1781         ceph_mdsc_put_request(req);
1782         dout("do_getattr result=%d\n", err);
1783         return err;
1784 }
1785
1786
1787 /*
1788  * Check inode permissions.  We verify we have a valid value for
1789  * the AUTH cap, then call the generic handler.
1790  */
1791 int ceph_permission(struct inode *inode, int mask, unsigned int flags)
1792 {
1793         int err;
1794
1795         if (flags & IPERM_FLAG_RCU)
1796                 return -ECHILD;
1797
1798         err = ceph_do_getattr(inode, CEPH_CAP_AUTH_SHARED);
1799
1800         if (!err)
1801                 err = generic_permission(inode, mask, flags, NULL);
1802         return err;
1803 }
1804
1805 /*
1806  * Get all attributes.  Hopefully someday we'll have a statlite()
1807  * and can limit the fields we require to be accurate.
1808  */
1809 int ceph_getattr(struct vfsmount *mnt, struct dentry *dentry,
1810                  struct kstat *stat)
1811 {
1812         struct inode *inode = dentry->d_inode;
1813         struct ceph_inode_info *ci = ceph_inode(inode);
1814         int err;
1815
1816         err = ceph_do_getattr(inode, CEPH_STAT_CAP_INODE_ALL);
1817         if (!err) {
1818                 generic_fillattr(inode, stat);
1819                 stat->ino = ceph_translate_ino(inode->i_sb, inode->i_ino);
1820                 if (ceph_snap(inode) != CEPH_NOSNAP)
1821                         stat->dev = ceph_snap(inode);
1822                 else
1823                         stat->dev = 0;
1824                 if (S_ISDIR(inode->i_mode)) {
1825                         if (ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb),
1826                                                 RBYTES))
1827                                 stat->size = ci->i_rbytes;
1828                         else
1829                                 stat->size = ci->i_files + ci->i_subdirs;
1830                         stat->blocks = 0;
1831                         stat->blksize = 65536;
1832                 }
1833         }
1834         return err;
1835 }
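/*
 * Editorial userspace example (not part of this file): the directory
 * branch above means stat() on a ceph directory reports either the
 * recursive byte count (with the "rbytes" mount option) or the number
 * of files plus subdirectories as st_size, with a fixed 64 KB
 * st_blksize.
 */
#include <stdio.h>
#include <sys/stat.h>

int main(int argc, char **argv)
{
	struct stat st;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <path on a ceph mount>\n", argv[0]);
		return 1;
	}
	if (stat(argv[1], &st) != 0) {
		perror("stat");
		return 1;
	}
	printf("size=%lld blksize=%ld blocks=%lld\n",
	       (long long)st.st_size, (long)st.st_blksize,
	       (long long)st.st_blocks);
	return 0;
}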