fs/nilfs2/inode.c
/*
 * inode.c - NILFS inode operations.
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 *
 * Written by Ryusuke Konishi <ryusuke@osrg.net>
 *
 */

#include <linux/buffer_head.h>
#include <linux/gfp.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/uio.h>
#include "nilfs.h"
#include "btnode.h"
#include "segment.h"
#include "page.h"
#include "mdt.h"
#include "cpfile.h"
#include "ifile.h"

struct nilfs_iget_args {
        u64 ino;
        __u64 cno;
        struct nilfs_root *root;
        int for_gc;
};

/**
 * nilfs_get_block() - get a file block on the filesystem (callback function)
 * @inode: inode struct of the target file
 * @blkoff: file block number
 * @bh_result: buffer head to be mapped on
 * @create: whether to allocate the block if it has not been allocated yet
 *
 * This function does not issue an actual read request for the specified
 * data block; that is done by the VFS.
 */
int nilfs_get_block(struct inode *inode, sector_t blkoff,
                    struct buffer_head *bh_result, int create)
{
        struct nilfs_inode_info *ii = NILFS_I(inode);
        __u64 blknum = 0;
        int err = 0, ret;
        struct inode *dat = nilfs_dat_inode(NILFS_I_NILFS(inode));
        unsigned maxblocks = bh_result->b_size >> inode->i_blkbits;

        down_read(&NILFS_MDT(dat)->mi_sem);
        ret = nilfs_bmap_lookup_contig(ii->i_bmap, blkoff, &blknum, maxblocks);
        up_read(&NILFS_MDT(dat)->mi_sem);
        if (ret >= 0) { /* found */
                map_bh(bh_result, inode->i_sb, blknum);
                if (ret > 0)
                        bh_result->b_size = (ret << inode->i_blkbits);
                goto out;
        }
        /* data block was not found */
        if (ret == -ENOENT && create) {
                struct nilfs_transaction_info ti;

                bh_result->b_blocknr = 0;
                err = nilfs_transaction_begin(inode->i_sb, &ti, 1);
                if (unlikely(err))
                        goto out;
                err = nilfs_bmap_insert(ii->i_bmap, (unsigned long)blkoff,
                                        (unsigned long)bh_result);
                if (unlikely(err != 0)) {
                        if (err == -EEXIST) {
                                /*
                                 * The get_block() function could be called
                                 * from multiple callers for an inode.
                                 * However, the page having this block must
                                 * be locked in this case.
                                 */
                                printk(KERN_WARNING
                                       "nilfs_get_block: a race condition "
                                       "while inserting a data block. "
                                       "(inode number=%lu, file block "
                                       "offset=%llu)\n",
                                       inode->i_ino,
                                       (unsigned long long)blkoff);
                                err = 0;
                        }
                        nilfs_transaction_abort(inode->i_sb);
                        goto out;
                }
                nilfs_mark_inode_dirty(inode);
                nilfs_transaction_commit(inode->i_sb); /* never fails */
                /* Error handling should be detailed */
                set_buffer_new(bh_result);
                set_buffer_delay(bh_result);
                map_bh(bh_result, inode->i_sb, 0); /* dbn must be changed
                                                      to a proper value */
        } else if (ret == -ENOENT) {
                /* A missing block is not an error (e.g. a hole); return
                   without setting the mapped state flag. */
                ;
        } else {
                err = ret;
        }

 out:
        return err;
}

/**
 * nilfs_readpage() - implement readpage() method of nilfs_aops {}
 * address_space_operations.
 * @file: file struct of the file to be read
 * @page: the page to be read
 */
static int nilfs_readpage(struct file *file, struct page *page)
{
        return mpage_readpage(page, nilfs_get_block);
}

/**
 * nilfs_readpages() - implement readpages() method of nilfs_aops {}
 * address_space_operations.
 * @file: file struct of the file to be read
 * @mapping: address_space struct used for reading multiple pages
 * @pages: the pages to be read
 * @nr_pages: number of pages to be read
 */
static int nilfs_readpages(struct file *file, struct address_space *mapping,
                           struct list_head *pages, unsigned nr_pages)
{
        return mpage_readpages(mapping, pages, nr_pages, nilfs_get_block);
}

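/**
 * nilfs_writepages() - implement writepages() method of nilfs_aops {}
 * address_space_operations.
 * @mapping: address_space struct of the file to be written
 * @wbc: writeback control descriptor
 *
 * For WB_SYNC_ALL writeback, a data-sync segment covering the requested
 * range is constructed; otherwise nothing is written here and dirty
 * pages are left to the segment constructor.
 */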
static int nilfs_writepages(struct address_space *mapping,
                            struct writeback_control *wbc)
{
        struct inode *inode = mapping->host;
        int err = 0;

        if (wbc->sync_mode == WB_SYNC_ALL)
                err = nilfs_construct_dsync_segment(inode->i_sb, inode,
                                                    wbc->range_start,
                                                    wbc->range_end);
        return err;
}

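/**
 * nilfs_writepage() - implement writepage() method of nilfs_aops {}
 * address_space_operations.
 * @page: the page to be written
 * @wbc: writeback control descriptor
 *
 * The page is not written back directly; it is redirtied and unlocked,
 * and a whole segment is constructed for sync-mode writeback, while
 * for-reclaim requests merely trigger a flush of the inode's segment.
 */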
static int nilfs_writepage(struct page *page, struct writeback_control *wbc)
{
        struct inode *inode = page->mapping->host;
        int err;

        redirty_page_for_writepage(wbc, page);
        unlock_page(page);

        if (wbc->sync_mode == WB_SYNC_ALL) {
                err = nilfs_construct_segment(inode->i_sb);
                if (unlikely(err))
                        return err;
        } else if (wbc->for_reclaim)
                nilfs_flush_segment(inode->i_sb, inode->i_ino);

        return 0;
}

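/**
 * nilfs_set_page_dirty() - implement set_page_dirty() method of
 * nilfs_aops {} address_space_operations.
 * @page: the page to be marked dirty
 *
 * In addition to the generic buffer-based dirtying, the blocks of the
 * page are accounted as dirty so that the file gets queued for the
 * next segment construction.
 */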
static int nilfs_set_page_dirty(struct page *page)
{
        int ret = __set_page_dirty_buffers(page);

        if (ret) {
                struct inode *inode = page->mapping->host;
                unsigned nr_dirty = 1 << (PAGE_SHIFT - inode->i_blkbits);

                nilfs_set_file_dirty(inode, nr_dirty);
        }
        return ret;
}

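/**
 * nilfs_write_begin() - implement write_begin() method of nilfs_aops {}
 * address_space_operations.
 * @file: file struct of the file to be written
 * @mapping: address_space struct of the file
 * @pos: byte offset where the write starts
 * @len: number of bytes to be written
 * @flags: AOP_FLAG_* flags
 * @pagep: place to return the locked target page
 * @fsdata: place to return filesystem-private data (unused)
 *
 * A transaction is opened here and stays open across the write; it is
 * committed in nilfs_write_end() or aborted on error.
 */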
static int nilfs_write_begin(struct file *file, struct address_space *mapping,
                             loff_t pos, unsigned len, unsigned flags,
                             struct page **pagep, void **fsdata)

{
        struct inode *inode = mapping->host;
        int err = nilfs_transaction_begin(inode->i_sb, NULL, 1);

        if (unlikely(err))
                return err;

        err = block_write_begin(mapping, pos, len, flags, pagep,
                                nilfs_get_block);
        if (unlikely(err)) {
                loff_t isize = mapping->host->i_size;
                if (pos + len > isize)
                        vmtruncate(mapping->host, isize);

                nilfs_transaction_abort(inode->i_sb);
        }
        return err;
}

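/**
 * nilfs_write_end() - implement write_end() method of nilfs_aops {}
 * address_space_operations.
 * @file: file struct of the file to be written
 * @mapping: address_space struct of the file
 * @pos: byte offset where the write started
 * @len: number of bytes requested
 * @copied: number of bytes actually copied
 * @page: the target page
 * @fsdata: filesystem-private data (unused)
 *
 * Counts the buffers newly dirtied by this copy, completes the write
 * with generic_write_end(), and commits the transaction opened in
 * nilfs_write_begin().
 */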
static int nilfs_write_end(struct file *file, struct address_space *mapping,
                           loff_t pos, unsigned len, unsigned copied,
                           struct page *page, void *fsdata)
{
        struct inode *inode = mapping->host;
        unsigned start = pos & (PAGE_CACHE_SIZE - 1);
        unsigned nr_dirty;
        int err;

        nr_dirty = nilfs_page_count_clean_buffers(page, start,
                                                  start + copied);
        copied = generic_write_end(file, mapping, pos, len, copied, page,
                                   fsdata);
        nilfs_set_file_dirty(inode, nr_dirty);
        err = nilfs_transaction_commit(inode->i_sb);
        return err ? : copied;
}

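/**
 * nilfs_direct_IO() - implement direct_IO() method of nilfs_aops {}
 * address_space_operations.
 *
 * Direct writes are refused; the zero return makes the generic code
 * fall back to buffered writing.  Direct reads are served through
 * blockdev_direct_IO().
 */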
static ssize_t
nilfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
                loff_t offset, unsigned long nr_segs)
{
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;
        ssize_t size;

        if (rw == WRITE)
                return 0; /* fall back to buffered write */

        /* Needs synchronization with the cleaner */
        size = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
                                  offset, nr_segs, nilfs_get_block, NULL);

        /*
         * In case of error, an extending write may have instantiated a few
         * blocks outside i_size. Trim these off again.
         */
        if (unlikely((rw & WRITE) && size < 0)) {
                loff_t isize = i_size_read(inode);
                loff_t end = offset + iov_length(iov, nr_segs);

                if (end > isize)
                        vmtruncate(inode, isize);
        }

        return size;
}

const struct address_space_operations nilfs_aops = {
        .writepage              = nilfs_writepage,
        .readpage               = nilfs_readpage,
        .sync_page              = block_sync_page,
        .writepages             = nilfs_writepages,
        .set_page_dirty         = nilfs_set_page_dirty,
        .readpages              = nilfs_readpages,
        .write_begin            = nilfs_write_begin,
        .write_end              = nilfs_write_end,
        /* .releasepage         = nilfs_releasepage, */
        .invalidatepage         = block_invalidatepage,
        .direct_IO              = nilfs_direct_IO,
        .is_partially_uptodate  = block_is_partially_uptodate,
};

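/**
 * nilfs_new_inode() - create a new inode in the directory @dir
 * @dir: inode of the parent directory
 * @mode: file type and access mode of the new inode
 *
 * Allocates an on-disk inode from the ifile of the parent's root,
 * initializes ownership, timestamps, flags and (for regular files,
 * directories and symlinks) the bmap, and hashes the in-core inode.
 * Returns the new inode or an ERR_PTR() value on failure.
 */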
struct inode *nilfs_new_inode(struct inode *dir, int mode)
{
        struct super_block *sb = dir->i_sb;
        struct nilfs_sb_info *sbi = NILFS_SB(sb);
        struct inode *inode;
        struct nilfs_inode_info *ii;
        struct nilfs_root *root;
        int err = -ENOMEM;
        ino_t ino;

        inode = new_inode(sb);
        if (unlikely(!inode))
                goto failed;

        mapping_set_gfp_mask(inode->i_mapping,
                             mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);

        root = NILFS_I(dir)->i_root;
        ii = NILFS_I(inode);
        ii->i_state = 1 << NILFS_I_NEW;
        ii->i_root = root;

        err = nilfs_ifile_create_inode(root->ifile, &ino, &ii->i_bh);
        if (unlikely(err))
                goto failed_ifile_create_inode;
        /* The reference count of i_bh is inherited from
           nilfs_mdt_read_block(). */

        atomic_inc(&root->inodes_count);
        inode_init_owner(inode, dir, mode);
        inode->i_ino = ino;
        inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;

        if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) {
                err = nilfs_bmap_read(ii->i_bmap, NULL);
                if (err < 0)
                        goto failed_bmap;

                set_bit(NILFS_I_BMAP, &ii->i_state);
                /* No lock is needed; iget() ensures it. */
        }

        ii->i_flags = NILFS_I(dir)->i_flags;
        if (S_ISLNK(mode))
                ii->i_flags &= ~(NILFS_IMMUTABLE_FL | NILFS_APPEND_FL);
        if (!S_ISDIR(mode))
                ii->i_flags &= ~NILFS_DIRSYNC_FL;

        /* ii->i_file_acl = 0; */
        /* ii->i_dir_acl = 0; */
        ii->i_dir_start_lookup = 0;
        nilfs_set_inode_flags(inode);
        spin_lock(&sbi->s_next_gen_lock);
        inode->i_generation = sbi->s_next_generation++;
        spin_unlock(&sbi->s_next_gen_lock);
        insert_inode_hash(inode);

        err = nilfs_init_acl(inode, dir);
        if (unlikely(err))
                goto failed_acl; /* never occurs. When nilfs_init_acl() is
                                    supported, proper cancellation of the
                                    above jobs should be considered. */

        return inode;

 failed_acl:
 failed_bmap:
        inode->i_nlink = 0;
        iput(inode);  /* raw_inode will be deleted through
                         generic_delete_inode() */
        goto failed;

 failed_ifile_create_inode:
        make_bad_inode(inode);
        iput(inode);  /* if i_nlink == 1, generic_forget_inode() will be
                         called */
 failed:
        return ERR_PTR(err);
}

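/**
 * nilfs_set_inode_flags() - propagate NILFS_*_FL flags to VFS i_flags
 * @inode: inode whose i_flags are to be updated
 *
 * Also clears __GFP_FS from the gfp mask of the inode's mapping so
 * that page allocations for it never recurse into the filesystem.
 */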
void nilfs_set_inode_flags(struct inode *inode)
{
        unsigned int flags = NILFS_I(inode)->i_flags;

        inode->i_flags &= ~(S_SYNC | S_APPEND | S_IMMUTABLE | S_NOATIME |
                            S_DIRSYNC);
        if (flags & NILFS_SYNC_FL)
                inode->i_flags |= S_SYNC;
        if (flags & NILFS_APPEND_FL)
                inode->i_flags |= S_APPEND;
        if (flags & NILFS_IMMUTABLE_FL)
                inode->i_flags |= S_IMMUTABLE;
#ifndef NILFS_ATIME_DISABLE
        if (flags & NILFS_NOATIME_FL)
#endif
                inode->i_flags |= S_NOATIME;
        if (flags & NILFS_DIRSYNC_FL)
                inode->i_flags |= S_DIRSYNC;
        mapping_set_gfp_mask(inode->i_mapping,
                             mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);
}

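/**
 * nilfs_read_inode_common() - fill an in-core inode from an on-disk inode
 * @inode: in-core inode to be filled in
 * @raw_inode: on-disk nilfs_inode to read from
 *
 * Returns 0 on success, -EINVAL if the on-disk inode turns out to be
 * deleted, or a negative error code returned by nilfs_bmap_read().
 */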
int nilfs_read_inode_common(struct inode *inode,
                            struct nilfs_inode *raw_inode)
{
        struct nilfs_inode_info *ii = NILFS_I(inode);
        int err;

        inode->i_mode = le16_to_cpu(raw_inode->i_mode);
        inode->i_uid = (uid_t)le32_to_cpu(raw_inode->i_uid);
        inode->i_gid = (gid_t)le32_to_cpu(raw_inode->i_gid);
        inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);
        inode->i_size = le64_to_cpu(raw_inode->i_size);
        /* The on-disk inode has no atime field; mtime is used instead. */
        inode->i_atime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
        inode->i_ctime.tv_sec = le64_to_cpu(raw_inode->i_ctime);
        inode->i_mtime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
        inode->i_atime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
        inode->i_ctime.tv_nsec = le32_to_cpu(raw_inode->i_ctime_nsec);
        inode->i_mtime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
        if (inode->i_nlink == 0 && inode->i_mode == 0)
                return -EINVAL; /* this inode is deleted */

        inode->i_blocks = le64_to_cpu(raw_inode->i_blocks);
        ii->i_flags = le32_to_cpu(raw_inode->i_flags);
#if 0
        ii->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
        ii->i_dir_acl = S_ISREG(inode->i_mode) ?
                0 : le32_to_cpu(raw_inode->i_dir_acl);
#endif
        ii->i_dir_start_lookup = 0;
        inode->i_generation = le32_to_cpu(raw_inode->i_generation);

        if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
            S_ISLNK(inode->i_mode)) {
                err = nilfs_bmap_read(ii->i_bmap, raw_inode);
                if (err < 0)
                        return err;
                set_bit(NILFS_I_BMAP, &ii->i_state);
                /* No lock is needed; iget() ensures it. */
        }
        return 0;
}

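/**
 * __nilfs_read_inode() - read an on-disk inode and set up the in-core one
 * @sb: super block instance
 * @root: nilfs_root the inode belongs to
 * @ino: inode number
 * @inode: new in-core inode to be initialized
 *
 * Maps the raw inode in its ifile block, fills the in-core inode, and
 * installs the inode and address space operations matching its type.
 */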
static int __nilfs_read_inode(struct super_block *sb,
                              struct nilfs_root *root, unsigned long ino,
                              struct inode *inode)
{
        struct nilfs_sb_info *sbi = NILFS_SB(sb);
        struct inode *dat = nilfs_dat_inode(sbi->s_nilfs);
        struct buffer_head *bh;
        struct nilfs_inode *raw_inode;
        int err;

        down_read(&NILFS_MDT(dat)->mi_sem);     /* XXX */
        err = nilfs_ifile_get_inode_block(root->ifile, ino, &bh);
        if (unlikely(err))
                goto bad_inode;

        raw_inode = nilfs_ifile_map_inode(root->ifile, ino, bh);

        err = nilfs_read_inode_common(inode, raw_inode);
        if (err)
                goto failed_unmap;

        if (S_ISREG(inode->i_mode)) {
                inode->i_op = &nilfs_file_inode_operations;
                inode->i_fop = &nilfs_file_operations;
                inode->i_mapping->a_ops = &nilfs_aops;
        } else if (S_ISDIR(inode->i_mode)) {
                inode->i_op = &nilfs_dir_inode_operations;
                inode->i_fop = &nilfs_dir_operations;
                inode->i_mapping->a_ops = &nilfs_aops;
        } else if (S_ISLNK(inode->i_mode)) {
                inode->i_op = &nilfs_symlink_inode_operations;
                inode->i_mapping->a_ops = &nilfs_aops;
        } else {
                inode->i_op = &nilfs_special_inode_operations;
                init_special_inode(
                        inode, inode->i_mode,
                        huge_decode_dev(le64_to_cpu(raw_inode->i_device_code)));
        }
        nilfs_ifile_unmap_inode(root->ifile, ino, bh);
        brelse(bh);
        up_read(&NILFS_MDT(dat)->mi_sem);       /* XXX */
        nilfs_set_inode_flags(inode);
        return 0;

 failed_unmap:
        nilfs_ifile_unmap_inode(root->ifile, ino, bh);
        brelse(bh);

 bad_inode:
        up_read(&NILFS_MDT(dat)->mi_sem);       /* XXX */
        return err;
}

static int nilfs_iget_test(struct inode *inode, void *opaque)
{
        struct nilfs_iget_args *args = opaque;
        struct nilfs_inode_info *ii;

        if (args->ino != inode->i_ino || args->root != NILFS_I(inode)->i_root)
                return 0;

        ii = NILFS_I(inode);
        if (!test_bit(NILFS_I_GCINODE, &ii->i_state))
                return !args->for_gc;

        return args->for_gc && args->cno == ii->i_cno;
}

static int nilfs_iget_set(struct inode *inode, void *opaque)
{
        struct nilfs_iget_args *args = opaque;

        inode->i_ino = args->ino;
        if (args->for_gc) {
                NILFS_I(inode)->i_state = 1 << NILFS_I_GCINODE;
                NILFS_I(inode)->i_cno = args->cno;
                NILFS_I(inode)->i_root = NULL;
        } else {
                if (args->root && args->ino == NILFS_ROOT_INO)
                        nilfs_get_root(args->root);
                NILFS_I(inode)->i_root = args->root;
        }
        return 0;
}

struct inode *nilfs_ilookup(struct super_block *sb, struct nilfs_root *root,
                            unsigned long ino)
{
        struct nilfs_iget_args args = {
                .ino = ino, .root = root, .cno = 0, .for_gc = 0
        };

        return ilookup5(sb, ino, nilfs_iget_test, &args);
}

struct inode *nilfs_iget_locked(struct super_block *sb, struct nilfs_root *root,
                                unsigned long ino)
{
        struct nilfs_iget_args args = {
                .ino = ino, .root = root, .cno = 0, .for_gc = 0
        };

        return iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
}

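/**
 * nilfs_iget() - look up or read an inode
 * @sb: super block instance
 * @root: nilfs_root the inode belongs to
 * @ino: inode number
 *
 * Returns the cached inode if one exists; otherwise a new in-core
 * inode is allocated, read with __nilfs_read_inode(), and unlocked.
 * Returns an ERR_PTR() value on failure.
 */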
struct inode *nilfs_iget(struct super_block *sb, struct nilfs_root *root,
                         unsigned long ino)
{
        struct inode *inode;
        int err;

        inode = nilfs_iget_locked(sb, root, ino);
        if (unlikely(!inode))
                return ERR_PTR(-ENOMEM);
        if (!(inode->i_state & I_NEW))
                return inode;

        err = __nilfs_read_inode(sb, root, ino, inode);
        if (unlikely(err)) {
                iget_failed(inode);
                return ERR_PTR(err);
        }
        unlock_new_inode(inode);
        return inode;
}

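/**
 * nilfs_iget_for_gc() - get a GC inode for checkpoint number @cno
 * @sb: super block instance
 * @ino: inode number
 * @cno: checkpoint number the inode belongs to
 *
 * GC inodes carry the NILFS_I_GCINODE state bit and are matched by
 * both @ino and @cno in nilfs_iget_test(), so the same inode number
 * can be cached once per checkpoint during garbage collection.
 */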
struct inode *nilfs_iget_for_gc(struct super_block *sb, unsigned long ino,
                                __u64 cno)
{
        struct nilfs_iget_args args = {
                .ino = ino, .root = NULL, .cno = cno, .for_gc = 1
        };
        struct inode *inode;
        int err;

        inode = iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
        if (unlikely(!inode))
                return ERR_PTR(-ENOMEM);
        if (!(inode->i_state & I_NEW))
                return inode;

        err = nilfs_init_gcinode(inode);
        if (unlikely(err)) {
                iget_failed(inode);
                return ERR_PTR(err);
        }
        unlock_new_inode(inode);
        return inode;
}

void nilfs_write_inode_common(struct inode *inode,
                              struct nilfs_inode *raw_inode, int has_bmap)
{
        struct nilfs_inode_info *ii = NILFS_I(inode);

        raw_inode->i_mode = cpu_to_le16(inode->i_mode);
        raw_inode->i_uid = cpu_to_le32(inode->i_uid);
        raw_inode->i_gid = cpu_to_le32(inode->i_gid);
        raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
        raw_inode->i_size = cpu_to_le64(inode->i_size);
        raw_inode->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
        raw_inode->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
        raw_inode->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
        raw_inode->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
        raw_inode->i_blocks = cpu_to_le64(inode->i_blocks);

        raw_inode->i_flags = cpu_to_le32(ii->i_flags);
        raw_inode->i_generation = cpu_to_le32(inode->i_generation);

        if (has_bmap)
                nilfs_bmap_write(ii->i_bmap, raw_inode);
        else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
                raw_inode->i_device_code =
                        cpu_to_le64(huge_encode_dev(inode->i_rdev));
        /* When the on-disk inode format is extended, nilfs->ns_inode_size
           should be checked before filling in the appended fields. */
}

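/**
 * nilfs_update_inode() - write the in-core inode into its ifile block
 * @inode: inode to be written back
 * @ibh: buffer head of the block holding the on-disk inode
 */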
void nilfs_update_inode(struct inode *inode, struct buffer_head *ibh)
{
        ino_t ino = inode->i_ino;
        struct nilfs_inode_info *ii = NILFS_I(inode);
        struct inode *ifile = ii->i_root->ifile;
        struct nilfs_inode *raw_inode;

        raw_inode = nilfs_ifile_map_inode(ifile, ino, ibh);

        if (test_and_clear_bit(NILFS_I_NEW, &ii->i_state))
                memset(raw_inode, 0, NILFS_MDT(ifile)->mi_entry_size);
        set_bit(NILFS_I_INODE_DIRTY, &ii->i_state);

        nilfs_write_inode_common(inode, raw_inode, 0);
                /* XXX: calling with has_bmap = 0 is a workaround to avoid
                   a deadlock on the bmap; it delays the update of i_bmap
                   until just before writing. */
        nilfs_ifile_unmap_inode(ifile, ino, ibh);
}

#define NILFS_MAX_TRUNCATE_BLOCKS       16384  /* 64MB for 4KB block */

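/**
 * nilfs_truncate_bmap() - remove blocks of a file from block @from on
 * @ii: nilfs inode info of the target file
 * @from: first file block number to be removed
 *
 * Truncates at most NILFS_MAX_TRUNCATE_BLOCKS blocks per pass,
 * relaxing lock pressure between passes, and retries once when
 * nilfs_bmap_truncate() fails with -ENOMEM.
 */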
static void nilfs_truncate_bmap(struct nilfs_inode_info *ii,
                                unsigned long from)
{
        unsigned long b;
        int ret;

        if (!test_bit(NILFS_I_BMAP, &ii->i_state))
                return;
repeat:
        ret = nilfs_bmap_last_key(ii->i_bmap, &b);
        if (ret == -ENOENT)
                return;
        else if (ret < 0)
                goto failed;

        if (b < from)
                return;

        b -= min_t(unsigned long, NILFS_MAX_TRUNCATE_BLOCKS, b - from);
        ret = nilfs_bmap_truncate(ii->i_bmap, b);
        nilfs_relax_pressure_in_lock(ii->vfs_inode.i_sb);
        if (!ret || (ret == -ENOMEM &&
                     nilfs_bmap_truncate(ii->i_bmap, b) == 0))
                goto repeat;

failed:
        nilfs_warning(ii->vfs_inode.i_sb, __func__,
                      "failed to truncate bmap (ino=%lu, err=%d)",
                      ii->vfs_inode.i_ino, ret);
}

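/**
 * nilfs_truncate() - implement the truncate inode operation
 * @inode: inode to be truncated to its current i_size
 */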
void nilfs_truncate(struct inode *inode)
{
        unsigned long blkoff;
        unsigned int blocksize;
        struct nilfs_transaction_info ti;
        struct super_block *sb = inode->i_sb;
        struct nilfs_inode_info *ii = NILFS_I(inode);

        if (!test_bit(NILFS_I_BMAP, &ii->i_state))
                return;
        if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
                return;

        blocksize = sb->s_blocksize;
        blkoff = (inode->i_size + blocksize - 1) >> sb->s_blocksize_bits;
        nilfs_transaction_begin(sb, &ti, 0); /* never fails */

        block_truncate_page(inode->i_mapping, inode->i_size, nilfs_get_block);

        nilfs_truncate_bmap(ii, blkoff);

        inode->i_mtime = inode->i_ctime = CURRENT_TIME;
        if (IS_SYNC(inode))
                nilfs_set_transaction_flag(NILFS_TI_SYNC);

        nilfs_mark_inode_dirty(inode);
        nilfs_set_file_dirty(inode, 0);
        nilfs_transaction_commit(sb);
        /* This may construct a logical segment and may fail in sync mode,
           but truncate has no return value. */
}

static void nilfs_clear_inode(struct inode *inode)
{
        struct nilfs_inode_info *ii = NILFS_I(inode);
        struct nilfs_mdt_info *mdi = NILFS_MDT(inode);

        /*
         * Free the resources allocated in nilfs_read_inode() here.
         */
        BUG_ON(!list_empty(&ii->i_dirty));
        brelse(ii->i_bh);
        ii->i_bh = NULL;

        if (mdi && mdi->mi_palloc_cache)
                nilfs_palloc_destroy_cache(inode);

        if (test_bit(NILFS_I_BMAP, &ii->i_state))
                nilfs_bmap_clear(ii->i_bmap);

        nilfs_btnode_cache_clear(&ii->i_btnode_cache);

        if (ii->i_root && inode->i_ino == NILFS_ROOT_INO)
                nilfs_put_root(ii->i_root);
}

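/**
 * nilfs_evict_inode() - implement the evict_inode super operation
 * @inode: inode being evicted from the inode cache
 *
 * For inodes that are still linked, have no root, or are bad, only the
 * page cache and in-core state are torn down.  For unlinked inodes,
 * the blocks are truncated and the on-disk inode is deleted from the
 * ifile inside a transaction.
 */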
void nilfs_evict_inode(struct inode *inode)
{
        struct nilfs_transaction_info ti;
        struct super_block *sb = inode->i_sb;
        struct nilfs_inode_info *ii = NILFS_I(inode);

        if (inode->i_nlink || !ii->i_root || unlikely(is_bad_inode(inode))) {
                if (inode->i_data.nrpages)
                        truncate_inode_pages(&inode->i_data, 0);
                end_writeback(inode);
                nilfs_clear_inode(inode);
                return;
        }
        nilfs_transaction_begin(sb, &ti, 0); /* never fails */

        if (inode->i_data.nrpages)
                truncate_inode_pages(&inode->i_data, 0);

        /* TODO: some of the following operations may fail.  */
        nilfs_truncate_bmap(ii, 0);
        nilfs_mark_inode_dirty(inode);
        end_writeback(inode);

        nilfs_ifile_delete_inode(ii->i_root->ifile, inode->i_ino);
        atomic_dec(&ii->i_root->inodes_count);

        nilfs_clear_inode(inode);

        if (IS_SYNC(inode))
                nilfs_set_transaction_flag(NILFS_TI_SYNC);
        nilfs_transaction_commit(sb);
        /* This may construct a logical segment and may fail in sync mode,
           but evict_inode has no return value. */
}

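/**
 * nilfs_setattr() - implement the setattr inode operation
 * @dentry: dentry of the target inode
 * @iattr: attributes to be applied
 *
 * Size changes and attribute updates are performed inside a
 * transaction so that they land in a consistent log.
 */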
int nilfs_setattr(struct dentry *dentry, struct iattr *iattr)
{
        struct nilfs_transaction_info ti;
        struct inode *inode = dentry->d_inode;
        struct super_block *sb = inode->i_sb;
        int err;

        err = inode_change_ok(inode, iattr);
        if (err)
                return err;

        err = nilfs_transaction_begin(sb, &ti, 0);
        if (unlikely(err))
                return err;

        if ((iattr->ia_valid & ATTR_SIZE) &&
            iattr->ia_size != i_size_read(inode)) {
                err = vmtruncate(inode, iattr->ia_size);
                if (unlikely(err))
                        goto out_err;
        }

        setattr_copy(inode, iattr);
        mark_inode_dirty(inode);

        if (iattr->ia_valid & ATTR_MODE) {
                err = nilfs_acl_chmod(inode);
                if (unlikely(err))
                        goto out_err;
        }

        return nilfs_transaction_commit(sb);

out_err:
        nilfs_transaction_abort(sb);
        return err;
}

int nilfs_permission(struct inode *inode, int mask, unsigned int flags)
{
        struct nilfs_root *root;

        if (flags & IPERM_FLAG_RCU)
                return -ECHILD;

        root = NILFS_I(inode)->i_root;
        if ((mask & MAY_WRITE) && root &&
            root->cno != NILFS_CPTREE_CURRENT_CNO)
                return -EROFS; /* snapshot is not writable */

        return generic_permission(inode, mask, flags, NULL);
}

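/**
 * nilfs_load_inode_block() - get the buffer head of the inode's ifile block
 * @inode: target inode
 * @pbh: place to return the buffer head
 *
 * The buffer head is cached in ii->i_bh on first use; the caller
 * receives an extra reference and must drop it with brelse().
 */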
int nilfs_load_inode_block(struct inode *inode, struct buffer_head **pbh)
{
        struct nilfs_sb_info *sbi = NILFS_SB(inode->i_sb);
        struct nilfs_inode_info *ii = NILFS_I(inode);
        int err;

        spin_lock(&sbi->s_inode_lock);
        if (ii->i_bh == NULL) {
                spin_unlock(&sbi->s_inode_lock);
                err = nilfs_ifile_get_inode_block(ii->i_root->ifile,
                                                  inode->i_ino, pbh);
                if (unlikely(err))
                        return err;
                spin_lock(&sbi->s_inode_lock);
                if (ii->i_bh == NULL)
                        ii->i_bh = *pbh;
                else {
                        brelse(*pbh);
                        *pbh = ii->i_bh;
                }
        } else
                *pbh = ii->i_bh;

        get_bh(*pbh);
        spin_unlock(&sbi->s_inode_lock);
        return 0;
}

int nilfs_inode_dirty(struct inode *inode)
{
        struct nilfs_inode_info *ii = NILFS_I(inode);
        struct nilfs_sb_info *sbi = NILFS_SB(inode->i_sb);
        int ret = 0;

        if (!list_empty(&ii->i_dirty)) {
                spin_lock(&sbi->s_inode_lock);
                ret = test_bit(NILFS_I_DIRTY, &ii->i_state) ||
                        test_bit(NILFS_I_BUSY, &ii->i_state);
                spin_unlock(&sbi->s_inode_lock);
        }
        return ret;
}

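/**
 * nilfs_set_file_dirty() - register a file as dirty for log writing
 * @inode: target inode
 * @nr_dirty: number of blocks newly dirtied
 *
 * Accounts the dirty blocks and, if the inode is not yet queued or
 * busy, grabs a reference and adds it to the per-superblock list of
 * dirty files.
 */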
int nilfs_set_file_dirty(struct inode *inode, unsigned nr_dirty)
{
        struct nilfs_sb_info *sbi = NILFS_SB(inode->i_sb);
        struct nilfs_inode_info *ii = NILFS_I(inode);

        atomic_add(nr_dirty, &sbi->s_nilfs->ns_ndirtyblks);

        if (test_and_set_bit(NILFS_I_DIRTY, &ii->i_state))
                return 0;

        spin_lock(&sbi->s_inode_lock);
        if (!test_bit(NILFS_I_QUEUED, &ii->i_state) &&
            !test_bit(NILFS_I_BUSY, &ii->i_state)) {
                /* Because this routine may race with nilfs_dispose_list(),
                   we have to check NILFS_I_QUEUED here, too. */
                if (list_empty(&ii->i_dirty) && igrab(inode) == NULL) {
                        /* This will happen when somebody is freeing
                           this inode. */
                        nilfs_warning(sbi->s_super, __func__,
                                      "cannot get inode (ino=%lu)\n",
                                      inode->i_ino);
                        spin_unlock(&sbi->s_inode_lock);
                        return -EINVAL; /* NILFS_I_DIRTY may remain for
                                           the inode being freed */
                }
                list_del(&ii->i_dirty);
                list_add_tail(&ii->i_dirty, &sbi->s_dirty_files);
                set_bit(NILFS_I_QUEUED, &ii->i_state);
        }
        spin_unlock(&sbi->s_inode_lock);
        return 0;
}

int nilfs_mark_inode_dirty(struct inode *inode)
{
        struct buffer_head *ibh;
        int err;

        err = nilfs_load_inode_block(inode, &ibh);
        if (unlikely(err)) {
                nilfs_warning(inode->i_sb, __func__,
                              "failed to reget inode block.\n");
                return err;
        }
        nilfs_update_inode(inode, ibh);
        nilfs_mdt_mark_buffer_dirty(ibh);
        nilfs_mdt_mark_dirty(NILFS_I(inode)->i_root->ifile);
        brelse(ibh);
        return 0;
}

/**
 * nilfs_dirty_inode - reflect changes on given inode to an inode block.
 * @inode: inode of the file to be registered.
 *
 * nilfs_dirty_inode() loads an inode block containing the specified
 * @inode and copies data from the in-core inode into the corresponding
 * nilfs_inode entry in the inode block. This operation is excluded from
 * the segment construction. This function can be called both as a single
 * operation and as a part of indivisible file operations.
 */
void nilfs_dirty_inode(struct inode *inode)
{
        struct nilfs_transaction_info ti;
        struct nilfs_mdt_info *mdi = NILFS_MDT(inode);

        if (is_bad_inode(inode)) {
                nilfs_warning(inode->i_sb, __func__,
                              "tried to mark bad_inode dirty. ignored.\n");
                dump_stack();
                return;
        }
        if (mdi) {
                nilfs_mdt_mark_dirty(inode);
                return;
        }
        nilfs_transaction_begin(inode->i_sb, &ti, 0);
        nilfs_mark_inode_dirty(inode);
        nilfs_transaction_commit(inode->i_sb); /* never fails */
}

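/**
 * nilfs_fiemap() - implement the fiemap inode operation
 * @inode: target inode
 * @fieinfo: fiemap extent information to be filled in
 * @start: start offset of the range to map, in bytes
 * @len: length of the range, in bytes
 *
 * Walks both the bmap and the uncommitted (delayed allocation)
 * extents, merging contiguous blocks into fiemap extents.
 */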
int nilfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
                 __u64 start, __u64 len)
{
        struct the_nilfs *nilfs = NILFS_I_NILFS(inode);
        __u64 logical = 0, phys = 0, size = 0;
        __u32 flags = 0;
        loff_t isize;
        sector_t blkoff, end_blkoff;
        sector_t delalloc_blkoff;
        unsigned long delalloc_blklen;
        unsigned int blkbits = inode->i_blkbits;
        int ret, n;

        ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
        if (ret)
                return ret;

        mutex_lock(&inode->i_mutex);

        isize = i_size_read(inode);

        blkoff = start >> blkbits;
        end_blkoff = (start + len - 1) >> blkbits;

        delalloc_blklen = nilfs_find_uncommitted_extent(inode, blkoff,
                                                        &delalloc_blkoff);

        do {
                __u64 blkphy;
                unsigned int maxblocks;

                if (delalloc_blklen && blkoff == delalloc_blkoff) {
                        if (size) {
                                /* End of the current extent */
                                ret = fiemap_fill_next_extent(
                                        fieinfo, logical, phys, size, flags);
                                if (ret)
                                        break;
                        }
                        if (blkoff > end_blkoff)
                                break;

                        flags = FIEMAP_EXTENT_MERGED | FIEMAP_EXTENT_DELALLOC;
                        logical = blkoff << blkbits;
                        phys = 0;
                        size = delalloc_blklen << blkbits;

                        blkoff = delalloc_blkoff + delalloc_blklen;
                        delalloc_blklen = nilfs_find_uncommitted_extent(
                                inode, blkoff, &delalloc_blkoff);
                        continue;
                }

                /*
                 * Limit the number of blocks that we look up so as
                 * not to get into the next delayed allocation extent.
                 */
                maxblocks = INT_MAX;
                if (delalloc_blklen)
                        maxblocks = min_t(sector_t, delalloc_blkoff - blkoff,
                                          maxblocks);
                blkphy = 0;

                down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
                n = nilfs_bmap_lookup_contig(
                        NILFS_I(inode)->i_bmap, blkoff, &blkphy, maxblocks);
                up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);

                if (n < 0) {
                        int past_eof;

                        if (unlikely(n != -ENOENT))
                                break; /* error */

                        /* HOLE */
                        blkoff++;
                        past_eof = ((blkoff << blkbits) >= isize);

                        if (size) {
                                /* End of the current extent */

                                if (past_eof)
                                        flags |= FIEMAP_EXTENT_LAST;

                                ret = fiemap_fill_next_extent(
                                        fieinfo, logical, phys, size, flags);
                                if (ret)
                                        break;
                                size = 0;
                        }
                        if (blkoff > end_blkoff || past_eof)
                                break;
                } else {
                        if (size) {
                                if (phys && blkphy << blkbits == phys + size) {
                                        /* The current extent goes on */
                                        size += n << blkbits;
                                } else {
                                        /* Terminate the current extent */
                                        ret = fiemap_fill_next_extent(
                                                fieinfo, logical, phys, size,
                                                flags);
                                        if (ret || blkoff > end_blkoff)
                                                break;

                                        /* Start another extent */
                                        flags = FIEMAP_EXTENT_MERGED;
                                        logical = blkoff << blkbits;
                                        phys = blkphy << blkbits;
                                        size = n << blkbits;
                                }
                        } else {
                                /* Start a new extent */
                                flags = FIEMAP_EXTENT_MERGED;
                                logical = blkoff << blkbits;
                                phys = blkphy << blkbits;
                                size = n << blkbits;
                        }
                        blkoff += n;
                }
                cond_resched();
        } while (true);

        /* If ret is 1 then we just hit the end of the extent array */
        if (ret == 1)
                ret = 0;

        mutex_unlock(&inode->i_mutex);
        return ret;
}