/*
 *  linux/fs/ext4/file.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 * from
 *
 *  linux/fs/minix/file.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  ext4 fs regular file handling primitives
 *
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *	(jj@sunsite.ms.mff.cuni.cz)
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/path.h>
#include <linux/dax.h>
#include <linux/quotaops.h>
#include <linux/pagevec.h>
#include <linux/uio.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"

/*
 * Called when an inode is released. Note that this is different
 * from ext4_file_open: open gets called at every open, but release
 * gets called only when /all/ the files are closed.
 */
static int ext4_release_file(struct inode *inode, struct file *filp)
{
	if (ext4_test_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE)) {
		ext4_alloc_da_blocks(inode);
		ext4_clear_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
	}
	/* if we are the last writer on the inode, drop the block reservation */
	if ((filp->f_mode & FMODE_WRITE) &&
			(atomic_read(&inode->i_writecount) == 1) &&
			!EXT4_I(inode)->i_reserved_data_blocks)
	{
		down_write(&EXT4_I(inode)->i_data_sem);
		ext4_discard_preallocations(inode);
		up_write(&EXT4_I(inode)->i_data_sem);
	}
	if (is_dx(inode) && filp->private_data)
		ext4_htree_free_dir_info(filp->private_data);

	return 0;
}

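/*
 * Wait until all pending conversions of unwritten extents on this inode
 * (tracked by the i_unwritten counter) have completed.
 */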
static void ext4_unwritten_wait(struct inode *inode)
{
	wait_queue_head_t *wq = ext4_ioend_wq(inode);

	wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_unwritten) == 0));
}

/*
 * This tests whether the IO in question is block-aligned or not.
 * Ext4 utilizes unwritten extents when hole-filling during direct IO, and they
 * are converted to written only after the IO is complete.  Until they are
 * mapped, these blocks appear as holes, so dio_zero_block() will assume that
 * it needs to zero out portions of the start and/or end block.  If 2 AIO
 * threads are at work on the same unwritten block, they must be synchronized
 * or one thread will zero the other's data, causing corruption.
 */
static int
ext4_unaligned_aio(struct inode *inode, struct iov_iter *from, loff_t pos)
{
	struct super_block *sb = inode->i_sb;
	int blockmask = sb->s_blocksize - 1;

	if (pos >= i_size_read(inode))
		return 0;

	if ((pos | iov_iter_alignment(from)) & blockmask)
		return 1;

	return 0;
}

/* Is IO overwriting allocated and initialized blocks? */
static bool ext4_overwrite_io(struct inode *inode, loff_t pos, loff_t len)
{
	struct ext4_map_blocks map;
	unsigned int blkbits = inode->i_blkbits;
	int err, blklen;

	if (pos + len > i_size_read(inode))
		return false;

	map.m_lblk = pos >> blkbits;
	map.m_len = EXT4_MAX_BLOCKS(len, pos, blkbits);
	blklen = map.m_len;

	err = ext4_map_blocks(NULL, inode, &map, 0);
	/*
	 * 'err == blklen' means that all of the blocks have been
	 * preallocated, regardless of whether they have been initialized
	 * or not.  To exclude unwritten extents, we need to check m_flags.
	 */
	return err == blklen && (map.m_flags & EXT4_MAP_MAPPED);
}

static ssize_t ext4_write_checks(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	ret = generic_write_checks(iocb, from);
	if (ret <= 0)
		return ret;
	/*
	 * If we have encountered a bitmap-format file, the size limit
	 * is smaller than s_maxbytes, which is for extent-mapped files.
	 */
	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

		if (iocb->ki_pos >= sbi->s_bitmap_maxbytes)
			return -EFBIG;
		iov_iter_truncate(from, sbi->s_bitmap_maxbytes - iocb->ki_pos);
	}
	return iov_iter_count(from);
}

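/*
 * Write path entry point.  All writes are serialized by inode_lock();
 * unaligned direct AIO additionally waits for in-flight unwritten extent
 * conversions, and pure overwrites are flagged via iocb->private so the
 * direct IO path can use a lighter locking scheme.
 */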
static ssize_t
ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	int o_direct = iocb->ki_flags & IOCB_DIRECT;
	int unaligned_aio = 0;
	int overwrite = 0;
	ssize_t ret;

	inode_lock(inode);
	ret = ext4_write_checks(iocb, from);
	if (ret <= 0)
		goto out;

	/*
	 * Unaligned direct AIO must be serialized among each other as zeroing
	 * of partial blocks of two competing unaligned AIOs can result in data
	 * corruption.
	 */
	if (o_direct && ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) &&
	    !is_sync_kiocb(iocb) &&
	    ext4_unaligned_aio(inode, from, iocb->ki_pos)) {
		unaligned_aio = 1;
		ext4_unwritten_wait(inode);
	}

	iocb->private = &overwrite;
	/* Check whether we do a DIO overwrite or not */
	if (o_direct && ext4_should_dioread_nolock(inode) && !unaligned_aio &&
	    ext4_overwrite_io(inode, iocb->ki_pos, iov_iter_count(from)))
		overwrite = 1;

	ret = __generic_file_write_iter(iocb, from);
	inode_unlock(inode);

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);

	return ret;

out:
	inode_unlock(inode);
	return ret;
}

#ifdef CONFIG_FS_DAX
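/*
 * DAX page fault handler.  Write faults run under a journal handle (blocks
 * may need to be allocated) and with i_mmap_sem held for reading, which
 * serializes the fault against truncate and hole punching.
 */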
static int ext4_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	int result;
	handle_t *handle = NULL;
	struct inode *inode = file_inode(vma->vm_file);
	struct super_block *sb = inode->i_sb;
	bool write = vmf->flags & FAULT_FLAG_WRITE;

	if (write) {
		sb_start_pagefault(sb);
		file_update_time(vma->vm_file);
		down_read(&EXT4_I(inode)->i_mmap_sem);
		handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE,
						EXT4_DATA_TRANS_BLOCKS(sb));
	} else
		down_read(&EXT4_I(inode)->i_mmap_sem);

	if (IS_ERR(handle))
		result = VM_FAULT_SIGBUS;
	else
		result = dax_fault(vma, vmf, ext4_dax_get_block);

	if (write) {
		if (!IS_ERR(handle))
			ext4_journal_stop(handle);
		up_read(&EXT4_I(inode)->i_mmap_sem);
		sb_end_pagefault(sb);
	} else
		up_read(&EXT4_I(inode)->i_mmap_sem);

	return result;
}

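/*
 * Huge (PMD-sized) DAX fault handler.  Same locking and journalling rules
 * as ext4_dax_fault(), but the handle reserves enough credits for a
 * PMD-sized allocation.
 */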
static int ext4_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
						pmd_t *pmd, unsigned int flags)
{
	int result;
	handle_t *handle = NULL;
	struct inode *inode = file_inode(vma->vm_file);
	struct super_block *sb = inode->i_sb;
	bool write = flags & FAULT_FLAG_WRITE;

	if (write) {
		sb_start_pagefault(sb);
		file_update_time(vma->vm_file);
		down_read(&EXT4_I(inode)->i_mmap_sem);
		handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE,
				ext4_chunk_trans_blocks(inode,
							PMD_SIZE / PAGE_SIZE));
	} else
		down_read(&EXT4_I(inode)->i_mmap_sem);

	if (IS_ERR(handle))
		result = VM_FAULT_SIGBUS;
	else
		result = dax_pmd_fault(vma, addr, pmd, flags,
					 ext4_dax_get_block);

	if (write) {
		if (!IS_ERR(handle))
			ext4_journal_stop(handle);
		up_read(&EXT4_I(inode)->i_mmap_sem);
		sb_end_pagefault(sb);
	} else
		up_read(&EXT4_I(inode)->i_mmap_sem);

	return result;
}

/*
 * Handle write faults for VM_MIXEDMAP mappings.  As in the ext4_dax_fault()
 * handler, we check for races against truncate.  Note that since we cycle
 * through i_mmap_sem, we are sure that any hole punching that began before
 * we were called has finished by now, so if it covered part of the file we
 * are working on, our pte will get unmapped and the check for pte_same() in
 * wp_pfn_shared() fails.  Thus the fault gets retried and things work out
 * as desired.
 */
static int ext4_dax_pfn_mkwrite(struct vm_area_struct *vma,
				struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vma->vm_file);
	struct super_block *sb = inode->i_sb;
	loff_t size;
	int ret;

	sb_start_pagefault(sb);
	file_update_time(vma->vm_file);
	down_read(&EXT4_I(inode)->i_mmap_sem);
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (vmf->pgoff >= size)
		ret = VM_FAULT_SIGBUS;
	else
		ret = dax_pfn_mkwrite(vma, vmf);
	up_read(&EXT4_I(inode)->i_mmap_sem);
	sb_end_pagefault(sb);

	return ret;
}

static const struct vm_operations_struct ext4_dax_vm_ops = {
	.fault		= ext4_dax_fault,
	.pmd_fault	= ext4_dax_pmd_fault,
	.page_mkwrite	= ext4_dax_fault,
	.pfn_mkwrite	= ext4_dax_pfn_mkwrite,
};
#else
#define ext4_dax_vm_ops	ext4_file_vm_ops
#endif

static const struct vm_operations_struct ext4_file_vm_ops = {
	.fault		= ext4_filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= ext4_page_mkwrite,
};

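/*
 * mmap entry point: encrypted inodes must have their key available before
 * they may be mapped; DAX files get ext4_dax_vm_ops and are marked
 * VM_MIXEDMAP | VM_HUGEPAGE.
 */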
static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file->f_mapping->host;

	if (ext4_encrypted_inode(inode)) {
		int err = fscrypt_get_encryption_info(inode);
		if (err)
			return 0;
		if (!fscrypt_has_encryption_key(inode))
			return -ENOKEY;
	}
	file_accessed(file);
	if (IS_DAX(file_inode(file))) {
		vma->vm_ops = &ext4_dax_vm_ops;
		vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
	} else {
		vma->vm_ops = &ext4_file_vm_ops;
	}
	return 0;
}

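/*
 * Called at every open.  On the first open after a read-write mount this
 * records the mount point in the superblock's s_last_mounted field, then
 * checks encryption keys and contexts and, for writers, attaches the
 * jbd2 inode.
 */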
static int ext4_file_open(struct inode * inode, struct file * filp)
{
	struct super_block *sb = inode->i_sb;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct vfsmount *mnt = filp->f_path.mnt;
	struct dentry *dir;
	struct path path;
	char buf[64], *cp;
	int ret;

	if (unlikely(!(sbi->s_mount_flags & EXT4_MF_MNTDIR_SAMPLED) &&
		     !(sb->s_flags & MS_RDONLY))) {
		sbi->s_mount_flags |= EXT4_MF_MNTDIR_SAMPLED;
		/*
		 * Sample where the filesystem has been mounted and
		 * store it in the superblock for sysadmin convenience
		 * when trying to sort through large numbers of block
		 * devices or filesystem images.
		 */
		memset(buf, 0, sizeof(buf));
		path.mnt = mnt;
		path.dentry = mnt->mnt_root;
		cp = d_path(&path, buf, sizeof(buf));
		if (!IS_ERR(cp)) {
			handle_t *handle;
			int err;

			handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 1);
			if (IS_ERR(handle))
				return PTR_ERR(handle);
			BUFFER_TRACE(sbi->s_sbh, "get_write_access");
			err = ext4_journal_get_write_access(handle, sbi->s_sbh);
			if (err) {
				ext4_journal_stop(handle);
				return err;
			}
			strlcpy(sbi->s_es->s_last_mounted, cp,
				sizeof(sbi->s_es->s_last_mounted));
			ext4_handle_dirty_super(handle, sb);
			ext4_journal_stop(handle);
		}
	}
	if (ext4_encrypted_inode(inode)) {
		ret = fscrypt_get_encryption_info(inode);
		if (ret)
			return -EACCES;
		if (!fscrypt_has_encryption_key(inode))
			return -ENOKEY;
	}

	dir = dget_parent(file_dentry(filp));
	if (ext4_encrypted_inode(d_inode(dir)) &&
	    !fscrypt_has_permitted_context(d_inode(dir), inode)) {
		ext4_warning(inode->i_sb,
			     "Inconsistent encryption contexts: %lu/%lu",
			     (unsigned long) d_inode(dir)->i_ino,
			     (unsigned long) inode->i_ino);
		dput(dir);
		return -EPERM;
	}
	dput(dir);
	/*
	 * Set up the jbd2_inode if we are opening the inode for
	 * writing and the journal is present
	 */
	if (filp->f_mode & FMODE_WRITE) {
		ret = ext4_inode_attach_jinode(inode);
		if (ret < 0)
			return ret;
	}
	return dquot_file_open(inode, filp);
}

/*
 * Here we use ext4_map_blocks() to get a block mapping for an extent-based
 * file rather than ext4_ext_walk_space() because we can implement
 * SEEK_DATA/SEEK_HOLE for block-mapped and extent-mapped files in the same
 * function.  When the extent status tree has been fully implemented, it will
 * track all extent status for a file and we can directly use it to
 * retrieve the offset for SEEK_DATA/SEEK_HOLE.
 */

/*
 * When we retrieve the offset for SEEK_DATA/SEEK_HOLE, we would need to
 * look up the page cache to check whether there is data in the range
 * [startoff, endoff] because, if this range contains an unwritten extent,
 * we treat the extent as data or a hole according to whether the
 * page cache has data or not.
 */
static int ext4_find_unwritten_pgoff(struct inode *inode,
				     int whence,
				     ext4_lblk_t end_blk,
				     loff_t *offset)
{
	struct pagevec pvec;
	unsigned int blkbits;
	pgoff_t index;
	pgoff_t end;
	loff_t endoff;
	loff_t startoff;
	loff_t lastoff;
	int found = 0;

	blkbits = inode->i_sb->s_blocksize_bits;
	startoff = *offset;
	lastoff = startoff;
	endoff = (loff_t)end_blk << blkbits;

	index = startoff >> PAGE_SHIFT;
	end = endoff >> PAGE_SHIFT;

	pagevec_init(&pvec, 0);
	do {
		int i, num;
		unsigned long nr_pages;

		num = min_t(pgoff_t, end - index, PAGEVEC_SIZE);
		nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
					  (pgoff_t)num);
		if (nr_pages == 0) {
			if (whence == SEEK_DATA)
				break;

			BUG_ON(whence != SEEK_HOLE);
			/*
			 * If this is the first iteration of the loop, or the
			 * offset is not beyond the end offset, there is a
			 * hole at this offset.
			 */
			if (lastoff == startoff || lastoff < endoff)
				found = 1;
			break;
		}

		/*
		 * If this is the first iteration of the loop and the offset
		 * is smaller than the offset of the first page, there is a
		 * hole at this offset.
		 */
		if (lastoff == startoff && whence == SEEK_HOLE &&
		    lastoff < page_offset(pvec.pages[0])) {
			found = 1;
			break;
		}

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			struct buffer_head *bh, *head;

			/*
			 * If the current offset is still within the given
			 * range but the page is already beyond it, the rest
			 * of the range is a hole.
			 */
			if (lastoff < endoff && whence == SEEK_HOLE &&
			    page->index > end) {
				found = 1;
				*offset = lastoff;
				goto out;
			}

			lock_page(page);

			if (unlikely(page->mapping != inode->i_mapping)) {
				unlock_page(page);
				continue;
			}

			if (!page_has_buffers(page)) {
				unlock_page(page);
				continue;
			}

			if (page_has_buffers(page)) {
				lastoff = page_offset(page);
				bh = head = page_buffers(page);
				do {
					if (buffer_uptodate(bh) ||
					    buffer_unwritten(bh)) {
						if (whence == SEEK_DATA)
							found = 1;
					} else {
						if (whence == SEEK_HOLE)
							found = 1;
					}
					if (found) {
						*offset = max_t(loff_t,
							startoff, lastoff);
						unlock_page(page);
						goto out;
					}
					lastoff += bh->b_size;
					bh = bh->b_this_page;
				} while (bh != head);
			}

			lastoff = page_offset(page) + PAGE_SIZE;
			unlock_page(page);
		}

		/*
		 * We got fewer pages than we asked for, so there must be a
		 * hole after the last page we saw.
		 */
		if (nr_pages < num && whence == SEEK_HOLE) {
			found = 1;
			*offset = lastoff;
			break;
		}

		index = pvec.pages[i - 1]->index + 1;
		pagevec_release(&pvec);
	} while (index <= end);

out:
	pagevec_release(&pvec);
	return found;
}

/*
 * ext4_seek_data() retrieves the offset for SEEK_DATA.
 */
static loff_t ext4_seek_data(struct file *file, loff_t offset, loff_t maxsize)
{
	struct inode *inode = file->f_mapping->host;
	struct extent_status es;
	ext4_lblk_t start, last, end;
	loff_t dataoff, isize;
	int blkbits;
	int ret;

	inode_lock(inode);

	isize = i_size_read(inode);
	if (offset >= isize) {
		inode_unlock(inode);
		return -ENXIO;
	}

	blkbits = inode->i_sb->s_blocksize_bits;
	start = offset >> blkbits;
	last = start;
	end = isize >> blkbits;
	dataoff = offset;

	do {
		ret = ext4_get_next_extent(inode, last, end - last + 1, &es);
		if (ret <= 0) {
			/* No extent found -> no data */
			if (ret == 0)
				ret = -ENXIO;
			inode_unlock(inode);
			return ret;
		}

		last = es.es_lblk;
		if (last != start)
			dataoff = (loff_t)last << blkbits;
		if (!ext4_es_is_unwritten(&es))
			break;

		/*
		 * If there is an unwritten extent at this offset, treat it
		 * as data or a hole according to whether the page cache has
		 * data for it or not.
		 */
		if (ext4_find_unwritten_pgoff(inode, SEEK_DATA,
					      es.es_lblk + es.es_len, &dataoff))
			break;
		last += es.es_len;
		dataoff = (loff_t)last << blkbits;
		cond_resched();
	} while (last <= end);

	inode_unlock(inode);

	if (dataoff > isize)
		return -ENXIO;

	return vfs_setpos(file, dataoff, maxsize);
}

/*
 * ext4_seek_hole() retrieves the offset for SEEK_HOLE.
 */
static loff_t ext4_seek_hole(struct file *file, loff_t offset, loff_t maxsize)
{
	struct inode *inode = file->f_mapping->host;
	struct extent_status es;
	ext4_lblk_t start, last, end;
	loff_t holeoff, isize;
	int blkbits;
	int ret;

	inode_lock(inode);

	isize = i_size_read(inode);
	if (offset >= isize) {
		inode_unlock(inode);
		return -ENXIO;
	}

	blkbits = inode->i_sb->s_blocksize_bits;
	start = offset >> blkbits;
	last = start;
	end = isize >> blkbits;
	holeoff = offset;

	do {
		ret = ext4_get_next_extent(inode, last, end - last + 1, &es);
		if (ret < 0) {
			inode_unlock(inode);
			return ret;
		}
		/* Found a hole? */
		if (ret == 0 || es.es_lblk > last) {
			if (last != start)
				holeoff = (loff_t)last << blkbits;
			break;
		}
		/*
		 * If there is an unwritten extent at this offset, treat it
		 * as data or a hole according to whether the page cache has
		 * data for it or not.
		 */
		if (ext4_es_is_unwritten(&es) &&
		    ext4_find_unwritten_pgoff(inode, SEEK_HOLE,
					      last + es.es_len, &holeoff))
			break;

		last += es.es_len;
		holeoff = (loff_t)last << blkbits;
		cond_resched();
	} while (last <= end);

	inode_unlock(inode);

	if (holeoff > isize)
		holeoff = isize;

	return vfs_setpos(file, holeoff, maxsize);
}

/*
 * ext4_llseek() handles both block-mapped and extent-mapped maxbytes values
 * by calling generic_file_llseek_size() with the appropriate maxbytes
 * value for each.
 */
loff_t ext4_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes;

	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		maxbytes = EXT4_SB(inode->i_sb)->s_bitmap_maxbytes;
	else
		maxbytes = inode->i_sb->s_maxbytes;

	switch (whence) {
	case SEEK_SET:
	case SEEK_CUR:
	case SEEK_END:
		return generic_file_llseek_size(file, offset, whence,
						maxbytes, i_size_read(inode));
	case SEEK_DATA:
		return ext4_seek_data(file, offset, maxbytes);
	case SEEK_HOLE:
		return ext4_seek_hole(file, offset, maxbytes);
	}

	return -EINVAL;
}

const struct file_operations ext4_file_operations = {
	.llseek		= ext4_llseek,
	.read_iter	= generic_file_read_iter,
	.write_iter	= ext4_file_write_iter,
	.unlocked_ioctl = ext4_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ext4_compat_ioctl,
#endif
	.mmap		= ext4_file_mmap,
	.open		= ext4_file_open,
	.release	= ext4_release_file,
	.fsync		= ext4_sync_file,
	.get_unmapped_area = thp_get_unmapped_area,
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.fallocate	= ext4_fallocate,
};

const struct inode_operations ext4_file_inode_operations = {
	.setattr	= ext4_setattr,
	.getattr	= ext4_getattr,
	.listxattr	= ext4_listxattr,
	.get_acl	= ext4_get_acl,
	.set_acl	= ext4_set_acl,
	.fiemap		= ext4_fiemap,
};