/*
 * Copyright (C) 2010 Red Hat, Inc.
 * Copyright (c) 2016 Christoph Hellwig.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/uaccess.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/backing-dev.h>
#include <linux/buffer_head.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/dax.h>
#include <linux/sched/signal.h>

#include "internal.h"

/*
 * Execute an iomap write on a segment of the mapping that spans a
 * contiguous range of pages that have identical block mapping state.
 *
 * This avoids the need to map pages individually, do individual allocations
 * for each page and, most importantly, avoids the need for filesystem
 * specific locking per page.  Instead, all the operations are amortised over
 * the entire range of pages.  It is assumed that the filesystems will lock
 * whatever resources they require in the iomap_begin call, and release them
 * in the iomap_end call.
 */
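/*
 * As a rough sketch of how this is driven (the filesystem-side names below
 * are illustrative only, not taken from any particular filesystem), a caller
 * provides a struct iomap_ops and loops until the range is consumed:
 *
 *	const struct iomap_ops my_iomap_ops = {
 *		.iomap_begin	= my_iomap_begin,
 *		.iomap_end	= my_iomap_end,
 *	};
 *
 *	while (len > 0) {
 *		ret = iomap_apply(inode, pos, len, IOMAP_WRITE,
 *				&my_iomap_ops, data, my_actor);
 *		if (ret <= 0)
 *			break;
 *		pos += ret;
 *		len -= ret;
 *	}
 */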
loff_t
iomap_apply(struct inode *inode, loff_t pos, loff_t length, unsigned flags,
                const struct iomap_ops *ops, void *data, iomap_actor_t actor)
{
        struct iomap iomap = { 0 };
        loff_t written = 0, ret;

        /*
         * Need to map a range from start position for length bytes. This can
         * span multiple pages - it is only guaranteed to return a range of a
         * single type of pages (e.g. all into a hole, all mapped or all
         * unwritten). Failure at this point has nothing to undo.
         *
         * If allocation is required for this range, reserve the space now so
         * that the allocation is guaranteed to succeed later on. Once we copy
         * the data into the page cache pages, then we cannot fail otherwise we
         * expose transient stale data. If the reserve fails, we can safely
         * back out at this point as there is nothing to undo.
         */
        ret = ops->iomap_begin(inode, pos, length, flags, &iomap);
        if (ret)
                return ret;
        if (WARN_ON(iomap.offset > pos))
                return -EIO;
        if (WARN_ON(iomap.length == 0))
                return -EIO;

        /*
         * Cut down the length to the one actually provided by the filesystem,
         * as it might not be able to give us the whole size that we requested.
         */
        if (iomap.offset + iomap.length < pos + length)
                length = iomap.offset + iomap.length - pos;

        /*
         * Now that we have guaranteed that the space allocation will succeed,
         * we can do the copy-in page by page without having to worry about
         * failures exposing transient data.
         */
        written = actor(inode, pos, length, data, &iomap);

        /*
         * Now the data has been copied, commit the range we've copied. This
         * should not fail unless the filesystem has had a fatal error.
         */
        if (ops->iomap_end) {
                ret = ops->iomap_end(inode, pos, length,
                                     written > 0 ? written : 0,
                                     flags, &iomap);
        }

        return written ? written : ret;
}

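/*
 * Translate a position in the file into a 512-byte sector number on the
 * backing device, based on the disk address in the iomap.
 */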
static sector_t
iomap_sector(struct iomap *iomap, loff_t pos)
{
        return (iomap->addr + pos - iomap->offset) >> SECTOR_SHIFT;
}

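/*
 * Fill a page straight from the inline data in the iomap and zero the rest
 * of the page, so it can be treated like any other uptodate page afterwards.
 */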
static void
iomap_read_inline_data(struct inode *inode, struct page *page,
                struct iomap *iomap)
{
        size_t size = i_size_read(inode);
        void *addr;

        if (PageUptodate(page))
                return;

        BUG_ON(page->index);
        BUG_ON(size > PAGE_SIZE - offset_in_page(iomap->inline_data));

        addr = kmap_atomic(page);
        memcpy(addr, iomap->inline_data, size);
        memset(addr + size, 0, PAGE_SIZE - size);
        kunmap_atomic(addr);
        SetPageUptodate(page);
}

static void
iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
{
        loff_t i_size = i_size_read(inode);

        /*
         * Only truncate newly allocated pages beyond EOF, even if the
         * write started inside the existing inode size.
         */
        if (pos + len > i_size)
                truncate_pagecache_range(inode, max(pos, i_size), pos + len);
}

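/*
 * Prepare a locked page for a buffered write at @pos.  Inline extents are
 * populated from the iomap's inline data; for everything else
 * __block_write_begin_int() reads in or zeroes the blocks that this write
 * will not completely overwrite.
 */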
static int
iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
                struct page **pagep, struct iomap *iomap)
{
        pgoff_t index = pos >> PAGE_SHIFT;
        struct page *page;
        int status = 0;

        BUG_ON(pos + len > iomap->offset + iomap->length);

        if (fatal_signal_pending(current))
                return -EINTR;

        page = grab_cache_page_write_begin(inode->i_mapping, index, flags);
        if (!page)
                return -ENOMEM;

        if (iomap->type == IOMAP_INLINE)
                iomap_read_inline_data(inode, page, iomap);
        else
                status = __block_write_begin_int(page, pos, len, NULL, iomap);

        if (unlikely(status)) {
                unlock_page(page);
                put_page(page);
                page = NULL;

                iomap_write_failed(inode, pos, len);
        }

        *pagep = page;
        return status;
}

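/*
 * Copy what was just written into the page back into the inline data area
 * of the iomap and dirty the inode so the update gets written back.
 */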
static int
iomap_write_end_inline(struct inode *inode, struct page *page,
                struct iomap *iomap, loff_t pos, unsigned copied)
{
        void *addr;

        WARN_ON_ONCE(!PageUptodate(page));
        BUG_ON(pos + copied > PAGE_SIZE - offset_in_page(iomap->inline_data));

        addr = kmap_atomic(page);
        memcpy(iomap->inline_data + pos, addr + pos, copied);
        kunmap_atomic(addr);

        mark_inode_dirty(inode);
        __generic_write_end(inode, pos, copied, page);
        return copied;
}

static int
iomap_write_end(struct inode *inode, loff_t pos, unsigned len,
                unsigned copied, struct page *page, struct iomap *iomap)
{
        int ret;

        if (iomap->type == IOMAP_INLINE) {
                ret = iomap_write_end_inline(inode, page, iomap, pos, copied);
        } else {
                ret = generic_write_end(NULL, inode->i_mapping, pos, len,
                                copied, page, NULL);
        }

        if (iomap->page_done)
                iomap->page_done(inode, pos, copied, page, iomap);

        if (ret < len)
                iomap_write_failed(inode, pos, len);
        return ret;
}

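/*
 * Actor for buffered writes: copy data from the user iov_iter (passed in
 * via @data) into the page cache, one page at a time, within the range
 * covered by the current iomap.
 */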
static loff_t
iomap_write_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
                struct iomap *iomap)
{
        struct iov_iter *i = data;
        long status = 0;
        ssize_t written = 0;
        unsigned int flags = AOP_FLAG_NOFS;

        do {
                struct page *page;
                unsigned long offset;   /* Offset into pagecache page */
                unsigned long bytes;    /* Bytes to write to page */
                size_t copied;          /* Bytes copied from user */

                offset = (pos & (PAGE_SIZE - 1));
                bytes = min_t(unsigned long, PAGE_SIZE - offset,
                                                iov_iter_count(i));
again:
                if (bytes > length)
                        bytes = length;

                /*
                 * Bring in the user page that we will copy from _first_.
                 * Otherwise there's a nasty deadlock on copying from the
                 * same page as we're writing to, without it being marked
                 * up-to-date.
                 *
                 * Not only is this an optimisation, but it is also required
                 * to check that the address is actually valid, when atomic
                 * usercopies are used, below.
                 */
                if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
                        status = -EFAULT;
                        break;
                }

                status = iomap_write_begin(inode, pos, bytes, flags, &page,
                                iomap);
                if (unlikely(status))
                        break;

                if (mapping_writably_mapped(inode->i_mapping))
                        flush_dcache_page(page);

                copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);

                flush_dcache_page(page);

                status = iomap_write_end(inode, pos, bytes, copied, page,
                                iomap);
                if (unlikely(status < 0))
                        break;
                copied = status;

                cond_resched();

                iov_iter_advance(i, copied);
                if (unlikely(copied == 0)) {
                        /*
                         * If we were unable to copy any data at all, we must
                         * fall back to a single segment length write.
                         *
                         * If we didn't fall back here, we could livelock
                         * because not all segments in the iov can be copied at
                         * once without a pagefault.
                         */
                        bytes = min_t(unsigned long, PAGE_SIZE - offset,
                                                iov_iter_single_seg_count(i));
                        goto again;
                }
                pos += copied;
                written += copied;
                length -= copied;

                balance_dirty_pages_ratelimited(inode->i_mapping);
        } while (iov_iter_count(i) && length);

        return written ? written : status;
}

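/*
 * Buffered write entry point: repeatedly map the remaining range and run
 * iomap_write_actor() over it until the iov_iter is drained or an error
 * occurs.
 */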
ssize_t
iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *iter,
                const struct iomap_ops *ops)
{
        struct inode *inode = iocb->ki_filp->f_mapping->host;
        loff_t pos = iocb->ki_pos, ret = 0, written = 0;

        while (iov_iter_count(iter)) {
                ret = iomap_apply(inode, pos, iov_iter_count(iter),
                                IOMAP_WRITE, ops, iter, iomap_write_actor);
                if (ret <= 0)
                        break;
                pos += ret;
                written += ret;
        }

        return written ? written : ret;
}
EXPORT_SYMBOL_GPL(iomap_file_buffered_write);

static struct page *
__iomap_read_page(struct inode *inode, loff_t offset)
{
        struct address_space *mapping = inode->i_mapping;
        struct page *page;

        page = read_mapping_page(mapping, offset >> PAGE_SHIFT, NULL);
        if (IS_ERR(page))
                return page;
        if (!PageUptodate(page)) {
                put_page(page);
                return ERR_PTR(-EIO);
        }
        return page;
}

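/*
 * Actor for iomap_file_dirty(): read the existing data for each page and
 * write it straight back through iomap_write_begin()/iomap_write_end(),
 * dirtying the page cache over the current mapping.
 */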
static loff_t
iomap_dirty_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
                struct iomap *iomap)
{
        long status = 0;
        ssize_t written = 0;

        do {
                struct page *page, *rpage;
                unsigned long offset;   /* Offset into pagecache page */
                unsigned long bytes;    /* Bytes to write to page */

                offset = (pos & (PAGE_SIZE - 1));
                bytes = min_t(loff_t, PAGE_SIZE - offset, length);

                rpage = __iomap_read_page(inode, pos);
                if (IS_ERR(rpage))
                        return PTR_ERR(rpage);

                status = iomap_write_begin(inode, pos, bytes,
                                AOP_FLAG_NOFS, &page, iomap);
                put_page(rpage);
                if (unlikely(status))
                        return status;

                WARN_ON_ONCE(!PageUptodate(page));

                status = iomap_write_end(inode, pos, bytes, bytes, page, iomap);
                if (unlikely(status <= 0)) {
                        if (WARN_ON_ONCE(status == 0))
                                return -EIO;
                        return status;
                }

                cond_resched();

                pos += status;
                written += status;
                length -= status;

                balance_dirty_pages_ratelimited(inode->i_mapping);
        } while (length);

        return written;
}

int
iomap_file_dirty(struct inode *inode, loff_t pos, loff_t len,
                const struct iomap_ops *ops)
{
        loff_t ret;

        while (len) {
                ret = iomap_apply(inode, pos, len, IOMAP_WRITE, ops, NULL,
                                iomap_dirty_actor);
                if (ret <= 0)
                        return ret;
                pos += ret;
                len -= ret;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(iomap_file_dirty);

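/*
 * Zero a sub-page range through the page cache, using the regular
 * write_begin/write_end path so the zeroed region is written back like any
 * other buffered write.
 */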
static int iomap_zero(struct inode *inode, loff_t pos, unsigned offset,
                unsigned bytes, struct iomap *iomap)
{
        struct page *page;
        int status;

        status = iomap_write_begin(inode, pos, bytes, AOP_FLAG_NOFS, &page,
                        iomap);
        if (status)
                return status;

        zero_user(page, offset, bytes);
        mark_page_accessed(page);

        return iomap_write_end(inode, pos, bytes, bytes, page, iomap);
}

static int iomap_dax_zero(loff_t pos, unsigned offset, unsigned bytes,
                struct iomap *iomap)
{
        return __dax_zero_page_range(iomap->bdev, iomap->dax_dev,
                        iomap_sector(iomap, pos & PAGE_MASK), offset, bytes);
}

static loff_t
iomap_zero_range_actor(struct inode *inode, loff_t pos, loff_t count,
                void *data, struct iomap *iomap)
{
        bool *did_zero = data;
        loff_t written = 0;
        int status;

        /* already zeroed?  we're done. */
        if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
                return count;

        do {
                unsigned offset, bytes;

                offset = pos & (PAGE_SIZE - 1); /* Within page */
                bytes = min_t(loff_t, PAGE_SIZE - offset, count);

                if (IS_DAX(inode))
                        status = iomap_dax_zero(pos, offset, bytes, iomap);
                else
                        status = iomap_zero(inode, pos, offset, bytes, iomap);
                if (status < 0)
                        return status;

                pos += bytes;
                count -= bytes;
                written += bytes;
                if (did_zero)
                        *did_zero = true;
        } while (count > 0);

        return written;
}

int
iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
                const struct iomap_ops *ops)
{
        loff_t ret;

        while (len > 0) {
                ret = iomap_apply(inode, pos, len, IOMAP_ZERO,
                                ops, did_zero, iomap_zero_range_actor);
                if (ret <= 0)
                        return ret;

                pos += ret;
                len -= ret;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(iomap_zero_range);

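/*
 * Zero from @pos to the end of the containing block, e.g. to avoid exposing
 * stale data past EOF when a file is truncated to a non-block-aligned size.
 */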
int
iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
                const struct iomap_ops *ops)
{
        unsigned int blocksize = i_blocksize(inode);
        unsigned int off = pos & (blocksize - 1);

        /* Block boundary? Nothing to do */
        if (!off)
                return 0;
        return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops);
}
EXPORT_SYMBOL_GPL(iomap_truncate_page);

static loff_t
iomap_page_mkwrite_actor(struct inode *inode, loff_t pos, loff_t length,
                void *data, struct iomap *iomap)
{
        struct page *page = data;
        int ret;

        ret = __block_write_begin_int(page, pos, length, NULL, iomap);
        if (ret)
                return ret;

        block_commit_write(page, 0, length);
        return length;
}

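/*
 * Handle a write fault: allocate and map blocks for the faulting page, mark
 * the page dirty, and return with it locked and stable.
 */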
int iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
{
        struct page *page = vmf->page;
        struct inode *inode = file_inode(vmf->vma->vm_file);
        unsigned long length;
        loff_t offset, size;
        ssize_t ret;

        lock_page(page);
        size = i_size_read(inode);
        if ((page->mapping != inode->i_mapping) ||
            (page_offset(page) > size)) {
                /* We overload EFAULT to mean page got truncated */
                ret = -EFAULT;
                goto out_unlock;
        }

        /* page is wholly or partially inside EOF */
        if (((page->index + 1) << PAGE_SHIFT) > size)
                length = size & ~PAGE_MASK;
        else
                length = PAGE_SIZE;

        offset = page_offset(page);
        while (length > 0) {
                ret = iomap_apply(inode, offset, length,
                                IOMAP_WRITE | IOMAP_FAULT, ops, page,
                                iomap_page_mkwrite_actor);
                if (unlikely(ret <= 0))
                        goto out_unlock;
                offset += ret;
                length -= ret;
        }

        set_page_dirty(page);
        wait_for_stable_page(page);
        return VM_FAULT_LOCKED;
out_unlock:
        unlock_page(page);
        return block_page_mkwrite_return(ret);
}
EXPORT_SYMBOL_GPL(iomap_page_mkwrite);

struct fiemap_ctx {
        struct fiemap_extent_info *fi;
        struct iomap prev;
};

static int iomap_to_fiemap(struct fiemap_extent_info *fi,
                struct iomap *iomap, u32 flags)
{
        switch (iomap->type) {
        case IOMAP_HOLE:
                /* skip holes */
                return 0;
        case IOMAP_DELALLOC:
                flags |= FIEMAP_EXTENT_DELALLOC | FIEMAP_EXTENT_UNKNOWN;
                break;
        case IOMAP_MAPPED:
                break;
        case IOMAP_UNWRITTEN:
                flags |= FIEMAP_EXTENT_UNWRITTEN;
                break;
        case IOMAP_INLINE:
                flags |= FIEMAP_EXTENT_DATA_INLINE;
                break;
        }

        if (iomap->flags & IOMAP_F_MERGED)
                flags |= FIEMAP_EXTENT_MERGED;
        if (iomap->flags & IOMAP_F_SHARED)
                flags |= FIEMAP_EXTENT_SHARED;

        return fiemap_fill_next_extent(fi, iomap->offset,
                        iomap->addr != IOMAP_NULL_ADDR ? iomap->addr : 0,
                        iomap->length, flags);
}

static loff_t
iomap_fiemap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
                struct iomap *iomap)
{
        struct fiemap_ctx *ctx = data;
        loff_t ret = length;

        if (iomap->type == IOMAP_HOLE)
                return length;

        ret = iomap_to_fiemap(ctx->fi, &ctx->prev, 0);
        ctx->prev = *iomap;
        switch (ret) {
        case 0:         /* success */
                return length;
        case 1:         /* extent array full */
                return 0;
        default:
                return ret;
        }
}

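/*
 * Fill in FIEMAP extent records for the requested range.  The previous
 * mapping is kept in the context so the final extent can be flagged with
 * FIEMAP_EXTENT_LAST once iteration is complete.
 */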
int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fi,
                loff_t start, loff_t len, const struct iomap_ops *ops)
{
        struct fiemap_ctx ctx;
        loff_t ret;

        memset(&ctx, 0, sizeof(ctx));
        ctx.fi = fi;
        ctx.prev.type = IOMAP_HOLE;

        ret = fiemap_check_flags(fi, FIEMAP_FLAG_SYNC);
        if (ret)
                return ret;

        if (fi->fi_flags & FIEMAP_FLAG_SYNC) {
                ret = filemap_write_and_wait(inode->i_mapping);
                if (ret)
                        return ret;
        }

        while (len > 0) {
                ret = iomap_apply(inode, start, len, IOMAP_REPORT, ops, &ctx,
                                iomap_fiemap_actor);
                /* inode with no (attribute) mapping will give ENOENT */
                if (ret == -ENOENT)
                        break;
                if (ret < 0)
                        return ret;
                if (ret == 0)
                        break;

                start += ret;
                len -= ret;
        }

        if (ctx.prev.type != IOMAP_HOLE) {
                ret = iomap_to_fiemap(fi, &ctx.prev, FIEMAP_EXTENT_LAST);
                if (ret < 0)
                        return ret;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(iomap_fiemap);

/*
 * Seek for SEEK_DATA / SEEK_HOLE within @page, starting at @lastoff.
 * Returns true if found and updates @lastoff to the offset in file.
 */
static bool
page_seek_hole_data(struct inode *inode, struct page *page, loff_t *lastoff,
                int whence)
{
        const struct address_space_operations *ops = inode->i_mapping->a_ops;
        unsigned int bsize = i_blocksize(inode), off;
        bool seek_data = whence == SEEK_DATA;
        loff_t poff = page_offset(page);

        if (WARN_ON_ONCE(*lastoff >= poff + PAGE_SIZE))
                return false;

        if (*lastoff < poff) {
                /*
                 * Last offset smaller than the start of the page means we found
                 * a hole:
                 */
                if (whence == SEEK_HOLE)
                        return true;
                *lastoff = poff;
        }

        /*
         * Just check the page unless we can and should check block ranges:
         */
        if (bsize == PAGE_SIZE || !ops->is_partially_uptodate)
                return PageUptodate(page) == seek_data;

        lock_page(page);
        if (unlikely(page->mapping != inode->i_mapping))
                goto out_unlock_not_found;

        for (off = 0; off < PAGE_SIZE; off += bsize) {
                if ((*lastoff & ~PAGE_MASK) >= off + bsize)
                        continue;
                if (ops->is_partially_uptodate(page, off, bsize) == seek_data) {
                        unlock_page(page);
                        return true;
                }
                *lastoff = poff + off + bsize;
        }

out_unlock_not_found:
        unlock_page(page);
        return false;
}

/*
 * Seek for SEEK_DATA / SEEK_HOLE in the page cache.
 *
 * Within unwritten extents, the page cache determines which parts are holes
 * and which are data: uptodate buffer heads count as data; everything else
 * counts as a hole.
 *
 * Returns the resulting offset on success, and -ENOENT otherwise.
 */
static loff_t
page_cache_seek_hole_data(struct inode *inode, loff_t offset, loff_t length,
                int whence)
{
        pgoff_t index = offset >> PAGE_SHIFT;
        pgoff_t end = DIV_ROUND_UP(offset + length, PAGE_SIZE);
        loff_t lastoff = offset;
        struct pagevec pvec;

        if (length <= 0)
                return -ENOENT;

        pagevec_init(&pvec);

        do {
                unsigned nr_pages, i;

                nr_pages = pagevec_lookup_range(&pvec, inode->i_mapping, &index,
                                                end - 1);
                if (nr_pages == 0)
                        break;

                for (i = 0; i < nr_pages; i++) {
                        struct page *page = pvec.pages[i];

                        if (page_seek_hole_data(inode, page, &lastoff, whence))
                                goto check_range;
                        lastoff = page_offset(page) + PAGE_SIZE;
                }
                pagevec_release(&pvec);
        } while (index < end);

        /* When no page at lastoff and we are not done, we found a hole. */
        if (whence != SEEK_HOLE)
                goto not_found;

check_range:
        if (lastoff < offset + length)
                goto out;
not_found:
        lastoff = -ENOENT;
out:
        pagevec_release(&pvec);
        return lastoff;
}

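/*
 * Actor for llseek SEEK_HOLE: a hole in the mapping ends the search
 * immediately, while unwritten extents defer to the page cache, since
 * pages over an unwritten extent may hold data.
 */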
static loff_t
iomap_seek_hole_actor(struct inode *inode, loff_t offset, loff_t length,
                void *data, struct iomap *iomap)
{
        switch (iomap->type) {
        case IOMAP_UNWRITTEN:
                offset = page_cache_seek_hole_data(inode, offset, length,
                                SEEK_HOLE);
                if (offset < 0)
                        return length;
                /* fall through */
        case IOMAP_HOLE:
                *(loff_t *)data = offset;
                return 0;
        default:
                return length;
        }
}

loff_t
iomap_seek_hole(struct inode *inode, loff_t offset, const struct iomap_ops *ops)
{
        loff_t size = i_size_read(inode);
        loff_t length = size - offset;
        loff_t ret;

        /* Nothing to be found before or beyond the end of the file. */
        if (offset < 0 || offset >= size)
                return -ENXIO;

        while (length > 0) {
                ret = iomap_apply(inode, offset, length, IOMAP_REPORT, ops,
                                  &offset, iomap_seek_hole_actor);
                if (ret < 0)
                        return ret;
                if (ret == 0)
                        break;

                offset += ret;
                length -= ret;
        }

        return offset;
}
EXPORT_SYMBOL_GPL(iomap_seek_hole);

static loff_t
iomap_seek_data_actor(struct inode *inode, loff_t offset, loff_t length,
                void *data, struct iomap *iomap)
{
        switch (iomap->type) {
        case IOMAP_HOLE:
                return length;
        case IOMAP_UNWRITTEN:
                offset = page_cache_seek_hole_data(inode, offset, length,
                                SEEK_DATA);
                if (offset < 0)
                        return length;
                /*FALLTHRU*/
        default:
                *(loff_t *)data = offset;
                return 0;
        }
}

loff_t
iomap_seek_data(struct inode *inode, loff_t offset, const struct iomap_ops *ops)
{
        loff_t size = i_size_read(inode);
        loff_t length = size - offset;
        loff_t ret;

        /* Nothing to be found before or beyond the end of the file. */
        if (offset < 0 || offset >= size)
                return -ENXIO;

        while (length > 0) {
                ret = iomap_apply(inode, offset, length, IOMAP_REPORT, ops,
                                  &offset, iomap_seek_data_actor);
                if (ret < 0)
                        return ret;
                if (ret == 0)
                        break;

                offset += ret;
                length -= ret;
        }

        if (length <= 0)
                return -ENXIO;
        return offset;
}
EXPORT_SYMBOL_GPL(iomap_seek_data);

/*
 * Private flags for iomap_dio, must not overlap with the public ones in
 * iomap.h:
 */
#define IOMAP_DIO_WRITE_FUA     (1 << 28)
#define IOMAP_DIO_NEED_SYNC     (1 << 29)
#define IOMAP_DIO_WRITE         (1 << 30)
#define IOMAP_DIO_DIRTY         (1 << 31)

struct iomap_dio {
        struct kiocb            *iocb;
        iomap_dio_end_io_t      *end_io;
        loff_t                  i_size;
        loff_t                  size;
        atomic_t                ref;
        unsigned                flags;
        int                     error;
        bool                    wait_for_completion;

        union {
                /* used during submission and for synchronous completion: */
                struct {
                        struct iov_iter         *iter;
                        struct task_struct      *waiter;
                        struct request_queue    *last_queue;
                        blk_qc_t                cookie;
                } submit;

                /* used for aio completion: */
                struct {
                        struct work_struct      work;
                } aio;
        };
};

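/*
 * Finish off a direct I/O request: call the filesystem ->end_io handler,
 * trim reads that ran past EOF, invalidate page cache pages that a write
 * overlapped, issue generic_write_sync() for O_DSYNC writes that were not
 * covered by FUA bios, and advance the iocb position.
 */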
static ssize_t iomap_dio_complete(struct iomap_dio *dio)
{
        struct kiocb *iocb = dio->iocb;
        struct inode *inode = file_inode(iocb->ki_filp);
        loff_t offset = iocb->ki_pos;
        ssize_t ret;

        if (dio->end_io) {
                ret = dio->end_io(iocb,
                                dio->error ? dio->error : dio->size,
                                dio->flags);
        } else {
                ret = dio->error;
        }

        if (likely(!ret)) {
                ret = dio->size;
                /* check for short read */
                if (offset + ret > dio->i_size &&
                    !(dio->flags & IOMAP_DIO_WRITE))
                        ret = dio->i_size - offset;
                iocb->ki_pos += ret;
        }

        /*
         * Try again to invalidate clean pages which might have been cached by
         * non-direct readahead, or faulted in by get_user_pages() if the source
         * of the write was an mmap'ed region of the file we're writing.  Either
         * one is a pretty crazy thing to do, so we don't support it 100%.  If
         * this invalidation fails, tough, the write still worked...
         *
         * And this page cache invalidation has to be after dio->end_io(), as
         * some filesystems convert unwritten extents to real allocations in
         * end_io() when necessary, otherwise a racing buffer read would cache
         * zeros from unwritten extents.
         */
        if (!dio->error &&
            (dio->flags & IOMAP_DIO_WRITE) && inode->i_mapping->nrpages) {
                int err;
                err = invalidate_inode_pages2_range(inode->i_mapping,
                                offset >> PAGE_SHIFT,
                                (offset + dio->size - 1) >> PAGE_SHIFT);
                if (err)
                        dio_warn_stale_pagecache(iocb->ki_filp);
        }

        /*
         * If this is a DSYNC write, make sure we push it to stable storage now
         * that we've written data.
         */
        if (ret > 0 && (dio->flags & IOMAP_DIO_NEED_SYNC))
                ret = generic_write_sync(iocb, ret);

        inode_dio_end(file_inode(iocb->ki_filp));
        kfree(dio);

        return ret;
}

static void iomap_dio_complete_work(struct work_struct *work)
{
        struct iomap_dio *dio = container_of(work, struct iomap_dio, aio.work);
        struct kiocb *iocb = dio->iocb;

        iocb->ki_complete(iocb, iomap_dio_complete(dio), 0);
}

/*
 * Set an error in the dio if none is set yet.  We have to use cmpxchg
 * as the submission context and the completion context(s) can race to
 * update the error.
 */
static inline void iomap_dio_set_error(struct iomap_dio *dio, int ret)
{
        cmpxchg(&dio->error, 0, ret);
}

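/*
 * Bio completion handler.  When the last reference to the dio is dropped
 * this either wakes the synchronous waiter, punts write completion to the
 * superblock's dio workqueue, or completes the AIO read inline.
 */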
static void iomap_dio_bio_end_io(struct bio *bio)
{
        struct iomap_dio *dio = bio->bi_private;
        bool should_dirty = (dio->flags & IOMAP_DIO_DIRTY);

        if (bio->bi_status)
                iomap_dio_set_error(dio, blk_status_to_errno(bio->bi_status));

        if (atomic_dec_and_test(&dio->ref)) {
                if (dio->wait_for_completion) {
                        struct task_struct *waiter = dio->submit.waiter;
                        WRITE_ONCE(dio->submit.waiter, NULL);
                        wake_up_process(waiter);
                } else if (dio->flags & IOMAP_DIO_WRITE) {
                        struct inode *inode = file_inode(dio->iocb->ki_filp);

                        INIT_WORK(&dio->aio.work, iomap_dio_complete_work);
                        queue_work(inode->i_sb->s_dio_done_wq, &dio->aio.work);
                } else {
                        iomap_dio_complete_work(&dio->aio.work);
                }
        }

        if (should_dirty) {
                bio_check_pages_dirty(bio);
        } else {
                struct bio_vec *bvec;
                int i;

                bio_for_each_segment_all(bvec, bio, i)
                        put_page(bvec->bv_page);
                bio_put(bio);
        }
}

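/*
 * Submit a small write of zeroes from the shared zero page, used to zero
 * the unaligned head and tail of newly allocated or unwritten blocks around
 * a direct write.
 */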
static blk_qc_t
iomap_dio_zero(struct iomap_dio *dio, struct iomap *iomap, loff_t pos,
                unsigned len)
{
        struct page *page = ZERO_PAGE(0);
        struct bio *bio;

        bio = bio_alloc(GFP_KERNEL, 1);
        bio_set_dev(bio, iomap->bdev);
        bio->bi_iter.bi_sector = iomap_sector(iomap, pos);
        bio->bi_private = dio;
        bio->bi_end_io = iomap_dio_bio_end_io;

        get_page(page);
        __bio_add_page(bio, page, len, 0);
        bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC | REQ_IDLE);

        atomic_inc(&dio->ref);
        return submit_bio(bio);
}

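/*
 * Actor for direct I/O: build and submit bios for the current mapping.
 * Reads from holes and unwritten extents are satisfied by zero-filling the
 * iov_iter instead of doing I/O, and writes into freshly allocated or
 * unwritten blocks have their unaligned edges zeroed first.
 */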
static loff_t
iomap_dio_actor(struct inode *inode, loff_t pos, loff_t length,
                void *data, struct iomap *iomap)
{
        struct iomap_dio *dio = data;
        unsigned int blkbits = blksize_bits(bdev_logical_block_size(iomap->bdev));
        unsigned int fs_block_size = i_blocksize(inode), pad;
        unsigned int align = iov_iter_alignment(dio->submit.iter);
        struct iov_iter iter;
        struct bio *bio;
        bool need_zeroout = false;
        bool use_fua = false;
        int nr_pages, ret;
        size_t copied = 0;

        if ((pos | length | align) & ((1 << blkbits) - 1))
                return -EINVAL;

        switch (iomap->type) {
        case IOMAP_HOLE:
                if (WARN_ON_ONCE(dio->flags & IOMAP_DIO_WRITE))
                        return -EIO;
                /*FALLTHRU*/
        case IOMAP_UNWRITTEN:
                if (!(dio->flags & IOMAP_DIO_WRITE)) {
                        length = iov_iter_zero(length, dio->submit.iter);
                        dio->size += length;
                        return length;
                }
                dio->flags |= IOMAP_DIO_UNWRITTEN;
                need_zeroout = true;
                break;
        case IOMAP_MAPPED:
                if (iomap->flags & IOMAP_F_SHARED)
                        dio->flags |= IOMAP_DIO_COW;
                if (iomap->flags & IOMAP_F_NEW) {
                        need_zeroout = true;
                } else {
                        /*
                         * Use a FUA write if we need datasync semantics, this
                         * is a pure data IO that doesn't require any metadata
                         * updates and the underlying device supports FUA. This
                         * allows us to avoid cache flushes on IO completion.
                         */
                        if (!(iomap->flags & (IOMAP_F_SHARED|IOMAP_F_DIRTY)) &&
                            (dio->flags & IOMAP_DIO_WRITE_FUA) &&
                            blk_queue_fua(bdev_get_queue(iomap->bdev)))
                                use_fua = true;
                }
                break;
        default:
                WARN_ON_ONCE(1);
                return -EIO;
        }

        /*
         * Operate on a partial iter trimmed to the extent we were called for.
         * We'll update the iter in the dio once we're done with this extent.
         */
        iter = *dio->submit.iter;
        iov_iter_truncate(&iter, length);

        nr_pages = iov_iter_npages(&iter, BIO_MAX_PAGES);
        if (nr_pages <= 0)
                return nr_pages;

        if (need_zeroout) {
                /* zero out from the start of the block to the write offset */
                pad = pos & (fs_block_size - 1);
                if (pad)
                        iomap_dio_zero(dio, iomap, pos - pad, pad);
        }

        do {
                size_t n;
                if (dio->error) {
                        iov_iter_revert(dio->submit.iter, copied);
                        return 0;
                }

                bio = bio_alloc(GFP_KERNEL, nr_pages);
                bio_set_dev(bio, iomap->bdev);
                bio->bi_iter.bi_sector = iomap_sector(iomap, pos);
                bio->bi_write_hint = dio->iocb->ki_hint;
                bio->bi_ioprio = dio->iocb->ki_ioprio;
                bio->bi_private = dio;
                bio->bi_end_io = iomap_dio_bio_end_io;

                ret = bio_iov_iter_get_pages(bio, &iter);
                if (unlikely(ret)) {
                        bio_put(bio);
                        return copied ? copied : ret;
                }

                n = bio->bi_iter.bi_size;
                if (dio->flags & IOMAP_DIO_WRITE) {
                        bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;
                        if (use_fua)
                                bio->bi_opf |= REQ_FUA;
                        else
                                dio->flags &= ~IOMAP_DIO_WRITE_FUA;
                        task_io_account_write(n);
                } else {
                        bio->bi_opf = REQ_OP_READ;
                        if (dio->flags & IOMAP_DIO_DIRTY)
                                bio_set_pages_dirty(bio);
                }

                iov_iter_advance(dio->submit.iter, n);

                dio->size += n;
                pos += n;
                copied += n;

                nr_pages = iov_iter_npages(&iter, BIO_MAX_PAGES);

                atomic_inc(&dio->ref);

                dio->submit.last_queue = bdev_get_queue(iomap->bdev);
                dio->submit.cookie = submit_bio(bio);
        } while (nr_pages);

        if (need_zeroout) {
                /* zero out from the end of the write to the end of the block */
                pad = pos & (fs_block_size - 1);
                if (pad)
                        iomap_dio_zero(dio, iomap, pos, fs_block_size - pad);
        }
        return copied;
}

/*
 * iomap_dio_rw() always completes O_[D]SYNC writes regardless of whether the IO
 * is being issued as AIO or not.  This allows us to optimise pure data writes
 * to use REQ_FUA rather than requiring generic_write_sync() to issue a
 * REQ_FLUSH post write. This is slightly tricky because a single request here
 * can be mapped into multiple disjoint IOs and only a subset of the IOs issued
 * may be pure data writes. In that case, we still need to do a full data sync
 * completion.
 */
ssize_t
iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
                const struct iomap_ops *ops, iomap_dio_end_io_t end_io)
{
        struct address_space *mapping = iocb->ki_filp->f_mapping;
        struct inode *inode = file_inode(iocb->ki_filp);
        size_t count = iov_iter_count(iter);
        loff_t pos = iocb->ki_pos, start = pos;
        loff_t end = iocb->ki_pos + count - 1, ret = 0;
        unsigned int flags = IOMAP_DIRECT;
        struct blk_plug plug;
        struct iomap_dio *dio;

        lockdep_assert_held(&inode->i_rwsem);

        if (!count)
                return 0;

        dio = kmalloc(sizeof(*dio), GFP_KERNEL);
        if (!dio)
                return -ENOMEM;

        dio->iocb = iocb;
        atomic_set(&dio->ref, 1);
        dio->size = 0;
        dio->i_size = i_size_read(inode);
        dio->end_io = end_io;
        dio->error = 0;
        dio->flags = 0;
        dio->wait_for_completion = is_sync_kiocb(iocb);

        dio->submit.iter = iter;
        dio->submit.waiter = current;
        dio->submit.cookie = BLK_QC_T_NONE;
        dio->submit.last_queue = NULL;

        if (iov_iter_rw(iter) == READ) {
                if (pos >= dio->i_size)
                        goto out_free_dio;

                if (iter->type == ITER_IOVEC)
                        dio->flags |= IOMAP_DIO_DIRTY;
        } else {
                flags |= IOMAP_WRITE;
                dio->flags |= IOMAP_DIO_WRITE;

                /* for data sync or sync, we need sync completion processing */
                if (iocb->ki_flags & IOCB_DSYNC)
                        dio->flags |= IOMAP_DIO_NEED_SYNC;

                /*
                 * For datasync only writes, we optimistically try using FUA for
                 * this IO.  Any non-FUA write that occurs will clear this flag,
                 * hence we know before completion whether a cache flush is
                 * necessary.
                 */
                if ((iocb->ki_flags & (IOCB_DSYNC | IOCB_SYNC)) == IOCB_DSYNC)
                        dio->flags |= IOMAP_DIO_WRITE_FUA;
        }

        if (iocb->ki_flags & IOCB_NOWAIT) {
                if (filemap_range_has_page(mapping, start, end)) {
                        ret = -EAGAIN;
                        goto out_free_dio;
                }
                flags |= IOMAP_NOWAIT;
        }

        ret = filemap_write_and_wait_range(mapping, start, end);
        if (ret)
                goto out_free_dio;

        /*
         * Try to invalidate cache pages for the range we're direct
         * writing.  If this invalidation fails, tough, the write will
         * still work, but racing two incompatible write paths is a
         * pretty crazy thing to do, so we don't support it 100%.
         */
        ret = invalidate_inode_pages2_range(mapping,
                        start >> PAGE_SHIFT, end >> PAGE_SHIFT);
        if (ret)
                dio_warn_stale_pagecache(iocb->ki_filp);
        ret = 0;

        if (iov_iter_rw(iter) == WRITE && !dio->wait_for_completion &&
            !inode->i_sb->s_dio_done_wq) {
                ret = sb_init_dio_done_wq(inode->i_sb);
                if (ret < 0)
                        goto out_free_dio;
        }

        inode_dio_begin(inode);

        blk_start_plug(&plug);
        do {
                ret = iomap_apply(inode, pos, count, flags, ops, dio,
                                iomap_dio_actor);
                if (ret <= 0) {
                        /* magic error code to fall back to buffered I/O */
                        if (ret == -ENOTBLK) {
                                dio->wait_for_completion = true;
                                ret = 0;
                        }
                        break;
                }
                pos += ret;

                if (iov_iter_rw(iter) == READ && pos >= dio->i_size)
                        break;
        } while ((count = iov_iter_count(iter)) > 0);
        blk_finish_plug(&plug);

        if (ret < 0)
                iomap_dio_set_error(dio, ret);

        /*
         * If all the writes we issued were FUA, we don't need to flush the
         * cache on IO completion. Clear the sync flag for this case.
         */
        if (dio->flags & IOMAP_DIO_WRITE_FUA)
                dio->flags &= ~IOMAP_DIO_NEED_SYNC;

        if (!atomic_dec_and_test(&dio->ref)) {
                if (!dio->wait_for_completion)
                        return -EIOCBQUEUED;

                for (;;) {
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        if (!READ_ONCE(dio->submit.waiter))
                                break;

                        if (!(iocb->ki_flags & IOCB_HIPRI) ||
                            !dio->submit.last_queue ||
                            !blk_poll(dio->submit.last_queue,
                                        dio->submit.cookie))
                                io_schedule();
                }
                __set_current_state(TASK_RUNNING);
        }

        ret = iomap_dio_complete(dio);

        return ret;

out_free_dio:
        kfree(dio);
        return ret;
}
EXPORT_SYMBOL_GPL(iomap_dio_rw);

/* Swapfile activation */

#ifdef CONFIG_SWAP
struct iomap_swapfile_info {
        struct iomap iomap;             /* accumulated iomap */
        struct swap_info_struct *sis;
        uint64_t lowest_ppage;          /* lowest physical addr seen (pages) */
        uint64_t highest_ppage;         /* highest physical addr seen (pages) */
        unsigned long nr_pages;         /* number of pages collected */
        int nr_extents;                 /* extent count */
};

/*
 * Collect physical extents for this swap file.  Physical extents reported to
 * the swap code must be trimmed to align to a page boundary.  The logical
 * offset within the file is irrelevant since the swapfile code maps logical
 * page numbers of the swap device to the physical page-aligned extents.
 */
static int iomap_swapfile_add_extent(struct iomap_swapfile_info *isi)
{
        struct iomap *iomap = &isi->iomap;
        unsigned long nr_pages;
        uint64_t first_ppage;
        uint64_t first_ppage_reported;
        uint64_t next_ppage;
        int error;

        /*
         * Round the start up and the end down so that the physical
         * extent aligns to a page boundary.
         */
        first_ppage = ALIGN(iomap->addr, PAGE_SIZE) >> PAGE_SHIFT;
        next_ppage = ALIGN_DOWN(iomap->addr + iomap->length, PAGE_SIZE) >>
                        PAGE_SHIFT;

        /* Skip too-short physical extents. */
        if (first_ppage >= next_ppage)
                return 0;
        nr_pages = next_ppage - first_ppage;

        /*
         * Calculate how much swap space we're adding; the first page contains
         * the swap header and doesn't count.  The mm still wants that first
         * page fed to add_swap_extent, however.
         */
        first_ppage_reported = first_ppage;
        if (iomap->offset == 0)
                first_ppage_reported++;
        if (isi->lowest_ppage > first_ppage_reported)
                isi->lowest_ppage = first_ppage_reported;
        if (isi->highest_ppage < (next_ppage - 1))
                isi->highest_ppage = next_ppage - 1;

        /* Add extent, set up for the next call. */
        error = add_swap_extent(isi->sis, isi->nr_pages, nr_pages, first_ppage);
        if (error < 0)
                return error;
        isi->nr_extents += error;
        isi->nr_pages += nr_pages;
        return 0;
}

/*
 * Accumulate iomaps for this swap file.  We have to accumulate iomaps because
 * swap only cares about contiguous page-aligned physical extents and makes no
 * distinction between written and unwritten extents.
 */
static loff_t iomap_swapfile_activate_actor(struct inode *inode, loff_t pos,
                loff_t count, void *data, struct iomap *iomap)
{
        struct iomap_swapfile_info *isi = data;
        int error;

        switch (iomap->type) {
        case IOMAP_MAPPED:
        case IOMAP_UNWRITTEN:
                /* Only real or unwritten extents. */
                break;
        case IOMAP_INLINE:
                /* No inline data. */
                pr_err("swapon: file is inline\n");
                return -EINVAL;
        default:
                pr_err("swapon: file has unallocated extents\n");
                return -EINVAL;
        }

        /* No uncommitted metadata or shared blocks. */
        if (iomap->flags & IOMAP_F_DIRTY) {
                pr_err("swapon: file is not committed\n");
                return -EINVAL;
        }
        if (iomap->flags & IOMAP_F_SHARED) {
                pr_err("swapon: file has shared extents\n");
                return -EINVAL;
        }

        /* Only one bdev per swap file. */
        if (iomap->bdev != isi->sis->bdev) {
                pr_err("swapon: file is on multiple devices\n");
                return -EINVAL;
        }

        if (isi->iomap.length == 0) {
                /* No accumulated extent, so just store it. */
                memcpy(&isi->iomap, iomap, sizeof(isi->iomap));
        } else if (isi->iomap.addr + isi->iomap.length == iomap->addr) {
                /* Append this to the accumulated extent. */
                isi->iomap.length += iomap->length;
        } else {
                /* Otherwise, add the retained iomap and store this one. */
                error = iomap_swapfile_add_extent(isi);
                if (error)
                        return error;
                memcpy(&isi->iomap, iomap, sizeof(isi->iomap));
        }
        return count;
}

/*
 * Iterate a swap file's iomaps to construct physical extents that can be
 * passed to the swapfile subsystem.
 */
int iomap_swapfile_activate(struct swap_info_struct *sis,
                struct file *swap_file, sector_t *pagespan,
                const struct iomap_ops *ops)
{
        struct iomap_swapfile_info isi = {
                .sis = sis,
                .lowest_ppage = (sector_t)-1ULL,
        };
        struct address_space *mapping = swap_file->f_mapping;
        struct inode *inode = mapping->host;
        loff_t pos = 0;
        loff_t len = ALIGN_DOWN(i_size_read(inode), PAGE_SIZE);
        loff_t ret;

        /*
         * Persist all file mapping metadata so that we won't have any
         * IOMAP_F_DIRTY iomaps.
         */
        ret = vfs_fsync(swap_file, 1);
        if (ret)
                return ret;

        while (len > 0) {
                ret = iomap_apply(inode, pos, len, IOMAP_REPORT,
                                ops, &isi, iomap_swapfile_activate_actor);
                if (ret <= 0)
                        return ret;

                pos += ret;
                len -= ret;
        }

        if (isi.iomap.length) {
                ret = iomap_swapfile_add_extent(&isi);
                if (ret)
                        return ret;
        }

        *pagespan = 1 + isi.highest_ppage - isi.lowest_ppage;
        sis->max = isi.nr_pages;
        sis->pages = isi.nr_pages - 1;
        sis->highest_bit = isi.nr_pages - 1;
        return isi.nr_extents;
}
EXPORT_SYMBOL_GPL(iomap_swapfile_activate);
#endif /* CONFIG_SWAP */

static loff_t
iomap_bmap_actor(struct inode *inode, loff_t pos, loff_t length,
                void *data, struct iomap *iomap)
{
        sector_t *bno = data, addr;

        if (iomap->type == IOMAP_MAPPED) {
                addr = (pos - iomap->offset + iomap->addr) >> inode->i_blkbits;
                if (addr > INT_MAX)
                        WARN(1, "would truncate bmap result\n");
                else
                        *bno = addr;
        }
        return 0;
}

/* legacy ->bmap interface.  0 is the error return (!) */
sector_t
iomap_bmap(struct address_space *mapping, sector_t bno,
                const struct iomap_ops *ops)
{
        struct inode *inode = mapping->host;
        loff_t pos = bno << inode->i_blkbits;
        unsigned blocksize = i_blocksize(inode);

        if (filemap_write_and_wait(mapping))
                return 0;

        bno = 0;
        iomap_apply(inode, pos, blocksize, 0, ops, &bno, iomap_bmap_actor);
        return bno;
}
EXPORT_SYMBOL_GPL(iomap_bmap);