/*
 * Copyright (C) 2010 Red Hat, Inc.
 * Copyright (c) 2016 Christoph Hellwig.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/uaccess.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/backing-dev.h>
#include <linux/buffer_head.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/dax.h>
#include <linux/sched/signal.h>

#include "internal.h"

/*
 * Execute an iomap write on a segment of the mapping that spans a
 * contiguous range of pages that have identical block mapping state.
 *
 * This avoids the need to map pages individually, do individual allocations
 * for each page and most importantly avoid the need for filesystem specific
 * locking per page. Instead, all the operations are amortised over the entire
 * range of pages. It is assumed that the filesystems will lock whatever
 * resources they require in the iomap_begin call, and release them in the
 * iomap_end call.
 */
loff_t
iomap_apply(struct inode *inode, loff_t pos, loff_t length, unsigned flags,
		const struct iomap_ops *ops, void *data, iomap_actor_t actor)
{
	struct iomap iomap = { 0 };
	loff_t written = 0, ret;

	/*
	 * Need to map a range from start position for length bytes. This can
	 * span multiple pages - it is only guaranteed to return a range of a
	 * single type of pages (e.g. all into a hole, all mapped or all
	 * unwritten). Failure at this point has nothing to undo.
	 *
	 * If allocation is required for this range, reserve the space now so
	 * that the allocation is guaranteed to succeed later on. Once we copy
	 * the data into the page cache pages, then we cannot fail otherwise we
	 * expose transient stale data. If the reserve fails, we can safely
	 * back out at this point as there is nothing to undo.
	 */
	ret = ops->iomap_begin(inode, pos, length, flags, &iomap);
	if (ret)
		return ret;
	if (WARN_ON(iomap.offset > pos))
		return -EIO;
	if (WARN_ON(iomap.length == 0))
		return -EIO;

	/*
	 * Cut down the length to the one actually provided by the filesystem,
	 * as it might not be able to give us the whole size that we requested.
	 */
	if (iomap.offset + iomap.length < pos + length)
		length = iomap.offset + iomap.length - pos;

	/*
	 * Now that we have guaranteed that the space allocation will succeed,
	 * we can do the copy-in page by page without having to worry about
	 * failures exposing transient data.
	 */
	written = actor(inode, pos, length, data, &iomap);

	/*
	 * Now the data has been copied, commit the range we've copied. This
	 * should not fail unless the filesystem has had a fatal error.
	 */
	if (ops->iomap_end) {
		ret = ops->iomap_end(inode, pos, length,
				     written > 0 ? written : 0,
				     flags, &iomap);
	}

	return written ? written : ret;
}
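
/*
 * Example (sketch): each helper below pairs iomap_apply() with a small
 * actor callback.  A hypothetical actor that merely counts the bytes it
 * was handed would look like:
 *
 *	static loff_t foo_count_actor(struct inode *inode, loff_t pos,
 *			loff_t length, void *data, struct iomap *iomap)
 *	{
 *		loff_t *total = data;
 *
 *		*total += length;
 *		return length;
 *	}
 *
 * Returning the number of bytes processed lets the caller advance pos and
 * call again; returning 0 or a negative errno terminates the loop.
 */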

static sector_t
iomap_sector(struct iomap *iomap, loff_t pos)
{
	return (iomap->addr + pos - iomap->offset) >> SECTOR_SHIFT;
}

static void
iomap_read_inline_data(struct inode *inode, struct page *page,
		struct iomap *iomap)
{
	size_t size = i_size_read(inode);
	void *addr;

	if (PageUptodate(page))
		return;

	BUG_ON(page->index);
	BUG_ON(size > PAGE_SIZE - offset_in_page(iomap->inline_data));

	addr = kmap_atomic(page);
	memcpy(addr, iomap->inline_data, size);
	memset(addr + size, 0, PAGE_SIZE - size);
	kunmap_atomic(addr);
	SetPageUptodate(page);
}

static void
iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
{
	loff_t i_size = i_size_read(inode);

	/*
	 * Only truncate newly allocated pages beyond EOF, even if the
	 * write started inside the existing inode size.
	 */
	if (pos + len > i_size)
		truncate_pagecache_range(inode, max(pos, i_size), pos + len);
}

static int
iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, struct iomap *iomap)
{
	pgoff_t index = pos >> PAGE_SHIFT;
	struct page *page;
	int status = 0;

	BUG_ON(pos + len > iomap->offset + iomap->length);

	if (fatal_signal_pending(current))
		return -EINTR;

	page = grab_cache_page_write_begin(inode->i_mapping, index, flags);
	if (!page)
		return -ENOMEM;

	if (iomap->type == IOMAP_INLINE)
		iomap_read_inline_data(inode, page, iomap);
	else
		status = __block_write_begin_int(page, pos, len, NULL, iomap);

	if (unlikely(status)) {
		unlock_page(page);
		put_page(page);
		page = NULL;

		iomap_write_failed(inode, pos, len);
	}

	*pagep = page;
	return status;
}

static int
iomap_write_end_inline(struct inode *inode, struct page *page,
		struct iomap *iomap, loff_t pos, unsigned copied)
{
	void *addr;

	WARN_ON_ONCE(!PageUptodate(page));
	BUG_ON(pos + copied > PAGE_SIZE - offset_in_page(iomap->inline_data));

	addr = kmap_atomic(page);
	memcpy(iomap->inline_data + pos, addr + pos, copied);
	kunmap_atomic(addr);

	mark_inode_dirty(inode);
	__generic_write_end(inode, pos, copied, page);
	return copied;
}

static int
iomap_write_end(struct inode *inode, loff_t pos, unsigned len,
		unsigned copied, struct page *page, struct iomap *iomap)
{
	int ret;

	if (iomap->type == IOMAP_INLINE) {
		ret = iomap_write_end_inline(inode, page, iomap, pos, copied);
	} else {
		ret = generic_write_end(NULL, inode->i_mapping, pos, len,
				copied, page, NULL);
	}

	if (ret < len)
		iomap_write_failed(inode, pos, len);
	return ret;
}

static loff_t
iomap_write_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	struct iov_iter *i = data;
	long status = 0;
	ssize_t written = 0;
	unsigned int flags = AOP_FLAG_NOFS;

	do {
		struct page *page;
		unsigned long offset;	/* Offset into pagecache page */
		unsigned long bytes;	/* Bytes to write to page */
		size_t copied;		/* Bytes copied from user */

		offset = (pos & (PAGE_SIZE - 1));
		bytes = min_t(unsigned long, PAGE_SIZE - offset,
						iov_iter_count(i));
again:
		if (bytes > length)
			bytes = length;

		/*
		 * Bring in the user page that we will copy from _first_.
		 * Otherwise there's a nasty deadlock on copying from the
		 * same page as we're writing to, without it being marked
		 * up-to-date.
		 *
		 * Not only is this an optimisation, but it is also required
		 * to check that the address is actually valid, when atomic
		 * usercopies are used, below.
		 */
		if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
			status = -EFAULT;
			break;
		}

		status = iomap_write_begin(inode, pos, bytes, flags, &page,
				iomap);
		if (unlikely(status))
			break;

		if (mapping_writably_mapped(inode->i_mapping))
			flush_dcache_page(page);

		copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);

		flush_dcache_page(page);

		status = iomap_write_end(inode, pos, bytes, copied, page,
				iomap);
		if (unlikely(status < 0))
			break;
		copied = status;

		cond_resched();

		iov_iter_advance(i, copied);
		if (unlikely(copied == 0)) {
			/*
			 * If we were unable to copy any data at all, we must
			 * fall back to a single segment length write.
			 *
			 * If we didn't fallback here, we could livelock
			 * because not all segments in the iov can be copied at
			 * once without a pagefault.
			 */
			bytes = min_t(unsigned long, PAGE_SIZE - offset,
						iov_iter_single_seg_count(i));
			goto again;
		}
		pos += copied;
		written += copied;
		length -= copied;

		balance_dirty_pages_ratelimited(inode->i_mapping);
	} while (iov_iter_count(i) && length);

	return written ? written : status;
}

ssize_t
iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops)
{
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	loff_t pos = iocb->ki_pos, ret = 0, written = 0;

	while (iov_iter_count(iter)) {
		ret = iomap_apply(inode, pos, iov_iter_count(iter),
				IOMAP_WRITE, ops, iter, iomap_write_actor);
		if (ret <= 0)
			break;
		pos += ret;
		written += ret;
	}

	return written ? written : ret;
}
EXPORT_SYMBOL_GPL(iomap_file_buffered_write);
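
/*
 * Example usage (sketch; foo_iomap_ops stands in for a filesystem's
 * iomap_ops instance): this helper is meant to be called from ->write_iter
 * with the inode lock held and the usual write checks done first:
 *
 *	static ssize_t foo_file_write_iter(struct kiocb *iocb,
 *			struct iov_iter *from)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		inode_lock(inode);
 *		ret = generic_write_checks(iocb, from);
 *		if (ret > 0)
 *			ret = iomap_file_buffered_write(iocb, from,
 *					&foo_iomap_ops);
 *		inode_unlock(inode);
 *		if (ret > 0)
 *			ret = generic_write_sync(iocb, ret);
 *		return ret;
 *	}
 */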

static struct page *
__iomap_read_page(struct inode *inode, loff_t offset)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	page = read_mapping_page(mapping, offset >> PAGE_SHIFT, NULL);
	if (IS_ERR(page))
		return page;
	if (!PageUptodate(page)) {
		put_page(page);
		return ERR_PTR(-EIO);
	}
	return page;
}

static loff_t
iomap_dirty_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	long status = 0;
	ssize_t written = 0;

	do {
		struct page *page, *rpage;
		unsigned long offset;	/* Offset into pagecache page */
		unsigned long bytes;	/* Bytes to write to page */

		offset = (pos & (PAGE_SIZE - 1));
		bytes = min_t(loff_t, PAGE_SIZE - offset, length);

		rpage = __iomap_read_page(inode, pos);
		if (IS_ERR(rpage))
			return PTR_ERR(rpage);

		status = iomap_write_begin(inode, pos, bytes,
					   AOP_FLAG_NOFS, &page, iomap);
		put_page(rpage);
		if (unlikely(status))
			return status;

		WARN_ON_ONCE(!PageUptodate(page));

		status = iomap_write_end(inode, pos, bytes, bytes, page, iomap);
		if (unlikely(status <= 0)) {
			if (WARN_ON_ONCE(status == 0))
				return -EIO;
			return status;
		}

		cond_resched();

		pos += status;
		written += status;
		length -= status;

		balance_dirty_pages_ratelimited(inode->i_mapping);
	} while (length);

	return written;
}

int
iomap_file_dirty(struct inode *inode, loff_t pos, loff_t len,
		const struct iomap_ops *ops)
{
	loff_t ret;

	while (len) {
		ret = iomap_apply(inode, pos, len, IOMAP_WRITE, ops, NULL,
				iomap_dirty_actor);
		if (ret <= 0)
			return ret;
		pos += ret;
		len -= ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_file_dirty);
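
/*
 * Example usage (sketch): a filesystem that has changed the block mapping
 * under a range of cached data, e.g. after an extent-sharing operation,
 * might rewrite that range in place with:
 *
 *	error = iomap_file_dirty(inode, pos, len, &foo_iomap_ops);
 *
 * Each page is read (if necessary) and written back through the current
 * mapping, so the data ends up dirty against the new extents.
 */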

static int iomap_zero(struct inode *inode, loff_t pos, unsigned offset,
		unsigned bytes, struct iomap *iomap)
{
	struct page *page;
	int status;

	status = iomap_write_begin(inode, pos, bytes, AOP_FLAG_NOFS, &page,
				   iomap);
	if (status)
		return status;

	zero_user(page, offset, bytes);
	mark_page_accessed(page);

	return iomap_write_end(inode, pos, bytes, bytes, page, iomap);
}

static int iomap_dax_zero(loff_t pos, unsigned offset, unsigned bytes,
		struct iomap *iomap)
{
	return __dax_zero_page_range(iomap->bdev, iomap->dax_dev,
			iomap_sector(iomap, pos & PAGE_MASK), offset, bytes);
}

static loff_t
iomap_zero_range_actor(struct inode *inode, loff_t pos, loff_t count,
		void *data, struct iomap *iomap)
{
	bool *did_zero = data;
	loff_t written = 0;
	int status;

	/* already zeroed?  we're done. */
	if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
		return count;

	do {
		unsigned offset, bytes;

		offset = pos & (PAGE_SIZE - 1); /* Within page */
		bytes = min_t(loff_t, PAGE_SIZE - offset, count);

		if (IS_DAX(inode))
			status = iomap_dax_zero(pos, offset, bytes, iomap);
		else
			status = iomap_zero(inode, pos, offset, bytes, iomap);
		if (status < 0)
			return status;

		pos += bytes;
		count -= bytes;
		written += bytes;
		if (did_zero)
			*did_zero = true;
	} while (count > 0);

	return written;
}

int
iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
		const struct iomap_ops *ops)
{
	loff_t ret;

	while (len > 0) {
		ret = iomap_apply(inode, pos, len, IOMAP_ZERO,
				ops, did_zero, iomap_zero_range_actor);
		if (ret <= 0)
			return ret;

		pos += ret;
		len -= ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_zero_range);

int
iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
		const struct iomap_ops *ops)
{
	unsigned int blocksize = i_blocksize(inode);
	unsigned int off = pos & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!off)
		return 0;
	return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops);
}
EXPORT_SYMBOL_GPL(iomap_truncate_page);
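
/*
 * Example usage (sketch): truncate paths typically zero the tail of the
 * new last block before shrinking i_size, so no stale data is exposed if
 * the file is later extended:
 *
 *	bool did_zero = false;
 *	int error;
 *
 *	error = iomap_truncate_page(inode, newsize, &did_zero,
 *			&foo_iomap_ops);
 */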

static loff_t
iomap_page_mkwrite_actor(struct inode *inode, loff_t pos, loff_t length,
		void *data, struct iomap *iomap)
{
	struct page *page = data;
	int ret;

	ret = __block_write_begin_int(page, pos, length, NULL, iomap);
	if (ret)
		return ret;

	block_commit_write(page, 0, length);
	return length;
}

int iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	unsigned long length;
	loff_t offset, size;
	ssize_t ret;

	lock_page(page);
	size = i_size_read(inode);
	if ((page->mapping != inode->i_mapping) ||
	    (page_offset(page) > size)) {
		/* We overload EFAULT to mean page got truncated */
		ret = -EFAULT;
		goto out_unlock;
	}

	/* page is wholly or partially inside EOF */
	if (((page->index + 1) << PAGE_SHIFT) > size)
		length = size & ~PAGE_MASK;
	else
		length = PAGE_SIZE;

	offset = page_offset(page);
	while (length > 0) {
		ret = iomap_apply(inode, offset, length,
				IOMAP_WRITE | IOMAP_FAULT, ops, page,
				iomap_page_mkwrite_actor);
		if (unlikely(ret <= 0))
			goto out_unlock;
		offset += ret;
		length -= ret;
	}

	set_page_dirty(page);
	wait_for_stable_page(page);
	return VM_FAULT_LOCKED;
out_unlock:
	unlock_page(page);
	return block_page_mkwrite_return(ret);
}
EXPORT_SYMBOL_GPL(iomap_page_mkwrite);
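
/*
 * Example usage (sketch): a filesystem hooks this up via its
 * vm_operations_struct, usually alongside the generic fault handlers:
 *
 *	static int foo_page_mkwrite(struct vm_fault *vmf)
 *	{
 *		return iomap_page_mkwrite(vmf, &foo_iomap_ops);
 *	}
 *
 *	static const struct vm_operations_struct foo_vm_ops = {
 *		.fault		= filemap_fault,
 *		.map_pages	= filemap_map_pages,
 *		.page_mkwrite	= foo_page_mkwrite,
 *	};
 */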

struct fiemap_ctx {
	struct fiemap_extent_info *fi;
	struct iomap prev;
};

static int iomap_to_fiemap(struct fiemap_extent_info *fi,
		struct iomap *iomap, u32 flags)
{
	switch (iomap->type) {
	case IOMAP_HOLE:
		/* skip holes */
		return 0;
	case IOMAP_DELALLOC:
		flags |= FIEMAP_EXTENT_DELALLOC | FIEMAP_EXTENT_UNKNOWN;
		break;
	case IOMAP_MAPPED:
		break;
	case IOMAP_UNWRITTEN:
		flags |= FIEMAP_EXTENT_UNWRITTEN;
		break;
	case IOMAP_INLINE:
		flags |= FIEMAP_EXTENT_DATA_INLINE;
		break;
	}

	if (iomap->flags & IOMAP_F_MERGED)
		flags |= FIEMAP_EXTENT_MERGED;
	if (iomap->flags & IOMAP_F_SHARED)
		flags |= FIEMAP_EXTENT_SHARED;

	return fiemap_fill_next_extent(fi, iomap->offset,
			iomap->addr != IOMAP_NULL_ADDR ? iomap->addr : 0,
			iomap->length, flags);
}

static loff_t
iomap_fiemap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	struct fiemap_ctx *ctx = data;
	loff_t ret = length;

	if (iomap->type == IOMAP_HOLE)
		return length;

	ret = iomap_to_fiemap(ctx->fi, &ctx->prev, 0);
	ctx->prev = *iomap;
	switch (ret) {
	case 0:		/* success */
		return length;
	case 1:		/* extent array full */
		return 0;
	default:
		return ret;
	}
}

int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fi,
		loff_t start, loff_t len, const struct iomap_ops *ops)
{
	struct fiemap_ctx ctx;
	loff_t ret;

	memset(&ctx, 0, sizeof(ctx));
	ctx.fi = fi;
	ctx.prev.type = IOMAP_HOLE;

	ret = fiemap_check_flags(fi, FIEMAP_FLAG_SYNC);
	if (ret)
		return ret;

	if (fi->fi_flags & FIEMAP_FLAG_SYNC) {
		ret = filemap_write_and_wait(inode->i_mapping);
		if (ret)
			return ret;
	}

	while (len > 0) {
		ret = iomap_apply(inode, start, len, IOMAP_REPORT, ops, &ctx,
				iomap_fiemap_actor);
		/* inode with no (attribute) mapping will give ENOENT */
		if (ret == -ENOENT)
			break;
		if (ret < 0)
			return ret;
		if (ret == 0)
			break;

		start += ret;
		len -= ret;
	}

	if (ctx.prev.type != IOMAP_HOLE) {
		ret = iomap_to_fiemap(fi, &ctx.prev, FIEMAP_EXTENT_LAST);
		if (ret < 0)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_fiemap);
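
/*
 * Example usage (sketch): the ->fiemap inode operation is normally a thin
 * wrapper around this helper:
 *
 *	static int foo_fiemap(struct inode *inode,
 *			struct fiemap_extent_info *fi, u64 start, u64 len)
 *	{
 *		return iomap_fiemap(inode, fi, start, len, &foo_iomap_ops);
 *	}
 */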

/*
 * Seek for SEEK_DATA / SEEK_HOLE within @page, starting at @lastoff.
 * Returns true if found and updates @lastoff to the offset in file.
 */
static bool
page_seek_hole_data(struct inode *inode, struct page *page, loff_t *lastoff,
		int whence)
{
	const struct address_space_operations *ops = inode->i_mapping->a_ops;
	unsigned int bsize = i_blocksize(inode), off;
	bool seek_data = whence == SEEK_DATA;
	loff_t poff = page_offset(page);

	if (WARN_ON_ONCE(*lastoff >= poff + PAGE_SIZE))
		return false;

	if (*lastoff < poff) {
		/*
		 * Last offset smaller than the start of the page means we
		 * found a hole:
		 */
		if (whence == SEEK_HOLE)
			return true;
		*lastoff = poff;
	}

	/*
	 * Just check the page unless we can and should check block ranges:
	 */
	if (bsize == PAGE_SIZE || !ops->is_partially_uptodate)
		return PageUptodate(page) == seek_data;

	lock_page(page);
	if (unlikely(page->mapping != inode->i_mapping))
		goto out_unlock_not_found;

	for (off = 0; off < PAGE_SIZE; off += bsize) {
		if ((*lastoff & ~PAGE_MASK) >= off + bsize)
			continue;
		if (ops->is_partially_uptodate(page, off, bsize) == seek_data) {
			unlock_page(page);
			return true;
		}
		*lastoff = poff + off + bsize;
	}

out_unlock_not_found:
	unlock_page(page);
	return false;
}

/*
 * Seek for SEEK_DATA / SEEK_HOLE in the page cache.
 *
 * Within unwritten extents, the page cache determines which parts are holes
 * and which are data: uptodate buffer heads count as data; everything else
 * counts as a hole.
 *
 * Returns the resulting offset on success, and -ENOENT otherwise.
 */
static loff_t
page_cache_seek_hole_data(struct inode *inode, loff_t offset, loff_t length,
		int whence)
{
	pgoff_t index = offset >> PAGE_SHIFT;
	pgoff_t end = DIV_ROUND_UP(offset + length, PAGE_SIZE);
	loff_t lastoff = offset;
	struct pagevec pvec;

	if (length <= 0)
		return -ENOENT;

	pagevec_init(&pvec);

	do {
		unsigned nr_pages, i;

		nr_pages = pagevec_lookup_range(&pvec, inode->i_mapping, &index,
						end - 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			if (page_seek_hole_data(inode, page, &lastoff, whence))
				goto check_range;
			lastoff = page_offset(page) + PAGE_SIZE;
		}
		pagevec_release(&pvec);
	} while (index < end);

	/* When no page at lastoff and we are not done, we found a hole. */
	if (whence != SEEK_HOLE)
		goto not_found;

check_range:
	if (lastoff < offset + length)
		goto out;
not_found:
	lastoff = -ENOENT;
out:
	pagevec_release(&pvec);
	return lastoff;
}

static loff_t
iomap_seek_hole_actor(struct inode *inode, loff_t offset, loff_t length,
		      void *data, struct iomap *iomap)
{
	switch (iomap->type) {
	case IOMAP_UNWRITTEN:
		offset = page_cache_seek_hole_data(inode, offset, length,
						   SEEK_HOLE);
		if (offset < 0)
			return length;
		/* fall through */
	case IOMAP_HOLE:
		*(loff_t *)data = offset;
		return 0;
	default:
		return length;
	}
}

loff_t
iomap_seek_hole(struct inode *inode, loff_t offset, const struct iomap_ops *ops)
{
	loff_t size = i_size_read(inode);
	loff_t length = size - offset;
	loff_t ret;

	/* Nothing to be found before or beyond the end of the file. */
	if (offset < 0 || offset >= size)
		return -ENXIO;

	while (length > 0) {
		ret = iomap_apply(inode, offset, length, IOMAP_REPORT, ops,
				  &offset, iomap_seek_hole_actor);
		if (ret < 0)
			return ret;
		if (ret == 0)
			break;

		offset += ret;
		length -= ret;
	}

	return offset;
}
EXPORT_SYMBOL_GPL(iomap_seek_hole);

static loff_t
iomap_seek_data_actor(struct inode *inode, loff_t offset, loff_t length,
		      void *data, struct iomap *iomap)
{
	switch (iomap->type) {
	case IOMAP_HOLE:
		return length;
	case IOMAP_UNWRITTEN:
		offset = page_cache_seek_hole_data(inode, offset, length,
						   SEEK_DATA);
		if (offset < 0)
			return length;
		/* fall through */
	default:
		*(loff_t *)data = offset;
		return 0;
	}
}

loff_t
iomap_seek_data(struct inode *inode, loff_t offset, const struct iomap_ops *ops)
{
	loff_t size = i_size_read(inode);
	loff_t length = size - offset;
	loff_t ret;

	/* Nothing to be found before or beyond the end of the file. */
	if (offset < 0 || offset >= size)
		return -ENXIO;

	while (length > 0) {
		ret = iomap_apply(inode, offset, length, IOMAP_REPORT, ops,
				  &offset, iomap_seek_data_actor);
		if (ret < 0)
			return ret;
		if (ret == 0)
			break;

		offset += ret;
		length -= ret;
	}

	if (length <= 0)
		return -ENXIO;
	return offset;
}
EXPORT_SYMBOL_GPL(iomap_seek_data);
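
/*
 * Example usage (sketch): both seek helpers slot into a filesystem's
 * ->llseek method next to the generic whence cases:
 *
 *	static loff_t foo_llseek(struct file *file, loff_t offset, int whence)
 *	{
 *		struct inode *inode = file_inode(file);
 *
 *		switch (whence) {
 *		case SEEK_HOLE:
 *			offset = iomap_seek_hole(inode, offset, &foo_iomap_ops);
 *			break;
 *		case SEEK_DATA:
 *			offset = iomap_seek_data(inode, offset, &foo_iomap_ops);
 *			break;
 *		default:
 *			return generic_file_llseek(file, offset, whence);
 *		}
 *
 *		if (offset < 0)
 *			return offset;
 *		return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
 *	}
 */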

/*
 * Private flags for iomap_dio, must not overlap with the public ones in
 * iomap.h:
 */
#define IOMAP_DIO_WRITE_FUA	(1 << 28)
#define IOMAP_DIO_NEED_SYNC	(1 << 29)
#define IOMAP_DIO_WRITE		(1 << 30)
#define IOMAP_DIO_DIRTY	(1 << 31)

struct iomap_dio {
	struct kiocb		*iocb;
	iomap_dio_end_io_t	*end_io;
	loff_t			i_size;
	loff_t			size;
	atomic_t		ref;
	unsigned		flags;
	int			error;
	bool			wait_for_completion;

	union {
		/* used during submission and for synchronous completion: */
		struct {
			struct iov_iter		*iter;
			struct task_struct	*waiter;
			struct request_queue	*last_queue;
			blk_qc_t		cookie;
		} submit;

		/* used for aio completion: */
		struct {
			struct work_struct	work;
		} aio;
	};
};

static ssize_t iomap_dio_complete(struct iomap_dio *dio)
{
	struct kiocb *iocb = dio->iocb;
	struct inode *inode = file_inode(iocb->ki_filp);
	loff_t offset = iocb->ki_pos;
	ssize_t ret;

	if (dio->end_io) {
		ret = dio->end_io(iocb,
				dio->error ? dio->error : dio->size,
				dio->flags);
	} else {
		ret = dio->error;
	}

	if (likely(!ret)) {
		ret = dio->size;
		/* check for short read */
		if (offset + ret > dio->i_size &&
		    !(dio->flags & IOMAP_DIO_WRITE))
			ret = dio->i_size - offset;
		iocb->ki_pos += ret;
	}

	/*
	 * Try again to invalidate clean pages which might have been cached by
	 * non-direct readahead, or faulted in by get_user_pages() if the
	 * source of the write was an mmap'ed region of the file we're writing.
	 * Either one is a pretty crazy thing to do, so we don't support it
	 * 100%.  If this invalidation fails, tough, the write still worked...
	 *
	 * And this page cache invalidation has to be after dio->end_io(), as
	 * some filesystems convert unwritten extents to real allocations in
	 * end_io() when necessary, otherwise a racing buffer read would cache
	 * zeros from unwritten extents.
	 */
	if (!dio->error &&
	    (dio->flags & IOMAP_DIO_WRITE) && inode->i_mapping->nrpages) {
		int err;
		err = invalidate_inode_pages2_range(inode->i_mapping,
				offset >> PAGE_SHIFT,
				(offset + dio->size - 1) >> PAGE_SHIFT);
		if (err)
			dio_warn_stale_pagecache(iocb->ki_filp);
	}

	/*
	 * If this is a DSYNC write, make sure we push it to stable storage now
	 * that we've written data.
	 */
	if (ret > 0 && (dio->flags & IOMAP_DIO_NEED_SYNC))
		ret = generic_write_sync(iocb, ret);

	inode_dio_end(file_inode(iocb->ki_filp));
	kfree(dio);

	return ret;
}

static void iomap_dio_complete_work(struct work_struct *work)
{
	struct iomap_dio *dio = container_of(work, struct iomap_dio, aio.work);
	struct kiocb *iocb = dio->iocb;

	iocb->ki_complete(iocb, iomap_dio_complete(dio), 0);
}

/*
 * Set an error in the dio if none is set yet.  We have to use cmpxchg
 * as the submission context and the completion context(s) can race to
 * update the error.
 */
static inline void iomap_dio_set_error(struct iomap_dio *dio, int ret)
{
	cmpxchg(&dio->error, 0, ret);
}

static void iomap_dio_bio_end_io(struct bio *bio)
{
	struct iomap_dio *dio = bio->bi_private;
	bool should_dirty = (dio->flags & IOMAP_DIO_DIRTY);

	if (bio->bi_status)
		iomap_dio_set_error(dio, blk_status_to_errno(bio->bi_status));

	if (atomic_dec_and_test(&dio->ref)) {
		if (dio->wait_for_completion) {
			struct task_struct *waiter = dio->submit.waiter;
			WRITE_ONCE(dio->submit.waiter, NULL);
			wake_up_process(waiter);
		} else if (dio->flags & IOMAP_DIO_WRITE) {
			struct inode *inode = file_inode(dio->iocb->ki_filp);

			INIT_WORK(&dio->aio.work, iomap_dio_complete_work);
			queue_work(inode->i_sb->s_dio_done_wq, &dio->aio.work);
		} else {
			iomap_dio_complete_work(&dio->aio.work);
		}
	}

	if (should_dirty) {
		bio_check_pages_dirty(bio);
	} else {
		struct bio_vec *bvec;
		int i;

		bio_for_each_segment_all(bvec, bio, i)
			put_page(bvec->bv_page);
		bio_put(bio);
	}
}

static blk_qc_t
iomap_dio_zero(struct iomap_dio *dio, struct iomap *iomap, loff_t pos,
		unsigned len)
{
	struct page *page = ZERO_PAGE(0);
	struct bio *bio;

	bio = bio_alloc(GFP_KERNEL, 1);
	bio_set_dev(bio, iomap->bdev);
	bio->bi_iter.bi_sector = iomap_sector(iomap, pos);
	bio->bi_private = dio;
	bio->bi_end_io = iomap_dio_bio_end_io;

	get_page(page);
	__bio_add_page(bio, page, len, 0);
	bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC | REQ_IDLE);

	atomic_inc(&dio->ref);
	return submit_bio(bio);
}

static loff_t
iomap_dio_actor(struct inode *inode, loff_t pos, loff_t length,
		void *data, struct iomap *iomap)
{
	struct iomap_dio *dio = data;
	unsigned int blkbits = blksize_bits(bdev_logical_block_size(iomap->bdev));
	unsigned int fs_block_size = i_blocksize(inode), pad;
	unsigned int align = iov_iter_alignment(dio->submit.iter);
	struct iov_iter iter;
	struct bio *bio;
	bool need_zeroout = false;
	bool use_fua = false;
	int nr_pages, ret;
	size_t copied = 0;

	if ((pos | length | align) & ((1 << blkbits) - 1))
		return -EINVAL;

	switch (iomap->type) {
	case IOMAP_HOLE:
		if (WARN_ON_ONCE(dio->flags & IOMAP_DIO_WRITE))
			return -EIO;
		/* fall through */
	case IOMAP_UNWRITTEN:
		if (!(dio->flags & IOMAP_DIO_WRITE)) {
			length = iov_iter_zero(length, dio->submit.iter);
			dio->size += length;
			return length;
		}
		dio->flags |= IOMAP_DIO_UNWRITTEN;
		need_zeroout = true;
		break;
	case IOMAP_MAPPED:
		if (iomap->flags & IOMAP_F_SHARED)
			dio->flags |= IOMAP_DIO_COW;
		if (iomap->flags & IOMAP_F_NEW) {
			need_zeroout = true;
		} else {
			/*
			 * Use a FUA write if we need datasync semantics, this
			 * is a pure data IO that doesn't require any metadata
			 * updates and the underlying device supports FUA. This
			 * allows us to avoid cache flushes on IO completion.
			 */
			if (!(iomap->flags & (IOMAP_F_SHARED|IOMAP_F_DIRTY)) &&
			    (dio->flags & IOMAP_DIO_WRITE_FUA) &&
			    blk_queue_fua(bdev_get_queue(iomap->bdev)))
				use_fua = true;
		}
		break;
	default:
		WARN_ON_ONCE(1);
		return -EIO;
	}

	/*
	 * Operate on a partial iter trimmed to the extent we were called for.
	 * We'll update the iter in the dio once we're done with this extent.
	 */
	iter = *dio->submit.iter;
	iov_iter_truncate(&iter, length);

	nr_pages = iov_iter_npages(&iter, BIO_MAX_PAGES);
	if (nr_pages <= 0)
		return nr_pages;

	if (need_zeroout) {
		/* zero out from the start of the block to the write offset */
		pad = pos & (fs_block_size - 1);
		if (pad)
			iomap_dio_zero(dio, iomap, pos - pad, pad);
	}

	do {
		size_t n;
		if (dio->error) {
			iov_iter_revert(dio->submit.iter, copied);
			return 0;
		}

		bio = bio_alloc(GFP_KERNEL, nr_pages);
		bio_set_dev(bio, iomap->bdev);
		bio->bi_iter.bi_sector = iomap_sector(iomap, pos);
		bio->bi_write_hint = dio->iocb->ki_hint;
		bio->bi_ioprio = dio->iocb->ki_ioprio;
		bio->bi_private = dio;
		bio->bi_end_io = iomap_dio_bio_end_io;

		ret = bio_iov_iter_get_pages(bio, &iter);
		if (unlikely(ret)) {
			bio_put(bio);
			return copied ? copied : ret;
		}

		n = bio->bi_iter.bi_size;
		if (dio->flags & IOMAP_DIO_WRITE) {
			bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;
			if (use_fua)
				bio->bi_opf |= REQ_FUA;
			else
				dio->flags &= ~IOMAP_DIO_WRITE_FUA;
			task_io_account_write(n);
		} else {
			bio->bi_opf = REQ_OP_READ;
			if (dio->flags & IOMAP_DIO_DIRTY)
				bio_set_pages_dirty(bio);
		}

		iov_iter_advance(dio->submit.iter, n);

		dio->size += n;
		pos += n;
		copied += n;

		nr_pages = iov_iter_npages(&iter, BIO_MAX_PAGES);

		atomic_inc(&dio->ref);

		dio->submit.last_queue = bdev_get_queue(iomap->bdev);
		dio->submit.cookie = submit_bio(bio);
	} while (nr_pages);

	if (need_zeroout) {
		/* zero out from the end of the write to the end of the block */
		pad = pos & (fs_block_size - 1);
		if (pad)
			iomap_dio_zero(dio, iomap, pos, fs_block_size - pad);
	}
	return copied;
}

/*
 * iomap_dio_rw() always completes O_[D]SYNC writes regardless of whether the
 * IO is being issued as AIO or not.  This allows us to optimise pure data
 * writes to use REQ_FUA rather than requiring generic_write_sync() to issue a
 * REQ_FLUSH post write. This is slightly tricky because a single request here
 * can be mapped into multiple disjoint IOs and only a subset of the IOs issued
 * may be pure data writes. In that case, we still need to do a full data sync
 * completion.
 */
ssize_t
iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops, iomap_dio_end_io_t end_io)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = file_inode(iocb->ki_filp);
	size_t count = iov_iter_count(iter);
	loff_t pos = iocb->ki_pos, start = pos;
	loff_t end = iocb->ki_pos + count - 1, ret = 0;
	unsigned int flags = IOMAP_DIRECT;
	struct blk_plug plug;
	struct iomap_dio *dio;

	lockdep_assert_held(&inode->i_rwsem);

	if (!count)
		return 0;

	dio = kmalloc(sizeof(*dio), GFP_KERNEL);
	if (!dio)
		return -ENOMEM;

	dio->iocb = iocb;
	atomic_set(&dio->ref, 1);
	dio->size = 0;
	dio->i_size = i_size_read(inode);
	dio->end_io = end_io;
	dio->error = 0;
	dio->flags = 0;
	dio->wait_for_completion = is_sync_kiocb(iocb);

	dio->submit.iter = iter;
	dio->submit.waiter = current;
	dio->submit.cookie = BLK_QC_T_NONE;
	dio->submit.last_queue = NULL;

	if (iov_iter_rw(iter) == READ) {
		if (pos >= dio->i_size)
			goto out_free_dio;

		if (iter->type == ITER_IOVEC)
			dio->flags |= IOMAP_DIO_DIRTY;
	} else {
		flags |= IOMAP_WRITE;
		dio->flags |= IOMAP_DIO_WRITE;

		/* for data sync or sync, we need sync completion processing */
		if (iocb->ki_flags & IOCB_DSYNC)
			dio->flags |= IOMAP_DIO_NEED_SYNC;

		/*
		 * For datasync only writes, we optimistically try using FUA
		 * for this IO.  Any non-FUA write that occurs will clear this
		 * flag, hence we know before completion whether a cache flush
		 * is necessary.
		 */
		if ((iocb->ki_flags & (IOCB_DSYNC | IOCB_SYNC)) == IOCB_DSYNC)
			dio->flags |= IOMAP_DIO_WRITE_FUA;
	}

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (filemap_range_has_page(mapping, start, end)) {
			ret = -EAGAIN;
			goto out_free_dio;
		}
		flags |= IOMAP_NOWAIT;
	}

	ret = filemap_write_and_wait_range(mapping, start, end);
	if (ret)
		goto out_free_dio;

	/*
	 * Try to invalidate cache pages for the range we're direct
	 * writing.  If this invalidation fails, tough, the write will
	 * still work, but racing two incompatible write paths is a
	 * pretty crazy thing to do, so we don't support it 100%.
	 */
	ret = invalidate_inode_pages2_range(mapping,
			start >> PAGE_SHIFT, end >> PAGE_SHIFT);
	if (ret)
		dio_warn_stale_pagecache(iocb->ki_filp);
	ret = 0;

	if (iov_iter_rw(iter) == WRITE && !dio->wait_for_completion &&
	    !inode->i_sb->s_dio_done_wq) {
		ret = sb_init_dio_done_wq(inode->i_sb);
		if (ret < 0)
			goto out_free_dio;
	}

	inode_dio_begin(inode);

	blk_start_plug(&plug);
	do {
		ret = iomap_apply(inode, pos, count, flags, ops, dio,
				iomap_dio_actor);
		if (ret <= 0) {
			/* magic error code to fall back to buffered I/O */
			if (ret == -ENOTBLK) {
				dio->wait_for_completion = true;
				ret = 0;
			}
			break;
		}
		pos += ret;

		if (iov_iter_rw(iter) == READ && pos >= dio->i_size)
			break;
	} while ((count = iov_iter_count(iter)) > 0);
	blk_finish_plug(&plug);

	if (ret < 0)
		iomap_dio_set_error(dio, ret);

	/*
	 * If all the writes we issued were FUA, we don't need to flush the
	 * cache on IO completion. Clear the sync flag for this case.
	 */
	if (dio->flags & IOMAP_DIO_WRITE_FUA)
		dio->flags &= ~IOMAP_DIO_NEED_SYNC;

	if (!atomic_dec_and_test(&dio->ref)) {
		if (!dio->wait_for_completion)
			return -EIOCBQUEUED;

		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (!READ_ONCE(dio->submit.waiter))
				break;

			if (!(iocb->ki_flags & IOCB_HIPRI) ||
			    !dio->submit.last_queue ||
			    !blk_poll(dio->submit.last_queue,
					 dio->submit.cookie))
				io_schedule();
		}
		__set_current_state(TASK_RUNNING);
	}

	ret = iomap_dio_complete(dio);

	return ret;

out_free_dio:
	kfree(dio);
	return ret;
}
EXPORT_SYMBOL_GPL(iomap_dio_rw);
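
/*
 * Example usage (sketch): a direct I/O read path built on this helper takes
 * the inode lock shared and passes an optional end_io callback (NULL here;
 * filesystems that must convert unwritten extents supply their own):
 *
 *	static ssize_t foo_dio_read(struct kiocb *iocb, struct iov_iter *to)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		inode_lock_shared(inode);
 *		ret = iomap_dio_rw(iocb, to, &foo_iomap_ops, NULL);
 *		inode_unlock_shared(inode);
 *		return ret;
 *	}
 */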

/* Swapfile activation */

#ifdef CONFIG_SWAP
struct iomap_swapfile_info {
	struct iomap iomap;		/* accumulated iomap */
	struct swap_info_struct *sis;
	uint64_t lowest_ppage;		/* lowest physical addr seen (pages) */
	uint64_t highest_ppage;		/* highest physical addr seen (pages) */
	unsigned long nr_pages;		/* number of pages collected */
	int nr_extents;			/* extent count */
};

/*
 * Collect physical extents for this swap file.  Physical extents reported to
 * the swap code must be trimmed to align to a page boundary.  The logical
 * offset within the file is irrelevant since the swapfile code maps logical
 * page numbers of the swap device to the physical page-aligned extents.
 */
static int iomap_swapfile_add_extent(struct iomap_swapfile_info *isi)
{
	struct iomap *iomap = &isi->iomap;
	unsigned long nr_pages;
	uint64_t first_ppage;
	uint64_t first_ppage_reported;
	uint64_t next_ppage;
	int error;

	/*
	 * Round the start up and the end down so that the physical
	 * extent aligns to a page boundary.
	 */
	first_ppage = ALIGN(iomap->addr, PAGE_SIZE) >> PAGE_SHIFT;
	next_ppage = ALIGN_DOWN(iomap->addr + iomap->length, PAGE_SIZE) >>
			PAGE_SHIFT;

	/* Skip too-short physical extents. */
	if (first_ppage >= next_ppage)
		return 0;
	nr_pages = next_ppage - first_ppage;

	/*
	 * Calculate how much swap space we're adding; the first page contains
	 * the swap header and doesn't count.  The mm still wants that first
	 * page fed to add_swap_extent, however.
	 */
	first_ppage_reported = first_ppage;
	if (iomap->offset == 0)
		first_ppage_reported++;
	if (isi->lowest_ppage > first_ppage_reported)
		isi->lowest_ppage = first_ppage_reported;
	if (isi->highest_ppage < (next_ppage - 1))
		isi->highest_ppage = next_ppage - 1;

	/* Add extent, set up for the next call. */
	error = add_swap_extent(isi->sis, isi->nr_pages, nr_pages, first_ppage);
	if (error < 0)
		return error;
	isi->nr_extents += error;
	isi->nr_pages += nr_pages;
	return 0;
}

/*
 * Accumulate iomaps for this swap file.  We have to accumulate iomaps because
 * swap only cares about contiguous page-aligned physical extents and makes no
 * distinction between written and unwritten extents.
 */
static loff_t iomap_swapfile_activate_actor(struct inode *inode, loff_t pos,
		loff_t count, void *data, struct iomap *iomap)
{
	struct iomap_swapfile_info *isi = data;
	int error;

	switch (iomap->type) {
	case IOMAP_MAPPED:
	case IOMAP_UNWRITTEN:
		/* Only real or unwritten extents. */
		break;
	case IOMAP_INLINE:
		/* No inline data. */
		pr_err("swapon: file is inline\n");
		return -EINVAL;
	default:
		pr_err("swapon: file has unallocated extents\n");
		return -EINVAL;
	}

	/* No uncommitted metadata or shared blocks. */
	if (iomap->flags & IOMAP_F_DIRTY) {
		pr_err("swapon: file is not committed\n");
		return -EINVAL;
	}
	if (iomap->flags & IOMAP_F_SHARED) {
		pr_err("swapon: file has shared extents\n");
		return -EINVAL;
	}

	/* Only one bdev per swap file. */
	if (iomap->bdev != isi->sis->bdev) {
		pr_err("swapon: file is on multiple devices\n");
		return -EINVAL;
	}

	if (isi->iomap.length == 0) {
		/* No accumulated extent, so just store it. */
		memcpy(&isi->iomap, iomap, sizeof(isi->iomap));
	} else if (isi->iomap.addr + isi->iomap.length == iomap->addr) {
		/* Append this to the accumulated extent. */
		isi->iomap.length += iomap->length;
	} else {
		/* Otherwise, add the retained iomap and store this one. */
		error = iomap_swapfile_add_extent(isi);
		if (error)
			return error;
		memcpy(&isi->iomap, iomap, sizeof(isi->iomap));
	}
	return count;
}

/*
 * Iterate a swap file's iomaps to construct physical extents that can be
 * passed to the swapfile subsystem.
 */
int iomap_swapfile_activate(struct swap_info_struct *sis,
		struct file *swap_file, sector_t *pagespan,
		const struct iomap_ops *ops)
{
	struct iomap_swapfile_info isi = {
		.sis = sis,
		.lowest_ppage = (sector_t)-1ULL,
	};
	struct address_space *mapping = swap_file->f_mapping;
	struct inode *inode = mapping->host;
	loff_t pos = 0;
	loff_t len = ALIGN_DOWN(i_size_read(inode), PAGE_SIZE);
	loff_t ret;

	/*
	 * Persist all file mapping metadata so that we won't have any
	 * IOMAP_F_DIRTY iomaps.
	 */
	ret = vfs_fsync(swap_file, 1);
	if (ret)
		return ret;

	while (len > 0) {
		ret = iomap_apply(inode, pos, len, IOMAP_REPORT,
				ops, &isi, iomap_swapfile_activate_actor);
		if (ret <= 0)
			return ret;

		pos += ret;
		len -= ret;
	}

	if (isi.iomap.length) {
		ret = iomap_swapfile_add_extent(&isi);
		if (ret)
			return ret;
	}

	*pagespan = 1 + isi.highest_ppage - isi.lowest_ppage;
	sis->max = isi.nr_pages;
	sis->pages = isi.nr_pages - 1;
	sis->highest_bit = isi.nr_pages - 1;
	return isi.nr_extents;
}
EXPORT_SYMBOL_GPL(iomap_swapfile_activate);
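
/*
 * Example usage (sketch): this helper is meant to back the ->swap_activate
 * address_space operation:
 *
 *	static int foo_swap_activate(struct swap_info_struct *sis,
 *			struct file *swap_file, sector_t *span)
 *	{
 *		return iomap_swapfile_activate(sis, swap_file, span,
 *				&foo_iomap_ops);
 *	}
 */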
#endif /* CONFIG_SWAP */

static loff_t
iomap_bmap_actor(struct inode *inode, loff_t pos, loff_t length,
		void *data, struct iomap *iomap)
{
	sector_t *bno = data, addr;

	if (iomap->type == IOMAP_MAPPED) {
		addr = (pos - iomap->offset + iomap->addr) >> inode->i_blkbits;
		if (addr > INT_MAX)
			WARN(1, "would truncate bmap result\n");
		else
			*bno = addr;
	}
	return 0;
}

/* legacy ->bmap interface.  0 is the error return (!) */
sector_t
iomap_bmap(struct address_space *mapping, sector_t bno,
		const struct iomap_ops *ops)
{
	struct inode *inode = mapping->host;
	loff_t pos = bno << inode->i_blkbits;
	unsigned blocksize = i_blocksize(inode);

	if (filemap_write_and_wait(mapping))
		return 0;

	bno = 0;
	iomap_apply(inode, pos, blocksize, 0, ops, &bno, iomap_bmap_actor);
	return bno;
}
EXPORT_SYMBOL_GPL(iomap_bmap);
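
/*
 * Example usage (sketch): legacy callers reach this through the ->bmap
 * address_space operation:
 *
 *	static sector_t foo_bmap(struct address_space *mapping, sector_t block)
 *	{
 *		return iomap_bmap(mapping, block, &foo_iomap_ops);
 *	}
 *
 * New code should prefer FIEMAP; a zero return here is ambiguous between
 * "hole" and "error".
 */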