/*
 * Copyright (C) 2010 Red Hat, Inc.
 * Copyright (c) 2016 Christoph Hellwig.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/uaccess.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/backing-dev.h>
#include <linux/buffer_head.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/dax.h>
#include <linux/sched/signal.h>

#include "internal.h"

/*
 * Execute an iomap write on a segment of the mapping that spans a
 * contiguous range of pages that have identical block mapping state.
 *
 * This avoids the need to map pages individually, do individual allocations
 * for each page and, most importantly, avoids the need for filesystem specific
 * locking per page. Instead, all the operations are amortised over the entire
 * range of pages. It is assumed that the filesystems will lock whatever
 * resources they require in the iomap_begin call, and release them in the
 * iomap_end call.
 */
loff_t
iomap_apply(struct inode *inode, loff_t pos, loff_t length, unsigned flags,
		const struct iomap_ops *ops, void *data, iomap_actor_t actor)
{
	struct iomap iomap = { 0 };
	loff_t written = 0, ret;

	/*
	 * Need to map a range from start position for length bytes. This can
	 * span multiple pages - it is only guaranteed to return a range of a
	 * single type of pages (e.g. all into a hole, all mapped or all
	 * unwritten). Failure at this point has nothing to undo.
	 *
	 * If allocation is required for this range, reserve the space now so
	 * that the allocation is guaranteed to succeed later on. Once we copy
	 * the data into the page cache pages, then we cannot fail otherwise we
	 * expose transient stale data. If the reserve fails, we can safely
	 * back out at this point as there is nothing to undo.
	 */
	ret = ops->iomap_begin(inode, pos, length, flags, &iomap);
	if (ret)
		return ret;
	if (WARN_ON(iomap.offset > pos))
		return -EIO;
	if (WARN_ON(iomap.length == 0))
		return -EIO;

	/*
	 * Cut down the length to the one actually provided by the filesystem,
	 * as it might not be able to give us the whole size that we requested.
	 */
	if (iomap.offset + iomap.length < pos + length)
		length = iomap.offset + iomap.length - pos;

	/*
	 * Now that we have guaranteed that the space allocation will succeed,
	 * we can do the copy-in page by page without having to worry about
	 * failures exposing transient data.
	 */
	written = actor(inode, pos, length, data, &iomap);

	/*
	 * Now the data has been copied, commit the range we've copied. This
	 * should not fail unless the filesystem has had a fatal error.
	 */
	if (ops->iomap_end) {
		ret = ops->iomap_end(inode, pos, length,
				     written > 0 ? written : 0,
				     flags, &iomap);
	}

	return written ? written : ret;
}
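
/*
 * Illustrative sketch of the actor contract above; the "example_" names are
 * hypothetical and only show the calling convention, not a real filesystem:
 *
 *	static loff_t
 *	example_count_mapped_actor(struct inode *inode, loff_t pos,
 *			loff_t length, void *data, struct iomap *iomap)
 *	{
 *		loff_t *bytes = data;
 *
 *		if (iomap->type == IOMAP_MAPPED)
 *			*bytes += length;
 *		return length;	// consume the whole extent so the caller advances
 *	}
 *
 *	ret = iomap_apply(inode, 0, i_size_read(inode), IOMAP_REPORT,
 *			  &example_iomap_ops, &bytes,
 *			  example_count_mapped_actor);
 */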

static sector_t
iomap_sector(struct iomap *iomap, loff_t pos)
{
	return (iomap->addr + pos - iomap->offset) >> SECTOR_SHIFT;
}

static void
iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
{
	loff_t i_size = i_size_read(inode);

	/*
	 * Only truncate newly allocated pages beyond EOF, even if the
	 * write started inside the existing inode size.
	 */
	if (pos + len > i_size)
		truncate_pagecache_range(inode, max(pos, i_size), pos + len);
}

static int
iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, struct iomap *iomap)
{
	pgoff_t index = pos >> PAGE_SHIFT;
	struct page *page;
	int status = 0;

	BUG_ON(pos + len > iomap->offset + iomap->length);

	if (fatal_signal_pending(current))
		return -EINTR;

	page = grab_cache_page_write_begin(inode->i_mapping, index, flags);
	if (!page)
		return -ENOMEM;

	status = __block_write_begin_int(page, pos, len, NULL, iomap);
	if (unlikely(status)) {
		unlock_page(page);
		put_page(page);
		page = NULL;

		iomap_write_failed(inode, pos, len);
	}

	*pagep = page;
	return status;
}

static int
iomap_write_end(struct inode *inode, loff_t pos, unsigned len,
		unsigned copied, struct page *page)
{
	int ret;

	ret = generic_write_end(NULL, inode->i_mapping, pos, len,
			copied, page, NULL);
	if (ret < len)
		iomap_write_failed(inode, pos, len);
	return ret;
}

static loff_t
iomap_write_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	struct iov_iter *i = data;
	long status = 0;
	ssize_t written = 0;
	unsigned int flags = AOP_FLAG_NOFS;

	do {
		struct page *page;
		unsigned long offset;	/* Offset into pagecache page */
		unsigned long bytes;	/* Bytes to write to page */
		size_t copied;		/* Bytes copied from user */

		offset = (pos & (PAGE_SIZE - 1));
		bytes = min_t(unsigned long, PAGE_SIZE - offset,
						iov_iter_count(i));
again:
		if (bytes > length)
			bytes = length;

		/*
		 * Bring in the user page that we will copy from _first_.
		 * Otherwise there's a nasty deadlock on copying from the
		 * same page as we're writing to, without it being marked
		 * up-to-date.
		 *
		 * Not only is this an optimisation, but it is also required
		 * to check that the address is actually valid, when atomic
		 * usercopies are used, below.
		 */
		if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
			status = -EFAULT;
			break;
		}

		status = iomap_write_begin(inode, pos, bytes, flags, &page,
				iomap);
		if (unlikely(status))
			break;

		if (mapping_writably_mapped(inode->i_mapping))
			flush_dcache_page(page);

		copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);

		flush_dcache_page(page);

		status = iomap_write_end(inode, pos, bytes, copied, page);
		if (unlikely(status < 0))
			break;
		copied = status;

		cond_resched();

		iov_iter_advance(i, copied);
		if (unlikely(copied == 0)) {
			/*
			 * If we were unable to copy any data at all, we must
			 * fall back to a single segment length write.
			 *
			 * If we didn't fallback here, we could livelock
			 * because not all segments in the iov can be copied at
			 * once without a pagefault.
			 */
			bytes = min_t(unsigned long, PAGE_SIZE - offset,
						iov_iter_single_seg_count(i));
			goto again;
		}
		pos += copied;
		written += copied;
		length -= copied;

		balance_dirty_pages_ratelimited(inode->i_mapping);
	} while (iov_iter_count(i) && length);

	return written ? written : status;
}

ssize_t
iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops)
{
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	loff_t pos = iocb->ki_pos, ret = 0, written = 0;

	while (iov_iter_count(iter)) {
		ret = iomap_apply(inode, pos, iov_iter_count(iter),
				IOMAP_WRITE, ops, iter, iomap_write_actor);
		if (ret <= 0)
			break;
		pos += ret;
		written += ret;
	}

	return written ? written : ret;
}
EXPORT_SYMBOL_GPL(iomap_file_buffered_write);
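
/*
 * Illustrative sketch of a caller: a filesystem's ->write_iter typically runs
 * the generic write checks under the inode lock and then hands the iterator
 * to iomap_file_buffered_write() together with its own iomap_ops.  The
 * "example_" names below are hypothetical.
 *
 *	static ssize_t example_buffered_write(struct kiocb *iocb,
 *			struct iov_iter *from)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		inode_lock(inode);
 *		ret = generic_write_checks(iocb, from);
 *		if (ret > 0)
 *			ret = iomap_file_buffered_write(iocb, from,
 *					&example_iomap_ops);
 *		inode_unlock(inode);
 *		if (ret > 0)
 *			ret = generic_write_sync(iocb, ret);
 *		return ret;
 *	}
 */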
260
Christoph Hellwig5f4e5752016-09-19 10:12:45 +1000261static struct page *
262__iomap_read_page(struct inode *inode, loff_t offset)
263{
264 struct address_space *mapping = inode->i_mapping;
265 struct page *page;
266
267 page = read_mapping_page(mapping, offset >> PAGE_SHIFT, NULL);
268 if (IS_ERR(page))
269 return page;
270 if (!PageUptodate(page)) {
271 put_page(page);
272 return ERR_PTR(-EIO);
273 }
274 return page;
275}
276
277static loff_t
278iomap_dirty_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
279 struct iomap *iomap)
280{
281 long status = 0;
282 ssize_t written = 0;
283
284 do {
285 struct page *page, *rpage;
286 unsigned long offset; /* Offset into pagecache page */
287 unsigned long bytes; /* Bytes to write to page */
288
289 offset = (pos & (PAGE_SIZE - 1));
Christoph Hellwige28ae8e2017-08-11 12:45:35 -0700290 bytes = min_t(loff_t, PAGE_SIZE - offset, length);
Christoph Hellwig5f4e5752016-09-19 10:12:45 +1000291
292 rpage = __iomap_read_page(inode, pos);
293 if (IS_ERR(rpage))
294 return PTR_ERR(rpage);
295
296 status = iomap_write_begin(inode, pos, bytes,
Tetsuo Handac718a972017-05-08 15:58:59 -0700297 AOP_FLAG_NOFS, &page, iomap);
Christoph Hellwig5f4e5752016-09-19 10:12:45 +1000298 put_page(rpage);
299 if (unlikely(status))
300 return status;
301
302 WARN_ON_ONCE(!PageUptodate(page));
303
304 status = iomap_write_end(inode, pos, bytes, bytes, page);
305 if (unlikely(status <= 0)) {
306 if (WARN_ON_ONCE(status == 0))
307 return -EIO;
308 return status;
309 }
310
311 cond_resched();
312
313 pos += status;
314 written += status;
315 length -= status;
316
317 balance_dirty_pages_ratelimited(inode->i_mapping);
318 } while (length);
319
320 return written;
321}
322
323int
324iomap_file_dirty(struct inode *inode, loff_t pos, loff_t len,
Christoph Hellwig8ff6daa2017-01-27 23:20:26 -0800325 const struct iomap_ops *ops)
Christoph Hellwig5f4e5752016-09-19 10:12:45 +1000326{
327 loff_t ret;
328
329 while (len) {
330 ret = iomap_apply(inode, pos, len, IOMAP_WRITE, ops, NULL,
331 iomap_dirty_actor);
332 if (ret <= 0)
333 return ret;
334 pos += ret;
335 len -= ret;
336 }
337
338 return 0;
339}
340EXPORT_SYMBOL_GPL(iomap_file_dirty);

static int iomap_zero(struct inode *inode, loff_t pos, unsigned offset,
		unsigned bytes, struct iomap *iomap)
{
	struct page *page;
	int status;

	status = iomap_write_begin(inode, pos, bytes, AOP_FLAG_NOFS, &page,
				   iomap);
	if (status)
		return status;

	zero_user(page, offset, bytes);
	mark_page_accessed(page);

	return iomap_write_end(inode, pos, bytes, bytes, page);
}

static int iomap_dax_zero(loff_t pos, unsigned offset, unsigned bytes,
		struct iomap *iomap)
{
	return __dax_zero_page_range(iomap->bdev, iomap->dax_dev,
			iomap_sector(iomap, pos & PAGE_MASK), offset, bytes);
}

static loff_t
iomap_zero_range_actor(struct inode *inode, loff_t pos, loff_t count,
		void *data, struct iomap *iomap)
{
	bool *did_zero = data;
	loff_t written = 0;
	int status;

	/* already zeroed? we're done. */
	if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
		return count;

	do {
		unsigned offset, bytes;

		offset = pos & (PAGE_SIZE - 1); /* Within page */
		bytes = min_t(loff_t, PAGE_SIZE - offset, count);

		if (IS_DAX(inode))
			status = iomap_dax_zero(pos, offset, bytes, iomap);
		else
			status = iomap_zero(inode, pos, offset, bytes, iomap);
		if (status < 0)
			return status;

		pos += bytes;
		count -= bytes;
		written += bytes;
		if (did_zero)
			*did_zero = true;
	} while (count > 0);

	return written;
}

int
iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
		const struct iomap_ops *ops)
{
	loff_t ret;

	while (len > 0) {
		ret = iomap_apply(inode, pos, len, IOMAP_ZERO,
				ops, did_zero, iomap_zero_range_actor);
		if (ret <= 0)
			return ret;

		pos += ret;
		len -= ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_zero_range);

int
iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
		const struct iomap_ops *ops)
{
	unsigned int blocksize = i_blocksize(inode);
	unsigned int off = pos & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!off)
		return 0;
	return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops);
}
EXPORT_SYMBOL_GPL(iomap_truncate_page);
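
/*
 * Illustrative sketch: on a size-reducing truncate a filesystem typically
 * zeroes the now-partial tail block before updating i_size, roughly:
 *
 *	error = iomap_truncate_page(inode, newsize, &did_zero,
 *			&example_iomap_ops);
 *
 * where "example_iomap_ops" stands in for the filesystem's own iomap_ops and
 * did_zero reports whether any zeroing was actually performed.
 */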

static loff_t
iomap_page_mkwrite_actor(struct inode *inode, loff_t pos, loff_t length,
		void *data, struct iomap *iomap)
{
	struct page *page = data;
	int ret;

	ret = __block_write_begin_int(page, pos, length, NULL, iomap);
	if (ret)
		return ret;

	block_commit_write(page, 0, length);
	return length;
}

int iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	unsigned long length;
	loff_t offset, size;
	ssize_t ret;

	lock_page(page);
	size = i_size_read(inode);
	if ((page->mapping != inode->i_mapping) ||
	    (page_offset(page) > size)) {
		/* We overload EFAULT to mean page got truncated */
		ret = -EFAULT;
		goto out_unlock;
	}

	/* page is wholly or partially inside EOF */
	if (((page->index + 1) << PAGE_SHIFT) > size)
		length = size & ~PAGE_MASK;
	else
		length = PAGE_SIZE;

	offset = page_offset(page);
	while (length > 0) {
		ret = iomap_apply(inode, offset, length,
				IOMAP_WRITE | IOMAP_FAULT, ops, page,
				iomap_page_mkwrite_actor);
		if (unlikely(ret <= 0))
			goto out_unlock;
		offset += ret;
		length -= ret;
	}

	set_page_dirty(page);
	wait_for_stable_page(page);
	return VM_FAULT_LOCKED;
out_unlock:
	unlock_page(page);
	return block_page_mkwrite_return(ret);
}
EXPORT_SYMBOL_GPL(iomap_page_mkwrite);

struct fiemap_ctx {
	struct fiemap_extent_info *fi;
	struct iomap prev;
};

static int iomap_to_fiemap(struct fiemap_extent_info *fi,
		struct iomap *iomap, u32 flags)
{
	switch (iomap->type) {
	case IOMAP_HOLE:
		/* skip holes */
		return 0;
	case IOMAP_DELALLOC:
		flags |= FIEMAP_EXTENT_DELALLOC | FIEMAP_EXTENT_UNKNOWN;
		break;
	case IOMAP_MAPPED:
		break;
	case IOMAP_UNWRITTEN:
		flags |= FIEMAP_EXTENT_UNWRITTEN;
		break;
	case IOMAP_INLINE:
		flags |= FIEMAP_EXTENT_DATA_INLINE;
		break;
	}

	if (iomap->flags & IOMAP_F_MERGED)
		flags |= FIEMAP_EXTENT_MERGED;
	if (iomap->flags & IOMAP_F_SHARED)
		flags |= FIEMAP_EXTENT_SHARED;

	return fiemap_fill_next_extent(fi, iomap->offset,
			iomap->addr != IOMAP_NULL_ADDR ? iomap->addr : 0,
			iomap->length, flags);
}

static loff_t
iomap_fiemap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	struct fiemap_ctx *ctx = data;
	loff_t ret = length;

	if (iomap->type == IOMAP_HOLE)
		return length;

	ret = iomap_to_fiemap(ctx->fi, &ctx->prev, 0);
	ctx->prev = *iomap;
	switch (ret) {
	case 0:		/* success */
		return length;
	case 1:		/* extent array full */
		return 0;
	default:
		return ret;
	}
}

int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fi,
		loff_t start, loff_t len, const struct iomap_ops *ops)
{
	struct fiemap_ctx ctx;
	loff_t ret;

	memset(&ctx, 0, sizeof(ctx));
	ctx.fi = fi;
	ctx.prev.type = IOMAP_HOLE;

	ret = fiemap_check_flags(fi, FIEMAP_FLAG_SYNC);
	if (ret)
		return ret;

	if (fi->fi_flags & FIEMAP_FLAG_SYNC) {
		ret = filemap_write_and_wait(inode->i_mapping);
		if (ret)
			return ret;
	}

	while (len > 0) {
		ret = iomap_apply(inode, start, len, IOMAP_REPORT, ops, &ctx,
				iomap_fiemap_actor);
		/* inode with no (attribute) mapping will give ENOENT */
		if (ret == -ENOENT)
			break;
		if (ret < 0)
			return ret;
		if (ret == 0)
			break;

		start += ret;
		len -= ret;
	}

	if (ctx.prev.type != IOMAP_HOLE) {
		ret = iomap_to_fiemap(fi, &ctx.prev, FIEMAP_EXTENT_LAST);
		if (ret < 0)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_fiemap);

static loff_t
iomap_seek_hole_actor(struct inode *inode, loff_t offset, loff_t length,
		void *data, struct iomap *iomap)
{
	switch (iomap->type) {
	case IOMAP_UNWRITTEN:
		offset = page_cache_seek_hole_data(inode, offset, length,
						   SEEK_HOLE);
		if (offset < 0)
			return length;
		/* fall through */
	case IOMAP_HOLE:
		*(loff_t *)data = offset;
		return 0;
	default:
		return length;
	}
}

loff_t
iomap_seek_hole(struct inode *inode, loff_t offset, const struct iomap_ops *ops)
{
	loff_t size = i_size_read(inode);
	loff_t length = size - offset;
	loff_t ret;

	/* Nothing to be found before or beyond the end of the file. */
	if (offset < 0 || offset >= size)
		return -ENXIO;

	while (length > 0) {
		ret = iomap_apply(inode, offset, length, IOMAP_REPORT, ops,
				  &offset, iomap_seek_hole_actor);
		if (ret < 0)
			return ret;
		if (ret == 0)
			break;

		offset += ret;
		length -= ret;
	}

	return offset;
}
EXPORT_SYMBOL_GPL(iomap_seek_hole);

static loff_t
iomap_seek_data_actor(struct inode *inode, loff_t offset, loff_t length,
		void *data, struct iomap *iomap)
{
	switch (iomap->type) {
	case IOMAP_HOLE:
		return length;
	case IOMAP_UNWRITTEN:
		offset = page_cache_seek_hole_data(inode, offset, length,
						   SEEK_DATA);
		if (offset < 0)
			return length;
		/* fall through */
	default:
		*(loff_t *)data = offset;
		return 0;
	}
}

loff_t
iomap_seek_data(struct inode *inode, loff_t offset, const struct iomap_ops *ops)
{
	loff_t size = i_size_read(inode);
	loff_t length = size - offset;
	loff_t ret;

	/* Nothing to be found before or beyond the end of the file. */
	if (offset < 0 || offset >= size)
		return -ENXIO;

	while (length > 0) {
		ret = iomap_apply(inode, offset, length, IOMAP_REPORT, ops,
				  &offset, iomap_seek_data_actor);
		if (ret < 0)
			return ret;
		if (ret == 0)
			break;

		offset += ret;
		length -= ret;
	}

	if (length <= 0)
		return -ENXIO;
	return offset;
}
EXPORT_SYMBOL_GPL(iomap_seek_data);
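
/*
 * Illustrative sketch: a filesystem's ->llseek can wire SEEK_HOLE/SEEK_DATA
 * up to the two helpers above and leave everything else to
 * generic_file_llseek().  "example_iomap_ops" is hypothetical.
 *
 *	static loff_t example_llseek(struct file *file, loff_t offset,
 *			int whence)
 *	{
 *		struct inode *inode = file_inode(file);
 *
 *		switch (whence) {
 *		case SEEK_HOLE:
 *			offset = iomap_seek_hole(inode, offset,
 *					&example_iomap_ops);
 *			break;
 *		case SEEK_DATA:
 *			offset = iomap_seek_data(inode, offset,
 *					&example_iomap_ops);
 *			break;
 *		default:
 *			return generic_file_llseek(file, offset, whence);
 *		}
 *		if (offset < 0)
 *			return offset;
 *		return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
 *	}
 */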

/*
 * Private flags for iomap_dio, must not overlap with the public ones in
 * iomap.h:
 */
#define IOMAP_DIO_WRITE_FUA	(1 << 28)
#define IOMAP_DIO_NEED_SYNC	(1 << 29)
#define IOMAP_DIO_WRITE		(1 << 30)
#define IOMAP_DIO_DIRTY	(1 << 31)

struct iomap_dio {
	struct kiocb		*iocb;
	iomap_dio_end_io_t	*end_io;
	loff_t			i_size;
	loff_t			size;
	atomic_t		ref;
	unsigned		flags;
	int			error;

	union {
		/* used during submission and for synchronous completion: */
		struct {
			struct iov_iter		*iter;
			struct task_struct	*waiter;
			struct request_queue	*last_queue;
			blk_qc_t		cookie;
		} submit;

		/* used for aio completion: */
		struct {
			struct work_struct	work;
		} aio;
	};
};

static ssize_t iomap_dio_complete(struct iomap_dio *dio)
{
	struct kiocb *iocb = dio->iocb;
	struct inode *inode = file_inode(iocb->ki_filp);
	loff_t offset = iocb->ki_pos;
	ssize_t ret;

	if (dio->end_io) {
		ret = dio->end_io(iocb,
				dio->error ? dio->error : dio->size,
				dio->flags);
	} else {
		ret = dio->error;
	}

	if (likely(!ret)) {
		ret = dio->size;
		/* check for short read */
		if (offset + ret > dio->i_size &&
		    !(dio->flags & IOMAP_DIO_WRITE))
			ret = dio->i_size - offset;
		iocb->ki_pos += ret;
	}

	/*
	 * Try again to invalidate clean pages which might have been cached by
	 * non-direct readahead, or faulted in by get_user_pages() if the source
	 * of the write was an mmap'ed region of the file we're writing.  Either
	 * one is a pretty crazy thing to do, so we don't support it 100%.  If
	 * this invalidation fails, tough, the write still worked...
	 *
	 * And this page cache invalidation has to be after dio->end_io(), as
	 * some filesystems convert unwritten extents to real allocations in
	 * end_io() when necessary, otherwise a racing buffer read would cache
	 * zeros from unwritten extents.
	 */
	if (!dio->error &&
	    (dio->flags & IOMAP_DIO_WRITE) && inode->i_mapping->nrpages) {
		int err;
		err = invalidate_inode_pages2_range(inode->i_mapping,
				offset >> PAGE_SHIFT,
				(offset + dio->size - 1) >> PAGE_SHIFT);
		if (err)
			dio_warn_stale_pagecache(iocb->ki_filp);
	}

	/*
	 * If this is a DSYNC write, make sure we push it to stable storage now
	 * that we've written data.
	 */
	if (ret > 0 && (dio->flags & IOMAP_DIO_NEED_SYNC))
		ret = generic_write_sync(iocb, ret);

	inode_dio_end(file_inode(iocb->ki_filp));
	kfree(dio);

	return ret;
}

static void iomap_dio_complete_work(struct work_struct *work)
{
	struct iomap_dio *dio = container_of(work, struct iomap_dio, aio.work);
	struct kiocb *iocb = dio->iocb;

	iocb->ki_complete(iocb, iomap_dio_complete(dio), 0);
}

/*
 * Set an error in the dio if none is set yet.  We have to use cmpxchg
 * as the submission context and the completion context(s) can race to
 * update the error.
 */
static inline void iomap_dio_set_error(struct iomap_dio *dio, int ret)
{
	cmpxchg(&dio->error, 0, ret);
}

static void iomap_dio_bio_end_io(struct bio *bio)
{
	struct iomap_dio *dio = bio->bi_private;
	bool should_dirty = (dio->flags & IOMAP_DIO_DIRTY);

	if (bio->bi_status)
		iomap_dio_set_error(dio, blk_status_to_errno(bio->bi_status));

	if (atomic_dec_and_test(&dio->ref)) {
		if (is_sync_kiocb(dio->iocb)) {
			struct task_struct *waiter = dio->submit.waiter;

			WRITE_ONCE(dio->submit.waiter, NULL);
			wake_up_process(waiter);
		} else if (dio->flags & IOMAP_DIO_WRITE) {
			struct inode *inode = file_inode(dio->iocb->ki_filp);

			INIT_WORK(&dio->aio.work, iomap_dio_complete_work);
			queue_work(inode->i_sb->s_dio_done_wq, &dio->aio.work);
		} else {
			iomap_dio_complete_work(&dio->aio.work);
		}
	}

	if (should_dirty) {
		bio_check_pages_dirty(bio);
	} else {
		struct bio_vec *bvec;
		int i;

		bio_for_each_segment_all(bvec, bio, i)
			put_page(bvec->bv_page);
		bio_put(bio);
	}
}

static blk_qc_t
iomap_dio_zero(struct iomap_dio *dio, struct iomap *iomap, loff_t pos,
		unsigned len)
{
	struct page *page = ZERO_PAGE(0);
	struct bio *bio;

	bio = bio_alloc(GFP_KERNEL, 1);
	bio_set_dev(bio, iomap->bdev);
	bio->bi_iter.bi_sector = iomap_sector(iomap, pos);
	bio->bi_private = dio;
	bio->bi_end_io = iomap_dio_bio_end_io;

	get_page(page);
	__bio_add_page(bio, page, len, 0);
	bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC | REQ_IDLE);

	atomic_inc(&dio->ref);
	return submit_bio(bio);
}

static loff_t
iomap_dio_actor(struct inode *inode, loff_t pos, loff_t length,
		void *data, struct iomap *iomap)
{
	struct iomap_dio *dio = data;
	unsigned int blkbits = blksize_bits(bdev_logical_block_size(iomap->bdev));
	unsigned int fs_block_size = i_blocksize(inode), pad;
	unsigned int align = iov_iter_alignment(dio->submit.iter);
	struct iov_iter iter;
	struct bio *bio;
	bool need_zeroout = false;
	bool use_fua = false;
	int nr_pages, ret;
	size_t copied = 0;

	if ((pos | length | align) & ((1 << blkbits) - 1))
		return -EINVAL;

	switch (iomap->type) {
	case IOMAP_HOLE:
		if (WARN_ON_ONCE(dio->flags & IOMAP_DIO_WRITE))
			return -EIO;
		/* fall through */
	case IOMAP_UNWRITTEN:
		if (!(dio->flags & IOMAP_DIO_WRITE)) {
			length = iov_iter_zero(length, dio->submit.iter);
			dio->size += length;
			return length;
		}
		dio->flags |= IOMAP_DIO_UNWRITTEN;
		need_zeroout = true;
		break;
	case IOMAP_MAPPED:
		if (iomap->flags & IOMAP_F_SHARED)
			dio->flags |= IOMAP_DIO_COW;
		if (iomap->flags & IOMAP_F_NEW) {
			need_zeroout = true;
		} else {
			/*
			 * Use a FUA write if we need datasync semantics, this
			 * is a pure data IO that doesn't require any metadata
			 * updates and the underlying device supports FUA. This
			 * allows us to avoid cache flushes on IO completion.
			 */
			if (!(iomap->flags & (IOMAP_F_SHARED|IOMAP_F_DIRTY)) &&
			    (dio->flags & IOMAP_DIO_WRITE_FUA) &&
			    blk_queue_fua(bdev_get_queue(iomap->bdev)))
				use_fua = true;
		}
		break;
	default:
		WARN_ON_ONCE(1);
		return -EIO;
	}

	/*
	 * Operate on a partial iter trimmed to the extent we were called for.
	 * We'll update the iter in the dio once we're done with this extent.
	 */
	iter = *dio->submit.iter;
	iov_iter_truncate(&iter, length);

	nr_pages = iov_iter_npages(&iter, BIO_MAX_PAGES);
	if (nr_pages <= 0)
		return nr_pages;

	if (need_zeroout) {
		/* zero out from the start of the block to the write offset */
		pad = pos & (fs_block_size - 1);
		if (pad)
			iomap_dio_zero(dio, iomap, pos - pad, pad);
	}

	do {
		size_t n;
		if (dio->error) {
			iov_iter_revert(dio->submit.iter, copied);
			return 0;
		}

		bio = bio_alloc(GFP_KERNEL, nr_pages);
		bio_set_dev(bio, iomap->bdev);
		bio->bi_iter.bi_sector = iomap_sector(iomap, pos);
		bio->bi_write_hint = dio->iocb->ki_hint;
		bio->bi_private = dio;
		bio->bi_end_io = iomap_dio_bio_end_io;

		ret = bio_iov_iter_get_pages(bio, &iter);
		if (unlikely(ret)) {
			bio_put(bio);
			return copied ? copied : ret;
		}

		n = bio->bi_iter.bi_size;
		if (dio->flags & IOMAP_DIO_WRITE) {
			bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;
			if (use_fua)
				bio->bi_opf |= REQ_FUA;
			else
				dio->flags &= ~IOMAP_DIO_WRITE_FUA;
			task_io_account_write(n);
		} else {
			bio->bi_opf = REQ_OP_READ;
			if (dio->flags & IOMAP_DIO_DIRTY)
				bio_set_pages_dirty(bio);
		}

		iov_iter_advance(dio->submit.iter, n);

		dio->size += n;
		pos += n;
		copied += n;

		nr_pages = iov_iter_npages(&iter, BIO_MAX_PAGES);

		atomic_inc(&dio->ref);

		dio->submit.last_queue = bdev_get_queue(iomap->bdev);
		dio->submit.cookie = submit_bio(bio);
	} while (nr_pages);

	if (need_zeroout) {
		/* zero out from the end of the write to the end of the block */
		pad = pos & (fs_block_size - 1);
		if (pad)
			iomap_dio_zero(dio, iomap, pos, fs_block_size - pad);
	}
	return copied;
}

/*
 * iomap_dio_rw() always completes O_[D]SYNC writes regardless of whether the IO
 * is being issued as AIO or not.  This allows us to optimise pure data writes
 * to use REQ_FUA rather than requiring generic_write_sync() to issue a
 * REQ_FLUSH post write. This is slightly tricky because a single request here
 * can be mapped into multiple disjoint IOs and only a subset of the IOs issued
 * may be pure data writes. In that case, we still need to do a full data sync
 * completion.
 */
ssize_t
iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops, iomap_dio_end_io_t end_io)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = file_inode(iocb->ki_filp);
	size_t count = iov_iter_count(iter);
	loff_t pos = iocb->ki_pos, start = pos;
	loff_t end = iocb->ki_pos + count - 1, ret = 0;
	unsigned int flags = IOMAP_DIRECT;
	struct blk_plug plug;
	struct iomap_dio *dio;

	lockdep_assert_held(&inode->i_rwsem);

	if (!count)
		return 0;

	dio = kmalloc(sizeof(*dio), GFP_KERNEL);
	if (!dio)
		return -ENOMEM;

	dio->iocb = iocb;
	atomic_set(&dio->ref, 1);
	dio->size = 0;
	dio->i_size = i_size_read(inode);
	dio->end_io = end_io;
	dio->error = 0;
	dio->flags = 0;

	dio->submit.iter = iter;
	if (is_sync_kiocb(iocb)) {
		dio->submit.waiter = current;
		dio->submit.cookie = BLK_QC_T_NONE;
		dio->submit.last_queue = NULL;
	}

	if (iov_iter_rw(iter) == READ) {
		if (pos >= dio->i_size)
			goto out_free_dio;

		if (iter->type == ITER_IOVEC)
			dio->flags |= IOMAP_DIO_DIRTY;
	} else {
		flags |= IOMAP_WRITE;
		dio->flags |= IOMAP_DIO_WRITE;

		/* for data sync or sync, we need sync completion processing */
		if (iocb->ki_flags & IOCB_DSYNC)
			dio->flags |= IOMAP_DIO_NEED_SYNC;

		/*
		 * For datasync only writes, we optimistically try using FUA for
		 * this IO.  Any non-FUA write that occurs will clear this flag,
		 * hence we know before completion whether a cache flush is
		 * necessary.
		 */
		if ((iocb->ki_flags & (IOCB_DSYNC | IOCB_SYNC)) == IOCB_DSYNC)
			dio->flags |= IOMAP_DIO_WRITE_FUA;
	}

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (filemap_range_has_page(mapping, start, end)) {
			ret = -EAGAIN;
			goto out_free_dio;
		}
		flags |= IOMAP_NOWAIT;
	}

	ret = filemap_write_and_wait_range(mapping, start, end);
	if (ret)
		goto out_free_dio;

	/*
	 * Try to invalidate cache pages for the range we're direct
	 * writing.  If this invalidation fails, tough, the write will
	 * still work, but racing two incompatible write paths is a
	 * pretty crazy thing to do, so we don't support it 100%.
	 */
	ret = invalidate_inode_pages2_range(mapping,
			start >> PAGE_SHIFT, end >> PAGE_SHIFT);
	if (ret)
		dio_warn_stale_pagecache(iocb->ki_filp);
	ret = 0;

	if (iov_iter_rw(iter) == WRITE && !is_sync_kiocb(iocb) &&
	    !inode->i_sb->s_dio_done_wq) {
		ret = sb_init_dio_done_wq(inode->i_sb);
		if (ret < 0)
			goto out_free_dio;
	}

	inode_dio_begin(inode);

	blk_start_plug(&plug);
	do {
		ret = iomap_apply(inode, pos, count, flags, ops, dio,
				iomap_dio_actor);
		if (ret <= 0) {
			/* magic error code to fall back to buffered I/O */
			if (ret == -ENOTBLK)
				ret = 0;
			break;
		}
		pos += ret;

		if (iov_iter_rw(iter) == READ && pos >= dio->i_size)
			break;
	} while ((count = iov_iter_count(iter)) > 0);
	blk_finish_plug(&plug);

	if (ret < 0)
		iomap_dio_set_error(dio, ret);

	/*
	 * If all the writes we issued were FUA, we don't need to flush the
	 * cache on IO completion. Clear the sync flag for this case.
	 */
	if (dio->flags & IOMAP_DIO_WRITE_FUA)
		dio->flags &= ~IOMAP_DIO_NEED_SYNC;

	if (!atomic_dec_and_test(&dio->ref)) {
		if (!is_sync_kiocb(iocb))
			return -EIOCBQUEUED;

		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (!READ_ONCE(dio->submit.waiter))
				break;

			if (!(iocb->ki_flags & IOCB_HIPRI) ||
			    !dio->submit.last_queue ||
			    !blk_poll(dio->submit.last_queue,
					 dio->submit.cookie))
				io_schedule();
		}
		__set_current_state(TASK_RUNNING);
	}

	ret = iomap_dio_complete(dio);

	return ret;

out_free_dio:
	kfree(dio);
	return ret;
}
EXPORT_SYMBOL_GPL(iomap_dio_rw);
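
/*
 * Illustrative sketch of a caller: a typical ->read_iter O_DIRECT path takes
 * the inode lock shared and hands off to iomap_dio_rw(); a NULL end_io is
 * fine for reads.  "example_iomap_ops" is hypothetical.
 *
 *	static ssize_t example_dio_read(struct kiocb *iocb, struct iov_iter *to)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		if (!iov_iter_count(to))
 *			return 0;
 *
 *		inode_lock_shared(inode);
 *		ret = iomap_dio_rw(iocb, to, &example_iomap_ops, NULL);
 *		inode_unlock_shared(inode);
 *		return ret;
 *	}
 */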

/* Swapfile activation */

#ifdef CONFIG_SWAP
struct iomap_swapfile_info {
	struct iomap iomap;		/* accumulated iomap */
	struct swap_info_struct *sis;
	uint64_t lowest_ppage;		/* lowest physical addr seen (pages) */
	uint64_t highest_ppage;		/* highest physical addr seen (pages) */
	unsigned long nr_pages;		/* number of pages collected */
	int nr_extents;			/* extent count */
};

/*
 * Collect physical extents for this swap file.  Physical extents reported to
 * the swap code must be trimmed to align to a page boundary.  The logical
 * offset within the file is irrelevant since the swapfile code maps logical
 * page numbers of the swap device to the physical page-aligned extents.
 */
static int iomap_swapfile_add_extent(struct iomap_swapfile_info *isi)
{
	struct iomap *iomap = &isi->iomap;
	unsigned long nr_pages;
	uint64_t first_ppage;
	uint64_t first_ppage_reported;
	uint64_t next_ppage;
	int error;

	/*
	 * Round the start up and the end down so that the physical
	 * extent aligns to a page boundary.
	 */
	first_ppage = ALIGN(iomap->addr, PAGE_SIZE) >> PAGE_SHIFT;
	next_ppage = ALIGN_DOWN(iomap->addr + iomap->length, PAGE_SIZE) >>
			PAGE_SHIFT;

	/* Skip too-short physical extents. */
	if (first_ppage >= next_ppage)
		return 0;
	nr_pages = next_ppage - first_ppage;

	/*
	 * Calculate how much swap space we're adding; the first page contains
	 * the swap header and doesn't count.  The mm still wants that first
	 * page fed to add_swap_extent, however.
	 */
	first_ppage_reported = first_ppage;
	if (iomap->offset == 0)
		first_ppage_reported++;
	if (isi->lowest_ppage > first_ppage_reported)
		isi->lowest_ppage = first_ppage_reported;
	if (isi->highest_ppage < (next_ppage - 1))
		isi->highest_ppage = next_ppage - 1;

	/* Add extent, set up for the next call. */
	error = add_swap_extent(isi->sis, isi->nr_pages, nr_pages, first_ppage);
	if (error < 0)
		return error;
	isi->nr_extents += error;
	isi->nr_pages += nr_pages;
	return 0;
}
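
/*
 * Illustrative worked example (assumes 4k pages, made-up numbers): an extent
 * with iomap->addr == 0x11200 and iomap->length == 0x3000 covers disk bytes
 * [0x11200, 0x14200).  ALIGN rounds the start up to 0x12000 and ALIGN_DOWN
 * rounds the end down to 0x14000, so only physical pages 0x12 and 0x13 are
 * reported; the unaligned head and tail are discarded rather than handed to
 * the swap code.
 */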

/*
 * Accumulate iomaps for this swap file.  We have to accumulate iomaps because
 * swap only cares about contiguous page-aligned physical extents and makes no
 * distinction between written and unwritten extents.
 */
static loff_t iomap_swapfile_activate_actor(struct inode *inode, loff_t pos,
		loff_t count, void *data, struct iomap *iomap)
{
	struct iomap_swapfile_info *isi = data;
	int error;

	switch (iomap->type) {
	case IOMAP_MAPPED:
	case IOMAP_UNWRITTEN:
		/* Only real or unwritten extents. */
		break;
	case IOMAP_INLINE:
		/* No inline data. */
		pr_err("swapon: file is inline\n");
		return -EINVAL;
	default:
		pr_err("swapon: file has unallocated extents\n");
		return -EINVAL;
	}

	/* No uncommitted metadata or shared blocks. */
	if (iomap->flags & IOMAP_F_DIRTY) {
		pr_err("swapon: file is not committed\n");
		return -EINVAL;
	}
	if (iomap->flags & IOMAP_F_SHARED) {
		pr_err("swapon: file has shared extents\n");
		return -EINVAL;
	}

	/* Only one bdev per swap file. */
	if (iomap->bdev != isi->sis->bdev) {
		pr_err("swapon: file is on multiple devices\n");
		return -EINVAL;
	}

	if (isi->iomap.length == 0) {
		/* No accumulated extent, so just store it. */
		memcpy(&isi->iomap, iomap, sizeof(isi->iomap));
	} else if (isi->iomap.addr + isi->iomap.length == iomap->addr) {
		/* Append this to the accumulated extent. */
		isi->iomap.length += iomap->length;
	} else {
		/* Otherwise, add the retained iomap and store this one. */
		error = iomap_swapfile_add_extent(isi);
		if (error)
			return error;
		memcpy(&isi->iomap, iomap, sizeof(isi->iomap));
	}
	return count;
}

/*
 * Iterate a swap file's iomaps to construct physical extents that can be
 * passed to the swapfile subsystem.
 */
int iomap_swapfile_activate(struct swap_info_struct *sis,
		struct file *swap_file, sector_t *pagespan,
		const struct iomap_ops *ops)
{
	struct iomap_swapfile_info isi = {
		.sis = sis,
		.lowest_ppage = (sector_t)-1ULL,
	};
	struct address_space *mapping = swap_file->f_mapping;
	struct inode *inode = mapping->host;
	loff_t pos = 0;
	loff_t len = ALIGN_DOWN(i_size_read(inode), PAGE_SIZE);
	loff_t ret;

	ret = filemap_write_and_wait(inode->i_mapping);
	if (ret)
		return ret;

	while (len > 0) {
		ret = iomap_apply(inode, pos, len, IOMAP_REPORT,
				ops, &isi, iomap_swapfile_activate_actor);
		if (ret <= 0)
			return ret;

		pos += ret;
		len -= ret;
	}

	if (isi.iomap.length) {
		ret = iomap_swapfile_add_extent(&isi);
		if (ret)
			return ret;
	}

	*pagespan = 1 + isi.highest_ppage - isi.lowest_ppage;
	sis->max = isi.nr_pages;
	sis->pages = isi.nr_pages - 1;
	sis->highest_bit = isi.nr_pages - 1;
	return isi.nr_extents;
}
EXPORT_SYMBOL_GPL(iomap_swapfile_activate);
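
/*
 * Illustrative sketch: a filesystem exposes this through the address_space
 * operation ->swap_activate, roughly:
 *
 *	static int example_swap_activate(struct swap_info_struct *sis,
 *			struct file *swap_file, sector_t *span)
 *	{
 *		return iomap_swapfile_activate(sis, swap_file, span,
 *				&example_iomap_ops);
 *	}
 *
 * "example_iomap_ops" stands in for the filesystem's own iomap_ops.
 */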
#endif /* CONFIG_SWAP */