// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 Red Hat, Inc.
 * Copyright (c) 2016-2018 Christoph Hellwig.
 */
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/uaccess.h>
#include <linux/gfp.h>
#include <linux/migrate.h>
#include <linux/mm.h>
#include <linux/mm_inline.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/backing-dev.h>
#include <linux/buffer_head.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/dax.h>
#include <linux/sched/signal.h>

#include "internal.h"

/*
 * Execute an iomap write on a segment of the mapping that spans a
 * contiguous range of pages that have identical block mapping state.
 *
 * This avoids the need to map pages individually, do individual allocations
 * for each page and most importantly avoid the need for filesystem specific
 * locking per page. Instead, all the operations are amortised over the entire
 * range of pages. It is assumed that the filesystems will lock whatever
 * resources they require in the iomap_begin call, and release them in the
 * iomap_end call.
 */
loff_t
iomap_apply(struct inode *inode, loff_t pos, loff_t length, unsigned flags,
		const struct iomap_ops *ops, void *data, iomap_actor_t actor)
{
	struct iomap iomap = { 0 };
	loff_t written = 0, ret;

	/*
	 * Need to map a range from start position for length bytes. This can
	 * span multiple pages - it is only guaranteed to return a range of a
	 * single type of pages (e.g. all into a hole, all mapped or all
	 * unwritten). Failure at this point has nothing to undo.
	 *
	 * If allocation is required for this range, reserve the space now so
	 * that the allocation is guaranteed to succeed later on. Once we copy
	 * the data into the page cache pages, then we cannot fail otherwise we
	 * expose transient stale data. If the reserve fails, we can safely
	 * back out at this point as there is nothing to undo.
	 */
	ret = ops->iomap_begin(inode, pos, length, flags, &iomap);
	if (ret)
		return ret;
	if (WARN_ON(iomap.offset > pos))
		return -EIO;
	if (WARN_ON(iomap.length == 0))
		return -EIO;

	/*
	 * Cut down the length to the one actually provided by the filesystem,
	 * as it might not be able to give us the whole size that we requested.
	 */
	if (iomap.offset + iomap.length < pos + length)
		length = iomap.offset + iomap.length - pos;

	/*
	 * Now that we have guaranteed that the space allocation will succeed,
	 * we can do the copy-in page by page without having to worry about
	 * failures exposing transient data.
	 */
	written = actor(inode, pos, length, data, &iomap);

	/*
	 * Now the data has been copied, commit the range we've copied. This
	 * should not fail unless the filesystem has had a fatal error.
	 */
	if (ops->iomap_end) {
		ret = ops->iomap_end(inode, pos, length,
				     written > 0 ? written : 0,
				     flags, &iomap);
	}

	return written ? written : ret;
}

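/*
 * Example (illustrative only, not part of this file): a minimal actor
 * matching the iomap_actor_t shape, which counts how many bytes of a range
 * are backed by holes.  Returning the full length consumes the whole mapped
 * range; names here are hypothetical:
 *
 *	static loff_t
 *	hole_count_actor(struct inode *inode, loff_t pos, loff_t length,
 *			void *data, struct iomap *iomap)
 *	{
 *		loff_t *holes = data;
 *
 *		if (iomap->type == IOMAP_HOLE)
 *			*holes += length;
 *		return length;
 *	}
 *
 * A caller would drive it with iomap_apply(inode, pos, len, 0, ops, &holes,
 * hole_count_actor) in a loop until the range is exhausted.
 */
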
static sector_t
iomap_sector(struct iomap *iomap, loff_t pos)
{
	return (iomap->addr + pos - iomap->offset) >> SECTOR_SHIFT;
}

static struct iomap_page *
iomap_page_create(struct inode *inode, struct page *page)
{
	struct iomap_page *iop = to_iomap_page(page);

	if (iop || i_blocksize(inode) == PAGE_SIZE)
		return iop;

	iop = kmalloc(sizeof(*iop), GFP_NOFS | __GFP_NOFAIL);
	atomic_set(&iop->read_count, 0);
	atomic_set(&iop->write_count, 0);
	bitmap_zero(iop->uptodate, PAGE_SIZE / SECTOR_SIZE);

	/*
	 * migrate_page_move_mapping() assumes that pages with private data have
	 * their count elevated by 1.
	 */
	get_page(page);
	set_page_private(page, (unsigned long)iop);
	SetPagePrivate(page);
	return iop;
}

static void
iomap_page_release(struct page *page)
{
	struct iomap_page *iop = to_iomap_page(page);

	if (!iop)
		return;
	WARN_ON_ONCE(atomic_read(&iop->read_count));
	WARN_ON_ONCE(atomic_read(&iop->write_count));
	ClearPagePrivate(page);
	set_page_private(page, 0);
	put_page(page);
	kfree(iop);
}

/*
 * Calculate the range inside the page that we actually need to read.
 */
static void
iomap_adjust_read_range(struct inode *inode, struct iomap_page *iop,
		loff_t *pos, loff_t length, unsigned *offp, unsigned *lenp)
{
	loff_t orig_pos = *pos;
	loff_t isize = i_size_read(inode);
	unsigned block_bits = inode->i_blkbits;
	unsigned block_size = (1 << block_bits);
	unsigned poff = offset_in_page(*pos);
	unsigned plen = min_t(loff_t, PAGE_SIZE - poff, length);
	unsigned first = poff >> block_bits;
	unsigned last = (poff + plen - 1) >> block_bits;

	/*
	 * If the block size is smaller than the page size we need to check the
	 * per-block uptodate status and adjust the offset and length if needed
	 * to avoid reading in already uptodate ranges.
	 */
	if (iop) {
		unsigned int i;

		/* move forward for each leading block marked uptodate */
		for (i = first; i <= last; i++) {
			if (!test_bit(i, iop->uptodate))
				break;
			*pos += block_size;
			poff += block_size;
			plen -= block_size;
			first++;
		}

		/* truncate len if we find any trailing uptodate block(s) */
		for ( ; i <= last; i++) {
			if (test_bit(i, iop->uptodate)) {
				plen -= (last - i + 1) * block_size;
				last = i - 1;
				break;
			}
		}
	}

	/*
	 * If the extent spans the block that contains the i_size we need to
	 * handle both halves separately so that we properly zero data in the
	 * page cache for blocks that are entirely outside of i_size.
	 */
	if (orig_pos <= isize && orig_pos + length > isize) {
		unsigned end = offset_in_page(isize - 1) >> block_bits;

		if (first <= end && last > end)
			plen -= (last - end) * block_size;
	}

	*offp = poff;
	*lenp = plen;
}

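/*
 * Worked example (illustrative): with 1k blocks on a 4k page, suppose a read
 * covers the whole page and blocks 0 and 3 are already marked uptodate in
 * iop->uptodate.  The first loop advances *pos and poff past block 0; the
 * second loop trims the trailing uptodate block 3.  The caller is told to
 * read only blocks 1-2: *offp == 1024 and *lenp == 2048.
 */
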
static void
iomap_set_range_uptodate(struct page *page, unsigned off, unsigned len)
{
	struct iomap_page *iop = to_iomap_page(page);
	struct inode *inode = page->mapping->host;
	unsigned first = off >> inode->i_blkbits;
	unsigned last = (off + len - 1) >> inode->i_blkbits;
	unsigned int i;
	bool uptodate = true;

	if (iop) {
		for (i = 0; i < PAGE_SIZE / i_blocksize(inode); i++) {
			if (i >= first && i <= last)
				set_bit(i, iop->uptodate);
			else if (!test_bit(i, iop->uptodate))
				uptodate = false;
		}
	}

	if (uptodate && !PageError(page))
		SetPageUptodate(page);
}

static void
iomap_read_finish(struct iomap_page *iop, struct page *page)
{
	if (!iop || atomic_dec_and_test(&iop->read_count))
		unlock_page(page);
}

static void
iomap_read_page_end_io(struct bio_vec *bvec, int error)
{
	struct page *page = bvec->bv_page;
	struct iomap_page *iop = to_iomap_page(page);

	if (unlikely(error)) {
		ClearPageUptodate(page);
		SetPageError(page);
	} else {
		iomap_set_range_uptodate(page, bvec->bv_offset, bvec->bv_len);
	}

	iomap_read_finish(iop, page);
}

static void
iomap_read_inline_data(struct inode *inode, struct page *page,
		struct iomap *iomap)
{
	size_t size = i_size_read(inode);
	void *addr;

	if (PageUptodate(page))
		return;

	BUG_ON(page->index);
	BUG_ON(size > PAGE_SIZE - offset_in_page(iomap->inline_data));

	addr = kmap_atomic(page);
	memcpy(addr, iomap->inline_data, size);
	memset(addr + size, 0, PAGE_SIZE - size);
	kunmap_atomic(addr);
	SetPageUptodate(page);
}

static void
iomap_read_end_io(struct bio *bio)
{
	int error = blk_status_to_errno(bio->bi_status);
	struct bio_vec *bvec;
	int i;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, i, iter_all)
		iomap_read_page_end_io(bvec, error);
	bio_put(bio);
}

struct iomap_readpage_ctx {
	struct page		*cur_page;
	bool			cur_page_in_bio;
	bool			is_readahead;
	struct bio		*bio;
	struct list_head	*pages;
};

static loff_t
iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	struct iomap_readpage_ctx *ctx = data;
	struct page *page = ctx->cur_page;
	struct iomap_page *iop = iomap_page_create(inode, page);
	bool is_contig = false;
	loff_t orig_pos = pos;
	unsigned poff, plen;
	sector_t sector;

	if (iomap->type == IOMAP_INLINE) {
		WARN_ON_ONCE(pos);
		iomap_read_inline_data(inode, page, iomap);
		return PAGE_SIZE;
	}

	/* zero post-eof blocks as the page may be mapped */
	iomap_adjust_read_range(inode, iop, &pos, length, &poff, &plen);
	if (plen == 0)
		goto done;

	if (iomap->type != IOMAP_MAPPED || pos >= i_size_read(inode)) {
		zero_user(page, poff, plen);
		iomap_set_range_uptodate(page, poff, plen);
		goto done;
	}

	ctx->cur_page_in_bio = true;

	/*
	 * Try to merge into a previous segment if we can.
	 */
	sector = iomap_sector(iomap, pos);
	if (ctx->bio && bio_end_sector(ctx->bio) == sector) {
		if (__bio_try_merge_page(ctx->bio, page, plen, poff, true))
			goto done;
		is_contig = true;
	}

	/*
	 * If we start a new segment we need to increase the read count, and we
	 * need to do so before submitting any previous full bio to make sure
	 * that we don't prematurely unlock the page.
	 */
	if (iop)
		atomic_inc(&iop->read_count);

	if (!ctx->bio || !is_contig || bio_full(ctx->bio)) {
		gfp_t gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL);
		int nr_vecs = (length + PAGE_SIZE - 1) >> PAGE_SHIFT;

		if (ctx->bio)
			submit_bio(ctx->bio);

		if (ctx->is_readahead) /* same as readahead_gfp_mask */
			gfp |= __GFP_NORETRY | __GFP_NOWARN;
		ctx->bio = bio_alloc(gfp, min(BIO_MAX_PAGES, nr_vecs));
		ctx->bio->bi_opf = REQ_OP_READ;
		if (ctx->is_readahead)
			ctx->bio->bi_opf |= REQ_RAHEAD;
		ctx->bio->bi_iter.bi_sector = sector;
		bio_set_dev(ctx->bio, iomap->bdev);
		ctx->bio->bi_end_io = iomap_read_end_io;
	}

	bio_add_page(ctx->bio, page, plen, poff);
done:
	/*
	 * Move the caller beyond our range so that it keeps making progress.
	 * For that we have to include any leading non-uptodate ranges, but
	 * we can skip trailing ones as they will be handled in the next
	 * iteration.
	 */
	return pos - orig_pos + plen;
}

int
iomap_readpage(struct page *page, const struct iomap_ops *ops)
{
	struct iomap_readpage_ctx ctx = { .cur_page = page };
	struct inode *inode = page->mapping->host;
	unsigned poff;
	loff_t ret;

	for (poff = 0; poff < PAGE_SIZE; poff += ret) {
		ret = iomap_apply(inode, page_offset(page) + poff,
				PAGE_SIZE - poff, 0, ops, &ctx,
				iomap_readpage_actor);
		if (ret <= 0) {
			WARN_ON_ONCE(ret == 0);
			SetPageError(page);
			break;
		}
	}

	if (ctx.bio) {
		submit_bio(ctx.bio);
		WARN_ON_ONCE(!ctx.cur_page_in_bio);
	} else {
		WARN_ON_ONCE(ctx.cur_page_in_bio);
		unlock_page(page);
	}

	/*
	 * Just like mpage_readpages and block_read_full_page we always
	 * return 0 and just mark the page as PageError on errors. This
	 * should be cleaned up all through the stack eventually.
	 */
	return 0;
}
EXPORT_SYMBOL_GPL(iomap_readpage);

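/*
 * Example (illustrative sketch): a filesystem wires this helper into its
 * address_space_operations, supplying its own iomap_ops.  The names below
 * are hypothetical:
 *
 *	static int myfs_readpage(struct file *unused, struct page *page)
 *	{
 *		return iomap_readpage(page, &myfs_iomap_ops);
 *	}
 */
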
static struct page *
iomap_next_page(struct inode *inode, struct list_head *pages, loff_t pos,
		loff_t length, loff_t *done)
{
	while (!list_empty(pages)) {
		struct page *page = lru_to_page(pages);

		if (page_offset(page) >= (u64)pos + length)
			break;

		list_del(&page->lru);
		if (!add_to_page_cache_lru(page, inode->i_mapping, page->index,
				GFP_NOFS))
			return page;

		/*
		 * If we already have a page in the page cache at index we are
		 * done. Upper layers don't care if it is uptodate after the
		 * readpages call itself as every page gets checked again once
		 * actually needed.
		 */
		*done += PAGE_SIZE;
		put_page(page);
	}

	return NULL;
}

static loff_t
iomap_readpages_actor(struct inode *inode, loff_t pos, loff_t length,
		void *data, struct iomap *iomap)
{
	struct iomap_readpage_ctx *ctx = data;
	loff_t done, ret;

	for (done = 0; done < length; done += ret) {
		if (ctx->cur_page && offset_in_page(pos + done) == 0) {
			if (!ctx->cur_page_in_bio)
				unlock_page(ctx->cur_page);
			put_page(ctx->cur_page);
			ctx->cur_page = NULL;
		}
		if (!ctx->cur_page) {
			ctx->cur_page = iomap_next_page(inode, ctx->pages,
					pos, length, &done);
			if (!ctx->cur_page)
				break;
			ctx->cur_page_in_bio = false;
		}
		ret = iomap_readpage_actor(inode, pos + done, length - done,
				ctx, iomap);
	}

	return done;
}

int
iomap_readpages(struct address_space *mapping, struct list_head *pages,
		unsigned nr_pages, const struct iomap_ops *ops)
{
	struct iomap_readpage_ctx ctx = {
		.pages		= pages,
		.is_readahead	= true,
	};
	loff_t pos = page_offset(list_entry(pages->prev, struct page, lru));
	loff_t last = page_offset(list_entry(pages->next, struct page, lru));
	loff_t length = last - pos + PAGE_SIZE, ret = 0;

	while (length > 0) {
		ret = iomap_apply(mapping->host, pos, length, 0, ops,
				&ctx, iomap_readpages_actor);
		if (ret <= 0) {
			WARN_ON_ONCE(ret == 0);
			goto done;
		}
		pos += ret;
		length -= ret;
	}
	ret = 0;
done:
	if (ctx.bio)
		submit_bio(ctx.bio);
	if (ctx.cur_page) {
		if (!ctx.cur_page_in_bio)
			unlock_page(ctx.cur_page);
		put_page(ctx.cur_page);
	}

	/*
	 * Check that we didn't lose a page due to the arcane calling
	 * conventions.
	 */
	WARN_ON_ONCE(!ret && !list_empty(ctx.pages));
	return ret;
}
EXPORT_SYMBOL_GPL(iomap_readpages);

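/*
 * Example (illustrative): the readahead-side counterpart in the same
 * hypothetical filesystem:
 *
 *	static int myfs_readpages(struct file *unused,
 *			struct address_space *mapping,
 *			struct list_head *pages, unsigned nr_pages)
 *	{
 *		return iomap_readpages(mapping, pages, nr_pages,
 *				&myfs_iomap_ops);
 *	}
 */
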
/*
 * iomap_is_partially_uptodate checks whether blocks within a page are
 * uptodate or not.
 *
 * Returns true if all blocks which correspond to a file portion
 * we want to read within the page are uptodate.
 */
int
iomap_is_partially_uptodate(struct page *page, unsigned long from,
		unsigned long count)
{
	struct iomap_page *iop = to_iomap_page(page);
	struct inode *inode = page->mapping->host;
	unsigned len, first, last;
	unsigned i;

	/* Limit range to one page */
	len = min_t(unsigned, PAGE_SIZE - from, count);

	/* First and last blocks in range within page */
	first = from >> inode->i_blkbits;
	last = (from + len - 1) >> inode->i_blkbits;

	if (iop) {
		for (i = first; i <= last; i++)
			if (!test_bit(i, iop->uptodate))
				return 0;
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate);

int
iomap_releasepage(struct page *page, gfp_t gfp_mask)
{
	/*
	 * mm accommodates an old ext3 case where clean pages might not have had
	 * the dirty bit cleared. Thus, it can send actual dirty pages to
	 * ->releasepage() via shrink_active_list(), skip those here.
	 */
	if (PageDirty(page) || PageWriteback(page))
		return 0;
	iomap_page_release(page);
	return 1;
}
EXPORT_SYMBOL_GPL(iomap_releasepage);

void
iomap_invalidatepage(struct page *page, unsigned int offset, unsigned int len)
{
	/*
	 * If we are invalidating the entire page, clear the dirty state from it
	 * and release it to avoid unnecessary buildup of the LRU.
	 */
	if (offset == 0 && len == PAGE_SIZE) {
		WARN_ON_ONCE(PageWriteback(page));
		cancel_dirty_page(page);
		iomap_page_release(page);
	}
}
EXPORT_SYMBOL_GPL(iomap_invalidatepage);

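/*
 * Example (illustrative): these helpers are typically installed together so
 * the page cache can query and tear down the per-page iomap_page state.
 * Hypothetical wiring:
 *
 *	static const struct address_space_operations myfs_aops = {
 *		.readpage		= myfs_readpage,
 *		.readpages		= myfs_readpages,
 *		.is_partially_uptodate	= iomap_is_partially_uptodate,
 *		.releasepage		= iomap_releasepage,
 *		.invalidatepage		= iomap_invalidatepage,
 *	};
 */
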
#ifdef CONFIG_MIGRATION
int
iomap_migrate_page(struct address_space *mapping, struct page *newpage,
		struct page *page, enum migrate_mode mode)
{
	int ret;

	ret = migrate_page_move_mapping(mapping, newpage, page, mode, 0);
	if (ret != MIGRATEPAGE_SUCCESS)
		return ret;

	if (page_has_private(page)) {
		ClearPagePrivate(page);
		get_page(newpage);
		set_page_private(newpage, page_private(page));
		set_page_private(page, 0);
		put_page(page);
		SetPagePrivate(newpage);
	}

	if (mode != MIGRATE_SYNC_NO_COPY)
		migrate_page_copy(newpage, page);
	else
		migrate_page_states(newpage, page);
	return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL_GPL(iomap_migrate_page);
#endif /* CONFIG_MIGRATION */

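/*
 * Example (illustrative): when migration is enabled, the same hypothetical
 * aops would also set
 *
 *	.migratepage = iomap_migrate_page,
 *
 * so the iomap_page private data moves with the page instead of blocking
 * migration.
 */
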
static void
iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
{
	loff_t i_size = i_size_read(inode);

	/*
	 * Only truncate newly allocated pages beyond EOF, even if the
	 * write started inside the existing inode size.
	 */
	if (pos + len > i_size)
		truncate_pagecache_range(inode, max(pos, i_size), pos + len);
}

static int
iomap_read_page_sync(struct inode *inode, loff_t block_start, struct page *page,
		unsigned poff, unsigned plen, unsigned from, unsigned to,
		struct iomap *iomap)
{
	struct bio_vec bvec;
	struct bio bio;

	if (iomap->type != IOMAP_MAPPED || block_start >= i_size_read(inode)) {
		zero_user_segments(page, poff, from, to, poff + plen);
		iomap_set_range_uptodate(page, poff, plen);
		return 0;
	}

	bio_init(&bio, &bvec, 1);
	bio.bi_opf = REQ_OP_READ;
	bio.bi_iter.bi_sector = iomap_sector(iomap, block_start);
	bio_set_dev(&bio, iomap->bdev);
	__bio_add_page(&bio, page, plen, poff);
	return submit_bio_wait(&bio);
}

static int
__iomap_write_begin(struct inode *inode, loff_t pos, unsigned len,
		struct page *page, struct iomap *iomap)
{
	struct iomap_page *iop = iomap_page_create(inode, page);
	loff_t block_size = i_blocksize(inode);
	loff_t block_start = pos & ~(block_size - 1);
	loff_t block_end = (pos + len + block_size - 1) & ~(block_size - 1);
	unsigned from = offset_in_page(pos), to = from + len, poff, plen;
	int status = 0;

	if (PageUptodate(page))
		return 0;

	do {
		iomap_adjust_read_range(inode, iop, &block_start,
				block_end - block_start, &poff, &plen);
		if (plen == 0)
			break;

		if ((from > poff && from < poff + plen) ||
		    (to > poff && to < poff + plen)) {
			status = iomap_read_page_sync(inode, block_start, page,
					poff, plen, from, to, iomap);
			if (status)
				break;
		}

	} while ((block_start += plen) < block_end);

	return status;
}

static int
iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, struct iomap *iomap)
{
	const struct iomap_page_ops *page_ops = iomap->page_ops;
	pgoff_t index = pos >> PAGE_SHIFT;
	struct page *page;
	int status = 0;

	BUG_ON(pos + len > iomap->offset + iomap->length);

	if (fatal_signal_pending(current))
		return -EINTR;

	if (page_ops && page_ops->page_prepare) {
		status = page_ops->page_prepare(inode, pos, len, iomap);
		if (status)
			return status;
	}

	page = grab_cache_page_write_begin(inode->i_mapping, index, flags);
	if (!page) {
		status = -ENOMEM;
		goto out_no_page;
	}

	if (iomap->type == IOMAP_INLINE)
		iomap_read_inline_data(inode, page, iomap);
	else if (iomap->flags & IOMAP_F_BUFFER_HEAD)
		status = __block_write_begin_int(page, pos, len, NULL, iomap);
	else
		status = __iomap_write_begin(inode, pos, len, page, iomap);

	if (unlikely(status))
		goto out_unlock;

	*pagep = page;
	return 0;

out_unlock:
	unlock_page(page);
	put_page(page);
	iomap_write_failed(inode, pos, len);

out_no_page:
	if (page_ops && page_ops->page_done)
		page_ops->page_done(inode, pos, 0, NULL, iomap);
	return status;
}

int
iomap_set_page_dirty(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	int newly_dirty;

	if (unlikely(!mapping))
		return !TestSetPageDirty(page);

	/*
	 * Lock out page->mem_cgroup migration to keep PageDirty
	 * synchronized with per-memcg dirty page counters.
	 */
	lock_page_memcg(page);
	newly_dirty = !TestSetPageDirty(page);
	if (newly_dirty)
		__set_page_dirty(page, mapping, 0);
	unlock_page_memcg(page);

	if (newly_dirty)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
	return newly_dirty;
}
EXPORT_SYMBOL_GPL(iomap_set_page_dirty);

static int
__iomap_write_end(struct inode *inode, loff_t pos, unsigned len,
		unsigned copied, struct page *page, struct iomap *iomap)
{
	flush_dcache_page(page);

	/*
	 * The blocks that were entirely written will now be uptodate, so we
	 * don't have to worry about a readpage reading them and overwriting a
	 * partial write. However if we have encountered a short write and only
	 * partially written into a block, it will not be marked uptodate, so a
	 * readpage might come in and destroy our partial write.
	 *
	 * Do the simplest thing, and just treat any short write to a non
	 * uptodate page as a zero-length write, and force the caller to redo
	 * the whole thing.
	 */
	if (unlikely(copied < len && !PageUptodate(page)))
		return 0;
	iomap_set_range_uptodate(page, offset_in_page(pos), len);
	iomap_set_page_dirty(page);
	return copied;
}

static int
iomap_write_end_inline(struct inode *inode, struct page *page,
		struct iomap *iomap, loff_t pos, unsigned copied)
{
	void *addr;

	WARN_ON_ONCE(!PageUptodate(page));
	BUG_ON(pos + copied > PAGE_SIZE - offset_in_page(iomap->inline_data));

	addr = kmap_atomic(page);
	memcpy(iomap->inline_data + pos, addr + pos, copied);
	kunmap_atomic(addr);

	mark_inode_dirty(inode);
	return copied;
}

static int
iomap_write_end(struct inode *inode, loff_t pos, unsigned len,
		unsigned copied, struct page *page, struct iomap *iomap)
{
	const struct iomap_page_ops *page_ops = iomap->page_ops;
	int ret;

	if (iomap->type == IOMAP_INLINE) {
		ret = iomap_write_end_inline(inode, page, iomap, pos, copied);
	} else if (iomap->flags & IOMAP_F_BUFFER_HEAD) {
		ret = block_write_end(NULL, inode->i_mapping, pos, len, copied,
				page, NULL);
	} else {
		ret = __iomap_write_end(inode, pos, len, copied, page, iomap);
	}

	__generic_write_end(inode, pos, ret, page);
	if (page_ops && page_ops->page_done)
		page_ops->page_done(inode, pos, copied, page, iomap);
	put_page(page);

	if (ret < len)
		iomap_write_failed(inode, pos, len);
	return ret;
}

static loff_t
iomap_write_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	struct iov_iter *i = data;
	long status = 0;
	ssize_t written = 0;
	unsigned int flags = AOP_FLAG_NOFS;

	do {
		struct page *page;
		unsigned long offset;	/* Offset into pagecache page */
		unsigned long bytes;	/* Bytes to write to page */
		size_t copied;		/* Bytes copied from user */

		offset = offset_in_page(pos);
		bytes = min_t(unsigned long, PAGE_SIZE - offset,
						iov_iter_count(i));
again:
		if (bytes > length)
			bytes = length;

		/*
		 * Bring in the user page that we will copy from _first_.
		 * Otherwise there's a nasty deadlock on copying from the
		 * same page as we're writing to, without it being marked
		 * up-to-date.
		 *
		 * Not only is this an optimisation, but it is also required
		 * to check that the address is actually valid, when atomic
		 * usercopies are used, below.
		 */
		if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
			status = -EFAULT;
			break;
		}

		status = iomap_write_begin(inode, pos, bytes, flags, &page,
				iomap);
		if (unlikely(status))
			break;

		if (mapping_writably_mapped(inode->i_mapping))
			flush_dcache_page(page);

		copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);

		flush_dcache_page(page);

		status = iomap_write_end(inode, pos, bytes, copied, page,
				iomap);
		if (unlikely(status < 0))
			break;
		copied = status;

		cond_resched();

		iov_iter_advance(i, copied);
		if (unlikely(copied == 0)) {
			/*
			 * If we were unable to copy any data at all, we must
			 * fall back to a single segment length write.
			 *
			 * If we didn't fallback here, we could livelock
			 * because not all segments in the iov can be copied at
			 * once without a pagefault.
			 */
			bytes = min_t(unsigned long, PAGE_SIZE - offset,
						iov_iter_single_seg_count(i));
			goto again;
		}
		pos += copied;
		written += copied;
		length -= copied;

		balance_dirty_pages_ratelimited(inode->i_mapping);
	} while (iov_iter_count(i) && length);

	return written ? written : status;
}

ssize_t
iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops)
{
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	loff_t pos = iocb->ki_pos, ret = 0, written = 0;

	while (iov_iter_count(iter)) {
		ret = iomap_apply(inode, pos, iov_iter_count(iter),
				IOMAP_WRITE, ops, iter, iomap_write_actor);
		if (ret <= 0)
			break;
		pos += ret;
		written += ret;
	}

	return written ? written : ret;
}
EXPORT_SYMBOL_GPL(iomap_file_buffered_write);

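/*
 * Example (illustrative): a hypothetical ->write_iter would take the inode
 * lock, run the generic checks, and then hand the iterator to
 * iomap_file_buffered_write:
 *
 *	static ssize_t myfs_write_iter(struct kiocb *iocb, struct iov_iter *from)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		inode_lock(inode);
 *		ret = generic_write_checks(iocb, from);
 *		if (ret > 0)
 *			ret = iomap_file_buffered_write(iocb, from,
 *					&myfs_iomap_ops);
 *		inode_unlock(inode);
 *		if (ret > 0)
 *			ret = generic_write_sync(iocb, ret);
 *		return ret;
 *	}
 */
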
static struct page *
__iomap_read_page(struct inode *inode, loff_t offset)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	page = read_mapping_page(mapping, offset >> PAGE_SHIFT, NULL);
	if (IS_ERR(page))
		return page;
	if (!PageUptodate(page)) {
		put_page(page);
		return ERR_PTR(-EIO);
	}
	return page;
}

static loff_t
iomap_dirty_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	long status = 0;
	ssize_t written = 0;

	do {
		struct page *page, *rpage;
		unsigned long offset;	/* Offset into pagecache page */
		unsigned long bytes;	/* Bytes to write to page */

		offset = offset_in_page(pos);
		bytes = min_t(loff_t, PAGE_SIZE - offset, length);

		rpage = __iomap_read_page(inode, pos);
		if (IS_ERR(rpage))
			return PTR_ERR(rpage);

		status = iomap_write_begin(inode, pos, bytes,
					   AOP_FLAG_NOFS, &page, iomap);
		put_page(rpage);
		if (unlikely(status))
			return status;

		WARN_ON_ONCE(!PageUptodate(page));

		status = iomap_write_end(inode, pos, bytes, bytes, page, iomap);
		if (unlikely(status <= 0)) {
			if (WARN_ON_ONCE(status == 0))
				return -EIO;
			return status;
		}

		cond_resched();

		pos += status;
		written += status;
		length -= status;

		balance_dirty_pages_ratelimited(inode->i_mapping);
	} while (length);

	return written;
}

int
iomap_file_dirty(struct inode *inode, loff_t pos, loff_t len,
		const struct iomap_ops *ops)
{
	loff_t ret;

	while (len) {
		ret = iomap_apply(inode, pos, len, IOMAP_WRITE, ops, NULL,
				iomap_dirty_actor);
		if (ret <= 0)
			return ret;
		pos += ret;
		len -= ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_file_dirty);

static int iomap_zero(struct inode *inode, loff_t pos, unsigned offset,
		unsigned bytes, struct iomap *iomap)
{
	struct page *page;
	int status;

	status = iomap_write_begin(inode, pos, bytes, AOP_FLAG_NOFS, &page,
				   iomap);
	if (status)
		return status;

	zero_user(page, offset, bytes);
	mark_page_accessed(page);

	return iomap_write_end(inode, pos, bytes, bytes, page, iomap);
}

static int iomap_dax_zero(loff_t pos, unsigned offset, unsigned bytes,
		struct iomap *iomap)
{
	return __dax_zero_page_range(iomap->bdev, iomap->dax_dev,
			iomap_sector(iomap, pos & PAGE_MASK), offset, bytes);
}

static loff_t
iomap_zero_range_actor(struct inode *inode, loff_t pos, loff_t count,
		void *data, struct iomap *iomap)
{
	bool *did_zero = data;
	loff_t written = 0;
	int status;

	/* already zeroed? we're done. */
	if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
		return count;

	do {
		unsigned offset, bytes;

		offset = offset_in_page(pos);
		bytes = min_t(loff_t, PAGE_SIZE - offset, count);

		if (IS_DAX(inode))
			status = iomap_dax_zero(pos, offset, bytes, iomap);
		else
			status = iomap_zero(inode, pos, offset, bytes, iomap);
		if (status < 0)
			return status;

		pos += bytes;
		count -= bytes;
		written += bytes;
		if (did_zero)
			*did_zero = true;
	} while (count > 0);

	return written;
}

int
iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
		const struct iomap_ops *ops)
{
	loff_t ret;

	while (len > 0) {
		ret = iomap_apply(inode, pos, len, IOMAP_ZERO,
				ops, did_zero, iomap_zero_range_actor);
		if (ret <= 0)
			return ret;

		pos += ret;
		len -= ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_zero_range);

int
iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
		const struct iomap_ops *ops)
{
	unsigned int blocksize = i_blocksize(inode);
	unsigned int off = pos & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!off)
		return 0;
	return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops);
}
EXPORT_SYMBOL_GPL(iomap_truncate_page);

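/*
 * Example (illustrative): a truncate path would call this to zero the
 * partial block at the new EOF before updating i_size.  Names are
 * hypothetical:
 *
 *	bool did_zero = false;
 *	int error;
 *
 *	error = iomap_truncate_page(inode, newsize, &did_zero,
 *			&myfs_iomap_ops);
 *	if (error)
 *		return error;
 *	truncate_setsize(inode, newsize);
 */
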
static loff_t
iomap_page_mkwrite_actor(struct inode *inode, loff_t pos, loff_t length,
		void *data, struct iomap *iomap)
{
	struct page *page = data;
	int ret;

	if (iomap->flags & IOMAP_F_BUFFER_HEAD) {
		ret = __block_write_begin_int(page, pos, length, NULL, iomap);
		if (ret)
			return ret;
		block_commit_write(page, 0, length);
	} else {
		WARN_ON_ONCE(!PageUptodate(page));
		iomap_page_create(inode, page);
		set_page_dirty(page);
	}

	return length;
}

vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	unsigned long length;
	loff_t offset, size;
	ssize_t ret;

	lock_page(page);
	size = i_size_read(inode);
	if ((page->mapping != inode->i_mapping) ||
	    (page_offset(page) > size)) {
		/* We overload EFAULT to mean page got truncated */
		ret = -EFAULT;
		goto out_unlock;
	}

	/* page is wholly or partially inside EOF */
	if (((page->index + 1) << PAGE_SHIFT) > size)
		length = offset_in_page(size);
	else
		length = PAGE_SIZE;

	offset = page_offset(page);
	while (length > 0) {
		ret = iomap_apply(inode, offset, length,
				IOMAP_WRITE | IOMAP_FAULT, ops, page,
				iomap_page_mkwrite_actor);
		if (unlikely(ret <= 0))
			goto out_unlock;
		offset += ret;
		length -= ret;
	}

	wait_for_stable_page(page);
	return VM_FAULT_LOCKED;
out_unlock:
	unlock_page(page);
	return block_page_mkwrite_return(ret);
}
EXPORT_SYMBOL_GPL(iomap_page_mkwrite);

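/*
 * Example (illustrative): a hypothetical fault handler routes write
 * protection faults through this helper, typically bracketed by freeze
 * protection:
 *
 *	static vm_fault_t myfs_page_mkwrite(struct vm_fault *vmf)
 *	{
 *		struct inode *inode = file_inode(vmf->vma->vm_file);
 *		vm_fault_t ret;
 *
 *		sb_start_pagefault(inode->i_sb);
 *		file_update_time(vmf->vma->vm_file);
 *		ret = iomap_page_mkwrite(vmf, &myfs_iomap_ops);
 *		sb_end_pagefault(inode->i_sb);
 *		return ret;
 *	}
 */
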
struct fiemap_ctx {
	struct fiemap_extent_info *fi;
	struct iomap prev;
};

static int iomap_to_fiemap(struct fiemap_extent_info *fi,
		struct iomap *iomap, u32 flags)
{
	switch (iomap->type) {
	case IOMAP_HOLE:
		/* skip holes */
		return 0;
	case IOMAP_DELALLOC:
		flags |= FIEMAP_EXTENT_DELALLOC | FIEMAP_EXTENT_UNKNOWN;
		break;
	case IOMAP_MAPPED:
		break;
	case IOMAP_UNWRITTEN:
		flags |= FIEMAP_EXTENT_UNWRITTEN;
		break;
	case IOMAP_INLINE:
		flags |= FIEMAP_EXTENT_DATA_INLINE;
		break;
	}

	if (iomap->flags & IOMAP_F_MERGED)
		flags |= FIEMAP_EXTENT_MERGED;
	if (iomap->flags & IOMAP_F_SHARED)
		flags |= FIEMAP_EXTENT_SHARED;

	return fiemap_fill_next_extent(fi, iomap->offset,
			iomap->addr != IOMAP_NULL_ADDR ? iomap->addr : 0,
			iomap->length, flags);
}

static loff_t
iomap_fiemap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	struct fiemap_ctx *ctx = data;
	loff_t ret = length;

	if (iomap->type == IOMAP_HOLE)
		return length;

	ret = iomap_to_fiemap(ctx->fi, &ctx->prev, 0);
	ctx->prev = *iomap;
	switch (ret) {
	case 0:		/* success */
		return length;
	case 1:		/* extent array full */
		return 0;
	default:
		return ret;
	}
}

int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fi,
		loff_t start, loff_t len, const struct iomap_ops *ops)
{
	struct fiemap_ctx ctx;
	loff_t ret;

	memset(&ctx, 0, sizeof(ctx));
	ctx.fi = fi;
	ctx.prev.type = IOMAP_HOLE;

	ret = fiemap_check_flags(fi, FIEMAP_FLAG_SYNC);
	if (ret)
		return ret;

	if (fi->fi_flags & FIEMAP_FLAG_SYNC) {
		ret = filemap_write_and_wait(inode->i_mapping);
		if (ret)
			return ret;
	}

	while (len > 0) {
		ret = iomap_apply(inode, start, len, IOMAP_REPORT, ops, &ctx,
				iomap_fiemap_actor);
		/* inode with no (attribute) mapping will give ENOENT */
		if (ret == -ENOENT)
			break;
		if (ret < 0)
			return ret;
		if (ret == 0)
			break;

		start += ret;
		len -= ret;
	}

	if (ctx.prev.type != IOMAP_HOLE) {
		ret = iomap_to_fiemap(fi, &ctx.prev, FIEMAP_EXTENT_LAST);
		if (ret < 0)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_fiemap);

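/*
 * Example (illustrative): wiring into inode_operations, with hypothetical
 * names:
 *
 *	static int myfs_fiemap(struct inode *inode,
 *			struct fiemap_extent_info *fi, u64 start, u64 len)
 *	{
 *		return iomap_fiemap(inode, fi, start, len, &myfs_iomap_ops);
 *	}
 */
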
/*
 * Seek for SEEK_DATA / SEEK_HOLE within @page, starting at @lastoff.
 * Returns true if found and updates @lastoff to the offset in file.
 */
static bool
page_seek_hole_data(struct inode *inode, struct page *page, loff_t *lastoff,
		int whence)
{
	const struct address_space_operations *ops = inode->i_mapping->a_ops;
	unsigned int bsize = i_blocksize(inode), off;
	bool seek_data = whence == SEEK_DATA;
	loff_t poff = page_offset(page);

	if (WARN_ON_ONCE(*lastoff >= poff + PAGE_SIZE))
		return false;

	if (*lastoff < poff) {
		/*
		 * Last offset smaller than the start of the page means we found
		 * a hole:
		 */
		if (whence == SEEK_HOLE)
			return true;
		*lastoff = poff;
	}

	/*
	 * Just check the page unless we can and should check block ranges:
	 */
	if (bsize == PAGE_SIZE || !ops->is_partially_uptodate)
		return PageUptodate(page) == seek_data;

	lock_page(page);
	if (unlikely(page->mapping != inode->i_mapping))
		goto out_unlock_not_found;

	for (off = 0; off < PAGE_SIZE; off += bsize) {
		if (offset_in_page(*lastoff) >= off + bsize)
			continue;
		if (ops->is_partially_uptodate(page, off, bsize) == seek_data) {
			unlock_page(page);
			return true;
		}
		*lastoff = poff + off + bsize;
	}

out_unlock_not_found:
	unlock_page(page);
	return false;
}

/*
 * Seek for SEEK_DATA / SEEK_HOLE in the page cache.
 *
 * Within unwritten extents, the page cache determines which parts are holes
 * and which are data: uptodate buffer heads count as data; everything else
 * counts as a hole.
 *
 * Returns the resulting offset on success, and -ENOENT otherwise.
 */
static loff_t
page_cache_seek_hole_data(struct inode *inode, loff_t offset, loff_t length,
		int whence)
{
	pgoff_t index = offset >> PAGE_SHIFT;
	pgoff_t end = DIV_ROUND_UP(offset + length, PAGE_SIZE);
	loff_t lastoff = offset;
	struct pagevec pvec;

	if (length <= 0)
		return -ENOENT;

	pagevec_init(&pvec);

	do {
		unsigned nr_pages, i;

		nr_pages = pagevec_lookup_range(&pvec, inode->i_mapping, &index,
						end - 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			if (page_seek_hole_data(inode, page, &lastoff, whence))
				goto check_range;
			lastoff = page_offset(page) + PAGE_SIZE;
		}
		pagevec_release(&pvec);
	} while (index < end);

	/* When no page at lastoff and we are not done, we found a hole. */
	if (whence != SEEK_HOLE)
		goto not_found;

check_range:
	if (lastoff < offset + length)
		goto out;
not_found:
	lastoff = -ENOENT;
out:
	pagevec_release(&pvec);
	return lastoff;
}

static loff_t
iomap_seek_hole_actor(struct inode *inode, loff_t offset, loff_t length,
		void *data, struct iomap *iomap)
{
	switch (iomap->type) {
	case IOMAP_UNWRITTEN:
		offset = page_cache_seek_hole_data(inode, offset, length,
						   SEEK_HOLE);
		if (offset < 0)
			return length;
		/* fall through */
	case IOMAP_HOLE:
		*(loff_t *)data = offset;
		return 0;
	default:
		return length;
	}
}

loff_t
iomap_seek_hole(struct inode *inode, loff_t offset, const struct iomap_ops *ops)
{
	loff_t size = i_size_read(inode);
	loff_t length = size - offset;
	loff_t ret;

	/* Nothing to be found before or beyond the end of the file. */
	if (offset < 0 || offset >= size)
		return -ENXIO;

	while (length > 0) {
		ret = iomap_apply(inode, offset, length, IOMAP_REPORT, ops,
				  &offset, iomap_seek_hole_actor);
		if (ret < 0)
			return ret;
		if (ret == 0)
			break;

		offset += ret;
		length -= ret;
	}

	return offset;
}
EXPORT_SYMBOL_GPL(iomap_seek_hole);

static loff_t
iomap_seek_data_actor(struct inode *inode, loff_t offset, loff_t length,
		void *data, struct iomap *iomap)
{
	switch (iomap->type) {
	case IOMAP_HOLE:
		return length;
	case IOMAP_UNWRITTEN:
		offset = page_cache_seek_hole_data(inode, offset, length,
						   SEEK_DATA);
		if (offset < 0)
			return length;
		/* fall through */
	default:
		*(loff_t *)data = offset;
		return 0;
	}
}

loff_t
iomap_seek_data(struct inode *inode, loff_t offset, const struct iomap_ops *ops)
{
	loff_t size = i_size_read(inode);
	loff_t length = size - offset;
	loff_t ret;

	/* Nothing to be found before or beyond the end of the file. */
	if (offset < 0 || offset >= size)
		return -ENXIO;

	while (length > 0) {
		ret = iomap_apply(inode, offset, length, IOMAP_REPORT, ops,
				  &offset, iomap_seek_data_actor);
		if (ret < 0)
			return ret;
		if (ret == 0)
			break;

		offset += ret;
		length -= ret;
	}

	if (length <= 0)
		return -ENXIO;
	return offset;
}
EXPORT_SYMBOL_GPL(iomap_seek_data);

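/*
 * Example (illustrative): a hypothetical ->llseek routes SEEK_HOLE and
 * SEEK_DATA through these helpers and falls back to generic_file_llseek for
 * the other whence values:
 *
 *	static loff_t myfs_llseek(struct file *file, loff_t offset, int whence)
 *	{
 *		struct inode *inode = file_inode(file);
 *
 *		switch (whence) {
 *		case SEEK_HOLE:
 *			offset = iomap_seek_hole(inode, offset,
 *					&myfs_iomap_ops);
 *			break;
 *		case SEEK_DATA:
 *			offset = iomap_seek_data(inode, offset,
 *					&myfs_iomap_ops);
 *			break;
 *		default:
 *			return generic_file_llseek(file, offset, whence);
 *		}
 *		if (offset < 0)
 *			return offset;
 *		return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
 *	}
 */
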
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001439/*
1440 * Private flags for iomap_dio, must not overlap with the public ones in
1441 * iomap.h:
1442 */
Dave Chinner3460cac2018-05-02 12:54:53 -07001443#define IOMAP_DIO_WRITE_FUA (1 << 28)
Dave Chinner4f8ff442018-05-02 12:54:52 -07001444#define IOMAP_DIO_NEED_SYNC (1 << 29)
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001445#define IOMAP_DIO_WRITE (1 << 30)
1446#define IOMAP_DIO_DIRTY (1 << 31)
1447
1448struct iomap_dio {
1449 struct kiocb *iocb;
1450 iomap_dio_end_io_t *end_io;
1451 loff_t i_size;
1452 loff_t size;
1453 atomic_t ref;
1454 unsigned flags;
1455 int error;
Andreas Gruenbacherebf00be2018-06-19 15:10:55 -07001456 bool wait_for_completion;
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001457
1458 union {
1459 /* used during submission and for synchronous completion: */
1460 struct {
1461 struct iov_iter *iter;
1462 struct task_struct *waiter;
1463 struct request_queue *last_queue;
1464 blk_qc_t cookie;
1465 } submit;
1466
1467 /* used for aio completion: */
1468 struct {
1469 struct work_struct work;
1470 } aio;
1471 };
1472};
1473
Christoph Hellwig81214ba2018-12-04 11:12:08 -07001474int iomap_dio_iopoll(struct kiocb *kiocb, bool spin)
1475{
1476 struct request_queue *q = READ_ONCE(kiocb->private);
1477
1478 if (!q)
1479 return 0;
1480 return blk_poll(q, READ_ONCE(kiocb->ki_cookie), spin);
1481}
1482EXPORT_SYMBOL_GPL(iomap_dio_iopoll);
1483
1484static void iomap_dio_submit_bio(struct iomap_dio *dio, struct iomap *iomap,
1485 struct bio *bio)
1486{
1487 atomic_inc(&dio->ref);
1488
1489 if (dio->iocb->ki_flags & IOCB_HIPRI)
1490 bio_set_polled(bio, dio->iocb);
1491
1492 dio->submit.last_queue = bdev_get_queue(iomap->bdev);
1493 dio->submit.cookie = submit_bio(bio);
1494}
1495
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001496static ssize_t iomap_dio_complete(struct iomap_dio *dio)
1497{
1498 struct kiocb *iocb = dio->iocb;
Lukas Czerner332391a2017-09-21 08:16:29 -06001499 struct inode *inode = file_inode(iocb->ki_filp);
Eryu Guan5e25c262017-10-13 09:47:46 -07001500 loff_t offset = iocb->ki_pos;
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001501 ssize_t ret;
1502
1503 if (dio->end_io) {
1504 ret = dio->end_io(iocb,
1505 dio->error ? dio->error : dio->size,
1506 dio->flags);
1507 } else {
1508 ret = dio->error;
1509 }
1510
1511 if (likely(!ret)) {
1512 ret = dio->size;
1513 /* check for short read */
Eryu Guan5e25c262017-10-13 09:47:46 -07001514 if (offset + ret > dio->i_size &&
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001515 !(dio->flags & IOMAP_DIO_WRITE))
Eryu Guan5e25c262017-10-13 09:47:46 -07001516 ret = dio->i_size - offset;
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001517 iocb->ki_pos += ret;
1518 }
1519
Eryu Guan5e25c262017-10-13 09:47:46 -07001520 /*
1521 * Try again to invalidate clean pages which might have been cached by
1522 * non-direct readahead, or faulted in by get_user_pages() if the source
1523 * of the write was an mmap'ed region of the file we're writing. Either
1524 * one is a pretty crazy thing to do, so we don't support it 100%. If
1525 * this invalidation fails, tough, the write still worked...
1526 *
1527 * And this page cache invalidation has to be after dio->end_io(), as
1528 * some filesystems convert unwritten extents to real allocations in
1529 * end_io() when necessary, otherwise a racing buffer read would cache
1530 * zeros from unwritten extents.
1531 */
1532 if (!dio->error &&
1533 (dio->flags & IOMAP_DIO_WRITE) && inode->i_mapping->nrpages) {
1534 int err;
1535 err = invalidate_inode_pages2_range(inode->i_mapping,
1536 offset >> PAGE_SHIFT,
1537 (offset + dio->size - 1) >> PAGE_SHIFT);
Darrick J. Wong5a9d9292018-01-08 10:41:39 -08001538 if (err)
1539 dio_warn_stale_pagecache(iocb->ki_filp);
Eryu Guan5e25c262017-10-13 09:47:46 -07001540 }
1541
Dave Chinner4f8ff442018-05-02 12:54:52 -07001542 /*
1543 * If this is a DSYNC write, make sure we push it to stable storage now
1544 * that we've written data.
1545 */
1546 if (ret > 0 && (dio->flags & IOMAP_DIO_NEED_SYNC))
1547 ret = generic_write_sync(iocb, ret);
1548
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001549 inode_dio_end(file_inode(iocb->ki_filp));
1550 kfree(dio);
1551
1552 return ret;
1553}
1554
1555static void iomap_dio_complete_work(struct work_struct *work)
1556{
1557 struct iomap_dio *dio = container_of(work, struct iomap_dio, aio.work);
1558 struct kiocb *iocb = dio->iocb;
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001559
Dave Chinner4f8ff442018-05-02 12:54:52 -07001560 iocb->ki_complete(iocb, iomap_dio_complete(dio), 0);
Christoph Hellwigff6a9292016-11-30 14:36:01 +11001561}
1562
1563/*
1564 * Set an error in the dio if none is set yet. We have to use cmpxchg
1565 * as the submission context and the completion context(s) can race to
1566 * update the error.
1567 */
1568static inline void iomap_dio_set_error(struct iomap_dio *dio, int ret)
1569{
1570 cmpxchg(&dio->error, 0, ret);
1571}

static void iomap_dio_bio_end_io(struct bio *bio)
{
	struct iomap_dio *dio = bio->bi_private;
	bool should_dirty = (dio->flags & IOMAP_DIO_DIRTY);

	if (bio->bi_status)
		iomap_dio_set_error(dio, blk_status_to_errno(bio->bi_status));

	if (atomic_dec_and_test(&dio->ref)) {
		if (dio->wait_for_completion) {
			struct task_struct *waiter = dio->submit.waiter;
			WRITE_ONCE(dio->submit.waiter, NULL);
			blk_wake_io_task(waiter);
		} else if (dio->flags & IOMAP_DIO_WRITE) {
			struct inode *inode = file_inode(dio->iocb->ki_filp);

			INIT_WORK(&dio->aio.work, iomap_dio_complete_work);
			queue_work(inode->i_sb->s_dio_done_wq, &dio->aio.work);
		} else {
			iomap_dio_complete_work(&dio->aio.work);
		}
	}

	if (should_dirty) {
		bio_check_pages_dirty(bio);
	} else {
		if (!bio_flagged(bio, BIO_NO_PAGE_REF)) {
			struct bvec_iter_all iter_all;
			struct bio_vec *bvec;
			int i;

			bio_for_each_segment_all(bvec, bio, i, iter_all)
				put_page(bvec->bv_page);
		}
		bio_put(bio);
	}
}

static void
iomap_dio_zero(struct iomap_dio *dio, struct iomap *iomap, loff_t pos,
		unsigned len)
{
	struct page *page = ZERO_PAGE(0);
	int flags = REQ_SYNC | REQ_IDLE;
	struct bio *bio;

	bio = bio_alloc(GFP_KERNEL, 1);
	bio_set_dev(bio, iomap->bdev);
	bio->bi_iter.bi_sector = iomap_sector(iomap, pos);
	bio->bi_private = dio;
	bio->bi_end_io = iomap_dio_bio_end_io;

	get_page(page);
	__bio_add_page(bio, page, len, 0);
	bio_set_op_attrs(bio, REQ_OP_WRITE, flags);
	iomap_dio_submit_bio(dio, iomap, bio);
}
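
/*
 * Worked example (editor's addition): iomap_dio_zero() is used below to
 * zero the sub-block head and tail of an unaligned write.  With a
 * 4096-byte fs block and a write starting at pos 5120,
 * pad = pos & (fs_block_size - 1) = 1024 and the head [4096, 5120) is
 * zeroed; a write ending at pos 5120 likewise gets its tail [5120, 8192)
 * zeroed with length fs_block_size - pad = 3072.
 */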

static loff_t
iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length,
		struct iomap_dio *dio, struct iomap *iomap)
{
	unsigned int blkbits = blksize_bits(bdev_logical_block_size(iomap->bdev));
	unsigned int fs_block_size = i_blocksize(inode), pad;
	unsigned int align = iov_iter_alignment(dio->submit.iter);
	struct iov_iter iter;
	struct bio *bio;
	bool need_zeroout = false;
	bool use_fua = false;
	int nr_pages, ret = 0;
	size_t copied = 0;

	if ((pos | length | align) & ((1 << blkbits) - 1))
		return -EINVAL;

	if (iomap->type == IOMAP_UNWRITTEN) {
		dio->flags |= IOMAP_DIO_UNWRITTEN;
		need_zeroout = true;
	}

	if (iomap->flags & IOMAP_F_SHARED)
		dio->flags |= IOMAP_DIO_COW;

	if (iomap->flags & IOMAP_F_NEW) {
		need_zeroout = true;
	} else if (iomap->type == IOMAP_MAPPED) {
		/*
		 * Use a FUA write if we need datasync semantics, this is a pure
		 * data IO that doesn't require any metadata updates (including
		 * after IO completion such as unwritten extent conversion) and
		 * the underlying device supports FUA. This allows us to avoid
		 * cache flushes on IO completion.
		 */
		if (!(iomap->flags & (IOMAP_F_SHARED|IOMAP_F_DIRTY)) &&
		    (dio->flags & IOMAP_DIO_WRITE_FUA) &&
		    blk_queue_fua(bdev_get_queue(iomap->bdev)))
			use_fua = true;
	}

	/*
	 * Operate on a partial iter trimmed to the extent we were called for.
	 * We'll update the iter in the dio once we're done with this extent.
	 */
	iter = *dio->submit.iter;
	iov_iter_truncate(&iter, length);

	nr_pages = iov_iter_npages(&iter, BIO_MAX_PAGES);
	if (nr_pages <= 0)
		return nr_pages;

	if (need_zeroout) {
		/* zero out from the start of the block to the write offset */
		pad = pos & (fs_block_size - 1);
		if (pad)
			iomap_dio_zero(dio, iomap, pos - pad, pad);
	}

	do {
		size_t n;
		if (dio->error) {
			iov_iter_revert(dio->submit.iter, copied);
			return 0;
		}

		bio = bio_alloc(GFP_KERNEL, nr_pages);
		bio_set_dev(bio, iomap->bdev);
		bio->bi_iter.bi_sector = iomap_sector(iomap, pos);
		bio->bi_write_hint = dio->iocb->ki_hint;
		bio->bi_ioprio = dio->iocb->ki_ioprio;
		bio->bi_private = dio;
		bio->bi_end_io = iomap_dio_bio_end_io;

		ret = bio_iov_iter_get_pages(bio, &iter);
		if (unlikely(ret)) {
			/*
			 * We have to stop part way through an IO. We must fall
			 * through to the sub-block tail zeroing here, otherwise
			 * this short IO may expose stale data in the tail of
			 * the block we haven't written data to.
			 */
			bio_put(bio);
			goto zero_tail;
		}

		n = bio->bi_iter.bi_size;
		if (dio->flags & IOMAP_DIO_WRITE) {
			bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;
			if (use_fua)
				bio->bi_opf |= REQ_FUA;
			else
				dio->flags &= ~IOMAP_DIO_WRITE_FUA;
			task_io_account_write(n);
		} else {
			bio->bi_opf = REQ_OP_READ;
			if (dio->flags & IOMAP_DIO_DIRTY)
				bio_set_pages_dirty(bio);
		}

		iov_iter_advance(dio->submit.iter, n);

		dio->size += n;
		pos += n;
		copied += n;

		nr_pages = iov_iter_npages(&iter, BIO_MAX_PAGES);
		iomap_dio_submit_bio(dio, iomap, bio);
	} while (nr_pages);

	/*
	 * We need to zero out the tail of a sub-block write if the extent type
	 * requires zeroing or the write extends beyond EOF. If we don't zero
	 * the block tail in the latter case, we can expose stale data via mmap
	 * reads of the EOF block.
	 */
zero_tail:
	if (need_zeroout ||
	    ((dio->flags & IOMAP_DIO_WRITE) && pos >= i_size_read(inode))) {
		/* zero out from the end of the write to the end of the block */
		pad = pos & (fs_block_size - 1);
		if (pad)
			iomap_dio_zero(dio, iomap, pos, fs_block_size - pad);
	}
	return copied ? copied : ret;
}

static loff_t
iomap_dio_hole_actor(loff_t length, struct iomap_dio *dio)
{
	length = iov_iter_zero(length, dio->submit.iter);
	dio->size += length;
	return length;
}

static loff_t
iomap_dio_inline_actor(struct inode *inode, loff_t pos, loff_t length,
		struct iomap_dio *dio, struct iomap *iomap)
{
	struct iov_iter *iter = dio->submit.iter;
	size_t copied;

	BUG_ON(pos + length > PAGE_SIZE - offset_in_page(iomap->inline_data));

	if (dio->flags & IOMAP_DIO_WRITE) {
		loff_t size = inode->i_size;

		if (pos > size)
			memset(iomap->inline_data + size, 0, pos - size);
		copied = copy_from_iter(iomap->inline_data + pos, length, iter);
		if (copied) {
			if (pos + copied > size)
				i_size_write(inode, pos + copied);
			mark_inode_dirty(inode);
		}
	} else {
		copied = copy_to_iter(iomap->inline_data + pos, length, iter);
	}
	dio->size += copied;
	return copied;
}

static loff_t
iomap_dio_actor(struct inode *inode, loff_t pos, loff_t length,
		void *data, struct iomap *iomap)
{
	struct iomap_dio *dio = data;

	switch (iomap->type) {
	case IOMAP_HOLE:
		if (WARN_ON_ONCE(dio->flags & IOMAP_DIO_WRITE))
			return -EIO;
		return iomap_dio_hole_actor(length, dio);
	case IOMAP_UNWRITTEN:
		if (!(dio->flags & IOMAP_DIO_WRITE))
			return iomap_dio_hole_actor(length, dio);
		return iomap_dio_bio_actor(inode, pos, length, dio, iomap);
	case IOMAP_MAPPED:
		return iomap_dio_bio_actor(inode, pos, length, dio, iomap);
	case IOMAP_INLINE:
		return iomap_dio_inline_actor(inode, pos, length, dio, iomap);
	default:
		WARN_ON_ONCE(1);
		return -EIO;
	}
}

/*
 * iomap_dio_rw() always completes O_[D]SYNC writes regardless of whether the IO
 * is being issued as AIO or not.  This allows us to optimise pure data writes
 * to use REQ_FUA rather than requiring generic_write_sync() to issue a
 * REQ_FLUSH post write. This is slightly tricky because a single request here
 * can be mapped into multiple disjoint IOs and only a subset of the IOs issued
 * may be pure data writes. In that case, we still need to do a full data sync
 * completion.
 */
ssize_t
iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops, iomap_dio_end_io_t end_io)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = file_inode(iocb->ki_filp);
	size_t count = iov_iter_count(iter);
	loff_t pos = iocb->ki_pos, start = pos;
	loff_t end = iocb->ki_pos + count - 1, ret = 0;
	unsigned int flags = IOMAP_DIRECT;
	bool wait_for_completion = is_sync_kiocb(iocb);
	struct blk_plug plug;
	struct iomap_dio *dio;

	lockdep_assert_held(&inode->i_rwsem);

	if (!count)
		return 0;

	dio = kmalloc(sizeof(*dio), GFP_KERNEL);
	if (!dio)
		return -ENOMEM;

	dio->iocb = iocb;
	atomic_set(&dio->ref, 1);
	dio->size = 0;
	dio->i_size = i_size_read(inode);
	dio->end_io = end_io;
	dio->error = 0;
	dio->flags = 0;

	dio->submit.iter = iter;
	dio->submit.waiter = current;
	dio->submit.cookie = BLK_QC_T_NONE;
	dio->submit.last_queue = NULL;

	if (iov_iter_rw(iter) == READ) {
		if (pos >= dio->i_size)
			goto out_free_dio;

		if (iter_is_iovec(iter) && iov_iter_rw(iter) == READ)
			dio->flags |= IOMAP_DIO_DIRTY;
	} else {
		flags |= IOMAP_WRITE;
		dio->flags |= IOMAP_DIO_WRITE;

		/* for data sync or sync, we need sync completion processing */
		if (iocb->ki_flags & IOCB_DSYNC)
			dio->flags |= IOMAP_DIO_NEED_SYNC;

		/*
		 * For datasync only writes, we optimistically try using FUA for
		 * this IO.  Any non-FUA write that occurs will clear this flag,
		 * hence we know before completion whether a cache flush is
		 * necessary.
		 */
		if ((iocb->ki_flags & (IOCB_DSYNC | IOCB_SYNC)) == IOCB_DSYNC)
			dio->flags |= IOMAP_DIO_WRITE_FUA;
	}

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (filemap_range_has_page(mapping, start, end)) {
			ret = -EAGAIN;
			goto out_free_dio;
		}
		flags |= IOMAP_NOWAIT;
	}

	ret = filemap_write_and_wait_range(mapping, start, end);
	if (ret)
		goto out_free_dio;

	/*
	 * Try to invalidate cache pages for the range we're direct
	 * writing.  If this invalidation fails, tough, the write will
	 * still work, but racing two incompatible write paths is a
	 * pretty crazy thing to do, so we don't support it 100%.
	 */
	ret = invalidate_inode_pages2_range(mapping,
			start >> PAGE_SHIFT, end >> PAGE_SHIFT);
	if (ret)
		dio_warn_stale_pagecache(iocb->ki_filp);
	ret = 0;

	if (iov_iter_rw(iter) == WRITE && !wait_for_completion &&
	    !inode->i_sb->s_dio_done_wq) {
		ret = sb_init_dio_done_wq(inode->i_sb);
		if (ret < 0)
			goto out_free_dio;
	}

	inode_dio_begin(inode);

	blk_start_plug(&plug);
	do {
		ret = iomap_apply(inode, pos, count, flags, ops, dio,
				iomap_dio_actor);
		if (ret <= 0) {
			/* magic error code to fall back to buffered I/O */
			if (ret == -ENOTBLK) {
				wait_for_completion = true;
				ret = 0;
			}
			break;
		}
		pos += ret;

		if (iov_iter_rw(iter) == READ && pos >= dio->i_size)
			break;
	} while ((count = iov_iter_count(iter)) > 0);
	blk_finish_plug(&plug);

	if (ret < 0)
		iomap_dio_set_error(dio, ret);

	/*
	 * If all the writes we issued were FUA, we don't need to flush the
	 * cache on IO completion. Clear the sync flag for this case.
	 */
	if (dio->flags & IOMAP_DIO_WRITE_FUA)
		dio->flags &= ~IOMAP_DIO_NEED_SYNC;

	WRITE_ONCE(iocb->ki_cookie, dio->submit.cookie);
	WRITE_ONCE(iocb->private, dio->submit.last_queue);

	/*
	 * We are about to drop our additional submission reference, which
	 * might be the last reference to the dio.  There are three different
	 * ways we can progress here:
	 *
	 *  (a) If this is the last reference we will always complete and free
	 *	the dio ourselves.
	 *  (b) If this is not the last reference, and we serve an asynchronous
	 *	iocb, we must never touch the dio after the decrement, the
	 *	I/O completion handler will complete and free it.
	 *  (c) If this is not the last reference, but we serve a synchronous
	 *	iocb, the I/O completion handler will wake us up on the drop
	 *	of the final reference, and we will complete and free it here
	 *	after we got woken by the I/O completion handler.
	 */
	dio->wait_for_completion = wait_for_completion;
	if (!atomic_dec_and_test(&dio->ref)) {
		if (!wait_for_completion)
			return -EIOCBQUEUED;

		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (!READ_ONCE(dio->submit.waiter))
				break;

			if (!(iocb->ki_flags & IOCB_HIPRI) ||
			    !dio->submit.last_queue ||
			    !blk_poll(dio->submit.last_queue,
					dio->submit.cookie, true))
				io_schedule();
		}
		__set_current_state(TASK_RUNNING);
	}

	return iomap_dio_complete(dio);

out_free_dio:
	kfree(dio);
	return ret;
}
EXPORT_SYMBOL_GPL(iomap_dio_rw);
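
/*
 * Editor's illustrative sketch (added, not part of the original file): a
 * minimal filesystem read path driving iomap_dio_rw().  The iomap_ops
 * instance "example_iomap_ops" is hypothetical; real callers such as XFS
 * pass their own ops and may supply an end_io callback instead of NULL.
 * iomap_dio_rw() asserts that i_rwsem is held, so the caller takes it
 * (shared, as this is a read) around the call.
 */
extern const struct iomap_ops example_iomap_ops;	/* hypothetical */

static ssize_t __maybe_unused
example_file_dio_read(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	inode_lock_shared(inode);
	ret = iomap_dio_rw(iocb, to, &example_iomap_ops, NULL);
	inode_unlock_shared(inode);

	return ret;
}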

/* Swapfile activation */

#ifdef CONFIG_SWAP
struct iomap_swapfile_info {
	struct iomap iomap;		/* accumulated iomap */
	struct swap_info_struct *sis;
	uint64_t lowest_ppage;		/* lowest physical addr seen (pages) */
	uint64_t highest_ppage;		/* highest physical addr seen (pages) */
	unsigned long nr_pages;		/* number of pages collected */
	int nr_extents;			/* extent count */
};

/*
 * Collect physical extents for this swap file.  Physical extents reported to
 * the swap code must be trimmed to align to a page boundary.  The logical
 * offset within the file is irrelevant since the swapfile code maps logical
 * page numbers of the swap device to the physical page-aligned extents.
 */
static int iomap_swapfile_add_extent(struct iomap_swapfile_info *isi)
{
	struct iomap *iomap = &isi->iomap;
	unsigned long nr_pages;
	uint64_t first_ppage;
	uint64_t first_ppage_reported;
	uint64_t next_ppage;
	int error;

	/*
	 * Round the start up and the end down so that the physical
	 * extent aligns to a page boundary.
	 */
	first_ppage = ALIGN(iomap->addr, PAGE_SIZE) >> PAGE_SHIFT;
	next_ppage = ALIGN_DOWN(iomap->addr + iomap->length, PAGE_SIZE) >>
			PAGE_SHIFT;

	/* Skip too-short physical extents. */
	if (first_ppage >= next_ppage)
		return 0;
	nr_pages = next_ppage - first_ppage;

	/*
	 * Calculate how much swap space we're adding; the first page contains
	 * the swap header and doesn't count.  The mm still wants that first
	 * page fed to add_swap_extent, however.
	 */
	first_ppage_reported = first_ppage;
	if (iomap->offset == 0)
		first_ppage_reported++;
	if (isi->lowest_ppage > first_ppage_reported)
		isi->lowest_ppage = first_ppage_reported;
	if (isi->highest_ppage < (next_ppage - 1))
		isi->highest_ppage = next_ppage - 1;

	/* Add extent, set up for the next call. */
	error = add_swap_extent(isi->sis, isi->nr_pages, nr_pages, first_ppage);
	if (error < 0)
		return error;
	isi->nr_extents += error;
	isi->nr_pages += nr_pages;
	return 0;
}
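
/*
 * Worked example (editor's addition) of the rounding above: with a
 * 4096-byte PAGE_SIZE, an extent at addr 0x11200 with length 0x3000
 * covers [0x11200, 0x14200).  ALIGN() rounds the start up to physical
 * page 0x12 and ALIGN_DOWN() rounds the end down to page 0x14, so only
 * pages 0x12 and 0x13 are fed to add_swap_extent(); the unaligned head
 * and tail bytes are simply never used for swap.
 */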

/*
 * Accumulate iomaps for this swap file.  We have to accumulate iomaps because
 * swap only cares about contiguous page-aligned physical extents and makes no
 * distinction between written and unwritten extents.
 */
static loff_t iomap_swapfile_activate_actor(struct inode *inode, loff_t pos,
		loff_t count, void *data, struct iomap *iomap)
{
	struct iomap_swapfile_info *isi = data;
	int error;

	switch (iomap->type) {
	case IOMAP_MAPPED:
	case IOMAP_UNWRITTEN:
		/* Only real or unwritten extents. */
		break;
	case IOMAP_INLINE:
		/* No inline data. */
		pr_err("swapon: file is inline\n");
		return -EINVAL;
	default:
		pr_err("swapon: file has unallocated extents\n");
		return -EINVAL;
	}

	/* No uncommitted metadata or shared blocks. */
	if (iomap->flags & IOMAP_F_DIRTY) {
		pr_err("swapon: file is not committed\n");
		return -EINVAL;
	}
	if (iomap->flags & IOMAP_F_SHARED) {
		pr_err("swapon: file has shared extents\n");
		return -EINVAL;
	}

	/* Only one bdev per swap file. */
	if (iomap->bdev != isi->sis->bdev) {
		pr_err("swapon: file is on multiple devices\n");
		return -EINVAL;
	}

	if (isi->iomap.length == 0) {
		/* No accumulated extent, so just store it. */
		memcpy(&isi->iomap, iomap, sizeof(isi->iomap));
	} else if (isi->iomap.addr + isi->iomap.length == iomap->addr) {
		/* Append this to the accumulated extent. */
		isi->iomap.length += iomap->length;
	} else {
		/* Otherwise, add the retained iomap and store this one. */
		error = iomap_swapfile_add_extent(isi);
		if (error)
			return error;
		memcpy(&isi->iomap, iomap, sizeof(isi->iomap));
	}
	return count;
}
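
/*
 * Editor's illustration of the accumulation above: an iomap at addr
 * 0x10000 with length 0x8000 followed by one at addr 0x18000 with length
 * 0x4000 merges into a single accumulated extent covering
 * [0x10000, 0x1c000), since the second starts exactly where the first
 * ends.  A later iomap at a discontiguous addr flushes the accumulated
 * extent via iomap_swapfile_add_extent() and restarts accumulation.
 */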

/*
 * Iterate a swap file's iomaps to construct physical extents that can be
 * passed to the swapfile subsystem.
 */
int iomap_swapfile_activate(struct swap_info_struct *sis,
		struct file *swap_file, sector_t *pagespan,
		const struct iomap_ops *ops)
{
	struct iomap_swapfile_info isi = {
		.sis = sis,
		.lowest_ppage = (sector_t)-1ULL,
	};
	struct address_space *mapping = swap_file->f_mapping;
	struct inode *inode = mapping->host;
	loff_t pos = 0;
	loff_t len = ALIGN_DOWN(i_size_read(inode), PAGE_SIZE);
	loff_t ret;

	/*
	 * Persist all file mapping metadata so that we won't have any
	 * IOMAP_F_DIRTY iomaps.
	 */
	ret = vfs_fsync(swap_file, 1);
	if (ret)
		return ret;

	while (len > 0) {
		ret = iomap_apply(inode, pos, len, IOMAP_REPORT,
				ops, &isi, iomap_swapfile_activate_actor);
		if (ret <= 0)
			return ret;

		pos += ret;
		len -= ret;
	}

	if (isi.iomap.length) {
		ret = iomap_swapfile_add_extent(&isi);
		if (ret)
			return ret;
	}

	*pagespan = 1 + isi.highest_ppage - isi.lowest_ppage;
	sis->max = isi.nr_pages;
	sis->pages = isi.nr_pages - 1;
	sis->highest_bit = isi.nr_pages - 1;
	return isi.nr_extents;
}
EXPORT_SYMBOL_GPL(iomap_swapfile_activate);
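
/*
 * Editor's illustrative sketch (added): the ->swap_activate method an
 * iomap-based filesystem might layer on iomap_swapfile_activate(), using
 * the hypothetical example_iomap_ops declared earlier.
 */
static int __maybe_unused
example_swap_activate(struct swap_info_struct *sis, struct file *swap_file,
		sector_t *span)
{
	return iomap_swapfile_activate(sis, swap_file, span,
			&example_iomap_ops);
}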
#endif /* CONFIG_SWAP */

static loff_t
iomap_bmap_actor(struct inode *inode, loff_t pos, loff_t length,
		void *data, struct iomap *iomap)
{
	sector_t *bno = data, addr;

	if (iomap->type == IOMAP_MAPPED) {
		addr = (pos - iomap->offset + iomap->addr) >> inode->i_blkbits;
		if (addr > INT_MAX)
			WARN(1, "would truncate bmap result\n");
		else
			*bno = addr;
	}
	return 0;
}

/* legacy ->bmap interface.  0 is the error return (!) */
sector_t
iomap_bmap(struct address_space *mapping, sector_t bno,
		const struct iomap_ops *ops)
{
	struct inode *inode = mapping->host;
	loff_t pos = bno << inode->i_blkbits;
	unsigned blocksize = i_blocksize(inode);

	if (filemap_write_and_wait(mapping))
		return 0;

	bno = 0;
	iomap_apply(inode, pos, blocksize, 0, ops, &bno, iomap_bmap_actor);
	return bno;
}
EXPORT_SYMBOL_GPL(iomap_bmap);
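
/*
 * Editor's illustrative sketch (added): wiring iomap_bmap() into an
 * address_space_operations ->bmap method, as an iomap-based filesystem
 * might do with the hypothetical example_iomap_ops.
 */
static sector_t __maybe_unused
example_bmap(struct address_space *mapping, sector_t block)
{
	return iomap_bmap(mapping, block, &example_iomap_ops);
}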