/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_trans.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
#include "xfs_rw.h"
#include "xfs_iomap.h"
#include "xfs_vnodeops.h"
#include "xfs_trace.h"
#include "xfs_bmap.h"
#include <linux/gfp.h>
#include <linux/mpage.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>

void
xfs_count_page_state(
	struct page		*page,
	int			*delalloc,
	int			*unwritten)
{
	struct buffer_head	*bh, *head;

	*delalloc = *unwritten = 0;

	bh = head = page_buffers(page);
	do {
		if (buffer_unwritten(bh))
			(*unwritten) = 1;
		else if (buffer_delay(bh))
			(*delalloc) = 1;
	} while ((bh = bh->b_this_page) != head);
}
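
/*
 * Note: the two flags gathered here feed the ->releasepage() path below;
 * see xfs_vm_releasepage(), which refuses to free a page that still
 * carries delalloc or unwritten buffers.
 */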

STATIC struct block_device *
xfs_find_bdev_for_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;

	if (XFS_IS_REALTIME_INODE(ip))
		return mp->m_rtdev_targp->bt_bdev;
	else
		return mp->m_ddev_targp->bt_bdev;
}

/*
 * We're now finished for good with this ioend structure.
 * Update the page state via the associated buffer_heads,
 * release holds on the inode and bio, and finally free
 * up memory.  Do not use the ioend after this.
 */
STATIC void
xfs_destroy_ioend(
	xfs_ioend_t		*ioend)
{
	struct buffer_head	*bh, *next;

	for (bh = ioend->io_buffer_head; bh; bh = next) {
		next = bh->b_private;
		bh->b_end_io(bh, !ioend->io_error);
	}

	if (ioend->io_iocb) {
		if (ioend->io_isasync)
			aio_complete(ioend->io_iocb, ioend->io_result, 0);
		inode_dio_done(ioend->io_inode);
	}

	mempool_free(ioend, xfs_ioend_pool);
}

/*
 * If the end of the current ioend is beyond the current EOF,
 * return the new EOF value, otherwise zero.
 */
STATIC xfs_fsize_t
xfs_ioend_new_eof(
	xfs_ioend_t		*ioend)
{
	xfs_inode_t		*ip = XFS_I(ioend->io_inode);
	xfs_fsize_t		isize;
	xfs_fsize_t		bsize;

	bsize = ioend->io_offset + ioend->io_size;
	isize = MAX(ip->i_size, ip->i_new_size);
	isize = MIN(isize, bsize);
	return isize > ip->i_d.di_size ? isize : 0;
}

/*
 * Fast and loose check if this write could update the on-disk inode size.
 */
static inline bool xfs_ioend_is_append(struct xfs_ioend *ioend)
{
	return ioend->io_offset + ioend->io_size >
		XFS_I(ioend->io_inode)->i_d.di_size;
}
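
/*
 * "Fast and loose" because di_size is read here without taking the inode
 * lock, so the result can race with a concurrent size update.  That is
 * fine for its only use below: deciding in xfs_finish_ioend() whether
 * completion needs to be punted to a workqueue at all.
 */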

/*
 * Update on-disk file size now that data has been written to disk.  The
 * current in-memory file size is i_size.  If a write is beyond eof
 * i_new_size will be the intended file size until i_size is updated.
 * If this write does not extend all the way to the valid file size then
 * restrict this update to the end of the write.
 *
 * This function does not block as blocking on the inode lock in IO
 * completion can lead to IO completion order dependency deadlocks.
 * If it can't get the inode ilock it will return EAGAIN.  Callers must
 * handle this.
 */
STATIC int
xfs_setfilesize(
	xfs_ioend_t		*ioend)
{
	xfs_inode_t		*ip = XFS_I(ioend->io_inode);
	xfs_fsize_t		isize;

	if (unlikely(ioend->io_error))
		return 0;

	if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL))
		return EAGAIN;

	isize = xfs_ioend_new_eof(ioend);
	if (isize) {
		trace_xfs_setfilesize(ip, ioend->io_offset, ioend->io_size);
		ip->i_d.di_size = isize;
		xfs_mark_inode_dirty(ip);
	}

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return 0;
}

/*
 * Schedule IO completion handling on the final put of an ioend.
 *
 * If there is no work to do we might as well call it a day and free the
 * ioend right now.
 */
STATIC void
xfs_finish_ioend(
	struct xfs_ioend	*ioend)
{
	if (atomic_dec_and_test(&ioend->io_remaining)) {
		if (ioend->io_type == IO_UNWRITTEN)
			queue_work(xfsconvertd_workqueue, &ioend->io_work);
		else if (xfs_ioend_is_append(ioend))
			queue_work(xfsdatad_workqueue, &ioend->io_work);
		else
			xfs_destroy_ioend(ioend);
	}
}
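
/*
 * Note the three-way dispatch above: unwritten extent conversion needs a
 * transaction and so goes to its own workqueue, size-extending writes
 * still need xfs_setfilesize() and go to the regular data workqueue, and
 * anything else has no completion work left and is freed right here,
 * avoiding a pointless round trip through a worker thread.
 */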

/*
 * IO write completion.
 */
STATIC void
xfs_end_io(
	struct work_struct *work)
{
	xfs_ioend_t	*ioend = container_of(work, xfs_ioend_t, io_work);
	struct xfs_inode *ip = XFS_I(ioend->io_inode);
	int		error = 0;

	/*
	 * For unwritten extents we need to issue transactions to convert a
	 * range to normal written extents after the data I/O has finished.
	 */
	if (ioend->io_type == IO_UNWRITTEN &&
	    likely(!ioend->io_error && !XFS_FORCED_SHUTDOWN(ip->i_mount))) {

		error = xfs_iomap_write_unwritten(ip, ioend->io_offset,
						  ioend->io_size);
		if (error)
			ioend->io_error = error;
	}

	/*
	 * We might have to update the on-disk file size after extending
	 * writes.
	 */
	error = xfs_setfilesize(ioend);
	ASSERT(!error || error == EAGAIN);

	/*
	 * If we didn't complete processing of the ioend, requeue it to the
	 * tail of the workqueue for another attempt later.  Otherwise
	 * destroy it.
	 */
	if (error == EAGAIN) {
		atomic_inc(&ioend->io_remaining);
		xfs_finish_ioend(ioend);
		/* ensure we don't spin on blocked ioends */
		delay(1);
	} else {
		xfs_destroy_ioend(ioend);
	}
}
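
/*
 * The EAGAIN case above relies on io_remaining having already dropped to
 * zero before xfs_end_io() ran: bumping it back up and immediately
 * calling xfs_finish_ioend() re-queues the ioend, and the delay(1) keeps
 * a worker from busy-spinning on an ioend whose ilock stays contended.
 */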

/*
 * Call IO completion handling in caller context on the final put of an ioend.
 */
STATIC void
xfs_finish_ioend_sync(
	struct xfs_ioend	*ioend)
{
	if (atomic_dec_and_test(&ioend->io_remaining))
		xfs_end_io(&ioend->io_work);
}

/*
 * Allocate and initialise an IO completion structure.
 * We need to track unwritten extent write completion here initially.
 * We'll need to extend this for updating the ondisk inode size later
 * (vs. incore size).
 */
STATIC xfs_ioend_t *
xfs_alloc_ioend(
	struct inode		*inode,
	unsigned int		type)
{
	xfs_ioend_t		*ioend;

	ioend = mempool_alloc(xfs_ioend_pool, GFP_NOFS);

	/*
	 * Set the count to 1 initially, which will prevent an I/O
	 * completion callback from happening before we have started
	 * all the I/O, and hence from calling the completion routine
	 * too early.
	 */
	atomic_set(&ioend->io_remaining, 1);
	ioend->io_isasync = 0;
	ioend->io_error = 0;
	ioend->io_list = NULL;
	ioend->io_type = type;
	ioend->io_inode = inode;
	ioend->io_buffer_head = NULL;
	ioend->io_buffer_tail = NULL;
	ioend->io_offset = 0;
	ioend->io_size = 0;
	ioend->io_iocb = NULL;
	ioend->io_result = 0;

	INIT_WORK(&ioend->io_work, xfs_end_io);
	return ioend;
}
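
/*
 * Reference counting protocol: the initial io_remaining reference taken
 * here is dropped by xfs_finish_ioend() once the caller has submitted
 * all I/O, and xfs_submit_ioend_bio() takes an extra reference per bio,
 * so the ioend outlives every in-flight bio.
 */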

STATIC int
xfs_map_blocks(
	struct inode		*inode,
	loff_t			offset,
	struct xfs_bmbt_irec	*imap,
	int			type,
	int			nonblocking)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	ssize_t			count = 1 << inode->i_blkbits;
	xfs_fileoff_t		offset_fsb, end_fsb;
	int			error = 0;
	int			bmapi_flags = XFS_BMAPI_ENTIRE;
	int			nimaps = 1;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -XFS_ERROR(EIO);

	if (type == IO_UNWRITTEN)
		bmapi_flags |= XFS_BMAPI_IGSTATE;

	if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) {
		if (nonblocking)
			return -XFS_ERROR(EAGAIN);
		xfs_ilock(ip, XFS_ILOCK_SHARED);
	}

	ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
	       (ip->i_df.if_flags & XFS_IFEXTENTS));
	ASSERT(offset <= mp->m_maxioffset);

	if (offset + count > mp->m_maxioffset)
		count = mp->m_maxioffset - offset;
	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	error = xfs_bmapi(NULL, ip, offset_fsb, end_fsb - offset_fsb,
			  bmapi_flags, NULL, 0, imap, &nimaps, NULL);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (error)
		return -XFS_ERROR(error);

	if (type == IO_DELALLOC &&
	    (!nimaps || isnullstartblock(imap->br_startblock))) {
		error = xfs_iomap_write_allocate(ip, offset, count, imap);
		if (!error)
			trace_xfs_map_blocks_alloc(ip, offset, count, type, imap);
		return -XFS_ERROR(error);
	}

#ifdef DEBUG
	if (type == IO_UNWRITTEN) {
		ASSERT(nimaps);
		ASSERT(imap->br_startblock != HOLESTARTBLOCK);
		ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
	}
#endif
	if (nimaps)
		trace_xfs_map_blocks_found(ip, offset, count, type, imap);
	return 0;
}

STATIC int
xfs_imap_valid(
	struct inode		*inode,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
	offset >>= inode->i_blkbits;

	return offset >= imap->br_startoff &&
		offset < imap->br_startoff + imap->br_blockcount;
}
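
/*
 * The comparison above works because both sides end up in units of
 * filesystem blocks: br_startoff and br_blockcount are file offsets in
 * blocks, and the byte offset is shifted down by i_blkbits first.
 */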

/*
 * BIO completion handler for buffered IO.
 */
STATIC void
xfs_end_bio(
	struct bio		*bio,
	int			error)
{
	xfs_ioend_t		*ioend = bio->bi_private;

	ASSERT(atomic_read(&bio->bi_cnt) >= 1);
	ioend->io_error = test_bit(BIO_UPTODATE, &bio->bi_flags) ? 0 : error;

	/* Toss bio and pass work off to an xfsdatad thread */
	bio->bi_private = NULL;
	bio->bi_end_io = NULL;
	bio_put(bio);

	xfs_finish_ioend(ioend);
}

STATIC void
xfs_submit_ioend_bio(
	struct writeback_control *wbc,
	xfs_ioend_t		*ioend,
	struct bio		*bio)
{
	atomic_inc(&ioend->io_remaining);
	bio->bi_private = ioend;
	bio->bi_end_io = xfs_end_bio;

	/*
	 * If the I/O is beyond EOF we mark the inode dirty immediately
	 * but don't update the inode size until I/O completion.
	 */
	if (xfs_ioend_new_eof(ioend))
		xfs_mark_inode_dirty(XFS_I(ioend->io_inode));

	submit_bio(wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE, bio);
}

STATIC struct bio *
xfs_alloc_ioend_bio(
	struct buffer_head	*bh)
{
	int			nvecs = bio_get_nr_vecs(bh->b_bdev);
	struct bio		*bio = bio_alloc(GFP_NOIO, nvecs);

	ASSERT(bio->bi_private == NULL);
	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio->bi_bdev = bh->b_bdev;
	return bio;
}

STATIC void
xfs_start_buffer_writeback(
	struct buffer_head	*bh)
{
	ASSERT(buffer_mapped(bh));
	ASSERT(buffer_locked(bh));
	ASSERT(!buffer_delay(bh));
	ASSERT(!buffer_unwritten(bh));

	mark_buffer_async_write(bh);
	set_buffer_uptodate(bh);
	clear_buffer_dirty(bh);
}

STATIC void
xfs_start_page_writeback(
	struct page		*page,
	int			clear_dirty,
	int			buffers)
{
	ASSERT(PageLocked(page));
	ASSERT(!PageWriteback(page));
	if (clear_dirty)
		clear_page_dirty_for_io(page);
	set_page_writeback(page);
	unlock_page(page);
	/* If no buffers on the page are to be written, finish it here */
	if (!buffers)
		end_page_writeback(page);
}

static inline int bio_add_buffer(struct bio *bio, struct buffer_head *bh)
{
	return bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
}

/*
 * Submit all of the bios for all of the ioends we have saved up, covering the
 * initial writepage page and also any probed pages.
 *
 * Because we may have multiple ioends spanning a page, we need to start
 * writeback on all the buffers before we submit them for I/O.  If we mark
 * the buffers as we go, we can end up with a page that only has some buffers
 * marked async write, and I/O completion on those can occur before we mark
 * the other buffers async write.
 *
 * The end result of this is that we trip a bug in end_page_writeback()
 * because we call it twice for the one page as the code in
 * end_buffer_async_write() assumes that all buffers on the page are started
 * at the same time.
 *
 * The fix is two passes across the ioend list - one to start writeback on the
 * buffer_heads, and then submit them for I/O on the second pass.
 */
STATIC void
xfs_submit_ioend(
	struct writeback_control *wbc,
	xfs_ioend_t		*ioend)
{
	xfs_ioend_t		*head = ioend;
	xfs_ioend_t		*next;
	struct buffer_head	*bh;
	struct bio		*bio;
	sector_t		lastblock = 0;

	/* Pass 1 - start writeback */
	do {
		next = ioend->io_list;
		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private)
			xfs_start_buffer_writeback(bh);
	} while ((ioend = next) != NULL);

	/* Pass 2 - submit I/O */
	ioend = head;
	do {
		next = ioend->io_list;
		bio = NULL;

		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {

			if (!bio) {
 retry:
				bio = xfs_alloc_ioend_bio(bh);
			} else if (bh->b_blocknr != lastblock + 1) {
				xfs_submit_ioend_bio(wbc, ioend, bio);
				goto retry;
			}

			if (bio_add_buffer(bio, bh) != bh->b_size) {
				xfs_submit_ioend_bio(wbc, ioend, bio);
				goto retry;
			}

			lastblock = bh->b_blocknr;
		}
		if (bio)
			xfs_submit_ioend_bio(wbc, ioend, bio);
		xfs_finish_ioend(ioend);
	} while ((ioend = next) != NULL);
}
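
/*
 * Bio construction rules in pass 2 above: a bio is flushed and restarted
 * whenever the next buffer is not physically contiguous with the last
 * one (b_blocknr != lastblock + 1), or when bio_add_buffer() cannot take
 * the whole buffer because the bio is already full.
 */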

/*
 * Cancel submission of all buffer_heads so far in this endio.
 * Toss the endio too.  Only ever called for the initial page
 * in a writepage request, so only ever one page.
 */
STATIC void
xfs_cancel_ioend(
	xfs_ioend_t		*ioend)
{
	xfs_ioend_t		*next;
	struct buffer_head	*bh, *next_bh;

	do {
		next = ioend->io_list;
		bh = ioend->io_buffer_head;
		do {
			next_bh = bh->b_private;
			clear_buffer_async_write(bh);
			unlock_buffer(bh);
		} while ((bh = next_bh) != NULL);

		mempool_free(ioend, xfs_ioend_pool);
	} while ((ioend = next) != NULL);
}

/*
 * Test to see if we've been building up a completion structure for
 * earlier buffers -- if so, we try to append to this ioend if we
 * can, otherwise we finish off any current ioend and start another.
 * The current ioend is passed back to the caller via *result.
 */
STATIC void
xfs_add_to_ioend(
	struct inode		*inode,
	struct buffer_head	*bh,
	xfs_off_t		offset,
	unsigned int		type,
	xfs_ioend_t		**result,
	int			need_ioend)
{
	xfs_ioend_t		*ioend = *result;

	if (!ioend || need_ioend || type != ioend->io_type) {
		xfs_ioend_t	*previous = *result;

		ioend = xfs_alloc_ioend(inode, type);
		ioend->io_offset = offset;
		ioend->io_buffer_head = bh;
		ioend->io_buffer_tail = bh;
		if (previous)
			previous->io_list = ioend;
		*result = ioend;
	} else {
		ioend->io_buffer_tail->b_private = bh;
		ioend->io_buffer_tail = bh;
	}

	bh->b_private = NULL;
	ioend->io_size += bh->b_size;
}
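
/*
 * Buffers within an ioend are singly linked through b_private (head at
 * io_buffer_head, tail at io_buffer_tail), and successive ioends built
 * for one writepage call are chained through io_list, which is how
 * xfs_submit_ioend() and xfs_cancel_ioend() walk them later.
 */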

STATIC void
xfs_map_buffer(
	struct inode		*inode,
	struct buffer_head	*bh,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
	sector_t		bn;
	struct xfs_mount	*m = XFS_I(inode)->i_mount;
	xfs_off_t		iomap_offset = XFS_FSB_TO_B(m, imap->br_startoff);
	xfs_daddr_t		iomap_bn = xfs_fsb_to_db(XFS_I(inode), imap->br_startblock);

	ASSERT(imap->br_startblock != HOLESTARTBLOCK);
	ASSERT(imap->br_startblock != DELAYSTARTBLOCK);

	bn = (iomap_bn >> (inode->i_blkbits - BBSHIFT)) +
	      ((offset - iomap_offset) >> inode->i_blkbits);

	ASSERT(bn || XFS_IS_REALTIME_INODE(XFS_I(inode)));

	bh->b_blocknr = bn;
	set_buffer_mapped(bh);
}
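
/*
 * The arithmetic above converts iomap_bn, a disk address in 512-byte
 * basic blocks (hence the BBSHIFT), into a b_blocknr expressed in
 * i_blkbits-sized blocks, then adds the distance of this buffer from
 * the start of the mapped extent.
 */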

STATIC void
xfs_map_at_offset(
	struct inode		*inode,
	struct buffer_head	*bh,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
	ASSERT(imap->br_startblock != HOLESTARTBLOCK);
	ASSERT(imap->br_startblock != DELAYSTARTBLOCK);

	xfs_map_buffer(inode, bh, imap, offset);
	set_buffer_mapped(bh);
	clear_buffer_delay(bh);
	clear_buffer_unwritten(bh);
}

/*
 * Test if a given page is suitable for writing as part of an unwritten
 * or delayed allocate extent.
 */
STATIC int
xfs_is_delayed_page(
	struct page		*page,
	unsigned int		type)
{
	if (PageWriteback(page))
		return 0;

	if (page->mapping && page_has_buffers(page)) {
		struct buffer_head	*bh, *head;
		int			acceptable = 0;

		bh = head = page_buffers(page);
		do {
			if (buffer_unwritten(bh))
				acceptable = (type == IO_UNWRITTEN);
			else if (buffer_delay(bh))
				acceptable = (type == IO_DELALLOC);
			else if (buffer_dirty(bh) && buffer_mapped(bh))
				acceptable = (type == IO_OVERWRITE);
			else
				break;
		} while ((bh = bh->b_this_page) != head);

		if (acceptable)
			return 1;
	}

	return 0;
}
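
/*
 * Note: the scan above stops at the first buffer that is neither
 * unwritten, delalloc, nor dirty and mapped, and the page is accepted
 * based on whether the last buffer examined matched the I/O type of the
 * ioend being built.
 */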

/*
 * Allocate & map buffers for page given the extent map and write it out.
 * Except for the original page of a writepage, this is called on
 * delalloc/unwritten pages only; for the original page it is possible
 * that the page has no mapping at all.
 */
STATIC int
xfs_convert_page(
	struct inode		*inode,
	struct page		*page,
	loff_t			tindex,
	struct xfs_bmbt_irec	*imap,
	xfs_ioend_t		**ioendp,
	struct writeback_control *wbc)
{
	struct buffer_head	*bh, *head;
	xfs_off_t		end_offset;
	unsigned long		p_offset;
	unsigned int		type;
	int			len, page_dirty;
	int			count = 0, done = 0, uptodate = 1;
	xfs_off_t		offset = page_offset(page);

	if (page->index != tindex)
		goto fail;
	if (!trylock_page(page))
		goto fail;
	if (PageWriteback(page))
		goto fail_unlock_page;
	if (page->mapping != inode->i_mapping)
		goto fail_unlock_page;
	if (!xfs_is_delayed_page(page, (*ioendp)->io_type))
		goto fail_unlock_page;

	/*
	 * page_dirty is initially a count of buffers on the page before
	 * EOF and is decremented as we move each into a cleanable state.
	 *
	 * Derivation:
	 *
	 * End offset is the highest offset that this page should represent.
	 * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
	 * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
	 * hence give us the correct page_dirty count. On any other page,
	 * it will be zero and in that case we need page_dirty to be the
	 * count of buffers on the page.
	 */
	end_offset = min_t(unsigned long long,
			(xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT,
			i_size_read(inode));

	len = 1 << inode->i_blkbits;
	p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
					PAGE_CACHE_SIZE);
	p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
	page_dirty = p_offset / len;

	bh = head = page_buffers(page);
	do {
		if (offset >= end_offset)
			break;
		if (!buffer_uptodate(bh))
			uptodate = 0;
		if (!(PageUptodate(page) || buffer_uptodate(bh))) {
			done = 1;
			continue;
		}

		if (buffer_unwritten(bh) || buffer_delay(bh) ||
		    buffer_mapped(bh)) {
			if (buffer_unwritten(bh))
				type = IO_UNWRITTEN;
			else if (buffer_delay(bh))
				type = IO_DELALLOC;
			else
				type = IO_OVERWRITE;

			if (!xfs_imap_valid(inode, imap, offset)) {
				done = 1;
				continue;
			}

			lock_buffer(bh);
			if (type != IO_OVERWRITE)
				xfs_map_at_offset(inode, bh, imap, offset);
			xfs_add_to_ioend(inode, bh, offset, type,
					 ioendp, done);

			page_dirty--;
			count++;
		} else {
			done = 1;
		}
	} while (offset += len, (bh = bh->b_this_page) != head);

	if (uptodate && bh == head)
		SetPageUptodate(page);

	if (count) {
		if (--wbc->nr_to_write <= 0 &&
		    wbc->sync_mode == WB_SYNC_NONE)
			done = 1;
	}
	xfs_start_page_writeback(page, !page_dirty, count);

	return done;
 fail_unlock_page:
	unlock_page(page);
 fail:
	return 1;
}

/*
 * Convert & write out a cluster of pages in the same extent as defined
 * by mp and following the start page.
 */
STATIC void
xfs_cluster_write(
	struct inode		*inode,
	pgoff_t			tindex,
	struct xfs_bmbt_irec	*imap,
	xfs_ioend_t		**ioendp,
	struct writeback_control *wbc,
	pgoff_t			tlast)
{
	struct pagevec		pvec;
	int			done = 0, i;

	pagevec_init(&pvec, 0);
	while (!done && tindex <= tlast) {
		unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);

		if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
			break;

		for (i = 0; i < pagevec_count(&pvec); i++) {
			done = xfs_convert_page(inode, pvec.pages[i], tindex++,
					imap, ioendp, wbc);
			if (done)
				break;
		}

		pagevec_release(&pvec);
		cond_resched();
	}
}

STATIC void
xfs_vm_invalidatepage(
	struct page		*page,
	unsigned long		offset)
{
	trace_xfs_invalidatepage(page->mapping->host, page, offset);
	block_invalidatepage(page, offset);
}

/*
 * If the page has delalloc buffers on it, we need to punch them out before we
 * invalidate the page.  If we don't, we leave a stale delalloc mapping on the
 * inode that can trip a BUG() in xfs_get_blocks() later on if a direct IO
 * read is done on that same region - the delalloc extent is returned when
 * none is supposed to be there.
 *
 * We prevent this by truncating away the delalloc regions on the page before
 * invalidating it.  Because they are delalloc, we can do this without needing
 * a transaction.  Indeed - if we get ENOSPC errors, we have to be able to do
 * this truncation without a transaction as there is no space left for block
 * reservation (typically why we see an ENOSPC in writeback).
 *
 * This is not a performance critical path, so for now just do the punching a
 * buffer head at a time.
 */
STATIC void
xfs_aops_discard_page(
	struct page		*page)
{
	struct inode		*inode = page->mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct buffer_head	*bh, *head;
	loff_t			offset = page_offset(page);

	if (!xfs_is_delayed_page(page, IO_DELALLOC))
		goto out_invalidate;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		goto out_invalidate;

	xfs_alert(ip->i_mount,
		"page discard on page %p, inode 0x%llx, offset %llu.",
			page, ip->i_ino, offset);

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	bh = head = page_buffers(page);
	do {
		int		error;
		xfs_fileoff_t	start_fsb;

		if (!buffer_delay(bh))
			goto next_buffer;

		start_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
		error = xfs_bmap_punch_delalloc_range(ip, start_fsb, 1);
		if (error) {
			/* something screwed, just bail */
			if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
				xfs_alert(ip->i_mount,
			"page discard unable to remove delalloc mapping.");
			}
			break;
		}
next_buffer:
		offset += 1 << inode->i_blkbits;

	} while ((bh = bh->b_this_page) != head);

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
out_invalidate:
	xfs_vm_invalidatepage(page, 0);
	return;
}

/*
 * Write out a dirty page.
 *
 * For delalloc space on the page we need to allocate space and flush it.
 * For unwritten space on the page we need to start the conversion to
 * regular allocated space.
 * For any other dirty buffer heads on the page we should flush them.
 */
STATIC int
xfs_vm_writepage(
	struct page		*page,
	struct writeback_control *wbc)
{
	struct inode		*inode = page->mapping->host;
	struct buffer_head	*bh, *head;
	struct xfs_bmbt_irec	imap;
	xfs_ioend_t		*ioend = NULL, *iohead = NULL;
	loff_t			offset;
	unsigned int		type;
	__uint64_t		end_offset;
	pgoff_t			end_index, last_index;
	ssize_t			len;
	int			err, imap_valid = 0, uptodate = 1;
	int			count = 0;
	int			nonblocking = 0;

	trace_xfs_writepage(inode, page, 0);

	ASSERT(page_has_buffers(page));

	/*
	 * Refuse to write the page out if we are called from reclaim context.
	 *
	 * This avoids stack overflows when called from deeply used stacks in
	 * random callers for direct reclaim or memcg reclaim.  We explicitly
	 * allow reclaim from kswapd as the stack usage there is relatively low.
	 *
	 * This should really be done by the core VM, but until that happens
	 * filesystems like XFS, btrfs and ext4 have to take care of this
	 * by themselves.
	 */
	if ((current->flags & (PF_MEMALLOC|PF_KSWAPD)) == PF_MEMALLOC)
		goto redirty;

	/*
	 * Given that we do not allow direct reclaim to call us, we should
	 * never be called while in a filesystem transaction.
	 */
	if (WARN_ON(current->flags & PF_FSTRANS))
		goto redirty;

	/* Is this page beyond the end of the file? */
	offset = i_size_read(inode);
	end_index = offset >> PAGE_CACHE_SHIFT;
	last_index = (offset - 1) >> PAGE_CACHE_SHIFT;
	if (page->index >= end_index) {
		if ((page->index >= end_index + 1) ||
		    !(i_size_read(inode) & (PAGE_CACHE_SIZE - 1))) {
			unlock_page(page);
			return 0;
		}
	}

	end_offset = min_t(unsigned long long,
			(xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT,
			offset);
	len = 1 << inode->i_blkbits;

	bh = head = page_buffers(page);
	offset = page_offset(page);
	type = IO_OVERWRITE;

	if (wbc->sync_mode == WB_SYNC_NONE)
		nonblocking = 1;

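	/*
	 * Walk every buffer on the page: classify it as unwritten, delalloc
	 * or overwrite, (re)map it if the cached imap no longer covers this
	 * file offset, and chain it onto the current ioend.  The ioend
	 * chain rooted at iohead is submitted (or cancelled) as a unit
	 * below.
	 */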
	do {
		int new_ioend = 0;

		if (offset >= end_offset)
			break;
		if (!buffer_uptodate(bh))
			uptodate = 0;

		/*
		 * set_page_dirty dirties all buffers in a page, independent
		 * of their state.  The dirty state however is entirely
		 * meaningless for holes (!mapped && uptodate), so skip
		 * buffers covering holes here.
		 */
		if (!buffer_mapped(bh) && buffer_uptodate(bh)) {
			imap_valid = 0;
			continue;
		}

		if (buffer_unwritten(bh)) {
			if (type != IO_UNWRITTEN) {
				type = IO_UNWRITTEN;
				imap_valid = 0;
			}
		} else if (buffer_delay(bh)) {
			if (type != IO_DELALLOC) {
				type = IO_DELALLOC;
				imap_valid = 0;
			}
		} else if (buffer_uptodate(bh)) {
			if (type != IO_OVERWRITE) {
				type = IO_OVERWRITE;
				imap_valid = 0;
			}
		} else {
			if (PageUptodate(page)) {
				ASSERT(buffer_mapped(bh));
				imap_valid = 0;
			}
			continue;
		}

		if (imap_valid)
			imap_valid = xfs_imap_valid(inode, &imap, offset);
		if (!imap_valid) {
			/*
			 * If we didn't have a valid mapping then we need to
			 * put the new mapping into a separate ioend structure.
			 * This ensures non-contiguous extents always have
			 * separate ioends, which is particularly important
			 * for unwritten extent conversion at I/O completion
			 * time.
			 */
			new_ioend = 1;
			err = xfs_map_blocks(inode, offset, &imap, type,
					     nonblocking);
			if (err)
				goto error;
			imap_valid = xfs_imap_valid(inode, &imap, offset);
		}
		if (imap_valid) {
			lock_buffer(bh);
			if (type != IO_OVERWRITE)
				xfs_map_at_offset(inode, bh, &imap, offset);
			xfs_add_to_ioend(inode, bh, offset, type, &ioend,
					 new_ioend);
			count++;
		}

		if (!iohead)
			iohead = ioend;

	} while (offset += len, ((bh = bh->b_this_page) != head));

	if (uptodate && bh == head)
		SetPageUptodate(page);

	xfs_start_page_writeback(page, 1, count);

	if (ioend && imap_valid) {
		xfs_off_t		end_index;

		end_index = imap.br_startoff + imap.br_blockcount;

		/* to bytes */
		end_index <<= inode->i_blkbits;

		/* to pages */
		end_index = (end_index - 1) >> PAGE_CACHE_SHIFT;

		/* check against file size */
		if (end_index > last_index)
			end_index = last_index;

		xfs_cluster_write(inode, page->index + 1, &imap, &ioend,
				  wbc, end_index);
	}

	if (iohead)
		xfs_submit_ioend(wbc, iohead);

	return 0;

error:
	if (iohead)
		xfs_cancel_ioend(iohead);

	if (err == -EAGAIN)
		goto redirty;

	xfs_aops_discard_page(page);
	ClearPageUptodate(page);
	unlock_page(page);
	return err;

redirty:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return 0;
}
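
/*
 * Note on the error path above: -EAGAIN simply means xfs_map_blocks()
 * could not get the ilock in non-blocking (WB_SYNC_NONE) mode, so the
 * page is redirtied and left for a later writeback pass rather than
 * being discarded.
 */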

STATIC int
xfs_vm_writepages(
	struct address_space	*mapping,
	struct writeback_control *wbc)
{
	xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
	return generic_writepages(mapping, wbc);
}

/*
 * Called to move a page into cleanable state - and from there
 * to be released.  The page should already be clean.  We always
 * have buffer heads in this call.
 *
 * Returns 1 if the page is ok to release, 0 otherwise.
 */
STATIC int
xfs_vm_releasepage(
	struct page		*page,
	gfp_t			gfp_mask)
{
	int			delalloc, unwritten;

	trace_xfs_releasepage(page->mapping->host, page, 0);

	xfs_count_page_state(page, &delalloc, &unwritten);

	if (WARN_ON(delalloc))
		return 0;
	if (WARN_ON(unwritten))
		return 0;

	return try_to_free_buffers(page);
}
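
/*
 * The WARN_ONs above flag a ->releasepage() call against a page that
 * still has delalloc or unwritten buffers; writeback should have dealt
 * with those first, so refuse the release rather than leak stale state.
 */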
1091
Linus Torvalds1da177e2005-04-16 15:20:36 -07001092STATIC int
Nathan Scottc2536662006-03-29 10:44:40 +10001093__xfs_get_blocks(
Linus Torvalds1da177e2005-04-16 15:20:36 -07001094 struct inode *inode,
1095 sector_t iblock,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001096 struct buffer_head *bh_result,
1097 int create,
Christoph Hellwigf2bde9b2010-06-24 11:44:35 +10001098 int direct)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001099{
Christoph Hellwiga206c812010-12-10 08:42:20 +00001100 struct xfs_inode *ip = XFS_I(inode);
1101 struct xfs_mount *mp = ip->i_mount;
1102 xfs_fileoff_t offset_fsb, end_fsb;
1103 int error = 0;
1104 int lockmode = 0;
Christoph Hellwig207d0412010-04-28 12:28:56 +00001105 struct xfs_bmbt_irec imap;
Christoph Hellwiga206c812010-12-10 08:42:20 +00001106 int nimaps = 1;
Nathan Scottfdc7ed72005-11-02 15:13:13 +11001107 xfs_off_t offset;
1108 ssize_t size;
Christoph Hellwig207d0412010-04-28 12:28:56 +00001109 int new = 0;
Christoph Hellwiga206c812010-12-10 08:42:20 +00001110
1111 if (XFS_FORCED_SHUTDOWN(mp))
1112 return -XFS_ERROR(EIO);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001113
Nathan Scottfdc7ed72005-11-02 15:13:13 +11001114 offset = (xfs_off_t)iblock << inode->i_blkbits;
Nathan Scottc2536662006-03-29 10:44:40 +10001115 ASSERT(bh_result->b_size >= (1 << inode->i_blkbits));
1116 size = bh_result->b_size;
Lachlan McIlroy364f3582008-09-17 16:50:14 +10001117
1118 if (!create && direct && offset >= i_size_read(inode))
1119 return 0;
1120
Christoph Hellwiga206c812010-12-10 08:42:20 +00001121 if (create) {
1122 lockmode = XFS_ILOCK_EXCL;
1123 xfs_ilock(ip, lockmode);
1124 } else {
1125 lockmode = xfs_ilock_map_shared(ip);
1126 }
Christoph Hellwigf2bde9b2010-06-24 11:44:35 +10001127
Christoph Hellwiga206c812010-12-10 08:42:20 +00001128 ASSERT(offset <= mp->m_maxioffset);
1129 if (offset + size > mp->m_maxioffset)
1130 size = mp->m_maxioffset - offset;
1131 end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + size);
1132 offset_fsb = XFS_B_TO_FSBT(mp, offset);
1133
1134 error = xfs_bmapi(NULL, ip, offset_fsb, end_fsb - offset_fsb,
1135 XFS_BMAPI_ENTIRE, NULL, 0, &imap, &nimaps, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001136 if (error)
Christoph Hellwiga206c812010-12-10 08:42:20 +00001137 goto out_unlock;
1138
1139 if (create &&
1140 (!nimaps ||
1141 (imap.br_startblock == HOLESTARTBLOCK ||
1142 imap.br_startblock == DELAYSTARTBLOCK))) {
1143 if (direct) {
1144 error = xfs_iomap_write_direct(ip, offset, size,
1145 &imap, nimaps);
1146 } else {
1147 error = xfs_iomap_write_delay(ip, offset, size, &imap);
1148 }
1149 if (error)
1150 goto out_unlock;
1151
1152 trace_xfs_get_blocks_alloc(ip, offset, size, 0, &imap);
1153 } else if (nimaps) {
1154 trace_xfs_get_blocks_found(ip, offset, size, 0, &imap);
1155 } else {
1156 trace_xfs_get_blocks_notfound(ip, offset, size);
1157 goto out_unlock;
1158 }
1159 xfs_iunlock(ip, lockmode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001160
Christoph Hellwig207d0412010-04-28 12:28:56 +00001161 if (imap.br_startblock != HOLESTARTBLOCK &&
1162 imap.br_startblock != DELAYSTARTBLOCK) {
Nathan Scott87cbc492006-03-14 13:26:43 +11001163 /*
1164 * For unwritten extents do not report a disk address on
Linus Torvalds1da177e2005-04-16 15:20:36 -07001165 * the read case (treat as if we're reading into a hole).
1166 */
Christoph Hellwig207d0412010-04-28 12:28:56 +00001167 if (create || !ISUNWRITTEN(&imap))
1168 xfs_map_buffer(inode, bh_result, &imap, offset);
1169 if (create && ISUNWRITTEN(&imap)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001170 if (direct)
1171 bh_result->b_private = inode;
1172 set_buffer_unwritten(bh_result);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001173 }
1174 }
1175
Nathan Scottc2536662006-03-29 10:44:40 +10001176 /*
1177 * If this is a realtime file, data may be on a different device.
1178 * to that pointed to from the buffer_head b_bdev currently.
1179 */
Christoph Hellwig046f1682010-04-28 12:28:52 +00001180 bh_result->b_bdev = xfs_find_bdev_for_inode(inode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001181
Nathan Scottc2536662006-03-29 10:44:40 +10001182 /*
David Chinner549054a2007-02-10 18:36:35 +11001183 * If we previously allocated a block out beyond eof and we are now
1184 * coming back to use it then we will need to flag it as new even if it
1185 * has a disk address.
1186 *
1187 * With sub-block writes into unwritten extents we also need to mark
1188 * the buffer as new so that the unwritten parts of the buffer gets
1189 * correctly zeroed.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001190 */
1191 if (create &&
1192 ((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) ||
David Chinner549054a2007-02-10 18:36:35 +11001193 (offset >= i_size_read(inode)) ||
Christoph Hellwig207d0412010-04-28 12:28:56 +00001194 (new || ISUNWRITTEN(&imap))))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001195 set_buffer_new(bh_result);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001196
Christoph Hellwig207d0412010-04-28 12:28:56 +00001197 if (imap.br_startblock == DELAYSTARTBLOCK) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001198 BUG_ON(direct);
1199 if (create) {
1200 set_buffer_uptodate(bh_result);
1201 set_buffer_mapped(bh_result);
1202 set_buffer_delay(bh_result);
1203 }
1204 }
1205
	/*
	 * If this is O_DIRECT or the mpage code calling, tell them how large
	 * the mapping is so that we can avoid repeated get_blocks calls.
	 */
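	/*
	 * As a rough illustration (numbers not from the original source):
	 * with 4k blocks (i_blkbits == 12), an extent that starts two blocks
	 * before iblock and covers eight blocks gives
	 *
	 *	mapping_size = (br_startoff + br_blockcount - iblock) << 12
	 *		     = 6 << 12 = 24k
	 *
	 * which is then clamped below to the requested size and to LONG_MAX.
	 */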
	if (direct || size > (1 << inode->i_blkbits)) {
		xfs_off_t	mapping_size;

		mapping_size = imap.br_startoff + imap.br_blockcount - iblock;
		mapping_size <<= inode->i_blkbits;

		ASSERT(mapping_size > 0);
		if (mapping_size > size)
			mapping_size = size;
		if (mapping_size > LONG_MAX)
			mapping_size = LONG_MAX;

		bh_result->b_size = mapping_size;
	}

	return 0;

out_unlock:
	xfs_iunlock(ip, lockmode);
	return -error;
}

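/*
 * Buffered I/O variant of the get_blocks interface: calls into
 * __xfs_get_blocks with the direct flag cleared.
 */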
int
xfs_get_blocks(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create)
{
	return __xfs_get_blocks(inode, iblock, bh_result, create, 0);
}

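/*
 * Direct I/O variant: identical to xfs_get_blocks except that the direct
 * flag is set, so unwritten extents get the b_private treatment needed
 * for conversion at I/O completion time.
 */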
STATIC int
xfs_get_blocks_direct(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create)
{
	return __xfs_get_blocks(inode, iblock, bh_result, create, 1);
}

/*
 * Complete a direct I/O write request.
 *
 * If the private argument is non-NULL, __xfs_get_blocks signals us that we
 * need to issue a transaction to convert the range from unwritten to written
 * extents.  If this is regular synchronous I/O we just call xfs_end_io
 * to do this and we are done.  But if this was a successful AIO request,
 * this handler is called from interrupt context, from which we cannot
 * start transactions.  In that case offload the I/O completion to the
 * workqueues we also use for buffered I/O completion.
 */
STATIC void
xfs_end_io_direct_write(
	struct kiocb		*iocb,
	loff_t			offset,
	ssize_t			size,
	void			*private,
	int			ret,
	bool			is_async)
{
	struct xfs_ioend	*ioend = iocb->private;

	/*
	 * blockdev_direct_IO can return an error even after the I/O
	 * completion handler was called.  Thus we need to protect
	 * against double-freeing.
	 */
	iocb->private = NULL;

	ioend->io_offset = offset;
	ioend->io_size = size;
	ioend->io_iocb = iocb;
	ioend->io_result = ret;
	if (private && size > 0)
		ioend->io_type = IO_UNWRITTEN;

	if (is_async) {
		ioend->io_isasync = 1;
		xfs_finish_ioend(ioend);
	} else {
		xfs_finish_ioend_sync(ioend);
	}
}

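/*
 * Dispatch a direct I/O request.  Writes get an ioend attached to the
 * iocb so that unwritten extent conversion can happen at completion
 * time; reads need no completion handling beyond the generic code.
 */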
STATIC ssize_t
xfs_vm_direct_IO(
	int			rw,
	struct kiocb		*iocb,
	const struct iovec	*iov,
	loff_t			offset,
	unsigned long		nr_segs)
{
	struct inode		*inode = iocb->ki_filp->f_mapping->host;
	struct block_device	*bdev = xfs_find_bdev_for_inode(inode);
	ssize_t			ret;

	if (rw & WRITE) {
		iocb->private = xfs_alloc_ioend(inode, IO_DIRECT);

		ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iov,
					   offset, nr_segs,
					   xfs_get_blocks_direct,
					   xfs_end_io_direct_write, NULL, 0);
		if (ret != -EIOCBQUEUED && iocb->private)
			xfs_destroy_ioend(iocb->private);
	} else {
		ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iov,
					   offset, nr_segs,
					   xfs_get_blocks_direct,
					   NULL, NULL, 0);
	}

	return ret;
}

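/*
 * Clean up after a failed buffered write: delayed allocation blocks
 * created beyond the old EOF for this write must be punched out again,
 * or they would be left stranded with no data ever written to them.
 */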
STATIC void
xfs_vm_write_failed(
	struct address_space	*mapping,
	loff_t			to)
{
	struct inode		*inode = mapping->host;

	if (to > inode->i_size) {
		/*
		 * Punch out the delalloc blocks we have already allocated.
		 * We don't call xfs_setattr() to do this as we may be in the
		 * middle of a multi-iovec write and so the vfs inode->i_size
		 * will not match the xfs ip->i_size and so it would zero too
		 * much.  Hence we just truncate the page cache to zero what
		 * is necessary and punch the delalloc blocks directly.
		 */
		struct xfs_inode	*ip = XFS_I(inode);
		xfs_fileoff_t		start_fsb;
		xfs_fileoff_t		end_fsb;
		int			error;

		truncate_pagecache(inode, to, inode->i_size);

		/*
		 * Check if there are any blocks that are outside of i_size
		 * that need to be trimmed back.
		 */
		start_fsb = XFS_B_TO_FSB(ip->i_mount, inode->i_size) + 1;
		end_fsb = XFS_B_TO_FSB(ip->i_mount, to);
		if (end_fsb <= start_fsb)
			return;

		xfs_ilock(ip, XFS_ILOCK_EXCL);
		error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
						      end_fsb - start_fsb);
		if (error) {
			/* something went badly wrong, just bail */
			if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
				xfs_alert(ip->i_mount,
		"xfs_vm_write_failed: unable to clean up ino %lld",
					ip->i_ino);
			}
		}
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
	}
}

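/*
 * ->write_begin for the buffered write path.  If block_write_begin fails
 * after delalloc blocks have been reserved for this write, trim them
 * back so they are not left behind beyond EOF.
 */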
STATIC int
xfs_vm_write_begin(
	struct file		*file,
	struct address_space	*mapping,
	loff_t			pos,
	unsigned		len,
	unsigned		flags,
	struct page		**pagep,
	void			**fsdata)
{
	int			ret;

	ret = block_write_begin(mapping, pos, len, flags | AOP_FLAG_NOFS,
				pagep, xfs_get_blocks);
	if (unlikely(ret))
		xfs_vm_write_failed(mapping, pos + len);
	return ret;
}

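/*
 * ->write_end: a short copy from generic_write_end means part of the
 * write never made it into the page, so treat the tail as a failed
 * write and clean up any blocks allocated for it.
 */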
STATIC int
xfs_vm_write_end(
	struct file		*file,
	struct address_space	*mapping,
	loff_t			pos,
	unsigned		len,
	unsigned		copied,
	struct page		*page,
	void			*fsdata)
{
	int			ret;

	ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
	if (unlikely(ret < len))
		xfs_vm_write_failed(mapping, pos + len);
	return ret;
}

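/*
 * ->bmap maps a file offset to a disk block (e.g. for the FIBMAP ioctl).
 * Flush dirty, delalloc-backed data first so that the generic block
 * mapping sees the final on-disk allocation.
 */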
STATIC sector_t
xfs_vm_bmap(
	struct address_space	*mapping,
	sector_t		block)
{
	struct inode		*inode = (struct inode *)mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);

	trace_xfs_vm_bmap(XFS_I(inode));
	xfs_ilock(ip, XFS_IOLOCK_SHARED);
	xfs_flush_pages(ip, (xfs_off_t)0, -1, 0, FI_REMAPF);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
	return generic_block_bmap(mapping, block, xfs_get_blocks);
}

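/*
 * Read path: hand straight off to the generic mpage helpers, using
 * xfs_get_blocks for the block mapping.
 */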
STATIC int
xfs_vm_readpage(
	struct file		*unused,
	struct page		*page)
{
	return mpage_readpage(page, xfs_get_blocks);
}

STATIC int
xfs_vm_readpages(
	struct file		*unused,
	struct address_space	*mapping,
	struct list_head	*pages,
	unsigned		nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks);
}

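/*
 * The address_space operations wiring XFS into the generic page cache,
 * buffered write, and direct I/O paths.
 */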
const struct address_space_operations xfs_address_space_operations = {
	.readpage		= xfs_vm_readpage,
	.readpages		= xfs_vm_readpages,
	.writepage		= xfs_vm_writepage,
	.writepages		= xfs_vm_writepages,
	.releasepage		= xfs_vm_releasepage,
	.invalidatepage		= xfs_vm_invalidatepage,
	.write_begin		= xfs_vm_write_begin,
	.write_end		= xfs_vm_write_end,
	.bmap			= xfs_vm_bmap,
	.direct_IO		= xfs_vm_direct_IO,
	.migratepage		= buffer_migrate_page,
	.is_partially_uptodate	= block_is_partially_uptodate,
	.error_remove_page	= generic_error_remove_page,
};