/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_trans.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
#include "xfs_rw.h"
#include "xfs_iomap.h"
#include "xfs_vnodeops.h"
#include "xfs_trace.h"
#include "xfs_bmap.h"
#include <linux/gfp.h>
#include <linux/mpage.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>

/*
 * Prime number of hash buckets since address is used as the key.
 */
#define NVSYNC		37
#define to_ioend_wq(v)	(&xfs_ioend_wq[((unsigned long)v) % NVSYNC])
static wait_queue_head_t xfs_ioend_wq[NVSYNC];

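/*
 * Initialise the wait queue hash at module init time.
 */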
void __init
xfs_ioend_init(void)
{
	int i;

	for (i = 0; i < NVSYNC; i++)
		init_waitqueue_head(&xfs_ioend_wq[i]);
}

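/*
 * Wait for all I/O on an inode to complete, i.e. for its pending
 * I/O count (i_iocount) to drop to zero.
 */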
void
xfs_ioend_wait(
	xfs_inode_t	*ip)
{
	wait_queue_head_t *wq = to_ioend_wq(ip);

	wait_event(*wq, (atomic_read(&ip->i_iocount) == 0));
}

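/*
 * Drop a reference to the inode's pending I/O count and wake any
 * waiters when the last one goes away.
 */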
STATIC void
xfs_ioend_wake(
	xfs_inode_t	*ip)
{
	if (atomic_dec_and_test(&ip->i_iocount))
		wake_up(to_ioend_wq(ip));
}

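/*
 * Walk the buffer_heads on a page and report whether any of them are
 * in the delalloc or unwritten state.
 */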
void
xfs_count_page_state(
	struct page		*page,
	int			*delalloc,
	int			*unwritten)
{
	struct buffer_head	*bh, *head;

	*delalloc = *unwritten = 0;

	bh = head = page_buffers(page);
	do {
		if (buffer_unwritten(bh))
			(*unwritten) = 1;
		else if (buffer_delay(bh))
			(*delalloc) = 1;
	} while ((bh = bh->b_this_page) != head);
}

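/*
 * Return the block device backing this inode's data: the realtime
 * device for realtime inodes, the data device for everything else.
 */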
STATIC struct block_device *
xfs_find_bdev_for_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;

	if (XFS_IS_REALTIME_INODE(ip))
		return mp->m_rtdev_targp->bt_bdev;
	else
		return mp->m_ddev_targp->bt_bdev;
}

/*
 * We're now finished for good with this ioend structure.
 * Update the page state via the associated buffer_heads,
 * release holds on the inode and bio, and finally free
 * up memory.  Do not use the ioend after this.
 */
STATIC void
xfs_destroy_ioend(
	xfs_ioend_t		*ioend)
{
	struct buffer_head	*bh, *next;
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);

	for (bh = ioend->io_buffer_head; bh; bh = next) {
		next = bh->b_private;
		bh->b_end_io(bh, !ioend->io_error);
	}

	if (ioend->io_iocb) {
		if (ioend->io_isasync)
			aio_complete(ioend->io_iocb, ioend->io_result, 0);
		inode_dio_done(ioend->io_inode);
	}
	xfs_ioend_wake(ip);
	mempool_free(ioend, xfs_ioend_pool);
}

/*
 * If the end of the current ioend is beyond the current EOF,
 * return the new EOF value, otherwise zero.
 */
STATIC xfs_fsize_t
xfs_ioend_new_eof(
	xfs_ioend_t		*ioend)
{
	xfs_inode_t		*ip = XFS_I(ioend->io_inode);
	xfs_fsize_t		isize;
	xfs_fsize_t		bsize;

	bsize = ioend->io_offset + ioend->io_size;
	isize = MAX(ip->i_size, ip->i_new_size);
	isize = MIN(isize, bsize);
	return isize > ip->i_d.di_size ? isize : 0;
}

/*
 * Fast and loose check if this write could update the on-disk inode size.
 */
static inline bool xfs_ioend_is_append(struct xfs_ioend *ioend)
{
	return ioend->io_offset + ioend->io_size >
		XFS_I(ioend->io_inode)->i_d.di_size;
}

/*
 * Update on-disk file size now that data has been written to disk.  The
 * current in-memory file size is i_size.  If a write is beyond eof i_new_size
 * will be the intended file size until i_size is updated.  If this write does
 * not extend all the way to the valid file size then restrict this update to
 * the end of the write.
 *
 * This function does not block as blocking on the inode lock in IO completion
 * can lead to IO completion order dependency deadlocks.  If it can't get the
 * inode ilock it will return EAGAIN.  Callers must handle this.
 */
STATIC int
xfs_setfilesize(
	xfs_ioend_t		*ioend)
{
	xfs_inode_t		*ip = XFS_I(ioend->io_inode);
	xfs_fsize_t		isize;

	if (unlikely(ioend->io_error))
		return 0;

	if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL))
		return EAGAIN;

	isize = xfs_ioend_new_eof(ioend);
	if (isize) {
		trace_xfs_setfilesize(ip, ioend->io_offset, ioend->io_size);
		ip->i_d.di_size = isize;
		xfs_mark_inode_dirty(ip);
	}

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return 0;
}

/*
 * Schedule IO completion handling on the final put of an ioend.
 *
 * If there is no work to do we might as well call it a day and free the
 * ioend right now.
 */
STATIC void
xfs_finish_ioend(
	struct xfs_ioend	*ioend)
{
	if (atomic_dec_and_test(&ioend->io_remaining)) {
		if (ioend->io_type == IO_UNWRITTEN)
			queue_work(xfsconvertd_workqueue, &ioend->io_work);
		else if (xfs_ioend_is_append(ioend))
			queue_work(xfsdatad_workqueue, &ioend->io_work);
		else
			xfs_destroy_ioend(ioend);
	}
}

/*
 * IO write completion.
 */
STATIC void
xfs_end_io(
	struct work_struct *work)
{
	xfs_ioend_t	*ioend = container_of(work, xfs_ioend_t, io_work);
	struct xfs_inode *ip = XFS_I(ioend->io_inode);
	int		error = 0;

	/*
	 * For unwritten extents we need to issue transactions to convert a
	 * range to normal written extents after the data I/O has finished.
	 */
	if (ioend->io_type == IO_UNWRITTEN &&
	    likely(!ioend->io_error && !XFS_FORCED_SHUTDOWN(ip->i_mount))) {

		error = xfs_iomap_write_unwritten(ip, ioend->io_offset,
						  ioend->io_size);
		if (error)
			ioend->io_error = error;
	}

	/*
	 * We might have to update the on-disk file size after extending
	 * writes.
	 */
	error = xfs_setfilesize(ioend);
	ASSERT(!error || error == EAGAIN);

	/*
	 * If we didn't complete processing of the ioend, requeue it to the
	 * tail of the workqueue for another attempt later. Otherwise destroy
	 * it.
	 */
	if (error == EAGAIN) {
		atomic_inc(&ioend->io_remaining);
		xfs_finish_ioend(ioend);
		/* ensure we don't spin on blocked ioends */
		delay(1);
	} else {
		xfs_destroy_ioend(ioend);
	}
}

/*
 * Call IO completion handling in caller context on the final put of an ioend.
 */
STATIC void
xfs_finish_ioend_sync(
	struct xfs_ioend	*ioend)
{
	if (atomic_dec_and_test(&ioend->io_remaining))
		xfs_end_io(&ioend->io_work);
}

/*
 * Allocate and initialise an IO completion structure.
 * We need to track unwritten extent write completion here initially.
 * We'll need to extend this for updating the ondisk inode size later
 * (vs. incore size).
 */
STATIC xfs_ioend_t *
xfs_alloc_ioend(
	struct inode		*inode,
	unsigned int		type)
{
	xfs_ioend_t		*ioend;

	ioend = mempool_alloc(xfs_ioend_pool, GFP_NOFS);

	/*
	 * Set the count to 1 initially, which will prevent an I/O
	 * completion callback from happening before we have started
	 * all the I/O from calling the completion routine too early.
	 */
	atomic_set(&ioend->io_remaining, 1);
	ioend->io_isasync = 0;
	ioend->io_error = 0;
	ioend->io_list = NULL;
	ioend->io_type = type;
	ioend->io_inode = inode;
	ioend->io_buffer_head = NULL;
	ioend->io_buffer_tail = NULL;
	atomic_inc(&XFS_I(ioend->io_inode)->i_iocount);
	ioend->io_offset = 0;
	ioend->io_size = 0;
	ioend->io_iocb = NULL;
	ioend->io_result = 0;

	INIT_WORK(&ioend->io_work, xfs_end_io);
	return ioend;
}

STATIC int
xfs_map_blocks(
	struct inode		*inode,
	loff_t			offset,
	struct xfs_bmbt_irec	*imap,
	int			type,
	int			nonblocking)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	ssize_t			count = 1 << inode->i_blkbits;
	xfs_fileoff_t		offset_fsb, end_fsb;
	int			error = 0;
	int			bmapi_flags = XFS_BMAPI_ENTIRE;
	int			nimaps = 1;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -XFS_ERROR(EIO);

	if (type == IO_UNWRITTEN)
		bmapi_flags |= XFS_BMAPI_IGSTATE;

	if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) {
		if (nonblocking)
			return -XFS_ERROR(EAGAIN);
		xfs_ilock(ip, XFS_ILOCK_SHARED);
	}

	ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
	       (ip->i_df.if_flags & XFS_IFEXTENTS));
	ASSERT(offset <= mp->m_maxioffset);

	if (offset + count > mp->m_maxioffset)
		count = mp->m_maxioffset - offset;
	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	error = xfs_bmapi(NULL, ip, offset_fsb, end_fsb - offset_fsb,
			  bmapi_flags, NULL, 0, imap, &nimaps, NULL);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (error)
		return -XFS_ERROR(error);

	if (type == IO_DELALLOC &&
	    (!nimaps || isnullstartblock(imap->br_startblock))) {
		error = xfs_iomap_write_allocate(ip, offset, count, imap);
		if (!error)
			trace_xfs_map_blocks_alloc(ip, offset, count, type, imap);
		return -XFS_ERROR(error);
	}

#ifdef DEBUG
	if (type == IO_UNWRITTEN) {
		ASSERT(nimaps);
		ASSERT(imap->br_startblock != HOLESTARTBLOCK);
		ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
	}
#endif
	if (nimaps)
		trace_xfs_map_blocks_found(ip, offset, count, type, imap);
	return 0;
}

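/*
 * Check whether the given file offset falls inside the cached extent
 * mapping.
 */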
STATIC int
xfs_imap_valid(
	struct inode		*inode,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
	offset >>= inode->i_blkbits;

	return offset >= imap->br_startoff &&
		offset < imap->br_startoff + imap->br_blockcount;
}

/*
 * BIO completion handler for buffered IO.
 */
STATIC void
xfs_end_bio(
	struct bio		*bio,
	int			error)
{
	xfs_ioend_t		*ioend = bio->bi_private;

	ASSERT(atomic_read(&bio->bi_cnt) >= 1);
	ioend->io_error = test_bit(BIO_UPTODATE, &bio->bi_flags) ? 0 : error;

	/* Toss bio and pass work off to an xfsdatad thread */
	bio->bi_private = NULL;
	bio->bi_end_io = NULL;
	bio_put(bio);

	xfs_finish_ioend(ioend);
}

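/*
 * Take a reference on the ioend, hook the completion handler up to the
 * bio and send it off to the block layer.
 */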
STATIC void
xfs_submit_ioend_bio(
	struct writeback_control *wbc,
	xfs_ioend_t		*ioend,
	struct bio		*bio)
{
	atomic_inc(&ioend->io_remaining);
	bio->bi_private = ioend;
	bio->bi_end_io = xfs_end_bio;

	/*
	 * If the I/O is beyond EOF we mark the inode dirty immediately
	 * but don't update the inode size until I/O completion.
	 */
	if (xfs_ioend_new_eof(ioend))
		xfs_mark_inode_dirty(XFS_I(ioend->io_inode));

	submit_bio(wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE, bio);
}

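/*
 * Allocate a bio sized to the maximum the block device accepts and
 * point it at the disk block backing the buffer_head.
 */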
STATIC struct bio *
xfs_alloc_ioend_bio(
	struct buffer_head	*bh)
{
	int			nvecs = bio_get_nr_vecs(bh->b_bdev);
	struct bio		*bio = bio_alloc(GFP_NOIO, nvecs);

	ASSERT(bio->bi_private == NULL);
	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio->bi_bdev = bh->b_bdev;
	return bio;
}

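/*
 * Mark a mapped, locked buffer as under async writeback, uptodate
 * and clean.
 */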
STATIC void
xfs_start_buffer_writeback(
	struct buffer_head	*bh)
{
	ASSERT(buffer_mapped(bh));
	ASSERT(buffer_locked(bh));
	ASSERT(!buffer_delay(bh));
	ASSERT(!buffer_unwritten(bh));

	mark_buffer_async_write(bh);
	set_buffer_uptodate(bh);
	clear_buffer_dirty(bh);
}

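/*
 * Move a locked page into the writeback state, completing the
 * writeback immediately if none of its buffers will be written.
 */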
STATIC void
xfs_start_page_writeback(
	struct page		*page,
	int			clear_dirty,
	int			buffers)
{
	ASSERT(PageLocked(page));
	ASSERT(!PageWriteback(page));
	if (clear_dirty)
		clear_page_dirty_for_io(page);
	set_page_writeback(page);
	unlock_page(page);
	/* If no buffers on the page are to be written, finish it here */
	if (!buffers)
		end_page_writeback(page);
}

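/*
 * Add the page fragment covered by a buffer_head to a bio.
 */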
static inline int bio_add_buffer(struct bio *bio, struct buffer_head *bh)
{
	return bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
}

/*
 * Submit all of the bios for all of the ioends we have saved up, covering the
 * initial writepage page and also any probed pages.
 *
 * Because we may have multiple ioends spanning a page, we need to start
 * writeback on all the buffers before we submit them for I/O. If we mark the
 * buffers as we go, we can end up with a page that only has some buffers
 * marked async write, and I/O completion on those can occur before we mark
 * the remaining buffers async write.
 *
 * The end result of this is that we trip a bug in end_page_writeback() because
 * we call it twice for the one page as the code in end_buffer_async_write()
 * assumes that all buffers on the page are started at the same time.
 *
 * The fix is two passes across the ioend list - one to start writeback on the
 * buffer_heads, and then submit them for I/O on the second pass.
 */
STATIC void
xfs_submit_ioend(
	struct writeback_control *wbc,
	xfs_ioend_t		*ioend)
{
	xfs_ioend_t		*head = ioend;
	xfs_ioend_t		*next;
	struct buffer_head	*bh;
	struct bio		*bio;
	sector_t		lastblock = 0;

	/* Pass 1 - start writeback */
	do {
		next = ioend->io_list;
		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private)
			xfs_start_buffer_writeback(bh);
	} while ((ioend = next) != NULL);

	/* Pass 2 - submit I/O */
	ioend = head;
	do {
		next = ioend->io_list;
		bio = NULL;

		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {

			if (!bio) {
 retry:
				bio = xfs_alloc_ioend_bio(bh);
			} else if (bh->b_blocknr != lastblock + 1) {
				xfs_submit_ioend_bio(wbc, ioend, bio);
				goto retry;
			}

			if (bio_add_buffer(bio, bh) != bh->b_size) {
				xfs_submit_ioend_bio(wbc, ioend, bio);
				goto retry;
			}

			lastblock = bh->b_blocknr;
		}
		if (bio)
			xfs_submit_ioend_bio(wbc, ioend, bio);
		xfs_finish_ioend(ioend);
	} while ((ioend = next) != NULL);
}

/*
 * Cancel submission of all buffer_heads so far in this ioend.
 * Toss the ioend too.  Only ever called for the initial page
 * in a writepage request, so only ever one page.
 */
STATIC void
xfs_cancel_ioend(
	xfs_ioend_t		*ioend)
{
	xfs_ioend_t		*next;
	struct buffer_head	*bh, *next_bh;

	do {
		next = ioend->io_list;
		bh = ioend->io_buffer_head;
		do {
			next_bh = bh->b_private;
			clear_buffer_async_write(bh);
			unlock_buffer(bh);
		} while ((bh = next_bh) != NULL);

		xfs_ioend_wake(XFS_I(ioend->io_inode));
		mempool_free(ioend, xfs_ioend_pool);
	} while ((ioend = next) != NULL);
}

/*
 * Test to see if we've been building up a completion structure for
 * earlier buffers -- if so, we try to append to this ioend if we
 * can, otherwise we finish off any current ioend and start another.
 * The current ioend is returned via *result.
 */
STATIC void
xfs_add_to_ioend(
	struct inode		*inode,
	struct buffer_head	*bh,
	xfs_off_t		offset,
	unsigned int		type,
	xfs_ioend_t		**result,
	int			need_ioend)
{
	xfs_ioend_t		*ioend = *result;

	if (!ioend || need_ioend || type != ioend->io_type) {
		xfs_ioend_t	*previous = *result;

		ioend = xfs_alloc_ioend(inode, type);
		ioend->io_offset = offset;
		ioend->io_buffer_head = bh;
		ioend->io_buffer_tail = bh;
		if (previous)
			previous->io_list = ioend;
		*result = ioend;
	} else {
		ioend->io_buffer_tail->b_private = bh;
		ioend->io_buffer_tail = bh;
	}

	bh->b_private = NULL;
	ioend->io_size += bh->b_size;
}

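/*
 * Translate a file offset into the on-disk block number for the
 * buffer_head using the cached extent mapping.
 */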
STATIC void
xfs_map_buffer(
	struct inode		*inode,
	struct buffer_head	*bh,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
	sector_t		bn;
	struct xfs_mount	*m = XFS_I(inode)->i_mount;
	xfs_off_t		iomap_offset = XFS_FSB_TO_B(m, imap->br_startoff);
	xfs_daddr_t		iomap_bn = xfs_fsb_to_db(XFS_I(inode), imap->br_startblock);

	ASSERT(imap->br_startblock != HOLESTARTBLOCK);
	ASSERT(imap->br_startblock != DELAYSTARTBLOCK);

	bn = (iomap_bn >> (inode->i_blkbits - BBSHIFT)) +
	      ((offset - iomap_offset) >> inode->i_blkbits);

	ASSERT(bn || XFS_IS_REALTIME_INODE(XFS_I(inode)));

	bh->b_blocknr = bn;
	set_buffer_mapped(bh);
}

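/*
 * Map the buffer to disk and clear any stale delalloc/unwritten state
 * now that real blocks back it.
 */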
STATIC void
xfs_map_at_offset(
	struct inode		*inode,
	struct buffer_head	*bh,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
	ASSERT(imap->br_startblock != HOLESTARTBLOCK);
	ASSERT(imap->br_startblock != DELAYSTARTBLOCK);

	xfs_map_buffer(inode, bh, imap, offset);
	set_buffer_mapped(bh);
	clear_buffer_delay(bh);
	clear_buffer_unwritten(bh);
}

/*
 * Test if a given page is suitable for writing as part of an unwritten
 * or delayed allocate extent.
 */
STATIC int
xfs_is_delayed_page(
	struct page		*page,
	unsigned int		type)
{
	if (PageWriteback(page))
		return 0;

	if (page->mapping && page_has_buffers(page)) {
		struct buffer_head	*bh, *head;
		int			acceptable = 0;

		bh = head = page_buffers(page);
		do {
			if (buffer_unwritten(bh))
				acceptable = (type == IO_UNWRITTEN);
			else if (buffer_delay(bh))
				acceptable = (type == IO_DELALLOC);
			else if (buffer_dirty(bh) && buffer_mapped(bh))
				acceptable = (type == IO_OVERWRITE);
			else
				break;
		} while ((bh = bh->b_this_page) != head);

		if (acceptable)
			return 1;
	}

	return 0;
}

/*
 * Allocate & map buffers for page given the extent map.  Write it out.
 * Except for the original page of a writepage, this is called on
 * delalloc/unwritten pages only; for the original page it is possible
 * that the page has no mapping at all.
 */
STATIC int
xfs_convert_page(
	struct inode		*inode,
	struct page		*page,
	loff_t			tindex,
	struct xfs_bmbt_irec	*imap,
	xfs_ioend_t		**ioendp,
	struct writeback_control *wbc)
{
	struct buffer_head	*bh, *head;
	xfs_off_t		end_offset;
	unsigned long		p_offset;
	unsigned int		type;
	int			len, page_dirty;
	int			count = 0, done = 0, uptodate = 1;
	xfs_off_t		offset = page_offset(page);

	if (page->index != tindex)
		goto fail;
	if (!trylock_page(page))
		goto fail;
	if (PageWriteback(page))
		goto fail_unlock_page;
	if (page->mapping != inode->i_mapping)
		goto fail_unlock_page;
	if (!xfs_is_delayed_page(page, (*ioendp)->io_type))
		goto fail_unlock_page;

	/*
	 * page_dirty is initially a count of buffers on the page before
	 * EOF and is decremented as we move each into a cleanable state.
	 *
	 * Derivation:
	 *
	 * End offset is the highest offset that this page should represent.
	 * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
	 * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
	 * hence give us the correct page_dirty count. On any other page,
	 * it will be zero and in that case we need page_dirty to be the
	 * count of buffers on the page.
	 */
	end_offset = min_t(unsigned long long,
			(xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT,
			i_size_read(inode));

	len = 1 << inode->i_blkbits;
	p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
					PAGE_CACHE_SIZE);
	p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
	page_dirty = p_offset / len;

	bh = head = page_buffers(page);
	do {
		if (offset >= end_offset)
			break;
		if (!buffer_uptodate(bh))
			uptodate = 0;
		if (!(PageUptodate(page) || buffer_uptodate(bh))) {
			done = 1;
			continue;
		}

		if (buffer_unwritten(bh) || buffer_delay(bh) ||
		    buffer_mapped(bh)) {
			if (buffer_unwritten(bh))
				type = IO_UNWRITTEN;
			else if (buffer_delay(bh))
				type = IO_DELALLOC;
			else
				type = IO_OVERWRITE;

			if (!xfs_imap_valid(inode, imap, offset)) {
				done = 1;
				continue;
			}

			lock_buffer(bh);
			if (type != IO_OVERWRITE)
				xfs_map_at_offset(inode, bh, imap, offset);
			xfs_add_to_ioend(inode, bh, offset, type,
					 ioendp, done);

			page_dirty--;
			count++;
		} else {
			done = 1;
		}
	} while (offset += len, (bh = bh->b_this_page) != head);

	if (uptodate && bh == head)
		SetPageUptodate(page);

	if (count) {
		if (--wbc->nr_to_write <= 0 &&
		    wbc->sync_mode == WB_SYNC_NONE)
			done = 1;
	}
	xfs_start_page_writeback(page, !page_dirty, count);

	return done;
 fail_unlock_page:
	unlock_page(page);
 fail:
	return 1;
}

/*
 * Convert & write out a cluster of pages in the same extent as defined
 * by mp and following the start page.
 */
STATIC void
xfs_cluster_write(
	struct inode		*inode,
	pgoff_t			tindex,
	struct xfs_bmbt_irec	*imap,
	xfs_ioend_t		**ioendp,
	struct writeback_control *wbc,
	pgoff_t			tlast)
{
	struct pagevec		pvec;
	int			done = 0, i;

	pagevec_init(&pvec, 0);
	while (!done && tindex <= tlast) {
		unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);

		if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
			break;

		for (i = 0; i < pagevec_count(&pvec); i++) {
			done = xfs_convert_page(inode, pvec.pages[i], tindex++,
					imap, ioendp, wbc);
			if (done)
				break;
		}

		pagevec_release(&pvec);
		cond_resched();
	}
}

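/*
 * ->invalidatepage operation: trace the call and defer to the generic
 * buffer_head invalidation helper.
 */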
STATIC void
xfs_vm_invalidatepage(
	struct page		*page,
	unsigned long		offset)
{
	trace_xfs_invalidatepage(page->mapping->host, page, offset);
	block_invalidatepage(page, offset);
}

/*
 * If the page has delalloc buffers on it, we need to punch them out before we
 * invalidate the page. If we don't, we leave a stale delalloc mapping on the
 * inode that can trip a BUG() in xfs_get_blocks() later on if a direct IO read
 * is done on that same region - the delalloc extent is returned when none is
 * supposed to be there.
 *
 * We prevent this by truncating away the delalloc regions on the page before
 * invalidating it. Because they are delalloc, we can do this without needing a
 * transaction. Indeed - if we get ENOSPC errors, we have to be able to do this
 * truncation without a transaction as there is no space left for block
 * reservation (typically why we see an ENOSPC in writeback).
 *
 * This is not a performance critical path, so for now just do the punching a
 * buffer head at a time.
 */
STATIC void
xfs_aops_discard_page(
	struct page		*page)
{
	struct inode		*inode = page->mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct buffer_head	*bh, *head;
	loff_t			offset = page_offset(page);

	if (!xfs_is_delayed_page(page, IO_DELALLOC))
		goto out_invalidate;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		goto out_invalidate;

	xfs_alert(ip->i_mount,
		"page discard on page %p, inode 0x%llx, offset %llu.",
			page, ip->i_ino, offset);

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	bh = head = page_buffers(page);
	do {
		int		error;
		xfs_fileoff_t	start_fsb;

		if (!buffer_delay(bh))
			goto next_buffer;

		start_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
		error = xfs_bmap_punch_delalloc_range(ip, start_fsb, 1);
		if (error) {
			/* something screwed, just bail */
			if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
				xfs_alert(ip->i_mount,
			"page discard unable to remove delalloc mapping.");
			}
			break;
		}
next_buffer:
		offset += 1 << inode->i_blkbits;

	} while ((bh = bh->b_this_page) != head);

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
out_invalidate:
	xfs_vm_invalidatepage(page, 0);
	return;
}

/*
 * Write out a dirty page.
 *
 * For delalloc space on the page we need to allocate space and flush it.
 * For unwritten space on the page we need to start the conversion to
 * regular allocated space.
 * For any other dirty buffer heads on the page we should flush them.
 */
STATIC int
xfs_vm_writepage(
	struct page		*page,
	struct writeback_control *wbc)
{
	struct inode		*inode = page->mapping->host;
	struct buffer_head	*bh, *head;
	struct xfs_bmbt_irec	imap;
	xfs_ioend_t		*ioend = NULL, *iohead = NULL;
	loff_t			offset;
	unsigned int		type;
	__uint64_t		end_offset;
	pgoff_t			end_index, last_index;
	ssize_t			len;
	int			err, imap_valid = 0, uptodate = 1;
	int			count = 0;
	int			nonblocking = 0;

	trace_xfs_writepage(inode, page, 0);

	ASSERT(page_has_buffers(page));

	/*
	 * Refuse to write the page out if we are called from reclaim context.
	 *
	 * This avoids stack overflows when called from deeply used stacks in
	 * random callers for direct reclaim or memcg reclaim.  We explicitly
	 * allow reclaim from kswapd as the stack usage there is relatively low.
	 *
	 * This should really be done by the core VM, but until that happens
	 * filesystems like XFS, btrfs and ext4 have to take care of this
	 * by themselves.
	 */
	if ((current->flags & (PF_MEMALLOC|PF_KSWAPD)) == PF_MEMALLOC)
		goto redirty;

	/*
	 * Given that we do not allow direct reclaim to call us, we should
	 * never be called while in a filesystem transaction.
	 */
	if (WARN_ON(current->flags & PF_FSTRANS))
		goto redirty;

	/* Is this page beyond the end of the file? */
	offset = i_size_read(inode);
	end_index = offset >> PAGE_CACHE_SHIFT;
	last_index = (offset - 1) >> PAGE_CACHE_SHIFT;
	if (page->index >= end_index) {
		if ((page->index >= end_index + 1) ||
		    !(i_size_read(inode) & (PAGE_CACHE_SIZE - 1))) {
			unlock_page(page);
			return 0;
		}
	}

	end_offset = min_t(unsigned long long,
			(xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT,
			offset);
	len = 1 << inode->i_blkbits;

	bh = head = page_buffers(page);
	offset = page_offset(page);
	type = IO_OVERWRITE;

	if (wbc->sync_mode == WB_SYNC_NONE)
		nonblocking = 1;

	do {
		int new_ioend = 0;

		if (offset >= end_offset)
			break;
		if (!buffer_uptodate(bh))
			uptodate = 0;

		/*
		 * set_page_dirty dirties all buffers in a page, independent
		 * of their state.  The dirty state however is entirely
		 * meaningless for holes (!mapped && uptodate), so skip
		 * buffers covering holes here.
		 */
		if (!buffer_mapped(bh) && buffer_uptodate(bh)) {
			imap_valid = 0;
			continue;
		}

		if (buffer_unwritten(bh)) {
			if (type != IO_UNWRITTEN) {
				type = IO_UNWRITTEN;
				imap_valid = 0;
			}
		} else if (buffer_delay(bh)) {
			if (type != IO_DELALLOC) {
				type = IO_DELALLOC;
				imap_valid = 0;
			}
		} else if (buffer_uptodate(bh)) {
			if (type != IO_OVERWRITE) {
				type = IO_OVERWRITE;
				imap_valid = 0;
			}
		} else {
			if (PageUptodate(page)) {
				ASSERT(buffer_mapped(bh));
				imap_valid = 0;
			}
			continue;
		}

		if (imap_valid)
			imap_valid = xfs_imap_valid(inode, &imap, offset);
		if (!imap_valid) {
			/*
			 * If we didn't have a valid mapping then we need to
			 * put the new mapping into a separate ioend structure.
			 * This ensures non-contiguous extents always have
			 * separate ioends, which is particularly important
			 * for unwritten extent conversion at I/O completion
			 * time.
			 */
			new_ioend = 1;
			err = xfs_map_blocks(inode, offset, &imap, type,
					     nonblocking);
			if (err)
				goto error;
			imap_valid = xfs_imap_valid(inode, &imap, offset);
		}
		if (imap_valid) {
			lock_buffer(bh);
			if (type != IO_OVERWRITE)
				xfs_map_at_offset(inode, bh, &imap, offset);
			xfs_add_to_ioend(inode, bh, offset, type, &ioend,
					 new_ioend);
			count++;
		}

		if (!iohead)
			iohead = ioend;

	} while (offset += len, ((bh = bh->b_this_page) != head));

	if (uptodate && bh == head)
		SetPageUptodate(page);

	xfs_start_page_writeback(page, 1, count);

	if (ioend && imap_valid) {
		xfs_off_t		end_index;

		end_index = imap.br_startoff + imap.br_blockcount;

		/* to bytes */
		end_index <<= inode->i_blkbits;

		/* to pages */
		end_index = (end_index - 1) >> PAGE_CACHE_SHIFT;

		/* check against file size */
		if (end_index > last_index)
			end_index = last_index;

		xfs_cluster_write(inode, page->index + 1, &imap, &ioend,
				  wbc, end_index);
	}

	if (iohead)
		xfs_submit_ioend(wbc, iohead);

	return 0;

error:
	if (iohead)
		xfs_cancel_ioend(iohead);

	if (err == -EAGAIN)
		goto redirty;

	xfs_aops_discard_page(page);
	ClearPageUptodate(page);
	unlock_page(page);
	return err;

redirty:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return 0;
}

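/*
 * ->writepages operation: clear the truncated flag on the inode and
 * let the generic writeback code drive our ->writepage.
 */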
STATIC int
xfs_vm_writepages(
	struct address_space	*mapping,
	struct writeback_control *wbc)
{
	xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
	return generic_writepages(mapping, wbc);
}

/*
 * Called to move a page into cleanable state - and from there
 * to be released. The page should already be clean. We always
 * have buffer heads in this call.
 *
 * Returns 1 if the page is ok to release, 0 otherwise.
 */
STATIC int
xfs_vm_releasepage(
	struct page		*page,
	gfp_t			gfp_mask)
{
	int			delalloc, unwritten;

	trace_xfs_releasepage(page->mapping->host, page, 0);

	xfs_count_page_state(page, &delalloc, &unwritten);

	if (WARN_ON(delalloc))
		return 0;
	if (WARN_ON(unwritten))
		return 0;

	return try_to_free_buffers(page);
}

Linus Torvalds1da177e2005-04-16 15:20:36 -07001129STATIC int
Nathan Scottc2536662006-03-29 10:44:40 +10001130__xfs_get_blocks(
Linus Torvalds1da177e2005-04-16 15:20:36 -07001131 struct inode *inode,
1132 sector_t iblock,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001133 struct buffer_head *bh_result,
1134 int create,
Christoph Hellwigf2bde9b2010-06-24 11:44:35 +10001135 int direct)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001136{
Christoph Hellwiga206c812010-12-10 08:42:20 +00001137 struct xfs_inode *ip = XFS_I(inode);
1138 struct xfs_mount *mp = ip->i_mount;
1139 xfs_fileoff_t offset_fsb, end_fsb;
1140 int error = 0;
1141 int lockmode = 0;
Christoph Hellwig207d0412010-04-28 12:28:56 +00001142 struct xfs_bmbt_irec imap;
Christoph Hellwiga206c812010-12-10 08:42:20 +00001143 int nimaps = 1;
Nathan Scottfdc7ed72005-11-02 15:13:13 +11001144 xfs_off_t offset;
1145 ssize_t size;
Christoph Hellwig207d0412010-04-28 12:28:56 +00001146 int new = 0;
Christoph Hellwiga206c812010-12-10 08:42:20 +00001147
1148 if (XFS_FORCED_SHUTDOWN(mp))
1149 return -XFS_ERROR(EIO);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001150
Nathan Scottfdc7ed72005-11-02 15:13:13 +11001151 offset = (xfs_off_t)iblock << inode->i_blkbits;
Nathan Scottc2536662006-03-29 10:44:40 +10001152 ASSERT(bh_result->b_size >= (1 << inode->i_blkbits));
1153 size = bh_result->b_size;
Lachlan McIlroy364f3582008-09-17 16:50:14 +10001154
1155 if (!create && direct && offset >= i_size_read(inode))
1156 return 0;
1157
Christoph Hellwiga206c812010-12-10 08:42:20 +00001158 if (create) {
1159 lockmode = XFS_ILOCK_EXCL;
1160 xfs_ilock(ip, lockmode);
1161 } else {
1162 lockmode = xfs_ilock_map_shared(ip);
1163 }
Christoph Hellwigf2bde9b2010-06-24 11:44:35 +10001164
Christoph Hellwiga206c812010-12-10 08:42:20 +00001165 ASSERT(offset <= mp->m_maxioffset);
1166 if (offset + size > mp->m_maxioffset)
1167 size = mp->m_maxioffset - offset;
1168 end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + size);
1169 offset_fsb = XFS_B_TO_FSBT(mp, offset);
1170
1171 error = xfs_bmapi(NULL, ip, offset_fsb, end_fsb - offset_fsb,
1172 XFS_BMAPI_ENTIRE, NULL, 0, &imap, &nimaps, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001173 if (error)
Christoph Hellwiga206c812010-12-10 08:42:20 +00001174 goto out_unlock;
1175
1176 if (create &&
1177 (!nimaps ||
1178 (imap.br_startblock == HOLESTARTBLOCK ||
1179 imap.br_startblock == DELAYSTARTBLOCK))) {
1180 if (direct) {
1181 error = xfs_iomap_write_direct(ip, offset, size,
1182 &imap, nimaps);
1183 } else {
1184 error = xfs_iomap_write_delay(ip, offset, size, &imap);
1185 }
1186 if (error)
1187 goto out_unlock;
1188
1189 trace_xfs_get_blocks_alloc(ip, offset, size, 0, &imap);
1190 } else if (nimaps) {
1191 trace_xfs_get_blocks_found(ip, offset, size, 0, &imap);
1192 } else {
1193 trace_xfs_get_blocks_notfound(ip, offset, size);
1194 goto out_unlock;
1195 }
1196 xfs_iunlock(ip, lockmode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001197
Christoph Hellwig207d0412010-04-28 12:28:56 +00001198 if (imap.br_startblock != HOLESTARTBLOCK &&
1199 imap.br_startblock != DELAYSTARTBLOCK) {
Nathan Scott87cbc492006-03-14 13:26:43 +11001200 /*
1201 * For unwritten extents do not report a disk address on
Linus Torvalds1da177e2005-04-16 15:20:36 -07001202 * the read case (treat as if we're reading into a hole).
1203 */
Christoph Hellwig207d0412010-04-28 12:28:56 +00001204 if (create || !ISUNWRITTEN(&imap))
1205 xfs_map_buffer(inode, bh_result, &imap, offset);
1206 if (create && ISUNWRITTEN(&imap)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001207 if (direct)
1208 bh_result->b_private = inode;
1209 set_buffer_unwritten(bh_result);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001210 }
1211 }
1212
Nathan Scottc2536662006-03-29 10:44:40 +10001213 /*
1214 * If this is a realtime file, data may be on a different device.
1215 * to that pointed to from the buffer_head b_bdev currently.
1216 */
Christoph Hellwig046f1682010-04-28 12:28:52 +00001217 bh_result->b_bdev = xfs_find_bdev_for_inode(inode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001218
Nathan Scottc2536662006-03-29 10:44:40 +10001219 /*
David Chinner549054a2007-02-10 18:36:35 +11001220	 * If we previously allocated a block out beyond EOF and we are now
 1221	 * coming back to use it then we need to flag it as new even if it
 1222	 * has a disk address.
 1223	 *
 1224	 * With sub-block writes into unwritten extents we also need to mark
 1225	 * the buffer as new so that the unwritten parts of the buffer get
 1226	 * correctly zeroed.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001227 */
1228 if (create &&
1229 ((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) ||
David Chinner549054a2007-02-10 18:36:35 +11001230 (offset >= i_size_read(inode)) ||
Christoph Hellwig207d0412010-04-28 12:28:56 +00001231 (new || ISUNWRITTEN(&imap))))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001232 set_buffer_new(bh_result);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001233
Christoph Hellwig207d0412010-04-28 12:28:56 +00001234 if (imap.br_startblock == DELAYSTARTBLOCK) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001235 BUG_ON(direct);
1236 if (create) {
1237 set_buffer_uptodate(bh_result);
1238 set_buffer_mapped(bh_result);
1239 set_buffer_delay(bh_result);
1240 }
1241 }
1242
Christoph Hellwig2b8f12b2010-04-28 12:28:59 +00001243 /*
 1244	 * If this is O_DIRECT or the mpage code calling us, tell them how
 1245	 * large the mapping is so that we can avoid repeated get_blocks calls.
1246 */
Nathan Scottc2536662006-03-29 10:44:40 +10001247 if (direct || size > (1 << inode->i_blkbits)) {
Christoph Hellwig2b8f12b2010-04-28 12:28:59 +00001248 xfs_off_t mapping_size;
Christoph Hellwig9563b3d2010-04-28 12:28:53 +00001249
Christoph Hellwig2b8f12b2010-04-28 12:28:59 +00001250 mapping_size = imap.br_startoff + imap.br_blockcount - iblock;
1251 mapping_size <<= inode->i_blkbits;
1252
1253 ASSERT(mapping_size > 0);
1254 if (mapping_size > size)
1255 mapping_size = size;
1256 if (mapping_size > LONG_MAX)
1257 mapping_size = LONG_MAX;
1258
1259 bh_result->b_size = mapping_size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001260 }
1261
1262 return 0;
Christoph Hellwiga206c812010-12-10 08:42:20 +00001263
1264out_unlock:
1265 xfs_iunlock(ip, lockmode);
1266 return -error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001267}
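
/*
 * A minimal sketch (not XFS code) of the get_blocks contract that
 * __xfs_get_blocks implements: the caller hands in bh_result with
 * b_size set to the span it would like mapped starting at iblock; the
 * callback maps what it can, trims b_size if the mapping is shorter,
 * and sets the buffer state bits to match.  The demo_* names and the
 * contiguous on-disk layout are hypothetical assumptions.
 */
static int demo_get_blocks(struct inode *inode, sector_t iblock,
			   struct buffer_head *bh_result, int create)
{
	const sector_t demo_data_start = 128;	/* hypothetical data area */

	/* pretend file block N always lives at disk block N + data start */
	map_bh(bh_result, inode->i_sb, demo_data_start + iblock);

	/* leave b_size untouched: this toy mapping always covers it all */
	return 0;
}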
1268
1269int
Nathan Scottc2536662006-03-29 10:44:40 +10001270xfs_get_blocks(
Linus Torvalds1da177e2005-04-16 15:20:36 -07001271 struct inode *inode,
1272 sector_t iblock,
1273 struct buffer_head *bh_result,
1274 int create)
1275{
Christoph Hellwigf2bde9b2010-06-24 11:44:35 +10001276 return __xfs_get_blocks(inode, iblock, bh_result, create, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001277}
1278
1279STATIC int
Nathan Scotte4c573b2006-03-14 13:54:26 +11001280xfs_get_blocks_direct(
Linus Torvalds1da177e2005-04-16 15:20:36 -07001281 struct inode *inode,
1282 sector_t iblock,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001283 struct buffer_head *bh_result,
1284 int create)
1285{
Christoph Hellwigf2bde9b2010-06-24 11:44:35 +10001286 return __xfs_get_blocks(inode, iblock, bh_result, create, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001287}
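
/*
 * A hedged sketch of how generic code drives the two wrappers above:
 * the mpage and direct I/O paths build a buffer_head, set b_size to
 * the largest mapping they can use, call the get_blocks callback and
 * then inspect the buffer state bits.  Illustrative only; the real
 * callers live in fs/mpage.c and fs/direct-io.c.
 */
static void demo_probe_mapping(struct inode *inode, sector_t iblock)
{
	struct buffer_head bh = { .b_size = 1 << inode->i_blkbits };

	if (xfs_get_blocks(inode, iblock, &bh, 0))	/* read, no alloc */
		return;
	if (buffer_mapped(&bh))
		printk(KERN_DEBUG "fsblock %llu -> disk %llu, %zu bytes\n",
		       (unsigned long long)iblock,
		       (unsigned long long)bh.b_blocknr,
		       bh.b_size);
}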
1288
Christoph Hellwig209fb872010-07-18 21:17:11 +00001289/*
1290 * Complete a direct I/O write request.
1291 *
1292 * If the private argument is non-NULL __xfs_get_blocks signals us that we
1293 * need to issue a transaction to convert the range from unwritten to written
 1294 * extents.  If this is regular synchronous I/O we just call xfs_end_io
Lucas De Marchi25985ed2011-03-30 22:57:33 -03001295 * to do this and we are done.  But if this was a successful AIO
Christoph Hellwig209fb872010-07-18 21:17:11 +00001296 * request, this handler is called from interrupt context, from which we
 1297 * can't start transactions.  In that case offload the I/O completion to
 1298 * the workqueues we also use for buffered I/O completion.
1299 */
Christoph Hellwigf0973862005-09-05 08:22:52 +10001300STATIC void
Christoph Hellwig209fb872010-07-18 21:17:11 +00001301xfs_end_io_direct_write(
1302 struct kiocb *iocb,
1303 loff_t offset,
1304 ssize_t size,
1305 void *private,
1306 int ret,
1307 bool is_async)
Christoph Hellwigf0973862005-09-05 08:22:52 +10001308{
Christoph Hellwig209fb872010-07-18 21:17:11 +00001309 struct xfs_ioend *ioend = iocb->private;
Christoph Hellwigf0973862005-09-05 08:22:52 +10001310
1311 /*
Nathan Scottc41564b2006-03-29 08:55:14 +10001312 * blockdev_direct_IO can return an error even after the I/O
Christoph Hellwigf0973862005-09-05 08:22:52 +10001313 * completion handler was called. Thus we need to protect
1314 * against double-freeing.
1315 */
1316 iocb->private = NULL;
Christoph Hellwig40e2e972010-07-18 21:17:09 +00001317
Christoph Hellwig209fb872010-07-18 21:17:11 +00001318 ioend->io_offset = offset;
1319 ioend->io_size = size;
Christoph Hellwigc859cdd2011-08-23 08:28:10 +00001320 ioend->io_iocb = iocb;
1321 ioend->io_result = ret;
Christoph Hellwig209fb872010-07-18 21:17:11 +00001322 if (private && size > 0)
1323 ioend->io_type = IO_UNWRITTEN;
1324
1325 if (is_async) {
Christoph Hellwigc859cdd2011-08-23 08:28:10 +00001326 ioend->io_isasync = 1;
Christoph Hellwig209fb872010-07-18 21:17:11 +00001327 xfs_finish_ioend(ioend);
1328 } else {
1329 xfs_finish_ioend_sync(ioend);
1330 }
Christoph Hellwigf0973862005-09-05 08:22:52 +10001331}
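
/*
 * A hedged sketch (assuming <linux/workqueue.h>) of the completion
 * pattern above: when the callback may run in interrupt context (a
 * completed AIO request) the transactional work must be deferred to a
 * workqueue, while a synchronous completion can just do it inline.
 * The demo_* names are hypothetical, not the XFS ioend code.
 */
struct demo_ioend {
	struct work_struct	work;
	/* per-I/O state (offset, size, ...) would live here */
};

static void demo_do_completion(struct work_struct *work)
{
	/* process context: safe to sleep, start transactions, etc. */
}

static void demo_end_io(struct demo_ioend *ioend, bool is_async)
{
	if (is_async) {
		/* interrupt context: cannot block here, punt to a worker */
		INIT_WORK(&ioend->work, demo_do_completion);
		schedule_work(&ioend->work);
	} else {
		demo_do_completion(&ioend->work);
	}
}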
1332
Linus Torvalds1da177e2005-04-16 15:20:36 -07001333STATIC ssize_t
Nathan Scotte4c573b2006-03-14 13:54:26 +11001334xfs_vm_direct_IO(
Linus Torvalds1da177e2005-04-16 15:20:36 -07001335 int rw,
1336 struct kiocb *iocb,
1337 const struct iovec *iov,
1338 loff_t offset,
1339 unsigned long nr_segs)
1340{
Christoph Hellwig209fb872010-07-18 21:17:11 +00001341 struct inode *inode = iocb->ki_filp->f_mapping->host;
1342 struct block_device *bdev = xfs_find_bdev_for_inode(inode);
1343 ssize_t ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001344
Christoph Hellwig209fb872010-07-18 21:17:11 +00001345 if (rw & WRITE) {
Christoph Hellwiga206c812010-12-10 08:42:20 +00001346 iocb->private = xfs_alloc_ioend(inode, IO_DIRECT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001347
Christoph Hellwigeafdc7d2010-06-04 11:29:53 +02001348 ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iov,
1349 offset, nr_segs,
1350 xfs_get_blocks_direct,
1351 xfs_end_io_direct_write, NULL, 0);
Christoph Hellwig209fb872010-07-18 21:17:11 +00001352 if (ret != -EIOCBQUEUED && iocb->private)
1353 xfs_destroy_ioend(iocb->private);
1354 } else {
Christoph Hellwigeafdc7d2010-06-04 11:29:53 +02001355 ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iov,
1356 offset, nr_segs,
1357 xfs_get_blocks_direct,
1358 NULL, NULL, 0);
Christoph Hellwig209fb872010-07-18 21:17:11 +00001359 }
Christoph Hellwig5fe878a2009-12-15 16:47:50 -08001360
Christoph Hellwigf0973862005-09-05 08:22:52 +10001361 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001362}
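
/*
 * A userspace sketch of what ends up in xfs_vm_direct_IO(): opening a
 * file with O_DIRECT makes the generic read/write paths dispatch
 * through ->direct_IO.  Buffer, offset and length generally must be
 * block aligned, hence posix_memalign().  Illustrative only, error
 * handling trimmed; the 4096-byte alignment is an assumption about
 * the underlying sector size.
 */
#define _GNU_SOURCE		/* for O_DIRECT */
#include <fcntl.h>
#include <stdlib.h>
#include <unistd.h>

static int demo_direct_read(const char *path)
{
	void *buf = NULL;
	int fd = open(path, O_RDONLY | O_DIRECT);

	if (fd < 0)
		return -1;
	if (posix_memalign(&buf, 4096, 4096) == 0) {
		read(fd, buf, 4096);	/* dispatches to ->direct_IO */
		free(buf);
	}
	close(fd);
	return 0;
}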
1363
Christoph Hellwigfa9b2272010-06-14 05:17:31 -04001364STATIC void
1365xfs_vm_write_failed(
1366 struct address_space *mapping,
1367 loff_t to)
1368{
1369 struct inode *inode = mapping->host;
1370
1371 if (to > inode->i_size) {
Dave Chinnerc726de42010-11-30 15:14:39 +11001372 /*
 1373		 * Punch out the delalloc blocks we have already allocated.  We
 1374		 * don't call xfs_setattr() to do this as we may be in the
 1375		 * middle of a multi-iovec write and so the VFS inode->i_size
 1376		 * will not match the XFS ip->i_size, and so it would zero too
 1377		 * much.  Hence we just truncate the page cache to zero what is
 1378		 * necessary and punch the delalloc blocks directly.
1379 */
1380 struct xfs_inode *ip = XFS_I(inode);
1381 xfs_fileoff_t start_fsb;
1382 xfs_fileoff_t end_fsb;
1383 int error;
1384
1385 truncate_pagecache(inode, to, inode->i_size);
1386
1387 /*
1388 * Check if there are any blocks that are outside of i_size
1389 * that need to be trimmed back.
1390 */
1391 start_fsb = XFS_B_TO_FSB(ip->i_mount, inode->i_size) + 1;
1392 end_fsb = XFS_B_TO_FSB(ip->i_mount, to);
1393 if (end_fsb <= start_fsb)
1394 return;
1395
1396 xfs_ilock(ip, XFS_ILOCK_EXCL);
1397 error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
1398 end_fsb - start_fsb);
1399 if (error) {
 1400			/* something is screwed up, just bail */
1401 if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
Dave Chinner4f107002011-03-07 10:00:35 +11001402 xfs_alert(ip->i_mount,
Dave Chinnerc726de42010-11-30 15:14:39 +11001403 "xfs_vm_write_failed: unable to clean up ino %lld",
1404 ip->i_ino);
1405 }
1406 }
1407 xfs_iunlock(ip, XFS_ILOCK_EXCL);
Christoph Hellwigfa9b2272010-06-14 05:17:31 -04001408 }
1409}
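
/*
 * A hedged sketch of the byte-to-block conversions used above,
 * assuming the usual XFS macro semantics: XFS_B_TO_FSB() rounds a
 * byte count up to whole filesystem blocks while XFS_B_TO_FSBT()
 * truncates.  With hypothetical 4k blocks (blocklog == 12):
 */
static void demo_fsb_rounding(void)
{
	unsigned int blocklog = 12;		/* 4096-byte blocks */
	unsigned long long bytes = 6000;
	unsigned long long up = (bytes + (1ULL << blocklog) - 1) >> blocklog;
	unsigned long long trunc = bytes >> blocklog;

	/* up == 2, trunc == 1: byte 6000 sits inside block 1 */
	printk(KERN_DEBUG "round up %llu, truncate %llu\n", up, trunc);
}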
1410
Nathan Scottf51623b2006-03-14 13:26:27 +11001411STATIC int
Nick Piggind79689c2007-10-16 01:25:06 -07001412xfs_vm_write_begin(
Nathan Scottf51623b2006-03-14 13:26:27 +11001413 struct file *file,
Nick Piggind79689c2007-10-16 01:25:06 -07001414 struct address_space *mapping,
1415 loff_t pos,
1416 unsigned len,
1417 unsigned flags,
1418 struct page **pagep,
1419 void **fsdata)
Nathan Scottf51623b2006-03-14 13:26:27 +11001420{
Christoph Hellwig155130a2010-06-04 11:29:58 +02001421 int ret;
1422
1423 ret = block_write_begin(mapping, pos, len, flags | AOP_FLAG_NOFS,
1424 pagep, xfs_get_blocks);
Christoph Hellwigfa9b2272010-06-14 05:17:31 -04001425 if (unlikely(ret))
1426 xfs_vm_write_failed(mapping, pos + len);
1427 return ret;
1428}
Christoph Hellwig155130a2010-06-04 11:29:58 +02001429
Christoph Hellwigfa9b2272010-06-14 05:17:31 -04001430STATIC int
1431xfs_vm_write_end(
1432 struct file *file,
1433 struct address_space *mapping,
1434 loff_t pos,
1435 unsigned len,
1436 unsigned copied,
1437 struct page *page,
1438 void *fsdata)
1439{
1440 int ret;
1441
1442 ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
1443 if (unlikely(ret < len))
1444 xfs_vm_write_failed(mapping, pos + len);
Christoph Hellwig155130a2010-06-04 11:29:58 +02001445 return ret;
Nathan Scottf51623b2006-03-14 13:26:27 +11001446}
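
/*
 * A hedged sketch of how the VFS pairs the two helpers above:
 * generic_perform_write() brackets each page-sized chunk of a
 * buffered write with ->write_begin and ->write_end, and a failure or
 * short copy must undo speculative allocations beyond the old EOF,
 * which is what xfs_vm_write_failed() handles.  The copy step and
 * error paths are elided; this is illustrative, not actual VFS code.
 */
static int demo_write_one_chunk(struct file *file, loff_t pos, unsigned len)
{
	struct address_space *mapping = file->f_mapping;
	struct page *page;
	void *fsdata;
	int ret;

	ret = mapping->a_ops->write_begin(file, mapping, pos, len, 0,
					  &page, &fsdata);
	if (ret)
		return ret;
	/* ...copy len bytes of user data into page here... */
	return mapping->a_ops->write_end(file, mapping, pos, len, len,
					 page, fsdata);
}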
Linus Torvalds1da177e2005-04-16 15:20:36 -07001447
1448STATIC sector_t
Nathan Scotte4c573b2006-03-14 13:54:26 +11001449xfs_vm_bmap(
Linus Torvalds1da177e2005-04-16 15:20:36 -07001450 struct address_space *mapping,
1451 sector_t block)
1452{
 1453	struct inode *inode = mapping->host;
Christoph Hellwig739bfb22007-08-29 10:58:01 +10001454	struct xfs_inode *ip = XFS_I(inode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001455
Christoph Hellwigcca28fb2010-06-24 11:57:09 +10001456	trace_xfs_vm_bmap(ip);
Christoph Hellwig126468b2008-03-06 13:44:57 +11001457 xfs_ilock(ip, XFS_IOLOCK_SHARED);
Christoph Hellwig739bfb22007-08-29 10:58:01 +10001458 xfs_flush_pages(ip, (xfs_off_t)0, -1, 0, FI_REMAPF);
Christoph Hellwig126468b2008-03-06 13:44:57 +11001459 xfs_iunlock(ip, XFS_IOLOCK_SHARED);
Nathan Scottc2536662006-03-29 10:44:40 +10001460 return generic_block_bmap(mapping, block, xfs_get_blocks);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001461}
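
/*
 * A userspace sketch of what reaches xfs_vm_bmap(): the FIBMAP ioctl
 * (typically root only) translates a file-relative block number into
 * a device-relative one via ->bmap.  Illustrative, error handling
 * trimmed.
 */
#include <linux/fs.h>
#include <sys/ioctl.h>

static long demo_fibmap(int fd, int file_block)
{
	int blk = file_block;

	if (ioctl(fd, FIBMAP, &blk) < 0)	/* ends up in ->bmap */
		return -1;
	return blk;				/* 0 usually means a hole */
}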
1462
1463STATIC int
Nathan Scotte4c573b2006-03-14 13:54:26 +11001464xfs_vm_readpage(
Linus Torvalds1da177e2005-04-16 15:20:36 -07001465 struct file *unused,
1466 struct page *page)
1467{
Nathan Scottc2536662006-03-29 10:44:40 +10001468 return mpage_readpage(page, xfs_get_blocks);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001469}
1470
1471STATIC int
Nathan Scotte4c573b2006-03-14 13:54:26 +11001472xfs_vm_readpages(
Linus Torvalds1da177e2005-04-16 15:20:36 -07001473 struct file *unused,
1474 struct address_space *mapping,
1475 struct list_head *pages,
1476 unsigned nr_pages)
1477{
Nathan Scottc2536662006-03-29 10:44:40 +10001478 return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001479}
1480
Christoph Hellwigf5e54d62006-06-28 04:26:44 -07001481const struct address_space_operations xfs_address_space_operations = {
Nathan Scotte4c573b2006-03-14 13:54:26 +11001482 .readpage = xfs_vm_readpage,
1483 .readpages = xfs_vm_readpages,
1484 .writepage = xfs_vm_writepage,
Nathan Scott7d4fb402006-06-09 15:27:16 +10001485 .writepages = xfs_vm_writepages,
Nathan Scott238f4c52006-03-17 17:26:25 +11001486 .releasepage = xfs_vm_releasepage,
1487 .invalidatepage = xfs_vm_invalidatepage,
Nick Piggind79689c2007-10-16 01:25:06 -07001488 .write_begin = xfs_vm_write_begin,
Christoph Hellwigfa9b2272010-06-14 05:17:31 -04001489 .write_end = xfs_vm_write_end,
Nathan Scotte4c573b2006-03-14 13:54:26 +11001490 .bmap = xfs_vm_bmap,
1491 .direct_IO = xfs_vm_direct_IO,
Christoph Lametere965f962006-02-01 03:05:41 -08001492 .migratepage = buffer_migrate_page,
Hisashi Hifumibddaafa2009-03-29 09:53:38 +02001493 .is_partially_uptodate = block_is_partially_uptodate,
Andi Kleenaa261f52009-09-16 11:50:16 +02001494 .error_remove_page = generic_error_remove_page,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001495};
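
/*
 * A hedged sketch of how the table above gets used: when XFS
 * initialises a regular file's inode it points i_mapping->a_ops at
 * this table, after which the VFS and VM dispatch readpage(s),
 * writeback, write_begin/write_end and direct I/O through it.  The
 * helper below is illustrative, not the actual XFS inode setup path.
 */
static void demo_install_aops(struct inode *inode)
{
	inode->i_mapping->a_ops = &xfs_address_space_operations;
}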