/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/fs.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/gfs2_ondisk.h>
#include <linux/backing-dev.h>
#include <linux/uio.h>
#include <trace/events/writeback.h>
#include <linux/sched/signal.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "trans.h"
#include "rgrp.h"
#include "super.h"
#include "util.h"
#include "glops.h"
#include "aops.h"

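/**
 * gfs2_page_add_databufs - add the buffers in a page range to a transaction
 * @ip: The inode
 * @page: The (locked) page
 * @from: Offset of the start of the range within the page
 * @len: Length of the range
 *
 * Marks each buffer head overlapping the byte range as uptodate and
 * adds it to the running transaction as journaled data.
 */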
void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page,
			    unsigned int from, unsigned int len)
{
	struct buffer_head *head = page_buffers(page);
	unsigned int bsize = head->b_size;
	struct buffer_head *bh;
	unsigned int to = from + len;
	unsigned int start, end;

	for (bh = head, start = 0; bh != head || !start;
	     bh = bh->b_this_page, start = end) {
		end = start + bsize;
		if (end <= from)
			continue;
		if (start >= to)
			break;
		set_buffer_uptodate(bh);
		gfs2_trans_add_data(ip->i_gl, bh);
	}
}

/**
 * gfs2_get_block_noalloc - Fills in a buffer head with details about a block
 * @inode: The inode
 * @lblock: The block number to look up
 * @bh_result: The buffer head to return the result in
 * @create: Non-zero if we may add block to the file
 *
 * Returns: errno
 */

static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock,
				  struct buffer_head *bh_result, int create)
{
	int error;

	error = gfs2_block_map(inode, lblock, bh_result, 0);
	if (error)
		return error;
	if (!buffer_mapped(bh_result))
		return -EIO;
	return 0;
}

/**
 * gfs2_writepage_common - Common bits of writepage
 * @page: The page to be written
 * @wbc: The writeback control
 *
 * Returns: 1 if the page should go ahead and be written, or 0 if it
 * was redirtied or invalidated (and unlocked) here.
 */

static int gfs2_writepage_common(struct page *page,
				 struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	loff_t i_size = i_size_read(inode);
	pgoff_t end_index = i_size >> PAGE_SHIFT;
	unsigned offset;

	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
		goto out;
	if (current->journal_info)
		goto redirty;
	/* Is the page fully outside i_size? (truncate in progress) */
	offset = i_size & (PAGE_SIZE-1);
	if (page->index > end_index || (page->index == end_index && !offset)) {
		page->mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE);
		goto out;
	}
	return 1;
redirty:
	redirty_page_for_writepage(wbc, page);
out:
	unlock_page(page);
	return 0;
}

/**
 * gfs2_writepage - Write page for writeback mappings
 * @page: The page
 * @wbc: The writeback control
 *
 */

static int gfs2_writepage(struct page *page, struct writeback_control *wbc)
{
	int ret;

	ret = gfs2_writepage_common(page, wbc);
	if (ret <= 0)
		return ret;

	return nobh_writepage(page, gfs2_get_block_noalloc, wbc);
}

/* This is the same as calling block_write_full_page, but it also
 * writes pages outside of i_size
 */
static int gfs2_write_full_page(struct page *page, get_block_t *get_block,
				struct writeback_control *wbc)
{
	struct inode * const inode = page->mapping->host;
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = i_size >> PAGE_SHIFT;
	unsigned offset;

	/*
	 * The page straddles i_size. It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped. "A file is mapped
	 * in multiples of the page size. For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	offset = i_size & (PAGE_SIZE-1);
	if (page->index == end_index && offset)
		zero_user_segment(page, offset, PAGE_SIZE);

	return __block_write_full_page(inode, page, get_block, wbc,
				       end_buffer_async_write);
}

/**
 * __gfs2_jdata_writepage - The core of jdata writepage
 * @page: The page to write
 * @wbc: The writeback control
 *
 * This is shared between writepage and writepages and implements the
 * core of the writepage operation. If a transaction is required then
 * PageChecked will have been set and the transaction will have
 * already been started before this is called.
 */

static int __gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);

	if (PageChecked(page)) {
		ClearPageChecked(page);
		if (!page_has_buffers(page)) {
			create_empty_buffers(page, inode->i_sb->s_blocksize,
					     BIT(BH_Dirty)|BIT(BH_Uptodate));
		}
		gfs2_page_add_databufs(ip, page, 0, sdp->sd_vfs->s_blocksize);
	}
	return gfs2_write_full_page(page, gfs2_get_block_noalloc, wbc);
}

/**
 * gfs2_jdata_writepage - Write complete page
 * @page: Page to write
 * @wbc: The writeback control
 *
 * Returns: errno
 *
 */

static int gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	int ret;

	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
		goto out;
	if (PageChecked(page) || current->journal_info)
		goto out_ignore;
	ret = __gfs2_jdata_writepage(page, wbc);
	return ret;

out_ignore:
	redirty_page_for_writepage(wbc, page);
out:
	unlock_page(page);
	return 0;
}

/**
 * gfs2_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: Write-back control
 *
 * Used for both ordered and writeback modes.
 */
static int gfs2_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	int ret = mpage_writepages(mapping, wbc, gfs2_get_block_noalloc);

	/*
	 * Even if we didn't write any pages here, we might still be holding
	 * dirty pages in the ail. We forcibly flush the ail because we don't
	 * want balance_dirty_pages() to loop indefinitely trying to write out
	 * pages held in the ail that it can't find.
	 */
	if (ret == 0)
		set_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags);

	return ret;
}

/**
 * gfs2_write_jdata_pagevec - Write back a pagevec's worth of pages
 * @mapping: The mapping
 * @wbc: The writeback control
 * @pvec: The vector of pages
 * @nr_pages: The number of pages to write
 * @done_index: Updated to the index after the last page processed
 *
 * Returns: non-zero if loop should terminate, zero otherwise
 */

static int gfs2_write_jdata_pagevec(struct address_space *mapping,
				    struct writeback_control *wbc,
				    struct pagevec *pvec,
				    int nr_pages,
				    pgoff_t *done_index)
{
	struct inode *inode = mapping->host;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	unsigned nrblocks = nr_pages * (PAGE_SIZE/inode->i_sb->s_blocksize);
	int i;
	int ret;

	ret = gfs2_trans_begin(sdp, nrblocks, nrblocks);
	if (ret < 0)
		return ret;

	for(i = 0; i < nr_pages; i++) {
		struct page *page = pvec->pages[i];

		*done_index = page->index;

		lock_page(page);

		if (unlikely(page->mapping != mapping)) {
continue_unlock:
			unlock_page(page);
			continue;
		}

		if (!PageDirty(page)) {
			/* someone wrote it for us */
			goto continue_unlock;
		}

		if (PageWriteback(page)) {
			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);
			else
				goto continue_unlock;
		}

		BUG_ON(PageWriteback(page));
		if (!clear_page_dirty_for_io(page))
			goto continue_unlock;

		trace_wbc_writepage(wbc, inode_to_bdi(inode));

		ret = __gfs2_jdata_writepage(page, wbc);
		if (unlikely(ret)) {
			if (ret == AOP_WRITEPAGE_ACTIVATE) {
				unlock_page(page);
				ret = 0;
			} else {
				/*
				 * done_index is set past this page,
				 * so media errors will not choke
				 * background writeout for the entire
				 * file. This has consequences for
				 * range_cyclic semantics (ie. it may
				 * not be suitable for data integrity
				 * writeout).
				 */
				*done_index = page->index + 1;
				ret = 1;
				break;
			}
		}

		/*
		 * We stop writing back only if we are not doing
		 * integrity sync. In case of integrity sync we have to
		 * keep going until we have written all the pages
		 * we tagged for writeback prior to entering this loop.
		 */
		if (--wbc->nr_to_write <= 0 && wbc->sync_mode == WB_SYNC_NONE) {
			ret = 1;
			break;
		}
	}
	gfs2_trans_end(sdp);
	return ret;
}

/**
 * gfs2_write_cache_jdata - Like write_cache_pages but different
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 * The reason that we use our own function here is that we need to
 * start transactions before we grab page locks. This allows us
 * to get the ordering right.
 */

static int gfs2_write_cache_jdata(struct address_space *mapping,
				  struct writeback_control *wbc)
{
	int ret = 0;
	int done = 0;
	struct pagevec pvec;
	int nr_pages;
	pgoff_t uninitialized_var(writeback_index);
	pgoff_t index;
	pgoff_t end;
	pgoff_t done_index;
	int cycled;
	int range_whole = 0;
	xa_mark_t tag;

	pagevec_init(&pvec);
	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;
		if (index == 0)
			cycled = 1;
		else
			cycled = 0;
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		cycled = 1; /* ignore range_cyclic tests */
	}
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;

retry:
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);
	done_index = index;
	while (!done && (index <= end)) {
		nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
				tag);
		if (nr_pages == 0)
			break;

		ret = gfs2_write_jdata_pagevec(mapping, wbc, &pvec, nr_pages, &done_index);
		if (ret)
			done = 1;
		if (ret > 0)
			ret = 0;
		pagevec_release(&pvec);
		cond_resched();
	}

	if (!cycled && !done) {
		/*
		 * range_cyclic:
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		cycled = 1;
		index = 0;
		end = writeback_index - 1;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;

	return ret;
}

/**
 * gfs2_jdata_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 */

static int gfs2_jdata_writepages(struct address_space *mapping,
				 struct writeback_control *wbc)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
	int ret;

	ret = gfs2_write_cache_jdata(mapping, wbc);
	if (ret == 0 && wbc->sync_mode == WB_SYNC_ALL) {
		gfs2_log_flush(sdp, ip->i_gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_JDATA_WPAGES);
		ret = gfs2_write_cache_jdata(mapping, wbc);
	}
	return ret;
}

/**
 * stuffed_readpage - Fill in a Linux page with stuffed file data
 * @ip: the inode
 * @page: the page
 *
 * Returns: errno
 */

int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
{
	struct buffer_head *dibh;
	u64 dsize = i_size_read(&ip->i_inode);
	void *kaddr;
	int error;

	/*
	 * Due to the order of unstuffing files and ->fault(), we can be
	 * asked for a zero page in the case of a stuffed file being extended,
	 * so we need to supply one here. It doesn't happen often.
	 */
	if (unlikely(page->index)) {
		zero_user(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
		return 0;
	}

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	kaddr = kmap_atomic(page);
	if (dsize > gfs2_max_stuffed_size(ip))
		dsize = gfs2_max_stuffed_size(ip);
	memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
	memset(kaddr + dsize, 0, PAGE_SIZE - dsize);
	kunmap_atomic(kaddr);
	flush_dcache_page(page);
	brelse(dibh);
	SetPageUptodate(page);

	return 0;
}

/**
 * __gfs2_readpage - readpage
 * @file: The file to read a page for
 * @page: The page to read
 *
 * This is the core of gfs2's readpage. It's used by the internal file
 * reading code, as in that case we already hold the glock. It's also
 * called by gfs2_readpage() once the required lock has been granted.
 */

static int __gfs2_readpage(void *file, struct page *page)
{
	struct gfs2_inode *ip = GFS2_I(page->mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
	int error;

	if (i_blocksize(page->mapping->host) == PAGE_SIZE &&
	    !page_has_buffers(page)) {
		error = iomap_readpage(page, &gfs2_iomap_ops);
	} else if (gfs2_is_stuffed(ip)) {
		error = stuffed_readpage(ip, page);
		unlock_page(page);
	} else {
		error = mpage_readpage(page, gfs2_block_map);
	}

	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		return -EIO;

	return error;
}

/**
 * gfs2_readpage - read a page of a file
 * @file: The file to read
 * @page: The page of the file
 *
 * This deals with the locking required. We have to unlock and
 * relock the page in order to get the locking in the right
 * order.
 */

static int gfs2_readpage(struct file *file, struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_holder gh;
	int error;

	unlock_page(page);
	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
	error = gfs2_glock_nq(&gh);
	if (unlikely(error))
		goto out;
	error = AOP_TRUNCATED_PAGE;
	lock_page(page);
	if (page->mapping == mapping && !PageUptodate(page))
		error = __gfs2_readpage(file, page);
	else
		unlock_page(page);
	gfs2_glock_dq(&gh);
out:
	gfs2_holder_uninit(&gh);
	if (error && error != AOP_TRUNCATED_PAGE)
		lock_page(page);
	return error;
}

/**
 * gfs2_internal_read - read an internal file
 * @ip: The gfs2 inode
 * @buf: The buffer to fill
 * @pos: The file position
 * @size: The amount to read
 *
 */

int gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos,
		       unsigned size)
{
	struct address_space *mapping = ip->i_inode.i_mapping;
	unsigned long index = *pos / PAGE_SIZE;
	unsigned offset = *pos & (PAGE_SIZE - 1);
	unsigned copied = 0;
	unsigned amt;
	struct page *page;
	void *p;

	do {
		amt = size - copied;
		if (offset + size > PAGE_SIZE)
			amt = PAGE_SIZE - offset;
		page = read_cache_page(mapping, index, __gfs2_readpage, NULL);
		if (IS_ERR(page))
			return PTR_ERR(page);
		p = kmap_atomic(page);
		memcpy(buf + copied, p + offset, amt);
		kunmap_atomic(p);
		put_page(page);
		copied += amt;
		index++;
		offset = 0;
	} while(copied < size);
	(*pos) += size;
	return size;
}

/**
 * gfs2_readpages - Read a bunch of pages at once
 * @file: The file to read from
 * @mapping: Address space info
 * @pages: List of pages to read
 * @nr_pages: Number of pages to read
 *
 * Some notes:
 * 1. This is only for readahead, so we can simply ignore any things
 *    which are slightly inconvenient (such as locking conflicts between
 *    the page lock and the glock) and return having done no I/O. It's
 *    obviously not something we'd want to do on too regular a basis.
 *    Any I/O we ignore at this time will be done via readpage later.
 * 2. We don't handle stuffed files here; we let readpage do the honours.
 * 3. mpage_readpages() does most of the heavy lifting in the common case.
 * 4. gfs2_block_map() is relied upon to set BH_Boundary in the right places.
 */

static int gfs2_readpages(struct file *file, struct address_space *mapping,
			  struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_holder gh;
	int ret;

	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
	ret = gfs2_glock_nq(&gh);
	if (unlikely(ret))
		goto out_uninit;
	if (!gfs2_is_stuffed(ip))
		ret = mpage_readpages(mapping, pages, nr_pages, gfs2_block_map);
	gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		ret = -EIO;
	return ret;
}

/**
 * adjust_fs_space - Adjusts the free space available due to gfs2_grow
 * @inode: the rindex inode
 */
void adjust_fs_space(struct inode *inode)
{
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct buffer_head *m_bh, *l_bh;
	u64 fs_total, new_free;

	if (gfs2_trans_begin(sdp, 2 * RES_STATFS, 0) != 0)
		return;

	/* Total up the file system space, according to the latest rindex. */
	fs_total = gfs2_ri_total(sdp);
	if (gfs2_meta_inode_buffer(m_ip, &m_bh) != 0)
		goto out;

	spin_lock(&sdp->sd_statfs_spin);
	gfs2_statfs_change_in(m_sc, m_bh->b_data +
			      sizeof(struct gfs2_dinode));
	if (fs_total > (m_sc->sc_total + l_sc->sc_total))
		new_free = fs_total - (m_sc->sc_total + l_sc->sc_total);
	else
		new_free = 0;
	spin_unlock(&sdp->sd_statfs_spin);
	fs_warn(sdp, "File system extended by %llu blocks.\n",
		(unsigned long long)new_free);
	gfs2_statfs_change(sdp, new_free, new_free, 0);

	if (gfs2_meta_inode_buffer(l_ip, &l_bh) != 0)
		goto out2;
	update_statfs(sdp, m_bh, l_bh);
	brelse(l_bh);
out2:
	brelse(m_bh);
out:
	sdp->sd_rindex_uptodate = 0;
	gfs2_trans_end(sdp);
}

/**
 * gfs2_stuffed_write_end - Write end for stuffed files
 * @inode: The inode
 * @dibh: The buffer_head containing the on-disk inode
 * @pos: The file position
 * @copied: How much was actually copied by the VFS
 * @page: The page
 *
 * This copies the data from the page into the inode block after
 * the inode data structure itself.
 *
 * Returns: copied bytes or errno
 */
int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh,
			   loff_t pos, unsigned copied,
			   struct page *page)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	u64 to = pos + copied;
	void *kaddr;
	unsigned char *buf = dibh->b_data + sizeof(struct gfs2_dinode);

	BUG_ON(pos + copied > gfs2_max_stuffed_size(ip));

	kaddr = kmap_atomic(page);
	memcpy(buf + pos, kaddr + pos, copied);
	flush_dcache_page(page);
	kunmap_atomic(kaddr);

	WARN_ON(!PageUptodate(page));
	unlock_page(page);
	put_page(page);

	if (copied) {
		if (inode->i_size < to)
			i_size_write(inode, to);
		mark_inode_dirty(inode);
	}
	return copied;
}

/**
 * jdata_set_page_dirty - Page dirtying function
 * @page: The page to dirty
 *
 * Returns: 1 if it dirtied the page, or 0 otherwise
 */

static int jdata_set_page_dirty(struct page *page)
{
	SetPageChecked(page);
	return __set_page_dirty_buffers(page);
}

/**
 * gfs2_bmap - Block map function
 * @mapping: Address space info
 * @lblock: The block to map
 *
 * Returns: The disk address for the block or 0 on hole or error
 */

static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_holder i_gh;
	sector_t dblock = 0;
	int error;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
	if (error)
		return 0;

	if (!gfs2_is_stuffed(ip))
		dblock = generic_block_bmap(mapping, lblock, gfs2_block_map);

	gfs2_glock_dq_uninit(&i_gh);

	return dblock;
}

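/**
 * gfs2_discard - Forget about a buffer before invalidating it
 * @sdp: The superblock
 * @bh: The buffer head being discarded
 *
 * Clears the buffer's dirty and mapping state and detaches it from the
 * journal: unpinned buffers still on a log list are simply unlinked,
 * while anything else is removed via gfs2_remove_from_journal().
 */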
static void gfs2_discard(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	struct gfs2_bufdata *bd;

	lock_buffer(bh);
	gfs2_log_lock(sdp);
	clear_buffer_dirty(bh);
	bd = bh->b_private;
	if (bd) {
		if (!list_empty(&bd->bd_list) && !buffer_pinned(bh))
			list_del_init(&bd->bd_list);
		else
			gfs2_remove_from_journal(bh, REMOVE_JDATA);
	}
	bh->b_bdev = NULL;
	clear_buffer_mapped(bh);
	clear_buffer_req(bh);
	clear_buffer_new(bh);
	gfs2_log_unlock(sdp);
	unlock_buffer(bh);
}

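/**
 * gfs2_invalidatepage - Invalidate part or all of a page
 * @page: The (locked) page
 * @offset: Offset of the range being invalidated
 * @length: Length of the range
 *
 * Discards the journal state of every buffer lying entirely within the
 * range and, when the whole page is going away, tries to release the
 * page's buffers as well.
 */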
static void gfs2_invalidatepage(struct page *page, unsigned int offset,
				unsigned int length)
{
	struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
	unsigned int stop = offset + length;
	int partial_page = (offset || length < PAGE_SIZE);
	struct buffer_head *bh, *head;
	unsigned long pos = 0;

	BUG_ON(!PageLocked(page));
	if (!partial_page)
		ClearPageChecked(page);
	if (!page_has_buffers(page))
		goto out;

	bh = head = page_buffers(page);
	do {
		if (pos + bh->b_size > stop)
			return;

		if (offset <= pos)
			gfs2_discard(sdp, bh);
		pos += bh->b_size;
		bh = bh->b_this_page;
	} while (bh != head);
out:
	if (!partial_page)
		try_to_release_page(page, 0);
}

/**
 * gfs2_releasepage - free the metadata associated with a page
 * @page: the page that's being released
 * @gfp_mask: passed from Linux VFS, ignored by us
 *
 * Calls try_to_free_buffers() to free the buffers and put the page if the
 * buffers can be released.
 *
 * Returns: 1 if the page was put or else 0
 */

int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
{
	struct address_space *mapping = page->mapping;
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	struct buffer_head *bh, *head;
	struct gfs2_bufdata *bd;

	if (!page_has_buffers(page))
		return 0;

	/*
	 * From xfs_vm_releasepage: mm accommodates an old ext3 case where
	 * clean pages might not have had the dirty bit cleared. Thus, it can
	 * send actual dirty pages to ->releasepage() via shrink_active_list().
	 *
	 * As a workaround, we skip pages that contain dirty buffers below.
	 * Once ->releasepage isn't called on dirty pages anymore, we can warn
	 * on dirty buffers like we used to here again.
	 */

	gfs2_log_lock(sdp);
	spin_lock(&sdp->sd_ail_lock);
	head = bh = page_buffers(page);
	do {
		if (atomic_read(&bh->b_count))
			goto cannot_release;
		bd = bh->b_private;
		if (bd && bd->bd_tr)
			goto cannot_release;
		if (buffer_dirty(bh) || WARN_ON(buffer_pinned(bh)))
			goto cannot_release;
		bh = bh->b_this_page;
	} while(bh != head);
	spin_unlock(&sdp->sd_ail_lock);

	head = bh = page_buffers(page);
	do {
		bd = bh->b_private;
		if (bd) {
			gfs2_assert_warn(sdp, bd->bd_bh == bh);
			if (!list_empty(&bd->bd_list))
				list_del_init(&bd->bd_list);
			bd->bd_bh = NULL;
			bh->b_private = NULL;
			kmem_cache_free(gfs2_bufdata_cachep, bd);
		}

		bh = bh->b_this_page;
	} while (bh != head);
	gfs2_log_unlock(sdp);

	return try_to_free_buffers(page);

cannot_release:
	spin_unlock(&sdp->sd_ail_lock);
	gfs2_log_unlock(sdp);
	return 0;
}

static const struct address_space_operations gfs2_writeback_aops = {
	.writepage = gfs2_writepage,
	.writepages = gfs2_writepages,
	.readpage = gfs2_readpage,
	.readpages = gfs2_readpages,
	.bmap = gfs2_bmap,
	.invalidatepage = gfs2_invalidatepage,
	.releasepage = gfs2_releasepage,
	.direct_IO = noop_direct_IO,
	.migratepage = buffer_migrate_page,
	.is_partially_uptodate = block_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};

static const struct address_space_operations gfs2_ordered_aops = {
	.writepage = gfs2_writepage,
	.writepages = gfs2_writepages,
	.readpage = gfs2_readpage,
	.readpages = gfs2_readpages,
	.set_page_dirty = __set_page_dirty_buffers,
	.bmap = gfs2_bmap,
	.invalidatepage = gfs2_invalidatepage,
	.releasepage = gfs2_releasepage,
	.direct_IO = noop_direct_IO,
	.migratepage = buffer_migrate_page,
	.is_partially_uptodate = block_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};

static const struct address_space_operations gfs2_jdata_aops = {
	.writepage = gfs2_jdata_writepage,
	.writepages = gfs2_jdata_writepages,
	.readpage = gfs2_readpage,
	.readpages = gfs2_readpages,
	.set_page_dirty = jdata_set_page_dirty,
	.bmap = gfs2_bmap,
	.invalidatepage = gfs2_invalidatepage,
	.releasepage = gfs2_releasepage,
	.is_partially_uptodate = block_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};

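/**
 * gfs2_set_aops - Select the address space operations for an inode
 * @inode: The inode
 *
 * Picks the jdata, writeback or ordered operations table depending on
 * the inode's data journaling flag and the mount's data mode.
 */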
void gfs2_set_aops(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);

	if (gfs2_is_jdata(ip))
		inode->i_mapping->a_ops = &gfs2_jdata_aops;
	else if (gfs2_is_writeback(sdp))
		inode->i_mapping->a_ops = &gfs2_writeback_aops;
	else if (gfs2_is_ordered(sdp))
		inode->i_mapping->a_ops = &gfs2_ordered_aops;
	else
		BUG();
}