/* handling of writes to regular files and writing back to the server
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "internal.h"

/*
 * mark a page as having been made dirty and thus needing writeback
 */
int afs_set_page_dirty(struct page *page)
{
	_enter("");
	return __set_page_dirty_nobuffers(page);
}

/*
 * partly or wholly fill a page that's under preparation for writing
 */
static int afs_fill_page(struct afs_vnode *vnode, struct key *key,
			 loff_t pos, unsigned int len, struct page *page)
{
	struct afs_read *req;
	int ret;

	_enter(",,%llu", (unsigned long long)pos);

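	/* Allocate a read request with room for a single page pointer in its
	 * trailing page array; afs_fetch_data() will use it to pull the
	 * current contents of this page from the server.
	 */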
	req = kzalloc(sizeof(struct afs_read) + sizeof(struct page *),
		      GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	refcount_set(&req->usage, 1);
	req->pos = pos;
	req->len = len;
	req->nr_pages = 1;
	req->pages = req->array;
	req->pages[0] = page;
	get_page(page);

	ret = afs_fetch_data(vnode, key, req);
	afs_put_read(req);
	if (ret < 0) {
		if (ret == -ENOENT) {
			_debug("got NOENT from server"
			       " - marking file deleted and stale");
			set_bit(AFS_VNODE_DELETED, &vnode->flags);
			ret = -ESTALE;
		}
	}

	_leave(" = %d", ret);
	return ret;
}

/*
 * prepare to perform part of a write to a page
 */
int afs_write_begin(struct file *file, struct address_space *mapping,
		    loff_t pos, unsigned len, unsigned flags,
		    struct page **pagep, void **fsdata)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	struct page *page;
	struct key *key = afs_file_key(file);
	unsigned long priv;
	unsigned f, from = pos & (PAGE_SIZE - 1);
	unsigned t, to = from + len;
	pgoff_t index = pos >> PAGE_SHIFT;
	int ret;

	_enter("{%x:%u},{%lx},%u,%u",
	       vnode->fid.vid, vnode->fid.vnode, index, from, to);

	/* We want to store information about how much of a page is altered in
	 * page->private.
	 */
	BUILD_BUG_ON(PAGE_SIZE > 32768 && sizeof(page->private) < 8);

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;

	if (!PageUptodate(page) && len != PAGE_SIZE) {
		ret = afs_fill_page(vnode, key, pos & PAGE_MASK, PAGE_SIZE, page);
		if (ret < 0) {
			unlock_page(page);
			put_page(page);
			_leave(" = %d [prep]", ret);
			return ret;
		}
		SetPageUptodate(page);
	}

	/* page won't leak in error case: it eventually gets cleaned off LRU */
	*pagep = page;

try_again:
	/* See if this page is already partially written in a way that we can
	 * merge the new write with.
	 */
	t = f = 0;
	if (PagePrivate(page)) {
		priv = page_private(page);
		f = priv & AFS_PRIV_MAX;
		t = priv >> AFS_PRIV_SHIFT;
		ASSERTCMP(f, <=, t);
	}

	if (f != t) {
		if (PageWriteback(page)) {
			trace_afs_page_dirty(vnode, tracepoint_string("alrdy"),
					     page->index, priv);
			goto flush_conflicting_write;
		}
		if (to < f || from > t)
			goto flush_conflicting_write;
		if (from < f)
			f = from;
		if (to > t)
			t = to;
	} else {
		f = from;
		t = to;
	}

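	/* Record the dirty region in page->private: the bottom bits hold the
	 * offset of the first dirty byte and the bits above AFS_PRIV_SHIFT
	 * hold the offset of the byte after the last one.
	 */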
	priv = (unsigned long)t << AFS_PRIV_SHIFT;
	priv |= f;
	trace_afs_page_dirty(vnode, tracepoint_string("begin"),
			     page->index, priv);
	SetPagePrivate(page);
	set_page_private(page, priv);
	_leave(" = 0");
	return 0;

	/* The previous write and this write aren't adjacent or overlapping, so
	 * flush the page out.
	 */
flush_conflicting_write:
	_debug("flush conflict");
	ret = write_one_page(page);
	if (ret < 0) {
		_leave(" = %d", ret);
		return ret;
	}

	ret = lock_page_killable(page);
	if (ret < 0) {
		_leave(" = %d", ret);
		return ret;
	}
	goto try_again;
}

/*
 * finalise part of a write to a page
 */
int afs_write_end(struct file *file, struct address_space *mapping,
		  loff_t pos, unsigned len, unsigned copied,
		  struct page *page, void *fsdata)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	struct key *key = afs_file_key(file);
	loff_t i_size, maybe_i_size;
	int ret;

	_enter("{%x:%u},{%lx}",
	       vnode->fid.vid, vnode->fid.vnode, page->index);

	maybe_i_size = pos + copied;

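	/* If this write moves the file past the current EOF, extend i_size.
	 * Recheck under the writeback lock so that a racing extension cannot
	 * move the size backwards.
	 */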
	i_size = i_size_read(&vnode->vfs_inode);
	if (maybe_i_size > i_size) {
		spin_lock(&vnode->wb_lock);
		i_size = i_size_read(&vnode->vfs_inode);
		if (maybe_i_size > i_size)
			i_size_write(&vnode->vfs_inode, maybe_i_size);
		spin_unlock(&vnode->wb_lock);
	}

	if (!PageUptodate(page)) {
		if (copied < len) {
			/* Try and load any missing data from the server.  The
			 * unmarshalling routine will take care of clearing any
			 * bits that are beyond the EOF.
			 */
			ret = afs_fill_page(vnode, key, pos + copied,
					    len - copied, page);
			if (ret < 0)
				goto out;
		}
		SetPageUptodate(page);
	}

	set_page_dirty(page);
	if (PageDirty(page))
		_debug("dirtied");
	ret = copied;

out:
	unlock_page(page);
	put_page(page);
	return ret;
}

/*
 * kill all the pages in the given range
 */
static void afs_kill_pages(struct address_space *mapping,
			   pgoff_t first, pgoff_t last)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct pagevec pv;
	unsigned count, loop;

	_enter("{%x:%u},%lx-%lx",
	       vnode->fid.vid, vnode->fid.vnode, first, last);

	pagevec_init(&pv);

	do {
		_debug("kill %lx-%lx", first, last);

		count = last - first + 1;
		if (count > PAGEVEC_SIZE)
			count = PAGEVEC_SIZE;
		pv.nr = find_get_pages_contig(mapping, first, count, pv.pages);
		ASSERTCMP(pv.nr, ==, count);

		for (loop = 0; loop < count; loop++) {
			struct page *page = pv.pages[loop];
			ClearPageUptodate(page);
			SetPageError(page);
			end_page_writeback(page);
			if (page->index >= first)
				first = page->index + 1;
			lock_page(page);
			generic_error_remove_page(mapping, page);
		}

		__pagevec_release(&pv);
	} while (first <= last);

	_leave("");
}

/*
 * Redirty all the pages in a given range.
 */
static void afs_redirty_pages(struct writeback_control *wbc,
			      struct address_space *mapping,
			      pgoff_t first, pgoff_t last)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct pagevec pv;
	unsigned count, loop;

	_enter("{%x:%u},%lx-%lx",
	       vnode->fid.vid, vnode->fid.vnode, first, last);

	pagevec_init(&pv);

	do {
		_debug("redirty %lx-%lx", first, last);

		count = last - first + 1;
		if (count > PAGEVEC_SIZE)
			count = PAGEVEC_SIZE;
		pv.nr = find_get_pages_contig(mapping, first, count, pv.pages);
		ASSERTCMP(pv.nr, ==, count);

		for (loop = 0; loop < count; loop++) {
			struct page *page = pv.pages[loop];

			redirty_page_for_writepage(wbc, page);
			end_page_writeback(page);
			if (page->index >= first)
				first = page->index + 1;
		}

		__pagevec_release(&pv);
	} while (first <= last);

	_leave("");
}

/*
 * write to a file
 */
static int afs_store_data(struct address_space *mapping,
			  pgoff_t first, pgoff_t last,
			  unsigned offset, unsigned to)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct afs_fs_cursor fc;
	struct afs_wb_key *wbk = NULL;
	struct list_head *p;
	int ret = -ENOKEY, ret2;

	_enter("%s{%x:%u.%u},%lx,%lx,%x,%x",
	       vnode->volume->name,
	       vnode->fid.vid,
	       vnode->fid.vnode,
	       vnode->fid.unique,
	       first, last, offset, to);

	spin_lock(&vnode->wb_lock);
	p = vnode->wb_keys.next;

	/* Iterate through the list looking for a valid key to use. */
try_next_key:
	while (p != &vnode->wb_keys) {
		wbk = list_entry(p, struct afs_wb_key, vnode_link);
		_debug("wbk %u", key_serial(wbk->key));
		ret2 = key_validate(wbk->key);
		if (ret2 == 0)
			goto found_key;
		if (ret == -ENOKEY)
			ret = ret2;
		p = p->next;
	}

	spin_unlock(&vnode->wb_lock);
	afs_put_wb_key(wbk);
	_leave(" = %d [no keys]", ret);
	return ret;

found_key:
	refcount_inc(&wbk->usage);
	spin_unlock(&vnode->wb_lock);

	_debug("USE WB KEY %u", key_serial(wbk->key));

	ret = -ERESTARTSYS;
	if (afs_begin_vnode_operation(&fc, vnode, wbk->key)) {
		while (afs_select_fileserver(&fc)) {
			fc.cb_break = vnode->cb_break + vnode->cb_s_break;
			afs_fs_store_data(&fc, mapping, first, last, offset, to);
		}

		afs_check_for_remote_deletion(&fc, fc.vnode);
		afs_vnode_commit_status(&fc, vnode, fc.cb_break);
		ret = afs_end_vnode_operation(&fc);
	}

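	/* If the server rejected the key (permission, expiry or revocation),
	 * move on to the next key on the vnode's writeback list and retry the
	 * store with that.
	 */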
	switch (ret) {
	case -EACCES:
	case -EPERM:
	case -ENOKEY:
	case -EKEYEXPIRED:
	case -EKEYREJECTED:
	case -EKEYREVOKED:
		_debug("next");
		spin_lock(&vnode->wb_lock);
		p = wbk->vnode_link.next;
		afs_put_wb_key(wbk);
		goto try_next_key;
	}

	afs_put_wb_key(wbk);
	_leave(" = %d", ret);
	return ret;
}

/*
 * Synchronously write back the locked page and any subsequent non-locked dirty
 * pages.
 */
static int afs_write_back_from_locked_page(struct address_space *mapping,
					   struct writeback_control *wbc,
					   struct page *primary_page,
					   pgoff_t final_page)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct page *pages[8], *page;
	unsigned long count, priv;
	unsigned n, offset, to, f, t;
	pgoff_t start, first, last;
	int loop, ret;

	_enter(",%lx", primary_page->index);

	count = 1;
	if (test_set_page_writeback(primary_page))
		BUG();

	/* Find all consecutive lockable dirty pages that have contiguous
	 * written regions, stopping when we find a page that is not
	 * immediately lockable, is not dirty or is missing, or we reach the
	 * end of the range.
	 */
	start = primary_page->index;
	priv = page_private(primary_page);
	offset = priv & AFS_PRIV_MAX;
	to = priv >> AFS_PRIV_SHIFT;
	trace_afs_page_dirty(vnode, tracepoint_string("store"),
			     primary_page->index, priv);

	WARN_ON(offset == to);
	if (offset == to)
		trace_afs_page_dirty(vnode, tracepoint_string("WARN"),
				     primary_page->index, priv);

	if (start >= final_page || to < PAGE_SIZE)
		goto no_more;

	start++;
	do {
		_debug("more %lx [%lx]", start, count);
		n = final_page - start + 1;
		if (n > ARRAY_SIZE(pages))
			n = ARRAY_SIZE(pages);
		n = find_get_pages_contig(mapping, start, ARRAY_SIZE(pages), pages);
		_debug("fgpc %u", n);
		if (n == 0)
			goto no_more;
		if (pages[0]->index != start) {
			do {
				put_page(pages[--n]);
			} while (n > 0);
			goto no_more;
		}

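		/* A candidate page can only join the run if it is dirty, not
		 * already under writeback, and its dirty region starts at
		 * offset 0 so that it continues the region carried over from
		 * the previous page.
		 */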
		for (loop = 0; loop < n; loop++) {
			if (to != PAGE_SIZE)
				break;
			page = pages[loop];
			if (page->index > final_page)
				break;
			if (!trylock_page(page))
				break;
			if (!PageDirty(page) || PageWriteback(page)) {
				unlock_page(page);
				break;
			}

			priv = page_private(page);
			f = priv & AFS_PRIV_MAX;
			t = priv >> AFS_PRIV_SHIFT;
			if (f != 0) {
				unlock_page(page);
				break;
			}
			to = t;

			trace_afs_page_dirty(vnode, tracepoint_string("store+"),
					     page->index, priv);

			if (!clear_page_dirty_for_io(page))
				BUG();
			if (test_set_page_writeback(page))
				BUG();
			unlock_page(page);
			put_page(page);
		}
		count += loop;
		if (loop < n) {
			for (; loop < n; loop++)
				put_page(pages[loop]);
			goto no_more;
		}

		start += loop;
	} while (start <= final_page && count < 65536);

no_more:
	/* We now have a contiguous set of dirty pages, each with writeback
	 * set; the first page is still locked at this point, but all the rest
	 * have been unlocked.
	 */
	unlock_page(primary_page);

	first = primary_page->index;
	last = first + count - 1;

	_debug("write back %lx[%u..] to %lx[..%u]", first, offset, last, to);

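	/* Hand the whole contiguous run to the server in one StoreData
	 * operation.  Permission, key and quota failures leave the pages
	 * dirty to be retried later; unrecoverable errors cause them to be
	 * discarded.
	 */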
	ret = afs_store_data(mapping, first, last, offset, to);
	switch (ret) {
	case 0:
		ret = count;
		break;

	default:
		pr_notice("kAFS: Unexpected error from FS.StoreData %d\n", ret);
		/* Fall through */
	case -EACCES:
	case -EPERM:
	case -ENOKEY:
	case -EKEYEXPIRED:
	case -EKEYREJECTED:
	case -EKEYREVOKED:
		afs_redirty_pages(wbc, mapping, first, last);
		mapping_set_error(mapping, ret);
		break;

	case -EDQUOT:
	case -ENOSPC:
		afs_redirty_pages(wbc, mapping, first, last);
		mapping_set_error(mapping, -ENOSPC);
		break;

	case -EROFS:
	case -EIO:
	case -EREMOTEIO:
	case -EFBIG:
	case -ENOENT:
	case -ENOMEDIUM:
	case -ENXIO:
		afs_kill_pages(mapping, first, last);
		mapping_set_error(mapping, ret);
		break;
	}

	_leave(" = %d", ret);
	return ret;
}

/*
 * write a page back to the server
 * - the caller locked the page for us
 */
int afs_writepage(struct page *page, struct writeback_control *wbc)
{
	int ret;

	_enter("{%lx},", page->index);

	ret = afs_write_back_from_locked_page(page->mapping, wbc, page,
					      wbc->range_end >> PAGE_SHIFT);
	if (ret < 0) {
		_leave(" = %d", ret);
		return 0;
	}

	wbc->nr_to_write -= ret;

	_leave(" = 0");
	return 0;
}

/*
 * write a region of pages back to the server
 */
static int afs_writepages_region(struct address_space *mapping,
				 struct writeback_control *wbc,
				 pgoff_t index, pgoff_t end, pgoff_t *_next)
{
	struct page *page;
	int ret, n;

	_enter(",,%lx,%lx,", index, end);

	do {
		n = find_get_pages_range_tag(mapping, &index, end,
					     PAGECACHE_TAG_DIRTY, 1, &page);
		if (!n)
			break;

		_debug("wback %lx", page->index);

		/* at this point we hold neither mapping->tree_lock nor lock on
		 * the page itself: the page may be truncated or invalidated
		 * (changing page->mapping to NULL), or even swizzled back from
		 * swapper_space to tmpfs file mapping
		 */
		ret = lock_page_killable(page);
		if (ret < 0) {
			put_page(page);
			_leave(" = %d", ret);
			return ret;
		}

		if (page->mapping != mapping || !PageDirty(page)) {
			unlock_page(page);
			put_page(page);
			continue;
		}

		if (PageWriteback(page)) {
			unlock_page(page);
			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);
			put_page(page);
			continue;
		}

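		/* The page is locked, dirty and not yet under writeback: hand
		 * it to the writeback routine, which will also sweep up any
		 * dirty pages that immediately follow it.
		 */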
		if (!clear_page_dirty_for_io(page))
			BUG();
		ret = afs_write_back_from_locked_page(mapping, wbc, page, end);
		put_page(page);
		if (ret < 0) {
			_leave(" = %d", ret);
			return ret;
		}

		wbc->nr_to_write -= ret;

		cond_resched();
	} while (index < end && wbc->nr_to_write > 0);

	*_next = index;
	_leave(" = 0 [%lx]", *_next);
	return 0;
}

/*
 * write some of the pending data back to the server
 */
int afs_writepages(struct address_space *mapping,
		   struct writeback_control *wbc)
{
	pgoff_t start, end, next;
	int ret;

	_enter("");

	if (wbc->range_cyclic) {
		start = mapping->writeback_index;
		end = -1;
		ret = afs_writepages_region(mapping, wbc, start, end, &next);
		if (start > 0 && wbc->nr_to_write > 0 && ret == 0)
			ret = afs_writepages_region(mapping, wbc, 0, start,
						    &next);
		mapping->writeback_index = next;
	} else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
		end = (pgoff_t)(LLONG_MAX >> PAGE_SHIFT);
		ret = afs_writepages_region(mapping, wbc, 0, end, &next);
		if (wbc->nr_to_write > 0)
			mapping->writeback_index = next;
	} else {
		start = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		ret = afs_writepages_region(mapping, wbc, start, end, &next);
	}

	_leave(" = %d", ret);
	return ret;
}

/*
 * completion of write to server
 */
void afs_pages_written_back(struct afs_vnode *vnode, struct afs_call *call)
{
	struct pagevec pv;
	unsigned long priv;
	unsigned count, loop;
	pgoff_t first = call->first, last = call->last;

	_enter("{%x:%u},{%lx-%lx}",
	       vnode->fid.vid, vnode->fid.vnode, first, last);

	pagevec_init(&pv);

	do {
		_debug("done %lx-%lx", first, last);

		count = last - first + 1;
		if (count > PAGEVEC_SIZE)
			count = PAGEVEC_SIZE;
		pv.nr = find_get_pages_contig(vnode->vfs_inode.i_mapping,
					      first, count, pv.pages);
		ASSERTCMP(pv.nr, ==, count);

		for (loop = 0; loop < count; loop++) {
			priv = page_private(pv.pages[loop]);
			trace_afs_page_dirty(vnode, tracepoint_string("clear"),
					     pv.pages[loop]->index, priv);
			set_page_private(pv.pages[loop], 0);
			end_page_writeback(pv.pages[loop]);
		}
		first += count;
		__pagevec_release(&pv);
	} while (first <= last);

	afs_prune_wb_keys(vnode);
	_leave("");
}

/*
 * write to an AFS file
 */
ssize_t afs_file_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(iocb->ki_filp));
	ssize_t result;
	size_t count = iov_iter_count(from);

	_enter("{%x.%u},{%zu},",
	       vnode->fid.vid, vnode->fid.vnode, count);

	if (IS_SWAPFILE(&vnode->vfs_inode)) {
		printk(KERN_INFO
		       "AFS: Attempt to write to active swap file!\n");
		return -EBUSY;
	}

	if (!count)
		return 0;

	result = generic_file_write_iter(iocb, from);

	_leave(" = %zd", result);
	return result;
}

/*
 * flush any dirty pages for this process, and check for write errors.
 * - the return status from this call provides a reliable indication of
 *   whether any write errors occurred for this process.
 */
int afs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = file_inode(file);
	struct afs_vnode *vnode = AFS_FS_I(inode);

	_enter("{%x:%u},{n=%pD},%d",
	       vnode->fid.vid, vnode->fid.vnode, file,
	       datasync);

	return file_write_and_wait_range(file, start, end);
}

/*
 * Flush out all outstanding writes on a file opened for writing when it is
 * closed.
 */
int afs_flush(struct file *file, fl_owner_t id)
{
	_enter("");

	if ((file->f_mode & FMODE_WRITE) == 0)
		return 0;

	return vfs_fsync(file, 0);
}

/*
 * notification that a previously read-only page is about to become writable
 * - if it returns an error, the caller will deliver a bus error signal
 */
int afs_page_mkwrite(struct vm_fault *vmf)
{
	struct file *file = vmf->vma->vm_file;
	struct inode *inode = file_inode(file);
	struct afs_vnode *vnode = AFS_FS_I(inode);
	unsigned long priv;

	_enter("{{%x:%u}},{%lx}",
	       vnode->fid.vid, vnode->fid.vnode, vmf->page->index);

	sb_start_pagefault(inode->i_sb);

	/* Wait for the page to be written to the cache before we allow it to
	 * be modified. We then assume the entire page will need writing back.
	 */
#ifdef CONFIG_AFS_FSCACHE
	fscache_wait_on_page_write(vnode->cache, vmf->page);
#endif

	if (PageWriteback(vmf->page) &&
	    wait_on_page_bit_killable(vmf->page, PG_writeback) < 0)
		return VM_FAULT_RETRY;

	if (lock_page_killable(vmf->page) < 0)
		return VM_FAULT_RETRY;

	/* We mustn't change page->private until writeback is complete as that
	 * details the portion of the page we need to write back and we might
	 * need to redirty the page if there's a problem.
	 */
	wait_on_page_writeback(vmf->page);

	priv = (unsigned long)PAGE_SIZE << AFS_PRIV_SHIFT; /* To */
	priv |= 0; /* From */
	trace_afs_page_dirty(vnode, tracepoint_string("mkwrite"),
			     vmf->page->index, priv);
	SetPagePrivate(vmf->page);
	set_page_private(vmf->page, priv);

	sb_end_pagefault(inode->i_sb);
	return VM_FAULT_LOCKED;
}

/*
 * Prune the keys cached for writeback.
 */
void afs_prune_wb_keys(struct afs_vnode *vnode)
{
	LIST_HEAD(graveyard);
	struct afs_wb_key *wbk, *tmp;

	/* Discard unused keys */
	spin_lock(&vnode->wb_lock);

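	/* Don't discard anything while pages are dirty or under writeback, as
	 * a store operation may still need one of these keys.
	 */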
	if (!mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_WRITEBACK) &&
	    !mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_DIRTY)) {
		list_for_each_entry_safe(wbk, tmp, &vnode->wb_keys, vnode_link) {
			if (refcount_read(&wbk->usage) == 1)
				list_move(&wbk->vnode_link, &graveyard);
		}
	}

	spin_unlock(&vnode->wb_lock);

	while (!list_empty(&graveyard)) {
		wbk = list_entry(graveyard.next, struct afs_wb_key, vnode_link);
		list_del(&wbk->vnode_link);
		afs_put_wb_key(wbk);
	}
}

/*
 * Clean up a page during invalidation.
 */
int afs_launder_page(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	unsigned long priv;
	unsigned int f, t;
	int ret = 0;

	_enter("{%lx}", page->index);

	priv = page_private(page);
	if (clear_page_dirty_for_io(page)) {
		f = 0;
		t = PAGE_SIZE;
		if (PagePrivate(page)) {
			f = priv & AFS_PRIV_MAX;
			t = priv >> AFS_PRIV_SHIFT;
		}

		trace_afs_page_dirty(vnode, tracepoint_string("launder"),
				     page->index, priv);
		ret = afs_store_data(mapping, page->index, page->index, t, f);
	}

	trace_afs_page_dirty(vnode, tracepoint_string("laundered"),
			     page->index, priv);
	set_page_private(page, 0);
	ClearPagePrivate(page);

#ifdef CONFIG_AFS_FSCACHE
	if (PageFsCache(page)) {
		fscache_wait_on_page_write(vnode->cache, page);
		fscache_uncache_page(vnode->cache, page);
	}
#endif
	return ret;
}