/*
 * linux/fs/nfs/write.c
 *
 * Write file data over NFS.
 *
 * Copyright (C) 1996, 1997, Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/migrate.h>

#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
#include <linux/nfs_page.h>
#include <linux/backing-dev.h>

#include <asm/uaccess.h>

#include "delegation.h"
#include "internal.h"
#include "iostat.h"
#include "nfs4_fs.h"
#include "fscache.h"

#define NFSDBG_FACILITY         NFSDBG_PAGECACHE

#define MIN_POOL_WRITE          (32)
#define MIN_POOL_COMMIT         (4)

/*
 * Local function declarations
 */
static void nfs_pageio_init_write(struct nfs_pageio_descriptor *desc,
                                  struct inode *inode, int ioflags);
static void nfs_redirty_request(struct nfs_page *req);
static const struct rpc_call_ops nfs_write_partial_ops;
static const struct rpc_call_ops nfs_write_full_ops;
static const struct rpc_call_ops nfs_commit_ops;

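/*
 * Slab cache and mempools for write and commit structures.  The
 * mempools keep a small reserve (MIN_POOL_WRITE / MIN_POOL_COMMIT
 * entries each) so that writeback can make forward progress even
 * under memory pressure.
 */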
static struct kmem_cache *nfs_wdata_cachep;
static mempool_t *nfs_wdata_mempool;
static mempool_t *nfs_commit_mempool;

struct nfs_write_data *nfs_commitdata_alloc(void)
{
        struct nfs_write_data *p = mempool_alloc(nfs_commit_mempool, GFP_NOFS);

        if (p) {
                memset(p, 0, sizeof(*p));
                INIT_LIST_HEAD(&p->pages);
                p->res.seq_res.sr_slotid = NFS4_MAX_SLOT_TABLE;
        }
        return p;
}

void nfs_commit_free(struct nfs_write_data *p)
{
        if (p && (p->pagevec != &p->page_array[0]))
                kfree(p->pagevec);
        mempool_free(p, nfs_commit_mempool);
}

struct nfs_write_data *nfs_writedata_alloc(unsigned int pagecount)
{
        struct nfs_write_data *p = mempool_alloc(nfs_wdata_mempool, GFP_NOFS);

        if (p) {
                memset(p, 0, sizeof(*p));
                INIT_LIST_HEAD(&p->pages);
                p->npages = pagecount;
                p->res.seq_res.sr_slotid = NFS4_MAX_SLOT_TABLE;
                if (pagecount <= ARRAY_SIZE(p->page_array))
                        p->pagevec = p->page_array;
                else {
                        p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_NOFS);
                        if (!p->pagevec) {
                                mempool_free(p, nfs_wdata_mempool);
                                p = NULL;
                        }
                }
        }
        return p;
}

void nfs_writedata_free(struct nfs_write_data *p)
{
        if (p && (p->pagevec != &p->page_array[0]))
                kfree(p->pagevec);
        mempool_free(p, nfs_wdata_mempool);
}

static void nfs_writedata_release(struct nfs_write_data *wdata)
{
        put_nfs_open_context(wdata->args.context);
        nfs_writedata_free(wdata);
}

static void nfs_context_set_write_error(struct nfs_open_context *ctx, int error)
{
        ctx->error = error;
        smp_wmb();
        set_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags);
}

static struct nfs_page *nfs_page_find_request_locked(struct page *page)
{
        struct nfs_page *req = NULL;

        if (PagePrivate(page)) {
                req = (struct nfs_page *)page_private(page);
                if (req != NULL)
                        kref_get(&req->wb_kref);
        }
        return req;
}

static struct nfs_page *nfs_page_find_request(struct page *page)
{
        struct inode *inode = page->mapping->host;
        struct nfs_page *req = NULL;

        spin_lock(&inode->i_lock);
        req = nfs_page_find_request_locked(page);
        spin_unlock(&inode->i_lock);
        return req;
}

/* Adjust the file length if we're writing beyond the end */
static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int count)
{
        struct inode *inode = page->mapping->host;
        loff_t end, i_size;
        pgoff_t end_index;

        spin_lock(&inode->i_lock);
        i_size = i_size_read(inode);
        end_index = (i_size - 1) >> PAGE_CACHE_SHIFT;
        if (i_size > 0 && page->index < end_index)
                goto out;
        end = ((loff_t)page->index << PAGE_CACHE_SHIFT) + ((loff_t)offset+count);
        if (i_size >= end)
                goto out;
        i_size_write(inode, end);
        nfs_inc_stats(inode, NFSIOS_EXTENDWRITE);
out:
        spin_unlock(&inode->i_lock);
}

/* A writeback failed: mark the page as bad, and invalidate the page cache */
static void nfs_set_pageerror(struct page *page)
{
        SetPageError(page);
        nfs_zap_mapping(page->mapping->host, page->mapping);
}

/* We can set the PG_uptodate flag if we see that a write request
 * covers the full page.
 */
static void nfs_mark_uptodate(struct page *page, unsigned int base, unsigned int count)
{
        if (PageUptodate(page))
                return;
        if (base != 0)
                return;
        if (count != nfs_page_length(page))
                return;
        SetPageUptodate(page);
}

static int wb_priority(struct writeback_control *wbc)
{
        if (wbc->for_reclaim)
                return FLUSH_HIGHPRI | FLUSH_STABLE;
        if (wbc->for_kupdate || wbc->for_background)
                return FLUSH_LOWPRI;
        return 0;
}

/*
 * NFS congestion control
 */

int nfs_congestion_kb;

#define NFS_CONGESTION_ON_THRESH        (nfs_congestion_kb >> (PAGE_SHIFT-10))
#define NFS_CONGESTION_OFF_THRESH       \
        (NFS_CONGESTION_ON_THRESH - (NFS_CONGESTION_ON_THRESH >> 2))
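
/*
 * For example, with 4K pages (PAGE_SHIFT == 12) and nfs_congestion_kb
 * set to 8192k, the server's backing device is marked congested once
 * more than 8192 >> 2 = 2048 pages are in flight, and the congestion
 * flag is cleared again once the count drops below 2048 - 512 = 1536.
 */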

static int nfs_set_page_writeback(struct page *page)
{
        int ret = test_set_page_writeback(page);

        if (!ret) {
                struct inode *inode = page->mapping->host;
                struct nfs_server *nfss = NFS_SERVER(inode);

                if (atomic_long_inc_return(&nfss->writeback) >
                                NFS_CONGESTION_ON_THRESH) {
                        set_bdi_congested(&nfss->backing_dev_info,
                                                BLK_RW_ASYNC);
                }
        }
        return ret;
}

static void nfs_end_page_writeback(struct page *page)
{
        struct inode *inode = page->mapping->host;
        struct nfs_server *nfss = NFS_SERVER(inode);

        end_page_writeback(page);
        if (atomic_long_dec_return(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH)
                clear_bdi_congested(&nfss->backing_dev_info, BLK_RW_ASYNC);
}

static struct nfs_page *nfs_find_and_lock_request(struct page *page)
{
        struct inode *inode = page->mapping->host;
        struct nfs_page *req;
        int ret;

        spin_lock(&inode->i_lock);
        for (;;) {
                req = nfs_page_find_request_locked(page);
                if (req == NULL)
                        break;
                if (nfs_set_page_tag_locked(req))
                        break;
                /* Note: If we hold the page lock, as is the case in nfs_writepage,
                 *       then the call to nfs_set_page_tag_locked() will always
                 *       succeed provided that someone hasn't already marked the
                 *       request as dirty (in which case we don't care).
                 */
                spin_unlock(&inode->i_lock);
                ret = nfs_wait_on_request(req);
                nfs_release_request(req);
                if (ret != 0)
                        return ERR_PTR(ret);
                spin_lock(&inode->i_lock);
        }
        spin_unlock(&inode->i_lock);
        return req;
}

/*
 * Find an associated nfs write request, and prepare to flush it out.
 * May return an error if the user signalled nfs_wait_on_request().
 */
static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
                                struct page *page)
{
        struct nfs_page *req;
        int ret = 0;

        req = nfs_find_and_lock_request(page);
        if (!req)
                goto out;
        ret = PTR_ERR(req);
        if (IS_ERR(req))
                goto out;

        ret = nfs_set_page_writeback(page);
        BUG_ON(ret != 0);
        BUG_ON(test_bit(PG_CLEAN, &req->wb_flags));

        if (!nfs_pageio_add_request(pgio, req)) {
                nfs_redirty_request(req);
                ret = pgio->pg_error;
        }
out:
        return ret;
}

static int nfs_do_writepage(struct page *page, struct writeback_control *wbc, struct nfs_pageio_descriptor *pgio)
{
        struct inode *inode = page->mapping->host;

        nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
        nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1);

        nfs_pageio_cond_complete(pgio, page->index);
        return nfs_page_async_flush(pgio, page);
}

/*
 * Write an mmapped page to the server.
 */
static int nfs_writepage_locked(struct page *page, struct writeback_control *wbc)
{
        struct nfs_pageio_descriptor pgio;
        int err;

        nfs_pageio_init_write(&pgio, page->mapping->host, wb_priority(wbc));
        err = nfs_do_writepage(page, wbc, &pgio);
        nfs_pageio_complete(&pgio);
        if (err < 0)
                return err;
        if (pgio.pg_error < 0)
                return pgio.pg_error;
        return 0;
}

int nfs_writepage(struct page *page, struct writeback_control *wbc)
{
        int ret;

        ret = nfs_writepage_locked(page, wbc);
        unlock_page(page);
        return ret;
}

static int nfs_writepages_callback(struct page *page, struct writeback_control *wbc, void *data)
{
        int ret;

        ret = nfs_do_writepage(page, wbc, data);
        unlock_page(page);
        return ret;
}

int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
        struct inode *inode = mapping->host;
        unsigned long *bitlock = &NFS_I(inode)->flags;
        struct nfs_pageio_descriptor pgio;
        int err;

        /* Stop dirtying of new pages while we sync */
        err = wait_on_bit_lock(bitlock, NFS_INO_FLUSHING,
                        nfs_wait_bit_killable, TASK_KILLABLE);
        if (err)
                goto out_err;

        nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGES);

        nfs_pageio_init_write(&pgio, inode, wb_priority(wbc));
        err = write_cache_pages(mapping, wbc, nfs_writepages_callback, &pgio);
        nfs_pageio_complete(&pgio);

        clear_bit_unlock(NFS_INO_FLUSHING, bitlock);
        smp_mb__after_clear_bit();
        wake_up_bit(bitlock, NFS_INO_FLUSHING);

        if (err < 0)
                goto out_err;
        err = pgio.pg_error;
        if (err < 0)
                goto out_err;
        return 0;
out_err:
        return err;
}

/*
 * Insert a write request into an inode
 */
static int nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
{
        struct nfs_inode *nfsi = NFS_I(inode);
        int error;

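        /* Preallocate radix tree nodes so the insertion below cannot
         * fail for lack of memory while i_lock is held. */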
        error = radix_tree_preload(GFP_NOFS);
        if (error != 0)
                goto out;

        /* Lock the request! */
        nfs_lock_request_dontget(req);

        spin_lock(&inode->i_lock);
        error = radix_tree_insert(&nfsi->nfs_page_tree, req->wb_index, req);
        BUG_ON(error);
        if (!nfsi->npages) {
                igrab(inode);
                if (nfs_have_delegation(inode, FMODE_WRITE))
                        nfsi->change_attr++;
        }
        SetPagePrivate(req->wb_page);
        set_page_private(req->wb_page, (unsigned long)req);
        nfsi->npages++;
        kref_get(&req->wb_kref);
        radix_tree_tag_set(&nfsi->nfs_page_tree, req->wb_index,
                                NFS_PAGE_TAG_LOCKED);
        spin_unlock(&inode->i_lock);
        radix_tree_preload_end();
out:
        return error;
}

/*
 * Remove a write request from an inode
 */
static void nfs_inode_remove_request(struct nfs_page *req)
{
        struct inode *inode = req->wb_context->path.dentry->d_inode;
        struct nfs_inode *nfsi = NFS_I(inode);

        BUG_ON(!NFS_WBACK_BUSY(req));

        spin_lock(&inode->i_lock);
        set_page_private(req->wb_page, 0);
        ClearPagePrivate(req->wb_page);
        radix_tree_delete(&nfsi->nfs_page_tree, req->wb_index);
        nfsi->npages--;
        if (!nfsi->npages) {
                spin_unlock(&inode->i_lock);
                iput(inode);
        } else
                spin_unlock(&inode->i_lock);
        nfs_clear_request(req);
        nfs_release_request(req);
}

static void
nfs_mark_request_dirty(struct nfs_page *req)
{
        __set_page_dirty_nobuffers(req->wb_page);
}

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
/*
 * Add a request to the inode's commit list.
 */
static void
nfs_mark_request_commit(struct nfs_page *req)
{
        struct inode *inode = req->wb_context->path.dentry->d_inode;
        struct nfs_inode *nfsi = NFS_I(inode);

        spin_lock(&inode->i_lock);
        set_bit(PG_CLEAN, &(req)->wb_flags);
        radix_tree_tag_set(&nfsi->nfs_page_tree,
                        req->wb_index,
                        NFS_PAGE_TAG_COMMIT);
        nfsi->ncommit++;
        spin_unlock(&inode->i_lock);
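        /* Account the page as unstable/reclaimable, so the VM's dirty
         * page accounting keeps seeing it until the COMMIT completes. */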
        inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
        inc_bdi_stat(req->wb_page->mapping->backing_dev_info, BDI_RECLAIMABLE);
        __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
}

static int
nfs_clear_request_commit(struct nfs_page *req)
{
        struct page *page = req->wb_page;

        if (test_and_clear_bit(PG_CLEAN, &(req)->wb_flags)) {
                dec_zone_page_state(page, NR_UNSTABLE_NFS);
                dec_bdi_stat(page->mapping->backing_dev_info, BDI_RECLAIMABLE);
                return 1;
        }
        return 0;
}

static inline
int nfs_write_need_commit(struct nfs_write_data *data)
{
        return data->verf.committed != NFS_FILE_SYNC;
}

static inline
int nfs_reschedule_unstable_write(struct nfs_page *req)
{
        if (test_and_clear_bit(PG_NEED_COMMIT, &req->wb_flags)) {
                nfs_mark_request_commit(req);
                return 1;
        }
        if (test_and_clear_bit(PG_NEED_RESCHED, &req->wb_flags)) {
                nfs_mark_request_dirty(req);
                return 1;
        }
        return 0;
}
#else
static inline void
nfs_mark_request_commit(struct nfs_page *req)
{
}

static inline int
nfs_clear_request_commit(struct nfs_page *req)
{
        return 0;
}

static inline
int nfs_write_need_commit(struct nfs_write_data *data)
{
        return 0;
}

static inline
int nfs_reschedule_unstable_write(struct nfs_page *req)
{
        return 0;
}
#endif

/*
 * Wait for a request to complete.
 *
 * Interruptible by fatal signals only.
 */
static int nfs_wait_on_requests_locked(struct inode *inode, pgoff_t idx_start, unsigned int npages)
{
        struct nfs_inode *nfsi = NFS_I(inode);
        struct nfs_page *req;
        pgoff_t idx_end, next;
        unsigned int            res = 0;
        int                     error;

        if (npages == 0)
                idx_end = ~0;
        else
                idx_end = idx_start + npages - 1;

        next = idx_start;
        while (radix_tree_gang_lookup_tag(&nfsi->nfs_page_tree, (void **)&req, next, 1, NFS_PAGE_TAG_LOCKED)) {
                if (req->wb_index > idx_end)
                        break;

                next = req->wb_index + 1;
                BUG_ON(!NFS_WBACK_BUSY(req));

                kref_get(&req->wb_kref);
                spin_unlock(&inode->i_lock);
                error = nfs_wait_on_request(req);
                nfs_release_request(req);
                spin_lock(&inode->i_lock);
                if (error < 0)
                        return error;
                res++;
        }
        return res;
}

static void nfs_cancel_commit_list(struct list_head *head)
{
        struct nfs_page *req;

        while (!list_empty(head)) {
                req = nfs_list_entry(head->next);
                nfs_list_remove_request(req);
                nfs_clear_request_commit(req);
                nfs_inode_remove_request(req);
                nfs_unlock_request(req);
        }
}

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
static int
nfs_need_commit(struct nfs_inode *nfsi)
{
        return radix_tree_tagged(&nfsi->nfs_page_tree, NFS_PAGE_TAG_COMMIT);
}

/*
 * nfs_scan_commit - Scan an inode for commit requests
 * @inode: NFS inode to scan
 * @dst: destination list
 * @idx_start: lower bound of page->index to scan.
 * @npages: idx_start + npages sets the upper bound to scan.
 *
 * Moves requests from the inode's 'commit' request list.
 * The requests are *not* checked to ensure that they form a contiguous set.
 */
static int
nfs_scan_commit(struct inode *inode, struct list_head *dst, pgoff_t idx_start, unsigned int npages)
{
        struct nfs_inode *nfsi = NFS_I(inode);
        int ret;

        if (!nfs_need_commit(nfsi))
                return 0;

        ret = nfs_scan_list(nfsi, dst, idx_start, npages, NFS_PAGE_TAG_COMMIT);
        if (ret > 0)
                nfsi->ncommit -= ret;
        if (nfs_need_commit(NFS_I(inode)))
                __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
        return ret;
}
#else
static inline int nfs_need_commit(struct nfs_inode *nfsi)
{
        return 0;
}

static inline int nfs_scan_commit(struct inode *inode, struct list_head *dst, pgoff_t idx_start, unsigned int npages)
{
        return 0;
}
#endif

/*
 * Search for an existing write request, and attempt to update
 * it to reflect a new dirty region on a given page.
 *
 * If the attempt fails, then the existing request is flushed out
 * to disk.
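 *
 * For example, if an existing request already covers bytes [0, 512)
 * of the page and the new write covers [256, 1024), the regions
 * overlap, so the request simply grows to cover [0, 1024).  A write
 * at [2048, 2560) would leave a hole, so the existing request is
 * flushed out instead.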
 */
static struct nfs_page *nfs_try_to_update_request(struct inode *inode,
                struct page *page,
                unsigned int offset,
                unsigned int bytes)
{
        struct nfs_page *req;
        unsigned int rqend;
        unsigned int end;
        int error;

        if (!PagePrivate(page))
                return NULL;

        end = offset + bytes;
        spin_lock(&inode->i_lock);

        for (;;) {
                req = nfs_page_find_request_locked(page);
                if (req == NULL)
                        goto out_unlock;

                rqend = req->wb_offset + req->wb_bytes;
                /*
                 * Tell the caller to flush out the request if
                 * the offsets are non-contiguous.
                 * Note: nfs_flush_incompatible() will already
                 * have flushed out requests having wrong owners.
                 */
                if (offset > rqend
                    || end < req->wb_offset)
                        goto out_flushme;

                if (nfs_set_page_tag_locked(req))
                        break;

                /* The request is locked, so wait and then retry */
                spin_unlock(&inode->i_lock);
                error = nfs_wait_on_request(req);
                nfs_release_request(req);
                if (error != 0)
                        goto out_err;
                spin_lock(&inode->i_lock);
        }

        if (nfs_clear_request_commit(req) &&
                        radix_tree_tag_clear(&NFS_I(inode)->nfs_page_tree,
                                req->wb_index, NFS_PAGE_TAG_COMMIT) != NULL)
                NFS_I(inode)->ncommit--;

        /* Okay, the request matches. Update the region */
        if (offset < req->wb_offset) {
                req->wb_offset = offset;
                req->wb_pgbase = offset;
        }
        if (end > rqend)
                req->wb_bytes = end - req->wb_offset;
        else
                req->wb_bytes = rqend - req->wb_offset;
out_unlock:
        spin_unlock(&inode->i_lock);
        return req;
out_flushme:
        spin_unlock(&inode->i_lock);
        nfs_release_request(req);
        error = nfs_wb_page(inode, page);
out_err:
        return ERR_PTR(error);
}

/*
 * Try to update an existing write request, or create one if there is none.
 *
 * Note: Should always be called with the Page Lock held to prevent races
 * if we have to add a new request. Also assumes that the caller has
 * already called nfs_flush_incompatible() if necessary.
 */
static struct nfs_page *nfs_setup_write_request(struct nfs_open_context *ctx,
                struct page *page, unsigned int offset, unsigned int bytes)
{
        struct inode *inode = page->mapping->host;
        struct nfs_page *req;
        int error;

        req = nfs_try_to_update_request(inode, page, offset, bytes);
        if (req != NULL)
                goto out;
        req = nfs_create_request(ctx, inode, page, offset, bytes);
        if (IS_ERR(req))
                goto out;
        error = nfs_inode_add_request(inode, req);
        if (error != 0) {
                nfs_release_request(req);
                req = ERR_PTR(error);
        }
out:
        return req;
}

static int nfs_writepage_setup(struct nfs_open_context *ctx, struct page *page,
                unsigned int offset, unsigned int count)
{
        struct nfs_page *req;

        req = nfs_setup_write_request(ctx, page, offset, count);
        if (IS_ERR(req))
                return PTR_ERR(req);
        /* Update file length */
        nfs_grow_file(page, offset, count);
        nfs_mark_uptodate(page, req->wb_pgbase, req->wb_bytes);
        nfs_clear_page_tag_locked(req);
        return 0;
}

int nfs_flush_incompatible(struct file *file, struct page *page)
{
        struct nfs_open_context *ctx = nfs_file_open_context(file);
        struct nfs_page *req;
        int do_flush, status;
        /*
         * Look for a request corresponding to this page. If there
         * is one, and it belongs to another file, we flush it out
         * before we try to copy anything into the page. Do this
         * due to the lack of an ACCESS-type call in NFSv2.
         * Also do the same if we find a request from an existing
         * dropped page.
         */
        do {
                req = nfs_page_find_request(page);
                if (req == NULL)
                        return 0;
                do_flush = req->wb_page != page || req->wb_context != ctx;
                nfs_release_request(req);
                if (!do_flush)
                        return 0;
                status = nfs_wb_page(page->mapping->host, page);
        } while (status == 0);
        return status;
}

/*
 * If the page cache is marked as unsafe or invalid, then we can't rely on
 * the PageUptodate() flag. In this case, we will need to turn off
 * write optimisations that depend on the page contents being correct.
 */
static int nfs_write_pageuptodate(struct page *page, struct inode *inode)
{
        return PageUptodate(page) &&
                !(NFS_I(inode)->cache_validity & (NFS_INO_REVAL_PAGECACHE|NFS_INO_INVALID_DATA));
}

/*
 * Update and possibly write a cached page of an NFS file.
 *
 * XXX: Keep an eye on generic_file_read to make sure it doesn't do bad
 * things with a page scheduled for an RPC call (e.g. invalidate it).
 */
int nfs_updatepage(struct file *file, struct page *page,
                unsigned int offset, unsigned int count)
{
        struct nfs_open_context *ctx = nfs_file_open_context(file);
        struct inode    *inode = page->mapping->host;
        int             status = 0;

        nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE);

        dprintk("NFS:       nfs_updatepage(%s/%s %d@%lld)\n",
                file->f_path.dentry->d_parent->d_name.name,
                file->f_path.dentry->d_name.name, count,
                (long long)(page_offset(page) + offset));

        /* If we're not using byte range locks, and we know the page
         * is up to date, it may be more efficient to extend the write
         * to cover the entire page in order to avoid fragmentation
         * inefficiencies.
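         *
         * For example, a 100-byte write at offset 200 into an
         * up-to-date 4096-byte page is widened here to a single write
         * of the whole page (offset 0, count 4096).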
         */
        if (nfs_write_pageuptodate(page, inode) &&
                        inode->i_flock == NULL &&
                        !(file->f_flags & O_DSYNC)) {
                count = max(count + offset, nfs_page_length(page));
                offset = 0;
        }

        status = nfs_writepage_setup(ctx, page, offset, count);
        if (status < 0)
                nfs_set_pageerror(page);
        else
                __set_page_dirty_nobuffers(page);

        dprintk("NFS:       nfs_updatepage returns %d (isize %lld)\n",
                        status, (long long)i_size_read(inode));
        return status;
}

static void nfs_writepage_release(struct nfs_page *req)
{
        if (PageError(req->wb_page) || !nfs_reschedule_unstable_write(req)) {
                nfs_end_page_writeback(req->wb_page);
                nfs_inode_remove_request(req);
        } else
                nfs_end_page_writeback(req->wb_page);
        nfs_clear_page_tag_locked(req);
}

static int flush_task_priority(int how)
{
        switch (how & (FLUSH_HIGHPRI|FLUSH_LOWPRI)) {
                case FLUSH_HIGHPRI:
                        return RPC_PRIORITY_HIGH;
                case FLUSH_LOWPRI:
                        return RPC_PRIORITY_LOW;
        }
        return RPC_PRIORITY_NORMAL;
}

/*
 * Set up the argument/result storage required for the RPC call.
 */
static int nfs_write_rpcsetup(struct nfs_page *req,
                struct nfs_write_data *data,
                const struct rpc_call_ops *call_ops,
                unsigned int count, unsigned int offset,
                int how)
{
        struct inode *inode = req->wb_context->path.dentry->d_inode;
        int flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
        int priority = flush_task_priority(how);
        struct rpc_task *task;
        struct rpc_message msg = {
                .rpc_argp = &data->args,
                .rpc_resp = &data->res,
                .rpc_cred = req->wb_context->cred,
        };
        struct rpc_task_setup task_setup_data = {
                .rpc_client = NFS_CLIENT(inode),
                .task = &data->task,
                .rpc_message = &msg,
                .callback_ops = call_ops,
                .callback_data = data,
                .workqueue = nfsiod_workqueue,
                .flags = flags,
                .priority = priority,
        };

        /* Set up the RPC argument and reply structs
         * NB: take care not to mess about with data->commit et al. */

        data->req = req;
        data->inode = inode = req->wb_context->path.dentry->d_inode;
        data->cred = msg.rpc_cred;

        data->args.fh     = NFS_FH(inode);
        data->args.offset = req_offset(req) + offset;
        data->args.pgbase = req->wb_pgbase + offset;
        data->args.pages  = data->pagevec;
        data->args.count  = count;
        data->args.context = get_nfs_open_context(req->wb_context);
        data->args.stable  = NFS_UNSTABLE;
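        /* A stable flush upgrades this: if pages are already queued for
         * a COMMIT of the whole file, NFS_DATA_SYNC is sufficient;
         * otherwise ask for NFS_FILE_SYNC so that the data lands on
         * stable storage and no separate COMMIT is needed. */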
        if (how & FLUSH_STABLE) {
                data->args.stable = NFS_DATA_SYNC;
                if (!nfs_need_commit(NFS_I(inode)))
                        data->args.stable = NFS_FILE_SYNC;
        }

        data->res.fattr   = &data->fattr;
        data->res.count   = count;
        data->res.verf    = &data->verf;
        nfs_fattr_init(&data->fattr);

        /* Set up the initial task struct.  */
        NFS_PROTO(inode)->write_setup(data, &msg);

        dprintk("NFS: %5u initiated write call "
                "(req %s/%lld, %u bytes @ offset %llu)\n",
                data->task.tk_pid,
                inode->i_sb->s_id,
                (long long)NFS_FILEID(inode),
                count,
                (unsigned long long)data->args.offset);

        task = rpc_run_task(&task_setup_data);
        if (IS_ERR(task))
                return PTR_ERR(task);
        rpc_put_task(task);
        return 0;
}

/* If an nfs_flush_* function fails, it should remove the requests from
 * @head and call this on each one, which will prepare them to be
 * retried on the next writeback using standard nfs.
 */
static void nfs_redirty_request(struct nfs_page *req)
{
        nfs_mark_request_dirty(req);
        nfs_end_page_writeback(req->wb_page);
        nfs_clear_page_tag_locked(req);
}

/*
 * Generate multiple small requests to write out a single
 * contiguous dirty area on one page.
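 *
 * For example, with wsize = 1024 and a 4096-byte dirty area, four
 * sub-requests are sent at page offsets 0, 1024, 2048 and 3072, and
 * req->wb_complete counts them back in as each one completes.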
 */
static int nfs_flush_multi(struct inode *inode, struct list_head *head, unsigned int npages, size_t count, int how)
{
        struct nfs_page *req = nfs_list_entry(head->next);
        struct page *page = req->wb_page;
        struct nfs_write_data *data;
        size_t wsize = NFS_SERVER(inode)->wsize, nbytes;
        unsigned int offset;
        int requests = 0;
        int ret = 0;
        LIST_HEAD(list);

        nfs_list_remove_request(req);

        nbytes = count;
        do {
                size_t len = min(nbytes, wsize);

                data = nfs_writedata_alloc(1);
                if (!data)
                        goto out_bad;
                list_add(&data->pages, &list);
                requests++;
                nbytes -= len;
        } while (nbytes != 0);
        atomic_set(&req->wb_complete, requests);

        ClearPageError(page);
        offset = 0;
        nbytes = count;
        do {
                int ret2;

                data = list_entry(list.next, struct nfs_write_data, pages);
                list_del_init(&data->pages);

                data->pagevec[0] = page;

                if (nbytes < wsize)
                        wsize = nbytes;
                ret2 = nfs_write_rpcsetup(req, data, &nfs_write_partial_ops,
                                   wsize, offset, how);
                if (ret == 0)
                        ret = ret2;
                offset += wsize;
                nbytes -= wsize;
        } while (nbytes != 0);

        return ret;

out_bad:
        while (!list_empty(&list)) {
                data = list_entry(list.next, struct nfs_write_data, pages);
                list_del(&data->pages);
                nfs_writedata_release(data);
        }
        nfs_redirty_request(req);
        return -ENOMEM;
}

/*
 * Create an RPC task for the given write request and kick it.
 * The page must have been locked by the caller.
 *
 * It may happen that the page we're passed is not marked dirty.
 * This is the case if nfs_updatepage detects a conflicting request
 * that has been written but not committed.
 */
static int nfs_flush_one(struct inode *inode, struct list_head *head, unsigned int npages, size_t count, int how)
{
        struct nfs_page         *req;
        struct page             **pages;
        struct nfs_write_data   *data;

        data = nfs_writedata_alloc(npages);
        if (!data)
                goto out_bad;

        pages = data->pagevec;
        while (!list_empty(head)) {
                req = nfs_list_entry(head->next);
                nfs_list_remove_request(req);
                nfs_list_add_request(req, &data->pages);
                ClearPageError(req->wb_page);
                *pages++ = req->wb_page;
        }
        req = nfs_list_entry(data->pages.next);

        /* Set up the argument struct */
        return nfs_write_rpcsetup(req, data, &nfs_write_full_ops, count, 0, how);
 out_bad:
        while (!list_empty(head)) {
                req = nfs_list_entry(head->next);
                nfs_list_remove_request(req);
                nfs_redirty_request(req);
        }
        return -ENOMEM;
}

static void nfs_pageio_init_write(struct nfs_pageio_descriptor *pgio,
                                  struct inode *inode, int ioflags)
{
        size_t wsize = NFS_SERVER(inode)->wsize;

        if (wsize < PAGE_CACHE_SIZE)
                nfs_pageio_init(pgio, inode, nfs_flush_multi, wsize, ioflags);
        else
                nfs_pageio_init(pgio, inode, nfs_flush_one, wsize, ioflags);
}

/*
 * Handle a write reply that flushed part of a page.
 */
static void nfs_writeback_done_partial(struct rpc_task *task, void *calldata)
{
        struct nfs_write_data   *data = calldata;

        dprintk("NFS: %5u write(%s/%lld %d@%lld)",
                task->tk_pid,
                data->req->wb_context->path.dentry->d_inode->i_sb->s_id,
                (long long)
                  NFS_FILEID(data->req->wb_context->path.dentry->d_inode),
                data->req->wb_bytes, (long long)req_offset(data->req));

        nfs_writeback_done(task, data);
}

static void nfs_writeback_release_partial(void *calldata)
{
        struct nfs_write_data   *data = calldata;
        struct nfs_page         *req = data->req;
        struct page             *page = req->wb_page;
        int status = data->task.tk_status;

        if (status < 0) {
                nfs_set_pageerror(page);
                nfs_context_set_write_error(req->wb_context, status);
                dprintk(", error = %d\n", status);
                goto out;
        }

        if (nfs_write_need_commit(data)) {
                struct inode *inode = page->mapping->host;

                spin_lock(&inode->i_lock);
                if (test_bit(PG_NEED_RESCHED, &req->wb_flags)) {
                        /* Do nothing; we need to resend the writes */
                } else if (!test_and_set_bit(PG_NEED_COMMIT, &req->wb_flags)) {
                        memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
                        dprintk(" defer commit\n");
                } else if (memcmp(&req->wb_verf, &data->verf, sizeof(req->wb_verf))) {
                        set_bit(PG_NEED_RESCHED, &req->wb_flags);
                        clear_bit(PG_NEED_COMMIT, &req->wb_flags);
                        dprintk(" server reboot detected\n");
                }
                spin_unlock(&inode->i_lock);
        } else
                dprintk(" OK\n");

out:
        if (atomic_dec_and_test(&req->wb_complete))
                nfs_writepage_release(req);
        nfs_writedata_release(calldata);
}

#if defined(CONFIG_NFS_V4_1)
void nfs_write_prepare(struct rpc_task *task, void *calldata)
{
        struct nfs_write_data *data = calldata;
        struct nfs_client *clp = (NFS_SERVER(data->inode))->nfs_client;

        if (nfs4_setup_sequence(clp, &data->args.seq_args,
                                &data->res.seq_res, 1, task))
                return;
        rpc_call_start(task);
}
#endif /* CONFIG_NFS_V4_1 */

static const struct rpc_call_ops nfs_write_partial_ops = {
#if defined(CONFIG_NFS_V4_1)
        .rpc_call_prepare = nfs_write_prepare,
#endif /* CONFIG_NFS_V4_1 */
        .rpc_call_done = nfs_writeback_done_partial,
        .rpc_release = nfs_writeback_release_partial,
};

/*
 * Handle a write reply that flushes a whole page.
 *
 * FIXME: There is an inherent race with invalidate_inode_pages and
 *        writebacks since the page->count is kept > 1 for as long
 *        as the page has a write request pending.
 */
static void nfs_writeback_done_full(struct rpc_task *task, void *calldata)
{
        struct nfs_write_data   *data = calldata;

        nfs_writeback_done(task, data);
}

static void nfs_writeback_release_full(void *calldata)
{
        struct nfs_write_data   *data = calldata;
        int status = data->task.tk_status;

        /* Update attributes as result of writeback. */
        while (!list_empty(&data->pages)) {
                struct nfs_page *req = nfs_list_entry(data->pages.next);
                struct page *page = req->wb_page;

                nfs_list_remove_request(req);

                dprintk("NFS: %5u write (%s/%lld %d@%lld)",
                        data->task.tk_pid,
                        req->wb_context->path.dentry->d_inode->i_sb->s_id,
                        (long long)NFS_FILEID(req->wb_context->path.dentry->d_inode),
                        req->wb_bytes,
                        (long long)req_offset(req));

                if (status < 0) {
                        nfs_set_pageerror(page);
                        nfs_context_set_write_error(req->wb_context, status);
                        dprintk(", error = %d\n", status);
                        goto remove_request;
                }

                if (nfs_write_need_commit(data)) {
                        memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
                        nfs_mark_request_commit(req);
                        nfs_end_page_writeback(page);
                        dprintk(" marked for commit\n");
                        goto next;
                }
                dprintk(" OK\n");
remove_request:
                nfs_end_page_writeback(page);
                nfs_inode_remove_request(req);
        next:
                nfs_clear_page_tag_locked(req);
        }
        nfs_writedata_release(calldata);
}

static const struct rpc_call_ops nfs_write_full_ops = {
#if defined(CONFIG_NFS_V4_1)
        .rpc_call_prepare = nfs_write_prepare,
#endif /* CONFIG_NFS_V4_1 */
        .rpc_call_done = nfs_writeback_done_full,
        .rpc_release = nfs_writeback_release_full,
};

/*
 * This function is called when the WRITE call is complete.
 */
int nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data)
{
        struct nfs_writeargs    *argp = &data->args;
        struct nfs_writeres     *resp = &data->res;
        struct nfs_server       *server = NFS_SERVER(data->inode);
        int status;

        dprintk("NFS: %5u nfs_writeback_done (status %d)\n",
                task->tk_pid, task->tk_status);

        /*
         * ->write_done will attempt to use post-op attributes to detect
         * conflicting writes by other clients.  A strict interpretation
         * of close-to-open would allow us to continue caching even if
         * another writer had changed the file, but some applications
         * depend on tighter cache coherency when writing.
         */
        status = NFS_PROTO(data->inode)->write_done(task, data);
        if (status != 0)
                return status;
        nfs_add_stats(data->inode, NFSIOS_SERVERWRITTENBYTES, resp->count);

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
        if (resp->verf->committed < argp->stable && task->tk_status >= 0) {
                /* We tried a write call, but the server did not
                 * commit data to stable storage even though we
                 * requested it.
                 * Note: There is a known bug in Tru64 < 5.0 in which
                 *       the server reports NFS_DATA_SYNC, but performs
                 *       NFS_FILE_SYNC. We therefore implement this checking
                 *       as a dprintk() in order to avoid filling syslog.
                 */
                static unsigned long    complain;

                if (time_before(complain, jiffies)) {
                        dprintk("NFS:       faulty NFS server %s:"
                                " (committed = %d) != (stable = %d)\n",
                                server->nfs_client->cl_hostname,
                                resp->verf->committed, argp->stable);
                        complain = jiffies + 300 * HZ;
                }
        }
#endif
        /* Is this a short write? */
        if (task->tk_status >= 0 && resp->count < argp->count) {
                static unsigned long    complain;

                nfs_inc_stats(data->inode, NFSIOS_SHORTWRITE);

                /* Has the server at least made some progress? */
                if (resp->count != 0) {
                        /* Was this an NFSv2 write or an NFSv3 stable write? */
                        if (resp->verf->committed != NFS_UNSTABLE) {
                                /* Resend from where the server left off */
                                argp->offset += resp->count;
                                argp->pgbase += resp->count;
                                argp->count -= resp->count;
                        } else {
                                /* Resend as a stable write in order to avoid
                                 * headaches in the case of a server crash.
                                 */
                                argp->stable = NFS_FILE_SYNC;
                        }
                        nfs_restart_rpc(task, server->nfs_client);
                        return -EAGAIN;
                }
                if (time_before(complain, jiffies)) {
                        printk(KERN_WARNING
                               "NFS: Server wrote zero bytes, expected %u.\n",
                                        argp->count);
                        complain = jiffies + 300 * HZ;
                }
                /* Can't do anything about it except throw an error. */
                task->tk_status = -EIO;
        }
        return 0;
}

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
static void nfs_commitdata_release(void *data)
{
        struct nfs_write_data *wdata = data;

        put_nfs_open_context(wdata->args.context);
        nfs_commit_free(wdata);
}

/*
 * Set up the argument/result storage required for the RPC call.
 */
static int nfs_commit_rpcsetup(struct list_head *head,
                struct nfs_write_data *data,
                int how)
{
        struct nfs_page *first = nfs_list_entry(head->next);
        struct inode *inode = first->wb_context->path.dentry->d_inode;
        int flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
        int priority = flush_task_priority(how);
        struct rpc_task *task;
        struct rpc_message msg = {
                .rpc_argp = &data->args,
                .rpc_resp = &data->res,
                .rpc_cred = first->wb_context->cred,
        };
        struct rpc_task_setup task_setup_data = {
                .task = &data->task,
                .rpc_client = NFS_CLIENT(inode),
                .rpc_message = &msg,
                .callback_ops = &nfs_commit_ops,
                .callback_data = data,
                .workqueue = nfsiod_workqueue,
                .flags = flags,
                .priority = priority,
        };

        /* Set up the RPC argument and reply structs
         * NB: take care not to mess about with data->commit et al. */

        list_splice_init(head, &data->pages);

        data->inode       = inode;
        data->cred        = msg.rpc_cred;

        data->args.fh     = NFS_FH(data->inode);
        /* Note: we always request a commit of the entire inode */
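        /* (An offset of 0 with a count of 0 asks the server to flush the
         * whole file to stable storage; see the COMMIT procedure in
         * RFC 1813.) */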
        data->args.offset = 0;
        data->args.count  = 0;
        data->args.context = get_nfs_open_context(first->wb_context);
        data->res.count   = 0;
        data->res.fattr   = &data->fattr;
        data->res.verf    = &data->verf;
        nfs_fattr_init(&data->fattr);

        /* Set up the initial task struct.  */
        NFS_PROTO(inode)->commit_setup(data, &msg);

        dprintk("NFS: %5u initiated commit call\n", data->task.tk_pid);

        task = rpc_run_task(&task_setup_data);
        if (IS_ERR(task))
                return PTR_ERR(task);
        rpc_put_task(task);
        return 0;
}

/*
 * Commit dirty pages
 */
static int
nfs_commit_list(struct inode *inode, struct list_head *head, int how)
{
        struct nfs_write_data   *data;
        struct nfs_page         *req;

        data = nfs_commitdata_alloc();

        if (!data)
                goto out_bad;

        /* Set up the argument struct */
        return nfs_commit_rpcsetup(head, data, how);
 out_bad:
        while (!list_empty(head)) {
                req = nfs_list_entry(head->next);
                nfs_list_remove_request(req);
                nfs_mark_request_commit(req);
                dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
                dec_bdi_stat(req->wb_page->mapping->backing_dev_info,
                                BDI_RECLAIMABLE);
                nfs_clear_page_tag_locked(req);
        }
        return -ENOMEM;
}

/*
 * COMMIT call returned
 */
static void nfs_commit_done(struct rpc_task *task, void *calldata)
{
        struct nfs_write_data   *data = calldata;

        dprintk("NFS: %5u nfs_commit_done (status %d)\n",
                                task->tk_pid, task->tk_status);

        /* Call the NFS version-specific code */
        if (NFS_PROTO(data->inode)->commit_done(task, data) != 0)
                return;
}

static void nfs_commit_release(void *calldata)
{
        struct nfs_write_data   *data = calldata;
        struct nfs_page         *req;
        int status = data->task.tk_status;

        while (!list_empty(&data->pages)) {
                req = nfs_list_entry(data->pages.next);
                nfs_list_remove_request(req);
                nfs_clear_request_commit(req);

                dprintk("NFS:       commit (%s/%lld %d@%lld)",
                        req->wb_context->path.dentry->d_inode->i_sb->s_id,
                        (long long)NFS_FILEID(req->wb_context->path.dentry->d_inode),
                        req->wb_bytes,
                        (long long)req_offset(req));
                if (status < 0) {
                        nfs_context_set_write_error(req->wb_context, status);
                        nfs_inode_remove_request(req);
                        dprintk(", error = %d\n", status);
                        goto next;
                }

                /* Okay, COMMIT succeeded, apparently. Check the verifier
                 * returned by the server against all stored verfs. */
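                /* The write verifier changes when the server reboots and
                 * loses its cache of unstable data, so a mismatch means
                 * the earlier unstable WRITE may have been lost and the
                 * page must be written again. */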
                if (!memcmp(req->wb_verf.verifier, data->verf.verifier, sizeof(data->verf.verifier))) {
                        /* We have a match */
                        nfs_inode_remove_request(req);
                        dprintk(" OK\n");
                        goto next;
                }
                /* We have a mismatch. Write the page again */
                dprintk(" mismatch\n");
                nfs_mark_request_dirty(req);
        next:
                nfs_clear_page_tag_locked(req);
        }
        nfs_commitdata_release(calldata);
}

static const struct rpc_call_ops nfs_commit_ops = {
#if defined(CONFIG_NFS_V4_1)
        .rpc_call_prepare = nfs_write_prepare,
#endif /* CONFIG_NFS_V4_1 */
        .rpc_call_done = nfs_commit_done,
        .rpc_release = nfs_commit_release,
};

static int nfs_commit_inode(struct inode *inode, int how)
{
        LIST_HEAD(head);
        int res;

        spin_lock(&inode->i_lock);
        res = nfs_scan_commit(inode, &head, 0, 0);
        spin_unlock(&inode->i_lock);
        if (res) {
                int error = nfs_commit_list(inode, &head, how);
                if (error < 0)
                        return error;
        }
        return res;
}

static int nfs_commit_unstable_pages(struct inode *inode, struct writeback_control *wbc)
{
        struct nfs_inode *nfsi = NFS_I(inode);
        int flags = FLUSH_SYNC;
        int ret = 0;

        /* Don't commit yet if this is a non-blocking flush and there are
         * lots of outstanding writes for this mapping.
         */
        if (wbc->sync_mode == WB_SYNC_NONE &&
            nfsi->ncommit <= (nfsi->npages >> 1))
                goto out_mark_dirty;

        if (wbc->nonblocking || wbc->for_background)
                flags = 0;
        ret = nfs_commit_inode(inode, flags);
        if (ret >= 0) {
                if (wbc->sync_mode == WB_SYNC_NONE) {
                        if (ret < wbc->nr_to_write)
                                wbc->nr_to_write -= ret;
                        else
                                wbc->nr_to_write = 0;
                }
                return 0;
        }
out_mark_dirty:
        __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
        return ret;
}
#else
static inline int nfs_commit_list(struct inode *inode, struct list_head *head, int how)
{
        return 0;
}

static int nfs_commit_unstable_pages(struct inode *inode, struct writeback_control *wbc)
{
        return 0;
}
#endif

int nfs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
        return nfs_commit_unstable_pages(inode, wbc);
}

long nfs_sync_mapping_wait(struct address_space *mapping, struct writeback_control *wbc, int how)
{
        struct inode *inode = mapping->host;
        pgoff_t idx_start, idx_end;
        unsigned int npages = 0;
        LIST_HEAD(head);
        int nocommit = how & FLUSH_NOCOMMIT;
        long pages, ret;

        /* FIXME */
        if (wbc->range_cyclic)
                idx_start = 0;
        else {
                idx_start = wbc->range_start >> PAGE_CACHE_SHIFT;
                idx_end = wbc->range_end >> PAGE_CACHE_SHIFT;
                if (idx_end > idx_start) {
                        pgoff_t l_npages = 1 + idx_end - idx_start;
                        npages = l_npages;
                        if (sizeof(npages) != sizeof(l_npages) &&
                                        (pgoff_t)npages != l_npages)
                                npages = 0;
                }
        }
        how &= ~FLUSH_NOCOMMIT;
        spin_lock(&inode->i_lock);
        do {
                ret = nfs_wait_on_requests_locked(inode, idx_start, npages);
                if (ret != 0)
                        continue;
                if (nocommit)
                        break;
                pages = nfs_scan_commit(inode, &head, idx_start, npages);
                if (pages == 0)
                        break;
                if (how & FLUSH_INVALIDATE) {
                        spin_unlock(&inode->i_lock);
                        nfs_cancel_commit_list(&head);
                        ret = pages;
                        spin_lock(&inode->i_lock);
                        continue;
                }
                pages += nfs_scan_commit(inode, &head, 0, 0);
                spin_unlock(&inode->i_lock);
                ret = nfs_commit_list(inode, &head, how);
                spin_lock(&inode->i_lock);

        } while (ret >= 0);
        spin_unlock(&inode->i_lock);
        return ret;
}

static int __nfs_write_mapping(struct address_space *mapping, struct writeback_control *wbc, int how)
{
        int ret;

        ret = nfs_writepages(mapping, wbc);
        if (ret < 0)
                goto out;
        ret = nfs_sync_mapping_wait(mapping, wbc, how);
        if (ret < 0)
                goto out;
        return 0;
out:
        __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
        return ret;
}

/* Flush out the entire mapping with WB_SYNC_ALL and wait for completion */
static int nfs_write_mapping(struct address_space *mapping, int how)
{
        struct writeback_control wbc = {
                .bdi = mapping->backing_dev_info,
                .sync_mode = WB_SYNC_ALL,
                .nr_to_write = LONG_MAX,
                .range_start = 0,
                .range_end = LLONG_MAX,
        };

        return __nfs_write_mapping(mapping, &wbc, how);
}

/*
 * Flush the inode to disk.
 */
int nfs_wb_all(struct inode *inode)
{
        return nfs_write_mapping(inode->i_mapping, 0);
}

int nfs_wb_nocommit(struct inode *inode)
{
        return nfs_write_mapping(inode->i_mapping, FLUSH_NOCOMMIT);
}

int nfs_wb_page_cancel(struct inode *inode, struct page *page)
{
        struct nfs_page *req;
        loff_t range_start = page_offset(page);
        loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
        struct writeback_control wbc = {
                .bdi = page->mapping->backing_dev_info,
                .sync_mode = WB_SYNC_ALL,
                .nr_to_write = LONG_MAX,
                .range_start = range_start,
                .range_end = range_end,
        };
        int ret = 0;

        BUG_ON(!PageLocked(page));
        for (;;) {
                req = nfs_page_find_request(page);
                if (req == NULL)
                        goto out;
                if (test_bit(PG_CLEAN, &req->wb_flags)) {
                        nfs_release_request(req);
                        break;
                }
                if (nfs_lock_request_dontget(req)) {
                        nfs_inode_remove_request(req);
                        /*
                         * In case nfs_inode_remove_request has marked the
                         * page as being dirty
                         */
                        cancel_dirty_page(page, PAGE_CACHE_SIZE);
                        nfs_unlock_request(req);
                        break;
                }
                ret = nfs_wait_on_request(req);
                nfs_release_request(req);
                if (ret < 0)
                        goto out;
        }
        if (!PagePrivate(page))
                return 0;
        ret = nfs_sync_mapping_wait(page->mapping, &wbc, FLUSH_INVALIDATE);
out:
        return ret;
}

static int nfs_wb_page_priority(struct inode *inode, struct page *page,
                                int how)
{
        loff_t range_start = page_offset(page);
        loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
        struct writeback_control wbc = {
                .bdi = page->mapping->backing_dev_info,
                .sync_mode = WB_SYNC_ALL,
                .nr_to_write = LONG_MAX,
                .range_start = range_start,
                .range_end = range_end,
        };
        int ret;

        do {
                if (clear_page_dirty_for_io(page)) {
                        ret = nfs_writepage_locked(page, &wbc);
                        if (ret < 0)
                                goto out_error;
                } else if (!PagePrivate(page))
                        break;
                ret = nfs_sync_mapping_wait(page->mapping, &wbc, how);
                if (ret < 0)
                        goto out_error;
        } while (PagePrivate(page));
        return 0;
out_error:
        __mark_inode_dirty(inode, I_DIRTY_PAGES);
        return ret;
}

/*
 * Write back all requests on one page - we do this before reading it.
 */
int nfs_wb_page(struct inode *inode, struct page *page)
{
        return nfs_wb_page_priority(inode, page, FLUSH_STABLE);
}

#ifdef CONFIG_MIGRATION
int nfs_migrate_page(struct address_space *mapping, struct page *newpage,
                struct page *page)
{
        struct nfs_page *req;
        int ret;

        nfs_fscache_release_page(page, GFP_KERNEL);

        req = nfs_find_and_lock_request(page);
        ret = PTR_ERR(req);
        if (IS_ERR(req))
                goto out;

        ret = migrate_page(mapping, newpage, page);
        if (!req)
                goto out;
        if (ret)
                goto out_unlock;
        page_cache_get(newpage);
        spin_lock(&mapping->host->i_lock);
        req->wb_page = newpage;
        SetPagePrivate(newpage);
        set_page_private(newpage, (unsigned long)req);
        ClearPagePrivate(page);
        set_page_private(page, 0);
        spin_unlock(&mapping->host->i_lock);
        page_cache_release(page);
out_unlock:
        nfs_clear_page_tag_locked(req);
out:
        return ret;
}
#endif

int __init nfs_init_writepagecache(void)
{
        nfs_wdata_cachep = kmem_cache_create("nfs_write_data",
                                             sizeof(struct nfs_write_data),
                                             0, SLAB_HWCACHE_ALIGN,
                                             NULL);
        if (nfs_wdata_cachep == NULL)
                return -ENOMEM;

        nfs_wdata_mempool = mempool_create_slab_pool(MIN_POOL_WRITE,
                                                     nfs_wdata_cachep);
        if (nfs_wdata_mempool == NULL)
                return -ENOMEM;

        nfs_commit_mempool = mempool_create_slab_pool(MIN_POOL_COMMIT,
                                                      nfs_wdata_cachep);
        if (nfs_commit_mempool == NULL)
                return -ENOMEM;

        /*
         * NFS congestion size, scale with available memory.
         *
         *  64MB:    8192k
         * 128MB:   11585k
         * 256MB:   16384k
         * 512MB:   23170k
         *   1GB:   32768k
         *   2GB:   46340k
         *   4GB:   65536k
         *   8GB:   92681k
         *  16GB:  131072k
         *
         * This allows larger machines to have larger/more transfers.
         * Limit the default to 256M
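         *
         * For example, on a 64MB machine with 4K pages, totalram_pages
         * is 16384, int_sqrt() of that is 128, and (16 * 128) << 2
         * gives the 8192k shown above.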
         */
        nfs_congestion_kb = (16*int_sqrt(totalram_pages)) << (PAGE_SHIFT-10);
        if (nfs_congestion_kb > 256*1024)
                nfs_congestion_kb = 256*1024;

        return 0;
}

void nfs_destroy_writepagecache(void)
{
        mempool_destroy(nfs_commit_mempool);
        mempool_destroy(nfs_wdata_mempool);
        kmem_cache_destroy(nfs_wdata_cachep);
}