1 /*
2  * linux/fs/nfs/write.c
3  *
4  * Write file data over NFS.
5  *
6  * Copyright (C) 1996, 1997, Olaf Kirch <okir@monad.swb.de>
7  */
8
9 #include <linux/types.h>
10 #include <linux/slab.h>
11 #include <linux/mm.h>
12 #include <linux/pagemap.h>
13 #include <linux/file.h>
14 #include <linux/writeback.h>
15 #include <linux/swap.h>
16
17 #include <linux/sunrpc/clnt.h>
18 #include <linux/nfs_fs.h>
19 #include <linux/nfs_mount.h>
20 #include <linux/nfs_page.h>
21 #include <linux/backing-dev.h>
22
23 #include <asm/uaccess.h>
24
25 #include "delegation.h"
26 #include "internal.h"
27 #include "iostat.h"
28
29 #define NFSDBG_FACILITY         NFSDBG_PAGECACHE
30
31 #define MIN_POOL_WRITE          (32)
32 #define MIN_POOL_COMMIT         (4)
33
34 /*
35  * Local function declarations
36  */
37 static void nfs_pageio_init_write(struct nfs_pageio_descriptor *desc,
38                                   struct inode *inode, int ioflags);
39 static void nfs_redirty_request(struct nfs_page *req);
40 static const struct rpc_call_ops nfs_write_partial_ops;
41 static const struct rpc_call_ops nfs_write_full_ops;
42 static const struct rpc_call_ops nfs_commit_ops;
43
44 static struct kmem_cache *nfs_wdata_cachep;
45 static mempool_t *nfs_wdata_mempool;
46 static mempool_t *nfs_commit_mempool;
47
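/*
 * Allocate a zeroed nfs_write_data structure for a COMMIT call from the
 * commit mempool and initialise its request list. GFP_NOFS keeps the
 * allocation from recursing back into the filesystem under memory
 * pressure. Returns NULL if the allocation fails.
 */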
48 struct nfs_write_data *nfs_commitdata_alloc(void)
49 {
50         struct nfs_write_data *p = mempool_alloc(nfs_commit_mempool, GFP_NOFS);
51
52         if (p) {
53                 memset(p, 0, sizeof(*p));
54                 INIT_LIST_HEAD(&p->pages);
55         }
56         return p;
57 }
58
59 void nfs_commit_free(struct nfs_write_data *p)
60 {
61         if (p && (p->pagevec != &p->page_array[0]))
62                 kfree(p->pagevec);
63         mempool_free(p, nfs_commit_mempool);
64 }
65
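/*
 * Allocate an nfs_write_data structure big enough to describe @pagecount
 * pages. Small requests use the embedded page_array; larger ones get a
 * separately allocated page vector. Returns NULL on allocation failure.
 */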
66 struct nfs_write_data *nfs_writedata_alloc(unsigned int pagecount)
67 {
68         struct nfs_write_data *p = mempool_alloc(nfs_wdata_mempool, GFP_NOFS);
69
70         if (p) {
71                 memset(p, 0, sizeof(*p));
72                 INIT_LIST_HEAD(&p->pages);
73                 p->npages = pagecount;
74                 if (pagecount <= ARRAY_SIZE(p->page_array))
75                         p->pagevec = p->page_array;
76                 else {
77                         p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_NOFS);
78                         if (!p->pagevec) {
79                                 mempool_free(p, nfs_wdata_mempool);
80                                 p = NULL;
81                         }
82                 }
83         }
84         return p;
85 }
86
87 static void nfs_writedata_free(struct nfs_write_data *p)
88 {
89         if (p && (p->pagevec != &p->page_array[0]))
90                 kfree(p->pagevec);
91         mempool_free(p, nfs_wdata_mempool);
92 }
93
94 void nfs_writedata_release(void *data)
95 {
96         struct nfs_write_data *wdata = data;
97
98         put_nfs_open_context(wdata->args.context);
99         nfs_writedata_free(wdata);
100 }
101
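/*
 * Record a write error in the open context. The write barrier orders the
 * store of ctx->error before the NFS_CONTEXT_ERROR_WRITE flag that
 * readers test, so a set flag guarantees a valid error value.
 */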
102 static void nfs_context_set_write_error(struct nfs_open_context *ctx, int error)
103 {
104         ctx->error = error;
105         smp_wmb();
106         set_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags);
107 }
108
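/*
 * Look up the nfs_page request attached to @page via its page_private
 * pointer and take a reference on it. The caller must hold the inode's
 * i_lock so the request cannot be freed during the lookup.
 */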
109 static struct nfs_page *nfs_page_find_request_locked(struct page *page)
110 {
111         struct nfs_page *req = NULL;
112
113         if (PagePrivate(page)) {
114                 req = (struct nfs_page *)page_private(page);
115                 if (req != NULL)
116                         kref_get(&req->wb_kref);
117         }
118         return req;
119 }
120
121 static struct nfs_page *nfs_page_find_request(struct page *page)
122 {
123         struct inode *inode = page->mapping->host;
124         struct nfs_page *req = NULL;
125
126         spin_lock(&inode->i_lock);
127         req = nfs_page_find_request_locked(page);
128         spin_unlock(&inode->i_lock);
129         return req;
130 }
131
132 /* Adjust the file length if we're writing beyond the end */
133 static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int count)
134 {
135         struct inode *inode = page->mapping->host;
136         loff_t end, i_size = i_size_read(inode);
137         pgoff_t end_index = (i_size - 1) >> PAGE_CACHE_SHIFT;
138
139         if (i_size > 0 && page->index < end_index)
140                 return;
141         end = ((loff_t)page->index << PAGE_CACHE_SHIFT) + ((loff_t)offset+count);
142         if (i_size >= end)
143                 return;
144         nfs_inc_stats(inode, NFSIOS_EXTENDWRITE);
145         i_size_write(inode, end);
146 }
147
148 /* A writeback failed: mark the page as bad, and invalidate the page cache */
149 static void nfs_set_pageerror(struct page *page)
150 {
151         SetPageError(page);
152         nfs_zap_mapping(page->mapping->host, page->mapping);
153 }
154
155 /* We can set the PG_uptodate flag if we see that a write request
156  * covers the full page.
157  */
158 static void nfs_mark_uptodate(struct page *page, unsigned int base, unsigned int count)
159 {
160         if (PageUptodate(page))
161                 return;
162         if (base != 0)
163                 return;
164         if (count != nfs_page_length(page))
165                 return;
166         SetPageUptodate(page);
167 }
168
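/*
 * Map the writeback_control flags onto an NFS flush priority: reclaim
 * gets a high-priority stable write, periodic kupdate writeback gets a
 * low priority, and everything else uses the default.
 */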
169 static int wb_priority(struct writeback_control *wbc)
170 {
171         if (wbc->for_reclaim)
172                 return FLUSH_HIGHPRI | FLUSH_STABLE;
173         if (wbc->for_kupdate)
174                 return FLUSH_LOWPRI;
175         return 0;
176 }
177
178 /*
179  * NFS congestion control
180  */
181
182 int nfs_congestion_kb;
183
184 #define NFS_CONGESTION_ON_THRESH        (nfs_congestion_kb >> (PAGE_SHIFT-10))
185 #define NFS_CONGESTION_OFF_THRESH       \
186         (NFS_CONGESTION_ON_THRESH - (NFS_CONGESTION_ON_THRESH >> 2))
187
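/*
 * Mark @page as under writeback and account it against the per-server
 * writeback counter; once the counter passes the congestion threshold,
 * the backing device is flagged as write-congested to throttle further
 * writeback.
 */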
188 static int nfs_set_page_writeback(struct page *page)
189 {
190         int ret = test_set_page_writeback(page);
191
192         if (!ret) {
193                 struct inode *inode = page->mapping->host;
194                 struct nfs_server *nfss = NFS_SERVER(inode);
195
196                 if (atomic_long_inc_return(&nfss->writeback) >
197                                 NFS_CONGESTION_ON_THRESH)
198                         set_bdi_congested(&nfss->backing_dev_info, WRITE);
199         }
200         return ret;
201 }
202
203 static void nfs_end_page_writeback(struct page *page)
204 {
205         struct inode *inode = page->mapping->host;
206         struct nfs_server *nfss = NFS_SERVER(inode);
207
208         end_page_writeback(page);
209         if (atomic_long_dec_return(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH)
210                 clear_bdi_congested(&nfss->backing_dev_info, WRITE);
211 }
212
213 /*
214  * Find an associated nfs write request, and prepare to flush it out
215  * May return an error if the user signalled nfs_wait_on_request().
216  */
217 static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
218                                 struct page *page)
219 {
220         struct inode *inode = page->mapping->host;
221         struct nfs_page *req;
222         int ret;
223
224         spin_lock(&inode->i_lock);
225         for (;;) {
226                 req = nfs_page_find_request_locked(page);
227                 if (req == NULL) {
228                         spin_unlock(&inode->i_lock);
229                         return 0;
230                 }
231                 if (nfs_set_page_tag_locked(req))
232                         break;
233                 /* Note: If we hold the page lock, as is the case in nfs_writepage,
234                  *       then the call to nfs_set_page_tag_locked() will always
235                  *       succeed provided that someone hasn't already marked the
236                  *       request as dirty (in which case we don't care).
237                  */
238                 spin_unlock(&inode->i_lock);
239                 ret = nfs_wait_on_request(req);
240                 nfs_release_request(req);
241                 if (ret != 0)
242                         return ret;
243                 spin_lock(&inode->i_lock);
244         }
245         if (test_bit(PG_NEED_COMMIT, &req->wb_flags)) {
246                 /* This request is marked for commit */
247                 spin_unlock(&inode->i_lock);
248                 nfs_clear_page_tag_locked(req);
249                 nfs_pageio_complete(pgio);
250                 return 0;
251         }
252         if (nfs_set_page_writeback(page) != 0) {
253                 spin_unlock(&inode->i_lock);
254                 BUG();
255         }
256         spin_unlock(&inode->i_lock);
257         if (!nfs_pageio_add_request(pgio, req)) {
258                 nfs_redirty_request(req);
259                 return pgio->pg_error;
260         }
261         return 0;
262 }
263
264 static int nfs_do_writepage(struct page *page, struct writeback_control *wbc, struct nfs_pageio_descriptor *pgio)
265 {
266         struct inode *inode = page->mapping->host;
267
268         nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
269         nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1);
270
271         nfs_pageio_cond_complete(pgio, page->index);
272         return nfs_page_async_flush(pgio, page);
273 }
274
275 /*
276  * Write an mmapped page to the server.
277  */
278 static int nfs_writepage_locked(struct page *page, struct writeback_control *wbc)
279 {
280         struct nfs_pageio_descriptor pgio;
281         int err;
282
283         nfs_pageio_init_write(&pgio, page->mapping->host, wb_priority(wbc));
284         err = nfs_do_writepage(page, wbc, &pgio);
285         nfs_pageio_complete(&pgio);
286         if (err < 0)
287                 return err;
288         if (pgio.pg_error < 0)
289                 return pgio.pg_error;
290         return 0;
291 }
292
293 int nfs_writepage(struct page *page, struct writeback_control *wbc)
294 {
295         int ret;
296
297         ret = nfs_writepage_locked(page, wbc);
298         unlock_page(page);
299         return ret;
300 }
301
302 static int nfs_writepages_callback(struct page *page, struct writeback_control *wbc, void *data)
303 {
304         int ret;
305
306         ret = nfs_do_writepage(page, wbc, data);
307         unlock_page(page);
308         return ret;
309 }
310
311 int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
312 {
313         struct inode *inode = mapping->host;
314         struct nfs_pageio_descriptor pgio;
315         int err;
316
317         nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGES);
318
319         nfs_pageio_init_write(&pgio, inode, wb_priority(wbc));
320         err = write_cache_pages(mapping, wbc, nfs_writepages_callback, &pgio);
321         nfs_pageio_complete(&pgio);
322         if (err < 0)
323                 return err;
324         if (pgio.pg_error < 0)
325                 return pgio.pg_error;
326         return 0;
327 }
328
329 /*
330  * Insert a write request into an inode
331  */
332 static int nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
333 {
334         struct nfs_inode *nfsi = NFS_I(inode);
335         int error;
336
337         error = radix_tree_preload(GFP_NOFS);
338         if (error != 0)
339                 goto out;
340
341         /* Lock the request! */
342         nfs_lock_request_dontget(req);
343
344         spin_lock(&inode->i_lock);
345         error = radix_tree_insert(&nfsi->nfs_page_tree, req->wb_index, req);
346         BUG_ON(error);
347         if (!nfsi->npages) {
348                 igrab(inode);
349                 if (nfs_have_delegation(inode, FMODE_WRITE))
350                         nfsi->change_attr++;
351         }
352         SetPagePrivate(req->wb_page);
353         set_page_private(req->wb_page, (unsigned long)req);
354         nfsi->npages++;
355         kref_get(&req->wb_kref);
356         radix_tree_tag_set(&nfsi->nfs_page_tree, req->wb_index,
357                                 NFS_PAGE_TAG_LOCKED);
358         spin_unlock(&inode->i_lock);
359         radix_tree_preload_end();
360 out:
361         return error;
362 }
363
364 /*
365  * Remove a write request from an inode
366  */
367 static void nfs_inode_remove_request(struct nfs_page *req)
368 {
369         struct inode *inode = req->wb_context->path.dentry->d_inode;
370         struct nfs_inode *nfsi = NFS_I(inode);
371
372         BUG_ON(!NFS_WBACK_BUSY(req));
373
374         spin_lock(&inode->i_lock);
375         set_page_private(req->wb_page, 0);
376         ClearPagePrivate(req->wb_page);
377         radix_tree_delete(&nfsi->nfs_page_tree, req->wb_index);
378         nfsi->npages--;
379         if (!nfsi->npages) {
380                 spin_unlock(&inode->i_lock);
381                 iput(inode);
382         } else
383                 spin_unlock(&inode->i_lock);
384         nfs_clear_request(req);
385         nfs_release_request(req);
386 }
387
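/*
 * Redirty the page backing @req so the request is picked up again by a
 * later writeback pass.
 */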
388 static void
389 nfs_mark_request_dirty(struct nfs_page *req)
390 {
391         __set_page_dirty_nobuffers(req->wb_page);
392 }
393
394 /*
395  * Check if a request is dirty
396  */
397 static inline int
398 nfs_dirty_request(struct nfs_page *req)
399 {
400         struct page *page = req->wb_page;
401
402         if (page == NULL || test_bit(PG_NEED_COMMIT, &req->wb_flags))
403                 return 0;
404         return !PageWriteback(page);
405 }
406
407 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
408 /*
409  * Add a request to the inode's commit list.
410  */
411 static void
412 nfs_mark_request_commit(struct nfs_page *req)
413 {
414         struct inode *inode = req->wb_context->path.dentry->d_inode;
415         struct nfs_inode *nfsi = NFS_I(inode);
416
417         spin_lock(&inode->i_lock);
418         nfsi->ncommit++;
419         set_bit(PG_NEED_COMMIT, &(req)->wb_flags);
420         radix_tree_tag_set(&nfsi->nfs_page_tree,
421                         req->wb_index,
422                         NFS_PAGE_TAG_COMMIT);
423         spin_unlock(&inode->i_lock);
424         inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
425         inc_bdi_stat(req->wb_page->mapping->backing_dev_info, BDI_RECLAIMABLE);
426         __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
427 }
428
429 static inline
430 int nfs_write_need_commit(struct nfs_write_data *data)
431 {
432         return data->verf.committed != NFS_FILE_SYNC;
433 }
434
435 static inline
436 int nfs_reschedule_unstable_write(struct nfs_page *req)
437 {
438         if (test_bit(PG_NEED_COMMIT, &req->wb_flags)) {
439                 nfs_mark_request_commit(req);
440                 return 1;
441         }
442         if (test_and_clear_bit(PG_NEED_RESCHED, &req->wb_flags)) {
443                 nfs_mark_request_dirty(req);
444                 return 1;
445         }
446         return 0;
447 }
448 #else
449 static inline void
450 nfs_mark_request_commit(struct nfs_page *req)
451 {
452 }
453
454 static inline
455 int nfs_write_need_commit(struct nfs_write_data *data)
456 {
457         return 0;
458 }
459
460 static inline
461 int nfs_reschedule_unstable_write(struct nfs_page *req)
462 {
463         return 0;
464 }
465 #endif
466
467 /*
468  * Wait for a request to complete.
469  *
470  * Interruptible by fatal signals only.
471  */
472 static int nfs_wait_on_requests_locked(struct inode *inode, pgoff_t idx_start, unsigned int npages)
473 {
474         struct nfs_inode *nfsi = NFS_I(inode);
475         struct nfs_page *req;
476         pgoff_t idx_end, next;
477         unsigned int            res = 0;
478         int                     error;
479
480         if (npages == 0)
481                 idx_end = ~0;
482         else
483                 idx_end = idx_start + npages - 1;
484
485         next = idx_start;
486         while (radix_tree_gang_lookup_tag(&nfsi->nfs_page_tree, (void **)&req, next, 1, NFS_PAGE_TAG_LOCKED)) {
487                 if (req->wb_index > idx_end)
488                         break;
489
490                 next = req->wb_index + 1;
491                 BUG_ON(!NFS_WBACK_BUSY(req));
492
493                 kref_get(&req->wb_kref);
494                 spin_unlock(&inode->i_lock);
495                 error = nfs_wait_on_request(req);
496                 nfs_release_request(req);
497                 spin_lock(&inode->i_lock);
498                 if (error < 0)
499                         return error;
500                 res++;
501         }
502         return res;
503 }
504
505 static void nfs_cancel_commit_list(struct list_head *head)
506 {
507         struct nfs_page *req;
508
509         while (!list_empty(head)) {
510                 req = nfs_list_entry(head->next);
511                 dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
512                 dec_bdi_stat(req->wb_page->mapping->backing_dev_info,
513                                 BDI_RECLAIMABLE);
514                 nfs_list_remove_request(req);
515                 clear_bit(PG_NEED_COMMIT, &(req)->wb_flags);
516                 nfs_inode_remove_request(req);
517                 nfs_unlock_request(req);
518         }
519 }
520
521 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
522 /*
523  * nfs_scan_commit - Scan an inode for commit requests
524  * @inode: NFS inode to scan
525  * @dst: destination list
526  * @idx_start: lower bound of page->index to scan.
527  * @npages: idx_start + npages sets the upper bound to scan.
528  *
529  * Moves requests from the inode's 'commit' request list.
530  * The requests are *not* checked to ensure that they form a contiguous set.
531  */
532 static int
533 nfs_scan_commit(struct inode *inode, struct list_head *dst, pgoff_t idx_start, unsigned int npages)
534 {
535         struct nfs_inode *nfsi = NFS_I(inode);
536         int res = 0;
537
538         if (nfsi->ncommit != 0) {
539                 res = nfs_scan_list(nfsi, dst, idx_start, npages,
540                                 NFS_PAGE_TAG_COMMIT);
541                 nfsi->ncommit -= res;
542         }
543         return res;
544 }
545 #else
546 static inline int nfs_scan_commit(struct inode *inode, struct list_head *dst, pgoff_t idx_start, unsigned int npages)
547 {
548         return 0;
549 }
550 #endif
551
552 /*
553  * Search for an existing write request, and attempt to update
554  * it to reflect a new dirty region on a given page.
555  *
556  * If the attempt fails, then the existing request is flushed out
557  * to disk.
558  */
559 static struct nfs_page *nfs_try_to_update_request(struct inode *inode,
560                 struct page *page,
561                 unsigned int offset,
562                 unsigned int bytes)
563 {
564         struct nfs_page *req;
565         unsigned int rqend;
566         unsigned int end;
567         int error;
568
569         if (!PagePrivate(page))
570                 return NULL;
571
572         end = offset + bytes;
573         spin_lock(&inode->i_lock);
574
575         for (;;) {
576                 req = nfs_page_find_request_locked(page);
577                 if (req == NULL)
578                         goto out_unlock;
579
580                 rqend = req->wb_offset + req->wb_bytes;
581                 /*
582                  * Tell the caller to flush out the request if
583                  * the offsets are non-contiguous.
584                  * Note: nfs_flush_incompatible() will already
585                  * have flushed out requests having wrong owners.
586                  */
587                 if (!nfs_dirty_request(req)
588                     || offset > rqend
589                     || end < req->wb_offset)
590                         goto out_flushme;
591
592                 if (nfs_set_page_tag_locked(req))
593                         break;
594
595                 /* The request is locked, so wait and then retry */
596                 spin_unlock(&inode->i_lock);
597                 error = nfs_wait_on_request(req);
598                 nfs_release_request(req);
599                 if (error != 0)
600                         goto out_err;
601                 spin_lock(&inode->i_lock);
602         }
603
604         /* Okay, the request matches. Update the region */
605         if (offset < req->wb_offset) {
606                 req->wb_offset = offset;
607                 req->wb_pgbase = offset;
608         }
609         if (end > rqend)
610                 req->wb_bytes = end - req->wb_offset;
611         else
612                 req->wb_bytes = rqend - req->wb_offset;
613 out_unlock:
614         spin_unlock(&inode->i_lock);
615         return req;
616 out_flushme:
617         spin_unlock(&inode->i_lock);
618         nfs_release_request(req);
619         error = nfs_wb_page(inode, page);
620 out_err:
621         return ERR_PTR(error);
622 }
623
624 /*
625  * Try to update an existing write request, or create one if there is none.
626  *
627  * Note: Should always be called with the Page Lock held to prevent races
628  * if we have to add a new request. Also assumes that the caller has
629  * already called nfs_flush_incompatible() if necessary.
630  */
631 static struct nfs_page *nfs_setup_write_request(struct nfs_open_context *ctx,
632                 struct page *page, unsigned int offset, unsigned int bytes)
633 {
634         struct inode *inode = page->mapping->host;
635         struct nfs_page *req;
636         int error;
637
638         req = nfs_try_to_update_request(inode, page, offset, bytes);
639         if (req != NULL)
640                 goto out;
641         req = nfs_create_request(ctx, inode, page, offset, bytes);
642         if (IS_ERR(req))
643                 goto out;
644         error = nfs_inode_add_request(inode, req);
645         if (error != 0) {
646                 nfs_release_request(req);
647                 req = ERR_PTR(error);
648         }
649 out:
650         return req;
651 }
652
653 static int nfs_writepage_setup(struct nfs_open_context *ctx, struct page *page,
654                 unsigned int offset, unsigned int count)
655 {
656         struct nfs_page *req;
657
658         req = nfs_setup_write_request(ctx, page, offset, count);
659         if (IS_ERR(req))
660                 return PTR_ERR(req);
661         /* Update file length */
662         nfs_grow_file(page, offset, count);
663         nfs_mark_uptodate(page, req->wb_pgbase, req->wb_bytes);
664         nfs_clear_page_tag_locked(req);
665         return 0;
666 }
667
668 int nfs_flush_incompatible(struct file *file, struct page *page)
669 {
670         struct nfs_open_context *ctx = nfs_file_open_context(file);
671         struct nfs_page *req;
672         int do_flush, status;
673         /*
674          * Look for a request corresponding to this page. If there
675          * is one, and it belongs to another file, we flush it out
676          * before we try to copy anything into the page. Do this
677          * due to the lack of an ACCESS-type call in NFSv2.
678          * Also do the same if we find a request from an existing
679          * dropped page.
680          */
681         do {
682                 req = nfs_page_find_request(page);
683                 if (req == NULL)
684                         return 0;
685                 do_flush = req->wb_page != page || req->wb_context != ctx
686                         || !nfs_dirty_request(req);
687                 nfs_release_request(req);
688                 if (!do_flush)
689                         return 0;
690                 status = nfs_wb_page(page->mapping->host, page);
691         } while (status == 0);
692         return status;
693 }
694
695 /*
696  * If the page cache is marked as unsafe or invalid, then we can't rely on
697  * the PageUptodate() flag. In this case, we will need to turn off
698  * write optimisations that depend on the page contents being correct.
699  */
700 static int nfs_write_pageuptodate(struct page *page, struct inode *inode)
701 {
702         return PageUptodate(page) &&
703                 !(NFS_I(inode)->cache_validity & (NFS_INO_REVAL_PAGECACHE|NFS_INO_INVALID_DATA));
704 }
705
706 /*
707  * Update and possibly write a cached page of an NFS file.
708  *
709  * XXX: Keep an eye on generic_file_read to make sure it doesn't do bad
710  * things with a page scheduled for an RPC call (e.g. invalidate it).
711  */
712 int nfs_updatepage(struct file *file, struct page *page,
713                 unsigned int offset, unsigned int count)
714 {
715         struct nfs_open_context *ctx = nfs_file_open_context(file);
716         struct inode    *inode = page->mapping->host;
717         int             status = 0;
718
719         nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE);
720
721         dprintk("NFS:       nfs_updatepage(%s/%s %d@%lld)\n",
722                 file->f_path.dentry->d_parent->d_name.name,
723                 file->f_path.dentry->d_name.name, count,
724                 (long long)(page_offset(page) + offset));
725
726         /* If we're not using byte range locks, and we know the page
727          * is up to date, it may be more efficient to extend the write
728          * to cover the entire page in order to avoid fragmentation
729          * inefficiencies.
730          */
731         if (nfs_write_pageuptodate(page, inode) &&
732                         inode->i_flock == NULL &&
733                         !(file->f_flags & O_SYNC)) {
734                 count = max(count + offset, nfs_page_length(page));
735                 offset = 0;
736         }
737
738         status = nfs_writepage_setup(ctx, page, offset, count);
739         if (status < 0)
740                 nfs_set_pageerror(page);
741         else
742                 __set_page_dirty_nobuffers(page);
743
744         dprintk("NFS:       nfs_updatepage returns %d (isize %lld)\n",
745                         status, (long long)i_size_read(inode));
746         return status;
747 }
748
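/*
 * Called when the last partial write covering @req has completed. If the
 * page errored, or the request does not need to be rescheduled for a
 * commit or resend, the request is removed from the inode; the page
 * writeback state and the request lock are dropped in either case.
 */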
749 static void nfs_writepage_release(struct nfs_page *req)
750 {
751
752         if (PageError(req->wb_page) || !nfs_reschedule_unstable_write(req)) {
753                 nfs_end_page_writeback(req->wb_page);
754                 nfs_inode_remove_request(req);
755         } else
756                 nfs_end_page_writeback(req->wb_page);
757         nfs_clear_page_tag_locked(req);
758 }
759
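/*
 * Translate the FLUSH_* flags in @how into an RPC task priority.
 */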
760 static int flush_task_priority(int how)
761 {
762         switch (how & (FLUSH_HIGHPRI|FLUSH_LOWPRI)) {
763                 case FLUSH_HIGHPRI:
764                         return RPC_PRIORITY_HIGH;
765                 case FLUSH_LOWPRI:
766                         return RPC_PRIORITY_LOW;
767         }
768         return RPC_PRIORITY_NORMAL;
769 }
770
771 /*
772  * Set up the argument/result storage required for the RPC call.
773  */
774 static int nfs_write_rpcsetup(struct nfs_page *req,
775                 struct nfs_write_data *data,
776                 const struct rpc_call_ops *call_ops,
777                 unsigned int count, unsigned int offset,
778                 int how)
779 {
780         struct inode *inode = req->wb_context->path.dentry->d_inode;
781         int flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
782         int priority = flush_task_priority(how);
783         struct rpc_task *task;
784         struct rpc_message msg = {
785                 .rpc_argp = &data->args,
786                 .rpc_resp = &data->res,
787                 .rpc_cred = req->wb_context->cred,
788         };
789         struct rpc_task_setup task_setup_data = {
790                 .rpc_client = NFS_CLIENT(inode),
791                 .task = &data->task,
792                 .rpc_message = &msg,
793                 .callback_ops = call_ops,
794                 .callback_data = data,
795                 .workqueue = nfsiod_workqueue,
796                 .flags = flags,
797                 .priority = priority,
798         };
799
800         /* Set up the RPC argument and reply structs
801          * NB: take care not to mess about with data->commit et al. */
802
803         data->req = req;
804         data->inode = inode = req->wb_context->path.dentry->d_inode;
805         data->cred = msg.rpc_cred;
806
807         data->args.fh     = NFS_FH(inode);
808         data->args.offset = req_offset(req) + offset;
809         data->args.pgbase = req->wb_pgbase + offset;
810         data->args.pages  = data->pagevec;
811         data->args.count  = count;
812         data->args.context = get_nfs_open_context(req->wb_context);
813         data->args.stable  = NFS_UNSTABLE;
814         if (how & FLUSH_STABLE) {
815                 data->args.stable = NFS_DATA_SYNC;
816                 if (!NFS_I(inode)->ncommit)
817                         data->args.stable = NFS_FILE_SYNC;
818         }
819
820         data->res.fattr   = &data->fattr;
821         data->res.count   = count;
822         data->res.verf    = &data->verf;
823         nfs_fattr_init(&data->fattr);
824
825         /* Set up the initial task struct.  */
826         NFS_PROTO(inode)->write_setup(data, &msg);
827
828         dprintk("NFS: %5u initiated write call "
829                 "(req %s/%lld, %u bytes @ offset %llu)\n",
830                 data->task.tk_pid,
831                 inode->i_sb->s_id,
832                 (long long)NFS_FILEID(inode),
833                 count,
834                 (unsigned long long)data->args.offset);
835
836         task = rpc_run_task(&task_setup_data);
837         if (IS_ERR(task))
838                 return PTR_ERR(task);
839         rpc_put_task(task);
840         return 0;
841 }
842
843 /* If an nfs_flush_* function fails, it should remove reqs from @head and
844  * call this on each, which will prepare them to be retried on the next
845  * writeback using standard NFS.
846  */
847 static void nfs_redirty_request(struct nfs_page *req)
848 {
849         nfs_mark_request_dirty(req);
850         nfs_end_page_writeback(req->wb_page);
851         nfs_clear_page_tag_locked(req);
852 }
853
854 /*
855  * Generate multiple small requests to write out a single
856  * contiguous dirty area on one page.
857  */
858 static int nfs_flush_multi(struct inode *inode, struct list_head *head, unsigned int npages, size_t count, int how)
859 {
860         struct nfs_page *req = nfs_list_entry(head->next);
861         struct page *page = req->wb_page;
862         struct nfs_write_data *data;
863         size_t wsize = NFS_SERVER(inode)->wsize, nbytes;
864         unsigned int offset;
865         int requests = 0;
866         int ret = 0;
867         LIST_HEAD(list);
868
869         nfs_list_remove_request(req);
870
871         nbytes = count;
872         do {
873                 size_t len = min(nbytes, wsize);
874
875                 data = nfs_writedata_alloc(1);
876                 if (!data)
877                         goto out_bad;
878                 list_add(&data->pages, &list);
879                 requests++;
880                 nbytes -= len;
881         } while (nbytes != 0);
882         atomic_set(&req->wb_complete, requests);
883
884         ClearPageError(page);
885         offset = 0;
886         nbytes = count;
887         do {
888                 int ret2;
889
890                 data = list_entry(list.next, struct nfs_write_data, pages);
891                 list_del_init(&data->pages);
892
893                 data->pagevec[0] = page;
894
895                 if (nbytes < wsize)
896                         wsize = nbytes;
897                 ret2 = nfs_write_rpcsetup(req, data, &nfs_write_partial_ops,
898                                    wsize, offset, how);
899                 if (ret == 0)
900                         ret = ret2;
901                 offset += wsize;
902                 nbytes -= wsize;
903         } while (nbytes != 0);
904
905         return ret;
906
907 out_bad:
908         while (!list_empty(&list)) {
909                 data = list_entry(list.next, struct nfs_write_data, pages);
910                 list_del(&data->pages);
911                 nfs_writedata_release(data);
912         }
913         nfs_redirty_request(req);
914         return -ENOMEM;
915 }
916
917 /*
918  * Create an RPC task for the given write request and kick it.
919  * The page must have been locked by the caller.
920  *
921  * It may happen that the page we're passed is not marked dirty.
922  * This is the case if nfs_updatepage detects a conflicting request
923  * that has been written but not committed.
924  */
925 static int nfs_flush_one(struct inode *inode, struct list_head *head, unsigned int npages, size_t count, int how)
926 {
927         struct nfs_page         *req;
928         struct page             **pages;
929         struct nfs_write_data   *data;
930
931         data = nfs_writedata_alloc(npages);
932         if (!data)
933                 goto out_bad;
934
935         pages = data->pagevec;
936         while (!list_empty(head)) {
937                 req = nfs_list_entry(head->next);
938                 nfs_list_remove_request(req);
939                 nfs_list_add_request(req, &data->pages);
940                 ClearPageError(req->wb_page);
941                 *pages++ = req->wb_page;
942         }
943         req = nfs_list_entry(data->pages.next);
944
945         /* Set up the argument struct */
946         return nfs_write_rpcsetup(req, data, &nfs_write_full_ops, count, 0, how);
947  out_bad:
948         while (!list_empty(head)) {
949                 req = nfs_list_entry(head->next);
950                 nfs_list_remove_request(req);
951                 nfs_redirty_request(req);
952         }
953         return -ENOMEM;
954 }
955
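/*
 * Initialise a pageio descriptor for writes. If the server's wsize is
 * smaller than a page, each page has to be sent as several partial
 * WRITEs (nfs_flush_multi); otherwise whole pages are coalesced into a
 * single request (nfs_flush_one).
 */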
956 static void nfs_pageio_init_write(struct nfs_pageio_descriptor *pgio,
957                                   struct inode *inode, int ioflags)
958 {
959         size_t wsize = NFS_SERVER(inode)->wsize;
960
961         if (wsize < PAGE_CACHE_SIZE)
962                 nfs_pageio_init(pgio, inode, nfs_flush_multi, wsize, ioflags);
963         else
964                 nfs_pageio_init(pgio, inode, nfs_flush_one, wsize, ioflags);
965 }
966
967 /*
968  * Handle a write reply that flushed part of a page.
969  */
970 static void nfs_writeback_done_partial(struct rpc_task *task, void *calldata)
971 {
972         struct nfs_write_data   *data = calldata;
973
974         dprintk("NFS: %5u write(%s/%lld %d@%lld)",
975                 task->tk_pid,
976                 data->req->wb_context->path.dentry->d_inode->i_sb->s_id,
977                 (long long)
978                   NFS_FILEID(data->req->wb_context->path.dentry->d_inode),
979                 data->req->wb_bytes, (long long)req_offset(data->req));
980
981         nfs_writeback_done(task, data);
982 }
983
984 static void nfs_writeback_release_partial(void *calldata)
985 {
986         struct nfs_write_data   *data = calldata;
987         struct nfs_page         *req = data->req;
988         struct page             *page = req->wb_page;
989         int status = data->task.tk_status;
990
991         if (status < 0) {
992                 nfs_set_pageerror(page);
993                 nfs_context_set_write_error(req->wb_context, status);
994                 dprintk(", error = %d\n", status);
995                 goto out;
996         }
997
998         if (nfs_write_need_commit(data)) {
999                 struct inode *inode = page->mapping->host;
1000
1001                 spin_lock(&inode->i_lock);
1002                 if (test_bit(PG_NEED_RESCHED, &req->wb_flags)) {
1003                         /* Do nothing; we need to resend the writes */
1004                 } else if (!test_and_set_bit(PG_NEED_COMMIT, &req->wb_flags)) {
1005                         memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
1006                         dprintk(" defer commit\n");
1007                 } else if (memcmp(&req->wb_verf, &data->verf, sizeof(req->wb_verf))) {
1008                         set_bit(PG_NEED_RESCHED, &req->wb_flags);
1009                         clear_bit(PG_NEED_COMMIT, &req->wb_flags);
1010                         dprintk(" server reboot detected\n");
1011                 }
1012                 spin_unlock(&inode->i_lock);
1013         } else
1014                 dprintk(" OK\n");
1015
1016 out:
1017         if (atomic_dec_and_test(&req->wb_complete))
1018                 nfs_writepage_release(req);
1019         nfs_writedata_release(calldata);
1020 }
1021
1022 static const struct rpc_call_ops nfs_write_partial_ops = {
1023         .rpc_call_done = nfs_writeback_done_partial,
1024         .rpc_release = nfs_writeback_release_partial,
1025 };
1026
1027 /*
1028  * Handle a write reply that flushes a whole page.
1029  *
1030  * FIXME: There is an inherent race with invalidate_inode_pages and
1031  *        writebacks since the page->count is kept > 1 for as long
1032  *        as the page has a write request pending.
1033  */
1034 static void nfs_writeback_done_full(struct rpc_task *task, void *calldata)
1035 {
1036         struct nfs_write_data   *data = calldata;
1037
1038         nfs_writeback_done(task, data);
1039 }
1040
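/*
 * Release callback for whole-page writes: walk every request in the
 * completed batch and either mark it for a later COMMIT (unstable
 * reply) or remove it from the inode, recording any error in the
 * request's open context along the way.
 */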
1041 static void nfs_writeback_release_full(void *calldata)
1042 {
1043         struct nfs_write_data   *data = calldata;
1044         int status = data->task.tk_status;
1045
1046         /* Update attributes as result of writeback. */
1047         while (!list_empty(&data->pages)) {
1048                 struct nfs_page *req = nfs_list_entry(data->pages.next);
1049                 struct page *page = req->wb_page;
1050
1051                 nfs_list_remove_request(req);
1052
1053                 dprintk("NFS: %5u write (%s/%lld %d@%lld)",
1054                         data->task.tk_pid,
1055                         req->wb_context->path.dentry->d_inode->i_sb->s_id,
1056                         (long long)NFS_FILEID(req->wb_context->path.dentry->d_inode),
1057                         req->wb_bytes,
1058                         (long long)req_offset(req));
1059
1060                 if (status < 0) {
1061                         nfs_set_pageerror(page);
1062                         nfs_context_set_write_error(req->wb_context, status);
1063                         dprintk(", error = %d\n", status);
1064                         goto remove_request;
1065                 }
1066
1067                 if (nfs_write_need_commit(data)) {
1068                         memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
1069                         nfs_mark_request_commit(req);
1070                         nfs_end_page_writeback(page);
1071                         dprintk(" marked for commit\n");
1072                         goto next;
1073                 }
1074                 dprintk(" OK\n");
1075 remove_request:
1076                 nfs_end_page_writeback(page);
1077                 nfs_inode_remove_request(req);
1078         next:
1079                 nfs_clear_page_tag_locked(req);
1080         }
1081         nfs_writedata_release(calldata);
1082 }
1083
1084 static const struct rpc_call_ops nfs_write_full_ops = {
1085         .rpc_call_done = nfs_writeback_done_full,
1086         .rpc_release = nfs_writeback_release_full,
1087 };
1088
1089
1090 /*
1091  * This function is called when the WRITE call is complete.
1092  */
1093 int nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data)
1094 {
1095         struct nfs_writeargs    *argp = &data->args;
1096         struct nfs_writeres     *resp = &data->res;
1097         int status;
1098
1099         dprintk("NFS: %5u nfs_writeback_done (status %d)\n",
1100                 task->tk_pid, task->tk_status);
1101
1102         /*
1103          * ->write_done will attempt to use post-op attributes to detect
1104          * conflicting writes by other clients.  A strict interpretation
1105          * of close-to-open would allow us to continue caching even if
1106          * another writer had changed the file, but some applications
1107          * depend on tighter cache coherency when writing.
1108          */
1109         status = NFS_PROTO(data->inode)->write_done(task, data);
1110         if (status != 0)
1111                 return status;
1112         nfs_add_stats(data->inode, NFSIOS_SERVERWRITTENBYTES, resp->count);
1113
1114 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
1115         if (resp->verf->committed < argp->stable && task->tk_status >= 0) {
1116                 /* We tried a write call, but the server did not
1117                  * commit data to stable storage even though we
1118                  * requested it.
1119                  * Note: There is a known bug in Tru64 < 5.0 in which
1120                  *       the server reports NFS_DATA_SYNC, but performs
1121                  *       NFS_FILE_SYNC. We therefore implement this checking
1122                  *       as a dprintk() in order to avoid filling syslog.
1123                  */
1124                 static unsigned long    complain;
1125
1126                 if (time_before(complain, jiffies)) {
1127                         dprintk("NFS:       faulty NFS server %s:"
1128                                 " (committed = %d) != (stable = %d)\n",
1129                                 NFS_SERVER(data->inode)->nfs_client->cl_hostname,
1130                                 resp->verf->committed, argp->stable);
1131                         complain = jiffies + 300 * HZ;
1132                 }
1133         }
1134 #endif
1135         /* Is this a short write? */
1136         if (task->tk_status >= 0 && resp->count < argp->count) {
1137                 static unsigned long    complain;
1138
1139                 nfs_inc_stats(data->inode, NFSIOS_SHORTWRITE);
1140
1141                 /* Has the server at least made some progress? */
1142                 if (resp->count != 0) {
1143                         /* Was this an NFSv2 write or an NFSv3 stable write? */
1144                         if (resp->verf->committed != NFS_UNSTABLE) {
1145                                 /* Resend from where the server left off */
1146                                 argp->offset += resp->count;
1147                                 argp->pgbase += resp->count;
1148                                 argp->count -= resp->count;
1149                         } else {
1150                                 /* Resend as a stable write in order to avoid
1151                                  * headaches in the case of a server crash.
1152                                  */
1153                                 argp->stable = NFS_FILE_SYNC;
1154                         }
1155                         rpc_restart_call(task);
1156                         return -EAGAIN;
1157                 }
1158                 if (time_before(complain, jiffies)) {
1159                         printk(KERN_WARNING
1160                                "NFS: Server wrote zero bytes, expected %u.\n",
1161                                         argp->count);
1162                         complain = jiffies + 300 * HZ;
1163                 }
1164                 /* Can't do anything about it except throw an error. */
1165                 task->tk_status = -EIO;
1166         }
1167         return 0;
1168 }
1169
1170
1171 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
1172 void nfs_commitdata_release(void *data)
1173 {
1174         struct nfs_write_data *wdata = data;
1175
1176         put_nfs_open_context(wdata->args.context);
1177         nfs_commit_free(wdata);
1178 }
1179
1180 /*
1181  * Set up the argument/result storage required for the RPC call.
1182  */
1183 static int nfs_commit_rpcsetup(struct list_head *head,
1184                 struct nfs_write_data *data,
1185                 int how)
1186 {
1187         struct nfs_page *first = nfs_list_entry(head->next);
1188         struct inode *inode = first->wb_context->path.dentry->d_inode;
1189         int flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
1190         int priority = flush_task_priority(how);
1191         struct rpc_task *task;
1192         struct rpc_message msg = {
1193                 .rpc_argp = &data->args,
1194                 .rpc_resp = &data->res,
1195                 .rpc_cred = first->wb_context->cred,
1196         };
1197         struct rpc_task_setup task_setup_data = {
1198                 .task = &data->task,
1199                 .rpc_client = NFS_CLIENT(inode),
1200                 .rpc_message = &msg,
1201                 .callback_ops = &nfs_commit_ops,
1202                 .callback_data = data,
1203                 .workqueue = nfsiod_workqueue,
1204                 .flags = flags,
1205                 .priority = priority,
1206         };
1207
1208         /* Set up the RPC argument and reply structs
1209          * NB: take care not to mess about with data->commit et al. */
1210
1211         list_splice_init(head, &data->pages);
1212
1213         data->inode       = inode;
1214         data->cred        = msg.rpc_cred;
1215
1216         data->args.fh     = NFS_FH(data->inode);
1217         /* Note: we always request a commit of the entire inode */
1218         data->args.offset = 0;
1219         data->args.count  = 0;
1220         data->args.context = get_nfs_open_context(first->wb_context);
1221         data->res.count   = 0;
1222         data->res.fattr   = &data->fattr;
1223         data->res.verf    = &data->verf;
1224         nfs_fattr_init(&data->fattr);
1225
1226         /* Set up the initial task struct.  */
1227         NFS_PROTO(inode)->commit_setup(data, &msg);
1228
1229         dprintk("NFS: %5u initiated commit call\n", data->task.tk_pid);
1230
1231         task = rpc_run_task(&task_setup_data);
1232         if (IS_ERR(task))
1233                 return PTR_ERR(task);
1234         rpc_put_task(task);
1235         return 0;
1236 }
1237
1238 /*
1239  * Commit dirty pages
1240  */
1241 static int
1242 nfs_commit_list(struct inode *inode, struct list_head *head, int how)
1243 {
1244         struct nfs_write_data   *data;
1245         struct nfs_page         *req;
1246
1247         data = nfs_commitdata_alloc();
1248
1249         if (!data)
1250                 goto out_bad;
1251
1252         /* Set up the argument struct */
1253         return nfs_commit_rpcsetup(head, data, how);
1254  out_bad:
1255         while (!list_empty(head)) {
1256                 req = nfs_list_entry(head->next);
1257                 nfs_list_remove_request(req);
1258                 nfs_mark_request_commit(req);
1259                 dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
1260                 dec_bdi_stat(req->wb_page->mapping->backing_dev_info,
1261                                 BDI_RECLAIMABLE);
1262                 nfs_clear_page_tag_locked(req);
1263         }
1264         return -ENOMEM;
1265 }
1266
1267 /*
1268  * COMMIT call returned
1269  */
1270 static void nfs_commit_done(struct rpc_task *task, void *calldata)
1271 {
1272         struct nfs_write_data   *data = calldata;
1273
1274         dprintk("NFS: %5u nfs_commit_done (status %d)\n",
1275                                 task->tk_pid, task->tk_status);
1276
1277         /* Call the NFS version-specific code */
1278         if (NFS_PROTO(data->inode)->commit_done(task, data) != 0)
1279                 return;
1280 }
1281
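/*
 * Release callback for COMMIT: compare each request's stored write
 * verifier with the one the server returned. A match means the data
 * reached stable storage and the request can be removed; a mismatch
 * means the server rebooted, so the page is redirtied and written again.
 */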
1282 static void nfs_commit_release(void *calldata)
1283 {
1284         struct nfs_write_data   *data = calldata;
1285         struct nfs_page         *req;
1286         int status = data->task.tk_status;
1287
1288         while (!list_empty(&data->pages)) {
1289                 req = nfs_list_entry(data->pages.next);
1290                 nfs_list_remove_request(req);
1291                 clear_bit(PG_NEED_COMMIT, &(req)->wb_flags);
1292                 dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
1293                 dec_bdi_stat(req->wb_page->mapping->backing_dev_info,
1294                                 BDI_RECLAIMABLE);
1295
1296                 dprintk("NFS:       commit (%s/%lld %d@%lld)",
1297                         req->wb_context->path.dentry->d_inode->i_sb->s_id,
1298                         (long long)NFS_FILEID(req->wb_context->path.dentry->d_inode),
1299                         req->wb_bytes,
1300                         (long long)req_offset(req));
1301                 if (status < 0) {
1302                         nfs_context_set_write_error(req->wb_context, status);
1303                         nfs_inode_remove_request(req);
1304                         dprintk(", error = %d\n", status);
1305                         goto next;
1306                 }
1307
1308                 /* Okay, COMMIT succeeded, apparently. Check the verifier
1309                  * returned by the server against all stored verfs. */
1310                 if (!memcmp(req->wb_verf.verifier, data->verf.verifier, sizeof(data->verf.verifier))) {
1311                         /* We have a match */
1312                         nfs_inode_remove_request(req);
1313                         dprintk(" OK\n");
1314                         goto next;
1315                 }
1316                 /* We have a mismatch. Write the page again */
1317                 dprintk(" mismatch\n");
1318                 nfs_mark_request_dirty(req);
1319         next:
1320                 nfs_clear_page_tag_locked(req);
1321         }
1322         nfs_commitdata_release(calldata);
1323 }
1324
1325 static const struct rpc_call_ops nfs_commit_ops = {
1326         .rpc_call_done = nfs_commit_done,
1327         .rpc_release = nfs_commit_release,
1328 };
1329
1330 int nfs_commit_inode(struct inode *inode, int how)
1331 {
1332         LIST_HEAD(head);
1333         int res;
1334
1335         spin_lock(&inode->i_lock);
1336         res = nfs_scan_commit(inode, &head, 0, 0);
1337         spin_unlock(&inode->i_lock);
1338         if (res) {
1339                 int error = nfs_commit_list(inode, &head, how);
1340                 if (error < 0)
1341                         return error;
1342         }
1343         return res;
1344 }
1345 #else
1346 static inline int nfs_commit_list(struct inode *inode, struct list_head *head, int how)
1347 {
1348         return 0;
1349 }
1350 #endif
1351
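/*
 * Flush helper: wait for every outstanding write request in the range
 * described by @wbc to complete, then commit any requests left on the
 * commit list (or cancel them if FLUSH_INVALIDATE is set). Commits are
 * skipped entirely when FLUSH_NOCOMMIT is requested. Returns 0 on
 * success or a negative error.
 */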
1352 long nfs_sync_mapping_wait(struct address_space *mapping, struct writeback_control *wbc, int how)
1353 {
1354         struct inode *inode = mapping->host;
1355         pgoff_t idx_start, idx_end;
1356         unsigned int npages = 0;
1357         LIST_HEAD(head);
1358         int nocommit = how & FLUSH_NOCOMMIT;
1359         long pages, ret;
1360
1361         /* FIXME */
1362         if (wbc->range_cyclic)
1363                 idx_start = 0;
1364         else {
1365                 idx_start = wbc->range_start >> PAGE_CACHE_SHIFT;
1366                 idx_end = wbc->range_end >> PAGE_CACHE_SHIFT;
1367                 if (idx_end > idx_start) {
1368                         pgoff_t l_npages = 1 + idx_end - idx_start;
1369                         npages = l_npages;
1370                         if (sizeof(npages) != sizeof(l_npages) &&
1371                                         (pgoff_t)npages != l_npages)
1372                                 npages = 0;
1373                 }
1374         }
1375         how &= ~FLUSH_NOCOMMIT;
1376         spin_lock(&inode->i_lock);
1377         do {
1378                 ret = nfs_wait_on_requests_locked(inode, idx_start, npages);
1379                 if (ret != 0)
1380                         continue;
1381                 if (nocommit)
1382                         break;
1383                 pages = nfs_scan_commit(inode, &head, idx_start, npages);
1384                 if (pages == 0)
1385                         break;
1386                 if (how & FLUSH_INVALIDATE) {
1387                         spin_unlock(&inode->i_lock);
1388                         nfs_cancel_commit_list(&head);
1389                         ret = pages;
1390                         spin_lock(&inode->i_lock);
1391                         continue;
1392                 }
1393                 pages += nfs_scan_commit(inode, &head, 0, 0);
1394                 spin_unlock(&inode->i_lock);
1395                 ret = nfs_commit_list(inode, &head, how);
1396                 spin_lock(&inode->i_lock);
1397
1398         } while (ret >= 0);
1399         spin_unlock(&inode->i_lock);
1400         return ret;
1401 }
1402
1403 static int __nfs_write_mapping(struct address_space *mapping, struct writeback_control *wbc, int how)
1404 {
1405         int ret;
1406
1407         ret = nfs_writepages(mapping, wbc);
1408         if (ret < 0)
1409                 goto out;
1410         ret = nfs_sync_mapping_wait(mapping, wbc, how);
1411         if (ret < 0)
1412                 goto out;
1413         return 0;
1414 out:
1415         __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
1416         return ret;
1417 }
1418
1419 /* Two pass sync: first using WB_SYNC_NONE, then WB_SYNC_ALL */
1420 static int nfs_write_mapping(struct address_space *mapping, int how)
1421 {
1422         struct writeback_control wbc = {
1423                 .bdi = mapping->backing_dev_info,
1424                 .sync_mode = WB_SYNC_NONE,
1425                 .nr_to_write = LONG_MAX,
1426                 .for_writepages = 1,
1427                 .range_cyclic = 1,
1428         };
1429         int ret;
1430
1431         ret = __nfs_write_mapping(mapping, &wbc, how);
1432         if (ret < 0)
1433                 return ret;
1434         wbc.sync_mode = WB_SYNC_ALL;
1435         return __nfs_write_mapping(mapping, &wbc, how);
1436 }
1437
1438 /*
1439  * flush the inode to disk.
1440  */
1441 int nfs_wb_all(struct inode *inode)
1442 {
1443         return nfs_write_mapping(inode->i_mapping, 0);
1444 }
1445
1446 int nfs_wb_nocommit(struct inode *inode)
1447 {
1448         return nfs_write_mapping(inode->i_mapping, FLUSH_NOCOMMIT);
1449 }
1450
1451 int nfs_wb_page_cancel(struct inode *inode, struct page *page)
1452 {
1453         struct nfs_page *req;
1454         loff_t range_start = page_offset(page);
1455         loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
1456         struct writeback_control wbc = {
1457                 .bdi = page->mapping->backing_dev_info,
1458                 .sync_mode = WB_SYNC_ALL,
1459                 .nr_to_write = LONG_MAX,
1460                 .range_start = range_start,
1461                 .range_end = range_end,
1462         };
1463         int ret = 0;
1464
1465         BUG_ON(!PageLocked(page));
1466         for (;;) {
1467                 req = nfs_page_find_request(page);
1468                 if (req == NULL)
1469                         goto out;
1470                 if (test_bit(PG_NEED_COMMIT, &req->wb_flags)) {
1471                         nfs_release_request(req);
1472                         break;
1473                 }
1474                 if (nfs_lock_request_dontget(req)) {
1475                         nfs_inode_remove_request(req);
1476                         /*
1477                          * In case nfs_inode_remove_request has marked the
1478                          * page as being dirty
1479                          */
1480                         cancel_dirty_page(page, PAGE_CACHE_SIZE);
1481                         nfs_unlock_request(req);
1482                         break;
1483                 }
1484                 ret = nfs_wait_on_request(req);
1485                 if (ret < 0)
1486                         goto out;
1487         }
1488         if (!PagePrivate(page))
1489                 return 0;
1490         ret = nfs_sync_mapping_wait(page->mapping, &wbc, FLUSH_INVALIDATE);
1491 out:
1492         return ret;
1493 }
1494
1495 static int nfs_wb_page_priority(struct inode *inode, struct page *page,
1496                                 int how)
1497 {
1498         loff_t range_start = page_offset(page);
1499         loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
1500         struct writeback_control wbc = {
1501                 .bdi = page->mapping->backing_dev_info,
1502                 .sync_mode = WB_SYNC_ALL,
1503                 .nr_to_write = LONG_MAX,
1504                 .range_start = range_start,
1505                 .range_end = range_end,
1506         };
1507         int ret;
1508
1509         do {
1510                 if (clear_page_dirty_for_io(page)) {
1511                         ret = nfs_writepage_locked(page, &wbc);
1512                         if (ret < 0)
1513                                 goto out_error;
1514                 } else if (!PagePrivate(page))
1515                         break;
1516                 ret = nfs_sync_mapping_wait(page->mapping, &wbc, how);
1517                 if (ret < 0)
1518                         goto out_error;
1519         } while (PagePrivate(page));
1520         return 0;
1521 out_error:
1522         __mark_inode_dirty(inode, I_DIRTY_PAGES);
1523         return ret;
1524 }
1525
1526 /*
1527  * Write back all requests on one page - we do this before reading it.
1528  */
1529 int nfs_wb_page(struct inode *inode, struct page* page)
1530 {
1531         return nfs_wb_page_priority(inode, page, FLUSH_STABLE);
1532 }
1533
1534 int __init nfs_init_writepagecache(void)
1535 {
1536         nfs_wdata_cachep = kmem_cache_create("nfs_write_data",
1537                                              sizeof(struct nfs_write_data),
1538                                              0, SLAB_HWCACHE_ALIGN,
1539                                              NULL);
1540         if (nfs_wdata_cachep == NULL)
1541                 return -ENOMEM;
1542
1543         nfs_wdata_mempool = mempool_create_slab_pool(MIN_POOL_WRITE,
1544                                                      nfs_wdata_cachep);
1545         if (nfs_wdata_mempool == NULL)
1546                 return -ENOMEM;
1547
1548         nfs_commit_mempool = mempool_create_slab_pool(MIN_POOL_COMMIT,
1549                                                       nfs_wdata_cachep);
1550         if (nfs_commit_mempool == NULL)
1551                 return -ENOMEM;
1552
1553         /*
1554          * NFS congestion size, scale with available memory.
1555          *
1556          *  64MB:    8192k
1557          * 128MB:   11585k
1558          * 256MB:   16384k
1559          * 512MB:   23170k
1560          *   1GB:   32768k
1561          *   2GB:   46340k
1562          *   4GB:   65536k
1563          *   8GB:   92681k
1564          *  16GB:  131072k
1565          *
1566          * This allows larger machines to have larger/more transfers.
1567          * Limit the default to 256M
1568          */
1569         nfs_congestion_kb = (16*int_sqrt(totalram_pages)) << (PAGE_SHIFT-10);
1570         if (nfs_congestion_kb > 256*1024)
1571                 nfs_congestion_kb = 256*1024;
1572
1573         return 0;
1574 }
1575
1576 void nfs_destroy_writepagecache(void)
1577 {
1578         mempool_destroy(nfs_commit_mempool);
1579         mempool_destroy(nfs_wdata_mempool);
1580         kmem_cache_destroy(nfs_wdata_cachep);
1581 }
1582