GFS2: change gfs2_quota_scan into a shrinker
linux-2.6.git: fs/gfs2/ops_address.c
1 /*
2  * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
3  * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
4  *
5  * This copyrighted material is made available to anyone wishing to use,
6  * modify, copy, or redistribute it subject to the terms and conditions
7  * of the GNU General Public License version 2.
8  */
9
10 #include <linux/sched.h>
11 #include <linux/slab.h>
12 #include <linux/spinlock.h>
13 #include <linux/completion.h>
14 #include <linux/buffer_head.h>
15 #include <linux/pagemap.h>
16 #include <linux/pagevec.h>
17 #include <linux/mpage.h>
18 #include <linux/fs.h>
19 #include <linux/writeback.h>
20 #include <linux/swap.h>
21 #include <linux/gfs2_ondisk.h>
22 #include <linux/lm_interface.h>
23 #include <linux/backing-dev.h>
24
25 #include "gfs2.h"
26 #include "incore.h"
27 #include "bmap.h"
28 #include "glock.h"
29 #include "inode.h"
30 #include "log.h"
31 #include "meta_io.h"
32 #include "ops_address.h"
33 #include "quota.h"
34 #include "trans.h"
35 #include "rgrp.h"
36 #include "super.h"
37 #include "util.h"
38 #include "glops.h"
39
40
41 static void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page,
42                                    unsigned int from, unsigned int to)
43 {
44         struct buffer_head *head = page_buffers(page);
45         unsigned int bsize = head->b_size;
46         struct buffer_head *bh;
47         unsigned int start, end;
48
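        /*
         * Walk the circular list of buffer_heads attached to the page.
         * The "bh != head || !start" test lets the first buffer (where
         * bh == head and start == 0) be processed, and the loop ends
         * once we have wrapped back round to the head.
         */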
49         for (bh = head, start = 0; bh != head || !start;
50              bh = bh->b_this_page, start = end) {
51                 end = start + bsize;
52                 if (end <= from || start >= to)
53                         continue;
54                 if (gfs2_is_jdata(ip))
55                         set_buffer_uptodate(bh);
56                 gfs2_trans_add_bh(ip->i_gl, bh, 0);
57         }
58 }
59
60 /**
61  * gfs2_get_block_noalloc - Fills in a buffer head with details about a block
62  * @inode: The inode
63  * @lblock: The block number to look up
64  * @bh_result: The buffer head to return the result in
65  * @create: Non-zero if we may add a block to the file
66  *
67  * Returns: errno
68  */
69
70 static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock,
71                                   struct buffer_head *bh_result, int create)
72 {
73         int error;
74
75         error = gfs2_block_map(inode, lblock, bh_result, 0);
76         if (error)
77                 return error;
78         if (!buffer_mapped(bh_result))
79                 return -EIO;
80         return 0;
81 }
82
83 static int gfs2_get_block_direct(struct inode *inode, sector_t lblock,
84                                  struct buffer_head *bh_result, int create)
85 {
86         return gfs2_block_map(inode, lblock, bh_result, 0);
87 }
88
89 /**
90  * gfs2_writepage_common - Common bits of writepage
91  * @page: The page to be written
92  * @wbc: The writeback control
93  *
94  * Returns: 1 if writepage can proceed, an error code, or zero if the page was handled without error.
95  */
96
97 static int gfs2_writepage_common(struct page *page,
98                                  struct writeback_control *wbc)
99 {
100         struct inode *inode = page->mapping->host;
101         struct gfs2_inode *ip = GFS2_I(inode);
102         struct gfs2_sbd *sdp = GFS2_SB(inode);
103         loff_t i_size = i_size_read(inode);
104         pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
105         unsigned offset;
106
107         if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
108                 goto out;
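        /*
         * A transaction is already open on this task (journal_info set):
         * writing the page now could recurse into the journal, so redirty
         * it and let normal writeback handle it later.
         */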
109         if (current->journal_info)
110                 goto redirty;
111         /* Is the page fully outside i_size? (truncate in progress) */
112         offset = i_size & (PAGE_CACHE_SIZE-1);
113         if (page->index > end_index || (page->index == end_index && !offset)) {
114                 page->mapping->a_ops->invalidatepage(page, 0);
115                 goto out;
116         }
117         return 1;
118 redirty:
119         redirty_page_for_writepage(wbc, page);
120 out:
121         unlock_page(page);
122         return 0;
123 }
124
125 /**
126  * gfs2_writeback_writepage - Write page for writeback mappings
127  * @page: The page
128  * @wbc: The writeback control
129  *
130  */
131
132 static int gfs2_writeback_writepage(struct page *page,
133                                     struct writeback_control *wbc)
134 {
135         int ret;
136
137         ret = gfs2_writepage_common(page, wbc);
138         if (ret <= 0)
139                 return ret;
140
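        /*
         * Try the extent-based mpage path first; if it returns -EAGAIN,
         * fall back to the buffer_head based block_write_full_page().
         */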
141         ret = mpage_writepage(page, gfs2_get_block_noalloc, wbc);
142         if (ret == -EAGAIN)
143                 ret = block_write_full_page(page, gfs2_get_block_noalloc, wbc);
144         return ret;
145 }
146
147 /**
148  * gfs2_ordered_writepage - Write page for ordered data files
149  * @page: The page to write
150  * @wbc: The writeback control
151  *
152  */
153
154 static int gfs2_ordered_writepage(struct page *page,
155                                   struct writeback_control *wbc)
156 {
157         struct inode *inode = page->mapping->host;
158         struct gfs2_inode *ip = GFS2_I(inode);
159         int ret;
160
161         ret = gfs2_writepage_common(page, wbc);
162         if (ret <= 0)
163                 return ret;
164
165         if (!page_has_buffers(page)) {
166                 create_empty_buffers(page, inode->i_sb->s_blocksize,
167                                      (1 << BH_Dirty)|(1 << BH_Uptodate));
168         }
169         gfs2_page_add_databufs(ip, page, 0, inode->i_sb->s_blocksize-1);
170         return block_write_full_page(page, gfs2_get_block_noalloc, wbc);
171 }
172
173 /**
174  * __gfs2_jdata_writepage - The core of jdata writepage
175  * @page: The page to write
176  * @wbc: The writeback control
177  *
178  * This is shared between writepage and writepages and implements the
179  * core of the writepage operation. If a transaction is required then
180  * PageChecked will have been set and the transaction will have
181  * already been started before this is called.
182  */
183
184 static int __gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
185 {
186         struct inode *inode = page->mapping->host;
187         struct gfs2_inode *ip = GFS2_I(inode);
188         struct gfs2_sbd *sdp = GFS2_SB(inode);
189
190         if (PageChecked(page)) {
191                 ClearPageChecked(page);
192                 if (!page_has_buffers(page)) {
193                         create_empty_buffers(page, inode->i_sb->s_blocksize,
194                                              (1 << BH_Dirty)|(1 << BH_Uptodate));
195                 }
196                 gfs2_page_add_databufs(ip, page, 0, sdp->sd_vfs->s_blocksize-1);
197         }
198         return block_write_full_page(page, gfs2_get_block_noalloc, wbc);
199 }
200
201 /**
202  * gfs2_jdata_writepage - Write complete page
203  * @page: Page to write
204  * @wbc: The writeback control
205  *
206  * Returns: errno
207  */
208
209 static int gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
210 {
211         struct inode *inode = page->mapping->host;
212         struct gfs2_sbd *sdp = GFS2_SB(inode);
213         int ret;
214         int done_trans = 0;
215
216         if (PageChecked(page)) {
217                 if (wbc->sync_mode != WB_SYNC_ALL)
218                         goto out_ignore;
219                 ret = gfs2_trans_begin(sdp, RES_DINODE + 1, 0);
220                 if (ret)
221                         goto out_ignore;
222                 done_trans = 1;
223         }
224         ret = gfs2_writepage_common(page, wbc);
225         if (ret > 0)
226                 ret = __gfs2_jdata_writepage(page, wbc);
227         if (done_trans)
228                 gfs2_trans_end(sdp);
229         return ret;
230
231 out_ignore:
232         redirty_page_for_writepage(wbc, page);
233         unlock_page(page);
234         return 0;
235 }
236
237 /**
238  * gfs2_writeback_writepages - Write a bunch of dirty pages back to disk
239  * @mapping: The mapping to write
240  * @wbc: Write-back control
241  *
242  * For the data=writeback case we can already ignore buffer heads
243  * and write whole extents at once. This is a big reduction in the
244  * number of I/O requests we send and the bmap calls we make in this case.
245  */
246 static int gfs2_writeback_writepages(struct address_space *mapping,
247                                      struct writeback_control *wbc)
248 {
249         return mpage_writepages(mapping, wbc, gfs2_get_block_noalloc);
250 }
251
252 /**
253  * gfs2_write_jdata_pagevec - Write back a pagevec's worth of pages
254  * @mapping: The mapping
255  * @wbc: The writeback control
256  * @pvec: The vector of pages
257  * @nr_pages: The number of pages to write
258  * @end: The last page index in the range to write
259  *
260  * Returns: non-zero if loop should terminate, zero otherwise
261  */
262
263 static int gfs2_write_jdata_pagevec(struct address_space *mapping,
264                                     struct writeback_control *wbc,
265                                     struct pagevec *pvec,
266                                     int nr_pages, pgoff_t end)
267 {
268         struct inode *inode = mapping->host;
269         struct gfs2_sbd *sdp = GFS2_SB(inode);
270         loff_t i_size = i_size_read(inode);
271         pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
272         unsigned offset = i_size & (PAGE_CACHE_SIZE-1);
273         unsigned nrblocks = nr_pages * (PAGE_CACHE_SIZE/inode->i_sb->s_blocksize);
274         struct backing_dev_info *bdi = mapping->backing_dev_info;
275         int i;
276         int ret;
277
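        /*
         * Open the transaction before taking any page locks (see
         * gfs2_write_cache_jdata() below for why the ordering matters),
         * reserving space for every block this pagevec might dirty.
         */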
278         ret = gfs2_trans_begin(sdp, nrblocks, nrblocks);
279         if (ret < 0)
280                 return ret;
281
282         for(i = 0; i < nr_pages; i++) {
283                 struct page *page = pvec->pages[i];
284
285                 lock_page(page);
286
287                 if (unlikely(page->mapping != mapping)) {
288                         unlock_page(page);
289                         continue;
290                 }
291
292                 if (!wbc->range_cyclic && page->index > end) {
293                         ret = 1;
294                         unlock_page(page);
295                         continue;
296                 }
297
298                 if (wbc->sync_mode != WB_SYNC_NONE)
299                         wait_on_page_writeback(page);
300
301                 if (PageWriteback(page) ||
302                     !clear_page_dirty_for_io(page)) {
303                         unlock_page(page);
304                         continue;
305                 }
306
307                 /* Is the page fully outside i_size? (truncate in progress) */
308                 if (page->index > end_index || (page->index == end_index && !offset)) {
309                         page->mapping->a_ops->invalidatepage(page, 0);
310                         unlock_page(page);
311                         continue;
312                 }
313
314                 ret = __gfs2_jdata_writepage(page, wbc);
315
316                 if (ret || (--(wbc->nr_to_write) <= 0))
317                         ret = 1;
318                 if (wbc->nonblocking && bdi_write_congested(bdi)) {
319                         wbc->encountered_congestion = 1;
320                         ret = 1;
321                 }
322
323         }
324         gfs2_trans_end(sdp);
325         return ret;
326 }
327
328 /**
329  * gfs2_write_cache_jdata - Like write_cache_pages but different
330  * @mapping: The mapping to write
331  * @wbc: The writeback control
334  *
335  * The reason that we use our own function here is that we need to
336  * start transactions before we grab page locks. This allows us
337  * to get the ordering right.
338  */
339
340 static int gfs2_write_cache_jdata(struct address_space *mapping,
341                                   struct writeback_control *wbc)
342 {
343         struct backing_dev_info *bdi = mapping->backing_dev_info;
344         int ret = 0;
345         int done = 0;
346         struct pagevec pvec;
347         int nr_pages;
348         pgoff_t index;
349         pgoff_t end;
350         int scanned = 0;
351         int range_whole = 0;
352
353         if (wbc->nonblocking && bdi_write_congested(bdi)) {
354                 wbc->encountered_congestion = 1;
355                 return 0;
356         }
357
358         pagevec_init(&pvec, 0);
359         if (wbc->range_cyclic) {
360                 index = mapping->writeback_index; /* Start from prev offset */
361                 end = -1;
362         } else {
363                 index = wbc->range_start >> PAGE_CACHE_SHIFT;
364                 end = wbc->range_end >> PAGE_CACHE_SHIFT;
365                 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
366                         range_whole = 1;
367                 scanned = 1;
368         }
369
370 retry:
371          while (!done && (index <= end) &&
372                 (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
373                                                PAGECACHE_TAG_DIRTY,
374                                                min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
375                 scanned = 1;
376                 ret = gfs2_write_jdata_pagevec(mapping, wbc, &pvec, nr_pages, end);
377                 if (ret)
378                         done = 1;
379                 if (ret > 0)
380                         ret = 0;
381
382                 pagevec_release(&pvec);
383                 cond_resched();
384         }
385
386         if (!scanned && !done) {
387                 /*
388                  * We hit the last page and there is more work to be done: wrap
389                  * back to the start of the file
390                  */
391                 scanned = 1;
392                 index = 0;
393                 goto retry;
394         }
395
396         if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
397                 mapping->writeback_index = index;
398         return ret;
399 }
400
401
402 /**
403  * gfs2_jdata_writepages - Write a bunch of dirty pages back to disk
404  * @mapping: The mapping to write
405  * @wbc: The writeback control
406  * Returns: errno
407  */
408
409 static int gfs2_jdata_writepages(struct address_space *mapping,
410                                  struct writeback_control *wbc)
411 {
412         struct gfs2_inode *ip = GFS2_I(mapping->host);
413         struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
414         int ret;
415
416         ret = gfs2_write_cache_jdata(mapping, wbc);
417         if (ret == 0 && wbc->sync_mode == WB_SYNC_ALL) {
418                 gfs2_log_flush(sdp, ip->i_gl);
419                 ret = gfs2_write_cache_jdata(mapping, wbc);
420         }
421         return ret;
422 }
423
424 /**
425  * stuffed_readpage - Fill in a Linux page with stuffed file data
426  * @ip: the inode
427  * @page: the page
428  *
429  * Returns: errno
430  */
431
432 static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
433 {
434         struct buffer_head *dibh;
435         void *kaddr;
436         int error;
437
438         /*
439          * Due to the order of unstuffing files and ->fault(), we can be
440          * asked for a zero page in the case of a stuffed file being extended,
441          * so we need to supply one here. It doesn't happen often.
442          */
443         if (unlikely(page->index)) {
444                 zero_user(page, 0, PAGE_CACHE_SIZE);
445                 SetPageUptodate(page);
446                 return 0;
447         }
448
449         error = gfs2_meta_inode_buffer(ip, &dibh);
450         if (error)
451                 return error;
452
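        /*
         * Copy the inline data that follows the on-disk dinode into the
         * page and zero the remainder of the page.
         */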
453         kaddr = kmap_atomic(page, KM_USER0);
454         memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode),
455                ip->i_disksize);
456         memset(kaddr + ip->i_disksize, 0, PAGE_CACHE_SIZE - ip->i_disksize);
457         kunmap_atomic(kaddr, KM_USER0);
458         flush_dcache_page(page);
459         brelse(dibh);
460         SetPageUptodate(page);
461
462         return 0;
463 }
464
465
466 /**
467  * __gfs2_readpage - readpage
468  * @file: The file to read a page for
469  * @page: The page to read
470  *
471  * This is the core of gfs2's readpage. It's used by the internal file
472  * reading code, since in that case we already hold the glock. It's also
473  * called by gfs2_readpage() once the required lock has been granted.
474  *
475  */
476
477 static int __gfs2_readpage(void *file, struct page *page)
478 {
479         struct gfs2_inode *ip = GFS2_I(page->mapping->host);
480         struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
481         int error;
482
483         if (gfs2_is_stuffed(ip)) {
484                 error = stuffed_readpage(ip, page);
485                 unlock_page(page);
486         } else {
487                 error = mpage_readpage(page, gfs2_block_map);
488         }
489
490         if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
491                 return -EIO;
492
493         return error;
494 }
495
496 /**
497  * gfs2_readpage - read a page of a file
498  * @file: The file to read
499  * @page: The page of the file
500  *
501  * This deals with the locking required. We have to unlock and
502  * relock the page in order to get the locking in the right
503  * order.
504  */
505
506 static int gfs2_readpage(struct file *file, struct page *page)
507 {
508         struct address_space *mapping = page->mapping;
509         struct gfs2_inode *ip = GFS2_I(mapping->host);
510         struct gfs2_holder gh;
511         int error;
512
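        /*
         * The page arrives locked, but the glock must be acquired before
         * the page lock: drop the page lock, take the glock, then relock
         * the page and check that it is still valid.
         */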
513         unlock_page(page);
514         gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
515         error = gfs2_glock_nq(&gh);
516         if (unlikely(error))
517                 goto out;
518         error = AOP_TRUNCATED_PAGE;
519         lock_page(page);
520         if (page->mapping == mapping && !PageUptodate(page))
521                 error = __gfs2_readpage(file, page);
522         else
523                 unlock_page(page);
524         gfs2_glock_dq(&gh);
525 out:
526         gfs2_holder_uninit(&gh);
527         if (error && error != AOP_TRUNCATED_PAGE)
528                 lock_page(page);
529         return error;
530 }
531
532 /**
533  * gfs2_internal_read - read an internal file
534  * @ip: The gfs2 inode
535  * @ra_state: The readahead state (or NULL for no readahead)
536  * @buf: The buffer to fill
537  * @pos: The file position
538  * @size: The amount to read
539  *
540  */
541
542 int gfs2_internal_read(struct gfs2_inode *ip, struct file_ra_state *ra_state,
543                        char *buf, loff_t *pos, unsigned size)
544 {
545         struct address_space *mapping = ip->i_inode.i_mapping;
546         unsigned long index = *pos / PAGE_CACHE_SIZE;
547         unsigned offset = *pos & (PAGE_CACHE_SIZE - 1);
548         unsigned copied = 0;
549         unsigned amt;
550         struct page *page;
551         void *p;
552
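        /*
         * Copy out one page at a time via the page cache. __gfs2_readpage()
         * is used directly since the caller already holds the glock.
         */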
553         do {
554                 amt = size - copied;
555                 if (offset + size > PAGE_CACHE_SIZE)
556                         amt = PAGE_CACHE_SIZE - offset;
557                 page = read_cache_page(mapping, index, __gfs2_readpage, NULL);
558                 if (IS_ERR(page))
559                         return PTR_ERR(page);
560                 p = kmap_atomic(page, KM_USER0);
561                 memcpy(buf + copied, p + offset, amt);
562                 kunmap_atomic(p, KM_USER0);
563                 mark_page_accessed(page);
564                 page_cache_release(page);
565                 copied += amt;
566                 index++;
567                 offset = 0;
568         } while(copied < size);
569         (*pos) += size;
570         return size;
571 }
572
573 /**
574  * gfs2_readpages - Read a bunch of pages at once
575  *
576  * Some notes:
577  * 1. This is only for readahead, so we can simply ignore anything which
578  *    is slightly inconvenient (such as locking conflicts between the
579  *    page lock and the glock) and return having done no I/O. It's
580  *    obviously not something we'd want to do on too regular a basis.
581  *    Any I/O we ignore at this time will be done via readpage later.
582  * 2. We don't handle stuffed files here; we let readpage do the honours.
583  * 3. mpage_readpages() does most of the heavy lifting in the common case.
584  * 4. gfs2_block_map() is relied upon to set BH_Boundary in the right places.
585  */
586
587 static int gfs2_readpages(struct file *file, struct address_space *mapping,
588                           struct list_head *pages, unsigned nr_pages)
589 {
590         struct inode *inode = mapping->host;
591         struct gfs2_inode *ip = GFS2_I(inode);
592         struct gfs2_sbd *sdp = GFS2_SB(inode);
593         struct gfs2_holder gh;
594         int ret;
595
596         gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
597         ret = gfs2_glock_nq(&gh);
598         if (unlikely(ret))
599                 goto out_uninit;
600         if (!gfs2_is_stuffed(ip))
601                 ret = mpage_readpages(mapping, pages, nr_pages, gfs2_block_map);
602         gfs2_glock_dq(&gh);
603 out_uninit:
604         gfs2_holder_uninit(&gh);
605         if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
606                 ret = -EIO;
607         return ret;
608 }
609
610 /**
611  * gfs2_write_begin - Begin to write to a file
612  * @file: The file to write to
613  * @mapping: The mapping in which to write
614  * @pos: The file offset at which to start writing
615  * @len: Length of the write
616  * @flags: Various flags
617  * @pagep: Pointer to return the page
618  * @fsdata: Pointer to return fs data (unused by GFS2)
619  *
620  * Returns: errno
621  */
622
623 static int gfs2_write_begin(struct file *file, struct address_space *mapping,
624                             loff_t pos, unsigned len, unsigned flags,
625                             struct page **pagep, void **fsdata)
626 {
627         struct gfs2_inode *ip = GFS2_I(mapping->host);
628         struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
629         unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
630         int alloc_required;
631         int error = 0;
632         struct gfs2_alloc *al;
633         pgoff_t index = pos >> PAGE_CACHE_SHIFT;
634         unsigned from = pos & (PAGE_CACHE_SIZE - 1);
635         unsigned to = from + len;
636         struct page *page;
637
638         gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh);
639         error = gfs2_glock_nq(&ip->i_gh);
640         if (unlikely(error))
641                 goto out_uninit;
642
643         error = gfs2_write_alloc_required(ip, pos, len, &alloc_required);
644         if (error)
645                 goto out_unlock;
646
647         if (alloc_required || gfs2_is_jdata(ip))
648                 gfs2_write_calc_reserv(ip, len, &data_blocks, &ind_blocks);
649
650         if (alloc_required) {
651                 al = gfs2_alloc_get(ip);
652                 if (!al) {
653                         error = -ENOMEM;
654                         goto out_unlock;
655                 }
656
657                 error = gfs2_quota_lock_check(ip);
658                 if (error)
659                         goto out_alloc_put;
660
661                 al->al_requested = data_blocks + ind_blocks;
662                 error = gfs2_inplace_reserve(ip);
663                 if (error)
664                         goto out_qunlock;
665         }
666
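        /*
         * Estimate the journal reservation: the dinode, any indirect
         * blocks, the data blocks themselves for jdata, plus statfs and
         * quota changes whenever data or indirect blocks are involved.
         */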
667         rblocks = RES_DINODE + ind_blocks;
668         if (gfs2_is_jdata(ip))
669                 rblocks += data_blocks ? data_blocks : 1;
670         if (ind_blocks || data_blocks)
671                 rblocks += RES_STATFS + RES_QUOTA;
672
673         error = gfs2_trans_begin(sdp, rblocks,
674                                  PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize);
675         if (error)
676                 goto out_trans_fail;
677
678         error = -ENOMEM;
679         flags |= AOP_FLAG_NOFS;
680         page = grab_cache_page_write_begin(mapping, index, flags);
681         *pagep = page;
682         if (unlikely(!page))
683                 goto out_endtrans;
684
685         if (gfs2_is_stuffed(ip)) {
686                 error = 0;
687                 if (pos + len > sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)) {
688                         error = gfs2_unstuff_dinode(ip, page);
689                         if (error == 0)
690                                 goto prepare_write;
691                 } else if (!PageUptodate(page)) {
692                         error = stuffed_readpage(ip, page);
693                 }
694                 goto out;
695         }
696
697 prepare_write:
698         error = block_prepare_write(page, from, to, gfs2_block_map);
699 out:
700         if (error == 0)
701                 return 0;
702
703         page_cache_release(page);
704         if (pos + len > ip->i_inode.i_size)
705                 vmtruncate(&ip->i_inode, ip->i_inode.i_size);
706 out_endtrans:
707         gfs2_trans_end(sdp);
708 out_trans_fail:
709         if (alloc_required) {
710                 gfs2_inplace_release(ip);
711 out_qunlock:
712                 gfs2_quota_unlock(ip);
713 out_alloc_put:
714                 gfs2_alloc_put(ip);
715         }
716 out_unlock:
717         gfs2_glock_dq(&ip->i_gh);
718 out_uninit:
719         gfs2_holder_uninit(&ip->i_gh);
720         return error;
721 }
722
723 /**
724  * adjust_fs_space - Adjusts the free space available due to gfs2_grow
725  * @inode: the rindex inode
726  */
727 static void adjust_fs_space(struct inode *inode)
728 {
729         struct gfs2_sbd *sdp = inode->i_sb->s_fs_info;
730         struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
731         struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
732         u64 fs_total, new_free;
733
734         /* Total up the file system space, according to the latest rindex. */
735         fs_total = gfs2_ri_total(sdp);
736
737         spin_lock(&sdp->sd_statfs_spin);
738         if (fs_total > (m_sc->sc_total + l_sc->sc_total))
739                 new_free = fs_total - (m_sc->sc_total + l_sc->sc_total);
740         else
741                 new_free = 0;
742         spin_unlock(&sdp->sd_statfs_spin);
743         fs_warn(sdp, "File system extended by %llu blocks.\n",
744                 (unsigned long long)new_free);
745         gfs2_statfs_change(sdp, new_free, new_free, 0);
746 }
747
748 /**
749  * gfs2_stuffed_write_end - Write end for stuffed files
750  * @inode: The inode
751  * @dibh: The buffer_head containing the on-disk inode
752  * @pos: The file position
753  * @len: The length of the write
754  * @copied: How much was actually copied by the VFS
755  * @page: The page
756  *
757  * This copies the data from the page into the inode block after
758  * the inode data structure itself.
759  *
760  * Returns: errno
761  */
762 static int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh,
763                                   loff_t pos, unsigned len, unsigned copied,
764                                   struct page *page)
765 {
766         struct gfs2_inode *ip = GFS2_I(inode);
767         struct gfs2_sbd *sdp = GFS2_SB(inode);
768         u64 to = pos + copied;
769         void *kaddr;
770         unsigned char *buf = dibh->b_data + sizeof(struct gfs2_dinode);
771         struct gfs2_dinode *di = (struct gfs2_dinode *)dibh->b_data;
772
773         BUG_ON((pos + len) > (dibh->b_size - sizeof(struct gfs2_dinode)));
774         kaddr = kmap_atomic(page, KM_USER0);
775         memcpy(buf + pos, kaddr + pos, copied);
776         memset(kaddr + pos + copied, 0, len - copied);
777         flush_dcache_page(page);
778         kunmap_atomic(kaddr, KM_USER0);
779
780         if (!PageUptodate(page))
781                 SetPageUptodate(page);
782         unlock_page(page);
783         page_cache_release(page);
784
785         if (inode->i_size < to) {
786                 i_size_write(inode, to);
787                 ip->i_disksize = inode->i_size;
788                 di->di_size = cpu_to_be64(inode->i_size);
789                 mark_inode_dirty(inode);
790         }
791
792         if (inode == sdp->sd_rindex)
793                 adjust_fs_space(inode);
794
795         brelse(dibh);
796         gfs2_trans_end(sdp);
797         gfs2_glock_dq(&ip->i_gh);
798         gfs2_holder_uninit(&ip->i_gh);
799         return copied;
800 }
801
802 /**
803  * gfs2_write_end
804  * @file: The file to write to
805  * @mapping: The address space to write to
806  * @pos: The file position
807  * @len: The length of the data
808  * @copied: How much was actually copied by the VFS
809  * @page: The page that has been written
810  * @fsdata: The fsdata (unused in GFS2)
811  *
812  * The main write_end function for GFS2. We have a separate one for
813  * stuffed files, as they are slightly different; otherwise we just
814  * put our locking around the VFS-provided functions.
815  *
816  * Returns: errno
817  */
818
819 static int gfs2_write_end(struct file *file, struct address_space *mapping,
820                           loff_t pos, unsigned len, unsigned copied,
821                           struct page *page, void *fsdata)
822 {
823         struct inode *inode = page->mapping->host;
824         struct gfs2_inode *ip = GFS2_I(inode);
825         struct gfs2_sbd *sdp = GFS2_SB(inode);
826         struct buffer_head *dibh;
827         struct gfs2_alloc *al = ip->i_alloc;
828         struct gfs2_dinode *di;
829         unsigned int from = pos & (PAGE_CACHE_SIZE - 1);
830         unsigned int to = from + len;
831         int ret;
832
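        /*
         * gfs2_write_begin() left the glock held and a transaction open;
         * stuffed files are completed by gfs2_stuffed_write_end() instead
         * of generic_write_end().
         */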
833         BUG_ON(gfs2_glock_is_locked_by_me(ip->i_gl) == NULL);
834
835         ret = gfs2_meta_inode_buffer(ip, &dibh);
836         if (unlikely(ret)) {
837                 unlock_page(page);
838                 page_cache_release(page);
839                 goto failed;
840         }
841
842         gfs2_trans_add_bh(ip->i_gl, dibh, 1);
843
844         if (gfs2_is_stuffed(ip))
845                 return gfs2_stuffed_write_end(inode, dibh, pos, len, copied, page);
846
847         if (!gfs2_is_writeback(ip))
848                 gfs2_page_add_databufs(ip, page, from, to);
849
850         ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
851
852         if (likely(ret >= 0) && (inode->i_size > ip->i_disksize)) {
853                 di = (struct gfs2_dinode *)dibh->b_data;
854                 ip->i_disksize = inode->i_size;
855                 di->di_size = cpu_to_be64(inode->i_size);
856                 mark_inode_dirty(inode);
857         }
858
859         if (inode == sdp->sd_rindex)
860                 adjust_fs_space(inode);
861
862         brelse(dibh);
863         gfs2_trans_end(sdp);
864 failed:
865         if (al) {
866                 gfs2_inplace_release(ip);
867                 gfs2_quota_unlock(ip);
868                 gfs2_alloc_put(ip);
869         }
870         gfs2_glock_dq(&ip->i_gh);
871         gfs2_holder_uninit(&ip->i_gh);
872         return ret;
873 }
874
875 /**
876  * gfs2_set_page_dirty - Page dirtying function
877  * @page: The page to dirty
878  *
879  * Returns: 1 if it dirtied the page, or 0 otherwise
880  */
881
882 static int gfs2_set_page_dirty(struct page *page)
883 {
884         SetPageChecked(page);
885         return __set_page_dirty_buffers(page);
886 }
887
888 /**
889  * gfs2_bmap - Block map function
890  * @mapping: Address space info
891  * @lblock: The block to map
892  *
893  * Returns: The disk address for the block or 0 on hole or error
894  */
895
896 static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock)
897 {
898         struct gfs2_inode *ip = GFS2_I(mapping->host);
899         struct gfs2_holder i_gh;
900         sector_t dblock = 0;
901         int error;
902
903         error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
904         if (error)
905                 return 0;
906
907         if (!gfs2_is_stuffed(ip))
908                 dblock = generic_block_bmap(mapping, lblock, gfs2_block_map);
909
910         gfs2_glock_dq_uninit(&i_gh);
911
912         return dblock;
913 }
914
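/*
 * gfs2_discard - detach a buffer from the journal and clear its state
 * before the page it belongs to is invalidated.
 */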
915 static void gfs2_discard(struct gfs2_sbd *sdp, struct buffer_head *bh)
916 {
917         struct gfs2_bufdata *bd;
918
919         lock_buffer(bh);
920         gfs2_log_lock(sdp);
921         clear_buffer_dirty(bh);
922         bd = bh->b_private;
923         if (bd) {
924                 if (!list_empty(&bd->bd_le.le_list) && !buffer_pinned(bh))
925                         list_del_init(&bd->bd_le.le_list);
926                 else
927                         gfs2_remove_from_journal(bh, current->journal_info, 0);
928         }
929         bh->b_bdev = NULL;
930         clear_buffer_mapped(bh);
931         clear_buffer_req(bh);
932         clear_buffer_new(bh);
933         gfs2_log_unlock(sdp);
934         unlock_buffer(bh);
935 }
936
937 static void gfs2_invalidatepage(struct page *page, unsigned long offset)
938 {
939         struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
940         struct buffer_head *bh, *head;
941         unsigned long pos = 0;
942
943         BUG_ON(!PageLocked(page));
944         if (offset == 0)
945                 ClearPageChecked(page);
946         if (!page_has_buffers(page))
947                 goto out;
948
949         bh = head = page_buffers(page);
950         do {
951                 if (offset <= pos)
952                         gfs2_discard(sdp, bh);
953                 pos += bh->b_size;
954                 bh = bh->b_this_page;
955         } while (bh != head);
956 out:
957         if (offset == 0)
958                 try_to_release_page(page, 0);
959 }
960
961 /**
962  * gfs2_ok_for_dio - check that dio is valid on this file
963  * @ip: The inode
964  * @rw: READ or WRITE
965  * @offset: The offset at which we are reading or writing
966  *
967  * Returns: 0 (to ignore the i/o request and thus fall back to buffered i/o)
968  *          1 (to accept the i/o request)
969  */
970 static int gfs2_ok_for_dio(struct gfs2_inode *ip, int rw, loff_t offset)
971 {
972         /*
973          * Should we return an error here? I can't see that O_DIRECT for
974          * a stuffed file makes any sense. For now we'll silently fall
975          * back to buffered I/O
976          */
977         if (gfs2_is_stuffed(ip))
978                 return 0;
979
980         if (offset >= i_size_read(&ip->i_inode))
981                 return 0;
982         return 1;
983 }
984
985
986
987 static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb,
988                               const struct iovec *iov, loff_t offset,
989                               unsigned long nr_segs)
990 {
991         struct file *file = iocb->ki_filp;
992         struct inode *inode = file->f_mapping->host;
993         struct gfs2_inode *ip = GFS2_I(inode);
994         struct gfs2_holder gh;
995         int rv;
996
997         /*
998          * Deferred lock, even if it's a write, since we do no allocation
999          * on this path. All we need to change is atime, and this lock mode
1000          * ensures that other nodes have flushed their buffered read caches
1001          * (i.e. their page cache entries for this inode). We do not,
1002          * unfortunately, have the option of only flushing a range like
1003          * the VFS does.
1004          */
1005         gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, 0, &gh);
1006         rv = gfs2_glock_nq(&gh);
1007         if (rv)
1008                 return rv;
1009         rv = gfs2_ok_for_dio(ip, rw, offset);
1010         if (rv != 1)
1011                 goto out; /* dio not valid, fall back to buffered i/o */
1012
1013         rv = blockdev_direct_IO_no_locking(rw, iocb, inode, inode->i_sb->s_bdev,
1014                                            iov, offset, nr_segs,
1015                                            gfs2_get_block_direct, NULL);
1016 out:
1017         gfs2_glock_dq_m(1, &gh);
1018         gfs2_holder_uninit(&gh);
1019         return rv;
1020 }
1021
1022 /**
1023  * gfs2_releasepage - free the metadata associated with a page
1024  * @page: the page that's being released
1025  * @gfp_mask: passed from Linux VFS, ignored by us
1026  *
1027  * Call try_to_free_buffers() if the buffers in this page can be
1028  * released.
1029  *
1030  * Returns: 1 if the buffers were released, otherwise 0
1031  */
1032
1033 int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
1034 {
1035         struct inode *aspace = page->mapping->host;
1036         struct gfs2_sbd *sdp = aspace->i_sb->s_fs_info;
1037         struct buffer_head *bh, *head;
1038         struct gfs2_bufdata *bd;
1039
1040         if (!page_has_buffers(page))
1041                 return 0;
1042
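        /*
         * First pass: under the log lock, check that no buffer on the
         * page is still referenced (elevated b_count) or sitting on an
         * AIL list; only then go on to detach the bufdata structures.
         */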
1043         gfs2_log_lock(sdp);
1044         head = bh = page_buffers(page);
1045         do {
1046                 if (atomic_read(&bh->b_count))
1047                         goto cannot_release;
1048                 bd = bh->b_private;
1049                 if (bd && bd->bd_ail)
1050                         goto cannot_release;
1051                 gfs2_assert_warn(sdp, !buffer_pinned(bh));
1052                 gfs2_assert_warn(sdp, !buffer_dirty(bh));
1053                 bh = bh->b_this_page;
1054         } while(bh != head);
1055         gfs2_log_unlock(sdp);
1056
1057         head = bh = page_buffers(page);
1058         do {
1059                 gfs2_log_lock(sdp);
1060                 bd = bh->b_private;
1061                 if (bd) {
1062                         gfs2_assert_warn(sdp, bd->bd_bh == bh);
1063                         gfs2_assert_warn(sdp, list_empty(&bd->bd_list_tr));
1064                         if (!list_empty(&bd->bd_le.le_list)) {
1065                                 if (!buffer_pinned(bh))
1066                                         list_del_init(&bd->bd_le.le_list);
1067                                 else
1068                                         bd = NULL;
1069                         }
1070                         if (bd)
1071                                 bd->bd_bh = NULL;
1072                         bh->b_private = NULL;
1073                 }
1074                 gfs2_log_unlock(sdp);
1075                 if (bd)
1076                         kmem_cache_free(gfs2_bufdata_cachep, bd);
1077
1078                 bh = bh->b_this_page;
1079         } while (bh != head);
1080
1081         return try_to_free_buffers(page);
1082 cannot_release:
1083         gfs2_log_unlock(sdp);
1084         return 0;
1085 }
1086
1087 static const struct address_space_operations gfs2_writeback_aops = {
1088         .writepage = gfs2_writeback_writepage,
1089         .writepages = gfs2_writeback_writepages,
1090         .readpage = gfs2_readpage,
1091         .readpages = gfs2_readpages,
1092         .sync_page = block_sync_page,
1093         .write_begin = gfs2_write_begin,
1094         .write_end = gfs2_write_end,
1095         .bmap = gfs2_bmap,
1096         .invalidatepage = gfs2_invalidatepage,
1097         .releasepage = gfs2_releasepage,
1098         .direct_IO = gfs2_direct_IO,
1099         .migratepage = buffer_migrate_page,
1100 };
1101
1102 static const struct address_space_operations gfs2_ordered_aops = {
1103         .writepage = gfs2_ordered_writepage,
1104         .readpage = gfs2_readpage,
1105         .readpages = gfs2_readpages,
1106         .sync_page = block_sync_page,
1107         .write_begin = gfs2_write_begin,
1108         .write_end = gfs2_write_end,
1109         .set_page_dirty = gfs2_set_page_dirty,
1110         .bmap = gfs2_bmap,
1111         .invalidatepage = gfs2_invalidatepage,
1112         .releasepage = gfs2_releasepage,
1113         .direct_IO = gfs2_direct_IO,
1114         .migratepage = buffer_migrate_page,
1115 };
1116
1117 static const struct address_space_operations gfs2_jdata_aops = {
1118         .writepage = gfs2_jdata_writepage,
1119         .writepages = gfs2_jdata_writepages,
1120         .readpage = gfs2_readpage,
1121         .readpages = gfs2_readpages,
1122         .sync_page = block_sync_page,
1123         .write_begin = gfs2_write_begin,
1124         .write_end = gfs2_write_end,
1125         .set_page_dirty = gfs2_set_page_dirty,
1126         .bmap = gfs2_bmap,
1127         .invalidatepage = gfs2_invalidatepage,
1128         .releasepage = gfs2_releasepage,
1129 };
1130
1131 void gfs2_set_aops(struct inode *inode)
1132 {
1133         struct gfs2_inode *ip = GFS2_I(inode);
1134
1135         if (gfs2_is_writeback(ip))
1136                 inode->i_mapping->a_ops = &gfs2_writeback_aops;
1137         else if (gfs2_is_ordered(ip))
1138                 inode->i_mapping->a_ops = &gfs2_ordered_aops;
1139         else if (gfs2_is_jdata(ip))
1140                 inode->i_mapping->a_ops = &gfs2_jdata_aops;
1141         else
1142                 BUG();
1143 }
1144