1 /*
2  *  linux/fs/buffer.c
3  *
4  *  Copyright (C) 1991, 1992, 2002  Linus Torvalds
5  */
6
7 /*
8  * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
9  *
10  * Removed a lot of unnecessary code and simplified things now that
11  * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
12  *
13  * Speed up hash, lru, and free list operations.  Use gfp() for allocating
14  * hash table, use SLAB cache for buffer heads. SMP threading.  -DaveM
15  *
16  * Added 32k buffer block sizes - these are required for older ARM systems. - RMK
17  *
18  * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
19  */
20
21 #include <linux/kernel.h>
22 #include <linux/syscalls.h>
23 #include <linux/fs.h>
24 #include <linux/mm.h>
25 #include <linux/percpu.h>
26 #include <linux/slab.h>
27 #include <linux/capability.h>
28 #include <linux/blkdev.h>
29 #include <linux/file.h>
30 #include <linux/quotaops.h>
31 #include <linux/highmem.h>
32 #include <linux/module.h>
33 #include <linux/writeback.h>
34 #include <linux/hash.h>
35 #include <linux/suspend.h>
36 #include <linux/buffer_head.h>
37 #include <linux/task_io_accounting_ops.h>
38 #include <linux/bio.h>
39 #include <linux/notifier.h>
40 #include <linux/cpu.h>
41 #include <linux/bitops.h>
42 #include <linux/mpage.h>
43 #include <linux/bit_spinlock.h>
44
45 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
46
47 #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
48
49 inline void
50 init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
51 {
52         bh->b_end_io = handler;
53         bh->b_private = private;
54 }
55 EXPORT_SYMBOL(init_buffer);
56
57 static int sync_buffer(void *word)
58 {
59         struct block_device *bd;
60         struct buffer_head *bh
61                 = container_of(word, struct buffer_head, b_state);
62
63         smp_mb();
64         bd = bh->b_bdev;
65         if (bd)
66                 blk_run_address_space(bd->bd_inode->i_mapping);
67         io_schedule();
68         return 0;
69 }
70
71 void __lock_buffer(struct buffer_head *bh)
72 {
73         wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
74                                                         TASK_UNINTERRUPTIBLE);
75 }
76 EXPORT_SYMBOL(__lock_buffer);
77
78 void unlock_buffer(struct buffer_head *bh)
79 {
80         clear_bit_unlock(BH_Lock, &bh->b_state);
81         smp_mb__after_clear_bit();
82         wake_up_bit(&bh->b_state, BH_Lock);
83 }
84 EXPORT_SYMBOL(unlock_buffer);
85
86 /*
87  * Block until a buffer comes unlocked.  This doesn't stop it
88  * from becoming locked again - you have to lock it yourself
89  * if you want to preserve its state.
90  */
91 void __wait_on_buffer(struct buffer_head * bh)
92 {
93         wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
94 }
95 EXPORT_SYMBOL(__wait_on_buffer);
96
97 static void
98 __clear_page_buffers(struct page *page)
99 {
100         ClearPagePrivate(page);
101         set_page_private(page, 0);
102         page_cache_release(page);
103 }
104
105
106 static int quiet_error(struct buffer_head *bh)
107 {
108         if (!test_bit(BH_Quiet, &bh->b_state) && printk_ratelimit())
109                 return 0;
110         return 1;
111 }
112
113
114 static void buffer_io_error(struct buffer_head *bh)
115 {
116         char b[BDEVNAME_SIZE];
117         printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
118                         bdevname(bh->b_bdev, b),
119                         (unsigned long long)bh->b_blocknr);
120 }
121
122 /*
123  * End-of-IO handler helper function which does not touch the bh after
124  * unlocking it.
125  * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
126  * a race there is benign: unlock_buffer() only uses the bh's address for
127  * hashing after unlocking the buffer, so it doesn't actually touch the bh
128  * itself.
129  */
130 static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
131 {
132         if (uptodate) {
133                 set_buffer_uptodate(bh);
134         } else {
135                 /* This happens, due to failed READA attempts. */
136                 clear_buffer_uptodate(bh);
137         }
138         unlock_buffer(bh);
139 }
140
141 /*
142  * Default synchronous end-of-IO handler..  Just mark it up-to-date and
143  * unlock the buffer. This is what ll_rw_block uses too.
144  */
145 void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
146 {
147         __end_buffer_read_notouch(bh, uptodate);
148         put_bh(bh);
149 }
150 EXPORT_SYMBOL(end_buffer_read_sync);
151
152 void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
153 {
154         char b[BDEVNAME_SIZE];
155
156         if (uptodate) {
157                 set_buffer_uptodate(bh);
158         } else {
159                 if (!quiet_error(bh)) {
160                         buffer_io_error(bh);
161                         printk(KERN_WARNING "lost page write due to "
162                                         "I/O error on %s\n",
163                                        bdevname(bh->b_bdev, b));
164                 }
165                 set_buffer_write_io_error(bh);
166                 clear_buffer_uptodate(bh);
167         }
168         unlock_buffer(bh);
169         put_bh(bh);
170 }
171 EXPORT_SYMBOL(end_buffer_write_sync);
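/*
 * Illustrative sketch, not taken from this file: a caller that wants to
 * write one dirty buffer synchronously would typically pair
 * end_buffer_write_sync() with submit_bh() roughly like this, assuming it
 * already holds a reference on "bh":
 *
 *	lock_buffer(bh);
 *	if (test_clear_buffer_dirty(bh)) {
 *		get_bh(bh);			(reference dropped by end_buffer_write_sync)
 *		bh->b_end_io = end_buffer_write_sync;
 *		submit_bh(WRITE, bh);
 *		wait_on_buffer(bh);
 *		if (!buffer_uptodate(bh))
 *			err = -EIO;
 *	} else {
 *		unlock_buffer(bh);
 *	}
 */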
172
173 /*
174  * Various filesystems appear to want __find_get_block to be non-blocking.
175  * But it's the page lock which protects the buffers.  To get around this,
176  * we get exclusion from try_to_free_buffers with the blockdev mapping's
177  * private_lock.
178  *
179  * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
180  * may be quite high.  This code could TryLock the page, and if that
181  * succeeds, there is no need to take private_lock. (But if
182  * private_lock is contended then so is mapping->tree_lock).
183  */
184 static struct buffer_head *
185 __find_get_block_slow(struct block_device *bdev, sector_t block)
186 {
187         struct inode *bd_inode = bdev->bd_inode;
188         struct address_space *bd_mapping = bd_inode->i_mapping;
189         struct buffer_head *ret = NULL;
190         pgoff_t index;
191         struct buffer_head *bh;
192         struct buffer_head *head;
193         struct page *page;
194         int all_mapped = 1;
195
196         index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
197         page = find_get_page(bd_mapping, index);
198         if (!page)
199                 goto out;
200
201         spin_lock(&bd_mapping->private_lock);
202         if (!page_has_buffers(page))
203                 goto out_unlock;
204         head = page_buffers(page);
205         bh = head;
206         do {
207                 if (!buffer_mapped(bh))
208                         all_mapped = 0;
209                 else if (bh->b_blocknr == block) {
210                         ret = bh;
211                         get_bh(bh);
212                         goto out_unlock;
213                 }
214                 bh = bh->b_this_page;
215         } while (bh != head);
216
217         /* we might be here because some of the buffers on this page are
218          * not mapped.  This is due to various races between
219          * file io on the block device and getblk.  It gets dealt with
220          * elsewhere, don't buffer_error if we had some unmapped buffers
221          */
222         if (all_mapped) {
223                 printk("__find_get_block_slow() failed. "
224                         "block=%llu, b_blocknr=%llu\n",
225                         (unsigned long long)block,
226                         (unsigned long long)bh->b_blocknr);
227                 printk("b_state=0x%08lx, b_size=%zu\n",
228                         bh->b_state, bh->b_size);
229                 printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
230         }
231 out_unlock:
232         spin_unlock(&bd_mapping->private_lock);
233         page_cache_release(page);
234 out:
235         return ret;
236 }
237
238 /* If invalidate_buffers() will trash dirty buffers, it means some kind
239    of fs corruption is going on. Trashing dirty data always implies losing
240    information that was supposed to be just stored on the physical layer
241    by the user.
242
243    Thus invalidate_buffers in general usage is not allowed to trash
244    dirty buffers. For example ioctl(BLKFLSBUF) expects dirty data to
245    be preserved.  These buffers are simply skipped.
246   
247    We also skip buffers which are still in use.  For example this can
248    happen if a userspace program is reading the block device.
249
250    NOTE: In the case where the user removes a removable-media disk even if
251    there's still dirty data not synced on disk (due to a bug in the device driver
252    or to an error of the user), by not destroying the dirty buffers we could
253    generate corruption also on the next media inserted, thus a parameter is
254    necessary to handle this case in the safest way possible (trying
255    to not corrupt also the new disk inserted with the data belonging to
256    the old now corrupted disk). Also for the ramdisk the natural thing
257    to do in order to release the ramdisk memory is to destroy dirty buffers.
258
259    These are two special cases. Normal usage implies that the device driver
260    issues a sync on the device (without waiting for I/O completion) and
261    then an invalidate_buffers call that doesn't trash dirty buffers.
262
263    For handling cache coherency with the blkdev pagecache the 'update' case
264    has been introduced. It is needed to re-read from disk any pinned
265    buffer. NOTE: re-reading from disk is destructive so we can do it only
266    when we assume nobody is changing the buffercache under our I/O and when
267    we think the disk contains more recent information than the buffercache.
268    The update == 1 pass marks the buffers we need to update, the update == 2
269    pass does the actual I/O. */
270 void invalidate_bdev(struct block_device *bdev)
271 {
272         struct address_space *mapping = bdev->bd_inode->i_mapping;
273
274         if (mapping->nrpages == 0)
275                 return;
276
277         invalidate_bh_lrus();
278         lru_add_drain_all();    /* make sure all lru add caches are flushed */
279         invalidate_mapping_pages(mapping, 0, -1);
280 }
281 EXPORT_SYMBOL(invalidate_bdev);
282
283 /*
284  * Kick the writeback threads then try to free up some ZONE_NORMAL memory.
285  */
286 static void free_more_memory(void)
287 {
288         struct zone *zone;
289         int nid;
290
291         wakeup_flusher_threads(1024);
292         yield();
293
294         for_each_online_node(nid) {
295                 (void)first_zones_zonelist(node_zonelist(nid, GFP_NOFS),
296                                                 gfp_zone(GFP_NOFS), NULL,
297                                                 &zone);
298                 if (zone)
299                         try_to_free_pages(node_zonelist(nid, GFP_NOFS), 0,
300                                                 GFP_NOFS, NULL);
301         }
302 }
303
304 /*
305  * I/O completion handler for block_read_full_page() - pages
306  * which come unlocked at the end of I/O.
307  */
308 static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
309 {
310         unsigned long flags;
311         struct buffer_head *first;
312         struct buffer_head *tmp;
313         struct page *page;
314         int page_uptodate = 1;
315
316         BUG_ON(!buffer_async_read(bh));
317
318         page = bh->b_page;
319         if (uptodate) {
320                 set_buffer_uptodate(bh);
321         } else {
322                 clear_buffer_uptodate(bh);
323                 if (!quiet_error(bh))
324                         buffer_io_error(bh);
325                 SetPageError(page);
326         }
327
328         /*
329          * Be _very_ careful from here on. Bad things can happen if
330          * two buffer heads end IO at almost the same time and both
331          * decide that the page is now completely done.
332          */
333         first = page_buffers(page);
334         local_irq_save(flags);
335         bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
336         clear_buffer_async_read(bh);
337         unlock_buffer(bh);
338         tmp = bh;
339         do {
340                 if (!buffer_uptodate(tmp))
341                         page_uptodate = 0;
342                 if (buffer_async_read(tmp)) {
343                         BUG_ON(!buffer_locked(tmp));
344                         goto still_busy;
345                 }
346                 tmp = tmp->b_this_page;
347         } while (tmp != bh);
348         bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
349         local_irq_restore(flags);
350
351         /*
352          * If none of the buffers had errors and they are all
353          * uptodate then we can set the page uptodate.
354          */
355         if (page_uptodate && !PageError(page))
356                 SetPageUptodate(page);
357         unlock_page(page);
358         return;
359
360 still_busy:
361         bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
362         local_irq_restore(flags);
363         return;
364 }
365
366 /*
367  * Completion handler for block_write_full_page() - pages which are unlocked
368  * during I/O, and which have PageWriteback cleared upon I/O completion.
369  */
370 void end_buffer_async_write(struct buffer_head *bh, int uptodate)
371 {
372         char b[BDEVNAME_SIZE];
373         unsigned long flags;
374         struct buffer_head *first;
375         struct buffer_head *tmp;
376         struct page *page;
377
378         BUG_ON(!buffer_async_write(bh));
379
380         page = bh->b_page;
381         if (uptodate) {
382                 set_buffer_uptodate(bh);
383         } else {
384                 if (!quiet_error(bh)) {
385                         buffer_io_error(bh);
386                         printk(KERN_WARNING "lost page write due to "
387                                         "I/O error on %s\n",
388                                bdevname(bh->b_bdev, b));
389                 }
390                 set_bit(AS_EIO, &page->mapping->flags);
391                 set_buffer_write_io_error(bh);
392                 clear_buffer_uptodate(bh);
393                 SetPageError(page);
394         }
395
396         first = page_buffers(page);
397         local_irq_save(flags);
398         bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
399
400         clear_buffer_async_write(bh);
401         unlock_buffer(bh);
402         tmp = bh->b_this_page;
403         while (tmp != bh) {
404                 if (buffer_async_write(tmp)) {
405                         BUG_ON(!buffer_locked(tmp));
406                         goto still_busy;
407                 }
408                 tmp = tmp->b_this_page;
409         }
410         bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
411         local_irq_restore(flags);
412         end_page_writeback(page);
413         return;
414
415 still_busy:
416         bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
417         local_irq_restore(flags);
418         return;
419 }
420 EXPORT_SYMBOL(end_buffer_async_write);
421
422 /*
423  * If a page's buffers are under async read-in (end_buffer_async_read
424  * completion) then there is a possibility that another thread of
425  * control could lock one of the buffers after it has completed
426  * but while some of the other buffers have not completed.  This
427  * locked buffer would confuse end_buffer_async_read() into not unlocking
428  * the page.  So the absence of BH_Async_Read tells end_buffer_async_read()
429  * that this buffer is not under async I/O.
430  *
431  * The page comes unlocked when it has no locked buffer_async buffers
432  * left.
433  *
434  * PageLocked prevents anyone from starting new async I/O against any of
435  * the buffers.
436  *
437  * PageWriteback is used to prevent simultaneous writeout of the same
438  * page.
439  *
440  * PageLocked prevents anyone from starting writeback of a page which is
441  * under read I/O (PageWriteback is only ever set against a locked page).
442  */
443 static void mark_buffer_async_read(struct buffer_head *bh)
444 {
445         bh->b_end_io = end_buffer_async_read;
446         set_buffer_async_read(bh);
447 }
448
449 static void mark_buffer_async_write_endio(struct buffer_head *bh,
450                                           bh_end_io_t *handler)
451 {
452         bh->b_end_io = handler;
453         set_buffer_async_write(bh);
454 }
455
456 void mark_buffer_async_write(struct buffer_head *bh)
457 {
458         mark_buffer_async_write_endio(bh, end_buffer_async_write);
459 }
460 EXPORT_SYMBOL(mark_buffer_async_write);
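/*
 * Illustrative sketch, not taken from this file: a writepage-style caller
 * marks each dirty, mapped buffer for async writeout before submitting it,
 * so that end_buffer_async_write() can tell when the whole page is done
 * ("head" is the page's first buffer; locking and error handling omitted):
 *
 *	bh = head;
 *	do {
 *		if (buffer_mapped(bh) && buffer_dirty(bh)) {
 *			lock_buffer(bh);
 *			mark_buffer_async_write(bh);
 *		}
 *		bh = bh->b_this_page;
 *	} while (bh != head);
 *
 * The caller then clears each marked buffer's dirty bit and submits it with
 * submit_bh(WRITE, bh), with the page held under PageWriteback.
 */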
461
462
463 /*
464  * fs/buffer.c contains helper functions for buffer-backed address space's
465  * fsync functions.  A common requirement for buffer-based filesystems is
466  * that certain data from the backing blockdev needs to be written out for
467  * a successful fsync().  For example, ext2 indirect blocks need to be
468  * written back and waited upon before fsync() returns.
469  *
470  * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
471  * inode_has_buffers() and invalidate_inode_buffers() are provided for the
472  * management of a list of dependent buffers at ->i_mapping->private_list.
473  *
474  * Locking is a little subtle: try_to_free_buffers() will remove buffers
475  * from their controlling inode's queue when they are being freed.  But
476  * try_to_free_buffers() will be operating against the *blockdev* mapping
477  * at the time, not against the S_ISREG file which depends on those buffers.
478  * So the locking for private_list is via the private_lock in the address_space
479  * which backs the buffers.  Which is different from the address_space 
480  * against which the buffers are listed.  So for a particular address_space,
481  * mapping->private_lock does *not* protect mapping->private_list!  In fact,
482  * mapping->private_list will always be protected by the backing blockdev's
483  * ->private_lock.
484  *
485  * Which introduces a requirement: all buffers on an address_space's
486  * ->private_list must be from the same address_space: the blockdev's.
487  *
488  * address_spaces which do not place buffers at ->private_list via these
489  * utility functions are free to use private_lock and private_list for
490  * whatever they want.  The only requirement is that list_empty(private_list)
491  * be true at clear_inode() time.
492  *
493  * FIXME: clear_inode should not call invalidate_inode_buffers().  The
494  * filesystems should do that.  invalidate_inode_buffers() should just go
495  * BUG_ON(!list_empty).
496  *
497  * FIXME: mark_buffer_dirty_inode() is a data-plane operation.  It should
498  * take an address_space, not an inode.  And it should be called
499  * mark_buffer_dirty_fsync() to clearly define why those buffers are being
500  * queued up.
501  *
502  * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
503  * list if it is already on a list.  Because if the buffer is on a list,
504  * it *must* already be on the right one.  If not, the filesystem is being
505  * silly.  This will save a ton of locking.  But first we have to ensure
506  * that buffers are taken *off* the old inode's list when they are freed
507  * (presumably in truncate).  That requires careful auditing of all
508  * filesystems (do it inside bforget()).  It could also be done by bringing
509  * b_inode back.
510  */
511
512 /*
513  * The buffer's backing address_space's private_lock must be held
514  */
515 static void __remove_assoc_queue(struct buffer_head *bh)
516 {
517         list_del_init(&bh->b_assoc_buffers);
518         WARN_ON(!bh->b_assoc_map);
519         if (buffer_write_io_error(bh))
520                 set_bit(AS_EIO, &bh->b_assoc_map->flags);
521         bh->b_assoc_map = NULL;
522 }
523
524 int inode_has_buffers(struct inode *inode)
525 {
526         return !list_empty(&inode->i_data.private_list);
527 }
528
529 /*
530  * osync is designed to support O_SYNC io.  It waits synchronously for
531  * all already-submitted IO to complete, but does not queue any new
532  * writes to the disk.
533  *
534  * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
535  * you dirty the buffers, and then use osync_inode_buffers to wait for
536  * completion.  Any other dirty buffers which are not yet queued for
537  * write will not be flushed to disk by the osync.
538  */
539 static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
540 {
541         struct buffer_head *bh;
542         struct list_head *p;
543         int err = 0;
544
545         spin_lock(lock);
546 repeat:
547         list_for_each_prev(p, list) {
548                 bh = BH_ENTRY(p);
549                 if (buffer_locked(bh)) {
550                         get_bh(bh);
551                         spin_unlock(lock);
552                         wait_on_buffer(bh);
553                         if (!buffer_uptodate(bh))
554                                 err = -EIO;
555                         brelse(bh);
556                         spin_lock(lock);
557                         goto repeat;
558                 }
559         }
560         spin_unlock(lock);
561         return err;
562 }
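/*
 * Illustrative sketch, not taken from this file: the O_SYNC pattern
 * described above, seen from a filesystem - start the writes with
 * ll_rw_block() as the buffers are dirtied, then wait for them afterwards
 * ("bhs" and "nr" are hypothetical):
 *
 *	ll_rw_block(WRITE, nr, bhs);
 *	for (i = 0; i < nr; i++) {
 *		wait_on_buffer(bhs[i]);
 *		if (!buffer_uptodate(bhs[i]))
 *			err = -EIO;
 *	}
 */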
563
564 static void do_thaw_one(struct super_block *sb, void *unused)
565 {
566         char b[BDEVNAME_SIZE];
567         while (sb->s_bdev && !thaw_bdev(sb->s_bdev, sb))
568                 printk(KERN_WARNING "Emergency Thaw on %s\n",
569                        bdevname(sb->s_bdev, b));
570 }
571
572 static void do_thaw_all(struct work_struct *work)
573 {
574         iterate_supers(do_thaw_one, NULL);
575         kfree(work);
576         printk(KERN_WARNING "Emergency Thaw complete\n");
577 }
578
579 /**
580  * emergency_thaw_all -- forcibly thaw every frozen filesystem
581  *
582  * Used for emergency unfreeze of all filesystems via SysRq
583  */
584 void emergency_thaw_all(void)
585 {
586         struct work_struct *work;
587
588         work = kmalloc(sizeof(*work), GFP_ATOMIC);
589         if (work) {
590                 INIT_WORK(work, do_thaw_all);
591                 schedule_work(work);
592         }
593 }
594
595 /**
596  * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
597  * @mapping: the mapping which wants those buffers written
598  *
599  * Starts I/O against the buffers at mapping->private_list, and waits upon
600  * that I/O.
601  *
602  * Basically, this is a convenience function for fsync().
603  * @mapping is a file or directory which needs those buffers to be written for
604  * a successful fsync().
605  */
606 int sync_mapping_buffers(struct address_space *mapping)
607 {
608         struct address_space *buffer_mapping = mapping->assoc_mapping;
609
610         if (buffer_mapping == NULL || list_empty(&mapping->private_list))
611                 return 0;
612
613         return fsync_buffers_list(&buffer_mapping->private_lock,
614                                         &mapping->private_list);
615 }
616 EXPORT_SYMBOL(sync_mapping_buffers);
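/*
 * Illustrative sketch, not taken from this file: a typical fsync() method
 * of a buffer-based filesystem first flushes the file's data pages, then
 * calls sync_mapping_buffers() to write and wait upon the metadata buffers
 * queued on ->private_list ("inode" is the file's inode):
 *
 *	err = filemap_write_and_wait(inode->i_mapping);
 *	err2 = sync_mapping_buffers(inode->i_mapping);
 *	if (!err)
 *		err = err2;
 *	return err;
 */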
617
618 /*
619  * Called when we've recently written block `bblock', and it is known that
620  * `bblock' was for a buffer_boundary() buffer.  This means that the block at
621  * `bblock + 1' is probably a dirty indirect block.  Hunt it down and, if it's
622  * dirty, schedule it for IO.  So that indirects merge nicely with their data.
623  */
624 void write_boundary_block(struct block_device *bdev,
625                         sector_t bblock, unsigned blocksize)
626 {
627         struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
628         if (bh) {
629                 if (buffer_dirty(bh))
630                         ll_rw_block(WRITE, 1, &bh);
631                 put_bh(bh);
632         }
633 }
634
635 void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
636 {
637         struct address_space *mapping = inode->i_mapping;
638         struct address_space *buffer_mapping = bh->b_page->mapping;
639
640         mark_buffer_dirty(bh);
641         if (!mapping->assoc_mapping) {
642                 mapping->assoc_mapping = buffer_mapping;
643         } else {
644                 BUG_ON(mapping->assoc_mapping != buffer_mapping);
645         }
646         if (!bh->b_assoc_map) {
647                 spin_lock(&buffer_mapping->private_lock);
648                 list_move_tail(&bh->b_assoc_buffers,
649                                 &mapping->private_list);
650                 bh->b_assoc_map = mapping;
651                 spin_unlock(&buffer_mapping->private_lock);
652         }
653 }
654 EXPORT_SYMBOL(mark_buffer_dirty_inode);
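/*
 * Illustrative sketch, not taken from this file: a filesystem that has just
 * modified an indirect (or other metadata) block on behalf of a regular
 * file dirties it against that file's inode, so that a later fsync() of the
 * file, via sync_mapping_buffers(), also writes this block ("offset",
 * "data" and "len" are hypothetical):
 *
 *	memcpy(bh->b_data + offset, data, len);
 *	mark_buffer_dirty_inode(bh, inode);
 */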
655
656 /*
657  * Mark the page dirty, and set it dirty in the radix tree, and mark the inode
658  * dirty.
659  *
660  * If warn is true, then emit a warning if the page is not uptodate and has
661  * not been truncated.
662  */
663 static void __set_page_dirty(struct page *page,
664                 struct address_space *mapping, int warn)
665 {
666         spin_lock_irq(&mapping->tree_lock);
667         if (page->mapping) {    /* Race with truncate? */
668                 WARN_ON_ONCE(warn && !PageUptodate(page));
669                 account_page_dirtied(page, mapping);
670                 radix_tree_tag_set(&mapping->page_tree,
671                                 page_index(page), PAGECACHE_TAG_DIRTY);
672         }
673         spin_unlock_irq(&mapping->tree_lock);
674         __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
675 }
676
677 /*
678  * Add a page to the dirty page list.
679  *
680  * It is a sad fact of life that this function is called from several places
681  * deeply under spinlocking.  It may not sleep.
682  *
683  * If the page has buffers, the uptodate buffers are set dirty, to preserve
684  * dirty-state coherency between the page and the buffers.  If the page does
685  * not have buffers then when they are later attached they will all be set
686  * dirty.
687  *
688  * The buffers are dirtied before the page is dirtied.  There's a small race
689  * window in which a writepage caller may see the page cleanness but not the
690  * buffer dirtiness.  That's fine.  If this code were to set the page dirty
691  * before the buffers, a concurrent writepage caller could clear the page dirty
692  * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
693  * page on the dirty page list.
694  *
695  * We use private_lock to lock against try_to_free_buffers while using the
696  * page's buffer list.  Also use this to protect against clean buffers being
697  * added to the page after it was set dirty.
698  *
699  * FIXME: may need to call ->reservepage here as well.  That's rather up to the
700  * address_space though.
701  */
702 int __set_page_dirty_buffers(struct page *page)
703 {
704         int newly_dirty;
705         struct address_space *mapping = page_mapping(page);
706
707         if (unlikely(!mapping))
708                 return !TestSetPageDirty(page);
709
710         spin_lock(&mapping->private_lock);
711         if (page_has_buffers(page)) {
712                 struct buffer_head *head = page_buffers(page);
713                 struct buffer_head *bh = head;
714
715                 do {
716                         set_buffer_dirty(bh);
717                         bh = bh->b_this_page;
718                 } while (bh != head);
719         }
720         newly_dirty = !TestSetPageDirty(page);
721         spin_unlock(&mapping->private_lock);
722
723         if (newly_dirty)
724                 __set_page_dirty(page, mapping, 1);
725         return newly_dirty;
726 }
727 EXPORT_SYMBOL(__set_page_dirty_buffers);
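/*
 * Illustrative note, not taken from this file: __set_page_dirty_buffers()
 * is what the generic set_page_dirty() falls back to when an address_space
 * supplies no ->set_page_dirty() method, so buffer-backed filesystems get
 * this behaviour by default.  A filesystem providing its own aops may also
 * point at it explicitly, e.g.:
 *
 *	static const struct address_space_operations example_aops = {
 *		.set_page_dirty	= __set_page_dirty_buffers,
 *		...
 *	};
 */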
728
729 /*
730  * Write out and wait upon a list of buffers.
731  *
732  * We have conflicting pressures: we want to make sure that all
733  * initially dirty buffers get waited on, but that any subsequently
734  * dirtied buffers don't.  After all, we don't want fsync to last
735  * forever if somebody is actively writing to the file.
736  *
737  * Do this in two main stages: first we copy dirty buffers to a
738  * temporary inode list, queueing the writes as we go.  Then we clean
739  * up, waiting for those writes to complete.
740  * 
741  * During this second stage, any subsequent updates to the file may end
742  * up refiling the buffer on the original inode's dirty list again, so
743  * there is a chance we will end up with a buffer queued for write but
744  * not yet completed on that list.  So, as a final cleanup we go through
745  * the osync code to catch these locked, dirty buffers without requeuing
746  * any newly dirty buffers for write.
747  */
748 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
749 {
750         struct buffer_head *bh;
751         struct list_head tmp;
752         struct address_space *mapping, *prev_mapping = NULL;
753         int err = 0, err2;
754
755         INIT_LIST_HEAD(&tmp);
756
757         spin_lock(lock);
758         while (!list_empty(list)) {
759                 bh = BH_ENTRY(list->next);
760                 mapping = bh->b_assoc_map;
761                 __remove_assoc_queue(bh);
762                 /* Avoid race with mark_buffer_dirty_inode() which does
763                  * a lockless check and we rely on seeing the dirty bit */
764                 smp_mb();
765                 if (buffer_dirty(bh) || buffer_locked(bh)) {
766                         list_add(&bh->b_assoc_buffers, &tmp);
767                         bh->b_assoc_map = mapping;
768                         if (buffer_dirty(bh)) {
769                                 get_bh(bh);
770                                 spin_unlock(lock);
771                                 /*
772                                  * Ensure any pending I/O completes so that
773                                  * write_dirty_buffer() actually writes the
774                                  * current contents - it is a noop if I/O is
775                                  * still in flight on potentially older
776                                  * contents.
777                                  */
778                                 write_dirty_buffer(bh, WRITE_SYNC_PLUG);
779
780                                 /*
781                                  * Kick off IO for the previous mapping. Note
782                                  * that we will not run the very last mapping,
783                                  * wait_on_buffer() will do that for us
784                                  * through sync_buffer().
785                                  */
786                                 if (prev_mapping && prev_mapping != mapping)
787                                         blk_run_address_space(prev_mapping);
788                                 prev_mapping = mapping;
789
790                                 brelse(bh);
791                                 spin_lock(lock);
792                         }
793                 }
794         }
795
796         while (!list_empty(&tmp)) {
797                 bh = BH_ENTRY(tmp.prev);
798                 get_bh(bh);
799                 mapping = bh->b_assoc_map;
800                 __remove_assoc_queue(bh);
801                 /* Avoid race with mark_buffer_dirty_inode() which does
802                  * a lockless check and we rely on seeing the dirty bit */
803                 smp_mb();
804                 if (buffer_dirty(bh)) {
805                         list_add(&bh->b_assoc_buffers,
806                                  &mapping->private_list);
807                         bh->b_assoc_map = mapping;
808                 }
809                 spin_unlock(lock);
810                 wait_on_buffer(bh);
811                 if (!buffer_uptodate(bh))
812                         err = -EIO;
813                 brelse(bh);
814                 spin_lock(lock);
815         }
816         
817         spin_unlock(lock);
818         err2 = osync_buffers_list(lock, list);
819         if (err)
820                 return err;
821         else
822                 return err2;
823 }
824
825 /*
826  * Invalidate any and all dirty buffers on a given inode.  We are
827  * probably unmounting the fs, but that doesn't mean we have already
828  * done a sync().  Just drop the buffers from the inode list.
829  *
830  * NOTE: we take the inode's blockdev's mapping's private_lock.  Which
831  * assumes that all the buffers are against the blockdev.  Not true
832  * for reiserfs.
833  */
834 void invalidate_inode_buffers(struct inode *inode)
835 {
836         if (inode_has_buffers(inode)) {
837                 struct address_space *mapping = &inode->i_data;
838                 struct list_head *list = &mapping->private_list;
839                 struct address_space *buffer_mapping = mapping->assoc_mapping;
840
841                 spin_lock(&buffer_mapping->private_lock);
842                 while (!list_empty(list))
843                         __remove_assoc_queue(BH_ENTRY(list->next));
844                 spin_unlock(&buffer_mapping->private_lock);
845         }
846 }
847 EXPORT_SYMBOL(invalidate_inode_buffers);
848
849 /*
850  * Remove any clean buffers from the inode's buffer list.  This is called
851  * when we're trying to free the inode itself.  Those buffers can pin it.
852  *
853  * Returns true if all buffers were removed.
854  */
855 int remove_inode_buffers(struct inode *inode)
856 {
857         int ret = 1;
858
859         if (inode_has_buffers(inode)) {
860                 struct address_space *mapping = &inode->i_data;
861                 struct list_head *list = &mapping->private_list;
862                 struct address_space *buffer_mapping = mapping->assoc_mapping;
863
864                 spin_lock(&buffer_mapping->private_lock);
865                 while (!list_empty(list)) {
866                         struct buffer_head *bh = BH_ENTRY(list->next);
867                         if (buffer_dirty(bh)) {
868                                 ret = 0;
869                                 break;
870                         }
871                         __remove_assoc_queue(bh);
872                 }
873                 spin_unlock(&buffer_mapping->private_lock);
874         }
875         return ret;
876 }
877
878 /*
879  * Create the appropriate buffers when given a page for data area and
880  * the size of each buffer.. Use the bh->b_this_page linked list to
881  * follow the buffers created.  Return NULL if unable to create more
882  * buffers.
883  *
884  * The retry flag is used to differentiate async IO (paging, swapping),
885  * which may not fail, from ordinary buffer allocations.
886  */
887 struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
888                 int retry)
889 {
890         struct buffer_head *bh, *head;
891         long offset;
892
893 try_again:
894         head = NULL;
895         offset = PAGE_SIZE;
896         while ((offset -= size) >= 0) {
897                 bh = alloc_buffer_head(GFP_NOFS);
898                 if (!bh)
899                         goto no_grow;
900
901                 bh->b_bdev = NULL;
902                 bh->b_this_page = head;
903                 bh->b_blocknr = -1;
904                 head = bh;
905
906                 bh->b_state = 0;
907                 atomic_set(&bh->b_count, 0);
908                 bh->b_size = size;
909
910                 /* Link the buffer to its page */
911                 set_bh_page(bh, page, offset);
912
913                 init_buffer(bh, NULL, NULL);
914         }
915         return head;
916 /*
917  * In case anything failed, we just free everything we got.
918  */
919 no_grow:
920         if (head) {
921                 do {
922                         bh = head;
923                         head = head->b_this_page;
924                         free_buffer_head(bh);
925                 } while (head);
926         }
927
928         /*
929          * Return failure for non-async IO requests.  Async IO requests
930          * are not allowed to fail, so we have to wait until buffer heads
931          * become available.  But we don't want tasks sleeping with 
932          * partially complete buffers, so all were released above.
933          */
934         if (!retry)
935                 return NULL;
936
937         /* We're _really_ low on memory. Now we just
938          * wait for old buffer heads to become free due to
939          * finishing IO.  Since this is an async request and
940          * the reserve list is empty, we're sure there are 
941          * async buffer heads in use.
942          */
943         free_more_memory();
944         goto try_again;
945 }
946 EXPORT_SYMBOL_GPL(alloc_page_buffers);
947
948 static inline void
949 link_dev_buffers(struct page *page, struct buffer_head *head)
950 {
951         struct buffer_head *bh, *tail;
952
953         bh = head;
954         do {
955                 tail = bh;
956                 bh = bh->b_this_page;
957         } while (bh);
958         tail->b_this_page = head;
959         attach_page_buffers(page, head);
960 }
961
962 /*
963  * Initialise the state of a blockdev page's buffers.
964  */ 
965 static void
966 init_page_buffers(struct page *page, struct block_device *bdev,
967                         sector_t block, int size)
968 {
969         struct buffer_head *head = page_buffers(page);
970         struct buffer_head *bh = head;
971         int uptodate = PageUptodate(page);
972
973         do {
974                 if (!buffer_mapped(bh)) {
975                         init_buffer(bh, NULL, NULL);
976                         bh->b_bdev = bdev;
977                         bh->b_blocknr = block;
978                         if (uptodate)
979                                 set_buffer_uptodate(bh);
980                         set_buffer_mapped(bh);
981                 }
982                 block++;
983                 bh = bh->b_this_page;
984         } while (bh != head);
985 }
986
987 /*
988  * Create the page-cache page that contains the requested block.
989  *
990  * This is used purely for blockdev mappings.
991  */
992 static struct page *
993 grow_dev_page(struct block_device *bdev, sector_t block,
994                 pgoff_t index, int size)
995 {
996         struct inode *inode = bdev->bd_inode;
997         struct page *page;
998         struct buffer_head *bh;
999
1000         page = find_or_create_page(inode->i_mapping, index,
1001                 (mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE);
1002         if (!page)
1003                 return NULL;
1004
1005         BUG_ON(!PageLocked(page));
1006
1007         if (page_has_buffers(page)) {
1008                 bh = page_buffers(page);
1009                 if (bh->b_size == size) {
1010                         init_page_buffers(page, bdev, block, size);
1011                         return page;
1012                 }
1013                 if (!try_to_free_buffers(page))
1014                         goto failed;
1015         }
1016
1017         /*
1018          * Allocate some buffers for this page
1019          */
1020         bh = alloc_page_buffers(page, size, 0);
1021         if (!bh)
1022                 goto failed;
1023
1024         /*
1025          * Link the page to the buffers and initialise them.  Take the
1026          * lock to be atomic wrt __find_get_block(), which does not
1027          * run under the page lock.
1028          */
1029         spin_lock(&inode->i_mapping->private_lock);
1030         link_dev_buffers(page, bh);
1031         init_page_buffers(page, bdev, block, size);
1032         spin_unlock(&inode->i_mapping->private_lock);
1033         return page;
1034
1035 failed:
1036         BUG();
1037         unlock_page(page);
1038         page_cache_release(page);
1039         return NULL;
1040 }
1041
1042 /*
1043  * Create buffers for the specified block device block's page.  If
1044  * that page was dirty, the buffers are set dirty also.
1045  */
1046 static int
1047 grow_buffers(struct block_device *bdev, sector_t block, int size)
1048 {
1049         struct page *page;
1050         pgoff_t index;
1051         int sizebits;
1052
1053         sizebits = -1;
1054         do {
1055                 sizebits++;
1056         } while ((size << sizebits) < PAGE_SIZE);
1057
1058         index = block >> sizebits;
1059
1060         /*
1061          * Check for a block which wants to lie outside our maximum possible
1062          * pagecache index.  (this comparison is done using sector_t types).
1063          */
1064         if (unlikely(index != block >> sizebits)) {
1065                 char b[BDEVNAME_SIZE];
1066
1067                 printk(KERN_ERR "%s: requested out-of-range block %llu for "
1068                         "device %s\n",
1069                         __func__, (unsigned long long)block,
1070                         bdevname(bdev, b));
1071                 return -EIO;
1072         }
1073         block = index << sizebits;
1074         /* Create a page with the proper size buffers.. */
1075         page = grow_dev_page(bdev, block, index, size);
1076         if (!page)
1077                 return 0;
1078         unlock_page(page);
1079         page_cache_release(page);
1080         return 1;
1081 }
1082
1083 static struct buffer_head *
1084 __getblk_slow(struct block_device *bdev, sector_t block, int size)
1085 {
1086         /* Size must be a multiple of the device's logical block size */
1087         if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
1088                         (size < 512 || size > PAGE_SIZE))) {
1089                 printk(KERN_ERR "getblk(): invalid block size %d requested\n",
1090                                         size);
1091                 printk(KERN_ERR "logical block size: %d\n",
1092                                         bdev_logical_block_size(bdev));
1093
1094                 dump_stack();
1095                 return NULL;
1096         }
1097
1098         for (;;) {
1099                 struct buffer_head * bh;
1100                 int ret;
1101
1102                 bh = __find_get_block(bdev, block, size);
1103                 if (bh)
1104                         return bh;
1105
1106                 ret = grow_buffers(bdev, block, size);
1107                 if (ret < 0)
1108                         return NULL;
1109                 if (ret == 0)
1110                         free_more_memory();
1111         }
1112 }
1113
1114 /*
1115  * The relationship between dirty buffers and dirty pages:
1116  *
1117  * Whenever a page has any dirty buffers, the page's dirty bit is set, and
1118  * the page is tagged dirty in its radix tree.
1119  *
1120  * At all times, the dirtiness of the buffers represents the dirtiness of
1121  * subsections of the page.  If the page has buffers, the page dirty bit is
1122  * merely a hint about the true dirty state.
1123  *
1124  * When a page is set dirty in its entirety, all its buffers are marked dirty
1125  * (if the page has buffers).
1126  *
1127  * When a buffer is marked dirty, its page is dirtied, but the page's other
1128  * buffers are not.
1129  *
1130  * Also.  When blockdev buffers are explicitly read with bread(), they
1131  * individually become uptodate.  But their backing page remains not
1132  * uptodate - even if all of its buffers are uptodate.  A subsequent
1133  * block_read_full_page() against that page will discover all the uptodate
1134  * buffers, will set the page uptodate and will perform no I/O.
1135  */
1136
1137 /**
1138  * mark_buffer_dirty - mark a buffer_head as needing writeout
1139  * @bh: the buffer_head to mark dirty
1140  *
1141  * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
1142  * backing page dirty, then tag the page as dirty in its address_space's radix
1143  * tree and then attach the address_space's inode to its superblock's dirty
1144  * inode list.
1145  *
1146  * mark_buffer_dirty() is atomic.  It takes bh->b_page->mapping->private_lock,
1147  * mapping->tree_lock and the global inode_lock.
1148  */
1149 void mark_buffer_dirty(struct buffer_head *bh)
1150 {
1151         WARN_ON_ONCE(!buffer_uptodate(bh));
1152
1153         /*
1154          * Very *carefully* optimize the it-is-already-dirty case.
1155          *
1156          * Don't let the final "is it dirty" escape to before we
1157          * perhaps modified the buffer.
1158          */
1159         if (buffer_dirty(bh)) {
1160                 smp_mb();
1161                 if (buffer_dirty(bh))
1162                         return;
1163         }
1164
1165         if (!test_set_buffer_dirty(bh)) {
1166                 struct page *page = bh->b_page;
1167                 if (!TestSetPageDirty(page)) {
1168                         struct address_space *mapping = page_mapping(page);
1169                         if (mapping)
1170                                 __set_page_dirty(page, mapping, 0);
1171                 }
1172         }
1173 }
1174 EXPORT_SYMBOL(mark_buffer_dirty);
1175
1176 /*
1177  * Decrement a buffer_head's reference count.  If all buffers against a page
1178  * have zero reference count, are clean and unlocked, and if the page is clean
1179  * and unlocked then try_to_free_buffers() may strip the buffers from the page
1180  * in preparation for freeing it (sometimes, rarely, buffers are removed from
1181  * a page but it ends up not being freed, and buffers may later be reattached).
1182  */
1183 void __brelse(struct buffer_head * buf)
1184 {
1185         if (atomic_read(&buf->b_count)) {
1186                 put_bh(buf);
1187                 return;
1188         }
1189         WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
1190 }
1191 EXPORT_SYMBOL(__brelse);
1192
1193 /*
1194  * bforget() is like brelse(), except it discards any
1195  * potentially dirty data.
1196  */
1197 void __bforget(struct buffer_head *bh)
1198 {
1199         clear_buffer_dirty(bh);
1200         if (bh->b_assoc_map) {
1201                 struct address_space *buffer_mapping = bh->b_page->mapping;
1202
1203                 spin_lock(&buffer_mapping->private_lock);
1204                 list_del_init(&bh->b_assoc_buffers);
1205                 bh->b_assoc_map = NULL;
1206                 spin_unlock(&buffer_mapping->private_lock);
1207         }
1208         __brelse(bh);
1209 }
1210 EXPORT_SYMBOL(__bforget);
1211
1212 static struct buffer_head *__bread_slow(struct buffer_head *bh)
1213 {
1214         lock_buffer(bh);
1215         if (buffer_uptodate(bh)) {
1216                 unlock_buffer(bh);
1217                 return bh;
1218         } else {
1219                 get_bh(bh);
1220                 bh->b_end_io = end_buffer_read_sync;
1221                 submit_bh(READ, bh);
1222                 wait_on_buffer(bh);
1223                 if (buffer_uptodate(bh))
1224                         return bh;
1225         }
1226         brelse(bh);
1227         return NULL;
1228 }
1229
1230 /*
1231  * Per-cpu buffer LRU implementation, to reduce the cost of __find_get_block().
1232  * The bhs[] array is sorted - newest buffer is at bhs[0].  Buffers have their
1233  * refcount elevated by one when they're in an LRU.  A buffer can only appear
1234  * once in a particular CPU's LRU.  A single buffer can be present in multiple
1235  * CPU's LRUs at the same time.
1236  *
1237  * This is a transparent caching front-end to sb_bread(), sb_getblk() and
1238  * sb_find_get_block().
1239  *
1240  * The LRUs themselves only need locking against invalidate_bh_lrus.  We use
1241  * a local interrupt disable for that.
1242  */
1243
1244 #define BH_LRU_SIZE     8
1245
1246 struct bh_lru {
1247         struct buffer_head *bhs[BH_LRU_SIZE];
1248 };
1249
1250 static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
1251
1252 #ifdef CONFIG_SMP
1253 #define bh_lru_lock()   local_irq_disable()
1254 #define bh_lru_unlock() local_irq_enable()
1255 #else
1256 #define bh_lru_lock()   preempt_disable()
1257 #define bh_lru_unlock() preempt_enable()
1258 #endif
1259
1260 static inline void check_irqs_on(void)
1261 {
1262 #ifdef irqs_disabled
1263         BUG_ON(irqs_disabled());
1264 #endif
1265 }
1266
1267 /*
1268  * The LRU management algorithm is dopey-but-simple.  Sorry.
1269  */
1270 static void bh_lru_install(struct buffer_head *bh)
1271 {
1272         struct buffer_head *evictee = NULL;
1273         struct bh_lru *lru;
1274
1275         check_irqs_on();
1276         bh_lru_lock();
1277         lru = &__get_cpu_var(bh_lrus);
1278         if (lru->bhs[0] != bh) {
1279                 struct buffer_head *bhs[BH_LRU_SIZE];
1280                 int in;
1281                 int out = 0;
1282
1283                 get_bh(bh);
1284                 bhs[out++] = bh;
1285                 for (in = 0; in < BH_LRU_SIZE; in++) {
1286                         struct buffer_head *bh2 = lru->bhs[in];
1287
1288                         if (bh2 == bh) {
1289                                 __brelse(bh2);
1290                         } else {
1291                                 if (out >= BH_LRU_SIZE) {
1292                                         BUG_ON(evictee != NULL);
1293                                         evictee = bh2;
1294                                 } else {
1295                                         bhs[out++] = bh2;
1296                                 }
1297                         }
1298                 }
1299                 while (out < BH_LRU_SIZE)
1300                         bhs[out++] = NULL;
1301                 memcpy(lru->bhs, bhs, sizeof(bhs));
1302         }
1303         bh_lru_unlock();
1304
1305         if (evictee)
1306                 __brelse(evictee);
1307 }
1308
1309 /*
1310  * Look up the bh in this cpu's LRU.  If it's there, move it to the head.
1311  */
1312 static struct buffer_head *
1313 lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
1314 {
1315         struct buffer_head *ret = NULL;
1316         struct bh_lru *lru;
1317         unsigned int i;
1318
1319         check_irqs_on();
1320         bh_lru_lock();
1321         lru = &__get_cpu_var(bh_lrus);
1322         for (i = 0; i < BH_LRU_SIZE; i++) {
1323                 struct buffer_head *bh = lru->bhs[i];
1324
1325                 if (bh && bh->b_bdev == bdev &&
1326                                 bh->b_blocknr == block && bh->b_size == size) {
1327                         if (i) {
1328                                 while (i) {
1329                                         lru->bhs[i] = lru->bhs[i - 1];
1330                                         i--;
1331                                 }
1332                                 lru->bhs[0] = bh;
1333                         }
1334                         get_bh(bh);
1335                         ret = bh;
1336                         break;
1337                 }
1338         }
1339         bh_lru_unlock();
1340         return ret;
1341 }
1342
1343 /*
1344  * Perform a pagecache lookup for the matching buffer.  If it's there, refresh
1345  * it in the LRU and mark it as accessed.  If it is not present then return
1346  * NULL
1347  */
1348 struct buffer_head *
1349 __find_get_block(struct block_device *bdev, sector_t block, unsigned size)
1350 {
1351         struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
1352
1353         if (bh == NULL) {
1354                 bh = __find_get_block_slow(bdev, block);
1355                 if (bh)
1356                         bh_lru_install(bh);
1357         }
1358         if (bh)
1359                 touch_buffer(bh);
1360         return bh;
1361 }
1362 EXPORT_SYMBOL(__find_get_block);
1363
1364 /*
1365  * __getblk will locate (and, if necessary, create) the buffer_head
1366  * which corresponds to the passed block_device, block and size. The
1367  * returned buffer has its reference count incremented.
1368  *
1369  * __getblk() cannot fail - it just keeps trying.  If you pass it an
1370  * illegal block number, __getblk() will happily return a buffer_head
1371  * which represents the non-existent block.  Very weird.
1372  *
1373  * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
1374  * attempt is failing.  FIXME, perhaps?
1375  */
1376 struct buffer_head *
1377 __getblk(struct block_device *bdev, sector_t block, unsigned size)
1378 {
1379         struct buffer_head *bh = __find_get_block(bdev, block, size);
1380
1381         might_sleep();
1382         if (bh == NULL)
1383                 bh = __getblk_slow(bdev, block, size);
1384         return bh;
1385 }
1386 EXPORT_SYMBOL(__getblk);
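/*
 * Illustrative sketch, not taken from this file: __getblk() (usually via
 * the sb_getblk() wrapper) is the right call when the caller intends to
 * overwrite the whole block and therefore has no need to read it first
 * ("sb" and "blocknr" are hypothetical):
 *
 *	bh = sb_getblk(sb, blocknr);
 *	lock_buffer(bh);
 *	memset(bh->b_data, 0, bh->b_size);
 *	set_buffer_uptodate(bh);
 *	unlock_buffer(bh);
 *	mark_buffer_dirty(bh);
 *	brelse(bh);
 */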
1387
1388 /*
1389  * Do async read-ahead on a buffer..
1390  */
1391 void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
1392 {
1393         struct buffer_head *bh = __getblk(bdev, block, size);
1394         if (likely(bh)) {
1395                 ll_rw_block(READA, 1, &bh);
1396                 brelse(bh);
1397         }
1398 }
1399 EXPORT_SYMBOL(__breadahead);
1400
1401 /**
1402  *  __bread() - reads a specified block and returns the bh
1403  *  @bdev: the block_device to read from
1404  *  @block: number of block
1405  *  @size: size (in bytes) to read
1406  * 
1407  *  Reads a specified block, and returns buffer head that contains it.
1408  *  It returns NULL if the block was unreadable.
1409  */
1410 struct buffer_head *
1411 __bread(struct block_device *bdev, sector_t block, unsigned size)
1412 {
1413         struct buffer_head *bh = __getblk(bdev, block, size);
1414
1415         if (likely(bh) && !buffer_uptodate(bh))
1416                 bh = __bread_slow(bh);
1417         return bh;
1418 }
1419 EXPORT_SYMBOL(__bread);
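/*
 * Illustrative sketch, not taken from this file: reading a metadata block
 * with __bread() (usually via the sb_bread() wrapper); on success the
 * returned buffer is uptodate and must eventually be released with
 * brelse():
 *
 *	bh = sb_bread(sb, blocknr);
 *	if (!bh)
 *		return -EIO;
 *	... examine or copy bh->b_data ...
 *	brelse(bh);
 */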
1420
1421 /*
1422  * invalidate_bh_lrus() is called rarely - but not only at unmount.
1423  * This doesn't race because it runs in each cpu either in irq
1424  * or with preempt disabled.
1425  */
1426 static void invalidate_bh_lru(void *arg)
1427 {
1428         struct bh_lru *b = &get_cpu_var(bh_lrus);
1429         int i;
1430
1431         for (i = 0; i < BH_LRU_SIZE; i++) {
1432                 brelse(b->bhs[i]);
1433                 b->bhs[i] = NULL;
1434         }
1435         put_cpu_var(bh_lrus);
1436 }
1437         
1438 void invalidate_bh_lrus(void)
1439 {
1440         on_each_cpu(invalidate_bh_lru, NULL, 1);
1441 }
1442 EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
1443
1444 void set_bh_page(struct buffer_head *bh,
1445                 struct page *page, unsigned long offset)
1446 {
1447         bh->b_page = page;
1448         BUG_ON(offset >= PAGE_SIZE);
1449         if (PageHighMem(page))
1450                 /*
1451                  * This catches illegal uses and preserves the offset:
1452                  */
1453                 bh->b_data = (char *)(0 + offset);
1454         else
1455                 bh->b_data = page_address(page) + offset;
1456 }
1457 EXPORT_SYMBOL(set_bh_page);
1458
1459 /*
1460  * Called when truncating a buffer on a page completely.
1461  */
1462 static void discard_buffer(struct buffer_head * bh)
1463 {
1464         lock_buffer(bh);
1465         clear_buffer_dirty(bh);
1466         bh->b_bdev = NULL;
1467         clear_buffer_mapped(bh);
1468         clear_buffer_req(bh);
1469         clear_buffer_new(bh);
1470         clear_buffer_delay(bh);
1471         clear_buffer_unwritten(bh);
1472         unlock_buffer(bh);
1473 }
1474
1475 /**
1476  * block_invalidatepage - invalidate part or all of a buffer-backed page
1477  *
1478  * @page: the page which is affected
1479  * @offset: the index of the truncation point
1480  *
1481  * block_invalidatepage() is called when all or part of the page has become
1482  * invalidated by a truncate operation.
1483  *
1484  * block_invalidatepage() does not have to release all buffers, but it must
1485  * ensure that no dirty buffer is left outside @offset and that no I/O
1486  * is underway against any of the blocks which are outside the truncation
1487  * point, because the caller is about to free (and possibly reuse) those
1488  * blocks on-disk.
1489  */
1490 void block_invalidatepage(struct page *page, unsigned long offset)
1491 {
1492         struct buffer_head *head, *bh, *next;
1493         unsigned int curr_off = 0;
1494
1495         BUG_ON(!PageLocked(page));
1496         if (!page_has_buffers(page))
1497                 goto out;
1498
1499         head = page_buffers(page);
1500         bh = head;
1501         do {
1502                 unsigned int next_off = curr_off + bh->b_size;
1503                 next = bh->b_this_page;
1504
1505                 /*
1506                  * is this block fully invalidated?
1507                  */
1508                 if (offset <= curr_off)
1509                         discard_buffer(bh);
1510                 curr_off = next_off;
1511                 bh = next;
1512         } while (bh != head);
1513
1514         /*
1515          * We release buffers only if the entire page is being invalidated.
1516          * The get_block cached value has been unconditionally invalidated,
1517          * so real IO is not possible anymore.
1518          */
1519         if (offset == 0)
1520                 try_to_release_page(page, 0);
1521 out:
1522         return;
1523 }
1524 EXPORT_SYMBOL(block_invalidatepage);
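
/*
 * Illustrative sketch (an assumption, not taken from this file): a
 * buffer-backed filesystem may rely on do_invalidatepage() falling back to
 * block_invalidatepage(), or may wire it up explicitly in its aops:
 *
 *	static const struct address_space_operations foo_aops = {
 *		...
 *		.invalidatepage	= block_invalidatepage,
 *	};
 */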
1525
1526 /*
1527  * We attach and possibly dirty the buffers atomically wrt
1528  * __set_page_dirty_buffers() via private_lock.  try_to_free_buffers
1529  * is already excluded via the page lock.
1530  */
1531 void create_empty_buffers(struct page *page,
1532                         unsigned long blocksize, unsigned long b_state)
1533 {
1534         struct buffer_head *bh, *head, *tail;
1535
1536         head = alloc_page_buffers(page, blocksize, 1);
1537         bh = head;
1538         do {
1539                 bh->b_state |= b_state;
1540                 tail = bh;
1541                 bh = bh->b_this_page;
1542         } while (bh);
1543         tail->b_this_page = head;
1544
1545         spin_lock(&page->mapping->private_lock);
1546         if (PageUptodate(page) || PageDirty(page)) {
1547                 bh = head;
1548                 do {
1549                         if (PageDirty(page))
1550                                 set_buffer_dirty(bh);
1551                         if (PageUptodate(page))
1552                                 set_buffer_uptodate(bh);
1553                         bh = bh->b_this_page;
1554                 } while (bh != head);
1555         }
1556         attach_page_buffers(page, head);
1557         spin_unlock(&page->mapping->private_lock);
1558 }
1559 EXPORT_SYMBOL(create_empty_buffers);
1560
1561 /*
1562  * We are taking a block for data and we don't want any output from any
1563  * buffer-cache aliases starting from the return of this function and
1564  * until the moment when something explicitly marks the buffer
1565  * dirty (hopefully that will not happen until we free that block ;-)
1566  * We don't even need to mark it not-uptodate - nobody can expect
1567  * anything from a newly allocated buffer anyway. We used to use
1568  * unmap_buffer() for such invalidation, but that was wrong. We definitely
1569  * don't want to mark the alias unmapped, for example - it would confuse
1570  * anyone who might pick it with bread() afterwards...
1571  *
1572  * Also..  Note that bforget() doesn't lock the buffer.  So there can
1573  * be writeout I/O going on against recently-freed buffers.  We don't
1574  * wait on that I/O in bforget() - it's more efficient to wait on the I/O
1575  * only if we really need to.  That happens here.
1576  */
1577 void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
1578 {
1579         struct buffer_head *old_bh;
1580
1581         might_sleep();
1582
1583         old_bh = __find_get_block_slow(bdev, block);
1584         if (old_bh) {
1585                 clear_buffer_dirty(old_bh);
1586                 wait_on_buffer(old_bh);
1587                 clear_buffer_req(old_bh);
1588                 __brelse(old_bh);
1589         }
1590 }
1591 EXPORT_SYMBOL(unmap_underlying_metadata);
1592
1593 /*
1594  * NOTE! All mapped/uptodate combinations are valid:
1595  *
1596  *      Mapped  Uptodate        Meaning
1597  *
1598  *      No      No              "unknown" - must do get_block()
1599  *      No      Yes             "hole" - zero-filled
1600  *      Yes     No              "allocated" - allocated on disk, not read in
1601  *      Yes     Yes             "valid" - allocated and up-to-date in memory.
1602  *
1603  * "Dirty" is valid only with the last case (mapped+uptodate).
1604  */
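
/*
 * Expressed as the flag tests used throughout this file (illustrative):
 *
 *	!buffer_mapped(bh) && !buffer_uptodate(bh)	"unknown"   - call get_block()
 *	!buffer_mapped(bh) &&  buffer_uptodate(bh)	"hole"      - treat as zeroes
 *	 buffer_mapped(bh) && !buffer_uptodate(bh)	"allocated" - needs a read
 *	 buffer_mapped(bh) &&  buffer_uptodate(bh)	"valid"     - usable as-is
 */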
1605
1606 /*
1607  * While block_write_full_page is writing back the dirty buffers under
1608  * the page lock, whoever dirtied the buffers may decide to clean them
1609  * again at any time.  We handle that by only looking at the buffer
1610  * state inside lock_buffer().
1611  *
1612  * If block_write_full_page() is called for regular writeback
1613  * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
1614  * locked buffer.  This can only happen if someone has written the buffer
1615  * directly, with submit_bh().  At the address_space level PageWriteback
1616  * prevents this contention from occurring.
1617  *
1618  * If block_write_full_page() is called with wbc->sync_mode ==
1619  * WB_SYNC_ALL, the writes are posted using WRITE_SYNC_PLUG; this
1620  * causes the writes to be flagged as synchronous writes, but the
1621  * block device queue will NOT be unplugged, since usually many pages
1622  * will be pushed out before the higher-level caller actually
1623  * waits for the writes to be completed.  The various wait functions,
1624  * such as wait_on_writeback_range() will ultimately call sync_page()
1625  * which will ultimately call blk_run_backing_dev(), which will end up
1626  * unplugging the device queue.
1627  */
1628 static int __block_write_full_page(struct inode *inode, struct page *page,
1629                         get_block_t *get_block, struct writeback_control *wbc,
1630                         bh_end_io_t *handler)
1631 {
1632         int err;
1633         sector_t block;
1634         sector_t last_block;
1635         struct buffer_head *bh, *head;
1636         const unsigned blocksize = 1 << inode->i_blkbits;
1637         int nr_underway = 0;
1638         int write_op = (wbc->sync_mode == WB_SYNC_ALL ?
1639                         WRITE_SYNC_PLUG : WRITE);
1640
1641         BUG_ON(!PageLocked(page));
1642
1643         last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
1644
1645         if (!page_has_buffers(page)) {
1646                 create_empty_buffers(page, blocksize,
1647                                         (1 << BH_Dirty)|(1 << BH_Uptodate));
1648         }
1649
1650         /*
1651          * Be very careful.  We have no exclusion from __set_page_dirty_buffers
1652          * here, and the (potentially unmapped) buffers may become dirty at
1653          * any time.  If a buffer becomes dirty here after we've inspected it
1654          * then we just miss that fact, and the page stays dirty.
1655          *
1656          * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
1657          * handle that here by just cleaning them.
1658          */
1659
1660         block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1661         head = page_buffers(page);
1662         bh = head;
1663
1664         /*
1665          * Get all the dirty buffers mapped to disk addresses and
1666          * handle any aliases from the underlying blockdev's mapping.
1667          */
1668         do {
1669                 if (block > last_block) {
1670                         /*
1671                          * mapped buffers outside i_size will occur, because
1672                          * this page can be outside i_size when there is a
1673                          * truncate in progress.
1674                          */
1675                         /*
1676                          * The buffer was zeroed by block_write_full_page()
1677                          */
1678                         clear_buffer_dirty(bh);
1679                         set_buffer_uptodate(bh);
1680                 } else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
1681                            buffer_dirty(bh)) {
1682                         WARN_ON(bh->b_size != blocksize);
1683                         err = get_block(inode, block, bh, 1);
1684                         if (err)
1685                                 goto recover;
1686                         clear_buffer_delay(bh);
1687                         if (buffer_new(bh)) {
1688                                 /* blockdev mappings never come here */
1689                                 clear_buffer_new(bh);
1690                                 unmap_underlying_metadata(bh->b_bdev,
1691                                                         bh->b_blocknr);
1692                         }
1693                 }
1694                 bh = bh->b_this_page;
1695                 block++;
1696         } while (bh != head);
1697
1698         do {
1699                 if (!buffer_mapped(bh))
1700                         continue;
1701                 /*
1702                  * If it's a fully non-blocking write attempt and we cannot
1703                  * lock the buffer then redirty the page.  Note that this can
1704                  * potentially cause a busy-wait loop from writeback threads
1705                  * and kswapd activity, but those code paths have their own
1706                  * higher-level throttling.
1707                  */
1708                 if (wbc->sync_mode != WB_SYNC_NONE) {
1709                         lock_buffer(bh);
1710                 } else if (!trylock_buffer(bh)) {
1711                         redirty_page_for_writepage(wbc, page);
1712                         continue;
1713                 }
1714                 if (test_clear_buffer_dirty(bh)) {
1715                         mark_buffer_async_write_endio(bh, handler);
1716                 } else {
1717                         unlock_buffer(bh);
1718                 }
1719         } while ((bh = bh->b_this_page) != head);
1720
1721         /*
1722          * The page and its buffers are protected by PageWriteback(), so we can
1723          * drop the bh refcounts early.
1724          */
1725         BUG_ON(PageWriteback(page));
1726         set_page_writeback(page);
1727
1728         do {
1729                 struct buffer_head *next = bh->b_this_page;
1730                 if (buffer_async_write(bh)) {
1731                         submit_bh(write_op, bh);
1732                         nr_underway++;
1733                 }
1734                 bh = next;
1735         } while (bh != head);
1736         unlock_page(page);
1737
1738         err = 0;
1739 done:
1740         if (nr_underway == 0) {
1741                 /*
1742                  * The page was marked dirty, but the buffers were
1743                  * clean.  Someone wrote them back by hand with
1744                  * ll_rw_block/submit_bh.  A rare case.
1745                  */
1746                 end_page_writeback(page);
1747
1748                 /*
1749                  * The page and buffer_heads can be released at any time from
1750                  * here on.
1751                  */
1752         }
1753         return err;
1754
1755 recover:
1756         /*
1757          * ENOSPC, or some other error.  We may already have added some
1758          * blocks to the file, so we need to write these out to avoid
1759          * exposing stale data.
1760          * The page is currently locked and not marked for writeback
1761          */
1762         bh = head;
1763         /* Recovery: lock and submit the mapped buffers */
1764         do {
1765                 if (buffer_mapped(bh) && buffer_dirty(bh) &&
1766                     !buffer_delay(bh)) {
1767                         lock_buffer(bh);
1768                         mark_buffer_async_write_endio(bh, handler);
1769                 } else {
1770                         /*
1771                          * The buffer may have been set dirty during
1772                          * attachment to a dirty page.
1773                          */
1774                         clear_buffer_dirty(bh);
1775                 }
1776         } while ((bh = bh->b_this_page) != head);
1777         SetPageError(page);
1778         BUG_ON(PageWriteback(page));
1779         mapping_set_error(page->mapping, err);
1780         set_page_writeback(page);
1781         do {
1782                 struct buffer_head *next = bh->b_this_page;
1783                 if (buffer_async_write(bh)) {
1784                         clear_buffer_dirty(bh);
1785                         submit_bh(write_op, bh);
1786                         nr_underway++;
1787                 }
1788                 bh = next;
1789         } while (bh != head);
1790         unlock_page(page);
1791         goto done;
1792 }
1793
1794 /*
1795  * If a page has any new buffers, zero them out here, and mark them uptodate
1796  * and dirty so they'll be written out (in order to prevent uninitialised
1797  * block data from leaking), and clear the new bit.
1798  */
1799 void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
1800 {
1801         unsigned int block_start, block_end;
1802         struct buffer_head *head, *bh;
1803
1804         BUG_ON(!PageLocked(page));
1805         if (!page_has_buffers(page))
1806                 return;
1807
1808         bh = head = page_buffers(page);
1809         block_start = 0;
1810         do {
1811                 block_end = block_start + bh->b_size;
1812
1813                 if (buffer_new(bh)) {
1814                         if (block_end > from && block_start < to) {
1815                                 if (!PageUptodate(page)) {
1816                                         unsigned start, size;
1817
1818                                         start = max(from, block_start);
1819                                         size = min(to, block_end) - start;
1820
1821                                         zero_user(page, start, size);
1822                                         set_buffer_uptodate(bh);
1823                                 }
1824
1825                                 clear_buffer_new(bh);
1826                                 mark_buffer_dirty(bh);
1827                         }
1828                 }
1829
1830                 block_start = block_end;
1831                 bh = bh->b_this_page;
1832         } while (bh != head);
1833 }
1834 EXPORT_SYMBOL(page_zero_new_buffers);
1835
1836 int block_prepare_write(struct page *page, unsigned from, unsigned to,
1837                 get_block_t *get_block)
1838 {
1839         struct inode *inode = page->mapping->host;
1840         unsigned block_start, block_end;
1841         sector_t block;
1842         int err = 0;
1843         unsigned blocksize, bbits;
1844         struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
1845
1846         BUG_ON(!PageLocked(page));
1847         BUG_ON(from > PAGE_CACHE_SIZE);
1848         BUG_ON(to > PAGE_CACHE_SIZE);
1849         BUG_ON(from > to);
1850
1851         blocksize = 1 << inode->i_blkbits;
1852         if (!page_has_buffers(page))
1853                 create_empty_buffers(page, blocksize, 0);
1854         head = page_buffers(page);
1855
1856         bbits = inode->i_blkbits;
1857         block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
1858
1859         for(bh = head, block_start = 0; bh != head || !block_start;
1860             block++, block_start=block_end, bh = bh->b_this_page) {
1861                 block_end = block_start + blocksize;
1862                 if (block_end <= from || block_start >= to) {
1863                         if (PageUptodate(page)) {
1864                                 if (!buffer_uptodate(bh))
1865                                         set_buffer_uptodate(bh);
1866                         }
1867                         continue;
1868                 }
1869                 if (buffer_new(bh))
1870                         clear_buffer_new(bh);
1871                 if (!buffer_mapped(bh)) {
1872                         WARN_ON(bh->b_size != blocksize);
1873                         err = get_block(inode, block, bh, 1);
1874                         if (err)
1875                                 break;
1876                         if (buffer_new(bh)) {
1877                                 unmap_underlying_metadata(bh->b_bdev,
1878                                                         bh->b_blocknr);
1879                                 if (PageUptodate(page)) {
1880                                         clear_buffer_new(bh);
1881                                         set_buffer_uptodate(bh);
1882                                         mark_buffer_dirty(bh);
1883                                         continue;
1884                                 }
1885                                 if (block_end > to || block_start < from)
1886                                         zero_user_segments(page,
1887                                                 to, block_end,
1888                                                 block_start, from);
1889                                 continue;
1890                         }
1891                 }
1892                 if (PageUptodate(page)) {
1893                         if (!buffer_uptodate(bh))
1894                                 set_buffer_uptodate(bh);
1895                         continue; 
1896                 }
1897                 if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
1898                     !buffer_unwritten(bh) &&
1899                      (block_start < from || block_end > to)) {
1900                         ll_rw_block(READ, 1, &bh);
1901                         *wait_bh++=bh;
1902                 }
1903         }
1904         /*
1905          * If we issued read requests - let them complete.
1906          */
1907         while(wait_bh > wait) {
1908                 wait_on_buffer(*--wait_bh);
1909                 if (!buffer_uptodate(*wait_bh))
1910                         err = -EIO;
1911         }
1912         if (unlikely(err)) {
1913                 page_zero_new_buffers(page, from, to);
1914                 ClearPageUptodate(page);
1915         }
1916         return err;
1917 }
1918 EXPORT_SYMBOL(block_prepare_write);
1919
1920 static int __block_commit_write(struct inode *inode, struct page *page,
1921                 unsigned from, unsigned to)
1922 {
1923         unsigned block_start, block_end;
1924         int partial = 0;
1925         unsigned blocksize;
1926         struct buffer_head *bh, *head;
1927
1928         blocksize = 1 << inode->i_blkbits;
1929
1930         for(bh = head = page_buffers(page), block_start = 0;
1931             bh != head || !block_start;
1932             block_start=block_end, bh = bh->b_this_page) {
1933                 block_end = block_start + blocksize;
1934                 if (block_end <= from || block_start >= to) {
1935                         if (!buffer_uptodate(bh))
1936                                 partial = 1;
1937                 } else {
1938                         set_buffer_uptodate(bh);
1939                         mark_buffer_dirty(bh);
1940                 }
1941                 clear_buffer_new(bh);
1942         }
1943
1944         /*
1945          * If this is a partial write which happened to make all buffers
1946          * uptodate then we can optimize away a bogus readpage() for
1947          * the next read(). Here we 'discover' whether the page went
1948          * uptodate as a result of this (potentially partial) write.
1949          */
1950         if (!partial)
1951                 SetPageUptodate(page);
1952         return 0;
1953 }
1954
1955 int __block_write_begin(struct page *page, loff_t pos, unsigned len,
1956                 get_block_t *get_block)
1957 {
1958         unsigned start = pos & (PAGE_CACHE_SIZE - 1);
1959
1960         return block_prepare_write(page, start, start + len, get_block);
1961 }
1962 EXPORT_SYMBOL(__block_write_begin);
1963
1964 /*
1965  * block_write_begin takes care of the basic task of block allocation and
1966  * bringing partial write blocks uptodate first.
1967  *
1968  * The filesystem needs to handle block truncation upon failure.
1969  */
1970 int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
1971                 unsigned flags, struct page **pagep, get_block_t *get_block)
1972 {
1973         pgoff_t index = pos >> PAGE_CACHE_SHIFT;
1974         struct page *page;
1975         int status;
1976
1977         page = grab_cache_page_write_begin(mapping, index, flags);
1978         if (!page)
1979                 return -ENOMEM;
1980
1981         status = __block_write_begin(page, pos, len, get_block);
1982         if (unlikely(status)) {
1983                 unlock_page(page);
1984                 page_cache_release(page);
1985                 page = NULL;
1986         }
1987
1988         *pagep = page;
1989         return status;
1990 }
1991 EXPORT_SYMBOL(block_write_begin);
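
/*
 * Illustrative sketch (hypothetical filesystem "foo" with its own
 * foo_get_block): the classic pairing is block_write_begin() for
 * ->write_begin and generic_write_end() for ->write_end:
 *
 *	static int foo_write_begin(struct file *file, struct address_space *mapping,
 *			loff_t pos, unsigned len, unsigned flags,
 *			struct page **pagep, void **fsdata)
 *	{
 *		return block_write_begin(mapping, pos, len, flags, pagep,
 *					 foo_get_block);
 *	}
 *
 * with the filesystem truncating any blocks it allocated if this fails.
 */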
1992
1993 int block_write_end(struct file *file, struct address_space *mapping,
1994                         loff_t pos, unsigned len, unsigned copied,
1995                         struct page *page, void *fsdata)
1996 {
1997         struct inode *inode = mapping->host;
1998         unsigned start;
1999
2000         start = pos & (PAGE_CACHE_SIZE - 1);
2001
2002         if (unlikely(copied < len)) {
2003                 /*
2004                  * The buffers that were written will now be uptodate, so we
2005                  * don't have to worry about a readpage reading them and
2006                  * overwriting a partial write. However if we have encountered
2007                  * a short write and only partially written into a buffer, it
2008                  * will not be marked uptodate, so a readpage might come in and
2009                  * destroy our partial write.
2010                  *
2011                  * Do the simplest thing, and just treat any short write to a
2012                  * non uptodate page as a zero-length write, and force the
2013                  * caller to redo the whole thing.
2014                  */
2015                 if (!PageUptodate(page))
2016                         copied = 0;
2017
2018                 page_zero_new_buffers(page, start+copied, start+len);
2019         }
2020         flush_dcache_page(page);
2021
2022         /* This could be a short (even 0-length) commit */
2023         __block_commit_write(inode, page, start, start+copied);
2024
2025         return copied;
2026 }
2027 EXPORT_SYMBOL(block_write_end);
2028
2029 int generic_write_end(struct file *file, struct address_space *mapping,
2030                         loff_t pos, unsigned len, unsigned copied,
2031                         struct page *page, void *fsdata)
2032 {
2033         struct inode *inode = mapping->host;
2034         int i_size_changed = 0;
2035
2036         copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
2037
2038         /*
2039          * No need to use i_size_read() here, the i_size
2040          * cannot change under us because we hold i_mutex.
2041          *
2042          * But it's important to update i_size while still holding page lock:
2043          * page writeout could otherwise come in and zero beyond i_size.
2044          */
2045         if (pos+copied > inode->i_size) {
2046                 i_size_write(inode, pos+copied);
2047                 i_size_changed = 1;
2048         }
2049
2050         unlock_page(page);
2051         page_cache_release(page);
2052
2053         /*
2054          * Don't mark the inode dirty under page lock. First, it unnecessarily
2055          * makes the holding time of page lock longer. Second, it forces lock
2056          * ordering of page lock and transaction start for journaling
2057          * filesystems.
2058          */
2059         if (i_size_changed)
2060                 mark_inode_dirty(inode);
2061
2062         return copied;
2063 }
2064 EXPORT_SYMBOL(generic_write_end);
2065
2066 /*
2067  * block_is_partially_uptodate checks whether buffers within a page are
2068  * uptodate or not.
2069  *
2070  * Returns true if all buffers which correspond to the portion of the file
2071  * we want to read are uptodate.
2072  */
2073 int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc,
2074                                         unsigned long from)
2075 {
2076         struct inode *inode = page->mapping->host;
2077         unsigned block_start, block_end, blocksize;
2078         unsigned to;
2079         struct buffer_head *bh, *head;
2080         int ret = 1;
2081
2082         if (!page_has_buffers(page))
2083                 return 0;
2084
2085         blocksize = 1 << inode->i_blkbits;
2086         to = min_t(unsigned, PAGE_CACHE_SIZE - from, desc->count);
2087         to = from + to;
2088         if (from < blocksize && to > PAGE_CACHE_SIZE - blocksize)
2089                 return 0;
2090
2091         head = page_buffers(page);
2092         bh = head;
2093         block_start = 0;
2094         do {
2095                 block_end = block_start + blocksize;
2096                 if (block_end > from && block_start < to) {
2097                         if (!buffer_uptodate(bh)) {
2098                                 ret = 0;
2099                                 break;
2100                         }
2101                         if (block_end >= to)
2102                                 break;
2103                 }
2104                 block_start = block_end;
2105                 bh = bh->b_this_page;
2106         } while (bh != head);
2107
2108         return ret;
2109 }
2110 EXPORT_SYMBOL(block_is_partially_uptodate);
2111
2112 /*
2113  * Generic "read page" function for block devices that have the normal
2114  * get_block functionality. This covers most block device filesystems.
2115  * Reads the page asynchronously --- the unlock_buffer() and
2116  * set/clear_buffer_uptodate() functions propagate buffer state into the
2117  * page struct once IO has completed.
2118  */
2119 int block_read_full_page(struct page *page, get_block_t *get_block)
2120 {
2121         struct inode *inode = page->mapping->host;
2122         sector_t iblock, lblock;
2123         struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
2124         unsigned int blocksize;
2125         int nr, i;
2126         int fully_mapped = 1;
2127
2128         BUG_ON(!PageLocked(page));
2129         blocksize = 1 << inode->i_blkbits;
2130         if (!page_has_buffers(page))
2131                 create_empty_buffers(page, blocksize, 0);
2132         head = page_buffers(page);
2133
2134         iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2135         lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
2136         bh = head;
2137         nr = 0;
2138         i = 0;
2139
2140         do {
2141                 if (buffer_uptodate(bh))
2142                         continue;
2143
2144                 if (!buffer_mapped(bh)) {
2145                         int err = 0;
2146
2147                         fully_mapped = 0;
2148                         if (iblock < lblock) {
2149                                 WARN_ON(bh->b_size != blocksize);
2150                                 err = get_block(inode, iblock, bh, 0);
2151                                 if (err)
2152                                         SetPageError(page);
2153                         }
2154                         if (!buffer_mapped(bh)) {
2155                                 zero_user(page, i * blocksize, blocksize);
2156                                 if (!err)
2157                                         set_buffer_uptodate(bh);
2158                                 continue;
2159                         }
2160                         /*
2161                          * get_block() might have updated the buffer
2162                          * synchronously
2163                          */
2164                         if (buffer_uptodate(bh))
2165                                 continue;
2166                 }
2167                 arr[nr++] = bh;
2168         } while (i++, iblock++, (bh = bh->b_this_page) != head);
2169
2170         if (fully_mapped)
2171                 SetPageMappedToDisk(page);
2172
2173         if (!nr) {
2174                 /*
2175                  * All buffers are uptodate - we can set the page uptodate
2176                  * as well. But not if get_block() returned an error.
2177                  */
2178                 if (!PageError(page))
2179                         SetPageUptodate(page);
2180                 unlock_page(page);
2181                 return 0;
2182         }
2183
2184         /* Stage two: lock the buffers */
2185         for (i = 0; i < nr; i++) {
2186                 bh = arr[i];
2187                 lock_buffer(bh);
2188                 mark_buffer_async_read(bh);
2189         }
2190
2191         /*
2192          * Stage 3: start the IO.  Check for uptodateness
2193          * inside the buffer lock in case another process reading
2194          * the underlying blockdev brought it uptodate (the sct fix).
2195          */
2196         for (i = 0; i < nr; i++) {
2197                 bh = arr[i];
2198                 if (buffer_uptodate(bh))
2199                         end_buffer_async_read(bh, 1);
2200                 else
2201                         submit_bh(READ, bh);
2202         }
2203         return 0;
2204 }
2205 EXPORT_SYMBOL(block_read_full_page);
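
/*
 * Illustrative sketch (hypothetical foo_get_block): most buffer-backed
 * filesystems implement ->readpage as a thin wrapper:
 *
 *	static int foo_readpage(struct file *file, struct page *page)
 *	{
 *		return block_read_full_page(page, foo_get_block);
 *	}
 */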
2206
2207 /* utility function for filesystems that need to do work on expanding
2208  * truncates.  Uses filesystem pagecache writes to allow the filesystem to
2209  * deal with the hole.  
2210  */
2211 int generic_cont_expand_simple(struct inode *inode, loff_t size)
2212 {
2213         struct address_space *mapping = inode->i_mapping;
2214         struct page *page;
2215         void *fsdata;
2216         int err;
2217
2218         err = inode_newsize_ok(inode, size);
2219         if (err)
2220                 goto out;
2221
2222         err = pagecache_write_begin(NULL, mapping, size, 0,
2223                                 AOP_FLAG_UNINTERRUPTIBLE|AOP_FLAG_CONT_EXPAND,
2224                                 &page, &fsdata);
2225         if (err)
2226                 goto out;
2227
2228         err = pagecache_write_end(NULL, mapping, size, 0, 0, page, fsdata);
2229         BUG_ON(err > 0);
2230
2231 out:
2232         return err;
2233 }
2234 EXPORT_SYMBOL(generic_cont_expand_simple);
2235
2236 static int cont_expand_zero(struct file *file, struct address_space *mapping,
2237                             loff_t pos, loff_t *bytes)
2238 {
2239         struct inode *inode = mapping->host;
2240         unsigned blocksize = 1 << inode->i_blkbits;
2241         struct page *page;
2242         void *fsdata;
2243         pgoff_t index, curidx;
2244         loff_t curpos;
2245         unsigned zerofrom, offset, len;
2246         int err = 0;
2247
2248         index = pos >> PAGE_CACHE_SHIFT;
2249         offset = pos & ~PAGE_CACHE_MASK;
2250
2251         while (index > (curidx = (curpos = *bytes)>>PAGE_CACHE_SHIFT)) {
2252                 zerofrom = curpos & ~PAGE_CACHE_MASK;
2253                 if (zerofrom & (blocksize-1)) {
2254                         *bytes |= (blocksize-1);
2255                         (*bytes)++;
2256                 }
2257                 len = PAGE_CACHE_SIZE - zerofrom;
2258
2259                 err = pagecache_write_begin(file, mapping, curpos, len,
2260                                                 AOP_FLAG_UNINTERRUPTIBLE,
2261                                                 &page, &fsdata);
2262                 if (err)
2263                         goto out;
2264                 zero_user(page, zerofrom, len);
2265                 err = pagecache_write_end(file, mapping, curpos, len, len,
2266                                                 page, fsdata);
2267                 if (err < 0)
2268                         goto out;
2269                 BUG_ON(err != len);
2270                 err = 0;
2271
2272                 balance_dirty_pages_ratelimited(mapping);
2273         }
2274
2275         /* page covers the boundary, find the boundary offset */
2276         if (index == curidx) {
2277                 zerofrom = curpos & ~PAGE_CACHE_MASK;
2278                 /* if we will expand the file, the last block will be filled */
2279                 if (offset <= zerofrom) {
2280                         goto out;
2281                 }
2282                 if (zerofrom & (blocksize-1)) {
2283                         *bytes |= (blocksize-1);
2284                         (*bytes)++;
2285                 }
2286                 len = offset - zerofrom;
2287
2288                 err = pagecache_write_begin(file, mapping, curpos, len,
2289                                                 AOP_FLAG_UNINTERRUPTIBLE,
2290                                                 &page, &fsdata);
2291                 if (err)
2292                         goto out;
2293                 zero_user(page, zerofrom, len);
2294                 err = pagecache_write_end(file, mapping, curpos, len, len,
2295                                                 page, fsdata);
2296                 if (err < 0)
2297                         goto out;
2298                 BUG_ON(err != len);
2299                 err = 0;
2300         }
2301 out:
2302         return err;
2303 }
2304
2305 /*
2306  * For moronic filesystems that do not allow holes in files.
2307  * We may have to extend the file.
2308  */
2309 int cont_write_begin(struct file *file, struct address_space *mapping,
2310                         loff_t pos, unsigned len, unsigned flags,
2311                         struct page **pagep, void **fsdata,
2312                         get_block_t *get_block, loff_t *bytes)
2313 {
2314         struct inode *inode = mapping->host;
2315         unsigned blocksize = 1 << inode->i_blkbits;
2316         unsigned zerofrom;
2317         int err;
2318
2319         err = cont_expand_zero(file, mapping, pos, bytes);
2320         if (err)
2321                 return err;
2322
2323         zerofrom = *bytes & ~PAGE_CACHE_MASK;
2324         if (pos+len > *bytes && zerofrom & (blocksize-1)) {
2325                 *bytes |= (blocksize-1);
2326                 (*bytes)++;
2327         }
2328
2329         return block_write_begin(mapping, pos, len, flags, pagep, get_block);
2330 }
2331 EXPORT_SYMBOL(cont_write_begin);
2332
2333 int block_commit_write(struct page *page, unsigned from, unsigned to)
2334 {
2335         struct inode *inode = page->mapping->host;
2336         __block_commit_write(inode,page,from,to);
2337         return 0;
2338 }
2339 EXPORT_SYMBOL(block_commit_write);
2340
2341 /*
2342  * block_page_mkwrite() is not allowed to change the file size as it gets
2343  * called from a page fault handler when a page is first dirtied. Hence we must
2344  * be careful to check for EOF conditions here. We set the page up correctly
2345  * for a written page which means we get ENOSPC checking when writing into
2346  * holes and correct delalloc and unwritten extent mapping on filesystems that
2347  * support these features.
2348  *
2349  * We are not allowed to take the i_mutex here so we have to play games to
2350  * protect against truncate races as the page could now be beyond EOF.  Because
2351  * truncate writes the inode size before removing pages, once we have the
2352  * page lock we can determine safely if the page is beyond EOF. If it is not
2353  * beyond EOF, then the page is guaranteed safe against truncation until we
2354  * unlock the page.
2355  */
2356 int
2357 block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
2358                    get_block_t get_block)
2359 {
2360         struct page *page = vmf->page;
2361         struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
2362         unsigned long end;
2363         loff_t size;
2364         int ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
2365
2366         lock_page(page);
2367         size = i_size_read(inode);
2368         if ((page->mapping != inode->i_mapping) ||
2369             (page_offset(page) > size)) {
2370                 /* page got truncated out from underneath us */
2371                 unlock_page(page);
2372                 goto out;
2373         }
2374
2375         /* page is wholly or partially inside EOF */
2376         if (((page->index + 1) << PAGE_CACHE_SHIFT) > size)
2377                 end = size & ~PAGE_CACHE_MASK;
2378         else
2379                 end = PAGE_CACHE_SIZE;
2380
2381         ret = block_prepare_write(page, 0, end, get_block);
2382         if (!ret)
2383                 ret = block_commit_write(page, 0, end);
2384
2385         if (unlikely(ret)) {
2386                 unlock_page(page);
2387                 if (ret == -ENOMEM)
2388                         ret = VM_FAULT_OOM;
2389                 else /* -ENOSPC, -EIO, etc */
2390                         ret = VM_FAULT_SIGBUS;
2391         } else
2392                 ret = VM_FAULT_LOCKED;
2393
2394 out:
2395         return ret;
2396 }
2397 EXPORT_SYMBOL(block_page_mkwrite);
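
/*
 * Illustrative sketch (hypothetical foo_get_block): a filesystem exposes this
 * through its vm_operations_struct:
 *
 *	static int foo_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 *	{
 *		return block_page_mkwrite(vma, vmf, foo_get_block);
 *	}
 *
 * installed as the .page_mkwrite handler in the file's vm_ops.
 */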
2398
2399 /*
2400  * nobh_write_begin()'s prereads are special: the buffer_heads are freed
2401  * immediately, while under the page lock.  So it needs a special end_io
2402  * handler which does not touch the bh after unlocking it.
2403  */
2404 static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
2405 {
2406         __end_buffer_read_notouch(bh, uptodate);
2407 }
2408
2409 /*
2410  * Attach the singly-linked list of buffers created by nobh_write_begin to
2411  * the page (converting it to a circular linked list and taking care of page
2412  * dirty races).
2413  */
2414 static void attach_nobh_buffers(struct page *page, struct buffer_head *head)
2415 {
2416         struct buffer_head *bh;
2417
2418         BUG_ON(!PageLocked(page));
2419
2420         spin_lock(&page->mapping->private_lock);
2421         bh = head;
2422         do {
2423                 if (PageDirty(page))
2424                         set_buffer_dirty(bh);
2425                 if (!bh->b_this_page)
2426                         bh->b_this_page = head;
2427                 bh = bh->b_this_page;
2428         } while (bh != head);
2429         attach_page_buffers(page, head);
2430         spin_unlock(&page->mapping->private_lock);
2431 }
2432
2433 /*
2434  * On entry, the page is fully not uptodate.
2435  * On exit, the page is fully uptodate in the areas outside (from,to).
2436  * The filesystem needs to handle block truncation upon failure.
2437  */
2438 int nobh_write_begin(struct address_space *mapping,
2439                         loff_t pos, unsigned len, unsigned flags,
2440                         struct page **pagep, void **fsdata,
2441                         get_block_t *get_block)
2442 {
2443         struct inode *inode = mapping->host;
2444         const unsigned blkbits = inode->i_blkbits;
2445         const unsigned blocksize = 1 << blkbits;
2446         struct buffer_head *head, *bh;
2447         struct page *page;
2448         pgoff_t index;
2449         unsigned from, to;
2450         unsigned block_in_page;
2451         unsigned block_start, block_end;
2452         sector_t block_in_file;
2453         int nr_reads = 0;
2454         int ret = 0;
2455         int is_mapped_to_disk = 1;
2456
2457         index = pos >> PAGE_CACHE_SHIFT;
2458         from = pos & (PAGE_CACHE_SIZE - 1);
2459         to = from + len;
2460
2461         page = grab_cache_page_write_begin(mapping, index, flags);
2462         if (!page)
2463                 return -ENOMEM;
2464         *pagep = page;
2465         *fsdata = NULL;
2466
2467         if (page_has_buffers(page)) {
2468                 unlock_page(page);
2469                 page_cache_release(page);
2470                 *pagep = NULL;
2471                 return block_write_begin(mapping, pos, len, flags, pagep,
2472                                          get_block);
2473         }
2474
2475         if (PageMappedToDisk(page))
2476                 return 0;
2477
2478         /*
2479          * Allocate buffers so that we can keep track of state, and potentially
2480          * attach them to the page if an error occurs. In the common case of
2481          * no error, they will just be freed again without ever being attached
2482          * to the page (which is all OK, because we're under the page lock).
2483          *
2484          * Be careful: the buffer linked list is a NULL terminated one, rather
2485          * than the circular one we're used to.
2486          */
2487         head = alloc_page_buffers(page, blocksize, 0);
2488         if (!head) {
2489                 ret = -ENOMEM;
2490                 goto out_release;
2491         }
2492
2493         block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
2494
2495         /*
2496          * We loop across all blocks in the page, whether or not they are
2497          * part of the affected region.  This is so we can discover if the
2498          * page is fully mapped-to-disk.
2499          */
2500         for (block_start = 0, block_in_page = 0, bh = head;
2501                   block_start < PAGE_CACHE_SIZE;
2502                   block_in_page++, block_start += blocksize, bh = bh->b_this_page) {
2503                 int create;
2504
2505                 block_end = block_start + blocksize;
2506                 bh->b_state = 0;
2507                 create = 1;
2508                 if (block_start >= to)
2509                         create = 0;
2510                 ret = get_block(inode, block_in_file + block_in_page,
2511                                         bh, create);
2512                 if (ret)
2513                         goto failed;
2514                 if (!buffer_mapped(bh))
2515                         is_mapped_to_disk = 0;
2516                 if (buffer_new(bh))
2517                         unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
2518                 if (PageUptodate(page)) {
2519                         set_buffer_uptodate(bh);
2520                         continue;
2521                 }
2522                 if (buffer_new(bh) || !buffer_mapped(bh)) {
2523                         zero_user_segments(page, block_start, from,
2524                                                         to, block_end);
2525                         continue;
2526                 }
2527                 if (buffer_uptodate(bh))
2528                         continue;       /* reiserfs does this */
2529                 if (block_start < from || block_end > to) {
2530                         lock_buffer(bh);
2531                         bh->b_end_io = end_buffer_read_nobh;
2532                         submit_bh(READ, bh);
2533                         nr_reads++;
2534                 }
2535         }
2536
2537         if (nr_reads) {
2538                 /*
2539                  * The page is locked, so these buffers are protected from
2540                  * any VM or truncate activity.  Hence we don't need to care
2541                  * for the buffer_head refcounts.
2542                  */
2543                 for (bh = head; bh; bh = bh->b_this_page) {
2544                         wait_on_buffer(bh);
2545                         if (!buffer_uptodate(bh))
2546                                 ret = -EIO;
2547                 }
2548                 if (ret)
2549                         goto failed;
2550         }
2551
2552         if (is_mapped_to_disk)
2553                 SetPageMappedToDisk(page);
2554
2555         *fsdata = head; /* to be released by nobh_write_end */
2556
2557         return 0;
2558
2559 failed:
2560         BUG_ON(!ret);
2561         /*
2562          * Error recovery is a bit difficult. We need to zero out blocks that
2563          * were newly allocated, and dirty them to ensure they get written out.
2564          * Buffers need to be attached to the page at this point, otherwise
2565          * the handling of potential IO errors during writeout would be hard
2566          * (could try doing synchronous writeout, but what if that fails too?)
2567          */
2568         attach_nobh_buffers(page, head);
2569         page_zero_new_buffers(page, from, to);
2570
2571 out_release:
2572         unlock_page(page);
2573         page_cache_release(page);
2574         *pagep = NULL;
2575
2576         return ret;
2577 }
2578 EXPORT_SYMBOL(nobh_write_begin);
2579
2580 int nobh_write_end(struct file *file, struct address_space *mapping,
2581                         loff_t pos, unsigned len, unsigned copied,
2582                         struct page *page, void *fsdata)
2583 {
2584         struct inode *inode = page->mapping->host;
2585         struct buffer_head *head = fsdata;
2586         struct buffer_head *bh;
2587         BUG_ON(fsdata != NULL && page_has_buffers(page));
2588
2589         if (unlikely(copied < len) && head)
2590                 attach_nobh_buffers(page, head);
2591         if (page_has_buffers(page))
2592                 return generic_write_end(file, mapping, pos, len,
2593                                         copied, page, fsdata);
2594
2595         SetPageUptodate(page);
2596         set_page_dirty(page);
2597         if (pos+copied > inode->i_size) {
2598                 i_size_write(inode, pos+copied);
2599                 mark_inode_dirty(inode);
2600         }
2601
2602         unlock_page(page);
2603         page_cache_release(page);
2604
2605         while (head) {
2606                 bh = head;
2607                 head = head->b_this_page;
2608                 free_buffer_head(bh);
2609         }
2610
2611         return copied;
2612 }
2613 EXPORT_SYMBOL(nobh_write_end);
2614
2615 /*
2616  * nobh_writepage() - based on block_write_full_page() except
2617  * that it tries to operate without attaching bufferheads to
2618  * the page.
2619  */
2620 int nobh_writepage(struct page *page, get_block_t *get_block,
2621                         struct writeback_control *wbc)
2622 {
2623         struct inode * const inode = page->mapping->host;
2624         loff_t i_size = i_size_read(inode);
2625         const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2626         unsigned offset;
2627         int ret;
2628
2629         /* Is the page fully inside i_size? */
2630         if (page->index < end_index)
2631                 goto out;
2632
2633         /* Is the page fully outside i_size? (truncate in progress) */
2634         offset = i_size & (PAGE_CACHE_SIZE-1);
2635         if (page->index >= end_index+1 || !offset) {
2636                 /*
2637                  * The page may have dirty, unmapped buffers.  For example,
2638                  * they may have been added in ext3_writepage().  Make them
2639                  * freeable here, so the page does not leak.
2640                  */
2641 #if 0
2642                 /* Not really sure about this  - do we need this ? */
2643                 if (page->mapping->a_ops->invalidatepage)
2644                         page->mapping->a_ops->invalidatepage(page, offset);
2645 #endif
2646                 unlock_page(page);
2647                 return 0; /* don't care */
2648         }
2649
2650         /*
2651          * The page straddles i_size.  It must be zeroed out on each and every
2652          * writepage invocation because it may be mmapped.  "A file is mapped
2653          * in multiples of the page size.  For a file that is not a multiple of
2654          * the  page size, the remaining memory is zeroed when mapped, and
2655          * writes to that region are not written out to the file."
2656          */
2657         zero_user_segment(page, offset, PAGE_CACHE_SIZE);
2658 out:
2659         ret = mpage_writepage(page, get_block, wbc);
2660         if (ret == -EAGAIN)
2661                 ret = __block_write_full_page(inode, page, get_block, wbc,
2662                                               end_buffer_async_write);
2663         return ret;
2664 }
2665 EXPORT_SYMBOL(nobh_writepage);
2666
2667 int nobh_truncate_page(struct address_space *mapping,
2668                         loff_t from, get_block_t *get_block)
2669 {
2670         pgoff_t index = from >> PAGE_CACHE_SHIFT;
2671         unsigned offset = from & (PAGE_CACHE_SIZE-1);
2672         unsigned blocksize;
2673         sector_t iblock;
2674         unsigned length, pos;
2675         struct inode *inode = mapping->host;
2676         struct page *page;
2677         struct buffer_head map_bh;
2678         int err;
2679
2680         blocksize = 1 << inode->i_blkbits;
2681         length = offset & (blocksize - 1);
2682
2683         /* Block boundary? Nothing to do */
2684         if (!length)
2685                 return 0;
2686
2687         length = blocksize - length;
2688         iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2689
2690         page = grab_cache_page(mapping, index);
2691         err = -ENOMEM;
2692         if (!page)
2693                 goto out;
2694
2695         if (page_has_buffers(page)) {
2696 has_buffers:
2697                 unlock_page(page);
2698                 page_cache_release(page);
2699                 return block_truncate_page(mapping, from, get_block);
2700         }
2701
2702         /* Find the buffer that contains "offset" */
2703         pos = blocksize;
2704         while (offset >= pos) {
2705                 iblock++;
2706                 pos += blocksize;
2707         }
2708
2709         map_bh.b_size = blocksize;
2710         map_bh.b_state = 0;
2711         err = get_block(inode, iblock, &map_bh, 0);
2712         if (err)
2713                 goto unlock;
2714         /* unmapped? It's a hole - nothing to do */
2715         if (!buffer_mapped(&map_bh))
2716                 goto unlock;
2717
2718         /* Ok, it's mapped. Make sure it's up-to-date */
2719         if (!PageUptodate(page)) {
2720                 err = mapping->a_ops->readpage(NULL, page);
2721                 if (err) {
2722                         page_cache_release(page);
2723                         goto out;
2724                 }
2725                 lock_page(page);
2726                 if (!PageUptodate(page)) {
2727                         err = -EIO;
2728                         goto unlock;
2729                 }
2730                 if (page_has_buffers(page))
2731                         goto has_buffers;
2732         }
2733         zero_user(page, offset, length);
2734         set_page_dirty(page);
2735         err = 0;
2736
2737 unlock:
2738         unlock_page(page);
2739         page_cache_release(page);
2740 out:
2741         return err;
2742 }
2743 EXPORT_SYMBOL(nobh_truncate_page);
2744
2745 int block_truncate_page(struct address_space *mapping,
2746                         loff_t from, get_block_t *get_block)
2747 {
2748         pgoff_t index = from >> PAGE_CACHE_SHIFT;
2749         unsigned offset = from & (PAGE_CACHE_SIZE-1);
2750         unsigned blocksize;
2751         sector_t iblock;
2752         unsigned length, pos;
2753         struct inode *inode = mapping->host;
2754         struct page *page;
2755         struct buffer_head *bh;
2756         int err;
2757
2758         blocksize = 1 << inode->i_blkbits;
2759         length = offset & (blocksize - 1);
2760
2761         /* Block boundary? Nothing to do */
2762         if (!length)
2763                 return 0;
2764
2765         length = blocksize - length;
2766         iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2767         
2768         page = grab_cache_page(mapping, index);
2769         err = -ENOMEM;
2770         if (!page)
2771                 goto out;
2772
2773         if (!page_has_buffers(page))
2774                 create_empty_buffers(page, blocksize, 0);
2775
2776         /* Find the buffer that contains "offset" */
2777         bh = page_buffers(page);
2778         pos = blocksize;
2779         while (offset >= pos) {
2780                 bh = bh->b_this_page;
2781                 iblock++;
2782                 pos += blocksize;
2783         }
2784
2785         err = 0;
2786         if (!buffer_mapped(bh)) {
2787                 WARN_ON(bh->b_size != blocksize);
2788                 err = get_block(inode, iblock, bh, 0);
2789                 if (err)
2790                         goto unlock;
2791                 /* unmapped? It's a hole - nothing to do */
2792                 if (!buffer_mapped(bh))
2793                         goto unlock;
2794         }
2795
2796         /* Ok, it's mapped. Make sure it's up-to-date */
2797         if (PageUptodate(page))
2798                 set_buffer_uptodate(bh);
2799
2800         if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
2801                 err = -EIO;
2802                 ll_rw_block(READ, 1, &bh);
2803                 wait_on_buffer(bh);
2804                 /* Uhhuh. Read error. Complain and punt. */
2805                 if (!buffer_uptodate(bh))
2806                         goto unlock;
2807         }
2808
2809         zero_user(page, offset, length);
2810         mark_buffer_dirty(bh);
2811         err = 0;
2812
2813 unlock:
2814         unlock_page(page);
2815         page_cache_release(page);
2816 out:
2817         return err;
2818 }
2819 EXPORT_SYMBOL(block_truncate_page);
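
/*
 * Illustrative sketch (hypothetical foo_get_block): a filesystem's truncate
 * path typically zeroes the partial last block with:
 *
 *	err = block_truncate_page(inode->i_mapping, inode->i_size,
 *				  foo_get_block);
 */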
2820
2821 /*
2822  * The generic ->writepage function for buffer-backed address_spaces;
2823  * this form passes in the end_io handler used to finish the IO.
2824  */
2825 int block_write_full_page_endio(struct page *page, get_block_t *get_block,
2826                         struct writeback_control *wbc, bh_end_io_t *handler)
2827 {
2828         struct inode * const inode = page->mapping->host;
2829         loff_t i_size = i_size_read(inode);
2830         const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2831         unsigned offset;
2832
2833         /* Is the page fully inside i_size? */
2834         if (page->index < end_index)
2835                 return __block_write_full_page(inode, page, get_block, wbc,
2836                                                handler);
2837
2838         /* Is the page fully outside i_size? (truncate in progress) */
2839         offset = i_size & (PAGE_CACHE_SIZE-1);
2840         if (page->index >= end_index+1 || !offset) {
2841                 /*
2842                  * The page may have dirty, unmapped buffers.  For example,
2843                  * they may have been added in ext3_writepage().  Make them
2844                  * freeable here, so the page does not leak.
2845                  */
2846                 do_invalidatepage(page, 0);
2847                 unlock_page(page);
2848                 return 0; /* don't care */
2849         }
2850
2851         /*
2852          * The page straddles i_size.  It must be zeroed out on each and every
2853          * writepage invocation because it may be mmapped.  "A file is mapped
2854          * in multiples of the page size.  For a file that is not a multiple of
2855          * the  page size, the remaining memory is zeroed when mapped, and
2856          * writes to that region are not written out to the file."
2857          */
2858         zero_user_segment(page, offset, PAGE_CACHE_SIZE);
2859         return __block_write_full_page(inode, page, get_block, wbc, handler);
2860 }
2861 EXPORT_SYMBOL(block_write_full_page_endio);
2862
2863 /*
2864  * The generic ->writepage function for buffer-backed address_spaces
2865  */
2866 int block_write_full_page(struct page *page, get_block_t *get_block,
2867                         struct writeback_control *wbc)
2868 {
2869         return block_write_full_page_endio(page, get_block, wbc,
2870                                            end_buffer_async_write);
2871 }
2872 EXPORT_SYMBOL(block_write_full_page);
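
/*
 * Illustrative sketch (hypothetical foo_get_block): the usual ->writepage
 * wrapper for a buffer-backed filesystem:
 *
 *	static int foo_writepage(struct page *page, struct writeback_control *wbc)
 *	{
 *		return block_write_full_page(page, foo_get_block, wbc);
 *	}
 */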
2873
2874 sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2875                             get_block_t *get_block)
2876 {
2877         struct buffer_head tmp;
2878         struct inode *inode = mapping->host;
2879         tmp.b_state = 0;
2880         tmp.b_blocknr = 0;
2881         tmp.b_size = 1 << inode->i_blkbits;
2882         get_block(inode, block, &tmp, 0);
2883         return tmp.b_blocknr;
2884 }
2885 EXPORT_SYMBOL(generic_block_bmap);
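
/*
 * Illustrative sketch (hypothetical foo_get_block): ->bmap is usually just:
 *
 *	static sector_t foo_bmap(struct address_space *mapping, sector_t block)
 *	{
 *		return generic_block_bmap(mapping, block, foo_get_block);
 *	}
 */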
2886
2887 static void end_bio_bh_io_sync(struct bio *bio, int err)
2888 {
2889         struct buffer_head *bh = bio->bi_private;
2890
2891         if (err == -EOPNOTSUPP) {
2892                 set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
2893         }
2894
2895         if (unlikely (test_bit(BIO_QUIET,&bio->bi_flags)))
2896                 set_bit(BH_Quiet, &bh->b_state);
2897
2898         bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
2899         bio_put(bio);
2900 }
2901
2902 int submit_bh(int rw, struct buffer_head * bh)
2903 {
2904         struct bio *bio;
2905         int ret = 0;
2906
2907         BUG_ON(!buffer_locked(bh));
2908         BUG_ON(!buffer_mapped(bh));
2909         BUG_ON(!bh->b_end_io);
2910         BUG_ON(buffer_delay(bh));
2911         BUG_ON(buffer_unwritten(bh));
2912
2913         /*
2914          * Only clear out a write error when rewriting
2915          */
2916         if (test_set_buffer_req(bh) && (rw & WRITE))
2917                 clear_buffer_write_io_error(bh);
2918
2919         /*
2920          * from here on down, it's all bio -- do the initial mapping,
2921          * submit_bio -> generic_make_request may further map this bio around
2922          */
2923         bio = bio_alloc(GFP_NOIO, 1);
2924
2925         bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
2926         bio->bi_bdev = bh->b_bdev;
2927         bio->bi_io_vec[0].bv_page = bh->b_page;
2928         bio->bi_io_vec[0].bv_len = bh->b_size;
2929         bio->bi_io_vec[0].bv_offset = bh_offset(bh);
2930
2931         bio->bi_vcnt = 1;
2932         bio->bi_idx = 0;
2933         bio->bi_size = bh->b_size;
2934
2935         bio->bi_end_io = end_bio_bh_io_sync;
2936         bio->bi_private = bh;
2937
2938         bio_get(bio);
2939         submit_bio(rw, bio);
2940
2941         if (bio_flagged(bio, BIO_EOPNOTSUPP))
2942                 ret = -EOPNOTSUPP;
2943
2944         bio_put(bio);
2945         return ret;
2946 }
2947 EXPORT_SYMBOL(submit_bh);
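/*
 * Typical call pattern (illustrative sketch, not from the original source):
 * the caller must hold the buffer lock, own a reference and install an
 * end_io handler before calling submit_bh().  A synchronous read looks
 * roughly like the hypothetical helper below; bh_submit_read() further down
 * implements the same pattern for buffers that are already locked.
 */
#if 0
static int example_read_bh_sync(struct buffer_head *bh)
{
        lock_buffer(bh);
        if (buffer_uptodate(bh)) {
                unlock_buffer(bh);
                return 0;
        }
        get_bh(bh);                     /* end_buffer_read_sync drops this ref */
        bh->b_end_io = end_buffer_read_sync;
        submit_bh(READ, bh);
        wait_on_buffer(bh);
        return buffer_uptodate(bh) ? 0 : -EIO;
}
#endif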
2948
2949 /**
2950  * ll_rw_block: low-level access to block devices (DEPRECATED)
2951  * @rw: whether to %READ or %WRITE or maybe %READA (readahead)
2952  * @nr: number of &struct buffer_heads in the array
2953  * @bhs: array of pointers to &struct buffer_head
2954  *
2955  * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
2956  * requests an I/O operation on them, either a %READ or a %WRITE.  The third
2957  * %READA option is described in the documentation for generic_make_request()
2958  * which ll_rw_block() calls.
2959  *
2960  * This function drops any buffer that it cannot get a lock on (with the
2961  * BH_Lock state bit), any buffer that appears to be clean when doing a write
2962  * request, and any buffer that appears to be up-to-date when doing a read
2963  * request.  Further it marks as clean buffers that are processed for
2964  * writing (the buffer cache won't assume that they are actually clean
2965  * until the buffer gets unlocked).
2966  *
2967  * ll_rw_block sets b_end_io to a simple completion handler that marks
2968  * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
2969  * any waiters.
2970  *
2971  * All of the buffers must be for the same device, and their size must
2972  * be a multiple of the currently approved block size for the device.
2973  */
2974 void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
2975 {
2976         int i;
2977
2978         for (i = 0; i < nr; i++) {
2979                 struct buffer_head *bh = bhs[i];
2980
2981                 if (!trylock_buffer(bh))
2982                         continue;
2983                 if (rw == WRITE) {
2984                         if (test_clear_buffer_dirty(bh)) {
2985                                 bh->b_end_io = end_buffer_write_sync;
2986                                 get_bh(bh);
2987                                 submit_bh(WRITE, bh);
2988                                 continue;
2989                         }
2990                 } else {
2991                         if (!buffer_uptodate(bh)) {
2992                                 bh->b_end_io = end_buffer_read_sync;
2993                                 get_bh(bh);
2994                                 submit_bh(rw, bh);
2995                                 continue;
2996                         }
2997                 }
2998                 unlock_buffer(bh);
2999         }
3000 }
3001 EXPORT_SYMBOL(ll_rw_block);
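/*
 * Usage sketch (illustrative, not from the original source): start reads on
 * a batch of buffer heads and then wait only for the one that is needed
 * immediately.  Because ll_rw_block() may skip a buffer entirely (see the
 * comment above), callers that need the data must re-check buffer_uptodate()
 * after waiting.  "example_readahead_then_read" is a hypothetical helper.
 */
#if 0
static int example_readahead_then_read(struct buffer_head *bhs[], int nr)
{
        ll_rw_block(READ, nr, bhs);     /* kick off async reads on the batch */
        wait_on_buffer(bhs[0]);         /* wait only for the buffer needed now */
        return buffer_uptodate(bhs[0]) ? 0 : -EIO;
}
#endif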
3002
3003 void write_dirty_buffer(struct buffer_head *bh, int rw)
3004 {
3005         lock_buffer(bh);
3006         if (!test_clear_buffer_dirty(bh)) {
3007                 unlock_buffer(bh);
3008                 return;
3009         }
3010         bh->b_end_io = end_buffer_write_sync;
3011         get_bh(bh);
3012         submit_bh(rw, bh);
3013 }
3014 EXPORT_SYMBOL(write_dirty_buffer);
3015
3016 /*
3017  * For a data-integrity writeout, we need to wait upon any in-progress I/O
3018  * and then start new I/O and then wait upon it.  The caller must have a ref on
3019  * the buffer_head.
3020  */
3021 int __sync_dirty_buffer(struct buffer_head *bh, int rw)
3022 {
3023         int ret = 0;
3024
3025         WARN_ON(atomic_read(&bh->b_count) < 1);
3026         lock_buffer(bh);
3027         if (test_clear_buffer_dirty(bh)) {
3028                 get_bh(bh);
3029                 bh->b_end_io = end_buffer_write_sync;
3030                 ret = submit_bh(rw, bh);
3031                 wait_on_buffer(bh);
3032                 if (!ret && !buffer_uptodate(bh))
3033                         ret = -EIO;
3034         } else {
3035                 unlock_buffer(bh);
3036         }
3037         return ret;
3038 }
3039 EXPORT_SYMBOL(__sync_dirty_buffer);
3040
3041 int sync_dirty_buffer(struct buffer_head *bh)
3042 {
3043         return __sync_dirty_buffer(bh, WRITE_SYNC);
3044 }
3045 EXPORT_SYMBOL(sync_dirty_buffer);
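/*
 * Usage sketch (illustrative, not from the original source): a metadata
 * update that must reach the disk before the caller proceeds.  The reference
 * taken by sb_bread() satisfies the "caller must have a ref" requirement
 * documented above.  "example_update_block" and its parameters are
 * hypothetical names used only for this sketch.
 */
#if 0
static int example_update_block(struct super_block *sb, sector_t block,
                                const void *data, size_t len)
{
        struct buffer_head *bh;
        int err;

        bh = sb_bread(sb, block);       /* read the block and take a ref */
        if (!bh)
                return -EIO;
        memcpy(bh->b_data, data, len);
        mark_buffer_dirty(bh);
        err = sync_dirty_buffer(bh);    /* write it out and wait for completion */
        brelse(bh);
        return err;
}
#endif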
3046
3047 /*
3048  * try_to_free_buffers() checks if all the buffers on this particular page
3049  * are unused, and releases them if so.
3050  *
3051  * Exclusion against try_to_free_buffers may be obtained by either
3052  * locking the page or by holding its mapping's private_lock.
3053  *
3054  * If the page is dirty but all the buffers are clean then we need to
3055  * be sure to mark the page clean as well.  This is because the page
3056  * may be against a block device, and a later reattachment of buffers
3057  * to a dirty page will set *all* buffers dirty.  Which would corrupt
3058  * filesystem data on the same device.
3059  *
3060  * The same applies to regular filesystem pages: if all the buffers are
3061  * clean then we set the page clean and proceed.  To do that, we require
3062  * total exclusion from __set_page_dirty_buffers().  That is obtained with
3063  * private_lock.
3064  *
3065  * try_to_free_buffers() is non-blocking.
3066  */
3067 static inline int buffer_busy(struct buffer_head *bh)
3068 {
3069         return atomic_read(&bh->b_count) |
3070                 (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
3071 }
3072
3073 static int
3074 drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
3075 {
3076         struct buffer_head *head = page_buffers(page);
3077         struct buffer_head *bh;
3078
3079         bh = head;
3080         do {
3081                 if (buffer_write_io_error(bh) && page->mapping)
3082                         set_bit(AS_EIO, &page->mapping->flags);
3083                 if (buffer_busy(bh))
3084                         goto failed;
3085                 bh = bh->b_this_page;
3086         } while (bh != head);
3087
3088         do {
3089                 struct buffer_head *next = bh->b_this_page;
3090
3091                 if (bh->b_assoc_map)
3092                         __remove_assoc_queue(bh);
3093                 bh = next;
3094         } while (bh != head);
3095         *buffers_to_free = head;
3096         __clear_page_buffers(page);
3097         return 1;
3098 failed:
3099         return 0;
3100 }
3101
3102 int try_to_free_buffers(struct page *page)
3103 {
3104         struct address_space * const mapping = page->mapping;
3105         struct buffer_head *buffers_to_free = NULL;
3106         int ret = 0;
3107
3108         BUG_ON(!PageLocked(page));
3109         if (PageWriteback(page))
3110                 return 0;
3111
3112         if (mapping == NULL) {          /* can this still happen? */
3113                 ret = drop_buffers(page, &buffers_to_free);
3114                 goto out;
3115         }
3116
3117         spin_lock(&mapping->private_lock);
3118         ret = drop_buffers(page, &buffers_to_free);
3119
3120         /*
3121          * If the filesystem writes its buffers by hand (eg ext3)
3122          * then we can have clean buffers against a dirty page.  We
3123          * clean the page here; otherwise the VM will never notice
3124          * that the filesystem did any IO at all.
3125          *
3126          * Also, during truncate, discard_buffer will have marked all
3127          * the page's buffers clean.  We discover that here and clean
3128          * the page also.
3129          *
3130          * private_lock must be held over this entire operation in order
3131          * to synchronise against __set_page_dirty_buffers and prevent the
3132          * dirty bit from being lost.
3133          */
3134         if (ret)
3135                 cancel_dirty_page(page, PAGE_CACHE_SIZE);
3136         spin_unlock(&mapping->private_lock);
3137 out:
3138         if (buffers_to_free) {
3139                 struct buffer_head *bh = buffers_to_free;
3140
3141                 do {
3142                         struct buffer_head *next = bh->b_this_page;
3143                         free_buffer_head(bh);
3144                         bh = next;
3145                 } while (bh != buffers_to_free);
3146         }
3147         return ret;
3148 }
3149 EXPORT_SYMBOL(try_to_free_buffers);
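/*
 * Usage sketch (illustrative, not from the original source): many
 * filesystems implement ->releasepage as little more than a call to
 * try_to_free_buffers(); the VM holds the page lock across the call, which
 * provides the exclusion described above.  "foo_releasepage" is a
 * hypothetical name.
 */
#if 0
static int foo_releasepage(struct page *page, gfp_t gfp_mask)
{
        return try_to_free_buffers(page);
}
#endif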
3150
3151 void block_sync_page(struct page *page)
3152 {
3153         struct address_space *mapping;
3154
3155         smp_mb();
3156         mapping = page_mapping(page);
3157         if (mapping)
3158                 blk_run_backing_dev(mapping->backing_dev_info, page);
3159 }
3160 EXPORT_SYMBOL(block_sync_page);
3161
3162 /*
3163  * There are no bdflush tunables left.  But distributions are
3164  * still running obsolete flush daemons, so we terminate them here.
3165  *
3166  * Use of bdflush() is deprecated and will be removed in a future kernel.
3167  * The `flush-X' kernel threads fully replace bdflush daemons and this call.
3168  */
3169 SYSCALL_DEFINE2(bdflush, int, func, long, data)
3170 {
3171         static int msg_count;
3172
3173         if (!capable(CAP_SYS_ADMIN))
3174                 return -EPERM;
3175
3176         if (msg_count < 5) {
3177                 msg_count++;
3178                 printk(KERN_INFO
3179                         "warning: process `%s' used the obsolete bdflush"
3180                         " system call\n", current->comm);
3181                 printk(KERN_INFO "Fix your initscripts?\n");
3182         }
3183
3184         if (func == 1)
3185                 do_exit(0);
3186         return 0;
3187 }
3188
3189 /*
3190  * Buffer-head allocation
3191  */
3192 static struct kmem_cache *bh_cachep;
3193
3194 /*
3195  * Once the number of bh's in the machine exceeds this level, we start
3196  * stripping them in writeback.
3197  */
3198 static int max_buffer_heads;
3199
3200 int buffer_heads_over_limit;
3201
3202 struct bh_accounting {
3203         int nr;                 /* Number of live bh's */
3204         int ratelimit;          /* Limit cacheline bouncing */
3205 };
3206
3207 static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
3208
3209 static void recalc_bh_state(void)
3210 {
3211         int i;
3212         int tot = 0;
3213
3214         if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
3215                 return;
3216         __get_cpu_var(bh_accounting).ratelimit = 0;
3217         for_each_online_cpu(i)
3218                 tot += per_cpu(bh_accounting, i).nr;
3219         buffer_heads_over_limit = (tot > max_buffer_heads);
3220 }
3221         
3222 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
3223 {
3224         struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
3225         if (ret) {
3226                 INIT_LIST_HEAD(&ret->b_assoc_buffers);
3227                 get_cpu_var(bh_accounting).nr++;
3228                 recalc_bh_state();
3229                 put_cpu_var(bh_accounting);
3230         }
3231         return ret;
3232 }
3233 EXPORT_SYMBOL(alloc_buffer_head);
3234
3235 void free_buffer_head(struct buffer_head *bh)
3236 {
3237         BUG_ON(!list_empty(&bh->b_assoc_buffers));
3238         kmem_cache_free(bh_cachep, bh);
3239         get_cpu_var(bh_accounting).nr--;
3240         recalc_bh_state();
3241         put_cpu_var(bh_accounting);
3242 }
3243 EXPORT_SYMBOL(free_buffer_head);
3244
3245 static void buffer_exit_cpu(int cpu)
3246 {
3247         int i;
3248         struct bh_lru *b = &per_cpu(bh_lrus, cpu);
3249
3250         for (i = 0; i < BH_LRU_SIZE; i++) {
3251                 brelse(b->bhs[i]);
3252                 b->bhs[i] = NULL;
3253         }
3254         get_cpu_var(bh_accounting).nr += per_cpu(bh_accounting, cpu).nr;
3255         per_cpu(bh_accounting, cpu).nr = 0;
3256         put_cpu_var(bh_accounting);
3257 }
3258
3259 static int buffer_cpu_notify(struct notifier_block *self,
3260                               unsigned long action, void *hcpu)
3261 {
3262         if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
3263                 buffer_exit_cpu((unsigned long)hcpu);
3264         return NOTIFY_OK;
3265 }
3266
3267 /**
3268  * bh_uptodate_or_lock - Test whether the buffer is uptodate
3269  * @bh: struct buffer_head
3270  *
3271  * Returns 1 (true) if the buffer is up-to-date; otherwise returns 0
3272  * with the buffer locked.
3273  */
3274 int bh_uptodate_or_lock(struct buffer_head *bh)
3275 {
3276         if (!buffer_uptodate(bh)) {
3277                 lock_buffer(bh);
3278                 if (!buffer_uptodate(bh))
3279                         return 0;
3280                 unlock_buffer(bh);
3281         }
3282         return 1;
3283 }
3284 EXPORT_SYMBOL(bh_uptodate_or_lock);
3285
3286 /**
3287  * bh_submit_read - Submit a locked buffer for reading
3288  * @bh: struct buffer_head
3289  *
3290  * Returns zero on success and -EIO on error.
3291  */
3292 int bh_submit_read(struct buffer_head *bh)
3293 {
3294         BUG_ON(!buffer_locked(bh));
3295
3296         if (buffer_uptodate(bh)) {
3297                 unlock_buffer(bh);
3298                 return 0;
3299         }
3300
3301         get_bh(bh);
3302         bh->b_end_io = end_buffer_read_sync;
3303         submit_bh(READ, bh);
3304         wait_on_buffer(bh);
3305         if (buffer_uptodate(bh))
3306                 return 0;
3307         return -EIO;
3308 }
3309 EXPORT_SYMBOL(bh_submit_read);
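/*
 * Usage sketch (illustrative, not from the original source): the two helpers
 * above are designed to be used together, reading the buffer only when it is
 * not already up to date.  "example_read_if_needed" is a hypothetical name.
 */
#if 0
static int example_read_if_needed(struct buffer_head *bh)
{
        if (bh_uptodate_or_lock(bh))
                return 0;               /* nothing to do, buffer left unlocked */
        return bh_submit_read(bh);      /* buffer is now locked; read and wait */
}
#endif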
3310
3311 void __init buffer_init(void)
3312 {
3313         int nrpages;
3314
3315         bh_cachep = kmem_cache_create("buffer_head",
3316                         sizeof(struct buffer_head), 0,
3317                                 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
3318                                 SLAB_MEM_SPREAD),
3319                                 NULL);
3320
3321         /*
3322          * Limit the bh occupancy to 10% of ZONE_NORMAL
3323          */
3324         nrpages = (nr_free_buffer_pages() * 10) / 100;
3325         max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
3326         hotcpu_notifier(buffer_cpu_notify, 0);
3327 }
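/*
 * Illustrative arithmetic (not part of the original source): on a machine
 * with, say, 1,000,000 free buffer pages, nrpages is 100,000.  With
 * 4096-byte pages and a buffer_head of roughly 100 bytes (the exact size
 * depends on the architecture and config), PAGE_SIZE / sizeof(struct
 * buffer_head) is about 40, so max_buffer_heads ends up around 4,000,000
 * live buffer heads before buffer_heads_over_limit is set.
 */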