diff --git a/fs/buffer.c b/fs/buffer.c
index c4e1139..d2a4d1b 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
 #include <linux/bitops.h>
 #include <linux/mpage.h>
 #include <linux/bit_spinlock.h>
+#include <trace/events/block.h>
 
 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
 
 #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
 
-inline void
-init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
+void init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
 {
        bh->b_end_io = handler;
        bh->b_private = private;
 }
 EXPORT_SYMBOL(init_buffer);
 
+inline void touch_buffer(struct buffer_head *bh)
+{
+       trace_block_touch_buffer(bh);
+       mark_page_accessed(bh->b_page);
+}
+EXPORT_SYMBOL(touch_buffer);
+
 static int sleep_on_buffer(void *word)
 {
        io_schedule();
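
Two cleanups meet in the hunk above: init_buffer() loses its inline
qualifier (it is an EXPORT_SYMBOL, so cross-unit inlining never happened
anyway), and touch_buffer() moves out of line so that it can fire the new
block_touch_buffer tracepoint; the matching trace_block_dirty_buffer() call
appears in mark_buffer_dirty() further down. Before this change,
touch_buffer() lived in include/linux/buffer_head.h as a macro, roughly
(reconstruction, not a verbatim quote):

	#define touch_buffer(bh)	mark_page_accessed(bh->b_page)

Making it a real function also keeps <trace/events/block.h> out of the
widely included buffer_head.h.
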
@@ -555,7 +562,7 @@ void emergency_thaw_all(void)
  */
 int sync_mapping_buffers(struct address_space *mapping)
 {
-       struct address_space *buffer_mapping = mapping->assoc_mapping;
+       struct address_space *buffer_mapping = mapping->private_data;
 
        if (buffer_mapping == NULL || list_empty(&mapping->private_list))
                return 0;
@@ -588,10 +595,10 @@ void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
        struct address_space *buffer_mapping = bh->b_page->mapping;
 
        mark_buffer_dirty(bh);
-       if (!mapping->assoc_mapping) {
-               mapping->assoc_mapping = buffer_mapping;
+       if (!mapping->private_data) {
+               mapping->private_data = buffer_mapping;
        } else {
-               BUG_ON(mapping->assoc_mapping != buffer_mapping);
+               BUG_ON(mapping->private_data != buffer_mapping);
        }
        if (!bh->b_assoc_map) {
                spin_lock(&buffer_mapping->private_lock);
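
The assoc_mapping accesses rewritten in this and the next two hunks are one
mechanical rename: the dedicated pointer in struct address_space became a
generic private_data pointer. The companion change in include/linux/fs.h is
approximately (hedged reconstruction of the 3.7-to-3.10 delta):

	struct address_space {
		...
		struct list_head	private_list;	/* unchanged */
		void			*private_data;	/* was: struct address_space *assoc_mapping */
		...
	};

Because private_data is a void pointer, assignments such as the
buffer_mapping initialization above need no cast in C.
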
@@ -788,7 +795,7 @@ void invalidate_inode_buffers(struct inode *inode)
        if (inode_has_buffers(inode)) {
                struct address_space *mapping = &inode->i_data;
                struct list_head *list = &mapping->private_list;
-               struct address_space *buffer_mapping = mapping->assoc_mapping;
+               struct address_space *buffer_mapping = mapping->private_data;
 
                spin_lock(&buffer_mapping->private_lock);
                while (!list_empty(list))
@@ -811,7 +818,7 @@ int remove_inode_buffers(struct inode *inode)
        if (inode_has_buffers(inode)) {
                struct address_space *mapping = &inode->i_data;
                struct list_head *list = &mapping->private_list;
-               struct address_space *buffer_mapping = mapping->assoc_mapping;
+               struct address_space *buffer_mapping = mapping->private_data;
 
                spin_lock(&buffer_mapping->private_lock);
                while (!list_empty(list)) {
@@ -850,19 +857,14 @@ try_again:
                if (!bh)
                        goto no_grow;
 
-               bh->b_bdev = NULL;
                bh->b_this_page = head;
                bh->b_blocknr = -1;
                head = bh;
 
-               bh->b_state = 0;
-               atomic_set(&bh->b_count, 0);
                bh->b_size = size;
 
                /* Link the buffer to its page */
                set_bh_page(bh, page, offset);
-
-               init_buffer(bh, NULL, NULL);
        }
        return head;
 /*
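
The initializations deleted from alloc_page_buffers() above (b_bdev,
b_state, b_count and the init_buffer(bh, NULL, NULL) call) are redundant:
buffer heads come from a zeroing slab allocator, so a fresh bh is already
all zeroes and NULLs. The allocator later in this same file reads
approximately as follows in 3.10 (paraphrased):

	struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
	{
		/* kmem_cache_zalloc() returns zeroed memory, so b_state,
		 * b_count, b_bdev, b_end_io and b_private start out 0/NULL */
		struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);

		if (ret) {
			INIT_LIST_HEAD(&ret->b_assoc_buffers);
			preempt_disable();
			__this_cpu_inc(bh_accounting.nr);
			recalc_bh_state();
			preempt_enable();
		}
		return ret;
	}
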
@@ -1117,6 +1119,8 @@ void mark_buffer_dirty(struct buffer_head *bh)
 {
        WARN_ON_ONCE(!buffer_uptodate(bh));
 
+       trace_block_dirty_buffer(bh);
+
        /*
         * Very *carefully* optimize the it-is-already-dirty case.
         *
@@ -2336,7 +2340,7 @@ int __block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
                         get_block_t get_block)
 {
        struct page *page = vmf->page;
-       struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
+       struct inode *inode = file_inode(vma->vm_file);
        unsigned long end;
        loff_t size;
        int ret;
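
file_inode() replaces the open-coded vma->vm_file->f_path.dentry->d_inode
chain here and in block_page_mkwrite() below; it reads the inode pointer
that struct file caches at open time. A minimal sketch matching its
include/linux/fs.h definition from 3.9 onwards (from memory):

	static inline struct inode *file_inode(const struct file *f)
	{
		return f->f_inode;	/* cached when the file is opened */
	}
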
@@ -2363,7 +2367,7 @@ int __block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
        if (unlikely(ret < 0))
                goto out_unlock;
        set_page_dirty(page);
-       wait_on_page_writeback(page);
+       wait_for_stable_page(page);
        return 0;
 out_unlock:
        unlock_page(page);
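
Unlike wait_on_page_writeback(), wait_for_stable_page() blocks only when the
backing device actually demands stable page writes (integrity data such as
DIF/DIX computed over in-flight pages), so mkwrite faults on ordinary disks
no longer stall behind writeback. The helper lives in mm/page-writeback.c
and is roughly (paraphrased from 3.10):

	void wait_for_stable_page(struct page *page)
	{
		struct address_space *mapping = page_mapping(page);
		struct backing_dev_info *bdi = mapping->backing_dev_info;

		/* only devices that checksum in-flight pages need this */
		if (!bdi_cap_stable_pages_required(bdi))
			return;
		wait_on_page_writeback(page);
	}
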
@@ -2375,7 +2379,7 @@ int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
                   get_block_t get_block)
 {
        int ret;
-       struct super_block *sb = vma->vm_file->f_path.dentry->d_inode->i_sb;
+       struct super_block *sb = file_inode(vma->vm_file)->i_sb;
 
        sb_start_pagefault(sb);
 
@@ -2935,14 +2939,15 @@ static void guard_bh_eod(int rw, struct bio *bio, struct buffer_head *bh)
        bio->bi_io_vec[0].bv_len = bytes;
 
        /* ..and clear the end of the buffer for reads */
-       if (rw & READ) {
+       if ((rw & RW_MASK) == READ) {
                void *kaddr = kmap_atomic(bh->b_page);
                memset(kaddr + bh_offset(bh) + bytes, 0, bh->b_size - bytes);
                kunmap_atomic(kaddr);
+               flush_dcache_page(bh->b_page);
        }
 }
 
-int submit_bh(int rw, struct buffer_head * bh)
+int _submit_bh(int rw, struct buffer_head *bh, unsigned long bio_flags)
 {
        struct bio *bio;
        int ret = 0;
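
The test change in guard_bh_eod() fixes dead code: READ is defined as 0, so
rw & READ can never be non-zero and the old branch never cleared the tail of
a buffer straddling end-of-device on reads. With the 3.10-era flag values
(hedged reconstruction of include/linux/fs.h):

	#define RW_MASK		REQ_WRITE	/* bit 0 */
	#define READ		0
	#define WRITE		RW_MASK

	/* old: (rw & READ)              is always 0 -> branch never taken */
	/* new: ((rw & RW_MASK) == READ)             -> true exactly for reads */

The added flush_dcache_page() keeps any user mapping of the page coherent
after the memset through the kernel mapping.
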
@@ -2972,15 +2977,20 @@ int submit_bh(int rw, struct buffer_head * bh)
        bio->bi_io_vec[0].bv_offset = bh_offset(bh);
 
        bio->bi_vcnt = 1;
-       bio->bi_idx = 0;
        bio->bi_size = bh->b_size;
 
        bio->bi_end_io = end_bio_bh_io_sync;
        bio->bi_private = bh;
+       bio->bi_flags |= bio_flags;
 
        /* Take care of bh's that straddle the end of the device */
        guard_bh_eod(rw, bio, bh);
 
+       if (buffer_meta(bh))
+               rw |= REQ_META;
+       if (buffer_prio(bh))
+               rw |= REQ_PRIO;
+
        bio_get(bio);
        submit_bio(rw, bio);
 
@@ -2990,6 +3000,12 @@ int submit_bh(int rw, struct buffer_head * bh)
        bio_put(bio);
        return ret;
 }
+EXPORT_SYMBOL_GPL(_submit_bh);
+
+int submit_bh(int rw, struct buffer_head *bh)
+{
+       return _submit_bh(rw, bh, 0);
+}
 EXPORT_SYMBOL(submit_bh);
 
 /**
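
Two submit_bh() changes land together here. First, per-buffer hints are now
propagated into the request flags, so a filesystem tags the buffer_head once
and every submission path inherits it; a hedged usage sketch, assuming the
set_buffer_meta()/set_buffer_prio() helpers from the matching buffer_head.h
change:

	set_buffer_meta(bh);	/* submit_bh() will OR in REQ_META */
	set_buffer_prio(bh);	/* submit_bh() will OR in REQ_PRIO */
	mark_buffer_dirty(bh);
	sync_dirty_buffer(bh);	/* eventually reaches submit_bh()  */

Second, the _submit_bh()/submit_bh() split keeps the old API while letting a
caller attach bio flags at submission time. The expected user is the
journalling code, which passes BIO_SNAP_STABLE so that the block layer
snapshots the buffer contents rather than write-protecting the page while
the I/O is in flight. A sketch of such a call (the wrapper name is made up
for illustration):

	/* hypothetical wrapper around the 3.10 jbd-style usage */
	static int submit_snapshotted_write(struct buffer_head *bh, int write_op)
	{
		return _submit_bh(write_op, bh, 1 << BIO_SNAP_STABLE);
	}
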
@@ -3230,7 +3246,7 @@ static struct kmem_cache *bh_cachep __read_mostly;
  * Once the number of bh's in the machine exceeds this level, we start
  * stripping them in writeback.
  */
-static int max_buffer_heads;
+static unsigned long max_buffer_heads;
 
 int buffer_heads_over_limit;
 
@@ -3346,7 +3362,7 @@ EXPORT_SYMBOL(bh_submit_read);
 
 void __init buffer_init(void)
 {
-       int nrpages;
+       unsigned long nrpages;
 
        bh_cachep = kmem_cache_create("buffer_head",
                        sizeof(struct buffer_head), 0,
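
The last two hunks widen max_buffer_heads and the nrpages temporary from int
to unsigned long to avoid 32-bit overflow on very large machines:
buffer_init() sets max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct
buffer_head)), with nrpages being 10% of free pages. Back-of-envelope, under
the assumptions in the comment:

	/* 4 KiB pages, sizeof(struct buffer_head) ~ 104 bytes on 64-bit:
	 *   4096 / 104  ~ 39 buffer heads per page
	 *   2 TiB RAM   -> 2^29 pages, so nrpages ~ 5.4e7
	 *   5.4e7 * 39  ~ 2.1e9, right at INT_MAX
	 * so anything much past ~2 TiB of RAM would wrap a 32-bit int.
	 */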