memcg: revert gfp mask fix
diff --git a/mm/filemap.c b/mm/filemap.c
index 100b99c2d504d9d0057267623ba90f359465c597..ceba0bd0366261740b27c8f19785ae8857eeb8c8 100644
 #include <linux/uio.h>
 #include <linux/hash.h>
 #include <linux/writeback.h>
+#include <linux/backing-dev.h>
 #include <linux/pagevec.h>
 #include <linux/blkdev.h>
 #include <linux/security.h>
 #include <linux/syscalls.h>
 #include <linux/cpuset.h>
-#include "filemap.h"
+#include <linux/hardirq.h> /* for BUG_ON(!in_atomic()) only */
+#include <linux/memcontrol.h>
+#include <linux/mm_inline.h> /* for page_is_file_cache() */
 #include "internal.h"
 
 /*
@@ -40,9 +43,6 @@
 
 #include <asm/mman.h>
 
-static ssize_t
-generic_file_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
-       loff_t offset, unsigned long nr_segs);
 
 /*
  * Shared mappings implemented 30.11.1994. It's not fully working yet,
@@ -110,7 +110,7 @@ generic_file_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
 /*
  * Remove a page from the page cache and free it. Caller has to make
  * sure the page is locked and that nobody else uses it - or that usage
- * is safe.  The caller must hold a write_lock on the mapping's tree_lock.
+ * is safe.  The caller must hold the mapping's tree_lock.
  */
 void __remove_from_page_cache(struct page *page)
 {
@@ -121,6 +121,19 @@ void __remove_from_page_cache(struct page *page)
        mapping->nrpages--;
        __dec_zone_page_state(page, NR_FILE_PAGES);
        BUG_ON(page_mapped(page));
+       mem_cgroup_uncharge_cache_page(page);
+
+       /*
+        * Some filesystems seem to re-dirty the page even after
+        * the VM has canceled the dirty bit (eg ext3 journaling).
+        *
+        * Fix it up by doing a final dirty accounting check after
+        * having removed the page entirely.
+        */
+       if (PageDirty(page) && mapping_cap_account_dirty(mapping)) {
+               dec_zone_page_state(page, NR_FILE_DIRTY);
+               dec_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE);
+       }
 }
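
Review note: the dirty-accounting fixup above undoes bookkeeping performed when the page was dirtied. For reference only (not part of this patch, and the exact location may differ in this tree), the increment side in this kernel generation sits in __set_page_dirty_nobuffers() and looks roughly like:

	if (mapping_cap_account_dirty(mapping)) {
		__inc_zone_page_state(page, NR_FILE_DIRTY);
		__inc_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE);
		task_io_account_write(PAGE_CACHE_SIZE);
	}
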
 
 void remove_from_page_cache(struct page *page)
@@ -129,9 +142,9 @@ void remove_from_page_cache(struct page *page)
 
        BUG_ON(!PageLocked(page));
 
-       write_lock_irq(&mapping->tree_lock);
+       spin_lock_irq(&mapping->tree_lock);
        __remove_from_page_cache(page);
-       write_unlock_irq(&mapping->tree_lock);
+       spin_unlock_irq(&mapping->tree_lock);
 }
 
 static int sync_page(void *word)
@@ -170,6 +183,12 @@ static int sync_page(void *word)
        return 0;
 }
 
+static int sync_page_killable(void *word)
+{
+       sync_page(word);
+       return fatal_signal_pending(current) ? -EINTR : 0;
+}
+
 /**
  * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
  * @mapping:   address space structure to write
@@ -191,7 +210,7 @@ int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
        int ret;
        struct writeback_control wbc = {
                .sync_mode = sync_mode,
-               .nr_to_write = mapping->nrpages * 2,
+               .nr_to_write = LONG_MAX,
                .range_start = start,
                .range_end = end,
        };
@@ -215,11 +234,12 @@ int filemap_fdatawrite(struct address_space *mapping)
 }
 EXPORT_SYMBOL(filemap_fdatawrite);
 
-static int filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
+int filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
                                loff_t end)
 {
        return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL);
 }
+EXPORT_SYMBOL(filemap_fdatawrite_range);
 
 /**
  * filemap_flush - mostly a non-blocking flush
@@ -322,7 +342,7 @@ int sync_page_range(struct inode *inode, struct address_space *mapping,
 EXPORT_SYMBOL(sync_page_range);
 
 /**
- * sync_page_range_nolock
+ * sync_page_range_nolock - write & wait on all pages in the passed range without locking
  * @inode:     target inode
  * @mapping:   target address_space
  * @pos:       beginning offset in pages to write
@@ -423,47 +443,74 @@ int filemap_write_and_wait_range(struct address_space *mapping,
 }
 
 /**
- * add_to_page_cache - add newly allocated pagecache pages
+ * add_to_page_cache_locked - add a locked page to the pagecache
  * @page:      page to add
  * @mapping:   the page's address_space
  * @offset:    page index
  * @gfp_mask:  page allocation mode
  *
- * This function is used to add newly allocated pagecache pages;
- * the page is new, so we can just run SetPageLocked() against it.
- * The other page state flags were set by rmqueue().
- *
+ * This function is used to add a page to the pagecache. It must be locked.
  * This function does not add the page to the LRU.  The caller must do that.
  */
-int add_to_page_cache(struct page *page, struct address_space *mapping,
+int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
                pgoff_t offset, gfp_t gfp_mask)
 {
-       int error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
+       int error;
+
+       VM_BUG_ON(!PageLocked(page));
 
+       error = mem_cgroup_cache_charge(page, current->mm,
+                                       gfp_mask & GFP_RECLAIM_MASK);
+       if (error)
+               goto out;
+
+       error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
        if (error == 0) {
-               write_lock_irq(&mapping->tree_lock);
+               page_cache_get(page);
+               page->mapping = mapping;
+               page->index = offset;
+
+               spin_lock_irq(&mapping->tree_lock);
                error = radix_tree_insert(&mapping->page_tree, offset, page);
-               if (!error) {
-                       page_cache_get(page);
-                       SetPageLocked(page);
-                       page->mapping = mapping;
-                       page->index = offset;
+               if (likely(!error)) {
                        mapping->nrpages++;
                        __inc_zone_page_state(page, NR_FILE_PAGES);
+               } else {
+                       page->mapping = NULL;
+                       mem_cgroup_uncharge_cache_page(page);
+                       page_cache_release(page);
                }
-               write_unlock_irq(&mapping->tree_lock);
+
+               spin_unlock_irq(&mapping->tree_lock);
                radix_tree_preload_end();
-       }
+       } else
+               mem_cgroup_uncharge_cache_page(page);
+out:
        return error;
 }
-EXPORT_SYMBOL(add_to_page_cache);
+EXPORT_SYMBOL(add_to_page_cache_locked);
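
Review note: callers that bring an unlocked, freshly allocated page now go through the add_to_page_cache() wrapper provided by the companion pagemap.h change. A sketch of that wrapper, with helper names assumed from that series (they may differ slightly in this tree):

static inline int add_to_page_cache(struct page *page,
		struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
{
	int error;

	__set_page_locked(page);	/* new page: non-atomic set is enough */
	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
	if (unlikely(error))
		__clear_page_locked(page);
	return error;
}
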
 
 int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
                                pgoff_t offset, gfp_t gfp_mask)
 {
-       int ret = add_to_page_cache(page, mapping, offset, gfp_mask);
-       if (ret == 0)
-               lru_cache_add(page);
+       int ret;
+
+       /*
+        * Splice_read and readahead add shmem/tmpfs pages into the page cache
+        * before shmem_readpage has a chance to mark them as SwapBacked: they
+        * need to go on the active_anon lru below, and mem_cgroup_cache_charge
+        * (called in add_to_page_cache) needs to know where they're going too.
+        */
+       if (mapping_cap_swap_backed(mapping))
+               SetPageSwapBacked(page);
+
+       ret = add_to_page_cache(page, mapping, offset, gfp_mask);
+       if (ret == 0) {
+               if (page_is_file_cache(page))
+                       lru_cache_add_file(page);
+               else
+                       lru_cache_add_active_anon(page);
+       }
        return ret;
 }
 
@@ -507,7 +554,7 @@ static inline void wake_up_page(struct page *page, int bit)
        __wake_up_bit(page_waitqueue(page), &page->flags, bit);
 }
 
-void fastcall wait_on_page_bit(struct page *page, int bit_nr)
+void wait_on_page_bit(struct page *page, int bit_nr)
 {
        DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);
 
@@ -526,17 +573,14 @@ EXPORT_SYMBOL(wait_on_page_bit);
 * mechanism between PageLocked pages and PageWriteback pages is shared.
  * But that's OK - sleepers in wait_on_page_writeback() just go back to sleep.
  *
- * The first mb is necessary to safely close the critical section opened by the
- * TestSetPageLocked(), the second mb is necessary to enforce ordering between
- * the clear_bit and the read of the waitqueue (to avoid SMP races with a
- * parallel wait_on_page_locked()).
+ * The mb is necessary to enforce ordering between the clear_bit and the read
+ * of the waitqueue (to avoid SMP races with a parallel wait_on_page_locked()).
  */
-void fastcall unlock_page(struct page *page)
+void unlock_page(struct page *page)
 {
-       smp_mb__before_clear_bit();
-       if (!TestClearPageLocked(page))
-               BUG();
-       smp_mb__after_clear_bit(); 
+       VM_BUG_ON(!PageLocked(page));
+       clear_bit_unlock(PG_locked, &page->flags);
+       smp_mb__after_clear_bit();
        wake_up_page(page, PG_locked);
 }
 EXPORT_SYMBOL(unlock_page);
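
Review note: clear_bit_unlock() supplies the release semantics that pair with the acquire on the lock side, which is why the smp_mb__before_clear_bit() could go away. For context, the lock-side helpers in include/linux/pagemap.h after this series look roughly like this (sketch, not part of this hunk):

static inline int trylock_page(struct page *page)
{
	return !test_and_set_bit_lock(PG_locked, &page->flags);
}

static inline void lock_page(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		__lock_page(page);
}
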
@@ -547,10 +591,12 @@ EXPORT_SYMBOL(unlock_page);
  */
 void end_page_writeback(struct page *page)
 {
-       if (!TestClearPageReclaim(page) || rotate_reclaimable_page(page)) {
-               if (!test_clear_page_writeback(page))
-                       BUG();
-       }
+       if (TestClearPageReclaim(page))
+               rotate_reclaimable_page(page);
+
+       if (!test_clear_page_writeback(page))
+               BUG();
+
        smp_mb__after_clear_bit();
        wake_up_page(page, PG_writeback);
 }
@@ -565,7 +611,7 @@ EXPORT_SYMBOL(end_page_writeback);
  * chances are that on the second loop, the block layer's plug list is empty,
  * so sync_page() will then return in state TASK_UNINTERRUPTIBLE.
  */
-void fastcall __lock_page(struct page *page)
+void __lock_page(struct page *page)
 {
        DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
 
@@ -574,11 +620,22 @@ void fastcall __lock_page(struct page *page)
 }
 EXPORT_SYMBOL(__lock_page);
 
-/*
+int __lock_page_killable(struct page *page)
+{
+       DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
+
+       return __wait_on_bit_lock(page_waitqueue(page), &wait,
+                                       sync_page_killable, TASK_KILLABLE);
+}
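
Review note: __lock_page_killable() is the slow path behind lock_page_killable(), which do_generic_file_read() uses further down. The fast-path wrapper in include/linux/pagemap.h is roughly:

static inline int lock_page_killable(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		return __lock_page_killable(page);
	return 0;
}
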
+
+/**
+ * __lock_page_nosync - get a lock on the page, without calling sync_page()
+ * @page: the page to lock
+ *
  * Variant of lock_page that does not require the caller to hold a reference
  * on the page's mapping.
  */
-void fastcall __lock_page_nosync(struct page *page)
+void __lock_page_nosync(struct page *page)
 {
        DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
        __wait_on_bit_lock(page_waitqueue(page), &wait, __sleep_on_page_lock,
@@ -593,15 +650,35 @@ void fastcall __lock_page_nosync(struct page *page)
  * Is there a pagecache struct page at the given (mapping, offset) tuple?
  * If yes, increment its refcount and return it; if no, return NULL.
  */
-struct page * find_get_page(struct address_space *mapping, unsigned long offset)
+struct page *find_get_page(struct address_space *mapping, pgoff_t offset)
 {
+       void **pagep;
        struct page *page;
 
-       read_lock_irq(&mapping->tree_lock);
-       page = radix_tree_lookup(&mapping->page_tree, offset);
-       if (page)
-               page_cache_get(page);
-       read_unlock_irq(&mapping->tree_lock);
+       rcu_read_lock();
+repeat:
+       page = NULL;
+       pagep = radix_tree_lookup_slot(&mapping->page_tree, offset);
+       if (pagep) {
+               page = radix_tree_deref_slot(pagep);
+               if (unlikely(!page || page == RADIX_TREE_RETRY))
+                       goto repeat;
+
+               if (!page_cache_get_speculative(page))
+                       goto repeat;
+
+               /*
+                * Has the page moved?
+                * This is part of the lockless pagecache protocol. See
+                * include/linux/pagemap.h for details.
+                */
+               if (unlikely(page != *pagep)) {
+                       page_cache_release(page);
+                       goto repeat;
+               }
+       }
+       rcu_read_unlock();
+
        return page;
 }
 EXPORT_SYMBOL(find_get_page);
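
Review note: the lockless lookup depends on page_cache_get_speculative() refusing to take a reference once the refcount has hit zero, so a page freed and reused under RCU is caught by the "page != *pagep" recheck above. A simplified sketch of that helper from include/linux/pagemap.h (SMP case only, debug guards trimmed):

static inline int page_cache_get_speculative(struct page *page)
{
	VM_BUG_ON(in_interrupt());

	/* Only succeed if the count is still non-zero. */
	if (unlikely(!get_page_unless_zero(page)))
		return 0;
	VM_BUG_ON(PageTail(page));

	return 1;
}
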
@@ -616,31 +693,22 @@ EXPORT_SYMBOL(find_get_page);
  *
  * Returns zero if the page was not present. find_lock_page() may sleep.
  */
-struct page *find_lock_page(struct address_space *mapping,
-                               unsigned long offset)
+struct page *find_lock_page(struct address_space *mapping, pgoff_t offset)
 {
        struct page *page;
 
-       read_lock_irq(&mapping->tree_lock);
 repeat:
-       page = radix_tree_lookup(&mapping->page_tree, offset);
+       page = find_get_page(mapping, offset);
        if (page) {
-               page_cache_get(page);
-               if (TestSetPageLocked(page)) {
-                       read_unlock_irq(&mapping->tree_lock);
-                       __lock_page(page);
-                       read_lock_irq(&mapping->tree_lock);
-
-                       /* Has the page been truncated while we slept? */
-                       if (unlikely(page->mapping != mapping ||
-                                    page->index != offset)) {
-                               unlock_page(page);
-                               page_cache_release(page);
-                               goto repeat;
-                       }
+               lock_page(page);
+               /* Has the page been truncated? */
+               if (unlikely(page->mapping != mapping)) {
+                       unlock_page(page);
+                       page_cache_release(page);
+                       goto repeat;
                }
+               VM_BUG_ON(page->index != offset);
        }
-       read_unlock_irq(&mapping->tree_lock);
        return page;
 }
 EXPORT_SYMBOL(find_lock_page);
@@ -663,29 +731,31 @@ EXPORT_SYMBOL(find_lock_page);
  * memory exhaustion.
  */
 struct page *find_or_create_page(struct address_space *mapping,
-               unsigned long index, gfp_t gfp_mask)
+               pgoff_t index, gfp_t gfp_mask)
 {
-       struct page *page, *cached_page = NULL;
+       struct page *page;
        int err;
 repeat:
        page = find_lock_page(mapping, index);
        if (!page) {
-               if (!cached_page) {
-                       cached_page =
-                               __page_cache_alloc(gfp_mask);
-                       if (!cached_page)
-                               return NULL;
+               page = __page_cache_alloc(gfp_mask);
+               if (!page)
+                       return NULL;
+               /*
+                * We want a regular kernel memory (not highmem or DMA etc)
+                * allocation for the radix tree nodes, but we need to honour
+                * the context-specific requirements the caller has asked for.
+                * GFP_RECLAIM_MASK collects those requirements.
+                */
+               err = add_to_page_cache_lru(page, mapping, index,
+                       (gfp_mask & GFP_RECLAIM_MASK));
+               if (unlikely(err)) {
+                       page_cache_release(page);
+                       page = NULL;
+                       if (err == -EEXIST)
+                               goto repeat;
                }
-               err = add_to_page_cache_lru(cached_page, mapping,
-                                       index, gfp_mask);
-               if (!err) {
-                       page = cached_page;
-                       cached_page = NULL;
-               } else if (err == -EEXIST)
-                       goto repeat;
        }
-       if (cached_page)
-               page_cache_release(cached_page);
        return page;
 }
 EXPORT_SYMBOL(find_or_create_page);
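
Review note: GFP_RECLAIM_MASK comes from mm/internal.h in the companion gfp-mask change; as far as I can tell it collects roughly the bits below, so highmem/placement flags are stripped while the caller's reclaim constraints survive (definition assumed, check mm/internal.h in this tree):

#define GFP_RECLAIM_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS|\
			__GFP_NOWARN|__GFP_REPEAT|__GFP_NOFAIL|\
			__GFP_NORETRY|__GFP_NOMEMALLOC)
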
@@ -711,13 +781,39 @@ unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
 {
        unsigned int i;
        unsigned int ret;
+       unsigned int nr_found;
+
+       rcu_read_lock();
+restart:
+       nr_found = radix_tree_gang_lookup_slot(&mapping->page_tree,
+                               (void ***)pages, start, nr_pages);
+       ret = 0;
+       for (i = 0; i < nr_found; i++) {
+               struct page *page;
+repeat:
+               page = radix_tree_deref_slot((void **)pages[i]);
+               if (unlikely(!page))
+                       continue;
+               /*
+                * this can only trigger if nr_found == 1, making livelock
+                * a non-issue.
+                */
+               if (unlikely(page == RADIX_TREE_RETRY))
+                       goto restart;
 
-       read_lock_irq(&mapping->tree_lock);
-       ret = radix_tree_gang_lookup(&mapping->page_tree,
-                               (void **)pages, start, nr_pages);
-       for (i = 0; i < ret; i++)
-               page_cache_get(pages[i]);
-       read_unlock_irq(&mapping->tree_lock);
+               if (!page_cache_get_speculative(page))
+                       goto repeat;
+
+               /* Has the page moved? */
+               if (unlikely(page != *((void **)pages[i]))) {
+                       page_cache_release(page);
+                       goto repeat;
+               }
+
+               pages[ret] = page;
+               ret++;
+       }
+       rcu_read_unlock();
        return ret;
 }
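
Review note: most users reach find_get_pages() through pagevec_lookup(). A minimal, illustrative iteration pattern (not from this patch):

	struct pagevec pvec;
	pgoff_t next = 0;
	unsigned i;

	pagevec_init(&pvec, 0);
	while (pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			next = page->index + 1;
			/* ... use page; a reference is held for us ... */
		}
		pagevec_release(&pvec);	/* drops the references */
	}
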
 
@@ -738,19 +834,44 @@ unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index,
 {
        unsigned int i;
        unsigned int ret;
+       unsigned int nr_found;
+
+       rcu_read_lock();
+restart:
+       nr_found = radix_tree_gang_lookup_slot(&mapping->page_tree,
+                               (void ***)pages, index, nr_pages);
+       ret = 0;
+       for (i = 0; i < nr_found; i++) {
+               struct page *page;
+repeat:
+               page = radix_tree_deref_slot((void **)pages[i]);
+               if (unlikely(!page))
+                       continue;
+               /*
+                * this can only trigger if nr_found == 1, making livelock
+                * a non-issue.
+                */
+               if (unlikely(page == RADIX_TREE_RETRY))
+                       goto restart;
 
-       read_lock_irq(&mapping->tree_lock);
-       ret = radix_tree_gang_lookup(&mapping->page_tree,
-                               (void **)pages, index, nr_pages);
-       for (i = 0; i < ret; i++) {
-               if (pages[i]->mapping == NULL || pages[i]->index != index)
+               if (page->mapping == NULL || page->index != index)
                        break;
 
-               page_cache_get(pages[i]);
+               if (!page_cache_get_speculative(page))
+                       goto repeat;
+
+               /* Has the page moved? */
+               if (unlikely(page != *((void **)pages[i]))) {
+                       page_cache_release(page);
+                       goto repeat;
+               }
+
+               pages[ret] = page;
+               ret++;
                index++;
        }
-       read_unlock_irq(&mapping->tree_lock);
-       return i;
+       rcu_read_unlock();
+       return ret;
 }
 EXPORT_SYMBOL(find_get_pages_contig);
 
@@ -770,15 +891,43 @@ unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
 {
        unsigned int i;
        unsigned int ret;
+       unsigned int nr_found;
+
+       rcu_read_lock();
+restart:
+       nr_found = radix_tree_gang_lookup_tag_slot(&mapping->page_tree,
+                               (void ***)pages, *index, nr_pages, tag);
+       ret = 0;
+       for (i = 0; i < nr_found; i++) {
+               struct page *page;
+repeat:
+               page = radix_tree_deref_slot((void **)pages[i]);
+               if (unlikely(!page))
+                       continue;
+               /*
+                * this can only trigger if nr_found == 1, making livelock
+                * a non-issue.
+                */
+               if (unlikely(page == RADIX_TREE_RETRY))
+                       goto restart;
+
+               if (!page_cache_get_speculative(page))
+                       goto repeat;
+
+               /* Has the page moved? */
+               if (unlikely(page != *((void **)pages[i]))) {
+                       page_cache_release(page);
+                       goto repeat;
+               }
+
+               pages[ret] = page;
+               ret++;
+       }
+       rcu_read_unlock();
 
-       read_lock_irq(&mapping->tree_lock);
-       ret = radix_tree_gang_lookup_tag(&mapping->page_tree,
-                               (void **)pages, *index, nr_pages, tag);
-       for (i = 0; i < ret; i++)
-               page_cache_get(pages[i]);
        if (ret)
                *index = pages[ret - 1]->index + 1;
-       read_unlock_irq(&mapping->tree_lock);
+
        return ret;
 }
 EXPORT_SYMBOL(find_get_pages_tag);
@@ -797,18 +946,18 @@ EXPORT_SYMBOL(find_get_pages_tag);
  * and deadlock against the caller's locked page.
  */
 struct page *
-grab_cache_page_nowait(struct address_space *mapping, unsigned long index)
+grab_cache_page_nowait(struct address_space *mapping, pgoff_t index)
 {
        struct page *page = find_get_page(mapping, index);
 
        if (page) {
-               if (!TestSetPageLocked(page))
+               if (trylock_page(page))
                        return page;
                page_cache_release(page);
                return NULL;
        }
        page = __page_cache_alloc(mapping_gfp_mask(mapping) & ~__GFP_FS);
-       if (page && add_to_page_cache_lru(page, mapping, index, GFP_KERNEL)) {
+       if (page && add_to_page_cache_lru(page, mapping, index, GFP_NOFS)) {
                page_cache_release(page);
                page = NULL;
        }
@@ -841,9 +990,7 @@ static void shrink_readahead_size_eio(struct file *filp,
 }
 
 /**
- * do_generic_mapping_read - generic file read routine
- * @mapping:   address_space to be read
- * @_ra:       file's readahead state
+ * do_generic_file_read - generic file read routine
  * @filp:      the file to read
  * @ppos:      current file position
  * @desc:      read_descriptor
@@ -854,74 +1001,87 @@ static void shrink_readahead_size_eio(struct file *filp,
  *
  * This is really ugly. But the goto's actually try to clarify some
  * of the logic when it comes to error handling etc.
- *
- * Note the struct file* is only passed for the use of readpage.
- * It may be NULL.
  */
-void do_generic_mapping_read(struct address_space *mapping,
-                            struct file_ra_state *_ra,
-                            struct file *filp,
-                            loff_t *ppos,
-                            read_descriptor_t *desc,
-                            read_actor_t actor)
+static void do_generic_file_read(struct file *filp, loff_t *ppos,
+               read_descriptor_t *desc, read_actor_t actor)
 {
+       struct address_space *mapping = filp->f_mapping;
        struct inode *inode = mapping->host;
-       unsigned long index;
-       unsigned long end_index;
-       unsigned long offset;
-       unsigned long last_index;
-       unsigned long next_index;
-       unsigned long prev_index;
+       struct file_ra_state *ra = &filp->f_ra;
+       pgoff_t index;
+       pgoff_t last_index;
+       pgoff_t prev_index;
+       unsigned long offset;      /* offset into pagecache page */
        unsigned int prev_offset;
-       loff_t isize;
-       struct page *cached_page;
        int error;
-       struct file_ra_state ra = *_ra;
 
-       cached_page = NULL;
        index = *ppos >> PAGE_CACHE_SHIFT;
-       next_index = index;
-       prev_index = ra.prev_index;
-       prev_offset = ra.prev_offset;
+       prev_index = ra->prev_pos >> PAGE_CACHE_SHIFT;
+       prev_offset = ra->prev_pos & (PAGE_CACHE_SIZE-1);
        last_index = (*ppos + desc->count + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
        offset = *ppos & ~PAGE_CACHE_MASK;
 
-       isize = i_size_read(inode);
-       if (!isize)
-               goto out;
-
-       end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
        for (;;) {
                struct page *page;
+               pgoff_t end_index;
+               loff_t isize;
                unsigned long nr, ret;
 
+               cond_resched();
+find_page:
+               page = find_get_page(mapping, index);
+               if (!page) {
+                       page_cache_sync_readahead(mapping,
+                                       ra, filp,
+                                       index, last_index - index);
+                       page = find_get_page(mapping, index);
+                       if (unlikely(page == NULL))
+                               goto no_cached_page;
+               }
+               if (PageReadahead(page)) {
+                       page_cache_async_readahead(mapping,
+                                       ra, filp, page,
+                                       index, last_index - index);
+               }
+               if (!PageUptodate(page)) {
+                       if (inode->i_blkbits == PAGE_CACHE_SHIFT ||
+                                       !mapping->a_ops->is_partially_uptodate)
+                               goto page_not_up_to_date;
+                       if (!trylock_page(page))
+                               goto page_not_up_to_date;
+                       if (!mapping->a_ops->is_partially_uptodate(page,
+                                                               desc, offset))
+                               goto page_not_up_to_date_locked;
+                       unlock_page(page);
+               }
+page_ok:
+               /*
+                * i_size must be checked after we know the page is Uptodate.
+                *
+                * Checking i_size after the check allows us to calculate
+                * the correct value for "nr", which means the zero-filled
+                * part of the page is not copied back to userspace (unless
+                * another truncate extends the file - this is desired though).
+                */
+
+               isize = i_size_read(inode);
+               end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
+               if (unlikely(!isize || index > end_index)) {
+                       page_cache_release(page);
+                       goto out;
+               }
+
                /* nr is the maximum number of bytes to copy from this page */
                nr = PAGE_CACHE_SIZE;
-               if (index >= end_index) {
-                       if (index > end_index)
-                               goto out;
+               if (index == end_index) {
                        nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
                        if (nr <= offset) {
+                               page_cache_release(page);
                                goto out;
                        }
                }
                nr = nr - offset;
 
-               cond_resched();
-               if (index == next_index)
-                       next_index = page_cache_readahead(mapping, &ra, filp,
-                                       index, last_index - index);
-
-find_page:
-               page = find_get_page(mapping, index);
-               if (unlikely(page == NULL)) {
-                       handle_ra_miss(mapping, &ra, index);
-                       goto no_cached_page;
-               }
-               if (!PageUptodate(page))
-                       goto page_not_up_to_date;
-page_ok:
-
                /* If users can be writing to this page using arbitrary
                 * virtual addresses, take care about potential aliasing
                 * before reading the page on the kernel side.
@@ -952,7 +1112,6 @@ page_ok:
                index += offset >> PAGE_CACHE_SHIFT;
                offset &= ~PAGE_CACHE_MASK;
                prev_offset = offset;
-               ra.prev_offset = offset;
 
                page_cache_release(page);
                if (ret == nr && desc->count)
@@ -961,8 +1120,11 @@ page_ok:
 
 page_not_up_to_date:
                /* Get exclusive access to the page ... */
-               lock_page(page);
+               error = lock_page_killable(page);
+               if (unlikely(error))
+                       goto readpage_error;
 
+page_not_up_to_date_locked:
                /* Did it get truncated before we got the lock? */
                if (!page->mapping) {
                        unlock_page(page);
@@ -989,7 +1151,9 @@ readpage:
                }
 
                if (!PageUptodate(page)) {
-                       lock_page(page);
+                       error = lock_page_killable(page);
+                       if (unlikely(error))
+                               goto readpage_error;
                        if (!PageUptodate(page)) {
                                if (page->mapping == NULL) {
                                        /*
@@ -1000,38 +1164,13 @@ readpage:
                                        goto find_page;
                                }
                                unlock_page(page);
+                               shrink_readahead_size_eio(filp, ra);
                                error = -EIO;
-                               shrink_readahead_size_eio(filp, &ra);
                                goto readpage_error;
                        }
                        unlock_page(page);
                }
 
-               /*
-                * i_size must be checked after we have done ->readpage.
-                *
-                * Checking i_size after the readpage allows us to calculate
-                * the correct value for "nr", which means the zero-filled
-                * part of the page is not copied back to userspace (unless
-                * another truncate extends the file - this is desired though).
-                */
-               isize = i_size_read(inode);
-               end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
-               if (unlikely(!isize || index > end_index)) {
-                       page_cache_release(page);
-                       goto out;
-               }
-
-               /* nr is the maximum number of bytes to copy from this page */
-               nr = PAGE_CACHE_SIZE;
-               if (index == end_index) {
-                       nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
-                       if (nr <= offset) {
-                               page_cache_release(page);
-                               goto out;
-                       }
-               }
-               nr = nr - offset;
                goto page_ok;
 
 readpage_error:
@@ -1045,36 +1184,31 @@ no_cached_page:
                 * Ok, it wasn't cached, so we need to create a new
                 * page..
                 */
-               if (!cached_page) {
-                       cached_page = page_cache_alloc_cold(mapping);
-                       if (!cached_page) {
-                               desc->error = -ENOMEM;
-                               goto out;
-                       }
+               page = page_cache_alloc_cold(mapping);
+               if (!page) {
+                       desc->error = -ENOMEM;
+                       goto out;
                }
-               error = add_to_page_cache_lru(cached_page, mapping,
+               error = add_to_page_cache_lru(page, mapping,
                                                index, GFP_KERNEL);
                if (error) {
+                       page_cache_release(page);
                        if (error == -EEXIST)
                                goto find_page;
                        desc->error = error;
                        goto out;
                }
-               page = cached_page;
-               cached_page = NULL;
                goto readpage;
        }
 
 out:
-       *_ra = ra;
+       ra->prev_pos = prev_index;
+       ra->prev_pos <<= PAGE_CACHE_SHIFT;
+       ra->prev_pos |= prev_offset;
 
-       *ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
-       if (cached_page)
-               page_cache_release(cached_page);
-       if (filp)
-               file_accessed(filp);
+       *ppos = ((loff_t)index << PAGE_CACHE_SHIFT) + offset;
+       file_accessed(filp);
 }
-EXPORT_SYMBOL(do_generic_mapping_read);
 
 int file_read_actor(read_descriptor_t *desc, struct page *page,
                        unsigned long offset, unsigned long size)
@@ -1186,71 +1320,51 @@ generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
 
                mapping = filp->f_mapping;
                inode = mapping->host;
-               retval = 0;
                if (!count)
                        goto out; /* skip atime */
                size = i_size_read(inode);
                if (pos < size) {
-                       retval = generic_file_direct_IO(READ, iocb,
-                                               iov, pos, nr_segs);
+                       retval = filemap_write_and_wait_range(mapping, pos,
+                                       pos + iov_length(iov, nr_segs) - 1);
+                       if (!retval) {
+                               retval = mapping->a_ops->direct_IO(READ, iocb,
+                                                       iov, pos, nr_segs);
+                       }
                        if (retval > 0)
                                *ppos = pos + retval;
-               }
-               if (likely(retval != 0)) {
-                       file_accessed(filp);
-                       goto out;
+                       if (retval) {
+                               file_accessed(filp);
+                               goto out;
+                       }
                }
        }
 
-       retval = 0;
-       if (count) {
-               for (seg = 0; seg < nr_segs; seg++) {
-                       read_descriptor_t desc;
+       for (seg = 0; seg < nr_segs; seg++) {
+               read_descriptor_t desc;
 
-                       desc.written = 0;
-                       desc.arg.buf = iov[seg].iov_base;
-                       desc.count = iov[seg].iov_len;
-                       if (desc.count == 0)
-                               continue;
-                       desc.error = 0;
-                       do_generic_file_read(filp,ppos,&desc,file_read_actor);
-                       retval += desc.written;
-                       if (desc.error) {
-                               retval = retval ?: desc.error;
-                               break;
-                       }
-                       if (desc.count > 0)
-                               break;
+               desc.written = 0;
+               desc.arg.buf = iov[seg].iov_base;
+               desc.count = iov[seg].iov_len;
+               if (desc.count == 0)
+                       continue;
+               desc.error = 0;
+               do_generic_file_read(filp, ppos, &desc, file_read_actor);
+               retval += desc.written;
+               if (desc.error) {
+                       retval = retval ?: desc.error;
+                       break;
                }
+               if (desc.count > 0)
+                       break;
        }
 out:
        return retval;
 }
 EXPORT_SYMBOL(generic_file_aio_read);
 
-int file_send_actor(read_descriptor_t * desc, struct page *page, unsigned long offset, unsigned long size)
-{
-       ssize_t written;
-       unsigned long count = desc->count;
-       struct file *file = desc->arg.data;
-
-       if (size > count)
-               size = count;
-
-       written = file->f_op->sendpage(file, page, offset,
-                                      size, &file->f_pos, size<count);
-       if (written < 0) {
-               desc->error = written;
-               written = 0;
-       }
-       desc->count = count - written;
-       desc->written += written;
-       return written;
-}
-
 static ssize_t
 do_readahead(struct address_space *mapping, struct file *filp,
-            unsigned long index, unsigned long nr)
+            pgoff_t index, unsigned long nr)
 {
        if (!mapping || !mapping->a_ops || !mapping->a_ops->readpage)
                return -EINVAL;
@@ -1270,8 +1384,8 @@ asmlinkage ssize_t sys_readahead(int fd, loff_t offset, size_t count)
        if (file) {
                if (file->f_mode & FMODE_READ) {
                        struct address_space *mapping = file->f_mapping;
-                       unsigned long start = offset >> PAGE_CACHE_SHIFT;
-                       unsigned long end = (offset + count - 1) >> PAGE_CACHE_SHIFT;
+                       pgoff_t start = offset >> PAGE_CACHE_SHIFT;
+                       pgoff_t end = (offset + count - 1) >> PAGE_CACHE_SHIFT;
                        unsigned long len = end - start + 1;
                        ret = do_readahead(mapping, file, start, len);
                }
@@ -1281,7 +1395,6 @@ asmlinkage ssize_t sys_readahead(int fd, loff_t offset, size_t count)
 }
 
 #ifdef CONFIG_MMU
-static int FASTCALL(page_cache_read(struct file * file, unsigned long offset));
 /**
  * page_cache_read - adds requested page to the page cache if not already there
  * @file:      file to read
@@ -1290,7 +1403,7 @@ static int FASTCALL(page_cache_read(struct file * file, unsigned long offset));
  * This adds the requested page to the page cache if it isn't already there,
  * and schedules an I/O to read in its contents from disk.
  */
-static int fastcall page_cache_read(struct file * file, unsigned long offset)
+static int page_cache_read(struct file *file, pgoff_t offset)
 {
        struct address_space *mapping = file->f_mapping;
        struct page *page; 
@@ -1317,69 +1430,69 @@ static int fastcall page_cache_read(struct file * file, unsigned long offset)
 #define MMAP_LOTSAMISS  (100)
 
 /**
- * filemap_nopage - read in file data for page fault handling
- * @area:      the applicable vm_area
- * @address:   target address to read in
- * @type:      returned with VM_FAULT_{MINOR,MAJOR} if not %NULL
+ * filemap_fault - read in file data for page fault handling
+ * @vma:       vma in which the fault was taken
+ * @vmf:       struct vm_fault containing details of the fault
  *
- * filemap_nopage() is invoked via the vma operations vector for a
+ * filemap_fault() is invoked via the vma operations vector for a
  * mapped memory region to read in file data during a page fault.
  *
  * The goto's are kind of ugly, but this streamlines the normal case of having
  * it in the page cache, and handles the special cases reasonably without
  * having a lot of duplicated code.
  */
-struct page *filemap_nopage(struct vm_area_struct *area,
-                               unsigned long address, int *type)
+int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
        int error;
-       struct file *file = area->vm_file;
+       struct file *file = vma->vm_file;
        struct address_space *mapping = file->f_mapping;
        struct file_ra_state *ra = &file->f_ra;
        struct inode *inode = mapping->host;
        struct page *page;
-       unsigned long size, pgoff;
-       int did_readaround = 0, majmin = VM_FAULT_MINOR;
-
-       pgoff = ((address-area->vm_start) >> PAGE_CACHE_SHIFT) + area->vm_pgoff;
+       pgoff_t size;
+       int did_readaround = 0;
+       int ret = 0;
 
-retry_all:
        size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
-       if (pgoff >= size)
-               goto outside_data_content;
+       if (vmf->pgoff >= size)
+               return VM_FAULT_SIGBUS;
 
        /* If we don't want any read-ahead, don't bother */
-       if (VM_RandomReadHint(area))
+       if (VM_RandomReadHint(vma))
                goto no_cached_page;
 
-       /*
-        * The readahead code wants to be told about each and every page
-        * so it can build and shrink its windows appropriately
-        *
-        * For sequential accesses, we use the generic readahead logic.
-        */
-       if (VM_SequentialReadHint(area))
-               page_cache_readahead(mapping, ra, file, pgoff, 1);
-
        /*
         * Do we have something in the page cache already?
         */
 retry_find:
-       page = find_get_page(mapping, pgoff);
+       page = find_lock_page(mapping, vmf->pgoff);
+       /*
+        * For sequential accesses, we use the generic readahead logic.
+        */
+       if (VM_SequentialReadHint(vma)) {
+               if (!page) {
+                       page_cache_sync_readahead(mapping, ra, file,
+                                                          vmf->pgoff, 1);
+                       page = find_lock_page(mapping, vmf->pgoff);
+                       if (!page)
+                               goto no_cached_page;
+               }
+               if (PageReadahead(page)) {
+                       page_cache_async_readahead(mapping, ra, file, page,
+                                                          vmf->pgoff, 1);
+               }
+       }
+
        if (!page) {
                unsigned long ra_pages;
 
-               if (VM_SequentialReadHint(area)) {
-                       handle_ra_miss(mapping, ra, pgoff);
-                       goto no_cached_page;
-               }
                ra->mmap_miss++;
 
                /*
                 * Do we miss much more than hit in this file? If so,
                 * stop bothering with read-ahead. It will only hurt.
                 */
-               if (ra->mmap_miss > ra->mmap_hit + MMAP_LOTSAMISS)
+               if (ra->mmap_miss > MMAP_LOTSAMISS)
                        goto no_cached_page;
 
                /*
@@ -1387,7 +1500,7 @@ retry_find:
                 * check did_readaround, as this is an inner loop.
                 */
                if (!did_readaround) {
-                       majmin = VM_FAULT_MAJOR;
+                       ret = VM_FAULT_MAJOR;
                        count_vm_event(PGMAJFAULT);
                }
                did_readaround = 1;
@@ -1395,48 +1508,46 @@ retry_find:
                if (ra_pages) {
                        pgoff_t start = 0;
 
-                       if (pgoff > ra_pages / 2)
-                               start = pgoff - ra_pages / 2;
+                       if (vmf->pgoff > ra_pages / 2)
+                               start = vmf->pgoff - ra_pages / 2;
                        do_page_cache_readahead(mapping, file, start, ra_pages);
                }
-               page = find_get_page(mapping, pgoff);
+               page = find_lock_page(mapping, vmf->pgoff);
                if (!page)
                        goto no_cached_page;
        }
 
        if (!did_readaround)
-               ra->mmap_hit++;
+               ra->mmap_miss--;
 
        /*
-        * Ok, found a page in the page cache, now we need to check
-        * that it's up-to-date.
+        * We have a locked page in the page cache, now we need to check
+        * that it's up-to-date. If not, it is going to be due to an error.
         */
-       if (!PageUptodate(page))
+       if (unlikely(!PageUptodate(page)))
                goto page_not_uptodate;
 
-success:
+       /* Must recheck i_size under page lock */
+       size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+       if (unlikely(vmf->pgoff >= size)) {
+               unlock_page(page);
+               page_cache_release(page);
+               return VM_FAULT_SIGBUS;
+       }
+
        /*
         * Found the page and have a reference on it.
         */
-       mark_page_accessed(page);
-       if (type)
-               *type = majmin;
-       return page;
+       ra->prev_pos = (loff_t)page->index << PAGE_CACHE_SHIFT;
+       vmf->page = page;
+       return ret | VM_FAULT_LOCKED;
 
-outside_data_content:
-       /*
-        * An external ptracer can access pages that normally aren't
-        * accessible..
-        */
-       if (area->vm_mm == current->mm)
-               return NOPAGE_SIGBUS;
-       /* Fall through to the non-read-ahead case */
 no_cached_page:
        /*
         * We're only likely to ever get here if MADV_RANDOM is in
         * effect.
         */
-       error = page_cache_read(file, pgoff);
+       error = page_cache_read(file, vmf->pgoff);
 
        /*
         * The page we want has now been added to the page cache.
@@ -1452,12 +1563,13 @@ no_cached_page:
         * to schedule I/O.
         */
        if (error == -ENOMEM)
-               return NOPAGE_OOM;
-       return NOPAGE_SIGBUS;
+               return VM_FAULT_OOM;
+       return VM_FAULT_SIGBUS;
 
 page_not_uptodate:
+       /* IO error path */
        if (!did_readaround) {
-               majmin = VM_FAULT_MAJOR;
+               ret = VM_FAULT_MAJOR;
                count_vm_event(PGMAJFAULT);
        }
 
@@ -1467,217 +1579,26 @@ page_not_uptodate:
         * because there really aren't any performance issues here
         * and we need to check for errors.
         */
-       lock_page(page);
-
-       /* Somebody truncated the page on us? */
-       if (!page->mapping) {
-               unlock_page(page);
-               page_cache_release(page);
-               goto retry_all;
-       }
-
-       /* Somebody else successfully read it in? */
-       if (PageUptodate(page)) {
-               unlock_page(page);
-               goto success;
-       }
        ClearPageError(page);
        error = mapping->a_ops->readpage(file, page);
        if (!error) {
                wait_on_page_locked(page);
-               if (PageUptodate(page))
-                       goto success;
-       } else if (error == AOP_TRUNCATED_PAGE) {
-               page_cache_release(page);
-               goto retry_find;
+               if (!PageUptodate(page))
+                       error = -EIO;
        }
+       page_cache_release(page);
 
-       /*
-        * Things didn't work out. Return zero to tell the
-        * mm layer so, possibly freeing the page cache page first.
-        */
+       if (!error || error == AOP_TRUNCATED_PAGE)
+               goto retry_find;
+
+       /* Things didn't work out. Return zero to tell the mm layer so. */
        shrink_readahead_size_eio(file, ra);
-       page_cache_release(page);
-       return NOPAGE_SIGBUS;
+       return VM_FAULT_SIGBUS;
 }
-EXPORT_SYMBOL(filemap_nopage);
-
-static struct page * filemap_getpage(struct file *file, unsigned long pgoff,
-                                       int nonblock)
-{
-       struct address_space *mapping = file->f_mapping;
-       struct page *page;
-       int error;
-
-       /*
-        * Do we have something in the page cache already?
-        */
-retry_find:
-       page = find_get_page(mapping, pgoff);
-       if (!page) {
-               if (nonblock)
-                       return NULL;
-               goto no_cached_page;
-       }
-
-       /*
-        * Ok, found a page in the page cache, now we need to check
-        * that it's up-to-date.
-        */
-       if (!PageUptodate(page)) {
-               if (nonblock) {
-                       page_cache_release(page);
-                       return NULL;
-               }
-               goto page_not_uptodate;
-       }
-
-success:
-       /*
-        * Found the page and have a reference on it.
-        */
-       mark_page_accessed(page);
-       return page;
-
-no_cached_page:
-       error = page_cache_read(file, pgoff);
-
-       /*
-        * The page we want has now been added to the page cache.
-        * In the unlikely event that someone removed it in the
-        * meantime, we'll just come back here and read it again.
-        */
-       if (error >= 0)
-               goto retry_find;
-
-       /*
-        * An error return from page_cache_read can result if the
-        * system is low on memory, or a problem occurs while trying
-        * to schedule I/O.
-        */
-       return NULL;
-
-page_not_uptodate:
-       lock_page(page);
-
-       /* Did it get truncated while we waited for it? */
-       if (!page->mapping) {
-               unlock_page(page);
-               goto err;
-       }
-
-       /* Did somebody else get it up-to-date? */
-       if (PageUptodate(page)) {
-               unlock_page(page);
-               goto success;
-       }
-
-       error = mapping->a_ops->readpage(file, page);
-       if (!error) {
-               wait_on_page_locked(page);
-               if (PageUptodate(page))
-                       goto success;
-       } else if (error == AOP_TRUNCATED_PAGE) {
-               page_cache_release(page);
-               goto retry_find;
-       }
-
-       /*
-        * Umm, take care of errors if the page isn't up-to-date.
-        * Try to re-read it _once_. We do this synchronously,
-        * because there really aren't any performance issues here
-        * and we need to check for errors.
-        */
-       lock_page(page);
-
-       /* Somebody truncated the page on us? */
-       if (!page->mapping) {
-               unlock_page(page);
-               goto err;
-       }
-       /* Somebody else successfully read it in? */
-       if (PageUptodate(page)) {
-               unlock_page(page);
-               goto success;
-       }
-
-       ClearPageError(page);
-       error = mapping->a_ops->readpage(file, page);
-       if (!error) {
-               wait_on_page_locked(page);
-               if (PageUptodate(page))
-                       goto success;
-       } else if (error == AOP_TRUNCATED_PAGE) {
-               page_cache_release(page);
-               goto retry_find;
-       }
-
-       /*
-        * Things didn't work out. Return zero to tell the
-        * mm layer so, possibly freeing the page cache page first.
-        */
-err:
-       page_cache_release(page);
-
-       return NULL;
-}
-
-int filemap_populate(struct vm_area_struct *vma, unsigned long addr,
-               unsigned long len, pgprot_t prot, unsigned long pgoff,
-               int nonblock)
-{
-       struct file *file = vma->vm_file;
-       struct address_space *mapping = file->f_mapping;
-       struct inode *inode = mapping->host;
-       unsigned long size;
-       struct mm_struct *mm = vma->vm_mm;
-       struct page *page;
-       int err;
-
-       if (!nonblock)
-               force_page_cache_readahead(mapping, vma->vm_file,
-                                       pgoff, len >> PAGE_CACHE_SHIFT);
-
-repeat:
-       size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
-       if (pgoff + (len >> PAGE_CACHE_SHIFT) > size)
-               return -EINVAL;
-
-       page = filemap_getpage(file, pgoff, nonblock);
-
-       /* XXX: This is wrong, a filesystem I/O error may have happened. Fix that as
-        * done in shmem_populate calling shmem_getpage */
-       if (!page && !nonblock)
-               return -ENOMEM;
-
-       if (page) {
-               err = install_page(mm, vma, addr, page, prot);
-               if (err) {
-                       page_cache_release(page);
-                       return err;
-               }
-       } else if (vma->vm_flags & VM_NONLINEAR) {
-               /* No page was found just because we can't read it in now (being
-                * here implies nonblock != 0), but the page may exist, so set
-                * the PTE to fault it in later. */
-               err = install_file_pte(mm, vma, addr, pgoff, prot);
-               if (err)
-                       return err;
-       }
-
-       len -= PAGE_SIZE;
-       addr += PAGE_SIZE;
-       pgoff++;
-       if (len)
-               goto repeat;
-
-       return 0;
-}
-EXPORT_SYMBOL(filemap_populate);
+EXPORT_SYMBOL(filemap_fault);
 
 struct vm_operations_struct generic_file_vm_ops = {
-       .nopage         = filemap_nopage,
-       .populate       = filemap_populate,
+       .fault          = filemap_fault,
 };
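
Review note: with .nopage/.populate gone, a filesystem either calls generic_file_mmap() (installing generic_file_vm_ops above) or points its own vm_operations_struct at filemap_fault(). A hypothetical filesystem ("examplefs", illustrative only) would do roughly:

static struct vm_operations_struct examplefs_file_vm_ops = {
	.fault		= filemap_fault,
	/* .page_mkwrite = examplefs_page_mkwrite,  if write tracking is needed */
};

static int examplefs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	file_accessed(file);
	vma->vm_ops = &examplefs_file_vm_ops;
	vma->vm_flags |= VM_CAN_NONLINEAR;
	return 0;
}
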
 
 /* This is used for a general mmap of a disk file */
@@ -1690,6 +1611,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
                return -ENOEXEC;
        file_accessed(file);
        vma->vm_ops = &generic_file_vm_ops;
+       vma->vm_flags |= VM_CAN_NONLINEAR;
        return 0;
 }
 
@@ -1717,48 +1639,52 @@ EXPORT_SYMBOL(generic_file_mmap);
 EXPORT_SYMBOL(generic_file_readonly_mmap);
 
 static struct page *__read_cache_page(struct address_space *mapping,
-                               unsigned long index,
+                               pgoff_t index,
                                int (*filler)(void *,struct page*),
                                void *data)
 {
-       struct page *page, *cached_page = NULL;
+       struct page *page;
        int err;
 repeat:
        page = find_get_page(mapping, index);
        if (!page) {
-               if (!cached_page) {
-                       cached_page = page_cache_alloc_cold(mapping);
-                       if (!cached_page)
-                               return ERR_PTR(-ENOMEM);
-               }
-               err = add_to_page_cache_lru(cached_page, mapping,
-                                       index, GFP_KERNEL);
-               if (err == -EEXIST)
-                       goto repeat;
-               if (err < 0) {
+               page = page_cache_alloc_cold(mapping);
+               if (!page)
+                       return ERR_PTR(-ENOMEM);
+               err = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL);
+               if (unlikely(err)) {
+                       page_cache_release(page);
+                       if (err == -EEXIST)
+                               goto repeat;
                        /* Presumably ENOMEM for radix tree node */
-                       page_cache_release(cached_page);
                        return ERR_PTR(err);
                }
-               page = cached_page;
-               cached_page = NULL;
                err = filler(data, page);
                if (err < 0) {
                        page_cache_release(page);
                        page = ERR_PTR(err);
                }
        }
-       if (cached_page)
-               page_cache_release(cached_page);
        return page;
 }
 
-/*
+/**
+ * read_cache_page_async - read into page cache, fill it if needed
+ * @mapping:   the page's address_space
+ * @index:     the page index
+ * @filler:    function to perform the read
+ * @data:      destination for read data
+ *
  * Same as read_cache_page, but don't wait for page to become unlocked
  * after submitting it to the filler.
+ *
+ * Read into the page cache. If a page already exists, and PageUptodate() is
+ * not set, try to fill the page but don't wait for it to become unlocked.
+ *
+ * If the page does not get brought uptodate, return -EIO.
  */
 struct page *read_cache_page_async(struct address_space *mapping,
-                               unsigned long index,
+                               pgoff_t index,
                                int (*filler)(void *,struct page*),
                                void *data)
 {
@@ -1806,7 +1732,7 @@ EXPORT_SYMBOL(read_cache_page_async);
  * If the page does not get brought uptodate, return -EIO.
  */
 struct page *read_cache_page(struct address_space *mapping,
-                               unsigned long index,
+                               pgoff_t index,
                                int (*filler)(void *,struct page*),
                                void *data)
 {
@@ -1825,40 +1751,6 @@ struct page *read_cache_page(struct address_space *mapping,
 }
 EXPORT_SYMBOL(read_cache_page);
 
-/*
- * If the page was newly created, increment its refcount and add it to the
- * caller's lru-buffering pagevec.  This function is specifically for
- * generic_file_write().
- */
-static inline struct page *
-__grab_cache_page(struct address_space *mapping, unsigned long index,
-                       struct page **cached_page, struct pagevec *lru_pvec)
-{
-       int err;
-       struct page *page;
-repeat:
-       page = find_lock_page(mapping, index);
-       if (!page) {
-               if (!*cached_page) {
-                       *cached_page = page_cache_alloc(mapping);
-                       if (!*cached_page)
-                               return NULL;
-               }
-               err = add_to_page_cache(*cached_page, mapping,
-                                       index, GFP_KERNEL);
-               if (err == -EEXIST)
-                       goto repeat;
-               if (err == 0) {
-                       page = *cached_page;
-                       page_cache_get(page);
-                       if (!pagevec_add(lru_pvec, page))
-                               __pagevec_lru_add(lru_pvec);
-                       *cached_page = NULL;
-               }
-       }
-       return page;
-}
-
 /*
  * The logic we want is
  *
@@ -1881,14 +1773,14 @@ int should_remove_suid(struct dentry *dentry)
        if (unlikely((mode & S_ISGID) && (mode & S_IXGRP)))
                kill |= ATTR_KILL_SGID;
 
-       if (unlikely(kill && !capable(CAP_FSETID)))
+       if (unlikely(kill && !capable(CAP_FSETID) && S_ISREG(mode)))
                return kill;
 
        return 0;
 }
 EXPORT_SYMBOL(should_remove_suid);
 
-int __remove_suid(struct dentry *dentry, int kill)
+static int __remove_suid(struct dentry *dentry, int kill)
 {
        struct iattr newattrs;
 
@@ -1896,19 +1788,25 @@ int __remove_suid(struct dentry *dentry, int kill)
        return notify_change(dentry, &newattrs);
 }
 
-int remove_suid(struct dentry *dentry)
+int file_remove_suid(struct file *file)
 {
-       int kill = should_remove_suid(dentry);
+       struct dentry *dentry = file->f_path.dentry;
+       int killsuid = should_remove_suid(dentry);
+       int killpriv = security_inode_need_killpriv(dentry);
+       int error = 0;
 
-       if (unlikely(kill))
-               return __remove_suid(dentry, kill);
+       if (killpriv < 0)
+               return killpriv;
+       if (killpriv)
+               error = security_inode_killpriv(dentry);
+       if (!error && killsuid)
+               error = __remove_suid(dentry, killsuid);
 
-       return 0;
+       return error;
 }
-EXPORT_SYMBOL(remove_suid);
+EXPORT_SYMBOL(file_remove_suid);
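
For context, the usual call-site pattern on the write path (sketched below with a hypothetical helper) strips the setuid/setgid bits and freshens the timestamps before any data is written, with i_mutex held:

/* Hypothetical helper: caller must hold inode->i_mutex. */
static int example_prepare_to_write(struct file *file)
{
        int err;

        err = file_remove_suid(file);   /* drop S_ISUID/S_ISGID and/or caps */
        if (err)
                return err;

        file_update_time(file);         /* mtime/ctime before the data changes */
        return 0;
}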
 
-size_t
-__filemap_copy_from_user_iovec_inatomic(char *vaddr,
+static size_t __iovec_copy_from_user_inatomic(char *vaddr,
                        const struct iovec *iov, size_t base, size_t bytes)
 {
        size_t copied = 0, left = 0;
@@ -1930,6 +1828,126 @@ __filemap_copy_from_user_iovec_inatomic(char *vaddr,
        return copied - left;
 }
 
+/*
+ * Copy as much as we can into the page and return the number of bytes which
+ * were successfully copied.  If a fault is encountered then return the number
+ * of bytes which were copied before the fault.
+ */
+size_t iov_iter_copy_from_user_atomic(struct page *page,
+               struct iov_iter *i, unsigned long offset, size_t bytes)
+{
+       char *kaddr;
+       size_t copied;
+
+       BUG_ON(!in_atomic());
+       kaddr = kmap_atomic(page, KM_USER0);
+       if (likely(i->nr_segs == 1)) {
+               int left;
+               char __user *buf = i->iov->iov_base + i->iov_offset;
+               left = __copy_from_user_inatomic_nocache(kaddr + offset,
+                                                       buf, bytes);
+               copied = bytes - left;
+       } else {
+               copied = __iovec_copy_from_user_inatomic(kaddr + offset,
+                                               i->iov, i->iov_offset, bytes);
+       }
+       kunmap_atomic(kaddr, KM_USER0);
+
+       return copied;
+}
+EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
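
A minimal sketch of the intended calling convention (the function name is hypothetical): the copy runs with pagefaults disabled so it cannot sleep while the caller holds a locked pagecache page:

static size_t example_atomic_usercopy(struct page *page, struct iov_iter *i,
                                      unsigned long offset, size_t bytes)
{
        size_t copied;

        pagefault_disable();    /* satisfies the BUG_ON(!in_atomic()) above */
        copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
        pagefault_enable();
        flush_dcache_page(page);

        return copied;          /* a short count means the user page faulted */
}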
+
+/*
+ * This has the same side effects and return value as
+ * iov_iter_copy_from_user_atomic().
+ * The difference is that it attempts to resolve faults.
+ * Page must not be locked.
+ */
+size_t iov_iter_copy_from_user(struct page *page,
+               struct iov_iter *i, unsigned long offset, size_t bytes)
+{
+       char *kaddr;
+       size_t copied;
+
+       kaddr = kmap(page);
+       if (likely(i->nr_segs == 1)) {
+               int left;
+               char __user *buf = i->iov->iov_base + i->iov_offset;
+               left = __copy_from_user_nocache(kaddr + offset, buf, bytes);
+               copied = bytes - left;
+       } else {
+               copied = __iovec_copy_from_user_inatomic(kaddr + offset,
+                                               i->iov, i->iov_offset, bytes);
+       }
+       kunmap(page);
+       return copied;
+}
+EXPORT_SYMBOL(iov_iter_copy_from_user);
+
+void iov_iter_advance(struct iov_iter *i, size_t bytes)
+{
+       BUG_ON(i->count < bytes);
+
+       if (likely(i->nr_segs == 1)) {
+               i->iov_offset += bytes;
+               i->count -= bytes;
+       } else {
+               const struct iovec *iov = i->iov;
+               size_t base = i->iov_offset;
+
+               /*
+                * The !iov->iov_len check ensures we skip over unlikely
+                * zero-length segments (without overrunning the iovec).
+                */
+               while (bytes || unlikely(i->count && !iov->iov_len)) {
+                       int copy;
+
+                       copy = min(bytes, iov->iov_len - base);
+                       BUG_ON(!i->count || i->count < copy);
+                       i->count -= copy;
+                       bytes -= copy;
+                       base += copy;
+                       if (iov->iov_len == base) {
+                               iov++;
+                               base = 0;
+                       }
+               }
+               i->iov = iov;
+               i->iov_offset = base;
+       }
+}
+EXPORT_SYMBOL(iov_iter_advance);
+
+/*
+ * Fault in the first iovec of the given iov_iter, to a maximum length
+ * of bytes. Returns 0 on success, or non-zero if the memory could not be
+ * accessed (i.e. because it is an invalid address).
+ *
+ * writev-intensive code may want this to prefault several iovecs -- that
+ * would be possible (callers must not rely on the fact that _only_ the
+ * first iovec will be faulted with the current implementation).
+ */
+int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
+{
+       char __user *buf = i->iov->iov_base + i->iov_offset;
+       bytes = min(bytes, i->iov->iov_len - i->iov_offset);
+       return fault_in_pages_readable(buf, bytes);
+}
+EXPORT_SYMBOL(iov_iter_fault_in_readable);
+
+/*
+ * Return the count of just the current iov_iter segment.
+ */
+size_t iov_iter_single_seg_count(struct iov_iter *i)
+{
+       const struct iovec *iov = i->iov;
+       if (i->nr_segs == 1)
+               return i->count;
+       else
+               return min(i->count, iov->iov_len - i->iov_offset);
+}
+EXPORT_SYMBOL(iov_iter_single_seg_count);
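
Taken together, the iterator helpers support the pattern sketched below (hypothetical function; a real caller would copy data where the comment stands), walking an iovec array one segment-sized chunk at a time:

static void example_walk_iovec(const struct iovec *iov, unsigned long nr_segs,
                               size_t count)
{
        struct iov_iter i;

        iov_iter_init(&i, iov, nr_segs, count, 0);

        while (iov_iter_count(&i)) {
                /* Never cross a segment boundary in one step. */
                size_t chunk = iov_iter_single_seg_count(&i);

                /*
                 * ... copy 'chunk' bytes starting at
                 *     i.iov->iov_base + i.iov_offset ...
                 */

                iov_iter_advance(&i, chunk);
        }
}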
+
 /*
  * Performs necessary checks before doing a write
  *
@@ -2012,6 +2030,28 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
 }
 EXPORT_SYMBOL(generic_write_checks);
 
+int pagecache_write_begin(struct file *file, struct address_space *mapping,
+                               loff_t pos, unsigned len, unsigned flags,
+                               struct page **pagep, void **fsdata)
+{
+       const struct address_space_operations *aops = mapping->a_ops;
+
+       return aops->write_begin(file, mapping, pos, len, flags,
+                                                       pagep, fsdata);
+}
+EXPORT_SYMBOL(pagecache_write_begin);
+
+int pagecache_write_end(struct file *file, struct address_space *mapping,
+                               loff_t pos, unsigned len, unsigned copied,
+                               struct page *page, void *fsdata)
+{
+       const struct address_space_operations *aops = mapping->a_ops;
+
+       mark_page_accessed(page);
+       return aops->write_end(file, mapping, pos, len, copied, page, fsdata);
+}
+EXPORT_SYMBOL(pagecache_write_end);
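
These wrappers let code outside the filesystem push data into the pagecache without knowing which write_begin/write_end pair sits behind the mapping. A hedged sketch (hypothetical helper; assumes pos..pos+len-1 stays within one page):

static int example_write_kernel_buf(struct file *file, loff_t pos,
                                    const char *buf, unsigned len)
{
        struct address_space *mapping = file->f_mapping;
        unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
        struct page *page;
        void *fsdata;
        char *kaddr;
        int status;

        status = pagecache_write_begin(file, mapping, pos, len,
                                AOP_FLAG_UNINTERRUPTIBLE, &page, &fsdata);
        if (status)
                return status;

        kaddr = kmap_atomic(page, KM_USER0);
        memcpy(kaddr + offset, buf, len);       /* kernel copy: cannot fault */
        kunmap_atomic(kaddr, KM_USER0);
        flush_dcache_page(page);

        /* write_end unlocks the page and drops the reference for us. */
        status = pagecache_write_end(file, mapping, pos, len, len,
                                page, fsdata);
        return status < 0 ? status : 0;
}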
+
 ssize_t
 generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
                unsigned long *nr_segs, loff_t pos, loff_t *ppos,
@@ -2021,11 +2061,54 @@ generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
        struct address_space *mapping = file->f_mapping;
        struct inode    *inode = mapping->host;
        ssize_t         written;
+       size_t          write_len;
+       pgoff_t         end;
 
        if (count != ocount)
                *nr_segs = iov_shorten((struct iovec *)iov, *nr_segs, count);
 
-       written = generic_file_direct_IO(WRITE, iocb, iov, pos, *nr_segs);
+       write_len = iov_length(iov, *nr_segs);
+       end = (pos + write_len - 1) >> PAGE_CACHE_SHIFT;
+
+       written = filemap_write_and_wait_range(mapping, pos, pos + write_len - 1);
+       if (written)
+               goto out;
+
+       /*
+        * After a write we want buffered reads to be sure to go to disk to get
+        * the new data.  We invalidate clean cached pages from the region we're
+        * about to write.  We do this *before* the write so that we can return
+        * without clobbering -EIOCBQUEUED from ->direct_IO().
+        */
+       if (mapping->nrpages) {
+               written = invalidate_inode_pages2_range(mapping,
+                                       pos >> PAGE_CACHE_SHIFT, end);
+               /*
+                * If a page cannot be invalidated, return 0 to fall back
+                * to buffered write.
+                */
+               if (written) {
+                       if (written == -EBUSY)
+                               return 0;
+                       goto out;
+               }
+       }
+
+       written = mapping->a_ops->direct_IO(WRITE, iocb, iov, pos, *nr_segs);
+
+       /*
+        * Finally, try again to invalidate clean pages which might have been
+        * cached by non-direct readahead, or faulted in by get_user_pages()
+        * if the source of the write was an mmap'ed region of the file
+        * we're writing.  Either one is a pretty crazy thing to do,
+        * so we don't support it 100%.  If this invalidation
+        * fails, tough, the write still worked...
+        */
+       if (mapping->nrpages) {
+               invalidate_inode_pages2_range(mapping,
+                                             pos >> PAGE_CACHE_SHIFT, end);
+       }
+
        if (written > 0) {
                loff_t end = pos + written;
                if (end > i_size_read(inode) && !S_ISBLK(inode->i_mode)) {
@@ -2041,6 +2124,7 @@ generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
         * i_mutex is held, which protects generic_osync_inode() from
         * livelocking.  AIO O_DIRECT ops attempt to sync metadata here.
         */
+out:
        if ((written >= 0 || written == -EIOCBQUEUED) &&
            ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
                int err = generic_osync_inode(inode, mapping, OSYNC_METADATA);
@@ -2051,151 +2135,148 @@ generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
 }
 EXPORT_SYMBOL(generic_file_direct_write);
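
On the other side of the mapping->a_ops->direct_IO() call above, a block-based filesystem commonly routes the request through blockdev_direct_IO(); the sketch below is hypothetical, and example_get_block stands in for the filesystem's get_block_t:

static int example_get_block(struct inode *inode, sector_t iblock,
                             struct buffer_head *bh_result, int create);

static ssize_t example_direct_IO(int rw, struct kiocb *iocb,
                                 const struct iovec *iov, loff_t offset,
                                 unsigned long nr_segs)
{
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;

        return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev,
                                  iov, offset, nr_segs,
                                  example_get_block, NULL);
}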
 
-ssize_t
-generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
-               unsigned long nr_segs, loff_t pos, loff_t *ppos,
-               size_t count, ssize_t written)
+/*
+ * Find or create a page at the given pagecache position. Return the locked
+ * page. This function is specifically for buffered writes.
+ */
+struct page *grab_cache_page_write_begin(struct address_space *mapping,
+                                       pgoff_t index, unsigned flags)
 {
-       struct file *file = iocb->ki_filp;
-       struct address_space * mapping = file->f_mapping;
-       const struct address_space_operations *a_ops = mapping->a_ops;
-       struct inode    *inode = mapping->host;
-       long            status = 0;
-       struct page     *page;
-       struct page     *cached_page = NULL;
-       size_t          bytes;
-       struct pagevec  lru_pvec;
-       const struct iovec *cur_iov = iov; /* current iovec */
-       size_t          iov_base = 0;      /* offset in the current iovec */
-       char __user     *buf;
+       int status;
+       struct page *page;
+       gfp_t gfp_notmask = 0;
+       if (flags & AOP_FLAG_NOFS)
+               gfp_notmask = __GFP_FS;
+repeat:
+       page = find_lock_page(mapping, index);
+       if (likely(page))
+               return page;
 
-       pagevec_init(&lru_pvec, 0);
+       page = __page_cache_alloc(mapping_gfp_mask(mapping) & ~gfp_notmask);
+       if (!page)
+               return NULL;
+       status = add_to_page_cache_lru(page, mapping, index,
+                                               GFP_KERNEL & ~gfp_notmask);
+       if (unlikely(status)) {
+               page_cache_release(page);
+               if (status == -EEXIST)
+                       goto repeat;
+               return NULL;
+       }
+       return page;
+}
+EXPORT_SYMBOL(grab_cache_page_write_begin);
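
In a filesystem this is typically called from ->write_begin; a minimal hypothetical sketch follows (a real implementation would also bring the page uptodate, e.g. via block_write_begin(), and cope with partial writes):

static int example_write_begin(struct file *file,
                               struct address_space *mapping,
                               loff_t pos, unsigned len, unsigned flags,
                               struct page **pagep, void **fsdata)
{
        pgoff_t index = pos >> PAGE_CACHE_SHIFT;
        struct page *page;

        page = grab_cache_page_write_begin(mapping, index, flags);
        if (!page)
                return -ENOMEM;

        *pagep = page;  /* locked, with a reference held for write_end */
        return 0;
}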
+
+static ssize_t generic_perform_write(struct file *file,
+                               struct iov_iter *i, loff_t pos)
+{
+       struct address_space *mapping = file->f_mapping;
+       const struct address_space_operations *a_ops = mapping->a_ops;
+       long status = 0;
+       ssize_t written = 0;
+       unsigned int flags = 0;
 
        /*
-        * handle partial DIO write.  Adjust cur_iov if needed.
+        * Copies from kernel address space cannot fail (NFSD is a big user).
         */
-       if (likely(nr_segs == 1))
-               buf = iov->iov_base + written;
-       else {
-               filemap_set_next_iovec(&cur_iov, &iov_base, written);
-               buf = cur_iov->iov_base + iov_base;
-       }
+       if (segment_eq(get_fs(), KERNEL_DS))
+               flags |= AOP_FLAG_UNINTERRUPTIBLE;
 
        do {
-               unsigned long index;
-               unsigned long offset;
-               size_t copied;
+               struct page *page;
+               pgoff_t index;          /* Pagecache index for current page */
+               unsigned long offset;   /* Offset into pagecache page */
+               unsigned long bytes;    /* Bytes to write to page */
+               size_t copied;          /* Bytes copied from user */
+               void *fsdata;
 
-               offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */
+               offset = (pos & (PAGE_CACHE_SIZE - 1));
                index = pos >> PAGE_CACHE_SHIFT;
-               bytes = PAGE_CACHE_SIZE - offset;
+               bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset,
+                                               iov_iter_count(i));
 
-               /* Limit the size of the copy to the caller's write size */
-               bytes = min(bytes, count);
+again:
 
-               /* We only need to worry about prefaulting when writes are from
-                * user-space.  NFSd uses vfs_writev with several non-aligned
-                * segments in the vector, and limiting to one segment a time is
-                * a noticeable performance for re-write
+               /*
+                * Bring in the user page that we will copy from _first_.
+                * Otherwise there's a nasty deadlock on copying from the
+                * same page as we're writing to, without it being marked
+                * up-to-date.
+                *
+                * Not only is this an optimisation, but it is also required
+                * to check that the address is actually valid when atomic
+                * usercopies are used below.
                 */
-               if (!segment_eq(get_fs(), KERNEL_DS)) {
-                       /*
-                        * Limit the size of the copy to that of the current
-                        * segment, because fault_in_pages_readable() doesn't
-                        * know how to walk segments.
-                        */
-                       bytes = min(bytes, cur_iov->iov_len - iov_base);
-
-                       /*
-                        * Bring in the user page that we will copy from
-                        * _first_.  Otherwise there's a nasty deadlock on
-                        * copying from the same page as we're writing to,
-                        * without it being marked up-to-date.
-                        */
-                       fault_in_pages_readable(buf, bytes);
-               }
-               page = __grab_cache_page(mapping,index,&cached_page,&lru_pvec);
-               if (!page) {
-                       status = -ENOMEM;
+               if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
+                       status = -EFAULT;
                        break;
                }
 
-               if (unlikely(bytes == 0)) {
-                       status = 0;
-                       copied = 0;
-                       goto zero_length_segment;
-               }
+               status = a_ops->write_begin(file, mapping, pos, bytes, flags,
+                                               &page, &fsdata);
+               if (unlikely(status))
+                       break;
+
+               pagefault_disable();
+               copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
+               pagefault_enable();
+               flush_dcache_page(page);
 
-               status = a_ops->prepare_write(file, page, offset, offset+bytes);
-               if (unlikely(status)) {
-                       loff_t isize = i_size_read(inode);
+               status = a_ops->write_end(file, mapping, pos, bytes, copied,
+                                               page, fsdata);
+               if (unlikely(status < 0))
+                       break;
+               copied = status;
 
-                       if (status != AOP_TRUNCATED_PAGE)
-                               unlock_page(page);
-                       page_cache_release(page);
-                       if (status == AOP_TRUNCATED_PAGE)
-                               continue;
+               cond_resched();
+
+               iov_iter_advance(i, copied);
+               if (unlikely(copied == 0)) {
                        /*
-                        * prepare_write() may have instantiated a few blocks
-                        * outside i_size.  Trim these off again.
+                        * If we were unable to copy any data at all, we must
+                        * fall back to a single segment length write.
+                        *
+                        * If we didn't fall back here, we could livelock
+                        * because not all segments in the iov can be copied at
+                        * once without a pagefault.
                         */
-                       if (pos + bytes > isize)
-                               vmtruncate(inode, isize);
-                       break;
+                       bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset,
+                                               iov_iter_single_seg_count(i));
+                       goto again;
                }
-               if (likely(nr_segs == 1))
-                       copied = filemap_copy_from_user(page, offset,
-                                                       buf, bytes);
-               else
-                       copied = filemap_copy_from_user_iovec(page, offset,
-                                               cur_iov, iov_base, bytes);
-               flush_dcache_page(page);
-               status = a_ops->commit_write(file, page, offset, offset+bytes);
-               if (status == AOP_TRUNCATED_PAGE) {
-                       page_cache_release(page);
-                       continue;
-               }
-zero_length_segment:
-               if (likely(copied >= 0)) {
-                       if (!status)
-                               status = copied;
-
-                       if (status >= 0) {
-                               written += status;
-                               count -= status;
-                               pos += status;
-                               buf += status;
-                               if (unlikely(nr_segs > 1)) {
-                                       filemap_set_next_iovec(&cur_iov,
-                                                       &iov_base, status);
-                                       if (count)
-                                               buf = cur_iov->iov_base +
-                                                       iov_base;
-                               } else {
-                                       iov_base += status;
-                               }
-                       }
-               }
-               if (unlikely(copied != bytes))
-                       if (status >= 0)
-                               status = -EFAULT;
-               unlock_page(page);
-               mark_page_accessed(page);
-               page_cache_release(page);
-               if (status < 0)
-                       break;
+               pos += copied;
+               written += copied;
+
                balance_dirty_pages_ratelimited(mapping);
-               cond_resched();
-       } while (count);
-       *ppos = pos;
 
-       if (cached_page)
-               page_cache_release(cached_page);
+       } while (iov_iter_count(i));
+
+       return written ? written : status;
+}
+
+ssize_t
+generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
+               unsigned long nr_segs, loff_t pos, loff_t *ppos,
+               size_t count, ssize_t written)
+{
+       struct file *file = iocb->ki_filp;
+       struct address_space *mapping = file->f_mapping;
+       const struct address_space_operations *a_ops = mapping->a_ops;
+       struct inode *inode = mapping->host;
+       ssize_t status;
+       struct iov_iter i;
+
+       iov_iter_init(&i, iov, nr_segs, count, written);
+       status = generic_perform_write(file, &i, pos);
 
-       /*
-        * For now, when the user asks for O_SYNC, we'll actually give O_DSYNC
-        */
        if (likely(status >= 0)) {
+               written += status;
+               *ppos = pos + status;
+
+               /*
+                * For now, when the user asks for O_SYNC, we'll actually give
+                * O_DSYNC.
+                */
                if (unlikely((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
                        if (!a_ops->writepage || !is_sync_kiocb(iocb))
                                status = generic_osync_inode(inode, mapping,
@@ -2209,9 +2290,9 @@ zero_length_segment:
         * the file data here, to try to honour O_DIRECT expectations.
         */
        if (unlikely(file->f_flags & O_DIRECT) && written)
-               status = filemap_write_and_wait(mapping);
+               status = filemap_write_and_wait_range(mapping,
+                                       pos, pos + written - 1);
 
-       pagevec_lru_add(&lru_pvec);
        return written ? written : status;
 }
 EXPORT_SYMBOL(generic_file_buffered_write);
@@ -2250,7 +2331,7 @@ __generic_file_aio_write_nolock(struct kiocb *iocb, const struct iovec *iov,
        if (count == 0)
                goto out;
 
-       err = remove_suid(file->f_path.dentry);
+       err = file_remove_suid(file);
        if (err)
                goto out;
 
@@ -2366,70 +2447,6 @@ ssize_t generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
 }
 EXPORT_SYMBOL(generic_file_aio_write);
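
Putting the exported pieces together, a simple filesystem of this era typically wires its regular-file methods straight to the generic helpers; the table below is purely illustrative:

static const struct file_operations example_file_operations = {
        .llseek         = generic_file_llseek,
        .read           = do_sync_read,
        .write          = do_sync_write,
        .aio_read       = generic_file_aio_read,
        .aio_write      = generic_file_aio_write,
        .mmap           = generic_file_mmap,
        .splice_read    = generic_file_splice_read,
};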
 
-/*
- * Called under i_mutex for writes to S_ISREG files.   Returns -EIO if something
- * went wrong during pagecache shootdown.
- */
-static ssize_t
-generic_file_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
-       loff_t offset, unsigned long nr_segs)
-{
-       struct file *file = iocb->ki_filp;
-       struct address_space *mapping = file->f_mapping;
-       ssize_t retval;
-       size_t write_len;
-       pgoff_t end = 0; /* silence gcc */
-
-       /*
-        * If it's a write, unmap all mmappings of the file up-front.  This
-        * will cause any pte dirty bits to be propagated into the pageframes
-        * for the subsequent filemap_write_and_wait().
-        */
-       if (rw == WRITE) {
-               write_len = iov_length(iov, nr_segs);
-               end = (offset + write_len - 1) >> PAGE_CACHE_SHIFT;
-               if (mapping_mapped(mapping))
-                       unmap_mapping_range(mapping, offset, write_len, 0);
-       }
-
-       retval = filemap_write_and_wait(mapping);
-       if (retval)
-               goto out;
-
-       /*
-        * After a write we want buffered reads to be sure to go to disk to get
-        * the new data.  We invalidate clean cached page from the region we're
-        * about to write.  We do this *before* the write so that we can return
-        * -EIO without clobbering -EIOCBQUEUED from ->direct_IO().
-        */
-       if (rw == WRITE && mapping->nrpages) {
-               retval = invalidate_inode_pages2_range(mapping,
-                                       offset >> PAGE_CACHE_SHIFT, end);
-               if (retval)
-                       goto out;
-       }
-
-       retval = mapping->a_ops->direct_IO(rw, iocb, iov, offset, nr_segs);
-       if (retval)
-               goto out;
-
-       /*
-        * Finally, try again to invalidate clean pages which might have been
-        * faulted in by get_user_pages() if the source of the write was an
-        * mmap()ed region of the file we're writing.  That's a pretty crazy
-        * thing to do, so we don't support it 100%.  If this invalidation
-        * fails and we have -EIOCBQUEUED we ignore the failure.
-        */
-       if (rw == WRITE && mapping->nrpages) {
-               int err = invalidate_inode_pages2_range(mapping,
-                                             offset >> PAGE_CACHE_SHIFT, end);
-               if (err && retval >= 0)
-                       retval = err;
-       }
-out:
-       return retval;
-}
-
 /**
  * try_to_release_page() - release old fs-specific metadata on a page
  *
@@ -2441,9 +2458,8 @@ out:
  * Otherwise return zero.
  *
  * The @gfp_mask argument specifies whether I/O may be performed to release
- * this page (__GFP_IO), and whether the call may block (__GFP_WAIT).
+ * this page (__GFP_IO), and whether the call may block (__GFP_WAIT & __GFP_FS).
  *
- * NOTE: @gfp_mask may go away, and this function may become non-blocking.
  */
 int try_to_release_page(struct page *page, gfp_t gfp_mask)
 {