diff --git a/mm/shmem.c b/mm/shmem.c
index d176e48..fba53ca 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -6,7 +6,8 @@
  *              2000-2001 Christoph Rohland
  *              2000-2001 SAP AG
  *              2002 Red Hat Inc.
- * Copyright (C) 2002-2005 Hugh Dickins.
+ * Copyright (C) 2002-2011 Hugh Dickins.
+ * Copyright (C) 2011 Google Inc.
  * Copyright (C) 2002-2005 VERITAS Software Corporation.
  * Copyright (C) 2004 Andi Kleen, SuSE Labs
  *
@@ -28,7 +29,6 @@
 #include <linux/file.h>
 #include <linux/mm.h>
 #include <linux/module.h>
-#include <linux/percpu_counter.h>
 #include <linux/swap.h>
 
 static struct vfsmount *shm_mnt;
@@ -51,6 +51,8 @@ static struct vfsmount *shm_mnt;
 #include <linux/shmem_fs.h>
 #include <linux/writeback.h>
 #include <linux/blkdev.h>
+#include <linux/pagevec.h>
+#include <linux/percpu_counter.h>
 #include <linux/splice.h>
 #include <linux/security.h>
 #include <linux/swapops.h>
@@ -63,43 +65,17 @@ static struct vfsmount *shm_mnt;
 #include <linux/magic.h>
 
 #include <asm/uaccess.h>
-#include <asm/div64.h>
 #include <asm/pgtable.h>
 
-/*
- * The maximum size of a shmem/tmpfs file is limited by the maximum size of
- * its triple-indirect swap vector - see illustration at shmem_swp_entry().
- *
- * With 4kB page size, maximum file size is just over 2TB on a 32-bit kernel,
- * but one eighth of that on a 64-bit kernel.  With 8kB page size, maximum
- * file size is just over 4TB on a 64-bit kernel, but 16TB on a 32-bit kernel,
- * MAX_LFS_FILESIZE being then more restrictive than swap vector layout.
- *
- * We use / and * instead of shifts in the definitions below, so that the swap
- * vector can be tested with small even values (e.g. 20) for ENTRIES_PER_PAGE.
- */
-#define ENTRIES_PER_PAGE (PAGE_CACHE_SIZE/sizeof(unsigned long))
-#define ENTRIES_PER_PAGEPAGE ((unsigned long long)ENTRIES_PER_PAGE*ENTRIES_PER_PAGE)
-
-#define SHMSWP_MAX_INDEX (SHMEM_NR_DIRECT + (ENTRIES_PER_PAGEPAGE/2) * (ENTRIES_PER_PAGE+1))
-#define SHMSWP_MAX_BYTES (SHMSWP_MAX_INDEX << PAGE_CACHE_SHIFT)
-
-#define SHMEM_MAX_BYTES  min_t(unsigned long long, SHMSWP_MAX_BYTES, MAX_LFS_FILESIZE)
-#define SHMEM_MAX_INDEX  ((unsigned long)((SHMEM_MAX_BYTES+1) >> PAGE_CACHE_SHIFT))
-
 #define BLOCKS_PER_PAGE  (PAGE_CACHE_SIZE/512)
 #define VM_ACCT(size)    (PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT)
 
-/* info->flags needs VM_flags to handle pagein/truncate races efficiently */
-#define SHMEM_PAGEIN    VM_READ
-#define SHMEM_TRUNCATE  VM_WRITE
-
-/* Definition to limit shmem_truncate's steps between cond_rescheds */
-#define LATENCY_LIMIT   64
-
 /* Pretend that each entry is of this size in directory's i_size */
 #define BOGO_DIRENT_SIZE 20
 
+/* Symlink up to this size is kmalloc'ed instead of using a swappable page */
+#define SHORT_SYMLINK_LEN 128
+
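Aside: SHORT_SYMLINK_LEN is the cutoff below which a symlink target is duplicated into a kmalloc'ed buffer hung off the inode, rather than stored in a page that could be swapped out. A minimal sketch of the intended shmem_symlink() logic (shmem_short_symlink_operations and info->symlink come from elsewhere in this series, so treat it as illustrative):

	if (len <= SHORT_SYMLINK_LEN) {
		/* target is tiny: keep it in kernel memory, never swapped */
		info->symlink = kmemdup(symname, len, GFP_KERNEL);
		if (!info->symlink) {
			iput(inode);
			return -ENOMEM;
		}
		inode->i_op = &shmem_short_symlink_operations;
	} else {
		/* longer targets still use a swappable data page */
	}

The kfree(info->symlink) added to shmem_evict_inode() below is the matching cleanup.
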
 struct shmem_xattr {
        struct list_head list;  /* anchored by shmem_inode_info->xattr_list */
        char *name;             /* xattr name */
@@ -107,7 +83,7 @@ struct shmem_xattr {
        char value[0];
 };
 
-/* Flag allocation requirements to shmem_getpage and shmem_swp_alloc */
+/* Flag allocation requirements to shmem_getpage */
 enum sgp_type {
        SGP_READ,       /* don't exceed i_size, don't allocate page */
        SGP_CACHE,      /* don't exceed i_size, may allocate page */
@@ -127,57 +103,14 @@ static unsigned long shmem_default_max_inodes(void)
 }
 #endif
 
-static int shmem_getpage(struct inode *inode, unsigned long idx,
-                        struct page **pagep, enum sgp_type sgp, int *type);
-
-static inline struct page *shmem_dir_alloc(gfp_t gfp_mask)
-{
-       /*
-        * The above definition of ENTRIES_PER_PAGE, and the use of
-        * BLOCKS_PER_PAGE on indirect pages, assume PAGE_CACHE_SIZE:
-        * might be reconsidered if it ever diverges from PAGE_SIZE.
-        *
-        * Mobility flags are masked out as swap vectors cannot move
-        */
-       return alloc_pages((gfp_mask & ~GFP_MOVABLE_MASK) | __GFP_ZERO,
-                               PAGE_CACHE_SHIFT-PAGE_SHIFT);
-}
-
-static inline void shmem_dir_free(struct page *page)
-{
-       __free_pages(page, PAGE_CACHE_SHIFT-PAGE_SHIFT);
-}
-
-static struct page **shmem_dir_map(struct page *page)
-{
-       return (struct page **)kmap_atomic(page, KM_USER0);
-}
-
-static inline void shmem_dir_unmap(struct page **dir)
-{
-       kunmap_atomic(dir, KM_USER0);
-}
-
-static swp_entry_t *shmem_swp_map(struct page *page)
-{
-       return (swp_entry_t *)kmap_atomic(page, KM_USER1);
-}
-
-static inline void shmem_swp_balance_unmap(void)
-{
-       /*
-        * When passing a pointer to an i_direct entry, to code which
-        * also handles indirect entries and so will shmem_swp_unmap,
-        * we must arrange for the preempt count to remain in balance.
-        * What kmap_atomic of a lowmem page does depends on config
-        * and architecture, so pretend to kmap_atomic some lowmem page.
-        */
-       (void) kmap_atomic(ZERO_PAGE(0), KM_USER1);
-}
+static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
+       struct page **pagep, enum sgp_type sgp, gfp_t gfp, int *fault_type);
 
-static inline void shmem_swp_unmap(swp_entry_t *entry)
+static inline int shmem_getpage(struct inode *inode, pgoff_t index,
+       struct page **pagep, enum sgp_type sgp, int *fault_type)
 {
-       kunmap_atomic(entry, KM_USER1);
+       return shmem_getpage_gfp(inode, index, pagep, sgp,
+                       mapping_gfp_mask(inode->i_mapping), fault_type);
 }
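Aside: shmem_getpage() is now a thin wrapper that supplies the mapping's default gfp mask, while shmem_getpage_gfp() takes an explicit one for callers with their own allocation constraints. A typical caller looks like the fault path; this sketch is modeled on shmem_fault(), which lies outside these hunks:

	int ret = 0;
	error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
	if (error)
		return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);
	return ret | VM_FAULT_LOCKED;
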
 
 static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
@@ -237,15 +170,6 @@ static struct backing_dev_info shmem_backing_dev_info  __read_mostly = {
 static LIST_HEAD(shmem_swaplist);
 static DEFINE_MUTEX(shmem_swaplist_mutex);
 
-static void shmem_free_blocks(struct inode *inode, long pages)
-{
-       struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
-       if (sbinfo->max_blocks) {
-               percpu_counter_add(&sbinfo->used_blocks, -pages);
-               inode->i_blocks -= pages*BLOCKS_PER_PAGE;
-       }
-}
-
 static int shmem_reserve_inode(struct super_block *sb)
 {
        struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
@@ -272,7 +196,7 @@ static void shmem_free_inode(struct super_block *sb)
 }
 
 /**
- * shmem_recalc_inode - recalculate the size of an inode
+ * shmem_recalc_inode - recalculate the block usage of an inode
  * @inode: inode to recalc
  *
  * We have to calculate the free blocks since the mm can drop
@@ -290,472 +214,297 @@ static void shmem_recalc_inode(struct inode *inode)
 
        freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
        if (freed > 0) {
+               struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
+               if (sbinfo->max_blocks)
+                       percpu_counter_add(&sbinfo->used_blocks, -freed);
                info->alloced -= freed;
+               inode->i_blocks -= freed * BLOCKS_PER_PAGE;
                shmem_unacct_blocks(info->flags, freed);
-               shmem_free_blocks(inode, freed);
        }
 }
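For example: if info->alloced is 10, info->swapped is 3 and nrpages is 5, then freed is 2, meaning two pages were reclaimed from the page cache without going to swap; those two blocks are returned to sbinfo->used_blocks, subtracted from inode->i_blocks, and unaccounted from the memory commitment.
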
 
-/**
- * shmem_swp_entry - find the swap vector position in the info structure
- * @info:  info structure for the inode
- * @index: index of the page to find
- * @page:  optional page to add to the structure. Has to be preset to
- *         all zeros
- *
- * If there is no space allocated yet it will return NULL when
- * page is NULL, else it will use the page for the needed block,
- * setting it to NULL on return to indicate that it has been used.
- *
- * The swap vector is organized the following way:
- *
- * There are SHMEM_NR_DIRECT entries directly stored in the
- * shmem_inode_info structure. So small files do not need an addional
- * allocation.
- *
- * For pages with index > SHMEM_NR_DIRECT there is the pointer
- * i_indirect which points to a page which holds in the first half
- * doubly indirect blocks, in the second half triple indirect blocks:
- *
- * For an artificial ENTRIES_PER_PAGE = 4 this would lead to the
- * following layout (for SHMEM_NR_DIRECT == 16):
- *
- * i_indirect -> dir --> 16-19
- *           |      +-> 20-23
- *           |
- *           +-->dir2 --> 24-27
- *           |        +-> 28-31
- *           |        +-> 32-35
- *           |        +-> 36-39
- *           |
- *           +-->dir3 --> 40-43
- *                    +-> 44-47
- *                    +-> 48-51
- *                    +-> 52-55
+/*
+ * Replace item expected in radix tree by a new item, while holding tree lock.
  */
-static swp_entry_t *shmem_swp_entry(struct shmem_inode_info *info, unsigned long index, struct page **page)
-{
-       unsigned long offset;
-       struct page **dir;
-       struct page *subdir;
+static int shmem_radix_tree_replace(struct address_space *mapping,
+                       pgoff_t index, void *expected, void *replacement)
+{
+       void **pslot;
+       void *item = NULL;
+
+       VM_BUG_ON(!expected);
+       pslot = radix_tree_lookup_slot(&mapping->page_tree, index);
+       if (pslot)
+               item = radix_tree_deref_slot_protected(pslot,
+                                                       &mapping->tree_lock);
+       if (item != expected)
+               return -ENOENT;
+       if (replacement)
+               radix_tree_replace_slot(pslot, replacement);
+       else
+               radix_tree_delete(&mapping->page_tree, index);
+       return 0;
+}
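Aside: the items here are either struct page pointers or swap entries packed as radix-tree "exceptional" entries. The conversion helpers are added to include/linux/swapops.h by a companion patch in this series; approximately:

	static inline void *swp_to_radix_entry(swp_entry_t entry)
	{
		unsigned long value = entry.val << RADIX_TREE_EXCEPTIONAL_SHIFT;
		return (void *)(value | RADIX_TREE_EXCEPTIONAL_ENTRY);
	}

	static inline swp_entry_t radix_to_swp_entry(void *arg)
	{
		swp_entry_t entry;
		entry.val = (unsigned long)arg >> RADIX_TREE_EXCEPTIONAL_SHIFT;
		return entry;
	}

Because the low two bits of a struct page pointer are always clear, radix_tree_exceptional_entry() can tell the two kinds apart by testing bit 1.
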
 
-       if (index < SHMEM_NR_DIRECT) {
-               shmem_swp_balance_unmap();
-               return info->i_direct+index;
-       }
-       if (!info->i_indirect) {
-               if (page) {
-                       info->i_indirect = *page;
-                       *page = NULL;
-               }
-               return NULL;                    /* need another page */
-       }
+/*
+ * Like add_to_page_cache_locked, but error if expected item has gone.
+ */
+static int shmem_add_to_page_cache(struct page *page,
+                                  struct address_space *mapping,
+                                  pgoff_t index, gfp_t gfp, void *expected)
+{
+       int error = 0;
 
-       index -= SHMEM_NR_DIRECT;
-       offset = index % ENTRIES_PER_PAGE;
-       index /= ENTRIES_PER_PAGE;
-       dir = shmem_dir_map(info->i_indirect);
-
-       if (index >= ENTRIES_PER_PAGE/2) {
-               index -= ENTRIES_PER_PAGE/2;
-               dir += ENTRIES_PER_PAGE/2 + index/ENTRIES_PER_PAGE;
-               index %= ENTRIES_PER_PAGE;
-               subdir = *dir;
-               if (!subdir) {
-                       if (page) {
-                               *dir = *page;
-                               *page = NULL;
-                       }
-                       shmem_dir_unmap(dir);
-                       return NULL;            /* need another page */
-               }
-               shmem_dir_unmap(dir);
-               dir = shmem_dir_map(subdir);
-       }
+       VM_BUG_ON(!PageLocked(page));
+       VM_BUG_ON(!PageSwapBacked(page));
 
-       dir += index;
-       subdir = *dir;
-       if (!subdir) {
-               if (!page || !(subdir = *page)) {
-                       shmem_dir_unmap(dir);
-                       return NULL;            /* need a page */
+       if (!expected)
+               error = radix_tree_preload(gfp & GFP_RECLAIM_MASK);
+       if (!error) {
+               page_cache_get(page);
+               page->mapping = mapping;
+               page->index = index;
+
+               spin_lock_irq(&mapping->tree_lock);
+               if (!expected)
+                       error = radix_tree_insert(&mapping->page_tree,
+                                                       index, page);
+               else
+                       error = shmem_radix_tree_replace(mapping, index,
+                                                       expected, page);
+               if (!error) {
+                       mapping->nrpages++;
+                       __inc_zone_page_state(page, NR_FILE_PAGES);
+                       __inc_zone_page_state(page, NR_SHMEM);
+                       spin_unlock_irq(&mapping->tree_lock);
+               } else {
+                       page->mapping = NULL;
+                       spin_unlock_irq(&mapping->tree_lock);
+                       page_cache_release(page);
                }
-               *dir = subdir;
-               *page = NULL;
+               if (!expected)
+                       radix_tree_preload_end();
        }
-       shmem_dir_unmap(dir);
-       return shmem_swp_map(subdir) + offset;
+       if (error)
+               mem_cgroup_uncharge_cache_page(page);
+       return error;
 }
 
-static void shmem_swp_set(struct shmem_inode_info *info, swp_entry_t *entry, unsigned long value)
+/*
+ * Like delete_from_page_cache, but substitutes swap for page.
+ */
+static void shmem_delete_from_page_cache(struct page *page, void *radswap)
 {
-       long incdec = value? 1: -1;
+       struct address_space *mapping = page->mapping;
+       int error;
 
-       entry->val = value;
-       info->swapped += incdec;
-       if ((unsigned long)(entry - info->i_direct) >= SHMEM_NR_DIRECT) {
-               struct page *page = kmap_atomic_to_page(entry);
-               set_page_private(page, page_private(page) + incdec);
-       }
+       spin_lock_irq(&mapping->tree_lock);
+       error = shmem_radix_tree_replace(mapping, page->index, page, radswap);
+       page->mapping = NULL;
+       mapping->nrpages--;
+       __dec_zone_page_state(page, NR_FILE_PAGES);
+       __dec_zone_page_state(page, NR_SHMEM);
+       spin_unlock_irq(&mapping->tree_lock);
+       page_cache_release(page);
+       BUG_ON(error);
 }
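Aside: the caller in this patch is shmem_writepage(), which substitutes the swap entry atomically under tree_lock:

	shmem_delete_from_page_cache(page, swp_to_radix_entry(swap));

so the offset never goes missing from the tree mid-swap-out. The BUG_ON(error) holds because the caller owns the page lock and the page is known to belong to this mapping.
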
 
-/**
- * shmem_swp_alloc - get the position of the swap entry for the page.
- * @info:      info structure for the inode
- * @index:     index of the page to find
- * @sgp:       check and recheck i_size? skip allocation?
- *
- * If the entry does not exist, allocate it.
+/*
+ * Like find_get_pages, but collecting swap entries as well as pages.
  */
-static swp_entry_t *shmem_swp_alloc(struct shmem_inode_info *info, unsigned long index, enum sgp_type sgp)
-{
-       struct inode *inode = &info->vfs_inode;
-       struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
-       struct page *page = NULL;
-       swp_entry_t *entry;
-
-       if (sgp != SGP_WRITE &&
-           ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode))
-               return ERR_PTR(-EINVAL);
-
-       while (!(entry = shmem_swp_entry(info, index, &page))) {
-               if (sgp == SGP_READ)
-                       return shmem_swp_map(ZERO_PAGE(0));
-               /*
-                * Test used_blocks against 1 less max_blocks, since we have 1 data
-                * page (and perhaps indirect index pages) yet to allocate:
-                * a waste to allocate index if we cannot allocate data.
-                */
-               if (sbinfo->max_blocks) {
-                       if (percpu_counter_compare(&sbinfo->used_blocks,
-                                               sbinfo->max_blocks - 1) >= 0)
-                               return ERR_PTR(-ENOSPC);
-                       percpu_counter_inc(&sbinfo->used_blocks);
-                       inode->i_blocks += BLOCKS_PER_PAGE;
+static unsigned shmem_find_get_pages_and_swap(struct address_space *mapping,
+                                       pgoff_t start, unsigned int nr_pages,
+                                       struct page **pages, pgoff_t *indices)
+{
+       unsigned int i;
+       unsigned int ret;
+       unsigned int nr_found;
+
+       rcu_read_lock();
+restart:
+       nr_found = radix_tree_gang_lookup_slot(&mapping->page_tree,
+                               (void ***)pages, indices, start, nr_pages);
+       ret = 0;
+       for (i = 0; i < nr_found; i++) {
+               struct page *page;
+repeat:
+               page = radix_tree_deref_slot((void **)pages[i]);
+               if (unlikely(!page))
+                       continue;
+               if (radix_tree_exception(page)) {
+                       if (radix_tree_deref_retry(page))
+                               goto restart;
+                       /*
+                        * Otherwise, we must be storing a swap entry
+                        * here as an exceptional entry: so return it
+                        * without attempting to raise page count.
+                        */
+                       goto export;
                }
+               if (!page_cache_get_speculative(page))
+                       goto repeat;
 
-               spin_unlock(&info->lock);
-               page = shmem_dir_alloc(mapping_gfp_mask(inode->i_mapping));
-               spin_lock(&info->lock);
-
-               if (!page) {
-                       shmem_free_blocks(inode, 1);
-                       return ERR_PTR(-ENOMEM);
-               }
-               if (sgp != SGP_WRITE &&
-                   ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
-                       entry = ERR_PTR(-EINVAL);
-                       break;
+               /* Has the page moved? */
+               if (unlikely(page != *((void **)pages[i]))) {
+                       page_cache_release(page);
+                       goto repeat;
                }
-               if (info->next_index <= index)
-                       info->next_index = index + 1;
-       }
-       if (page) {
-               /* another task gave its page, or truncated the file */
-               shmem_free_blocks(inode, 1);
-               shmem_dir_free(page);
-       }
-       if (info->next_index <= index && !IS_ERR(entry))
-               info->next_index = index + 1;
-       return entry;
+export:
+               indices[ret] = indices[i];
+               pages[ret] = page;
+               ret++;
+       }
+       if (unlikely(!ret && nr_found))
+               goto restart;
+       rcu_read_unlock();
+       return ret;
 }
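Aside: unlike find_get_pages(), the pages[] array returned here may contain exceptional entries, and indices[] records each entry's offset (a swap entry has no struct page whose ->index could be read). Callers iterate accordingly; a condensed sketch of the loops in shmem_truncate_range() below:

	pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
					PAGEVEC_SIZE, pvec.pages, indices);
	for (i = 0; i < pagevec_count(&pvec); i++) {
		struct page *page = pvec.pages[i];
		index = indices[i];
		if (radix_tree_exceptional_entry(page))
			nr_swaps_freed += !shmem_free_swap(mapping, index, page);
		else {
			/* a real page: lock it and truncate it */
		}
	}
	shmem_pagevec_release(&pvec);
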
 
-/**
- * shmem_free_swp - free some swap entries in a directory
- * @dir:        pointer to the directory
- * @edir:       pointer after last entry of the directory
- * @punch_lock: pointer to spinlock when needed for the holepunch case
+/*
+ * Remove swap entry from radix tree, free the swap and its page cache.
  */
-static int shmem_free_swp(swp_entry_t *dir, swp_entry_t *edir,
-                                               spinlock_t *punch_lock)
-{
-       spinlock_t *punch_unlock = NULL;
-       swp_entry_t *ptr;
-       int freed = 0;
-
-       for (ptr = dir; ptr < edir; ptr++) {
-               if (ptr->val) {
-                       if (unlikely(punch_lock)) {
-                               punch_unlock = punch_lock;
-                               punch_lock = NULL;
-                               spin_lock(punch_unlock);
-                               if (!ptr->val)
-                                       continue;
-                       }
-                       free_swap_and_cache(*ptr);
-                       *ptr = (swp_entry_t){0};
-                       freed++;
-               }
-       }
-       if (punch_unlock)
-               spin_unlock(punch_unlock);
-       return freed;
-}
-
-static int shmem_map_and_free_swp(struct page *subdir, int offset,
-               int limit, struct page ***dir, spinlock_t *punch_lock)
-{
-       swp_entry_t *ptr;
-       int freed = 0;
-
-       ptr = shmem_swp_map(subdir);
-       for (; offset < limit; offset += LATENCY_LIMIT) {
-               int size = limit - offset;
-               if (size > LATENCY_LIMIT)
-                       size = LATENCY_LIMIT;
-               freed += shmem_free_swp(ptr+offset, ptr+offset+size,
-                                                       punch_lock);
-               if (need_resched()) {
-                       shmem_swp_unmap(ptr);
-                       if (*dir) {
-                               shmem_dir_unmap(*dir);
-                               *dir = NULL;
-                       }
-                       cond_resched();
-                       ptr = shmem_swp_map(subdir);
-               }
-       }
-       shmem_swp_unmap(ptr);
-       return freed;
+static int shmem_free_swap(struct address_space *mapping,
+                          pgoff_t index, void *radswap)
+{
+       int error;
+
+       spin_lock_irq(&mapping->tree_lock);
+       error = shmem_radix_tree_replace(mapping, index, radswap, NULL);
+       spin_unlock_irq(&mapping->tree_lock);
+       if (!error)
+               free_swap_and_cache(radix_to_swp_entry(radswap));
+       return error;
 }
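Aside: this returns 0 on success and -ENOENT if a racing task (shmem_unuse_inode(), say) already replaced the entry, which is why the truncation loops below count successful frees as nr_swaps_freed += !shmem_free_swap(...).
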
 
-static void shmem_free_pages(struct list_head *next)
+/*
+ * Pagevec may contain swap entries, so shuffle up pages before releasing.
+ */
+static void shmem_pagevec_release(struct pagevec *pvec)
 {
-       struct page *page;
-       int freed = 0;
-
-       do {
-               page = container_of(next, struct page, lru);
-               next = next->next;
-               shmem_dir_free(page);
-               freed++;
-               if (freed >= LATENCY_LIMIT) {
-                       cond_resched();
-                       freed = 0;
-               }
-       } while (next);
+       int i, j;
+
+       for (i = 0, j = 0; i < pagevec_count(pvec); i++) {
+               struct page *page = pvec->pages[i];
+               if (!radix_tree_exceptional_entry(page))
+                       pvec->pages[j++] = page;
+       }
+       pvec->nr = j;
+       pagevec_release(pvec);
 }
 
-void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end)
+/*
+ * Remove range of pages and swap entries from radix tree, and free them.
+ */
+void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
 {
+       struct address_space *mapping = inode->i_mapping;
        struct shmem_inode_info *info = SHMEM_I(inode);
-       unsigned long idx;
-       unsigned long size;
-       unsigned long limit;
-       unsigned long stage;
-       unsigned long diroff;
-       struct page **dir;
-       struct page *topdir;
-       struct page *middir;
-       struct page *subdir;
-       swp_entry_t *ptr;
-       LIST_HEAD(pages_to_free);
-       long nr_pages_to_free = 0;
+       pgoff_t start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+       unsigned partial = lstart & (PAGE_CACHE_SIZE - 1);
+       pgoff_t end = (lend >> PAGE_CACHE_SHIFT);
+       struct pagevec pvec;
+       pgoff_t indices[PAGEVEC_SIZE];
        long nr_swaps_freed = 0;
-       int offset;
-       int freed;
-       int punch_hole;
-       spinlock_t *needs_lock;
-       spinlock_t *punch_lock;
-       unsigned long upper_limit;
+       pgoff_t index;
+       int i;
 
-       truncate_inode_pages_range(inode->i_mapping, start, end);
+       BUG_ON((lend & (PAGE_CACHE_SIZE - 1)) != (PAGE_CACHE_SIZE - 1));
 
-       inode->i_ctime = inode->i_mtime = CURRENT_TIME;
-       idx = (start + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
-       if (idx >= info->next_index)
-               return;
+       pagevec_init(&pvec, 0);
+       index = start;
+       while (index <= end) {
+               pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
+                       min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
+                                                       pvec.pages, indices);
+               if (!pvec.nr)
+                       break;
+               mem_cgroup_uncharge_start();
+               for (i = 0; i < pagevec_count(&pvec); i++) {
+                       struct page *page = pvec.pages[i];
 
-       spin_lock(&info->lock);
-       info->flags |= SHMEM_TRUNCATE;
-       if (likely(end == (loff_t) -1)) {
-               limit = info->next_index;
-               upper_limit = SHMEM_MAX_INDEX;
-               info->next_index = idx;
-               needs_lock = NULL;
-               punch_hole = 0;
-       } else {
-               if (end + 1 >= inode->i_size) { /* we may free a little more */
-                       limit = (inode->i_size + PAGE_CACHE_SIZE - 1) >>
-                                                       PAGE_CACHE_SHIFT;
-                       upper_limit = SHMEM_MAX_INDEX;
-               } else {
-                       limit = (end + 1) >> PAGE_CACHE_SHIFT;
-                       upper_limit = limit;
-               }
-               needs_lock = &info->lock;
-               punch_hole = 1;
-       }
+                       index = indices[i];
+                       if (index > end)
+                               break;
+
+                       if (radix_tree_exceptional_entry(page)) {
+                               nr_swaps_freed += !shmem_free_swap(mapping,
+                                                               index, page);
+                               continue;
+                       }
 
-       topdir = info->i_indirect;
-       if (topdir && idx <= SHMEM_NR_DIRECT && !punch_hole) {
-               info->i_indirect = NULL;
-               nr_pages_to_free++;
-               list_add(&topdir->lru, &pages_to_free);
+                       if (!trylock_page(page))
+                               continue;
+                       if (page->mapping == mapping) {
+                               VM_BUG_ON(PageWriteback(page));
+                               truncate_inode_page(mapping, page);
+                       }
+                       unlock_page(page);
+               }
+               shmem_pagevec_release(&pvec);
+               mem_cgroup_uncharge_end();
+               cond_resched();
+               index++;
        }
-       spin_unlock(&info->lock);
 
-       if (info->swapped && idx < SHMEM_NR_DIRECT) {
-               ptr = info->i_direct;
-               size = limit;
-               if (size > SHMEM_NR_DIRECT)
-                       size = SHMEM_NR_DIRECT;
-               nr_swaps_freed = shmem_free_swp(ptr+idx, ptr+size, needs_lock);
+       if (partial) {
+               struct page *page = NULL;
+               shmem_getpage(inode, start - 1, &page, SGP_READ, NULL);
+               if (page) {
+                       zero_user_segment(page, partial, PAGE_CACHE_SIZE);
+                       set_page_dirty(page);
+                       unlock_page(page);
+                       page_cache_release(page);
+               }
        }
 
-       /*
-        * If there are no indirect blocks or we are punching a hole
-        * below indirect blocks, nothing to be done.
-        */
-       if (!topdir || limit <= SHMEM_NR_DIRECT)
-               goto done2;
+       index = start;
+       for ( ; ; ) {
+               cond_resched();
+               pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
+                       min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
+                                                       pvec.pages, indices);
+               if (!pvec.nr) {
+                       if (index == start)
+                               break;
+                       index = start;
+                       continue;
+               }
+               if (index == start && indices[0] > end) {
+                       shmem_pagevec_release(&pvec);
+                       break;
+               }
+               mem_cgroup_uncharge_start();
+               for (i = 0; i < pagevec_count(&pvec); i++) {
+                       struct page *page = pvec.pages[i];
 
-       /*
-        * The truncation case has already dropped info->lock, and we're safe
-        * because i_size and next_index have already been lowered, preventing
-        * access beyond.  But in the punch_hole case, we still need to take
-        * the lock when updating the swap directory, because there might be
-        * racing accesses by shmem_getpage(SGP_CACHE), shmem_unuse_inode or
-        * shmem_writepage.  However, whenever we find we can remove a whole
-        * directory page (not at the misaligned start or end of the range),
-        * we first NULLify its pointer in the level above, and then have no
-        * need to take the lock when updating its contents: needs_lock and
-        * punch_lock (either pointing to info->lock or NULL) manage this.
-        */
+                       index = indices[i];
+                       if (index > end)
+                               break;
 
-       upper_limit -= SHMEM_NR_DIRECT;
-       limit -= SHMEM_NR_DIRECT;
-       idx = (idx > SHMEM_NR_DIRECT)? (idx - SHMEM_NR_DIRECT): 0;
-       offset = idx % ENTRIES_PER_PAGE;
-       idx -= offset;
-
-       dir = shmem_dir_map(topdir);
-       stage = ENTRIES_PER_PAGEPAGE/2;
-       if (idx < ENTRIES_PER_PAGEPAGE/2) {
-               middir = topdir;
-               diroff = idx/ENTRIES_PER_PAGE;
-       } else {
-               dir += ENTRIES_PER_PAGE/2;
-               dir += (idx - ENTRIES_PER_PAGEPAGE/2)/ENTRIES_PER_PAGEPAGE;
-               while (stage <= idx)
-                       stage += ENTRIES_PER_PAGEPAGE;
-               middir = *dir;
-               if (*dir) {
-                       diroff = ((idx - ENTRIES_PER_PAGEPAGE/2) %
-                               ENTRIES_PER_PAGEPAGE) / ENTRIES_PER_PAGE;
-                       if (!diroff && !offset && upper_limit >= stage) {
-                               if (needs_lock) {
-                                       spin_lock(needs_lock);
-                                       *dir = NULL;
-                                       spin_unlock(needs_lock);
-                                       needs_lock = NULL;
-                               } else
-                                       *dir = NULL;
-                               nr_pages_to_free++;
-                               list_add(&middir->lru, &pages_to_free);
+                       if (radix_tree_exceptional_entry(page)) {
+                               nr_swaps_freed += !shmem_free_swap(mapping,
+                                                               index, page);
+                               continue;
                        }
-                       shmem_dir_unmap(dir);
-                       dir = shmem_dir_map(middir);
-               } else {
-                       diroff = 0;
-                       offset = 0;
-                       idx = stage;
-               }
-       }
 
-       for (; idx < limit; idx += ENTRIES_PER_PAGE, diroff++) {
-               if (unlikely(idx == stage)) {
-                       shmem_dir_unmap(dir);
-                       dir = shmem_dir_map(topdir) +
-                           ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE;
-                       while (!*dir) {
-                               dir++;
-                               idx += ENTRIES_PER_PAGEPAGE;
-                               if (idx >= limit)
-                                       goto done1;
-                       }
-                       stage = idx + ENTRIES_PER_PAGEPAGE;
-                       middir = *dir;
-                       if (punch_hole)
-                               needs_lock = &info->lock;
-                       if (upper_limit >= stage) {
-                               if (needs_lock) {
-                                       spin_lock(needs_lock);
-                                       *dir = NULL;
-                                       spin_unlock(needs_lock);
-                                       needs_lock = NULL;
-                               } else
-                                       *dir = NULL;
-                               nr_pages_to_free++;
-                               list_add(&middir->lru, &pages_to_free);
+                       lock_page(page);
+                       if (page->mapping == mapping) {
+                               VM_BUG_ON(PageWriteback(page));
+                               truncate_inode_page(mapping, page);
                        }
-                       shmem_dir_unmap(dir);
-                       cond_resched();
-                       dir = shmem_dir_map(middir);
-                       diroff = 0;
-               }
-               punch_lock = needs_lock;
-               subdir = dir[diroff];
-               if (subdir && !offset && upper_limit-idx >= ENTRIES_PER_PAGE) {
-                       if (needs_lock) {
-                               spin_lock(needs_lock);
-                               dir[diroff] = NULL;
-                               spin_unlock(needs_lock);
-                               punch_lock = NULL;
-                       } else
-                               dir[diroff] = NULL;
-                       nr_pages_to_free++;
-                       list_add(&subdir->lru, &pages_to_free);
-               }
-               if (subdir && page_private(subdir) /* has swap entries */) {
-                       size = limit - idx;
-                       if (size > ENTRIES_PER_PAGE)
-                               size = ENTRIES_PER_PAGE;
-                       freed = shmem_map_and_free_swp(subdir,
-                                       offset, size, &dir, punch_lock);
-                       if (!dir)
-                               dir = shmem_dir_map(middir);
-                       nr_swaps_freed += freed;
-                       if (offset || punch_lock) {
-                               spin_lock(&info->lock);
-                               set_page_private(subdir,
-                                       page_private(subdir) - freed);
-                               spin_unlock(&info->lock);
-                       } else
-                               BUG_ON(page_private(subdir) != freed);
+                       unlock_page(page);
                }
-               offset = 0;
-       }
-done1:
-       shmem_dir_unmap(dir);
-done2:
-       if (inode->i_mapping->nrpages && (info->flags & SHMEM_PAGEIN)) {
-               /*
-                * Call truncate_inode_pages again: racing shmem_unuse_inode
-                * may have swizzled a page in from swap since
-                * truncate_pagecache or generic_delete_inode did it, before we
-                * lowered next_index.  Also, though shmem_getpage checks
-                * i_size before adding to cache, no recheck after: so fix the
-                * narrow window there too.
-                */
-               truncate_inode_pages_range(inode->i_mapping, start, end);
+               shmem_pagevec_release(&pvec);
+               mem_cgroup_uncharge_end();
+               index++;
        }
 
        spin_lock(&info->lock);
-       info->flags &= ~SHMEM_TRUNCATE;
        info->swapped -= nr_swaps_freed;
-       if (nr_pages_to_free)
-               shmem_free_blocks(inode, nr_pages_to_free);
        shmem_recalc_inode(inode);
        spin_unlock(&info->lock);
 
-       /*
-        * Empty swap vector directory pages to be freed?
-        */
-       if (!list_empty(&pages_to_free)) {
-               pages_to_free.prev->next = NULL;
-               shmem_free_pages(pages_to_free.next);
-       }
+       inode->i_ctime = inode->i_mtime = CURRENT_TIME;
 }
 EXPORT_SYMBOL_GPL(shmem_truncate_range);
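Aside: lend is inclusive and, per the BUG_ON at the top of the function, must address the last byte of a page; whole-file truncation passes (loff_t)-1. Hedged examples of the two call shapes (the first mirrors shmem_setattr(), the second a page-aligned hole punch):

	shmem_truncate_range(inode, newsize, (loff_t)-1);
	shmem_truncate_range(inode, holestart, holestart + holelen - 1);
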
 
@@ -771,37 +520,7 @@ static int shmem_setattr(struct dentry *dentry, struct iattr *attr)
        if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
                loff_t oldsize = inode->i_size;
                loff_t newsize = attr->ia_size;
-               struct page *page = NULL;
 
-               if (newsize < oldsize) {
-                       /*
-                        * If truncating down to a partial page, then
-                        * if that page is already allocated, hold it
-                        * in memory until the truncation is over, so
-                        * truncate_partial_page cannot miss it were
-                        * it assigned to swap.
-                        */
-                       if (newsize & (PAGE_CACHE_SIZE-1)) {
-                               (void) shmem_getpage(inode,
-                                       newsize >> PAGE_CACHE_SHIFT,
-                                               &page, SGP_READ, NULL);
-                               if (page)
-                                       unlock_page(page);
-                       }
-                       /*
-                        * Reset SHMEM_PAGEIN flag so that shmem_truncate can
-                        * detect if any pages might have been added to cache
-                        * after truncate_inode_pages.  But we needn't bother
-                        * if it's being fully truncated to zero-length: the
-                        * nrpages check is efficient enough in that case.
-                        */
-                       if (newsize) {
-                               struct shmem_inode_info *info = SHMEM_I(inode);
-                               spin_lock(&info->lock);
-                               info->flags &= ~SHMEM_PAGEIN;
-                               spin_unlock(&info->lock);
-                       }
-               }
                if (newsize != oldsize) {
                        i_size_write(inode, newsize);
                        inode->i_ctime = inode->i_mtime = CURRENT_TIME;
@@ -813,8 +532,6 @@ static int shmem_setattr(struct dentry *dentry, struct iattr *attr)
                        /* unmap again to remove racily COWed private pages */
                        unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);
                }
-               if (page)
-                       page_cache_release(page);
        }
 
        setattr_copy(inode, attr);
@@ -839,7 +556,8 @@ static void shmem_evict_inode(struct inode *inode)
                        list_del_init(&info->swaplist);
                        mutex_unlock(&shmem_swaplist_mutex);
                }
-       }
+       } else
+               kfree(info->symlink);
 
        list_for_each_entry_safe(xattr, nxattr, &info->xattr_list, list) {
                kfree(xattr->name);
@@ -850,106 +568,27 @@ static void shmem_evict_inode(struct inode *inode)
        end_writeback(inode);
 }
 
-static inline int shmem_find_swp(swp_entry_t entry, swp_entry_t *dir, swp_entry_t *edir)
-{
-       swp_entry_t *ptr;
-
-       for (ptr = dir; ptr < edir; ptr++) {
-               if (ptr->val == entry.val)
-                       return ptr - dir;
-       }
-       return -1;
-}
-
-static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, struct page *page)
+/*
+ * If swap found in inode, free it and move page from swapcache to filecache.
+ */
+static int shmem_unuse_inode(struct shmem_inode_info *info,
+                            swp_entry_t swap, struct page *page)
 {
-       struct address_space *mapping;
-       unsigned long idx;
-       unsigned long size;
-       unsigned long limit;
-       unsigned long stage;
-       struct page **dir;
-       struct page *subdir;
-       swp_entry_t *ptr;
-       int offset;
+       struct address_space *mapping = info->vfs_inode.i_mapping;
+       void *radswap;
+       pgoff_t index;
        int error;
 
-       idx = 0;
-       ptr = info->i_direct;
-       spin_lock(&info->lock);
-       if (!info->swapped) {
-               list_del_init(&info->swaplist);
-               goto lost2;
-       }
-       limit = info->next_index;
-       size = limit;
-       if (size > SHMEM_NR_DIRECT)
-               size = SHMEM_NR_DIRECT;
-       offset = shmem_find_swp(entry, ptr, ptr+size);
-       if (offset >= 0) {
-               shmem_swp_balance_unmap();
-               goto found;
-       }
-       if (!info->i_indirect)
-               goto lost2;
-
-       dir = shmem_dir_map(info->i_indirect);
-       stage = SHMEM_NR_DIRECT + ENTRIES_PER_PAGEPAGE/2;
-
-       for (idx = SHMEM_NR_DIRECT; idx < limit; idx += ENTRIES_PER_PAGE, dir++) {
-               if (unlikely(idx == stage)) {
-                       shmem_dir_unmap(dir-1);
-                       if (cond_resched_lock(&info->lock)) {
-                               /* check it has not been truncated */
-                               if (limit > info->next_index) {
-                                       limit = info->next_index;
-                                       if (idx >= limit)
-                                               goto lost2;
-                               }
-                       }
-                       dir = shmem_dir_map(info->i_indirect) +
-                           ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE;
-                       while (!*dir) {
-                               dir++;
-                               idx += ENTRIES_PER_PAGEPAGE;
-                               if (idx >= limit)
-                                       goto lost1;
-                       }
-                       stage = idx + ENTRIES_PER_PAGEPAGE;
-                       subdir = *dir;
-                       shmem_dir_unmap(dir);
-                       dir = shmem_dir_map(subdir);
-               }
-               subdir = *dir;
-               if (subdir && page_private(subdir)) {
-                       ptr = shmem_swp_map(subdir);
-                       size = limit - idx;
-                       if (size > ENTRIES_PER_PAGE)
-                               size = ENTRIES_PER_PAGE;
-                       offset = shmem_find_swp(entry, ptr, ptr+size);
-                       shmem_swp_unmap(ptr);
-                       if (offset >= 0) {
-                               shmem_dir_unmap(dir);
-                               ptr = shmem_swp_map(subdir);
-                               goto found;
-                       }
-               }
-       }
-lost1:
-       shmem_dir_unmap(dir-1);
-lost2:
-       spin_unlock(&info->lock);
-       return 0;
-found:
-       idx += offset;
-       ptr += offset;
+       radswap = swp_to_radix_entry(swap);
+       index = radix_tree_locate_item(&mapping->page_tree, radswap);
+       if (index == -1)
+               return 0;
 
        /*
         * Move _head_ to start search for next from here.
         * But be careful: shmem_evict_inode checks list_empty without taking
         * mutex, and there's an instant in list_move_tail when info->swaplist
-        * would appear empty, if it were the only one on shmem_swaplist.  We
-        * could avoid doing it if inode NULL; or use this minor optimization.
+        * would appear empty, if it were the only one on shmem_swaplist.
         */
        if (shmem_swaplist.next != &info->swaplist)
                list_move_tail(&shmem_swaplist, &info->swaplist);
@@ -959,42 +598,34 @@ found:
         * but also to hold up shmem_evict_inode(): so inode cannot be freed
         * beneath us (pagelock doesn't help until the page is in pagecache).
         */
-       mapping = info->vfs_inode.i_mapping;
-       error = add_to_page_cache_locked(page, mapping, idx, GFP_NOWAIT);
+       error = shmem_add_to_page_cache(page, mapping, index,
+                                               GFP_NOWAIT, radswap);
        /* which does mem_cgroup_uncharge_cache_page on error */
 
-       if (error == -EEXIST) {
-               struct page *filepage = find_get_page(mapping, idx);
-               error = 1;
-               if (filepage) {
-                       /*
-                        * There might be a more uptodate page coming down
-                        * from a stacked writepage: forget our swappage if so.
-                        */
-                       if (PageUptodate(filepage))
-                               error = 0;
-                       page_cache_release(filepage);
-               }
-       }
-       if (!error) {
+       if (error != -ENOMEM) {
+               /*
+                * Truncation and eviction use free_swap_and_cache(), which
+                * only does trylock page: if we raced, best clean up here.
+                */
                delete_from_swap_cache(page);
                set_page_dirty(page);
-               info->flags |= SHMEM_PAGEIN;
-               shmem_swp_set(info, ptr, 0);
-               swap_free(entry);
+               if (!error) {
+                       spin_lock(&info->lock);
+                       info->swapped--;
+                       spin_unlock(&info->lock);
+                       swap_free(swap);
+               }
                error = 1;      /* not an error, but entry was found */
        }
-       shmem_swp_unmap(ptr);
-       spin_unlock(&info->lock);
        return error;
 }
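Aside: radix_tree_locate_item() comes from the companion "locate_item" patch in this series: it searches the tree for the given item and returns its index, or -1 if the item is not present. That single lookup replaces the old linear walk over i_direct and the indirect swap-vector pages, which is what makes swapoff on tmpfs so much cheaper here.
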
 
 /*
- * shmem_unuse() search for an eventually swapped out shmem page.
+ * Search through swapped inodes to find and replace swap by page.
  */
-int shmem_unuse(swp_entry_t entry, struct page *page)
+int shmem_unuse(swp_entry_t swap, struct page *page)
 {
-       struct list_head *p, *next;
+       struct list_head *this, *next;
        struct shmem_inode_info *info;
        int found = 0;
        int error;
@@ -1003,32 +634,25 @@ int shmem_unuse(swp_entry_t entry, struct page *page)
         * Charge page using GFP_KERNEL while we can wait, before taking
         * the shmem_swaplist_mutex which might hold up shmem_writepage().
         * Charged back to the user (not to caller) when swap account is used.
-        * add_to_page_cache() will be called with GFP_NOWAIT.
         */
        error = mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL);
        if (error)
                goto out;
-       /*
-        * Try to preload while we can wait, to not make a habit of
-        * draining atomic reserves; but don't latch on to this cpu,
-        * it's okay if sometimes we get rescheduled after this.
-        */
-       error = radix_tree_preload(GFP_KERNEL);
-       if (error)
-               goto uncharge;
-       radix_tree_preload_end();
+       /* No radix_tree_preload: swap entry keeps a place for page in tree */
 
        mutex_lock(&shmem_swaplist_mutex);
-       list_for_each_safe(p, next, &shmem_swaplist) {
-               info = list_entry(p, struct shmem_inode_info, swaplist);
-               found = shmem_unuse_inode(info, entry, page);
+       list_for_each_safe(this, next, &shmem_swaplist) {
+               info = list_entry(this, struct shmem_inode_info, swaplist);
+               if (info->swapped)
+                       found = shmem_unuse_inode(info, swap, page);
+               else
+                       list_del_init(&info->swaplist);
                cond_resched();
                if (found)
                        break;
        }
        mutex_unlock(&shmem_swaplist_mutex);
 
-uncharge:
        if (!found)
                mem_cgroup_uncharge_cache_page(page);
        if (found < 0)
@@ -1045,10 +669,10 @@ out:
 static int shmem_writepage(struct page *page, struct writeback_control *wbc)
 {
        struct shmem_inode_info *info;
-       swp_entry_t *entry, swap;
        struct address_space *mapping;
-       unsigned long index;
        struct inode *inode;
+       swp_entry_t swap;
+       pgoff_t index;
 
        BUG_ON(!PageLocked(page));
        mapping = page->mapping;
@@ -1063,69 +687,46 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
        /*
         * shmem_backing_dev_info's capabilities prevent regular writeback or
         * sync from ever calling shmem_writepage; but a stacking filesystem
-        * may use the ->writepage of its underlying filesystem, in which case
+        * might use ->writepage of its underlying filesystem, in which case
         * tmpfs should write out to swap only in response to memory pressure,
-        * and not for the writeback threads or sync.  However, in those cases,
-        * we do still want to check if there's a redundant swappage to be
-        * discarded.
+        * and not for the writeback threads or sync.
         */
-       if (wbc->for_reclaim)
-               swap = get_swap_page();
-       else
-               swap.val = 0;
+       if (!wbc->for_reclaim) {
+               WARN_ON_ONCE(1);        /* Still happens? Tell us about it! */
+               goto redirty;
+       }
+       swap = get_swap_page();
+       if (!swap.val)
+               goto redirty;
 
        /*
         * Add inode to shmem_unuse()'s list of swapped-out inodes,
-        * if it's not already there.  Do it now because we cannot take
-        * mutex while holding spinlock, and must do so before the page
-        * is moved to swap cache, when its pagelock no longer protects
+        * if it's not already there.  Do it now before the page is
+        * moved to swap cache, when its pagelock no longer protects
         * the inode from eviction.  But don't unlock the mutex until
-        * we've taken the spinlock, because shmem_unuse_inode() will
-        * prune a !swapped inode from the swaplist under both locks.
+        * we've incremented swapped, because shmem_unuse_inode() will
+        * prune a !swapped inode from the swaplist under this mutex.
         */
-       if (swap.val) {
-               mutex_lock(&shmem_swaplist_mutex);
-               if (list_empty(&info->swaplist))
-                       list_add_tail(&info->swaplist, &shmem_swaplist);
-       }
-
-       spin_lock(&info->lock);
-       if (swap.val)
-               mutex_unlock(&shmem_swaplist_mutex);
-
-       if (index >= info->next_index) {
-               BUG_ON(!(info->flags & SHMEM_TRUNCATE));
-               goto unlock;
-       }
-       entry = shmem_swp_entry(info, index, NULL);
-       if (entry->val) {
-               /*
-                * The more uptodate page coming down from a stacked
-                * writepage should replace our old swappage.
-                */
-               free_swap_and_cache(*entry);
-               shmem_swp_set(info, entry, 0);
-       }
-       shmem_recalc_inode(inode);
+       mutex_lock(&shmem_swaplist_mutex);
+       if (list_empty(&info->swaplist))
+               list_add_tail(&info->swaplist, &shmem_swaplist);
 
-       if (swap.val && add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) {
-               delete_from_page_cache(page);
-               shmem_swp_set(info, entry, swap.val);
-               shmem_swp_unmap(entry);
+       if (add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) {
                swap_shmem_alloc(swap);
+               shmem_delete_from_page_cache(page, swp_to_radix_entry(swap));
+
+               spin_lock(&info->lock);
+               info->swapped++;
+               shmem_recalc_inode(inode);
                spin_unlock(&info->lock);
+
+               mutex_unlock(&shmem_swaplist_mutex);
                BUG_ON(page_mapped(page));
                swap_writepage(page, wbc);
                return 0;
        }
 
-       shmem_swp_unmap(entry);
-unlock:
-       spin_unlock(&info->lock);
-       /*
-        * add_to_swap_cache() doesn't return -EEXIST, so we can safely
-        * clear SWAP_HAS_CACHE flag.
-        */
+       mutex_unlock(&shmem_swaplist_mutex);
        swapcache_free(swap, NULL);
 redirty:
        set_page_dirty(page);
@@ -1162,35 +763,33 @@ static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
 }
 #endif /* CONFIG_TMPFS */
 
-static struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp,
-                       struct shmem_inode_info *info, unsigned long idx)
+static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
+                       struct shmem_inode_info *info, pgoff_t index)
 {
        struct mempolicy mpol, *spol;
        struct vm_area_struct pvma;
-       struct page *page;
 
        spol = mpol_cond_copy(&mpol,
-                               mpol_shared_policy_lookup(&info->policy, idx));
+                       mpol_shared_policy_lookup(&info->policy, index));
 
        /* Create a pseudo vma that just contains the policy */
        pvma.vm_start = 0;
-       pvma.vm_pgoff = idx;
+       pvma.vm_pgoff = index;
        pvma.vm_ops = NULL;
        pvma.vm_policy = spol;
-       page = swapin_readahead(entry, gfp, &pvma, 0);
-       return page;
+       return swapin_readahead(swap, gfp, &pvma, 0);
 }
 
 static struct page *shmem_alloc_page(gfp_t gfp,
-                       struct shmem_inode_info *info, unsigned long idx)
+                       struct shmem_inode_info *info, pgoff_t index)
 {
        struct vm_area_struct pvma;
 
        /* Create a pseudo vma that just contains the policy */
        pvma.vm_start = 0;
-       pvma.vm_pgoff = idx;
+       pvma.vm_pgoff = index;
        pvma.vm_ops = NULL;
-       pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx);
+       pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, index);
 
        /*
         * alloc_page_vma() will drop the shared policy reference
@@ -1199,19 +798,19 @@ static struct page *shmem_alloc_page(gfp_t gfp,
 }
 #else /* !CONFIG_NUMA */
 #ifdef CONFIG_TMPFS
-static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *p)
+static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
 {
 }
 #endif /* CONFIG_TMPFS */
 
-static inline struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp,
-                       struct shmem_inode_info *info, unsigned long idx)
+static inline struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
+                       struct shmem_inode_info *info, pgoff_t index)
 {
-       return swapin_readahead(entry, gfp, NULL, 0);
+       return swapin_readahead(swap, gfp, NULL, 0);
 }
 
 static inline struct page *shmem_alloc_page(gfp_t gfp,
-                       struct shmem_inode_info *info, unsigned long idx)
+                       struct shmem_inode_info *info, pgoff_t index)
 {
        return alloc_page(gfp);
 }
@@ -1225,313 +824,195 @@ static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
 #endif
 
 /*
- * shmem_getpage - either get the page from swap or allocate a new one
+ * shmem_getpage_gfp - find page in cache, or get from swap, or allocate
  *
  * If we allocate a new one we do not mark it dirty. That's up to the
  * vm. If we swap it in we mark it dirty since we also free the swap
  * entry since a page cannot live in both the swap and page cache
  */
-static int shmem_getpage(struct inode *inode, unsigned long idx,
-                       struct page **pagep, enum sgp_type sgp, int *type)
+static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
+       struct page **pagep, enum sgp_type sgp, gfp_t gfp, int *fault_type)
 {
        struct address_space *mapping = inode->i_mapping;
-       struct shmem_inode_info *info = SHMEM_I(inode);
+       struct shmem_inode_info *info;
        struct shmem_sb_info *sbinfo;
-       struct page *filepage = *pagep;
-       struct page *swappage;
-       struct page *prealloc_page = NULL;
-       swp_entry_t *entry;
+       struct page *page;
        swp_entry_t swap;
-       gfp_t gfp;
        int error;
+       int once = 0;
 
-       if (idx >= SHMEM_MAX_INDEX)
+       if (index > (MAX_LFS_FILESIZE >> PAGE_CACHE_SHIFT))
                return -EFBIG;
+repeat:
+       swap.val = 0;
+       page = find_lock_page(mapping, index);
+       if (radix_tree_exceptional_entry(page)) {
+               swap = radix_to_swp_entry(page);
+               page = NULL;
+       }
 
-       if (type)
-               *type = 0;
+       if (sgp != SGP_WRITE &&
+           ((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
+               error = -EINVAL;
+               goto failed;
+       }
 
-       /*
-        * Normally, filepage is NULL on entry, and either found
-        * uptodate immediately, or allocated and zeroed, or read
-        * in under swappage, which is then assigned to filepage.
-        * But shmem_readpage (required for splice) passes in a locked
-        * filepage, which may be found not uptodate by other callers
-        * too, and may need to be copied from the swappage read in.
-        */
-repeat:
-       if (!filepage)
-               filepage = find_lock_page(mapping, idx);
-       if (filepage && PageUptodate(filepage))
-               goto done;
-       gfp = mapping_gfp_mask(mapping);
-       if (!filepage) {
+       if (page || (sgp == SGP_READ && !swap.val)) {
                /*
-                * Try to preload while we can wait, to not make a habit of
-                * draining atomic reserves; but don't latch on to this cpu.
+                * Once we can get the page lock, it must be uptodate:
+                * if there were an error in reading back from swap,
+                * the page would not be inserted into the filecache.
                 */
-               error = radix_tree_preload(gfp & ~__GFP_HIGHMEM);
-               if (error)
-                       goto failed;
-               radix_tree_preload_end();
-               if (sgp != SGP_READ && !prealloc_page) {
-                       /* We don't care if this fails */
-                       prealloc_page = shmem_alloc_page(gfp, info, idx);
-                       if (prealloc_page) {
-                               if (mem_cgroup_cache_charge(prealloc_page,
-                                               current->mm, GFP_KERNEL)) {
-                                       page_cache_release(prealloc_page);
-                                       prealloc_page = NULL;
-                               }
-                       }
-               }
+               BUG_ON(page && !PageUptodate(page));
+               *pagep = page;
+               return 0;
        }
-       error = 0;
 
-       spin_lock(&info->lock);
-       shmem_recalc_inode(inode);
-       entry = shmem_swp_alloc(info, idx, sgp);
-       if (IS_ERR(entry)) {
-               spin_unlock(&info->lock);
-               error = PTR_ERR(entry);
-               goto failed;
-       }
-       swap = *entry;
+       /*
+        * Fast cache lookup did not find it:
+        * bring it back from swap or allocate.
+        */
+       info = SHMEM_I(inode);
+       sbinfo = SHMEM_SB(inode->i_sb);
 
        if (swap.val) {
                /* Look it up and read it in... */
-               swappage = lookup_swap_cache(swap);
-               if (!swappage) {
-                       shmem_swp_unmap(entry);
-                       spin_unlock(&info->lock);
+               page = lookup_swap_cache(swap);
+               if (!page) {
                        /* here we actually do the io */
-                       if (type)
-                               *type |= VM_FAULT_MAJOR;
-                       swappage = shmem_swapin(swap, gfp, info, idx);
-                       if (!swappage) {
-                               spin_lock(&info->lock);
-                               entry = shmem_swp_alloc(info, idx, sgp);
-                               if (IS_ERR(entry))
-                                       error = PTR_ERR(entry);
-                               else {
-                                       if (entry->val == swap.val)
-                                               error = -ENOMEM;
-                                       shmem_swp_unmap(entry);
-                               }
-                               spin_unlock(&info->lock);
-                               if (error)
-                                       goto failed;
-                               goto repeat;
+                       if (fault_type)
+                               *fault_type |= VM_FAULT_MAJOR;
+                       page = shmem_swapin(swap, gfp, info, index);
+                       if (!page) {
+                               error = -ENOMEM;
+                               goto failed;
                        }
-                       wait_on_page_locked(swappage);
-                       page_cache_release(swappage);
-                       goto repeat;
                }
 
                /* We have to do this with page locked to prevent races */
-               if (!trylock_page(swappage)) {
-                       shmem_swp_unmap(entry);
-                       spin_unlock(&info->lock);
-                       wait_on_page_locked(swappage);
-                       page_cache_release(swappage);
-                       goto repeat;
-               }
-               if (PageWriteback(swappage)) {
-                       shmem_swp_unmap(entry);
-                       spin_unlock(&info->lock);
-                       wait_on_page_writeback(swappage);
-                       unlock_page(swappage);
-                       page_cache_release(swappage);
-                       goto repeat;
-               }
-               if (!PageUptodate(swappage)) {
-                       shmem_swp_unmap(entry);
-                       spin_unlock(&info->lock);
-                       unlock_page(swappage);
-                       page_cache_release(swappage);
+               lock_page(page);
+               if (!PageUptodate(page)) {
                        error = -EIO;
                        goto failed;
                }
-
-               if (filepage) {
-                       shmem_swp_set(info, entry, 0);
-                       shmem_swp_unmap(entry);
-                       delete_from_swap_cache(swappage);
-                       spin_unlock(&info->lock);
-                       copy_highpage(filepage, swappage);
-                       unlock_page(swappage);
-                       page_cache_release(swappage);
-                       flush_dcache_page(filepage);
-                       SetPageUptodate(filepage);
-                       set_page_dirty(filepage);
-                       swap_free(swap);
-               } else if (!(error = add_to_page_cache_locked(swappage, mapping,
-                                       idx, GFP_NOWAIT))) {
-                       info->flags |= SHMEM_PAGEIN;
-                       shmem_swp_set(info, entry, 0);
-                       shmem_swp_unmap(entry);
-                       delete_from_swap_cache(swappage);
-                       spin_unlock(&info->lock);
-                       filepage = swappage;
-                       set_page_dirty(filepage);
-                       swap_free(swap);
-               } else {
-                       shmem_swp_unmap(entry);
-                       spin_unlock(&info->lock);
-                       if (error == -ENOMEM) {
-                               /*
-                                * reclaim from proper memory cgroup and
-                                * call memcg's OOM if needed.
-                                */
-                               error = mem_cgroup_shmem_charge_fallback(
-                                                               swappage,
-                                                               current->mm,
-                                                               gfp);
-                               if (error) {
-                                       unlock_page(swappage);
-                                       page_cache_release(swappage);
-                                       goto failed;
-                               }
-                       }
-                       unlock_page(swappage);
-                       page_cache_release(swappage);
-                       goto repeat;
-               }
-       } else if (sgp == SGP_READ && !filepage) {
-               shmem_swp_unmap(entry);
-               filepage = find_get_page(mapping, idx);
-               if (filepage &&
-                   (!PageUptodate(filepage) || !trylock_page(filepage))) {
-                       spin_unlock(&info->lock);
-                       wait_on_page_locked(filepage);
-                       page_cache_release(filepage);
-                       filepage = NULL;
-                       goto repeat;
+               wait_on_page_writeback(page);
+
+               /* Someone may have already done it for us */
+               if (page->mapping) {
+                       if (page->mapping == mapping &&
+                           page->index == index)
+                               goto done;
+                       error = -EEXIST;
+                       goto failed;
                }
+
+               error = mem_cgroup_cache_charge(page, current->mm,
+                                               gfp & GFP_RECLAIM_MASK);
+               if (!error)
+                       error = shmem_add_to_page_cache(page, mapping, index,
+                                               gfp, swp_to_radix_entry(swap));
+               if (error)
+                       goto failed;
+
+               spin_lock(&info->lock);
+               info->swapped--;
+               shmem_recalc_inode(inode);
                spin_unlock(&info->lock);
+
+               delete_from_swap_cache(page);
+               set_page_dirty(page);
+               swap_free(swap);
+
        } else {
-               shmem_swp_unmap(entry);
-               sbinfo = SHMEM_SB(inode->i_sb);
+               if (shmem_acct_block(info->flags)) {
+                       error = -ENOSPC;
+                       goto failed;
+               }
                if (sbinfo->max_blocks) {
                        if (percpu_counter_compare(&sbinfo->used_blocks,
-                                               sbinfo->max_blocks) >= 0 ||
-                           shmem_acct_block(info->flags))
-                               goto nospace;
-                       percpu_counter_inc(&sbinfo->used_blocks);
-                       inode->i_blocks += BLOCKS_PER_PAGE;
-               } else if (shmem_acct_block(info->flags))
-                       goto nospace;
-
-               if (!filepage) {
-                       int ret;
-
-                       if (!prealloc_page) {
-                               spin_unlock(&info->lock);
-                               filepage = shmem_alloc_page(gfp, info, idx);
-                               if (!filepage) {
-                                       spin_lock(&info->lock);
-                                       shmem_unacct_blocks(info->flags, 1);
-                                       shmem_free_blocks(inode, 1);
-                                       spin_unlock(&info->lock);
-                                       error = -ENOMEM;
-                                       goto failed;
-                               }
-                               SetPageSwapBacked(filepage);
-
-                               /*
-                                * Precharge page while we can wait, compensate
-                                * after
-                                */
-                               error = mem_cgroup_cache_charge(filepage,
-                                       current->mm, GFP_KERNEL);
-                               if (error) {
-                                       page_cache_release(filepage);
-                                       spin_lock(&info->lock);
-                                       shmem_unacct_blocks(info->flags, 1);
-                                       shmem_free_blocks(inode, 1);
-                                       spin_unlock(&info->lock);
-                                       filepage = NULL;
-                                       goto failed;
-                               }
-
-                               spin_lock(&info->lock);
-                       } else {
-                               filepage = prealloc_page;
-                               prealloc_page = NULL;
-                               SetPageSwapBacked(filepage);
+                                               sbinfo->max_blocks) >= 0) {
+                               error = -ENOSPC;
+                               goto unacct;
                        }
+                       percpu_counter_inc(&sbinfo->used_blocks);
+               }
 
-                       entry = shmem_swp_alloc(info, idx, sgp);
-                       if (IS_ERR(entry))
-                               error = PTR_ERR(entry);
-                       else {
-                               swap = *entry;
-                               shmem_swp_unmap(entry);
-                       }
-                       ret = error || swap.val;
-                       if (ret)
-                               mem_cgroup_uncharge_cache_page(filepage);
-                       else
-                               ret = add_to_page_cache_lru(filepage, mapping,
-                                               idx, GFP_NOWAIT);
-                       /*
-                        * At add_to_page_cache_lru() failure, uncharge will
-                        * be done automatically.
-                        */
-                       if (ret) {
-                               shmem_unacct_blocks(info->flags, 1);
-                               shmem_free_blocks(inode, 1);
-                               spin_unlock(&info->lock);
-                               page_cache_release(filepage);
-                               filepage = NULL;
-                               if (error)
-                                       goto failed;
-                               goto repeat;
-                       }
-                       info->flags |= SHMEM_PAGEIN;
+               page = shmem_alloc_page(gfp, info, index);
+               if (!page) {
+                       error = -ENOMEM;
+                       goto decused;
                }
 
+               SetPageSwapBacked(page);
+               __set_page_locked(page);
+               error = mem_cgroup_cache_charge(page, current->mm,
+                                               gfp & GFP_RECLAIM_MASK);
+               if (!error)
+                       error = shmem_add_to_page_cache(page, mapping, index,
+                                               gfp, NULL);
+               if (error)
+                       goto decused;
+               lru_cache_add_anon(page);
+
+               spin_lock(&info->lock);
                info->alloced++;
+               inode->i_blocks += BLOCKS_PER_PAGE;
+               shmem_recalc_inode(inode);
                spin_unlock(&info->lock);
-               clear_highpage(filepage);
-               flush_dcache_page(filepage);
-               SetPageUptodate(filepage);
+
+               clear_highpage(page);
+               flush_dcache_page(page);
+               SetPageUptodate(page);
                if (sgp == SGP_DIRTY)
-                       set_page_dirty(filepage);
+                       set_page_dirty(page);
        }
 done:
-       *pagep = filepage;
-       error = 0;
-       goto out;
+       /* Perhaps the file has been truncated since we checked */
+       if (sgp != SGP_WRITE &&
+           ((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
+               error = -EINVAL;
+               goto trunc;
+       }
+       *pagep = page;
+       return 0;
 
-nospace:
        /*
-        * Perhaps the page was brought in from swap between find_lock_page
-        * and taking info->lock?  We allow for that at add_to_page_cache_lru,
-        * but must also avoid reporting a spurious ENOSPC while working on a
-        * full tmpfs.  (When filepage has been passed in to shmem_getpage, it
-        * is already in page cache, which prevents this race from occurring.)
+        * Error recovery.
         */
-       if (!filepage) {
-               struct page *page = find_get_page(mapping, idx);
-               if (page) {
-                       spin_unlock(&info->lock);
-                       page_cache_release(page);
-                       goto repeat;
-               }
-       }
+trunc:
+       ClearPageDirty(page);
+       delete_from_page_cache(page);
+       spin_lock(&info->lock);
+       info->alloced--;
+       inode->i_blocks -= BLOCKS_PER_PAGE;
        spin_unlock(&info->lock);
-       error = -ENOSPC;
+decused:
+       if (sbinfo->max_blocks)
+               percpu_counter_add(&sbinfo->used_blocks, -1);
+unacct:
+       shmem_unacct_blocks(info->flags, 1);
 failed:
-       if (*pagep != filepage) {
-               unlock_page(filepage);
-               page_cache_release(filepage);
+       if (swap.val && error != -EINVAL) {
+               struct page *test = find_get_page(mapping, index);
+               if (test && !radix_tree_exceptional_entry(test))
+                       page_cache_release(test);
+               /* Have another try if the entry has changed */
+               if (test != swp_to_radix_entry(swap))
+                       error = -EEXIST;
        }
-out:
-       if (prealloc_page) {
-               mem_cgroup_uncharge_cache_page(prealloc_page);
-               page_cache_release(prealloc_page);
+       if (page) {
+               unlock_page(page);
+               page_cache_release(page);
        }
+       if (error == -ENOSPC && !once++) {
+               info = SHMEM_I(inode);
+               spin_lock(&info->lock);
+               shmem_recalc_inode(inode);
+               spin_unlock(&info->lock);
+               goto repeat;
+       }
+       if (error == -EEXIST)
+               goto repeat;
        return error;
 }
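
The repeat: block above depends on swap entries living in the page-cache radix
tree as "exceptional" entries: tagged non-pointer values that find_lock_page()
now hands back untouched, to be decoded with radix_to_swp_entry().  A minimal
userspace sketch of such a tagged-slot encoding; the tag bit and shift here are
assumptions in the spirit of RADIX_TREE_EXCEPTIONAL_ENTRY, not the kernel's
exact layout:

	#include <assert.h>

	/* Hypothetical model of an "exceptional" radix-tree slot: bit 1
	 * marks the value as a swap entry rather than a page pointer. */
	#define EXCEPTIONAL	0x2UL

	static void *swp_to_slot(unsigned long swp_val)
	{
		return (void *)((swp_val << 2) | EXCEPTIONAL);
	}

	static int slot_is_exceptional(const void *slot)
	{
		return ((unsigned long)slot & EXCEPTIONAL) != 0;
	}

	static unsigned long slot_to_swp(const void *slot)
	{
		return (unsigned long)slot >> 2;
	}

	int main(void)
	{
		void *slot = swp_to_slot(0x1234);

		assert(slot_is_exceptional(slot));	/* looks like a swap entry */
		assert(slot_to_swp(slot) == 0x1234);	/* round-trips cleanly */
		return 0;
	}

Since real page pointers are at least word-aligned, bit 1 is never set on a
genuine page, which is what makes the discrimination above safe.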
 
@@ -1539,36 +1020,34 @@ static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
        struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
        int error;
-       int ret;
-
-       if (((loff_t)vmf->pgoff << PAGE_CACHE_SHIFT) >= i_size_read(inode))
-               return VM_FAULT_SIGBUS;
+       int ret = VM_FAULT_LOCKED;
 
        error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
        if (error)
                return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);
+
        if (ret & VM_FAULT_MAJOR) {
                count_vm_event(PGMAJFAULT);
                mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
        }
-       return ret | VM_FAULT_LOCKED;
+       return ret;
 }
 
 #ifdef CONFIG_NUMA
-static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
+static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
 {
-       struct inode *i = vma->vm_file->f_path.dentry->d_inode;
-       return mpol_set_shared_policy(&SHMEM_I(i)->policy, vma, new);
+       struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
+       return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol);
 }
 
 static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
                                          unsigned long addr)
 {
-       struct inode *i = vma->vm_file->f_path.dentry->d_inode;
-       unsigned long idx;
+       struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
+       pgoff_t index;
 
-       idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
-       return mpol_shared_policy_lookup(&SHMEM_I(i)->policy, idx);
+       index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
+       return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index);
 }
 #endif
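
In shmem_get_policy() above, index is the file page backing the faulting
address: the page offset within the vma plus vm_pgoff, the file page at which
the mapping starts.  A worked example, assuming 4kB pages:

	#include <assert.h>

	int main(void)
	{
		const unsigned long PAGE_SHIFT = 12;		/* assume 4kB pages */
		unsigned long vm_start = 0x10000000UL;		/* hypothetical mapping base */
		unsigned long vm_pgoff = 16;			/* mapping starts at file page 16 */
		unsigned long addr = vm_start + 3 * 4096;	/* fault in the 4th mapped page */
		unsigned long index;

		index = ((addr - vm_start) >> PAGE_SHIFT) + vm_pgoff;
		assert(index == 19);				/* file page 16 + 3 */
		return 0;
	}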
 
@@ -1666,20 +1145,7 @@ static struct inode *shmem_get_inode(struct super_block *sb, const struct inode
 
 #ifdef CONFIG_TMPFS
 static const struct inode_operations shmem_symlink_inode_operations;
-static const struct inode_operations shmem_symlink_inline_operations;
-
-/*
- * Normally tmpfs avoids the use of shmem_readpage and shmem_write_begin;
- * but providing them allows a tmpfs file to be used for splice, sendfile, and
- * below the loop driver, in the generic fashion that many filesystems support.
- */
-static int shmem_readpage(struct file *file, struct page *page)
-{
-       struct inode *inode = page->mapping->host;
-       int error = shmem_getpage(inode, page->index, &page, SGP_CACHE, NULL);
-       unlock_page(page);
-       return error;
-}
+static const struct inode_operations shmem_short_symlink_operations;
 
 static int
 shmem_write_begin(struct file *file, struct address_space *mapping,
@@ -1688,7 +1154,6 @@ shmem_write_begin(struct file *file, struct address_space *mapping,
 {
        struct inode *inode = mapping->host;
        pgoff_t index = pos >> PAGE_CACHE_SHIFT;
-       *pagep = NULL;
        return shmem_getpage(inode, index, pagep, SGP_WRITE, NULL);
 }
 
@@ -1713,7 +1178,8 @@ static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_
 {
        struct inode *inode = filp->f_path.dentry->d_inode;
        struct address_space *mapping = inode->i_mapping;
-       unsigned long index, offset;
+       pgoff_t index;
+       unsigned long offset;
        enum sgp_type sgp = SGP_READ;
 
        /*
@@ -1729,7 +1195,8 @@ static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_
 
        for (;;) {
                struct page *page = NULL;
-               unsigned long end_index, nr, ret;
+               pgoff_t end_index;
+               unsigned long nr, ret;
                loff_t i_size = i_size_read(inode);
 
                end_index = i_size >> PAGE_CACHE_SHIFT;
@@ -1850,6 +1317,7 @@ static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
                                unsigned int flags)
 {
        struct address_space *mapping = in->f_mapping;
+       struct inode *inode = mapping->host;
        unsigned int loff, nr_pages, req_pages;
        struct page *pages[PIPE_DEF_BUFFERS];
        struct partial_page partial[PIPE_DEF_BUFFERS];
@@ -1865,7 +1333,7 @@ static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
                .spd_release = spd_release_page,
        };
 
-       isize = i_size_read(in->f_mapping->host);
+       isize = i_size_read(inode);
        if (unlikely(*ppos >= isize))
                return 0;
 
@@ -1881,153 +1349,55 @@ static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
        req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
        nr_pages = min(req_pages, pipe->buffers);
 
-       /*
-        * Lookup the (hopefully) full range of pages we need.
-        */
        spd.nr_pages = find_get_pages_contig(mapping, index,
                                                nr_pages, spd.pages);
        index += spd.nr_pages;
-
-       /*
-        * If find_get_pages_contig() returned fewer pages than we needed,
-        * readahead/allocate the rest and fill in the holes.
-        */
-       if (spd.nr_pages < nr_pages)
-               page_cache_sync_readahead(mapping, &in->f_ra, in,
-                               index, req_pages - spd.nr_pages);
-
        error = 0;
-       while (spd.nr_pages < nr_pages) {
-               /*
-                * Page could be there, find_get_pages_contig() breaks on
-                * the first hole.
-                */
-               page = find_get_page(mapping, index);
-               if (!page) {
-                       /*
-                        * page didn't exist, allocate one.
-                        */
-                       page = page_cache_alloc_cold(mapping);
-                       if (!page)
-                               break;
-
-                       error = add_to_page_cache_lru(page, mapping, index,
-                                               GFP_KERNEL);
-                       if (unlikely(error)) {
-                               page_cache_release(page);
-                               if (error == -EEXIST)
-                                       continue;
-                               break;
-                       }
-                       /*
-                        * add_to_page_cache() locks the page, unlock it
-                        * to avoid convoluting the logic below even more.
-                        */
-                       unlock_page(page);
-               }
 
+       while (spd.nr_pages < nr_pages) {
+               error = shmem_getpage(inode, index, &page, SGP_CACHE, NULL);
+               if (error)
+                       break;
+               unlock_page(page);
                spd.pages[spd.nr_pages++] = page;
                index++;
        }
 
-       /*
-        * Now loop over the map and see if we need to start IO on any
-        * pages, fill in the partial map, etc.
-        */
        index = *ppos >> PAGE_CACHE_SHIFT;
        nr_pages = spd.nr_pages;
        spd.nr_pages = 0;
+
        for (page_nr = 0; page_nr < nr_pages; page_nr++) {
                unsigned int this_len;
 
                if (!len)
                        break;
 
-               /*
-                * this_len is the max we'll use from this page
-                */
                this_len = min_t(unsigned long, len, PAGE_CACHE_SIZE - loff);
                page = spd.pages[page_nr];
 
-               if (PageReadahead(page))
-                       page_cache_async_readahead(mapping, &in->f_ra, in,
-                                       page, index, req_pages - page_nr);
-
-               /*
-                * If the page isn't uptodate, we may need to start io on it
-                */
-               if (!PageUptodate(page)) {
-                       lock_page(page);
-
-                       /*
-                        * Page was truncated, or invalidated by the
-                        * filesystem.  Redo the find/create, but this time the
-                        * page is kept locked, so there's no chance of another
-                        * race with truncate/invalidate.
-                        */
-                       if (!page->mapping) {
-                               unlock_page(page);
-                               page = find_or_create_page(mapping, index,
-                                               mapping_gfp_mask(mapping));
-
-                               if (!page) {
-                                       error = -ENOMEM;
-                                       break;
-                               }
-                               page_cache_release(spd.pages[page_nr]);
-                               spd.pages[page_nr] = page;
-                       }
-                       /*
-                        * page was already under io and is now done, great
-                        */
-                       if (PageUptodate(page)) {
-                               unlock_page(page);
-                               goto fill_it;
-                       }
-
-                       /*
-                        * need to read in the page
-                        */
-                       error = mapping->a_ops->readpage(in, page);
-                       if (unlikely(error)) {
-                               /*
-                                * We really should re-lookup the page here,
-                                * but it complicates things a lot. Instead
-                                * lets just do what we already stored, and
-                               * let's just do what we already stored, and
-                                */
-                               if (error == AOP_TRUNCATED_PAGE)
-                                       error = 0;
-
+               if (!PageUptodate(page) || page->mapping != mapping) {
+                       error = shmem_getpage(inode, index, &page,
+                                                       SGP_CACHE, NULL);
+                       if (error)
                                break;
-                       }
+                       unlock_page(page);
+                       page_cache_release(spd.pages[page_nr]);
+                       spd.pages[page_nr] = page;
                }
-fill_it:
-               /*
-                * i_size must be checked after PageUptodate.
-                */
-               isize = i_size_read(mapping->host);
+
+               isize = i_size_read(inode);
                end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
                if (unlikely(!isize || index > end_index))
                        break;
 
-               /*
-                * if this is the last page, see if we need to shrink
-                * the length and stop
-                */
                if (end_index == index) {
                        unsigned int plen;
 
-                       /*
-                        * max good bytes in this page
-                        */
                        plen = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
                        if (plen <= loff)
                                break;
 
-                       /*
-                        * force quit after adding this page
-                        */
                        this_len = min(this_len, plen - loff);
                        len = this_len;
                }
@@ -2040,13 +1410,8 @@ fill_it:
                index++;
        }
 
-       /*
-        * Release any pages at the end, if we quit early. 'page_nr' is how far
-        * we got, 'nr_pages' is how many pages are in the map.
-        */
        while (page_nr < nr_pages)
                page_cache_release(spd.pages[page_nr++]);
-       in->f_ra.prev_pos = (loff_t)index << PAGE_CACHE_SHIFT;
 
        if (spd.nr_pages)
                error = splice_to_pipe(pipe, &spd);
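
The last-page clamp in the splice loop, plen = ((isize - 1) & ~PAGE_CACHE_MASK) + 1,
computes how many bytes of the final page are valid.  A worked example,
assuming 4096-byte pages:

	#include <assert.h>

	int main(void)
	{
		const unsigned long PAGE_CACHE_SIZE = 4096;	/* assumed page size */
		const unsigned long PAGE_CACHE_MASK = ~(PAGE_CACHE_SIZE - 1);
		long long isize;
		unsigned int plen;

		isize = 4096 + 10;			/* 10 bytes spill into page 1 */
		plen = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
		assert(plen == 10);

		isize = 2 * 4096;			/* file ends exactly on a page boundary */
		plen = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
		assert(plen == 4096);
		return 0;
	}
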
@@ -2069,8 +1434,9 @@ static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
        buf->f_namelen = NAME_MAX;
        if (sbinfo->max_blocks) {
                buf->f_blocks = sbinfo->max_blocks;
-               buf->f_bavail = buf->f_bfree =
-                               sbinfo->max_blocks - percpu_counter_sum(&sbinfo->used_blocks);
+               buf->f_bavail =
+               buf->f_bfree  = sbinfo->max_blocks -
+                               percpu_counter_sum(&sbinfo->used_blocks);
        }
        if (sbinfo->max_inodes) {
                buf->f_files = sbinfo->max_inodes;
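
shmem_statfs() can afford the exact but expensive percpu_counter_sum(), while
the allocation path in shmem_getpage_gfp() gets away with the cheaper
percpu_counter_compare() before bumping used_blocks.  A sketch of that charge
pattern; try_charge_block() is an illustrative name, not a helper in this
patch:

	#include <linux/errno.h>
	#include <linux/percpu_counter.h>
	#include <linux/shmem_fs.h>

	/* Illustrative only: mirrors the max_blocks check in shmem_getpage_gfp(). */
	static int try_charge_block(struct shmem_sb_info *sbinfo)
	{
		if (!sbinfo->max_blocks)
			return 0;		/* unlimited: nothing to account */
		if (percpu_counter_compare(&sbinfo->used_blocks,
					   sbinfo->max_blocks) >= 0)
			return -ENOSPC;		/* limit reached, caller unwinds */
		percpu_counter_inc(&sbinfo->used_blocks);
		return 0;
	}
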
@@ -2220,7 +1586,7 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
        int error;
        int len;
        struct inode *inode;
-       struct page *page = NULL;
+       struct page *page;
        char *kaddr;
        struct shmem_inode_info *info;
 
@@ -2244,10 +1610,13 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
 
        info = SHMEM_I(inode);
        inode->i_size = len-1;
-       if (len <= SHMEM_SYMLINK_INLINE_LEN) {
-               /* do it inline */
-               memcpy(info->inline_symlink, symname, len);
-               inode->i_op = &shmem_symlink_inline_operations;
+       if (len <= SHORT_SYMLINK_LEN) {
+               info->symlink = kmemdup(symname, len, GFP_KERNEL);
+               if (!info->symlink) {
+                       iput(inode);
+                       return -ENOMEM;
+               }
+               inode->i_op = &shmem_short_symlink_operations;
        } else {
                error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL);
                if (error) {
@@ -2270,17 +1639,17 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
        return 0;
 }
 
-static void *shmem_follow_link_inline(struct dentry *dentry, struct nameidata *nd)
+static void *shmem_follow_short_symlink(struct dentry *dentry, struct nameidata *nd)
 {
-       nd_set_link(nd, SHMEM_I(dentry->d_inode)->inline_symlink);
+       nd_set_link(nd, SHMEM_I(dentry->d_inode)->symlink);
        return NULL;
 }
 
 static void *shmem_follow_link(struct dentry *dentry, struct nameidata *nd)
 {
        struct page *page = NULL;
-       int res = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL);
-       nd_set_link(nd, res ? ERR_PTR(res) : kmap(page));
+       int error = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL);
+       nd_set_link(nd, error ? ERR_PTR(error) : kmap(page));
        if (page)
                unlock_page(page);
        return page;
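
In the short-symlink path, len = strlen(symname) + 1 counts the trailing NUL,
so inode->i_size is set to len - 1 while the kmemdup() copy keeps the
terminator that nd_set_link() expects.  A userspace analogue of that
bookkeeping (short_symlink_dup() is illustrative only):

	#include <assert.h>
	#include <stdlib.h>
	#include <string.h>

	/* Illustrative stand-in for the kmemdup() path; not kernel code. */
	static char *short_symlink_dup(const char *symname, size_t *isize)
	{
		size_t len = strlen(symname) + 1;	/* include the NUL, as the patch does */
		char *link = malloc(len);		/* kmemdup() equivalent */

		if (link) {
			memcpy(link, symname, len);
			*isize = len - 1;		/* i_size excludes the NUL */
		}
		return link;
	}

	int main(void)
	{
		size_t isize;
		char *link = short_symlink_dup("target", &isize);

		assert(link && isize == 6);
		free(link);
		return 0;
	}
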
@@ -2391,7 +1760,6 @@ out:
        return err;
 }
 
-
 static const struct xattr_handler *shmem_xattr_handlers[] = {
 #ifdef CONFIG_TMPFS_POSIX_ACL
        &generic_acl_access_handler,
@@ -2521,9 +1889,9 @@ static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size)
 }
 #endif /* CONFIG_TMPFS_XATTR */
 
-static const struct inode_operations shmem_symlink_inline_operations = {
+static const struct inode_operations shmem_short_symlink_operations = {
        .readlink       = generic_readlink,
-       .follow_link    = shmem_follow_link_inline,
+       .follow_link    = shmem_follow_short_symlink,
 #ifdef CONFIG_TMPFS_XATTR
        .setxattr       = shmem_setxattr,
        .getxattr       = shmem_getxattr,
@@ -2723,8 +2091,7 @@ static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
        if (config.max_inodes < inodes)
                goto out;
        /*
-        * Those tests also disallow limited->unlimited while any are in
-        * use, so i_blocks will always be zero when max_blocks is zero;
+        * Those tests disallow limited->unlimited while any are in use;
         * but we must separately disallow unlimited->limited, because
         * in that case we have no record of how much is already in use.
         */
@@ -2816,7 +2183,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
                goto failed;
        sbinfo->free_inodes = sbinfo->max_inodes;
 
-       sb->s_maxbytes = SHMEM_MAX_BYTES;
+       sb->s_maxbytes = MAX_LFS_FILESIZE;
        sb->s_blocksize = PAGE_CACHE_SIZE;
        sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
        sb->s_magic = TMPFS_MAGIC;
@@ -2851,14 +2218,14 @@ static struct kmem_cache *shmem_inode_cachep;
 
 static struct inode *shmem_alloc_inode(struct super_block *sb)
 {
-       struct shmem_inode_info *p;
-       p = (struct shmem_inode_info *)kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL);
-       if (!p)
+       struct shmem_inode_info *info;
+       info = kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL);
+       if (!info)
                return NULL;
-       return &p->vfs_inode;
+       return &info->vfs_inode;
 }
 
-static void shmem_i_callback(struct rcu_head *head)
+static void shmem_destroy_callback(struct rcu_head *head)
 {
        struct inode *inode = container_of(head, struct inode, i_rcu);
        INIT_LIST_HEAD(&inode->i_dentry);
@@ -2867,29 +2234,26 @@ static void shmem_i_callback(struct rcu_head *head)
 
 static void shmem_destroy_inode(struct inode *inode)
 {
-       if ((inode->i_mode & S_IFMT) == S_IFREG) {
-               /* only struct inode is valid if it's an inline symlink */
+       if ((inode->i_mode & S_IFMT) == S_IFREG)
                mpol_free_shared_policy(&SHMEM_I(inode)->policy);
-       }
-       call_rcu(&inode->i_rcu, shmem_i_callback);
+       call_rcu(&inode->i_rcu, shmem_destroy_callback);
 }
 
-static void init_once(void *foo)
+static void shmem_init_inode(void *foo)
 {
-       struct shmem_inode_info *p = (struct shmem_inode_info *) foo;
-
-       inode_init_once(&p->vfs_inode);
+       struct shmem_inode_info *info = foo;
+       inode_init_once(&info->vfs_inode);
 }
 
-static int init_inodecache(void)
+static int shmem_init_inodecache(void)
 {
        shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
                                sizeof(struct shmem_inode_info),
-                               0, SLAB_PANIC, init_once);
+                               0, SLAB_PANIC, shmem_init_inode);
        return 0;
 }
 
-static void destroy_inodecache(void)
+static void shmem_destroy_inodecache(void)
 {
        kmem_cache_destroy(shmem_inode_cachep);
 }
@@ -2898,7 +2262,6 @@ static const struct address_space_operations shmem_aops = {
        .writepage      = shmem_writepage,
        .set_page_dirty = __set_page_dirty_no_writeback,
 #ifdef CONFIG_TMPFS
-       .readpage       = shmem_readpage,
        .write_begin    = shmem_write_begin,
        .write_end      = shmem_write_end,
 #endif
@@ -2929,10 +2292,6 @@ static const struct inode_operations shmem_inode_operations = {
        .listxattr      = shmem_listxattr,
        .removexattr    = shmem_removexattr,
 #endif
-#ifdef CONFIG_TMPFS_POSIX_ACL
-       .check_acl      = generic_check_acl,
-#endif
-
 };
 
 static const struct inode_operations shmem_dir_inode_operations = {
@@ -2955,7 +2314,6 @@ static const struct inode_operations shmem_dir_inode_operations = {
 #endif
 #ifdef CONFIG_TMPFS_POSIX_ACL
        .setattr        = shmem_setattr,
-       .check_acl      = generic_check_acl,
 #endif
 };
 
@@ -2968,7 +2326,6 @@ static const struct inode_operations shmem_special_inode_operations = {
 #endif
 #ifdef CONFIG_TMPFS_POSIX_ACL
        .setattr        = shmem_setattr,
-       .check_acl      = generic_check_acl,
 #endif
 };
 
@@ -2993,21 +2350,20 @@ static const struct vm_operations_struct shmem_vm_ops = {
 #endif
 };
 
-
 static struct dentry *shmem_mount(struct file_system_type *fs_type,
        int flags, const char *dev_name, void *data)
 {
        return mount_nodev(fs_type, flags, data, shmem_fill_super);
 }
 
-static struct file_system_type tmpfs_fs_type = {
+static struct file_system_type shmem_fs_type = {
        .owner          = THIS_MODULE,
        .name           = "tmpfs",
        .mount          = shmem_mount,
        .kill_sb        = kill_litter_super,
 };
 
-int __init init_tmpfs(void)
+int __init shmem_init(void)
 {
        int error;
 
@@ -3015,18 +2371,18 @@ int __init init_tmpfs(void)
        if (error)
                goto out4;
 
-       error = init_inodecache();
+       error = shmem_init_inodecache();
        if (error)
                goto out3;
 
-       error = register_filesystem(&tmpfs_fs_type);
+       error = register_filesystem(&shmem_fs_type);
        if (error) {
                printk(KERN_ERR "Could not register tmpfs\n");
                goto out2;
        }
 
-       shm_mnt = vfs_kern_mount(&tmpfs_fs_type, MS_NOUSER,
-                               tmpfs_fs_type.name, NULL);
+       shm_mnt = vfs_kern_mount(&shmem_fs_type, MS_NOUSER,
+                                shmem_fs_type.name, NULL);
        if (IS_ERR(shm_mnt)) {
                error = PTR_ERR(shm_mnt);
                printk(KERN_ERR "Could not kern_mount tmpfs\n");
@@ -3035,9 +2391,9 @@ int __init init_tmpfs(void)
        return 0;
 
 out1:
-       unregister_filesystem(&tmpfs_fs_type);
+       unregister_filesystem(&shmem_fs_type);
 out2:
-       destroy_inodecache();
+       shmem_destroy_inodecache();
 out3:
        bdi_destroy(&shmem_backing_dev_info);
 out4:
@@ -3045,45 +2401,6 @@ out4:
        return error;
 }
 
-#ifdef CONFIG_CGROUP_MEM_RES_CTLR
-/**
- * mem_cgroup_get_shmem_target - find a page or entry assigned to the shmem file
- * @inode: the inode to be searched
- * @pgoff: the offset to be searched
- * @pagep: the pointer for the found page to be stored
- * @ent: the pointer for the found swap entry to be stored
- *
- * If a page is found, its refcount is incremented. Callers should handle
- * this refcount.
- */
-void mem_cgroup_get_shmem_target(struct inode *inode, pgoff_t pgoff,
-                                       struct page **pagep, swp_entry_t *ent)
-{
-       swp_entry_t entry = { .val = 0 }, *ptr;
-       struct page *page = NULL;
-       struct shmem_inode_info *info = SHMEM_I(inode);
-
-       if ((pgoff << PAGE_CACHE_SHIFT) >= i_size_read(inode))
-               goto out;
-
-       spin_lock(&info->lock);
-       ptr = shmem_swp_entry(info, pgoff, NULL);
-#ifdef CONFIG_SWAP
-       if (ptr && ptr->val) {
-               entry.val = ptr->val;
-               page = find_get_page(&swapper_space, entry.val);
-       } else
-#endif
-               page = find_get_page(inode->i_mapping, pgoff);
-       if (ptr)
-               shmem_swp_unmap(ptr);
-       spin_unlock(&info->lock);
-out:
-       *pagep = page;
-       *ent = entry;
-}
-#endif
-
 #else /* !CONFIG_SHMEM */
 
 /*
@@ -3097,23 +2414,23 @@ out:
 
 #include <linux/ramfs.h>
 
-static struct file_system_type tmpfs_fs_type = {
+static struct file_system_type shmem_fs_type = {
        .name           = "tmpfs",
        .mount          = ramfs_mount,
        .kill_sb        = kill_litter_super,
 };
 
-int __init init_tmpfs(void)
+int __init shmem_init(void)
 {
-       BUG_ON(register_filesystem(&tmpfs_fs_type) != 0);
+       BUG_ON(register_filesystem(&shmem_fs_type) != 0);
 
-       shm_mnt = kern_mount(&tmpfs_fs_type);
+       shm_mnt = kern_mount(&shmem_fs_type);
        BUG_ON(IS_ERR(shm_mnt));
 
        return 0;
 }
 
-int shmem_unuse(swp_entry_t entry, struct page *page)
+int shmem_unuse(swp_entry_t swap, struct page *page)
 {
        return 0;
 }
@@ -3123,43 +2440,17 @@ int shmem_lock(struct file *file, int lock, struct user_struct *user)
        return 0;
 }
 
-void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end)
+void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
 {
-       truncate_inode_pages_range(inode->i_mapping, start, end);
+       truncate_inode_pages_range(inode->i_mapping, lstart, lend);
 }
 EXPORT_SYMBOL_GPL(shmem_truncate_range);
 
-#ifdef CONFIG_CGROUP_MEM_RES_CTLR
-/**
- * mem_cgroup_get_shmem_target - find a page or entry assigned to the shmem file
- * @inode: the inode to be searched
- * @pgoff: the offset to be searched
- * @pagep: the pointer for the found page to be stored
- * @ent: the pointer for the found swap entry to be stored
- *
- * If a page is found, its refcount is incremented. Callers should handle
- * this refcount.
- */
-void mem_cgroup_get_shmem_target(struct inode *inode, pgoff_t pgoff,
-                                       struct page **pagep, swp_entry_t *ent)
-{
-       struct page *page = NULL;
-
-       if ((pgoff << PAGE_CACHE_SHIFT) >= i_size_read(inode))
-               goto out;
-       page = find_get_page(inode->i_mapping, pgoff);
-out:
-       *pagep = page;
-       *ent = (swp_entry_t){ .val = 0 };
-}
-#endif
-
 #define shmem_vm_ops                           generic_file_vm_ops
 #define shmem_file_operations                  ramfs_file_operations
 #define shmem_get_inode(sb, dir, mode, dev, flags)     ramfs_get_inode(sb, dir, mode, dev)
 #define shmem_acct_size(flags, size)           0
 #define shmem_unacct_size(flags, size)         do {} while (0)
-#define SHMEM_MAX_BYTES                                MAX_LFS_FILESIZE
 
 #endif /* CONFIG_SHMEM */
 
@@ -3183,7 +2474,7 @@ struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags
        if (IS_ERR(shm_mnt))
                return (void *)shm_mnt;
 
-       if (size < 0 || size > SHMEM_MAX_BYTES)
+       if (size < 0 || size > MAX_LFS_FILESIZE)
                return ERR_PTR(-EINVAL);
 
        if (shmem_acct_size(flags, size))
@@ -3229,6 +2520,15 @@ put_memory:
 }
 EXPORT_SYMBOL_GPL(shmem_file_setup);
 
+void shmem_set_file(struct vm_area_struct *vma, struct file *file)
+{
+       if (vma->vm_file)
+               fput(vma->vm_file);
+       vma->vm_file = file;
+       vma->vm_ops = &shmem_vm_ops;
+       vma->vm_flags |= VM_CAN_NONLINEAR;
+}
+
 /**
  * shmem_zero_setup - setup a shared anonymous mapping
  * @vma: the vma to be mmapped is prepared by do_mmap_pgoff
@@ -3242,11 +2542,7 @@ int shmem_zero_setup(struct vm_area_struct *vma)
        if (IS_ERR(file))
                return PTR_ERR(file);
 
-       if (vma->vm_file)
-               fput(vma->vm_file);
-       vma->vm_file = file;
-       vma->vm_ops = &shmem_vm_ops;
-       vma->vm_flags |= VM_CAN_NONLINEAR;
+       shmem_set_file(vma, file);
        return 0;
 }
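
shmem_zero_setup() is the first caller of the new shmem_set_file() helper; a
driver mmap handler could reuse the same pattern to back a vma with a fresh
tmpfs file.  A sketch under an assumed name (my_driver_mmap() is hypothetical,
not part of this patch):

	#include <linux/err.h>
	#include <linux/mm.h>
	#include <linux/shmem_fs.h>

	static int my_driver_mmap(struct file *filp, struct vm_area_struct *vma)
	{
		loff_t size = vma->vm_end - vma->vm_start;
		struct file *file;

		file = shmem_file_setup("dev/my_driver", size, vma->vm_flags);
		if (IS_ERR(file))
			return PTR_ERR(file);

		shmem_set_file(vma, file);	/* drops any previous vm_file reference */
		return 0;
	}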
 
@@ -3262,13 +2558,29 @@ int shmem_zero_setup(struct vm_area_struct *vma)
  * suit tmpfs, since it may have pages in swapcache, and needs to find those
  * for itself; although drivers/gpu/drm i915 and ttm rely upon this support.
  *
- * Provide a stub for those callers to start using now, then later
- * flesh it out to call shmem_getpage() with additional gfp mask, when
- * shmem_file_splice_read() is added and shmem_readpage() is removed.
+ * i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in
+ * with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily.
  */
 struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
                                         pgoff_t index, gfp_t gfp)
 {
+#ifdef CONFIG_SHMEM
+       struct inode *inode = mapping->host;
+       struct page *page;
+       int error;
+
+       BUG_ON(mapping->a_ops != &shmem_aops);
+       error = shmem_getpage_gfp(inode, index, &page, SGP_CACHE, gfp, NULL);
+       if (error)
+               page = ERR_PTR(error);
+       else
+               unlock_page(page);
+       return page;
+#else
+       /*
+        * The tiny !SHMEM case uses ramfs without swap
+        */
        return read_cache_page_gfp(mapping, index, gfp);
+#endif
 }
 EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);
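
A usage sketch of the gfp override described in the comment above, in the
spirit of the i915 caller; driver_read_page() is an assumed name:

	#include <linux/pagemap.h>
	#include <linux/shmem_fs.h>

	static struct page *driver_read_page(struct address_space *mapping,
					     pgoff_t index)
	{
		gfp_t gfp = mapping_gfp_mask(mapping);

		/* Fail quietly under memory pressure rather than OOMing. */
		gfp |= __GFP_NORETRY | __GFP_NOWARN;
		return shmem_read_mapping_page_gfp(mapping, index, gfp);
	}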