mm: return boolean from page_has_private()

diff --git a/mm/vmscan.c b/mm/vmscan.c
index ae6f4c174a1218afcdaaa301612bc9423f49dcab..f5b5f029288c6028b61a38304c5d78149d849480 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -60,6 +60,9 @@ struct scan_control {
 
        int may_writepage;
 
+       /* Can mapped pages be reclaimed? */
+       int may_unmap;
+
        /* Can pages be swapped as part of reclaim? */
        int may_swap;
 
@@ -78,6 +81,12 @@ struct scan_control {
        /* Which cgroup do we reclaim from */
        struct mem_cgroup *mem_cgroup;
 
+       /*
+        * Nodemask of nodes allowed by the caller. If NULL, all nodes
+        * are scanned.
+        */
+       nodemask_t      *nodemask;
+
        /* Pluggable isolate pages callback */
        unsigned long (*isolate_pages)(unsigned long nr, struct list_head *dst,
                        unsigned long *scanned, int order, int mode,
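
The new may_unmap flag takes over the "leave mapped pages alone" meaning that may_swap used to carry, so may_swap can now purely mean "anon pages may be swapped out". A small user-space sketch of how the two flags gate reclaim, paraphrased from the shrink_page_list() and shrink_zone() hunks later in this diff; the mock struct and helper names are illustrative, not kernel code:

    /* Mock of the relevant scan_control fields; not the kernel struct. */
    struct scan_control_mock {
        int may_writepage;
        int may_unmap;      /* can mapped pages be reclaimed? */
        int may_swap;       /* can anon pages be swapped out? */
        void *nodemask;     /* NULL means "scan all nodes" */
    };

    /* Paraphrase of the gates added later in this diff. */
    static int skip_mapped_page(const struct scan_control_mock *sc, int page_mapped)
    {
        /* shrink_page_list(): leave mapped pages alone unless may_unmap is set */
        return !sc->may_unmap && page_mapped;
    }

    static int scan_anon_lists(const struct scan_control_mock *sc, long nr_swap_pages)
    {
        /* shrink_zone(): skip the anon LRUs when swapping is disallowed or swap is full */
        return sc->may_swap && nr_swap_pages > 0;
    }

    int main(void)
    {
        /* e.g. memcg "noswap" reclaim: may unmap pages, must not touch swap */
        struct scan_control_mock sc = { .may_unmap = 1, .may_swap = 0 };
        return skip_mapped_page(&sc, 1) + scan_anon_lists(&sc, 1000);
    }
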
@@ -214,8 +223,9 @@ unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
                do_div(delta, lru_pages + 1);
                shrinker->nr += delta;
                if (shrinker->nr < 0) {
-                       printk(KERN_ERR "%s: nr=%ld\n",
-                                       __func__, shrinker->nr);
+                       printk(KERN_ERR "shrink_slab: %pF negative objects to "
+                              "delete nr=%ld\n",
+                              shrinker->shrink, shrinker->nr);
                        shrinker->nr = max_pass;
                }
 
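
For context, the nr value being clamped here is the shrinker's scan budget; the visible lines only show the tail of the calculation. In this kernel shrink_slab() roughly computes delta = 4 * scanned / shrinker->seeks, multiplies by max_pass and divides by lru_pages + 1. A worked sketch with made-up numbers (DEFAULT_SEEKS is 2 in this kernel):

    #include <stdio.h>

    int main(void)
    {
        /* Made-up inputs to the shrink_slab() pressure formula. */
        unsigned long scanned   = 1024;     /* LRU pages scanned this round */
        unsigned long seeks     = 2;        /* shrinker->seeks (DEFAULT_SEEKS) */
        unsigned long max_pass  = 50000;    /* objects reported by the shrinker */
        unsigned long lru_pages = 200000;   /* reclaimable LRU pages in scope */

        unsigned long long delta = (4ULL * scanned) / seeks;  /* 2048 */
        delta *= max_pass;                                    /* 102400000 */
        delta /= lru_pages + 1;                               /* ~511 objects to scan */

        printf("delta = %llu\n", delta);
        return 0;
    }
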
@@ -276,7 +286,7 @@ static inline int page_mapping_inuse(struct page *page)
 
 static inline int is_page_cache_freeable(struct page *page)
 {
-       return page_count(page) - !!PagePrivate(page) == 2;
+       return page_count(page) - page_has_private(page) == 2;
 }
 
 static int may_write_to_queue(struct backing_dev_info *bdi)
@@ -360,7 +370,7 @@ static pageout_t pageout(struct page *page, struct address_space *mapping,
                 * Some data journaling orphaned pages can have
                 * page->mapping == NULL while being dirty with clean buffers.
                 */
-               if (PagePrivate(page)) {
+               if (page_has_private(page)) {
                        if (try_to_free_buffers(page)) {
                                ClearPageDirty(page);
                                printk("%s: orphaned page\n", __func__);
@@ -460,10 +470,11 @@ static int __remove_mapping(struct address_space *mapping, struct page *page)
                swp_entry_t swap = { .val = page_private(page) };
                __delete_from_swap_cache(page);
                spin_unlock_irq(&mapping->tree_lock);
-               swap_free(swap);
+               swapcache_free(swap, page);
        } else {
                __remove_from_page_cache(page);
                spin_unlock_irq(&mapping->tree_lock);
+               mem_cgroup_uncharge_cache_page(page);
        }
 
        return 1;
@@ -502,7 +513,6 @@ int remove_mapping(struct address_space *mapping, struct page *page)
  *
  * lru_lock must not be held, interrupts must be enabled.
  */
-#ifdef CONFIG_UNEVICTABLE_LRU
 void putback_lru_page(struct page *page)
 {
        int lru;
@@ -521,7 +531,7 @@ redo:
                 * unevictable page on [in]active list.
                 * We know how to handle that.
                 */
-               lru = active + page_is_file_cache(page);
+               lru = active + page_lru_base_type(page);
                lru_cache_add_lru(page, lru);
        } else {
                /*
@@ -556,20 +566,6 @@ redo:
        put_page(page);         /* drop ref from isolate */
 }
 
-#else /* CONFIG_UNEVICTABLE_LRU */
-
-void putback_lru_page(struct page *page)
-{
-       int lru;
-       VM_BUG_ON(PageLRU(page));
-
-       lru = !!TestClearPageActive(page) + page_is_file_cache(page);
-       lru_cache_add_lru(page, lru);
-       put_page(page);
-}
-#endif /* CONFIG_UNEVICTABLE_LRU */
-
-
 /*
  * shrink_page_list() returns the number of reclaimed pages
  */
@@ -581,6 +577,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
        struct pagevec freed_pvec;
        int pgactivate = 0;
        unsigned long nr_reclaimed = 0;
+       unsigned long vm_flags;
 
        cond_resched();
 
@@ -606,7 +603,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                if (unlikely(!page_evictable(page, NULL)))
                        goto cull_mlocked;
 
-               if (!sc->may_swap && page_mapped(page))
+               if (!sc->may_unmap && page_mapped(page))
                        goto keep_locked;
 
                /* Double the slab pressure for mapped and swapcache pages */
@@ -631,10 +628,16 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                                goto keep_locked;
                }
 
-               referenced = page_referenced(page, 1, sc->mem_cgroup);
-               /* In active use or really unfreeable?  Activate it. */
+               referenced = page_referenced(page, 1,
+                                               sc->mem_cgroup, &vm_flags);
+               /*
+                * In active use or really unfreeable?  Activate it.
+                * If a page with PG_mlocked lost the isolation race,
+                * try_to_unmap() moves it to the unevictable list.
+                */
                if (sc->order <= PAGE_ALLOC_COSTLY_ORDER &&
-                                       referenced && page_mapping_inuse(page))
+                                       referenced && page_mapping_inuse(page)
+                                       && !(vm_flags & VM_LOCKED))
                        goto activate_locked;
 
                /*
@@ -720,7 +723,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                 * process address space (page_count == 1) it can be freed.
                 * Otherwise, leave the page on the LRU so it is swappable.
                 */
-               if (PagePrivate(page)) {
+               if (page_has_private(page)) {
                        if (!try_to_release_page(page, sc->gfp_mask))
                                goto activate_locked;
                        if (!mapping && page_count(page) == 1) {
@@ -818,7 +821,7 @@ int __isolate_lru_page(struct page *page, int mode, int file)
        if (mode != ISOLATE_BOTH && (!PageActive(page) != !mode))
                return ret;
 
-       if (mode != ISOLATE_BOTH && (!page_is_file_cache(page) != !file))
+       if (mode != ISOLATE_BOTH && page_is_file_cache(page) != file)
                return ret;
 
        /*
@@ -839,7 +842,6 @@ int __isolate_lru_page(struct page *page, int mode, int file)
                 */
                ClearPageLRU(page);
                ret = 0;
-               mem_cgroup_del_lru(page);
        }
 
        return ret;
@@ -887,12 +889,14 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
                switch (__isolate_lru_page(page, mode, file)) {
                case 0:
                        list_move(&page->lru, dst);
+                       mem_cgroup_del_lru(page);
                        nr_taken++;
                        break;
 
                case -EBUSY:
                        /* else it is being freed elsewhere */
                        list_move(&page->lru, src);
+                       mem_cgroup_rotate_lru_list(page, page_lru(page));
                        continue;
 
                default:
@@ -931,18 +935,21 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
                        /* Check that we have not crossed a zone boundary. */
                        if (unlikely(page_zone_id(cursor_page) != zone_id))
                                continue;
-                       switch (__isolate_lru_page(cursor_page, mode, file)) {
-                       case 0:
+
+                       /*
+                        * If we don't have enough swap space, reclaiming
+                        * anon pages which don't already have a swap slot is
+                        * pointless.
+                        */
+                       if (nr_swap_pages <= 0 && PageAnon(cursor_page) &&
+                                       !PageSwapCache(cursor_page))
+                               continue;
+
+                       if (__isolate_lru_page(cursor_page, mode, file) == 0) {
                                list_move(&cursor_page->lru, dst);
+                               mem_cgroup_del_lru(cursor_page);
                                nr_taken++;
                                scan++;
-                               break;
-
-                       case -EBUSY:
-                               /* else it is being freed elsewhere */
-                               list_move(&cursor_page->lru, src);
-                       default:
-                               break;  /* ! on LRU or wrong list */
                        }
                }
        }
@@ -964,7 +971,7 @@ static unsigned long isolate_pages_global(unsigned long nr,
        if (file)
                lru += LRU_FILE;
        return isolate_lru_pages(nr, &z->lru[lru].list, dst, scanned, order,
-                                                               mode, !!file);
+                                                               mode, file);
 }
 
 /*
@@ -979,7 +986,7 @@ static unsigned long clear_active_flags(struct list_head *page_list,
        struct page *page;
 
        list_for_each_entry(page, page_list, lru) {
-               lru = page_is_file_cache(page);
+               lru = page_lru_base_type(page);
                if (PageActive(page)) {
                        lru += LRU_ACTIVE;
                        ClearPageActive(page);
@@ -1036,6 +1043,31 @@ int isolate_lru_page(struct page *page)
        return ret;
 }
 
+/*
+ * Are there way too many processes in the direct reclaim path already?
+ */
+static int too_many_isolated(struct zone *zone, int file,
+               struct scan_control *sc)
+{
+       unsigned long inactive, isolated;
+
+       if (current_is_kswapd())
+               return 0;
+
+       if (!scanning_global_lru(sc))
+               return 0;
+
+       if (file) {
+               inactive = zone_page_state(zone, NR_INACTIVE_FILE);
+               isolated = zone_page_state(zone, NR_ISOLATED_FILE);
+       } else {
+               inactive = zone_page_state(zone, NR_INACTIVE_ANON);
+               isolated = zone_page_state(zone, NR_ISOLATED_ANON);
+       }
+
+       return isolated > inactive;
+}
+
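
too_many_isolated() is a plain comparison of two per-zone counters; a stand-alone sketch with made-up counter values (the helper below mirrors the predicate, it is not kernel code):

    #include <stdio.h>

    /* Mirrors the predicate above, fed with plain numbers. */
    static int too_many_isolated_sketch(unsigned long inactive, unsigned long isolated)
    {
        return isolated > inactive;
    }

    int main(void)
    {
        /* Hypothetical NR_INACTIVE_FILE vs NR_ISOLATED_FILE samples. */
        printf("%d\n", too_many_isolated_sketch(10000, 200)); /* 0: keep reclaiming */
        printf("%d\n", too_many_isolated_sketch(300, 500));   /* 1: throttle */
        return 0;
    }

When the test fires, direct reclaimers sit in the congestion_wait() loop added to shrink_inactive_list() in the next hunk until enough isolated pages have been put back or freed; kswapd and memcg reclaim are exempted by the early returns above.
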
 /*
  * shrink_inactive_list() is a helper for shrink_zone().  It returns the number
  * of reclaimed pages
@@ -1049,6 +1081,27 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
        unsigned long nr_scanned = 0;
        unsigned long nr_reclaimed = 0;
        struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
+       int lumpy_reclaim = 0;
+
+       while (unlikely(too_many_isolated(zone, file, sc))) {
+               congestion_wait(BLK_RW_ASYNC, HZ/10);
+
+               /* We are about to die and free our memory. Return now. */
+               if (fatal_signal_pending(current))
+                       return SWAP_CLUSTER_MAX;
+       }
+
+       /*
+        * If we need a large contiguous chunk of memory, or have
+        * trouble getting a small set of contiguous pages, we
+        * will reclaim both active and inactive pages.
+        *
+        * We use the same threshold as pageout congestion_wait below.
+        */
+       if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
+               lumpy_reclaim = 1;
+       else if (sc->order && priority < DEF_PRIORITY - 2)
+               lumpy_reclaim = 1;
 
        pagevec_init(&pvec, 1);
 
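
Hoisting the ISOLATE_BOTH decision out of the scan loop makes the lumpy-reclaim policy easier to read; a stand-alone sketch of the same test (PAGE_ALLOC_COSTLY_ORDER is 3 and DEF_PRIORITY is 12 in this kernel; the helper name is made up):

    #include <stdio.h>

    #define PAGE_ALLOC_COSTLY_ORDER 3   /* as in this kernel */
    #define DEF_PRIORITY            12  /* as in this kernel */

    /* Same test as the two branches above. */
    static int want_lumpy_reclaim(int order, int priority)
    {
        if (order > PAGE_ALLOC_COSTLY_ORDER)
            return 1;   /* large contiguous request */
        if (order && priority < DEF_PRIORITY - 2)
            return 1;   /* small order, but reclaim is struggling */
        return 0;
    }

    int main(void)
    {
        printf("%d\n", want_lumpy_reclaim(4, 12));  /* 1: order-4 is always lumpy */
        printf("%d\n", want_lumpy_reclaim(1, 12));  /* 0: first passes stay inactive-only */
        printf("%d\n", want_lumpy_reclaim(1, 9));   /* 1: pressure rose, go lumpy */
        return 0;
    }

So an order-4 request always isolates both active and inactive pages, while an order-1 request only does so once the scan priority has dropped below DEF_PRIORITY - 2.
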
@@ -1061,23 +1114,27 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
                unsigned long nr_freed;
                unsigned long nr_active;
                unsigned int count[NR_LRU_LISTS] = { 0, };
-               int mode = ISOLATE_INACTIVE;
-
-               /*
-                * If we need a large contiguous chunk of memory, or have
-                * trouble getting a small set of contiguous pages, we
-                * will reclaim both active and inactive pages.
-                *
-                * We use the same threshold as pageout congestion_wait below.
-                */
-               if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
-                       mode = ISOLATE_BOTH;
-               else if (sc->order && priority < DEF_PRIORITY - 2)
-                       mode = ISOLATE_BOTH;
+               int mode = lumpy_reclaim ? ISOLATE_BOTH : ISOLATE_INACTIVE;
+               unsigned long nr_anon;
+               unsigned long nr_file;
 
                nr_taken = sc->isolate_pages(sc->swap_cluster_max,
                             &page_list, &nr_scan, sc->order, mode,
                                zone, sc->mem_cgroup, 0, file);
+
+               if (scanning_global_lru(sc)) {
+                       zone->pages_scanned += nr_scan;
+                       if (current_is_kswapd())
+                               __count_zone_vm_events(PGSCAN_KSWAPD, zone,
+                                                      nr_scan);
+                       else
+                               __count_zone_vm_events(PGSCAN_DIRECT, zone,
+                                                      nr_scan);
+               }
+
+               if (nr_taken == 0)
+                       goto done;
+
                nr_active = clear_active_flags(&page_list, count);
                __count_vm_events(PGDEACTIVATE, nr_active);
 
@@ -1090,8 +1147,10 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
                __mod_zone_page_state(zone, NR_INACTIVE_ANON,
                                                -count[LRU_INACTIVE_ANON]);
 
-               if (scanning_global_lru(sc))
-                       zone->pages_scanned += nr_scan;
+               nr_anon = count[LRU_ACTIVE_ANON] + count[LRU_INACTIVE_ANON];
+               nr_file = count[LRU_ACTIVE_FILE] + count[LRU_INACTIVE_FILE];
+               __mod_zone_page_state(zone, NR_ISOLATED_ANON, nr_anon);
+               __mod_zone_page_state(zone, NR_ISOLATED_FILE, nr_file);
 
                reclaim_stat->recent_scanned[0] += count[LRU_INACTIVE_ANON];
                reclaim_stat->recent_scanned[0] += count[LRU_ACTIVE_ANON];
@@ -1110,8 +1169,8 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
                 * but that should be acceptable to the caller
                 */
                if (nr_freed < nr_taken && !current_is_kswapd() &&
-                                       sc->order > PAGE_ALLOC_COSTLY_ORDER) {
-                       congestion_wait(WRITE, HZ/10);
+                   lumpy_reclaim) {
+                       congestion_wait(BLK_RW_ASYNC, HZ/10);
 
                        /*
                         * The attempt at page out may have made some
@@ -1125,18 +1184,12 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
                }
 
                nr_reclaimed += nr_freed;
+
                local_irq_disable();
-               if (current_is_kswapd()) {
-                       __count_zone_vm_events(PGSCAN_KSWAPD, zone, nr_scan);
+               if (current_is_kswapd())
                        __count_vm_events(KSWAPD_STEAL, nr_freed);
-               } else if (scanning_global_lru(sc))
-                       __count_zone_vm_events(PGSCAN_DIRECT, zone, nr_scan);
-
                __count_zone_vm_events(PGSTEAL, zone, nr_freed);
 
-               if (nr_taken == 0)
-                       goto done;
-
                spin_lock(&zone->lru_lock);
                /*
                 * Put back any unfreeable pages.
@@ -1155,8 +1208,8 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
                        SetPageLRU(page);
                        lru = page_lru(page);
                        add_page_to_lru_list(zone, page, lru);
-                       if (PageActive(page)) {
-                               int file = !!page_is_file_cache(page);
+                       if (is_active_lru(lru)) {
+                               int file = is_file_lru(lru);
                                reclaim_stat->recent_rotated[file]++;
                        }
                        if (!pagevec_add(&pvec, page)) {
@@ -1165,10 +1218,13 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
                                spin_lock_irq(&zone->lru_lock);
                        }
                }
+               __mod_zone_page_state(zone, NR_ISOLATED_ANON, -nr_anon);
+               __mod_zone_page_state(zone, NR_ISOLATED_FILE, -nr_file);
+
        } while (nr_scanned < max_scan);
-       spin_unlock(&zone->lru_lock);
+
 done:
-       local_irq_enable();
+       spin_unlock_irq(&zone->lru_lock);
        pagevec_release(&pvec);
        return nr_reclaimed;
 }
@@ -1205,23 +1261,55 @@ static inline void note_zone_scanning_priority(struct zone *zone, int priority)
  * But we had to alter page->flags anyway.
  */
 
+static void move_active_pages_to_lru(struct zone *zone,
+                                    struct list_head *list,
+                                    enum lru_list lru)
+{
+       unsigned long pgmoved = 0;
+       struct pagevec pvec;
+       struct page *page;
+
+       pagevec_init(&pvec, 1);
+
+       while (!list_empty(list)) {
+               page = lru_to_page(list);
+
+               VM_BUG_ON(PageLRU(page));
+               SetPageLRU(page);
+
+               list_move(&page->lru, &zone->lru[lru].list);
+               mem_cgroup_add_lru_list(page, lru);
+               pgmoved++;
+
+               if (!pagevec_add(&pvec, page) || list_empty(list)) {
+                       spin_unlock_irq(&zone->lru_lock);
+                       if (buffer_heads_over_limit)
+                               pagevec_strip(&pvec);
+                       __pagevec_release(&pvec);
+                       spin_lock_irq(&zone->lru_lock);
+               }
+       }
+       __mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved);
+       if (!is_active_lru(lru))
+               __count_vm_events(PGDEACTIVATE, pgmoved);
+}
 
 static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
                        struct scan_control *sc, int priority, int file)
 {
-       unsigned long pgmoved;
-       int pgdeactivate = 0;
+       unsigned long nr_taken;
        unsigned long pgscanned;
+       unsigned long vm_flags;
        LIST_HEAD(l_hold);      /* The pages which were snipped off */
+       LIST_HEAD(l_active);
        LIST_HEAD(l_inactive);
        struct page *page;
-       struct pagevec pvec;
-       enum lru_list lru;
        struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
+       unsigned long nr_rotated = 0;
 
        lru_add_drain();
        spin_lock_irq(&zone->lru_lock);
-       pgmoved = sc->isolate_pages(nr_pages, &l_hold, &pgscanned, sc->order,
+       nr_taken = sc->isolate_pages(nr_pages, &l_hold, &pgscanned, sc->order,
                                        ISOLATE_ACTIVE, zone,
                                        sc->mem_cgroup, 1, file);
        /*
@@ -1231,15 +1319,16 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
        if (scanning_global_lru(sc)) {
                zone->pages_scanned += pgscanned;
        }
-       reclaim_stat->recent_scanned[!!file] += pgmoved;
+       reclaim_stat->recent_scanned[file] += nr_taken;
 
+       __count_zone_vm_events(PGREFILL, zone, pgscanned);
        if (file)
-               __mod_zone_page_state(zone, NR_ACTIVE_FILE, -pgmoved);
+               __mod_zone_page_state(zone, NR_ACTIVE_FILE, -nr_taken);
        else
-               __mod_zone_page_state(zone, NR_ACTIVE_ANON, -pgmoved);
+               __mod_zone_page_state(zone, NR_ACTIVE_ANON, -nr_taken);
+       __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, nr_taken);
        spin_unlock_irq(&zone->lru_lock);
 
-       pgmoved = 0;
        while (!list_empty(&l_hold)) {
                cond_resched();
                page = lru_to_page(&l_hold);
@@ -1252,64 +1341,45 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 
                /* page_referenced clears PageReferenced */
                if (page_mapping_inuse(page) &&
-                   page_referenced(page, 0, sc->mem_cgroup))
-                       pgmoved++;
+                   page_referenced(page, 0, sc->mem_cgroup, &vm_flags)) {
+                       nr_rotated++;
+                       /*
+                        * Identify referenced, file-backed active pages and
+                        * give them one more trip around the active list, so
+                        * that executable code gets a better chance to stay in
+                        * memory under moderate memory pressure.  Anon pages
+                        * are not likely to be evicted by use-once streaming
+                        * IO, plus the JVM can create lots of anon VM_EXEC pages,
+                        * so we ignore them here.
+                        */
+                       if ((vm_flags & VM_EXEC) && !PageAnon(page)) {
+                               list_add(&page->lru, &l_active);
+                               continue;
+                       }
+               }
 
+               ClearPageActive(page);  /* we are de-activating */
                list_add(&page->lru, &l_inactive);
        }
 
        /*
-        * Move the pages to the [file or anon] inactive list.
+        * Move pages back to the lru list.
         */
-       pagevec_init(&pvec, 1);
-       pgmoved = 0;
-       lru = LRU_BASE + file * LRU_FILE;
-
        spin_lock_irq(&zone->lru_lock);
        /*
-        * Count referenced pages from currently used mappings as
-        * rotated, even though they are moved to the inactive list.
-        * This helps balance scan pressure between file and anonymous
-        * pages in get_scan_ratio.
+        * Count referenced pages from currently used mappings as rotated,
+        * even though only some of them are actually re-activated.  This
+        * helps balance scan pressure between file and anonymous pages in
+        * get_scan_ratio.
         */
-       reclaim_stat->recent_rotated[!!file] += pgmoved;
+       reclaim_stat->recent_rotated[file] += nr_rotated;
 
-       while (!list_empty(&l_inactive)) {
-               page = lru_to_page(&l_inactive);
-               prefetchw_prev_lru_page(page, &l_inactive, flags);
-               VM_BUG_ON(PageLRU(page));
-               SetPageLRU(page);
-               VM_BUG_ON(!PageActive(page));
-               ClearPageActive(page);
-
-               list_move(&page->lru, &zone->lru[lru].list);
-               mem_cgroup_add_lru_list(page, lru);
-               pgmoved++;
-               if (!pagevec_add(&pvec, page)) {
-                       __mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved);
-                       spin_unlock_irq(&zone->lru_lock);
-                       pgdeactivate += pgmoved;
-                       pgmoved = 0;
-                       if (buffer_heads_over_limit)
-                               pagevec_strip(&pvec);
-                       __pagevec_release(&pvec);
-                       spin_lock_irq(&zone->lru_lock);
-               }
-       }
-       __mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved);
-       pgdeactivate += pgmoved;
-       if (buffer_heads_over_limit) {
-               spin_unlock_irq(&zone->lru_lock);
-               pagevec_strip(&pvec);
-               spin_lock_irq(&zone->lru_lock);
-       }
-       __count_zone_vm_events(PGREFILL, zone, pgscanned);
-       __count_vm_events(PGDEACTIVATE, pgdeactivate);
+       move_active_pages_to_lru(zone, &l_active,
+                                               LRU_ACTIVE + file * LRU_FILE);
+       move_active_pages_to_lru(zone, &l_inactive,
+                                               LRU_BASE   + file * LRU_FILE);
+       __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken);
        spin_unlock_irq(&zone->lru_lock);
-       if (vm_swap_full())
-               pagevec_swap_free(&pvec);
-
-       pagevec_release(&pvec);
 }
 
 static int inactive_anon_is_low_global(struct zone *zone)
@@ -1344,12 +1414,48 @@ static int inactive_anon_is_low(struct zone *zone, struct scan_control *sc)
        return low;
 }
 
+static int inactive_file_is_low_global(struct zone *zone)
+{
+       unsigned long active, inactive;
+
+       active = zone_page_state(zone, NR_ACTIVE_FILE);
+       inactive = zone_page_state(zone, NR_INACTIVE_FILE);
+
+       return (active > inactive);
+}
+
+/**
+ * inactive_file_is_low - check if file pages need to be deactivated
+ * @zone: zone to check
+ * @sc:   scan control of this context
+ *
+ * When the system is doing streaming IO, memory pressure here
+ * ensures that active file pages get deactivated, until more
+ * than half of the file pages are on the inactive list.
+ *
+ * Once we get to that situation, protect the system's working
+ * set from being evicted by disabling active file page aging.
+ *
+ * This uses a different ratio than the anonymous pages, because
+ * the page cache uses a use-once replacement algorithm.
+ */
+static int inactive_file_is_low(struct zone *zone, struct scan_control *sc)
+{
+       int low;
+
+       if (scanning_global_lru(sc))
+               low = inactive_file_is_low_global(zone);
+       else
+               low = mem_cgroup_inactive_file_is_low(sc->mem_cgroup);
+       return low;
+}
+
 static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
        struct zone *zone, struct scan_control *sc, int priority)
 {
        int file = is_file_lru(lru);
 
-       if (lru == LRU_ACTIVE_FILE) {
+       if (lru == LRU_ACTIVE_FILE && inactive_file_is_low(zone, sc)) {
                shrink_active_list(nr_to_scan, zone, sc, priority, file);
                return 0;
        }
@@ -1378,13 +1484,6 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
        unsigned long ap, fp;
        struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
 
-       /* If we have no swap space, do not bother scanning anon pages. */
-       if (nr_swap_pages <= 0) {
-               percent[0] = 0;
-               percent[1] = 100;
-               return;
-       }
-
        anon  = zone_nr_pages(zone, sc, LRU_ACTIVE_ANON) +
                zone_nr_pages(zone, sc, LRU_INACTIVE_ANON);
        file  = zone_nr_pages(zone, sc, LRU_ACTIVE_FILE) +
@@ -1394,7 +1493,7 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
                free  = zone_page_state(zone, NR_FREE_PAGES);
                /* If we have very few page cache pages,
                   force-scan anon pages. */
-               if (unlikely(file + free <= zone->pages_high)) {
+               if (unlikely(file + free <= high_wmark_pages(zone))) {
                        percent[0] = 100;
                        percent[1] = 0;
                        return;
@@ -1449,6 +1548,26 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
        percent[1] = 100 - percent[0];
 }
 
+/*
+ * Smallish @nr_to_scan's are deposited in @nr_saved_scan,
+ * until we have collected @swap_cluster_max pages to scan.
+ */
+static unsigned long nr_scan_try_batch(unsigned long nr_to_scan,
+                                      unsigned long *nr_saved_scan,
+                                      unsigned long swap_cluster_max)
+{
+       unsigned long nr;
+
+       *nr_saved_scan += nr_to_scan;
+       nr = *nr_saved_scan;
+
+       if (nr >= swap_cluster_max)
+               *nr_saved_scan = 0;
+       else
+               nr = 0;
+
+       return nr;
+}
 
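
nr_scan_try_batch() keeps small per-priority scan targets from being dropped on the floor; a user-space copy of the helper with a worked run (the 32-page threshold stands in for SWAP_CLUSTER_MAX in this kernel):

    #include <stdio.h>

    /* User-space copy of nr_scan_try_batch() above. */
    static unsigned long nr_scan_try_batch(unsigned long nr_to_scan,
                                           unsigned long *nr_saved_scan,
                                           unsigned long swap_cluster_max)
    {
        unsigned long nr;

        *nr_saved_scan += nr_to_scan;
        nr = *nr_saved_scan;

        if (nr >= swap_cluster_max)
            *nr_saved_scan = 0;
        else
            nr = 0;

        return nr;
    }

    int main(void)
    {
        unsigned long saved = 0;
        int pass;

        /* Three passes asking for 12 pages each, threshold 32: */
        for (pass = 0; pass < 3; pass++) {
            unsigned long scan = nr_scan_try_batch(12, &saved, 32);
            printf("pass %d: scan %lu now, %lu deferred\n", pass, scan, saved);
        }
        /* Prints 0/12, 0/24, then 36/0: nothing is scanned until 32 accumulate. */
        return 0;
    }

Before this change the deferred count lived in zone->lru[l].nr_scan and the logic was open-coded in shrink_zone(); the rename to nr_saved_scan reflects the new meaning.
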
 /*
  * This is a basic per-zone page freer.  Used by both kswapd and direct reclaim.
@@ -1462,26 +1581,30 @@ static void shrink_zone(int priority, struct zone *zone,
        enum lru_list l;
        unsigned long nr_reclaimed = sc->nr_reclaimed;
        unsigned long swap_cluster_max = sc->swap_cluster_max;
+       int noswap = 0;
 
-       get_scan_ratio(zone, sc, percent);
+       /* If we have no swap space, do not bother scanning anon pages. */
+       if (!sc->may_swap || (nr_swap_pages <= 0)) {
+               noswap = 1;
+               percent[0] = 0;
+               percent[1] = 100;
+       } else
+               get_scan_ratio(zone, sc, percent);
 
        for_each_evictable_lru(l) {
                int file = is_file_lru(l);
-               int scan;
+               unsigned long scan;
 
-               scan = zone_page_state(zone, NR_LRU_BASE + l);
-               if (priority) {
+               scan = zone_nr_pages(zone, sc, l);
+               if (priority || noswap) {
                        scan >>= priority;
                        scan = (scan * percent[file]) / 100;
                }
-               if (scanning_global_lru(sc)) {
-                       zone->lru[l].nr_scan += scan;
-                       nr[l] = zone->lru[l].nr_scan;
-                       if (nr[l] >= swap_cluster_max)
-                               zone->lru[l].nr_scan = 0;
-                       else
-                               nr[l] = 0;
-               } else
+               if (scanning_global_lru(sc))
+                       nr[l] = nr_scan_try_batch(scan,
+                                                 &zone->lru[l].nr_saved_scan,
+                                                 swap_cluster_max);
+               else
                        nr[l] = scan;
        }
 
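
Putting the pieces together, each LRU list's scan target is the list size shifted down by the priority, scaled by that list's share from get_scan_ratio(), and then batched by nr_scan_try_batch(). A worked sketch with made-up numbers:

    #include <stdio.h>

    int main(void)
    {
        unsigned long lru_size = 1000000;   /* pages on one LRU list (made up) */
        unsigned long percent  = 60;        /* this list's share from get_scan_ratio() (made up) */
        int priority;

        for (priority = 12; priority >= 0; priority -= 4) {
            unsigned long scan = lru_size;

            if (priority) {     /* at priority 0 the whole list is eligible */
                scan >>= priority;
                scan = scan * percent / 100;
            }
            printf("priority %2d -> scan target %lu\n", priority, scan);
        }
        /* 146 at priority 12, 2343 at 8, 37500 at 4, all 1000000 at 0. */
        return 0;
    }
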
@@ -1515,7 +1638,7 @@ static void shrink_zone(int priority, struct zone *zone,
         * Even if we did not try to evict anon pages at all, we want to
         * rebalance the anon lru active/inactive ratio.
         */
-       if (inactive_anon_is_low(zone, sc))
+       if (inactive_anon_is_low(zone, sc) && nr_swap_pages > 0)
                shrink_active_list(SWAP_CLUSTER_MAX, zone, sc, priority, 0);
 
        throttle_vm_writeout(sc->gfp_mask);
@@ -1526,11 +1649,13 @@ static void shrink_zone(int priority, struct zone *zone,
  * try to reclaim pages from zones which will satisfy the caller's allocation
  * request.
  *
- * We reclaim from a zone even if that zone is over pages_high.  Because:
+ * We reclaim from a zone even if that zone is over high_wmark_pages(zone).
+ * Because:
  * a) The caller may be trying to free *extra* pages to satisfy a higher-order
  *    allocation or
- * b) The zones may be over pages_high but they must go *over* pages_high to
- *    satisfy the `incremental min' zone defense algorithm.
+ * b) The target zone may be at high_wmark_pages(zone) but the lower zones
+ *    must go *over* high_wmark_pages(zone) to satisfy the `incremental min'
+ *    zone defense algorithm.
  *
  * If a zone is deemed to be full of pinned pages then just give it a light
  * scan then give up on it.
@@ -1543,7 +1668,8 @@ static void shrink_zones(int priority, struct zonelist *zonelist,
        struct zone *zone;
 
        sc->all_unreclaimable = 1;
-       for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
+       for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx,
+                                       sc->nodemask) {
                if (!populated_zone(zone))
                        continue;
                /*
@@ -1614,7 +1740,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
                        if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
                                continue;
 
-                       lru_pages += zone_lru_pages(zone);
+                       lru_pages += zone_reclaimable_pages(zone);
                }
        }
 
@@ -1649,13 +1775,13 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
                 */
                if (total_scanned > sc->swap_cluster_max +
                                        sc->swap_cluster_max / 2) {
-                       wakeup_pdflush(laptop_mode ? 0 : total_scanned);
+                       wakeup_flusher_threads(laptop_mode ? 0 : total_scanned);
                        sc->may_writepage = 1;
                }
 
                /* Take a nap, wait for some writeback to complete */
                if (sc->nr_scanned && priority < DEF_PRIORITY - 2)
-                       congestion_wait(WRITE, HZ/10);
+                       congestion_wait(BLK_RW_ASYNC, HZ/10);
        }
        /* top priority shrink_zones still had more to do? don't OOM, then */
        if (!sc->all_unreclaimable && scanning_global_lru(sc))
@@ -1688,17 +1814,19 @@ out:
 }
 
 unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
-                                                               gfp_t gfp_mask)
+                               gfp_t gfp_mask, nodemask_t *nodemask)
 {
        struct scan_control sc = {
                .gfp_mask = gfp_mask,
                .may_writepage = !laptop_mode,
                .swap_cluster_max = SWAP_CLUSTER_MAX,
+               .may_unmap = 1,
                .may_swap = 1,
                .swappiness = vm_swappiness,
                .order = order,
                .mem_cgroup = NULL,
                .isolate_pages = isolate_pages_global,
+               .nodemask = nodemask,
        };
 
        return do_try_to_free_pages(zonelist, &sc);
@@ -1713,18 +1841,17 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
 {
        struct scan_control sc = {
                .may_writepage = !laptop_mode,
-               .may_swap = 1,
+               .may_unmap = 1,
+               .may_swap = !noswap,
                .swap_cluster_max = SWAP_CLUSTER_MAX,
                .swappiness = swappiness,
                .order = 0,
                .mem_cgroup = mem_cont,
                .isolate_pages = mem_cgroup_isolate_pages,
+               .nodemask = NULL, /* we don't care about placement */
        };
        struct zonelist *zonelist;
 
-       if (noswap)
-               sc.may_swap = 0;
-
        sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
                        (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
        zonelist = NODE_DATA(numa_node_id())->node_zonelists;
@@ -1734,7 +1861,7 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
 
 /*
  * For kswapd, balance_pgdat() will work across all this node's zones until
- * they are all at pages_high.
+ * they are all at high_wmark_pages(zone).
  *
  * Returns the number of pages which were actually freed.
  *
@@ -1747,11 +1874,11 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
  * the zone for when the problem goes away.
  *
  * kswapd scans the zones in the highmem->normal->dma direction.  It skips
- * zones which have free_pages > pages_high, but once a zone is found to have
- * free_pages <= pages_high, we scan that zone and the lower zones regardless
- * of the number of free pages in the lower zones.  This interoperates with
- * the page allocator fallback scheme to ensure that aging of pages is balanced
- * across the zones.
+ * zones which have free_pages > high_wmark_pages(zone), but once a zone is
+ * found to have free_pages <= high_wmark_pages(zone), we scan that zone and the
+ * lower zones regardless of the number of free pages in the lower zones. This
+ * interoperates with the page allocator fallback scheme to ensure that aging
+ * of pages is balanced across the zones.
  */
 static unsigned long balance_pgdat(pg_data_t *pgdat, int order)
 {
@@ -1762,6 +1889,7 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order)
        struct reclaim_state *reclaim_state = current->reclaim_state;
        struct scan_control sc = {
                .gfp_mask = GFP_KERNEL,
+               .may_unmap = 1,
                .may_swap = 1,
                .swap_cluster_max = SWAP_CLUSTER_MAX,
                .swappiness = vm_swappiness,
@@ -1771,7 +1899,8 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order)
        };
        /*
         * temp_priority is used to remember the scanning priority at which
-        * this zone was successfully refilled to free_pages == pages_high.
+        * this zone was successfully refilled to
+        * free_pages == high_wmark_pages(zone).
         */
        int temp_priority[MAX_NR_ZONES];
 
@@ -1816,8 +1945,8 @@ loop_again:
                                shrink_active_list(SWAP_CLUSTER_MAX, zone,
                                                        &sc, priority, 0);
 
-                       if (!zone_watermark_ok(zone, order, zone->pages_high,
-                                              0, 0)) {
+                       if (!zone_watermark_ok(zone, order,
+                                       high_wmark_pages(zone), 0, 0)) {
                                end_zone = i;
                                break;
                        }
@@ -1828,7 +1957,7 @@ loop_again:
                for (i = 0; i <= end_zone; i++) {
                        struct zone *zone = pgdat->node_zones + i;
 
-                       lru_pages += zone_lru_pages(zone);
+                       lru_pages += zone_reclaimable_pages(zone);
                }
 
                /*
@@ -1851,8 +1980,8 @@ loop_again:
                                        priority != DEF_PRIORITY)
                                continue;
 
-                       if (!zone_watermark_ok(zone, order, zone->pages_high,
-                                              end_zone, 0))
+                       if (!zone_watermark_ok(zone, order,
+                                       high_wmark_pages(zone), end_zone, 0))
                                all_zones_ok = 0;
                        temp_priority[i] = priority;
                        sc.nr_scanned = 0;
@@ -1861,8 +1990,8 @@ loop_again:
                         * We put equal pressure on every zone, unless one
                         * zone has way too many pages free already.
                         */
-                       if (!zone_watermark_ok(zone, order, 8*zone->pages_high,
-                                               end_zone, 0))
+                       if (!zone_watermark_ok(zone, order,
+                                       8*high_wmark_pages(zone), end_zone, 0))
                                shrink_zone(priority, zone, &sc);
                        reclaim_state->reclaimed_slab = 0;
                        nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL,
@@ -1872,7 +2001,7 @@ loop_again:
                        if (zone_is_all_unreclaimable(zone))
                                continue;
                        if (nr_slab == 0 && zone->pages_scanned >=
-                                               (zone_lru_pages(zone) * 6))
+                                       (zone_reclaimable_pages(zone) * 6))
                                        zone_set_flag(zone,
                                                      ZONE_ALL_UNRECLAIMABLE);
                        /*
@@ -1891,7 +2020,7 @@ loop_again:
                 * another pass across the zones.
                 */
                if (total_scanned && priority < DEF_PRIORITY - 2)
-                       congestion_wait(WRITE, HZ/10);
+                       congestion_wait(BLK_RW_ASYNC, HZ/10);
 
                /*
                 * We do this so kswapd doesn't build up large priorities for
@@ -1963,7 +2092,7 @@ static int kswapd(void *p)
        struct reclaim_state reclaim_state = {
                .reclaimed_slab = 0,
        };
-       node_to_cpumask_ptr(cpumask, pgdat->node_id);
+       const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
 
        lockdep_set_current_reclaim_state(GFP_KERNEL);
 
@@ -2028,7 +2157,7 @@ void wakeup_kswapd(struct zone *zone, int order)
                return;
 
        pgdat = zone->zone_pgdat;
-       if (zone_watermark_ok(zone, order, zone->pages_low, 0, 0))
+       if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0, 0))
                return;
        if (pgdat->kswapd_max_order < order)
                pgdat->kswapd_max_order = order;
@@ -2039,33 +2168,57 @@ void wakeup_kswapd(struct zone *zone, int order)
        wake_up_interruptible(&pgdat->kswapd_wait);
 }
 
-unsigned long global_lru_pages(void)
+/*
+ * The reclaimable count should be mostly accurate.
+ * The less reclaimable pages are:
+ * - mlocked pages, which will be moved to the unevictable list when encountered
+ * - mapped pages, which may require several passes to be reclaimed
+ * - dirty pages, which are not "instantly" reclaimable
+ */
+unsigned long global_reclaimable_pages(void)
 {
-       return global_page_state(NR_ACTIVE_ANON)
-               + global_page_state(NR_ACTIVE_FILE)
-               + global_page_state(NR_INACTIVE_ANON)
-               + global_page_state(NR_INACTIVE_FILE);
+       int nr;
+
+       nr = global_page_state(NR_ACTIVE_FILE) +
+            global_page_state(NR_INACTIVE_FILE);
+
+       if (nr_swap_pages > 0)
+               nr += global_page_state(NR_ACTIVE_ANON) +
+                     global_page_state(NR_INACTIVE_ANON);
+
+       return nr;
+}
+
+unsigned long zone_reclaimable_pages(struct zone *zone)
+{
+       int nr;
+
+       nr = zone_page_state(zone, NR_ACTIVE_FILE) +
+            zone_page_state(zone, NR_INACTIVE_FILE);
+
+       if (nr_swap_pages > 0)
+               nr += zone_page_state(zone, NR_ACTIVE_ANON) +
+                     zone_page_state(zone, NR_INACTIVE_ANON);
+
+       return nr;
 }
 
-#ifdef CONFIG_PM
+#ifdef CONFIG_HIBERNATION
 /*
  * Helper function for shrink_all_memory().  Tries to reclaim 'nr_pages' pages
- * from LRU lists system-wide, for given pass and priority, and returns the
- * number of reclaimed pages
+ * from LRU lists system-wide, for given pass and priority.
  *
  * For pass > 3 we also try to shrink the LRU lists that contain a few pages
  */
-static unsigned long shrink_all_zones(unsigned long nr_pages, int prio,
+static void shrink_all_zones(unsigned long nr_pages, int prio,
                                      int pass, struct scan_control *sc)
 {
        struct zone *zone;
-       unsigned long ret = 0;
+       unsigned long nr_reclaimed = 0;
 
-       for_each_zone(zone) {
+       for_each_populated_zone(zone) {
                enum lru_list l;
 
-               if (!populated_zone(zone))
-                       continue;
                if (zone_is_all_unreclaimable(zone) && prio != DEF_PRIORITY)
                        continue;
 
@@ -2078,20 +2231,22 @@ static unsigned long shrink_all_zones(unsigned long nr_pages, int prio,
                                                l == LRU_ACTIVE_FILE))
                                continue;
 
-                       zone->lru[l].nr_scan += (lru_pages >> prio) + 1;
-                       if (zone->lru[l].nr_scan >= nr_pages || pass > 3) {
+                       zone->lru[l].nr_saved_scan += (lru_pages >> prio) + 1;
+                       if (zone->lru[l].nr_saved_scan >= nr_pages || pass > 3) {
                                unsigned long nr_to_scan;
 
-                               zone->lru[l].nr_scan = 0;
+                               zone->lru[l].nr_saved_scan = 0;
                                nr_to_scan = min(nr_pages, lru_pages);
-                               ret += shrink_list(l, nr_to_scan, zone,
+                               nr_reclaimed += shrink_list(l, nr_to_scan, zone,
                                                                sc, prio);
-                               if (ret >= nr_pages)
-                                       return ret;
+                               if (nr_reclaimed >= nr_pages) {
+                                       sc->nr_reclaimed += nr_reclaimed;
+                                       return;
+                               }
                        }
                }
        }
-       return ret;
+       sc->nr_reclaimed += nr_reclaimed;
 }
 
 /*
@@ -2105,20 +2260,19 @@ static unsigned long shrink_all_zones(unsigned long nr_pages, int prio,
 unsigned long shrink_all_memory(unsigned long nr_pages)
 {
        unsigned long lru_pages, nr_slab;
-       unsigned long ret = 0;
        int pass;
        struct reclaim_state reclaim_state;
        struct scan_control sc = {
                .gfp_mask = GFP_KERNEL,
-               .may_swap = 0,
-               .swap_cluster_max = nr_pages,
+               .may_unmap = 0,
                .may_writepage = 1,
                .isolate_pages = isolate_pages_global,
+               .nr_reclaimed = 0,
        };
 
        current->reclaim_state = &reclaim_state;
 
-       lru_pages = global_lru_pages();
+       lru_pages = global_reclaimable_pages();
        nr_slab = global_page_state(NR_SLAB_RECLAIMABLE);
        /* If slab caches are huge, it's better to hit them first */
        while (nr_slab >= lru_pages) {
@@ -2127,8 +2281,8 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
                if (!reclaim_state.reclaimed_slab)
                        break;
 
-               ret += reclaim_state.reclaimed_slab;
-               if (ret >= nr_pages)
+               sc.nr_reclaimed += reclaim_state.reclaimed_slab;
+               if (sc.nr_reclaimed >= nr_pages)
                        goto out;
 
                nr_slab -= reclaim_state.reclaimed_slab;
@@ -2147,46 +2301,50 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
 
                /* Force reclaiming mapped pages in the passes #3 and #4 */
                if (pass > 2)
-                       sc.may_swap = 1;
+                       sc.may_unmap = 1;
 
                for (prio = DEF_PRIORITY; prio >= 0; prio--) {
-                       unsigned long nr_to_scan = nr_pages - ret;
+                       unsigned long nr_to_scan = nr_pages - sc.nr_reclaimed;
 
                        sc.nr_scanned = 0;
-                       ret += shrink_all_zones(nr_to_scan, prio, pass, &sc);
-                       if (ret >= nr_pages)
+                       sc.swap_cluster_max = nr_to_scan;
+                       shrink_all_zones(nr_to_scan, prio, pass, &sc);
+                       if (sc.nr_reclaimed >= nr_pages)
                                goto out;
 
                        reclaim_state.reclaimed_slab = 0;
                        shrink_slab(sc.nr_scanned, sc.gfp_mask,
-                                       global_lru_pages());
-                       ret += reclaim_state.reclaimed_slab;
-                       if (ret >= nr_pages)
+                                   global_reclaimable_pages());
+                       sc.nr_reclaimed += reclaim_state.reclaimed_slab;
+                       if (sc.nr_reclaimed >= nr_pages)
                                goto out;
 
                        if (sc.nr_scanned && prio < DEF_PRIORITY - 2)
-                               congestion_wait(WRITE, HZ / 10);
+                               congestion_wait(BLK_RW_ASYNC, HZ / 10);
                }
        }
 
        /*
-        * If ret = 0, we could not shrink LRUs, but there may be something
-        * in slab caches
+        * If sc.nr_reclaimed = 0, we could not shrink LRUs, but there may be
+        * something in slab caches
         */
-       if (!ret) {
+       if (!sc.nr_reclaimed) {
                do {
                        reclaim_state.reclaimed_slab = 0;
-                       shrink_slab(nr_pages, sc.gfp_mask, global_lru_pages());
-                       ret += reclaim_state.reclaimed_slab;
-               } while (ret < nr_pages && reclaim_state.reclaimed_slab > 0);
+                       shrink_slab(nr_pages, sc.gfp_mask,
+                                   global_reclaimable_pages());
+                       sc.nr_reclaimed += reclaim_state.reclaimed_slab;
+               } while (sc.nr_reclaimed < nr_pages &&
+                               reclaim_state.reclaimed_slab > 0);
        }
 
+
 out:
        current->reclaim_state = NULL;
 
-       return ret;
+       return sc.nr_reclaimed;
 }
-#endif
+#endif /* CONFIG_HIBERNATION */
 
 /* It's optimal to keep kswapds on the same CPUs as their memory, but
    not required for correctness.  So if the last cpu in a node goes
@@ -2200,7 +2358,9 @@ static int __devinit cpu_callback(struct notifier_block *nfb,
        if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) {
                for_each_node_state(nid, N_HIGH_MEMORY) {
                        pg_data_t *pgdat = NODE_DATA(nid);
-                       node_to_cpumask_ptr(mask, pgdat->node_id);
+                       const struct cpumask *mask;
+
+                       mask = cpumask_of_node(pgdat->node_id);
 
                        if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
                                /* One of our CPUs online: restore mask */
@@ -2278,6 +2438,48 @@ int sysctl_min_unmapped_ratio = 1;
  */
 int sysctl_min_slab_ratio = 5;
 
+static inline unsigned long zone_unmapped_file_pages(struct zone *zone)
+{
+       unsigned long file_mapped = zone_page_state(zone, NR_FILE_MAPPED);
+       unsigned long file_lru = zone_page_state(zone, NR_INACTIVE_FILE) +
+               zone_page_state(zone, NR_ACTIVE_FILE);
+
+       /*
+        * It's possible for there to be more file mapped pages than
+        * accounted for by the pages on the file LRU lists because
+        * tmpfs pages accounted for as ANON can also be FILE_MAPPED
+        */
+       return (file_lru > file_mapped) ? (file_lru - file_mapped) : 0;
+}
+
+/* Work out how many page cache pages we can reclaim in this reclaim_mode */
+static long zone_pagecache_reclaimable(struct zone *zone)
+{
+       long nr_pagecache_reclaimable;
+       long delta = 0;
+
+       /*
+        * If RECLAIM_SWAP is set, then all file pages are considered
+        * potentially reclaimable. Otherwise, we have to worry about
+        * pages like swapcache, and zone_unmapped_file_pages() provides
+        * a better estimate.
+        */
+       if (zone_reclaim_mode & RECLAIM_SWAP)
+               nr_pagecache_reclaimable = zone_page_state(zone, NR_FILE_PAGES);
+       else
+               nr_pagecache_reclaimable = zone_unmapped_file_pages(zone);
+
+       /* If we can't clean pages, remove dirty pages from consideration */
+       if (!(zone_reclaim_mode & RECLAIM_WRITE))
+               delta += zone_page_state(zone, NR_FILE_DIRTY);
+
+       /* Watch for any possible underflows due to delta */
+       if (unlikely(delta > nr_pagecache_reclaimable))
+               delta = nr_pagecache_reclaimable;
+
+       return nr_pagecache_reclaimable - delta;
+}
+
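
zone_pagecache_reclaimable() is easiest to follow with numbers; a stand-alone sketch of the two helpers above fed with made-up zone counters (RECLAIM_WRITE and RECLAIM_SWAP are the existing zone_reclaim_mode bits defined earlier in vmscan.c; the bit values and counter values below are assumptions for illustration):

    #include <stdio.h>

    #define RECLAIM_WRITE   (1<<1)  /* writeout pages during zone reclaim */
    #define RECLAIM_SWAP    (1<<2)  /* unmap/swap pages during zone reclaim */

    /* Made-up per-zone counters standing in for zone_page_state(). */
    static const long nr_file_pages  = 50000;   /* NR_FILE_PAGES */
    static const long nr_file_lru    = 40000;   /* active + inactive file */
    static const long nr_file_mapped = 15000;   /* NR_FILE_MAPPED */
    static const long nr_file_dirty  = 8000;    /* NR_FILE_DIRTY */

    static long pagecache_reclaimable(int zone_reclaim_mode)
    {
        long reclaimable, delta = 0;

        if (zone_reclaim_mode & RECLAIM_SWAP)
            reclaimable = nr_file_pages;    /* everything is fair game */
        else
            reclaimable = nr_file_lru > nr_file_mapped ?
                          nr_file_lru - nr_file_mapped : 0;

        if (!(zone_reclaim_mode & RECLAIM_WRITE))
            delta += nr_file_dirty;         /* can't clean dirty pages */

        if (delta > reclaimable)
            delta = reclaimable;
        return reclaimable - delta;
    }

    int main(void)
    {
        printf("%ld\n", pagecache_reclaimable(0));              /* 17000 */
        printf("%ld\n", pagecache_reclaimable(RECLAIM_WRITE));  /* 25000 */
        printf("%ld\n", pagecache_reclaimable(RECLAIM_SWAP));   /* 42000 */
        return 0;
    }
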
 /*
  * Try to free up some pages from this zone through reclaim.
  */
@@ -2290,11 +2492,13 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
        int priority;
        struct scan_control sc = {
                .may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE),
-               .may_swap = !!(zone_reclaim_mode & RECLAIM_SWAP),
+               .may_unmap = !!(zone_reclaim_mode & RECLAIM_SWAP),
+               .may_swap = 1,
                .swap_cluster_max = max_t(unsigned long, nr_pages,
                                        SWAP_CLUSTER_MAX),
                .gfp_mask = gfp_mask,
                .swappiness = vm_swappiness,
+               .order = order,
                .isolate_pages = isolate_pages_global,
        };
        unsigned long slab_reclaimable;
@@ -2310,9 +2514,7 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
        reclaim_state.reclaimed_slab = 0;
        p->reclaim_state = &reclaim_state;
 
-       if (zone_page_state(zone, NR_FILE_PAGES) -
-               zone_page_state(zone, NR_FILE_MAPPED) >
-               zone->min_unmapped_pages) {
+       if (zone_pagecache_reclaimable(zone) > zone->min_unmapped_pages) {
                /*
                 * Free memory by calling shrink zone with increasing
                 * priorities until we have enough memory freed.
@@ -2370,20 +2572,18 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
         * if less than a specified percentage of the zone is used by
         * unmapped file backed pages.
         */
-       if (zone_page_state(zone, NR_FILE_PAGES) -
-           zone_page_state(zone, NR_FILE_MAPPED) <= zone->min_unmapped_pages
-           && zone_page_state(zone, NR_SLAB_RECLAIMABLE)
-                       <= zone->min_slab_pages)
-               return 0;
+       if (zone_pagecache_reclaimable(zone) <= zone->min_unmapped_pages &&
+           zone_page_state(zone, NR_SLAB_RECLAIMABLE) <= zone->min_slab_pages)
+               return ZONE_RECLAIM_FULL;
 
        if (zone_is_all_unreclaimable(zone))
-               return 0;
+               return ZONE_RECLAIM_FULL;
 
        /*
         * Do not scan if the allocation should not be delayed.
         */
        if (!(gfp_mask & __GFP_WAIT) || (current->flags & PF_MEMALLOC))
-                       return 0;
+               return ZONE_RECLAIM_NOSCAN;
 
        /*
         * Only run zone reclaim on the local zone or on zones that do not
@@ -2393,18 +2593,21 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
         */
        node_id = zone_to_nid(zone);
        if (node_state(node_id, N_CPU) && node_id != numa_node_id())
-               return 0;
+               return ZONE_RECLAIM_NOSCAN;
 
        if (zone_test_and_set_flag(zone, ZONE_RECLAIM_LOCKED))
-               return 0;
+               return ZONE_RECLAIM_NOSCAN;
+
        ret = __zone_reclaim(zone, gfp_mask, order);
        zone_clear_flag(zone, ZONE_RECLAIM_LOCKED);
 
+       if (!ret)
+               count_vm_event(PGSCAN_ZONE_RECLAIM_FAILED);
+
        return ret;
 }
 #endif
 
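
The symbolic returns let the caller distinguish "did not scan" from "scanned and failed" instead of treating every zero as failure. A sketch of how a caller might act on them; the constant values and the helper below are assumptions modelled loosely on the page allocator's use, not code from this diff:

    #include <stdio.h>

    /* Assumed values from the accompanying mm/internal.h change (not shown here). */
    #define ZONE_RECLAIM_NOSCAN   -2  /* zone reclaim decided not to scan */
    #define ZONE_RECLAIM_FULL     -1  /* scanning is pointless, zone stays full */
    #define ZONE_RECLAIM_SUCCESS   1  /* reclaimed enough pages */

    /* Illustrative caller, loosely modelled on the page allocator. */
    static const char *interpret(int ret, int watermark_ok_after)
    {
        switch (ret) {
        case ZONE_RECLAIM_NOSCAN:
            return "not scanned: try the next zone";
        case ZONE_RECLAIM_FULL:
            return "scanned and failed: treat zone as full";
        default:
            return watermark_ok_after ? "reclaimed enough: allocate here"
                                      : "some progress, still below watermark";
        }
    }

    int main(void)
    {
        printf("%s\n", interpret(ZONE_RECLAIM_NOSCAN, 0));
        printf("%s\n", interpret(ZONE_RECLAIM_FULL, 0));
        printf("%s\n", interpret(ZONE_RECLAIM_SUCCESS, 1));
        return 0;
    }
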
-#ifdef CONFIG_UNEVICTABLE_LRU
 /*
  * page_evictable - test whether a page is evictable
  * @page: the page to test
@@ -2449,7 +2652,7 @@ static void check_move_unevictable_page(struct page *page, struct zone *zone)
 retry:
        ClearPageUnevictable(page);
        if (page_evictable(page, NULL)) {
-               enum lru_list l = LRU_INACTIVE_ANON + page_is_file_cache(page);
+               enum lru_list l = page_lru_base_type(page);
 
                __dec_zone_state(zone, NR_UNEVICTABLE);
                list_move(&page->lru, &zone->lru[l].list);
@@ -2651,4 +2854,3 @@ void scan_unevictable_unregister_node(struct node *node)
        sysdev_remove_file(&node->sysdev, &attr_scan_unevictable_pages);
 }
 
-#endif