vmscan: report vm_flags in page_referenced()
diff --git a/mm/vmscan.c b/mm/vmscan.c
index f4619c6..6be2068 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -63,6 +63,9 @@ struct scan_control {
        /* Can mapped pages be reclaimed? */
        int may_unmap;
 
+       /* Can pages be swapped as part of reclaim? */
+       int may_swap;
+
        /* This context's SWAP_CLUSTER_MAX. If freeing memory for
         * suspend, we effectively ignore SWAP_CLUSTER_MAX.
         * In this context, it doesn't matter that we scan the
@@ -78,6 +81,12 @@ struct scan_control {
        /* Which cgroup do we reclaim from */
        struct mem_cgroup *mem_cgroup;
 
+       /*
+        * Nodemask of nodes allowed by the caller. If NULL, all nodes
+        * are scanned.
+        */
+       nodemask_t      *nodemask;
+
        /* Pluggable isolate pages callback */
        unsigned long (*isolate_pages)(unsigned long nr, struct list_head *dst,
                        unsigned long *scanned, int order, int mode,
@@ -277,7 +286,7 @@ static inline int page_mapping_inuse(struct page *page)
 
 static inline int is_page_cache_freeable(struct page *page)
 {
-       return page_count(page) - !!PagePrivate(page) == 2;
+       return page_count(page) - !!page_has_private(page) == 2;
 }
 
 static int may_write_to_queue(struct backing_dev_info *bdi)
@@ -361,7 +370,7 @@ static pageout_t pageout(struct page *page, struct address_space *mapping,
                 * Some data journaling orphaned pages can have
                 * page->mapping == NULL while being dirty with clean buffers.
                 */
-               if (PagePrivate(page)) {
+               if (page_has_private(page)) {
                        if (try_to_free_buffers(page)) {
                                ClearPageDirty(page);
                                printk("%s: orphaned page\n", __func__);
@@ -461,10 +470,11 @@ static int __remove_mapping(struct address_space *mapping, struct page *page)
                swp_entry_t swap = { .val = page_private(page) };
                __delete_from_swap_cache(page);
                spin_unlock_irq(&mapping->tree_lock);
-               swap_free(swap);
+               swapcache_free(swap, page);
        } else {
                __remove_from_page_cache(page);
                spin_unlock_irq(&mapping->tree_lock);
+               mem_cgroup_uncharge_cache_page(page);
        }
 
        return 1;
@@ -503,7 +513,6 @@ int remove_mapping(struct address_space *mapping, struct page *page)
  *
  * lru_lock must not be held, interrupts must be enabled.
  */
-#ifdef CONFIG_UNEVICTABLE_LRU
 void putback_lru_page(struct page *page)
 {
        int lru;
@@ -557,20 +566,6 @@ redo:
        put_page(page);         /* drop ref from isolate */
 }
 
-#else /* CONFIG_UNEVICTABLE_LRU */
-
-void putback_lru_page(struct page *page)
-{
-       int lru;
-       VM_BUG_ON(PageLRU(page));
-
-       lru = !!TestClearPageActive(page) + page_is_file_cache(page);
-       lru_cache_add_lru(page, lru);
-       put_page(page);
-}
-#endif /* CONFIG_UNEVICTABLE_LRU */
-
-
 /*
  * shrink_page_list() returns the number of reclaimed pages
  */
@@ -582,6 +577,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
        struct pagevec freed_pvec;
        int pgactivate = 0;
        unsigned long nr_reclaimed = 0;
+       unsigned long vm_flags;
 
        cond_resched();
 
@@ -632,7 +628,8 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                                goto keep_locked;
                }
 
-               referenced = page_referenced(page, 1, sc->mem_cgroup);
+               referenced = page_referenced(page, 1,
+                                               sc->mem_cgroup, &vm_flags);
                /* In active use or really unfreeable?  Activate it. */
                if (sc->order <= PAGE_ALLOC_COSTLY_ORDER &&
                                        referenced && page_mapping_inuse(page))
@@ -721,7 +718,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                 * process address space (page_count == 1) it can be freed.
                 * Otherwise, leave the page on the LRU so it is swappable.
                 */
-               if (PagePrivate(page)) {
+               if (page_has_private(page)) {
                        if (!try_to_release_page(page, sc->gfp_mask))
                                goto activate_locked;
                        if (!mapping && page_count(page) == 1) {
@@ -1050,6 +1047,19 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
        unsigned long nr_scanned = 0;
        unsigned long nr_reclaimed = 0;
        struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
+       int lumpy_reclaim = 0;
+
+       /*
+        * If we need a large contiguous chunk of memory, or have
+        * trouble getting a small set of contiguous pages, we
+        * will reclaim both active and inactive pages.
+        *
+        * We use the same threshold as pageout congestion_wait below.
+        */
+       if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
+               lumpy_reclaim = 1;
+       else if (sc->order && priority < DEF_PRIORITY - 2)
+               lumpy_reclaim = 1;
 
        pagevec_init(&pvec, 1);
 
@@ -1062,19 +1072,7 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
                unsigned long nr_freed;
                unsigned long nr_active;
                unsigned int count[NR_LRU_LISTS] = { 0, };
-               int mode = ISOLATE_INACTIVE;
-
-               /*
-                * If we need a large contiguous chunk of memory, or have
-                * trouble getting a small set of contiguous pages, we
-                * will reclaim both active and inactive pages.
-                *
-                * We use the same threshold as pageout congestion_wait below.
-                */
-               if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
-                       mode = ISOLATE_BOTH;
-               else if (sc->order && priority < DEF_PRIORITY - 2)
-                       mode = ISOLATE_BOTH;
+               int mode = lumpy_reclaim ? ISOLATE_BOTH : ISOLATE_INACTIVE;
 
                nr_taken = sc->isolate_pages(sc->swap_cluster_max,
                             &page_list, &nr_scan, sc->order, mode,
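
The two hunks above only hoist the lumpy-reclaim decision out of the scan loop into a single lumpy_reclaim flag; the policy itself is unchanged. A minimal userspace sketch of that policy (the PAGE_ALLOC_COSTLY_ORDER and DEF_PRIORITY values are the kernel's, everything else is illustrative and not part of the patch):

#include <stdio.h>

#define PAGE_ALLOC_COSTLY_ORDER 3	/* as in the kernel headers */
#define DEF_PRIORITY            12

/* Decision made once per shrink_inactive_list() call in the hunks above. */
static int want_lumpy_reclaim(int order, int priority)
{
	if (order > PAGE_ALLOC_COSTLY_ORDER)
		return 1;			/* large contiguous allocation */
	if (order && priority < DEF_PRIORITY - 2)
		return 1;			/* small order, but reclaim is struggling */
	return 0;
}

int main(void)
{
	/* order-4 request: lumpy immediately; order-2: only under pressure */
	printf("%d %d %d\n",
	       want_lumpy_reclaim(4, DEF_PRIORITY),		/* 1 */
	       want_lumpy_reclaim(2, DEF_PRIORITY),		/* 0 */
	       want_lumpy_reclaim(2, DEF_PRIORITY - 3));	/* 1 */
	return 0;
}
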
@@ -1111,7 +1109,7 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
                 * but that should be acceptable to the caller
                 */
                if (nr_freed < nr_taken && !current_is_kswapd() &&
-                                       sc->order > PAGE_ALLOC_COSTLY_ORDER) {
+                   lumpy_reclaim) {
                        congestion_wait(WRITE, HZ/10);
 
                        /*
@@ -1211,8 +1209,8 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
                        struct scan_control *sc, int priority, int file)
 {
        unsigned long pgmoved;
-       int pgdeactivate = 0;
        unsigned long pgscanned;
+       unsigned long vm_flags;
        LIST_HEAD(l_hold);      /* The pages which were snipped off */
        LIST_HEAD(l_inactive);
        struct page *page;
@@ -1240,7 +1238,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
                __mod_zone_page_state(zone, NR_ACTIVE_ANON, -pgmoved);
        spin_unlock_irq(&zone->lru_lock);
 
-       pgmoved = 0;
+       pgmoved = 0;  /* count referenced (mapping) mapped pages */
        while (!list_empty(&l_hold)) {
                cond_resched();
                page = lru_to_page(&l_hold);
@@ -1253,7 +1251,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 
                /* page_referenced clears PageReferenced */
                if (page_mapping_inuse(page) &&
-                   page_referenced(page, 0, sc->mem_cgroup))
+                   page_referenced(page, 0, sc->mem_cgroup, &vm_flags))
                        pgmoved++;
 
                list_add(&page->lru, &l_inactive);
@@ -1274,7 +1272,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
         */
        reclaim_stat->recent_rotated[!!file] += pgmoved;
 
-       pgmoved = 0;
+       pgmoved = 0;  /* count pages moved to inactive list */
        while (!list_empty(&l_inactive)) {
                page = lru_to_page(&l_inactive);
                prefetchw_prev_lru_page(page, &l_inactive, flags);
@@ -1287,10 +1285,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
                mem_cgroup_add_lru_list(page, lru);
                pgmoved++;
                if (!pagevec_add(&pvec, page)) {
-                       __mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved);
                        spin_unlock_irq(&zone->lru_lock);
-                       pgdeactivate += pgmoved;
-                       pgmoved = 0;
                        if (buffer_heads_over_limit)
                                pagevec_strip(&pvec);
                        __pagevec_release(&pvec);
@@ -1298,9 +1293,8 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
                }
        }
        __mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved);
-       pgdeactivate += pgmoved;
        __count_zone_vm_events(PGREFILL, zone, pgscanned);
-       __count_vm_events(PGDEACTIVATE, pgdeactivate);
+       __count_vm_events(PGDEACTIVATE, pgmoved);
        spin_unlock_irq(&zone->lru_lock);
        if (buffer_heads_over_limit)
                pagevec_strip(&pvec);
@@ -1339,12 +1333,48 @@ static int inactive_anon_is_low(struct zone *zone, struct scan_control *sc)
        return low;
 }
 
+static int inactive_file_is_low_global(struct zone *zone)
+{
+       unsigned long active, inactive;
+
+       active = zone_page_state(zone, NR_ACTIVE_FILE);
+       inactive = zone_page_state(zone, NR_INACTIVE_FILE);
+
+       return (active > inactive);
+}
+
+/**
+ * inactive_file_is_low - check if file pages need to be deactivated
+ * @zone: zone to check
+ * @sc:   scan control of this context
+ *
+ * When the system is doing streaming IO, memory pressure here
+ * ensures that active file pages get deactivated, until more
+ * than half of the file pages are on the inactive list.
+ *
+ * Once we get to that situation, protect the system's working
+ * set from being evicted by disabling active file page aging.
+ *
+ * This uses a different ratio than the anonymous pages, because
+ * the page cache uses a use-once replacement algorithm.
+ */
+static int inactive_file_is_low(struct zone *zone, struct scan_control *sc)
+{
+       int low;
+
+       if (scanning_global_lru(sc))
+               low = inactive_file_is_low_global(zone);
+       else
+               low = mem_cgroup_inactive_file_is_low(sc->mem_cgroup);
+       return low;
+}
+
 static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
        struct zone *zone, struct scan_control *sc, int priority)
 {
        int file = is_file_lru(lru);
 
-       if (lru == LRU_ACTIVE_FILE) {
+       if (lru == LRU_ACTIVE_FILE && inactive_file_is_low(zone, sc)) {
                shrink_active_list(nr_to_scan, zone, sc, priority, file);
                return 0;
        }
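
The inactive_file_is_low() comment above describes the policy: keep deactivating active file pages until the inactive file list holds at least half of the file pages, then stop aging the active list. A standalone sketch of how that check gates deactivation (userspace C, illustrative only; in the kernel the counters come from zone or memcg statistics):

#include <stdio.h>

/* Deactivate file pages only while active file pages outnumber inactive ones. */
static int inactive_file_is_low(unsigned long active, unsigned long inactive)
{
	return active > inactive;
}

int main(void)
{
	unsigned long active = 300, inactive = 100;	/* page counts, made up */

	/* The active list is only shrunk while it is the larger of the two. */
	while (inactive_file_is_low(active, inactive)) {
		active -= 10;		/* shrink_active_list() moves a batch ... */
		inactive += 10;		/* ... onto the inactive list */
	}
	printf("stopped at active=%lu inactive=%lu\n", active, inactive);	/* 200 200 */
	return 0;
}
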
@@ -1374,7 +1404,7 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
        struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
 
        /* If we have no swap space, do not bother scanning anon pages. */
-       if (nr_swap_pages <= 0) {
+       if (!sc->may_swap || (nr_swap_pages <= 0)) {
                percent[0] = 0;
                percent[1] = 100;
                return;
@@ -1389,7 +1419,7 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
                free  = zone_page_state(zone, NR_FREE_PAGES);
                /* If we have very few page cache pages,
                   force-scan anon pages. */
-               if (unlikely(file + free <= zone->pages_high)) {
+               if (unlikely(file + free <= high_wmark_pages(zone))) {
                        percent[0] = 100;
                        percent[1] = 0;
                        return;
@@ -1444,6 +1474,26 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
        percent[1] = 100 - percent[0];
 }
 
+/*
+ * Smallish @nr_to_scan's are deposited in @nr_saved_scan,
+ * until we have collected @swap_cluster_max pages to scan.
+ */
+static unsigned long nr_scan_try_batch(unsigned long nr_to_scan,
+                                      unsigned long *nr_saved_scan,
+                                      unsigned long swap_cluster_max)
+{
+       unsigned long nr;
+
+       *nr_saved_scan += nr_to_scan;
+       nr = *nr_saved_scan;
+
+       if (nr >= swap_cluster_max)
+               *nr_saved_scan = 0;
+       else
+               nr = 0;
+
+       return nr;
+}
 
 /*
  * This is a basic per-zone page freer.  Used by both kswapd and direct reclaim.
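
nr_scan_try_batch() only moves the open-coded remainder bookkeeping into a helper; the per-LRU leftover now lives in zone->lru[l].nr_saved_scan. Its accumulate-then-flush behaviour can be seen with a small standalone sketch (the helper body is copied from the hunk above, the driver is illustrative):

#include <stdio.h>

static unsigned long nr_scan_try_batch(unsigned long nr_to_scan,
				       unsigned long *nr_saved_scan,
				       unsigned long swap_cluster_max)
{
	unsigned long nr;

	*nr_saved_scan += nr_to_scan;
	nr = *nr_saved_scan;

	if (nr >= swap_cluster_max)
		*nr_saved_scan = 0;
	else
		nr = 0;

	return nr;
}

int main(void)
{
	unsigned long saved = 0;
	int i;

	/* Deposits of 12 pages against a swap_cluster_max of 32: the first two
	 * calls return 0 and accumulate, the third flushes the whole batch. */
	for (i = 0; i < 3; i++)
		printf("%lu ", nr_scan_try_batch(12, &saved, 32));
	printf("\n");	/* prints: 0 0 36 */
	return 0;
}
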
@@ -1462,21 +1512,18 @@ static void shrink_zone(int priority, struct zone *zone,
 
        for_each_evictable_lru(l) {
                int file = is_file_lru(l);
-               int scan;
+               unsigned long scan;
 
                scan = zone_nr_pages(zone, sc, l);
                if (priority) {
                        scan >>= priority;
                        scan = (scan * percent[file]) / 100;
                }
-               if (scanning_global_lru(sc)) {
-                       zone->lru[l].nr_scan += scan;
-                       nr[l] = zone->lru[l].nr_scan;
-                       if (nr[l] >= swap_cluster_max)
-                               zone->lru[l].nr_scan = 0;
-                       else
-                               nr[l] = 0;
-               } else
+               if (scanning_global_lru(sc))
+                       nr[l] = nr_scan_try_batch(scan,
+                                                 &zone->lru[l].nr_saved_scan,
+                                                 swap_cluster_max);
+               else
                        nr[l] = scan;
        }
 
@@ -1510,7 +1557,7 @@ static void shrink_zone(int priority, struct zone *zone,
         * Even if we did not try to evict anon pages at all, we want to
         * rebalance the anon lru active/inactive ratio.
         */
-       if (inactive_anon_is_low(zone, sc))
+       if (inactive_anon_is_low(zone, sc) && nr_swap_pages > 0)
                shrink_active_list(SWAP_CLUSTER_MAX, zone, sc, priority, 0);
 
        throttle_vm_writeout(sc->gfp_mask);
@@ -1521,11 +1568,13 @@ static void shrink_zone(int priority, struct zone *zone,
  * try to reclaim pages from zones which will satisfy the caller's allocation
  * request.
  *
- * We reclaim from a zone even if that zone is over pages_high.  Because:
+ * We reclaim from a zone even if that zone is over high_wmark_pages(zone).
+ * Because:
  * a) The caller may be trying to free *extra* pages to satisfy a higher-order
  *    allocation or
- * b) The zones may be over pages_high but they must go *over* pages_high to
- *    satisfy the `incremental min' zone defense algorithm.
+ * b) The target zone may be at high_wmark_pages(zone) but the lower zones
+ *    must go *over* high_wmark_pages(zone) to satisfy the `incremental min'
+ *    zone defense algorithm.
  *
  * If a zone is deemed to be full of pinned pages then just give it a light
  * scan then give up on it.
@@ -1538,7 +1587,8 @@ static void shrink_zones(int priority, struct zonelist *zonelist,
        struct zone *zone;
 
        sc->all_unreclaimable = 1;
-       for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
+       for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx,
+                                       sc->nodemask) {
                if (!populated_zone(zone))
                        continue;
                /*
@@ -1683,17 +1733,19 @@ out:
 }
 
 unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
-                                                               gfp_t gfp_mask)
+                               gfp_t gfp_mask, nodemask_t *nodemask)
 {
        struct scan_control sc = {
                .gfp_mask = gfp_mask,
                .may_writepage = !laptop_mode,
                .swap_cluster_max = SWAP_CLUSTER_MAX,
                .may_unmap = 1,
+               .may_swap = 1,
                .swappiness = vm_swappiness,
                .order = order,
                .mem_cgroup = NULL,
                .isolate_pages = isolate_pages_global,
+               .nodemask = nodemask,
        };
 
        return do_try_to_free_pages(zonelist, &sc);
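
With the extra nodemask parameter, a direct-reclaim caller (typically the page allocator) can confine scanning to the nodes it is actually allowed to allocate from. A hedged fragment showing the new calling convention — hypothetical caller, kernel context assumed, not part of this patch:

/* Reclaim on behalf of an allocation that may only use the local node. */
static unsigned long reclaim_local_node_only(void)
{
	nodemask_t allowed = NODE_MASK_NONE;

	node_set(numa_node_id(), allowed);
	return try_to_free_pages(node_zonelist(numa_node_id(), GFP_KERNEL),
				 0, GFP_KERNEL, &allowed);
}
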
@@ -1709,17 +1761,16 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
        struct scan_control sc = {
                .may_writepage = !laptop_mode,
                .may_unmap = 1,
+               .may_swap = !noswap,
                .swap_cluster_max = SWAP_CLUSTER_MAX,
                .swappiness = swappiness,
                .order = 0,
                .mem_cgroup = mem_cont,
                .isolate_pages = mem_cgroup_isolate_pages,
+               .nodemask = NULL, /* we don't care about placement */
        };
        struct zonelist *zonelist;
 
-       if (noswap)
-               sc.may_unmap = 0;
-
        sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
                        (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
        zonelist = NODE_DATA(numa_node_id())->node_zonelists;
@@ -1729,7 +1780,7 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
 
 /*
  * For kswapd, balance_pgdat() will work across all this node's zones until
- * they are all at pages_high.
+ * they are all at high_wmark_pages(zone).
  *
  * Returns the number of pages which were actually freed.
  *
@@ -1742,11 +1793,11 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
  * the zone for when the problem goes away.
  *
  * kswapd scans the zones in the highmem->normal->dma direction.  It skips
- * zones which have free_pages > pages_high, but once a zone is found to have
- * free_pages <= pages_high, we scan that zone and the lower zones regardless
- * of the number of free pages in the lower zones.  This interoperates with
- * the page allocator fallback scheme to ensure that aging of pages is balanced
- * across the zones.
+ * zones which have free_pages > high_wmark_pages(zone), but once a zone is
+ * found to have free_pages <= high_wmark_pages(zone), we scan that zone and the
+ * lower zones regardless of the number of free pages in the lower zones. This
+ * interoperates with the page allocator fallback scheme to ensure that aging
+ * of pages is balanced across the zones.
  */
 static unsigned long balance_pgdat(pg_data_t *pgdat, int order)
 {
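
The comment above describes balance_pgdat()'s scan direction: walk the zones from highest to lowest, skip balanced zones, and once a zone at or below its high watermark is found, reclaim that zone and every zone below it. A compact userspace sketch of that rule (zone names and numbers are made up):

#include <stdio.h>

struct fake_zone {
	const char   *name;
	unsigned long free_pages;
	unsigned long high_wmark;
};

int main(void)
{
	/* Index 0 is the lowest zone (DMA), the last entry the highest. */
	struct fake_zone zones[] = {
		{ "DMA",      400,  128 },
		{ "Normal",   900, 1024 },	/* below its high watermark */
		{ "HighMem", 5000, 2048 },
	};
	int nzones = sizeof(zones) / sizeof(zones[0]);
	int end_zone = -1;
	int i;

	/* Scan highmem -> dma; the first zone found at or below its high
	 * watermark marks the top of the range that gets reclaimed. */
	for (i = nzones - 1; i >= 0; i--) {
		if (zones[i].free_pages <= zones[i].high_wmark) {
			end_zone = i;
			break;
		}
	}

	if (end_zone < 0) {
		printf("all zones balanced, nothing to do\n");
		return 0;
	}
	for (i = 0; i <= end_zone; i++)		/* end_zone and everything below it */
		printf("reclaim from %s\n", zones[i].name);
	return 0;
}
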
@@ -1758,6 +1809,7 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order)
        struct scan_control sc = {
                .gfp_mask = GFP_KERNEL,
                .may_unmap = 1,
+               .may_swap = 1,
                .swap_cluster_max = SWAP_CLUSTER_MAX,
                .swappiness = vm_swappiness,
                .order = order,
@@ -1766,7 +1818,8 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order)
        };
        /*
         * temp_priority is used to remember the scanning priority at which
-        * this zone was successfully refilled to free_pages == pages_high.
+        * this zone was successfully refilled to
+        * free_pages == high_wmark_pages(zone).
         */
        int temp_priority[MAX_NR_ZONES];
 
@@ -1811,8 +1864,8 @@ loop_again:
                                shrink_active_list(SWAP_CLUSTER_MAX, zone,
                                                        &sc, priority, 0);
 
-                       if (!zone_watermark_ok(zone, order, zone->pages_high,
-                                              0, 0)) {
+                       if (!zone_watermark_ok(zone, order,
+                                       high_wmark_pages(zone), 0, 0)) {
                                end_zone = i;
                                break;
                        }
@@ -1846,8 +1899,8 @@ loop_again:
                                        priority != DEF_PRIORITY)
                                continue;
 
-                       if (!zone_watermark_ok(zone, order, zone->pages_high,
-                                              end_zone, 0))
+                       if (!zone_watermark_ok(zone, order,
+                                       high_wmark_pages(zone), end_zone, 0))
                                all_zones_ok = 0;
                        temp_priority[i] = priority;
                        sc.nr_scanned = 0;
@@ -1856,8 +1909,8 @@ loop_again:
                         * We put equal pressure on every zone, unless one
                         * zone has way too many pages free already.
                         */
-                       if (!zone_watermark_ok(zone, order, 8*zone->pages_high,
-                                               end_zone, 0))
+                       if (!zone_watermark_ok(zone, order,
+                                       8*high_wmark_pages(zone), end_zone, 0))
                                shrink_zone(priority, zone, &sc);
                        reclaim_state->reclaimed_slab = 0;
                        nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL,
@@ -1958,7 +2011,7 @@ static int kswapd(void *p)
        struct reclaim_state reclaim_state = {
                .reclaimed_slab = 0,
        };
-       node_to_cpumask_ptr(cpumask, pgdat->node_id);
+       const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
 
        lockdep_set_current_reclaim_state(GFP_KERNEL);
 
@@ -2023,7 +2076,7 @@ void wakeup_kswapd(struct zone *zone, int order)
                return;
 
        pgdat = zone->zone_pgdat;
-       if (zone_watermark_ok(zone, order, zone->pages_low, 0, 0))
+       if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0, 0))
                return;
        if (pgdat->kswapd_max_order < order)
                pgdat->kswapd_max_order = order;
@@ -2042,7 +2095,7 @@ unsigned long global_lru_pages(void)
                + global_page_state(NR_INACTIVE_FILE);
 }
 
-#ifdef CONFIG_PM
+#ifdef CONFIG_HIBERNATION
 /*
  * Helper function for shrink_all_memory().  Tries to reclaim 'nr_pages' pages
  * from LRU lists system-wide, for given pass and priority.
@@ -2070,22 +2123,22 @@ static void shrink_all_zones(unsigned long nr_pages, int prio,
                                                l == LRU_ACTIVE_FILE))
                                continue;
 
-                       zone->lru[l].nr_scan += (lru_pages >> prio) + 1;
-                       if (zone->lru[l].nr_scan >= nr_pages || pass > 3) {
+                       zone->lru[l].nr_saved_scan += (lru_pages >> prio) + 1;
+                       if (zone->lru[l].nr_saved_scan >= nr_pages || pass > 3) {
                                unsigned long nr_to_scan;
 
-                               zone->lru[l].nr_scan = 0;
+                               zone->lru[l].nr_saved_scan = 0;
                                nr_to_scan = min(nr_pages, lru_pages);
                                nr_reclaimed += shrink_list(l, nr_to_scan, zone,
                                                                sc, prio);
                                if (nr_reclaimed >= nr_pages) {
-                                       sc->nr_reclaimed = nr_reclaimed;
+                                       sc->nr_reclaimed += nr_reclaimed;
                                        return;
                                }
                        }
                }
        }
-       sc->nr_reclaimed = nr_reclaimed;
+       sc->nr_reclaimed += nr_reclaimed;
 }
 
 /*
@@ -2106,6 +2159,7 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
                .may_unmap = 0,
                .may_writepage = 1,
                .isolate_pages = isolate_pages_global,
+               .nr_reclaimed = 0,
        };
 
        current->reclaim_state = &reclaim_state;
@@ -2181,7 +2235,7 @@ out:
 
        return sc.nr_reclaimed;
 }
-#endif
+#endif /* CONFIG_HIBERNATION */
 
 /* It's optimal to keep kswapds on the same CPUs as their memory, but
    not required for correctness.  So if the last cpu in a node goes
@@ -2195,7 +2249,9 @@ static int __devinit cpu_callback(struct notifier_block *nfb,
        if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) {
                for_each_node_state(nid, N_HIGH_MEMORY) {
                        pg_data_t *pgdat = NODE_DATA(nid);
-                       node_to_cpumask_ptr(mask, pgdat->node_id);
+                       const struct cpumask *mask;
+
+                       mask = cpumask_of_node(pgdat->node_id);
 
                        if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
                                /* One of our CPUs online: restore mask */
@@ -2286,6 +2342,7 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
        struct scan_control sc = {
                .may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE),
                .may_unmap = !!(zone_reclaim_mode & RECLAIM_SWAP),
+               .may_swap = 1,
                .swap_cluster_max = max_t(unsigned long, nr_pages,
                                        SWAP_CLUSTER_MAX),
                .gfp_mask = gfp_mask,
@@ -2400,7 +2457,6 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 }
 #endif
 
-#ifdef CONFIG_UNEVICTABLE_LRU
 /*
  * page_evictable - test whether a page is evictable
  * @page: the page to test
@@ -2647,4 +2703,3 @@ void scan_unevictable_unregister_node(struct node *node)
        sysdev_remove_file(&node->sysdev, &attr_scan_unevictable_pages);
 }
 
-#endif