Merge branch 'for-2.6.40/core' of git://git.kernel.dk/linux-2.6-block

diff --git a/mm/vmscan.c b/mm/vmscan.c
index c7f5a6d..7e01161 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -41,6 +41,8 @@
 #include <linux/memcontrol.h>
 #include <linux/delayacct.h>
 #include <linux/sysctl.h>
+#include <linux/oom.h>
+#include <linux/prefetch.h>
 
 #include <asm/tlbflush.h>
 #include <asm/div64.h>
@@ -200,6 +202,14 @@ void unregister_shrinker(struct shrinker *shrinker)
 }
 EXPORT_SYMBOL(unregister_shrinker);
 
+static inline int do_shrinker_shrink(struct shrinker *shrinker,
+                                    struct shrink_control *sc,
+                                    unsigned long nr_to_scan)
+{
+       sc->nr_to_scan = nr_to_scan;
+       return (*shrinker->shrink)(shrinker, sc);
+}
+
 #define SHRINK_BATCH 128
 /*
  * Call the shrink functions to age shrinkable caches
@@ -220,25 +230,29 @@ EXPORT_SYMBOL(unregister_shrinker);
  *
  * Returns the number of slab objects which we shrunk.
  */
-unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
-                       unsigned long lru_pages)
+unsigned long shrink_slab(struct shrink_control *shrink,
+                         unsigned long nr_pages_scanned,
+                         unsigned long lru_pages)
 {
        struct shrinker *shrinker;
        unsigned long ret = 0;
 
-       if (scanned == 0)
-               scanned = SWAP_CLUSTER_MAX;
+       if (nr_pages_scanned == 0)
+               nr_pages_scanned = SWAP_CLUSTER_MAX;
 
-       if (!down_read_trylock(&shrinker_rwsem))
-               return 1;       /* Assume we'll be able to shrink next time */
+       if (!down_read_trylock(&shrinker_rwsem)) {
+               /* Assume we'll be able to shrink next time */
+               ret = 1;
+               goto out;
+       }
 
        list_for_each_entry(shrinker, &shrinker_list, list) {
                unsigned long long delta;
                unsigned long total_scan;
                unsigned long max_pass;
 
-               max_pass = (*shrinker->shrink)(shrinker, 0, gfp_mask);
-               delta = (4 * scanned) / shrinker->seeks;
+               max_pass = do_shrinker_shrink(shrinker, shrink, 0);
+               delta = (4 * nr_pages_scanned) / shrinker->seeks;
                delta *= max_pass;
                do_div(delta, lru_pages + 1);
                shrinker->nr += delta;
@@ -265,9 +279,9 @@ unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
                        int shrink_ret;
                        int nr_before;
 
-                       nr_before = (*shrinker->shrink)(shrinker, 0, gfp_mask);
-                       shrink_ret = (*shrinker->shrink)(shrinker, this_scan,
-                                                               gfp_mask);
+                       nr_before = do_shrinker_shrink(shrinker, shrink, 0);
+                       shrink_ret = do_shrinker_shrink(shrinker, shrink,
+                                                       this_scan);
                        if (shrink_ret == -1)
                                break;
                        if (shrink_ret < nr_before)
@@ -281,6 +295,8 @@ unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
                shrinker->nr += total_scan;
        }
        up_read(&shrinker_rwsem);
+out:
+       cond_resched();
        return ret;
 }
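
For orientation, a minimal caller-side sketch of the reworked interface (not part of this diff; the function name is made up for illustration): the gfp mask is now carried in a shrink_control that the caller sets up once, and each shrinker's nr_to_scan is filled in later by do_shrinker_shrink().

/* example_reclaim_slab() is a hypothetical wrapper, shown only to
 * illustrate the new calling convention. */
static unsigned long example_reclaim_slab(gfp_t gfp_mask,
                                          unsigned long nr_scanned,
                                          unsigned long lru_pages)
{
        struct shrink_control shrink = {
                .gfp_mask = gfp_mask,   /* allocation context for the shrinkers */
        };

        /*
         * nr_scanned and lru_pages come from the surrounding reclaim loop;
         * sc->nr_to_scan is set per shrinker by do_shrinker_shrink().
         */
        return shrink_slab(&shrink, nr_scanned, lru_pages);
}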
 
@@ -936,7 +952,7 @@ keep_lumpy:
         * back off and wait for congestion to clear because further reclaim
         * will encounter the same problem
         */
-       if (nr_dirty == nr_congested && nr_dirty != 0)
+       if (nr_dirty && nr_dirty == nr_congested && scanning_global_lru(sc))
                zone_set_flag(zone, ZONE_CONGESTED);
 
        free_page_list(&free_pages);
@@ -1200,13 +1216,16 @@ int isolate_lru_page(struct page *page)
 {
        int ret = -EBUSY;
 
+       VM_BUG_ON(!page_count(page));
+
        if (PageLRU(page)) {
                struct zone *zone = page_zone(page);
 
                spin_lock_irq(&zone->lru_lock);
-               if (PageLRU(page) && get_page_unless_zero(page)) {
+               if (PageLRU(page)) {
                        int lru = page_lru(page);
                        ret = 0;
+                       get_page(page);
                        ClearPageLRU(page);
 
                        del_page_from_lru_list(zone, page, lru);
@@ -1988,17 +2007,12 @@ static bool zone_reclaimable(struct zone *zone)
        return zone->pages_scanned < zone_reclaimable_pages(zone) * 6;
 }
 
-/*
- * As hibernation is going on, kswapd is freezed so that it can't mark
- * the zone into all_unreclaimable. It can't handle OOM during hibernation.
- * So let's check zone's unreclaimable in direct reclaim as well as kswapd.
- */
+/* All zones in zonelist are unreclaimable? */
 static bool all_unreclaimable(struct zonelist *zonelist,
                struct scan_control *sc)
 {
        struct zoneref *z;
        struct zone *zone;
-       bool all_unreclaimable = true;
 
        for_each_zone_zonelist_nodemask(zone, z, zonelist,
                        gfp_zone(sc->gfp_mask), sc->nodemask) {
@@ -2006,13 +2020,11 @@ static bool all_unreclaimable(struct zonelist *zonelist,
                        continue;
                if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
                        continue;
-               if (zone_reclaimable(zone)) {
-                       all_unreclaimable = false;
-                       break;
-               }
+               if (!zone->all_unreclaimable)
+                       return false;
        }
 
-       return all_unreclaimable;
+       return true;
 }
 
 /*
@@ -2032,7 +2044,8 @@ static bool all_unreclaimable(struct zonelist *zonelist,
  *             else, the number of pages reclaimed
  */
 static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
-                                       struct scan_control *sc)
+                                       struct scan_control *sc,
+                                       struct shrink_control *shrink)
 {
        int priority;
        unsigned long total_scanned = 0;
@@ -2066,7 +2079,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
                                lru_pages += zone_reclaimable_pages(zone);
                        }
 
-                       shrink_slab(sc->nr_scanned, sc->gfp_mask, lru_pages);
+                       shrink_slab(shrink, sc->nr_scanned, lru_pages);
                        if (reclaim_state) {
                                sc->nr_reclaimed += reclaim_state->reclaimed_slab;
                                reclaim_state->reclaimed_slab = 0;
@@ -2108,6 +2121,14 @@ out:
        if (sc->nr_reclaimed)
                return sc->nr_reclaimed;
 
+       /*
+        * While hibernation is in progress, kswapd is frozen, so it cannot
+        * mark a zone all_unreclaimable.  Bypass the all_unreclaimable
+        * check in that case.
+        */
+       if (oom_killer_disabled)
+               return 0;
+
 
        /* top priority shrink_zones still had more to do? don't OOM, then */
        if (scanning_global_lru(sc) && !all_unreclaimable(zonelist, sc))
                return 1;
@@ -2130,12 +2151,15 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
                .mem_cgroup = NULL,
                .nodemask = nodemask,
        };
+       struct shrink_control shrink = {
+               .gfp_mask = sc.gfp_mask,
+       };
 
        trace_mm_vmscan_direct_reclaim_begin(order,
                                sc.may_writepage,
                                gfp_mask);
 
-       nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
+       nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink);
 
        trace_mm_vmscan_direct_reclaim_end(nr_reclaimed);
 
@@ -2195,17 +2219,20 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
                .order = 0,
                .mem_cgroup = mem_cont,
                .nodemask = NULL, /* we don't care the placement */
+               .gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
+                               (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK),
+       };
+       struct shrink_control shrink = {
+               .gfp_mask = sc.gfp_mask,
        };
 
-       sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
-                       (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
        zonelist = NODE_DATA(numa_node_id())->node_zonelists;
 
        trace_mm_vmscan_memcg_reclaim_begin(0,
                                            sc.may_writepage,
                                            sc.gfp_mask);
 
-       nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
+       nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink);
 
        trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed);
 
@@ -2284,7 +2311,7 @@ static bool sleeping_prematurely(pg_data_t *pgdat, int order, long remaining,
         * must be balanced
         */
        if (order)
-               return pgdat_balanced(pgdat, balanced, classzone_idx);
+               return !pgdat_balanced(pgdat, balanced, classzone_idx);
        else
                return !all_zones_ok;
 }
@@ -2333,6 +2360,9 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
                .order = order,
                .mem_cgroup = NULL,
        };
+       struct shrink_control shrink = {
+               .gfp_mask = sc.gfp_mask,
+       };
 loop_again:
        total_scanned = 0;
        sc.nr_reclaimed = 0;
@@ -2432,8 +2462,7 @@ loop_again:
                                        end_zone, 0))
                                shrink_zone(priority, zone, &sc);
                        reclaim_state->reclaimed_slab = 0;
-                       nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL,
-                                               lru_pages);
+                       nr_slab = shrink_slab(&shrink, sc.nr_scanned, lru_pages);
                        sc.nr_reclaimed += reclaim_state->reclaimed_slab;
                        total_scanned += sc.nr_scanned;
 
@@ -2785,7 +2814,10 @@ unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
                .swappiness = vm_swappiness,
                .order = 0,
        };
-       struct zonelist * zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
+       struct shrink_control shrink = {
+               .gfp_mask = sc.gfp_mask,
+       };
+       struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
        struct task_struct *p = current;
        unsigned long nr_reclaimed;
 
@@ -2794,7 +2826,7 @@ unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
        reclaim_state.reclaimed_slab = 0;
        p->reclaim_state = &reclaim_state;
 
-       nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
+       nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink);
 
        p->reclaim_state = NULL;
        lockdep_clear_current_reclaim_state();
@@ -2969,6 +3001,9 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
                .swappiness = vm_swappiness,
                .order = order,
        };
+       struct shrink_control shrink = {
+               .gfp_mask = sc.gfp_mask,
+       };
        unsigned long nr_slab_pages0, nr_slab_pages1;
 
        cond_resched();
@@ -3010,7 +3045,7 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
                        unsigned long lru_pages = zone_reclaimable_pages(zone);
 
                        /* No reclaimable slab or very low memory pressure */
-                       if (!shrink_slab(sc.nr_scanned, gfp_mask, lru_pages))
+                       if (!shrink_slab(&shrink, sc.nr_scanned, lru_pages))
                                break;
 
                        /* Freed enough memory */
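
For completeness, a sketch of the shrinker side of the same convention (again not part of this diff; all example_* names and the object cache behind them are hypothetical): ->shrink() receives the shrink_control, reports the object count when nr_to_scan is zero, and returns -1 when it cannot make progress, matching the -1 and nr_before checks in shrink_slab() above.

/* Hypothetical helpers standing in for a real object cache. */
static unsigned long example_cache_count(void);
static void example_cache_evict(unsigned long nr);

static int example_cache_shrink(struct shrinker *s, struct shrink_control *sc)
{
        unsigned long nr = sc->nr_to_scan;

        if (nr) {
                /* cannot recurse into the filesystem from this context */
                if (!(sc->gfp_mask & __GFP_FS))
                        return -1;
                example_cache_evict(nr);        /* free up to nr objects */
        }

        /* with nr_to_scan == 0 this acts as the "how big are you" query */
        return example_cache_count();
}

static struct shrinker example_shrinker = {
        .shrink = example_cache_shrink,
        .seeks  = DEFAULT_SEEKS,
};

/* registered once, e.g. at init time, with register_shrinker(&example_shrinker) */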