diff --git a/mm/vmscan.c b/mm/vmscan.c
index 2e8fbac..b55699c 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -95,8 +95,6 @@ struct scan_control {
        /* Can pages be swapped as part of reclaim? */
        int may_swap;
 
-       int swappiness;
-
        int order;
 
        /*
@@ -173,7 +171,8 @@ static unsigned long zone_nr_lru_pages(struct zone *zone,
                                struct scan_control *sc, enum lru_list lru)
 {
        if (!scanning_global_lru(sc))
-               return mem_cgroup_zone_nr_pages(sc->mem_cgroup, zone, lru);
+               return mem_cgroup_zone_nr_lru_pages(sc->mem_cgroup,
+                               zone_to_nid(zone), zone_idx(zone), BIT(lru));
 
        return zone_page_state(zone, NR_LRU_BASE + lru);
 }
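
mem_cgroup_zone_nr_lru_pages() now takes a bitmask of LRU lists rather
than a single list, so one lookup can sum several lists for a node/zone
pair; BIT(lru) selects exactly one. A minimal userspace sketch of that
mask convention, with invented names and made-up counts (only BIT() and
the lru_list values mirror the kernel):

    #include <stdio.h>

    #define BIT(n) (1UL << (n))

    enum lru_list { LRU_INACTIVE_ANON, LRU_ACTIVE_ANON,
                    LRU_INACTIVE_FILE, LRU_ACTIVE_FILE,
                    LRU_UNEVICTABLE, NR_LRU_LISTS };

    /* Hypothetical per-list page counts for one memcg/zone pair. */
    static unsigned long counts[NR_LRU_LISTS] = { 10, 20, 30, 40, 5 };

    static unsigned long nr_lru_pages(unsigned long lru_mask)
    {
            unsigned long total = 0;
            int l;

            for (l = 0; l < NR_LRU_LISTS; l++)
                    if (lru_mask & BIT(l))
                            total += counts[l];
            return total;
    }

    int main(void)
    {
            /* One list, as zone_nr_lru_pages() asks for above... */
            printf("%lu\n", nr_lru_pages(BIT(LRU_ACTIVE_FILE)));   /* 40 */
            /* ...or both anon lists in a single call. */
            printf("%lu\n", nr_lru_pages(BIT(LRU_INACTIVE_ANON) |
                                         BIT(LRU_ACTIVE_ANON)));   /* 30 */
            return 0;
    }
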
@@ -250,49 +249,90 @@ unsigned long shrink_slab(struct shrink_control *shrink,
                unsigned long long delta;
                unsigned long total_scan;
                unsigned long max_pass;
+               int shrink_ret = 0;
+               long nr;
+               long new_nr;
+               long batch_size = shrinker->batch ? shrinker->batch
+                                                 : SHRINK_BATCH;
+
+               /*
+                * copy the current shrinker scan count into a local variable
+                * and zero it so that other concurrent shrinker invocations
+                * don't also do this scanning work.
+                */
+               do {
+                       nr = shrinker->nr;
+               } while (cmpxchg(&shrinker->nr, nr, 0) != nr);
 
+               total_scan = nr;
                max_pass = do_shrinker_shrink(shrinker, shrink, 0);
                delta = (4 * nr_pages_scanned) / shrinker->seeks;
                delta *= max_pass;
                do_div(delta, lru_pages + 1);
-               shrinker->nr += delta;
-               if (shrinker->nr < 0) {
+               total_scan += delta;
+               if (total_scan < 0) {
                        printk(KERN_ERR "shrink_slab: %pF negative objects to "
                               "delete nr=%ld\n",
-                              shrinker->shrink, shrinker->nr);
-                       shrinker->nr = max_pass;
+                              shrinker->shrink, total_scan);
+                       total_scan = max_pass;
                }
 
                /*
+                * We need to avoid excessive windup on filesystem shrinkers
+                * due to large numbers of GFP_NOFS allocations causing the
+                * shrinkers to return -1 all the time. This results in a large
+                * nr being built up, so when a shrink that can do some work
+                * comes along it empties the entire cache due to nr >>>
+                * max_pass.  This is bad for sustaining a working set in
+                * memory.
+                *
+                * Hence only allow the shrinker to scan the entire cache when
+                * a large delta change is calculated directly.
+                */
+               if (delta < max_pass / 4)
+                       total_scan = min(total_scan, max_pass / 2);
+
+               /*
                 * Avoid risking looping forever due to too large nr value:
                 * never try to free more than twice the estimate number of
                 * freeable entries.
                 */
-               if (shrinker->nr > max_pass * 2)
-                       shrinker->nr = max_pass * 2;
+               if (total_scan > max_pass * 2)
+                       total_scan = max_pass * 2;
 
-               total_scan = shrinker->nr;
-               shrinker->nr = 0;
+               trace_mm_shrink_slab_start(shrinker, shrink, nr,
+                                       nr_pages_scanned, lru_pages,
+                                       max_pass, delta, total_scan);
 
-               while (total_scan >= SHRINK_BATCH) {
-                       long this_scan = SHRINK_BATCH;
-                       int shrink_ret;
+               while (total_scan >= batch_size) {
                        int nr_before;
 
                        nr_before = do_shrinker_shrink(shrinker, shrink, 0);
                        shrink_ret = do_shrinker_shrink(shrinker, shrink,
-                                                       this_scan);
+                                                       batch_size);
                        if (shrink_ret == -1)
                                break;
                        if (shrink_ret < nr_before)
                                ret += nr_before - shrink_ret;
-                       count_vm_events(SLABS_SCANNED, this_scan);
-                       total_scan -= this_scan;
+                       count_vm_events(SLABS_SCANNED, batch_size);
+                       total_scan -= batch_size;
 
                        cond_resched();
                }
 
-               shrinker->nr += total_scan;
+               /*
+                * move the unused scan count back into the shrinker in a
+                * manner that handles concurrent updates. If we exhausted the
+                * scan, there is no need to do an update.
+                */
+               do {
+                       nr = shrinker->nr;
+                       new_nr = total_scan + nr;
+                       if (total_scan <= 0)
+                               break;
+               } while (cmpxchg(&shrinker->nr, nr, new_nr) != nr);
+
+               trace_mm_shrink_slab_end(shrinker, shrink_ret, nr, new_nr);
        }
        up_read(&shrinker_rwsem);
 out:
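
The handoff around the scan loop is a lockless grab-and-zero: each
caller atomically claims shrinker->nr, scans in batch_size chunks, and
returns whatever it did not use. The windup clamp keeps a pile of
deferred GFP_NOFS work from emptying a cache in one pass: with
max_pass = 1000 and a small delta, a deferred nr of 10000 would be cut
back to 500 (max_pass / 2). A userspace sketch of the claim/return
pattern, using C11 atomics in place of cmpxchg() (pending_scan,
take_scan_work and return_scan_work are invented names, not kernel
API):

    #include <stdatomic.h>

    static _Atomic long pending_scan;

    /* Claim all deferred work so concurrent callers don't repeat it. */
    static long take_scan_work(void)
    {
            long nr = atomic_load(&pending_scan);

            /* On failure, nr is reloaded with the current value. */
            while (!atomic_compare_exchange_weak(&pending_scan, &nr, 0))
                    ;
            return nr;
    }

    /* Hand back what we did not scan; skip the write if none is left. */
    static void return_scan_work(long leftover)
    {
            if (leftover <= 0)
                    return;
            atomic_fetch_add(&pending_scan, leftover);
    }
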
@@ -1124,8 +1164,20 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
                                        nr_lumpy_dirty++;
                                scan++;
                        } else {
-                               /* the page is freed already. */
-                               if (!page_count(cursor_page))
+                               /*
+                                * Check if the page is freed already.
+                                *
+                                * We can't use page_count() as that
+                                * requires compound_head and we don't
+                                * have a pin on the page here. If a
+                                * page is tail, we may or may not
+                                * have isolated the head, so assume
+                                * it's not free, it'd be tricky to
+                                * track the head status without a
+                                * page pin.
+                                */
+                               if (!PageTail(cursor_page) &&
+                                   !atomic_read(&cursor_page->_count))
                                        continue;
                                break;
                        }
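
Without a reference held on cursor_page, the compound_head() lookup
inside page_count() cannot be trusted: the compound page could be split
or freed concurrently. The replacement reads the raw refcount of the
cursor page itself and conservatively treats tail pages as busy. A
distilled sketch of that decision, with the kernel types stubbed out:

    #include <stdbool.h>

    /* Stand-ins for the kernel's page flag and refcount accessors. */
    struct page {
            bool tail;      /* PageTail() */
            int count;      /* atomic_read(&page->_count) */
    };

    /*
     * Conservative "is this free?" check that never follows
     * compound_head(), since no pin is held on the page.
     */
    static bool cursor_page_maybe_free(const struct page *page)
    {
            if (page->tail)
                    return false;   /* head state unknown: assume busy */
            return page->count == 0;
    }
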
@@ -1717,6 +1769,13 @@ static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
        return shrink_inactive_list(nr_to_scan, zone, sc, priority, file);
 }
 
+static int vmscan_swappiness(struct scan_control *sc)
+{
+       if (scanning_global_lru(sc))
+               return vm_swappiness;
+       return mem_cgroup_swappiness(sc->mem_cgroup);
+}
+
 /*
  * Determine how aggressively the anon and file LRU lists should be
  * scanned.  The relative value of each set of LRU lists is determined
@@ -1735,22 +1794,15 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,
        u64 fraction[2], denominator;
        enum lru_list l;
        int noswap = 0;
-       int force_scan = 0;
-
-
-       anon  = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_ANON) +
-               zone_nr_lru_pages(zone, sc, LRU_INACTIVE_ANON);
-       file  = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_FILE) +
-               zone_nr_lru_pages(zone, sc, LRU_INACTIVE_FILE);
+       bool force_scan = false;
+       unsigned long nr_force_scan[2];
 
-       if (((anon + file) >> priority) < SWAP_CLUSTER_MAX) {
-               /* kswapd does zone balancing and need to scan this zone */
-               if (scanning_global_lru(sc) && current_is_kswapd())
-                       force_scan = 1;
-               /* memcg may have small limit and need to avoid priority drop */
-               if (!scanning_global_lru(sc))
-                       force_scan = 1;
-       }
+       /* kswapd does zone balancing and needs to scan this zone */
+       if (scanning_global_lru(sc) && current_is_kswapd())
+               force_scan = true;
+       /* memcg may have a small limit and needs to avoid priority drops */
+       if (!scanning_global_lru(sc))
+               force_scan = true;
 
        /* If we have no swap space, do not bother scanning anon pages. */
        if (!sc->may_swap || (nr_swap_pages <= 0)) {
@@ -1758,9 +1810,16 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,
                fraction[0] = 0;
                fraction[1] = 1;
                denominator = 1;
+               nr_force_scan[0] = 0;
+               nr_force_scan[1] = SWAP_CLUSTER_MAX;
                goto out;
        }
 
+       anon  = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_ANON) +
+               zone_nr_lru_pages(zone, sc, LRU_INACTIVE_ANON);
+       file  = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_FILE) +
+               zone_nr_lru_pages(zone, sc, LRU_INACTIVE_FILE);
+
        if (scanning_global_lru(sc)) {
                free  = zone_page_state(zone, NR_FREE_PAGES);
                /* If we have very few page cache pages,
@@ -1769,6 +1828,8 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,
                        fraction[0] = 1;
                        fraction[1] = 0;
                        denominator = 1;
+                       nr_force_scan[0] = SWAP_CLUSTER_MAX;
+                       nr_force_scan[1] = 0;
                        goto out;
                }
        }
@@ -1777,8 +1838,8 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,
         * With swappiness at 100, anonymous and file have the same priority.
         * This scanning priority is essentially the inverse of IO cost.
         */
-       anon_prio = sc->swappiness;
-       file_prio = 200 - sc->swappiness;
+       anon_prio = vmscan_swappiness(sc);
+       file_prio = 200 - vmscan_swappiness(sc);
 
        /*
         * OK, so we have swap space and a fair amount of page cache
@@ -1817,6 +1878,11 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,
        fraction[0] = ap;
        fraction[1] = fp;
        denominator = ap + fp + 1;
+       if (force_scan) {
+               unsigned long scan = SWAP_CLUSTER_MAX;
+               nr_force_scan[0] = div64_u64(scan * ap, denominator);
+               nr_force_scan[1] = div64_u64(scan * fp, denominator);
+       }
 out:
        for_each_evictable_lru(l) {
                int file = is_file_lru(l);
@@ -1837,12 +1903,8 @@ out:
                 * memcg, priority drop can cause big latency. So, it's better
                 * to scan small amount. See may_noscan above.
                 */
-               if (!scan && force_scan) {
-                       if (file)
-                               scan = SWAP_CLUSTER_MAX;
-                       else if (!noswap)
-                               scan = SWAP_CLUSTER_MAX;
-               }
+               if (!scan && force_scan)
+                       scan = nr_force_scan[file];
                nr[l] = scan;
        }
 }
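
When force_scan applies but the proportional scan target rounds down to
zero, the SWAP_CLUSTER_MAX floor is now split in the same anon:file
ratio as a normal scan, instead of handing the full floor to each list.
A worked example with made-up pressure values (SWAP_CLUSTER_MAX is 32
here):

    #include <stdio.h>

    int main(void)
    {
            /* Hypothetical recent anon/file rotation pressure. */
            unsigned long long ap = 3, fp = 1;
            unsigned long long denominator = ap + fp + 1;    /* 5 */
            unsigned long long scan = 32;          /* SWAP_CLUSTER_MAX */

            /* div64_u64() amounts to plain 64-bit division here. */
            printf("anon: %llu\n", scan * ap / denominator); /* 19 */
            printf("file: %llu\n", scan * fp / denominator); /* 6 */
            return 0;
    }
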
@@ -1983,14 +2045,13 @@ restart:
  * If a zone is deemed to be full of pinned pages then just give it a light
  * scan then give up on it.
  */
-static unsigned long shrink_zones(int priority, struct zonelist *zonelist,
+static void shrink_zones(int priority, struct zonelist *zonelist,
                                        struct scan_control *sc)
 {
        struct zoneref *z;
        struct zone *zone;
        unsigned long nr_soft_reclaimed;
        unsigned long nr_soft_scanned;
-       unsigned long total_scanned = 0;
 
        for_each_zone_zonelist_nodemask(zone, z, zonelist,
                                        gfp_zone(sc->gfp_mask), sc->nodemask) {
@@ -2005,19 +2066,23 @@ static unsigned long shrink_zones(int priority, struct zonelist *zonelist,
                                continue;
                        if (zone->all_unreclaimable && priority != DEF_PRIORITY)
                                continue;       /* Let kswapd poll it */
+                       /*
+                        * This steals pages from memory cgroups over their
+                        * soft limit and returns the number of reclaimed and
+                        * scanned pages. This works for global memory pressure
+                        * and balancing, not for a memcg's limit.
+                        */
+                       nr_soft_scanned = 0;
+                       nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone,
+                                               sc->order, sc->gfp_mask,
+                                               &nr_soft_scanned);
+                       sc->nr_reclaimed += nr_soft_reclaimed;
+                       sc->nr_scanned += nr_soft_scanned;
+                       /* need some check to avoid more shrink_zone() calls */
                }
 
-               nr_soft_scanned = 0;
-               nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone,
-                                                       sc->order, sc->gfp_mask,
-                                                       &nr_soft_scanned);
-               sc->nr_reclaimed += nr_soft_reclaimed;
-               total_scanned += nr_soft_scanned;
-
                shrink_zone(priority, zone, sc);
        }
-
-       return total_scanned;
 }
 
 static bool zone_reclaimable(struct zone *zone)
@@ -2081,8 +2146,8 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
        for (priority = DEF_PRIORITY; priority >= 0; priority--) {
                sc->nr_scanned = 0;
                if (!priority)
-                       disable_swap_token();
-               total_scanned += shrink_zones(priority, zonelist, sc);
+                       disable_swap_token(sc->mem_cgroup);
+               shrink_zones(priority, zonelist, sc);
                /*
                 * Don't shrink slabs when reclaiming memory from
                 * over limit cgroups
@@ -2164,7 +2229,6 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
                .nr_to_reclaim = SWAP_CLUSTER_MAX,
                .may_unmap = 1,
                .may_swap = 1,
-               .swappiness = vm_swappiness,
                .order = order,
                .mem_cgroup = NULL,
                .nodemask = nodemask,
@@ -2188,7 +2252,6 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
 
 unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
                                                gfp_t gfp_mask, bool noswap,
-                                               unsigned int swappiness,
                                                struct zone *zone,
                                                unsigned long *nr_scanned)
 {
@@ -2198,7 +2261,6 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
                .may_writepage = !laptop_mode,
                .may_unmap = 1,
                .may_swap = !noswap,
-               .swappiness = swappiness,
                .order = 0,
                .mem_cgroup = mem,
        };
@@ -2227,8 +2289,7 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
 
 unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
                                           gfp_t gfp_mask,
-                                          bool noswap,
-                                          unsigned int swappiness)
+                                          bool noswap)
 {
        struct zonelist *zonelist;
        unsigned long nr_reclaimed;
@@ -2238,7 +2299,6 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
                .may_unmap = 1,
                .may_swap = !noswap,
                .nr_to_reclaim = SWAP_CLUSTER_MAX,
-               .swappiness = swappiness,
                .order = 0,
                .mem_cgroup = mem_cont,
                .nodemask = NULL, /* we don't care the placement */
@@ -2295,7 +2355,8 @@ static bool pgdat_balanced(pg_data_t *pgdat, unsigned long balanced_pages,
        for (i = 0; i <= classzone_idx; i++)
                present_pages += pgdat->node_zones[i].present_pages;
 
-       return balanced_pages > (present_pages >> 2);
+       /* Special case: a zone with no pages at all is considered balanced */
+       return balanced_pages >= (present_pages >> 2);
 }
 
 /* is kswapd sleeping prematurely? */
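
The switch from '>' to '>=' in pgdat_balanced() matters only at the
boundary: a node whose zones up to classzone_idx contain no pages has
present_pages == 0, and with a strict comparison it could never be
reported balanced. A tiny standalone check of the
quarter-of-present-pages rule:

    #include <stdbool.h>
    #include <stdio.h>

    static bool balanced(unsigned long balanced_pages,
                         unsigned long present_pages)
    {
            return balanced_pages >= (present_pages >> 2);
    }

    int main(void)
    {
            printf("%d\n", balanced(0, 0));    /* 1: empty node is balanced */
            printf("%d\n", balanced(25, 100)); /* 1: exactly a quarter passes */
            printf("%d\n", balanced(24, 100)); /* 0: below a quarter fails */
            return 0;
    }
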
@@ -2311,7 +2372,7 @@ static bool sleeping_prematurely(pg_data_t *pgdat, int order, long remaining,
                return true;
 
        /* Check the watermark levels */
-       for (i = 0; i < pgdat->nr_zones; i++) {
+       for (i = 0; i <= classzone_idx; i++) {
                struct zone *zone = pgdat->node_zones + i;
 
                if (!populated_zone(zone))
@@ -2329,7 +2390,7 @@ static bool sleeping_prematurely(pg_data_t *pgdat, int order, long remaining,
                }
 
                if (!zone_watermark_ok_safe(zone, order, high_wmark_pages(zone),
-                                                       classzone_idx, 0))
+                                                       i, 0))
                        all_zones_ok = false;
                else
                        balanced += zone->present_pages;
@@ -2388,7 +2449,6 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
                 * we want to put equal scanning pressure on each zone.
                 */
                .nr_to_reclaim = ULONG_MAX,
-               .swappiness = vm_swappiness,
                .order = order,
                .mem_cgroup = NULL,
        };
@@ -2407,7 +2467,7 @@ loop_again:
 
                /* The swap token gets in the way of swapout... */
                if (!priority)
-                       disable_swap_token();
+                       disable_swap_token(NULL);
 
                all_zones_ok = 1;
                balanced = 0;
@@ -2436,8 +2496,10 @@ loop_again:
                        if (!zone_watermark_ok_safe(zone, order,
                                        high_wmark_pages(zone), 0, 0)) {
                                end_zone = i;
-                               *classzone_idx = i;
                                break;
+                       } else {
+                               /* If balanced, clear the congested flag */
+                               zone_clear_flag(zone, ZONE_CONGESTED);
                        }
                }
                if (i < 0)
@@ -2495,18 +2557,18 @@ loop_again:
                                KSWAPD_ZONE_BALANCE_GAP_RATIO);
                        if (!zone_watermark_ok_safe(zone, order,
                                        high_wmark_pages(zone) + balance_gap,
-                                       end_zone, 0))
+                                       end_zone, 0)) {
                                shrink_zone(priority, zone, &sc);
-                       reclaim_state->reclaimed_slab = 0;
-                       nr_slab = shrink_slab(&shrink, sc.nr_scanned, lru_pages);
-                       sc.nr_reclaimed += reclaim_state->reclaimed_slab;
-                       total_scanned += sc.nr_scanned;
 
-                       if (zone->all_unreclaimable)
-                               continue;
-                       if (nr_slab == 0 &&
-                           !zone_reclaimable(zone))
-                               zone->all_unreclaimable = 1;
+                               reclaim_state->reclaimed_slab = 0;
+                               nr_slab = shrink_slab(&shrink, sc.nr_scanned, lru_pages);
+                               sc.nr_reclaimed += reclaim_state->reclaimed_slab;
+                               total_scanned += sc.nr_scanned;
+
+                               if (nr_slab == 0 && !zone_reclaimable(zone))
+                                       zone->all_unreclaimable = 1;
+                       }
+
                        /*
                         * If we've done a decent amount of scanning and
                         * the reclaim ratio is low, start doing writepage
@@ -2516,6 +2578,12 @@ loop_again:
                            total_scanned > sc.nr_reclaimed + sc.nr_reclaimed / 2)
                                sc.may_writepage = 1;
 
+                       if (zone->all_unreclaimable) {
+                               if (end_zone && end_zone == i)
+                                       end_zone--;
+                               continue;
+                       }
+
                        if (!zone_watermark_ok_safe(zone, order,
                                        high_wmark_pages(zone), end_zone, 0)) {
                                all_zones_ok = 0;
@@ -2694,8 +2762,8 @@ static void kswapd_try_to_sleep(pg_data_t *pgdat, int order, int classzone_idx)
  */
 static int kswapd(void *p)
 {
-       unsigned long order;
-       int classzone_idx;
+       unsigned long order, new_order;
+       int classzone_idx, new_classzone_idx;
        pg_data_t *pgdat = (pg_data_t*)p;
        struct task_struct *tsk = current;
 
@@ -2725,17 +2793,23 @@ static int kswapd(void *p)
        tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
        set_freezable();
 
-       order = 0;
-       classzone_idx = MAX_NR_ZONES - 1;
+       order = new_order = 0;
+       classzone_idx = new_classzone_idx = pgdat->nr_zones - 1;
        for ( ; ; ) {
-               unsigned long new_order;
-               int new_classzone_idx;
                int ret;
 
-               new_order = pgdat->kswapd_max_order;
-               new_classzone_idx = pgdat->classzone_idx;
-               pgdat->kswapd_max_order = 0;
-               pgdat->classzone_idx = MAX_NR_ZONES - 1;
+               /*
+                * If the last balance_pgdat was unsuccessful it's unlikely a
+                * new request of a similar or harder type will succeed soon
+                * so consider going to sleep based on what we reclaimed at.
+                */
+               if (classzone_idx >= new_classzone_idx && order == new_order) {
+                       new_order = pgdat->kswapd_max_order;
+                       new_classzone_idx = pgdat->classzone_idx;
+                       pgdat->kswapd_max_order = 0;
+                       pgdat->classzone_idx = pgdat->nr_zones - 1;
+               }
+
                if (order < new_order || classzone_idx > new_classzone_idx) {
                        /*
                         * Don't sleep if someone wants a larger 'order'
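
kswapd now keeps retrying a balancing target it failed to meet instead
of picking up a fresh, possibly easier request and going back to sleep
too early. A sketch of the pick-up rule with invented names (achieved
is what the last balance_pgdat() call managed, wanted is the request it
was serving, queued is what wakers have posted since):

    struct request {
            unsigned long order;
            int classzone_idx;
    };

    static struct request next_target(struct request achieved,
                                      struct request wanted,
                                      struct request queued)
    {
            /* Previous request satisfied: accept the newly queued one. */
            if (achieved.classzone_idx >= wanted.classzone_idx &&
                achieved.order == wanted.order)
                    return queued;
            /* Still failing: retry the old, harder target. */
            return wanted;
    }
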
@@ -2748,7 +2822,7 @@ static int kswapd(void *p)
                        order = pgdat->kswapd_max_order;
                        classzone_idx = pgdat->classzone_idx;
                        pgdat->kswapd_max_order = 0;
-                       pgdat->classzone_idx = MAX_NR_ZONES - 1;
+                       pgdat->classzone_idx = pgdat->nr_zones - 1;
                }
 
                ret = try_to_freeze();
@@ -2847,7 +2921,6 @@ unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
                .may_writepage = 1,
                .nr_to_reclaim = nr_to_reclaim,
                .hibernation_mode = 1,
-               .swappiness = vm_swappiness,
                .order = 0,
        };
        struct shrink_control shrink = {
@@ -3034,7 +3107,6 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
                .nr_to_reclaim = max_t(unsigned long, nr_pages,
                                       SWAP_CLUSTER_MAX),
                .gfp_mask = gfp_mask,
-               .swappiness = vm_swappiness,
                .order = order,
        };
        struct shrink_control shrink = {