Merge remote branch 'origin/android-tegra-nv-3.4' into tot
diff --git a/mm/vmscan.c b/mm/vmscan.c
index ae3bf0a..0932dc2 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -722,7 +722,7 @@ static enum page_references page_check_references(struct page *page,
                return PAGEREF_RECLAIM;
 
        if (referenced_ptes) {
-               if (PageAnon(page))
+               if (PageSwapBacked(page))
                        return PAGEREF_ACTIVATE;
                /*
                 * All mapped pages start out with page table
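
The hunk above widens the activation test from PageAnon() to PageSwapBacked(): swap-backed covers shmem/tmpfs pages as well as anonymous memory, so referenced shmem pages are now activated instead of being cycled through reclaim again. A minimal userspace sketch of the predicate difference (the flag names mirror the kernel's page flags, but everything here is illustrative):

#include <stdbool.h>
#include <stdio.h>

#define PG_ANON       (1u << 0)  /* anonymous memory */
#define PG_SWAPBACKED (1u << 1)  /* backed by swap: anon *and* shmem/tmpfs */

static bool activate_old(unsigned flags) { return flags & PG_ANON; }
static bool activate_new(unsigned flags) { return flags & PG_SWAPBACKED; }

int main(void)
{
    unsigned shmem_page = PG_SWAPBACKED;            /* shmem: swap-backed, not anon */
    unsigned anon_page  = PG_ANON | PG_SWAPBACKED;  /* anon pages are also swap-backed */

    /* The old check let referenced shmem pages fall through to reclaim;
     * the new one activates them like anon pages. */
    printf("shmem: old=%d new=%d\n", activate_old(shmem_page), activate_new(shmem_page));
    printf("anon:  old=%d new=%d\n", activate_old(anon_page), activate_new(anon_page));
    return 0;
}
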
@@ -1413,7 +1413,6 @@ update_isolated_counts(struct mem_cgroup_zone *mz,
                       unsigned long *nr_anon,
                       unsigned long *nr_file)
 {
-       struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(mz);
        struct zone *zone = mz->zone;
        unsigned int count[NR_LRU_LISTS] = { 0, };
        unsigned long nr_active = 0;
@@ -1434,6 +1433,7 @@ update_isolated_counts(struct mem_cgroup_zone *mz,
                count[lru] += numpages;
        }
 
+       preempt_disable();
        __count_vm_events(PGDEACTIVATE, nr_active);
 
        __mod_zone_page_state(zone, NR_ACTIVE_FILE,
@@ -1448,8 +1448,9 @@ update_isolated_counts(struct mem_cgroup_zone *mz,
        *nr_anon = count[LRU_ACTIVE_ANON] + count[LRU_INACTIVE_ANON];
        *nr_file = count[LRU_ACTIVE_FILE] + count[LRU_INACTIVE_FILE];
 
-       reclaim_stat->recent_scanned[0] += *nr_anon;
-       reclaim_stat->recent_scanned[1] += *nr_file;
+       __mod_zone_page_state(zone, NR_ISOLATED_ANON, *nr_anon);
+       __mod_zone_page_state(zone, NR_ISOLATED_FILE, *nr_file);
+       preempt_enable();
 }
 
 /*
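
update_isolated_counts() was previously called with zone->lru_lock held and IRQs off, which is what made the non-atomic __count_vm_events()/__mod_zone_page_state() per-CPU updates safe; since the lock is now dropped earlier (see the shrink_inactive_list hunk further down), the function pins itself with preempt_disable()/preempt_enable() instead. A runnable userspace analogy of why a non-atomic per-CPU update needs that pin, using illustrative names rather than kernel API:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

#define MAX_CPUS 1024

/* One slot per CPU, like a per-CPU vmstat counter. */
static long nr_isolated_anon[MAX_CPUS];

static void mod_counter(long delta)
{
    int cpu = sched_getcpu();   /* the kernel disables preemption first, so the
                                 * task cannot migrate before the += below */
    if (cpu < 0 || cpu >= MAX_CPUS)
        return;
    nr_isolated_anon[cpu] += delta;  /* non-atomic read-modify-write */
}

int main(void)
{
    mod_counter(32);
    mod_counter(-32);
    /* Without the pin, a migration between sched_getcpu() and the +=
     * could land the update in another CPU's slot, or race a writer
     * already using that slot and lose an update. */
    puts("per-CPU slot updated on the current CPU only");
    return 0;
}
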
@@ -1511,6 +1512,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct mem_cgroup_zone *mz,
        unsigned long nr_writeback = 0;
        isolate_mode_t isolate_mode = ISOLATE_INACTIVE;
        struct zone *zone = mz->zone;
+       struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(mz);
 
        while (unlikely(too_many_isolated(zone, file, sc))) {
                congestion_wait(BLK_RW_ASYNC, HZ/10);
@@ -1544,19 +1546,13 @@ shrink_inactive_list(unsigned long nr_to_scan, struct mem_cgroup_zone *mz,
                        __count_zone_vm_events(PGSCAN_DIRECT, zone,
                                               nr_scanned);
        }
+       spin_unlock_irq(&zone->lru_lock);
 
-       if (nr_taken == 0) {
-               spin_unlock_irq(&zone->lru_lock);
+       if (nr_taken == 0)
                return 0;
-       }
 
        update_isolated_counts(mz, &page_list, &nr_anon, &nr_file);
 
-       __mod_zone_page_state(zone, NR_ISOLATED_ANON, nr_anon);
-       __mod_zone_page_state(zone, NR_ISOLATED_FILE, nr_file);
-
-       spin_unlock_irq(&zone->lru_lock);
-
        nr_reclaimed = shrink_page_list(&page_list, mz, sc, priority,
                                                &nr_dirty, &nr_writeback);
 
@@ -1569,9 +1565,17 @@ shrink_inactive_list(unsigned long nr_to_scan, struct mem_cgroup_zone *mz,
 
        spin_lock_irq(&zone->lru_lock);
 
-       if (current_is_kswapd())
-               __count_vm_events(KSWAPD_STEAL, nr_reclaimed);
-       __count_zone_vm_events(PGSTEAL, zone, nr_reclaimed);
+       reclaim_stat->recent_scanned[0] += nr_anon;
+       reclaim_stat->recent_scanned[1] += nr_file;
+
+       if (global_reclaim(sc)) {
+               if (current_is_kswapd())
+                       __count_zone_vm_events(PGSTEAL_KSWAPD, zone,
+                                              nr_reclaimed);
+               else
+                       __count_zone_vm_events(PGSTEAL_DIRECT, zone,
+                                              nr_reclaimed);
+       }
 
        putback_inactive_pages(mz, &page_list);
 
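
This hunk splits the single PGSTEAL event (and drops the kswapd-only KSWAPD_STEAL) into separate PGSTEAL_KSWAPD and PGSTEAL_DIRECT per-zone counters, accounted only for global reclaim, and it also picks up the recent_scanned bookkeeping removed from update_isolated_counts() earlier. The split is visible from userspace in /proc/vmstat; a small reader that sums the matching fields (exact field names vary by kernel version and zone layout, hence the prefix match):

#include <stdio.h>
#include <string.h>

int main(void)
{
    char name[64];
    unsigned long long val, kswapd = 0, direct = 0;
    FILE *f = fopen("/proc/vmstat", "r");

    if (!f)
        return 1;
    /* Every /proc/vmstat line is "name value". */
    while (fscanf(f, "%63s %llu", name, &val) == 2) {
        if (!strncmp(name, "pgsteal_kswapd", 14))
            kswapd += val;
        else if (!strncmp(name, "pgsteal_direct", 14))
            direct += val;
    }
    fclose(f);
    printf("pages stolen by kswapd: %llu, by direct reclaim: %llu\n",
           kswapd, direct);
    return 0;
}
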
@@ -1691,6 +1695,8 @@ static void shrink_active_list(unsigned long nr_to_scan,
 
        lru_add_drain();
 
+       reset_reclaim_mode(sc);
+
        if (!sc->may_unmap)
                isolate_mode |= ISOLATE_UNMAPPED;
        if (!sc->may_writepage)
@@ -2106,12 +2112,7 @@ restart:
                 * with multiple processes reclaiming pages, the total
                 * freeing target can get unreasonably large.
                 */
-               if (nr_reclaimed >= nr_to_reclaim)
-                       nr_to_reclaim = 0;
-               else
-                       nr_to_reclaim -= nr_reclaimed;
-
-               if (!nr_to_reclaim && priority < DEF_PRIORITY)
+               if (nr_reclaimed >= nr_to_reclaim && priority < DEF_PRIORITY)
                        break;
        }
        blk_finish_plug(&plug);
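
nr_reclaimed is a running total across passes of this loop, so the deleted code subtracted an already-cumulative value from nr_to_reclaim on every pass and could break out well short of the target; comparing the running total against the unmodified target does the same job in one test (the priority < DEF_PRIORITY condition is kept so the starting pass still puts equal pressure on every zone). A toy standalone model of the two exit conditions, with made-up numbers:

#include <stdio.h>

int main(void)
{
    const unsigned long batch = 32, want = 96;
    unsigned long reclaimed, to_reclaim;
    int pass;

    /* Old logic: subtracts the *cumulative* total on each pass. */
    reclaimed = 0;
    to_reclaim = want;
    for (pass = 1; ; pass++) {
        reclaimed += batch;              /* running total, like sc->nr_reclaimed */
        if (reclaimed >= to_reclaim)
            to_reclaim = 0;
        else
            to_reclaim -= reclaimed;     /* double-counts earlier progress */
        if (!to_reclaim)
            break;
    }
    printf("old: stopped after %d passes, %lu of %lu pages\n",
           pass, reclaimed, want);

    /* New logic: one comparison against the original target. */
    reclaimed = 0;
    for (pass = 1; ; pass++) {
        reclaimed += batch;
        if (reclaimed >= want)
            break;
    }
    printf("new: stopped after %d passes, %lu of %lu pages\n",
           pass, reclaimed, want);
    return 0;
}
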
@@ -2262,8 +2263,8 @@ static bool shrink_zones(int priority, struct zonelist *zonelist,
                                 * Even though compaction is invoked for any
                                 * non-zero order, only frequent costly order
                                 * reclamation is disruptive enough to become a
-                                * noticable problem, like transparent huge page
-                                * allocations.
+                                * noticeable problem, like transparent huge
+                                * page allocations.
                                 */
                                if (compaction_ready(zone, sc)) {
                                        aborted_reclaim = true;
@@ -2344,7 +2345,6 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
        unsigned long writeback_threshold;
        bool aborted_reclaim;
 
-       get_mems_allowed();
        delayacct_freepages_start();
 
        if (global_reclaim(sc))
@@ -2408,7 +2408,6 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
 
 out:
        delayacct_freepages_end();
-       put_mems_allowed();
 
        if (sc->nr_reclaimed)
                return sc->nr_reclaimed;
@@ -2818,7 +2817,7 @@ loop_again:
                                testorder = 0;
 
                        if ((buffer_heads_over_limit && is_highmem_idx(i)) ||
-                                   !zone_watermark_ok_safe(zone, order,
+                                   !zone_watermark_ok_safe(zone, testorder,
                                        high_wmark_pages(zone) + balance_gap,
                                        end_zone, 0)) {
                                shrink_zone(priority, zone, &sc);
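
Earlier in this loop (visible at the top of the hunk) testorder has been set to 0 when compaction is expected to assemble the high-order allocation, and the fix makes the watermark test use that same testorder rather than the raw order, so kswapd can stop once enough order-0 pages exist for compaction to work with. A toy model of why the order in the check matters; this simplified watermark rule is illustrative, not zone_watermark_ok()'s real algorithm:

#include <stdbool.h>
#include <stdio.h>

/* Toy rule: an order-N check only counts free blocks of size >= 2^N. */
static bool watermark_ok(unsigned long free_order0, unsigned long free_order3,
                         int order, unsigned long wmark)
{
    unsigned long usable = (order == 0) ? free_order0 + 8 * free_order3
                                        : free_order3 * 8;
    return usable >= wmark;
}

int main(void)
{
    /* Plenty of single pages, almost no contiguous order-3 blocks. */
    unsigned long free0 = 5000, free3 = 2, wmark = 1024;

    printf("check at raw order 3: %s\n",
           watermark_ok(free0, free3, 3, wmark) ? "ok" : "fail -> keep reclaiming");
    printf("check at testorder 0: %s\n",
           watermark_ok(free0, free3, 0, wmark) ? "ok -> let compaction run" : "fail");
    return 0;
}
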
@@ -2947,7 +2946,8 @@ out:
                                continue;
 
                        /* Would compaction fail due to lack of free memory? */
-                       if (compaction_suitable(zone, order) == COMPACT_SKIPPED)
+                       if (COMPACTION_BUILD &&
+                           compaction_suitable(zone, order) == COMPACT_SKIPPED)
                                goto loop_again;
 
                        /* Confirm the zone is balanced for order-0 */
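
COMPACTION_BUILD is the compile-time 0/1 constant for CONFIG_COMPACTION, so the added test lets the optimizer discard the compaction_suitable() call entirely on kernels built without compaction, and kswapd no longer loops again on behalf of a compaction pass that can never run. A toy standalone version of the constant-fold gate (the CONFIG macro handling and the stub are stand-ins, not the kernel's):

#include <stdio.h>

#ifdef CONFIG_COMPACTION   /* stand-in for the kernel config; try -DCONFIG_COMPACTION */
#define COMPACTION_BUILD 1
#else
#define COMPACTION_BUILD 0
#endif

/* Stub standing in for the kernel's compaction_suitable(). */
int compaction_suitable_stub(int order)
{
    return order <= 3;     /* pretend costly orders look unsuitable */
}

int main(void)
{
    int order = 4;

    /* When COMPACTION_BUILD is the constant 0, the && short-circuits at
     * compile time: the stub is never called and the optimizer can drop
     * the whole branch, which is what the hunk above relies on. */
    if (COMPACTION_BUILD && !compaction_suitable_stub(order))
        printf("order %d: retry balancing (loop_again)\n", order);
    else
        printf("order %d: no compaction retry needed\n", order);
    return 0;
}
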