vmscan: shrink_active_list(): reduce lru_lock hold time
Andrew Morton [Tue, 6 Jan 2009 22:40:13 +0000 (14:40 -0800)]
These three statements manipulate only local variables and do not need to be
covered by the lock.

Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
Cc: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

mm/vmscan.c

index 466a36b..5daf606 100644
@@ -1237,6 +1237,13 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
                list_add(&page->lru, &l_inactive);
        }
 
+       /*
+        * Move the pages to the [file or anon] inactive list.
+        */
+       pagevec_init(&pvec, 1);
+       pgmoved = 0;
+       lru = LRU_BASE + file * LRU_FILE;
+
        spin_lock_irq(&zone->lru_lock);
        /*
         * Count referenced pages from currently used mappings as
@@ -1247,13 +1254,6 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
        if (scan_global_lru(sc))
                zone->recent_rotated[!!file] += pgmoved;
 
-       /*
-        * Move the pages to the [file or anon] inactive list.
-        */
-       pagevec_init(&pvec, 1);
-
-       pgmoved = 0;
-       lru = LRU_BASE + file * LRU_FILE;
        while (!list_empty(&l_inactive)) {
                page = lru_to_page(&l_inactive);
                prefetchw_prev_lru_page(page, &l_inactive, flags);
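
The moved statements only initialize function-local state (the pagevec, the
pgmoved counter and the lru index), so no other CPU can observe them and they
can safely run before zone->lru_lock is taken, shortening the time the lock is
held.  Below is a minimal, self-contained sketch of the same pattern using a
pthread mutex rather than the kernel's spinlock; the names update(), moved and
base are illustrative only and are not taken from the patch.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int shared_counter;

/*
 * Purely local initialization is hoisted out of the critical section,
 * so the lock is held only for the update to shared state.
 */
static void update(int file)
{
	/*
	 * Local-only work: no other thread can observe these variables,
	 * so they do not need lock coverage.
	 */
	int moved = 0;
	int base = 10 * file;	/* stand-in for lru = LRU_BASE + file * LRU_FILE */

	pthread_mutex_lock(&lock);
	/* Only the access to shared state stays under the lock. */
	shared_counter += base + moved;
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	update(1);
	printf("%d\n", shared_counter);
	return 0;
}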