mm: page_alloc: remove trailing whitespace
index 3a442f1..5c13f13 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -21,9 +21,8 @@
 #include <linux/pagemap.h>
 #include <linux/pagevec.h>
 #include <linux/init.h>
-#include <linux/module.h>
+#include <linux/export.h>
 #include <linux/mm_inline.h>
-#include <linux/buffer_head.h> /* for try_to_release_page() */
 #include <linux/percpu_counter.h>
 #include <linux/percpu.h>
 #include <linux/cpu.h>
@@ -54,7 +53,7 @@ static void __page_cache_release(struct page *page)
                spin_lock_irqsave(&zone->lru_lock, flags);
                VM_BUG_ON(!PageLRU(page));
                __ClearPageLRU(page);
-               del_page_from_lru(zone, page);
+               del_page_from_lru_list(zone, page, page_off_lru(page));
                spin_unlock_irqrestore(&zone->lru_lock, flags);
        }
 }
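
del_page_from_lru_list() now takes the LRU list index explicitly, and the new page_off_lru() helper both derives that index from the page flags and clears them. A minimal userspace sketch of that flag-to-index mapping, using invented stand-in flag fields (this mirrors the shape of page_off_lru(), not its actual kernel source):

    #include <stdio.h>

    enum lru_list { LRU_INACTIVE_ANON, LRU_ACTIVE_ANON, LRU_INACTIVE_FILE,
                    LRU_ACTIVE_FILE, LRU_UNEVICTABLE, NR_LRU_LISTS };

    struct page_flags { int file, active, unevictable; };

    /* Compute which LRU list the page is on from its flags, clearing
     * the flags as we go, so the caller can delete from the right list. */
    static enum lru_list page_off_lru(struct page_flags *p)
    {
        enum lru_list lru;

        if (p->unevictable) {
            p->unevictable = 0;
            lru = LRU_UNEVICTABLE;
        } else {
            lru = p->file ? LRU_INACTIVE_FILE : LRU_INACTIVE_ANON;
            if (p->active)
                lru += 1;   /* each active list follows its inactive twin */
            p->active = 0;
        }
        return lru;
    }

    int main(void)
    {
        struct page_flags pf = { .file = 1, .active = 1 };
        printf("lru index = %d\n", page_off_lru(&pf)); /* 3 == LRU_ACTIVE_FILE */
        return 0;
    }
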
@@ -78,39 +77,22 @@ static void put_compound_page(struct page *page)
 {
        if (unlikely(PageTail(page))) {
                /* __split_huge_page_refcount can run under us */
-               struct page *page_head = page->first_page;
-               smp_rmb();
-               /*
-                * If PageTail is still set after smp_rmb() we can be sure
-                * that the page->first_page we read wasn't a dangling pointer.
-                * See __split_huge_page_refcount() smp_wmb().
-                */
-               if (likely(PageTail(page) && get_page_unless_zero(page_head))) {
+               struct page *page_head = compound_trans_head(page);
+
+               if (likely(page != page_head &&
+                          get_page_unless_zero(page_head))) {
                        unsigned long flags;
                        /*
-                        * Verify that our page_head wasn't converted
-                        * to a a regular page before we got a
-                        * reference on it.
+                        * page_head wasn't a dangling pointer but it
+                        * may not be a head page anymore by the time
+                        * we obtain the lock. That is ok as long as it
+                        * can't be freed from under us.
                         */
-                       if (unlikely(!PageHead(page_head))) {
-                               /* PageHead is cleared after PageTail */
-                               smp_rmb();
-                               VM_BUG_ON(PageTail(page));
-                               goto out_put_head;
-                       }
-                       /*
-                        * Only run compound_lock on a valid PageHead,
-                        * after having it pinned with
-                        * get_page_unless_zero() above.
-                        */
-                       smp_mb();
-                       /* page_head wasn't a dangling pointer */
                        flags = compound_lock_irqsave(page_head);
                        if (unlikely(!PageTail(page))) {
                                /* __split_huge_page_refcount run before us */
                                compound_unlock_irqrestore(page_head, flags);
                                VM_BUG_ON(PageHead(page_head));
-                       out_put_head:
                                if (put_page_testzero(page_head))
                                        __put_single_page(page_head);
                        out_put_single:
@@ -121,16 +103,17 @@ static void put_compound_page(struct page *page)
                        VM_BUG_ON(page_head != page->first_page);
                        /*
                         * We can release the refcount taken by
-                        * get_page_unless_zero now that
-                        * split_huge_page_refcount is blocked on the
-                        * compound_lock.
+                        * get_page_unless_zero() now that
+                        * __split_huge_page_refcount() is blocked on
+                        * the compound_lock.
                         */
                        if (put_page_testzero(page_head))
                                VM_BUG_ON(1);
                        /* __split_huge_page_refcount will wait now */
-                       VM_BUG_ON(atomic_read(&page->_count) <= 0);
-                       atomic_dec(&page->_count);
+                       VM_BUG_ON(page_mapcount(page) <= 0);
+                       atomic_dec(&page->_mapcount);
                        VM_BUG_ON(atomic_read(&page_head->_count) <= 0);
+                       VM_BUG_ON(atomic_read(&page->_count) != 0);
                        compound_unlock_irqrestore(page_head, flags);
                        if (put_page_testzero(page_head)) {
                                if (PageHead(page_head))
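
The two hunks above move a tail page's extra pins out of ->_count and into ->_mapcount, so that only head (and order-0) pages carry real references; the new VM_BUG_ON asserts that a tail's ->_count stays zero. A hedged userspace model of that split, using the kernel's -1 mapcount bias (names echo the kernel, but this is only a sketch):

    #include <assert.h>
    #include <stdatomic.h>

    struct page {
        atomic_int _count;      /* real references: head pages only */
        atomic_int _mapcount;   /* tail pins, biased: -1 means none */
        struct page *first_page;
    };

    /* get_page() on a tail: pin the head, record the pin on the tail. */
    static void get_page_tail(struct page *tail)
    {
        atomic_fetch_add(&tail->first_page->_count, 1);
        atomic_fetch_add(&tail->_mapcount, 1);
    }

    /* put_page() on a tail: drop the tail pin, then the head reference. */
    static void put_page_tail(struct page *tail)
    {
        /* page_mapcount() is _mapcount + 1; mirrors the VM_BUG_ON above */
        assert(atomic_load(&tail->_mapcount) + 1 > 0);
        atomic_fetch_sub(&tail->_mapcount, 1);
        assert(atomic_load(&tail->_count) == 0);  /* tails hold no _count */
        atomic_fetch_sub(&tail->first_page->_count, 1);
    }

    int main(void)
    {
        struct page head, tail;
        atomic_init(&head._count, 1);       /* the mapper's reference */
        atomic_init(&head._mapcount, -1);
        atomic_init(&tail._count, 0);
        atomic_init(&tail._mapcount, -1);
        tail.first_page = &head;

        get_page_tail(&tail);
        put_page_tail(&tail);
        assert(atomic_load(&head._count) == 1);
        return 0;
    }
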
@@ -160,6 +143,45 @@ void put_page(struct page *page)
 }
 EXPORT_SYMBOL(put_page);
 
+/*
+ * This function is exported but must not be called by anything other
+ * than get_page(). It implements the slow path of get_page().
+ */
+bool __get_page_tail(struct page *page)
+{
+       /*
+        * This takes care of get_page() if run on a tail page
+        * returned by one of the get_user_pages/follow_page variants.
+        * get_user_pages/follow_page itself doesn't need the compound
+        * lock because it runs __get_page_tail_foll() under the
+        * proper PT lock that already serializes against
+        * split_huge_page().
+        */
+       unsigned long flags;
+       bool got = false;
+       struct page *page_head = compound_trans_head(page);
+
+       if (likely(page != page_head && get_page_unless_zero(page_head))) {
+               /*
+                * page_head wasn't a dangling pointer but it
+                * may not be a head page anymore by the time
+                * we obtain the lock. That is ok as long as it
+                * can't be freed from under us.
+                */
+               flags = compound_lock_irqsave(page_head);
+               /* here __split_huge_page_refcount won't run anymore */
+               if (likely(PageTail(page))) {
+                       __get_page_tail_foll(page, false);
+                       got = true;
+               }
+               compound_unlock_irqrestore(page_head, flags);
+               if (unlikely(!got))
+                       put_page(page_head);
+       }
+       return got;
+}
+EXPORT_SYMBOL(__get_page_tail);
+
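
__get_page_tail() is an instance of a general optimistic pattern: read a possibly stale head pointer, pin it with get_page_unless_zero(), then take the lock and recheck the condition (PageTail()), backing the pin out if the recheck fails. A minimal sketch of the same shape in userspace C (every name here is invented for illustration):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>

    struct object {
        pthread_mutex_t lock;       /* plays the compound_lock */
        atomic_int refcount;
        atomic_bool still_valid;    /* plays PageTail() */
    };

    /* Take a speculative reference (fails if already zero), then
     * revalidate under the lock, dropping the pin on failure. */
    static bool pin_if_valid(struct object *obj)
    {
        bool got = false;
        int old = atomic_load(&obj->refcount);

        do {                        /* get_page_unless_zero() analogue */
            if (old == 0)
                return false;
        } while (!atomic_compare_exchange_weak(&obj->refcount, &old, old + 1));

        pthread_mutex_lock(&obj->lock);
        if (atomic_load(&obj->still_valid))
            got = true;             /* keep the pin */
        pthread_mutex_unlock(&obj->lock);

        if (!got)
            atomic_fetch_sub(&obj->refcount, 1);  /* back the pin out */
        return got;
    }

    int main(void)
    {
        struct object o = { .lock = PTHREAD_MUTEX_INITIALIZER };
        atomic_init(&o.refcount, 1);
        atomic_init(&o.still_valid, true);
        return pin_if_valid(&o) ? 0 : 1;
    }
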
 /**
  * put_pages_list() - release a list of pages
  * @pages: list of pages threaded on page->lru
@@ -209,12 +231,14 @@ static void pagevec_lru_move_fn(struct pagevec *pvec,
 static void pagevec_move_tail_fn(struct page *page, void *arg)
 {
        int *pgmoved = arg;
-       struct zone *zone = page_zone(page);
 
        if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
                enum lru_list lru = page_lru_base_type(page);
-               list_move_tail(&page->lru, &zone->lru[lru].list);
-               mem_cgroup_rotate_reclaimable_page(page);
+               struct lruvec *lruvec;
+
+               lruvec = mem_cgroup_lru_move_lists(page_zone(page),
+                                                  page, lru, lru);
+               list_move_tail(&page->lru, &lruvec->lists[lru]);
                (*pgmoved)++;
        }
 }
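
pagevec_move_tail_fn() now looks up the per-memcg lruvec and rotates the page with list_move_tail(); the rotation itself is ordinary intrusive-list surgery. A self-contained demo using a userspace copy of the kernel's list primitives:

    #include <stddef.h>
    #include <stdio.h>

    struct list_head { struct list_head *prev, *next; };

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    static void INIT_LIST_HEAD(struct list_head *h) { h->prev = h->next = h; }

    static void list_add_tail(struct list_head *e, struct list_head *head)
    {
        e->prev = head->prev;
        e->next = head;
        head->prev->next = e;
        head->prev = e;
    }

    /* Unlink e and re-add it at the tail, as the rotation above does. */
    static void list_move_tail(struct list_head *e, struct list_head *head)
    {
        e->prev->next = e->next;
        e->next->prev = e->prev;
        list_add_tail(e, head);
    }

    struct page { int id; struct list_head lru; };

    int main(void)
    {
        struct list_head inactive;
        struct page pages[3] = { { .id = 0 }, { .id = 1 }, { .id = 2 } };

        INIT_LIST_HEAD(&inactive);
        for (int i = 0; i < 3; i++)
            list_add_tail(&pages[i].lru, &inactive);

        /* rotate page 0 to the tail, e.g. after its writeback completed */
        list_move_tail(&pages[0].lru, &inactive);

        for (struct list_head *e = inactive.next; e != &inactive; e = e->next)
            printf("%d ", container_of(e, struct page, lru)->id);
        printf("\n");   /* prints: 1 2 0 */
        return 0;
    }
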
@@ -345,7 +369,6 @@ void mark_page_accessed(struct page *page)
                SetPageReferenced(page);
        }
 }
-
 EXPORT_SYMBOL(mark_page_accessed);
 
 void __lru_cache_add(struct page *page, enum lru_list lru)
@@ -354,7 +377,7 @@ void __lru_cache_add(struct page *page, enum lru_list lru)
 
        page_cache_get(page);
        if (!pagevec_add(pvec, page))
-               ____pagevec_lru_add(pvec, lru);
+               __pagevec_lru_add(pvec, lru);
        put_cpu_var(lru_add_pvecs);
 }
 EXPORT_SYMBOL(__lru_cache_add);
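
__lru_cache_add() shows the pagevec idiom that the renamed __pagevec_lru_add() serves: take a reference, stash the page in a small per-CPU array, and pay for the locked LRU insertion only once the array fills (PAGEVEC_SIZE is 14 here). A hedged sketch of that batch-and-flush shape with invented names:

    #include <stdio.h>

    #define BATCH 14    /* PAGEVEC_SIZE in this era of the kernel */

    struct batch {
        unsigned nr;
        int items[BATCH];
    };

    /* Stands in for __pagevec_lru_add(): one lock, many insertions. */
    static void flush(struct batch *b)
    {
        printf("flushing %u items under one lock\n", b->nr);
        b->nr = 0;
    }

    static void batch_add(struct batch *b, int item)
    {
        b->items[b->nr++] = item;
        if (b->nr == BATCH)
            flush(b);
    }

    int main(void)
    {
        struct batch b = { 0 };

        for (int i = 0; i < 30; i++)
            batch_add(&b, i);
        if (b.nr)
            flush(&b);  /* drain the leftovers, like lru_add_drain() */
        return 0;
    }
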
@@ -453,12 +476,13 @@ static void lru_deactivate_fn(struct page *page, void *arg)
                 */
                SetPageReclaim(page);
        } else {
+               struct lruvec *lruvec;
                /*
                 * The page's writeback ended while it was in the pagevec;
                 * move the page to the tail of the inactive list.
                 */
-               list_move_tail(&page->lru, &zone->lru[lru].list);
-               mem_cgroup_rotate_reclaimable_page(page);
+               lruvec = mem_cgroup_lru_move_lists(zone, page, lru, lru);
+               list_move_tail(&page->lru, &lruvec->lists[lru]);
                __count_vm_event(PGROTATED);
        }
 
@@ -472,7 +496,7 @@ static void lru_deactivate_fn(struct page *page, void *arg)
  * Either "cpu" is the current CPU, and preemption has already been
  * disabled; or "cpu" is being hot-unplugged, and is already dead.
  */
-static void drain_cpu_pagevecs(int cpu)
+void lru_add_drain_cpu(int cpu)
 {
        struct pagevec *pvecs = per_cpu(lru_add_pvecs, cpu);
        struct pagevec *pvec;
@@ -481,7 +505,7 @@ static void drain_cpu_pagevecs(int cpu)
        for_each_lru(lru) {
                pvec = &pvecs[lru - LRU_BASE];
                if (pagevec_count(pvec))
-                       ____pagevec_lru_add(pvec, lru);
+                       __pagevec_lru_add(pvec, lru);
        }
 
        pvec = &per_cpu(lru_rotate_pvecs, cpu);
@@ -529,7 +553,7 @@ void deactivate_page(struct page *page)
 
 void lru_add_drain(void)
 {
-       drain_cpu_pagevecs(get_cpu());
+       lru_add_drain_cpu(get_cpu());
        put_cpu();
 }
 
@@ -562,11 +586,10 @@ int lru_add_drain_all(void)
 void release_pages(struct page **pages, int nr, int cold)
 {
        int i;
-       struct pagevec pages_to_free;
+       LIST_HEAD(pages_to_free);
        struct zone *zone = NULL;
        unsigned long uninitialized_var(flags);
 
-       pagevec_init(&pages_to_free, cold);
        for (i = 0; i < nr; i++) {
                struct page *page = pages[i];
 
@@ -594,22 +617,15 @@ void release_pages(struct page **pages, int nr, int cold)
                        }
                        VM_BUG_ON(!PageLRU(page));
                        __ClearPageLRU(page);
-                       del_page_from_lru(zone, page);
+                       del_page_from_lru_list(zone, page, page_off_lru(page));
                }
 
-               if (!pagevec_add(&pages_to_free, page)) {
-                       if (zone) {
-                               spin_unlock_irqrestore(&zone->lru_lock, flags);
-                               zone = NULL;
-                       }
-                       __pagevec_free(&pages_to_free);
-                       pagevec_reinit(&pages_to_free);
-               }
+               list_add(&page->lru, &pages_to_free);
        }
        if (zone)
                spin_unlock_irqrestore(&zone->lru_lock, flags);
 
-       pagevec_free(&pages_to_free);
+       free_hot_cold_page_list(&pages_to_free, cold);
 }
 EXPORT_SYMBOL(release_pages);
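
The rewrite of release_pages() drops the bounded pagevec, which forced a zone->lru_lock release and a __pagevec_free() every time it filled, and instead threads dead pages through their now-unused page->lru linkage, handing the whole unbounded list to free_hot_cold_page_list() once. A sketch of that shape, with free_all() as an invented stand-in for the final bulk free:

    #include <pthread.h>
    #include <stdlib.h>

    struct node {
        struct node *next;      /* plays the page->lru linkage */
    };

    static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Free the accumulated list in one pass, with no lock held. */
    static void free_all(struct node *head)
    {
        while (head) {
            struct node *next = head->next;
            free(head);
            head = next;
        }
    }

    static void release_nodes(struct node **nodes, int nr)
    {
        struct node *to_free = NULL;

        pthread_mutex_lock(&big_lock);
        for (int i = 0; i < nr; i++) {
            /* ... per-node bookkeeping that needs the lock ... */
            nodes[i]->next = to_free;   /* thread onto the local list */
            to_free = nodes[i];
        }
        pthread_mutex_unlock(&big_lock);

        free_all(to_free);  /* unbounded batch, freed outside the lock */
    }

    int main(void)
    {
        struct node *nodes[3];

        for (int i = 0; i < 3; i++)
            nodes[i] = calloc(1, sizeof(*nodes[i]));
        release_nodes(nodes, 3);
        return 0;
    }
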
 
@@ -629,22 +645,21 @@ void __pagevec_release(struct pagevec *pvec)
        release_pages(pvec->pages, pagevec_count(pvec), pvec->cold);
        pagevec_reinit(pvec);
 }
-
 EXPORT_SYMBOL(__pagevec_release);
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 /* used by __split_huge_page_refcount() */
 void lru_add_page_tail(struct zone* zone,
                       struct page *page, struct page *page_tail)
 {
-       int active;
+       int uninitialized_var(active);
        enum lru_list lru;
        const int file = 0;
-       struct list_head *head;
 
        VM_BUG_ON(!PageHead(page));
        VM_BUG_ON(PageCompound(page_tail));
        VM_BUG_ON(PageLRU(page_tail));
-       VM_BUG_ON(!spin_is_locked(&zone->lru_lock));
+       VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&zone->lru_lock));
 
        SetPageLRU(page_tail);
 
@@ -657,19 +672,33 @@ void lru_add_page_tail(struct zone* zone,
                        active = 0;
                        lru = LRU_INACTIVE_ANON;
                }
-               update_page_reclaim_stat(zone, page_tail, file, active);
-               if (likely(PageLRU(page)))
-                       head = page->lru.prev;
-               else
-                       head = &zone->lru[lru].list;
-               __add_page_to_lru_list(zone, page_tail, lru, head);
        } else {
                SetPageUnevictable(page_tail);
-               add_page_to_lru_list(zone, page_tail, LRU_UNEVICTABLE);
+               lru = LRU_UNEVICTABLE;
        }
+
+       if (likely(PageLRU(page)))
+               list_add_tail(&page_tail->lru, &page->lru);
+       else {
+               struct list_head *list_head;
+               /*
+                * Head page has not yet been counted, as an hpage,
+                * so we must account for each subpage individually.
+                *
+                * Use the standard add function to put page_tail on the list,
+                * but then correct its position so they all end up in order.
+                */
+               add_page_to_lru_list(zone, page_tail, lru);
+               list_head = page_tail->lru.prev;
+               list_move_tail(&page_tail->lru, list_head);
+       }
+
+       if (!PageUnevictable(page))
+               update_page_reclaim_stat(zone, page_tail, file, active);
 }
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
-static void ____pagevec_lru_add_fn(struct page *page, void *arg)
+static void __pagevec_lru_add_fn(struct page *page, void *arg)
 {
        enum lru_list lru = (enum lru_list)arg;
        struct zone *zone = page_zone(page);
@@ -683,40 +712,21 @@ static void ____pagevec_lru_add_fn(struct page *page, void *arg)
        SetPageLRU(page);
        if (active)
                SetPageActive(page);
-       update_page_reclaim_stat(zone, page, file, active);
        add_page_to_lru_list(zone, page, lru);
+       update_page_reclaim_stat(zone, page, file, active);
 }
 
 /*
  * Add the passed pages to the LRU, then drop the caller's refcount
  * on them.  Reinitialises the caller's pagevec.
  */
-void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru)
+void __pagevec_lru_add(struct pagevec *pvec, enum lru_list lru)
 {
        VM_BUG_ON(is_unevictable_lru(lru));
 
-       pagevec_lru_move_fn(pvec, ____pagevec_lru_add_fn, (void *)lru);
-}
-
-EXPORT_SYMBOL(____pagevec_lru_add);
-
-/*
- * Try to drop buffers from the pages in a pagevec
- */
-void pagevec_strip(struct pagevec *pvec)
-{
-       int i;
-
-       for (i = 0; i < pagevec_count(pvec); i++) {
-               struct page *page = pvec->pages[i];
-
-               if (page_has_private(page) && trylock_page(page)) {
-                       if (page_has_private(page))
-                               try_to_release_page(page, 0);
-                       unlock_page(page);
-               }
-       }
+       pagevec_lru_move_fn(pvec, __pagevec_lru_add_fn, (void *)lru);
 }
+EXPORT_SYMBOL(__pagevec_lru_add);
 
 /**
  * pagevec_lookup - gang pagecache lookup
@@ -740,7 +750,6 @@ unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping,
        pvec->nr = find_get_pages(mapping, start, nr_pages, pvec->pages);
        return pagevec_count(pvec);
 }
-
 EXPORT_SYMBOL(pagevec_lookup);
 
 unsigned pagevec_lookup_tag(struct pagevec *pvec, struct address_space *mapping,
@@ -750,7 +759,6 @@ unsigned pagevec_lookup_tag(struct pagevec *pvec, struct address_space *mapping,
                                        nr_pages, pvec->pages);
        return pagevec_count(pvec);
 }
-
 EXPORT_SYMBOL(pagevec_lookup_tag);
 
 /*