mlock: count attempts to free mlocked page
Lee Schermerhorn [Sun, 19 Oct 2008 03:26:53 +0000 (20:26 -0700)]
Allow freeing of mlock()ed pages.  This shouldn't happen, but during
development, it occasionally did.

This patch allows us to survive that condition, while keeping the
statistics and events correct for debugging.

Signed-off-by: Lee Schermerhorn <lee.schermerhorn@hp.com>
Signed-off-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
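
A minimal userspace sketch (not part of the patch): assuming a kernel built
with CONFIG_UNEVICTABLE_LRU and CONFIG_VM_EVENT_COUNTERS and this change
applied, the new event is exported through /proc/vmstat under the vmstat_text
name added below and can be read like any other vm event counter:

#include <stdio.h>

int main(void)
{
	FILE *fp = fopen("/proc/vmstat", "r");
	char line[128];
	unsigned long long count;

	if (!fp) {
		perror("/proc/vmstat");
		return 1;
	}

	/* scan for the counter this patch adds to vmstat_text */
	while (fgets(line, sizeof(line), fp)) {
		if (sscanf(line, "unevictable_pgs_mlockfreed %llu", &count) == 1)
			printf("mlocked pages seen at free: %llu\n", count);
	}

	fclose(fp);
	return 0;
}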

include/linux/vmstat.h
mm/internal.h
mm/page_alloc.c
mm/vmstat.c

diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 05b8050..9cd3ab0 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -49,6 +49,7 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
                UNEVICTABLE_PGMUNLOCKED,
                UNEVICTABLE_PGCLEARED,  /* on COW, page truncate */
                UNEVICTABLE_PGSTRANDED, /* unable to isolate on unlock */
+               UNEVICTABLE_MLOCKFREED,
 #endif
                NR_VM_EVENT_ITEMS
 };
diff --git a/mm/internal.h b/mm/internal.h
index 1cfbf2e..e4e728b 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -146,6 +146,22 @@ static inline void mlock_migrate_page(struct page *newpage, struct page *page)
        }
 }
 
+/*
+ * free_page_mlock() -- clean up attempts to free an mlocked() page.
+ * Page should not be on lru, so no need to fix that up.
+ * free_pages_check() will verify...
+ */
+static inline void free_page_mlock(struct page *page)
+{
+       if (unlikely(TestClearPageMlocked(page))) {
+               unsigned long flags;
+
+               local_irq_save(flags);
+               __dec_zone_page_state(page, NR_MLOCK);
+               __count_vm_event(UNEVICTABLE_MLOCKFREED);
+               local_irq_restore(flags);
+       }
+}
 
 #else /* CONFIG_UNEVICTABLE_LRU */
 static inline int is_mlocked_vma(struct vm_area_struct *v, struct page *p)
@@ -155,6 +171,7 @@ static inline int is_mlocked_vma(struct vm_area_struct *v, struct page *p)
 static inline void clear_page_mlock(struct page *page) { }
 static inline void mlock_vma_page(struct page *page) { }
 static inline void mlock_migrate_page(struct page *new, struct page *old) { }
+static inline void free_page_mlock(struct page *page) { }
 
 #endif /* CONFIG_UNEVICTABLE_LRU */
 
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 5886586..cfbadad 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -454,6 +454,7 @@ static inline void __free_one_page(struct page *page,
 
 static inline int free_pages_check(struct page *page)
 {
+       free_page_mlock(page);
        if (unlikely(page_mapcount(page) |
                (page->mapping != NULL)  |
                (page_get_page_cgroup(page) != NULL) |
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 9e28abc..9343227 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -689,6 +689,7 @@ static const char * const vmstat_text[] = {
        "unevictable_pgs_munlocked",
        "unevictable_pgs_cleared",
        "unevictable_pgs_stranded",
+       "unevictable_pgs_mlockfreed",
 #endif
 #endif
 };