fault-injection: add ability to export fault_attr in arbitrary directory
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 2b9a5eef39e0661d48e7a9976780345587aff80c..740c4f52059cef1bff55fb4293ec41218967fc02 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -52,6 +52,7 @@
 #include <linux/swapops.h>
 #include <linux/hugetlb.h>
 #include <linux/memory_hotplug.h>
+#include <linux/mm_inline.h>
 #include "internal.h"
 
 int sysctl_memory_failure_early_kill __read_mostly = 0;
@@ -239,7 +240,11 @@ void shake_page(struct page *p, int access)
        if (access) {
                int nr;
                do {
-                       nr = shrink_slab(1000, GFP_KERNEL, 1000);
+                       struct shrink_control shrink = {
+                               .gfp_mask = GFP_KERNEL,
+                       };
+
+                       nr = shrink_slab(&shrink, 1000, 1000);
                        if (page_count(p) == 1)
                                break;
                } while (nr > 10);
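
Reviewer note: this hunk tracks the reclaim API change in which shrink_slab()
stopped taking a gfp mask directly and instead receives a struct
shrink_control. A minimal sketch of the assumed v3.0-era declarations (the
nr_to_scan field is an assumption from that kernel's include/linux/mm.h; the
parameter names are illustrative):

	struct shrink_control {
		gfp_t gfp_mask;			/* allocation context for the shrinkers */
		unsigned long nr_to_scan;	/* set per-shrinker by reclaim itself */
	};

	/* returns the number of slab objects reclaimed */
	unsigned long shrink_slab(struct shrink_control *shrink,
				  unsigned long nr_pages_scanned,
				  unsigned long lru_pages);

shake_page() keeps retrying while shrink_slab() reports progress (nr > 10),
so only the gfp mask moves into the struct; the pressure arguments stay.
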
@@ -386,10 +391,11 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill,
        struct task_struct *tsk;
        struct anon_vma *av;
 
-       read_lock(&tasklist_lock);
        av = page_lock_anon_vma(page);
        if (av == NULL) /* Not actually mapped anymore */
-               goto out;
+               return;
+
+       read_lock(&tasklist_lock);
        for_each_process (tsk) {
                struct anon_vma_chain *vmac;
 
@@ -403,9 +409,8 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill,
                                add_to_kill(tsk, page, vma, to_kill, tkc);
                }
        }
-       page_unlock_anon_vma(av);
-out:
        read_unlock(&tasklist_lock);
+       page_unlock_anon_vma(av);
 }
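
Reviewer note: the two hunks above reorder the locks in collect_procs_anon().
Assuming the v3.0-era conversion of the anon_vma lock to a sleeping mutex,
page_lock_anon_vma() may sleep and so cannot be called while holding the
non-sleeping tasklist_lock rwlock; the early return for an unmapped page also
no longer needs the task list at all. The resulting nesting, as a sketch:

	av = page_lock_anon_vma(page);	/* sleeping lock: take first */
	if (av == NULL)
		return;			/* not mapped anymore, nothing to do */
	read_lock(&tasklist_lock);	/* spinning rwlock: strictly inside */
	/* ... for_each_process(tsk) ... add_to_kill(...) ... */
	read_unlock(&tasklist_lock);
	page_unlock_anon_vma(av);	/* release in reverse order */
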
 
 /*
@@ -419,17 +424,8 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
        struct prio_tree_iter iter;
        struct address_space *mapping = page->mapping;
 
-       /*
-        * A note on the locking order between the two locks.
-        * We don't rely on this particular order.
-        * If you have some other code that needs a different order
-        * feel free to switch them around. Or add a reverse link
-        * from mm_struct to task_struct, then this could be all
-        * done without taking tasklist_lock and looping over all tasks.
-        */
-
+       mutex_lock(&mapping->i_mmap_mutex);
        read_lock(&tasklist_lock);
-       spin_lock(&mapping->i_mmap_lock);
        for_each_process(tsk) {
                pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
 
@@ -449,8 +445,8 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
                                add_to_kill(tsk, page, vma, to_kill, tkc);
                }
        }
-       spin_unlock(&mapping->i_mmap_lock);
        read_unlock(&tasklist_lock);
+       mutex_unlock(&mapping->i_mmap_mutex);
 }
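
Reviewer note: same pattern for the file-backed path. With i_mmap_lock
replaced by the sleeping i_mmap_mutex, the deleted comment saying the two
lock orders were interchangeable no longer holds: a mutex must not be
acquired inside the spinning tasklist_lock, so the mutex is now taken first
and dropped last. A sketch of the fixed ordering, assuming the v3.0 mutex
conversion:

	mutex_lock(&mapping->i_mmap_mutex);	/* may sleep: outermost */
	read_lock(&tasklist_lock);		/* non-sleeping: innermost */
	/* ... walk each task's VMAs via the prio tree ... */
	read_unlock(&tasklist_lock);
	mutex_unlock(&mapping->i_mmap_mutex);
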
 
 /*
@@ -1440,16 +1436,12 @@ int soft_offline_page(struct page *page, int flags)
         */
        ret = invalidate_inode_page(page);
        unlock_page(page);
-
        /*
-        * Drop count because page migration doesn't like raised
-        * counts. The page could get re-allocated, but if it becomes
-        * LRU the isolation will just fail.
         * RED-PEN would be better to keep it isolated here, but we
         * would need to fix isolation locking first.
         */
-       put_page(page);
        if (ret == 1) {
+               put_page(page);
                ret = 0;
                pr_info("soft_offline: %#lx: invalidated\n", pfn);
                goto done;
@@ -1461,9 +1453,15 @@ int soft_offline_page(struct page *page, int flags)
         * handles a large number of cases for us.
         */
        ret = isolate_lru_page(page);
+       /*
+        * Drop the page reference taken by get_any_page(): a
+        * successful isolate_lru_page() has already taken another one.
+        */
+       put_page(page);
        if (!ret) {
                LIST_HEAD(pagelist);
-
+               inc_zone_page_state(page, NR_ISOLATED_ANON +
+                                           page_is_file_cache(page));
                list_add(&page->lru, &pagelist);
                ret = migrate_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL,
                                                                0, true);
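
Reviewer note: the soft_offline_page() hunks rebalance the page reference
counts around migration, and the new page_is_file_cache() call is why
linux/mm_inline.h is now included at the top. A sketch of the reference flow
these hunks establish (names match the diff; this is not the complete
function):

	/* get_any_page() gave us one reference on entry */
	ret = isolate_lru_page(page);	/* on success: takes its own reference */
	put_page(page);			/* drop ours either way */
	if (!ret) {
		/*
		 * Balance the zone's isolated-page accounting so that
		 * putback_lru_pages(), run if migration fails, can
		 * decrement it again.
		 */
		inc_zone_page_state(page, NR_ISOLATED_ANON +
					  page_is_file_cache(page));
		/* ... migrate_pages(...) ... */
	}

In the invalidate_inode_page() success path (ret == 1) the page was never
isolated, so only our own reference is dropped before returning.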