diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index ed6e91c..17299fd 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
 #include <linux/kernel.h>
 #include <linux/mm.h>
 #include <linux/page-flags.h>
+#include <linux/kernel-page-flags.h>
 #include <linux/sched.h>
 #include <linux/ksm.h>
 #include <linux/rmap.h>
 #include <linux/pagemap.h>
 #include <linux/swap.h>
 #include <linux/backing-dev.h>
+#include <linux/migrate.h>
+#include <linux/page-isolation.h>
+#include <linux/suspend.h>
 #include "internal.h"
 
 int sysctl_memory_failure_early_kill __read_mostly = 0;
@@ -48,6 +52,129 @@ int sysctl_memory_failure_recovery __read_mostly = 1;
 
 atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
 
+#if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
+
+u32 hwpoison_filter_enable = 0;
+u32 hwpoison_filter_dev_major = ~0U;
+u32 hwpoison_filter_dev_minor = ~0U;
+u64 hwpoison_filter_flags_mask;
+u64 hwpoison_filter_flags_value;
+EXPORT_SYMBOL_GPL(hwpoison_filter_enable);
+EXPORT_SYMBOL_GPL(hwpoison_filter_dev_major);
+EXPORT_SYMBOL_GPL(hwpoison_filter_dev_minor);
+EXPORT_SYMBOL_GPL(hwpoison_filter_flags_mask);
+EXPORT_SYMBOL_GPL(hwpoison_filter_flags_value);
+
+static int hwpoison_filter_dev(struct page *p)
+{
+       struct address_space *mapping;
+       dev_t dev;
+
+       if (hwpoison_filter_dev_major == ~0U &&
+           hwpoison_filter_dev_minor == ~0U)
+               return 0;
+
+       /*
+        * page_mapping() does not accept slab pages.
+        */
+       if (PageSlab(p))
+               return -EINVAL;
+
+       mapping = page_mapping(p);
+       if (mapping == NULL || mapping->host == NULL)
+               return -EINVAL;
+
+       dev = mapping->host->i_sb->s_dev;
+       if (hwpoison_filter_dev_major != ~0U &&
+           hwpoison_filter_dev_major != MAJOR(dev))
+               return -EINVAL;
+       if (hwpoison_filter_dev_minor != ~0U &&
+           hwpoison_filter_dev_minor != MINOR(dev))
+               return -EINVAL;
+
+       return 0;
+}
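
A minimal configuration sketch (values illustrative, not part of this patch): the two dev knobs confine handling to pages backed by one block device. In practice they would be set through the companion injector's debugfs files rather than from code, but the effect is equivalent to:

static void __init hwpoison_filter_dev_example(void)	/* hypothetical */
{
	/* Only act on pages backed by device 8:1 (e.g. /dev/sda1). */
	hwpoison_filter_enable = 1;
	hwpoison_filter_dev_major = 8;
	hwpoison_filter_dev_minor = 1;
}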
+
+static int hwpoison_filter_flags(struct page *p)
+{
+       if (!hwpoison_filter_flags_mask)
+               return 0;
+
+       if ((stable_page_flags(p) & hwpoison_filter_flags_mask) ==
+                                   hwpoison_filter_flags_value)
+               return 0;
+       else
+               return -EINVAL;
+}
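
Since the comparison uses stable_page_flags(), the mask and value are in the KPF_* encoding of <linux/kernel-page-flags.h>, which this patch now includes. A hedged sketch that would restrict handling to dirty LRU pages (again, real users would set these through the injector's knobs):

#include <linux/kernel-page-flags.h>

static void __init hwpoison_filter_flags_example(void)	/* hypothetical */
{
	u64 bits = (1ULL << KPF_LRU) | (1ULL << KPF_DIRTY);

	hwpoison_filter_flags_mask  = bits;	/* consider these bits... */
	hwpoison_filter_flags_value = bits;	/* ...and require all of them set */
}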
+
+/*
+ * This allows stress tests to limit test scope to a collection of tasks
+ * by putting them under some memcg. This prevents killing unrelated/important
+ * processes such as /sbin/init. Note that the target task may share clean
+ * pages with init (e.g. libc text), which is harmless. If the target task
+ * shares _dirty_ pages with another task B, the test scheme must make sure B
+ * is also included in the memcg. Finally, due to race conditions this filter
+ * can only guarantee that the page either belongs to the memcg tasks, or is
+ * a freed page.
+ */
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
+u64 hwpoison_filter_memcg;
+EXPORT_SYMBOL_GPL(hwpoison_filter_memcg);
+static int hwpoison_filter_task(struct page *p)
+{
+       struct mem_cgroup *mem;
+       struct cgroup_subsys_state *css;
+       unsigned long ino;
+
+       if (!hwpoison_filter_memcg)
+               return 0;
+
+       mem = try_get_mem_cgroup_from_page(p);
+       if (!mem)
+               return -EINVAL;
+
+       css = mem_cgroup_css(mem);
+       /* root_mem_cgroup has NULL dentries */
+       if (!css->cgroup->dentry) {
+               css_put(css);
+               return -EINVAL;
+       }
+
+       ino = css->cgroup->dentry->d_inode->i_ino;
+       css_put(css);
+
+       if (ino != hwpoison_filter_memcg)
+               return -EINVAL;
+
+       return 0;
+}
+#else
+static int hwpoison_filter_task(struct page *p) { return 0; }
+#endif
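
The value matched against hwpoison_filter_memcg is the inode number of the memcg's cgroup directory, so a test harness can obtain it with stat(2). A userspace sketch, assuming the cgroup is mounted at /cgroup/hwpoison (path and knob wiring are assumptions, not part of this patch):

#include <stdio.h>
#include <sys/types.h>
#include <sys/stat.h>

int main(void)
{
	struct stat st;

	if (stat("/cgroup/hwpoison", &st))	/* assumed mount point */
		return 1;
	/* This is the number to feed into the memcg filter knob. */
	printf("%lu\n", (unsigned long)st.st_ino);
	return 0;
}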
+
+int hwpoison_filter(struct page *p)
+{
+       if (!hwpoison_filter_enable)
+               return 0;
+
+       if (hwpoison_filter_dev(p))
+               return -EINVAL;
+
+       if (hwpoison_filter_flags(p))
+               return -EINVAL;
+
+       if (hwpoison_filter_task(p))
+               return -EINVAL;
+
+       return 0;
+}
+#else
+int hwpoison_filter(struct page *p)
+{
+       return 0;
+}
+#endif
+
+EXPORT_SYMBOL_GPL(hwpoison_filter);
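
hwpoison_filter() is exported so an injector can skip unselected pages before doing any real work; __memory_failure() additionally rechecks it under the page lock (see below). A sketch of an injector-side caller, modeled on the companion hwpoison-inject module (the trap number 18 mimics an MCE and is illustrative):

static int hwpoison_inject_example(unsigned long pfn)	/* hypothetical */
{
	struct page *p;

	if (!pfn_valid(pfn))
		return -ENXIO;
	p = pfn_to_page(pfn);
	/* Skip pages the configured filters do not select. */
	if (hwpoison_filter(p))
		return 0;
	return __memory_failure(pfn, 18, 0);
}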
+
 /*
  * Send all the processes who have the page mapped an ``action optional''
  * signal.
@@ -86,7 +213,7 @@ static int kill_proc_ao(struct task_struct *t, unsigned long addr, int trapno,
  * When an unknown page type is encountered, drain as many buffers as
  * possible in the hope of turning the page into an LRU or free page,
  * which we can handle.
  */
-void shake_page(struct page *p)
+void shake_page(struct page *p, int access)
 {
        if (!PageSlab(p)) {
                lru_add_drain_all();
@@ -96,11 +223,19 @@ void shake_page(struct page *p)
                if (PageLRU(p) || is_free_buddy_page(p))
                        return;
        }
+
        /*
-        * Could call shrink_slab here (which would also
-        * shrink other caches). Unfortunately that might
-        * also access the corrupted page, which could be fatal.
+        * Only call shrink_slab here (which would also
+        * shrink other caches) if access is not potentially fatal.
         */
+       if (access) {
+               int nr;
+               do {
+                       nr = shrink_slab(1000, GFP_KERNEL, 1000);
+                       if (page_count(p) == 0)
+                               break;
+               } while (nr > 10);
+       }
 }
 EXPORT_SYMBOL_GPL(shake_page);
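
The new access argument says whether the page's contents may still be touched. Schematically, the two call patterns that appear later in this patch:

	/* Hard error: contents suspect, don't let shrink_slab() near it. */
	shake_page(p, 0);

	/* Soft offline: contents still valid, slab shrinking is safe. */
	shake_page(page, 1);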
 
@@ -199,7 +334,6 @@ static void kill_procs_ao(struct list_head *to_kill, int doit, int trapno,
                         * In case something went wrong with munmapping
                         * make sure the process doesn't catch the
                         * signal and then access the memory. Just kill it.
-                        * the signal handlers
                         */
                        if (fail || tk->addr_valid == 0) {
                                printk(KERN_ERR
@@ -336,16 +470,16 @@ static void collect_procs(struct page *page, struct list_head *tokill)
  */
 
 enum outcome {
-       FAILED,         /* Error handling failed */
+       IGNORED,        /* Error: cannot be handled */
+       FAILED,         /* Error: handling failed */
        DELAYED,        /* Will be handled later */
-       IGNORED,        /* Error safely ignored */
        RECOVERED,      /* Successfully recovered */
 };
 
 static const char *action_name[] = {
+       [IGNORED] = "Ignored",
        [FAILED] = "Failed",
        [DELAYED] = "Delayed",
-       [IGNORED] = "Ignored",
        [RECOVERED] = "Recovered",
 };
 
@@ -380,14 +514,6 @@ static int delete_from_lru_cache(struct page *p)
  */
 static int me_kernel(struct page *p, unsigned long pfn)
 {
-       return DELAYED;
-}
-
-/*
- * Already poisoned page.
- */
-static int me_ignore(struct page *p, unsigned long pfn)
-{
        return IGNORED;
 }
 
@@ -604,7 +730,7 @@ static struct page_state {
        char *msg;
        int (*action)(struct page *p, unsigned long pfn);
 } error_states[] = {
-       { reserved,     reserved,       "reserved kernel",      me_ignore },
+       { reserved,     reserved,       "reserved kernel",      me_kernel },
        /*
         * free pages are specially detected outside this table:
         * PG_buddy pages only make a small fraction of all free pages.
@@ -642,6 +768,19 @@ static struct page_state {
        { 0,            0,              "unknown page state",   me_unknown },
 };
 
+#undef dirty
+#undef sc
+#undef unevict
+#undef mlock
+#undef writeback
+#undef lru
+#undef swapbacked
+#undef head
+#undef tail
+#undef compound
+#undef slab
+#undef reserved
+
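For reference: each error_states[] entry carries a flags mask and expected value (the mask/res fields sit just above this hunk in the full file), and the table is scanned in order until an entry's masked page flags match, with the final { 0, 0, ... } entry catching everything. A sketch equivalent to the real lookup loop in __memory_failure():

static struct page_state *lookup_page_state(struct page *p)	/* sketch */
{
	struct page_state *ps;

	for (ps = error_states; ps->mask; ps++)
		if ((p->flags & ps->mask) == ps->res)
			return ps;
	return ps;	/* terminating "unknown page state" entry */
}
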
 static void action_result(unsigned long pfn, char *msg, int result)
 {
        struct page *page = pfn_to_page(pfn);
@@ -662,17 +801,21 @@ static int page_action(struct page_state *ps, struct page *p,
        action_result(pfn, ps->msg, result);
 
        count = page_count(p) - 1;
-       if (count != 0)
+       if (ps->action == me_swapcache_dirty && result == DELAYED)
+               count--;
+       if (count != 0) {
                printk(KERN_ERR
                       "MCE %#lx: %s page still referenced by %d users\n",
                       pfn, ps->msg, count);
+               result = FAILED;
+       }
 
        /* Could do more checks here if page looks ok */
        /*
         * Could adjust zone counters here to correct for the missing page.
         */
 
-       return result == RECOVERED ? 0 : -EBUSY;
+       return (result == RECOVERED || result == DELAYED) ? 0 : -EBUSY;
 }
 
 #define N_UNMAP_TRIES 5
@@ -788,7 +931,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
 
        p = pfn_to_page(pfn);
        if (TestSetPageHWPoison(p)) {
-               action_result(pfn, "already hardware poisoned", IGNORED);
+               printk(KERN_ERR "MCE %#lx: already hardware poisoned\n", pfn);
                return 0;
        }
 
@@ -825,8 +968,15 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
         * walked by the page reclaim code, however that's not a big loss.
         */
        if (!PageLRU(p))
-               lru_add_drain_all();
+               shake_page(p, 0);
        if (!PageLRU(p)) {
+               /*
+                * shake_page could have turned it free.
+                */
+               if (is_free_buddy_page(p)) {
+                       action_result(pfn, "free buddy, 2nd try", DELAYED);
+                       return 0;
+               }
                action_result(pfn, "non LRU", IGNORED);
                put_page(p);
                return -EBUSY;
@@ -843,10 +993,17 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
         * unpoison always clear PG_hwpoison inside page lock
         */
        if (!PageHWPoison(p)) {
-               action_result(pfn, "unpoisoned", IGNORED);
+               printk(KERN_ERR "MCE %#lx: just unpoisoned\n", pfn);
                res = 0;
                goto out;
        }
+       if (hwpoison_filter(p)) {
+               if (TestClearPageHWPoison(p))
+                       atomic_long_dec(&mce_bad_pages);
+               unlock_page(p);
+               put_page(p);
+               return 0;
+       }
 
        wait_on_page_writeback(p);
 
@@ -865,7 +1022,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
         */
        if (PageLRU(p) && !PageSwapCache(p) && p->mapping == NULL) {
                action_result(pfn, "already truncated LRU", IGNORED);
-               res = 0;
+               res = -EBUSY;
                goto out;
        }
 
@@ -961,3 +1118,177 @@ int unpoison_memory(unsigned long pfn)
        return 0;
 }
 EXPORT_SYMBOL(unpoison_memory);
+
+static struct page *new_page(struct page *p, unsigned long private, int **x)
+{
+       int nid = page_to_nid(p);
+       return alloc_pages_exact_node(nid, GFP_HIGHUSER_MOVABLE, 0);
+}
+
+/*
+ * Safely take a reference on an arbitrary page.
+ * Returns 0 for a free page, -EIO for a zero-refcount page
+ * that is not free, and 1 for any other page type.
+ * Only for a return of 1 does the page come back with its
+ * refcount raised; otherwise it does not.
+ */
+static int get_any_page(struct page *p, unsigned long pfn, int flags)
+{
+       int ret;
+
+       if (flags & MF_COUNT_INCREASED)
+               return 1;
+
+       /*
+        * lock_system_sleep() prevents a race with memory hotplug,
+        * because the isolation code assumes there's only a single user.
+        * This is a big hammer; a finer-grained mechanism would be nicer.
+        */
+       lock_system_sleep();
+
+       /*
+        * Isolate the page, so that it doesn't get reallocated if it
+        * was free.
+        */
+       set_migratetype_isolate(p);
+       if (!get_page_unless_zero(compound_head(p))) {
+               if (is_free_buddy_page(p)) {
+                       pr_debug("get_any_page: %#lx free buddy page\n", pfn);
+                       /* Set hwpoison bit while page is still isolated */
+                       SetPageHWPoison(p);
+                       ret = 0;
+               } else {
+                       pr_debug("get_any_page: %#lx: unknown zero refcount page type %lx\n",
+                               pfn, p->flags);
+                       ret = -EIO;
+               }
+       } else {
+               /* Not a free page */
+               ret = 1;
+       }
+       unset_migratetype_isolate(p);
+       unlock_system_sleep();
+       return ret;
+}
+
+/**
+ * soft_offline_page - Soft offline a page.
+ * @page: page to offline
+ * @flags: flags. Same as memory_failure().
+ *
+ * Returns 0 on success, otherwise negated errno.
+ *
+ * Soft offline a page, by migration or invalidation,
+ * without killing anything. This is for the case when
+ * a page is not corrupted yet (so it's still valid to access),
+ * but has had a number of corrected errors and is better taken
+ * out.
+ *
+ * The actual policy on when to do that is maintained by
+ * user space.
+ *
+ * This should never impact any application or cause data loss;
+ * however, it might take some time.
+ *
+ * This is not a 100% solution for all memory, but tries to be
+ * ``good enough'' for the majority of memory.
+ */
+int soft_offline_page(struct page *page, int flags)
+{
+       int ret;
+       unsigned long pfn = page_to_pfn(page);
+
+       ret = get_any_page(page, pfn, flags);
+       if (ret < 0)
+               return ret;
+       if (ret == 0)
+               goto done;
+
+       /*
+        * Page cache page we can handle?
+        */
+       if (!PageLRU(page)) {
+               /*
+                * Try to free it.
+                */
+               put_page(page);
+               shake_page(page, 1);
+
+               /*
+                * Did it turn free?
+                */
+               ret = get_any_page(page, pfn, 0);
+               if (ret < 0)
+                       return ret;
+               if (ret == 0)
+                       goto done;
+       }
+       if (!PageLRU(page)) {
+               pr_debug("soft_offline: %#lx: unknown non LRU page type %lx\n",
+                               pfn, page->flags);
+               return -EIO;
+       }
+
+       lock_page(page);
+       wait_on_page_writeback(page);
+
+       /*
+        * Synchronized using the page lock with memory_failure()
+        */
+       if (PageHWPoison(page)) {
+               unlock_page(page);
+               put_page(page);
+               pr_debug("soft offline: %#lx page already poisoned\n", pfn);
+               return -EBUSY;
+       }
+
+       /*
+        * Try to invalidate first. This should work for
+        * non-dirty, unmapped page cache pages.
+        */
+       ret = invalidate_inode_page(page);
+       unlock_page(page);
+
+       /*
+        * Drop count because page migration doesn't like raised
+        * counts. The page could get re-allocated, but if it becomes
+        * LRU the isolation will just fail.
+        * RED-PEN: it would be better to keep the page isolated here,
+        * but we would need to fix the isolation locking first.
+        */
+       put_page(page);
+       if (ret == 1) {
+               ret = 0;
+               pr_debug("soft_offline: %#lx: invalidated\n", pfn);
+               goto done;
+       }
+
+       /*
+        * Simple invalidation didn't work.
+        * Try to migrate to a new page instead. migrate.c
+        * handles a large number of cases for us.
+        */
+       ret = isolate_lru_page(page);
+       if (!ret) {
+               LIST_HEAD(pagelist);
+
+               list_add(&page->lru, &pagelist);
+               ret = migrate_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL, 0);
+               if (ret) {
+                       pr_debug("soft offline: %#lx: migration failed %d, type %lx\n",
+                               pfn, ret, page->flags);
+                       if (ret > 0)
+                               ret = -EIO;
+               }
+       } else {
+               pr_debug("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n",
+                               pfn, ret, page_count(page), page->flags);
+       }
+       if (ret)
+               return ret;
+
+done:
+       atomic_long_add(1, &mce_bad_pages);
+       SetPageHWPoison(page);
+       /* keep elevated page count for bad page */
+       return ret;
+}
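
A hedged caller sketch to close: the offlining policy lives in user space (e.g. a daemon watching corrected-error counts), which would reach this function through some kernel entry point; the wrapper below is illustrative only.

static void soft_offline_example(unsigned long pfn)	/* hypothetical */
{
	if (!pfn_valid(pfn))
		return;
	if (soft_offline_page(pfn_to_page(pfn), 0))
		pr_debug("soft offline of pfn %#lx failed\n", pfn);
}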