diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index b19c78e..e4b0991 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
 #include <linux/memcontrol.h>
 #include <linux/mempolicy.h>
 #include <linux/security.h>
+#include <linux/ptrace.h>
 
 int sysctl_panic_on_oom;
 int sysctl_oom_kill_allocating_task;
 int sysctl_oom_dump_tasks = 1;
 static DEFINE_SPINLOCK(zone_scan_lock);
 
+/**
+ * test_set_oom_score_adj() - set current's oom_score_adj and return old value
+ * @new_val: new oom_score_adj value
+ *
+ * Sets the oom_score_adj value for current to @new_val with proper
+ * synchronization and returns the old value.  Usually used to temporarily
+ * set a value, save the old value in the caller, and then reinstate it later.
+ */
+int test_set_oom_score_adj(int new_val)
+{
+       struct sighand_struct *sighand = current->sighand;
+       int old_val;
+
+       spin_lock_irq(&sighand->siglock);
+       old_val = current->signal->oom_score_adj;
+       if (new_val != old_val) {
+               if (new_val == OOM_SCORE_ADJ_MIN)
+                       atomic_inc(&current->mm->oom_disable_count);
+               else if (old_val == OOM_SCORE_ADJ_MIN)
+                       atomic_dec(&current->mm->oom_disable_count);
+               current->signal->oom_score_adj = new_val;
+       }
+       spin_unlock_irq(&sighand->siglock);
+
+       return old_val;
+}
+
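
The kernel-doc above describes a save/restore pattern. A minimal sketch of
the intended caller, where do_memory_hungry_work() is a hypothetical
placeholder (OOM_SCORE_ADJ_MAX is the real kernel constant):

        int old_adj;

        /* Make current the preferred OOM victim while the work runs. */
        old_adj = test_set_oom_score_adj(OOM_SCORE_ADJ_MAX);
        do_memory_hungry_work();        /* hypothetical placeholder */
        test_set_oom_score_adj(old_adj);
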
 #ifdef CONFIG_NUMA
 /**
  * has_intersects_mems_allowed() - check task eligibility for kill
@@ -83,24 +111,6 @@ static bool has_intersects_mems_allowed(struct task_struct *tsk,
 #endif /* CONFIG_NUMA */
 
 /*
- * If this is a system OOM (not a memcg OOM) and the task selected to be
- * killed is not already running at high (RT) priorities, speed up the
- * recovery by boosting the dying task to the lowest FIFO priority.
- * That helps with the recovery and avoids interfering with RT tasks.
- */
-static void boost_dying_task_prio(struct task_struct *p,
-                                 struct mem_cgroup *mem)
-{
-       struct sched_param param = { .sched_priority = 1 };
-
-       if (mem)
-               return;
-
-       if (!rt_task(p))
-               sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
-}
-
-/*
  * The process p may have detached its own ->mm while exiting or through
  * use_mm(), but one or more of its subthreads may still have a valid
  * pointer.  Return p, or any of its subthreads with a valid ->mm, with
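
This comment documents the find_lock_task_mm() helper, whose body is outside
this hunk. Roughly, assuming the 2.6.38-era implementation, it walks the
thread group and returns the first thread that still has a valid ->mm, with
task_lock() held on the returned thread:

        struct task_struct *find_lock_task_mm(struct task_struct *p)
        {
                struct task_struct *t = p;

                do {
                        task_lock(t);
                        if (likely(t->mm))
                                return t;       /* task_lock(t) stays held */
                        task_unlock(t);
                } while_each_thread(p, t);

                return NULL;
        }
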
@@ -172,15 +182,6 @@ unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *mem,
        }
 
        /*
-        * When the PF_OOM_ORIGIN bit is set, it indicates the task should have
-        * priority for oom killing.
-        */
-       if (p->flags & PF_OOM_ORIGIN) {
-               task_unlock(p);
-               return 1000;
-       }
-
-       /*
         * The memory controller may have a limit of 0 bytes, so avoid a divide
         * by zero, if necessary.
         */
@@ -189,10 +190,13 @@ unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *mem,
 
        /*
         * The baseline for the badness score is the proportion of RAM that each
-        * task's rss and swap space use.
+        * task's rss, pagetable and swap space use.
         */
-       points = (get_mm_rss(p->mm) + get_mm_counter(p->mm, MM_SWAPENTS)) * 1000 /
-                       totalpages;
+       points = get_mm_rss(p->mm) + p->mm->nr_ptes;
+       points += get_mm_counter(p->mm, MM_SWAPENTS);
+
+       points *= 1000;
+       points /= totalpages;
        task_unlock(p);
 
        /*
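
To make the new baseline concrete, take some made-up numbers: rss = 120000
pages, nr_ptes = 300, swap entries = 29700, and totalpages = 1048576 (4GB of
4KB pages). Then points = (120000 + 300 + 29700) * 1000 / 1048576 = 143
after the integer division above, i.e. the task is charged roughly 14.3% of
available memory. Counting nr_ptes means a task's page tables, which can be
sizable for large sparse mappings, now weigh against it as well.
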
@@ -292,13 +296,15 @@ static struct task_struct *select_bad_process(unsigned int *ppoints,
                unsigned long totalpages, struct mem_cgroup *mem,
                const nodemask_t *nodemask)
 {
-       struct task_struct *p;
+       struct task_struct *g, *p;
        struct task_struct *chosen = NULL;
        *ppoints = 0;
 
-       for_each_process(p) {
+       do_each_thread(g, p) {
                unsigned int points;
 
+               if (!p->mm)
+                       continue;
                if (oom_unkillable_task(p, mem, nodemask))
                        continue;
 
@@ -314,22 +320,29 @@ static struct task_struct *select_bad_process(unsigned int *ppoints,
                if (test_tsk_thread_flag(p, TIF_MEMDIE))
                        return ERR_PTR(-1UL);
 
-               /*
-                * This is in the process of releasing memory so wait for it
-                * to finish before killing some other task by mistake.
-                *
-                * However, if p is the current task, we allow the 'kill' to
-                * go ahead if it is exiting: this will simply set TIF_MEMDIE,
-                * which will allow it to gain access to memory reserves in
-                * the process of exiting and releasing its resources.
-                * Otherwise we could get an easy OOM deadlock.
-                */
-               if (thread_group_empty(p) && (p->flags & PF_EXITING) && p->mm) {
-                       if (p != current)
-                               return ERR_PTR(-1UL);
-
-                       chosen = p;
-                       *ppoints = 1000;
+               if (p->flags & PF_EXITING) {
+                       /*
+                        * If p is the current task and is in the process of
+                        * releasing memory, we allow the "kill" to set
+                        * TIF_MEMDIE, which will allow it to gain access to
+                        * memory reserves.  Otherwise, it may stall forever.
+                        *
+                        * The loop isn't broken here, however, in case other
+                        * threads are found to have already been oom killed.
+                        */
+                       if (p == current) {
+                               chosen = p;
+                               *ppoints = 1000;
+                       } else {
+                               /*
+                                * If this task is not being ptraced on exit,
+                                * then wait for it to finish before killing
+                                * some other task unnecessarily.
+                                */
+                               if (!(task_ptrace(p->group_leader) &
+                                                       PT_TRACE_EXIT))
+                                       return ERR_PTR(-1UL);
+                       }
                }
 
                points = oom_badness(p, mem, nodemask, totalpages);
@@ -337,7 +350,7 @@ static struct task_struct *select_bad_process(unsigned int *ppoints,
                        chosen = p;
                        *ppoints = points;
                }
-       }
+       } while_each_thread(g, p);
 
        return chosen;
 }
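
Taking stock of the select_bad_process() changes: for_each_process() only
visits thread-group leaders, so a leader that had already detached its ->mm
could mask killable subthreads; the do_each_thread()/while_each_thread() pair
visits every thread, and the new !p->mm check skips those with nothing to
free. A minimal sketch of the macro pair, assuming tasklist_lock is held as
it is on this path:

        struct task_struct *g, *p;

        read_lock(&tasklist_lock);
        do_each_thread(g, p) {
                /* p walks every thread; g tracks its thread-group leader */
        } while_each_thread(g, p);
        read_unlock(&tasklist_lock);
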
@@ -396,7 +409,7 @@ static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order,
        task_unlock(current);
        dump_stack();
        mem_cgroup_print_oom_info(mem, p);
-       show_mem();
+       show_mem(SHOW_MEM_FILTER_NODES);
        if (sysctl_oom_dump_tasks)
                dump_tasks(mem, nodemask);
 }
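
For reference, SHOW_MEM_FILTER_NODES asks show_mem() to skip per-node state
for nodes that are unreachable from current's context (cpuset or mempolicy
constraints), which keeps the OOM report focused on the nodes that actually
matter for the failing allocation.
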
@@ -442,13 +455,6 @@ static int oom_kill_task(struct task_struct *p, struct mem_cgroup *mem)
        set_tsk_thread_flag(p, TIF_MEMDIE);
        force_sig(SIGKILL, p);
 
-       /*
-        * We give our sacrificial lamb high priority and access to
-        * all the memory it needs. That way it should be able to
-        * exit() and clear out its resources quickly...
-        */
-       boost_dying_task_prio(p, mem);
-
        return 0;
 }
 #undef K
@@ -458,10 +464,10 @@ static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
                            struct mem_cgroup *mem, nodemask_t *nodemask,
                            const char *message)
 {
-       struct task_struct *victim;
+       struct task_struct *victim = p;
        struct task_struct *child;
-       struct task_struct *t;
-       unsigned int victim_points;
+       struct task_struct *t = p;
+       unsigned int victim_points = 0;
 
        if (printk_ratelimit())
                dump_header(p, gfp_mask, order, mem, nodemask);
@@ -472,7 +478,6 @@ static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
         */
        if (p->flags & PF_EXITING) {
                set_tsk_thread_flag(p, TIF_MEMDIE);
-               boost_dying_task_prio(p, mem);
                return 0;
        }
 
@@ -487,14 +492,11 @@ static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
         * parent.  This attempts to lose the minimal amount of work done while
         * still freeing memory.
         */
-       victim_points = oom_badness(p, mem, nodemask, totalpages);
-       victim = p;
-       t = p;
        do {
                list_for_each_entry(child, &t->children, sibling) {
                        unsigned int child_points;
 
-                       if (child->mm == t->mm)
+                       if (child->mm == p->mm)
                                continue;
                        /*
                         * oom_badness() returns 0 if the thread is unkillable
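
One detail worth spelling out: comparing child->mm against p->mm (rather than
t->mm, where t walks p's threads) anchors the test to the chosen victim's
address space. A child that shares that mm, for example a vfork() child that
has not yet exec'd, would free no additional memory if killed in the parent's
place, so it is skipped.
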
@@ -542,6 +544,16 @@ void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask)
        unsigned int points = 0;
        struct task_struct *p;
 
+       /*
+        * If current has a pending SIGKILL, then automatically select it.  The
+        * goal is to allow it to allocate so that it may quickly exit and free
+        * its memory.
+        */
+       if (fatal_signal_pending(current)) {
+               set_thread_flag(TIF_MEMDIE);
+               return;
+       }
+
        check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, 0, NULL);
        limit = mem_cgroup_get_limit(mem) >> PAGE_SHIFT;
        read_lock(&tasklist_lock);
@@ -694,7 +706,6 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
         */
        if (fatal_signal_pending(current)) {
                set_thread_flag(TIF_MEMDIE);
-               boost_dying_task_prio(current, NULL);
                return;
        }
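
Both fast paths work because TIF_MEMDIE is what lets a dying task dip into
memory reserves instead of looping in the allocator. A sketch of the relevant
check in the page allocator's slow path, roughly as it looked in kernels of
this vintage (mm/page_alloc.c, gfp_to_alloc_flags()):

        if (likely(!(gfp_mask & __GFP_NOMEMALLOC))) {
                if (!in_interrupt() &&
                    ((current->flags & PF_MEMALLOC) ||
                     unlikely(test_thread_flag(TIF_MEMDIE))))
                        alloc_flags |= ALLOC_NO_WATERMARKS;
        }

With ALLOC_NO_WATERMARKS set, the allocation may ignore zone watermarks
entirely, so the task can get the pages it needs to exit and release its
memory.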