memcontrol: move oom task exclusion to tasklist scan
[linux-2.6.git] / mm / memcontrol.c
index 10833d9..2fadd48 100644 (file)
@@ -170,6 +170,16 @@ static void __mem_cgroup_move_lists(struct page_cgroup *pc, bool active)
                list_move(&pc->lru, &pc->mem_cgroup->inactive_list);
 }
 
+/*
+ * task_in_mem_cgroup - test whether @task's mm is accounted to @mem.
+ *
+ * Returns 1 when @task has an mm whose memory cgroup is @mem,
+ * 0 otherwise (including for kernel threads, whose task->mm is NULL).
+ *
+ * NOTE(review): task_lock() is taken to stabilize task->mm across the
+ * dereference (e.g. against exit/exec tearing it down) -- confirm this
+ * is the locking rule the rest of the file relies on.
+ */
+int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
+{
+       int ret;
+
+       task_lock(task);
+       /* Kernel threads have no mm and therefore match no memory cgroup. */
+       ret = task->mm && mm_cgroup(task->mm) == mem;
+       task_unlock(task);
+       return ret;
+}
+
 /*
  * This routine assumes that the appropriate zone's lru lock is already held
  */
@@ -256,7 +266,8 @@ unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
  * 0 if the charge was successful
  * < 0 if the cgroup is over its limit
  */
-int mem_cgroup_charge(struct page *page, struct mm_struct *mm)
+int mem_cgroup_charge(struct page *page, struct mm_struct *mm,
+                               gfp_t gfp_mask)
 {
        struct mem_cgroup *mem;
        struct page_cgroup *pc, *race_pc;
@@ -288,7 +299,7 @@ retry:
 
        unlock_page_cgroup(page);
 
-       pc = kzalloc(sizeof(struct page_cgroup), GFP_KERNEL);
+       pc = kzalloc(sizeof(struct page_cgroup), gfp_mask);
        if (pc == NULL)
                goto err;
 
@@ -315,7 +326,14 @@ retry:
         * the cgroup limit.
         */
        while (res_counter_charge(&mem->res, PAGE_SIZE)) {
-               if (try_to_free_mem_cgroup_pages(mem))
+               bool is_atomic = gfp_mask & GFP_ATOMIC;
+               /*
+                * We cannot reclaim under GFP_ATOMIC, fail the charge
+                */
+               if (is_atomic)
+                       goto noreclaim;
+
+               if (try_to_free_mem_cgroup_pages(mem, gfp_mask))
                        continue;
 
                /*
@@ -339,9 +357,10 @@ retry:
                        congestion_wait(WRITE, HZ/10);
                        continue;
                }
-
+noreclaim:
                css_put(&mem->css);
-               mem_cgroup_out_of_memory(mem, GFP_KERNEL);
+               if (!is_atomic)
+                       mem_cgroup_out_of_memory(mem, GFP_KERNEL);
                goto free_pc;
        }
 
@@ -380,7 +399,8 @@ err:
 /*
  * mem_cgroup_cache_charge - conditionally charge a page-cache page.
  *
  * Page-cache pages are charged to @mm's cgroup only when that cgroup's
  * control_type is MEM_CGROUP_TYPE_ALL; otherwise the charge is skipped
  * and 0 (success) is returned so callers proceed unaccounted.
  */
-int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm)
+int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
+                               gfp_t gfp_mask)
 {
        struct mem_cgroup *mem;
        if (!mm)
@@ -388,7 +408,7 @@ int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm)
 
+       /*
+        * NOTE(review): the consequent of the !mm test above falls outside
+        * this hunk (old line 387 is elided) -- presumably "return 0;".
+        *
+        * NOTE(review): rcu_dereference() here assumes the caller holds
+        * rcu_read_lock() around mm->mem_cgroup -- confirm at call sites.
+        */
        mem = rcu_dereference(mm->mem_cgroup);
        if (mem->control_type == MEM_CGROUP_TYPE_ALL)
-               return mem_cgroup_charge(page, mm);
+               return mem_cgroup_charge(page, mm, gfp_mask);
        else
                return 0;
 }