bugfix for memory cgroup controller: avoid !PageLRU pages in mem_cgroup_isolate_pages
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 10833d9..e8493fb 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -162,6 +162,48 @@ static void __always_inline unlock_page_cgroup(struct page *page)
        bit_spin_unlock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
 }
 
+/*
+ * Tie a new page_cgroup to a struct page under lock_page_cgroup().
+ * This can fail if the page has already been tied to another page_cgroup.
+ * Returns 0 on success.
+ */
+static inline int
+page_cgroup_assign_new_page_cgroup(struct page *page, struct page_cgroup *pc)
+{
+       int ret = 0;
+
+       lock_page_cgroup(page);
+       if (!page_get_page_cgroup(page))
+               page_assign_page_cgroup(page, pc);
+       else /* A page is tied to other pc. */
+               ret = 1;
+       unlock_page_cgroup(page);
+       return ret;
+}
+
+/*
+ * Clear page->page_cgroup member under lock_page_cgroup().
+ * If the given "pc" value differs from the one currently stored in
+ * page->page_cgroup, page->page_cgroup is left unchanged.
+ * Returns the value of page->page_cgroup at the time the lock was taken,
+ * so a caller can detect a successful clear by checking
+ *  clear_page_cgroup(page, pc) == pc
+ */
+
+static inline struct page_cgroup *
+clear_page_cgroup(struct page *page, struct page_cgroup *pc)
+{
+       struct page_cgroup *ret;
+       /* lock and clear */
+       lock_page_cgroup(page);
+       ret = page_get_page_cgroup(page);
+       if (likely(ret == pc))
+               page_assign_page_cgroup(page, NULL);
+       unlock_page_cgroup(page);
+       return ret;
+}
+
+
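The rest of this patch uses these two helpers as a small ownership protocol for page->page_cgroup: a charger publishes its page_cgroup with page_cgroup_assign_new_page_cgroup(), and only the context whose page_cgroup is still installed may tear it down with clear_page_cgroup(). A minimal caller-side sketch, condensed from the charge and uncharge hunks further down (illustrative only, not a standalone compilable unit; charge_one_page/uncharge_one_page are made-up names):

	/* Illustrative only: how the helpers above are meant to be paired. */
	static int charge_one_page(struct page *page, struct page_cgroup *my_pc)
	{
		/*
		 * Non-zero means another context already attached its own
		 * page_cgroup, so the caller must back out (or retry and
		 * take a reference on the winner's page_cgroup instead).
		 */
		if (page_cgroup_assign_new_page_cgroup(page, my_pc))
			return -EBUSY;
		return 0;
	}

	static void uncharge_one_page(struct page *page, struct page_cgroup *my_pc)
	{
		/*
		 * clear_page_cgroup() returns the old value, so "== my_pc"
		 * means we still owned the page and may free the pc.
		 */
		if (clear_page_cgroup(page, my_pc) == my_pc)
			kfree(my_pc);
	}
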
 static void __mem_cgroup_move_lists(struct page_cgroup *pc, bool active)
 {
        if (active)
@@ -170,6 +212,16 @@ static void __mem_cgroup_move_lists(struct page_cgroup *pc, bool active)
                list_move(&pc->lru, &pc->mem_cgroup->inactive_list);
 }
 
+int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
+{
+       int ret;
+
+       task_lock(task);
+       ret = task->mm && mm_cgroup(task->mm) == mem;
+       task_unlock(task);
+       return ret;
+}
+
 /*
  * This routine assumes that the appropriate zone's lru lock is already held
  */
@@ -198,7 +250,7 @@ unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
        unsigned long scan;
        LIST_HEAD(pc_list);
        struct list_head *src;
-       struct page_cgroup *pc;
+       struct page_cgroup *pc, *tmp;
 
        if (active)
                src = &mem_cont->active_list;
@@ -206,11 +258,18 @@ unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
                src = &mem_cont->inactive_list;
 
        spin_lock(&mem_cont->lru_lock);
-       for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) {
-               pc = list_entry(src->prev, struct page_cgroup, lru);
+       scan = 0;
+       list_for_each_entry_safe_reverse(pc, tmp, src, lru) {
+               if (scan++ > nr_to_scan)
+                       break;
                page = pc->page;
                VM_BUG_ON(!pc);
 
+               if (unlikely(!PageLRU(page))) {
+                       scan--;
+                       continue;
+               }
+
                if (PageActive(page) && !active) {
                        __mem_cgroup_move_lists(pc, true);
                        scan--;
@@ -256,10 +315,11 @@ unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
  * 0 if the charge was successful
  * < 0 if the cgroup is over its limit
  */
-int mem_cgroup_charge(struct page *page, struct mm_struct *mm)
+int mem_cgroup_charge(struct page *page, struct mm_struct *mm,
+                               gfp_t gfp_mask)
 {
        struct mem_cgroup *mem;
-       struct page_cgroup *pc, *race_pc;
+       struct page_cgroup *pc;
        unsigned long flags;
        unsigned long nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
 
@@ -282,13 +342,15 @@ retry:
                        unlock_page_cgroup(page);
                        cpu_relax();
                        goto retry;
-               } else
+               } else {
+                       unlock_page_cgroup(page);
                        goto done;
+               }
        }
 
        unlock_page_cgroup(page);
 
-       pc = kzalloc(sizeof(struct page_cgroup), GFP_KERNEL);
+       pc = kzalloc(sizeof(struct page_cgroup), gfp_mask);
        if (pc == NULL)
                goto err;
 
@@ -315,7 +377,14 @@ retry:
         * the cgroup limit.
         */
        while (res_counter_charge(&mem->res, PAGE_SIZE)) {
-               if (try_to_free_mem_cgroup_pages(mem))
+               bool is_atomic = gfp_mask & GFP_ATOMIC;
+               /*
+                * We cannot reclaim under GFP_ATOMIC, so fail the charge.
+                */
+               if (is_atomic)
+                       goto noreclaim;
+
+               if (try_to_free_mem_cgroup_pages(mem, gfp_mask))
                        continue;
 
                /*
@@ -339,37 +408,33 @@ retry:
                        congestion_wait(WRITE, HZ/10);
                        continue;
                }
-
+noreclaim:
                css_put(&mem->css);
-               mem_cgroup_out_of_memory(mem, GFP_KERNEL);
+               if (!is_atomic)
+                       mem_cgroup_out_of_memory(mem, GFP_KERNEL);
                goto free_pc;
        }
 
-       lock_page_cgroup(page);
-       /*
-        * Check if somebody else beat us to allocating the page_cgroup
-        */
-       race_pc = page_get_page_cgroup(page);
-       if (race_pc) {
-               kfree(pc);
-               pc = race_pc;
-               atomic_inc(&pc->ref_cnt);
-               res_counter_uncharge(&mem->res, PAGE_SIZE);
-               css_put(&mem->css);
-               goto done;
-       }
-
        atomic_set(&pc->ref_cnt, 1);
        pc->mem_cgroup = mem;
        pc->page = page;
-       page_assign_page_cgroup(page, pc);
+       if (page_cgroup_assign_new_page_cgroup(page, pc)) {
+               /*
+                * Another charge has already been added to this page; on
+                * retry we take lock_page_cgroup(page) again, read
+                * page->page_cgroup, increment its refcnt, and succeed.
+                */
+               res_counter_uncharge(&mem->res, PAGE_SIZE);
+               css_put(&mem->css);
+               kfree(pc);
+               goto retry;
+       }
 
        spin_lock_irqsave(&mem->lru_lock, flags);
        list_add(&pc->lru, &mem->active_list);
        spin_unlock_irqrestore(&mem->lru_lock, flags);
 
 done:
-       unlock_page_cgroup(page);
        return 0;
 free_pc:
        kfree(pc);
@@ -380,7 +445,8 @@ err:
 /*
  * See if the cached pages should be charged at all?
  */
-int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm)
+int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
+                               gfp_t gfp_mask)
 {
        struct mem_cgroup *mem;
        if (!mm)
@@ -388,7 +454,7 @@ int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm)
 
        mem = rcu_dereference(mm->mem_cgroup);
        if (mem->control_type == MEM_CGROUP_TYPE_ALL)
-               return mem_cgroup_charge(page, mm);
+               return mem_cgroup_charge(page, mm, gfp_mask);
        else
                return 0;
 }
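
Both charge entry points now take the caller's gfp_mask, so any reclaim done on behalf of a charge respects the allocation context, and atomic callers take the noreclaim path above instead of sleeping. A minimal sketch of a call site (my_add_to_cache() is a purely hypothetical caller; only the mem_cgroup_cache_charge() signature comes from this patch):

	static int my_add_to_cache(struct page *page, struct mm_struct *mm,
				   gfp_t gfp_mask)
	{
		int err;

		/*
		 * Pass the same mask the surrounding allocation used
		 * (e.g. GFP_KERNEL); GFP_ATOMIC callers get a fast failure
		 * instead of triggering reclaim or the cgroup OOM killer.
		 */
		err = mem_cgroup_cache_charge(page, mm, gfp_mask);
		if (err)
			return err;

		/* ... actually insert the page into the cache here ... */
		return 0;
	}
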
@@ -412,19 +478,70 @@ void mem_cgroup_uncharge(struct page_cgroup *pc)
 
        if (atomic_dec_and_test(&pc->ref_cnt)) {
                page = pc->page;
-               lock_page_cgroup(page);
-               mem = pc->mem_cgroup;
-               css_put(&mem->css);
-               page_assign_page_cgroup(page, NULL);
-               unlock_page_cgroup(page);
-               res_counter_uncharge(&mem->res, PAGE_SIZE);
-
-               spin_lock_irqsave(&mem->lru_lock, flags);
-               list_del_init(&pc->lru);
-               spin_unlock_irqrestore(&mem->lru_lock, flags);
-               kfree(pc);
+               /*
+                * Get page->page_cgroup and clear it under lock_page_cgroup().
+                */
+               if (clear_page_cgroup(page, pc) == pc) {
+                       mem = pc->mem_cgroup;
+                       css_put(&mem->css);
+                       res_counter_uncharge(&mem->res, PAGE_SIZE);
+                       spin_lock_irqsave(&mem->lru_lock, flags);
+                       list_del_init(&pc->lru);
+                       spin_unlock_irqrestore(&mem->lru_lock, flags);
+                       kfree(pc);
+               } else {
+                       /*
+                        * Note: this will be removed when the force-empty
+                        * patch is applied; just print a warning here.
+                        */
+                       printk(KERN_ERR "Race in mem_cgroup_uncharge() ?\n");
+                       dump_stack();
+               }
        }
 }
+/*
+ * Returns non-zero if a page (under migration) has a valid page_cgroup member,
+ * in which case the page_cgroup's refcnt has been incremented.
+ */
+
+int mem_cgroup_prepare_migration(struct page *page)
+{
+       struct page_cgroup *pc;
+       int ret = 0;
+       lock_page_cgroup(page);
+       pc = page_get_page_cgroup(page);
+       if (pc && atomic_inc_not_zero(&pc->ref_cnt))
+               ret = 1;
+       unlock_page_cgroup(page);
+       return ret;
+}
+
+void mem_cgroup_end_migration(struct page *page)
+{
+       struct page_cgroup *pc = page_get_page_cgroup(page);
+       mem_cgroup_uncharge(pc);
+}
+/*
+ * We know both *page* and *newpage* are now not on the LRU and PG_locked.
+ * There is no race with the uncharge() routines because the page_cgroup of
+ * *page* holds an extra reference taken by mem_cgroup_prepare_migration().
+ */
+
+void mem_cgroup_page_migration(struct page *page, struct page *newpage)
+{
+       struct page_cgroup *pc;
+retry:
+       pc = page_get_page_cgroup(page);
+       if (!pc)
+               return;
+       if (clear_page_cgroup(page, pc) != pc)
+               goto retry;
+       pc->page = newpage;
+       lock_page_cgroup(newpage);
+       page_assign_page_cgroup(newpage, pc);
+       unlock_page_cgroup(newpage);
+       return;
+}
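
Taken together, the three hooks above are meant to bracket a page migration: pin the page_cgroup, move it to the new page on success, then drop the extra reference. A rough sketch of the intended calling sequence (migrate_one_page() and do_replace_page() are hypothetical stand-ins; the real migration path is outside this patch):

	static int do_replace_page(struct page *page, struct page *newpage);

	static int migrate_one_page(struct page *page, struct page *newpage)
	{
		/* Pin page->page_cgroup so uncharge cannot free it mid-move. */
		int charged = mem_cgroup_prepare_migration(page);
		int err;

		err = do_replace_page(page, newpage);
		if (!err && charged)
			/* Hand the pinned page_cgroup over to the new page. */
			mem_cgroup_page_migration(page, newpage);
		if (charged)
			/* Drop the reference taken by prepare_migration(). */
			mem_cgroup_end_migration(err ? page : newpage);
		return err;
	}
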
 
 int mem_cgroup_write_strategy(char *buf, unsigned long long *tmp)
 {