Memory controller remove control_type feature
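Remove the control_type interface (RSS-only vs. RSS+pagecache accounting): page cache is now always charged, so mem_cgroup_cache_charge() calls mem_cgroup_charge_common() unconditionally and the memory.control_type cgroup file is deleted. The diff shown here also carries the companion per-zone rework: the active/inactive LRU lists and the lru_lock guarding them move from struct mem_cgroup into per-node, per-zone struct mem_cgroup_per_zone instances, reclaim scan targets are computed per zone, mem_cgroup_isolate_pages() no longer needs to skip pages belonging to foreign zones, and mem_cgroup_force_empty() walks every (node, zone) pair, looping until res.usage reaches zero instead of until two global lists drain.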
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index f8a6a39..5c2c702 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -89,6 +89,12 @@ enum mem_cgroup_zstat_index {
 };
 
 struct mem_cgroup_per_zone {
+       /*
+        * spin_lock to protect the per cgroup LRU
+        */
+       spinlock_t              lru_lock;
+       struct list_head        active_list;
+       struct list_head        inactive_list;
        unsigned long count[NR_MEM_CGROUP_ZSTAT];
 };
 /* Macro for accessing counter */
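The hunks below reach this per-zone state through lookup helpers defined earlier in memcontrol.c, outside the visible diff. A minimal sketch of that plumbing, using the names the hunks rely on (the exact declarations in the tree may differ slightly):

    /* one mem_cgroup_per_zone per zone, hung off a per-node array */
    struct mem_cgroup_per_node {
            struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
    };

    struct mem_cgroup_lru_info {
            struct mem_cgroup_per_node *nodeinfo[MAX_NUMNODES];
    };

    /* the per-zone statistics accessor used throughout the diff */
    #define MEM_CGROUP_ZSTAT(mz, idx)       ((mz)->count[(idx)])

    /* look the per-zone state up by (node id, zone index)... */
    static struct mem_cgroup_per_zone *
    mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid)
    {
            return &mem->info.nodeinfo[nid]->zoneinfo[zid];
    }

    /* ...or derive it from a page_cgroup via the page it tracks */
    static struct mem_cgroup_per_zone *
    page_cgroup_zoneinfo(struct page_cgroup *pc)
    {
            int nid = page_to_nid(pc->page);
            int zid = page_zonenum(pc->page);

            return mem_cgroup_zoneinfo(pc->mem_cgroup, nid, zid);
    }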
@@ -122,16 +128,9 @@ struct mem_cgroup {
        /*
         * Per cgroup active and inactive list, similar to the
         * per zone LRU lists.
-        * TODO: Consider making these lists per zone
         */
-       struct list_head active_list;
-       struct list_head inactive_list;
        struct mem_cgroup_lru_info info;
-       /*
-        * spin_lock to protect the per cgroup LRU
-        */
-       spinlock_t lru_lock;
-       unsigned long control_type;     /* control RSS or RSS+Pagecache */
+
        int     prev_priority;  /* for recording reclaim priority */
        /*
         * statistics.
@@ -366,10 +365,10 @@ static void __mem_cgroup_add_list(struct page_cgroup *pc)
 
        if (!to) {
                MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) += 1;
-               list_add(&pc->lru, &pc->mem_cgroup->inactive_list);
+               list_add(&pc->lru, &mz->inactive_list);
        } else {
                MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) += 1;
-               list_add(&pc->lru, &pc->mem_cgroup->active_list);
+               list_add(&pc->lru, &mz->active_list);
        }
        mem_cgroup_charge_statistics(pc->mem_cgroup, pc->flags, true);
 }
@@ -387,11 +386,11 @@ static void __mem_cgroup_move_lists(struct page_cgroup *pc, bool active)
        if (active) {
                MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) += 1;
                pc->flags |= PAGE_CGROUP_FLAG_ACTIVE;
-               list_move(&pc->lru, &pc->mem_cgroup->active_list);
+               list_move(&pc->lru, &mz->active_list);
        } else {
                MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) += 1;
                pc->flags &= ~PAGE_CGROUP_FLAG_ACTIVE;
-               list_move(&pc->lru, &pc->mem_cgroup->inactive_list);
+               list_move(&pc->lru, &mz->inactive_list);
        }
 }
 
@@ -410,15 +409,16 @@ int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
  */
 void mem_cgroup_move_lists(struct page_cgroup *pc, bool active)
 {
-       struct mem_cgroup *mem;
+       struct mem_cgroup_per_zone *mz;
+       unsigned long flags;
+
        if (!pc)
                return;
 
-       mem = pc->mem_cgroup;
-
-       spin_lock(&mem->lru_lock);
+       mz = page_cgroup_zoneinfo(pc);
+       spin_lock_irqsave(&mz->lru_lock, flags);
        __mem_cgroup_move_lists(pc, active);
-       spin_unlock(&mem->lru_lock);
+       spin_unlock_irqrestore(&mz->lru_lock, flags);
 }
 
 /*
@@ -471,6 +471,39 @@ void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem, int priority)
        mem->prev_priority = priority;
 }
 
+/*
+ * Calculate # of pages to be scanned in this priority/zone.
+ * See also vmscan.c
+ *
+ * priority starts from "DEF_PRIORITY" and decremented in each loop.
+ * (see include/linux/mmzone.h)
+ */
+
+long mem_cgroup_calc_reclaim_active(struct mem_cgroup *mem,
+                                  struct zone *zone, int priority)
+{
+       long nr_active;
+       int nid = zone->zone_pgdat->node_id;
+       int zid = zone_idx(zone);
+       struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(mem, nid, zid);
+
+       nr_active = MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE);
+       return (nr_active >> priority);
+}
+
+long mem_cgroup_calc_reclaim_inactive(struct mem_cgroup *mem,
+                                       struct zone *zone, int priority)
+{
+       long nr_inactive;
+       int nid = zone->zone_pgdat->node_id;
+       int zid = zone_idx(zone);
+       struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(mem, nid, zid);
+
+       nr_inactive = MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE);
+
+       return (nr_inactive >> priority);
+}
+
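The >> priority scaling mirrors global reclaim in vmscan.c: DEF_PRIORITY is 12, and each pass that fails to reclaim enough lowers priority by one, doubling the scan target. Worked example: a cgroup with 40960 active pages in a zone is asked to scan 40960 >> 12 = 10 pages on the first pass, 40960 >> 5 = 1280 pages by priority 5, and the whole list once priority reaches 0.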
 unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
                                        struct list_head *dst,
                                        unsigned long *scanned, int order,
@@ -484,13 +517,18 @@ unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
        LIST_HEAD(pc_list);
        struct list_head *src;
        struct page_cgroup *pc, *tmp;
+       int nid = z->zone_pgdat->node_id;
+       int zid = zone_idx(z);
+       struct mem_cgroup_per_zone *mz;
 
+       mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
        if (active)
-               src = &mem_cont->active_list;
+               src = &mz->active_list;
        else
-               src = &mem_cont->inactive_list;
+               src = &mz->inactive_list;
+
 
-       spin_lock(&mem_cont->lru_lock);
+       spin_lock(&mz->lru_lock);
        scan = 0;
        list_for_each_entry_safe_reverse(pc, tmp, src, lru) {
                if (scan >= nr_to_scan)
@@ -510,13 +548,6 @@ unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
                        continue;
                }
 
-               /*
-                * Reclaim, per zone
-                * TODO: make the active/inactive lists per zone
-                */
-               if (page_zone(page) != z)
-                       continue;
-
                scan++;
                list_move(&pc->lru, &pc_list);
 
@@ -527,7 +558,7 @@ unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
        }
 
        list_splice(&pc_list, src);
-       spin_unlock(&mem_cont->lru_lock);
+       spin_unlock(&mz->lru_lock);
 
        *scanned = scan;
        return nr_taken;
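Because the source list is now per zone, every entry the scan loop touches already belongs to the right zone, which is what lets the old page_zone(page) != z filter go (removed in the hunk above). A hedged sketch of how a vmscan-style caller would drive this function; illustrative only, with the trailing parameters inferred from the body above and mode taking vmscan's isolate-mode values:

    LIST_HEAD(page_list);
    unsigned long scanned, taken;

    /* pull up to SWAP_CLUSTER_MAX pages off this cgroup's inactive
     * list for @zone; 'scanned' reports how many entries were viewed */
    taken = mem_cgroup_isolate_pages(SWAP_CLUSTER_MAX, &page_list,
                                     &scanned, 0 /* order */,
                                     0 /* mode */, zone, mem,
                                     0 /* inactive list */);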
@@ -546,6 +577,7 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
        struct page_cgroup *pc;
        unsigned long flags;
        unsigned long nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
+       struct mem_cgroup_per_zone *mz;
 
        /*
         * Should page_cgroup's go to their own slab?
@@ -647,10 +679,11 @@ retry:
                goto retry;
        }
 
-       spin_lock_irqsave(&mem->lru_lock, flags);
+       mz = page_cgroup_zoneinfo(pc);
+       spin_lock_irqsave(&mz->lru_lock, flags);
        /* Update statistics vector */
        __mem_cgroup_add_list(pc);
-       spin_unlock_irqrestore(&mem->lru_lock, flags);
+       spin_unlock_irqrestore(&mz->lru_lock, flags);
 
 done:
        return 0;
@@ -675,56 +708,59 @@ int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
                                gfp_t gfp_mask)
 {
        int ret = 0;
-       struct mem_cgroup *mem;
        if (!mm)
                mm = &init_mm;
 
-       rcu_read_lock();
-       mem = rcu_dereference(mm->mem_cgroup);
-       css_get(&mem->css);
-       rcu_read_unlock();
-       if (mem->control_type == MEM_CGROUP_TYPE_ALL)
-               ret = mem_cgroup_charge_common(page, mm, gfp_mask,
+       ret = mem_cgroup_charge_common(page, mm, gfp_mask,
                                MEM_CGROUP_CHARGE_TYPE_CACHE);
-       css_put(&mem->css);
        return ret;
 }
 
 /*
  * Uncharging is always a welcome operation, we never complain, simply
- * uncharge.
+ * uncharge. This routine should be called with lock_page_cgroup held
  */
 void mem_cgroup_uncharge(struct page_cgroup *pc)
 {
        struct mem_cgroup *mem;
+       struct mem_cgroup_per_zone *mz;
        struct page *page;
        unsigned long flags;
 
        /*
-        * This can handle cases when a page is not charged at all and we
-        * are switching between handling the control_type.
+        * Check if our page_cgroup is valid
         */
        if (!pc)
                return;
 
        if (atomic_dec_and_test(&pc->ref_cnt)) {
                page = pc->page;
+               mz = page_cgroup_zoneinfo(pc);
                /*
                 * get page->cgroup and clear it under lock.
                 * force_empty can drop page->cgroup without checking refcnt.
                 */
+               unlock_page_cgroup(page);
                if (clear_page_cgroup(page, pc) == pc) {
                        mem = pc->mem_cgroup;
                        css_put(&mem->css);
                        res_counter_uncharge(&mem->res, PAGE_SIZE);
-                       spin_lock_irqsave(&mem->lru_lock, flags);
+                       spin_lock_irqsave(&mz->lru_lock, flags);
                        __mem_cgroup_remove_list(pc);
-                       spin_unlock_irqrestore(&mem->lru_lock, flags);
+                       spin_unlock_irqrestore(&mz->lru_lock, flags);
                        kfree(pc);
                }
+               lock_page_cgroup(page);
        }
 }
 
+void mem_cgroup_uncharge_page(struct page *page)
+{
+       lock_page_cgroup(page);
+       mem_cgroup_uncharge(page_get_page_cgroup(page));
+       unlock_page_cgroup(page);
+}
+
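The unlock/retake of the page_cgroup lock around clear_page_cgroup() is apparently about lock ordering: mz->lru_lock must not be awaited while the page_cgroup lock is held, since paths such as force_empty (further down, partially elided here) clear page->cgroup, and so take the page_cgroup lock, while already holding mz->lru_lock. The new mem_cgroup_uncharge_page() wrapper captures the convention the updated comment asks for: lock the page's page_cgroup, uncharge, unlock.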
 /*
  * Returns non-zero if a page (under migration) has valid page_cgroup member.
  * Refcnt of page_cgroup is incremented.
@@ -744,8 +780,12 @@ int mem_cgroup_prepare_migration(struct page *page)
 
 void mem_cgroup_end_migration(struct page *page)
 {
-       struct page_cgroup *pc = page_get_page_cgroup(page);
+       struct page_cgroup *pc;
+
+       lock_page_cgroup(page);
+       pc = page_get_page_cgroup(page);
        mem_cgroup_uncharge(pc);
+       unlock_page_cgroup(page);
 }
 /*
  * We know both *page* and *newpage* are now not-on-LRU and Pg_locked.
@@ -758,24 +798,29 @@ void mem_cgroup_page_migration(struct page *page, struct page *newpage)
        struct page_cgroup *pc;
        struct mem_cgroup *mem;
        unsigned long flags;
+       struct mem_cgroup_per_zone *mz;
 retry:
        pc = page_get_page_cgroup(page);
        if (!pc)
                return;
        mem = pc->mem_cgroup;
+       mz = page_cgroup_zoneinfo(pc);
        if (clear_page_cgroup(page, pc) != pc)
                goto retry;
-
-       spin_lock_irqsave(&mem->lru_lock, flags);
+       spin_lock_irqsave(&mz->lru_lock, flags);
 
        __mem_cgroup_remove_list(pc);
+       spin_unlock_irqrestore(&mz->lru_lock, flags);
+
        pc->page = newpage;
        lock_page_cgroup(newpage);
        page_assign_page_cgroup(newpage, pc);
        unlock_page_cgroup(newpage);
-       __mem_cgroup_add_list(pc);
 
-       spin_unlock_irqrestore(&mem->lru_lock, flags);
+       mz = page_cgroup_zoneinfo(pc);
+       spin_lock_irqsave(&mz->lru_lock, flags);
+       __mem_cgroup_add_list(pc);
+       spin_unlock_irqrestore(&mz->lru_lock, flags);
        return;
 }
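Note the two page_cgroup_zoneinfo() lookups in the migration path above: the first resolves the zone of the old page so the page_cgroup can be unlinked from that zone's list, and after pc->page is repointed at newpage the lookup is redone, because the destination page may sit in a different zone and hence be covered by a different lru_lock.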
 
@@ -786,16 +831,26 @@ retry:
  */
 #define FORCE_UNCHARGE_BATCH   (128)
 static void
-mem_cgroup_force_empty_list(struct mem_cgroup *mem, struct list_head *list)
+mem_cgroup_force_empty_list(struct mem_cgroup *mem,
+                           struct mem_cgroup_per_zone *mz,
+                           int active)
 {
        struct page_cgroup *pc;
        struct page *page;
        int count;
        unsigned long flags;
+       struct list_head *list;
 
+       if (active)
+               list = &mz->active_list;
+       else
+               list = &mz->inactive_list;
+
+       if (list_empty(list))
+               return;
 retry:
        count = FORCE_UNCHARGE_BATCH;
-       spin_lock_irqsave(&mem->lru_lock, flags);
+       spin_lock_irqsave(&mz->lru_lock, flags);
 
        while (--count && !list_empty(list)) {
                pc = list_entry(list->prev, struct page_cgroup, lru);
@@ -810,7 +865,7 @@ retry:
                } else  /* being uncharged ? ...do relax */
                        break;
        }
-       spin_unlock_irqrestore(&mem->lru_lock, flags);
+       spin_unlock_irqrestore(&mz->lru_lock, flags);
        if (!list_empty(list)) {
                cond_resched();
                goto retry;
@@ -826,20 +881,25 @@ retry:
 int mem_cgroup_force_empty(struct mem_cgroup *mem)
 {
        int ret = -EBUSY;
+       int node, zid;
        css_get(&mem->css);
        /*
         * page reclaim code (kswapd etc..) will move pages between
         * active_list <-> inactive_list while we don't take a lock.
         * So, we have to do loop here until all lists are empty.
         */
-       while (!(list_empty(&mem->active_list) &&
-                list_empty(&mem->inactive_list))) {
+       while (mem->res.usage > 0) {
                if (atomic_read(&mem->css.cgroup->count) > 0)
                        goto out;
-               /* drop all page_cgroup in active_list */
-               mem_cgroup_force_empty_list(mem, &mem->active_list);
-               /* drop all page_cgroup in inactive_list */
-               mem_cgroup_force_empty_list(mem, &mem->inactive_list);
+               for_each_node_state(node, N_POSSIBLE)
+                       for (zid = 0; zid < MAX_NR_ZONES; zid++) {
+                               struct mem_cgroup_per_zone *mz;
+                               mz = mem_cgroup_zoneinfo(mem, node, zid);
+                               /* drop all page_cgroup in active_list */
+                               mem_cgroup_force_empty_list(mem, mz, 1);
+                               /* drop all page_cgroup in inactive_list */
+                               mem_cgroup_force_empty_list(mem, mz, 0);
+                       }
        }
        ret = 0;
 out:
@@ -880,61 +940,6 @@ static ssize_t mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
                                mem_cgroup_write_strategy);
 }
 
-static ssize_t mem_control_type_write(struct cgroup *cont,
-                       struct cftype *cft, struct file *file,
-                       const char __user *userbuf,
-                       size_t nbytes, loff_t *pos)
-{
-       int ret;
-       char *buf, *end;
-       unsigned long tmp;
-       struct mem_cgroup *mem;
-
-       mem = mem_cgroup_from_cont(cont);
-       buf = kmalloc(nbytes + 1, GFP_KERNEL);
-       ret = -ENOMEM;
-       if (buf == NULL)
-               goto out;
-
-       buf[nbytes] = 0;
-       ret = -EFAULT;
-       if (copy_from_user(buf, userbuf, nbytes))
-               goto out_free;
-
-       ret = -EINVAL;
-       tmp = simple_strtoul(buf, &end, 10);
-       if (*end != '\0')
-               goto out_free;
-
-       if (tmp <= MEM_CGROUP_TYPE_UNSPEC || tmp >= MEM_CGROUP_TYPE_MAX)
-               goto out_free;
-
-       mem->control_type = tmp;
-       ret = nbytes;
-out_free:
-       kfree(buf);
-out:
-       return ret;
-}
-
-static ssize_t mem_control_type_read(struct cgroup *cont,
-                               struct cftype *cft,
-                               struct file *file, char __user *userbuf,
-                               size_t nbytes, loff_t *ppos)
-{
-       unsigned long val;
-       char buf[64], *s;
-       struct mem_cgroup *mem;
-
-       mem = mem_cgroup_from_cont(cont);
-       s = buf;
-       val = mem->control_type;
-       s += sprintf(s, "%lu\n", val);
-       return simple_read_from_buffer((void __user *)userbuf, nbytes,
-                       ppos, buf, s - buf);
-}
-
-
 static ssize_t mem_force_empty_write(struct cgroup *cont,
                                struct cftype *cft, struct file *file,
                                const char __user *userbuf,
@@ -1033,11 +1038,6 @@ static struct cftype mem_cgroup_files[] = {
                .read = mem_cgroup_read,
        },
        {
-               .name = "control_type",
-               .write = mem_control_type_write,
-               .read = mem_control_type_read,
-       },
-       {
                .name = "force_empty",
                .write = mem_force_empty_write,
                .read = mem_force_empty_read,
@@ -1051,15 +1051,41 @@ static struct cftype mem_cgroup_files[] = {
 static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
 {
        struct mem_cgroup_per_node *pn;
-
-       pn = kmalloc_node(sizeof(*pn), GFP_KERNEL, node);
+       struct mem_cgroup_per_zone *mz;
+       int zone;
+       /*
+        * This routine is called against possible nodes.
+        * But it's BUG to call kmalloc() against offline node.
+        *
+        * TODO: this routine can waste much memory for nodes which will
+        *       never be onlined. It's better to use memory hotplug callback
+        *       function.
+        */
+       if (node_state(node, N_HIGH_MEMORY))
+               pn = kmalloc_node(sizeof(*pn), GFP_KERNEL, node);
+       else
+               pn = kmalloc(sizeof(*pn), GFP_KERNEL);
        if (!pn)
                return 1;
+
        mem->info.nodeinfo[node] = pn;
        memset(pn, 0, sizeof(*pn));
+
+       for (zone = 0; zone < MAX_NR_ZONES; zone++) {
+               mz = &pn->zoneinfo[zone];
+               INIT_LIST_HEAD(&mz->active_list);
+               INIT_LIST_HEAD(&mz->inactive_list);
+               spin_lock_init(&mz->lru_lock);
+       }
        return 0;
 }
 
+static void free_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
+{
+       kfree(mem->info.nodeinfo[node]);
+}
+
+
 static struct mem_cgroup init_mem_cgroup;
 
 static struct cgroup_subsys_state *
@@ -1078,10 +1104,7 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
                return NULL;
 
        res_counter_init(&mem->res);
-       INIT_LIST_HEAD(&mem->active_list);
-       INIT_LIST_HEAD(&mem->inactive_list);
-       spin_lock_init(&mem->lru_lock);
-       mem->control_type = MEM_CGROUP_TYPE_ALL;
+
        memset(&mem->info, 0, sizeof(mem->info));
 
        for_each_node_state(node, N_POSSIBLE)
@@ -1091,7 +1114,7 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
        return &mem->css;
 free_out:
        for_each_node_state(node, N_POSSIBLE)
-               kfree(mem->info.nodeinfo[node]);
+               free_mem_cgroup_per_zone_info(mem, node);
        if (cont->parent != NULL)
                kfree(mem);
        return NULL;
@@ -1111,7 +1134,7 @@ static void mem_cgroup_destroy(struct cgroup_subsys *ss,
        struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
 
        for_each_node_state(node, N_POSSIBLE)
-               kfree(mem->info.nodeinfo[node]);
+               free_mem_cgroup_per_zone_info(mem, node);
 
        kfree(mem_cgroup_from_cont(cont));
 }