memcg: rework usage of stats by soft limit
mm/memcontrol.c
index b0757660663f47e570208abd18182c44c4e47798..5a41d93c7077b7b39613f8c4537af5e29ff25bcd 100644
@@ -21,6 +21,7 @@
 #include <linux/memcontrol.h>
 #include <linux/cgroup.h>
 #include <linux/mm.h>
+#include <linux/hugetlb.h>
 #include <linux/pagemap.h>
 #include <linux/smp.h>
 #include <linux/page-flags.h>
 #include <linux/rcupdate.h>
 #include <linux/limits.h>
 #include <linux/mutex.h>
+#include <linux/rbtree.h>
 #include <linux/slab.h>
 #include <linux/swap.h>
+#include <linux/swapops.h>
 #include <linux/spinlock.h>
 #include <linux/fs.h>
 #include <linux/seq_file.h>
 #include <linux/vmalloc.h>
 #include <linux/mm_inline.h>
 #include <linux/page_cgroup.h>
+#include <linux/cpu.h>
 #include "internal.h"
 
 #include <asm/uaccess.h>
@@ -53,7 +57,7 @@ static int really_do_swap_account __initdata = 1; /* for remember boot option*/
 #define do_swap_account                (0)
 #endif
 
-static DEFINE_MUTEX(memcg_tasklist);   /* can be hold under cgroup_mutex */
+#define SOFTLIMIT_EVENTS_THRESH (1000)
 
 /*
  * Statistics for memory cgroup.
@@ -64,9 +68,12 @@ enum mem_cgroup_stat_index {
         */
        MEM_CGROUP_STAT_CACHE,     /* # of pages charged as cache */
        MEM_CGROUP_STAT_RSS,       /* # of pages charged as anon rss */
-       MEM_CGROUP_STAT_MAPPED_FILE,  /* # of pages charged as file rss */
+       MEM_CGROUP_STAT_FILE_MAPPED,  /* # of pages charged as file rss */
        MEM_CGROUP_STAT_PGPGIN_COUNT,   /* # of pages paged in */
        MEM_CGROUP_STAT_PGPGOUT_COUNT,  /* # of pages paged out */
+       MEM_CGROUP_STAT_SWAPOUT, /* # of pages swapped out */
+       MEM_CGROUP_STAT_SOFTLIMIT, /* decremented on each page in/out;
+                                       used by soft limit implementation */
 
        MEM_CGROUP_STAT_NSTATS,
 };
@@ -79,6 +86,20 @@ struct mem_cgroup_stat {
        struct mem_cgroup_stat_cpu cpustat[0];
 };
 
+static inline void
+__mem_cgroup_stat_set_safe(struct mem_cgroup_stat_cpu *stat,
+                               enum mem_cgroup_stat_index idx, s64 val)
+{
+       stat->count[idx] = val;
+}
+
+static inline s64
+__mem_cgroup_stat_read_local(struct mem_cgroup_stat_cpu *stat,
+                               enum mem_cgroup_stat_index idx)
+{
+       return stat->count[idx];
+}
+
 /*
  * For accounting under irq disable, no need for increment preempt count.
  */
@@ -118,6 +139,12 @@ struct mem_cgroup_per_zone {
        unsigned long           count[NR_LRU_LISTS];
 
        struct zone_reclaim_stat reclaim_stat;
+       struct rb_node          tree_node;      /* RB tree node */
+       unsigned long long      usage_in_excess; /* Set to the value by which */
+                                               /* the soft limit is exceeded */
+       bool                    on_tree;
+       struct mem_cgroup       *mem;           /* Back pointer, we cannot */
+                                               /* use container_of        */
 };
 /* Macro for accessing counter */
 #define MEM_CGROUP_ZSTAT(mz, idx)      ((mz)->count[(idx)])
@@ -130,6 +157,26 @@ struct mem_cgroup_lru_info {
        struct mem_cgroup_per_node *nodeinfo[MAX_NUMNODES];
 };
 
+/*
+ * Cgroups above their limits are maintained in an RB-tree, independent of
+ * their hierarchy representation.
+ */
+
+struct mem_cgroup_tree_per_zone {
+       struct rb_root rb_root;
+       spinlock_t lock;
+};
+
+struct mem_cgroup_tree_per_node {
+       struct mem_cgroup_tree_per_zone rb_tree_per_zone[MAX_NR_ZONES];
+};
+
+struct mem_cgroup_tree {
+       struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
+};
+
+static struct mem_cgroup_tree soft_limit_tree __read_mostly;
+
 /*
  * The memory controller data structure. The memory controller controls both
  * page cache and RSS per cgroup. We would eventually like to provide
@@ -165,7 +212,7 @@ struct mem_cgroup {
        int     prev_priority;  /* for recording reclaim priority */
 
        /*
-        * While reclaiming in a hiearchy, we cache the last child we
+        * While reclaiming in a hierarchy, we cache the last child we
         * reclaimed from.
         */
        int last_scanned_child;
@@ -181,12 +228,48 @@ struct mem_cgroup {
        /* set when res.limit == memsw.limit */
        bool            memsw_is_minimum;
 
+       /*
+        * Should we move charges of a task when a task is moved into this
+        * mem_cgroup ? And what type of charges should we move ?
+        */
+       unsigned long   move_charge_at_immigrate;
+
        /*
         * statistics. This must be placed at the end of memcg.
         */
        struct mem_cgroup_stat stat;
 };
 
+/* Stuff for moving charges at task migration. */
+/*
+ * Types of charges to be moved. "move_charge_at_immigrate" is treated as a
+ * left-shifted bitmap of these types.
+ */
+enum move_type {
+       MOVE_CHARGE_TYPE_ANON,  /* private anonymous page and swap of it */
+       NR_MOVE_TYPE,
+};
+
+/* "mc" and its members are protected by cgroup_mutex */
+static struct move_charge_struct {
+       struct mem_cgroup *from;
+       struct mem_cgroup *to;
+       unsigned long precharge;
+       unsigned long moved_charge;
+       unsigned long moved_swap;
+       struct task_struct *moving_task;        /* a task moving charges */
+       wait_queue_head_t waitq;                /* a waitq for other context */
+} mc = {
+       .waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
+};
+
+/*
+ * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
+ * limit reclaim to prevent infinite loops, if they ever occur.
+ */
+#define        MEM_CGROUP_MAX_RECLAIM_LOOPS            (100)
+#define        MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS (2)
+
 enum charge_type {
        MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
        MEM_CGROUP_CHARGE_TYPE_MAPPED,
@@ -211,15 +294,244 @@ enum charge_type {
 #define MEMFILE_TYPE(val)      (((val) >> 16) & 0xffff)
 #define MEMFILE_ATTR(val)      ((val) & 0xffff)
 
+/*
+ * Reclaim flags for mem_cgroup_hierarchical_reclaim
+ */
+#define MEM_CGROUP_RECLAIM_NOSWAP_BIT  0x0
+#define MEM_CGROUP_RECLAIM_NOSWAP      (1 << MEM_CGROUP_RECLAIM_NOSWAP_BIT)
+#define MEM_CGROUP_RECLAIM_SHRINK_BIT  0x1
+#define MEM_CGROUP_RECLAIM_SHRINK      (1 << MEM_CGROUP_RECLAIM_SHRINK_BIT)
+#define MEM_CGROUP_RECLAIM_SOFT_BIT    0x2
+#define MEM_CGROUP_RECLAIM_SOFT                (1 << MEM_CGROUP_RECLAIM_SOFT_BIT)
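
For illustration only — a standalone, hypothetical userspace sketch (not part
of this patch) of how these flag bits are packed into one reclaim_options word
and tested individually, mirroring what mem_cgroup_hierarchical_reclaim() does
further down in this diff:

        #include <stdio.h>

        #define RECLAIM_NOSWAP (1 << 0) /* models MEM_CGROUP_RECLAIM_NOSWAP */
        #define RECLAIM_SHRINK (1 << 1) /* models MEM_CGROUP_RECLAIM_SHRINK */
        #define RECLAIM_SOFT   (1 << 2) /* models MEM_CGROUP_RECLAIM_SOFT   */

        int main(void)
        {
                unsigned long opts = RECLAIM_NOSWAP | RECLAIM_SHRINK;

                /* prints: noswap=1 shrink=1 soft=0 */
                printf("noswap=%d shrink=%d soft=%d\n",
                       !!(opts & RECLAIM_NOSWAP),
                       !!(opts & RECLAIM_SHRINK),
                       !!(opts & RECLAIM_SOFT));
                return 0;
        }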
+
 static void mem_cgroup_get(struct mem_cgroup *mem);
 static void mem_cgroup_put(struct mem_cgroup *mem);
 static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem);
+static void drain_all_stock_async(void);
+
+static struct mem_cgroup_per_zone *
+mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid)
+{
+       return &mem->info.nodeinfo[nid]->zoneinfo[zid];
+}
+
+struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *mem)
+{
+       return &mem->css;
+}
+
+static struct mem_cgroup_per_zone *
+page_cgroup_zoneinfo(struct page_cgroup *pc)
+{
+       struct mem_cgroup *mem = pc->mem_cgroup;
+       int nid = page_cgroup_nid(pc);
+       int zid = page_cgroup_zid(pc);
+
+       if (!mem)
+               return NULL;
+
+       return mem_cgroup_zoneinfo(mem, nid, zid);
+}
+
+static struct mem_cgroup_tree_per_zone *
+soft_limit_tree_node_zone(int nid, int zid)
+{
+       return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
+}
+
+static struct mem_cgroup_tree_per_zone *
+soft_limit_tree_from_page(struct page *page)
+{
+       int nid = page_to_nid(page);
+       int zid = page_zonenum(page);
+
+       return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
+}
+
+static void
+__mem_cgroup_insert_exceeded(struct mem_cgroup *mem,
+                               struct mem_cgroup_per_zone *mz,
+                               struct mem_cgroup_tree_per_zone *mctz,
+                               unsigned long long new_usage_in_excess)
+{
+       struct rb_node **p = &mctz->rb_root.rb_node;
+       struct rb_node *parent = NULL;
+       struct mem_cgroup_per_zone *mz_node;
+
+       if (mz->on_tree)
+               return;
+
+       mz->usage_in_excess = new_usage_in_excess;
+       if (!mz->usage_in_excess)
+               return;
+       while (*p) {
+               parent = *p;
+               mz_node = rb_entry(parent, struct mem_cgroup_per_zone,
+                                       tree_node);
+               if (mz->usage_in_excess < mz_node->usage_in_excess)
+                       p = &(*p)->rb_left;
+               /*
+                * We can't avoid mem cgroups that are over their soft
+                * limit by the same amount; ties go to the right.
+                */
+               else if (mz->usage_in_excess >= mz_node->usage_in_excess)
+                       p = &(*p)->rb_right;
+       }
+       rb_link_node(&mz->tree_node, parent, p);
+       rb_insert_color(&mz->tree_node, &mctz->rb_root);
+       mz->on_tree = true;
+}
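
A hypothetical userspace sketch (plain C, illustrative names, not kernel code)
of the ordering rule above: nodes are keyed by usage_in_excess and ties go to
the right, so the rightmost node always holds a largest excess — which is why
the reclaim side can simply take rb_last() later in this patch:

        #include <stdio.h>
        #include <stdlib.h>

        struct node {
                unsigned long long excess;
                struct node *left, *right;
        };

        /* same tie-breaking as __mem_cgroup_insert_exceeded(): >= goes right */
        static void insert(struct node **p, struct node *n)
        {
                while (*p) {
                        if (n->excess < (*p)->excess)
                                p = &(*p)->left;
                        else
                                p = &(*p)->right;
                }
                *p = n;
        }

        static struct node *rightmost(struct node *root)
        {
                while (root && root->right)
                        root = root->right;
                return root;
        }

        int main(void)
        {
                unsigned long long vals[] = { 40, 10, 40, 90, 25 };
                struct node *root = NULL;

                for (int i = 0; i < 5; i++) {
                        struct node *n = calloc(1, sizeof(*n));
                        n->excess = vals[i];
                        insert(&root, n);
                }
                /* prints 90: the group most over its soft limit */
                printf("largest excess: %llu\n", rightmost(root)->excess);
                return 0;
        }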
+
+static void
+__mem_cgroup_remove_exceeded(struct mem_cgroup *mem,
+                               struct mem_cgroup_per_zone *mz,
+                               struct mem_cgroup_tree_per_zone *mctz)
+{
+       if (!mz->on_tree)
+               return;
+       rb_erase(&mz->tree_node, &mctz->rb_root);
+       mz->on_tree = false;
+}
+
+static void
+mem_cgroup_remove_exceeded(struct mem_cgroup *mem,
+                               struct mem_cgroup_per_zone *mz,
+                               struct mem_cgroup_tree_per_zone *mctz)
+{
+       spin_lock(&mctz->lock);
+       __mem_cgroup_remove_exceeded(mem, mz, mctz);
+       spin_unlock(&mctz->lock);
+}
+
+static bool mem_cgroup_soft_limit_check(struct mem_cgroup *mem)
+{
+       bool ret = false;
+       int cpu;
+       s64 val;
+       struct mem_cgroup_stat_cpu *cpustat;
+
+       cpu = get_cpu();
+       cpustat = &mem->stat.cpustat[cpu];
+       val = __mem_cgroup_stat_read_local(cpustat, MEM_CGROUP_STAT_SOFTLIMIT);
+       if (unlikely(val < 0)) {
+               __mem_cgroup_stat_set_safe(cpustat, MEM_CGROUP_STAT_SOFTLIMIT,
+                               SOFTLIMIT_EVENTS_THRESH);
+               ret = true;
+       }
+       put_cpu();
+       return ret;
+}
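
A minimal sketch (standalone userspace C, assumed names) of the event-threshold
pattern implemented above: a counter starts at SOFTLIMIT_EVENTS_THRESH, every
page in/out decrements it (see mem_cgroup_charge_statistics() below), and only
when it drops below zero does the check fire and reset it, so the expensive
tree update runs roughly once per thousand events rather than on every page:

        #include <stdbool.h>
        #include <stdio.h>

        #define EVENTS_THRESH 1000

        static long events = EVENTS_THRESH;     /* models the per-cpu counter */

        static bool soft_limit_check(void)
        {
                if (--events < 0) {
                        events = EVENTS_THRESH; /* reset for the next window */
                        return true;            /* time for the slow path */
                }
                return false;
        }

        int main(void)
        {
                long hits = 0;

                for (long i = 0; i < 5000; i++)
                        if (soft_limit_check())
                                hits++;
                printf("slow path ran %ld times\n", hits);      /* 4 */
                return 0;
        }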
+
+static void mem_cgroup_update_tree(struct mem_cgroup *mem, struct page *page)
+{
+       unsigned long long excess;
+       struct mem_cgroup_per_zone *mz;
+       struct mem_cgroup_tree_per_zone *mctz;
+       int nid = page_to_nid(page);
+       int zid = page_zonenum(page);
+       mctz = soft_limit_tree_from_page(page);
+
+       /*
+        * Necessary to update all ancestors when hierarchy is used,
+        * because their event counters are not touched.
+        */
+       for (; mem; mem = parent_mem_cgroup(mem)) {
+               mz = mem_cgroup_zoneinfo(mem, nid, zid);
+               excess = res_counter_soft_limit_excess(&mem->res);
+               /*
+                * We have to update the tree if mz is on the RB-tree or
+                * mem is over its soft limit.
+                */
+               if (excess || mz->on_tree) {
+                       spin_lock(&mctz->lock);
+                       /* if on-tree, remove it */
+                       if (mz->on_tree)
+                               __mem_cgroup_remove_exceeded(mem, mz, mctz);
+                       /*
+                        * Insert again. mz->usage_in_excess will be updated.
+                        * If excess is 0, no tree ops.
+                        */
+                       __mem_cgroup_insert_exceeded(mem, mz, mctz, excess);
+                       spin_unlock(&mctz->lock);
+               }
+       }
+}
+
+static void mem_cgroup_remove_from_trees(struct mem_cgroup *mem)
+{
+       int node, zone;
+       struct mem_cgroup_per_zone *mz;
+       struct mem_cgroup_tree_per_zone *mctz;
+
+       for_each_node_state(node, N_POSSIBLE) {
+               for (zone = 0; zone < MAX_NR_ZONES; zone++) {
+                       mz = mem_cgroup_zoneinfo(mem, node, zone);
+                       mctz = soft_limit_tree_node_zone(node, zone);
+                       mem_cgroup_remove_exceeded(mem, mz, mctz);
+               }
+       }
+}
+
+static inline unsigned long mem_cgroup_get_excess(struct mem_cgroup *mem)
+{
+       return res_counter_soft_limit_excess(&mem->res) >> PAGE_SHIFT;
+}
+
+static struct mem_cgroup_per_zone *
+__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
+{
+       struct rb_node *rightmost = NULL;
+       struct mem_cgroup_per_zone *mz;
+
+retry:
+       mz = NULL;
+       rightmost = rb_last(&mctz->rb_root);
+       if (!rightmost)
+               goto done;              /* Nothing to reclaim from */
+
+       mz = rb_entry(rightmost, struct mem_cgroup_per_zone, tree_node);
+       /*
+        * Remove the node now but someone else can add it back;
+        * we will add it back at the end of reclaim to its correct
+        * position in the tree.
+        */
+       __mem_cgroup_remove_exceeded(mz->mem, mz, mctz);
+       if (!res_counter_soft_limit_excess(&mz->mem->res) ||
+               !css_tryget(&mz->mem->css))
+               goto retry;
+done:
+       return mz;
+}
+
+static struct mem_cgroup_per_zone *
+mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
+{
+       struct mem_cgroup_per_zone *mz;
+
+       spin_lock(&mctz->lock);
+       mz = __mem_cgroup_largest_soft_limit_node(mctz);
+       spin_unlock(&mctz->lock);
+       return mz;
+}
+
+static void mem_cgroup_swap_statistics(struct mem_cgroup *mem,
+                                        bool charge)
+{
+       int val = (charge) ? 1 : -1;
+       struct mem_cgroup_stat *stat = &mem->stat;
+       struct mem_cgroup_stat_cpu *cpustat;
+       int cpu = get_cpu();
+
+       cpustat = &stat->cpustat[cpu];
+       __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_SWAPOUT, val);
+       put_cpu();
+}
 
 static void mem_cgroup_charge_statistics(struct mem_cgroup *mem,
                                         struct page_cgroup *pc,
                                         bool charge)
 {
-       int val = (charge)? 1 : -1;
+       int val = (charge) ? 1 : -1;
        struct mem_cgroup_stat *stat = &mem->stat;
        struct mem_cgroup_stat_cpu *cpustat;
        int cpu = get_cpu();
@@ -236,28 +548,10 @@ static void mem_cgroup_charge_statistics(struct mem_cgroup *mem,
        else
                __mem_cgroup_stat_add_safe(cpustat,
                                MEM_CGROUP_STAT_PGPGOUT_COUNT, 1);
+       __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_SOFTLIMIT, -1);
        put_cpu();
 }
 
-static struct mem_cgroup_per_zone *
-mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid)
-{
-       return &mem->info.nodeinfo[nid]->zoneinfo[zid];
-}
-
-static struct mem_cgroup_per_zone *
-page_cgroup_zoneinfo(struct page_cgroup *pc)
-{
-       struct mem_cgroup *mem = pc->mem_cgroup;
-       int nid = page_cgroup_nid(pc);
-       int zid = page_cgroup_zid(pc);
-
-       if (!mem)
-               return NULL;
-
-       return mem_cgroup_zoneinfo(mem, nid, zid);
-}
-
 static unsigned long mem_cgroup_get_local_zonestat(struct mem_cgroup *mem,
                                        enum lru_list idx)
 {
@@ -503,7 +797,13 @@ int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
        task_unlock(task);
        if (!curr)
                return 0;
-       if (curr->use_hierarchy)
+       /*
+        * We should check use_hierarchy of "mem" not "curr". Checking
+        * use_hierarchy of "curr" here would make this function return true
+        * whenever hierarchy is enabled in "curr" and "curr" is a child of
+        * "mem" in the *cgroup* hierarchy (even if use_hierarchy is disabled
+        * in "mem").
+        */
+       if (mem->use_hierarchy)
                ret = css_is_ancestor(&curr->css, &mem->css);
        else
                ret = (curr == mem);
@@ -752,7 +1052,7 @@ void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
        static char memcg_name[PATH_MAX];
        int ret;
 
-       if (!memcg)
+       if (!memcg || !p)
                return;
 
 
@@ -862,28 +1162,64 @@ mem_cgroup_select_victim(struct mem_cgroup *root_mem)
  * If shrink==true, to avoid freeing too much, this returns immediately.
  */
 static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
-                                  gfp_t gfp_mask, bool noswap, bool shrink)
+                                               struct zone *zone,
+                                               gfp_t gfp_mask,
+                                               unsigned long reclaim_options)
 {
        struct mem_cgroup *victim;
        int ret, total = 0;
        int loop = 0;
+       bool noswap = reclaim_options & MEM_CGROUP_RECLAIM_NOSWAP;
+       bool shrink = reclaim_options & MEM_CGROUP_RECLAIM_SHRINK;
+       bool check_soft = reclaim_options & MEM_CGROUP_RECLAIM_SOFT;
+       unsigned long excess = mem_cgroup_get_excess(root_mem);
 
        /* If memsw_is_minimum==1, swap-out is of-no-use. */
        if (root_mem->memsw_is_minimum)
                noswap = true;
 
-       while (loop < 2) {
+       while (1) {
                victim = mem_cgroup_select_victim(root_mem);
-               if (victim == root_mem)
+               if (victim == root_mem) {
                        loop++;
+                       if (loop >= 1)
+                               drain_all_stock_async();
+                       if (loop >= 2) {
+                               /*
+                                * If we have not been able to reclaim
+                                * anything, it might be because there are
+                                * no reclaimable pages under this hierarchy.
+                                */
+                               if (!check_soft || !total) {
+                                       css_put(&victim->css);
+                                       break;
+                               }
+                               /*
+                                * We want to do more targeted reclaim.
+                                * excess >> 2 is not too aggressive, so we
+                                * don't reclaim too much, nor so small that
+                                * we keep coming back to reclaim from this
+                                * cgroup.
+                                */
+                               if (total >= (excess >> 2) ||
+                                       (loop > MEM_CGROUP_MAX_RECLAIM_LOOPS)) {
+                                       css_put(&victim->css);
+                                       break;
+                               }
+                       }
+               }
                if (!mem_cgroup_local_usage(&victim->stat)) {
                        /* this cgroup's local usage == 0 */
                        css_put(&victim->css);
                        continue;
                }
                /* we use swappiness of local cgroup */
-               ret = try_to_free_mem_cgroup_pages(victim, gfp_mask, noswap,
-                                                  get_swappiness(victim));
+               if (check_soft)
+                       ret = mem_cgroup_shrink_node_zone(victim, gfp_mask,
+                               noswap, get_swappiness(victim), zone,
+                               zone->zone_pgdat->node_id);
+               else
+                       ret = try_to_free_mem_cgroup_pages(victim, gfp_mask,
+                                               noswap, get_swappiness(victim));
                css_put(&victim->css);
                /*
                 * At shrinking usage, we can't check we should stop here or
@@ -893,7 +1229,10 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
                if (shrink)
                        return ret;
                total += ret;
-               if (mem_cgroup_check_under_limit(root_mem))
+               if (check_soft) {
+                       if (res_counter_check_under_soft_limit(&root_mem->res))
+                               return total;
+               } else if (mem_cgroup_check_under_limit(root_mem))
                        return 1 + total;
        }
        return total;
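
For concreteness (illustrative numbers, not from the patch): if a group is
2048 pages over its soft limit, excess = 2048, so the soft-limit branch above
stops once total >= excess >> 2 = 512 reclaimed pages, or once loop exceeds
MEM_CGROUP_MAX_RECLAIM_LOOPS (100), whichever comes first.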
@@ -931,7 +1270,7 @@ static void record_last_oom(struct mem_cgroup *mem)
  * Currently used to update mapped file statistics, but the routine can be
  * generalized to update other statistics as well.
  */
-void mem_cgroup_update_mapped_file_stat(struct page *page, int val)
+void mem_cgroup_update_file_mapped(struct page *page, int val)
 {
        struct mem_cgroup *mem;
        struct mem_cgroup_stat *stat;
@@ -939,9 +1278,6 @@ void mem_cgroup_update_mapped_file_stat(struct page *page, int val)
        int cpu;
        struct page_cgroup *pc;
 
-       if (!page_is_file_cache(page))
-               return;
-
        pc = lookup_page_cgroup(page);
        if (unlikely(!pc))
                return;
@@ -961,22 +1297,150 @@ void mem_cgroup_update_mapped_file_stat(struct page *page, int val)
        stat = &mem->stat;
        cpustat = &stat->cpustat[cpu];
 
-       __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_MAPPED_FILE, val);
+       __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_FILE_MAPPED, val);
 done:
        unlock_page_cgroup(pc);
 }
 
+/*
+ * Size of the first charge trial. "32" comes from vmscan.c's magic value.
+ * TODO: bigger charge sizes may be necessary on big-iron machines.
+ */
+#define CHARGE_SIZE    (32 * PAGE_SIZE)
+struct memcg_stock_pcp {
+       struct mem_cgroup *cached; /* this is never the root cgroup */
+       int charge;
+       struct work_struct work;
+};
+static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
+static atomic_t memcg_drain_count;
+
+/*
+ * Try to consume stocked charge on this cpu. If successful, PAGE_SIZE is
+ * consumed from the local stock and true is returned. If the stock is empty
+ * or holds charges from a cgroup other than the current target, false is
+ * returned and the stock will be refilled by the caller.
+ */
+static bool consume_stock(struct mem_cgroup *mem)
+{
+       struct memcg_stock_pcp *stock;
+       bool ret = true;
+
+       stock = &get_cpu_var(memcg_stock);
+       if (mem == stock->cached && stock->charge)
+               stock->charge -= PAGE_SIZE;
+       else /* need to call res_counter_charge */
+               ret = false;
+       put_cpu_var(memcg_stock);
+       return ret;
+}
+
+/*
+ * Return the charges cached in the percpu stock to the res_counter and
+ * reset the cached information.
+ */
+static void drain_stock(struct memcg_stock_pcp *stock)
+{
+       struct mem_cgroup *old = stock->cached;
+
+       if (stock->charge) {
+               res_counter_uncharge(&old->res, stock->charge);
+               if (do_swap_account)
+                       res_counter_uncharge(&old->memsw, stock->charge);
+       }
+       stock->cached = NULL;
+       stock->charge = 0;
+}
+
+/*
+ * This must be called with preemption disabled, or by a thread that is
+ * pinned to the local cpu.
+ */
+static void drain_local_stock(struct work_struct *dummy)
+{
+       struct memcg_stock_pcp *stock = &__get_cpu_var(memcg_stock);
+       drain_stock(stock);
+}
+
+/*
+ * Cache charges (val) obtained from the res_counter in the local per-cpu
+ * area. They will be consumed later by consume_stock().
+ */
+static void refill_stock(struct mem_cgroup *mem, int val)
+{
+       struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock);
+
+       if (stock->cached != mem) { /* reset if necessary */
+               drain_stock(stock);
+               stock->cached = mem;
+       }
+       stock->charge += val;
+       put_cpu_var(memcg_stock);
+}
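
A simplified single-cpu model (standalone userspace C, hypothetical names) of
the stock mechanism defined above: the first charge pulls CHARGE_SIZE from the
shared counter, keeps one page, and stocks the remainder via refill_stock();
subsequent charges are served from the stock with no shared-counter traffic
until it runs dry:

        #include <stdbool.h>
        #include <stdio.h>

        #define PAGE_SZ   4096
        #define CHARGE_SZ (32 * PAGE_SZ)

        static long global_usage;       /* stands in for the res_counter */
        static long stock;              /* stands in for memcg_stock */

        static bool consume_stock(void)
        {
                if (stock >= PAGE_SZ) {
                        stock -= PAGE_SZ;
                        return true;
                }
                return false;
        }

        static void charge_page(void)
        {
                if (consume_stock())
                        return;                 /* fast path: no global access */
                global_usage += CHARGE_SZ;      /* one bulk charge... */
                stock += CHARGE_SZ - PAGE_SZ;   /* ...stock the remainder */
        }

        int main(void)
        {
                for (int i = 0; i < 64; i++)
                        charge_page();
                /* 64 pages charged with only two bulk counter updates */
                printf("global=%ld stock=%ld\n", global_usage, stock);
                return 0;
        }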
+
+/*
+ * Tries to drain stocked charges on other cpus. This function is asynchronous
+ * and just schedules a work item per cpu to drain locally on each cpu. The
+ * caller can expect some charges to be returned to the res_counter later,
+ * but cannot wait for that to happen.
+ */
+static void drain_all_stock_async(void)
+{
+       int cpu;
+       /* This function schedules "drain" asynchronously; the result of
+        * "drain" is not directly handled by callers. So if someone is
+        * already draining, we don't have to schedule another drain.
+        * In any case, the WORK_STRUCT_PENDING check in queue_work_on()
+        * will catch a race; a loose check here is enough.
+        */
+       if (atomic_read(&memcg_drain_count))
+               return;
+       /* Notify other cpus that system-wide "drain" is running */
+       atomic_inc(&memcg_drain_count);
+       get_online_cpus();
+       for_each_online_cpu(cpu) {
+               struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
+               schedule_work_on(cpu, &stock->work);
+       }
+       put_online_cpus();
+       atomic_dec(&memcg_drain_count);
+       /* We don't wait for flush_work */
+}
+
+/* This is a synchronous drain interface. */
+static void drain_all_stock_sync(void)
+{
+       /* called when force_empty is called */
+       atomic_inc(&memcg_drain_count);
+       schedule_on_each_cpu(drain_local_stock);
+       atomic_dec(&memcg_drain_count);
+}
+
+static int __cpuinit memcg_stock_cpu_callback(struct notifier_block *nb,
+                                       unsigned long action,
+                                       void *hcpu)
+{
+       int cpu = (unsigned long)hcpu;
+       struct memcg_stock_pcp *stock;
+
+       if (action != CPU_DEAD)
+               return NOTIFY_OK;
+       stock = &per_cpu(memcg_stock, cpu);
+       drain_stock(stock);
+       return NOTIFY_OK;
+}
+
 /*
  * Unlike the exported interface, an "oom" parameter is added. If oom==true,
  * the oom-killer can be invoked.
  */
 static int __mem_cgroup_try_charge(struct mm_struct *mm,
                        gfp_t gfp_mask, struct mem_cgroup **memcg,
-                       bool oom)
+                       bool oom, struct page *page)
 {
        struct mem_cgroup *mem, *mem_over_limit;
        int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
        struct res_counter *fail_res;
+       int csize = CHARGE_SIZE;
 
        if (unlikely(test_thread_flag(TIF_MEMDIE))) {
                /* Don't account this! */
@@ -1001,22 +1465,26 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
                return 0;
 
        VM_BUG_ON(css_is_removed(&mem->css));
+       if (mem_cgroup_is_root(mem))
+               goto done;
 
        while (1) {
-               int ret;
-               bool noswap = false;
+               int ret = 0;
+               unsigned long flags = 0;
 
-               ret = res_counter_charge(&mem->res, PAGE_SIZE, &fail_res);
+               if (consume_stock(mem))
+                       goto charged;
+
+               ret = res_counter_charge(&mem->res, csize, &fail_res);
                if (likely(!ret)) {
                        if (!do_swap_account)
                                break;
-                       ret = res_counter_charge(&mem->memsw, PAGE_SIZE,
-                                                       &fail_res);
+                       ret = res_counter_charge(&mem->memsw, csize, &fail_res);
                        if (likely(!ret))
                                break;
                        /* mem+swap counter fails */
-                       res_counter_uncharge(&mem->res, PAGE_SIZE);
-                       noswap = true;
+                       res_counter_uncharge(&mem->res, csize);
+                       flags |= MEM_CGROUP_RECLAIM_NOSWAP;
                        mem_over_limit = mem_cgroup_from_res_counter(fail_res,
                                                                        memsw);
                } else
@@ -1024,11 +1492,16 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
                        mem_over_limit = mem_cgroup_from_res_counter(fail_res,
                                                                        res);
 
+               /* reduce request size and retry */
+               if (csize > PAGE_SIZE) {
+                       csize = PAGE_SIZE;
+                       continue;
+               }
                if (!(gfp_mask & __GFP_WAIT))
                        goto nomem;
 
-               ret = mem_cgroup_hierarchical_reclaim(mem_over_limit, gfp_mask,
-                                                       noswap, false);
+               ret = mem_cgroup_hierarchical_reclaim(mem_over_limit, NULL,
+                                               gfp_mask, flags);
                if (ret)
                        continue;
 
@@ -1043,22 +1516,95 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
                if (mem_cgroup_check_under_limit(mem_over_limit))
                        continue;
 
+               /* try to avoid oom while someone is moving charge */
+               if (mc.moving_task && current != mc.moving_task) {
+                       struct mem_cgroup *from, *to;
+                       bool do_continue = false;
+                       /*
+                        * There is a small race that "from" or "to" can be
+                        * freed by rmdir, so we use css_tryget().
+                        */
+                       rcu_read_lock();
+                       from = mc.from;
+                       to = mc.to;
+                       if (from && css_tryget(&from->css)) {
+                               if (mem_over_limit->use_hierarchy)
+                                       do_continue = css_is_ancestor(
+                                                       &from->css,
+                                                       &mem_over_limit->css);
+                               else
+                                       do_continue = (from == mem_over_limit);
+                               css_put(&from->css);
+                       }
+                       if (!do_continue && to && css_tryget(&to->css)) {
+                               if (mem_over_limit->use_hierarchy)
+                                       do_continue = css_is_ancestor(
+                                                       &to->css,
+                                                       &mem_over_limit->css);
+                               else
+                                       do_continue = (to == mem_over_limit);
+                               css_put(&to->css);
+                       }
+                       rcu_read_unlock();
+                       if (do_continue) {
+                               DEFINE_WAIT(wait);
+                               prepare_to_wait(&mc.waitq, &wait,
+                                                       TASK_INTERRUPTIBLE);
+                               /* moving charge context might have finished. */
+                               if (mc.moving_task)
+                                       schedule();
+                               finish_wait(&mc.waitq, &wait);
+                               continue;
+                       }
+               }
+
                if (!nr_retries--) {
                        if (oom) {
-                               mutex_lock(&memcg_tasklist);
                                mem_cgroup_out_of_memory(mem_over_limit, gfp_mask);
-                               mutex_unlock(&memcg_tasklist);
                                record_last_oom(mem_over_limit);
                        }
                        goto nomem;
                }
        }
+       if (csize > PAGE_SIZE)
+               refill_stock(mem, csize - PAGE_SIZE);
+charged:
+       /*
+        * Insert the memcg (and its ancestors) into the softlimit RB-tree
+        * if they exceed their softlimit.
+        */
+       if (page && mem_cgroup_soft_limit_check(mem))
+               mem_cgroup_update_tree(mem, page);
+done:
        return 0;
 nomem:
        css_put(&mem->css);
        return -ENOMEM;
 }
 
+/*
+ * Sometimes we have to undo a charge we got from try_charge().
+ * This function uncharges it and drops the css refcount that
+ * try_charge() took.
+ */
+static void __mem_cgroup_cancel_charge(struct mem_cgroup *mem,
+                                                       unsigned long count)
+{
+       if (!mem_cgroup_is_root(mem)) {
+               res_counter_uncharge(&mem->res, PAGE_SIZE * count);
+               if (do_swap_account)
+                       res_counter_uncharge(&mem->memsw, PAGE_SIZE * count);
+               VM_BUG_ON(test_bit(CSS_ROOT, &mem->css.flags));
+               WARN_ON_ONCE(count > INT_MAX);
+               __css_put(&mem->css, (int)count);
+       }
+       /* we don't need css_put for root */
+}
+
+static void mem_cgroup_cancel_charge(struct mem_cgroup *mem)
+{
+       __mem_cgroup_cancel_charge(mem, 1);
+}
 
 /*
  * A helper function to get mem_cgroup from ID. must be called under
@@ -1079,25 +1625,22 @@ static struct mem_cgroup *mem_cgroup_lookup(unsigned short id)
        return container_of(css, struct mem_cgroup, css);
 }
 
-static struct mem_cgroup *try_get_mem_cgroup_from_swapcache(struct page *page)
+struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
 {
-       struct mem_cgroup *mem;
+       struct mem_cgroup *mem = NULL;
        struct page_cgroup *pc;
        unsigned short id;
        swp_entry_t ent;
 
        VM_BUG_ON(!PageLocked(page));
 
-       if (!PageSwapCache(page))
-               return NULL;
-
        pc = lookup_page_cgroup(page);
        lock_page_cgroup(pc);
        if (PageCgroupUsed(pc)) {
                mem = pc->mem_cgroup;
                if (mem && !css_tryget(&mem->css))
                        mem = NULL;
-       } else {
+       } else if (PageSwapCache(page)) {
                ent.val = page_private(page);
                id = lookup_swap_cgroup(ent);
                rcu_read_lock();
@@ -1126,14 +1669,18 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
        lock_page_cgroup(pc);
        if (unlikely(PageCgroupUsed(pc))) {
                unlock_page_cgroup(pc);
-               res_counter_uncharge(&mem->res, PAGE_SIZE);
-               if (do_swap_account)
-                       res_counter_uncharge(&mem->memsw, PAGE_SIZE);
-               css_put(&mem->css);
+               mem_cgroup_cancel_charge(mem);
                return;
        }
 
        pc->mem_cgroup = mem;
+       /*
+        * We access a page_cgroup asynchronously without lock_page_cgroup().
+        * Especially when a page_cgroup is taken from a page, pc->mem_cgroup
+        * is accessed after testing USED bit. To make pc->mem_cgroup visible
+        * before USED bit, we need memory barrier here.
+        * See mem_cgroup_add_lru_list(), etc.
+        */
        smp_wmb();
        switch (ctype) {
        case MEM_CGROUP_CHARGE_TYPE_CACHE:
@@ -1155,27 +1702,25 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
 }
 
 /**
- * mem_cgroup_move_account - move account of the page
+ * __mem_cgroup_move_account - move account of the page
  * @pc:        page_cgroup of the page.
  * @from: mem_cgroup which the page is moved from.
  * @to:        mem_cgroup which the page is moved to. @from != @to.
+ * @uncharge: whether we should call uncharge and css_put against @from.
  *
  * The caller must confirm following.
  * - page is not on LRU (isolate_page() is useful.)
+ * - the pc is locked, used, and ->mem_cgroup points to @from.
  *
- * returns 0 at success,
- * returns -EBUSY when lock is busy or "pc" is unstable.
- *
- * This function does "uncharge" from old cgroup but doesn't do "charge" to
- * new cgroup. It should be done by a caller.
+ * This function doesn't do "charge" or css_get to the new cgroup. That should
+ * be done by the caller (__mem_cgroup_try_charge would be useful). If
+ * @uncharge is true, this function does "uncharge" from the old cgroup; if
+ * @uncharge is false, the caller should do the "uncharge" itself.
  */
 
-static int mem_cgroup_move_account(struct page_cgroup *pc,
-       struct mem_cgroup *from, struct mem_cgroup *to)
+static void __mem_cgroup_move_account(struct page_cgroup *pc,
+       struct mem_cgroup *from, struct mem_cgroup *to, bool uncharge)
 {
-       struct mem_cgroup_per_zone *from_mz, *to_mz;
-       int nid, zid;
-       int ret = -EBUSY;
        struct page *page;
        int cpu;
        struct mem_cgroup_stat *stat;
@@ -1183,56 +1728,56 @@ static int mem_cgroup_move_account(struct page_cgroup *pc,
 
        VM_BUG_ON(from == to);
        VM_BUG_ON(PageLRU(pc->page));
-
-       nid = page_cgroup_nid(pc);
-       zid = page_cgroup_zid(pc);
-       from_mz =  mem_cgroup_zoneinfo(from, nid, zid);
-       to_mz =  mem_cgroup_zoneinfo(to, nid, zid);
-
-       if (!trylock_page_cgroup(pc))
-               return ret;
-
-       if (!PageCgroupUsed(pc))
-               goto out;
-
-       if (pc->mem_cgroup != from)
-               goto out;
-
-       res_counter_uncharge(&from->res, PAGE_SIZE);
-       mem_cgroup_charge_statistics(from, pc, false);
+       VM_BUG_ON(!PageCgroupLocked(pc));
+       VM_BUG_ON(!PageCgroupUsed(pc));
+       VM_BUG_ON(pc->mem_cgroup != from);
 
        page = pc->page;
-       if (page_is_file_cache(page) && page_mapped(page)) {
+       if (page_mapped(page) && !PageAnon(page)) {
                cpu = smp_processor_id();
                /* Update mapped_file data for mem_cgroup "from" */
                stat = &from->stat;
                cpustat = &stat->cpustat[cpu];
-               __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_MAPPED_FILE,
+               __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_FILE_MAPPED,
                                                -1);
 
                /* Update mapped_file data for mem_cgroup "to" */
                stat = &to->stat;
                cpustat = &stat->cpustat[cpu];
-               __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_MAPPED_FILE,
+               __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_FILE_MAPPED,
                                                1);
        }
+       mem_cgroup_charge_statistics(from, pc, false);
+       if (uncharge)
+               /* This is not "cancel", but cancel_charge does all we need. */
+               mem_cgroup_cancel_charge(from);
 
-       if (do_swap_account)
-               res_counter_uncharge(&from->memsw, PAGE_SIZE);
-       css_put(&from->css);
-
-       css_get(&to->css);
+       /* caller should have done css_get */
        pc->mem_cgroup = to;
        mem_cgroup_charge_statistics(to, pc, true);
-       ret = 0;
-out:
-       unlock_page_cgroup(pc);
        /*
         * We charges against "to" which may not have any tasks. Then, "to"
         * can be under rmdir(). But in current implementation, caller of
-        * this function is just force_empty() and it's garanteed that
-        * "to" is never removed. So, we don't check rmdir status here.
+        * this function is just force_empty() and move charge, so it's
+        * guaranteed that "to" is never removed. So, we don't check rmdir
+        * status here.
         */
+}
+
+/*
+ * check whether the @pc is valid for moving account and call
+ * __mem_cgroup_move_account()
+ */
+static int mem_cgroup_move_account(struct page_cgroup *pc,
+               struct mem_cgroup *from, struct mem_cgroup *to, bool uncharge)
+{
+       int ret = -EINVAL;
+       lock_page_cgroup(pc);
+       if (PageCgroupUsed(pc) && pc->mem_cgroup == from) {
+               __mem_cgroup_move_account(pc, from, to, uncharge);
+               ret = 0;
+       }
+       unlock_page_cgroup(pc);
        return ret;
 }
 
@@ -1254,43 +1799,25 @@ static int mem_cgroup_move_parent(struct page_cgroup *pc,
        if (!pcg)
                return -EINVAL;
 
+       ret = -EBUSY;
+       if (!get_page_unless_zero(page))
+               goto out;
+       if (isolate_lru_page(page))
+               goto put;
 
        parent = mem_cgroup_from_cont(pcg);
-
-
-       ret = __mem_cgroup_try_charge(NULL, gfp_mask, &parent, false);
+       ret = __mem_cgroup_try_charge(NULL, gfp_mask, &parent, false, page);
        if (ret || !parent)
-               return ret;
-
-       if (!get_page_unless_zero(page)) {
-               ret = -EBUSY;
-               goto uncharge;
-       }
-
-       ret = isolate_lru_page(page);
+               goto put_back;
 
+       ret = mem_cgroup_move_account(pc, child, parent, true);
        if (ret)
-               goto cancel;
-
-       ret = mem_cgroup_move_account(pc, child, parent);
-
+               mem_cgroup_cancel_charge(parent);
+put_back:
        putback_lru_page(page);
-       if (!ret) {
-               put_page(page);
-               /* drop extra refcnt by try_charge() */
-               css_put(&parent->css);
-               return 0;
-       }
-
-cancel:
+put:
        put_page(page);
-uncharge:
-       /* drop extra refcnt by try_charge() */
-       css_put(&parent->css);
-       /* uncharge if move fails */
-       res_counter_uncharge(&parent->res, PAGE_SIZE);
-       if (do_swap_account)
-               res_counter_uncharge(&parent->memsw, PAGE_SIZE);
+out:
        return ret;
 }
 
@@ -1315,7 +1842,7 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
        prefetchw(pc);
 
        mem = memcg;
-       ret = __mem_cgroup_try_charge(mm, gfp_mask, &mem, true);
+       ret = __mem_cgroup_try_charge(mm, gfp_mask, &mem, true, page);
        if (ret || !mem)
                return ret;
 
@@ -1408,7 +1935,7 @@ int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
 /*
  * While swap-in, try_charge -> commit or cancel, the page is locked.
  * And when try_charge() successfully returns, one refcnt to memcg without
- * struct page_cgroup is aquired. This refcnt will be cumsumed by
+ * struct page_cgroup is acquired. This refcnt will be consumed by
  * "commit()" or removed by "cancel()"
  */
 int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
@@ -1425,23 +1952,24 @@ int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
                goto charge_cur_mm;
        /*
         * A racing thread's fault, or swapoff, may have already updated
-        * the pte, and even removed page from swap cache: return success
-        * to go on to do_swap_page()'s pte_same() test, which should fail.
+        * the pte, and even removed page from swap cache: in those cases
+        * do_swap_page()'s pte_same() test will fail; but there's also a
+        * KSM case which does need to charge the page.
         */
        if (!PageSwapCache(page))
-               return 0;
-       mem = try_get_mem_cgroup_from_swapcache(page);
+               goto charge_cur_mm;
+       mem = try_get_mem_cgroup_from_page(page);
        if (!mem)
                goto charge_cur_mm;
        *ptr = mem;
-       ret = __mem_cgroup_try_charge(NULL, mask, ptr, true);
+       ret = __mem_cgroup_try_charge(NULL, mask, ptr, true, page);
        /* drop extra refcnt from tryget */
        css_put(&mem->css);
        return ret;
 charge_cur_mm:
        if (unlikely(!mm))
                mm = &init_mm;
-       return __mem_cgroup_try_charge(mm, mask, ptr, true);
+       return __mem_cgroup_try_charge(mm, mask, ptr, true, page);
 }
 
 static void
@@ -1479,7 +2007,9 @@ __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
                         * This recorded memcg can be obsolete one. So, avoid
                         * calling css_tryget
                         */
-                       res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
+                       if (!mem_cgroup_is_root(memcg))
+                               res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
+                       mem_cgroup_swap_statistics(memcg, false);
                        mem_cgroup_put(memcg);
                }
                rcu_read_unlock();
@@ -1504,13 +2034,54 @@ void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *mem)
                return;
        if (!mem)
                return;
+       mem_cgroup_cancel_charge(mem);
+}
+
+static void
+__do_uncharge(struct mem_cgroup *mem, const enum charge_type ctype)
+{
+       struct memcg_batch_info *batch = NULL;
+       bool uncharge_memsw = true;
+       /* If swapout, usage of swap doesn't decrease */
+       if (!do_swap_account || ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
+               uncharge_memsw = false;
+       /*
+        * do_batch > 0 when unmapping pages or inode invalidate/truncate.
+        * In those cases, all pages freed continuously can be expected to be
+        * in the same cgroup, and we have a chance to coalesce uncharges.
+        * But we uncharge one by one if the task is being killed by OOM
+        * (TIF_MEMDIE), because we want to uncharge as soon as possible.
+        */
+       if (!current->memcg_batch.do_batch || test_thread_flag(TIF_MEMDIE))
+               goto direct_uncharge;
+
+       batch = &current->memcg_batch;
+       /*
+        * Usually, we do css_get() when we remember a memcg pointer.
+        * But in this case, we keep res->usage until the end of a series of
+        * uncharges, so it's ok to ignore the memcg's refcnt.
+        */
+       if (!batch->memcg)
+               batch->memcg = mem;
+       /*
+        * In the typical case, batch->memcg == mem. This means we can
+        * merge a series of uncharges into a single res_counter uncharge.
+        * If not, we uncharge the res_counter one by one.
+        */
+       if (batch->memcg != mem)
+               goto direct_uncharge;
+       /* remember freed charge and uncharge it later */
+       batch->bytes += PAGE_SIZE;
+       if (uncharge_memsw)
+               batch->memsw_bytes += PAGE_SIZE;
+       return;
+direct_uncharge:
        res_counter_uncharge(&mem->res, PAGE_SIZE);
-       if (do_swap_account)
+       if (uncharge_memsw)
                res_counter_uncharge(&mem->memsw, PAGE_SIZE);
-       css_put(&mem->css);
+       return;
 }
 
-
 /*
  * uncharge if !page_mapped(page)
  */
@@ -1558,9 +2129,10 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
                break;
        }
 
-       res_counter_uncharge(&mem->res, PAGE_SIZE);
-       if (do_swap_account && (ctype != MEM_CGROUP_CHARGE_TYPE_SWAPOUT))
-               res_counter_uncharge(&mem->memsw, PAGE_SIZE);
+       if (!mem_cgroup_is_root(mem))
+               __do_uncharge(mem, ctype);
+       if (ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
+               mem_cgroup_swap_statistics(mem, true);
        mem_cgroup_charge_statistics(mem, pc, false);
 
        ClearPageCgroupUsed(pc);
@@ -1574,6 +2146,8 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
        mz = page_cgroup_zoneinfo(pc);
        unlock_page_cgroup(pc);
 
+       if (mem_cgroup_soft_limit_check(mem))
+               mem_cgroup_update_tree(mem, page);
        /* at swapout, this memcg will be accessed to record to swap */
        if (ctype != MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
                css_put(&mem->css);
@@ -1602,6 +2176,50 @@ void mem_cgroup_uncharge_cache_page(struct page *page)
        __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE);
 }
 
+/*
+ * Batch_start/batch_end are called from unmap_page_range and inode
+ * invalidate/truncate. In those cases, pages are freed continuously and we
+ * can expect them to be in the same memcg. Each of those callers itself
+ * limits the number of pages freed at once, so uncharge_start/end() is
+ * called properly. This may be called multiple (nested) times in a context.
+ */
+
+void mem_cgroup_uncharge_start(void)
+{
+       current->memcg_batch.do_batch++;
+       /* We can do nest. */
+       if (current->memcg_batch.do_batch == 1) {
+               current->memcg_batch.memcg = NULL;
+               current->memcg_batch.bytes = 0;
+               current->memcg_batch.memsw_bytes = 0;
+       }
+}
+
+void mem_cgroup_uncharge_end(void)
+{
+       struct memcg_batch_info *batch = &current->memcg_batch;
+
+       if (!batch->do_batch)
+               return;
+
+       batch->do_batch--;
+       if (batch->do_batch) /* If stacked, do nothing. */
+               return;
+
+       if (!batch->memcg)
+               return;
+       /*
+        * This "batch->memcg" is valid without any css_get/put etc...
+        * because we hide charges behind us.
+        */
+       if (batch->bytes)
+               res_counter_uncharge(&batch->memcg->res, batch->bytes);
+       if (batch->memsw_bytes)
+               res_counter_uncharge(&batch->memcg->memsw, batch->memsw_bytes);
+       /* forget this pointer (for sanity check) */
+       batch->memcg = NULL;
+}
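
A toy single-threaded model (standalone userspace C, one "byte" per page) of
the nesting batch API above: the counter only takes the bulk hit when the
outermost uncharge_end() flushes, matching the do_batch++/-- logic:

        #include <stdio.h>

        struct batch { int do_batch; long bytes; };

        static struct batch cur;        /* models current->memcg_batch */
        static long counter = 1000;     /* models res_counter usage */

        static void uncharge_start(void)
        {
                if (cur.do_batch++ == 0)        /* outermost start resets */
                        cur.bytes = 0;
        }

        static void uncharge_page(void)
        {
                if (cur.do_batch)
                        cur.bytes += 1;         /* remember, flush later */
                else
                        counter -= 1;           /* direct uncharge */
        }

        static void uncharge_end(void)
        {
                if (--cur.do_batch)
                        return;                 /* stacked: inner end is a no-op */
                counter -= cur.bytes;           /* one bulk counter update */
                cur.bytes = 0;
        }

        int main(void)
        {
                uncharge_start();
                uncharge_start();               /* nesting is allowed */
                for (int i = 0; i < 16; i++)
                        uncharge_page();
                uncharge_end();                 /* inner end: nothing flushed */
                uncharge_end();                 /* outer end: counter -= 16 */
                printf("counter=%ld\n", counter);       /* 984 */
                return 0;
        }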
+
 #ifdef CONFIG_SWAP
 /*
  * called after __delete_from_swap_cache() and drop "page" account.
@@ -1649,11 +2267,71 @@ void mem_cgroup_uncharge_swap(swp_entry_t ent)
                 * We uncharge this because swap is freed.
                 * This memcg can be obsolete one. We avoid calling css_tryget
                 */
-               res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
+               if (!mem_cgroup_is_root(memcg))
+                       res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
+               mem_cgroup_swap_statistics(memcg, false);
                mem_cgroup_put(memcg);
        }
        rcu_read_unlock();
 }
+
+/**
+ * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
+ * @entry: swap entry to be moved
+ * @from:  mem_cgroup which the entry is moved from
+ * @to:  mem_cgroup which the entry is moved to
+ * @need_fixup: whether we should fixup res_counters and refcounts.
+ *
+ * It succeeds only when the swap_cgroup's record for this entry is the same
+ * as the mem_cgroup's id of @from.
+ *
+ * Returns 0 on success, -EINVAL on failure.
+ *
+ * The caller must have charged to @to, IOW, called res_counter_charge() about
+ * both res and memsw, and called css_get().
+ */
+static int mem_cgroup_move_swap_account(swp_entry_t entry,
+               struct mem_cgroup *from, struct mem_cgroup *to, bool need_fixup)
+{
+       unsigned short old_id, new_id;
+
+       old_id = css_id(&from->css);
+       new_id = css_id(&to->css);
+
+       if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
+               mem_cgroup_swap_statistics(from, false);
+               mem_cgroup_swap_statistics(to, true);
+               /*
+                * This function is only called from task migration context now.
+                * It postpones res_counter and refcount handling till the end
+                * of task migration (mem_cgroup_clear_mc()) for performance
+                * improvement. But we cannot postpone mem_cgroup_get(to)
+                * because if the process that has been moved to @to does
+                * swap-in, the refcount of @to might be decreased to 0.
+                */
+               mem_cgroup_get(to);
+               if (need_fixup) {
+                       if (!mem_cgroup_is_root(from))
+                               res_counter_uncharge(&from->memsw, PAGE_SIZE);
+                       mem_cgroup_put(from);
+                       /*
+                        * we charged both to->res and to->memsw, so we should
+                        * uncharge to->res.
+                        */
+                       if (!mem_cgroup_is_root(to))
+                               res_counter_uncharge(&to->res, PAGE_SIZE);
+                       css_put(&to->css);
+               }
+               return 0;
+       }
+       return -EINVAL;
+}
+#else
+static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
+               struct mem_cgroup *from, struct mem_cgroup *to, bool need_fixup)
+{
+       return -EINVAL;
+}
 #endif
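
A toy userspace model (hypothetical, not the swap_cgroup API) of the cmpxchg
step above: the per-entry record only moves if it still names the source
group's id, which keeps the move safe against a concurrent update of the same
entry:

        #include <stdio.h>

        static unsigned short records[4] = { 7, 7, 3, 7 };  /* per-entry ids */

        static unsigned short record_cmpxchg(int entry, unsigned short old,
                                             unsigned short new)
        {
                unsigned short cur = records[entry];

                if (cur == old)
                        records[entry] = new;
                return cur;             /* caller compares against 'old' */
        }

        int main(void)
        {
                /* entry 2 belongs to id 3, not 7: the move is refused */
                printf("%s\n", record_cmpxchg(2, 7, 9) == 7 ? "moved" : "-EINVAL");
                /* entry 0 belongs to id 7: the move succeeds */
                printf("%s\n", record_cmpxchg(0, 7, 9) == 7 ? "moved" : "-EINVAL");
                return 0;
        }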
 
 /*
@@ -1678,7 +2356,8 @@ int mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr)
        unlock_page_cgroup(pc);
 
        if (mem) {
-               ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem, false);
+               ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem, false,
+                                               page);
                css_put(&mem->css);
        }
        *ptr = mem;
@@ -1774,7 +2453,6 @@ static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
                                unsigned long long val)
 {
        int retry_count;
-       int progress;
        u64 memswlimit;
        int ret = 0;
        int children = mem_cgroup_count_children(memcg);
@@ -1818,8 +2496,8 @@ static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
                if (!ret)
                        break;
 
-               progress = mem_cgroup_hierarchical_reclaim(memcg, GFP_KERNEL,
-                                                  false, true);
+               mem_cgroup_hierarchical_reclaim(memcg, NULL, GFP_KERNEL,
+                                               MEM_CGROUP_RECLAIM_SHRINK);
                curusage = res_counter_read_u64(&memcg->res, RES_USAGE);
                /* Usage is reduced ? */
                if (curusage >= oldusage)
@@ -1871,7 +2549,9 @@ static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
                if (!ret)
                        break;
 
-               mem_cgroup_hierarchical_reclaim(memcg, GFP_KERNEL, true, true);
+               mem_cgroup_hierarchical_reclaim(memcg, NULL, GFP_KERNEL,
+                                               MEM_CGROUP_RECLAIM_NOSWAP |
+                                               MEM_CGROUP_RECLAIM_SHRINK);
                curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
                /* Usage is reduced ? */
                if (curusage >= oldusage)
@@ -1882,6 +2562,97 @@ static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
        return ret;
 }
 
+unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
+                                               gfp_t gfp_mask, int nid,
+                                               int zid)
+{
+       unsigned long nr_reclaimed = 0;
+       struct mem_cgroup_per_zone *mz, *next_mz = NULL;
+       unsigned long reclaimed;
+       int loop = 0;
+       struct mem_cgroup_tree_per_zone *mctz;
+       unsigned long long excess;
+
+       if (order > 0)
+               return 0;
+
+       mctz = soft_limit_tree_node_zone(nid, zid);
+       /*
+        * This loop can run for a while, especially if mem_cgroups
+        * continuously keep exceeding their soft limit and putting the
+        * system under pressure.
+        */
+       do {
+               if (next_mz)
+                       mz = next_mz;
+               else
+                       mz = mem_cgroup_largest_soft_limit_node(mctz);
+               if (!mz)
+                       break;
+
+               reclaimed = mem_cgroup_hierarchical_reclaim(mz->mem, zone,
+                                               gfp_mask,
+                                               MEM_CGROUP_RECLAIM_SOFT);
+               nr_reclaimed += reclaimed;
+               spin_lock(&mctz->lock);
+
+               /*
+                * If we failed to reclaim anything from this memory cgroup
+                * it is time to move on to the next cgroup
+                */
+               next_mz = NULL;
+               if (!reclaimed) {
+                       do {
+                               /*
+                                * Loop until we find yet another one.
+                                *
+                                * By the time we get the soft_limit lock
+                                * again, someone might have added the
+                                * group back on the RB tree. Iterate to
+                                * make sure we get a different mem.
+                                * mem_cgroup_largest_soft_limit_node returns
+                                * NULL if no other cgroup is present on
+                                * the tree.
+                                */
+                               next_mz =
+                               __mem_cgroup_largest_soft_limit_node(mctz);
+                               if (next_mz == mz) {
+                                       css_put(&next_mz->mem->css);
+                                       next_mz = NULL;
+                               } else /* next_mz == NULL or other memcg */
+                                       break;
+                       } while (1);
+               }
+               __mem_cgroup_remove_exceeded(mz->mem, mz, mctz);
+               excess = res_counter_soft_limit_excess(&mz->mem->res);
+               /*
+                * One school of thought says that we should not add
+                * back the node to the tree if reclaim returns 0.
+                * But our reclaim could return 0 simply because, due
+                * to priority, we are exposing a smaller subset of
+                * memory to reclaim from. Consider this as a longer
+                * term TODO.
+                */
+               /* If excess == 0, no tree ops */
+               __mem_cgroup_insert_exceeded(mz->mem, mz, mctz, excess);
+               spin_unlock(&mctz->lock);
+               css_put(&mz->mem->css);
+               loop++;
+               /*
+                * We could not reclaim anything and there are no more
+                * mem cgroups to try, or we seem to be looping without
+                * reclaiming anything.
+                */
+               if (!nr_reclaimed &&
+                       (next_mz == NULL ||
+                       loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
+                       break;
+       } while (!nr_reclaimed);
+       if (next_mz)
+               css_put(&next_mz->mem->css);
+       return nr_reclaimed;
+}
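
A hedged caller sketch, not part of this patch: the zone balancing path could try soft limit reclaim before falling back to global reclaim ('sc', 'pgdat' and the enclosing loop are assumptions modeled on the kswapd path):

	/* Sketch only: inside a per-zone loop such as balance_pgdat(). */
	nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone, order,
						sc.gfp_mask,
						pgdat->node_id,
						zone_idx(zone));
	sc.nr_reclaimed += nr_soft_reclaimed;
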
+
 /*
  * This routine traverse page_cgroup in given list and drop them all.
  * *And* this routine doesn't reclaim page itself, just removes page_cgroup.
@@ -1914,7 +2685,7 @@ static int mem_cgroup_force_empty_list(struct mem_cgroup *mem,
                pc = list_entry(list->prev, struct page_cgroup, lru);
                if (busy == pc) {
                        list_move(&pc->lru, list);
-                       busy = 0;
+                       busy = NULL;
                        spin_unlock_irqrestore(&zone->lru_lock, flags);
                        continue;
                }
@@ -1955,7 +2726,7 @@ static int mem_cgroup_force_empty(struct mem_cgroup *mem, bool free_all)
        if (free_all)
                goto try_to_free;
 move_account:
-       while (mem->res.usage > 0) {
+       do {
                ret = -EBUSY;
                if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children))
                        goto out;
@@ -1964,6 +2735,7 @@ move_account:
                        goto out;
                /* This is for making all *used* pages to be on LRU. */
                lru_add_drain_all();
+               drain_all_stock_sync();
                ret = 0;
                for_each_node_state(node, N_HIGH_MEMORY) {
                        for (zid = 0; !ret && zid < MAX_NR_ZONES; zid++) {
@@ -1982,8 +2754,8 @@ move_account:
                if (ret == -ENOMEM)
                        goto try_to_free;
                cond_resched();
-       }
-       ret = 0;
+       /* "ret" should also be checked to ensure all lists are empty. */
+       } while (mem->res.usage > 0 || ret);
 out:
        css_put(&mem->css);
        return ret;
@@ -2016,10 +2788,7 @@ try_to_free:
        }
        lru_add_drain();
        /* try move_account...there may be some *locked* pages. */
-       if (mem->res.usage)
-               goto move_account;
-       ret = 0;
-       goto out;
+       goto move_account;
 }
 
 int mem_cgroup_force_empty_write(struct cgroup *cont, unsigned int event)
@@ -2046,7 +2815,7 @@ static int mem_cgroup_hierarchy_write(struct cgroup *cont, struct cftype *cft,
 
        cgroup_lock();
        /*
-        * If parent's use_hiearchy is set, we can't make any modifications
+        * If parent's use_hierarchy is set, we can't make any modifications
         * in the child subtrees. If it is unset, then the change can
         * occur, provided the current cgroup has no children.
         *
@@ -2066,20 +2835,75 @@ static int mem_cgroup_hierarchy_write(struct cgroup *cont, struct cftype *cft,
        return retval;
 }
 
+struct mem_cgroup_idx_data {
+       s64 val;
+       enum mem_cgroup_stat_index idx;
+};
+
+static int
+mem_cgroup_get_idx_stat(struct mem_cgroup *mem, void *data)
+{
+       struct mem_cgroup_idx_data *d = data;
+       d->val += mem_cgroup_read_stat(&mem->stat, d->idx);
+       return 0;
+}
+
+static void
+mem_cgroup_get_recursive_idx_stat(struct mem_cgroup *mem,
+                               enum mem_cgroup_stat_index idx, s64 *val)
+{
+       struct mem_cgroup_idx_data d;
+       d.idx = idx;
+       d.val = 0;
+       mem_cgroup_walk_tree(mem, &d, mem_cgroup_get_idx_stat);
+       *val = d.val;
+}
+
+static inline u64 mem_cgroup_usage(struct mem_cgroup *mem, bool swap)
+{
+       u64 idx_val, val;
+
+       if (!mem_cgroup_is_root(mem)) {
+               if (!swap)
+                       return res_counter_read_u64(&mem->res, RES_USAGE);
+               else
+                       return res_counter_read_u64(&mem->memsw, RES_USAGE);
+       }
+
+       mem_cgroup_get_recursive_idx_stat(mem, MEM_CGROUP_STAT_CACHE, &idx_val);
+       val = idx_val;
+       mem_cgroup_get_recursive_idx_stat(mem, MEM_CGROUP_STAT_RSS, &idx_val);
+       val += idx_val;
+
+       if (swap) {
+               mem_cgroup_get_recursive_idx_stat(mem,
+                               MEM_CGROUP_STAT_SWAPOUT, &idx_val);
+               val += idx_val;
+       }
+
+       return val << PAGE_SHIFT;
+}
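
A quick worked check of the root-cgroup arithmetic above, with hypothetical numbers:

	/*
	 * With 100 page-cache pages, 50 anon (rss) pages and swap
	 * accounting disabled, the root cgroup reports
	 *     (100 + 50) << PAGE_SHIFT == 150 * 4096 = 614400 bytes
	 * on a 4KiB-page system. Non-root cgroups simply read their
	 * res_counter instead.
	 */
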
+
 static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft)
 {
        struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
-       u64 val = 0;
+       u64 val;
        int type, name;
 
        type = MEMFILE_TYPE(cft->private);
        name = MEMFILE_ATTR(cft->private);
        switch (type) {
        case _MEM:
-               val = res_counter_read_u64(&mem->res, name);
+               if (name == RES_USAGE)
+                       val = mem_cgroup_usage(mem, false);
+               else
+                       val = res_counter_read_u64(&mem->res, name);
                break;
        case _MEMSWAP:
-               val = res_counter_read_u64(&mem->memsw, name);
+               if (name == RES_USAGE)
+                       val = mem_cgroup_usage(mem, true);
+               else
+                       val = res_counter_read_u64(&mem->memsw, name);
                break;
        default:
                BUG();
@@ -2116,6 +2940,20 @@ static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
                else
                        ret = mem_cgroup_resize_memsw_limit(memcg, val);
                break;
+       case RES_SOFT_LIMIT:
+               ret = res_counter_memparse_write_strategy(buffer, &val);
+               if (ret)
+                       break;
+               /*
+                * For memsw, soft limits are hard to implement in terms
+                * of semantics; for now, we support soft limits only
+                * for memory control without swap.
+                */
+               if (type == _MEM)
+                       ret = res_counter_set_soft_limit(&memcg->res, val);
+               else
+                       ret = -EINVAL;
+               break;
        default:
                ret = -EINVAL; /* should be BUG() ? */
                break;
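
A minimal userspace sketch of exercising the new control file; the mount point and group name are assumptions:

	#include <fcntl.h>
	#include <unistd.h>

	int main(void)
	{
		/* Assumes the memory cgroup hierarchy is mounted at
		 * /cgroup/memory and a group "A" already exists. */
		int fd = open("/cgroup/memory/A/memory.soft_limit_in_bytes",
			      O_WRONLY);
		if (fd < 0)
			return 1;
		/* 268435456 == 256 * 1024 * 1024: a 256MiB soft limit */
		if (write(fd, "268435456", 9) != 9) {
			close(fd);
			return 1;
		}
		close(fd);
		return 0;
	}
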
@@ -2173,17 +3011,52 @@ static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
                        res_counter_reset_failcnt(&mem->memsw);
                break;
        }
+
        return 0;
 }
 
+static u64 mem_cgroup_move_charge_read(struct cgroup *cgrp,
+                                       struct cftype *cft)
+{
+       return mem_cgroup_from_cont(cgrp)->move_charge_at_immigrate;
+}
+
+#ifdef CONFIG_MMU
+static int mem_cgroup_move_charge_write(struct cgroup *cgrp,
+                                       struct cftype *cft, u64 val)
+{
+       struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);
+
+       if (val >= (1 << NR_MOVE_TYPE))
+               return -EINVAL;
+       /*
+        * We check this value several times in both can_attach() and
+        * attach(), so we need the cgroup lock to prevent this value
+        * from becoming inconsistent.
+        */
+       cgroup_lock();
+       mem->move_charge_at_immigrate = val;
+       cgroup_unlock();
+
+       return 0;
+}
+#else
+static int mem_cgroup_move_charge_write(struct cgroup *cgrp,
+                                       struct cftype *cft, u64 val)
+{
+       return -ENOSYS;
+}
+#endif
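
For reference, a hedged note on the range check above:

	/*
	 * Assuming the move-type enum is
	 *     enum move_type { MOVE_CHARGE_TYPE_ANON, NR_MOVE_TYPE };
	 * then (1 << NR_MOVE_TYPE) == 2, so the only values that pass the
	 * -EINVAL check are 0 (moving disabled) and 1 (move charges of
	 * anonymous pages).
	 */
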
+
 
 /* For read statistics */
 enum {
        MCS_CACHE,
        MCS_RSS,
-       MCS_MAPPED_FILE,
+       MCS_FILE_MAPPED,
        MCS_PGPGIN,
        MCS_PGPGOUT,
+       MCS_SWAP,
        MCS_INACTIVE_ANON,
        MCS_ACTIVE_ANON,
        MCS_INACTIVE_FILE,
@@ -2205,6 +3078,7 @@ struct {
        {"mapped_file", "total_mapped_file"},
        {"pgpgin", "total_pgpgin"},
        {"pgpgout", "total_pgpgout"},
+       {"swap", "total_swap"},
        {"inactive_anon", "total_inactive_anon"},
        {"active_anon", "total_active_anon"},
        {"inactive_file", "total_inactive_file"},
@@ -2223,12 +3097,16 @@ static int mem_cgroup_get_local_stat(struct mem_cgroup *mem, void *data)
        s->stat[MCS_CACHE] += val * PAGE_SIZE;
        val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_RSS);
        s->stat[MCS_RSS] += val * PAGE_SIZE;
-       val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_MAPPED_FILE);
-       s->stat[MCS_MAPPED_FILE] += val * PAGE_SIZE;
+       val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_FILE_MAPPED);
+       s->stat[MCS_FILE_MAPPED] += val * PAGE_SIZE;
        val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_PGPGIN_COUNT);
        s->stat[MCS_PGPGIN] += val;
        val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_PGPGOUT_COUNT);
        s->stat[MCS_PGPGOUT] += val;
+       if (do_swap_account) {
+               val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_SWAPOUT);
+               s->stat[MCS_SWAP] += val * PAGE_SIZE;
+       }
 
        /* per zone stat */
        val = mem_cgroup_get_local_zonestat(mem, LRU_INACTIVE_ANON);
@@ -2260,8 +3138,11 @@ static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
        memset(&mystat, 0, sizeof(mystat));
        mem_cgroup_get_local_stat(mem_cont, &mystat);
 
-       for (i = 0; i < NR_MCS_STAT; i++)
+       for (i = 0; i < NR_MCS_STAT; i++) {
+               if (i == MCS_SWAP && !do_swap_account)
+                       continue;
                cb->fill(cb, memcg_stat_strings[i].local_name, mystat.stat[i]);
+       }
 
        /* Hierarchical information */
        {
@@ -2274,9 +3155,11 @@ static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
 
        memset(&mystat, 0, sizeof(mystat));
        mem_cgroup_get_total_stat(mem_cont, &mystat);
-       for (i = 0; i < NR_MCS_STAT; i++)
+       for (i = 0; i < NR_MCS_STAT; i++) {
+               if (i == MCS_SWAP && !do_swap_account)
+                       continue;
                cb->fill(cb, memcg_stat_strings[i].total_name, mystat.stat[i]);
-
+       }
 
 #ifdef CONFIG_DEBUG_VM
        cb->fill(cb, "inactive_ratio", calc_inactive_ratio(mem_cont, NULL));
@@ -2368,6 +3251,12 @@ static struct cftype mem_cgroup_files[] = {
                .write_string = mem_cgroup_write,
                .read_u64 = mem_cgroup_read,
        },
+       {
+               .name = "soft_limit_in_bytes",
+               .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
+               .write_string = mem_cgroup_write,
+               .read_u64 = mem_cgroup_read,
+       },
        {
                .name = "failcnt",
                .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
@@ -2392,6 +3281,11 @@ static struct cftype mem_cgroup_files[] = {
                .read_u64 = mem_cgroup_swappiness_read,
                .write_u64 = mem_cgroup_swappiness_write,
        },
+       {
+               .name = "move_charge_at_immigrate",
+               .read_u64 = mem_cgroup_move_charge_read,
+               .write_u64 = mem_cgroup_move_charge_write,
+       },
 };
 
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
@@ -2462,6 +3356,9 @@ static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
                mz = &pn->zoneinfo[zone];
                for_each_lru(l)
                        INIT_LIST_HEAD(&mz->lists[l]);
+               mz->usage_in_excess = 0;
+               mz->on_tree = false;
+               mz->mem = mem;
        }
        return 0;
 }
@@ -2507,6 +3404,7 @@ static void __mem_cgroup_free(struct mem_cgroup *mem)
 {
        int node;
 
+       mem_cgroup_remove_from_trees(mem);
        free_css_id(&mem_cgroup_subsys, &mem->css);
 
        for_each_node_state(node, N_POSSIBLE)
@@ -2523,9 +3421,9 @@ static void mem_cgroup_get(struct mem_cgroup *mem)
        atomic_inc(&mem->refcnt);
 }
 
-static void mem_cgroup_put(struct mem_cgroup *mem)
+static void __mem_cgroup_put(struct mem_cgroup *mem, int count)
 {
-       if (atomic_dec_and_test(&mem->refcnt)) {
+       if (atomic_sub_and_test(count, &mem->refcnt)) {
                struct mem_cgroup *parent = parent_mem_cgroup(mem);
                __mem_cgroup_free(mem);
                if (parent)
@@ -2533,6 +3431,11 @@ static void mem_cgroup_put(struct mem_cgroup *mem)
        }
 }
 
+static void mem_cgroup_put(struct mem_cgroup *mem)
+{
+       __mem_cgroup_put(mem, 1);
+}
+
 /*
  * Returns the parent mem_cgroup in memcgroup hierarchy with hierarchy enabled.
  */
@@ -2555,6 +3458,31 @@ static void __init enable_swap_cgroup(void)
 }
 #endif
 
+static int mem_cgroup_soft_limit_tree_init(void)
+{
+       struct mem_cgroup_tree_per_node *rtpn;
+       struct mem_cgroup_tree_per_zone *rtpz;
+       int tmp, node, zone;
+
+       for_each_node_state(node, N_POSSIBLE) {
+               tmp = node;
+               if (!node_state(node, N_NORMAL_MEMORY))
+                       tmp = -1;
+               rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, tmp);
+               if (!rtpn)
+                       return 1;
+
+               soft_limit_tree.rb_tree_per_node[node] = rtpn;
+
+               for (zone = 0; zone < MAX_NR_ZONES; zone++) {
+                       rtpz = &rtpn->rb_tree_per_zone[zone];
+                       rtpz->rb_root = RB_ROOT;
+                       spin_lock_init(&rtpz->lock);
+               }
+       }
+       return 0;
+}
+
 static struct cgroup_subsys_state * __ref
 mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
 {
@@ -2569,11 +3497,21 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
        for_each_node_state(node, N_POSSIBLE)
                if (alloc_mem_cgroup_per_zone_info(mem, node))
                        goto free_out;
+
        /* root ? */
        if (cont->parent == NULL) {
+               int cpu;
                enable_swap_cgroup();
                parent = NULL;
                root_mem_cgroup = mem;
+               if (mem_cgroup_soft_limit_tree_init())
+                       goto free_out;
+               for_each_possible_cpu(cpu) {
+                       struct memcg_stock_pcp *stock =
+                                               &per_cpu(memcg_stock, cpu);
+                       INIT_WORK(&stock->work, drain_local_stock);
+               }
+               hotcpu_notifier(memcg_stock_cpu_callback, 0);
        } else {
                parent = mem_cgroup_from_cont(cont->parent);
                mem->use_hierarchy = parent->use_hierarchy;
@@ -2599,6 +3537,7 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
        if (parent)
                mem->swappiness = get_swappiness(parent);
        atomic_set(&mem->refcnt, 1);
+       mem->move_charge_at_immigrate = 0;
        return &mem->css;
 free_out:
        __mem_cgroup_free(mem);
@@ -2635,18 +3574,445 @@ static int mem_cgroup_populate(struct cgroup_subsys *ss,
        return ret;
 }
 
+#ifdef CONFIG_MMU
+/* Handlers for move charge at task migration. */
+#define PRECHARGE_COUNT_AT_ONCE        256
+static int mem_cgroup_do_precharge(unsigned long count)
+{
+       int ret = 0;
+       int batch_count = PRECHARGE_COUNT_AT_ONCE;
+       struct mem_cgroup *mem = mc.to;
+
+       if (mem_cgroup_is_root(mem)) {
+               mc.precharge += count;
+               /* we don't need css_get for root */
+               return ret;
+       }
+       /* try to charge at once */
+       if (count > 1) {
+               struct res_counter *dummy;
+               /*
+                * "mem" cannot be under rmdir() because we've already checked
+                * by cgroup_lock_live_cgroup() that it is not removed and we
+                * are still under the same cgroup_mutex. So we can postpone
+                * css_get().
+                */
+               if (res_counter_charge(&mem->res, PAGE_SIZE * count, &dummy))
+                       goto one_by_one;
+               if (do_swap_account && res_counter_charge(&mem->memsw,
+                                               PAGE_SIZE * count, &dummy)) {
+                       res_counter_uncharge(&mem->res, PAGE_SIZE * count);
+                       goto one_by_one;
+               }
+               mc.precharge += count;
+               VM_BUG_ON(test_bit(CSS_ROOT, &mem->css.flags));
+               WARN_ON_ONCE(count > INT_MAX);
+               __css_get(&mem->css, (int)count);
+               return ret;
+       }
+one_by_one:
+       /* fall back to one by one charge */
+       while (count--) {
+               if (signal_pending(current)) {
+                       ret = -EINTR;
+                       break;
+               }
+               if (!batch_count--) {
+                       batch_count = PRECHARGE_COUNT_AT_ONCE;
+                       cond_resched();
+               }
+               ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem,
+                                                               false, NULL);
+               if (ret || !mem)
+                       /* mem_cgroup_clear_mc() will do uncharge later */
+                       return -ENOMEM;
+               mc.precharge++;
+       }
+       return ret;
+}
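
Illustrative behaviour of the batching above, with hypothetical numbers:

	/*
	 * Precharging 1000 pages first attempts a single
	 * res_counter_charge() of 1000 * PAGE_SIZE against mc.to; only if
	 * that fails does it fall back to charging one page at a time,
	 * calling cond_resched() roughly every PRECHARGE_COUNT_AT_ONCE
	 * (256) charges so that large precharges do not hog the CPU.
	 */
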
+#else  /* !CONFIG_MMU */
+static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
+                               struct cgroup *cgroup,
+                               struct task_struct *p,
+                               bool threadgroup)
+{
+       return 0;
+}
+static void mem_cgroup_cancel_attach(struct cgroup_subsys *ss,
+                               struct cgroup *cgroup,
+                               struct task_struct *p,
+                               bool threadgroup)
+{
+}
 static void mem_cgroup_move_task(struct cgroup_subsys *ss,
                                struct cgroup *cont,
                                struct cgroup *old_cont,
                                struct task_struct *p,
                                bool threadgroup)
 {
-       mutex_lock(&memcg_tasklist);
+}
+#endif
+
+/**
+ * is_target_pte_for_mc - check whether a pte is a valid target for move charge
+ * @vma: the vma the pte to be checked belongs to
+ * @addr: the address corresponding to the pte to be checked
+ * @ptent: the pte to be checked
+ * @target: pointer to where the target page or swap entry is stored (may be NULL)
+ *
+ * Returns
+ *   0(MC_TARGET_NONE): if the pte is not a target for move charge.
+ *   1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for
+ *     move charge. If @target is not NULL, the page is stored in target->page
+ *     with an extra refcount taken (callers must handle it).
+ *   2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
+ *     target for charge migration. If @target is not NULL, the entry is
+ *     stored in target->ent.
+ *
+ * Called with pte lock held.
+ */
+union mc_target {
+       struct page     *page;
+       swp_entry_t     ent;
+};
+
+enum mc_target_type {
+       MC_TARGET_NONE, /* not used */
+       MC_TARGET_PAGE,
+       MC_TARGET_SWAP,
+};
+
+static int is_target_pte_for_mc(struct vm_area_struct *vma,
+               unsigned long addr, pte_t ptent, union mc_target *target)
+{
+       struct page *page = NULL;
+       struct page_cgroup *pc;
+       int ret = 0;
+       swp_entry_t ent = { .val = 0 };
+       int usage_count = 0;
+       bool move_anon = test_bit(MOVE_CHARGE_TYPE_ANON,
+                                       &mc.to->move_charge_at_immigrate);
+
+       if (!pte_present(ptent)) {
+               /* TODO: handle swap of shmem/tmpfs */
+               if (pte_none(ptent) || pte_file(ptent))
+                       return 0;
+               else if (is_swap_pte(ptent)) {
+                       ent = pte_to_swp_entry(ptent);
+                       if (!move_anon || non_swap_entry(ent))
+                               return 0;
+                       usage_count = mem_cgroup_count_swap_user(ent, &page);
+               }
+       } else {
+               page = vm_normal_page(vma, addr, ptent);
+               if (!page || !page_mapped(page))
+                       return 0;
+               /*
+                * TODO: We don't move charges of file pages (including
+                * shmem/tmpfs) for now.
+                */
+               if (!move_anon || !PageAnon(page))
+                       return 0;
+               if (!get_page_unless_zero(page))
+                       return 0;
+               usage_count = page_mapcount(page);
+       }
+       if (usage_count > 1) {
+               /*
+                * TODO: We don't move charges of shared pages (used by
+                * multiple processes) for now.
+                */
+               if (page)
+                       put_page(page);
+               return 0;
+       }
+       if (page) {
+               pc = lookup_page_cgroup(page);
+               /*
+                * Do only a loose check, without the page_cgroup lock;
+                * mem_cgroup_move_account() re-checks that the pc is
+                * valid under the lock.
+                */
+               if (PageCgroupUsed(pc) && pc->mem_cgroup == mc.from) {
+                       ret = MC_TARGET_PAGE;
+                       if (target)
+                               target->page = page;
+               }
+               if (!ret || !target)
+                       put_page(page);
+       }
+       /* fall through to checking the swap entry */
+       if (ent.val && do_swap_account && !ret &&
+                       css_id(&mc.from->css) == lookup_swap_cgroup(ent)) {
+               ret = MC_TARGET_SWAP;
+               if (target)
+                       target->ent = ent;
+       }
+       return ret;
+}
+
+static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
+                                       unsigned long addr, unsigned long end,
+                                       struct mm_walk *walk)
+{
+       struct vm_area_struct *vma = walk->private;
+       pte_t *pte;
+       spinlock_t *ptl;
+
+       pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
+       for (; addr != end; pte++, addr += PAGE_SIZE)
+               if (is_target_pte_for_mc(vma, addr, *pte, NULL))
+                       mc.precharge++; /* increment precharge temporarily */
+       pte_unmap_unlock(pte - 1, ptl);
+       cond_resched();
+
+       return 0;
+}
+
+static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
+{
+       unsigned long precharge;
+       struct vm_area_struct *vma;
+
+       down_read(&mm->mmap_sem);
+       for (vma = mm->mmap; vma; vma = vma->vm_next) {
+               struct mm_walk mem_cgroup_count_precharge_walk = {
+                       .pmd_entry = mem_cgroup_count_precharge_pte_range,
+                       .mm = mm,
+                       .private = vma,
+               };
+               if (is_vm_hugetlb_page(vma))
+                       continue;
+               /* TODO: We don't move charges of shmem/tmpfs pages for now. */
+               if (vma->vm_flags & VM_SHARED)
+                       continue;
+               walk_page_range(vma->vm_start, vma->vm_end,
+                                       &mem_cgroup_count_precharge_walk);
+       }
+       up_read(&mm->mmap_sem);
+
+       precharge = mc.precharge;
+       mc.precharge = 0;
+
+       return precharge;
+}
+
+static int mem_cgroup_precharge_mc(struct mm_struct *mm)
+{
+       return mem_cgroup_do_precharge(mem_cgroup_count_precharge(mm));
+}
+
+static void mem_cgroup_clear_mc(void)
+{
+       /* we must uncharge all the leftover precharges from mc.to */
+       if (mc.precharge) {
+               __mem_cgroup_cancel_charge(mc.to, mc.precharge);
+               mc.precharge = 0;
+       }
        /*
-        * FIXME: It's better to move charges of this process from old
-        * memcg to new memcg. But it's just on TODO-List now.
+        * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
+        * we must uncharge here.
         */
-       mutex_unlock(&memcg_tasklist);
+       if (mc.moved_charge) {
+               __mem_cgroup_cancel_charge(mc.from, mc.moved_charge);
+               mc.moved_charge = 0;
+       }
+       /* we must fix up refcnts and charges */
+       if (mc.moved_swap) {
+               WARN_ON_ONCE(mc.moved_swap > INT_MAX);
+               /* uncharge swap account from the old cgroup */
+               if (!mem_cgroup_is_root(mc.from))
+                       res_counter_uncharge(&mc.from->memsw,
+                                               PAGE_SIZE * mc.moved_swap);
+               __mem_cgroup_put(mc.from, mc.moved_swap);
+
+               if (!mem_cgroup_is_root(mc.to)) {
+                       /*
+                        * we charged both to->res and to->memsw, so we should
+                        * uncharge to->res.
+                        */
+                       res_counter_uncharge(&mc.to->res,
+                                               PAGE_SIZE * mc.moved_swap);
+                       VM_BUG_ON(test_bit(CSS_ROOT, &mc.to->css.flags));
+                       __css_put(&mc.to->css, mc.moved_swap);
+               }
+               /* we've already done mem_cgroup_get(mc.to) */
+
+               mc.moved_swap = 0;
+       }
+       mc.from = NULL;
+       mc.to = NULL;
+       mc.moving_task = NULL;
+       wake_up_all(&mc.waitq);
+}
+
+static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
+                               struct cgroup *cgroup,
+                               struct task_struct *p,
+                               bool threadgroup)
+{
+       int ret = 0;
+       struct mem_cgroup *mem = mem_cgroup_from_cont(cgroup);
+
+       if (mem->move_charge_at_immigrate) {
+               struct mm_struct *mm;
+               struct mem_cgroup *from = mem_cgroup_from_task(p);
+
+               VM_BUG_ON(from == mem);
+
+               mm = get_task_mm(p);
+               if (!mm)
+                       return 0;
+               /* We move charges only when we move the owner of the mm */
+               if (mm->owner == p) {
+                       VM_BUG_ON(mc.from);
+                       VM_BUG_ON(mc.to);
+                       VM_BUG_ON(mc.precharge);
+                       VM_BUG_ON(mc.moved_charge);
+                       VM_BUG_ON(mc.moved_swap);
+                       VM_BUG_ON(mc.moving_task);
+                       mc.from = from;
+                       mc.to = mem;
+                       mc.precharge = 0;
+                       mc.moved_charge = 0;
+                       mc.moved_swap = 0;
+                       mc.moving_task = current;
+
+                       ret = mem_cgroup_precharge_mc(mm);
+                       if (ret)
+                               mem_cgroup_clear_mc();
+               }
+               mmput(mm);
+       }
+       return ret;
+}
+
+static void mem_cgroup_cancel_attach(struct cgroup_subsys *ss,
+                               struct cgroup *cgroup,
+                               struct task_struct *p,
+                               bool threadgroup)
+{
+       mem_cgroup_clear_mc();
+}
+
+static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
+                               unsigned long addr, unsigned long end,
+                               struct mm_walk *walk)
+{
+       int ret = 0;
+       struct vm_area_struct *vma = walk->private;
+       pte_t *pte;
+       spinlock_t *ptl;
+
+retry:
+       pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
+       for (; addr != end; addr += PAGE_SIZE) {
+               pte_t ptent = *(pte++);
+               union mc_target target;
+               int type;
+               struct page *page;
+               struct page_cgroup *pc;
+               swp_entry_t ent;
+
+               if (!mc.precharge)
+                       break;
+
+               type = is_target_pte_for_mc(vma, addr, ptent, &target);
+               switch (type) {
+               case MC_TARGET_PAGE:
+                       page = target.page;
+                       if (isolate_lru_page(page))
+                               goto put;
+                       pc = lookup_page_cgroup(page);
+                       if (!mem_cgroup_move_account(pc,
+                                               mc.from, mc.to, false)) {
+                               mc.precharge--;
+                               /* we uncharge from mc.from later. */
+                               mc.moved_charge++;
+                       }
+                       putback_lru_page(page);
+put:                   /* is_target_pte_for_mc() took a page reference */
+                       put_page(page);
+                       break;
+               case MC_TARGET_SWAP:
+                       ent = target.ent;
+                       if (!mem_cgroup_move_swap_account(ent,
+                                               mc.from, mc.to, false)) {
+                               mc.precharge--;
+                               /* we fixup refcnts and charges later. */
+                               mc.moved_swap++;
+                       }
+                       break;
+               default:
+                       break;
+               }
+       }
+       pte_unmap_unlock(pte - 1, ptl);
+       cond_resched();
+
+       if (addr != end) {
+               /*
+                * We have consumed all precharges we got in can_attach().
+                * We try to charge one by one, but don't do any additional
+                * charges to mc.to once a charge has failed in the
+                * attach() phase.
+                */
+               ret = mem_cgroup_do_precharge(1);
+               if (!ret)
+                       goto retry;
+       }
+
+       return ret;
+}
+
+static void mem_cgroup_move_charge(struct mm_struct *mm)
+{
+       struct vm_area_struct *vma;
+
+       lru_add_drain_all();
+       down_read(&mm->mmap_sem);
+       for (vma = mm->mmap; vma; vma = vma->vm_next) {
+               int ret;
+               struct mm_walk mem_cgroup_move_charge_walk = {
+                       .pmd_entry = mem_cgroup_move_charge_pte_range,
+                       .mm = mm,
+                       .private = vma,
+               };
+               if (is_vm_hugetlb_page(vma))
+                       continue;
+               /* TODO: We don't move charges of shmem/tmpfs pages for now. */
+               if (vma->vm_flags & VM_SHARED)
+                       continue;
+               ret = walk_page_range(vma->vm_start, vma->vm_end,
+                                               &mem_cgroup_move_charge_walk);
+               if (ret)
+                       /*
+                        * This means we have consumed all precharges and
+                        * failed to do any additional charge. Just abandon
+                        * here.
+                        */
+                       break;
+       }
+       up_read(&mm->mmap_sem);
+}
+
+static void mem_cgroup_move_task(struct cgroup_subsys *ss,
+                               struct cgroup *cont,
+                               struct cgroup *old_cont,
+                               struct task_struct *p,
+                               bool threadgroup)
+{
+       struct mm_struct *mm;
+
+       if (!mc.to)
+               /* no need to move charge */
+               return;
+
+       mm = get_task_mm(p);
+       if (mm) {
+               mem_cgroup_move_charge(mm);
+               mmput(mm);
+       }
+       mem_cgroup_clear_mc();
 }
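
A hedged summary of how the cgroup core drives the handlers wired up below, per the attach protocol:

	/*
	 * mem_cgroup_can_attach()    - precharge the task's pages to mc.to
	 * mem_cgroup_cancel_attach() - mem_cgroup_clear_mc() if attach is
	 *                              aborted
	 * mem_cgroup_move_task()     - walk the mm, move the charges, then
	 *                              clear mc and wake up waiters
	 */
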
 
 struct cgroup_subsys mem_cgroup_subsys = {
@@ -2656,6 +4022,8 @@ struct cgroup_subsys mem_cgroup_subsys = {
        .pre_destroy = mem_cgroup_pre_destroy,
        .destroy = mem_cgroup_destroy,
        .populate = mem_cgroup_populate,
+       .can_attach = mem_cgroup_can_attach,
+       .cancel_attach = mem_cgroup_cancel_attach,
        .attach = mem_cgroup_move_task,
        .early_init = 0,
        .use_id = 1,