memcg: use for_each_mem_cgroup
[linux-2.6.git] mm/memcontrol.c
/* memcontrol.c - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * Memory thresholds
 * Copyright (C) 2009 Nokia Corporation
 * Author: Kirill A. Shutemov
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/res_counter.h>
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp.h>
#include <linux/page-flags.h>
#include <linux/backing-dev.h>
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
#include <linux/limits.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/spinlock.h>
#include <linux/eventfd.h>
#include <linux/sort.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/mm_inline.h>
#include <linux/page_cgroup.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include "internal.h"

#include <asm/uaccess.h>

#include <trace/events/vmscan.h>

struct cgroup_subsys mem_cgroup_subsys __read_mostly;
#define MEM_CGROUP_RECLAIM_RETRIES      5
struct mem_cgroup *root_mem_cgroup __read_mostly;

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
/* Turned on only when memory cgroup is enabled && really_do_swap_account = 1 */
int do_swap_account __read_mostly;
static int really_do_swap_account __initdata = 1; /* to remember the boot option */
#else
#define do_swap_account         (0)
#endif

/*
 * The per-memcg event counter is incremented at every pagein/pageout. This
 * counter is used to trigger some periodic events, which is straightforward
 * and better than using jiffies etc. to handle periodic memcg events.
 *
 * These values are used as !((event) & ((1 << (thresh)) - 1))
 */
#define THRESHOLDS_EVENTS_THRESH (7) /* once in 128 */
#define SOFTLIMIT_EVENTS_THRESH (10) /* once in 1024 */
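/*
 * Worked example (illustrative note, not from the original source): with
 * thresh == 7, !((event) & ((1 << 7) - 1)) == !(event & 127) is true
 * exactly when the low 7 bits of the counter are zero, i.e. once every
 * 128 pagein/pageout events; with thresh == 10 it fires once every 1024.
 */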

/*
 * Statistics for memory cgroup.
 */
enum mem_cgroup_stat_index {
        /*
         * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
         */
        MEM_CGROUP_STAT_CACHE,     /* # of pages charged as cache */
        MEM_CGROUP_STAT_RSS,       /* # of pages charged as anon rss */
        MEM_CGROUP_STAT_FILE_MAPPED,  /* # of pages charged as file rss */
        MEM_CGROUP_STAT_PGPGIN_COUNT,   /* # of pages paged in */
        MEM_CGROUP_STAT_PGPGOUT_COUNT,  /* # of pages paged out */
        MEM_CGROUP_STAT_SWAPOUT, /* # of pages, swapped out */
        MEM_CGROUP_EVENTS,      /* incremented at every pagein/pageout */
        MEM_CGROUP_ON_MOVE,     /* someone is moving account between groups */

        MEM_CGROUP_STAT_NSTATS,
};

struct mem_cgroup_stat_cpu {
        s64 count[MEM_CGROUP_STAT_NSTATS];
};

/*
 * per-zone information in memory controller.
 */
struct mem_cgroup_per_zone {
        /*
         * spin_lock to protect the per cgroup LRU
         */
        struct list_head        lists[NR_LRU_LISTS];
        unsigned long           count[NR_LRU_LISTS];

        struct zone_reclaim_stat reclaim_stat;
        struct rb_node          tree_node;      /* RB tree node */
        unsigned long long      usage_in_excess;/* Set to the value by which */
                                                /* the soft limit is exceeded*/
        bool                    on_tree;
        struct mem_cgroup       *mem;           /* Back pointer, we cannot */
                                                /* use container_of        */
};
/* Macro for accessing counter */
#define MEM_CGROUP_ZSTAT(mz, idx)       ((mz)->count[(idx)])

struct mem_cgroup_per_node {
        struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
};

struct mem_cgroup_lru_info {
        struct mem_cgroup_per_node *nodeinfo[MAX_NUMNODES];
};

/*
 * Cgroups above their limits are maintained in a RB-Tree, independent of
 * their hierarchy representation
 */

struct mem_cgroup_tree_per_zone {
        struct rb_root rb_root;
        spinlock_t lock;
};

struct mem_cgroup_tree_per_node {
        struct mem_cgroup_tree_per_zone rb_tree_per_zone[MAX_NR_ZONES];
};

struct mem_cgroup_tree {
        struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
};

static struct mem_cgroup_tree soft_limit_tree __read_mostly;

struct mem_cgroup_threshold {
        struct eventfd_ctx *eventfd;
        u64 threshold;
};

/* For threshold */
struct mem_cgroup_threshold_ary {
        /* An array index pointing to the threshold just below usage. */
        int current_threshold;
        /* Size of entries[] */
        unsigned int size;
        /* Array of thresholds */
        struct mem_cgroup_threshold entries[0];
};

struct mem_cgroup_thresholds {
        /* Primary thresholds array */
        struct mem_cgroup_threshold_ary *primary;
        /*
         * Spare threshold array.
         * This is needed to make mem_cgroup_unregister_event() "never fail".
         * It must be able to store at least primary->size - 1 entries.
         */
        struct mem_cgroup_threshold_ary *spare;
};

/* for OOM */
struct mem_cgroup_eventfd_list {
        struct list_head list;
        struct eventfd_ctx *eventfd;
};

static void mem_cgroup_threshold(struct mem_cgroup *mem);
static void mem_cgroup_oom_notify(struct mem_cgroup *mem);

/*
 * The memory controller data structure. The memory controller controls both
 * page cache and RSS per cgroup. We would eventually like to provide
 * statistics based on the statistics developed by Rik Van Riel for clock-pro,
 * to help the administrator determine what knobs to tune.
 *
 * TODO: Add a water mark for the memory controller. Reclaim will begin when
 * we hit the water mark. Maybe even add a low water mark, such that
 * no reclaim occurs from a cgroup at its low water mark; this is
 * a feature that will be implemented much later in the future.
 */
struct mem_cgroup {
        struct cgroup_subsys_state css;
        /*
         * the counter to account for memory usage
         */
        struct res_counter res;
        /*
         * the counter to account for mem+swap usage.
         */
        struct res_counter memsw;
        /*
         * Per cgroup active and inactive list, similar to the
         * per zone LRU lists.
         */
        struct mem_cgroup_lru_info info;

        /*
         * Protects reclaim-related members.
         */
        spinlock_t reclaim_param_lock;

        /*
         * While reclaiming in a hierarchy, we cache the last child we
         * reclaimed from.
         */
        int last_scanned_child;
        /*
         * Should the accounting and control be hierarchical, per subtree?
         */
        bool use_hierarchy;
        atomic_t        oom_lock;
        atomic_t        refcnt;

        unsigned int    swappiness;
        /* OOM-Killer disable */
        int             oom_kill_disable;

        /* set when res.limit == memsw.limit */
        bool            memsw_is_minimum;

        /* protect arrays of thresholds */
        struct mutex thresholds_lock;

        /* thresholds for memory usage. RCU-protected */
        struct mem_cgroup_thresholds thresholds;

        /* thresholds for mem+swap usage. RCU-protected */
        struct mem_cgroup_thresholds memsw_thresholds;

        /* For oom notifier event fd */
        struct list_head oom_notify;

        /*
         * Should we move charges of a task when the task is moved into this
         * mem_cgroup? And what types of charges should we move?
         */
        unsigned long   move_charge_at_immigrate;
        /*
         * percpu counter.
         */
        struct mem_cgroup_stat_cpu *stat;
};

/* Stuff for moving charges at task migration. */
/*
 * Types of charges to be moved. "move_charge_at_immigrate" is treated as a
 * left-shifted bitmap of these types.
 */
enum move_type {
        MOVE_CHARGE_TYPE_ANON,  /* private anonymous page and swap of it */
        MOVE_CHARGE_TYPE_FILE,  /* file page (including tmpfs) and swap of it */
        NR_MOVE_TYPE,
};

/* "mc" and its members are protected by cgroup_mutex */
static struct move_charge_struct {
        spinlock_t        lock; /* for from, to, moving_task */
        struct mem_cgroup *from;
        struct mem_cgroup *to;
        unsigned long precharge;
        unsigned long moved_charge;
        unsigned long moved_swap;
        struct task_struct *moving_task;        /* a task moving charges */
        wait_queue_head_t waitq;                /* a waitq for other context */
} mc = {
        .lock = __SPIN_LOCK_UNLOCKED(mc.lock),
        .waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
};

static bool move_anon(void)
{
        return test_bit(MOVE_CHARGE_TYPE_ANON,
                                        &mc.to->move_charge_at_immigrate);
}

static bool move_file(void)
{
        return test_bit(MOVE_CHARGE_TYPE_FILE,
                                        &mc.to->move_charge_at_immigrate);
}

/*
 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
 * limit reclaim to prevent infinite loops, if they ever occur.
 */
#define MEM_CGROUP_MAX_RECLAIM_LOOPS            (100)
#define MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS (2)

enum charge_type {
        MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
        MEM_CGROUP_CHARGE_TYPE_MAPPED,
        MEM_CGROUP_CHARGE_TYPE_SHMEM,   /* used by page migration of shmem */
        MEM_CGROUP_CHARGE_TYPE_FORCE,   /* used by force_empty */
        MEM_CGROUP_CHARGE_TYPE_SWAPOUT, /* for accounting swapcache */
        MEM_CGROUP_CHARGE_TYPE_DROP,    /* a page was unused swap cache */
        NR_CHARGE_TYPE,
};

/* only used here (for easy reading) */
#define PCGF_CACHE      (1UL << PCG_CACHE)
#define PCGF_USED       (1UL << PCG_USED)
#define PCGF_LOCK       (1UL << PCG_LOCK)
/* Not used, but added here for completeness */
#define PCGF_ACCT       (1UL << PCG_ACCT)

/* for encoding cft->private value on file */
#define _MEM                    (0)
#define _MEMSWAP                (1)
#define _OOM_TYPE               (2)
#define MEMFILE_PRIVATE(x, val) (((x) << 16) | (val))
#define MEMFILE_TYPE(val)       (((val) >> 16) & 0xffff)
#define MEMFILE_ATTR(val)       ((val) & 0xffff)
/* Used for the OOM notifier */
#define OOM_CONTROL             (0)
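/*
 * Illustrative round-trip (an explanatory note, not part of the original
 * source): the mem+swap limit file would encode and decode its cft->private
 * value as
 *
 *      cft->private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT);
 *      MEMFILE_TYPE(cft->private);   == _MEMSWAP, selects &memcg->memsw
 *      MEMFILE_ATTR(cft->private);   == RES_LIMIT, selects the attribute
 */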

/*
 * Reclaim flags for mem_cgroup_hierarchical_reclaim
 */
#define MEM_CGROUP_RECLAIM_NOSWAP_BIT   0x0
#define MEM_CGROUP_RECLAIM_NOSWAP       (1 << MEM_CGROUP_RECLAIM_NOSWAP_BIT)
#define MEM_CGROUP_RECLAIM_SHRINK_BIT   0x1
#define MEM_CGROUP_RECLAIM_SHRINK       (1 << MEM_CGROUP_RECLAIM_SHRINK_BIT)
#define MEM_CGROUP_RECLAIM_SOFT_BIT     0x2
#define MEM_CGROUP_RECLAIM_SOFT         (1 << MEM_CGROUP_RECLAIM_SOFT_BIT)

static void mem_cgroup_get(struct mem_cgroup *mem);
static void mem_cgroup_put(struct mem_cgroup *mem);
static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem);
static void drain_all_stock_async(void);

static struct mem_cgroup_per_zone *
mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid)
{
        return &mem->info.nodeinfo[nid]->zoneinfo[zid];
}

struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *mem)
{
        return &mem->css;
}

static struct mem_cgroup_per_zone *
page_cgroup_zoneinfo(struct page_cgroup *pc)
{
        struct mem_cgroup *mem = pc->mem_cgroup;
        int nid = page_cgroup_nid(pc);
        int zid = page_cgroup_zid(pc);

        if (!mem)
                return NULL;

        return mem_cgroup_zoneinfo(mem, nid, zid);
}

static struct mem_cgroup_tree_per_zone *
soft_limit_tree_node_zone(int nid, int zid)
{
        return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
}

static struct mem_cgroup_tree_per_zone *
soft_limit_tree_from_page(struct page *page)
{
        int nid = page_to_nid(page);
        int zid = page_zonenum(page);

        return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
}

static void
__mem_cgroup_insert_exceeded(struct mem_cgroup *mem,
                                struct mem_cgroup_per_zone *mz,
                                struct mem_cgroup_tree_per_zone *mctz,
                                unsigned long long new_usage_in_excess)
{
        struct rb_node **p = &mctz->rb_root.rb_node;
        struct rb_node *parent = NULL;
        struct mem_cgroup_per_zone *mz_node;

        if (mz->on_tree)
                return;

        mz->usage_in_excess = new_usage_in_excess;
        if (!mz->usage_in_excess)
                return;
        while (*p) {
                parent = *p;
                mz_node = rb_entry(parent, struct mem_cgroup_per_zone,
                                        tree_node);
                if (mz->usage_in_excess < mz_node->usage_in_excess)
                        p = &(*p)->rb_left;
                /*
                 * We can't avoid mem cgroups that are over their soft
                 * limit by the same amount
                 */
                else if (mz->usage_in_excess >= mz_node->usage_in_excess)
                        p = &(*p)->rb_right;
        }
        rb_link_node(&mz->tree_node, parent, p);
        rb_insert_color(&mz->tree_node, &mctz->rb_root);
        mz->on_tree = true;
}

static void
__mem_cgroup_remove_exceeded(struct mem_cgroup *mem,
                                struct mem_cgroup_per_zone *mz,
                                struct mem_cgroup_tree_per_zone *mctz)
{
        if (!mz->on_tree)
                return;
        rb_erase(&mz->tree_node, &mctz->rb_root);
        mz->on_tree = false;
}

static void
mem_cgroup_remove_exceeded(struct mem_cgroup *mem,
                                struct mem_cgroup_per_zone *mz,
                                struct mem_cgroup_tree_per_zone *mctz)
{
        spin_lock(&mctz->lock);
        __mem_cgroup_remove_exceeded(mem, mz, mctz);
        spin_unlock(&mctz->lock);
}


static void mem_cgroup_update_tree(struct mem_cgroup *mem, struct page *page)
{
        unsigned long long excess;
        struct mem_cgroup_per_zone *mz;
        struct mem_cgroup_tree_per_zone *mctz;
        int nid = page_to_nid(page);
        int zid = page_zonenum(page);
        mctz = soft_limit_tree_from_page(page);

        /*
         * It is necessary to update all ancestors when hierarchy is used,
         * because their event counters are not touched.
         */
        for (; mem; mem = parent_mem_cgroup(mem)) {
                mz = mem_cgroup_zoneinfo(mem, nid, zid);
                excess = res_counter_soft_limit_excess(&mem->res);
                /*
                 * We have to update the tree if mz is on RB-tree or
                 * mem is over its softlimit.
                 */
                if (excess || mz->on_tree) {
                        spin_lock(&mctz->lock);
                        /* if on-tree, remove it */
                        if (mz->on_tree)
                                __mem_cgroup_remove_exceeded(mem, mz, mctz);
                        /*
                         * Insert again. mz->usage_in_excess will be updated.
                         * If excess is 0, no tree ops.
                         */
                        __mem_cgroup_insert_exceeded(mem, mz, mctz, excess);
                        spin_unlock(&mctz->lock);
                }
        }
}

static void mem_cgroup_remove_from_trees(struct mem_cgroup *mem)
{
        int node, zone;
        struct mem_cgroup_per_zone *mz;
        struct mem_cgroup_tree_per_zone *mctz;

        for_each_node_state(node, N_POSSIBLE) {
                for (zone = 0; zone < MAX_NR_ZONES; zone++) {
                        mz = mem_cgroup_zoneinfo(mem, node, zone);
                        mctz = soft_limit_tree_node_zone(node, zone);
                        mem_cgroup_remove_exceeded(mem, mz, mctz);
                }
        }
}

static inline unsigned long mem_cgroup_get_excess(struct mem_cgroup *mem)
{
        return res_counter_soft_limit_excess(&mem->res) >> PAGE_SHIFT;
}

static struct mem_cgroup_per_zone *
__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
{
        struct rb_node *rightmost = NULL;
        struct mem_cgroup_per_zone *mz;

retry:
        mz = NULL;
        rightmost = rb_last(&mctz->rb_root);
        if (!rightmost)
                goto done;              /* Nothing to reclaim from */

        mz = rb_entry(rightmost, struct mem_cgroup_per_zone, tree_node);
        /*
         * Remove the node now, but someone else can add it back;
         * we will add it back at the end of reclaim to its correct
         * position in the tree.
         */
        __mem_cgroup_remove_exceeded(mz->mem, mz, mctz);
        if (!res_counter_soft_limit_excess(&mz->mem->res) ||
                !css_tryget(&mz->mem->css))
                goto retry;
done:
        return mz;
}

static struct mem_cgroup_per_zone *
mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
{
        struct mem_cgroup_per_zone *mz;

        spin_lock(&mctz->lock);
        mz = __mem_cgroup_largest_soft_limit_node(mctz);
        spin_unlock(&mctz->lock);
        return mz;
}

static s64 mem_cgroup_read_stat(struct mem_cgroup *mem,
                enum mem_cgroup_stat_index idx)
{
        int cpu;
        s64 val = 0;

        for_each_possible_cpu(cpu)
                val += per_cpu(mem->stat->count[idx], cpu);
        return val;
}

static s64 mem_cgroup_local_usage(struct mem_cgroup *mem)
{
        s64 ret;

        ret = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_RSS);
        ret += mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_CACHE);
        return ret;
}

static void mem_cgroup_swap_statistics(struct mem_cgroup *mem,
                                         bool charge)
{
        int val = (charge) ? 1 : -1;
        this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_SWAPOUT], val);
}

static void mem_cgroup_charge_statistics(struct mem_cgroup *mem,
                                         struct page_cgroup *pc,
                                         bool charge)
{
        int val = (charge) ? 1 : -1;

        preempt_disable();

        if (PageCgroupCache(pc))
                __this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_CACHE], val);
        else
                __this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_RSS], val);

        if (charge)
                __this_cpu_inc(mem->stat->count[MEM_CGROUP_STAT_PGPGIN_COUNT]);
        else
                __this_cpu_inc(mem->stat->count[MEM_CGROUP_STAT_PGPGOUT_COUNT]);
        __this_cpu_inc(mem->stat->count[MEM_CGROUP_EVENTS]);

        preempt_enable();
}

static unsigned long mem_cgroup_get_local_zonestat(struct mem_cgroup *mem,
                                        enum lru_list idx)
{
        int nid, zid;
        struct mem_cgroup_per_zone *mz;
        u64 total = 0;

        for_each_online_node(nid)
                for (zid = 0; zid < MAX_NR_ZONES; zid++) {
                        mz = mem_cgroup_zoneinfo(mem, nid, zid);
                        total += MEM_CGROUP_ZSTAT(mz, idx);
                }
        return total;
}

static bool __memcg_event_check(struct mem_cgroup *mem, int event_mask_shift)
{
        s64 val;

        val = this_cpu_read(mem->stat->count[MEM_CGROUP_EVENTS]);

        return !(val & ((1 << event_mask_shift) - 1));
}

/*
 * Check events in order.
 */
static void memcg_check_events(struct mem_cgroup *mem, struct page *page)
{
        /* threshold event is triggered in finer grain than soft limit */
        if (unlikely(__memcg_event_check(mem, THRESHOLDS_EVENTS_THRESH))) {
                mem_cgroup_threshold(mem);
                if (unlikely(__memcg_event_check(mem, SOFTLIMIT_EVENTS_THRESH)))
                        mem_cgroup_update_tree(mem, page);
        }
}

static struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
{
        return container_of(cgroup_subsys_state(cont,
                                mem_cgroup_subsys_id), struct mem_cgroup,
                                css);
}

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
{
        /*
         * mm_update_next_owner() may clear mm->owner to NULL
         * if it races with swapoff, page migration, etc.
         * So this can be called with p == NULL.
         */
        if (unlikely(!p))
                return NULL;

        return container_of(task_subsys_state(p, mem_cgroup_subsys_id),
                                struct mem_cgroup, css);
}

static struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
{
        struct mem_cgroup *mem = NULL;

        if (!mm)
                return NULL;
        /*
         * Because we hold no locks, mm->owner may be being moved to another
         * cgroup. We use css_tryget() here even if this looks
         * pessimistic (rather than adding locks here).
         */
        rcu_read_lock();
        do {
                mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
                if (unlikely(!mem))
                        break;
        } while (!css_tryget(&mem->css));
        rcu_read_unlock();
        return mem;
}

/* The caller has to guarantee "mem" exists before calling this */
static struct mem_cgroup *mem_cgroup_start_loop(struct mem_cgroup *mem)
{
        if (mem && css_tryget(&mem->css))
                return mem;
        return NULL;
}

static struct mem_cgroup *mem_cgroup_get_next(struct mem_cgroup *iter,
                                        struct mem_cgroup *root,
                                        bool cond)
{
        int nextid = css_id(&iter->css) + 1;
        int found;
        int hierarchy_used;
        struct cgroup_subsys_state *css;

        hierarchy_used = iter->use_hierarchy;

        css_put(&iter->css);
        if (!cond || !hierarchy_used)
                return NULL;

        do {
                iter = NULL;
                rcu_read_lock();

                css = css_get_next(&mem_cgroup_subsys, nextid,
                                &root->css, &found);
                if (css && css_tryget(css))
                        iter = container_of(css, struct mem_cgroup, css);
                rcu_read_unlock();
                /* If css is NULL, no more cgroups will be found */
                nextid = found + 1;
        } while (css && !iter);

        return iter;
}
/*
 * for_each_mem_cgroup_tree() visits all cgroups under the given tree. Be
 * careful: breaking out of the loop is not allowed, because we hold a
 * reference count on "iter". Instead, set "cond" to false and "continue"
 * to exit the loop.
 */
#define for_each_mem_cgroup_tree_cond(iter, root, cond) \
        for (iter = mem_cgroup_start_loop(root);\
             iter != NULL;\
             iter = mem_cgroup_get_next(iter, root, cond))

#define for_each_mem_cgroup_tree(iter, root) \
        for_each_mem_cgroup_tree_cond(iter, root, true)

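/*
 * A minimal usage sketch (hypothetical helper, not part of the original
 * file): to stop the walk early, clear "cond" and let the loop take one
 * more step so that mem_cgroup_get_next() can drop the reference it holds
 * on "iter"; a plain "break" would leak that reference.
 */
static inline bool mem_cgroup_any_in_soft_excess(struct mem_cgroup *root)
{
        struct mem_cgroup *iter;
        bool cond = true;
        bool found = false;

        for_each_mem_cgroup_tree_cond(iter, root, cond)
                if (res_counter_soft_limit_excess(&iter->res)) {
                        found = true;
                        cond = false;   /* request exit; the reference is put */
                }
        return found;
}
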
static inline bool mem_cgroup_is_root(struct mem_cgroup *mem)
{
        return (mem == root_mem_cgroup);
}

/*
 * The following LRU functions may be used without PCG_LOCK.
 * Operations are called by routines of the global LRU independently of memcg.
 * What we have to take care of here is the validity of pc->mem_cgroup.
 *
 * Changes to pc->mem_cgroup happen when
 * 1. charging
 * 2. moving account
 * In the typical case, "charge" is done before add-to-lru. The exception is
 * SwapCache, which is added to the LRU before being charged.
 * If the PCG_USED bit is not set, the page_cgroup is not added to this private
 * LRU. When moving account, the page is not on the LRU; it's isolated.
 */

void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru)
{
        struct page_cgroup *pc;
        struct mem_cgroup_per_zone *mz;

        if (mem_cgroup_disabled())
                return;
        pc = lookup_page_cgroup(page);
        /* can happen while we handle swapcache. */
        if (!TestClearPageCgroupAcctLRU(pc))
                return;
        VM_BUG_ON(!pc->mem_cgroup);
        /*
         * We don't check PCG_USED bit. It's cleared when the "page" is finally
         * removed from global LRU.
         */
        mz = page_cgroup_zoneinfo(pc);
        MEM_CGROUP_ZSTAT(mz, lru) -= 1;
        if (mem_cgroup_is_root(pc->mem_cgroup))
                return;
        VM_BUG_ON(list_empty(&pc->lru));
        list_del_init(&pc->lru);
        return;
}

void mem_cgroup_del_lru(struct page *page)
{
        mem_cgroup_del_lru_list(page, page_lru(page));
}

void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru)
{
        struct mem_cgroup_per_zone *mz;
        struct page_cgroup *pc;

        if (mem_cgroup_disabled())
                return;

        pc = lookup_page_cgroup(page);
        /*
         * The Used bit is set without atomic ops but after smp_wmb().
         * To make pc->mem_cgroup visible, insert smp_rmb() here.
         */
        smp_rmb();
        /* unused or root page is not rotated. */
        if (!PageCgroupUsed(pc) || mem_cgroup_is_root(pc->mem_cgroup))
                return;
        mz = page_cgroup_zoneinfo(pc);
        list_move(&pc->lru, &mz->lists[lru]);
}

void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru)
{
        struct page_cgroup *pc;
        struct mem_cgroup_per_zone *mz;

        if (mem_cgroup_disabled())
                return;
        pc = lookup_page_cgroup(page);
        VM_BUG_ON(PageCgroupAcctLRU(pc));
        /*
         * The Used bit is set without atomic ops but after smp_wmb().
         * To make pc->mem_cgroup visible, insert smp_rmb() here.
         */
        smp_rmb();
        if (!PageCgroupUsed(pc))
                return;

        mz = page_cgroup_zoneinfo(pc);
        MEM_CGROUP_ZSTAT(mz, lru) += 1;
        SetPageCgroupAcctLRU(pc);
        if (mem_cgroup_is_root(pc->mem_cgroup))
                return;
        list_add(&pc->lru, &mz->lists[lru]);
}

/*
 * While handling SwapCache, pc->mem_cgroup may be changed while it's linked to
 * the LRU because the page may be reused after it's fully uncharged (because of
 * SwapCache behavior). To handle that, unlink page_cgroup from the LRU when we
 * charge it again. This function is only used to charge SwapCache. It's done
 * under lock_page(), and it is expected that zone->lru_lock is never held.
 */
static void mem_cgroup_lru_del_before_commit_swapcache(struct page *page)
{
        unsigned long flags;
        struct zone *zone = page_zone(page);
        struct page_cgroup *pc = lookup_page_cgroup(page);

        spin_lock_irqsave(&zone->lru_lock, flags);
        /*
         * Forget old LRU when this page_cgroup is *not* used. This Used bit
         * is guarded by lock_page() because the page is SwapCache.
         */
        if (!PageCgroupUsed(pc))
                mem_cgroup_del_lru_list(page, page_lru(page));
        spin_unlock_irqrestore(&zone->lru_lock, flags);
}

static void mem_cgroup_lru_add_after_commit_swapcache(struct page *page)
{
        unsigned long flags;
        struct zone *zone = page_zone(page);
        struct page_cgroup *pc = lookup_page_cgroup(page);

        spin_lock_irqsave(&zone->lru_lock, flags);
        /* link when the page is linked to LRU but page_cgroup isn't */
        if (PageLRU(page) && !PageCgroupAcctLRU(pc))
                mem_cgroup_add_lru_list(page, page_lru(page));
        spin_unlock_irqrestore(&zone->lru_lock, flags);
}


void mem_cgroup_move_lists(struct page *page,
                           enum lru_list from, enum lru_list to)
{
        if (mem_cgroup_disabled())
                return;
        mem_cgroup_del_lru_list(page, from);
        mem_cgroup_add_lru_list(page, to);
}

int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
{
        int ret;
        struct mem_cgroup *curr = NULL;
        struct task_struct *p;

        p = find_lock_task_mm(task);
        if (!p)
                return 0;
        curr = try_get_mem_cgroup_from_mm(p->mm);
        task_unlock(p);
        if (!curr)
                return 0;
        /*
         * We should check use_hierarchy of "mem", not "curr". Checking
         * use_hierarchy of "curr" here would make this function return true
         * whenever hierarchy is enabled in "curr" and "curr" is a child of
         * "mem" in the *cgroup* hierarchy (even if use_hierarchy is disabled
         * in "mem").
         */
        if (mem->use_hierarchy)
                ret = css_is_ancestor(&curr->css, &mem->css);
        else
                ret = (curr == mem);
        css_put(&curr->css);
        return ret;
}

static int calc_inactive_ratio(struct mem_cgroup *memcg, unsigned long *present_pages)
{
        unsigned long active;
        unsigned long inactive;
        unsigned long gb;
        unsigned long inactive_ratio;

        inactive = mem_cgroup_get_local_zonestat(memcg, LRU_INACTIVE_ANON);
        active = mem_cgroup_get_local_zonestat(memcg, LRU_ACTIVE_ANON);

        gb = (inactive + active) >> (30 - PAGE_SHIFT);
        if (gb)
                inactive_ratio = int_sqrt(10 * gb);
        else
                inactive_ratio = 1;

        if (present_pages) {
                present_pages[0] = inactive;
                present_pages[1] = active;
        }

        return inactive_ratio;
}

int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg)
{
        unsigned long active;
        unsigned long inactive;
        unsigned long present_pages[2];
        unsigned long inactive_ratio;

        inactive_ratio = calc_inactive_ratio(memcg, present_pages);

        inactive = present_pages[0];
        active = present_pages[1];

        if (inactive * inactive_ratio < active)
                return 1;

        return 0;
}

int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg)
{
        unsigned long active;
        unsigned long inactive;

        inactive = mem_cgroup_get_local_zonestat(memcg, LRU_INACTIVE_FILE);
        active = mem_cgroup_get_local_zonestat(memcg, LRU_ACTIVE_FILE);

        return (active > inactive);
}

unsigned long mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg,
                                       struct zone *zone,
                                       enum lru_list lru)
{
        int nid = zone_to_nid(zone);
        int zid = zone_idx(zone);
        struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(memcg, nid, zid);

        return MEM_CGROUP_ZSTAT(mz, lru);
}

struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
                                                      struct zone *zone)
{
        int nid = zone_to_nid(zone);
        int zid = zone_idx(zone);
        struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(memcg, nid, zid);

        return &mz->reclaim_stat;
}

struct zone_reclaim_stat *
mem_cgroup_get_reclaim_stat_from_page(struct page *page)
{
        struct page_cgroup *pc;
        struct mem_cgroup_per_zone *mz;

        if (mem_cgroup_disabled())
                return NULL;

        pc = lookup_page_cgroup(page);
        /*
         * The Used bit is set without atomic ops but after smp_wmb().
         * To make pc->mem_cgroup visible, insert smp_rmb() here.
         */
        smp_rmb();
        if (!PageCgroupUsed(pc))
                return NULL;

        mz = page_cgroup_zoneinfo(pc);
        if (!mz)
                return NULL;

        return &mz->reclaim_stat;
}

unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
                                        struct list_head *dst,
                                        unsigned long *scanned, int order,
                                        int mode, struct zone *z,
                                        struct mem_cgroup *mem_cont,
                                        int active, int file)
{
        unsigned long nr_taken = 0;
        struct page *page;
        unsigned long scan;
        LIST_HEAD(pc_list);
        struct list_head *src;
        struct page_cgroup *pc, *tmp;
        int nid = zone_to_nid(z);
        int zid = zone_idx(z);
        struct mem_cgroup_per_zone *mz;
        int lru = LRU_FILE * file + active;
        int ret;

        BUG_ON(!mem_cont);
        mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
        src = &mz->lists[lru];

        scan = 0;
        list_for_each_entry_safe_reverse(pc, tmp, src, lru) {
                if (scan >= nr_to_scan)
                        break;

                page = pc->page;
                if (unlikely(!PageCgroupUsed(pc)))
                        continue;
                if (unlikely(!PageLRU(page)))
                        continue;

                scan++;
                ret = __isolate_lru_page(page, mode, file);
                switch (ret) {
                case 0:
                        list_move(&page->lru, dst);
                        mem_cgroup_del_lru(page);
                        nr_taken++;
                        break;
                case -EBUSY:
                        /* we don't affect global LRU but rotate in our LRU */
                        mem_cgroup_rotate_lru_list(page, page_lru(page));
                        break;
                default:
                        break;
                }
        }

        *scanned = scan;

        trace_mm_vmscan_memcg_isolate(0, nr_to_scan, scan, nr_taken,
                                      0, 0, 0, mode);

        return nr_taken;
}

#define mem_cgroup_from_res_counter(counter, member)    \
        container_of(counter, struct mem_cgroup, member)

static bool mem_cgroup_check_under_limit(struct mem_cgroup *mem)
{
        if (do_swap_account) {
                if (res_counter_check_under_limit(&mem->res) &&
                        res_counter_check_under_limit(&mem->memsw))
                        return true;
        } else
                if (res_counter_check_under_limit(&mem->res))
                        return true;
        return false;
}

static unsigned int get_swappiness(struct mem_cgroup *memcg)
{
        struct cgroup *cgrp = memcg->css.cgroup;
        unsigned int swappiness;

        /* root ? */
        if (cgrp->parent == NULL)
                return vm_swappiness;

        spin_lock(&memcg->reclaim_param_lock);
        swappiness = memcg->swappiness;
        spin_unlock(&memcg->reclaim_param_lock);

        return swappiness;
}

static void mem_cgroup_start_move(struct mem_cgroup *mem)
{
        int cpu;
        /* Because this is for moving account, reuse mc.lock */
        spin_lock(&mc.lock);
        for_each_possible_cpu(cpu)
                per_cpu(mem->stat->count[MEM_CGROUP_ON_MOVE], cpu) += 1;
        spin_unlock(&mc.lock);

        synchronize_rcu();
}

static void mem_cgroup_end_move(struct mem_cgroup *mem)
{
        int cpu;

        if (!mem)
                return;
        spin_lock(&mc.lock);
        for_each_possible_cpu(cpu)
                per_cpu(mem->stat->count[MEM_CGROUP_ON_MOVE], cpu) -= 1;
        spin_unlock(&mc.lock);
}
/*
 * Two routines for checking whether "mem" is under move_account() or not.
 *
 * mem_cgroup_stealed() - checks whether a cgroup is mc.from or not. This is
 *                        used to avoid races in accounting. If true,
 *                        pc->mem_cgroup may be overwritten.
 *
 * mem_cgroup_under_move() - checks whether a cgroup is mc.from or mc.to or
 *                        under the hierarchy of moving cgroups. This is used
 *                        for waiting at high memory pressure caused by "move".
 */

static bool mem_cgroup_stealed(struct mem_cgroup *mem)
{
        VM_BUG_ON(!rcu_read_lock_held());
        return this_cpu_read(mem->stat->count[MEM_CGROUP_ON_MOVE]) > 0;
}

static bool mem_cgroup_under_move(struct mem_cgroup *mem)
{
        struct mem_cgroup *from;
        struct mem_cgroup *to;
        bool ret = false;
        /*
         * Unlike the task_move routines, we access mc.to and mc.from without
         * the mutual exclusion of cgroup_mutex. Here, we take the spinlock
         * instead.
         */
        spin_lock(&mc.lock);
        from = mc.from;
        to = mc.to;
        if (!from)
                goto unlock;
        if (from == mem || to == mem
            || (mem->use_hierarchy && css_is_ancestor(&from->css, &mem->css))
            || (mem->use_hierarchy && css_is_ancestor(&to->css, &mem->css)))
                ret = true;
unlock:
        spin_unlock(&mc.lock);
        return ret;
}

static bool mem_cgroup_wait_acct_move(struct mem_cgroup *mem)
{
        if (mc.moving_task && current != mc.moving_task) {
                if (mem_cgroup_under_move(mem)) {
                        DEFINE_WAIT(wait);
                        prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
                        /* moving charge context might have finished. */
                        if (mc.moving_task)
                                schedule();
                        finish_wait(&mc.waitq, &wait);
                        return true;
                }
        }
        return false;
}

/**
 * mem_cgroup_print_oom_info: Called from OOM with tasklist_lock held in read mode.
 * @memcg: The memory cgroup that went over limit
 * @p: Task that is going to be killed
 *
 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
 * enabled
 */
void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
        struct cgroup *task_cgrp;
        struct cgroup *mem_cgrp;
        /*
         * We need a buffer in BSS and can't rely on allocations. The code
         * relies on the assumption that OOM is serialized for the memory
         * controller. If this assumption is broken, revisit this code.
         */
        static char memcg_name[PATH_MAX];
        int ret;

        if (!memcg || !p)
                return;


        rcu_read_lock();

        mem_cgrp = memcg->css.cgroup;
        task_cgrp = task_cgroup(p, mem_cgroup_subsys_id);

        ret = cgroup_path(task_cgrp, memcg_name, PATH_MAX);
        if (ret < 0) {
                /*
                 * Unfortunately, we are unable to convert to a useful name,
                 * but we'll still print out the usage information.
                 */
                rcu_read_unlock();
                goto done;
        }
        rcu_read_unlock();

        printk(KERN_INFO "Task in %s killed", memcg_name);

        rcu_read_lock();
        ret = cgroup_path(mem_cgrp, memcg_name, PATH_MAX);
        if (ret < 0) {
                rcu_read_unlock();
                goto done;
        }
        rcu_read_unlock();

        /*
         * Continues from above, so we don't need a KERN_ level.
         */
        printk(KERN_CONT " as a result of limit of %s\n", memcg_name);
done:

        printk(KERN_INFO "memory: usage %llukB, limit %llukB, failcnt %llu\n",
                res_counter_read_u64(&memcg->res, RES_USAGE) >> 10,
                res_counter_read_u64(&memcg->res, RES_LIMIT) >> 10,
                res_counter_read_u64(&memcg->res, RES_FAILCNT));
        printk(KERN_INFO "memory+swap: usage %llukB, limit %llukB, "
                "failcnt %llu\n",
                res_counter_read_u64(&memcg->memsw, RES_USAGE) >> 10,
                res_counter_read_u64(&memcg->memsw, RES_LIMIT) >> 10,
                res_counter_read_u64(&memcg->memsw, RES_FAILCNT));
}

/*
 * This function returns the number of memcg under the hierarchy tree.
 * Returns 1 (the self count) if there are no children.
 */
static int mem_cgroup_count_children(struct mem_cgroup *mem)
{
        int num = 0;
        struct mem_cgroup *iter;

        for_each_mem_cgroup_tree(iter, mem)
                num++;
        return num;
}

/*
 * Return the memory (and swap, if configured) limit for a memcg.
 */
u64 mem_cgroup_get_limit(struct mem_cgroup *memcg)
{
        u64 limit;
        u64 memsw;

        limit = res_counter_read_u64(&memcg->res, RES_LIMIT) +
                        total_swap_pages;
        memsw = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
        /*
         * If memsw is finite and limits the amount of swap space available
         * to this memcg, return that limit.
         */
        return min(limit, memsw);
}

/*
 * Visit the first child (need not be the first child as per the ordering
 * of the cgroup list, since we track last_scanned_child) of @mem and use
 * that to reclaim free pages from.
 */
static struct mem_cgroup *
mem_cgroup_select_victim(struct mem_cgroup *root_mem)
{
        struct mem_cgroup *ret = NULL;
        struct cgroup_subsys_state *css;
        int nextid, found;

        if (!root_mem->use_hierarchy) {
                css_get(&root_mem->css);
                ret = root_mem;
        }

        while (!ret) {
                rcu_read_lock();
                nextid = root_mem->last_scanned_child + 1;
                css = css_get_next(&mem_cgroup_subsys, nextid, &root_mem->css,
                                   &found);
                if (css && css_tryget(css))
                        ret = container_of(css, struct mem_cgroup, css);

                rcu_read_unlock();
                /* Updates scanning parameter */
                spin_lock(&root_mem->reclaim_param_lock);
                if (!css) {
                        /* this means start scan from ID:1 */
                        root_mem->last_scanned_child = 0;
                } else
                        root_mem->last_scanned_child = found;
                spin_unlock(&root_mem->reclaim_param_lock);
        }

        return ret;
}

/*
 * Scan the hierarchy if needed to reclaim memory. We remember the last child
 * we reclaimed from, so that we don't end up penalizing one child extensively
 * based on its position in the children list.
 *
 * root_mem is the original ancestor that we've been reclaiming from.
 *
 * We give up and return to the caller when we visit root_mem twice.
 * (other groups can be removed while we're walking....)
 *
 * If shrink==true, this returns immediately to avoid freeing too much.
 */
static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
                                                struct zone *zone,
                                                gfp_t gfp_mask,
                                                unsigned long reclaim_options)
{
        struct mem_cgroup *victim;
        int ret, total = 0;
        int loop = 0;
        bool noswap = reclaim_options & MEM_CGROUP_RECLAIM_NOSWAP;
        bool shrink = reclaim_options & MEM_CGROUP_RECLAIM_SHRINK;
        bool check_soft = reclaim_options & MEM_CGROUP_RECLAIM_SOFT;
        unsigned long excess = mem_cgroup_get_excess(root_mem);

        /* If memsw_is_minimum==1, swap-out is of no use. */
        if (root_mem->memsw_is_minimum)
                noswap = true;

        while (1) {
                victim = mem_cgroup_select_victim(root_mem);
                if (victim == root_mem) {
                        loop++;
                        if (loop >= 1)
                                drain_all_stock_async();
                        if (loop >= 2) {
                                /*
                                 * If we have not been able to reclaim
                                 * anything, it might be because there are
                                 * no reclaimable pages under this hierarchy.
                                 */
                                if (!check_soft || !total) {
                                        css_put(&victim->css);
                                        break;
                                }
                                /*
                                 * We want to do more targeted reclaim.
                                 * excess >> 2 is not too excessive, so we
                                 * don't reclaim too much, nor so little that
                                 * we keep coming back to reclaim from this
                                 * cgroup.
                                 */
                                if (total >= (excess >> 2) ||
                                        (loop > MEM_CGROUP_MAX_RECLAIM_LOOPS)) {
                                        css_put(&victim->css);
                                        break;
                                }
                        }
                }
                if (!mem_cgroup_local_usage(victim)) {
                        /* this cgroup's local usage == 0 */
                        css_put(&victim->css);
                        continue;
                }
                /* we use swappiness of local cgroup */
                if (check_soft)
                        ret = mem_cgroup_shrink_node_zone(victim, gfp_mask,
                                noswap, get_swappiness(victim), zone);
                else
                        ret = try_to_free_mem_cgroup_pages(victim, gfp_mask,
                                                noswap, get_swappiness(victim));
                css_put(&victim->css);
                /*
                 * When shrinking usage, we can't check whether we should stop
                 * here or reclaim more; that depends on the caller.
                 * last_scanned_child is enough to keep fairness in the tree.
                 */
                if (shrink)
                        return ret;
                total += ret;
                if (check_soft) {
                        if (res_counter_check_under_soft_limit(&root_mem->res))
                                return total;
                } else if (mem_cgroup_check_under_limit(root_mem))
                        return 1 + total;
        }
        return total;
}

/*
 * Check whether the OOM killer is already running under our hierarchy.
 * If someone is running, return false.
 */
static bool mem_cgroup_oom_lock(struct mem_cgroup *mem)
{
        int x, lock_count = 0;
        struct mem_cgroup *iter;

        for_each_mem_cgroup_tree(iter, mem) {
                x = atomic_inc_return(&iter->oom_lock);
                lock_count = max(x, lock_count);
        }

        if (lock_count == 1)
                return true;
        return false;
}

static int mem_cgroup_oom_unlock(struct mem_cgroup *mem)
{
        struct mem_cgroup *iter;

        /*
         * When a new child is created while the hierarchy is under oom,
         * mem_cgroup_oom_lock() may not be called. We have to use
         * atomic_add_unless() here.
         */
        for_each_mem_cgroup_tree(iter, mem)
                atomic_add_unless(&iter->oom_lock, -1, 0);
        return 0;
}


static DEFINE_MUTEX(memcg_oom_mutex);
static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);

struct oom_wait_info {
        struct mem_cgroup *mem;
        wait_queue_t    wait;
};

static int memcg_oom_wake_function(wait_queue_t *wait,
        unsigned mode, int sync, void *arg)
{
        struct mem_cgroup *wake_mem = (struct mem_cgroup *)arg;
        struct oom_wait_info *oom_wait_info;

        oom_wait_info = container_of(wait, struct oom_wait_info, wait);

        if (oom_wait_info->mem == wake_mem)
                goto wakeup;
        /* if no hierarchy, no match */
        if (!oom_wait_info->mem->use_hierarchy || !wake_mem->use_hierarchy)
                return 0;
        /*
         * Both oom_wait_info->mem and wake_mem are stable under us, so
         * we can use css_is_ancestor() without taking care of RCU.
         */
        if (!css_is_ancestor(&oom_wait_info->mem->css, &wake_mem->css) &&
            !css_is_ancestor(&wake_mem->css, &oom_wait_info->mem->css))
                return 0;

wakeup:
        return autoremove_wake_function(wait, mode, sync, arg);
}

static void memcg_wakeup_oom(struct mem_cgroup *mem)
{
        /* for filtering, pass "mem" as argument. */
        __wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, mem);
}

static void memcg_oom_recover(struct mem_cgroup *mem)
{
        if (mem && atomic_read(&mem->oom_lock))
                memcg_wakeup_oom(mem);
}

/*
 * Try to invoke the OOM killer. Returns false if we should exit the
 * memory-reclaim loop.
 */
bool mem_cgroup_handle_oom(struct mem_cgroup *mem, gfp_t mask)
{
        struct oom_wait_info owait;
        bool locked, need_to_kill;

        owait.mem = mem;
        owait.wait.flags = 0;
        owait.wait.func = memcg_oom_wake_function;
        owait.wait.private = current;
        INIT_LIST_HEAD(&owait.wait.task_list);
        need_to_kill = true;
        /* At first, try to OOM lock the hierarchy under mem. */
        mutex_lock(&memcg_oom_mutex);
        locked = mem_cgroup_oom_lock(mem);
        /*
         * Even with signal_pending(), we can't quit the charge() loop without
         * accounting, so UNINTERRUPTIBLE would be appropriate. But SIGKILL
         * under OOM is always welcome, so use TASK_KILLABLE here.
         */
        prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
        if (!locked || mem->oom_kill_disable)
                need_to_kill = false;
        if (locked)
                mem_cgroup_oom_notify(mem);
        mutex_unlock(&memcg_oom_mutex);

        if (need_to_kill) {
                finish_wait(&memcg_oom_waitq, &owait.wait);
                mem_cgroup_out_of_memory(mem, mask);
        } else {
                schedule();
                finish_wait(&memcg_oom_waitq, &owait.wait);
        }
        mutex_lock(&memcg_oom_mutex);
        mem_cgroup_oom_unlock(mem);
        memcg_wakeup_oom(mem);
        mutex_unlock(&memcg_oom_mutex);

        if (test_thread_flag(TIF_MEMDIE) || fatal_signal_pending(current))
                return false;
        /* Give chance to dying process */
        schedule_timeout(1);
        return true;
}

/*
 * Currently used to update mapped file statistics, but the routine can be
 * generalized to update other statistics as well.
 *
 * Notes: Race condition
 *
 * We usually use page_cgroup_lock() for accessing page_cgroup members, but
 * it tends to be costly. Under certain conditions, we don't need
 * to do so _always_.
 *
 * Considering "charge", lock_page_cgroup() is not required because all
 * file-stat operations happen after a page is attached to the radix-tree.
 * There is no race with "charge".
 *
 * Considering "uncharge", we know that memcg doesn't clear pc->mem_cgroup
 * at "uncharge" intentionally. So, we always see a valid pc->mem_cgroup even
 * if there is a race with "uncharge". The statistics themselves are properly
 * handled by flags.
 *
 * Considering "move", this is the only case where we see a race. To make the
 * race window small, we check the MEM_CGROUP_ON_MOVE percpu value and detect
 * whether there is a possibility of a race. If there is, we take a lock.
 */
1528 void mem_cgroup_update_file_mapped(struct page *page, int val)
1529 {
1530         struct mem_cgroup *mem;
1531         struct page_cgroup *pc = lookup_page_cgroup(page);
1532         bool need_unlock = false;
1533
1534         if (unlikely(!pc))
1535                 return;
1536
1537         rcu_read_lock();
1538         mem = pc->mem_cgroup;
1539         if (unlikely(!mem || !PageCgroupUsed(pc)))
1540                 goto out;
1541         /* pc->mem_cgroup is unstable ? */
1542         if (unlikely(mem_cgroup_stealed(mem))) {
1543                 /* take the lock to access pc->mem_cgroup safely */
1544                 lock_page_cgroup(pc);
1545                 need_unlock = true;
1546                 mem = pc->mem_cgroup;
1547                 if (!mem || !PageCgroupUsed(pc))
1548                         goto out;
1549         }
1550         if (val > 0) {
1551                 this_cpu_inc(mem->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
1552                 SetPageCgroupFileMapped(pc);
1553         } else {
1554                 this_cpu_dec(mem->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
1555                 if (!page_mapped(page)) /* for race between dec->inc counter */
1556                         ClearPageCgroupFileMapped(pc);
1557         }
1558
1559 out:
1560         if (unlikely(need_unlock))
1561                 unlock_page_cgroup(pc);
1562         rcu_read_unlock();
1563         return;
1564 }
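/*
 * For context, a sketch (editor's illustration, not verbatim kernel code) of
 * how the rmap layer is expected to drive the helper above, modeled on
 * page_add_file_rmap()/page_remove_rmap() in mm/rmap.c of this era:
 *
 *	if (atomic_inc_and_test(&page->_mapcount))
 *		mem_cgroup_update_file_mapped(page, 1);	 (first mapping)
 *	...
 *	if (atomic_add_negative(-1, &page->_mapcount))
 *		mem_cgroup_update_file_mapped(page, -1); (last unmap, file page)
 */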
1565
1566 /*
1567  * size of first charge trial. "32" comes from vmscan.c's magic value.
1568  * TODO: it may be necessary to use bigger numbers on big irons.
1569  */
1570 #define CHARGE_SIZE     (32 * PAGE_SIZE)
1571 struct memcg_stock_pcp {
1572         struct mem_cgroup *cached; /* this is never the root cgroup */
1573         int charge;
1574         struct work_struct work;
1575 };
1576 static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
1577 static atomic_t memcg_drain_count;
1578
1579 /*
1580  * Try to consume stocked charge on this cpu. On success, PAGE_SIZE is
1581  * consumed from the local stock and true is returned. If the stock is empty
1582  * or holds charges from a cgroup other than the current target, false is
1583  * returned; the stock will be refilled later.
1584  */
1585 static bool consume_stock(struct mem_cgroup *mem)
1586 {
1587         struct memcg_stock_pcp *stock;
1588         bool ret = true;
1589
1590         stock = &get_cpu_var(memcg_stock);
1591         if (mem == stock->cached && stock->charge)
1592                 stock->charge -= PAGE_SIZE;
1593         else /* need to call res_counter_charge */
1594                 ret = false;
1595         put_cpu_var(memcg_stock);
1596         return ret;
1597 }
1598
1599 /*
1600  * Return percpu-cached charges to the res_counter and reset the cached info.
1601  */
1602 static void drain_stock(struct memcg_stock_pcp *stock)
1603 {
1604         struct mem_cgroup *old = stock->cached;
1605
1606         if (stock->charge) {
1607                 res_counter_uncharge(&old->res, stock->charge);
1608                 if (do_swap_account)
1609                         res_counter_uncharge(&old->memsw, stock->charge);
1610         }
1611         stock->cached = NULL;
1612         stock->charge = 0;
1613 }
1614
1615 /*
1616  * This must be called with preemption disabled, or by a thread
1617  * which is pinned to the local cpu.
1618  */
1619 static void drain_local_stock(struct work_struct *dummy)
1620 {
1621         struct memcg_stock_pcp *stock = &__get_cpu_var(memcg_stock);
1622         drain_stock(stock);
1623 }
1624
1625 /*
1626  * Cache charges (val), taken from the res_counter, in the local per-cpu area.
1627  * They will be consumed later by consume_stock().
1628  */
1629 static void refill_stock(struct mem_cgroup *mem, int val)
1630 {
1631         struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock);
1632
1633         if (stock->cached != mem) { /* reset if necessary */
1634                 drain_stock(stock);
1635                 stock->cached = mem;
1636         }
1637         stock->charge += val;
1638         put_cpu_var(memcg_stock);
1639 }
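/*
 * Taken together, the stock helpers above give the charge path a batched
 * fast path. A minimal sketch of the flow (editor's illustration; names come
 * from this file, error handling elided, cf. __mem_cgroup_try_charge() below):
 *
 *	if (consume_stock(mem))
 *		return 0;	(fast path: no res_counter operation needed)
 *	ret = res_counter_charge(&mem->res, CHARGE_SIZE, &fail_res);
 *	if (!ret)
 *		refill_stock(mem, CHARGE_SIZE - PAGE_SIZE);	(cache the rest)
 */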
1640
1641 /*
1642  * Tries to drain stocked charges on other cpus. This function is asynchronous
1643  * and just queues a work item per cpu to drain locally on each cpu. Callers
1644  * can expect some charges to come back to the res_counter later, but cannot
1645  * wait for that.
1646  */
1647 static void drain_all_stock_async(void)
1648 {
1649         int cpu;
1650         /* This function schedules "drain" asynchronously; the result of
1651          * "drain" is not directly handled by callers. So, if someone is
1652          * already draining, we don't have to start another drain. The
1653          * WORK_STRUCT_PENDING check in queue_work_on() will catch any
1654          * race; we only do a loose check here.
1655          */
1656         if (atomic_read(&memcg_drain_count))
1657                 return;
1658         /* Notify other cpus that system-wide "drain" is running */
1659         atomic_inc(&memcg_drain_count);
1660         get_online_cpus();
1661         for_each_online_cpu(cpu) {
1662                 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
1663                 schedule_work_on(cpu, &stock->work);
1664         }
1665         put_online_cpus();
1666         atomic_dec(&memcg_drain_count);
1667         /* We don't wait for flush_work */
1668 }
1669
1670 /* This is a synchronous drain interface. */
1671 static void drain_all_stock_sync(void)
1672 {
1673         /* called when force_empty is called */
1674         atomic_inc(&memcg_drain_count);
1675         schedule_on_each_cpu(drain_local_stock);
1676         atomic_dec(&memcg_drain_count);
1677 }
1678
1679 static int __cpuinit memcg_stock_cpu_callback(struct notifier_block *nb,
1680                                         unsigned long action,
1681                                         void *hcpu)
1682 {
1683         int cpu = (unsigned long)hcpu;
1684         struct memcg_stock_pcp *stock;
1685
1686         if (action != CPU_DEAD)
1687                 return NOTIFY_OK;
1688         stock = &per_cpu(memcg_stock, cpu);
1689         drain_stock(stock);
1690         return NOTIFY_OK;
1691 }
1692
1693
1694 /* See __mem_cgroup_try_charge() for details */
1695 enum {
1696         CHARGE_OK,              /* success */
1697         CHARGE_RETRY,           /* need to retry but retry is not bad */
1698         CHARGE_NOMEM,           /* we can't do more. return -ENOMEM */
1699         CHARGE_WOULDBLOCK,      /* __GFP_WAIT wasn't set and not enough res. */
1700         CHARGE_OOM_DIE,         /* current was killed because of OOM */
1701 };
1702
1703 static int __mem_cgroup_do_charge(struct mem_cgroup *mem, gfp_t gfp_mask,
1704                                 int csize, bool oom_check)
1705 {
1706         struct mem_cgroup *mem_over_limit;
1707         struct res_counter *fail_res;
1708         unsigned long flags = 0;
1709         int ret;
1710
1711         ret = res_counter_charge(&mem->res, csize, &fail_res);
1712
1713         if (likely(!ret)) {
1714                 if (!do_swap_account)
1715                         return CHARGE_OK;
1716                 ret = res_counter_charge(&mem->memsw, csize, &fail_res);
1717                 if (likely(!ret))
1718                         return CHARGE_OK;
1719
1720                 mem_over_limit = mem_cgroup_from_res_counter(fail_res, memsw);
1721                 flags |= MEM_CGROUP_RECLAIM_NOSWAP;
1722         } else
1723                 mem_over_limit = mem_cgroup_from_res_counter(fail_res, res);
1724
1725         if (csize > PAGE_SIZE) /* change csize and retry */
1726                 return CHARGE_RETRY;
1727
1728         if (!(gfp_mask & __GFP_WAIT))
1729                 return CHARGE_WOULDBLOCK;
1730
1731         ret = mem_cgroup_hierarchical_reclaim(mem_over_limit, NULL,
1732                                         gfp_mask, flags);
1733         /*
1734          * try_to_free_mem_cgroup_pages() might not give us a full
1735          * picture of reclaim. Some pages are reclaimed and might be
1736          * moved to swap cache or just unmapped from the cgroup.
1737          * Check the limit again to see if the reclaim reduced the
1738          * current usage of the cgroup before giving up.
1739          */
1740         if (ret || mem_cgroup_check_under_limit(mem_over_limit))
1741                 return CHARGE_RETRY;
1742
1743         /*
1744          * During task move, charges can be doubly counted. So, it's
1745          * better to wait until the end of task_move if something is going on.
1746          */
1747         if (mem_cgroup_wait_acct_move(mem_over_limit))
1748                 return CHARGE_RETRY;
1749
1750         /* If we don't need to call the oom-killer at all, return immediately */
1751         if (!oom_check)
1752                 return CHARGE_NOMEM;
1753         /* check OOM */
1754         if (!mem_cgroup_handle_oom(mem_over_limit, gfp_mask))
1755                 return CHARGE_OOM_DIE;
1756
1757         return CHARGE_RETRY;
1758 }
1759
1760 /*
1761  * Unlike the exported interface, an "oom" parameter is added. If oom==true,
1762  * the oom-killer can be invoked.
1763  */
1764 static int __mem_cgroup_try_charge(struct mm_struct *mm,
1765                 gfp_t gfp_mask, struct mem_cgroup **memcg, bool oom)
1766 {
1767         int nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES;
1768         struct mem_cgroup *mem = NULL;
1769         int ret;
1770         int csize = CHARGE_SIZE;
1771
1772         /*
1773          * Unlike the global VM's OOM-kill, we're not in a system-level
1774          * memory shortage here. So, let a dying process go ahead, in
1775          * addition to a MEMDIE process.
1776          */
1777         if (unlikely(test_thread_flag(TIF_MEMDIE)
1778                      || fatal_signal_pending(current)))
1779                 goto bypass;
1780
1781         /*
1782          * We always charge the cgroup the mm_struct belongs to.
1783          * The mm_struct's mem_cgroup changes on task migration if the
1784          * thread group leader migrates. It's possible that mm is not
1785          * set, if so charge the init_mm (happens for pagecache usage).
1786          */
1787         if (!*memcg && !mm)
1788                 goto bypass;
1789 again:
1790         if (*memcg) { /* css should be a valid one */
1791                 mem = *memcg;
1792                 VM_BUG_ON(css_is_removed(&mem->css));
1793                 if (mem_cgroup_is_root(mem))
1794                         goto done;
1795                 if (consume_stock(mem))
1796                         goto done;
1797                 css_get(&mem->css);
1798         } else {
1799                 struct task_struct *p;
1800
1801                 rcu_read_lock();
1802                 p = rcu_dereference(mm->owner);
1803                 VM_BUG_ON(!p);
1804                 /*
1805                  * Because we don't have task_lock(), "p" can exit while
1806                  * we're here. In that case, "mem" can point to the root
1807                  * cgroup but is never NULL. (task_struct itself is freed
1808                  * by RCU, and the cgroup is RCU safe.) So there is a small
1809                  * risk of picking the wrong cgroup here. But mis-accounting
1810                  * from such races is always possible since we don't hold
1811                  * cgroup_mutex(); that would be overkill, so we allow it.
1812                  */
1813                 mem = mem_cgroup_from_task(p);
1814                 VM_BUG_ON(!mem);
1815                 if (mem_cgroup_is_root(mem)) {
1816                         rcu_read_unlock();
1817                         goto done;
1818                 }
1819                 if (consume_stock(mem)) {
1820                         /*
1821                          * It seems dangerous to access memcg without css_get().
1822                          * But considering how consume_stock works, it's not
1823                          * necessary. If consume_stock succeeds, some charges
1824                          * from this memcg are cached on this cpu. So, we
1825                          * don't need to call css_get()/css_tryget() before
1826                          * calling consume_stock().
1827                          */
1828                         rcu_read_unlock();
1829                         goto done;
1830                 }
1831                 /* after this point we may block; we need to take a refcnt */
1832                 if (!css_tryget(&mem->css)) {
1833                         rcu_read_unlock();
1834                         goto again;
1835                 }
1836                 rcu_read_unlock();
1837         }
1838
1839         do {
1840                 bool oom_check;
1841
1842                 /* If killed, bypass charge */
1843                 if (fatal_signal_pending(current)) {
1844                         css_put(&mem->css);
1845                         goto bypass;
1846                 }
1847
1848                 oom_check = false;
1849                 if (oom && !nr_oom_retries) {
1850                         oom_check = true;
1851                         nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES;
1852                 }
1853
1854                 ret = __mem_cgroup_do_charge(mem, gfp_mask, csize, oom_check);
1855
1856                 switch (ret) {
1857                 case CHARGE_OK:
1858                         break;
1859                 case CHARGE_RETRY: /* not in OOM situation but retry */
1860                         csize = PAGE_SIZE;
1861                         css_put(&mem->css);
1862                         mem = NULL;
1863                         goto again;
1864                 case CHARGE_WOULDBLOCK: /* !__GFP_WAIT */
1865                         css_put(&mem->css);
1866                         goto nomem;
1867                 case CHARGE_NOMEM: /* OOM routine works */
1868                         if (!oom) {
1869                                 css_put(&mem->css);
1870                                 goto nomem;
1871                         }
1872                         /* If oom, we never return -ENOMEM */
1873                         nr_oom_retries--;
1874                         break;
1875                 case CHARGE_OOM_DIE: /* Killed by OOM Killer */
1876                         css_put(&mem->css);
1877                         goto bypass;
1878                 }
1879         } while (ret != CHARGE_OK);
1880
1881         if (csize > PAGE_SIZE)
1882                 refill_stock(mem, csize - PAGE_SIZE);
1883         css_put(&mem->css);
1884 done:
1885         *memcg = mem;
1886         return 0;
1887 nomem:
1888         *memcg = NULL;
1889         return -ENOMEM;
1890 bypass:
1891         *memcg = NULL;
1892         return 0;
1893 }
1894
1895 /*
1896  * Sometimes we have to undo a charge we got by try_charge().
1897  * This function does the uncharge and puts the css refcnt
1898  * taken by try_charge().
1899  */
1900 static void __mem_cgroup_cancel_charge(struct mem_cgroup *mem,
1901                                                         unsigned long count)
1902 {
1903         if (!mem_cgroup_is_root(mem)) {
1904                 res_counter_uncharge(&mem->res, PAGE_SIZE * count);
1905                 if (do_swap_account)
1906                         res_counter_uncharge(&mem->memsw, PAGE_SIZE * count);
1907         }
1908 }
1909
1910 static void mem_cgroup_cancel_charge(struct mem_cgroup *mem)
1911 {
1912         __mem_cgroup_cancel_charge(mem, 1);
1913 }
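/*
 * The try/commit/cancel trio forms a two-phase charge protocol. A sketch of
 * the usual caller sequence (editor's illustration, cf.
 * mem_cgroup_charge_common() below; "abort" is a hypothetical condition):
 *
 *	struct mem_cgroup *mem = NULL;
 *	ret = __mem_cgroup_try_charge(mm, gfp_mask, &mem, true);
 *	if (ret || !mem)
 *		return ret;	(failure, or bypass/root: nothing to commit)
 *	if (abort)
 *		mem_cgroup_cancel_charge(mem);	(undo the res_counter charge)
 *	else
 *		__mem_cgroup_commit_charge(mem, pc, ctype);
 */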
1914
1915 /*
1916  * A helper function to get a mem_cgroup from an ID. Must be called under
1917  * rcu_read_lock(). The caller must check css_is_removed() or similar if
1918  * that is a concern. (Dropping a refcnt from swap can be called against a
1919  * removed memcg.)
1920  */
1921 static struct mem_cgroup *mem_cgroup_lookup(unsigned short id)
1922 {
1923         struct cgroup_subsys_state *css;
1924
1925         /* ID 0 is unused ID */
1926         if (!id)
1927                 return NULL;
1928         css = css_lookup(&mem_cgroup_subsys, id);
1929         if (!css)
1930                 return NULL;
1931         return container_of(css, struct mem_cgroup, css);
1932 }
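/*
 * A sketch of the locking discipline demanded above (editor's illustration;
 * the same pattern appears in try_get_mem_cgroup_from_page() just below):
 *
 *	rcu_read_lock();
 *	mem = mem_cgroup_lookup(id);
 *	if (mem && !css_tryget(&mem->css))	(may race with rmdir())
 *		mem = NULL;
 *	rcu_read_unlock();
 */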
1933
1934 struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
1935 {
1936         struct mem_cgroup *mem = NULL;
1937         struct page_cgroup *pc;
1938         unsigned short id;
1939         swp_entry_t ent;
1940
1941         VM_BUG_ON(!PageLocked(page));
1942
1943         pc = lookup_page_cgroup(page);
1944         lock_page_cgroup(pc);
1945         if (PageCgroupUsed(pc)) {
1946                 mem = pc->mem_cgroup;
1947                 if (mem && !css_tryget(&mem->css))
1948                         mem = NULL;
1949         } else if (PageSwapCache(page)) {
1950                 ent.val = page_private(page);
1951                 id = lookup_swap_cgroup(ent);
1952                 rcu_read_lock();
1953                 mem = mem_cgroup_lookup(id);
1954                 if (mem && !css_tryget(&mem->css))
1955                         mem = NULL;
1956                 rcu_read_unlock();
1957         }
1958         unlock_page_cgroup(pc);
1959         return mem;
1960 }
1961
1962 /*
1963  * Commit a charge obtained by __mem_cgroup_try_charge() and set the
1964  * page_cgroup to the USED state. If it is already USED, uncharge and return.
1965  */
1966
1967 static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
1968                                      struct page_cgroup *pc,
1969                                      enum charge_type ctype)
1970 {
1971         /* try_charge() can return NULL in *memcg; handle that case here. */
1972         if (!mem)
1973                 return;
1974
1975         lock_page_cgroup(pc);
1976         if (unlikely(PageCgroupUsed(pc))) {
1977                 unlock_page_cgroup(pc);
1978                 mem_cgroup_cancel_charge(mem);
1979                 return;
1980         }
1981
1982         pc->mem_cgroup = mem;
1983         /*
1984          * We access a page_cgroup asynchronously without lock_page_cgroup().
1985          * Especially when a page_cgroup is taken from a page, pc->mem_cgroup
1986          * is accessed after testing the USED bit. To make pc->mem_cgroup
1987          * visible before the USED bit, we need a memory barrier here.
1988          * See mem_cgroup_add_lru_list(), etc.
1989          */
1990         smp_wmb();
1991         switch (ctype) {
1992         case MEM_CGROUP_CHARGE_TYPE_CACHE:
1993         case MEM_CGROUP_CHARGE_TYPE_SHMEM:
1994                 SetPageCgroupCache(pc);
1995                 SetPageCgroupUsed(pc);
1996                 break;
1997         case MEM_CGROUP_CHARGE_TYPE_MAPPED:
1998                 ClearPageCgroupCache(pc);
1999                 SetPageCgroupUsed(pc);
2000                 break;
2001         default:
2002                 break;
2003         }
2004
2005         mem_cgroup_charge_statistics(mem, pc, true);
2006
2007         unlock_page_cgroup(pc);
2008         /*
2009          * "charge_statistics" updated event counter. Then, check it.
2010          * Insert ancestor (and ancestor's ancestors), to softlimit RB-tree.
2011          * if they exceeds softlimit.
2012          */
2013         memcg_check_events(mem, pc->page);
2014 }
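/*
 * The smp_wmb() in __mem_cgroup_commit_charge() pairs with an smp_rmb() on
 * the lockless reader side. A sketch of that reader (editor's illustration,
 * modeled on mem_cgroup_add_lru_list(), which the comment above refers to):
 *
 *	if (!PageCgroupUsed(pc))
 *		return;
 *	smp_rmb();	(USED bit seen => pc->mem_cgroup is visible)
 *	mem = pc->mem_cgroup;
 */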
2015
2016 /**
2017  * __mem_cgroup_move_account - move account of the page
2018  * @pc: page_cgroup of the page.
2019  * @from: mem_cgroup which the page is moved from.
2020  * @to: mem_cgroup which the page is moved to. @from != @to.
2021  * @uncharge: whether we should call uncharge and css_put against @from.
2022  *
2023  * The caller must confirm the following.
2024  * - page is not on LRU (isolate_page() is useful.)
2025  * - the pc is locked, used, and ->mem_cgroup points to @from.
2026  *
2027  * This function does neither "charge" nor css_get on the new cgroup; that
2028  * should be done by the caller (__mem_cgroup_try_charge would be useful).
2029  * If @uncharge is true, this function does "uncharge" from the old cgroup;
2030  * if @uncharge is false, the caller should do the "uncharge" itself.
2031  */
2032
2033 static void __mem_cgroup_move_account(struct page_cgroup *pc,
2034         struct mem_cgroup *from, struct mem_cgroup *to, bool uncharge)
2035 {
2036         VM_BUG_ON(from == to);
2037         VM_BUG_ON(PageLRU(pc->page));
2038         VM_BUG_ON(!PageCgroupLocked(pc));
2039         VM_BUG_ON(!PageCgroupUsed(pc));
2040         VM_BUG_ON(pc->mem_cgroup != from);
2041
2042         if (PageCgroupFileMapped(pc)) {
2043                 /* Update mapped_file data for mem_cgroup */
2044                 preempt_disable();
2045                 __this_cpu_dec(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
2046                 __this_cpu_inc(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
2047                 preempt_enable();
2048         }
2049         mem_cgroup_charge_statistics(from, pc, false);
2050         if (uncharge)
2051                 /* This is not "cancel", but cancel_charge does all we need. */
2052                 mem_cgroup_cancel_charge(from);
2053
2054         /* caller should have done css_get */
2055         pc->mem_cgroup = to;
2056         mem_cgroup_charge_statistics(to, pc, true);
2057         /*
2058          * We charge against "to", which may not have any tasks, so "to"
2059          * can be under rmdir(). But in the current implementation, the
2060          * callers of this function are just force_empty() and move charge,
2061          * so it's guaranteed that "to" is never removed. So, we don't
2062          * check the rmdir status here.
2063          */
2064 }
2065
2066 /*
2067  * check whether the @pc is valid for moving account and call
2068  * __mem_cgroup_move_account()
2069  */
2070 static int mem_cgroup_move_account(struct page_cgroup *pc,
2071                 struct mem_cgroup *from, struct mem_cgroup *to, bool uncharge)
2072 {
2073         int ret = -EINVAL;
2074         lock_page_cgroup(pc);
2075         if (PageCgroupUsed(pc) && pc->mem_cgroup == from) {
2076                 __mem_cgroup_move_account(pc, from, to, uncharge);
2077                 ret = 0;
2078         }
2079         unlock_page_cgroup(pc);
2080         /*
2081          * check events
2082          */
2083         memcg_check_events(to, pc->page);
2084         memcg_check_events(from, pc->page);
2085         return ret;
2086 }
2087
2088 /*
2089  * move charges to its parent.
2090  */
2091
2092 static int mem_cgroup_move_parent(struct page_cgroup *pc,
2093                                   struct mem_cgroup *child,
2094                                   gfp_t gfp_mask)
2095 {
2096         struct page *page = pc->page;
2097         struct cgroup *cg = child->css.cgroup;
2098         struct cgroup *pcg = cg->parent;
2099         struct mem_cgroup *parent;
2100         int ret;
2101
2102         /* Is ROOT ? */
2103         if (!pcg)
2104                 return -EINVAL;
2105
2106         ret = -EBUSY;
2107         if (!get_page_unless_zero(page))
2108                 goto out;
2109         if (isolate_lru_page(page))
2110                 goto put;
2111
2112         parent = mem_cgroup_from_cont(pcg);
2113         ret = __mem_cgroup_try_charge(NULL, gfp_mask, &parent, false);
2114         if (ret || !parent)
2115                 goto put_back;
2116
2117         ret = mem_cgroup_move_account(pc, child, parent, true);
2118         if (ret)
2119                 mem_cgroup_cancel_charge(parent);
2120 put_back:
2121         putback_lru_page(page);
2122 put:
2123         put_page(page);
2124 out:
2125         return ret;
2126 }
2127
2128 /*
2129  * Charge the memory controller for page usage.
2130  * Return
2131  * 0 if the charge was successful
2132  * < 0 if the cgroup is over its limit
2133  */
2134 static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
2135                                 gfp_t gfp_mask, enum charge_type ctype)
2136 {
2137         struct mem_cgroup *mem = NULL;
2138         struct page_cgroup *pc;
2139         int ret;
2140
2141         pc = lookup_page_cgroup(page);
2142         /* can happen at boot */
2143         if (unlikely(!pc))
2144                 return 0;
2145         prefetchw(pc);
2146
2147         ret = __mem_cgroup_try_charge(mm, gfp_mask, &mem, true);
2148         if (ret || !mem)
2149                 return ret;
2150
2151         __mem_cgroup_commit_charge(mem, pc, ctype);
2152         return 0;
2153 }
2154
2155 int mem_cgroup_newpage_charge(struct page *page,
2156                               struct mm_struct *mm, gfp_t gfp_mask)
2157 {
2158         if (mem_cgroup_disabled())
2159                 return 0;
2160         if (PageCompound(page))
2161                 return 0;
2162         /*
2163          * If already mapped, we don't have to account.
2164          * If page cache, page->mapping has an address_space.
2165          * But page->mapping may hold a stale anon_vma pointer;
2166          * detect that with a PageAnon() check. A newly-mapped anon
2167          * page's page->mapping is NULL.
2168          */
2169         if (page_mapped(page) || (page->mapping && !PageAnon(page)))
2170                 return 0;
2171         if (unlikely(!mm))
2172                 mm = &init_mm;
2173         return mem_cgroup_charge_common(page, mm, gfp_mask,
2174                                 MEM_CGROUP_CHARGE_TYPE_MAPPED);
2175 }
2176
2177 static void
2178 __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
2179                                         enum charge_type ctype);
2180
2181 int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
2182                                 gfp_t gfp_mask)
2183 {
2184         int ret;
2185
2186         if (mem_cgroup_disabled())
2187                 return 0;
2188         if (PageCompound(page))
2189                 return 0;
2190         /*
2191          * Corner case handling. This is usually called from
2192          * add_to_page_cache(). But some filesystems (shmem) precharge the
2193          * page before calling it, and call add_to_page_cache() with GFP_NOWAIT.
2194          *
2195          * In the GFP_NOWAIT case, the page may be pre-charged before calling
2196          * add_to_page_cache(). (See shmem.c.) Check for that here to avoid
2197          * charging twice. (It works, but at a somewhat larger cost.)
2198          * And when the page is SwapCache, its swap information should be
2199          * taken into account. This is under lock_page() now.
2200          */
2201         if (!(gfp_mask & __GFP_WAIT)) {
2202                 struct page_cgroup *pc;
2203
2204                 pc = lookup_page_cgroup(page);
2205                 if (!pc)
2206                         return 0;
2207                 lock_page_cgroup(pc);
2208                 if (PageCgroupUsed(pc)) {
2209                         unlock_page_cgroup(pc);
2210                         return 0;
2211                 }
2212                 unlock_page_cgroup(pc);
2213         }
2214
2215         if (unlikely(!mm))
2216                 mm = &init_mm;
2217
2218         if (page_is_file_cache(page))
2219                 return mem_cgroup_charge_common(page, mm, gfp_mask,
2220                                 MEM_CGROUP_CHARGE_TYPE_CACHE);
2221
2222         /* shmem */
2223         if (PageSwapCache(page)) {
2224                 struct mem_cgroup *mem = NULL;
2225
2226                 ret = mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &mem);
2227                 if (!ret)
2228                         __mem_cgroup_commit_charge_swapin(page, mem,
2229                                         MEM_CGROUP_CHARGE_TYPE_SHMEM);
2230         } else
2231                 ret = mem_cgroup_charge_common(page, mm, gfp_mask,
2232                                         MEM_CGROUP_CHARGE_TYPE_SHMEM);
2233
2234         return ret;
2235 }
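/*
 * For reference, the page-cache path is expected to call the function above
 * from add_to_page_cache(). A sketch (editor's illustration, based on
 * mm/filemap.c of this era; the exact gfp masking may differ):
 *
 *	error = mem_cgroup_cache_charge(page, current->mm,
 *					gfp_mask & GFP_RECLAIM_MASK);
 *	if (error)
 *		goto out;	(page is not added to the radix-tree)
 */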
2236
2237 /*
2238  * During swap-in (try_charge -> commit or cancel), the page is locked.
2239  * When try_charge() returns successfully, one refcnt on the memcg (not
2240  * tied to a struct page_cgroup) is acquired. This refcnt will be consumed
2241  * by "commit()" or released by "cancel()".
2242  */
2243 int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
2244                                  struct page *page,
2245                                  gfp_t mask, struct mem_cgroup **ptr)
2246 {
2247         struct mem_cgroup *mem;
2248         int ret;
2249
2250         if (mem_cgroup_disabled())
2251                 return 0;
2252
2253         if (!do_swap_account)
2254                 goto charge_cur_mm;
2255         /*
2256          * A racing thread's fault, or swapoff, may have already updated
2257          * the pte, and even removed page from swap cache: in those cases
2258          * do_swap_page()'s pte_same() test will fail; but there's also a
2259          * KSM case which does need to charge the page.
2260          */
2261         if (!PageSwapCache(page))
2262                 goto charge_cur_mm;
2263         mem = try_get_mem_cgroup_from_page(page);
2264         if (!mem)
2265                 goto charge_cur_mm;
2266         *ptr = mem;
2267         ret = __mem_cgroup_try_charge(NULL, mask, ptr, true);
2268         css_put(&mem->css);
2269         return ret;
2270 charge_cur_mm:
2271         if (unlikely(!mm))
2272                 mm = &init_mm;
2273         return __mem_cgroup_try_charge(mm, mask, ptr, true);
2274 }
2275
2276 static void
2277 __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
2278                                         enum charge_type ctype)
2279 {
2280         struct page_cgroup *pc;
2281
2282         if (mem_cgroup_disabled())
2283                 return;
2284         if (!ptr)
2285                 return;
2286         cgroup_exclude_rmdir(&ptr->css);
2287         pc = lookup_page_cgroup(page);
2288         mem_cgroup_lru_del_before_commit_swapcache(page);
2289         __mem_cgroup_commit_charge(ptr, pc, ctype);
2290         mem_cgroup_lru_add_after_commit_swapcache(page);
2291         /*
2292          * Now the swap is in memory. This means the page may be
2293          * counted both as mem and swap, i.e., double-counted.
2294          * Fix it by uncharging from memsw. Basically, this SwapCache is stable
2295          * under lock_page(). But in do_swap_page() (memory.c), reuse_swap_page()
2296          * may call delete_from_swap_cache() before we reach here.
2297          */
2298         if (do_swap_account && PageSwapCache(page)) {
2299                 swp_entry_t ent = {.val = page_private(page)};
2300                 unsigned short id;
2301                 struct mem_cgroup *memcg;
2302
2303                 id = swap_cgroup_record(ent, 0);
2304                 rcu_read_lock();
2305                 memcg = mem_cgroup_lookup(id);
2306                 if (memcg) {
2307                         /*
2308                          * This recorded memcg can be an obsolete one. So,
2309                          * avoid calling css_tryget().
2310                          */
2311                         if (!mem_cgroup_is_root(memcg))
2312                                 res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
2313                         mem_cgroup_swap_statistics(memcg, false);
2314                         mem_cgroup_put(memcg);
2315                 }
2316                 rcu_read_unlock();
2317         }
2318         /*
2319          * At swapin, we may charge against a cgroup which has no tasks.
2320          * So, rmdir()->pre_destroy() can be called while we do this charge.
2321          * In that case, we need to call pre_destroy() again. Check it here.
2322          */
2323         cgroup_release_and_wakeup_rmdir(&ptr->css);
2324 }
2325
2326 void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
2327 {
2328         __mem_cgroup_commit_charge_swapin(page, ptr,
2329                                         MEM_CGROUP_CHARGE_TYPE_MAPPED);
2330 }
2331
2332 void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *mem)
2333 {
2334         if (mem_cgroup_disabled())
2335                 return;
2336         if (!mem)
2337                 return;
2338         mem_cgroup_cancel_charge(mem);
2339 }
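/*
 * The swap-in trio mirrors the generic try/commit/cancel protocol. A sketch
 * of the expected use by do_swap_page() in mm/memory.c (editor's
 * illustration; error labels are hypothetical):
 *
 *	if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr))
 *		goto oom;
 *	...install the pte under the page lock...
 *	mem_cgroup_commit_charge_swapin(page, ptr);	(success path)
 *	...
 *	mem_cgroup_cancel_charge_swapin(ptr);		(failure path)
 */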
2340
2341 static void
2342 __do_uncharge(struct mem_cgroup *mem, const enum charge_type ctype)
2343 {
2344         struct memcg_batch_info *batch = NULL;
2345         bool uncharge_memsw = true;
2346         /* If swapout, usage of swap doesn't decrease */
2347         if (!do_swap_account || ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
2348                 uncharge_memsw = false;
2349
2350         batch = &current->memcg_batch;
2351         /*
2352          * Usually, we do css_get() when we remember a memcg pointer.
2353          * But in this case, we keep res->usage until the end of a series of
2354          * uncharges. So it's ok to ignore the memcg's refcnt.
2355          */
2356         if (!batch->memcg)
2357                 batch->memcg = mem;
2358         /*
2359          * do_batch > 0 when unmapping pages or inode invalidate/truncate.
2360          * In those cases, all pages freed continuously can be expected to be
2361          * in the same cgroup, and we have a chance to coalesce uncharges.
2362          * But we uncharge one by one if this task is killed by OOM (TIF_MEMDIE),
2363          * because we want to uncharge as soon as possible.
2364          */
2365
2366         if (!batch->do_batch || test_thread_flag(TIF_MEMDIE))
2367                 goto direct_uncharge;
2368
2369         /*
2370          * In the typical case, batch->memcg == mem. This means we can
2371          * merge a series of uncharges into one res_counter uncharge.
2372          * If not, we uncharge the res_counter one by one.
2373          */
2374         if (batch->memcg != mem)
2375                 goto direct_uncharge;
2376         /* remember freed charge and uncharge it later */
2377         batch->bytes += PAGE_SIZE;
2378         if (uncharge_memsw)
2379                 batch->memsw_bytes += PAGE_SIZE;
2380         return;
2381 direct_uncharge:
2382         res_counter_uncharge(&mem->res, PAGE_SIZE);
2383         if (uncharge_memsw)
2384                 res_counter_uncharge(&mem->memsw, PAGE_SIZE);
2385         if (unlikely(batch->memcg != mem))
2386                 memcg_oom_recover(mem);
2387         return;
2388 }
2389
2390 /*
2391  * uncharge if !page_mapped(page)
2392  */
2393 static struct mem_cgroup *
2394 __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
2395 {
2396         struct page_cgroup *pc;
2397         struct mem_cgroup *mem = NULL;
2398
2399         if (mem_cgroup_disabled())
2400                 return NULL;
2401
2402         if (PageSwapCache(page))
2403                 return NULL;
2404
2405         /*
2406          * Check if our page_cgroup is valid
2407          */
2408         pc = lookup_page_cgroup(page);
2409         if (unlikely(!pc || !PageCgroupUsed(pc)))
2410                 return NULL;
2411
2412         lock_page_cgroup(pc);
2413
2414         mem = pc->mem_cgroup;
2415
2416         if (!PageCgroupUsed(pc))
2417                 goto unlock_out;
2418
2419         switch (ctype) {
2420         case MEM_CGROUP_CHARGE_TYPE_MAPPED:
2421         case MEM_CGROUP_CHARGE_TYPE_DROP:
2422                 /* See mem_cgroup_prepare_migration() */
2423                 if (page_mapped(page) || PageCgroupMigration(pc))
2424                         goto unlock_out;
2425                 break;
2426         case MEM_CGROUP_CHARGE_TYPE_SWAPOUT:
2427                 if (!PageAnon(page)) {  /* Shared memory */
2428                         if (page->mapping && !page_is_file_cache(page))
2429                                 goto unlock_out;
2430                 } else if (page_mapped(page)) /* Anon */
2431                                 goto unlock_out;
2432                 break;
2433         default:
2434                 break;
2435         }
2436
2437         mem_cgroup_charge_statistics(mem, pc, false);
2438
2439         ClearPageCgroupUsed(pc);
2440         /*
2441          * pc->mem_cgroup is not cleared here. It will be accessed when it's
2442          * freed from the LRU. This is safe because an uncharged page is
2443          * expected not to be reused (it is freed soon). The exception is
2444          * SwapCache, which is handled by special functions.
2445          */
2446
2447         unlock_page_cgroup(pc);
2448         /*
2449          * Even after unlock, we still hold mem->res.usage here, so this
2450          * memcg will never be freed.
2451          */
2452         memcg_check_events(mem, page);
2453         if (do_swap_account && ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT) {
2454                 mem_cgroup_swap_statistics(mem, true);
2455                 mem_cgroup_get(mem);
2456         }
2457         if (!mem_cgroup_is_root(mem))
2458                 __do_uncharge(mem, ctype);
2459
2460         return mem;
2461
2462 unlock_out:
2463         unlock_page_cgroup(pc);
2464         return NULL;
2465 }
2466
2467 void mem_cgroup_uncharge_page(struct page *page)
2468 {
2469         /* early check. */
2470         if (page_mapped(page))
2471                 return;
2472         if (page->mapping && !PageAnon(page))
2473                 return;
2474         __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_MAPPED);
2475 }
2476
2477 void mem_cgroup_uncharge_cache_page(struct page *page)
2478 {
2479         VM_BUG_ON(page_mapped(page));
2480         VM_BUG_ON(page->mapping);
2481         __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE);
2482 }
2483
2484 /*
2485  * Batch start/end is called from unmap_page_range/invalidate/truncate.
2486  * In those cases, pages are freed continuously and we can expect them to
2487  * be in the same memcg. Each of those callers itself limits the number of
2488  * pages freed at once, so uncharge_start/end() pairs up properly.
2489  * This may be called more than once (nested) in a context.
2490  */
2491
2492 void mem_cgroup_uncharge_start(void)
2493 {
2494         current->memcg_batch.do_batch++;
2495         /* Nesting is allowed. */
2496         if (current->memcg_batch.do_batch == 1) {
2497                 current->memcg_batch.memcg = NULL;
2498                 current->memcg_batch.bytes = 0;
2499                 current->memcg_batch.memsw_bytes = 0;
2500         }
2501 }
2502
2503 void mem_cgroup_uncharge_end(void)
2504 {
2505         struct memcg_batch_info *batch = &current->memcg_batch;
2506
2507         if (!batch->do_batch)
2508                 return;
2509
2510         batch->do_batch--;
2511         if (batch->do_batch) /* If stacked, do nothing. */
2512                 return;
2513
2514         if (!batch->memcg)
2515                 return;
2516         /*
2517          * This "batch->memcg" is valid without any css_get/put etc...
2518          * because we hide charges behind us.
2519          */
2520         if (batch->bytes)
2521                 res_counter_uncharge(&batch->memcg->res, batch->bytes);
2522         if (batch->memsw_bytes)
2523                 res_counter_uncharge(&batch->memcg->memsw, batch->memsw_bytes);
2524         memcg_oom_recover(batch->memcg);
2525         /* forget this pointer (for sanity check) */
2526         batch->memcg = NULL;
2527 }
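/*
 * A sketch of the bracketing the unmap/truncate paths are expected to do
 * around bulk frees (editor's illustration; the loop body stands in for
 * whatever actually frees the pages):
 *
 *	mem_cgroup_uncharge_start();
 *	for each page being freed:
 *		mem_cgroup_uncharge_page(page);	(or _cache_page())
 *	mem_cgroup_uncharge_end();	(one res_counter op for the whole batch)
 */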
2528
2529 #ifdef CONFIG_SWAP
2530 /*
2531  * Called after __delete_from_swap_cache(); drops the "page" account.
2532  * The memcg information is recorded in the swap_cgroup of "ent".
2533  */
2534 void
2535 mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
2536 {
2537         struct mem_cgroup *memcg;
2538         int ctype = MEM_CGROUP_CHARGE_TYPE_SWAPOUT;
2539
2540         if (!swapout) /* this was a swap cache but the swap is unused ! */
2541                 ctype = MEM_CGROUP_CHARGE_TYPE_DROP;
2542
2543         memcg = __mem_cgroup_uncharge_common(page, ctype);
2544
2545         /*
2546          * Record the memcg information; if swapout && memcg != NULL,
2547          * mem_cgroup_get() was called in uncharge().
2548          */
2549         if (do_swap_account && swapout && memcg)
2550                 swap_cgroup_record(ent, css_id(&memcg->css));
2551 }
2552 #endif
2553
2554 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
2555 /*
2556  * Called from swap_entry_free(). Removes the record in swap_cgroup and
2557  * uncharges the "memsw" account.
2558  */
2559 void mem_cgroup_uncharge_swap(swp_entry_t ent)
2560 {
2561         struct mem_cgroup *memcg;
2562         unsigned short id;
2563
2564         if (!do_swap_account)
2565                 return;
2566
2567         id = swap_cgroup_record(ent, 0);
2568         rcu_read_lock();
2569         memcg = mem_cgroup_lookup(id);
2570         if (memcg) {
2571                 /*
2572                  * We uncharge this because swap is freed.
2573                  * This memcg can be an obsolete one. We avoid calling css_tryget().
2574                  */
2575                 if (!mem_cgroup_is_root(memcg))
2576                         res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
2577                 mem_cgroup_swap_statistics(memcg, false);
2578                 mem_cgroup_put(memcg);
2579         }
2580         rcu_read_unlock();
2581 }
2582
2583 /**
2584  * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
2585  * @entry: swap entry to be moved
2586  * @from:  mem_cgroup which the entry is moved from
2587  * @to:  mem_cgroup which the entry is moved to
2588  * @need_fixup: whether we should fixup res_counters and refcounts.
2589  *
2590  * It succeeds only when the swap_cgroup's record for this entry is the same
2591  * as the mem_cgroup's id of @from.
2592  *
2593  * Returns 0 on success, -EINVAL on failure.
2594  *
2595  * The caller must have charged to @to, IOW, called res_counter_charge() about
2596  * both res and memsw, and called css_get().
2597  */
2598 static int mem_cgroup_move_swap_account(swp_entry_t entry,
2599                 struct mem_cgroup *from, struct mem_cgroup *to, bool need_fixup)
2600 {
2601         unsigned short old_id, new_id;
2602
2603         old_id = css_id(&from->css);
2604         new_id = css_id(&to->css);
2605
2606         if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
2607                 mem_cgroup_swap_statistics(from, false);
2608                 mem_cgroup_swap_statistics(to, true);
2609                 /*
2610                  * This function is only called from task migration context now.
2611                  * It postpones res_counter and refcount handling till the end
2612                  * of task migration(mem_cgroup_clear_mc()) for performance
2613                  * improvement. But we cannot postpone mem_cgroup_get(to)
2614                  * because if the process that has been moved to @to does
2615                  * swap-in, the refcount of @to might be decreased to 0.
2616                  */
2617                 mem_cgroup_get(to);
2618                 if (need_fixup) {
2619                         if (!mem_cgroup_is_root(from))
2620                                 res_counter_uncharge(&from->memsw, PAGE_SIZE);
2621                         mem_cgroup_put(from);
2622                         /*
2623                          * we charged both to->res and to->memsw, so we should
2624                          * uncharge to->res.
2625                          */
2626                         if (!mem_cgroup_is_root(to))
2627                                 res_counter_uncharge(&to->res, PAGE_SIZE);
2628                 }
2629                 return 0;
2630         }
2631         return -EINVAL;
2632 }
2633 #else
2634 static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
2635                 struct mem_cgroup *from, struct mem_cgroup *to, bool need_fixup)
2636 {
2637         return -EINVAL;
2638 }
2639 #endif
2640
2641 /*
2642  * Before starting migration, account PAGE_SIZE to mem_cgroup that the old
2643  * page belongs to.
2644  */
2645 int mem_cgroup_prepare_migration(struct page *page,
2646         struct page *newpage, struct mem_cgroup **ptr)
2647 {
2648         struct page_cgroup *pc;
2649         struct mem_cgroup *mem = NULL;
2650         enum charge_type ctype;
2651         int ret = 0;
2652
2653         if (mem_cgroup_disabled())
2654                 return 0;
2655
2656         pc = lookup_page_cgroup(page);
2657         lock_page_cgroup(pc);
2658         if (PageCgroupUsed(pc)) {
2659                 mem = pc->mem_cgroup;
2660                 css_get(&mem->css);
2661                 /*
2662                  * When migrating an anonymous page, its mapcount goes down
2663                  * to 0 and uncharge() will be called. But even if it's fully
2664                  * unmapped, migration may fail and the page has to be
2665                  * charged again. We set the MIGRATION flag here and delay
2666                  * the uncharge until end_migration() is called
2667                  *
2668                  * Corner Case Thinking
2669                  * A)
2670                  * When the old page was mapped as Anon and it's unmap-and-freed
2671                  * while migration was ongoing.
2672                  * If unmap finds the old page, uncharge() of it will be delayed
2673                  * until end_migration(). If unmap finds a new page, it's
2674                  * uncharged when it make mapcount to be 1->0. If unmap code
2675                  * finds swap_migration_entry, the new page will not be mapped
2676                  * and end_migration() will find it(mapcount==0).
2677                  *
2678                  * B)
2679                  * When the old page was mapped but migraion fails, the kernel
2680                  * remaps it. A charge for it is kept by MIGRATION flag even
2681                  * if mapcount goes down to 0. We can do remap successfully
2682                  * without charging it again.
2683                  *
2684                  * C)
2685                  * The "old" page is under lock_page() until the end of
2686                  * migration, so, the old page itself will not be swapped-out.
2687                  * If the new page is swapped out before end_migration, our
2688                  * hook to usual swap-out path will catch the event.
2689                  */
2690                 if (PageAnon(page))
2691                         SetPageCgroupMigration(pc);
2692         }
2693         unlock_page_cgroup(pc);
2694         /*
2695          * If the page is not charged at this point,
2696          * we return here.
2697          */
2698         if (!mem)
2699                 return 0;
2700
2701         *ptr = mem;
2702         ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, ptr, false);
2703         css_put(&mem->css);/* drop extra refcnt */
2704         if (ret || *ptr == NULL) {
2705                 if (PageAnon(page)) {
2706                         lock_page_cgroup(pc);
2707                         ClearPageCgroupMigration(pc);
2708                         unlock_page_cgroup(pc);
2709                         /*
2710                          * The old page may be fully unmapped while we kept it.
2711                          */
2712                         mem_cgroup_uncharge_page(page);
2713                 }
2714                 return -ENOMEM;
2715         }
2716         /*
2717          * We charge the new page before it's used/mapped, so even if
2718          * unlock_page() is called before end_migration, we can catch all
2719          * events on this new page. If the new page is migrated but never
2720          * remapped, its mapcount ends up 0 and end_migration() uncharges it.
2721          */
2722         pc = lookup_page_cgroup(newpage);
2723         if (PageAnon(page))
2724                 ctype = MEM_CGROUP_CHARGE_TYPE_MAPPED;
2725         else if (page_is_file_cache(page))
2726                 ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
2727         else
2728                 ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
2729         __mem_cgroup_commit_charge(mem, pc, ctype);
2730         return ret;
2731 }
2732
2733 /* remove redundant charge if migration failed*/
2734 void mem_cgroup_end_migration(struct mem_cgroup *mem,
2735         struct page *oldpage, struct page *newpage)
2736 {
2737         struct page *used, *unused;
2738         struct page_cgroup *pc;
2739
2740         if (!mem)
2741                 return;
2742         /* blocks rmdir() */
2743         cgroup_exclude_rmdir(&mem->css);
2744         /* at migration success, oldpage->mapping is NULL. */
2745         if (oldpage->mapping) {
2746                 used = oldpage;
2747                 unused = newpage;
2748         } else {
2749                 used = newpage;
2750                 unused = oldpage;
2751         }
2752         /*
2753          * We disallowed uncharge of pages under migration because mapcount
2754          * of the page temporarily goes down to zero.
2755          * Clear the flag and check whether the page should stay charged.
2756          */
2757         pc = lookup_page_cgroup(oldpage);
2758         lock_page_cgroup(pc);
2759         ClearPageCgroupMigration(pc);
2760         unlock_page_cgroup(pc);
2761
2762         __mem_cgroup_uncharge_common(unused, MEM_CGROUP_CHARGE_TYPE_FORCE);
2763
2764         /*
2765          * If a page is a file cache, radix-tree replacement is atomic
2766          * and we can skip this check. When it was an Anon page, its mapcount
2767          * goes down to 0. But because we added the MIGRATION flag, it's not
2768          * uncharged yet. There are several cases, but the page->mapcount
2769          * check and the USED bit check in mem_cgroup_uncharge_page() do
2770          * enough checking. (See mem_cgroup_prepare_migration() as well.)
2771          */
2772         if (PageAnon(used))
2773                 mem_cgroup_uncharge_page(used);
2774         /*
2775          * At migration, we may charge against a cgroup which has no
2776          * tasks.
2777          * So, rmdir()->pre_destroy() can be called while we do this charge.
2778          * In that case, we need to call pre_destroy() again. Check it here.
2779          */
2780         cgroup_release_and_wakeup_rmdir(&mem->css);
2781 }
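/*
 * A sketch of the migration bracketing as seen from mm/migrate.c (editor's
 * illustration, cf. unmap_and_move(); error handling elided):
 *
 *	charge = mem_cgroup_prepare_migration(page, newpage, &mem);
 *	if (charge == -ENOMEM)
 *		goto unlock;	(cannot account the new page)
 *	...migrate page contents and remap...
 *	mem_cgroup_end_migration(mem, page, newpage);
 */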
2782
2783 /*
2784  * A call to try to shrink memory usage on a charge failure at shmem swapin.
2785  * Calling hierarchical_reclaim is not enough because we should update
2786  * last_oom_jiffies to prevent pagefault_out_of_memory from invoking global OOM.
2787  * Moreover, considering the hierarchy, we should reclaim from the
2788  * mem_over_limit, not from the memcg which this page would be charged to.
2789  * try_charge_swapin does all of this work properly.
2790  */
2791 int mem_cgroup_shmem_charge_fallback(struct page *page,
2792                             struct mm_struct *mm,
2793                             gfp_t gfp_mask)
2794 {
2795         struct mem_cgroup *mem = NULL;
2796         int ret;
2797
2798         if (mem_cgroup_disabled())
2799                 return 0;
2800
2801         ret = mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &mem);
2802         if (!ret)
2803                 mem_cgroup_cancel_charge_swapin(mem); /* it does !mem check */
2804
2805         return ret;
2806 }
2807
2808 static DEFINE_MUTEX(set_limit_mutex);
2809
2810 static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
2811                                 unsigned long long val)
2812 {
2813         int retry_count;
2814         u64 memswlimit, memlimit;
2815         int ret = 0;
2816         int children = mem_cgroup_count_children(memcg);
2817         u64 curusage, oldusage;
2818         int enlarge;
2819
2820         /*
2821          * To keep hierarchical_reclaim simple, how long we should retry
2822          * depends on the caller. We set our retry count to be a function
2823          * of the number of children we should visit in this loop.
2824          */
2825         retry_count = MEM_CGROUP_RECLAIM_RETRIES * children;
2826
2827         oldusage = res_counter_read_u64(&memcg->res, RES_USAGE);
2828
2829         enlarge = 0;
2830         while (retry_count) {
2831                 if (signal_pending(current)) {
2832                         ret = -EINTR;
2833                         break;
2834                 }
2835                 /*
2836                  * Rather than hide it all in some function, do this in an
2837                  * open-coded manner so you can see what it really does.
2838                  * We have to guarantee mem->res.limit <= mem->memsw.limit.
2839                  */
2840                 mutex_lock(&set_limit_mutex);
2841                 memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
2842                 if (memswlimit < val) {
2843                         ret = -EINVAL;
2844                         mutex_unlock(&set_limit_mutex);
2845                         break;
2846                 }
2847
2848                 memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
2849                 if (memlimit < val)
2850                         enlarge = 1;
2851
2852                 ret = res_counter_set_limit(&memcg->res, val);
2853                 if (!ret) {
2854                         if (memswlimit == val)
2855                                 memcg->memsw_is_minimum = true;
2856                         else
2857                                 memcg->memsw_is_minimum = false;
2858                 }
2859                 mutex_unlock(&set_limit_mutex);
2860
2861                 if (!ret)
2862                         break;
2863
2864                 mem_cgroup_hierarchical_reclaim(memcg, NULL, GFP_KERNEL,
2865                                                 MEM_CGROUP_RECLAIM_SHRINK);
2866                 curusage = res_counter_read_u64(&memcg->res, RES_USAGE);
2867                 /* Usage is reduced ? */
2868                 if (curusage >= oldusage)
2869                         retry_count--;
2870                 else
2871                         oldusage = curusage;
2872         }
2873         if (!ret && enlarge)
2874                 memcg_oom_recover(memcg);
2875
2876         return ret;
2877 }
2878
2879 static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
2880                                         unsigned long long val)
2881 {
2882         int retry_count;
2883         u64 memlimit, memswlimit, oldusage, curusage;
2884         int children = mem_cgroup_count_children(memcg);
2885         int ret = -EBUSY;
2886         int enlarge = 0;
2887
2888         /* see mem_cgroup_resize_limit() */
2889         retry_count = children * MEM_CGROUP_RECLAIM_RETRIES;
2890         oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
2891         while (retry_count) {
2892                 if (signal_pending(current)) {
2893                         ret = -EINTR;
2894                         break;
2895                 }
2896                 /*
2897                  * Rather than hide it all in some function, do this in an
2898                  * open-coded manner so you can see what it really does.
2899                  * We have to guarantee mem->res.limit <= mem->memsw.limit.
2900                  */
2901                 mutex_lock(&set_limit_mutex);
2902                 memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
2903                 if (memlimit > val) {
2904                         ret = -EINVAL;
2905                         mutex_unlock(&set_limit_mutex);
2906                         break;
2907                 }
2908                 memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
2909                 if (memswlimit < val)
2910                         enlarge = 1;
2911                 ret = res_counter_set_limit(&memcg->memsw, val);
2912                 if (!ret) {
2913                         if (memlimit == val)
2914                                 memcg->memsw_is_minimum = true;
2915                         else
2916                                 memcg->memsw_is_minimum = false;
2917                 }
2918                 mutex_unlock(&set_limit_mutex);
2919
2920                 if (!ret)
2921                         break;
2922
2923                 mem_cgroup_hierarchical_reclaim(memcg, NULL, GFP_KERNEL,
2924                                                 MEM_CGROUP_RECLAIM_NOSWAP |
2925                                                 MEM_CGROUP_RECLAIM_SHRINK);
2926                 curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
2927                 /* Usage is reduced ? */
2928                 if (curusage >= oldusage)
2929                         retry_count--;
2930                 else
2931                         oldusage = curusage;
2932         }
2933         if (!ret && enlarge)
2934                 memcg_oom_recover(memcg);
2935         return ret;
2936 }
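/*
 * Both resize functions back the per-cgroup limit files. From userspace the
 * sequence looks roughly like this (editor's illustration; the mount point
 * and group name are hypothetical):
 *
 *	# echo 64M  > /cgroups/memory/grp/memory.limit_in_bytes
 *	# echo 128M > /cgroups/memory/grp/memory.memsw.limit_in_bytes
 *
 * The first file reaches mem_cgroup_resize_limit(), the memsw file reaches
 * mem_cgroup_resize_memsw_limit(); the -EINVAL checks above enforce
 * res.limit <= memsw.limit.
 */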
2937
2938 unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
2939                                             gfp_t gfp_mask)
2940 {
2941         unsigned long nr_reclaimed = 0;
2942         struct mem_cgroup_per_zone *mz, *next_mz = NULL;
2943         unsigned long reclaimed;
2944         int loop = 0;
2945         struct mem_cgroup_tree_per_zone *mctz;
2946         unsigned long long excess;
2947
2948         if (order > 0)
2949                 return 0;
2950
2951         mctz = soft_limit_tree_node_zone(zone_to_nid(zone), zone_idx(zone));
2952         /*
2953          * This loop can run for a while, especially if mem_cgroups
2954          * continuously keep exceeding their soft limit, putting the
2955          * system under pressure.
2956          */
2957         do {
2958                 if (next_mz)
2959                         mz = next_mz;
2960                 else
2961                         mz = mem_cgroup_largest_soft_limit_node(mctz);
2962                 if (!mz)
2963                         break;
2964
2965                 reclaimed = mem_cgroup_hierarchical_reclaim(mz->mem, zone,
2966                                                 gfp_mask,
2967                                                 MEM_CGROUP_RECLAIM_SOFT);
2968                 nr_reclaimed += reclaimed;
2969                 spin_lock(&mctz->lock);
2970
2971                 /*
2972                  * If we failed to reclaim anything from this memory cgroup
2973                  * it is time to move on to the next cgroup
2974                  */
2975                 next_mz = NULL;
2976                 if (!reclaimed) {
2977                         do {
2978                                 /*
2979                                  * Loop until we find yet another one.
2980                                  *
2981                                  * By the time we get the soft_limit lock
2982                                  * again, someone might have added the
2983                                  * group back on the RB tree. Iterate to
2984                                  * make sure we get a different mem.
2985                                  * mem_cgroup_largest_soft_limit_node returns
2986                                  * NULL if no other cgroup is present in
2987                                  * the tree
2988                                  */
2989                                 next_mz =
2990                                 __mem_cgroup_largest_soft_limit_node(mctz);
2991                                 if (next_mz == mz) {
2992                                         css_put(&next_mz->mem->css);
2993                                         next_mz = NULL;
2994                                 } else /* next_mz == NULL or other memcg */
2995                                         break;
2996                         } while (1);
2997                 }
2998                 __mem_cgroup_remove_exceeded(mz->mem, mz, mctz);
2999                 excess = res_counter_soft_limit_excess(&mz->mem->res);
3000                 /*
3001                  * One school of thought says that we should not add
3002                  * back the node to the tree if reclaim returns 0.
3003                  * But our reclaim could return 0 simply because, due
3004                  * to priority, we are exposing a smaller subset of
3005                  * memory to reclaim from. Consider this as a longer
3006                  * term TODO.
3007                  */
3008                 /* If excess == 0, no tree ops */
3009                 __mem_cgroup_insert_exceeded(mz->mem, mz, mctz, excess);
3010                 spin_unlock(&mctz->lock);
3011                 css_put(&mz->mem->css);
3012                 loop++;
3013                 /*
3014                  * Could not reclaim anything and there are no more
3015                  * mem cgroups to try or we seem to be looping without
3016                  * reclaiming anything.
3017                  */
3018                 if (!nr_reclaimed &&
3019                         (next_mz == NULL ||
3020                         loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
3021                         break;
3022         } while (!nr_reclaimed);
3023         if (next_mz)
3024                 css_put(&next_mz->mem->css);
3025         return nr_reclaimed;
3026 }
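
/*
 * Note on the loop above: each pass picks the memcg with the largest
 * soft-limit excess, reclaims from it and re-inserts it keyed by its new
 * excess (usage minus soft limit, in bytes); an excess of 0 means no tree
 * ops. The loop ends once something was reclaimed, or when no candidate
 * is left or too many fruitless iterations have passed.
 */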
3027
3028 /*
3029  * This routine traverses the page_cgroups on the given list and drops them all.
3030  * *And* it doesn't reclaim the pages themselves; it just removes the page_cgroups.
3031  */
3032 static int mem_cgroup_force_empty_list(struct mem_cgroup *mem,
3033                                 int node, int zid, enum lru_list lru)
3034 {
3035         struct zone *zone;
3036         struct mem_cgroup_per_zone *mz;
3037         struct page_cgroup *pc, *busy;
3038         unsigned long flags, loop;
3039         struct list_head *list;
3040         int ret = 0;
3041
3042         zone = &NODE_DATA(node)->node_zones[zid];
3043         mz = mem_cgroup_zoneinfo(mem, node, zid);
3044         list = &mz->lists[lru];
3045
3046         loop = MEM_CGROUP_ZSTAT(mz, lru);
3047         /* give some margin against -EBUSY etc... */
3048         loop += 256;
3049         busy = NULL;
3050         while (loop--) {
3051                 ret = 0;
3052                 spin_lock_irqsave(&zone->lru_lock, flags);
3053                 if (list_empty(list)) {
3054                         spin_unlock_irqrestore(&zone->lru_lock, flags);
3055                         break;
3056                 }
3057                 pc = list_entry(list->prev, struct page_cgroup, lru);
3058                 if (busy == pc) {
3059                         list_move(&pc->lru, list);
3060                         busy = NULL;
3061                         spin_unlock_irqrestore(&zone->lru_lock, flags);
3062                         continue;
3063                 }
3064                 spin_unlock_irqrestore(&zone->lru_lock, flags);
3065
3066                 ret = mem_cgroup_move_parent(pc, mem, GFP_KERNEL);
3067                 if (ret == -ENOMEM)
3068                         break;
3069
3070                 if (ret == -EBUSY || ret == -EINVAL) {
3071                         /* found lock contention or "pc" is obsolete. */
3072                         busy = pc;
3073                         cond_resched();
3074                 } else
3075                         busy = NULL;
3076         }
3077
3078         if (!ret && !list_empty(list))
3079                 return -EBUSY;
3080         return ret;
3081 }
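
/*
 * Note on the "busy" handling above: when mem_cgroup_move_parent() returns
 * -EBUSY or -EINVAL, the page_cgroup is remembered in "busy"; if the same
 * entry shows up again at the tail of the list, it is rotated to the head
 * so that the scan keeps making progress instead of spinning on one entry.
 */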
3082
3083 /*
3084  * Make the mem_cgroup's charge 0 if it contains no tasks.
3085  * This enables deleting the mem_cgroup.
3086  */
3087 static int mem_cgroup_force_empty(struct mem_cgroup *mem, bool free_all)
3088 {
3089         int ret;
3090         int node, zid, shrink;
3091         int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
3092         struct cgroup *cgrp = mem->css.cgroup;
3093
3094         css_get(&mem->css);
3095
3096         shrink = 0;
3097         /* should free all ? */
3098         if (free_all)
3099                 goto try_to_free;
3100 move_account:
3101         do {
3102                 ret = -EBUSY;
3103                 if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children))
3104                         goto out;
3105                 ret = -EINTR;
3106                 if (signal_pending(current))
3107                         goto out;
3108                 /* This is for making sure all *used* pages are on the LRU lists. */
3109                 lru_add_drain_all();
3110                 drain_all_stock_sync();
3111                 ret = 0;
3112                 mem_cgroup_start_move(mem);
3113                 for_each_node_state(node, N_HIGH_MEMORY) {
3114                         for (zid = 0; !ret && zid < MAX_NR_ZONES; zid++) {
3115                                 enum lru_list l;
3116                                 for_each_lru(l) {
3117                                         ret = mem_cgroup_force_empty_list(mem,
3118                                                         node, zid, l);
3119                                         if (ret)
3120                                                 break;
3121                                 }
3122                         }
3123                         if (ret)
3124                                 break;
3125                 }
3126                 mem_cgroup_end_move(mem);
3127                 memcg_oom_recover(mem);
3128                 /* it seems the parent cgroup doesn't have enough memory */
3129                 if (ret == -ENOMEM)
3130                         goto try_to_free;
3131                 cond_resched();
3132         /* "ret" should also be checked to ensure all lists are empty. */
3133         } while (mem->res.usage > 0 || ret);
3134 out:
3135         css_put(&mem->css);
3136         return ret;
3137
3138 try_to_free:
3139         /* returns -EBUSY if there is a task or if we come here twice. */
3140         if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children) || shrink) {
3141                 ret = -EBUSY;
3142                 goto out;
3143         }
3144         /* we call try-to-free pages to make this cgroup empty */
3145         lru_add_drain_all();
3146         /* try to free all pages in this cgroup */
3147         shrink = 1;
3148         while (nr_retries && mem->res.usage > 0) {
3149                 int progress;
3150
3151                 if (signal_pending(current)) {
3152                         ret = -EINTR;
3153                         goto out;
3154                 }
3155                 progress = try_to_free_mem_cgroup_pages(mem, GFP_KERNEL,
3156                                                 false, get_swappiness(mem));
3157                 if (!progress) {
3158                         nr_retries--;
3159                         /* maybe some writeback is necessary */
3160                         congestion_wait(BLK_RW_ASYNC, HZ/10);
3161                 }
3162
3163         }
3164         lru_add_drain();
3165         /* try move_account...there may be some *locked* pages. */
3166         goto move_account;
3167 }
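
/*
 * Usage sketch (assuming the usual cgroupfs layout): writing any value to
 * a group's memory.force_empty file lands here with free_all == true via
 * mem_cgroup_force_empty_write() below, e.g.:
 *
 *	echo 0 > /cgroups/memory/<group>/memory.force_empty
 */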
3168
3169 int mem_cgroup_force_empty_write(struct cgroup *cont, unsigned int event)
3170 {
3171         return mem_cgroup_force_empty(mem_cgroup_from_cont(cont), true);
3172 }
3173
3174
3175 static u64 mem_cgroup_hierarchy_read(struct cgroup *cont, struct cftype *cft)
3176 {
3177         return mem_cgroup_from_cont(cont)->use_hierarchy;
3178 }
3179
3180 static int mem_cgroup_hierarchy_write(struct cgroup *cont, struct cftype *cft,
3181                                         u64 val)
3182 {
3183         int retval = 0;
3184         struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
3185         struct cgroup *parent = cont->parent;
3186         struct mem_cgroup *parent_mem = NULL;
3187
3188         if (parent)
3189                 parent_mem = mem_cgroup_from_cont(parent);
3190
3191         cgroup_lock();
3192         /*
3193          * If parent's use_hierarchy is set, we can't make any modifications
3194          * in the child subtrees. If it is unset, then the change can
3195          * occur, provided the current cgroup has no children.
3196          *
3197          * For the root cgroup, parent_mem is NULL; we allow the value
3198          * to be set if there are no children.
3199          */
3200         if ((!parent_mem || !parent_mem->use_hierarchy) &&
3201                                 (val == 1 || val == 0)) {
3202                 if (list_empty(&cont->children))
3203                         mem->use_hierarchy = val;
3204                 else
3205                         retval = -EBUSY;
3206         } else
3207                 retval = -EINVAL;
3208         cgroup_unlock();
3209
3210         return retval;
3211 }
3212
3213
3214 static u64 mem_cgroup_get_recursive_idx_stat(struct mem_cgroup *mem,
3215                                 enum mem_cgroup_stat_index idx)
3216 {
3217         struct mem_cgroup *iter;
3218         s64 val = 0;
3219
3220         /* each per-cpu value can be negative, so use s64 */
3221         for_each_mem_cgroup_tree(iter, mem)
3222                 val += mem_cgroup_read_stat(iter, idx);
3223
3224         if (val < 0) /* race ? */
3225                 val = 0;
3226         return val;
3227 }
3228
3229 static inline u64 mem_cgroup_usage(struct mem_cgroup *mem, bool swap)
3230 {
3231         u64 val;
3232
3233         if (!mem_cgroup_is_root(mem)) {
3234                 if (!swap)
3235                         return res_counter_read_u64(&mem->res, RES_USAGE);
3236                 else
3237                         return res_counter_read_u64(&mem->memsw, RES_USAGE);
3238         }
3239
3240         val = mem_cgroup_get_recursive_idx_stat(mem, MEM_CGROUP_STAT_CACHE);
3241         val += mem_cgroup_get_recursive_idx_stat(mem, MEM_CGROUP_STAT_RSS);
3242
3243         if (swap)
3244                 val += mem_cgroup_get_recursive_idx_stat(mem,
3245                                 MEM_CGROUP_STAT_SWAPOUT);
3246
3247         return val << PAGE_SHIFT;
3248 }
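
/*
 * Note: charges against the root cgroup are not accounted in its
 * res_counters, so its usage is reconstructed above from the hierarchical
 * CACHE/RSS (plus SWAPOUT for memsw) statistics; the << PAGE_SHIFT turns
 * the page count into bytes.
 */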
3249
3250 static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft)
3251 {
3252         struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
3253         u64 val;
3254         int type, name;
3255
3256         type = MEMFILE_TYPE(cft->private);
3257         name = MEMFILE_ATTR(cft->private);
3258         switch (type) {
3259         case _MEM:
3260                 if (name == RES_USAGE)
3261                         val = mem_cgroup_usage(mem, false);
3262                 else
3263                         val = res_counter_read_u64(&mem->res, name);
3264                 break;
3265         case _MEMSWAP:
3266                 if (name == RES_USAGE)
3267                         val = mem_cgroup_usage(mem, true);
3268                 else
3269                         val = res_counter_read_u64(&mem->memsw, name);
3270                 break;
3271         default:
3272                 BUG();
3273                 break;
3274         }
3275         return val;
3276 }
3277 /*
3278  * The users of this function are the write handlers for
3279  * RES_LIMIT and RES_SOFT_LIMIT.
3280  */
3281 static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
3282                             const char *buffer)
3283 {
3284         struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
3285         int type, name;
3286         unsigned long long val;
3287         int ret;
3288
3289         type = MEMFILE_TYPE(cft->private);
3290         name = MEMFILE_ATTR(cft->private);
3291         switch (name) {
3292         case RES_LIMIT:
3293                 if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
3294                         ret = -EINVAL;
3295                         break;
3296                 }
3297                 /* This function does all the necessary parsing...reuse it */
3298                 ret = res_counter_memparse_write_strategy(buffer, &val);
3299                 if (ret)
3300                         break;
3301                 if (type == _MEM)
3302                         ret = mem_cgroup_resize_limit(memcg, val);
3303                 else
3304                         ret = mem_cgroup_resize_memsw_limit(memcg, val);
3305                 break;
3306         case RES_SOFT_LIMIT:
3307                 ret = res_counter_memparse_write_strategy(buffer, &val);
3308                 if (ret)
3309                         break;
3310                 /*
3311                  * For memsw, soft limits are hard to implement in terms
3312                  * of semantics; for now, we support soft limits only
3313                  * for memory control without swap.
3314                  */
3315                 if (type == _MEM)
3316                         ret = res_counter_set_soft_limit(&memcg->res, val);
3317                 else
3318                         ret = -EINVAL;
3319                 break;
3320         default:
3321                 ret = -EINVAL; /* should be BUG() ? */
3322                 break;
3323         }
3324         return ret;
3325 }
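
/*
 * The cft->private values decoded above pack a resource type (_MEM,
 * _MEMSWAP, _OOM_TYPE) together with an attribute (RES_USAGE, RES_LIMIT,
 * ...) via MEMFILE_PRIVATE(), defined earlier in this file, so that one
 * read or write handler can serve all per-memcg control files.
 */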
3326
3327 static void memcg_get_hierarchical_limit(struct mem_cgroup *memcg,
3328                 unsigned long long *mem_limit, unsigned long long *memsw_limit)
3329 {
3330         struct cgroup *cgroup;
3331         unsigned long long min_limit, min_memsw_limit, tmp;
3332
3333         min_limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
3334         min_memsw_limit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
3335         cgroup = memcg->css.cgroup;
3336         if (!memcg->use_hierarchy)
3337                 goto out;
3338
3339         while (cgroup->parent) {
3340                 cgroup = cgroup->parent;
3341                 memcg = mem_cgroup_from_cont(cgroup);
3342                 if (!memcg->use_hierarchy)
3343                         break;
3344                 tmp = res_counter_read_u64(&memcg->res, RES_LIMIT);
3345                 min_limit = min(min_limit, tmp);
3346                 tmp = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
3347                 min_memsw_limit = min(min_memsw_limit, tmp);
3348         }
3349 out:
3350         *mem_limit = min_limit;
3351         *memsw_limit = min_memsw_limit;
3352         return;
3353 }
3354
3355 static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
3356 {
3357         struct mem_cgroup *mem;
3358         int type, name;
3359
3360         mem = mem_cgroup_from_cont(cont);
3361         type = MEMFILE_TYPE(event);
3362         name = MEMFILE_ATTR(event);
3363         switch (name) {
3364         case RES_MAX_USAGE:
3365                 if (type == _MEM)
3366                         res_counter_reset_max(&mem->res);
3367                 else
3368                         res_counter_reset_max(&mem->memsw);
3369                 break;
3370         case RES_FAILCNT:
3371                 if (type == _MEM)
3372                         res_counter_reset_failcnt(&mem->res);
3373                 else
3374                         res_counter_reset_failcnt(&mem->memsw);
3375                 break;
3376         }
3377
3378         return 0;
3379 }
3380
3381 static u64 mem_cgroup_move_charge_read(struct cgroup *cgrp,
3382                                         struct cftype *cft)
3383 {
3384         return mem_cgroup_from_cont(cgrp)->move_charge_at_immigrate;
3385 }
3386
3387 #ifdef CONFIG_MMU
3388 static int mem_cgroup_move_charge_write(struct cgroup *cgrp,
3389                                         struct cftype *cft, u64 val)
3390 {
3391         struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);
3392
3393         if (val >= (1 << NR_MOVE_TYPE))
3394                 return -EINVAL;
3395         /*
3396          * We check this value several times in both can_attach() and
3397          * attach(), so we need the cgroup lock to prevent this value
3398          * from becoming inconsistent.
3399          */
3400         cgroup_lock();
3401         mem->move_charge_at_immigrate = val;
3402         cgroup_unlock();
3403
3404         return 0;
3405 }
3406 #else
3407 static int mem_cgroup_move_charge_write(struct cgroup *cgrp,
3408                                         struct cftype *cft, u64 val)
3409 {
3410         return -ENOSYS;
3411 }
3412 #endif
3413
3414
3415 /* For reading statistics */
3416 enum {
3417         MCS_CACHE,
3418         MCS_RSS,
3419         MCS_FILE_MAPPED,
3420         MCS_PGPGIN,
3421         MCS_PGPGOUT,
3422         MCS_SWAP,
3423         MCS_INACTIVE_ANON,
3424         MCS_ACTIVE_ANON,
3425         MCS_INACTIVE_FILE,
3426         MCS_ACTIVE_FILE,
3427         MCS_UNEVICTABLE,
3428         NR_MCS_STAT,
3429 };
3430
3431 struct mcs_total_stat {
3432         s64 stat[NR_MCS_STAT];
3433 };
3434
3435 struct {
3436         char *local_name;
3437         char *total_name;
3438 } memcg_stat_strings[NR_MCS_STAT] = {
3439         {"cache", "total_cache"},
3440         {"rss", "total_rss"},
3441         {"mapped_file", "total_mapped_file"},
3442         {"pgpgin", "total_pgpgin"},
3443         {"pgpgout", "total_pgpgout"},
3444         {"swap", "total_swap"},
3445         {"inactive_anon", "total_inactive_anon"},
3446         {"active_anon", "total_active_anon"},
3447         {"inactive_file", "total_inactive_file"},
3448         {"active_file", "total_active_file"},
3449         {"unevictable", "total_unevictable"}
3450 };
3451
3452
3453 static void
3454 mem_cgroup_get_local_stat(struct mem_cgroup *mem, struct mcs_total_stat *s)
3455 {
3456         s64 val;
3457
3458         /* per cpu stat */
3459         val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_CACHE);
3460         s->stat[MCS_CACHE] += val * PAGE_SIZE;
3461         val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_RSS);
3462         s->stat[MCS_RSS] += val * PAGE_SIZE;
3463         val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_FILE_MAPPED);
3464         s->stat[MCS_FILE_MAPPED] += val * PAGE_SIZE;
3465         val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_PGPGIN_COUNT);
3466         s->stat[MCS_PGPGIN] += val;
3467         val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_PGPGOUT_COUNT);
3468         s->stat[MCS_PGPGOUT] += val;
3469         if (do_swap_account) {
3470                 val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_SWAPOUT);
3471                 s->stat[MCS_SWAP] += val * PAGE_SIZE;
3472         }
3473
3474         /* per zone stat */
3475         val = mem_cgroup_get_local_zonestat(mem, LRU_INACTIVE_ANON);
3476         s->stat[MCS_INACTIVE_ANON] += val * PAGE_SIZE;
3477         val = mem_cgroup_get_local_zonestat(mem, LRU_ACTIVE_ANON);
3478         s->stat[MCS_ACTIVE_ANON] += val * PAGE_SIZE;
3479         val = mem_cgroup_get_local_zonestat(mem, LRU_INACTIVE_FILE);
3480         s->stat[MCS_INACTIVE_FILE] += val * PAGE_SIZE;
3481         val = mem_cgroup_get_local_zonestat(mem, LRU_ACTIVE_FILE);
3482         s->stat[MCS_ACTIVE_FILE] += val * PAGE_SIZE;
3483         val = mem_cgroup_get_local_zonestat(mem, LRU_UNEVICTABLE);
3484         s->stat[MCS_UNEVICTABLE] += val * PAGE_SIZE;
3485 }
3486
3487 static void
3488 mem_cgroup_get_total_stat(struct mem_cgroup *mem, struct mcs_total_stat *s)
3489 {
3490         struct mem_cgroup *iter;
3491
3492         for_each_mem_cgroup_tree(iter, mem)
3493                 mem_cgroup_get_local_stat(iter, s);
3494 }
3495
3496 static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
3497                                  struct cgroup_map_cb *cb)
3498 {
3499         struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont);
3500         struct mcs_total_stat mystat;
3501         int i;
3502
3503         memset(&mystat, 0, sizeof(mystat));
3504         mem_cgroup_get_local_stat(mem_cont, &mystat);
3505
3506         for (i = 0; i < NR_MCS_STAT; i++) {
3507                 if (i == MCS_SWAP && !do_swap_account)
3508                         continue;
3509                 cb->fill(cb, memcg_stat_strings[i].local_name, mystat.stat[i]);
3510         }
3511
3512         /* Hierarchical information */
3513         {
3514                 unsigned long long limit, memsw_limit;
3515                 memcg_get_hierarchical_limit(mem_cont, &limit, &memsw_limit);
3516                 cb->fill(cb, "hierarchical_memory_limit", limit);
3517                 if (do_swap_account)
3518                         cb->fill(cb, "hierarchical_memsw_limit", memsw_limit);
3519         }
3520
3521         memset(&mystat, 0, sizeof(mystat));
3522         mem_cgroup_get_total_stat(mem_cont, &mystat);
3523         for (i = 0; i < NR_MCS_STAT; i++) {
3524                 if (i == MCS_SWAP && !do_swap_account)
3525                         continue;
3526                 cb->fill(cb, memcg_stat_strings[i].total_name, mystat.stat[i]);
3527         }
3528
3529 #ifdef CONFIG_DEBUG_VM
3530         cb->fill(cb, "inactive_ratio", calc_inactive_ratio(mem_cont, NULL));
3531
3532         {
3533                 int nid, zid;
3534                 struct mem_cgroup_per_zone *mz;
3535                 unsigned long recent_rotated[2] = {0, 0};
3536                 unsigned long recent_scanned[2] = {0, 0};
3537
3538                 for_each_online_node(nid)
3539                         for (zid = 0; zid < MAX_NR_ZONES; zid++) {
3540                                 mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
3541
3542                                 recent_rotated[0] +=
3543                                         mz->reclaim_stat.recent_rotated[0];
3544                                 recent_rotated[1] +=
3545                                         mz->reclaim_stat.recent_rotated[1];
3546                                 recent_scanned[0] +=
3547                                         mz->reclaim_stat.recent_scanned[0];
3548                                 recent_scanned[1] +=
3549                                         mz->reclaim_stat.recent_scanned[1];
3550                         }
3551                 cb->fill(cb, "recent_rotated_anon", recent_rotated[0]);
3552                 cb->fill(cb, "recent_rotated_file", recent_rotated[1]);
3553                 cb->fill(cb, "recent_scanned_anon", recent_scanned[0]);
3554                 cb->fill(cb, "recent_scanned_file", recent_scanned[1]);
3555         }
3556 #endif
3557
3558         return 0;
3559 }
3560
3561 static u64 mem_cgroup_swappiness_read(struct cgroup *cgrp, struct cftype *cft)
3562 {
3563         struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
3564
3565         return get_swappiness(memcg);
3566 }
3567
3568 static int mem_cgroup_swappiness_write(struct cgroup *cgrp, struct cftype *cft,
3569                                        u64 val)
3570 {
3571         struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
3572         struct mem_cgroup *parent;
3573
3574         if (val > 100)
3575                 return -EINVAL;
3576
3577         if (cgrp->parent == NULL)
3578                 return -EINVAL;
3579
3580         parent = mem_cgroup_from_cont(cgrp->parent);
3581
3582         cgroup_lock();
3583
3584         /* If under hierarchy, only the root of an empty sub-hierarchy can set this value */
3585         if ((parent->use_hierarchy) ||
3586             (memcg->use_hierarchy && !list_empty(&cgrp->children))) {
3587                 cgroup_unlock();
3588                 return -EINVAL;
3589         }
3590
3591         spin_lock(&memcg->reclaim_param_lock);
3592         memcg->swappiness = val;
3593         spin_unlock(&memcg->reclaim_param_lock);
3594
3595         cgroup_unlock();
3596
3597         return 0;
3598 }
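
/*
 * Note: the per-memcg swappiness set above uses the same 0..100 scale as
 * the global /proc/sys/vm/swappiness knob but only affects reclaim done
 * on behalf of this cgroup; get_swappiness() returns the global value for
 * the root cgroup.
 */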
3599
3600 static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
3601 {
3602         struct mem_cgroup_threshold_ary *t;
3603         u64 usage;
3604         int i;
3605
3606         rcu_read_lock();
3607         if (!swap)
3608                 t = rcu_dereference(memcg->thresholds.primary);
3609         else
3610                 t = rcu_dereference(memcg->memsw_thresholds.primary);
3611
3612         if (!t)
3613                 goto unlock;
3614
3615         usage = mem_cgroup_usage(memcg, swap);
3616
3617         /*
3618          * current_threshold points to the threshold just below usage.
3619          * If that no longer holds, a threshold was crossed after the
3620          * last call of __mem_cgroup_threshold().
3621          */
3622         i = t->current_threshold;
3623
3624         /*
3625          * Iterate backward over array of thresholds starting from
3626          * current_threshold and check if a threshold is crossed.
3627          * If none of the thresholds below usage is crossed, we read
3628          * only one element of the array here.
3629          */
3630         for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
3631                 eventfd_signal(t->entries[i].eventfd, 1);
3632
3633         /* i = current_threshold + 1 */
3634         i++;
3635
3636         /*
3637          * Iterate forward over array of thresholds starting from
3638          * current_threshold+1 and check if a threshold is crossed.
3639          * If none of the thresholds above usage is crossed, we read
3640          * only one element of the array here.
3641          */
3642         for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
3643                 eventfd_signal(t->entries[i].eventfd, 1);
3644
3645         /* Update current_threshold */
3646         t->current_threshold = i - 1;
3647 unlock:
3648         rcu_read_unlock();
3649 }
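
/*
 * Worked example (illustrative): with registered thresholds {4M, 8M, 16M}
 * and current_threshold at 4M, usage growing to 17M makes the forward scan
 * above signal the 8M and 16M eventfds and leaves current_threshold at
 * 16M; if usage then drops back to 5M, the backward scan signals 16M and
 * 8M again and moves current_threshold back to 4M.
 */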
3650
3651 static void mem_cgroup_threshold(struct mem_cgroup *memcg)
3652 {
3653         while (memcg) {
3654                 __mem_cgroup_threshold(memcg, false);
3655                 if (do_swap_account)
3656                         __mem_cgroup_threshold(memcg, true);
3657
3658                 memcg = parent_mem_cgroup(memcg);
3659         }
3660 }
3661
3662 static int compare_thresholds(const void *a, const void *b)
3663 {
3664         const struct mem_cgroup_threshold *_a = a;
3665         const struct mem_cgroup_threshold *_b = b;
3666         /* thresholds are u64; don't return their difference truncated to int */
3667         return _a->threshold < _b->threshold ? -1 : _a->threshold > _b->threshold;
3668 }
3669
3670 static int mem_cgroup_oom_notify_cb(struct mem_cgroup *mem)
3671 {
3672         struct mem_cgroup_eventfd_list *ev;
3673
3674         list_for_each_entry(ev, &mem->oom_notify, list)
3675                 eventfd_signal(ev->eventfd, 1);
3676         return 0;
3677 }
3678
3679 static void mem_cgroup_oom_notify(struct mem_cgroup *mem)
3680 {
3681         struct mem_cgroup *iter;
3682
3683         for_each_mem_cgroup_tree(iter, mem)
3684                 mem_cgroup_oom_notify_cb(iter);
3685 }
3686
3687 static int mem_cgroup_usage_register_event(struct cgroup *cgrp,
3688         struct cftype *cft, struct eventfd_ctx *eventfd, const char *args)
3689 {
3690         struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
3691         struct mem_cgroup_thresholds *thresholds;
3692         struct mem_cgroup_threshold_ary *new;
3693         int type = MEMFILE_TYPE(cft->private);
3694         u64 threshold, usage;
3695         int i, size, ret;
3696
3697         ret = res_counter_memparse_write_strategy(args, &threshold);
3698         if (ret)
3699                 return ret;
3700
3701         mutex_lock(&memcg->thresholds_lock);
3702
3703         if (type == _MEM)
3704                 thresholds = &memcg->thresholds;
3705         else if (type == _MEMSWAP)
3706                 thresholds = &memcg->memsw_thresholds;
3707         else
3708                 BUG();
3709
3710         usage = mem_cgroup_usage(memcg, type == _MEMSWAP);
3711
3712         /* Check if a threshold was crossed before adding a new one */
3713         if (thresholds->primary)
3714                 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
3715
3716         size = thresholds->primary ? thresholds->primary->size + 1 : 1;
3717
3718         /* Allocate memory for new array of thresholds */
3719         new = kmalloc(sizeof(*new) + size * sizeof(struct mem_cgroup_threshold),
3720                         GFP_KERNEL);
3721         if (!new) {
3722                 ret = -ENOMEM;
3723                 goto unlock;
3724         }
3725         new->size = size;
3726
3727         /* Copy thresholds (if any) to new array */
3728         if (thresholds->primary) {
3729                 memcpy(new->entries, thresholds->primary->entries, (size - 1) *
3730                                 sizeof(struct mem_cgroup_threshold));
3731         }
3732
3733         /* Add new threshold */
3734         new->entries[size - 1].eventfd = eventfd;
3735         new->entries[size - 1].threshold = threshold;
3736
3737         /* Sort thresholds. Registering a new threshold isn't time-critical */
3738         sort(new->entries, size, sizeof(struct mem_cgroup_threshold),
3739                         compare_thresholds, NULL);
3740
3741         /* Find current threshold */
3742         new->current_threshold = -1;
3743         for (i = 0; i < size; i++) {
3744                 if (new->entries[i].threshold < usage) {
3745                         /*
3746                          * new->current_threshold will not be used until
3747                          * rcu_assign_pointer(), so it's safe to increment
3748                          * it here.
3749                          */
3750                         ++new->current_threshold;
3751                 }
3752         }
3753
3754         /* Free old spare buffer and save old primary buffer as spare */
3755         kfree(thresholds->spare);
3756         thresholds->spare = thresholds->primary;
3757
3758         rcu_assign_pointer(thresholds->primary, new);
3759
3760         /* To be sure that nobody still uses the old thresholds array */
3761         synchronize_rcu();
3762
3763 unlock:
3764         mutex_unlock(&memcg->thresholds_lock);
3765
3766         return ret;
3767 }
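
/*
 * Note on the primary/spare pair used above: the thresholds are updated
 * with a classic RCU double buffer. A new array is built and published
 * with rcu_assign_pointer(), the old primary is parked as ->spare for the
 * next update, and synchronize_rcu() guarantees no reader is still walking
 * the old array before it can be reused or freed.
 */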
3768
3769 static void mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
3770         struct cftype *cft, struct eventfd_ctx *eventfd)
3771 {
3772         struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
3773         struct mem_cgroup_thresholds *thresholds;
3774         struct mem_cgroup_threshold_ary *new;
3775         int type = MEMFILE_TYPE(cft->private);
3776         u64 usage;
3777         int i, j, size;
3778
3779         mutex_lock(&memcg->thresholds_lock);
3780         if (type == _MEM)
3781                 thresholds = &memcg->thresholds;
3782         else if (type == _MEMSWAP)
3783                 thresholds = &memcg->memsw_thresholds;
3784         else
3785                 BUG();
3786
3787         /*
3788          * Something went wrong if we are trying to unregister a
3789          * threshold when we don't have any thresholds.
3790          */
3791         BUG_ON(!thresholds->primary);
3792
3793         usage = mem_cgroup_usage(memcg, type == _MEMSWAP);
3794
3795         /* Check if a threshold was crossed before removing */
3796         __mem_cgroup_threshold(memcg, type == _MEMSWAP);
3797
3798         /* Calculate the new number of thresholds */
3799         size = 0;
3800         for (i = 0; i < thresholds->primary->size; i++) {
3801                 if (thresholds->primary->entries[i].eventfd != eventfd)
3802                         size++;
3803         }
3804
3805         new = thresholds->spare;
3806
3807         /* Set thresholds array to NULL if we don't have thresholds */
3808         if (!size) {
3809                 kfree(new);
3810                 new = NULL;
3811                 goto swap_buffers;
3812         }
3813
3814         new->size = size;
3815
3816         /* Copy thresholds and find current threshold */
3817         new->current_threshold = -1;
3818         for (i = 0, j = 0; i < thresholds->primary->size; i++) {
3819                 if (thresholds->primary->entries[i].eventfd == eventfd)
3820                         continue;
3821
3822                 new->entries[j] = thresholds->primary->entries[i];
3823                 if (new->entries[j].threshold < usage) {
3824                         /*
3825                          * new->current_threshold will not be used
3826                          * until rcu_assign_pointer(), so it's safe to increment
3827                          * it here.
3828                          */
3829                         ++new->current_threshold;
3830                 }
3831                 j++;
3832         }
3833
3834 swap_buffers:
3835         /* Swap primary and spare array */
3836         thresholds->spare = thresholds->primary;
3837         rcu_assign_pointer(thresholds->primary, new);
3838
3839         /* To be sure that nobody still uses the old thresholds array */
3840         synchronize_rcu();
3841
3842         mutex_unlock(&memcg->thresholds_lock);
3843 }
3844
3845 static int mem_cgroup_oom_register_event(struct cgroup *cgrp,
3846         struct cftype *cft, struct eventfd_ctx *eventfd, const char *args)
3847 {
3848         struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
3849         struct mem_cgroup_eventfd_list *event;
3850         int type = MEMFILE_TYPE(cft->private);
3851
3852         BUG_ON(type != _OOM_TYPE);
3853         event = kmalloc(sizeof(*event), GFP_KERNEL);
3854         if (!event)
3855                 return -ENOMEM;
3856
3857         mutex_lock(&memcg_oom_mutex);
3858
3859         event->eventfd = eventfd;
3860         list_add(&event->list, &memcg->oom_notify);
3861
3862         /* already in OOM ? */
3863         if (atomic_read(&memcg->oom_lock))
3864                 eventfd_signal(eventfd, 1);
3865         mutex_unlock(&memcg_oom_mutex);
3866
3867         return 0;
3868 }
3869
3870 static void mem_cgroup_oom_unregister_event(struct cgroup *cgrp,
3871         struct cftype *cft, struct eventfd_ctx *eventfd)
3872 {
3873         struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);
3874         struct mem_cgroup_eventfd_list *ev, *tmp;
3875         int type = MEMFILE_TYPE(cft->private);
3876
3877         BUG_ON(type != _OOM_TYPE);
3878
3879         mutex_lock(&memcg_oom_mutex);
3880
3881         list_for_each_entry_safe(ev, tmp, &mem->oom_notify, list) {
3882                 if (ev->eventfd == eventfd) {
3883                         list_del(&ev->list);
3884                         kfree(ev);
3885                 }
3886         }
3887
3888         mutex_unlock(&memcg_oom_mutex);
3889 }
3890
3891 static int mem_cgroup_oom_control_read(struct cgroup *cgrp,
3892         struct cftype *cft,  struct cgroup_map_cb *cb)
3893 {
3894         struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);
3895
3896         cb->fill(cb, "oom_kill_disable", mem->oom_kill_disable);
3897
3898         if (atomic_read(&mem->oom_lock))
3899                 cb->fill(cb, "under_oom", 1);
3900         else
3901                 cb->fill(cb, "under_oom", 0);
3902         return 0;
3903 }
3904
3905 static int mem_cgroup_oom_control_write(struct cgroup *cgrp,
3906         struct cftype *cft, u64 val)
3907 {
3908         struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);
3909         struct mem_cgroup *parent;
3910
3911         /* cannot be set on the root cgroup, and only 0 and 1 are allowed */
3912         if (!cgrp->parent || !((val == 0) || (val == 1)))
3913                 return -EINVAL;
3914
3915         parent = mem_cgroup_from_cont(cgrp->parent);
3916
3917         cgroup_lock();
3918         /* oom-kill-disable is a flag for the whole sub-hierarchy. */
3919         if ((parent->use_hierarchy) ||
3920             (mem->use_hierarchy && !list_empty(&cgrp->children))) {
3921                 cgroup_unlock();
3922                 return -EINVAL;
3923         }
3924         mem->oom_kill_disable = val;
3925         if (!val)
3926                 memcg_oom_recover(mem);
3927         cgroup_unlock();
3928         return 0;
3929 }
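
/*
 * Usage sketch (assuming the usual cgroupfs layout): the OOM killer can be
 * disabled for a group through the oom_control file registered below, e.g.:
 *
 *	echo 1 > /cgroups/memory/<group>/memory.oom_control
 *
 * and an eventfd registered through cgroup.event_control is signalled via
 * mem_cgroup_oom_register_event() whenever the group enters OOM.
 */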
3930
3931 static struct cftype mem_cgroup_files[] = {
3932         {
3933                 .name = "usage_in_bytes",
3934                 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
3935                 .read_u64 = mem_cgroup_read,
3936                 .register_event = mem_cgroup_usage_register_event,
3937                 .unregister_event = mem_cgroup_usage_unregister_event,
3938         },
3939         {
3940                 .name = "max_usage_in_bytes",
3941                 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
3942                 .trigger = mem_cgroup_reset,
3943                 .read_u64 = mem_cgroup_read,
3944         },
3945         {
3946                 .name = "limit_in_bytes",
3947                 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
3948                 .write_string = mem_cgroup_write,
3949                 .read_u64 = mem_cgroup_read,
3950         },
3951         {
3952                 .name = "soft_limit_in_bytes",
3953                 .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
3954                 .write_string = mem_cgroup_write,
3955                 .read_u64 = mem_cgroup_read,
3956         },
3957         {
3958                 .name = "failcnt",
3959                 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
3960                 .trigger = mem_cgroup_reset,
3961                 .read_u64 = mem_cgroup_read,
3962         },
3963         {
3964                 .name = "stat",
3965                 .read_map = mem_control_stat_show,
3966         },
3967         {
3968                 .name = "force_empty",
3969                 .trigger = mem_cgroup_force_empty_write,
3970         },
3971         {
3972                 .name = "use_hierarchy",
3973                 .write_u64 = mem_cgroup_hierarchy_write,
3974                 .read_u64 = mem_cgroup_hierarchy_read,
3975         },
3976         {
3977                 .name = "swappiness",
3978                 .read_u64 = mem_cgroup_swappiness_read,
3979                 .write_u64 = mem_cgroup_swappiness_write,
3980         },
3981         {
3982                 .name = "move_charge_at_immigrate",
3983                 .read_u64 = mem_cgroup_move_charge_read,
3984                 .write_u64 = mem_cgroup_move_charge_write,
3985         },
3986         {
3987                 .name = "oom_control",
3988                 .read_map = mem_cgroup_oom_control_read,
3989                 .write_u64 = mem_cgroup_oom_control_write,
3990                 .register_event = mem_cgroup_oom_register_event,
3991                 .unregister_event = mem_cgroup_oom_unregister_event,
3992                 .private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
3993         },
3994 };
3995
3996 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
3997 static struct cftype memsw_cgroup_files[] = {
3998         {
3999                 .name = "memsw.usage_in_bytes",
4000                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
4001                 .read_u64 = mem_cgroup_read,
4002                 .register_event = mem_cgroup_usage_register_event,
4003                 .unregister_event = mem_cgroup_usage_unregister_event,
4004         },
4005         {
4006                 .name = "memsw.max_usage_in_bytes",
4007                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
4008                 .trigger = mem_cgroup_reset,
4009                 .read_u64 = mem_cgroup_read,
4010         },
4011         {
4012                 .name = "memsw.limit_in_bytes",
4013                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
4014                 .write_string = mem_cgroup_write,
4015                 .read_u64 = mem_cgroup_read,
4016         },
4017         {
4018                 .name = "memsw.failcnt",
4019                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
4020                 .trigger = mem_cgroup_reset,
4021                 .read_u64 = mem_cgroup_read,
4022         },
4023 };
4024
4025 static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
4026 {
4027         if (!do_swap_account)
4028                 return 0;
4029         return cgroup_add_files(cont, ss, memsw_cgroup_files,
4030                                 ARRAY_SIZE(memsw_cgroup_files));
4031 };
4032 #else
4033 static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
4034 {
4035         return 0;
4036 }
4037 #endif
4038
4039 static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
4040 {
4041         struct mem_cgroup_per_node *pn;
4042         struct mem_cgroup_per_zone *mz;
4043         enum lru_list l;
4044         int zone, tmp = node;
4045         /*
4046          * This routine is called against possible nodes, but it's a
4047          * BUG to call kmalloc() against an offline node.
4048          *
4049          * TODO: this routine can waste a lot of memory for nodes which
4050          *       will never be onlined. It's better to use a memory
4051          *       hotplug callback function.
4052          */
4053         if (!node_state(node, N_NORMAL_MEMORY))
4054                 tmp = -1;
4055         pn = kmalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
4056         if (!pn)
4057                 return 1;
4058
4059         mem->info.nodeinfo[node] = pn;
4060         memset(pn, 0, sizeof(*pn));
4061
4062         for (zone = 0; zone < MAX_NR_ZONES; zone++) {
4063                 mz = &pn->zoneinfo[zone];
4064                 for_each_lru(l)
4065                         INIT_LIST_HEAD(&mz->lists[l]);
4066                 mz->usage_in_excess = 0;
4067                 mz->on_tree = false;
4068                 mz->mem = mem;
4069         }
4070         return 0;
4071 }
4072
4073 static void free_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
4074 {
4075         kfree(mem->info.nodeinfo[node]);
4076 }
4077
4078 static struct mem_cgroup *mem_cgroup_alloc(void)
4079 {
4080         struct mem_cgroup *mem;
4081         int size = sizeof(struct mem_cgroup);
4082
4083         /* Can be very big if MAX_NUMNODES is very big */
4084         if (size < PAGE_SIZE)
4085                 mem = kmalloc(size, GFP_KERNEL);
4086         else
4087                 mem = vmalloc(size);
4088
4089         if (!mem)
4090                 return NULL;
4091
4092         memset(mem, 0, size);
4093         mem->stat = alloc_percpu(struct mem_cgroup_stat_cpu);
4094         if (!mem->stat) {
4095                 if (size < PAGE_SIZE)
4096                         kfree(mem);
4097                 else
4098                         vfree(mem);
4099                 mem = NULL;
4100         }
4101         return mem;
4102 }
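
/*
 * Note: the allocation strategy above is mirrored in __mem_cgroup_free():
 * both key off sizeof(struct mem_cgroup), so a structure that fits in a
 * page comes from the slab (kmalloc/kfree) while a larger one (possible
 * with a very large MAX_NUMNODES) goes through vmalloc/vfree.
 */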
4103
4104 /*
4105  * When a mem_cgroup is destroyed, references from swap_cgroup can remain
4106  * (scanning everything at force_empty is too costly...).
4107  *
4108  * Instead of clearing all references at force_empty, we remember
4109  * the number of references from swap_cgroup and free the mem_cgroup
4110  * when it goes down to 0.
4111  *
4112  * Removal of the cgroup itself succeeds regardless of refs from swap.
4113  */
4114
4115 static void __mem_cgroup_free(struct mem_cgroup *mem)
4116 {
4117         int node;
4118
4119         mem_cgroup_remove_from_trees(mem);
4120         free_css_id(&mem_cgroup_subsys, &mem->css);
4121
4122         for_each_node_state(node, N_POSSIBLE)
4123                 free_mem_cgroup_per_zone_info(mem, node);
4124
4125         free_percpu(mem->stat);
4126         if (sizeof(struct mem_cgroup) < PAGE_SIZE)
4127                 kfree(mem);
4128         else
4129                 vfree(mem);
4130 }
4131
4132 static void mem_cgroup_get(struct mem_cgroup *mem)
4133 {
4134         atomic_inc(&mem->refcnt);
4135 }
4136
4137 static void __mem_cgroup_put(struct mem_cgroup *mem, int count)
4138 {
4139         if (atomic_sub_and_test(count, &mem->refcnt)) {
4140                 struct mem_cgroup *parent = parent_mem_cgroup(mem);
4141                 __mem_cgroup_free(mem);
4142                 if (parent)
4143                         mem_cgroup_put(parent);
4144         }
4145 }
4146
4147 static void mem_cgroup_put(struct mem_cgroup *mem)
4148 {
4149         __mem_cgroup_put(mem, 1);
4150 }
4151
4152 /*
4153  * Returns the parent mem_cgroup in the memcg hierarchy when use_hierarchy is enabled, or NULL otherwise.
4154  */
4155 static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem)
4156 {
4157         if (!mem->res.parent)
4158                 return NULL;
4159         return mem_cgroup_from_res_counter(mem->res.parent, res);
4160 }
4161
4162 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
4163 static void __init enable_swap_cgroup(void)
4164 {
4165         if (!mem_cgroup_disabled() && really_do_swap_account)
4166                 do_swap_account = 1;
4167 }
4168 #else
4169 static void __init enable_swap_cgroup(void)
4170 {
4171 }
4172 #endif
4173
4174 static int mem_cgroup_soft_limit_tree_init(void)
4175 {
4176         struct mem_cgroup_tree_per_node *rtpn;
4177         struct mem_cgroup_tree_per_zone *rtpz;
4178         int tmp, node, zone;
4179
4180         for_each_node_state(node, N_POSSIBLE) {
4181                 tmp = node;
4182                 if (!node_state(node, N_NORMAL_MEMORY))
4183                         tmp = -1;
4184                 rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, tmp);
4185                 if (!rtpn)
4186                         return 1;
4187
4188                 soft_limit_tree.rb_tree_per_node[node] = rtpn;
4189
4190                 for (zone = 0; zone < MAX_NR_ZONES; zone++) {
4191                         rtpz = &rtpn->rb_tree_per_zone[zone];
4192                         rtpz->rb_root = RB_ROOT;
4193                         spin_lock_init(&rtpz->lock);
4194                 }
4195         }
4196         return 0;
4197 }
4198
4199 static struct cgroup_subsys_state * __ref
4200 mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
4201 {
4202         struct mem_cgroup *mem, *parent;
4203         long error = -ENOMEM;
4204         int node;
4205
4206         mem = mem_cgroup_alloc();
4207         if (!mem)
4208                 return ERR_PTR(error);
4209
4210         for_each_node_state(node, N_POSSIBLE)
4211                 if (alloc_mem_cgroup_per_zone_info(mem, node))
4212                         goto free_out;
4213
4214         /* root ? */
4215         if (cont->parent == NULL) {
4216                 int cpu;
4217                 enable_swap_cgroup();
4218                 parent = NULL;
4219                 root_mem_cgroup = mem;
4220                 if (mem_cgroup_soft_limit_tree_init())
4221                         goto free_out;
4222                 for_each_possible_cpu(cpu) {
4223                         struct memcg_stock_pcp *stock =
4224                                                 &per_cpu(memcg_stock, cpu);
4225                         INIT_WORK(&stock->work, drain_local_stock);
4226                 }
4227                 hotcpu_notifier(memcg_stock_cpu_callback, 0);
4228         } else {
4229                 parent = mem_cgroup_from_cont(cont->parent);
4230                 mem->use_hierarchy = parent->use_hierarchy;
4231                 mem->oom_kill_disable = parent->oom_kill_disable;
4232         }
4233
4234         if (parent && parent->use_hierarchy) {
4235                 res_counter_init(&mem->res, &parent->res);
4236                 res_counter_init(&mem->memsw, &parent->memsw);
4237                 /*
4238                  * We increment refcnt of the parent to ensure that we can
4239                  * safely access it on res_counter_charge/uncharge.
4240                  * This refcnt will be decremented when freeing this
4241                  * mem_cgroup (see mem_cgroup_put()).
4242                  */
4243                 mem_cgroup_get(parent);
4244         } else {
4245                 res_counter_init(&mem->res, NULL);
4246                 res_counter_init(&mem->memsw, NULL);
4247         }
4248         mem->last_scanned_child = 0;
4249         spin_lock_init(&mem->reclaim_param_lock);
4250         INIT_LIST_HEAD(&mem->oom_notify);
4251
4252         if (parent)
4253                 mem->swappiness = get_swappiness(parent);
4254         atomic_set(&mem->refcnt, 1);
4255         mem->move_charge_at_immigrate = 0;
4256         mutex_init(&mem->thresholds_lock);
4257         return &mem->css;
4258 free_out:
4259         __mem_cgroup_free(mem);
4260         root_mem_cgroup = NULL;
4261         return ERR_PTR(error);
4262 }
4263
4264 static int mem_cgroup_pre_destroy(struct cgroup_subsys *ss,
4265                                         struct cgroup *cont)
4266 {
4267         struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
4268
4269         return mem_cgroup_force_empty(mem, false);
4270 }
4271
4272 static void mem_cgroup_destroy(struct cgroup_subsys *ss,
4273                                 struct cgroup *cont)
4274 {
4275         struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
4276
4277         mem_cgroup_put(mem);
4278 }
4279
4280 static int mem_cgroup_populate(struct cgroup_subsys *ss,
4281                                 struct cgroup *cont)
4282 {
4283         int ret;
4284
4285         ret = cgroup_add_files(cont, ss, mem_cgroup_files,
4286                                 ARRAY_SIZE(mem_cgroup_files));
4287
4288         if (!ret)
4289                 ret = register_memsw_files(cont, ss);
4290         return ret;
4291 }
4292
4293 #ifdef CONFIG_MMU
4294 /* Handlers for move charge at task migration. */
4295 #define PRECHARGE_COUNT_AT_ONCE 256
4296 static int mem_cgroup_do_precharge(unsigned long count)
4297 {
4298         int ret = 0;
4299         int batch_count = PRECHARGE_COUNT_AT_ONCE;
4300         struct mem_cgroup *mem = mc.to;
4301
4302         if (mem_cgroup_is_root(mem)) {
4303                 mc.precharge += count;
4304                 /* we don't need css_get for root */
4305                 return ret;
4306         }
4307         /* try to charge at once */
4308         if (count > 1) {
4309                 struct res_counter *dummy;
4310                 /*
4311                  * "mem" cannot be under rmdir() because we've already checked
4312                  * by cgroup_lock_live_cgroup() that it is not removed and we
4313                  * are still under the same cgroup_mutex. So we can postpone
4314                  * css_get().
4315                  */
4316                 if (res_counter_charge(&mem->res, PAGE_SIZE * count, &dummy))
4317                         goto one_by_one;
4318                 if (do_swap_account && res_counter_charge(&mem->memsw,
4319                                                 PAGE_SIZE * count, &dummy)) {
4320                         res_counter_uncharge(&mem->res, PAGE_SIZE * count);
4321                         goto one_by_one;
4322                 }
4323                 mc.precharge += count;
4324                 return ret;
4325         }
4326 one_by_one:
4327         /* fall back to one by one charge */
4328         while (count--) {
4329                 if (signal_pending(current)) {
4330                         ret = -EINTR;
4331                         break;
4332                 }
4333                 if (!batch_count--) {
4334                         batch_count = PRECHARGE_COUNT_AT_ONCE;
4335                         cond_resched();
4336                 }
4337                 ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem, false);
4338                 if (ret || !mem)
4339                         /* mem_cgroup_clear_mc() will do uncharge later */
4340                         return -ENOMEM;
4341                 mc.precharge++;
4342         }
4343         return ret;
4344 }
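
/*
 * Note on the batching above: the precharge for a task move is first
 * attempted as a single bulk res_counter_charge() of "count" pages; only
 * when the bulk charge fails (e.g. it would overshoot the limit) does the
 * code fall back to charging page by page, which may reclaim and block
 * inside __mem_cgroup_try_charge().
 */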
4345
4346 /**
4347  * is_target_pte_for_mc - check whether a pte is a valid target for move charge
4348  * @vma: the vma to which the pte to be checked belongs
4349  * @addr: the address corresponding to the pte to be checked
4350  * @ptent: the pte to be checked
4351  * @target: the pointer where the target page or swap entry will be stored (can be NULL)
4352  *
4353  * Returns
4354  *   0(MC_TARGET_NONE): if the pte is not a target for move charge.
4355  *   1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for
4356  *     move charge. If @target is not NULL, the page is stored in target->page
4357  *     with an extra refcount taken (callers should handle it).
4358  *   2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
4359  *     target for charge migration. If @target is not NULL, the entry is stored
4360  *     in target->ent.
4361  *
4362  * Called with pte lock held.
4363  */
4364 union mc_target {
4365         struct page     *page;
4366         swp_entry_t     ent;
4367 };
4368
4369 enum mc_target_type {
4370         MC_TARGET_NONE, /* not used */
4371         MC_TARGET_PAGE,
4372         MC_TARGET_SWAP,
4373 };
4374
4375 static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
4376                                                 unsigned long addr, pte_t ptent)
4377 {
4378         struct page *page = vm_normal_page(vma, addr, ptent);
4379
4380         if (!page || !page_mapped(page))
4381                 return NULL;
4382         if (PageAnon(page)) {
4383                 /* we don't move shared anon */
4384                 if (!move_anon() || page_mapcount(page) > 2)
4385                         return NULL;
4386         } else if (!move_file())
4387                 /* we ignore mapcount for file pages */
4388                 return NULL;
4389         if (!get_page_unless_zero(page))
4390                 return NULL;
4391
4392         return page;
4393 }
4394
4395 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
4396                         unsigned long addr, pte_t ptent, swp_entry_t *entry)
4397 {
4398         int usage_count;
4399         struct page *page = NULL;
4400         swp_entry_t ent = pte_to_swp_entry(ptent);
4401
4402         if (!move_anon() || non_swap_entry(ent))
4403                 return NULL;
4404         usage_count = mem_cgroup_count_swap_user(ent, &page);
4405         if (usage_count > 1) { /* we don't move shared anon */
4406                 if (page)
4407                         put_page(page);
4408                 return NULL;
4409         }
4410         if (do_swap_account)
4411                 entry->val = ent.val;
4412
4413         return page;
4414 }
4415
4416 static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
4417                         unsigned long addr, pte_t ptent, swp_entry_t *entry)
4418 {
4419         struct page *page = NULL;
4420         struct inode *inode;
4421         struct address_space *mapping;
4422         pgoff_t pgoff;
4423
4424         if (!vma->vm_file) /* anonymous vma */
4425                 return NULL;
4426         if (!move_file())
4427                 return NULL;
4428
4429         inode = vma->vm_file->f_path.dentry->d_inode;
4430         mapping = vma->vm_file->f_mapping;
4431         if (pte_none(ptent))
4432                 pgoff = linear_page_index(vma, addr);
4433         else /* pte_file(ptent) is true */
4434                 pgoff = pte_to_pgoff(ptent);
4435
4436         /* the page is moved even if it's not in this task's RSS (not yet faulted in). */
4437         if (!mapping_cap_swap_backed(mapping)) { /* normal file */
4438                 page = find_get_page(mapping, pgoff);
4439         } else { /* shmem/tmpfs file: we must take swap into account too. */
4440                 swp_entry_t ent;
4441                 mem_cgroup_get_shmem_target(inode, pgoff, &page, &ent);
4442                 if (do_swap_account)
4443                         entry->val = ent.val;
4444         }
4445
4446         return page;
4447 }
4448
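/*
 * Combine the three pte handlers above.  On MC_TARGET_PAGE with a non-NULL
 * @target the caller receives a page reference and must put_page() it; see
 * the kerneldoc above union mc_target for the full contract.
 */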
4449 static int is_target_pte_for_mc(struct vm_area_struct *vma,
4450                 unsigned long addr, pte_t ptent, union mc_target *target)
4451 {
4452         struct page *page = NULL;
4453         struct page_cgroup *pc;
4454         int ret = 0;
4455         swp_entry_t ent = { .val = 0 };
4456
4457         if (pte_present(ptent))
4458                 page = mc_handle_present_pte(vma, addr, ptent);
4459         else if (is_swap_pte(ptent))
4460                 page = mc_handle_swap_pte(vma, addr, ptent, &ent);
4461         else if (pte_none(ptent) || pte_file(ptent))
4462                 page = mc_handle_file_pte(vma, addr, ptent, &ent);
4463
4464         if (!page && !ent.val)
4465                 return 0;
4466         if (page) {
4467                 pc = lookup_page_cgroup(page);
4468                 /*
4469                  * Do only a loose check here, without the page_cgroup lock;
4470                  * mem_cgroup_move_account() re-checks whether the pc is valid
4471                  * under the lock.
4472                  */
4473                 if (PageCgroupUsed(pc) && pc->mem_cgroup == mc.from) {
4474                         ret = MC_TARGET_PAGE;
4475                         if (target)
4476                                 target->page = page;
4477                 }
4478                 if (!ret || !target)
4479                         put_page(page);
4480         }
4481         /* There is a swap entry, and the page either doesn't exist or isn't charged to mc.from */
4482         if (ent.val && !ret &&
4483                         css_id(&mc.from->css) == lookup_swap_cgroup(ent)) {
4484                 ret = MC_TARGET_SWAP;
4485                 if (target)
4486                         target->ent = ent;
4487         }
4488         return ret;
4489 }
4490
4491 static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
4492                                         unsigned long addr, unsigned long end,
4493                                         struct mm_walk *walk)
4494 {
4495         struct vm_area_struct *vma = walk->private;
4496         pte_t *pte;
4497         spinlock_t *ptl;
4498
4499         pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
4500         for (; addr != end; pte++, addr += PAGE_SIZE)
4501                 if (is_target_pte_for_mc(vma, addr, *pte, NULL))
4502                         mc.precharge++; /* count candidates into mc.precharge temporarily */
4503         pte_unmap_unlock(pte - 1, ptl);
4504         cond_resched();
4505
4506         return 0;
4507 }
4508
4509 static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
4510 {
4511         unsigned long precharge;
4512         struct vm_area_struct *vma;
4513
4514         down_read(&mm->mmap_sem);
4515         for (vma = mm->mmap; vma; vma = vma->vm_next) {
4516                 struct mm_walk mem_cgroup_count_precharge_walk = {
4517                         .pmd_entry = mem_cgroup_count_precharge_pte_range,
4518                         .mm = mm,
4519                         .private = vma,
4520                 };
4521                 if (is_vm_hugetlb_page(vma))
4522                         continue;
4523                 walk_page_range(vma->vm_start, vma->vm_end,
4524                                         &mem_cgroup_count_precharge_walk);
4525         }
4526         up_read(&mm->mmap_sem);
4527
4528         precharge = mc.precharge;
4529         mc.precharge = 0;
4530
4531         return precharge;
4532 }
4533
4534 static int mem_cgroup_precharge_mc(struct mm_struct *mm)
4535 {
4536         return mem_cgroup_do_precharge(mem_cgroup_count_precharge(mm));
4537 }
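
/*
 * The precharge is a two-pass protocol: mem_cgroup_count_precharge() walks
 * every pte once and counts the move-charge candidates into mc.precharge,
 * then mem_cgroup_do_precharge() charges that many pages to mc.to up front.
 * E.g. a task with 1000 movable ptes ends up in mem_cgroup_do_precharge(1000),
 * so the later moving walk can consume mc.precharge without having to charge
 * under the pte lock.
 */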
4538
4539 static void mem_cgroup_clear_mc(void)
4540 {
4541         struct mem_cgroup *from = mc.from;
4542         struct mem_cgroup *to = mc.to;
4543
4544         /* we must uncharge all the leftover precharges from mc.to */
4545         if (mc.precharge) {
4546                 __mem_cgroup_cancel_charge(mc.to, mc.precharge);
4547                 mc.precharge = 0;
4548         }
4549         /*
4550          * we didn't uncharge from mc.from in mem_cgroup_move_account(), so
4551          * we must uncharge it here.
4552          */
4553         if (mc.moved_charge) {
4554                 __mem_cgroup_cancel_charge(mc.from, mc.moved_charge);
4555                 mc.moved_charge = 0;
4556         }
4557         /* we must fix up refcnts and charges */
4558         if (mc.moved_swap) {
4559                 /* uncharge swap account from the old cgroup */
4560                 if (!mem_cgroup_is_root(mc.from))
4561                         res_counter_uncharge(&mc.from->memsw,
4562                                                 PAGE_SIZE * mc.moved_swap);
4563                 __mem_cgroup_put(mc.from, mc.moved_swap);
4564
4565                 if (!mem_cgroup_is_root(mc.to)) {
4566                         /*
4567                          * the precharge charged both to->res and to->memsw; a
4568                          * swapped-out page is accounted in memsw only, so
4569                          * uncharge to->res here.
4570                         res_counter_uncharge(&mc.to->res,
4571                                                 PAGE_SIZE * mc.moved_swap);
4572                 }
4573                 /* we've already done mem_cgroup_get(mc.to) in mem_cgroup_move_swap_account() */
4574
4575                 mc.moved_swap = 0;
4576         }
4577         spin_lock(&mc.lock);
4578         mc.from = NULL;
4579         mc.to = NULL;
4580         mc.moving_task = NULL;
4581         spin_unlock(&mc.lock);
4582         mem_cgroup_end_move(from);
4583         memcg_oom_recover(from);
4584         memcg_oom_recover(to);
4585         wake_up_all(&mc.waitq);
4586 }
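
/*
 * Worked ledger for one moved swap entry (in PAGE_SIZE units), as fixed up
 * above: mc.from->memsw -1 (the swap charge now belongs to mc.to);
 * mc.to->res -1 (the precharge charged both res and memsw, but a
 * swapped-out page is accounted in memsw only); mc.to->memsw keeps the +1
 * it received from the precharge.
 */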
4587
4588 static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
4589                                 struct cgroup *cgroup,
4590                                 struct task_struct *p,
4591                                 bool threadgroup)
4592 {
4593         int ret = 0;
4594         struct mem_cgroup *mem = mem_cgroup_from_cont(cgroup);
4595
4596         if (mem->move_charge_at_immigrate) {
4597                 struct mm_struct *mm;
4598                 struct mem_cgroup *from = mem_cgroup_from_task(p);
4599
4600                 VM_BUG_ON(from == mem);
4601
4602                 mm = get_task_mm(p);
4603                 if (!mm)
4604                         return 0;
4605                 /* We move charges only when we move the owner of the mm */
4606                 if (mm->owner == p) {
4607                         VM_BUG_ON(mc.from);
4608                         VM_BUG_ON(mc.to);
4609                         VM_BUG_ON(mc.precharge);
4610                         VM_BUG_ON(mc.moved_charge);
4611                         VM_BUG_ON(mc.moved_swap);
4612                         VM_BUG_ON(mc.moving_task);
4613                         mem_cgroup_start_move(from);
4614                         spin_lock(&mc.lock);
4615                         mc.from = from;
4616                         mc.to = mem;
4617                         mc.precharge = 0;
4618                         mc.moved_charge = 0;
4619                         mc.moved_swap = 0;
4620                         mc.moving_task = current;
4621                         spin_unlock(&mc.lock);
4622
4623                         ret = mem_cgroup_precharge_mc(mm);
4624                         if (ret)
4625                                 mem_cgroup_clear_mc();
4626                 }
4627                 mmput(mm);
4628         }
4629         return ret;
4630 }
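
/*
 * Userspace trigger, sketched (paths assume the memory controller is
 * mounted at /cgroups/memory; see Documentation/cgroups/memory.txt):
 *
 *	echo 1 > /cgroups/memory/B/memory.move_charge_at_immigrate
 *	echo $pid > /cgroups/memory/B/tasks
 *
 * Bit 0 of move_charge_at_immigrate enables moving anon charges
 * (move_anon()), bit 1 file charges (move_file()).  Writing the pid into
 * "tasks" is what eventually reaches ->can_attach() above.
 */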
4631
4632 static void mem_cgroup_cancel_attach(struct cgroup_subsys *ss,
4633                                 struct cgroup *cgroup,
4634                                 struct task_struct *p,
4635                                 bool threadgroup)
4636 {
4637         mem_cgroup_clear_mc();
4638 }
4639
4640 static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
4641                                 unsigned long addr, unsigned long end,
4642                                 struct mm_walk *walk)
4643 {
4644         int ret = 0;
4645         struct vm_area_struct *vma = walk->private;
4646         pte_t *pte;
4647         spinlock_t *ptl;
4648
4649 retry:
4650         pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
4651         for (; addr != end; addr += PAGE_SIZE) {
4652                 pte_t ptent = *(pte++);
4653                 union mc_target target;
4654                 int type;
4655                 struct page *page;
4656                 struct page_cgroup *pc;
4657                 swp_entry_t ent;
4658
4659                 if (!mc.precharge)
4660                         break;
4661
4662                 type = is_target_pte_for_mc(vma, addr, ptent, &target);
4663                 switch (type) {
4664                 case MC_TARGET_PAGE:
4665                         page = target.page;
4666                         if (isolate_lru_page(page))
4667                                 goto put;
4668                         pc = lookup_page_cgroup(page);
4669                         if (!mem_cgroup_move_account(pc,
4670                                                 mc.from, mc.to, false)) {
4671                                 mc.precharge--;
4672                                 /* we uncharge from mc.from later. */
4673                                 mc.moved_charge++;
4674                         }
4675                         putback_lru_page(page);
4676 put:                    /* is_target_pte_for_mc() gets the page */
4677                         put_page(page);
4678                         break;
4679                 case MC_TARGET_SWAP:
4680                         ent = target.ent;
4681                         if (!mem_cgroup_move_swap_account(ent,
4682                                                 mc.from, mc.to, false)) {
4683                                 mc.precharge--;
4684                                 /* we fixup refcnts and charges later. */
4685                                 mc.moved_swap++;
4686                         }
4687                         break;
4688                 default:
4689                         break;
4690                 }
4691         }
4692         pte_unmap_unlock(pte - 1, ptl);
4693         cond_resched();
4694
4695         if (addr != end) {
4696                 /*
4697                  * We have consumed all the precharges we got in can_attach().
4698                  * Now we try to charge one page at a time; once a charge to
4699                  * mc.to fails during this attach() phase, we make no further
4700                  * charges.
4701                  */
4702                 ret = mem_cgroup_do_precharge(1);
4703                 if (!ret)
4704                         goto retry;
4705         }
4706
4707         return ret;
4708 }
4709
4710 static void mem_cgroup_move_charge(struct mm_struct *mm)
4711 {
4712         struct vm_area_struct *vma;
4713
4714         lru_add_drain_all();
4715         down_read(&mm->mmap_sem);
4716         for (vma = mm->mmap; vma; vma = vma->vm_next) {
4717                 int ret;
4718                 struct mm_walk mem_cgroup_move_charge_walk = {
4719                         .pmd_entry = mem_cgroup_move_charge_pte_range,
4720                         .mm = mm,
4721                         .private = vma,
4722                 };
4723                 if (is_vm_hugetlb_page(vma))
4724                         continue;
4725                 ret = walk_page_range(vma->vm_start, vma->vm_end,
4726                                                 &mem_cgroup_move_charge_walk);
4727                 if (ret)
4728                         /*
4729                          * This means we consumed all precharges and failed to
4730                          * get an additional charge; just abandon the walk here.
4731                          */
4732                         break;
4733         }
4734         up_read(&mm->mmap_sem);
4735 }
4736
4737 static void mem_cgroup_move_task(struct cgroup_subsys *ss,
4738                                 struct cgroup *cont,
4739                                 struct cgroup *old_cont,
4740                                 struct task_struct *p,
4741                                 bool threadgroup)
4742 {
4743         struct mm_struct *mm;
4744
4745         if (!mc.to)
4746                 /* no need to move charge */
4747                 return;
4748
4749         mm = get_task_mm(p);
4750         if (mm) {
4751                 mem_cgroup_move_charge(mm);
4752                 mmput(mm);
4753         }
4754         mem_cgroup_clear_mc();
4755 }
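
/*
 * Attach-time protocol, in callback order (CONFIG_MMU case):
 *	->can_attach()	  sets up mc.* and precharges everything;
 *	->cancel_attach() rolls the precharge back via mem_cgroup_clear_mc();
 *	->attach()	  walks the mm, moves the charges, then releases any
 *			  leftovers through mem_cgroup_clear_mc().
 */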
4756 #else   /* !CONFIG_MMU */
4757 static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
4758                                 struct cgroup *cgroup,
4759                                 struct task_struct *p,
4760                                 bool threadgroup)
4761 {
4762         return 0;
4763 }
4764 static void mem_cgroup_cancel_attach(struct cgroup_subsys *ss,
4765                                 struct cgroup *cgroup,
4766                                 struct task_struct *p,
4767                                 bool threadgroup)
4768 {
4769 }
4770 static void mem_cgroup_move_task(struct cgroup_subsys *ss,
4771                                 struct cgroup *cont,
4772                                 struct cgroup *old_cont,
4773                                 struct task_struct *p,
4774                                 bool threadgroup)
4775 {
4776 }
4777 #endif
4778
4779 struct cgroup_subsys mem_cgroup_subsys = {
4780         .name = "memory",
4781         .subsys_id = mem_cgroup_subsys_id,
4782         .create = mem_cgroup_create,
4783         .pre_destroy = mem_cgroup_pre_destroy,
4784         .destroy = mem_cgroup_destroy,
4785         .populate = mem_cgroup_populate,
4786         .can_attach = mem_cgroup_can_attach,
4787         .cancel_attach = mem_cgroup_cancel_attach,
4788         .attach = mem_cgroup_move_task,
4789         .early_init = 0,
4790         .use_id = 1,
4791 };
4792
4793 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
4794
4795 static int __init disable_swap_account(char *s)
4796 {
4797         really_do_swap_account = 0;
4798         return 1;
4799 }
4800 __setup("noswapaccount", disable_swap_account);
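
/*
 * E.g. booting with "noswapaccount" on the kernel command line keeps swap
 * accounting compiled in (CONFIG_CGROUP_MEM_RES_CTLR_SWAP) but clears
 * really_do_swap_account, so it is never turned on.
 */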
4801 #endif