memcg: oom kill disable and oom status
mm/memcontrol.c
/* memcontrol.c - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * Memory thresholds
 * Copyright (C) 2009 Nokia Corporation
 * Author: Kirill A. Shutemov
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
23
24 #include <linux/res_counter.h>
25 #include <linux/memcontrol.h>
26 #include <linux/cgroup.h>
27 #include <linux/mm.h>
28 #include <linux/hugetlb.h>
29 #include <linux/pagemap.h>
30 #include <linux/smp.h>
31 #include <linux/page-flags.h>
32 #include <linux/backing-dev.h>
33 #include <linux/bit_spinlock.h>
34 #include <linux/rcupdate.h>
35 #include <linux/limits.h>
36 #include <linux/mutex.h>
37 #include <linux/rbtree.h>
38 #include <linux/slab.h>
39 #include <linux/swap.h>
40 #include <linux/swapops.h>
41 #include <linux/spinlock.h>
42 #include <linux/eventfd.h>
43 #include <linux/sort.h>
44 #include <linux/fs.h>
45 #include <linux/seq_file.h>
46 #include <linux/vmalloc.h>
47 #include <linux/mm_inline.h>
48 #include <linux/page_cgroup.h>
49 #include <linux/cpu.h>
50 #include "internal.h"
51
52 #include <asm/uaccess.h>
53
54 struct cgroup_subsys mem_cgroup_subsys __read_mostly;
55 #define MEM_CGROUP_RECLAIM_RETRIES      5
56 struct mem_cgroup *root_mem_cgroup __read_mostly;
57
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
/* Turned on only when memory cgroup is enabled && really_do_swap_account == 1 */
int do_swap_account __read_mostly;
static int really_do_swap_account __initdata = 1; /* to remember the boot option */
#else
#define do_swap_account         (0)
#endif

/*
 * The per-memcg event counter is incremented at every pagein/pageout. This
 * counter is used to trigger some periodic events. This is straightforward
 * and better than using jiffies etc. to handle periodic memcg events.
 *
 * These values will be used as !((event) & ((1 <<(thresh)) - 1))
 */
#define THRESHOLDS_EVENTS_THRESH (7) /* once in 128 */
#define SOFTLIMIT_EVENTS_THRESH (10) /* once in 1024 */
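/*
 * Illustrative worked example (not in the original source): with
 * THRESHOLDS_EVENTS_THRESH == 7, the check above expands to
 *
 *      !(event & ((1 << 7) - 1)) == !(event & 127)
 *
 * which is true exactly when the per-cpu event counter is a multiple of
 * 128 (0, 128, 256, ...), so the threshold handler runs once per 128
 * pagein/pageout events. SOFTLIMIT_EVENTS_THRESH likewise fires once
 * per 1024 events.
 */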

/*
 * Statistics for memory cgroup.
 */
enum mem_cgroup_stat_index {
        /*
         * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
         */
        MEM_CGROUP_STAT_CACHE,     /* # of pages charged as cache */
        MEM_CGROUP_STAT_RSS,       /* # of pages charged as anon rss */
        MEM_CGROUP_STAT_FILE_MAPPED,  /* # of pages charged as file rss */
        MEM_CGROUP_STAT_PGPGIN_COUNT,   /* # of pages paged in */
        MEM_CGROUP_STAT_PGPGOUT_COUNT,  /* # of pages paged out */
        MEM_CGROUP_STAT_SWAPOUT, /* # of pages, swapped out */
        MEM_CGROUP_EVENTS,      /* incremented at every pagein/pageout */

        MEM_CGROUP_STAT_NSTATS,
};

struct mem_cgroup_stat_cpu {
        s64 count[MEM_CGROUP_STAT_NSTATS];
};

/*
 * per-zone information in memory controller.
 */
struct mem_cgroup_per_zone {
        /*
         * spin_lock to protect the per cgroup LRU
         */
        struct list_head        lists[NR_LRU_LISTS];
        unsigned long           count[NR_LRU_LISTS];

        struct zone_reclaim_stat reclaim_stat;
        struct rb_node          tree_node;      /* RB tree node */
        unsigned long long      usage_in_excess;/* Set to the value by which */
                                                /* the soft limit is exceeded*/
        bool                    on_tree;
        struct mem_cgroup       *mem;           /* Back pointer, we cannot */
                                                /* use container_of        */
};
/* Macro for accessing counter */
#define MEM_CGROUP_ZSTAT(mz, idx)       ((mz)->count[(idx)])

struct mem_cgroup_per_node {
        struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
};

struct mem_cgroup_lru_info {
        struct mem_cgroup_per_node *nodeinfo[MAX_NUMNODES];
};

/*
 * Cgroups above their limits are maintained in an RB-tree, independent of
 * their hierarchy representation.
 */

struct mem_cgroup_tree_per_zone {
        struct rb_root rb_root;
        spinlock_t lock;
};

struct mem_cgroup_tree_per_node {
        struct mem_cgroup_tree_per_zone rb_tree_per_zone[MAX_NR_ZONES];
};

struct mem_cgroup_tree {
        struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
};

static struct mem_cgroup_tree soft_limit_tree __read_mostly;

struct mem_cgroup_threshold {
        struct eventfd_ctx *eventfd;
        u64 threshold;
};

/* For threshold */
struct mem_cgroup_threshold_ary {
        /* An array index points to threshold just below usage. */
        atomic_t current_threshold;
        /* Size of entries[] */
        unsigned int size;
        /* Array of thresholds */
        struct mem_cgroup_threshold entries[0];
};
/* for OOM */
struct mem_cgroup_eventfd_list {
        struct list_head list;
        struct eventfd_ctx *eventfd;
};

static void mem_cgroup_threshold(struct mem_cgroup *mem);
static void mem_cgroup_oom_notify(struct mem_cgroup *mem);

/*
 * The memory controller data structure. The memory controller controls both
 * page cache and RSS per cgroup. We would eventually like to provide
 * statistics based on the statistics developed by Rik Van Riel for clock-pro,
 * to help the administrator determine what knobs to tune.
 *
 * TODO: Add a water mark for the memory controller. Reclaim will begin when
 * we hit the water mark. Maybe even add a low water mark, such that
 * no reclaim occurs from a cgroup at its low water mark; this is
 * a feature that will be implemented much later.
 */
struct mem_cgroup {
        struct cgroup_subsys_state css;
        /*
         * the counter to account for memory usage
         */
        struct res_counter res;
        /*
         * the counter to account for mem+swap usage.
         */
        struct res_counter memsw;
        /*
         * Per cgroup active and inactive list, similar to the
         * per zone LRU lists.
         */
        struct mem_cgroup_lru_info info;

        /*
         * protect against reclaim-related members.
         */
        spinlock_t reclaim_param_lock;

        int     prev_priority;  /* for recording reclaim priority */

        /*
         * While reclaiming in a hierarchy, we cache the last child we
         * reclaimed from.
         */
        int last_scanned_child;
        /*
         * Should the accounting and control be hierarchical, per subtree?
         */
        bool use_hierarchy;
        atomic_t        oom_lock;
        atomic_t        refcnt;

        unsigned int    swappiness;
        /* OOM-Killer disable */
        int             oom_kill_disable;

        /* set when res.limit == memsw.limit */
        bool            memsw_is_minimum;

        /* protect arrays of thresholds */
        struct mutex thresholds_lock;

        /* thresholds for memory usage. RCU-protected */
        struct mem_cgroup_threshold_ary *thresholds;

        /* thresholds for mem+swap usage. RCU-protected */
        struct mem_cgroup_threshold_ary *memsw_thresholds;

        /* For oom notifier event fd */
        struct list_head oom_notify;

        /*
         * Should we move charges of a task when a task is moved into this
         * mem_cgroup ? And what type of charges should we move ?
         */
        unsigned long   move_charge_at_immigrate;
        /*
         * percpu counter.
         */
        struct mem_cgroup_stat_cpu *stat;
};

/* Stuff for moving charges at task migration. */
/*
 * Types of charges to be moved. "move_charge_at_immigrate" is treated as a
 * left-shifted bitmap of these types.
 */
enum move_type {
        MOVE_CHARGE_TYPE_ANON,  /* private anonymous page and swap of it */
        NR_MOVE_TYPE,
};
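/*
 * Illustrative note (not in the original source): because the interface
 * value is a left-shifted bitmap of enum move_type, a check for whether
 * anon charges should be moved would look roughly like
 *
 *      if (mem->move_charge_at_immigrate & (1 << MOVE_CHARGE_TYPE_ANON))
 *              ... move anon pages and their swap entries ...
 *
 * The exact call site is hypothetical; only the bit layout is defined here.
 */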

/* "mc" and its members are protected by cgroup_mutex */
static struct move_charge_struct {
        struct mem_cgroup *from;
        struct mem_cgroup *to;
        unsigned long precharge;
        unsigned long moved_charge;
        unsigned long moved_swap;
        struct task_struct *moving_task;        /* a task moving charges */
        wait_queue_head_t waitq;                /* a waitq for other context */
} mc = {
        .waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
};

/*
 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
 * limit reclaim to prevent infinite loops, if they ever occur.
 */
#define MEM_CGROUP_MAX_RECLAIM_LOOPS            (100)
#define MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS (2)

enum charge_type {
        MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
        MEM_CGROUP_CHARGE_TYPE_MAPPED,
        MEM_CGROUP_CHARGE_TYPE_SHMEM,   /* used by page migration of shmem */
        MEM_CGROUP_CHARGE_TYPE_FORCE,   /* used by force_empty */
        MEM_CGROUP_CHARGE_TYPE_SWAPOUT, /* for accounting swapcache */
        MEM_CGROUP_CHARGE_TYPE_DROP,    /* a page was unused swap cache */
        NR_CHARGE_TYPE,
};

/* only for here (for easy reading.) */
#define PCGF_CACHE      (1UL << PCG_CACHE)
#define PCGF_USED       (1UL << PCG_USED)
#define PCGF_LOCK       (1UL << PCG_LOCK)
/* Not used, but added here for completeness */
#define PCGF_ACCT       (1UL << PCG_ACCT)

/* for encoding cft->private value on file */
#define _MEM                    (0)
#define _MEMSWAP                (1)
#define _OOM_TYPE               (2)
#define MEMFILE_PRIVATE(x, val) (((x) << 16) | (val))
#define MEMFILE_TYPE(val)       (((val) >> 16) & 0xffff)
#define MEMFILE_ATTR(val)       ((val) & 0xffff)
/* Used for the OOM notifier */
#define OOM_CONTROL             (0)
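/*
 * Illustrative example (not in the original source): a cftype for the
 * mem+swap limit could set
 *
 *      .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
 *
 * i.e. (1 << 16) | RES_LIMIT. A read/write handler then recovers the
 * counter with MEMFILE_TYPE(cft->private) == _MEMSWAP and the attribute
 * with MEMFILE_ATTR(cft->private) == RES_LIMIT.
 */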

/*
 * Reclaim flags for mem_cgroup_hierarchical_reclaim
 */
#define MEM_CGROUP_RECLAIM_NOSWAP_BIT   0x0
#define MEM_CGROUP_RECLAIM_NOSWAP       (1 << MEM_CGROUP_RECLAIM_NOSWAP_BIT)
#define MEM_CGROUP_RECLAIM_SHRINK_BIT   0x1
#define MEM_CGROUP_RECLAIM_SHRINK       (1 << MEM_CGROUP_RECLAIM_SHRINK_BIT)
#define MEM_CGROUP_RECLAIM_SOFT_BIT     0x2
#define MEM_CGROUP_RECLAIM_SOFT         (1 << MEM_CGROUP_RECLAIM_SOFT_BIT)

static void mem_cgroup_get(struct mem_cgroup *mem);
static void mem_cgroup_put(struct mem_cgroup *mem);
static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem);
static void drain_all_stock_async(void);

static struct mem_cgroup_per_zone *
mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid)
{
        return &mem->info.nodeinfo[nid]->zoneinfo[zid];
}

struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *mem)
{
        return &mem->css;
}

static struct mem_cgroup_per_zone *
page_cgroup_zoneinfo(struct page_cgroup *pc)
{
        struct mem_cgroup *mem = pc->mem_cgroup;
        int nid = page_cgroup_nid(pc);
        int zid = page_cgroup_zid(pc);

        if (!mem)
                return NULL;

        return mem_cgroup_zoneinfo(mem, nid, zid);
}

static struct mem_cgroup_tree_per_zone *
soft_limit_tree_node_zone(int nid, int zid)
{
        return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
}

static struct mem_cgroup_tree_per_zone *
soft_limit_tree_from_page(struct page *page)
{
        int nid = page_to_nid(page);
        int zid = page_zonenum(page);

        return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
}

static void
__mem_cgroup_insert_exceeded(struct mem_cgroup *mem,
                                struct mem_cgroup_per_zone *mz,
                                struct mem_cgroup_tree_per_zone *mctz,
                                unsigned long long new_usage_in_excess)
{
        struct rb_node **p = &mctz->rb_root.rb_node;
        struct rb_node *parent = NULL;
        struct mem_cgroup_per_zone *mz_node;

        if (mz->on_tree)
                return;

        mz->usage_in_excess = new_usage_in_excess;
        if (!mz->usage_in_excess)
                return;
        while (*p) {
                parent = *p;
                mz_node = rb_entry(parent, struct mem_cgroup_per_zone,
                                        tree_node);
                if (mz->usage_in_excess < mz_node->usage_in_excess)
                        p = &(*p)->rb_left;
                /*
                 * We can't avoid mem cgroups that are over their soft
                 * limit by the same amount
                 */
                else if (mz->usage_in_excess >= mz_node->usage_in_excess)
                        p = &(*p)->rb_right;
        }
        rb_link_node(&mz->tree_node, parent, p);
        rb_insert_color(&mz->tree_node, &mctz->rb_root);
        mz->on_tree = true;
}

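/*
 * Note (added for clarity): the tree is ordered by usage_in_excess, so
 * rb_last() in __mem_cgroup_largest_soft_limit_node() below yields the
 * per-zone memcg info that exceeds its soft limit by the largest amount,
 * i.e. the best candidate for soft limit reclaim.
 */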
static void
__mem_cgroup_remove_exceeded(struct mem_cgroup *mem,
                                struct mem_cgroup_per_zone *mz,
                                struct mem_cgroup_tree_per_zone *mctz)
{
        if (!mz->on_tree)
                return;
        rb_erase(&mz->tree_node, &mctz->rb_root);
        mz->on_tree = false;
}

static void
mem_cgroup_remove_exceeded(struct mem_cgroup *mem,
                                struct mem_cgroup_per_zone *mz,
                                struct mem_cgroup_tree_per_zone *mctz)
{
        spin_lock(&mctz->lock);
        __mem_cgroup_remove_exceeded(mem, mz, mctz);
        spin_unlock(&mctz->lock);
}

static void mem_cgroup_update_tree(struct mem_cgroup *mem, struct page *page)
{
        unsigned long long excess;
        struct mem_cgroup_per_zone *mz;
        struct mem_cgroup_tree_per_zone *mctz;
        int nid = page_to_nid(page);
        int zid = page_zonenum(page);
        mctz = soft_limit_tree_from_page(page);

        /*
         * Necessary to update all ancestors when hierarchy is used,
         * because their event counter is not touched.
         */
        for (; mem; mem = parent_mem_cgroup(mem)) {
                mz = mem_cgroup_zoneinfo(mem, nid, zid);
                excess = res_counter_soft_limit_excess(&mem->res);
                /*
                 * We have to update the tree if mz is on the RB-tree or
                 * mem is over its soft limit.
                 */
                if (excess || mz->on_tree) {
                        spin_lock(&mctz->lock);
                        /* if on-tree, remove it */
                        if (mz->on_tree)
                                __mem_cgroup_remove_exceeded(mem, mz, mctz);
                        /*
                         * Insert again. mz->usage_in_excess will be updated.
                         * If excess is 0, no tree ops.
                         */
                        __mem_cgroup_insert_exceeded(mem, mz, mctz, excess);
                        spin_unlock(&mctz->lock);
                }
        }
}

static void mem_cgroup_remove_from_trees(struct mem_cgroup *mem)
{
        int node, zone;
        struct mem_cgroup_per_zone *mz;
        struct mem_cgroup_tree_per_zone *mctz;

        for_each_node_state(node, N_POSSIBLE) {
                for (zone = 0; zone < MAX_NR_ZONES; zone++) {
                        mz = mem_cgroup_zoneinfo(mem, node, zone);
                        mctz = soft_limit_tree_node_zone(node, zone);
                        mem_cgroup_remove_exceeded(mem, mz, mctz);
                }
        }
}

static inline unsigned long mem_cgroup_get_excess(struct mem_cgroup *mem)
{
        return res_counter_soft_limit_excess(&mem->res) >> PAGE_SHIFT;
}

static struct mem_cgroup_per_zone *
__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
{
        struct rb_node *rightmost = NULL;
        struct mem_cgroup_per_zone *mz;

retry:
        mz = NULL;
        rightmost = rb_last(&mctz->rb_root);
        if (!rightmost)
                goto done;              /* Nothing to reclaim from */

        mz = rb_entry(rightmost, struct mem_cgroup_per_zone, tree_node);
        /*
         * Remove the node now, but someone else can add it back;
         * we will add it back at the end of reclaim to its correct
         * position in the tree.
         */
        __mem_cgroup_remove_exceeded(mz->mem, mz, mctz);
        if (!res_counter_soft_limit_excess(&mz->mem->res) ||
                !css_tryget(&mz->mem->css))
                goto retry;
done:
        return mz;
}

static struct mem_cgroup_per_zone *
mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
{
        struct mem_cgroup_per_zone *mz;

        spin_lock(&mctz->lock);
        mz = __mem_cgroup_largest_soft_limit_node(mctz);
        spin_unlock(&mctz->lock);
        return mz;
}

static s64 mem_cgroup_read_stat(struct mem_cgroup *mem,
                enum mem_cgroup_stat_index idx)
{
        int cpu;
        s64 val = 0;

        for_each_possible_cpu(cpu)
                val += per_cpu(mem->stat->count[idx], cpu);
        return val;
}

static s64 mem_cgroup_local_usage(struct mem_cgroup *mem)
{
        s64 ret;

        ret = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_RSS);
        ret += mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_CACHE);
        return ret;
}

static void mem_cgroup_swap_statistics(struct mem_cgroup *mem,
                                         bool charge)
{
        int val = (charge) ? 1 : -1;
        this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_SWAPOUT], val);
}

static void mem_cgroup_charge_statistics(struct mem_cgroup *mem,
                                         struct page_cgroup *pc,
                                         bool charge)
{
        int val = (charge) ? 1 : -1;

        preempt_disable();

        if (PageCgroupCache(pc))
                __this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_CACHE], val);
        else
                __this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_RSS], val);

        if (charge)
                __this_cpu_inc(mem->stat->count[MEM_CGROUP_STAT_PGPGIN_COUNT]);
        else
                __this_cpu_inc(mem->stat->count[MEM_CGROUP_STAT_PGPGOUT_COUNT]);
        __this_cpu_inc(mem->stat->count[MEM_CGROUP_EVENTS]);

        preempt_enable();
}

static unsigned long mem_cgroup_get_local_zonestat(struct mem_cgroup *mem,
                                        enum lru_list idx)
{
        int nid, zid;
        struct mem_cgroup_per_zone *mz;
        u64 total = 0;

        for_each_online_node(nid)
                for (zid = 0; zid < MAX_NR_ZONES; zid++) {
                        mz = mem_cgroup_zoneinfo(mem, nid, zid);
                        total += MEM_CGROUP_ZSTAT(mz, idx);
                }
        return total;
}

static bool __memcg_event_check(struct mem_cgroup *mem, int event_mask_shift)
{
        s64 val;

        val = this_cpu_read(mem->stat->count[MEM_CGROUP_EVENTS]);

        return !(val & ((1 << event_mask_shift) - 1));
}

/*
 * Check events in order.
 */
static void memcg_check_events(struct mem_cgroup *mem, struct page *page)
{
        /* threshold event is triggered in finer grain than soft limit */
        if (unlikely(__memcg_event_check(mem, THRESHOLDS_EVENTS_THRESH))) {
                mem_cgroup_threshold(mem);
                if (unlikely(__memcg_event_check(mem, SOFTLIMIT_EVENTS_THRESH)))
                        mem_cgroup_update_tree(mem, page);
        }
}

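/*
 * Note (added for clarity): the soft limit check can be nested inside the
 * threshold check because 1024 is a multiple of 128; any event count that
 * passes the SOFTLIMIT_EVENTS_THRESH mask also passes the
 * THRESHOLDS_EVENTS_THRESH mask, so no soft limit update is ever missed.
 */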
static struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
{
        return container_of(cgroup_subsys_state(cont,
                                mem_cgroup_subsys_id), struct mem_cgroup,
                                css);
}

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
{
        /*
         * mm_update_next_owner() may clear mm->owner to NULL
         * if it races with swapoff, page migration, etc.
         * So this can be called with p == NULL.
         */
        if (unlikely(!p))
                return NULL;

        return container_of(task_subsys_state(p, mem_cgroup_subsys_id),
                                struct mem_cgroup, css);
}

static struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
{
        struct mem_cgroup *mem = NULL;

        if (!mm)
                return NULL;
        /*
         * Because we hold no locks, mm->owner may be moved to another
         * cgroup under us. We use css_tryget() here even though it looks
         * pessimistic, rather than adding locks.
         */
        rcu_read_lock();
        do {
                mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
                if (unlikely(!mem))
                        break;
        } while (!css_tryget(&mem->css));
        rcu_read_unlock();
        return mem;
}

/*
 * Call the callback function against all cgroups under the hierarchy tree.
 */
static int mem_cgroup_walk_tree(struct mem_cgroup *root, void *data,
                          int (*func)(struct mem_cgroup *, void *))
{
        int found, ret, nextid;
        struct cgroup_subsys_state *css;
        struct mem_cgroup *mem;

        if (!root->use_hierarchy)
                return (*func)(root, data);

        nextid = 1;
        do {
                ret = 0;
                mem = NULL;

                rcu_read_lock();
                css = css_get_next(&mem_cgroup_subsys, nextid, &root->css,
                                   &found);
                if (css && css_tryget(css))
                        mem = container_of(css, struct mem_cgroup, css);
                rcu_read_unlock();

                if (mem) {
                        ret = (*func)(mem, data);
                        css_put(&mem->css);
                }
                nextid = found + 1;
        } while (!ret && css);

        return ret;
}

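/*
 * Example usage (see mem_cgroup_count_children() below): counting all
 * memcgs in a hierarchy is just
 *
 *      int num = 0;
 *      mem_cgroup_walk_tree(mem, &num, mem_cgroup_count_children_cb);
 *
 * A nonzero return value from the callback stops the walk early.
 */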
static inline bool mem_cgroup_is_root(struct mem_cgroup *mem)
{
        return (mem == root_mem_cgroup);
}

/*
 * The following LRU functions may be used without holding PCG_LOCK.
 * They are called by the global LRU routines independently of memcg.
 * What we have to take care of here is the validity of pc->mem_cgroup.
 *
 * Changes to pc->mem_cgroup happen when
 * 1. charge
 * 2. moving account
 * In the typical case, "charge" is done before add-to-lru. The exception is
 * SwapCache, which is added to the LRU before being charged.
 * If the PCG_USED bit is not set, the page_cgroup is not added to this
 * private LRU. When moving account, the page is not on the LRU; it's isolated.
 */

void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru)
{
        struct page_cgroup *pc;
        struct mem_cgroup_per_zone *mz;

        if (mem_cgroup_disabled())
                return;
        pc = lookup_page_cgroup(page);
        /* can happen while we handle swapcache. */
        if (!TestClearPageCgroupAcctLRU(pc))
                return;
        VM_BUG_ON(!pc->mem_cgroup);
        /*
         * We don't check PCG_USED bit. It's cleared when the "page" is finally
         * removed from global LRU.
         */
        mz = page_cgroup_zoneinfo(pc);
        MEM_CGROUP_ZSTAT(mz, lru) -= 1;
        if (mem_cgroup_is_root(pc->mem_cgroup))
                return;
        VM_BUG_ON(list_empty(&pc->lru));
        list_del_init(&pc->lru);
        return;
}

void mem_cgroup_del_lru(struct page *page)
{
        mem_cgroup_del_lru_list(page, page_lru(page));
}

void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru)
{
        struct mem_cgroup_per_zone *mz;
        struct page_cgroup *pc;

        if (mem_cgroup_disabled())
                return;

        pc = lookup_page_cgroup(page);
        /*
         * Used bit is set without atomic ops but after smp_wmb().
         * For making pc->mem_cgroup visible, insert smp_rmb() here.
         */
        smp_rmb();
        /* unused or root page is not rotated. */
        if (!PageCgroupUsed(pc) || mem_cgroup_is_root(pc->mem_cgroup))
                return;
        mz = page_cgroup_zoneinfo(pc);
        list_move(&pc->lru, &mz->lists[lru]);
}

void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru)
{
        struct page_cgroup *pc;
        struct mem_cgroup_per_zone *mz;

        if (mem_cgroup_disabled())
                return;
        pc = lookup_page_cgroup(page);
        VM_BUG_ON(PageCgroupAcctLRU(pc));
        /*
         * Used bit is set without atomic ops but after smp_wmb().
         * For making pc->mem_cgroup visible, insert smp_rmb() here.
         */
        smp_rmb();
        if (!PageCgroupUsed(pc))
                return;

        mz = page_cgroup_zoneinfo(pc);
        MEM_CGROUP_ZSTAT(mz, lru) += 1;
        SetPageCgroupAcctLRU(pc);
        if (mem_cgroup_is_root(pc->mem_cgroup))
                return;
        list_add(&pc->lru, &mz->lists[lru]);
}

/*
 * At handling SwapCache, pc->mem_cgroup may be changed while it's linked to
 * the LRU because the page may be reused after it's fully uncharged (because
 * of SwapCache behavior). To handle that, unlink the page_cgroup from the LRU
 * when charging it again. This function is only used to charge SwapCache.
 * It's done under lock_page() and expects that zone->lru_lock is never held.
 */
static void mem_cgroup_lru_del_before_commit_swapcache(struct page *page)
{
        unsigned long flags;
        struct zone *zone = page_zone(page);
        struct page_cgroup *pc = lookup_page_cgroup(page);

        spin_lock_irqsave(&zone->lru_lock, flags);
        /*
         * Forget old LRU when this page_cgroup is *not* used. This Used bit
         * is guarded by lock_page() because the page is SwapCache.
         */
        if (!PageCgroupUsed(pc))
                mem_cgroup_del_lru_list(page, page_lru(page));
        spin_unlock_irqrestore(&zone->lru_lock, flags);
}

static void mem_cgroup_lru_add_after_commit_swapcache(struct page *page)
{
        unsigned long flags;
        struct zone *zone = page_zone(page);
        struct page_cgroup *pc = lookup_page_cgroup(page);

        spin_lock_irqsave(&zone->lru_lock, flags);
        /* link when the page is linked to LRU but page_cgroup isn't */
        if (PageLRU(page) && !PageCgroupAcctLRU(pc))
                mem_cgroup_add_lru_list(page, page_lru(page));
        spin_unlock_irqrestore(&zone->lru_lock, flags);
}

void mem_cgroup_move_lists(struct page *page,
                           enum lru_list from, enum lru_list to)
{
        if (mem_cgroup_disabled())
                return;
        mem_cgroup_del_lru_list(page, from);
        mem_cgroup_add_lru_list(page, to);
}

int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
{
        int ret;
        struct mem_cgroup *curr = NULL;

        task_lock(task);
        rcu_read_lock();
        curr = try_get_mem_cgroup_from_mm(task->mm);
        rcu_read_unlock();
        task_unlock(task);
        if (!curr)
                return 0;
        /*
         * We should check use_hierarchy of "mem", not "curr". Checking
         * use_hierarchy of "curr" here would make this function return true
         * whenever hierarchy is enabled in "curr" and "curr" is a child of
         * "mem" in the *cgroup* hierarchy (even if use_hierarchy is disabled
         * in "mem").
         */
        if (mem->use_hierarchy)
                ret = css_is_ancestor(&curr->css, &mem->css);
        else
                ret = (curr == mem);
        css_put(&curr->css);
        return ret;
}

/*
 * prev_priority control... this will be used in the memory reclaim path.
 */
int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem)
{
        int prev_priority;

        spin_lock(&mem->reclaim_param_lock);
        prev_priority = mem->prev_priority;
        spin_unlock(&mem->reclaim_param_lock);

        return prev_priority;
}

void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem, int priority)
{
        spin_lock(&mem->reclaim_param_lock);
        if (priority < mem->prev_priority)
                mem->prev_priority = priority;
        spin_unlock(&mem->reclaim_param_lock);
}

void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem, int priority)
{
        spin_lock(&mem->reclaim_param_lock);
        mem->prev_priority = priority;
        spin_unlock(&mem->reclaim_param_lock);
}

static int calc_inactive_ratio(struct mem_cgroup *memcg,
                               unsigned long *present_pages)
{
        unsigned long active;
        unsigned long inactive;
        unsigned long gb;
        unsigned long inactive_ratio;

        inactive = mem_cgroup_get_local_zonestat(memcg, LRU_INACTIVE_ANON);
        active = mem_cgroup_get_local_zonestat(memcg, LRU_ACTIVE_ANON);

        gb = (inactive + active) >> (30 - PAGE_SHIFT);
        if (gb)
                inactive_ratio = int_sqrt(10 * gb);
        else
                inactive_ratio = 1;

        if (present_pages) {
                present_pages[0] = inactive;
                present_pages[1] = active;
        }

        return inactive_ratio;
}

int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg)
{
        unsigned long active;
        unsigned long inactive;
        unsigned long present_pages[2];
        unsigned long inactive_ratio;

        inactive_ratio = calc_inactive_ratio(memcg, present_pages);

        inactive = present_pages[0];
        active = present_pages[1];

        if (inactive * inactive_ratio < active)
                return 1;

        return 0;
}

int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg)
{
        unsigned long active;
        unsigned long inactive;

        inactive = mem_cgroup_get_local_zonestat(memcg, LRU_INACTIVE_FILE);
        active = mem_cgroup_get_local_zonestat(memcg, LRU_ACTIVE_FILE);

        return (active > inactive);
}

unsigned long mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg,
                                       struct zone *zone,
                                       enum lru_list lru)
{
        int nid = zone->zone_pgdat->node_id;
        int zid = zone_idx(zone);
        struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(memcg, nid, zid);

        return MEM_CGROUP_ZSTAT(mz, lru);
}

struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
                                                      struct zone *zone)
{
        int nid = zone->zone_pgdat->node_id;
        int zid = zone_idx(zone);
        struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(memcg, nid, zid);

        return &mz->reclaim_stat;
}

struct zone_reclaim_stat *
mem_cgroup_get_reclaim_stat_from_page(struct page *page)
{
        struct page_cgroup *pc;
        struct mem_cgroup_per_zone *mz;

        if (mem_cgroup_disabled())
                return NULL;

        pc = lookup_page_cgroup(page);
        /*
         * Used bit is set without atomic ops but after smp_wmb().
         * For making pc->mem_cgroup visible, insert smp_rmb() here.
         */
        smp_rmb();
        if (!PageCgroupUsed(pc))
                return NULL;

        mz = page_cgroup_zoneinfo(pc);
        if (!mz)
                return NULL;

        return &mz->reclaim_stat;
}

unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
                                        struct list_head *dst,
                                        unsigned long *scanned, int order,
                                        int mode, struct zone *z,
                                        struct mem_cgroup *mem_cont,
                                        int active, int file)
{
        unsigned long nr_taken = 0;
        struct page *page;
        unsigned long scan;
        LIST_HEAD(pc_list);
        struct list_head *src;
        struct page_cgroup *pc, *tmp;
        int nid = z->zone_pgdat->node_id;
        int zid = zone_idx(z);
        struct mem_cgroup_per_zone *mz;
        int lru = LRU_FILE * file + active;
        int ret;

        BUG_ON(!mem_cont);
        mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
        src = &mz->lists[lru];

        scan = 0;
        list_for_each_entry_safe_reverse(pc, tmp, src, lru) {
                if (scan >= nr_to_scan)
                        break;

                page = pc->page;
                if (unlikely(!PageCgroupUsed(pc)))
                        continue;
                if (unlikely(!PageLRU(page)))
                        continue;

                scan++;
                ret = __isolate_lru_page(page, mode, file);
                switch (ret) {
                case 0:
                        list_move(&page->lru, dst);
                        mem_cgroup_del_lru(page);
                        nr_taken++;
                        break;
                case -EBUSY:
                        /* we don't affect global LRU but rotate in our LRU */
                        mem_cgroup_rotate_lru_list(page, page_lru(page));
                        break;
                default:
                        break;
                }
        }

        *scanned = scan;
        return nr_taken;
}

#define mem_cgroup_from_res_counter(counter, member)    \
        container_of(counter, struct mem_cgroup, member)

static bool mem_cgroup_check_under_limit(struct mem_cgroup *mem)
{
        if (do_swap_account) {
                if (res_counter_check_under_limit(&mem->res) &&
                        res_counter_check_under_limit(&mem->memsw))
                        return true;
        } else
                if (res_counter_check_under_limit(&mem->res))
                        return true;
        return false;
}

static unsigned int get_swappiness(struct mem_cgroup *memcg)
{
        struct cgroup *cgrp = memcg->css.cgroup;
        unsigned int swappiness;

        /* root ? */
        if (cgrp->parent == NULL)
                return vm_swappiness;

        spin_lock(&memcg->reclaim_param_lock);
        swappiness = memcg->swappiness;
        spin_unlock(&memcg->reclaim_param_lock);

        return swappiness;
}

static int mem_cgroup_count_children_cb(struct mem_cgroup *mem, void *data)
{
        int *val = data;
        (*val)++;
        return 0;
}

/**
 * mem_cgroup_print_oom_info: Called from OOM with tasklist_lock held in read mode.
 * @memcg: The memory cgroup that went over limit
 * @p: Task that is going to be killed
 *
 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
 * enabled
 */
void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
        struct cgroup *task_cgrp;
        struct cgroup *mem_cgrp;
        /*
         * Need a buffer in BSS; we can't rely on allocations. The code relies
         * on the assumption that OOM is serialized for the memory controller.
         * If this assumption is broken, revisit this code.
         */
        static char memcg_name[PATH_MAX];
        int ret;

        if (!memcg || !p)
                return;

        rcu_read_lock();

        mem_cgrp = memcg->css.cgroup;
        task_cgrp = task_cgroup(p, mem_cgroup_subsys_id);

        ret = cgroup_path(task_cgrp, memcg_name, PATH_MAX);
        if (ret < 0) {
                /*
                 * Unfortunately, we are unable to convert to a useful name,
                 * but we'll still print out the usage information.
                 */
                rcu_read_unlock();
                goto done;
        }
        rcu_read_unlock();

        printk(KERN_INFO "Task in %s killed", memcg_name);

        rcu_read_lock();
        ret = cgroup_path(mem_cgrp, memcg_name, PATH_MAX);
        if (ret < 0) {
                rcu_read_unlock();
                goto done;
        }
        rcu_read_unlock();

        /*
         * Continues from above, so we don't need a KERN_ level.
         */
        printk(KERN_CONT " as a result of limit of %s\n", memcg_name);
done:

        printk(KERN_INFO "memory: usage %llukB, limit %llukB, failcnt %llu\n",
                res_counter_read_u64(&memcg->res, RES_USAGE) >> 10,
                res_counter_read_u64(&memcg->res, RES_LIMIT) >> 10,
                res_counter_read_u64(&memcg->res, RES_FAILCNT));
        printk(KERN_INFO "memory+swap: usage %llukB, limit %llukB, "
                "failcnt %llu\n",
                res_counter_read_u64(&memcg->memsw, RES_USAGE) >> 10,
                res_counter_read_u64(&memcg->memsw, RES_LIMIT) >> 10,
                res_counter_read_u64(&memcg->memsw, RES_FAILCNT));
}

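/*
 * Illustrative output (paths and values are hypothetical): the printks
 * above emit something like
 *
 *      Task in /foo/bar killed as a result of limit of /foo
 *      memory: usage 512000kB, limit 512000kB, failcnt 12
 *      memory+swap: usage 1024000kB, limit 2048000kB, failcnt 0
 *
 * with usage/limit converted from bytes to kB by the ">> 10" above.
 */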
/*
 * This function returns the number of memcgs under the hierarchy tree.
 * Returns 1 (self count) if there are no children.
 */
static int mem_cgroup_count_children(struct mem_cgroup *mem)
{
        int num = 0;
        mem_cgroup_walk_tree(mem, &num, mem_cgroup_count_children_cb);
        return num;
}

/*
 * Visit the first child (need not be the first child as per the ordering
 * of the cgroup list, since we track last_scanned_child) of @mem and use
 * that to reclaim free pages from.
 */
static struct mem_cgroup *
mem_cgroup_select_victim(struct mem_cgroup *root_mem)
{
        struct mem_cgroup *ret = NULL;
        struct cgroup_subsys_state *css;
        int nextid, found;

        if (!root_mem->use_hierarchy) {
                css_get(&root_mem->css);
                ret = root_mem;
        }

        while (!ret) {
                rcu_read_lock();
                nextid = root_mem->last_scanned_child + 1;
                css = css_get_next(&mem_cgroup_subsys, nextid, &root_mem->css,
                                   &found);
                if (css && css_tryget(css))
                        ret = container_of(css, struct mem_cgroup, css);

                rcu_read_unlock();
                /* Updates scanning parameter */
                spin_lock(&root_mem->reclaim_param_lock);
                if (!css) {
                        /* this means start scan from ID:1 */
                        root_mem->last_scanned_child = 0;
                } else
                        root_mem->last_scanned_child = found;
                spin_unlock(&root_mem->reclaim_param_lock);
        }

        return ret;
}

/*
 * Scan the hierarchy if needed to reclaim memory. We remember the last child
 * we reclaimed from, so that we don't end up penalizing one child extensively
 * based on its position in the children list.
 *
 * root_mem is the original ancestor that we've been reclaiming from.
 *
 * We give up and return to the caller when we visit root_mem twice.
 * (other groups can be removed while we're walking....)
 *
 * If shrink==true, this returns immediately to avoid freeing too much.
 */
static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
                                                struct zone *zone,
                                                gfp_t gfp_mask,
                                                unsigned long reclaim_options)
{
        struct mem_cgroup *victim;
        int ret, total = 0;
        int loop = 0;
        bool noswap = reclaim_options & MEM_CGROUP_RECLAIM_NOSWAP;
        bool shrink = reclaim_options & MEM_CGROUP_RECLAIM_SHRINK;
        bool check_soft = reclaim_options & MEM_CGROUP_RECLAIM_SOFT;
        unsigned long excess = mem_cgroup_get_excess(root_mem);

        /* If memsw_is_minimum==1, swap-out is useless. */
        if (root_mem->memsw_is_minimum)
                noswap = true;

        while (1) {
                victim = mem_cgroup_select_victim(root_mem);
                if (victim == root_mem) {
                        loop++;
                        if (loop >= 1)
                                drain_all_stock_async();
                        if (loop >= 2) {
                                /*
                                 * If we have not been able to reclaim
                                 * anything, it might be because there are
                                 * no reclaimable pages under this hierarchy.
                                 */
                                if (!check_soft || !total) {
                                        css_put(&victim->css);
                                        break;
                                }
                                /*
                                 * We want to do more targeted reclaim.
                                 * excess >> 2 is not too large, so we don't
                                 * reclaim too much, nor so small that we keep
                                 * coming back to reclaim from this cgroup.
                                 */
                                if (total >= (excess >> 2) ||
                                        (loop > MEM_CGROUP_MAX_RECLAIM_LOOPS)) {
                                        css_put(&victim->css);
                                        break;
                                }
                        }
                }
                if (!mem_cgroup_local_usage(victim)) {
                        /* this cgroup's local usage == 0 */
                        css_put(&victim->css);
                        continue;
                }
                /* we use swappiness of local cgroup */
                if (check_soft)
                        ret = mem_cgroup_shrink_node_zone(victim, gfp_mask,
                                noswap, get_swappiness(victim), zone,
                                zone->zone_pgdat->node_id);
                else
                        ret = try_to_free_mem_cgroup_pages(victim, gfp_mask,
                                                noswap, get_swappiness(victim));
                css_put(&victim->css);
                /*
                 * When shrinking usage, we can't check whether we should stop
                 * here or reclaim more; that depends on the callers.
                 * last_scanned_child will work well enough for keeping
                 * fairness under the tree.
                 */
                if (shrink)
                        return ret;
                total += ret;
                if (check_soft) {
                        if (res_counter_check_under_soft_limit(&root_mem->res))
                                return total;
                } else if (mem_cgroup_check_under_limit(root_mem))
                        return 1 + total;
        }
        return total;
}

static int mem_cgroup_oom_lock_cb(struct mem_cgroup *mem, void *data)
{
        int *val = (int *)data;
        int x;
        /*
         * Logically, we can stop scanning immediately when we find
         * a memcg that is already locked. But considering unlock ops and
         * creation/removal of memcgs, scanning them all is the simpler
         * operation.
         */
        x = atomic_inc_return(&mem->oom_lock);
        *val = max(x, *val);
        return 0;
}
/*
 * Check whether the OOM killer is already running under our hierarchy.
 * If someone is, return false.
 */
static bool mem_cgroup_oom_lock(struct mem_cgroup *mem)
{
        int lock_count = 0;

        mem_cgroup_walk_tree(mem, &lock_count, mem_cgroup_oom_lock_cb);

        if (lock_count == 1)
                return true;
        return false;
}

static int mem_cgroup_oom_unlock_cb(struct mem_cgroup *mem, void *data)
{
        /*
         * When a new child is created while the hierarchy is under oom,
         * mem_cgroup_oom_lock() may not be called. We have to use
         * atomic_add_unless() here.
         */
        atomic_add_unless(&mem->oom_lock, -1, 0);
        return 0;
}

static void mem_cgroup_oom_unlock(struct mem_cgroup *mem)
{
        mem_cgroup_walk_tree(mem, NULL, mem_cgroup_oom_unlock_cb);
}

static DEFINE_MUTEX(memcg_oom_mutex);
static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);

struct oom_wait_info {
        struct mem_cgroup *mem;
        wait_queue_t    wait;
};

static int memcg_oom_wake_function(wait_queue_t *wait,
        unsigned mode, int sync, void *arg)
{
        struct mem_cgroup *wake_mem = (struct mem_cgroup *)arg;
        struct oom_wait_info *oom_wait_info;

        oom_wait_info = container_of(wait, struct oom_wait_info, wait);

        if (oom_wait_info->mem == wake_mem)
                goto wakeup;
        /* if no hierarchy, no match */
        if (!oom_wait_info->mem->use_hierarchy || !wake_mem->use_hierarchy)
                return 0;
        /*
         * Both oom_wait_info->mem and wake_mem are stable under us, so we
         * can use css_is_ancestor() without taking care of RCU.
         */
        if (!css_is_ancestor(&oom_wait_info->mem->css, &wake_mem->css) &&
            !css_is_ancestor(&wake_mem->css, &oom_wait_info->mem->css))
                return 0;

wakeup:
        return autoremove_wake_function(wait, mode, sync, arg);
}

static void memcg_wakeup_oom(struct mem_cgroup *mem)
{
        /* for filtering, pass "mem" as argument. */
        __wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, mem);
}

static void memcg_oom_recover(struct mem_cgroup *mem)
{
        if (mem->oom_kill_disable && atomic_read(&mem->oom_lock))
                memcg_wakeup_oom(mem);
}

/*
 * Try to call the OOM killer. Returns false if we should exit the
 * memory-reclaim loop.
 */
bool mem_cgroup_handle_oom(struct mem_cgroup *mem, gfp_t mask)
{
        struct oom_wait_info owait;
        bool locked, need_to_kill;

        owait.mem = mem;
        owait.wait.flags = 0;
        owait.wait.func = memcg_oom_wake_function;
        owait.wait.private = current;
        INIT_LIST_HEAD(&owait.wait.task_list);
        need_to_kill = true;
        /* At first, try to OOM lock hierarchy under mem. */
        mutex_lock(&memcg_oom_mutex);
        locked = mem_cgroup_oom_lock(mem);
        /*
         * Even if signal_pending(), we can't quit the charge() loop without
         * accounting, so UNINTERRUPTIBLE would be appropriate. But SIGKILL
         * under OOM is always welcome, so use TASK_KILLABLE here.
         */
        prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
        if (!locked || mem->oom_kill_disable)
                need_to_kill = false;
        if (locked)
                mem_cgroup_oom_notify(mem);
        mutex_unlock(&memcg_oom_mutex);

        if (need_to_kill) {
                finish_wait(&memcg_oom_waitq, &owait.wait);
                mem_cgroup_out_of_memory(mem, mask);
        } else {
                schedule();
                finish_wait(&memcg_oom_waitq, &owait.wait);
        }
        mutex_lock(&memcg_oom_mutex);
        mem_cgroup_oom_unlock(mem);
        memcg_wakeup_oom(mem);
        mutex_unlock(&memcg_oom_mutex);

        if (test_thread_flag(TIF_MEMDIE) || fatal_signal_pending(current))
                return false;
        /* Give the dying process a chance. */
        schedule_timeout(1);
        return true;
}

/*
 * Currently used to update mapped file statistics, but the routine can be
 * generalized to update other statistics as well.
 */
void mem_cgroup_update_file_mapped(struct page *page, int val)
{
        struct mem_cgroup *mem;
        struct page_cgroup *pc;

        pc = lookup_page_cgroup(page);
        if (unlikely(!pc))
                return;

        lock_page_cgroup(pc);
        mem = pc->mem_cgroup;
        if (!mem || !PageCgroupUsed(pc))
                goto done;

        /*
         * Preemption is already disabled. We can use __this_cpu_xxx
         */
        if (val > 0) {
                __this_cpu_inc(mem->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
                SetPageCgroupFileMapped(pc);
        } else {
                __this_cpu_dec(mem->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
                ClearPageCgroupFileMapped(pc);
        }

done:
        unlock_page_cgroup(pc);
}

/*
 * Size of the first charge trial. "32" comes from vmscan.c's magic value.
 * TODO: it may be necessary to use bigger numbers on big iron.
 */
#define CHARGE_SIZE     (32 * PAGE_SIZE)
struct memcg_stock_pcp {
        struct mem_cgroup *cached; /* this is never the root cgroup */
        int charge;
        struct work_struct work;
};
static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
static atomic_t memcg_drain_count;

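/*
 * Sketch of the stock mechanism (added for clarity, simplified): a batch of
 * CHARGE_SIZE (32 pages) is charged against the res_counter at once and the
 * portion beyond the page actually needed is kept in this per-cpu stock, so
 * subsequent single-page charges on the same cpu can be served by
 * consume_stock() without touching the shared res_counter:
 *
 *      if (consume_stock(mem))
 *              goto done;                      (fast path, no res_counter)
 *      ret = res_counter_charge(&mem->res, CHARGE_SIZE, &fail_res);
 *      ...
 *      refill_stock(mem, CHARGE_SIZE - PAGE_SIZE);
 *
 * The refill_stock() step happens past the end of this excerpt; the snippet
 * is an illustration of how the helpers below fit together, not verbatim code.
 */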
/*
 * Try to consume stocked charge on this cpu. If successful, PAGE_SIZE is
 * consumed from the local stock and true is returned. If the stock is 0 or
 * holds charges from a cgroup other than the current target, false is
 * returned; the stock will then be refilled.
 */
static bool consume_stock(struct mem_cgroup *mem)
{
        struct memcg_stock_pcp *stock;
        bool ret = true;

        stock = &get_cpu_var(memcg_stock);
        if (mem == stock->cached && stock->charge)
                stock->charge -= PAGE_SIZE;
        else /* need to call res_counter_charge */
                ret = false;
        put_cpu_var(memcg_stock);
        return ret;
}

/*
 * Return the stock cached in the percpu area to the res_counter and reset
 * the cached information.
 */
static void drain_stock(struct memcg_stock_pcp *stock)
{
        struct mem_cgroup *old = stock->cached;

        if (stock->charge) {
                res_counter_uncharge(&old->res, stock->charge);
                if (do_swap_account)
                        res_counter_uncharge(&old->memsw, stock->charge);
        }
        stock->cached = NULL;
        stock->charge = 0;
}

/*
 * This must be called with preemption disabled, or by a thread pinned to
 * the local cpu.
 */
static void drain_local_stock(struct work_struct *dummy)
{
        struct memcg_stock_pcp *stock = &__get_cpu_var(memcg_stock);
        drain_stock(stock);
}

/*
 * Cache charges (val) taken from the res_counter in the local per-cpu area.
 * They will be consumed by consume_stock() later.
 */
static void refill_stock(struct mem_cgroup *mem, int val)
{
        struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock);

        if (stock->cached != mem) { /* reset if necessary */
                drain_stock(stock);
                stock->cached = mem;
        }
        stock->charge += val;
        put_cpu_var(memcg_stock);
}

/*
 * Try to drain stocked charges on other cpus. This function is asynchronous
 * and just queues a work item per cpu to drain locally on each cpu. Callers
 * can expect some charges to come back to the res_counter later, but cannot
 * wait for that.
 */
static void drain_all_stock_async(void)
{
        int cpu;
        /* This function schedules "drain" in an asynchronous way.
         * The result of "drain" is not directly handled by callers, so
         * if someone is already draining, we don't have to drain again.
         * Anyway, the WORK_STRUCT_PENDING check in queue_work_on() will
         * catch any race. We just do a loose check here.
         */
        if (atomic_read(&memcg_drain_count))
                return;
        /* Notify other cpus that system-wide "drain" is running */
        atomic_inc(&memcg_drain_count);
        get_online_cpus();
        for_each_online_cpu(cpu) {
                struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
                schedule_work_on(cpu, &stock->work);
        }
        put_online_cpus();
        atomic_dec(&memcg_drain_count);
        /* We don't wait for flush_work */
}

1537 /* This is a synchronous drain interface. */
1538 static void drain_all_stock_sync(void)
1539 {
1540         /* called when force_empty is called */
1541         atomic_inc(&memcg_drain_count);
1542         schedule_on_each_cpu(drain_local_stock);
1543         atomic_dec(&memcg_drain_count);
1544 }
1545
1546 static int __cpuinit memcg_stock_cpu_callback(struct notifier_block *nb,
1547                                         unsigned long action,
1548                                         void *hcpu)
1549 {
1550         int cpu = (unsigned long)hcpu;
1551         struct memcg_stock_pcp *stock;
1552
1553         if (action != CPU_DEAD)
1554                 return NOTIFY_OK;
1555         stock = &per_cpu(memcg_stock, cpu);
1556         drain_stock(stock);
1557         return NOTIFY_OK;
1558 }
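
/*
 * Illustrative note, not part of the original file: the callback above only
 * takes effect once registered. It is expected to be registered at
 * root-cgroup creation time (not shown in this section), roughly as below.
 */
#if 0
	hotcpu_notifier(memcg_stock_cpu_callback, 0);
#endif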
1559
1560 /*
1561  * Unlike the exported interface, an "oom" parameter is added. If oom == true,
1562  * the OOM killer may be invoked.
1563  */
1564 static int __mem_cgroup_try_charge(struct mm_struct *mm,
1565                         gfp_t gfp_mask, struct mem_cgroup **memcg, bool oom)
1566 {
1567         struct mem_cgroup *mem, *mem_over_limit;
1568         int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
1569         struct res_counter *fail_res;
1570         int csize = CHARGE_SIZE;
1571
1572         /*
1573          * Unlike the global VM's OOM kill, we are not under a system-level
1574          * memory shortage here. So, in addition to MEMDIE tasks, allow any
1575          * dying process to proceed.
1576          */
1577         if (unlikely(test_thread_flag(TIF_MEMDIE)
1578                      || fatal_signal_pending(current)))
1579                 goto bypass;
1580
1581         /*
1582          * We always charge the cgroup the mm_struct belongs to.
1583          * The mm_struct's mem_cgroup changes on task migration if the
1584          * thread group leader migrates. It's possible that mm is not
1585          * set; if so, charge init_mm (this happens for pagecache usage).
1586          */
1587         mem = *memcg;
1588         if (likely(!mem)) {
1589                 mem = try_get_mem_cgroup_from_mm(mm);
1590                 *memcg = mem;
1591         } else {
1592                 css_get(&mem->css);
1593         }
1594         if (unlikely(!mem))
1595                 return 0;
1596
1597         VM_BUG_ON(css_is_removed(&mem->css));
1598         if (mem_cgroup_is_root(mem))
1599                 goto done;
1600
1601         while (1) {
1602                 int ret = 0;
1603                 unsigned long flags = 0;
1604
1605                 if (consume_stock(mem))
1606                         goto done;
1607
1608                 ret = res_counter_charge(&mem->res, csize, &fail_res);
1609                 if (likely(!ret)) {
1610                         if (!do_swap_account)
1611                                 break;
1612                         ret = res_counter_charge(&mem->memsw, csize, &fail_res);
1613                         if (likely(!ret))
1614                                 break;
1615                         /* mem+swap counter fails */
1616                         res_counter_uncharge(&mem->res, csize);
1617                         flags |= MEM_CGROUP_RECLAIM_NOSWAP;
1618                         mem_over_limit = mem_cgroup_from_res_counter(fail_res,
1619                                                                         memsw);
1620                 } else
1621                         /* mem counter fails */
1622                         mem_over_limit = mem_cgroup_from_res_counter(fail_res,
1623                                                                         res);
1624
1625                 /* reduce request size and retry */
1626                 if (csize > PAGE_SIZE) {
1627                         csize = PAGE_SIZE;
1628                         continue;
1629                 }
1630                 if (!(gfp_mask & __GFP_WAIT))
1631                         goto nomem;
1632
1633                 ret = mem_cgroup_hierarchical_reclaim(mem_over_limit, NULL,
1634                                                 gfp_mask, flags);
1635                 if (ret)
1636                         continue;
1637
1638                 /*
1639                  * try_to_free_mem_cgroup_pages() might not give us a full
1640                  * picture of reclaim: some pages are reclaimed and might
1641                  * merely be moved to the swap cache or just unmapped from
1642                  * the cgroup. Check the limit again to see whether the
1643                  * reclaim reduced the current usage of the cgroup before
1644                  * giving up.
1645                  */
1646                 if (mem_cgroup_check_under_limit(mem_over_limit))
1647                         continue;
1648
1649                 /* try to avoid oom while someone is moving charge */
1650                 if (mc.moving_task && current != mc.moving_task) {
1651                         struct mem_cgroup *from, *to;
1652                         bool do_continue = false;
1653                         /*
1654                          * There is a small window in which "from" or "to"
1655                          * can be freed by rmdir, so we use css_tryget().
1656                          */
1657                         from = mc.from;
1658                         to = mc.to;
1659                         if (from && css_tryget(&from->css)) {
1660                                 if (mem_over_limit->use_hierarchy)
1661                                         do_continue = css_is_ancestor(
1662                                                         &from->css,
1663                                                         &mem_over_limit->css);
1664                                 else
1665                                         do_continue = (from == mem_over_limit);
1666                                 css_put(&from->css);
1667                         }
1668                         if (!do_continue && to && css_tryget(&to->css)) {
1669                                 if (mem_over_limit->use_hierarchy)
1670                                         do_continue = css_is_ancestor(
1671                                                         &to->css,
1672                                                         &mem_over_limit->css);
1673                                 else
1674                                         do_continue = (to == mem_over_limit);
1675                                 css_put(&to->css);
1676                         }
1677                         if (do_continue) {
1678                                 DEFINE_WAIT(wait);
1679                                 prepare_to_wait(&mc.waitq, &wait,
1680                                                         TASK_INTERRUPTIBLE);
1681                                 /* moving charge context might have finished. */
1682                                 if (mc.moving_task)
1683                                         schedule();
1684                                 finish_wait(&mc.waitq, &wait);
1685                                 continue;
1686                         }
1687                 }
1688
1689                 if (!nr_retries--) {
1690                         if (!oom)
1691                                 goto nomem;
1692                         if (mem_cgroup_handle_oom(mem_over_limit, gfp_mask)) {
1693                                 nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
1694                                 continue;
1695                         }
1696                         /* When we reach here, the current task is dying. */
1697                         css_put(&mem->css);
1698                         goto bypass;
1699                 }
1700         }
1701         if (csize > PAGE_SIZE)
1702                 refill_stock(mem, csize - PAGE_SIZE);
1703 done:
1704         return 0;
1705 nomem:
1706         css_put(&mem->css);
1707         return -ENOMEM;
1708 bypass:
1709         *memcg = NULL;
1710         return 0;
1711 }
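
/*
 * Illustrative sketch, not part of the original file: the three outcomes a
 * caller of __mem_cgroup_try_charge() must handle. On the "bypass" path the
 * function returns 0 with *memcg set to NULL, so callers must re-check the
 * pointer, not only the return value. The variables are assumed to be
 * declared by the caller.
 */
#if 0
	mem = NULL;
	ret = __mem_cgroup_try_charge(mm, gfp_mask, &mem, true);
	if (ret)
		return ret;	/* -ENOMEM; the css refcnt was already dropped */
	if (!mem)
		return 0;	/* bypassed: dying task, or no memcg */
	__mem_cgroup_commit_charge(mem, pc, ctype);	/* or cancel_charge() */
#endif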
1712
1713 /*
1714  * Sometimes we have to undo a charge we got by try_charge().
1715  * This function does that: it uncharges and puts the css refcnt
1716  * taken by try_charge().
1717  */
1718 static void __mem_cgroup_cancel_charge(struct mem_cgroup *mem,
1719                                                         unsigned long count)
1720 {
1721         if (!mem_cgroup_is_root(mem)) {
1722                 res_counter_uncharge(&mem->res, PAGE_SIZE * count);
1723                 if (do_swap_account)
1724                         res_counter_uncharge(&mem->memsw, PAGE_SIZE * count);
1725                 VM_BUG_ON(test_bit(CSS_ROOT, &mem->css.flags));
1726                 WARN_ON_ONCE(count > INT_MAX);
1727                 __css_put(&mem->css, (int)count);
1728         }
1729         /* we don't need css_put for root */
1730 }
1731
1732 static void mem_cgroup_cancel_charge(struct mem_cgroup *mem)
1733 {
1734         __mem_cgroup_cancel_charge(mem, 1);
1735 }
1736
1737 /*
1738  * A helper function to get a mem_cgroup from an ID. Must be called under
1739  * rcu_read_lock(). The caller must check css_is_removed() or similar if
1740  * that is a concern. (Dropping a refcnt from swap can happen against a
1741  * removed memcg.)
1742  */
1743 static struct mem_cgroup *mem_cgroup_lookup(unsigned short id)
1744 {
1745         struct cgroup_subsys_state *css;
1746
1747         /* ID 0 is unused ID */
1748         if (!id)
1749                 return NULL;
1750         css = css_lookup(&mem_cgroup_subsys, id);
1751         if (!css)
1752                 return NULL;
1753         return container_of(css, struct mem_cgroup, css);
1754 }
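
/*
 * Illustrative sketch, not part of the original file: the canonical caller
 * pattern for mem_cgroup_lookup(). The lookup itself happens under RCU, and
 * css_tryget() is used to reject a memcg that is being destroyed.
 */
#if 0
	rcu_read_lock();
	mem = mem_cgroup_lookup(id);
	if (mem && !css_tryget(&mem->css))
		mem = NULL;		/* memcg is going away */
	rcu_read_unlock();
#endif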
1755
1756 struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
1757 {
1758         struct mem_cgroup *mem = NULL;
1759         struct page_cgroup *pc;
1760         unsigned short id;
1761         swp_entry_t ent;
1762
1763         VM_BUG_ON(!PageLocked(page));
1764
1765         pc = lookup_page_cgroup(page);
1766         lock_page_cgroup(pc);
1767         if (PageCgroupUsed(pc)) {
1768                 mem = pc->mem_cgroup;
1769                 if (mem && !css_tryget(&mem->css))
1770                         mem = NULL;
1771         } else if (PageSwapCache(page)) {
1772                 ent.val = page_private(page);
1773                 id = lookup_swap_cgroup(ent);
1774                 rcu_read_lock();
1775                 mem = mem_cgroup_lookup(id);
1776                 if (mem && !css_tryget(&mem->css))
1777                         mem = NULL;
1778                 rcu_read_unlock();
1779         }
1780         unlock_page_cgroup(pc);
1781         return mem;
1782 }
1783
1784 /*
1785  * Commit a charge obtained by __mem_cgroup_try_charge() and move the
1786  * page_cgroup to the USED state. If it is already USED, uncharge and return.
1787  */
1788
1789 static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
1790                                      struct page_cgroup *pc,
1791                                      enum charge_type ctype)
1792 {
1793         /* try_charge() may have returned NULL in *memcg; handle that here. */
1794         if (!mem)
1795                 return;
1796
1797         lock_page_cgroup(pc);
1798         if (unlikely(PageCgroupUsed(pc))) {
1799                 unlock_page_cgroup(pc);
1800                 mem_cgroup_cancel_charge(mem);
1801                 return;
1802         }
1803
1804         pc->mem_cgroup = mem;
1805         /*
1806          * We access a page_cgroup asynchronously without lock_page_cgroup().
1807          * Especially when a page_cgroup is taken from a page, pc->mem_cgroup
1808          * is accessed after testing the USED bit. To make pc->mem_cgroup
1809          * visible before the USED bit, we need a memory barrier here.
1810          * See mem_cgroup_add_lru_list(), etc.
1811          */
1812         smp_wmb();
1813         switch (ctype) {
1814         case MEM_CGROUP_CHARGE_TYPE_CACHE:
1815         case MEM_CGROUP_CHARGE_TYPE_SHMEM:
1816                 SetPageCgroupCache(pc);
1817                 SetPageCgroupUsed(pc);
1818                 break;
1819         case MEM_CGROUP_CHARGE_TYPE_MAPPED:
1820                 ClearPageCgroupCache(pc);
1821                 SetPageCgroupUsed(pc);
1822                 break;
1823         default:
1824                 break;
1825         }
1826
1827         mem_cgroup_charge_statistics(mem, pc, true);
1828
1829         unlock_page_cgroup(pc);
1830         /*
1831          * "charge_statistics" updated the event counter; now check it.
1832          * Insert the ancestor (and the ancestor's ancestors) into the
1833          * softlimit RB-tree if they exceed their softlimit.
1834          */
1835         memcg_check_events(mem, pc->page);
1836 }
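
/*
 * Illustrative sketch, not part of the original file: the reader side that
 * the smp_wmb() above conceptually pairs with. A lockless reader must test
 * the USED bit first and only then dereference pc->mem_cgroup, with a read
 * barrier in between.
 */
#if 0
	if (!PageCgroupUsed(pc))
		return;
	smp_rmb();	/* pairs with smp_wmb() in __mem_cgroup_commit_charge() */
	mem = pc->mem_cgroup;
#endif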
1837
1838 /**
1839  * __mem_cgroup_move_account - move account of the page
1840  * @pc: page_cgroup of the page.
1841  * @from: mem_cgroup which the page is moved from.
1842  * @to: mem_cgroup which the page is moved to. @from != @to.
1843  * @uncharge: whether we should call uncharge and css_put against @from.
1844  *
1845  * The caller must confirm the following:
1846  * - the page is not on the LRU (isolate_page() is useful.)
1847  * - the pc is locked, used, and ->mem_cgroup points to @from.
1848  *
1849  * This function does neither "charge" nor css_get on the new cgroup; that
1850  * should be done by the caller (__mem_cgroup_try_charge would be useful).
1851  * If @uncharge is true, this function does "uncharge" from the old cgroup,
1852  * but if @uncharge is false the caller must do the "uncharge" itself.
1853  */
1854
1855 static void __mem_cgroup_move_account(struct page_cgroup *pc,
1856         struct mem_cgroup *from, struct mem_cgroup *to, bool uncharge)
1857 {
1858         VM_BUG_ON(from == to);
1859         VM_BUG_ON(PageLRU(pc->page));
1860         VM_BUG_ON(!PageCgroupLocked(pc));
1861         VM_BUG_ON(!PageCgroupUsed(pc));
1862         VM_BUG_ON(pc->mem_cgroup != from);
1863
1864         if (PageCgroupFileMapped(pc)) {
1865                 /* Update mapped_file data for mem_cgroup */
1866                 preempt_disable();
1867                 __this_cpu_dec(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
1868                 __this_cpu_inc(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
1869                 preempt_enable();
1870         }
1871         mem_cgroup_charge_statistics(from, pc, false);
1872         if (uncharge)
1873                 /* This is not "cancel", but cancel_charge does all we need. */
1874                 mem_cgroup_cancel_charge(from);
1875
1876         /* caller should have done css_get */
1877         pc->mem_cgroup = to;
1878         mem_cgroup_charge_statistics(to, pc, true);
1879         /*
1880          * We charge against "to", which may not have any tasks, so "to"
1881          * could be under rmdir(). But in the current implementation the
1882          * callers of this function are only force_empty() and move-charge,
1883          * so it is guaranteed that "to" is never removed. Hence we do not
1884          * check rmdir status here.
1885          */
1886 }
1887
1888 /*
1889  * check whether the @pc is valid for moving account and call
1890  * __mem_cgroup_move_account()
1891  */
1892 static int mem_cgroup_move_account(struct page_cgroup *pc,
1893                 struct mem_cgroup *from, struct mem_cgroup *to, bool uncharge)
1894 {
1895         int ret = -EINVAL;
1896         lock_page_cgroup(pc);
1897         if (PageCgroupUsed(pc) && pc->mem_cgroup == from) {
1898                 __mem_cgroup_move_account(pc, from, to, uncharge);
1899                 ret = 0;
1900         }
1901         unlock_page_cgroup(pc);
1902         /*
1903          * check events
1904          */
1905         memcg_check_events(to, pc->page);
1906         memcg_check_events(from, pc->page);
1907         return ret;
1908 }
1909
1910 /*
1911  * move charges to its parent.
1912  */
1913
1914 static int mem_cgroup_move_parent(struct page_cgroup *pc,
1915                                   struct mem_cgroup *child,
1916                                   gfp_t gfp_mask)
1917 {
1918         struct page *page = pc->page;
1919         struct cgroup *cg = child->css.cgroup;
1920         struct cgroup *pcg = cg->parent;
1921         struct mem_cgroup *parent;
1922         int ret;
1923
1924         /* Is ROOT ? */
1925         if (!pcg)
1926                 return -EINVAL;
1927
1928         ret = -EBUSY;
1929         if (!get_page_unless_zero(page))
1930                 goto out;
1931         if (isolate_lru_page(page))
1932                 goto put;
1933
1934         parent = mem_cgroup_from_cont(pcg);
1935         ret = __mem_cgroup_try_charge(NULL, gfp_mask, &parent, false);
1936         if (ret || !parent)
1937                 goto put_back;
1938
1939         ret = mem_cgroup_move_account(pc, child, parent, true);
1940         if (ret)
1941                 mem_cgroup_cancel_charge(parent);
1942 put_back:
1943         putback_lru_page(page);
1944 put:
1945         put_page(page);
1946 out:
1947         return ret;
1948 }
1949
1950 /*
1951  * Charge the memory controller for page usage.
1952  * Return
1953  * 0 if the charge was successful
1954  * < 0 if the cgroup is over its limit
1955  */
1956 static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
1957                                 gfp_t gfp_mask, enum charge_type ctype,
1958                                 struct mem_cgroup *memcg)
1959 {
1960         struct mem_cgroup *mem;
1961         struct page_cgroup *pc;
1962         int ret;
1963
1964         pc = lookup_page_cgroup(page);
1965         /* can happen at boot */
1966         if (unlikely(!pc))
1967                 return 0;
1968         prefetchw(pc);
1969
1970         mem = memcg;
1971         ret = __mem_cgroup_try_charge(mm, gfp_mask, &mem, true);
1972         if (ret || !mem)
1973                 return ret;
1974
1975         __mem_cgroup_commit_charge(mem, pc, ctype);
1976         return 0;
1977 }
1978
1979 int mem_cgroup_newpage_charge(struct page *page,
1980                               struct mm_struct *mm, gfp_t gfp_mask)
1981 {
1982         if (mem_cgroup_disabled())
1983                 return 0;
1984         if (PageCompound(page))
1985                 return 0;
1986         /*
1987          * If the page is already mapped, we don't have to account for it.
1988          * For page cache, page->mapping points to an address_space. But
1989          * page->mapping may hold a stale anon_vma pointer; detect that
1990          * with a PageAnon() check. A newly-mapped anon page's
1991          * page->mapping is NULL.
1992          */
1993         if (page_mapped(page) || (page->mapping && !PageAnon(page)))
1994                 return 0;
1995         if (unlikely(!mm))
1996                 mm = &init_mm;
1997         return mem_cgroup_charge_common(page, mm, gfp_mask,
1998                                 MEM_CGROUP_CHARGE_TYPE_MAPPED, NULL);
1999 }
2000
2001 static void
2002 __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
2003                                         enum charge_type ctype);
2004
2005 int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
2006                                 gfp_t gfp_mask)
2007 {
2008         struct mem_cgroup *mem = NULL;
2009         int ret;
2010
2011         if (mem_cgroup_disabled())
2012                 return 0;
2013         if (PageCompound(page))
2014                 return 0;
2015         /*
2016          * Corner case handling. This is usually called from
2017          * add_to_page_cache(), but some filesystems (shmem) precharge the
2018          * page before calling it and then call add_to_page_cache() with
2019          * GFP_NOWAIT.
2020          *
2021          * In that case the page may already be charged (see shmem.c); check
2022          * for it here to avoid charging twice. (It works, but at a slightly
2023          * higher cost.) And when the page is SwapCache, swap information
2024          * must be taken into account; we are under lock_page() here.
2025          */
2026         if (!(gfp_mask & __GFP_WAIT)) {
2027                 struct page_cgroup *pc;
2028
2029
2030                 pc = lookup_page_cgroup(page);
2031                 if (!pc)
2032                         return 0;
2033                 lock_page_cgroup(pc);
2034                 if (PageCgroupUsed(pc)) {
2035                         unlock_page_cgroup(pc);
2036                         return 0;
2037                 }
2038                 unlock_page_cgroup(pc);
2039         }
2040
2041         if (unlikely(!mm && !mem))
2042                 mm = &init_mm;
2043
2044         if (page_is_file_cache(page))
2045                 return mem_cgroup_charge_common(page, mm, gfp_mask,
2046                                 MEM_CGROUP_CHARGE_TYPE_CACHE, NULL);
2047
2048         /* shmem */
2049         if (PageSwapCache(page)) {
2050                 ret = mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &mem);
2051                 if (!ret)
2052                         __mem_cgroup_commit_charge_swapin(page, mem,
2053                                         MEM_CGROUP_CHARGE_TYPE_SHMEM);
2054         } else
2055                 ret = mem_cgroup_charge_common(page, mm, gfp_mask,
2056                                         MEM_CGROUP_CHARGE_TYPE_SHMEM, mem);
2057
2058         return ret;
2059 }
2060
2061 /*
2062  * During swap-in (try_charge -> commit or cancel) the page is locked.
2063  * When try_charge() returns successfully, one refcnt on the memcg is held
2064  * without a struct page_cgroup. This refcnt is consumed by "commit()" or
2065  * released by "cancel()".
2066  */
2067 int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
2068                                  struct page *page,
2069                                  gfp_t mask, struct mem_cgroup **ptr)
2070 {
2071         struct mem_cgroup *mem;
2072         int ret;
2073
2074         if (mem_cgroup_disabled())
2075                 return 0;
2076
2077         if (!do_swap_account)
2078                 goto charge_cur_mm;
2079         /*
2080          * A racing thread's fault, or swapoff, may have already updated
2081          * the pte, and even removed page from swap cache: in those cases
2082          * do_swap_page()'s pte_same() test will fail; but there's also a
2083          * KSM case which does need to charge the page.
2084          */
2085         if (!PageSwapCache(page))
2086                 goto charge_cur_mm;
2087         mem = try_get_mem_cgroup_from_page(page);
2088         if (!mem)
2089                 goto charge_cur_mm;
2090         *ptr = mem;
2091         ret = __mem_cgroup_try_charge(NULL, mask, ptr, true);
2092         /* drop extra refcnt from tryget */
2093         css_put(&mem->css);
2094         return ret;
2095 charge_cur_mm:
2096         if (unlikely(!mm))
2097                 mm = &init_mm;
2098         return __mem_cgroup_try_charge(mm, mask, ptr, true);
2099 }
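
/*
 * Illustrative sketch, not part of the original file: the locked-page
 * swap-in protocol from a fault handler's point of view. Exactly one of
 * commit or cancel must run to balance the memcg refcnt that try_charge
 * took. install_the_pte() and the oom label are hypothetical.
 */
#if 0
	struct mem_cgroup *ptr = NULL;

	if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr))
		goto oom;
	if (install_the_pte(mm, page))		/* hypothetical failure point */
		mem_cgroup_cancel_charge_swapin(ptr);
	else
		mem_cgroup_commit_charge_swapin(page, ptr);
#endif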
2100
2101 static void
2102 __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
2103                                         enum charge_type ctype)
2104 {
2105         struct page_cgroup *pc;
2106
2107         if (mem_cgroup_disabled())
2108                 return;
2109         if (!ptr)
2110                 return;
2111         cgroup_exclude_rmdir(&ptr->css);
2112         pc = lookup_page_cgroup(page);
2113         mem_cgroup_lru_del_before_commit_swapcache(page);
2114         __mem_cgroup_commit_charge(ptr, pc, ctype);
2115         mem_cgroup_lru_add_after_commit_swapcache(page);
2116         /*
2117          * Now the swapped-in page is in memory, which means it may be
2118          * counted both as mem and as swap: a double count. Fix this by
2119          * uncharging from memsw. Basically, this SwapCache is stable under
2120          * lock_page(), but reuse_swap_page() in do_swap_page() (memory.c)
2121          * may call delete_from_swap_cache() before we reach here.
2122          */
2123         if (do_swap_account && PageSwapCache(page)) {
2124                 swp_entry_t ent = {.val = page_private(page)};
2125                 unsigned short id;
2126                 struct mem_cgroup *memcg;
2127
2128                 id = swap_cgroup_record(ent, 0);
2129                 rcu_read_lock();
2130                 memcg = mem_cgroup_lookup(id);
2131                 if (memcg) {
2132                         /*
2133                          * The recorded memcg may be obsolete, so avoid
2134                          * calling css_tryget().
2135                          */
2136                         if (!mem_cgroup_is_root(memcg))
2137                                 res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
2138                         mem_cgroup_swap_statistics(memcg, false);
2139                         mem_cgroup_put(memcg);
2140                 }
2141                 rcu_read_unlock();
2142         }
2143         /*
2144          * At swapin we may charge against a cgroup that has no tasks, so
2145          * rmdir()->pre_destroy() can be called while we do this charge.
2146          * In that case, pre_destroy() needs to be called again; check here.
2147          */
2148         cgroup_release_and_wakeup_rmdir(&ptr->css);
2149 }
2150
2151 void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
2152 {
2153         __mem_cgroup_commit_charge_swapin(page, ptr,
2154                                         MEM_CGROUP_CHARGE_TYPE_MAPPED);
2155 }
2156
2157 void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *mem)
2158 {
2159         if (mem_cgroup_disabled())
2160                 return;
2161         if (!mem)
2162                 return;
2163         mem_cgroup_cancel_charge(mem);
2164 }
2165
2166 static void
2167 __do_uncharge(struct mem_cgroup *mem, const enum charge_type ctype)
2168 {
2169         struct memcg_batch_info *batch = NULL;
2170         bool uncharge_memsw = true;
2171         /* If swapout, usage of swap doesn't decrease */
2172         if (!do_swap_account || ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
2173                 uncharge_memsw = false;
2174
2175         batch = &current->memcg_batch;
2176         /*
2177          * Usually we do css_get() when we remember a memcg pointer. But in
2178          * this case we keep res->usage raised until the end of a series of
2179          * uncharges, so it is OK to ignore the memcg refcnt.
2180          */
2181         if (!batch->memcg)
2182                 batch->memcg = mem;
2183         /*
2184          * do_batch > 0 when unmapping pages or during inode invalidate/
2185          * truncate. In those cases, pages freed back to back can be expected
2186          * to be in the same cgroup, giving us a chance to coalesce uncharges.
2187          * But if this task was OOM-killed (TIF_MEMDIE) we uncharge one by
2188          * one, because we want the uncharge done as soon as possible.
2189          */
2190
2191         if (!batch->do_batch || test_thread_flag(TIF_MEMDIE))
2192                 goto direct_uncharge;
2193
2194         /*
2195          * In the typical case, batch->memcg == mem, which means we can merge
2196          * a series of uncharges into one res_counter uncharge. If not, we
2197          * uncharge the res_counter one by one.
2198          */
2199         if (batch->memcg != mem)
2200                 goto direct_uncharge;
2201         /* remember freed charge and uncharge it later */
2202         batch->bytes += PAGE_SIZE;
2203         if (uncharge_memsw)
2204                 batch->memsw_bytes += PAGE_SIZE;
2205         return;
2206 direct_uncharge:
2207         res_counter_uncharge(&mem->res, PAGE_SIZE);
2208         if (uncharge_memsw)
2209                 res_counter_uncharge(&mem->memsw, PAGE_SIZE);
2210         if (unlikely(batch->memcg != mem))
2211                 memcg_oom_recover(mem);
2212         return;
2213 }
2214
2215 /*
2216  * uncharge if !page_mapped(page)
2217  */
2218 static struct mem_cgroup *
2219 __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
2220 {
2221         struct page_cgroup *pc;
2222         struct mem_cgroup *mem = NULL;
2223         struct mem_cgroup_per_zone *mz;
2224
2225         if (mem_cgroup_disabled())
2226                 return NULL;
2227
2228         if (PageSwapCache(page))
2229                 return NULL;
2230
2231         /*
2232          * Check if our page_cgroup is valid
2233          */
2234         pc = lookup_page_cgroup(page);
2235         if (unlikely(!pc || !PageCgroupUsed(pc)))
2236                 return NULL;
2237
2238         lock_page_cgroup(pc);
2239
2240         mem = pc->mem_cgroup;
2241
2242         if (!PageCgroupUsed(pc))
2243                 goto unlock_out;
2244
2245         switch (ctype) {
2246         case MEM_CGROUP_CHARGE_TYPE_MAPPED:
2247         case MEM_CGROUP_CHARGE_TYPE_DROP:
2248                 if (page_mapped(page))
2249                         goto unlock_out;
2250                 break;
2251         case MEM_CGROUP_CHARGE_TYPE_SWAPOUT:
2252                 if (!PageAnon(page)) {  /* Shared memory */
2253                         if (page->mapping && !page_is_file_cache(page))
2254                                 goto unlock_out;
2255                 } else if (page_mapped(page)) /* Anon */
2256                                 goto unlock_out;
2257                 break;
2258         default:
2259                 break;
2260         }
2261
2262         if (!mem_cgroup_is_root(mem))
2263                 __do_uncharge(mem, ctype);
2264         if (ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
2265                 mem_cgroup_swap_statistics(mem, true);
2266         mem_cgroup_charge_statistics(mem, pc, false);
2267
2268         ClearPageCgroupUsed(pc);
2269         /*
2270          * pc->mem_cgroup is not cleared here; it will be accessed when the
2271          * page is freed from the LRU. This is safe because an uncharged page
2272          * is expected not to be reused (it is freed soon). The exception is
2273          * SwapCache, which is handled by special functions.
2274          */
2275
2276         mz = page_cgroup_zoneinfo(pc);
2277         unlock_page_cgroup(pc);
2278
2279         memcg_check_events(mem, page);
2280         /* at swapout, this memcg will be accessed to record to swap */
2281         if (ctype != MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
2282                 css_put(&mem->css);
2283
2284         return mem;
2285
2286 unlock_out:
2287         unlock_page_cgroup(pc);
2288         return NULL;
2289 }
2290
2291 void mem_cgroup_uncharge_page(struct page *page)
2292 {
2293         /* early check. */
2294         if (page_mapped(page))
2295                 return;
2296         if (page->mapping && !PageAnon(page))
2297                 return;
2298         __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_MAPPED);
2299 }
2300
2301 void mem_cgroup_uncharge_cache_page(struct page *page)
2302 {
2303         VM_BUG_ON(page_mapped(page));
2304         VM_BUG_ON(page->mapping);
2305         __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE);
2306 }
2307
2308 /*
2309  * Batch start/end is called from unmap_page_range/invalidate/truncate.
2310  * In those cases pages are freed continuously and can be expected to be in
2311  * the same memcg. Each of those callers limits the number of pages freed
2312  * at once, so uncharge_start/end() pairs up properly, and the pair may be
2313  * entered several (nested) times in one context; see the sketch below.
2314  */
2315
2316 void mem_cgroup_uncharge_start(void)
2317 {
2318         current->memcg_batch.do_batch++;
2319         /* Nesting is allowed. */
2320         if (current->memcg_batch.do_batch == 1) {
2321                 current->memcg_batch.memcg = NULL;
2322                 current->memcg_batch.bytes = 0;
2323                 current->memcg_batch.memsw_bytes = 0;
2324         }
2325 }
2326
2327 void mem_cgroup_uncharge_end(void)
2328 {
2329         struct memcg_batch_info *batch = &current->memcg_batch;
2330
2331         if (!batch->do_batch)
2332                 return;
2333
2334         batch->do_batch--;
2335         if (batch->do_batch) /* If stacked, do nothing. */
2336                 return;
2337
2338         if (!batch->memcg)
2339                 return;
2340         /*
2341          * This "batch->memcg" is valid without any css_get/put etc.,
2342          * because we hid the charges behind us.
2343          */
2344         if (batch->bytes)
2345                 res_counter_uncharge(&batch->memcg->res, batch->bytes);
2346         if (batch->memsw_bytes)
2347                 res_counter_uncharge(&batch->memcg->memsw, batch->memsw_bytes);
2348         memcg_oom_recover(batch->memcg);
2349         /* forget this pointer (for sanity check) */
2350         batch->memcg = NULL;
2351 }
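
/*
 * Illustrative sketch, not part of the original file: how a truncate or zap
 * path is expected to bracket a burst of uncharges so that __do_uncharge()
 * can coalesce them into a single res_counter operation. The page list here
 * is hypothetical.
 */
#if 0
	mem_cgroup_uncharge_start();
	list_for_each_entry(page, &pages_being_freed, lru)
		mem_cgroup_uncharge_page(page);
	mem_cgroup_uncharge_end();
#endif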
2352
2353 #ifdef CONFIG_SWAP
2354 /*
2355  * Called after __delete_from_swap_cache() to drop the "page" account.
2356  * The memcg information is recorded in the swap_cgroup of "ent".
2357  */
2358 void
2359 mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
2360 {
2361         struct mem_cgroup *memcg;
2362         int ctype = MEM_CGROUP_CHARGE_TYPE_SWAPOUT;
2363
2364         if (!swapout) /* this was a swap cache but the swap is unused ! */
2365                 ctype = MEM_CGROUP_CHARGE_TYPE_DROP;
2366
2367         memcg = __mem_cgroup_uncharge_common(page, ctype);
2368
2369         /* record memcg information */
2370         if (do_swap_account && swapout && memcg) {
2371                 swap_cgroup_record(ent, css_id(&memcg->css));
2372                 mem_cgroup_get(memcg);
2373         }
2374         if (swapout && memcg)
2375                 css_put(&memcg->css);
2376 }
2377 #endif
2378
2379 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
2380 /*
2381  * Called from swap_entry_free(). Remove the record in swap_cgroup and
2382  * uncharge the "memsw" account.
2383  */
2384 void mem_cgroup_uncharge_swap(swp_entry_t ent)
2385 {
2386         struct mem_cgroup *memcg;
2387         unsigned short id;
2388
2389         if (!do_swap_account)
2390                 return;
2391
2392         id = swap_cgroup_record(ent, 0);
2393         rcu_read_lock();
2394         memcg = mem_cgroup_lookup(id);
2395         if (memcg) {
2396                 /*
2397                  * We uncharge this because the swap entry is freed. This
2398                  * memcg may be obsolete; we avoid calling css_tryget().
2399                  */
2400                 if (!mem_cgroup_is_root(memcg))
2401                         res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
2402                 mem_cgroup_swap_statistics(memcg, false);
2403                 mem_cgroup_put(memcg);
2404         }
2405         rcu_read_unlock();
2406 }
2407
2408 /**
2409  * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
2410  * @entry: swap entry to be moved
2411  * @from:  mem_cgroup which the entry is moved from
2412  * @to:  mem_cgroup which the entry is moved to
2413  * @need_fixup: whether we should fixup res_counters and refcounts.
2414  *
2415  * It succeeds only when the swap_cgroup's record for this entry is the same
2416  * as the mem_cgroup's id of @from.
2417  *
2418  * Returns 0 on success, -EINVAL on failure.
2419  *
2420  * The caller must have charged to @to, IOW, called res_counter_charge() for
2421  * both res and memsw, and called css_get().
2422  */
2423 static int mem_cgroup_move_swap_account(swp_entry_t entry,
2424                 struct mem_cgroup *from, struct mem_cgroup *to, bool need_fixup)
2425 {
2426         unsigned short old_id, new_id;
2427
2428         old_id = css_id(&from->css);
2429         new_id = css_id(&to->css);
2430
2431         if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
2432                 mem_cgroup_swap_statistics(from, false);
2433                 mem_cgroup_swap_statistics(to, true);
2434                 /*
2435                  * This function is only called from the task migration
2436                  * context now. It postpones res_counter and refcount
2437                  * handling until the end of task migration
2438                  * (mem_cgroup_clear_mc()) for performance. But we cannot
2439                  * postpone mem_cgroup_get(to): if the process that has been
2440                  * moved to @to swaps in, the refcount of @to might drop to 0.
2441                  */
2442                 mem_cgroup_get(to);
2443                 if (need_fixup) {
2444                         if (!mem_cgroup_is_root(from))
2445                                 res_counter_uncharge(&from->memsw, PAGE_SIZE);
2446                         mem_cgroup_put(from);
2447                         /*
2448                          * we charged both to->res and to->memsw, so we should
2449                          * uncharge to->res.
2450                          */
2451                         if (!mem_cgroup_is_root(to))
2452                                 res_counter_uncharge(&to->res, PAGE_SIZE);
2453                         css_put(&to->css);
2454                 }
2455                 return 0;
2456         }
2457         return -EINVAL;
2458 }
2459 #else
2460 static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
2461                 struct mem_cgroup *from, struct mem_cgroup *to, bool need_fixup)
2462 {
2463         return -EINVAL;
2464 }
2465 #endif
2466
2467 /*
2468  * Before starting migration, account PAGE_SIZE to the mem_cgroup that the
2469  * old page belongs to.
2470  */
2471 int mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr)
2472 {
2473         struct page_cgroup *pc;
2474         struct mem_cgroup *mem = NULL;
2475         int ret = 0;
2476
2477         if (mem_cgroup_disabled())
2478                 return 0;
2479
2480         pc = lookup_page_cgroup(page);
2481         lock_page_cgroup(pc);
2482         if (PageCgroupUsed(pc)) {
2483                 mem = pc->mem_cgroup;
2484                 css_get(&mem->css);
2485         }
2486         unlock_page_cgroup(pc);
2487
2488         *ptr = mem;
2489         if (mem) {
2490                 ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, ptr, false);
2491                 css_put(&mem->css);
2492         }
2493         return ret;
2494 }
2495
2496 /* remove the redundant charge if migration failed */
2497 void mem_cgroup_end_migration(struct mem_cgroup *mem,
2498                 struct page *oldpage, struct page *newpage)
2499 {
2500         struct page *target, *unused;
2501         struct page_cgroup *pc;
2502         enum charge_type ctype;
2503
2504         if (!mem)
2505                 return;
2506         cgroup_exclude_rmdir(&mem->css);
2507         /* at migration success, oldpage->mapping is NULL. */
2508         if (oldpage->mapping) {
2509                 target = oldpage;
2510                 unused = NULL;
2511         } else {
2512                 target = newpage;
2513                 unused = oldpage;
2514         }
2515
2516         if (PageAnon(target))
2517                 ctype = MEM_CGROUP_CHARGE_TYPE_MAPPED;
2518         else if (page_is_file_cache(target))
2519                 ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
2520         else
2521                 ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
2522
2523         /* unused page is not on radix-tree now. */
2524         if (unused)
2525                 __mem_cgroup_uncharge_common(unused, ctype);
2526
2527         pc = lookup_page_cgroup(target);
2528         /*
2529          * __mem_cgroup_commit_charge() checks the PCG_USED bit of the
2530          * page_cgroup, so double-counting is effectively avoided.
2531          */
2532         __mem_cgroup_commit_charge(mem, pc, ctype);
2533
2534         /*
2535          * Both oldpage and newpage are still under lock_page(), so we need
2536          * not worry about races in the radix-tree. But we do have to be
2537          * careful about whether this page is still mapped.
2538          *
2539          * !page_mapped() can happen: at the start of migration oldpage was
2540          * mapped, but by now it may have been zapped. Still, we know the
2541          * *target* page is not freed/reused under us, and
2542          * mem_cgroup_uncharge_page() does all the necessary checks.
2543          */
2544         if (ctype == MEM_CGROUP_CHARGE_TYPE_MAPPED)
2545                 mem_cgroup_uncharge_page(target);
2546         /*
2547          * At migration we may charge against a cgroup that has no tasks, so
2548          * rmdir()->pre_destroy() can be called while we do this charge. In
2549          * that case, pre_destroy() needs to be called again; check here.
2550          */
2551         cgroup_release_and_wakeup_rmdir(&mem->css);
2552 }
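
/*
 * Illustrative sketch, not part of the original file: the migration protocol
 * from the caller's side, in the shape mm/migrate.c is expected to use it.
 * prepare() pre-charges against the old page's memcg; end() commits the
 * charge to whichever page survived and uncharges the other. The
 * move_page_contents() helper is hypothetical.
 */
#if 0
	struct mem_cgroup *mem = NULL;

	if (mem_cgroup_prepare_migration(page, &mem))
		return -ENOMEM;		/* charge failed */
	move_page_contents(newpage, page);
	mem_cgroup_end_migration(mem, page, newpage);	/* tolerates mem == NULL */
#endif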
2553
2554 /*
2555  * A call to try to shrink memory usage on charge failure at shmem's swapin.
2556  * Calling hierarchical_reclaim is not enough, because we should also update
2557  * last_oom_jiffies to prevent pagefault_out_of_memory from invoking a global
2558  * OOM. Moreover, considering the hierarchy, we should reclaim from the
2559  * mem_over_limit, not from the memcg this page would be charged to.
2560  * try_charge_swapin does all of this properly.
2561  */
2562 int mem_cgroup_shmem_charge_fallback(struct page *page,
2563                             struct mm_struct *mm,
2564                             gfp_t gfp_mask)
2565 {
2566         struct mem_cgroup *mem = NULL;
2567         int ret;
2568
2569         if (mem_cgroup_disabled())
2570                 return 0;
2571
2572         ret = mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &mem);
2573         if (!ret)
2574                 mem_cgroup_cancel_charge_swapin(mem); /* it does !mem check */
2575
2576         return ret;
2577 }
2578
2579 static DEFINE_MUTEX(set_limit_mutex);
2580
2581 static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
2582                                 unsigned long long val)
2583 {
2584         int retry_count;
2585         u64 memswlimit, memlimit;
2586         int ret = 0;
2587         int children = mem_cgroup_count_children(memcg);
2588         u64 curusage, oldusage;
2589         int enlarge;
2590
2591         /*
2592          * To keep hierarchical_reclaim simple, how long we should retry
2593          * depends on the caller. We set our retry count to be a function
2594          * of the number of children we must visit in this loop.
2595          */
2596         retry_count = MEM_CGROUP_RECLAIM_RETRIES * children;
2597
2598         oldusage = res_counter_read_u64(&memcg->res, RES_USAGE);
2599
2600         enlarge = 0;
2601         while (retry_count) {
2602                 if (signal_pending(current)) {
2603                         ret = -EINTR;
2604                         break;
2605                 }
2606                 /*
2607                  * Rather than hiding all of this in some function, I do it
2608                  * open-coded so you can see what it really does. We have to
2609                  * guarantee mem->res.limit <= mem->memsw.limit.
2610                  */
2611                 mutex_lock(&set_limit_mutex);
2612                 memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
2613                 if (memswlimit < val) {
2614                         ret = -EINVAL;
2615                         mutex_unlock(&set_limit_mutex);
2616                         break;
2617                 }
2618
2619                 memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
2620                 if (memlimit < val)
2621                         enlarge = 1;
2622
2623                 ret = res_counter_set_limit(&memcg->res, val);
2624                 if (!ret) {
2625                         if (memswlimit == val)
2626                                 memcg->memsw_is_minimum = true;
2627                         else
2628                                 memcg->memsw_is_minimum = false;
2629                 }
2630                 mutex_unlock(&set_limit_mutex);
2631
2632                 if (!ret)
2633                         break;
2634
2635                 mem_cgroup_hierarchical_reclaim(memcg, NULL, GFP_KERNEL,
2636                                                 MEM_CGROUP_RECLAIM_SHRINK);
2637                 curusage = res_counter_read_u64(&memcg->res, RES_USAGE);
2638                 /* Was usage reduced? */
2639                 if (curusage >= oldusage)
2640                         retry_count--;
2641                 else
2642                         oldusage = curusage;
2643         }
2644         if (!ret && enlarge)
2645                 memcg_oom_recover(memcg);
2646
2647         return ret;
2648 }
2649
2650 static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
2651                                         unsigned long long val)
2652 {
2653         int retry_count;
2654         u64 memlimit, memswlimit, oldusage, curusage;
2655         int children = mem_cgroup_count_children(memcg);
2656         int ret = -EBUSY;
2657         int enlarge = 0;
2658
2659         /* see mem_cgroup_resize_limit() */
2660         retry_count = children * MEM_CGROUP_RECLAIM_RETRIES;
2661         oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
2662         while (retry_count) {
2663                 if (signal_pending(current)) {
2664                         ret = -EINTR;
2665                         break;
2666                 }
2667                 /*
2668                  * Rather than hiding all of this in some function, I do it
2669                  * open-coded so you can see what it really does. We have to
2670                  * guarantee mem->res.limit <= mem->memsw.limit.
2671                  */
2672                 mutex_lock(&set_limit_mutex);
2673                 memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
2674                 if (memlimit > val) {
2675                         ret = -EINVAL;
2676                         mutex_unlock(&set_limit_mutex);
2677                         break;
2678                 }
2679                 memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
2680                 if (memswlimit < val)
2681                         enlarge = 1;
2682                 ret = res_counter_set_limit(&memcg->memsw, val);
2683                 if (!ret) {
2684                         if (memlimit == val)
2685                                 memcg->memsw_is_minimum = true;
2686                         else
2687                                 memcg->memsw_is_minimum = false;
2688                 }
2689                 mutex_unlock(&set_limit_mutex);
2690
2691                 if (!ret)
2692                         break;
2693
2694                 mem_cgroup_hierarchical_reclaim(memcg, NULL, GFP_KERNEL,
2695                                                 MEM_CGROUP_RECLAIM_NOSWAP |
2696                                                 MEM_CGROUP_RECLAIM_SHRINK);
2697                 curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
2698                 /* Was usage reduced? */
2699                 if (curusage >= oldusage)
2700                         retry_count--;
2701                 else
2702                         oldusage = curusage;
2703         }
2704         if (!ret && enlarge)
2705                 memcg_oom_recover(memcg);
2706         return ret;
2707 }
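
/*
 * Illustrative sketch, not part of the original file: the two setters above
 * enforce mem->res.limit <= mem->memsw.limit, so a hypothetical caller that
 * raises both limits must grow memsw first (and, symmetrically, shrink it
 * last).
 */
#if 0
	ret = mem_cgroup_resize_memsw_limit(memcg, new_val);
	if (!ret)
		ret = mem_cgroup_resize_limit(memcg, new_val);
#endif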
2708
2709 unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
2710                                                 gfp_t gfp_mask, int nid,
2711                                                 int zid)
2712 {
2713         unsigned long nr_reclaimed = 0;
2714         struct mem_cgroup_per_zone *mz, *next_mz = NULL;
2715         unsigned long reclaimed;
2716         int loop = 0;
2717         struct mem_cgroup_tree_per_zone *mctz;
2718         unsigned long long excess;
2719
2720         if (order > 0)
2721                 return 0;
2722
2723         mctz = soft_limit_tree_node_zone(nid, zid);
2724         /*
2725          * This loop can run for a while, especially if mem_cgroups
2726          * continuously keep exceeding their soft limit and putting the
2727          * system under pressure.
2728          */
2729         do {
2730                 if (next_mz)
2731                         mz = next_mz;
2732                 else
2733                         mz = mem_cgroup_largest_soft_limit_node(mctz);
2734                 if (!mz)
2735                         break;
2736
2737                 reclaimed = mem_cgroup_hierarchical_reclaim(mz->mem, zone,
2738                                                 gfp_mask,
2739                                                 MEM_CGROUP_RECLAIM_SOFT);
2740                 nr_reclaimed += reclaimed;
2741                 spin_lock(&mctz->lock);
2742
2743                 /*
2744                  * If we failed to reclaim anything from this memory cgroup,
2745                  * it is time to move on to the next cgroup.
2746                  */
2747                 next_mz = NULL;
2748                 if (!reclaimed) {
2749                         do {
2750                                 /*
2751                                  * Loop until we find yet another one.
2752                                  *
2753                                  * By the time we get the soft_limit lock
2754                                  * again, someone might have added the
2755                                  * group back on the RB tree. Iterate to
2756                                  * make sure we get a different mem.
2757                                  * mem_cgroup_largest_soft_limit_node returns
2758                                  * NULL if no other cgroup is present on
2759                                  * the tree.
2760                                  */
2761                                 next_mz =
2762                                 __mem_cgroup_largest_soft_limit_node(mctz);
2763                                 if (next_mz == mz) {
2764                                         css_put(&next_mz->mem->css);
2765                                         next_mz = NULL;
2766                                 } else /* next_mz == NULL or other memcg */
2767                                         break;
2768                         } while (1);
2769                 }
2770                 __mem_cgroup_remove_exceeded(mz->mem, mz, mctz);
2771                 excess = res_counter_soft_limit_excess(&mz->mem->res);
2772                 /*
2773                  * One school of thought says that we should not add the
2774                  * node back to the tree if reclaim returns 0. But our
2775                  * reclaim could return 0 simply because, due to the
2776                  * reclaim priority, we are exposing only a smaller
2777                  * subset of memory to be reclaimed from. Consider this
2778                  * a longer-term TODO.
2779                  */
2780                 /* If excess == 0, no tree ops */
2781                 __mem_cgroup_insert_exceeded(mz->mem, mz, mctz, excess);
2782                 spin_unlock(&mctz->lock);
2783                 css_put(&mz->mem->css);
2784                 loop++;
2785                 /*
2786                  * Could not reclaim anything and there are no more
2787                  * mem cgroups to try or we seem to be looping without
2788                  * reclaiming anything.
2789                  */
2790                 if (!nr_reclaimed &&
2791                         (next_mz == NULL ||
2792                         loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
2793                         break;
2794         } while (!nr_reclaimed);
2795         if (next_mz)
2796                 css_put(&next_mz->mem->css);
2797         return nr_reclaimed;
2798 }
2799
2800 /*
2801  * This routine traverses the page_cgroups on the given list and drops them
2802  * all. *And* it does not reclaim the pages themselves, it just removes
2803  * their page_cgroup accounting.
2804  */
2804 static int mem_cgroup_force_empty_list(struct mem_cgroup *mem,
2805                                 int node, int zid, enum lru_list lru)
2806 {
2807         struct zone *zone;
2808         struct mem_cgroup_per_zone *mz;
2809         struct page_cgroup *pc, *busy;
2810         unsigned long flags, loop;
2811         struct list_head *list;
2812         int ret = 0;
2813
2814         zone = &NODE_DATA(node)->node_zones[zid];
2815         mz = mem_cgroup_zoneinfo(mem, node, zid);
2816         list = &mz->lists[lru];
2817
2818         loop = MEM_CGROUP_ZSTAT(mz, lru);
2819         /* give some margin against -EBUSY etc... */
2820         loop += 256;
2821         busy = NULL;
2822         while (loop--) {
2823                 ret = 0;
2824                 spin_lock_irqsave(&zone->lru_lock, flags);
2825                 if (list_empty(list)) {
2826                         spin_unlock_irqrestore(&zone->lru_lock, flags);
2827                         break;
2828                 }
2829                 pc = list_entry(list->prev, struct page_cgroup, lru);
2830                 if (busy == pc) {
2831                         list_move(&pc->lru, list);
2832                         busy = NULL;
2833                         spin_unlock_irqrestore(&zone->lru_lock, flags);
2834                         continue;
2835                 }
2836                 spin_unlock_irqrestore(&zone->lru_lock, flags);
2837
2838                 ret = mem_cgroup_move_parent(pc, mem, GFP_KERNEL);
2839                 if (ret == -ENOMEM)
2840                         break;
2841
2842                 if (ret == -EBUSY || ret == -EINVAL) {
2843                         /* found lock contention or "pc" is obsolete. */
2844                         busy = pc;
2845                         cond_resched();
2846                 } else
2847                         busy = NULL;
2848         }
2849
2850         if (!ret && !list_empty(list))
2851                 return -EBUSY;
2852         return ret;
2853 }
2854
2855 /*
2856  * Make the mem_cgroup's charge 0 if there are no tasks in it.
2857  * This enables deleting the mem_cgroup.
2858  */
2859 static int mem_cgroup_force_empty(struct mem_cgroup *mem, bool free_all)
2860 {
2861         int ret;
2862         int node, zid, shrink;
2863         int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
2864         struct cgroup *cgrp = mem->css.cgroup;
2865
2866         css_get(&mem->css);
2867
2868         shrink = 0;
2869         /* should free all ? */
2870         if (free_all)
2871                 goto try_to_free;
2872 move_account:
2873         do {
2874                 ret = -EBUSY;
2875                 if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children))
2876                         goto out;
2877                 ret = -EINTR;
2878                 if (signal_pending(current))
2879                         goto out;
2880                 /* This is for making sure all *used* pages are on an LRU. */
2881                 lru_add_drain_all();
2882                 drain_all_stock_sync();
2883                 ret = 0;
2884                 for_each_node_state(node, N_HIGH_MEMORY) {
2885                         for (zid = 0; !ret && zid < MAX_NR_ZONES; zid++) {
2886                                 enum lru_list l;
2887                                 for_each_lru(l) {
2888                                         ret = mem_cgroup_force_empty_list(mem,
2889                                                         node, zid, l);
2890                                         if (ret)
2891                                                 break;
2892                                 }
2893                         }
2894                         if (ret)
2895                                 break;
2896                 }
2897                 memcg_oom_recover(mem);
2898                 /* it seems parent cgroup doesn't have enough mem */
2899                 if (ret == -ENOMEM)
2900                         goto try_to_free;
2901                 cond_resched();
2902         /* "ret" should also be checked to ensure all lists are empty. */
2903         } while (mem->res.usage > 0 || ret);
2904 out:
2905         css_put(&mem->css);
2906         return ret;
2907
2908 try_to_free:
2909         /* returns EBUSY if there is a task or if we come here twice. */
2910         if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children) || shrink) {
2911                 ret = -EBUSY;
2912                 goto out;
2913         }
2914         /* we call try-to-free pages to make this cgroup empty */
2915         lru_add_drain_all();
2916         /* try to free all pages in this cgroup */
2917         shrink = 1;
2918         while (nr_retries && mem->res.usage > 0) {
2919                 int progress;
2920
2921                 if (signal_pending(current)) {
2922                         ret = -EINTR;
2923                         goto out;
2924                 }
2925                 progress = try_to_free_mem_cgroup_pages(mem, GFP_KERNEL,
2926                                                 false, get_swappiness(mem));
2927                 if (!progress) {
2928                         nr_retries--;
2929                         /* maybe some writeback is necessary */
2930                         congestion_wait(BLK_RW_ASYNC, HZ/10);
2931                 }
2932
2933         }
2934         lru_add_drain();
2935         /* try move_account...there may be some *locked* pages. */
2936         goto move_account;
2937 }
2938
2939 int mem_cgroup_force_empty_write(struct cgroup *cont, unsigned int event)
2940 {
2941         return mem_cgroup_force_empty(mem_cgroup_from_cont(cont), true);
2942 }
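/*
 * Illustrative usage of the force_empty interface above (a sketch of the
 * admin workflow, not kernel code; the mount point is only an example):
 *
 *   # echo 0 > /cgroups/memory/mygroup/memory.force_empty
 *   # rmdir /cgroups/memory/mygroup
 *
 * The write reclaims or moves the group's remaining charges to its parent so
 * the empty group can be removed; it fails with -EBUSY while the group still
 * has tasks or child groups.
 */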
2943
2944
2945 static u64 mem_cgroup_hierarchy_read(struct cgroup *cont, struct cftype *cft)
2946 {
2947         return mem_cgroup_from_cont(cont)->use_hierarchy;
2948 }
2949
2950 static int mem_cgroup_hierarchy_write(struct cgroup *cont, struct cftype *cft,
2951                                         u64 val)
2952 {
2953         int retval = 0;
2954         struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
2955         struct cgroup *parent = cont->parent;
2956         struct mem_cgroup *parent_mem = NULL;
2957
2958         if (parent)
2959                 parent_mem = mem_cgroup_from_cont(parent);
2960
2961         cgroup_lock();
2962         /*
2963          * If parent's use_hierarchy is set, we can't make any modifications
2964          * in the child subtrees. If it is unset, then the change can
2965          * occur, provided the current cgroup has no children.
2966          *
2967          * For the root cgroup, parent_mem is NULL, we allow value to be
2968          * set if there are no children.
2969          */
2970         if ((!parent_mem || !parent_mem->use_hierarchy) &&
2971                                 (val == 1 || val == 0)) {
2972                 if (list_empty(&cont->children))
2973                         mem->use_hierarchy = val;
2974                 else
2975                         retval = -EBUSY;
2976         } else
2977                 retval = -EINVAL;
2978         cgroup_unlock();
2979
2980         return retval;
2981 }
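/*
 * Illustrative semantics of the use_hierarchy knob written above (sketch, not
 * kernel code; paths are examples only): the flag can only be flipped while
 * the group has no children and its parent is not itself hierarchical:
 *
 *   # mkdir /cgroups/memory/parent
 *   # echo 1 > /cgroups/memory/parent/memory.use_hierarchy    <- ok, no children
 *   # mkdir /cgroups/memory/parent/child
 *   # echo 0 > /cgroups/memory/parent/memory.use_hierarchy    <- -EBUSY, has a child
 */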
2982
2983 struct mem_cgroup_idx_data {
2984         s64 val;
2985         enum mem_cgroup_stat_index idx;
2986 };
2987
2988 static int
2989 mem_cgroup_get_idx_stat(struct mem_cgroup *mem, void *data)
2990 {
2991         struct mem_cgroup_idx_data *d = data;
2992         d->val += mem_cgroup_read_stat(mem, d->idx);
2993         return 0;
2994 }
2995
2996 static void
2997 mem_cgroup_get_recursive_idx_stat(struct mem_cgroup *mem,
2998                                 enum mem_cgroup_stat_index idx, s64 *val)
2999 {
3000         struct mem_cgroup_idx_data d;
3001         d.idx = idx;
3002         d.val = 0;
3003         mem_cgroup_walk_tree(mem, &d, mem_cgroup_get_idx_stat);
3004         *val = d.val;
3005 }
3006
3007 static inline u64 mem_cgroup_usage(struct mem_cgroup *mem, bool swap)
3008 {
3009         u64 idx_val, val;
3010
3011         if (!mem_cgroup_is_root(mem)) {
3012                 if (!swap)
3013                         return res_counter_read_u64(&mem->res, RES_USAGE);
3014                 else
3015                         return res_counter_read_u64(&mem->memsw, RES_USAGE);
3016         }
3017
3018         mem_cgroup_get_recursive_idx_stat(mem, MEM_CGROUP_STAT_CACHE, &idx_val);
3019         val = idx_val;
3020         mem_cgroup_get_recursive_idx_stat(mem, MEM_CGROUP_STAT_RSS, &idx_val);
3021         val += idx_val;
3022
3023         if (swap) {
3024                 mem_cgroup_get_recursive_idx_stat(mem,
3025                                 MEM_CGROUP_STAT_SWAPOUT, &idx_val);
3026                 val += idx_val;
3027         }
3028
3029         return val << PAGE_SHIFT;
3030 }
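/*
 * For the root cgroup, usage is derived from the per-cpu statistics rather
 * than the res_counter.  Roughly, as a sketch of the computation above:
 *
 *   usage        = (STAT_CACHE + STAT_RSS)                 << PAGE_SHIFT
 *   memsw usage  = (STAT_CACHE + STAT_RSS + STAT_SWAPOUT)  << PAGE_SHIFT
 *
 * with the counters summed over the whole tree by mem_cgroup_walk_tree().
 */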
3031
3032 static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft)
3033 {
3034         struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
3035         u64 val;
3036         int type, name;
3037
3038         type = MEMFILE_TYPE(cft->private);
3039         name = MEMFILE_ATTR(cft->private);
3040         switch (type) {
3041         case _MEM:
3042                 if (name == RES_USAGE)
3043                         val = mem_cgroup_usage(mem, false);
3044                 else
3045                         val = res_counter_read_u64(&mem->res, name);
3046                 break;
3047         case _MEMSWAP:
3048                 if (name == RES_USAGE)
3049                         val = mem_cgroup_usage(mem, true);
3050                 else
3051                         val = res_counter_read_u64(&mem->memsw, name);
3052                 break;
3053         default:
3054                 BUG();
3055                 break;
3056         }
3057         return val;
3058 }
3059 /*
3060  * The users of this function are the RES_LIMIT and
3061  * RES_SOFT_LIMIT write handlers.
3062  */
3063 static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
3064                             const char *buffer)
3065 {
3066         struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
3067         int type, name;
3068         unsigned long long val;
3069         int ret;
3070
3071         type = MEMFILE_TYPE(cft->private);
3072         name = MEMFILE_ATTR(cft->private);
3073         switch (name) {
3074         case RES_LIMIT:
3075                 if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
3076                         ret = -EINVAL;
3077                         break;
3078                 }
3079                 /* This function does all the necessary parsing...reuse it */
3080                 ret = res_counter_memparse_write_strategy(buffer, &val);
3081                 if (ret)
3082                         break;
3083                 if (type == _MEM)
3084                         ret = mem_cgroup_resize_limit(memcg, val);
3085                 else
3086                         ret = mem_cgroup_resize_memsw_limit(memcg, val);
3087                 break;
3088         case RES_SOFT_LIMIT:
3089                 ret = res_counter_memparse_write_strategy(buffer, &val);
3090                 if (ret)
3091                         break;
3092                 /*
3093                  * For memsw, soft limits are hard to implement in terms
3094                  * of semantics. For now, we only support soft limits for
3095                  * memory control without swap.
3096                  */
3097                 if (type == _MEM)
3098                         ret = res_counter_set_soft_limit(&memcg->res, val);
3099                 else
3100                         ret = -EINVAL;
3101                 break;
3102         default:
3103                 ret = -EINVAL; /* should be BUG() ? */
3104                 break;
3105         }
3106         return ret;
3107 }
3108
3109 static void memcg_get_hierarchical_limit(struct mem_cgroup *memcg,
3110                 unsigned long long *mem_limit, unsigned long long *memsw_limit)
3111 {
3112         struct cgroup *cgroup;
3113         unsigned long long min_limit, min_memsw_limit, tmp;
3114
3115         min_limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
3116         min_memsw_limit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
3117         cgroup = memcg->css.cgroup;
3118         if (!memcg->use_hierarchy)
3119                 goto out;
3120
3121         while (cgroup->parent) {
3122                 cgroup = cgroup->parent;
3123                 memcg = mem_cgroup_from_cont(cgroup);
3124                 if (!memcg->use_hierarchy)
3125                         break;
3126                 tmp = res_counter_read_u64(&memcg->res, RES_LIMIT);
3127                 min_limit = min(min_limit, tmp);
3128                 tmp = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
3129                 min_memsw_limit = min(min_memsw_limit, tmp);
3130         }
3131 out:
3132         *mem_limit = min_limit;
3133         *memsw_limit = min_memsw_limit;
3134         return;
3135 }
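/*
 * Example (illustrative): the "hierarchical_memory_limit" reported via
 * memory.stat is the minimum limit along the use_hierarchy ancestor chain.
 * If a parent is limited to 512M and its child to 1G, the child reports an
 * effective hierarchical limit of 512M.
 */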
3136
3137 static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
3138 {
3139         struct mem_cgroup *mem;
3140         int type, name;
3141
3142         mem = mem_cgroup_from_cont(cont);
3143         type = MEMFILE_TYPE(event);
3144         name = MEMFILE_ATTR(event);
3145         switch (name) {
3146         case RES_MAX_USAGE:
3147                 if (type == _MEM)
3148                         res_counter_reset_max(&mem->res);
3149                 else
3150                         res_counter_reset_max(&mem->memsw);
3151                 break;
3152         case RES_FAILCNT:
3153                 if (type == _MEM)
3154                         res_counter_reset_failcnt(&mem->res);
3155                 else
3156                         res_counter_reset_failcnt(&mem->memsw);
3157                 break;
3158         }
3159
3160         return 0;
3161 }
3162
3163 static u64 mem_cgroup_move_charge_read(struct cgroup *cgrp,
3164                                         struct cftype *cft)
3165 {
3166         return mem_cgroup_from_cont(cgrp)->move_charge_at_immigrate;
3167 }
3168
3169 #ifdef CONFIG_MMU
3170 static int mem_cgroup_move_charge_write(struct cgroup *cgrp,
3171                                         struct cftype *cft, u64 val)
3172 {
3173         struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);
3174
3175         if (val >= (1 << NR_MOVE_TYPE))
3176                 return -EINVAL;
3177         /*
3178          * We check this value several times in both can_attach() and
3179          * attach(), so we need the cgroup lock to prevent this value from
3180          * becoming inconsistent.
3181          */
3182         cgroup_lock();
3183         mem->move_charge_at_immigrate = val;
3184         cgroup_unlock();
3185
3186         return 0;
3187 }
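/*
 * Illustrative usage (sketch, not kernel code; the mount point is only an
 * example): move_charge_at_immigrate is a bitmask of MOVE_CHARGE_TYPE_*
 * flags; assuming bit 0 selects anonymous pages,
 *
 *   # echo 1 > /cgroups/memory/dst/memory.move_charge_at_immigrate
 *   # echo <pid> > /cgroups/memory/dst/tasks
 *
 * makes the destination group take over the moved task's anon charges (see
 * mem_cgroup_can_attach() and mem_cgroup_move_charge() below).
 */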
3188 #else
3189 static int mem_cgroup_move_charge_write(struct cgroup *cgrp,
3190                                         struct cftype *cft, u64 val)
3191 {
3192         return -ENOSYS;
3193 }
3194 #endif
3195
3196
3197 /* For reading statistics */
3198 enum {
3199         MCS_CACHE,
3200         MCS_RSS,
3201         MCS_FILE_MAPPED,
3202         MCS_PGPGIN,
3203         MCS_PGPGOUT,
3204         MCS_SWAP,
3205         MCS_INACTIVE_ANON,
3206         MCS_ACTIVE_ANON,
3207         MCS_INACTIVE_FILE,
3208         MCS_ACTIVE_FILE,
3209         MCS_UNEVICTABLE,
3210         NR_MCS_STAT,
3211 };
3212
3213 struct mcs_total_stat {
3214         s64 stat[NR_MCS_STAT];
3215 };
3216
3217 struct {
3218         char *local_name;
3219         char *total_name;
3220 } memcg_stat_strings[NR_MCS_STAT] = {
3221         {"cache", "total_cache"},
3222         {"rss", "total_rss"},
3223         {"mapped_file", "total_mapped_file"},
3224         {"pgpgin", "total_pgpgin"},
3225         {"pgpgout", "total_pgpgout"},
3226         {"swap", "total_swap"},
3227         {"inactive_anon", "total_inactive_anon"},
3228         {"active_anon", "total_active_anon"},
3229         {"inactive_file", "total_inactive_file"},
3230         {"active_file", "total_active_file"},
3231         {"unevictable", "total_unevictable"}
3232 };
3233
3234
3235 static int mem_cgroup_get_local_stat(struct mem_cgroup *mem, void *data)
3236 {
3237         struct mcs_total_stat *s = data;
3238         s64 val;
3239
3240         /* per cpu stat */
3241         val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_CACHE);
3242         s->stat[MCS_CACHE] += val * PAGE_SIZE;
3243         val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_RSS);
3244         s->stat[MCS_RSS] += val * PAGE_SIZE;
3245         val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_FILE_MAPPED);
3246         s->stat[MCS_FILE_MAPPED] += val * PAGE_SIZE;
3247         val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_PGPGIN_COUNT);
3248         s->stat[MCS_PGPGIN] += val;
3249         val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_PGPGOUT_COUNT);
3250         s->stat[MCS_PGPGOUT] += val;
3251         if (do_swap_account) {
3252                 val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_SWAPOUT);
3253                 s->stat[MCS_SWAP] += val * PAGE_SIZE;
3254         }
3255
3256         /* per zone stat */
3257         val = mem_cgroup_get_local_zonestat(mem, LRU_INACTIVE_ANON);
3258         s->stat[MCS_INACTIVE_ANON] += val * PAGE_SIZE;
3259         val = mem_cgroup_get_local_zonestat(mem, LRU_ACTIVE_ANON);
3260         s->stat[MCS_ACTIVE_ANON] += val * PAGE_SIZE;
3261         val = mem_cgroup_get_local_zonestat(mem, LRU_INACTIVE_FILE);
3262         s->stat[MCS_INACTIVE_FILE] += val * PAGE_SIZE;
3263         val = mem_cgroup_get_local_zonestat(mem, LRU_ACTIVE_FILE);
3264         s->stat[MCS_ACTIVE_FILE] += val * PAGE_SIZE;
3265         val = mem_cgroup_get_local_zonestat(mem, LRU_UNEVICTABLE);
3266         s->stat[MCS_UNEVICTABLE] += val * PAGE_SIZE;
3267         return 0;
3268 }
3269
3270 static void
3271 mem_cgroup_get_total_stat(struct mem_cgroup *mem, struct mcs_total_stat *s)
3272 {
3273         mem_cgroup_walk_tree(mem, s, mem_cgroup_get_local_stat);
3274 }
3275
3276 static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
3277                                  struct cgroup_map_cb *cb)
3278 {
3279         struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont);
3280         struct mcs_total_stat mystat;
3281         int i;
3282
3283         memset(&mystat, 0, sizeof(mystat));
3284         mem_cgroup_get_local_stat(mem_cont, &mystat);
3285
3286         for (i = 0; i < NR_MCS_STAT; i++) {
3287                 if (i == MCS_SWAP && !do_swap_account)
3288                         continue;
3289                 cb->fill(cb, memcg_stat_strings[i].local_name, mystat.stat[i]);
3290         }
3291
3292         /* Hierarchical information */
3293         {
3294                 unsigned long long limit, memsw_limit;
3295                 memcg_get_hierarchical_limit(mem_cont, &limit, &memsw_limit);
3296                 cb->fill(cb, "hierarchical_memory_limit", limit);
3297                 if (do_swap_account)
3298                         cb->fill(cb, "hierarchical_memsw_limit", memsw_limit);
3299         }
3300
3301         memset(&mystat, 0, sizeof(mystat));
3302         mem_cgroup_get_total_stat(mem_cont, &mystat);
3303         for (i = 0; i < NR_MCS_STAT; i++) {
3304                 if (i == MCS_SWAP && !do_swap_account)
3305                         continue;
3306                 cb->fill(cb, memcg_stat_strings[i].total_name, mystat.stat[i]);
3307         }
3308
3309 #ifdef CONFIG_DEBUG_VM
3310         cb->fill(cb, "inactive_ratio", calc_inactive_ratio(mem_cont, NULL));
3311
3312         {
3313                 int nid, zid;
3314                 struct mem_cgroup_per_zone *mz;
3315                 unsigned long recent_rotated[2] = {0, 0};
3316                 unsigned long recent_scanned[2] = {0, 0};
3317
3318                 for_each_online_node(nid)
3319                         for (zid = 0; zid < MAX_NR_ZONES; zid++) {
3320                                 mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
3321
3322                                 recent_rotated[0] +=
3323                                         mz->reclaim_stat.recent_rotated[0];
3324                                 recent_rotated[1] +=
3325                                         mz->reclaim_stat.recent_rotated[1];
3326                                 recent_scanned[0] +=
3327                                         mz->reclaim_stat.recent_scanned[0];
3328                                 recent_scanned[1] +=
3329                                         mz->reclaim_stat.recent_scanned[1];
3330                         }
3331                 cb->fill(cb, "recent_rotated_anon", recent_rotated[0]);
3332                 cb->fill(cb, "recent_rotated_file", recent_rotated[1]);
3333                 cb->fill(cb, "recent_scanned_anon", recent_scanned[0]);
3334                 cb->fill(cb, "recent_scanned_file", recent_scanned[1]);
3335         }
3336 #endif
3337
3338         return 0;
3339 }
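/*
 * For orientation, a read of memory.stat produced by the handler above looks
 * roughly like this (values illustrative):
 *
 *   cache 1048576
 *   rss 2097152
 *   mapped_file 0
 *   pgpgin 1234
 *   pgpgout 567
 *   ...
 *   hierarchical_memory_limit 536870912
 *   total_cache 1048576
 *   total_rss 2097152
 *   ...
 */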
3340
3341 static u64 mem_cgroup_swappiness_read(struct cgroup *cgrp, struct cftype *cft)
3342 {
3343         struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
3344
3345         return get_swappiness(memcg);
3346 }
3347
3348 static int mem_cgroup_swappiness_write(struct cgroup *cgrp, struct cftype *cft,
3349                                        u64 val)
3350 {
3351         struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
3352         struct mem_cgroup *parent;
3353
3354         if (val > 100)
3355                 return -EINVAL;
3356
3357         if (cgrp->parent == NULL)
3358                 return -EINVAL;
3359
3360         parent = mem_cgroup_from_cont(cgrp->parent);
3361
3362         cgroup_lock();
3363
3364         /* If under hierarchy, only empty-root can set this value */
3365         if ((parent->use_hierarchy) ||
3366             (memcg->use_hierarchy && !list_empty(&cgrp->children))) {
3367                 cgroup_unlock();
3368                 return -EINVAL;
3369         }
3370
3371         spin_lock(&memcg->reclaim_param_lock);
3372         memcg->swappiness = val;
3373         spin_unlock(&memcg->reclaim_param_lock);
3374
3375         cgroup_unlock();
3376
3377         return 0;
3378 }
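/*
 * Illustrative usage (not kernel code): the per-memcg swappiness overrides
 * the global vm.swappiness for reclaim within this group, e.g.
 *
 *   # echo 0 > memory.swappiness
 *
 * Values above 100 are rejected, as are the root cgroup and any group whose
 * parent uses hierarchy, or that itself uses hierarchy and has children (see
 * the checks above).
 */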
3379
3380 static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
3381 {
3382         struct mem_cgroup_threshold_ary *t;
3383         u64 usage;
3384         int i;
3385
3386         rcu_read_lock();
3387         if (!swap)
3388                 t = rcu_dereference(memcg->thresholds);
3389         else
3390                 t = rcu_dereference(memcg->memsw_thresholds);
3391
3392         if (!t)
3393                 goto unlock;
3394
3395         usage = mem_cgroup_usage(memcg, swap);
3396
3397         /*
3398          * current_threshold points to the threshold just below usage.
3399          * If that is no longer true, a threshold was crossed after the
3400          * last call of __mem_cgroup_threshold().
3401          */
3402         i = atomic_read(&t->current_threshold);
3403
3404         /*
3405          * Iterate backward over the array of thresholds starting from
3406          * current_threshold and check if a threshold is crossed.
3407          * If none of the thresholds below usage is crossed, we read
3408          * only one element of the array here.
3409          */
3410         for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
3411                 eventfd_signal(t->entries[i].eventfd, 1);
3412
3413         /* i = current_threshold + 1 */
3414         i++;
3415
3416         /*
3417          * Iterate forward over the array of thresholds starting from
3418          * current_threshold+1 and check if a threshold is crossed.
3419          * If none of the thresholds above usage is crossed, we read
3420          * only one element of the array here.
3421          */
3422         for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
3423                 eventfd_signal(t->entries[i].eventfd, 1);
3424
3425         /* Update current_threshold */
3426         atomic_set(&t->current_threshold, i - 1);
3427 unlock:
3428         rcu_read_unlock();
3429 }
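/*
 * Worked example (illustrative): with thresholds {4M, 8M, 16M} and usage at
 * 6M, current_threshold is 0 (the 4M entry).  If usage grows to 12M, the
 * forward walk above signals the 8M eventfd, stops before 16M and leaves
 * current_threshold at 1.  If usage instead drops to 2M, the backward walk
 * signals the 4M eventfd and current_threshold becomes -1.
 */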
3430
3431 static void mem_cgroup_threshold(struct mem_cgroup *memcg)
3432 {
3433         __mem_cgroup_threshold(memcg, false);
3434         if (do_swap_account)
3435                 __mem_cgroup_threshold(memcg, true);
3436 }
3437
3438 static int compare_thresholds(const void *a, const void *b)
3439 {
3440         const struct mem_cgroup_threshold *_a = a;
3441         const struct mem_cgroup_threshold *_b = b;
3442
3443         return _a->threshold - _b->threshold;
3444 }
3445
3446 static int mem_cgroup_oom_notify_cb(struct mem_cgroup *mem, void *data)
3447 {
3448         struct mem_cgroup_eventfd_list *ev;
3449
3450         list_for_each_entry(ev, &mem->oom_notify, list)
3451                 eventfd_signal(ev->eventfd, 1);
3452         return 0;
3453 }
3454
3455 static void mem_cgroup_oom_notify(struct mem_cgroup *mem)
3456 {
3457         mem_cgroup_walk_tree(mem, NULL, mem_cgroup_oom_notify_cb);
3458 }
3459
3460 static int mem_cgroup_usage_register_event(struct cgroup *cgrp,
3461         struct cftype *cft, struct eventfd_ctx *eventfd, const char *args)
3462 {
3463         struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
3464         struct mem_cgroup_threshold_ary *thresholds, *thresholds_new;
3465         int type = MEMFILE_TYPE(cft->private);
3466         u64 threshold, usage;
3467         int size;
3468         int i, ret;
3469
3470         ret = res_counter_memparse_write_strategy(args, &threshold);
3471         if (ret)
3472                 return ret;
3473
3474         mutex_lock(&memcg->thresholds_lock);
3475         if (type == _MEM)
3476                 thresholds = memcg->thresholds;
3477         else if (type == _MEMSWAP)
3478                 thresholds = memcg->memsw_thresholds;
3479         else
3480                 BUG();
3481
3482         usage = mem_cgroup_usage(memcg, type == _MEMSWAP);
3483
3484         /* Check if a threshold crossed before adding a new one */
3485         if (thresholds)
3486                 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
3487
3488         if (thresholds)
3489                 size = thresholds->size + 1;
3490         else
3491                 size = 1;
3492
3493         /* Allocate memory for new array of thresholds */
3494         thresholds_new = kmalloc(sizeof(*thresholds_new) +
3495                         size * sizeof(struct mem_cgroup_threshold),
3496                         GFP_KERNEL);
3497         if (!thresholds_new) {
3498                 ret = -ENOMEM;
3499                 goto unlock;
3500         }
3501         thresholds_new->size = size;
3502
3503         /* Copy thresholds (if any) to new array */
3504         if (thresholds)
3505                 memcpy(thresholds_new->entries, thresholds->entries,
3506                                 thresholds->size *
3507                                 sizeof(struct mem_cgroup_threshold));
3508         /* Add new threshold */
3509         thresholds_new->entries[size - 1].eventfd = eventfd;
3510         thresholds_new->entries[size - 1].threshold = threshold;
3511
3512         /* Sort thresholds. Registering of new threshold isn't time-critical */
3513         sort(thresholds_new->entries, size,
3514                         sizeof(struct mem_cgroup_threshold),
3515                         compare_thresholds, NULL);
3516
3517         /* Find current threshold */
3518         atomic_set(&thresholds_new->current_threshold, -1);
3519         for (i = 0; i < size; i++) {
3520                 if (thresholds_new->entries[i].threshold < usage) {
3521                         /*
3522                          * thresholds_new->current_threshold will not be used
3523                          * until rcu_assign_pointer(), so it's safe to increment
3524                          * it here.
3525                          */
3526                         atomic_inc(&thresholds_new->current_threshold);
3527                 }
3528         }
3529
3530         if (type == _MEM)
3531                 rcu_assign_pointer(memcg->thresholds, thresholds_new);
3532         else
3533                 rcu_assign_pointer(memcg->memsw_thresholds, thresholds_new);
3534
3535         /* Make sure nobody still uses the old thresholds array before freeing it */
3536         synchronize_rcu();
3537
3538         kfree(thresholds);
3539 unlock:
3540         mutex_unlock(&memcg->thresholds_lock);
3541
3542         return ret;
3543 }
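/*
 * Illustrative registration from userspace (a sketch of the cgroup eventfd
 * interface, not kernel code): a threshold is armed by writing the eventfd,
 * the fd of memory.usage_in_bytes (or memory.memsw.usage_in_bytes) and the
 * threshold value into cgroup.event_control, e.g.
 *
 *   efd = eventfd(0, 0);
 *   ufd = open("memory.usage_in_bytes", O_RDONLY);
 *   cfd = open("cgroup.event_control", O_WRONLY);
 *   dprintf(cfd, "%d %d 64M", efd, ufd);
 *
 * after which a read of efd returns whenever usage crosses 64M in either
 * direction.
 */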
3544
3545 static int mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
3546         struct cftype *cft, struct eventfd_ctx *eventfd)
3547 {
3548         struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
3549         struct mem_cgroup_threshold_ary *thresholds, *thresholds_new;
3550         int type = MEMFILE_TYPE(cft->private);
3551         u64 usage;
3552         int size = 0;
3553         int i, j, ret;
3554
3555         mutex_lock(&memcg->thresholds_lock);
3556         if (type == _MEM)
3557                 thresholds = memcg->thresholds;
3558         else if (type == _MEMSWAP)
3559                 thresholds = memcg->memsw_thresholds;
3560         else
3561                 BUG();
3562
3563         /*
3564          * Something went wrong if we are trying to unregister a threshold
3565          * when we don't have any thresholds
3566          */
3567         BUG_ON(!thresholds);
3568
3569         usage = mem_cgroup_usage(memcg, type == _MEMSWAP);
3570
3571         /* Check if a threshold crossed before removing */
3572         __mem_cgroup_threshold(memcg, type == _MEMSWAP);
3573
3574         /* Calculate the new number of thresholds */
3575         for (i = 0; i < thresholds->size; i++) {
3576                 if (thresholds->entries[i].eventfd != eventfd)
3577                         size++;
3578         }
3579
3580         /* Set thresholds array to NULL if we don't have thresholds */
3581         if (!size) {
3582                 thresholds_new = NULL;
3583                 goto assign;
3584         }
3585
3586         /* Allocate memory for new array of thresholds */
3587         thresholds_new = kmalloc(sizeof(*thresholds_new) +
3588                         size * sizeof(struct mem_cgroup_threshold),
3589                         GFP_KERNEL);
3590         if (!thresholds_new) {
3591                 ret = -ENOMEM;
3592                 goto unlock;
3593         }
3594         thresholds_new->size = size;
3595
3596         /* Copy thresholds and find current threshold */
3597         atomic_set(&thresholds_new->current_threshold, -1);
3598         for (i = 0, j = 0; i < thresholds->size; i++) {
3599                 if (thresholds->entries[i].eventfd == eventfd)
3600                         continue;
3601
3602                 thresholds_new->entries[j] = thresholds->entries[i];
3603                 if (thresholds_new->entries[j].threshold < usage) {
3604                         /*
3605                          * thresholds_new->current_threshold will not be used
3606                          * until rcu_assign_pointer(), so it's safe to increment
3607                          * it here.
3608                          */
3609                         atomic_inc(&thresholds_new->current_threshold);
3610                 }
3611                 j++;
3612         }
3613
3614 assign:
3615         if (type == _MEM)
3616                 rcu_assign_pointer(memcg->thresholds, thresholds_new);
3617         else
3618                 rcu_assign_pointer(memcg->memsw_thresholds, thresholds_new);
3619
3620         /* Make sure nobody still uses the old thresholds array before freeing it */
3621         synchronize_rcu();
3622
3623         kfree(thresholds);
3624 unlock:
3625         mutex_unlock(&memcg->thresholds_lock);
3626
3627         return ret;
3628 }
3629
3630 static int mem_cgroup_oom_register_event(struct cgroup *cgrp,
3631         struct cftype *cft, struct eventfd_ctx *eventfd, const char *args)
3632 {
3633         struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
3634         struct mem_cgroup_eventfd_list *event;
3635         int type = MEMFILE_TYPE(cft->private);
3636
3637         BUG_ON(type != _OOM_TYPE);
3638         event = kmalloc(sizeof(*event), GFP_KERNEL);
3639         if (!event)
3640                 return -ENOMEM;
3641
3642         mutex_lock(&memcg_oom_mutex);
3643
3644         event->eventfd = eventfd;
3645         list_add(&event->list, &memcg->oom_notify);
3646
3647         /* already in OOM ? */
3648         if (atomic_read(&memcg->oom_lock))
3649                 eventfd_signal(eventfd, 1);
3650         mutex_unlock(&memcg_oom_mutex);
3651
3652         return 0;
3653 }
3654
3655 static int mem_cgroup_oom_unregister_event(struct cgroup *cgrp,
3656         struct cftype *cft, struct eventfd_ctx *eventfd)
3657 {
3658         struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);
3659         struct mem_cgroup_eventfd_list *ev, *tmp;
3660         int type = MEMFILE_TYPE(cft->private);
3661
3662         BUG_ON(type != _OOM_TYPE);
3663
3664         mutex_lock(&memcg_oom_mutex);
3665
3666         list_for_each_entry_safe(ev, tmp, &mem->oom_notify, list) {
3667                 if (ev->eventfd == eventfd) {
3668                         list_del(&ev->list);
3669                         kfree(ev);
3670                 }
3671         }
3672
3673         mutex_unlock(&memcg_oom_mutex);
3674
3675         return 0;
3676 }
3677
3678 static int mem_cgroup_oom_control_read(struct cgroup *cgrp,
3679         struct cftype *cft,  struct cgroup_map_cb *cb)
3680 {
3681         struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);
3682
3683         cb->fill(cb, "oom_kill_disable", mem->oom_kill_disable);
3684
3685         if (atomic_read(&mem->oom_lock))
3686                 cb->fill(cb, "under_oom", 1);
3687         else
3688                 cb->fill(cb, "under_oom", 0);
3689         return 0;
3690 }
3691
3692 /* A write of 1 disables the OOM killer for this memcg; 0 re-enables it. */
3693
3694 static int mem_cgroup_oom_control_write(struct cgroup *cgrp,
3695         struct cftype *cft, u64 val)
3696 {
3697         struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);
3698         struct mem_cgroup *parent;
3699
3700         /* cannot be set on the root cgroup, and only 0 and 1 are allowed */
3701         if (!cgrp->parent || !((val == 0) || (val == 1)))
3702                 return -EINVAL;
3703
3704         parent = mem_cgroup_from_cont(cgrp->parent);
3705
3706         cgroup_lock();
3707         /* oom-kill-disable is a flag for subhierarchy. */
3708         if ((parent->use_hierarchy) ||
3709             (mem->use_hierarchy && !list_empty(&cgrp->children))) {
3710                 cgroup_unlock();
3711                 return -EINVAL;
3712         }
3713         mem->oom_kill_disable = val;
3714         cgroup_unlock();
3715         return 0;
3716 }
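/*
 * Illustrative view of the oom_control interface implemented above (sketch,
 * not kernel code):
 *
 *   # cat memory.oom_control
 *   oom_kill_disable 0
 *   under_oom 0
 *   # echo 1 > memory.oom_control
 *
 * With oom_kill_disable set, tasks hitting the limit are put to sleep instead
 * of being killed, until memory is freed or the limit is raised.  An eventfd
 * registered on memory.oom_control through cgroup.event_control (see
 * mem_cgroup_oom_register_event() above) is signalled when the group enters
 * OOM, and immediately if it is already under OOM.
 */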
3717
3718 static struct cftype mem_cgroup_files[] = {
3719         {
3720                 .name = "usage_in_bytes",
3721                 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
3722                 .read_u64 = mem_cgroup_read,
3723                 .register_event = mem_cgroup_usage_register_event,
3724                 .unregister_event = mem_cgroup_usage_unregister_event,
3725         },
3726         {
3727                 .name = "max_usage_in_bytes",
3728                 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
3729                 .trigger = mem_cgroup_reset,
3730                 .read_u64 = mem_cgroup_read,
3731         },
3732         {
3733                 .name = "limit_in_bytes",
3734                 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
3735                 .write_string = mem_cgroup_write,
3736                 .read_u64 = mem_cgroup_read,
3737         },
3738         {
3739                 .name = "soft_limit_in_bytes",
3740                 .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
3741                 .write_string = mem_cgroup_write,
3742                 .read_u64 = mem_cgroup_read,
3743         },
3744         {
3745                 .name = "failcnt",
3746                 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
3747                 .trigger = mem_cgroup_reset,
3748                 .read_u64 = mem_cgroup_read,
3749         },
3750         {
3751                 .name = "stat",
3752                 .read_map = mem_control_stat_show,
3753         },
3754         {
3755                 .name = "force_empty",
3756                 .trigger = mem_cgroup_force_empty_write,
3757         },
3758         {
3759                 .name = "use_hierarchy",
3760                 .write_u64 = mem_cgroup_hierarchy_write,
3761                 .read_u64 = mem_cgroup_hierarchy_read,
3762         },
3763         {
3764                 .name = "swappiness",
3765                 .read_u64 = mem_cgroup_swappiness_read,
3766                 .write_u64 = mem_cgroup_swappiness_write,
3767         },
3768         {
3769                 .name = "move_charge_at_immigrate",
3770                 .read_u64 = mem_cgroup_move_charge_read,
3771                 .write_u64 = mem_cgroup_move_charge_write,
3772         },
3773         {
3774                 .name = "oom_control",
3775                 .read_map = mem_cgroup_oom_control_read,
3776                 .write_u64 = mem_cgroup_oom_control_write,
3777                 .register_event = mem_cgroup_oom_register_event,
3778                 .unregister_event = mem_cgroup_oom_unregister_event,
3779                 .private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
3780         },
3781 };
3782
3783 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
3784 static struct cftype memsw_cgroup_files[] = {
3785         {
3786                 .name = "memsw.usage_in_bytes",
3787                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
3788                 .read_u64 = mem_cgroup_read,
3789                 .register_event = mem_cgroup_usage_register_event,
3790                 .unregister_event = mem_cgroup_usage_unregister_event,
3791         },
3792         {
3793                 .name = "memsw.max_usage_in_bytes",
3794                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
3795                 .trigger = mem_cgroup_reset,
3796                 .read_u64 = mem_cgroup_read,
3797         },
3798         {
3799                 .name = "memsw.limit_in_bytes",
3800                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
3801                 .write_string = mem_cgroup_write,
3802                 .read_u64 = mem_cgroup_read,
3803         },
3804         {
3805                 .name = "memsw.failcnt",
3806                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
3807                 .trigger = mem_cgroup_reset,
3808                 .read_u64 = mem_cgroup_read,
3809         },
3810 };
3811
3812 static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
3813 {
3814         if (!do_swap_account)
3815                 return 0;
3816         return cgroup_add_files(cont, ss, memsw_cgroup_files,
3817                                 ARRAY_SIZE(memsw_cgroup_files));
3818 };
3819 #else
3820 static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
3821 {
3822         return 0;
3823 }
3824 #endif
3825
3826 static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
3827 {
3828         struct mem_cgroup_per_node *pn;
3829         struct mem_cgroup_per_zone *mz;
3830         enum lru_list l;
3831         int zone, tmp = node;
3832         /*
3833          * This routine is called for each possible node, but it is a bug
3834          * to call kmalloc() against an offline node.
3835          *
3836          * TODO: this routine can waste much memory for nodes which will
3837          *       never be onlined. It's better to use a memory hotplug callback
3838          *       function.
3839          */
3840         if (!node_state(node, N_NORMAL_MEMORY))
3841                 tmp = -1;
3842         pn = kmalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
3843         if (!pn)
3844                 return 1;
3845
3846         mem->info.nodeinfo[node] = pn;
3847         memset(pn, 0, sizeof(*pn));
3848
3849         for (zone = 0; zone < MAX_NR_ZONES; zone++) {
3850                 mz = &pn->zoneinfo[zone];
3851                 for_each_lru(l)
3852                         INIT_LIST_HEAD(&mz->lists[l]);
3853                 mz->usage_in_excess = 0;
3854                 mz->on_tree = false;
3855                 mz->mem = mem;
3856         }
3857         return 0;
3858 }
3859
3860 static void free_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
3861 {
3862         kfree(mem->info.nodeinfo[node]);
3863 }
3864
3865 static struct mem_cgroup *mem_cgroup_alloc(void)
3866 {
3867         struct mem_cgroup *mem;
3868         int size = sizeof(struct mem_cgroup);
3869
3870         /* Can be very big if MAX_NUMNODES is very big */
3871         if (size < PAGE_SIZE)
3872                 mem = kmalloc(size, GFP_KERNEL);
3873         else
3874                 mem = vmalloc(size);
3875
3876         if (!mem)
3877                 return NULL;
3878
3879         memset(mem, 0, size);
3880         mem->stat = alloc_percpu(struct mem_cgroup_stat_cpu);
3881         if (!mem->stat) {
3882                 if (size < PAGE_SIZE)
3883                         kfree(mem);
3884                 else
3885                         vfree(mem);
3886                 mem = NULL;
3887         }
3888         return mem;
3889 }
3890
3891 /*
3892  * When destroying a mem_cgroup, references from swap_cgroup can remain.
3893  * (scanning them all at force_empty is too costly...)
3894  *
3895  * Instead of clearing all references at force_empty, we remember
3896  * the number of references from swap_cgroup and free the mem_cgroup when
3897  * it goes down to 0.
3898  *
3899  * Removal of the cgroup itself succeeds regardless of refs from swap.
3900  */
3901
3902 static void __mem_cgroup_free(struct mem_cgroup *mem)
3903 {
3904         int node;
3905
3906         mem_cgroup_remove_from_trees(mem);
3907         free_css_id(&mem_cgroup_subsys, &mem->css);
3908
3909         for_each_node_state(node, N_POSSIBLE)
3910                 free_mem_cgroup_per_zone_info(mem, node);
3911
3912         free_percpu(mem->stat);
3913         if (sizeof(struct mem_cgroup) < PAGE_SIZE)
3914                 kfree(mem);
3915         else
3916                 vfree(mem);
3917 }
3918
3919 static void mem_cgroup_get(struct mem_cgroup *mem)
3920 {
3921         atomic_inc(&mem->refcnt);
3922 }
3923
3924 static void __mem_cgroup_put(struct mem_cgroup *mem, int count)
3925 {
3926         if (atomic_sub_and_test(count, &mem->refcnt)) {
3927                 struct mem_cgroup *parent = parent_mem_cgroup(mem);
3928                 __mem_cgroup_free(mem);
3929                 if (parent)
3930                         mem_cgroup_put(parent);
3931         }
3932 }
3933
3934 static void mem_cgroup_put(struct mem_cgroup *mem)
3935 {
3936         __mem_cgroup_put(mem, 1);
3937 }
3938
3939 /*
3940  * Returns the parent mem_cgroup in the memcg hierarchy when hierarchy is enabled.
3941  */
3942 static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem)
3943 {
3944         if (!mem->res.parent)
3945                 return NULL;
3946         return mem_cgroup_from_res_counter(mem->res.parent, res);
3947 }
3948
3949 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
3950 static void __init enable_swap_cgroup(void)
3951 {
3952         if (!mem_cgroup_disabled() && really_do_swap_account)
3953                 do_swap_account = 1;
3954 }
3955 #else
3956 static void __init enable_swap_cgroup(void)
3957 {
3958 }
3959 #endif
3960
3961 static int mem_cgroup_soft_limit_tree_init(void)
3962 {
3963         struct mem_cgroup_tree_per_node *rtpn;
3964         struct mem_cgroup_tree_per_zone *rtpz;
3965         int tmp, node, zone;
3966
3967         for_each_node_state(node, N_POSSIBLE) {
3968                 tmp = node;
3969                 if (!node_state(node, N_NORMAL_MEMORY))
3970                         tmp = -1;
3971                 rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, tmp);
3972                 if (!rtpn)
3973                         return 1;
3974
3975                 soft_limit_tree.rb_tree_per_node[node] = rtpn;
3976
3977                 for (zone = 0; zone < MAX_NR_ZONES; zone++) {
3978                         rtpz = &rtpn->rb_tree_per_zone[zone];
3979                         rtpz->rb_root = RB_ROOT;
3980                         spin_lock_init(&rtpz->lock);
3981                 }
3982         }
3983         return 0;
3984 }
3985
3986 static struct cgroup_subsys_state * __ref
3987 mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
3988 {
3989         struct mem_cgroup *mem, *parent;
3990         long error = -ENOMEM;
3991         int node;
3992
3993         mem = mem_cgroup_alloc();
3994         if (!mem)
3995                 return ERR_PTR(error);
3996
3997         for_each_node_state(node, N_POSSIBLE)
3998                 if (alloc_mem_cgroup_per_zone_info(mem, node))
3999                         goto free_out;
4000
4001         /* root ? */
4002         if (cont->parent == NULL) {
4003                 int cpu;
4004                 enable_swap_cgroup();
4005                 parent = NULL;
4006                 root_mem_cgroup = mem;
4007                 if (mem_cgroup_soft_limit_tree_init())
4008                         goto free_out;
4009                 for_each_possible_cpu(cpu) {
4010                         struct memcg_stock_pcp *stock =
4011                                                 &per_cpu(memcg_stock, cpu);
4012                         INIT_WORK(&stock->work, drain_local_stock);
4013                 }
4014                 hotcpu_notifier(memcg_stock_cpu_callback, 0);
4015         } else {
4016                 parent = mem_cgroup_from_cont(cont->parent);
4017                 mem->use_hierarchy = parent->use_hierarchy;
4018                 mem->oom_kill_disable = parent->oom_kill_disable;
4019         }
4020
4021         if (parent && parent->use_hierarchy) {
4022                 res_counter_init(&mem->res, &parent->res);
4023                 res_counter_init(&mem->memsw, &parent->memsw);
4024                 /*
4025                  * We increment refcnt of the parent to ensure that we can
4026                  * safely access it on res_counter_charge/uncharge.
4027                  * This refcnt will be decremented when freeing this
4028                  * mem_cgroup(see mem_cgroup_put).
4029                  */
4030                 mem_cgroup_get(parent);
4031         } else {
4032                 res_counter_init(&mem->res, NULL);
4033                 res_counter_init(&mem->memsw, NULL);
4034         }
4035         mem->last_scanned_child = 0;
4036         spin_lock_init(&mem->reclaim_param_lock);
4037         INIT_LIST_HEAD(&mem->oom_notify);
4038
4039         if (parent)
4040                 mem->swappiness = get_swappiness(parent);
4041         atomic_set(&mem->refcnt, 1);
4042         mem->move_charge_at_immigrate = 0;
4043         mutex_init(&mem->thresholds_lock);
4044         return &mem->css;
4045 free_out:
4046         __mem_cgroup_free(mem);
4047         root_mem_cgroup = NULL;
4048         return ERR_PTR(error);
4049 }
4050
4051 static int mem_cgroup_pre_destroy(struct cgroup_subsys *ss,
4052                                         struct cgroup *cont)
4053 {
4054         struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
4055
4056         return mem_cgroup_force_empty(mem, false);
4057 }
4058
4059 static void mem_cgroup_destroy(struct cgroup_subsys *ss,
4060                                 struct cgroup *cont)
4061 {
4062         struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
4063
4064         mem_cgroup_put(mem);
4065 }
4066
4067 static int mem_cgroup_populate(struct cgroup_subsys *ss,
4068                                 struct cgroup *cont)
4069 {
4070         int ret;
4071
4072         ret = cgroup_add_files(cont, ss, mem_cgroup_files,
4073                                 ARRAY_SIZE(mem_cgroup_files));
4074
4075         if (!ret)
4076                 ret = register_memsw_files(cont, ss);
4077         return ret;
4078 }
4079
4080 #ifdef CONFIG_MMU
4081 /* Handlers for move charge at task migration. */
4082 #define PRECHARGE_COUNT_AT_ONCE 256
4083 static int mem_cgroup_do_precharge(unsigned long count)
4084 {
4085         int ret = 0;
4086         int batch_count = PRECHARGE_COUNT_AT_ONCE;
4087         struct mem_cgroup *mem = mc.to;
4088
4089         if (mem_cgroup_is_root(mem)) {
4090                 mc.precharge += count;
4091                 /* we don't need css_get for root */
4092                 return ret;
4093         }
4094         /* try to charge at once */
4095         if (count > 1) {
4096                 struct res_counter *dummy;
4097                 /*
4098                  * "mem" cannot be under rmdir() because we've already checked
4099                  * by cgroup_lock_live_cgroup() that it is not removed and we
4100                  * are still under the same cgroup_mutex. So we can postpone
4101                  * css_get().
4102                  */
4103                 if (res_counter_charge(&mem->res, PAGE_SIZE * count, &dummy))
4104                         goto one_by_one;
4105                 if (do_swap_account && res_counter_charge(&mem->memsw,
4106                                                 PAGE_SIZE * count, &dummy)) {
4107                         res_counter_uncharge(&mem->res, PAGE_SIZE * count);
4108                         goto one_by_one;
4109                 }
4110                 mc.precharge += count;
4111                 VM_BUG_ON(test_bit(CSS_ROOT, &mem->css.flags));
4112                 WARN_ON_ONCE(count > INT_MAX);
4113                 __css_get(&mem->css, (int)count);
4114                 return ret;
4115         }
4116 one_by_one:
4117         /* fall back to one by one charge */
4118         while (count--) {
4119                 if (signal_pending(current)) {
4120                         ret = -EINTR;
4121                         break;
4122                 }
4123                 if (!batch_count--) {
4124                         batch_count = PRECHARGE_COUNT_AT_ONCE;
4125                         cond_resched();
4126                 }
4127                 ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem, false);
4128                 if (ret || !mem)
4129                         /* mem_cgroup_clear_mc() will do uncharge later */
4130                         return -ENOMEM;
4131                 mc.precharge++;
4132         }
4133         return ret;
4134 }
4135
4136 /**
4137  * is_target_pte_for_mc - check whether a pte is a valid target for move charge
4138  * @vma: the vma the pte to be checked belongs to
4139  * @addr: the address corresponding to the pte to be checked
4140  * @ptent: the pte to be checked
4141  * @target: the pointer where the target page or swap entry will be stored (can be NULL)
4142  *
4143  * Returns
4144  *   0(MC_TARGET_NONE): if the pte is not a target for move charge.
4145  *   1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for
4146  *     move charge. If @target is not NULL, the page is stored in target->page
4147  *     with an extra refcount taken (callers should handle it).
4148  *   2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
4149  *     target for charge migration. If @target is not NULL, the entry is stored
4150  *     in target->ent.
4151  *
4152  * Called with pte lock held.
4153  */
4154 union mc_target {
4155         struct page     *page;
4156         swp_entry_t     ent;
4157 };
4158
4159 enum mc_target_type {
4160         MC_TARGET_NONE, /* not used */
4161         MC_TARGET_PAGE,
4162         MC_TARGET_SWAP,
4163 };
4164
4165 static int is_target_pte_for_mc(struct vm_area_struct *vma,
4166                 unsigned long addr, pte_t ptent, union mc_target *target)
4167 {
4168         struct page *page = NULL;
4169         struct page_cgroup *pc;
4170         int ret = 0;
4171         swp_entry_t ent = { .val = 0 };
4172         int usage_count = 0;
4173         bool move_anon = test_bit(MOVE_CHARGE_TYPE_ANON,
4174                                         &mc.to->move_charge_at_immigrate);
4175
4176         if (!pte_present(ptent)) {
4177                 /* TODO: handle swap of shmem/tmpfs */
4178                 if (pte_none(ptent) || pte_file(ptent))
4179                         return 0;
4180                 else if (is_swap_pte(ptent)) {
4181                         ent = pte_to_swp_entry(ptent);
4182                         if (!move_anon || non_swap_entry(ent))
4183                                 return 0;
4184                         usage_count = mem_cgroup_count_swap_user(ent, &page);
4185                 }
4186         } else {
4187                 page = vm_normal_page(vma, addr, ptent);
4188                 if (!page || !page_mapped(page))
4189                         return 0;
4190                 /*
4191                  * TODO: We don't move charges of file pages (including
4192                  * shmem/tmpfs) for now.
4193                  */
4194                 if (!move_anon || !PageAnon(page))
4195                         return 0;
4196                 if (!get_page_unless_zero(page))
4197                         return 0;
4198                 usage_count = page_mapcount(page);
4199         }
4200         if (usage_count > 1) {
4201                 /*
4202                  * TODO: We don't move charges of shared pages (used by
4203                  * multiple processes) for now.
4204                  */
4205                 if (page)
4206                         put_page(page);
4207                 return 0;
4208         }
4209         if (page) {
4210                 pc = lookup_page_cgroup(page);
4211                 /*
4212                  * Do only a loose check without the page_cgroup lock;
4213                  * mem_cgroup_move_account() checks whether the pc is valid
4214                  * under the lock.
4215                  */
4216                 if (PageCgroupUsed(pc) && pc->mem_cgroup == mc.from) {
4217                         ret = MC_TARGET_PAGE;
4218                         if (target)
4219                                 target->page = page;
4220                 }
4221                 if (!ret || !target)
4222                         put_page(page);
4223         }
4224         /* fall through */
4225         if (ent.val && do_swap_account && !ret &&
4226                         css_id(&mc.from->css) == lookup_swap_cgroup(ent)) {
4227                 ret = MC_TARGET_SWAP;
4228                 if (target)
4229                         target->ent = ent;
4230         }
4231         return ret;
4232 }
4233
4234 static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
4235                                         unsigned long addr, unsigned long end,
4236                                         struct mm_walk *walk)
4237 {
4238         struct vm_area_struct *vma = walk->private;
4239         pte_t *pte;
4240         spinlock_t *ptl;
4241
4242         pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
4243         for (; addr != end; pte++, addr += PAGE_SIZE)
4244                 if (is_target_pte_for_mc(vma, addr, *pte, NULL))
4245                         mc.precharge++; /* increment precharge temporarily */
4246         pte_unmap_unlock(pte - 1, ptl);
4247         cond_resched();
4248
4249         return 0;
4250 }
4251
4252 static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
4253 {
4254         unsigned long precharge;
4255         struct vm_area_struct *vma;
4256
4257         down_read(&mm->mmap_sem);
4258         for (vma = mm->mmap; vma; vma = vma->vm_next) {
4259                 struct mm_walk mem_cgroup_count_precharge_walk = {
4260                         .pmd_entry = mem_cgroup_count_precharge_pte_range,
4261                         .mm = mm,
4262                         .private = vma,
4263                 };
4264                 if (is_vm_hugetlb_page(vma))
4265                         continue;
4266                 /* TODO: We don't move charges of shmem/tmpfs pages for now. */
4267                 if (vma->vm_flags & VM_SHARED)
4268                         continue;
4269                 walk_page_range(vma->vm_start, vma->vm_end,
4270                                         &mem_cgroup_count_precharge_walk);
4271         }
4272         up_read(&mm->mmap_sem);
4273
4274         precharge = mc.precharge;
4275         mc.precharge = 0;
4276
4277         return precharge;
4278 }
4279
4280 static int mem_cgroup_precharge_mc(struct mm_struct *mm)
4281 {
4282         return mem_cgroup_do_precharge(mem_cgroup_count_precharge(mm));
4283 }
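/*
 * For orientation, the overall flow of charge moving at task migration as
 * implemented below: can_attach() precharges every movable pte of the task's
 * mm into mc.precharge, attach() (mem_cgroup_move_task()) walks the page
 * tables again converting precharges into moved charges one pte at a time,
 * and cancel_attach()/mem_cgroup_clear_mc() returns any leftovers.
 */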
4284
4285 static void mem_cgroup_clear_mc(void)
4286 {
4287         /* we must uncharge all the leftover precharges from mc.to */
4288         if (mc.precharge) {
4289                 __mem_cgroup_cancel_charge(mc.to, mc.precharge);
4290                 mc.precharge = 0;
4291                 memcg_oom_recover(mc.to);
4292         }
4293         /*
4294          * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
4295          * we must uncharge here.
4296          */
4297         if (mc.moved_charge) {
4298                 __mem_cgroup_cancel_charge(mc.from, mc.moved_charge);
4299                 mc.moved_charge = 0;
4300                 memcg_oom_recover(mc.from);
4301         }
4302         /* we must fixup refcnts and charges */
4303         if (mc.moved_swap) {
4304                 WARN_ON_ONCE(mc.moved_swap > INT_MAX);
4305                 /* uncharge swap account from the old cgroup */
4306                 if (!mem_cgroup_is_root(mc.from))
4307                         res_counter_uncharge(&mc.from->memsw,
4308                                                 PAGE_SIZE * mc.moved_swap);
4309                 __mem_cgroup_put(mc.from, mc.moved_swap);
4310
4311                 if (!mem_cgroup_is_root(mc.to)) {
4312                         /*
4313                          * we charged both to->res and to->memsw, so we should
4314                          * uncharge to->res.
4315                          */
4316                         res_counter_uncharge(&mc.to->res,
4317                                                 PAGE_SIZE * mc.moved_swap);
4318                         VM_BUG_ON(test_bit(CSS_ROOT, &mc.to->css.flags));
4319                         __css_put(&mc.to->css, mc.moved_swap);
4320                 }
4321                 /* we've already done mem_cgroup_get(mc.to) */
4322
4323                 mc.moved_swap = 0;
4324         }
4325         mc.from = NULL;
4326         mc.to = NULL;
4327         mc.moving_task = NULL;
4328         wake_up_all(&mc.waitq);
4329 }
4330
4331 static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
4332                                 struct cgroup *cgroup,
4333                                 struct task_struct *p,
4334                                 bool threadgroup)
4335 {
4336         int ret = 0;
4337         struct mem_cgroup *mem = mem_cgroup_from_cont(cgroup);
4338
4339         if (mem->move_charge_at_immigrate) {
4340                 struct mm_struct *mm;
4341                 struct mem_cgroup *from = mem_cgroup_from_task(p);
4342
4343                 VM_BUG_ON(from == mem);
4344
4345                 mm = get_task_mm(p);
4346                 if (!mm)
4347                         return 0;
4348                 /* We move charges only when we move the owner of the mm */
4349                 if (mm->owner == p) {
4350                         VM_BUG_ON(mc.from);
4351                         VM_BUG_ON(mc.to);
4352                         VM_BUG_ON(mc.precharge);
4353                         VM_BUG_ON(mc.moved_charge);
4354                         VM_BUG_ON(mc.moved_swap);
4355                         VM_BUG_ON(mc.moving_task);
4356                         mc.from = from;
4357                         mc.to = mem;
4358                         mc.precharge = 0;
4359                         mc.moved_charge = 0;
4360                         mc.moved_swap = 0;
4361                         mc.moving_task = current;
4362
4363                         ret = mem_cgroup_precharge_mc(mm);
4364                         if (ret)
4365                                 mem_cgroup_clear_mc();
4366                 }
4367                 mmput(mm);
4368         }
4369         return ret;
4370 }
4371
4372 static void mem_cgroup_cancel_attach(struct cgroup_subsys *ss,
4373                                 struct cgroup *cgroup,
4374                                 struct task_struct *p,
4375                                 bool threadgroup)
4376 {
4377         mem_cgroup_clear_mc();
4378 }
4379
4380 static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
4381                                 unsigned long addr, unsigned long end,
4382                                 struct mm_walk *walk)
4383 {
4384         int ret = 0;
4385         struct vm_area_struct *vma = walk->private;
4386         pte_t *pte;
4387         spinlock_t *ptl;
4388
4389 retry:
4390         pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
4391         for (; addr != end; addr += PAGE_SIZE) {
4392                 pte_t ptent = *(pte++);
4393                 union mc_target target;
4394                 int type;
4395                 struct page *page;
4396                 struct page_cgroup *pc;
4397                 swp_entry_t ent;
4398
4399                 if (!mc.precharge)
4400                         break;
4401
4402                 type = is_target_pte_for_mc(vma, addr, ptent, &target);
4403                 switch (type) {
4404                 case MC_TARGET_PAGE:
4405                         page = target.page;
4406                         if (isolate_lru_page(page))
4407                                 goto put;
4408                         pc = lookup_page_cgroup(page);
4409                         if (!mem_cgroup_move_account(pc,
4410                                                 mc.from, mc.to, false)) {
4411                                 mc.precharge--;
4412                                 /* we uncharge from mc.from later. */
4413                                 mc.moved_charge++;
4414                         }
4415                         putback_lru_page(page);
4416 put:                    /* is_target_pte_for_mc() gets the page */
4417                         put_page(page);
4418                         break;
4419                 case MC_TARGET_SWAP:
4420                         ent = target.ent;
4421                         if (!mem_cgroup_move_swap_account(ent,
4422                                                 mc.from, mc.to, false)) {
4423                                 mc.precharge--;
4424                         /* we fix up refcnts and charges later. */
4425                                 mc.moved_swap++;
4426                         }
4427                         break;
4428                 default:
4429                         break;
4430                 }
4431         }
4432         pte_unmap_unlock(pte - 1, ptl);
4433         cond_resched();
4434
4435         if (addr != end) {
4436                 /*
4437                  * We have consumed all precharges we got in can_attach().
4438                  * We try to charge one by one, but we stop charging mc.to
4439                  * entirely once a single charge fails during this attach()
4440                  * phase.
4441                  */
4442                 ret = mem_cgroup_do_precharge(1);
4443                 if (!ret)
4444                         goto retry;
4445         }
4446
4447         return ret;
4448 }
4449
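/*
 * Added summary (not in the original source): walks every VMA of the old
 * owner's mm via walk_page_range(), skipping hugetlb and shared
 * (shmem/tmpfs) mappings.  lru_add_drain_all() flushes the per-cpu LRU
 * pagevecs first so that isolate_lru_page() in the walker above can find
 * the pages on the LRU lists.
 */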
4450 static void mem_cgroup_move_charge(struct mm_struct *mm)
4451 {
4452         struct vm_area_struct *vma;
4453
4454         lru_add_drain_all();
4455         down_read(&mm->mmap_sem);
4456         for (vma = mm->mmap; vma; vma = vma->vm_next) {
4457                 int ret;
4458                 struct mm_walk mem_cgroup_move_charge_walk = {
4459                         .pmd_entry = mem_cgroup_move_charge_pte_range,
4460                         .mm = mm,
4461                         .private = vma,
4462                 };
4463                 if (is_vm_hugetlb_page(vma))
4464                         continue;
4465                 /* TODO: We don't move charges of shmem/tmpfs pages for now. */
4466                 if (vma->vm_flags & VM_SHARED)
4467                         continue;
4468                 ret = walk_page_range(vma->vm_start, vma->vm_end,
4469                                                 &mem_cgroup_move_charge_walk);
4470                 if (ret)
4471                         /*
4472                          * Non-zero means we consumed all precharges and
4473                          * failed to charge any more. Just abandon here.
4474                          */
4475                         break;
4476         }
4477         up_read(&mm->mmap_sem);
4478 }
4479
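/*
 * Added summary (not in the original source): the .attach callback.  If
 * can_attach() set up a pending move (mc.to != NULL), perform the charge
 * moving over the task's mm, then unconditionally clear the "mc" state.
 */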
4480 static void mem_cgroup_move_task(struct cgroup_subsys *ss,
4481                                 struct cgroup *cont,
4482                                 struct cgroup *old_cont,
4483                                 struct task_struct *p,
4484                                 bool threadgroup)
4485 {
4486         struct mm_struct *mm;
4487
4488         if (!mc.to)
4489                 /* no need to move charge */
4490                 return;
4491
4492         mm = get_task_mm(p);
4493         if (mm) {
4494                 mem_cgroup_move_charge(mm);
4495                 mmput(mm);
4496         }
4497         mem_cgroup_clear_mc();
4498 }
4499 #else   /* !CONFIG_MMU */
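/*
 * Stubs for !CONFIG_MMU: charge moving depends on page-table walking, so
 * the attach hooks are no-ops on nommu builds.
 */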
4500 static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
4501                                 struct cgroup *cgroup,
4502                                 struct task_struct *p,
4503                                 bool threadgroup)
4504 {
4505         return 0;
4506 }
4507 static void mem_cgroup_cancel_attach(struct cgroup_subsys *ss,
4508                                 struct cgroup *cgroup,
4509                                 struct task_struct *p,
4510                                 bool threadgroup)
4511 {
4512 }
4513 static void mem_cgroup_move_task(struct cgroup_subsys *ss,
4514                                 struct cgroup *cont,
4515                                 struct cgroup *old_cont,
4516                                 struct task_struct *p,
4517                                 bool threadgroup)
4518 {
4519 }
4520 #endif
4521
4522 struct cgroup_subsys mem_cgroup_subsys = {
4523         .name = "memory",
4524         .subsys_id = mem_cgroup_subsys_id,
4525         .create = mem_cgroup_create,
4526         .pre_destroy = mem_cgroup_pre_destroy,
4527         .destroy = mem_cgroup_destroy,
4528         .populate = mem_cgroup_populate,
4529         .can_attach = mem_cgroup_can_attach,
4530         .cancel_attach = mem_cgroup_cancel_attach,
4531         .attach = mem_cgroup_move_task,
4532         .early_init = 0,
4533         .use_id = 1,
4534 };
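/*
 * Illustrative usage (example mount point, not part of this file): the
 * subsystem registered above becomes available by mounting the cgroup
 * filesystem with the "memory" option:
 *
 *   mount -t cgroup -o memory none /cgroups
 */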
4535
4536 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
4537
4538 static int __init disable_swap_account(char *s)
4539 {
4540         really_do_swap_account = 0;
4541         return 1;
4542 }
4543 __setup("noswapaccount", disable_swap_account);
4544 #endif
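/*
 * Illustrative usage: the overhead of swap accounting can be avoided at
 * boot by appending the parameter registered above to the kernel command
 * line, e.g. in the bootloader configuration:
 *
 *   linux ... noswapaccount
 */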