nv-tegra.nvidia Code Review - linux-3.10.git/blob - mm/memcontrol.c
memcg: remove some redundant checks
1 /* memcontrol.c - Memory Controller
2  *
3  * Copyright IBM Corporation, 2007
4  * Author Balbir Singh <balbir@linux.vnet.ibm.com>
5  *
6  * Copyright 2007 OpenVZ SWsoft Inc
7  * Author: Pavel Emelianov <xemul@openvz.org>
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License as published by
11  * the Free Software Foundation; either version 2 of the License, or
12  * (at your option) any later version.
13  *
14  * This program is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17  * GNU General Public License for more details.
18  */
19
20 #include <linux/res_counter.h>
21 #include <linux/memcontrol.h>
22 #include <linux/cgroup.h>
23 #include <linux/mm.h>
24 #include <linux/pagemap.h>
25 #include <linux/smp.h>
26 #include <linux/page-flags.h>
27 #include <linux/backing-dev.h>
28 #include <linux/bit_spinlock.h>
29 #include <linux/rcupdate.h>
30 #include <linux/limits.h>
31 #include <linux/mutex.h>
32 #include <linux/slab.h>
33 #include <linux/swap.h>
34 #include <linux/spinlock.h>
35 #include <linux/fs.h>
36 #include <linux/seq_file.h>
37 #include <linux/vmalloc.h>
38 #include <linux/mm_inline.h>
39 #include <linux/page_cgroup.h>
40 #include "internal.h"
41
42 #include <asm/uaccess.h>
43
44 struct cgroup_subsys mem_cgroup_subsys __read_mostly;
45 #define MEM_CGROUP_RECLAIM_RETRIES      5
46
47 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
48 /* Turned on only when memory cgroup is enabled && really_do_swap_account = 1 */
49 int do_swap_account __read_mostly;
50 static int really_do_swap_account __initdata = 1; /* for remembering the boot option */
51 #else
52 #define do_swap_account         (0)
53 #endif
54
55 static DEFINE_MUTEX(memcg_tasklist);    /* can be held under cgroup_mutex */
56
57 /*
58  * Statistics for memory cgroup.
59  */
60 enum mem_cgroup_stat_index {
61         /*
62          * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
63          */
64         MEM_CGROUP_STAT_CACHE,     /* # of pages charged as cache */
65         MEM_CGROUP_STAT_RSS,       /* # of pages charged as anon rss */
66         MEM_CGROUP_STAT_MAPPED_FILE,  /* # of pages charged as file rss */
67         MEM_CGROUP_STAT_PGPGIN_COUNT,   /* # of pages paged in */
68         MEM_CGROUP_STAT_PGPGOUT_COUNT,  /* # of pages paged out */
69
70         MEM_CGROUP_STAT_NSTATS,
71 };
72
73 struct mem_cgroup_stat_cpu {
74         s64 count[MEM_CGROUP_STAT_NSTATS];
75 } ____cacheline_aligned_in_smp;
76
77 struct mem_cgroup_stat {
78         struct mem_cgroup_stat_cpu cpustat[0];
79 };
80
81 /*
82  * For accounting in irq-disabled context, there is no need to bump the preempt count.
83  */
84 static inline void __mem_cgroup_stat_add_safe(struct mem_cgroup_stat_cpu *stat,
85                 enum mem_cgroup_stat_index idx, int val)
86 {
87         stat->count[idx] += val;
88 }
89
90 static s64 mem_cgroup_read_stat(struct mem_cgroup_stat *stat,
91                 enum mem_cgroup_stat_index idx)
92 {
93         int cpu;
94         s64 ret = 0;
95         for_each_possible_cpu(cpu)
96                 ret += stat->cpustat[cpu].count[idx];
97         return ret;
98 }
99
100 static s64 mem_cgroup_local_usage(struct mem_cgroup_stat *stat)
101 {
102         s64 ret;
103
104         ret = mem_cgroup_read_stat(stat, MEM_CGROUP_STAT_CACHE);
105         ret += mem_cgroup_read_stat(stat, MEM_CGROUP_STAT_RSS);
106         return ret;
107 }
108
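/*
 * Annotation (illustrative sketch, not part of the original file): the
 * statistics above are kept per possible CPU.  Writers only touch their own
 * CPU's slot via __mem_cgroup_stat_add_safe(), and readers fold all slots
 * together with mem_cgroup_read_stat(), so a read may slightly lag
 * concurrent updates.  A hypothetical reader of the anon RSS counter:
 *
 *	s64 rss_pages = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_RSS);
 *	s64 rss_bytes = rss_pages << PAGE_SHIFT;
 *
 * The counters are kept in pages, hence the shift.  mem_cgroup_local_usage()
 * above is simply CACHE + RSS, i.e. the pages charged directly to this
 * memcg, not including its children.
 */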
109 /*
110  * per-zone information in memory controller.
111  */
112 struct mem_cgroup_per_zone {
113         /*
114          * spin_lock to protect the per cgroup LRU
115          */
116         struct list_head        lists[NR_LRU_LISTS];
117         unsigned long           count[NR_LRU_LISTS];
118
119         struct zone_reclaim_stat reclaim_stat;
120 };
121 /* Macro for accessing counter */
122 #define MEM_CGROUP_ZSTAT(mz, idx)       ((mz)->count[(idx)])
123
124 struct mem_cgroup_per_node {
125         struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
126 };
127
128 struct mem_cgroup_lru_info {
129         struct mem_cgroup_per_node *nodeinfo[MAX_NUMNODES];
130 };
131
132 /*
133  * The memory controller data structure. The memory controller controls both
134  * page cache and RSS per cgroup. We would eventually like to provide
135  * statistics based on the statistics developed by Rik Van Riel for clock-pro,
136  * to help the administrator determine what knobs to tune.
137  *
138  * TODO: Add a water mark for the memory controller. Reclaim will begin when
139  * we hit the water mark. Maybe even add a low water mark, such that
140  * no reclaim occurs from a cgroup at its low water mark; this is
141  * a feature that will be implemented much later in the future.
142  */
143 struct mem_cgroup {
144         struct cgroup_subsys_state css;
145         /*
146          * the counter to account for memory usage
147          */
148         struct res_counter res;
149         /*
150          * the counter to account for mem+swap usage.
151          */
152         struct res_counter memsw;
153         /*
154          * Per cgroup active and inactive list, similar to the
155          * per zone LRU lists.
156          */
157         struct mem_cgroup_lru_info info;
158
159         /*
160          * protects reclaim-related members.
161          */
162         spinlock_t reclaim_param_lock;
163
164         int     prev_priority;  /* for recording reclaim priority */
165
166         /*
167          * While reclaiming in a hierarchy, we cache the last child we
168          * reclaimed from.
169          */
170         int last_scanned_child;
171         /*
172          * Should the accounting and control be hierarchical, per subtree?
173          */
174         bool use_hierarchy;
175         unsigned long   last_oom_jiffies;
176         atomic_t        refcnt;
177
178         unsigned int    swappiness;
179
180         /*
181          * statistics. This must be placed at the end of struct mem_cgroup (the per-cpu array is variable-sized).
182          */
183         struct mem_cgroup_stat stat;
184 };
185
186 enum charge_type {
187         MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
188         MEM_CGROUP_CHARGE_TYPE_MAPPED,
189         MEM_CGROUP_CHARGE_TYPE_SHMEM,   /* used by page migration of shmem */
190         MEM_CGROUP_CHARGE_TYPE_FORCE,   /* used by force_empty */
191         MEM_CGROUP_CHARGE_TYPE_SWAPOUT, /* for accounting swapcache */
192         NR_CHARGE_TYPE,
193 };
194
195 /* only for here (for easy reading.) */
196 #define PCGF_CACHE      (1UL << PCG_CACHE)
197 #define PCGF_USED       (1UL << PCG_USED)
198 #define PCGF_LOCK       (1UL << PCG_LOCK)
199 static const unsigned long
200 pcg_default_flags[NR_CHARGE_TYPE] = {
201         PCGF_CACHE | PCGF_USED | PCGF_LOCK, /* File Cache */
202         PCGF_USED | PCGF_LOCK, /* Anon */
203         PCGF_CACHE | PCGF_USED | PCGF_LOCK, /* Shmem */
204         0, /* FORCE */
205 };
206
207 /* for encoding cft->private value on file */
208 #define _MEM                    (0)
209 #define _MEMSWAP                (1)
210 #define MEMFILE_PRIVATE(x, val) (((x) << 16) | (val))
211 #define MEMFILE_TYPE(val)       (((val) >> 16) & 0xffff)
212 #define MEMFILE_ATTR(val)       ((val) & 0xffff)
213
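/*
 * Example (illustrative annotation): cft->private packs the counter type in
 * the upper 16 bits and the res_counter member in the lower 16 bits:
 *
 *	int priv = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT);
 *	MEMFILE_TYPE(priv) yields _MEMSWAP, so &mem->memsw is selected;
 *	MEMFILE_ATTR(priv) yields RES_LIMIT.
 *
 * This lets one read/write handler serve both the memory.* and
 * memory.memsw.* control files.
 */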
214 static void mem_cgroup_get(struct mem_cgroup *mem);
215 static void mem_cgroup_put(struct mem_cgroup *mem);
216 static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem);
217
218 static void mem_cgroup_charge_statistics(struct mem_cgroup *mem,
219                                          struct page_cgroup *pc,
220                                          bool charge)
221 {
222         int val = (charge)? 1 : -1;
223         struct mem_cgroup_stat *stat = &mem->stat;
224         struct mem_cgroup_stat_cpu *cpustat;
225         int cpu = get_cpu();
226
227         cpustat = &stat->cpustat[cpu];
228         if (PageCgroupCache(pc))
229                 __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_CACHE, val);
230         else
231                 __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_RSS, val);
232
233         if (charge)
234                 __mem_cgroup_stat_add_safe(cpustat,
235                                 MEM_CGROUP_STAT_PGPGIN_COUNT, 1);
236         else
237                 __mem_cgroup_stat_add_safe(cpustat,
238                                 MEM_CGROUP_STAT_PGPGOUT_COUNT, 1);
239         put_cpu();
240 }
241
242 static struct mem_cgroup_per_zone *
243 mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid)
244 {
245         return &mem->info.nodeinfo[nid]->zoneinfo[zid];
246 }
247
248 static struct mem_cgroup_per_zone *
249 page_cgroup_zoneinfo(struct page_cgroup *pc)
250 {
251         struct mem_cgroup *mem = pc->mem_cgroup;
252         int nid = page_cgroup_nid(pc);
253         int zid = page_cgroup_zid(pc);
254
255         if (!mem)
256                 return NULL;
257
258         return mem_cgroup_zoneinfo(mem, nid, zid);
259 }
260
261 static unsigned long mem_cgroup_get_local_zonestat(struct mem_cgroup *mem,
262                                         enum lru_list idx)
263 {
264         int nid, zid;
265         struct mem_cgroup_per_zone *mz;
266         u64 total = 0;
267
268         for_each_online_node(nid)
269                 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
270                         mz = mem_cgroup_zoneinfo(mem, nid, zid);
271                         total += MEM_CGROUP_ZSTAT(mz, idx);
272                 }
273         return total;
274 }
275
276 static struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
277 {
278         return container_of(cgroup_subsys_state(cont,
279                                 mem_cgroup_subsys_id), struct mem_cgroup,
280                                 css);
281 }
282
283 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
284 {
285         /*
286          * mm_update_next_owner() may clear mm->owner to NULL
287          * if it races with swapoff, page migration, etc.
288          * So this can be called with p == NULL.
289          */
290         if (unlikely(!p))
291                 return NULL;
292
293         return container_of(task_subsys_state(p, mem_cgroup_subsys_id),
294                                 struct mem_cgroup, css);
295 }
296
297 static struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
298 {
299         struct mem_cgroup *mem = NULL;
300
301         if (!mm)
302                 return NULL;
303         /*
304          * Because we hold no locks, mm->owner may be being moved to another
305          * cgroup. We use css_tryget() here even if this looks
306          * pessimistic (rather than adding locks here).
307          */
308         rcu_read_lock();
309         do {
310                 mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
311                 if (unlikely(!mem))
312                         break;
313         } while (!css_tryget(&mem->css));
314         rcu_read_unlock();
315         return mem;
316 }
317
318 /*
319  * Call the callback function against every cgroup under the hierarchy tree.
320  */
321 static int mem_cgroup_walk_tree(struct mem_cgroup *root, void *data,
322                           int (*func)(struct mem_cgroup *, void *))
323 {
324         int found, ret, nextid;
325         struct cgroup_subsys_state *css;
326         struct mem_cgroup *mem;
327
328         if (!root->use_hierarchy)
329                 return (*func)(root, data);
330
331         nextid = 1;
332         do {
333                 ret = 0;
334                 mem = NULL;
335
336                 rcu_read_lock();
337                 css = css_get_next(&mem_cgroup_subsys, nextid, &root->css,
338                                    &found);
339                 if (css && css_tryget(css))
340                         mem = container_of(css, struct mem_cgroup, css);
341                 rcu_read_unlock();
342
343                 if (mem) {
344                         ret = (*func)(mem, data);
345                         css_put(&mem->css);
346                 }
347                 nextid = found + 1;
348         } while (!ret && css);
349
350         return ret;
351 }
352
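/*
 * Usage sketch (annotation): the walk visits memcgs in css-id order via
 * css_get_next(), pins each with css_tryget() before invoking the callback,
 * and stops as soon as a callback returns non-zero.  A hypothetical counting
 * callback, mirroring mem_cgroup_count_children_cb() further below:
 *
 *	static int count_cb(struct mem_cgroup *mem, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 0;
 *	}
 *
 *	int n = 0;
 *	mem_cgroup_walk_tree(root, &n, count_cb);
 *
 * Returning 0 keeps the walk going; if root->use_hierarchy is false, only
 * root itself is visited.
 */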
353 /*
354  * The following LRU functions may be used without holding PCG_LOCK.
355  * They are called by the global LRU routines independently of memcg.
356  * What we have to take care of here is the validity of pc->mem_cgroup.
357  *
358  * pc->mem_cgroup changes when
359  * 1. charging
360  * 2. moving an account
361  * In the typical case, "charge" is done before add-to-lru. The exception is
362  * SwapCache, which is added to the LRU before being charged.
363  * If the PCG_USED bit is not set, the page_cgroup is not added to this private LRU.
364  * When moving an account, the page is not on the LRU; it is isolated.
365  */
366
367 void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru)
368 {
369         struct page_cgroup *pc;
370         struct mem_cgroup *mem;
371         struct mem_cgroup_per_zone *mz;
372
373         if (mem_cgroup_disabled())
374                 return;
375         pc = lookup_page_cgroup(page);
376         /* can happen while we handle swapcache. */
377         if (list_empty(&pc->lru) || !pc->mem_cgroup)
378                 return;
379         /*
380          * We don't check PCG_USED bit. It's cleared when the "page" is finally
381          * removed from global LRU.
382          */
383         mz = page_cgroup_zoneinfo(pc);
384         mem = pc->mem_cgroup;
385         MEM_CGROUP_ZSTAT(mz, lru) -= 1;
386         list_del_init(&pc->lru);
387         return;
388 }
389
390 void mem_cgroup_del_lru(struct page *page)
391 {
392         mem_cgroup_del_lru_list(page, page_lru(page));
393 }
394
395 void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru)
396 {
397         struct mem_cgroup_per_zone *mz;
398         struct page_cgroup *pc;
399
400         if (mem_cgroup_disabled())
401                 return;
402
403         pc = lookup_page_cgroup(page);
404         /*
405          * Used bit is set without atomic ops but after smp_wmb().
406          * For making pc->mem_cgroup visible, insert smp_rmb() here.
407          */
408         smp_rmb();
409         /* unused page is not rotated. */
410         if (!PageCgroupUsed(pc))
411                 return;
412         mz = page_cgroup_zoneinfo(pc);
413         list_move(&pc->lru, &mz->lists[lru]);
414 }
415
416 void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru)
417 {
418         struct page_cgroup *pc;
419         struct mem_cgroup_per_zone *mz;
420
421         if (mem_cgroup_disabled())
422                 return;
423         pc = lookup_page_cgroup(page);
424         /*
425          * Used bit is set without atomic ops but after smp_wmb().
426          * For making pc->mem_cgroup visible, insert smp_rmb() here.
427          */
428         smp_rmb();
429         if (!PageCgroupUsed(pc))
430                 return;
431
432         mz = page_cgroup_zoneinfo(pc);
433         MEM_CGROUP_ZSTAT(mz, lru) += 1;
434         list_add(&pc->lru, &mz->lists[lru]);
435 }
436
437 /*
438  * While handling SwapCache, pc->mem_cgroup may be changed while it is linked
439  * to the LRU, because the page may be reused after it is fully uncharged
440  * (because of SwapCache behavior). To handle that, unlink the page_cgroup
441  * from the LRU when charging it again. This function is only used to charge
442  * SwapCache. It is done under lock_page() and expects that zone->lru_lock is not held.
443  */
444 static void mem_cgroup_lru_del_before_commit_swapcache(struct page *page)
445 {
446         unsigned long flags;
447         struct zone *zone = page_zone(page);
448         struct page_cgroup *pc = lookup_page_cgroup(page);
449
450         spin_lock_irqsave(&zone->lru_lock, flags);
451         /*
452          * Forget old LRU when this page_cgroup is *not* used. This Used bit
453          * is guarded by lock_page() because the page is SwapCache.
454          */
455         if (!PageCgroupUsed(pc))
456                 mem_cgroup_del_lru_list(page, page_lru(page));
457         spin_unlock_irqrestore(&zone->lru_lock, flags);
458 }
459
460 static void mem_cgroup_lru_add_after_commit_swapcache(struct page *page)
461 {
462         unsigned long flags;
463         struct zone *zone = page_zone(page);
464         struct page_cgroup *pc = lookup_page_cgroup(page);
465
466         spin_lock_irqsave(&zone->lru_lock, flags);
467         /* link when the page is linked to LRU but page_cgroup isn't */
468         if (PageLRU(page) && list_empty(&pc->lru))
469                 mem_cgroup_add_lru_list(page, page_lru(page));
470         spin_unlock_irqrestore(&zone->lru_lock, flags);
471 }
472
473
474 void mem_cgroup_move_lists(struct page *page,
475                            enum lru_list from, enum lru_list to)
476 {
477         if (mem_cgroup_disabled())
478                 return;
479         mem_cgroup_del_lru_list(page, from);
480         mem_cgroup_add_lru_list(page, to);
481 }
482
483 int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
484 {
485         int ret;
486         struct mem_cgroup *curr = NULL;
487
488         task_lock(task);
489         rcu_read_lock();
490         curr = try_get_mem_cgroup_from_mm(task->mm);
491         rcu_read_unlock();
492         task_unlock(task);
493         if (!curr)
494                 return 0;
495         if (curr->use_hierarchy)
496                 ret = css_is_ancestor(&curr->css, &mem->css);
497         else
498                 ret = (curr == mem);
499         css_put(&curr->css);
500         return ret;
501 }
502
503 /*
504  * prev_priority control: this will be used in the memory reclaim path.
505  */
506 int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem)
507 {
508         int prev_priority;
509
510         spin_lock(&mem->reclaim_param_lock);
511         prev_priority = mem->prev_priority;
512         spin_unlock(&mem->reclaim_param_lock);
513
514         return prev_priority;
515 }
516
517 void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem, int priority)
518 {
519         spin_lock(&mem->reclaim_param_lock);
520         if (priority < mem->prev_priority)
521                 mem->prev_priority = priority;
522         spin_unlock(&mem->reclaim_param_lock);
523 }
524
525 void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem, int priority)
526 {
527         spin_lock(&mem->reclaim_param_lock);
528         mem->prev_priority = priority;
529         spin_unlock(&mem->reclaim_param_lock);
530 }
531
532 static int calc_inactive_ratio(struct mem_cgroup *memcg, unsigned long *present_pages)
533 {
534         unsigned long active;
535         unsigned long inactive;
536         unsigned long gb;
537         unsigned long inactive_ratio;
538
539         inactive = mem_cgroup_get_local_zonestat(memcg, LRU_INACTIVE_ANON);
540         active = mem_cgroup_get_local_zonestat(memcg, LRU_ACTIVE_ANON);
541
542         gb = (inactive + active) >> (30 - PAGE_SHIFT);
543         if (gb)
544                 inactive_ratio = int_sqrt(10 * gb);
545         else
546                 inactive_ratio = 1;
547
548         if (present_pages) {
549                 present_pages[0] = inactive;
550                 present_pages[1] = active;
551         }
552
553         return inactive_ratio;
554 }
555
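/*
 * Worked example (annotation, assuming 4KB pages): with 4GB of anon memory
 * in the group, gb = (inactive + active) >> (30 - 12) = 4, so
 * inactive_ratio = int_sqrt(10 * 4) = 6.  mem_cgroup_inactive_anon_is_low()
 * below then reports "low" whenever inactive * 6 < active, i.e. it aims to
 * keep roughly 1/7 of the anon memory on the inactive list at that size.
 */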
556 int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg)
557 {
558         unsigned long active;
559         unsigned long inactive;
560         unsigned long present_pages[2];
561         unsigned long inactive_ratio;
562
563         inactive_ratio = calc_inactive_ratio(memcg, present_pages);
564
565         inactive = present_pages[0];
566         active = present_pages[1];
567
568         if (inactive * inactive_ratio < active)
569                 return 1;
570
571         return 0;
572 }
573
574 int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg)
575 {
576         unsigned long active;
577         unsigned long inactive;
578
579         inactive = mem_cgroup_get_local_zonestat(memcg, LRU_INACTIVE_FILE);
580         active = mem_cgroup_get_local_zonestat(memcg, LRU_ACTIVE_FILE);
581
582         return (active > inactive);
583 }
584
585 unsigned long mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg,
586                                        struct zone *zone,
587                                        enum lru_list lru)
588 {
589         int nid = zone->zone_pgdat->node_id;
590         int zid = zone_idx(zone);
591         struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(memcg, nid, zid);
592
593         return MEM_CGROUP_ZSTAT(mz, lru);
594 }
595
596 struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
597                                                       struct zone *zone)
598 {
599         int nid = zone->zone_pgdat->node_id;
600         int zid = zone_idx(zone);
601         struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(memcg, nid, zid);
602
603         return &mz->reclaim_stat;
604 }
605
606 struct zone_reclaim_stat *
607 mem_cgroup_get_reclaim_stat_from_page(struct page *page)
608 {
609         struct page_cgroup *pc;
610         struct mem_cgroup_per_zone *mz;
611
612         if (mem_cgroup_disabled())
613                 return NULL;
614
615         pc = lookup_page_cgroup(page);
616         /*
617          * Used bit is set without atomic ops but after smp_wmb().
618          * For making pc->mem_cgroup visible, insert smp_rmb() here.
619          */
620         smp_rmb();
621         if (!PageCgroupUsed(pc))
622                 return NULL;
623
624         mz = page_cgroup_zoneinfo(pc);
625         if (!mz)
626                 return NULL;
627
628         return &mz->reclaim_stat;
629 }
630
631 unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
632                                         struct list_head *dst,
633                                         unsigned long *scanned, int order,
634                                         int mode, struct zone *z,
635                                         struct mem_cgroup *mem_cont,
636                                         int active, int file)
637 {
638         unsigned long nr_taken = 0;
639         struct page *page;
640         unsigned long scan;
641         LIST_HEAD(pc_list);
642         struct list_head *src;
643         struct page_cgroup *pc, *tmp;
644         int nid = z->zone_pgdat->node_id;
645         int zid = zone_idx(z);
646         struct mem_cgroup_per_zone *mz;
647         int lru = LRU_FILE * !!file + !!active;
648
649         BUG_ON(!mem_cont);
650         mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
651         src = &mz->lists[lru];
652
653         scan = 0;
654         list_for_each_entry_safe_reverse(pc, tmp, src, lru) {
655                 if (scan >= nr_to_scan)
656                         break;
657
658                 page = pc->page;
659                 if (unlikely(!PageCgroupUsed(pc)))
660                         continue;
661                 if (unlikely(!PageLRU(page)))
662                         continue;
663
664                 scan++;
665                 if (__isolate_lru_page(page, mode, file) == 0) {
666                         list_move(&page->lru, dst);
667                         nr_taken++;
668                 }
669         }
670
671         *scanned = scan;
672         return nr_taken;
673 }
674
675 #define mem_cgroup_from_res_counter(counter, member)    \
676         container_of(counter, struct mem_cgroup, member)
677
678 static bool mem_cgroup_check_under_limit(struct mem_cgroup *mem)
679 {
680         if (do_swap_account) {
681                 if (res_counter_check_under_limit(&mem->res) &&
682                         res_counter_check_under_limit(&mem->memsw))
683                         return true;
684         } else
685                 if (res_counter_check_under_limit(&mem->res))
686                         return true;
687         return false;
688 }
689
690 static unsigned int get_swappiness(struct mem_cgroup *memcg)
691 {
692         struct cgroup *cgrp = memcg->css.cgroup;
693         unsigned int swappiness;
694
695         /* root ? */
696         if (cgrp->parent == NULL)
697                 return vm_swappiness;
698
699         spin_lock(&memcg->reclaim_param_lock);
700         swappiness = memcg->swappiness;
701         spin_unlock(&memcg->reclaim_param_lock);
702
703         return swappiness;
704 }
705
706 static int mem_cgroup_count_children_cb(struct mem_cgroup *mem, void *data)
707 {
708         int *val = data;
709         (*val)++;
710         return 0;
711 }
712
713 /**
714  * mem_cgroup_print_oom_info: Called from OOM with tasklist_lock held in read mode.
715  * @memcg: The memory cgroup that went over its limit
716  * @p: Task that is going to be killed
717  *
718  * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
719  * enabled.
720  */
721 void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
722 {
723         struct cgroup *task_cgrp;
724         struct cgroup *mem_cgrp;
725         /*
726          * Need a buffer in BSS, can't rely on allocations. The code relies
727          * on the assumption that OOM is serialized for memory controller.
728          * If this assumption is broken, revisit this code.
729          */
730         static char memcg_name[PATH_MAX];
731         int ret;
732
733         if (!memcg)
734                 return;
735
736
737         rcu_read_lock();
738
739         mem_cgrp = memcg->css.cgroup;
740         task_cgrp = task_cgroup(p, mem_cgroup_subsys_id);
741
742         ret = cgroup_path(task_cgrp, memcg_name, PATH_MAX);
743         if (ret < 0) {
744                 /*
745                  * Unfortunately, we are unable to convert to a useful name,
746                  * but we'll still print out the usage information.
747                  */
748                 rcu_read_unlock();
749                 goto done;
750         }
751         rcu_read_unlock();
752
753         printk(KERN_INFO "Task in %s killed", memcg_name);
754
755         rcu_read_lock();
756         ret = cgroup_path(mem_cgrp, memcg_name, PATH_MAX);
757         if (ret < 0) {
758                 rcu_read_unlock();
759                 goto done;
760         }
761         rcu_read_unlock();
762
763         /*
764          * Continues from above, so we don't need a KERN_ level.
765          */
766         printk(KERN_CONT " as a result of limit of %s\n", memcg_name);
767 done:
768
769         printk(KERN_INFO "memory: usage %llukB, limit %llukB, failcnt %llu\n",
770                 res_counter_read_u64(&memcg->res, RES_USAGE) >> 10,
771                 res_counter_read_u64(&memcg->res, RES_LIMIT) >> 10,
772                 res_counter_read_u64(&memcg->res, RES_FAILCNT));
773         printk(KERN_INFO "memory+swap: usage %llukB, limit %llukB, "
774                 "failcnt %llu\n",
775                 res_counter_read_u64(&memcg->memsw, RES_USAGE) >> 10,
776                 res_counter_read_u64(&memcg->memsw, RES_LIMIT) >> 10,
777                 res_counter_read_u64(&memcg->memsw, RES_FAILCNT));
778 }
779
780 /*
781  * This function returns the number of memcgs under the hierarchy tree.
782  * Returns 1 (counting only itself) if there are no children.
783  */
784 static int mem_cgroup_count_children(struct mem_cgroup *mem)
785 {
786         int num = 0;
787         mem_cgroup_walk_tree(mem, &num, mem_cgroup_count_children_cb);
788         return num;
789 }
790
791 /*
792  * Visit the first child of @mem (it need not be the first child in the
793  * ordering of the cgroup list, since we track last_scanned_child) and use
794  * that to reclaim free pages from.
795  */
796 static struct mem_cgroup *
797 mem_cgroup_select_victim(struct mem_cgroup *root_mem)
798 {
799         struct mem_cgroup *ret = NULL;
800         struct cgroup_subsys_state *css;
801         int nextid, found;
802
803         if (!root_mem->use_hierarchy) {
804                 css_get(&root_mem->css);
805                 ret = root_mem;
806         }
807
808         while (!ret) {
809                 rcu_read_lock();
810                 nextid = root_mem->last_scanned_child + 1;
811                 css = css_get_next(&mem_cgroup_subsys, nextid, &root_mem->css,
812                                    &found);
813                 if (css && css_tryget(css))
814                         ret = container_of(css, struct mem_cgroup, css);
815
816                 rcu_read_unlock();
817                 /* Updates scanning parameter */
818                 spin_lock(&root_mem->reclaim_param_lock);
819                 if (!css) {
820                         /* this means start scan from ID:1 */
821                         root_mem->last_scanned_child = 0;
822                 } else
823                         root_mem->last_scanned_child = found;
824                 spin_unlock(&root_mem->reclaim_param_lock);
825         }
826
827         return ret;
828 }
829
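/*
 * Worked example (annotation, with hypothetical css ids): suppose a
 * hierarchical root_mem has css id 2 and two children with ids 5 and 7.
 * Starting from last_scanned_child == 0, successive calls return the memcgs
 * with ids 2, 5 and 7; after that css_get_next() finds nothing, so
 * last_scanned_child is reset to 0 and the walk wraps around to id 2 again.
 * The caller below notices a completed round when root_mem itself comes back.
 */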
830 /*
831  * Scan the hierarchy if needed to reclaim memory. We remember the last child
832  * we reclaimed from, so that we don't end up penalizing one child extensively
833  * based on its position in the children list.
834  *
835  * root_mem is the original ancestor that we've been reclaiming from.
836  *
837  * We give up and return to the caller when we visit root_mem twice.
838  * (other groups can be removed while we're walking....)
839  *
840  * If shrink==true, this returns immediately to avoid freeing too much.
841  */
842 static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
843                                    gfp_t gfp_mask, bool noswap, bool shrink)
844 {
845         struct mem_cgroup *victim;
846         int ret, total = 0;
847         int loop = 0;
848
849         while (loop < 2) {
850                 victim = mem_cgroup_select_victim(root_mem);
851                 if (victim == root_mem)
852                         loop++;
853                 if (!mem_cgroup_local_usage(&victim->stat)) {
854                         /* this cgroup's local usage == 0 */
855                         css_put(&victim->css);
856                         continue;
857                 }
858                 /* we use swappiness of local cgroup */
859                 ret = try_to_free_mem_cgroup_pages(victim, gfp_mask, noswap,
860                                                    get_swappiness(victim));
861                 css_put(&victim->css);
862                 /*
863                  * When shrinking usage, we can't tell here whether we should
864                  * stop or reclaim more; that depends on the caller.
865                  * last_scanned_child is enough to keep fairness under the tree.
866                  */
867                 if (shrink)
868                         return ret;
869                 total += ret;
870                 if (mem_cgroup_check_under_limit(root_mem))
871                         return 1 + total;
872         }
873         return total;
874 }
875
876 bool mem_cgroup_oom_called(struct task_struct *task)
877 {
878         bool ret = false;
879         struct mem_cgroup *mem;
880         struct mm_struct *mm;
881
882         rcu_read_lock();
883         mm = task->mm;
884         if (!mm)
885                 mm = &init_mm;
886         mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
887         if (mem && time_before(jiffies, mem->last_oom_jiffies + HZ/10))
888                 ret = true;
889         rcu_read_unlock();
890         return ret;
891 }
892
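/*
 * Note (annotation): HZ/10 jiffies is 100ms regardless of the HZ setting, so
 * the test above means "this task's memcg recorded an OOM kill within the
 * last 100ms".  record_last_oom() below stamps last_oom_jiffies on the
 * over-limit memcg (and its subtree when hierarchy is enabled) when the
 * memcg OOM killer fires in __mem_cgroup_try_charge().
 */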
893 static int record_last_oom_cb(struct mem_cgroup *mem, void *data)
894 {
895         mem->last_oom_jiffies = jiffies;
896         return 0;
897 }
898
899 static void record_last_oom(struct mem_cgroup *mem)
900 {
901         mem_cgroup_walk_tree(mem, NULL, record_last_oom_cb);
902 }
903
904 /*
905  * Currently used to update mapped file statistics, but the routine can be
906  * generalized to update other statistics as well.
907  */
908 void mem_cgroup_update_mapped_file_stat(struct page *page, int val)
909 {
910         struct mem_cgroup *mem;
911         struct mem_cgroup_stat *stat;
912         struct mem_cgroup_stat_cpu *cpustat;
913         int cpu;
914         struct page_cgroup *pc;
915
916         if (!page_is_file_cache(page))
917                 return;
918
919         pc = lookup_page_cgroup(page);
920         if (unlikely(!pc))
921                 return;
922
923         lock_page_cgroup(pc);
924         mem = pc->mem_cgroup;
925         if (!mem)
926                 goto done;
927
928         if (!PageCgroupUsed(pc))
929                 goto done;
930
931         /*
932          * Preemption is already disabled, we don't need get_cpu()
933          */
934         cpu = smp_processor_id();
935         stat = &mem->stat;
936         cpustat = &stat->cpustat[cpu];
937
938         __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_MAPPED_FILE, val);
939 done:
940         unlock_page_cgroup(pc);
941 }
942
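/*
 * Usage sketch (annotation): callers pass the delta in pages, typically +1
 * when a file page gains a mapping and -1 when a mapping goes away
 * (hypothetical call sites, mirroring the rmap accounting):
 *
 *	mem_cgroup_update_mapped_file_stat(page, 1);
 *	mem_cgroup_update_mapped_file_stat(page, -1);
 *
 * The "preemption is already disabled" assumption in the function holds
 * because lock_page_cgroup() is a bit spinlock and therefore disables
 * preemption before smp_processor_id() runs.
 */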
943 /*
944  * Unlike the exported interface, an "oom" parameter is added. If oom==true,
945  * the oom-killer can be invoked.
946  */
947 static int __mem_cgroup_try_charge(struct mm_struct *mm,
948                         gfp_t gfp_mask, struct mem_cgroup **memcg,
949                         bool oom)
950 {
951         struct mem_cgroup *mem, *mem_over_limit;
952         int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
953         struct res_counter *fail_res;
954
955         if (unlikely(test_thread_flag(TIF_MEMDIE))) {
956                 /* Don't account this! */
957                 *memcg = NULL;
958                 return 0;
959         }
960
961         /*
962          * We always charge the cgroup the mm_struct belongs to.
963          * The mm_struct's mem_cgroup changes on task migration if the
964          * thread group leader migrates. It's possible that mm is not
965          * set; if so, charge init_mm (happens for pagecache usage).
966          */
967         mem = *memcg;
968         if (likely(!mem)) {
969                 mem = try_get_mem_cgroup_from_mm(mm);
970                 *memcg = mem;
971         } else {
972                 css_get(&mem->css);
973         }
974         if (unlikely(!mem))
975                 return 0;
976
977         VM_BUG_ON(css_is_removed(&mem->css));
978
979         while (1) {
980                 int ret;
981                 bool noswap = false;
982
983                 ret = res_counter_charge(&mem->res, PAGE_SIZE, &fail_res);
984                 if (likely(!ret)) {
985                         if (!do_swap_account)
986                                 break;
987                         ret = res_counter_charge(&mem->memsw, PAGE_SIZE,
988                                                         &fail_res);
989                         if (likely(!ret))
990                                 break;
991                         /* mem+swap counter fails */
992                         res_counter_uncharge(&mem->res, PAGE_SIZE);
993                         noswap = true;
994                         mem_over_limit = mem_cgroup_from_res_counter(fail_res,
995                                                                         memsw);
996                 } else
997                         /* mem counter fails */
998                         mem_over_limit = mem_cgroup_from_res_counter(fail_res,
999                                                                         res);
1000
1001                 if (!(gfp_mask & __GFP_WAIT))
1002                         goto nomem;
1003
1004                 ret = mem_cgroup_hierarchical_reclaim(mem_over_limit, gfp_mask,
1005                                                         noswap, false);
1006                 if (ret)
1007                         continue;
1008
1009                 /*
1010                  * try_to_free_mem_cgroup_pages() might not give us a full
1011                  * picture of reclaim. Some pages are reclaimed and might be
1012                  * moved to swap cache or just unmapped from the cgroup.
1013                  * Check the limit again to see if the reclaim reduced the
1014                  * current usage of the cgroup before giving up
1015                  *
1016                  */
1017                 if (mem_cgroup_check_under_limit(mem_over_limit))
1018                         continue;
1019
1020                 if (!nr_retries--) {
1021                         if (oom) {
1022                                 mutex_lock(&memcg_tasklist);
1023                                 mem_cgroup_out_of_memory(mem_over_limit, gfp_mask);
1024                                 mutex_unlock(&memcg_tasklist);
1025                                 record_last_oom(mem_over_limit);
1026                         }
1027                         goto nomem;
1028                 }
1029         }
1030         return 0;
1031 nomem:
1032         css_put(&mem->css);
1033         return -ENOMEM;
1034 }
1035
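/*
 * Caller protocol sketch (annotation): __mem_cgroup_try_charge() only
 * reserves space in the res_counter(s) and pins the memcg; the page is not
 * marked as charged until __mem_cgroup_commit_charge() sets PCG_USED.
 * mem_cgroup_charge_common() further below follows essentially this pattern:
 *
 *	struct mem_cgroup *mem = NULL;
 *	ret = __mem_cgroup_try_charge(mm, gfp_mask, &mem, true);
 *	if (ret || !mem)
 *		return ret;
 *	__mem_cgroup_commit_charge(mem, pc, ctype);
 *
 * If a caller cannot commit, the reservation is undone with
 * res_counter_uncharge() plus css_put(), as mem_cgroup_move_parent() and
 * mem_cgroup_cancel_charge_swapin() do.
 */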
1036
1037 /*
1038  * A helper function to get a mem_cgroup from an ID. Must be called under
1039  * rcu_read_lock(). The caller must check css_is_removed() or similar if
1040  * that is a concern. (Dropping a refcnt from swap can be called against a
1041  * removed memcg.)
1042  */
1043 static struct mem_cgroup *mem_cgroup_lookup(unsigned short id)
1044 {
1045         struct cgroup_subsys_state *css;
1046
1047         /* ID 0 is unused ID */
1048         if (!id)
1049                 return NULL;
1050         css = css_lookup(&mem_cgroup_subsys, id);
1051         if (!css)
1052                 return NULL;
1053         return container_of(css, struct mem_cgroup, css);
1054 }
1055
1056 static struct mem_cgroup *try_get_mem_cgroup_from_swapcache(struct page *page)
1057 {
1058         struct mem_cgroup *mem;
1059         struct page_cgroup *pc;
1060         unsigned short id;
1061         swp_entry_t ent;
1062
1063         VM_BUG_ON(!PageLocked(page));
1064
1065         if (!PageSwapCache(page))
1066                 return NULL;
1067
1068         pc = lookup_page_cgroup(page);
1069         lock_page_cgroup(pc);
1070         if (PageCgroupUsed(pc)) {
1071                 mem = pc->mem_cgroup;
1072                 if (mem && !css_tryget(&mem->css))
1073                         mem = NULL;
1074         } else {
1075                 ent.val = page_private(page);
1076                 id = lookup_swap_cgroup(ent);
1077                 rcu_read_lock();
1078                 mem = mem_cgroup_lookup(id);
1079                 if (mem && !css_tryget(&mem->css))
1080                         mem = NULL;
1081                 rcu_read_unlock();
1082         }
1083         unlock_page_cgroup(pc);
1084         return mem;
1085 }
1086
1087 /*
1088  * Commit a charge obtained by __mem_cgroup_try_charge() and make the
1089  * page_cgroup USED. If it is already USED, uncharge and return.
1090  */
1091
1092 static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
1093                                      struct page_cgroup *pc,
1094                                      enum charge_type ctype)
1095 {
1096         /* try_charge() can return NULL in *memcg; handle that case here. */
1097         if (!mem)
1098                 return;
1099
1100         lock_page_cgroup(pc);
1101         if (unlikely(PageCgroupUsed(pc))) {
1102                 unlock_page_cgroup(pc);
1103                 res_counter_uncharge(&mem->res, PAGE_SIZE);
1104                 if (do_swap_account)
1105                         res_counter_uncharge(&mem->memsw, PAGE_SIZE);
1106                 css_put(&mem->css);
1107                 return;
1108         }
1109         pc->mem_cgroup = mem;
1110         smp_wmb();
1111         pc->flags = pcg_default_flags[ctype];
1112
1113         mem_cgroup_charge_statistics(mem, pc, true);
1114
1115         unlock_page_cgroup(pc);
1116 }
1117
1118 /**
1119  * mem_cgroup_move_account - move account of the page
1120  * @pc: page_cgroup of the page.
1121  * @from: mem_cgroup which the page is moved from.
1122  * @to: mem_cgroup which the page is moved to. @from != @to.
1123  *
1124  * The caller must confirm the following:
1125  * - the page is not on the LRU (isolate_page() is useful.)
1126  *
1127  * Returns 0 on success,
1128  * returns -EBUSY when the lock is busy or "pc" is unstable.
1129  *
1130  * This function does the "uncharge" from the old cgroup but does not do the
1131  * "charge" to the new cgroup; that should be done by the caller.
1132  */
1133
1134 static int mem_cgroup_move_account(struct page_cgroup *pc,
1135         struct mem_cgroup *from, struct mem_cgroup *to)
1136 {
1137         struct mem_cgroup_per_zone *from_mz, *to_mz;
1138         int nid, zid;
1139         int ret = -EBUSY;
1140         struct page *page;
1141         int cpu;
1142         struct mem_cgroup_stat *stat;
1143         struct mem_cgroup_stat_cpu *cpustat;
1144
1145         VM_BUG_ON(from == to);
1146         VM_BUG_ON(PageLRU(pc->page));
1147
1148         nid = page_cgroup_nid(pc);
1149         zid = page_cgroup_zid(pc);
1150         from_mz =  mem_cgroup_zoneinfo(from, nid, zid);
1151         to_mz =  mem_cgroup_zoneinfo(to, nid, zid);
1152
1153         if (!trylock_page_cgroup(pc))
1154                 return ret;
1155
1156         if (!PageCgroupUsed(pc))
1157                 goto out;
1158
1159         if (pc->mem_cgroup != from)
1160                 goto out;
1161
1162         res_counter_uncharge(&from->res, PAGE_SIZE);
1163         mem_cgroup_charge_statistics(from, pc, false);
1164
1165         page = pc->page;
1166         if (page_is_file_cache(page) && page_mapped(page)) {
1167                 cpu = smp_processor_id();
1168                 /* Update mapped_file data for mem_cgroup "from" */
1169                 stat = &from->stat;
1170                 cpustat = &stat->cpustat[cpu];
1171                 __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_MAPPED_FILE,
1172                                                 -1);
1173
1174                 /* Update mapped_file data for mem_cgroup "to" */
1175                 stat = &to->stat;
1176                 cpustat = &stat->cpustat[cpu];
1177                 __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_MAPPED_FILE,
1178                                                 1);
1179         }
1180
1181         if (do_swap_account)
1182                 res_counter_uncharge(&from->memsw, PAGE_SIZE);
1183         css_put(&from->css);
1184
1185         css_get(&to->css);
1186         pc->mem_cgroup = to;
1187         mem_cgroup_charge_statistics(to, pc, true);
1188         ret = 0;
1189 out:
1190         unlock_page_cgroup(pc);
1191         return ret;
1192 }
1193
1194 /*
1195  * Move charges to the parent cgroup.
1196  */
1197
1198 static int mem_cgroup_move_parent(struct page_cgroup *pc,
1199                                   struct mem_cgroup *child,
1200                                   gfp_t gfp_mask)
1201 {
1202         struct page *page = pc->page;
1203         struct cgroup *cg = child->css.cgroup;
1204         struct cgroup *pcg = cg->parent;
1205         struct mem_cgroup *parent;
1206         int ret;
1207
1208         /* Is ROOT ? */
1209         if (!pcg)
1210                 return -EINVAL;
1211
1212
1213         parent = mem_cgroup_from_cont(pcg);
1214
1215
1216         ret = __mem_cgroup_try_charge(NULL, gfp_mask, &parent, false);
1217         if (ret || !parent)
1218                 return ret;
1219
1220         if (!get_page_unless_zero(page)) {
1221                 ret = -EBUSY;
1222                 goto uncharge;
1223         }
1224
1225         ret = isolate_lru_page(page);
1226
1227         if (ret)
1228                 goto cancel;
1229
1230         ret = mem_cgroup_move_account(pc, child, parent);
1231
1232         putback_lru_page(page);
1233         if (!ret) {
1234                 put_page(page);
1235                 /* drop extra refcnt by try_charge() */
1236                 css_put(&parent->css);
1237                 return 0;
1238         }
1239
1240 cancel:
1241         put_page(page);
1242 uncharge:
1243         /* drop extra refcnt by try_charge() */
1244         css_put(&parent->css);
1245         /* uncharge if move fails */
1246         res_counter_uncharge(&parent->res, PAGE_SIZE);
1247         if (do_swap_account)
1248                 res_counter_uncharge(&parent->memsw, PAGE_SIZE);
1249         return ret;
1250 }
1251
1252 /*
1253  * Charge the memory controller for page usage.
1254  * Return
1255  * 0 if the charge was successful
1256  * < 0 if the cgroup is over its limit
1257  */
1258 static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
1259                                 gfp_t gfp_mask, enum charge_type ctype,
1260                                 struct mem_cgroup *memcg)
1261 {
1262         struct mem_cgroup *mem;
1263         struct page_cgroup *pc;
1264         int ret;
1265
1266         pc = lookup_page_cgroup(page);
1267         /* can happen at boot */
1268         if (unlikely(!pc))
1269                 return 0;
1270         prefetchw(pc);
1271
1272         mem = memcg;
1273         ret = __mem_cgroup_try_charge(mm, gfp_mask, &mem, true);
1274         if (ret || !mem)
1275                 return ret;
1276
1277         __mem_cgroup_commit_charge(mem, pc, ctype);
1278         return 0;
1279 }
1280
1281 int mem_cgroup_newpage_charge(struct page *page,
1282                               struct mm_struct *mm, gfp_t gfp_mask)
1283 {
1284         if (mem_cgroup_disabled())
1285                 return 0;
1286         if (PageCompound(page))
1287                 return 0;
1288         /*
1289          * If already mapped, we don't have to account.
1290          * If it is page cache, page->mapping holds an address_space.
1291          * But page->mapping may hold a stale anon_vma pointer;
1292          * detect that with a PageAnon() check. A newly-mapped anon page's
1293          * page->mapping is NULL.
1294          */
1295         if (page_mapped(page) || (page->mapping && !PageAnon(page)))
1296                 return 0;
1297         if (unlikely(!mm))
1298                 mm = &init_mm;
1299         return mem_cgroup_charge_common(page, mm, gfp_mask,
1300                                 MEM_CGROUP_CHARGE_TYPE_MAPPED, NULL);
1301 }
1302
1303 static void
1304 __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
1305                                         enum charge_type ctype);
1306
1307 int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
1308                                 gfp_t gfp_mask)
1309 {
1310         struct mem_cgroup *mem = NULL;
1311         int ret;
1312
1313         if (mem_cgroup_disabled())
1314                 return 0;
1315         if (PageCompound(page))
1316                 return 0;
1317         /*
1318          * Corner case handling. This is usually called from
1319          * add_to_page_cache(). But some filesystems (shmem) precharge the page
1320          * before calling it and call add_to_page_cache() with GFP_NOWAIT.
1321          *
1322          * In the GFP_NOWAIT case, the page may have been pre-charged before
1323          * add_to_page_cache() (see shmem.c); check it here and avoid charging
1324          * twice. (It works but has to pay a slightly larger cost.)
1325          * And when the page is SwapCache, swap information should be taken
1326          * into account. This is under lock_page() now.
1327          */
1328         if (!(gfp_mask & __GFP_WAIT)) {
1329                 struct page_cgroup *pc;
1330
1331
1332                 pc = lookup_page_cgroup(page);
1333                 if (!pc)
1334                         return 0;
1335                 lock_page_cgroup(pc);
1336                 if (PageCgroupUsed(pc)) {
1337                         unlock_page_cgroup(pc);
1338                         return 0;
1339                 }
1340                 unlock_page_cgroup(pc);
1341         }
1342
1343         if (unlikely(!mm && !mem))
1344                 mm = &init_mm;
1345
1346         if (page_is_file_cache(page))
1347                 return mem_cgroup_charge_common(page, mm, gfp_mask,
1348                                 MEM_CGROUP_CHARGE_TYPE_CACHE, NULL);
1349
1350         /* shmem */
1351         if (PageSwapCache(page)) {
1352                 ret = mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &mem);
1353                 if (!ret)
1354                         __mem_cgroup_commit_charge_swapin(page, mem,
1355                                         MEM_CGROUP_CHARGE_TYPE_SHMEM);
1356         } else
1357                 ret = mem_cgroup_charge_common(page, mm, gfp_mask,
1358                                         MEM_CGROUP_CHARGE_TYPE_SHMEM, mem);
1359
1360         return ret;
1361 }
1362
1363 /*
1364  * During swap-in (try_charge -> commit or cancel), the page is locked.
1365  * And when try_charge() returns successfully, one refcnt to the memcg, not
1366  * tied to a struct page_cgroup, is acquired. This refcnt will be consumed
1367  * by "commit()" or released by "cancel()".
1368  */
1369 int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
1370                                  struct page *page,
1371                                  gfp_t mask, struct mem_cgroup **ptr)
1372 {
1373         struct mem_cgroup *mem;
1374         int ret;
1375
1376         if (mem_cgroup_disabled())
1377                 return 0;
1378
1379         if (!do_swap_account)
1380                 goto charge_cur_mm;
1381         /*
1382          * A racing thread's fault, or swapoff, may have already updated
1383          * the pte, and even removed page from swap cache: return success
1384          * to go on to do_swap_page()'s pte_same() test, which should fail.
1385          */
1386         if (!PageSwapCache(page))
1387                 return 0;
1388         mem = try_get_mem_cgroup_from_swapcache(page);
1389         if (!mem)
1390                 goto charge_cur_mm;
1391         *ptr = mem;
1392         ret = __mem_cgroup_try_charge(NULL, mask, ptr, true);
1393         /* drop extra refcnt from tryget */
1394         css_put(&mem->css);
1395         return ret;
1396 charge_cur_mm:
1397         if (unlikely(!mm))
1398                 mm = &init_mm;
1399         return __mem_cgroup_try_charge(mm, mask, ptr, true);
1400 }
1401
1402 static void
1403 __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
1404                                         enum charge_type ctype)
1405 {
1406         struct page_cgroup *pc;
1407
1408         if (mem_cgroup_disabled())
1409                 return;
1410         if (!ptr)
1411                 return;
1412         pc = lookup_page_cgroup(page);
1413         mem_cgroup_lru_del_before_commit_swapcache(page);
1414         __mem_cgroup_commit_charge(ptr, pc, ctype);
1415         mem_cgroup_lru_add_after_commit_swapcache(page);
1416         /*
1417          * Now the swap entry is in memory. This means the page may be
1418          * counted both as mem and as swap: a double count.
1419          * Fix it by uncharging from memsw. Basically, this SwapCache is stable
1420          * under lock_page(). But in do_swap_page() (memory.c), reuse_swap_page()
1421          * may call delete_from_swap_cache() before we reach here.
1422          */
1423         if (do_swap_account && PageSwapCache(page)) {
1424                 swp_entry_t ent = {.val = page_private(page)};
1425                 unsigned short id;
1426                 struct mem_cgroup *memcg;
1427
1428                 id = swap_cgroup_record(ent, 0);
1429                 rcu_read_lock();
1430                 memcg = mem_cgroup_lookup(id);
1431                 if (memcg) {
1432                         /*
1433                          * This recorded memcg can be an obsolete one, so
1434                          * avoid calling css_tryget().
1435                          */
1436                         res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
1437                         mem_cgroup_put(memcg);
1438                 }
1439                 rcu_read_unlock();
1440         }
1441         /* add this page(page_cgroup) to the LRU we want. */
1442
1443 }
1444
1445 void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
1446 {
1447         __mem_cgroup_commit_charge_swapin(page, ptr,
1448                                         MEM_CGROUP_CHARGE_TYPE_MAPPED);
1449 }
1450
1451 void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *mem)
1452 {
1453         if (mem_cgroup_disabled())
1454                 return;
1455         if (!mem)
1456                 return;
1457         res_counter_uncharge(&mem->res, PAGE_SIZE);
1458         if (do_swap_account)
1459                 res_counter_uncharge(&mem->memsw, PAGE_SIZE);
1460         css_put(&mem->css);
1461 }
1462
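/*
 * Caller protocol sketch (annotation, mirroring how a swap-in fault path is
 * expected to use the three functions above), with the swapcache page locked:
 *
 *	struct mem_cgroup *ptr = NULL;
 *	if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr))
 *		goto out_fail;
 *	(install the pte for the page)
 *	mem_cgroup_commit_charge_swapin(page, ptr);
 *
 * If installing the pte fails, mem_cgroup_cancel_charge_swapin(ptr) is
 * called instead of commit, releasing the reservation taken by try_charge.
 */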
1463
1464 /*
1465  * uncharge if !page_mapped(page)
1466  */
1467 static struct mem_cgroup *
1468 __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
1469 {
1470         struct page_cgroup *pc;
1471         struct mem_cgroup *mem = NULL;
1472         struct mem_cgroup_per_zone *mz;
1473
1474         if (mem_cgroup_disabled())
1475                 return NULL;
1476
1477         if (PageSwapCache(page))
1478                 return NULL;
1479
1480         /*
1481          * Check if our page_cgroup is valid
1482          */
1483         pc = lookup_page_cgroup(page);
1484         if (unlikely(!pc || !PageCgroupUsed(pc)))
1485                 return NULL;
1486
1487         lock_page_cgroup(pc);
1488
1489         mem = pc->mem_cgroup;
1490
1491         if (!PageCgroupUsed(pc))
1492                 goto unlock_out;
1493
1494         switch (ctype) {
1495         case MEM_CGROUP_CHARGE_TYPE_MAPPED:
1496                 if (page_mapped(page))
1497                         goto unlock_out;
1498                 break;
1499         case MEM_CGROUP_CHARGE_TYPE_SWAPOUT:
1500                 if (!PageAnon(page)) {  /* Shared memory */
1501                         if (page->mapping && !page_is_file_cache(page))
1502                                 goto unlock_out;
1503                 } else if (page_mapped(page)) /* Anon */
1504                                 goto unlock_out;
1505                 break;
1506         default:
1507                 break;
1508         }
1509
1510         res_counter_uncharge(&mem->res, PAGE_SIZE);
1511         if (do_swap_account && (ctype != MEM_CGROUP_CHARGE_TYPE_SWAPOUT))
1512                 res_counter_uncharge(&mem->memsw, PAGE_SIZE);
1513         mem_cgroup_charge_statistics(mem, pc, false);
1514
1515         ClearPageCgroupUsed(pc);
1516         /*
1517          * pc->mem_cgroup is not cleared here. It will be accessed when the
1518          * page is freed from the LRU. This is safe because an uncharged page
1519          * is expected not to be reused (it is freed soon). The exception is
1520          * SwapCache, which is handled by special functions.
1521          */
1522
1523         mz = page_cgroup_zoneinfo(pc);
1524         unlock_page_cgroup(pc);
1525
1526         /* at swapout, this memcg will be accessed to record to swap */
1527         if (ctype != MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
1528                 css_put(&mem->css);
1529
1530         return mem;
1531
1532 unlock_out:
1533         unlock_page_cgroup(pc);
1534         return NULL;
1535 }
1536
1537 void mem_cgroup_uncharge_page(struct page *page)
1538 {
1539         /* early check. */
1540         if (page_mapped(page))
1541                 return;
1542         if (page->mapping && !PageAnon(page))
1543                 return;
1544         __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_MAPPED);
1545 }
1546
1547 void mem_cgroup_uncharge_cache_page(struct page *page)
1548 {
1549         VM_BUG_ON(page_mapped(page));
1550         VM_BUG_ON(page->mapping);
1551         __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE);
1552 }
1553
1554 #ifdef CONFIG_SWAP
1555 /*
1556  * Called after __delete_from_swap_cache(); drops the "page" account.
1557  * The memcg information is recorded in the swap_cgroup of "ent".
1558  */
1559 void mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent)
1560 {
1561         struct mem_cgroup *memcg;
1562
1563         memcg = __mem_cgroup_uncharge_common(page,
1564                                         MEM_CGROUP_CHARGE_TYPE_SWAPOUT);
1565         /* record memcg information */
1566         if (do_swap_account && memcg) {
1567                 swap_cgroup_record(ent, css_id(&memcg->css));
1568                 mem_cgroup_get(memcg);
1569         }
1570         if (memcg)
1571                 css_put(&memcg->css);
1572 }
1573 #endif
1574
1575 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
1576 /*
1577  * Called from swap_entry_free(). Removes the record in swap_cgroup and
1578  * uncharges the "memsw" account.
1579  */
1580 void mem_cgroup_uncharge_swap(swp_entry_t ent)
1581 {
1582         struct mem_cgroup *memcg;
1583         unsigned short id;
1584
1585         if (!do_swap_account)
1586                 return;
1587
1588         id = swap_cgroup_record(ent, 0);
1589         rcu_read_lock();
1590         memcg = mem_cgroup_lookup(id);
1591         if (memcg) {
1592                 /*
1593                  * We uncharge this because the swap entry is freed.
1594                  * This memcg can be an obsolete one, so we avoid calling css_tryget().
1595                  */
1596                 res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
1597                 mem_cgroup_put(memcg);
1598         }
1599         rcu_read_unlock();
1600 }
1601 #endif
1602
1603 /*
1604  * Before starting migration, account PAGE_SIZE to the mem_cgroup that the
1605  * old page belongs to.
1606  */
1607 int mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr)
1608 {
1609         struct page_cgroup *pc;
1610         struct mem_cgroup *mem = NULL;
1611         int ret = 0;
1612
1613         if (mem_cgroup_disabled())
1614                 return 0;
1615
1616         pc = lookup_page_cgroup(page);
1617         lock_page_cgroup(pc);
1618         if (PageCgroupUsed(pc)) {
1619                 mem = pc->mem_cgroup;
1620                 css_get(&mem->css);
1621         }
1622         unlock_page_cgroup(pc);
1623
1624         if (mem) {
1625                 ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem, false);
1626                 css_put(&mem->css);
1627         }
1628         *ptr = mem;
1629         return ret;
1630 }
1631
1632 /* remove the redundant charge if migration failed */
1633 void mem_cgroup_end_migration(struct mem_cgroup *mem,
1634                 struct page *oldpage, struct page *newpage)
1635 {
1636         struct page *target, *unused;
1637         struct page_cgroup *pc;
1638         enum charge_type ctype;
1639
1640         if (!mem)
1641                 return;
1642
1643         /* on migration success, oldpage->mapping is NULL. */
1644         if (oldpage->mapping) {
1645                 target = oldpage;
1646                 unused = NULL;
1647         } else {
1648                 target = newpage;
1649                 unused = oldpage;
1650         }
1651
1652         if (PageAnon(target))
1653                 ctype = MEM_CGROUP_CHARGE_TYPE_MAPPED;
1654         else if (page_is_file_cache(target))
1655                 ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
1656         else
1657                 ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
1658
1659         /* the unused page is not on the radix-tree now. */
1660         if (unused)
1661                 __mem_cgroup_uncharge_common(unused, ctype);
1662
1663         pc = lookup_page_cgroup(target);
1664         /*
1665          * __mem_cgroup_commit_charge() checks the PCG_USED bit of the
1666          * page_cgroup, so double-counting is effectively avoided.
1667          */
1668         __mem_cgroup_commit_charge(mem, pc, ctype);
1669
1670         /*
1671          * Both oldpage and newpage are still under lock_page(), so we
1672          * don't have to care about races in the radix-tree.
1673          * But we do have to check whether the page is still mapped.
1674          *
1675          * There is a case for !page_mapped(): at the start of the
1676          * migration, oldpage was mapped, but by now it may have been zapped.
1677          * Still, we know the *target* page is not freed/reused under us,
1678          * and mem_cgroup_uncharge_page() does all the necessary checks.
1679          */
1680         if (ctype == MEM_CGROUP_CHARGE_TYPE_MAPPED)
1681                 mem_cgroup_uncharge_page(target);
1682 }
1683
1684 /*
1685  * A call to try to shrink memory usage on a charge failure at shmem's swapin.
1686  * Calling hierarchical_reclaim is not enough because we should update
1687  * last_oom_jiffies to prevent pagefault_out_of_memory from invoking a global OOM.
1688  * Moreover, considering the hierarchy, we should reclaim from the mem_over_limit,
1689  * not from the memcg to which this page would be charged.
1690  * try_charge_swapin does all of this work properly.
1691  */
1692 int mem_cgroup_shmem_charge_fallback(struct page *page,
1693                             struct mm_struct *mm,
1694                             gfp_t gfp_mask)
1695 {
1696         struct mem_cgroup *mem = NULL;
1697         int ret;
1698
1699         if (mem_cgroup_disabled())
1700                 return 0;
1701
1702         ret = mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &mem);
1703         if (!ret)
1704                 mem_cgroup_cancel_charge_swapin(mem); /* it does !mem check */
1705
1706         return ret;
1707 }
1708
1709 static DEFINE_MUTEX(set_limit_mutex);
1710
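/*
 * Resize the memory limit. This is reached from mem_cgroup_write() when
 * userspace writes a value such as "64M" (illustrative) to
 * memory.limit_in_bytes. If the limit cannot be set because usage is still
 * above it, hierarchical reclaim runs and the operation is retried a bounded
 * number of times. The new limit must not exceed the memsw limit, which is
 * checked under set_limit_mutex.
 */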
1711 static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
1712                                 unsigned long long val)
1713 {
1714         int retry_count;
1715         int progress;
1716         u64 memswlimit;
1717         int ret = 0;
1718         int children = mem_cgroup_count_children(memcg);
1719         u64 curusage, oldusage;
1720
1721         /*
1722          * To keep hierarchical_reclaim simple, how long we should retry
1723          * depends on the caller. We set our retry count to be a function
1724          * of the number of children we should visit in this loop.
1725          */
1726         retry_count = MEM_CGROUP_RECLAIM_RETRIES * children;
1727
1728         oldusage = res_counter_read_u64(&memcg->res, RES_USAGE);
1729
1730         while (retry_count) {
1731                 if (signal_pending(current)) {
1732                         ret = -EINTR;
1733                         break;
1734                 }
1735                 /*
1736                  * Rather than hide it all in some function, do this in an
1737                  * open-coded manner so you can see what really happens.
1738                  * We have to guarantee mem->res.limit <= mem->memsw.limit.
1739                  */
1740                 mutex_lock(&set_limit_mutex);
1741                 memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
1742                 if (memswlimit < val) {
1743                         ret = -EINVAL;
1744                         mutex_unlock(&set_limit_mutex);
1745                         break;
1746                 }
1747                 ret = res_counter_set_limit(&memcg->res, val);
1748                 mutex_unlock(&set_limit_mutex);
1749
1750                 if (!ret)
1751                         break;
1752
1753                 progress = mem_cgroup_hierarchical_reclaim(memcg, GFP_KERNEL,
1754                                                    false, true);
1755                 curusage = res_counter_read_u64(&memcg->res, RES_USAGE);
1756                 /* Usage is reduced ? */
1757                 if (curusage >= oldusage)
1758                         retry_count--;
1759                 else
1760                         oldusage = curusage;
1761         }
1762
1763         return ret;
1764 }
1765
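/*
 * Resize the memory+swap limit, the counterpart of mem_cgroup_resize_limit()
 * for memory.memsw.limit_in_bytes. The new value must not be below the plain
 * memory limit (mem->res.limit <= mem->memsw.limit must keep holding), and
 * the retry logic mirrors the routine above.
 */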
1766 static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
1767                                         unsigned long long val)
1768 {
1769         int retry_count;
1770         u64 memlimit, oldusage, curusage;
1771         int children = mem_cgroup_count_children(memcg);
1772         int ret = -EBUSY;
1773
1774         /* see mem_cgroup_resize_limit() */
1775         retry_count = children * MEM_CGROUP_RECLAIM_RETRIES;
1776         oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
1777         while (retry_count) {
1778                 if (signal_pending(current)) {
1779                         ret = -EINTR;
1780                         break;
1781                 }
1782                 /*
1783                  * Rather than hide it all in some function, do this in an
1784                  * open-coded manner so you can see what really happens.
1785                  * We have to guarantee mem->res.limit <= mem->memsw.limit.
1786                  */
1787                 mutex_lock(&set_limit_mutex);
1788                 memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
1789                 if (memlimit > val) {
1790                         ret = -EINVAL;
1791                         mutex_unlock(&set_limit_mutex);
1792                         break;
1793                 }
1794                 ret = res_counter_set_limit(&memcg->memsw, val);
1795                 mutex_unlock(&set_limit_mutex);
1796
1797                 if (!ret)
1798                         break;
1799
1800                 mem_cgroup_hierarchical_reclaim(memcg, GFP_KERNEL, true, true);
1801                 curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
1802                 /* Usage is reduced ? */
1803                 if (curusage >= oldusage)
1804                         retry_count--;
1805                 else
1806                         oldusage = curusage;
1807         }
1808         return ret;
1809 }
1810
1811 /*
1812  * This routine traverses the page_cgroups in the given list and drops them all.
1813  * *And* it doesn't reclaim the pages themselves, it just removes the page_cgroups.
1814  */
1815 static int mem_cgroup_force_empty_list(struct mem_cgroup *mem,
1816                                 int node, int zid, enum lru_list lru)
1817 {
1818         struct zone *zone;
1819         struct mem_cgroup_per_zone *mz;
1820         struct page_cgroup *pc, *busy;
1821         unsigned long flags, loop;
1822         struct list_head *list;
1823         int ret = 0;
1824
1825         zone = &NODE_DATA(node)->node_zones[zid];
1826         mz = mem_cgroup_zoneinfo(mem, node, zid);
1827         list = &mz->lists[lru];
1828
1829         loop = MEM_CGROUP_ZSTAT(mz, lru);
1830         /* give some margin against -EBUSY etc... */
1831         loop += 256;
1832         busy = NULL;
1833         while (loop--) {
1834                 ret = 0;
1835                 spin_lock_irqsave(&zone->lru_lock, flags);
1836                 if (list_empty(list)) {
1837                         spin_unlock_irqrestore(&zone->lru_lock, flags);
1838                         break;
1839                 }
1840                 pc = list_entry(list->prev, struct page_cgroup, lru);
1841                 if (busy == pc) {
1842                         list_move(&pc->lru, list);
1843                         busy = NULL;
1844                         spin_unlock_irqrestore(&zone->lru_lock, flags);
1845                         continue;
1846                 }
1847                 spin_unlock_irqrestore(&zone->lru_lock, flags);
1848
1849                 ret = mem_cgroup_move_parent(pc, mem, GFP_KERNEL);
1850                 if (ret == -ENOMEM)
1851                         break;
1852
1853                 if (ret == -EBUSY || ret == -EINVAL) {
1854                         /* found lock contention or "pc" is obsolete. */
1855                         busy = pc;
1856                         cond_resched();
1857                 } else
1858                         busy = NULL;
1859         }
1860
1861         if (!ret && !list_empty(list))
1862                 return -EBUSY;
1863         return ret;
1864 }
1865
1866 /*
1867  * Make the mem_cgroup's charge 0 if there are no tasks.
1868  * This enables deleting this mem_cgroup.
1869  */
1870 static int mem_cgroup_force_empty(struct mem_cgroup *mem, bool free_all)
1871 {
1872         int ret;
1873         int node, zid, shrink;
1874         int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
1875         struct cgroup *cgrp = mem->css.cgroup;
1876
1877         css_get(&mem->css);
1878
1879         shrink = 0;
1880         /* should free all ? */
1881         if (free_all)
1882                 goto try_to_free;
1883 move_account:
1884         while (mem->res.usage > 0) {
1885                 ret = -EBUSY;
1886                 if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children))
1887                         goto out;
1888                 ret = -EINTR;
1889                 if (signal_pending(current))
1890                         goto out;
1891                 /* This is for making all *used* pages be on the LRU. */
1892                 lru_add_drain_all();
1893                 ret = 0;
1894                 for_each_node_state(node, N_HIGH_MEMORY) {
1895                         for (zid = 0; !ret && zid < MAX_NR_ZONES; zid++) {
1896                                 enum lru_list l;
1897                                 for_each_lru(l) {
1898                                         ret = mem_cgroup_force_empty_list(mem,
1899                                                         node, zid, l);
1900                                         if (ret)
1901                                                 break;
1902                                 }
1903                         }
1904                         if (ret)
1905                                 break;
1906                 }
1907                 /* it seems the parent cgroup doesn't have enough memory */
1908                 if (ret == -ENOMEM)
1909                         goto try_to_free;
1910                 cond_resched();
1911         }
1912         ret = 0;
1913 out:
1914         css_put(&mem->css);
1915         return ret;
1916
1917 try_to_free:
1918         /* return -EBUSY if there is a task or if we come here twice. */
1919         if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children) || shrink) {
1920                 ret = -EBUSY;
1921                 goto out;
1922         }
1923         /* we call try-to-free pages to make this cgroup empty */
1924         lru_add_drain_all();
1925         /* try to free all pages in this cgroup */
1926         shrink = 1;
1927         while (nr_retries && mem->res.usage > 0) {
1928                 int progress;
1929
1930                 if (signal_pending(current)) {
1931                         ret = -EINTR;
1932                         goto out;
1933                 }
1934                 progress = try_to_free_mem_cgroup_pages(mem, GFP_KERNEL,
1935                                                 false, get_swappiness(mem));
1936                 if (!progress) {
1937                         nr_retries--;
1938                         /* maybe some writeback is necessary */
1939                         congestion_wait(WRITE, HZ/10);
1940                 }
1941
1942         }
1943         lru_add_drain();
1944         /* try move_account...there may be some *locked* pages. */
1945         if (mem->res.usage)
1946                 goto move_account;
1947         ret = 0;
1948         goto out;
1949 }
1950
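/* Handler for the memory.force_empty trigger file. */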
1951 int mem_cgroup_force_empty_write(struct cgroup *cont, unsigned int event)
1952 {
1953         return mem_cgroup_force_empty(mem_cgroup_from_cont(cont), true);
1954 }
1955
1956
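/* Read/write handlers for the memory.use_hierarchy file. */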
1957 static u64 mem_cgroup_hierarchy_read(struct cgroup *cont, struct cftype *cft)
1958 {
1959         return mem_cgroup_from_cont(cont)->use_hierarchy;
1960 }
1961
1962 static int mem_cgroup_hierarchy_write(struct cgroup *cont, struct cftype *cft,
1963                                         u64 val)
1964 {
1965         int retval = 0;
1966         struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
1967         struct cgroup *parent = cont->parent;
1968         struct mem_cgroup *parent_mem = NULL;
1969
1970         if (parent)
1971                 parent_mem = mem_cgroup_from_cont(parent);
1972
1973         cgroup_lock();
1974         /*
1975          * If the parent's use_hierarchy is set, we can't make any modifications
1976          * in the child subtrees. If it is unset, then the change can
1977          * occur, provided the current cgroup has no children.
1978          *
1979          * For the root cgroup, parent_mem is NULL; we allow the value to be
1980          * set if there are no children.
1981          */
1982         if ((!parent_mem || !parent_mem->use_hierarchy) &&
1983                                 (val == 1 || val == 0)) {
1984                 if (list_empty(&cont->children))
1985                         mem->use_hierarchy = val;
1986                 else
1987                         retval = -EBUSY;
1988         } else
1989                 retval = -EINVAL;
1990         cgroup_unlock();
1991
1992         return retval;
1993 }
1994
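/*
 * Generic read handler for the memory.* and memory.memsw.* counter files.
 * cft->private encodes which res_counter (_MEM or _MEMSWAP) and which
 * attribute (usage, limit, max_usage, failcnt) to report.
 */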
1995 static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft)
1996 {
1997         struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
1998         u64 val = 0;
1999         int type, name;
2000
2001         type = MEMFILE_TYPE(cft->private);
2002         name = MEMFILE_ATTR(cft->private);
2003         switch (type) {
2004         case _MEM:
2005                 val = res_counter_read_u64(&mem->res, name);
2006                 break;
2007         case _MEMSWAP:
2008                 val = res_counter_read_u64(&mem->memsw, name);
2009                 break;
2010         default:
2011                 BUG();
2012                 break;
2013         }
2014         return val;
2015 }
2016 /*
2017  * The only user of this function is RES_LIMIT, i.e. the
2018  * memory.limit_in_bytes and memory.memsw.limit_in_bytes files.
2019  */
2020 static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
2021                             const char *buffer)
2022 {
2023         struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
2024         int type, name;
2025         unsigned long long val;
2026         int ret;
2027
2028         type = MEMFILE_TYPE(cft->private);
2029         name = MEMFILE_ATTR(cft->private);
2030         switch (name) {
2031         case RES_LIMIT:
2032                 /* This function does all the necessary parsing; reuse it. */
2033                 ret = res_counter_memparse_write_strategy(buffer, &val);
2034                 if (ret)
2035                         break;
2036                 if (type == _MEM)
2037                         ret = mem_cgroup_resize_limit(memcg, val);
2038                 else
2039                         ret = mem_cgroup_resize_memsw_limit(memcg, val);
2040                 break;
2041         default:
2042                 ret = -EINVAL; /* should be BUG() ? */
2043                 break;
2044         }
2045         return ret;
2046 }
2047
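/*
 * Walk up the cgroup hierarchy and report the smallest memory and memsw
 * limits that effectively apply to this memcg.
 */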
2048 static void memcg_get_hierarchical_limit(struct mem_cgroup *memcg,
2049                 unsigned long long *mem_limit, unsigned long long *memsw_limit)
2050 {
2051         struct cgroup *cgroup;
2052         unsigned long long min_limit, min_memsw_limit, tmp;
2053
2054         min_limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
2055         min_memsw_limit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
2056         cgroup = memcg->css.cgroup;
2057         if (!memcg->use_hierarchy)
2058                 goto out;
2059
2060         while (cgroup->parent) {
2061                 cgroup = cgroup->parent;
2062                 memcg = mem_cgroup_from_cont(cgroup);
2063                 if (!memcg->use_hierarchy)
2064                         break;
2065                 tmp = res_counter_read_u64(&memcg->res, RES_LIMIT);
2066                 min_limit = min(min_limit, tmp);
2067                 tmp = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
2068                 min_memsw_limit = min(min_memsw_limit, tmp);
2069         }
2070 out:
2071         *mem_limit = min_limit;
2072         *memsw_limit = min_memsw_limit;
2073         return;
2074 }
2075
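/*
 * Trigger handler for the *.max_usage_in_bytes and *.failcnt files: writing
 * to them resets the corresponding max-usage or failure counter.
 */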
2076 static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
2077 {
2078         struct mem_cgroup *mem;
2079         int type, name;
2080
2081         mem = mem_cgroup_from_cont(cont);
2082         type = MEMFILE_TYPE(event);
2083         name = MEMFILE_ATTR(event);
2084         switch (name) {
2085         case RES_MAX_USAGE:
2086                 if (type == _MEM)
2087                         res_counter_reset_max(&mem->res);
2088                 else
2089                         res_counter_reset_max(&mem->memsw);
2090                 break;
2091         case RES_FAILCNT:
2092                 if (type == _MEM)
2093                         res_counter_reset_failcnt(&mem->res);
2094                 else
2095                         res_counter_reset_failcnt(&mem->memsw);
2096                 break;
2097         }
2098         return 0;
2099 }
2100
2101
2102 /* For reading statistics */
2103 enum {
2104         MCS_CACHE,
2105         MCS_RSS,
2106         MCS_MAPPED_FILE,
2107         MCS_PGPGIN,
2108         MCS_PGPGOUT,
2109         MCS_INACTIVE_ANON,
2110         MCS_ACTIVE_ANON,
2111         MCS_INACTIVE_FILE,
2112         MCS_ACTIVE_FILE,
2113         MCS_UNEVICTABLE,
2114         NR_MCS_STAT,
2115 };
2116
2117 struct mcs_total_stat {
2118         s64 stat[NR_MCS_STAT];
2119 };
2120
2121 struct {
2122         char *local_name;
2123         char *total_name;
2124 } memcg_stat_strings[NR_MCS_STAT] = {
2125         {"cache", "total_cache"},
2126         {"rss", "total_rss"},
2127         {"mapped_file", "total_mapped_file"},
2128         {"pgpgin", "total_pgpgin"},
2129         {"pgpgout", "total_pgpgout"},
2130         {"inactive_anon", "total_inactive_anon"},
2131         {"active_anon", "total_active_anon"},
2132         {"inactive_file", "total_inactive_file"},
2133         {"active_file", "total_active_file"},
2134         {"unevictable", "total_unevictable"}
2135 };
2136
2137
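/*
 * Accumulate the local (non-hierarchical) statistics of one memcg into the
 * mcs_total_stat passed in via "data".
 */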
2138 static int mem_cgroup_get_local_stat(struct mem_cgroup *mem, void *data)
2139 {
2140         struct mcs_total_stat *s = data;
2141         s64 val;
2142
2143         /* per cpu stat */
2144         val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_CACHE);
2145         s->stat[MCS_CACHE] += val * PAGE_SIZE;
2146         val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_RSS);
2147         s->stat[MCS_RSS] += val * PAGE_SIZE;
2148         val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_MAPPED_FILE);
2149         s->stat[MCS_MAPPED_FILE] += val * PAGE_SIZE;
2150         val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_PGPGIN_COUNT);
2151         s->stat[MCS_PGPGIN] += val;
2152         val = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_PGPGOUT_COUNT);
2153         s->stat[MCS_PGPGOUT] += val;
2154
2155         /* per zone stat */
2156         val = mem_cgroup_get_local_zonestat(mem, LRU_INACTIVE_ANON);
2157         s->stat[MCS_INACTIVE_ANON] += val * PAGE_SIZE;
2158         val = mem_cgroup_get_local_zonestat(mem, LRU_ACTIVE_ANON);
2159         s->stat[MCS_ACTIVE_ANON] += val * PAGE_SIZE;
2160         val = mem_cgroup_get_local_zonestat(mem, LRU_INACTIVE_FILE);
2161         s->stat[MCS_INACTIVE_FILE] += val * PAGE_SIZE;
2162         val = mem_cgroup_get_local_zonestat(mem, LRU_ACTIVE_FILE);
2163         s->stat[MCS_ACTIVE_FILE] += val * PAGE_SIZE;
2164         val = mem_cgroup_get_local_zonestat(mem, LRU_UNEVICTABLE);
2165         s->stat[MCS_UNEVICTABLE] += val * PAGE_SIZE;
2166         return 0;
2167 }
2168
2169 static void
2170 mem_cgroup_get_total_stat(struct mem_cgroup *mem, struct mcs_total_stat *s)
2171 {
2172         mem_cgroup_walk_tree(mem, s, mem_cgroup_get_local_stat);
2173 }
2174
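/*
 * Implements the memory.stat file: local per-memcg statistics first, then the
 * hierarchical limits, then the "total_*" values accumulated over the subtree
 * (plus some reclaim statistics under CONFIG_DEBUG_VM).
 */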
2175 static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
2176                                  struct cgroup_map_cb *cb)
2177 {
2178         struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont);
2179         struct mcs_total_stat mystat;
2180         int i;
2181
2182         memset(&mystat, 0, sizeof(mystat));
2183         mem_cgroup_get_local_stat(mem_cont, &mystat);
2184
2185         for (i = 0; i < NR_MCS_STAT; i++)
2186                 cb->fill(cb, memcg_stat_strings[i].local_name, mystat.stat[i]);
2187
2188         /* Hierarchical information */
2189         {
2190                 unsigned long long limit, memsw_limit;
2191                 memcg_get_hierarchical_limit(mem_cont, &limit, &memsw_limit);
2192                 cb->fill(cb, "hierarchical_memory_limit", limit);
2193                 if (do_swap_account)
2194                         cb->fill(cb, "hierarchical_memsw_limit", memsw_limit);
2195         }
2196
2197         memset(&mystat, 0, sizeof(mystat));
2198         mem_cgroup_get_total_stat(mem_cont, &mystat);
2199         for (i = 0; i < NR_MCS_STAT; i++)
2200                 cb->fill(cb, memcg_stat_strings[i].total_name, mystat.stat[i]);
2201
2202
2203 #ifdef CONFIG_DEBUG_VM
2204         cb->fill(cb, "inactive_ratio", calc_inactive_ratio(mem_cont, NULL));
2205
2206         {
2207                 int nid, zid;
2208                 struct mem_cgroup_per_zone *mz;
2209                 unsigned long recent_rotated[2] = {0, 0};
2210                 unsigned long recent_scanned[2] = {0, 0};
2211
2212                 for_each_online_node(nid)
2213                         for (zid = 0; zid < MAX_NR_ZONES; zid++) {
2214                                 mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
2215
2216                                 recent_rotated[0] +=
2217                                         mz->reclaim_stat.recent_rotated[0];
2218                                 recent_rotated[1] +=
2219                                         mz->reclaim_stat.recent_rotated[1];
2220                                 recent_scanned[0] +=
2221                                         mz->reclaim_stat.recent_scanned[0];
2222                                 recent_scanned[1] +=
2223                                         mz->reclaim_stat.recent_scanned[1];
2224                         }
2225                 cb->fill(cb, "recent_rotated_anon", recent_rotated[0]);
2226                 cb->fill(cb, "recent_rotated_file", recent_rotated[1]);
2227                 cb->fill(cb, "recent_scanned_anon", recent_scanned[0]);
2228                 cb->fill(cb, "recent_scanned_file", recent_scanned[1]);
2229         }
2230 #endif
2231
2232         return 0;
2233 }
2234
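/* Read/write handlers for the memory.swappiness file (valid values: 0-100). */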
2235 static u64 mem_cgroup_swappiness_read(struct cgroup *cgrp, struct cftype *cft)
2236 {
2237         struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
2238
2239         return get_swappiness(memcg);
2240 }
2241
2242 static int mem_cgroup_swappiness_write(struct cgroup *cgrp, struct cftype *cft,
2243                                        u64 val)
2244 {
2245         struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
2246         struct mem_cgroup *parent;
2247
2248         if (val > 100)
2249                 return -EINVAL;
2250
2251         if (cgrp->parent == NULL)
2252                 return -EINVAL;
2253
2254         parent = mem_cgroup_from_cont(cgrp->parent);
2255
2256         cgroup_lock();
2257
2258         /* Under hierarchy, only an empty root (no children) may set this value */
2259         if ((parent->use_hierarchy) ||
2260             (memcg->use_hierarchy && !list_empty(&cgrp->children))) {
2261                 cgroup_unlock();
2262                 return -EINVAL;
2263         }
2264
2265         spin_lock(&memcg->reclaim_param_lock);
2266         memcg->swappiness = val;
2267         spin_unlock(&memcg->reclaim_param_lock);
2268
2269         cgroup_unlock();
2270
2271         return 0;
2272 }
2273
2274
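/*
 * Control files created in every memory cgroup directory. They show up as
 * memory.usage_in_bytes, memory.limit_in_bytes, memory.stat, and so on
 * (e.g. /cgroups/memory/<group>/memory.limit_in_bytes; the path is
 * illustrative and depends on where the cgroup filesystem is mounted).
 */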
2275 static struct cftype mem_cgroup_files[] = {
2276         {
2277                 .name = "usage_in_bytes",
2278                 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
2279                 .read_u64 = mem_cgroup_read,
2280         },
2281         {
2282                 .name = "max_usage_in_bytes",
2283                 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
2284                 .trigger = mem_cgroup_reset,
2285                 .read_u64 = mem_cgroup_read,
2286         },
2287         {
2288                 .name = "limit_in_bytes",
2289                 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
2290                 .write_string = mem_cgroup_write,
2291                 .read_u64 = mem_cgroup_read,
2292         },
2293         {
2294                 .name = "failcnt",
2295                 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
2296                 .trigger = mem_cgroup_reset,
2297                 .read_u64 = mem_cgroup_read,
2298         },
2299         {
2300                 .name = "stat",
2301                 .read_map = mem_control_stat_show,
2302         },
2303         {
2304                 .name = "force_empty",
2305                 .trigger = mem_cgroup_force_empty_write,
2306         },
2307         {
2308                 .name = "use_hierarchy",
2309                 .write_u64 = mem_cgroup_hierarchy_write,
2310                 .read_u64 = mem_cgroup_hierarchy_read,
2311         },
2312         {
2313                 .name = "swappiness",
2314                 .read_u64 = mem_cgroup_swappiness_read,
2315                 .write_u64 = mem_cgroup_swappiness_write,
2316         },
2317 };
2318
2319 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
2320 static struct cftype memsw_cgroup_files[] = {
2321         {
2322                 .name = "memsw.usage_in_bytes",
2323                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
2324                 .read_u64 = mem_cgroup_read,
2325         },
2326         {
2327                 .name = "memsw.max_usage_in_bytes",
2328                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
2329                 .trigger = mem_cgroup_reset,
2330                 .read_u64 = mem_cgroup_read,
2331         },
2332         {
2333                 .name = "memsw.limit_in_bytes",
2334                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
2335                 .write_string = mem_cgroup_write,
2336                 .read_u64 = mem_cgroup_read,
2337         },
2338         {
2339                 .name = "memsw.failcnt",
2340                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
2341                 .trigger = mem_cgroup_reset,
2342                 .read_u64 = mem_cgroup_read,
2343         },
2344 };
2345
2346 static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
2347 {
2348         if (!do_swap_account)
2349                 return 0;
2350         return cgroup_add_files(cont, ss, memsw_cgroup_files,
2351                                 ARRAY_SIZE(memsw_cgroup_files));
2352 }
2353 #else
2354 static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
2355 {
2356         return 0;
2357 }
2358 #endif
2359
2360 static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
2361 {
2362         struct mem_cgroup_per_node *pn;
2363         struct mem_cgroup_per_zone *mz;
2364         enum lru_list l;
2365         int zone, tmp = node;
2366         /*
2367          * This routine is called against possible nodes.
2368          * But it's a BUG to call kmalloc() against an offline node.
2369          *
2370          * TODO: this routine can waste a lot of memory for nodes which will
2371          *       never be onlined. It would be better to use a memory hotplug
2372          *       callback function.
2373          */
2374         if (!node_state(node, N_NORMAL_MEMORY))
2375                 tmp = -1;
2376         pn = kmalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
2377         if (!pn)
2378                 return 1;
2379
2380         mem->info.nodeinfo[node] = pn;
2381         memset(pn, 0, sizeof(*pn));
2382
2383         for (zone = 0; zone < MAX_NR_ZONES; zone++) {
2384                 mz = &pn->zoneinfo[zone];
2385                 for_each_lru(l)
2386                         INIT_LIST_HEAD(&mz->lists[l]);
2387         }
2388         return 0;
2389 }
2390
2391 static void free_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
2392 {
2393         kfree(mem->info.nodeinfo[node]);
2394 }
2395
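/*
 * Size of a mem_cgroup, including space for one mem_cgroup_stat_cpu per cpu
 * id (nr_cpu_ids entries) allocated right behind the struct.
 */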
2396 static int mem_cgroup_size(void)
2397 {
2398         int cpustat_size = nr_cpu_ids * sizeof(struct mem_cgroup_stat_cpu);
2399         return sizeof(struct mem_cgroup) + cpustat_size;
2400 }
2401
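/*
 * Allocate and zero a mem_cgroup: kmalloc() when it fits in a page,
 * vmalloc() otherwise (the per-cpu stat array can make it large).
 */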
2402 static struct mem_cgroup *mem_cgroup_alloc(void)
2403 {
2404         struct mem_cgroup *mem;
2405         int size = mem_cgroup_size();
2406
2407         if (size < PAGE_SIZE)
2408                 mem = kmalloc(size, GFP_KERNEL);
2409         else
2410                 mem = vmalloc(size);
2411
2412         if (mem)
2413                 memset(mem, 0, size);
2414         return mem;
2415 }
2416
2417 /*
2418  * When destroying a mem_cgroup, references from swap_cgroup can remain
2419  * (scanning them all at force_empty is too costly...).
2420  *
2421  * Instead of clearing all references at force_empty, we remember the
2422  * number of references from swap_cgroup and free the mem_cgroup when
2423  * that count goes down to 0.
2424  *
2425  * Removal of the cgroup itself succeeds regardless of refs from swap.
2426  */
2427
2428 static void __mem_cgroup_free(struct mem_cgroup *mem)
2429 {
2430         int node;
2431
2432         free_css_id(&mem_cgroup_subsys, &mem->css);
2433
2434         for_each_node_state(node, N_POSSIBLE)
2435                 free_mem_cgroup_per_zone_info(mem, node);
2436
2437         if (mem_cgroup_size() < PAGE_SIZE)
2438                 kfree(mem);
2439         else
2440                 vfree(mem);
2441 }
2442
2443 static void mem_cgroup_get(struct mem_cgroup *mem)
2444 {
2445         atomic_inc(&mem->refcnt);
2446 }
2447
2448 static void mem_cgroup_put(struct mem_cgroup *mem)
2449 {
2450         if (atomic_dec_and_test(&mem->refcnt)) {
2451                 struct mem_cgroup *parent = parent_mem_cgroup(mem);
2452                 __mem_cgroup_free(mem);
2453                 if (parent)
2454                         mem_cgroup_put(parent);
2455         }
2456 }
2457
2458 /*
2459  * Returns the parent mem_cgroup in the memory cgroup hierarchy when hierarchy is enabled.
2460  */
2461 static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem)
2462 {
2463         if (!mem->res.parent)
2464                 return NULL;
2465         return mem_cgroup_from_res_counter(mem->res.parent, res);
2466 }
2467
2468 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
2469 static void __init enable_swap_cgroup(void)
2470 {
2471         if (!mem_cgroup_disabled() && really_do_swap_account)
2472                 do_swap_account = 1;
2473 }
2474 #else
2475 static void __init enable_swap_cgroup(void)
2476 {
2477 }
2478 #endif
2479
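/*
 * Create a new memory cgroup: allocate the mem_cgroup and its per-node info,
 * enable swap accounting (if configured) when the root group is created, and
 * inherit use_hierarchy and swappiness from the parent. With use_hierarchy,
 * the res_counters are chained to the parent's so that charges propagate
 * upward.
 */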
2480 static struct cgroup_subsys_state * __ref
2481 mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
2482 {
2483         struct mem_cgroup *mem, *parent;
2484         long error = -ENOMEM;
2485         int node;
2486
2487         mem = mem_cgroup_alloc();
2488         if (!mem)
2489                 return ERR_PTR(error);
2490
2491         for_each_node_state(node, N_POSSIBLE)
2492                 if (alloc_mem_cgroup_per_zone_info(mem, node))
2493                         goto free_out;
2494         /* root ? */
2495         if (cont->parent == NULL) {
2496                 enable_swap_cgroup();
2497                 parent = NULL;
2498         } else {
2499                 parent = mem_cgroup_from_cont(cont->parent);
2500                 mem->use_hierarchy = parent->use_hierarchy;
2501         }
2502
2503         if (parent && parent->use_hierarchy) {
2504                 res_counter_init(&mem->res, &parent->res);
2505                 res_counter_init(&mem->memsw, &parent->memsw);
2506                 /*
2507                  * We increment the refcnt of the parent to ensure that we can
2508                  * safely access it on res_counter_charge/uncharge.
2509                  * This refcnt will be decremented when freeing this
2510                  * mem_cgroup (see mem_cgroup_put).
2511                  */
2512                 mem_cgroup_get(parent);
2513         } else {
2514                 res_counter_init(&mem->res, NULL);
2515                 res_counter_init(&mem->memsw, NULL);
2516         }
2517         mem->last_scanned_child = 0;
2518         spin_lock_init(&mem->reclaim_param_lock);
2519
2520         if (parent)
2521                 mem->swappiness = get_swappiness(parent);
2522         atomic_set(&mem->refcnt, 1);
2523         return &mem->css;
2524 free_out:
2525         __mem_cgroup_free(mem);
2526         return ERR_PTR(error);
2527 }
2528
2529 static int mem_cgroup_pre_destroy(struct cgroup_subsys *ss,
2530                                         struct cgroup *cont)
2531 {
2532         struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
2533
2534         return mem_cgroup_force_empty(mem, false);
2535 }
2536
2537 static void mem_cgroup_destroy(struct cgroup_subsys *ss,
2538                                 struct cgroup *cont)
2539 {
2540         struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
2541
2542         mem_cgroup_put(mem);
2543 }
2544
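/*
 * Populate a new cgroup directory with the memory.* files, plus the
 * memory.memsw.* files when swap accounting is enabled.
 */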
2545 static int mem_cgroup_populate(struct cgroup_subsys *ss,
2546                                 struct cgroup *cont)
2547 {
2548         int ret;
2549
2550         ret = cgroup_add_files(cont, ss, mem_cgroup_files,
2551                                 ARRAY_SIZE(mem_cgroup_files));
2552
2553         if (!ret)
2554                 ret = register_memsw_files(cont, ss);
2555         return ret;
2556 }
2557
2558 static void mem_cgroup_move_task(struct cgroup_subsys *ss,
2559                                 struct cgroup *cont,
2560                                 struct cgroup *old_cont,
2561                                 struct task_struct *p)
2562 {
2563         mutex_lock(&memcg_tasklist);
2564         /*
2565          * FIXME: It would be better to move the charges of this process from
2566          * the old memcg to the new one. But it's just on the TODO list now.
2567          */
2568         mutex_unlock(&memcg_tasklist);
2569 }
2570
2571 struct cgroup_subsys mem_cgroup_subsys = {
2572         .name = "memory",
2573         .subsys_id = mem_cgroup_subsys_id,
2574         .create = mem_cgroup_create,
2575         .pre_destroy = mem_cgroup_pre_destroy,
2576         .destroy = mem_cgroup_destroy,
2577         .populate = mem_cgroup_populate,
2578         .attach = mem_cgroup_move_task,
2579         .early_init = 0,
2580         .use_id = 1,
2581 };
2582
2583 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
2584
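/*
 * Booting with "noswapaccount" on the kernel command line disables
 * memory+swap accounting even when CONFIG_CGROUP_MEM_RES_CTLR_SWAP is set.
 */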
2585 static int __init disable_swap_account(char *s)
2586 {
2587         really_do_swap_account = 0;
2588         return 1;
2589 }
2590 __setup("noswapaccount", disable_swap_account);
2591 #endif