/* memcontrol.c - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/res_counter.h>
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/page-flags.h>
#include <linux/backing-dev.h>
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>

#include <asm/uaccess.h>

struct cgroup_subsys mem_cgroup_subsys __read_mostly;
static struct kmem_cache *page_cgroup_cache __read_mostly;
#define MEM_CGROUP_RECLAIM_RETRIES	5

/*
 * Statistics for memory cgroup.
 */
enum mem_cgroup_stat_index {
        /*
         * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
         */
        MEM_CGROUP_STAT_CACHE,          /* # of pages charged as cache */
        MEM_CGROUP_STAT_RSS,            /* # of pages charged as rss */
        MEM_CGROUP_STAT_PGPGIN_COUNT,   /* # of pages paged in */
        MEM_CGROUP_STAT_PGPGOUT_COUNT,  /* # of pages paged out */

        MEM_CGROUP_STAT_NSTATS,
};

struct mem_cgroup_stat_cpu {
        s64 count[MEM_CGROUP_STAT_NSTATS];
} ____cacheline_aligned_in_smp;

struct mem_cgroup_stat {
        struct mem_cgroup_stat_cpu cpustat[NR_CPUS];
};

/*
 * For accounting under irq disable, there is no need to increment the
 * preempt count.
 */
static void __mem_cgroup_stat_add_safe(struct mem_cgroup_stat *stat,
                enum mem_cgroup_stat_index idx, int val)
{
        int cpu = smp_processor_id();
        stat->cpustat[cpu].count[idx] += val;
}

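/*
 * Sum a per-CPU statistic over all possible CPUs.  Readers take no lock
 * here, so the result may race with concurrent updates and is only
 * approximate.
 */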
static s64 mem_cgroup_read_stat(struct mem_cgroup_stat *stat,
                enum mem_cgroup_stat_index idx)
{
        int cpu;
        s64 ret = 0;
        for_each_possible_cpu(cpu)
                ret += stat->cpustat[cpu].count[idx];
        return ret;
}

 | 85 | /* | 
| KAMEZAWA Hiroyuki | 6d12e2d | 2008-02-07 00:14:31 -0800 | [diff] [blame] | 86 |  * per-zone information in memory controller. | 
 | 87 |  */ | 
 | 88 |  | 
 | 89 | enum mem_cgroup_zstat_index { | 
 | 90 | 	MEM_CGROUP_ZSTAT_ACTIVE, | 
 | 91 | 	MEM_CGROUP_ZSTAT_INACTIVE, | 
 | 92 |  | 
 | 93 | 	NR_MEM_CGROUP_ZSTAT, | 
 | 94 | }; | 
 | 95 |  | 
 | 96 | struct mem_cgroup_per_zone { | 
| KAMEZAWA Hiroyuki | 072c56c1 | 2008-02-07 00:14:39 -0800 | [diff] [blame] | 97 | 	/* | 
 | 98 | 	 * spin_lock to protect the per cgroup LRU | 
 | 99 | 	 */ | 
 | 100 | 	spinlock_t		lru_lock; | 
| KAMEZAWA Hiroyuki | 1ecaab2 | 2008-02-07 00:14:38 -0800 | [diff] [blame] | 101 | 	struct list_head	active_list; | 
 | 102 | 	struct list_head	inactive_list; | 
| KAMEZAWA Hiroyuki | 6d12e2d | 2008-02-07 00:14:31 -0800 | [diff] [blame] | 103 | 	unsigned long count[NR_MEM_CGROUP_ZSTAT]; | 
 | 104 | }; | 
 | 105 | /* Macro for accessing counter */ | 
 | 106 | #define MEM_CGROUP_ZSTAT(mz, idx)	((mz)->count[(idx)]) | 
 | 107 |  | 
 | 108 | struct mem_cgroup_per_node { | 
 | 109 | 	struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES]; | 
 | 110 | }; | 
 | 111 |  | 
 | 112 | struct mem_cgroup_lru_info { | 
 | 113 | 	struct mem_cgroup_per_node *nodeinfo[MAX_NUMNODES]; | 
 | 114 | }; | 
 | 115 |  | 
/*
 * The memory controller data structure. The memory controller controls both
 * page cache and RSS per cgroup. We would eventually like to provide
 * statistics based on the statistics developed by Rik van Riel for clock-pro,
 * to help the administrator determine what knobs to tune.
 *
 * TODO: Add a water mark for the memory controller. Reclaim will begin when
 * we hit the water mark. Maybe even add a low water mark, such that
 * no reclaim occurs from a cgroup at its low water mark; this is
 * a feature that will be implemented much later in the future.
 */
struct mem_cgroup {
        struct cgroup_subsys_state css;
        /*
         * the counter to account for memory usage
         */
        struct res_counter res;
        /*
         * Per cgroup active and inactive list, similar to the
         * per zone LRU lists.
         */
        struct mem_cgroup_lru_info info;

        int     prev_priority;  /* for recording reclaim priority */
        /*
         * statistics.
         */
        struct mem_cgroup_stat stat;
};
static struct mem_cgroup init_mem_cgroup;

/*
 * We use the lower bit of the page->page_cgroup pointer as a bit spin
 * lock.  We need to ensure that page->page_cgroup is at least two
 * byte aligned (based on comments from Nick Piggin).  But since
 * bit_spin_lock doesn't actually set that lock bit in a non-debug
 * uniprocessor kernel, we should avoid setting it here too.
 */
#define PAGE_CGROUP_LOCK_BIT	0x0
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
#define PAGE_CGROUP_LOCK	(1 << PAGE_CGROUP_LOCK_BIT)
#else
#define PAGE_CGROUP_LOCK	0x0
#endif

/*
 * A page_cgroup is associated with every page descriptor. The
 * page_cgroup helps us identify information about the cgroup.
 */
struct page_cgroup {
        struct list_head lru;           /* per cgroup LRU list */
        struct page *page;
        struct mem_cgroup *mem_cgroup;
        int flags;
};
#define PAGE_CGROUP_FLAG_CACHE	(0x1)	/* charged as cache */
#define PAGE_CGROUP_FLAG_ACTIVE (0x2)	/* page is active in this cgroup */

static int page_cgroup_nid(struct page_cgroup *pc)
{
        return page_to_nid(pc->page);
}

static enum zone_type page_cgroup_zid(struct page_cgroup *pc)
{
        return page_zonenum(pc->page);
}

enum charge_type {
        MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
        MEM_CGROUP_CHARGE_TYPE_MAPPED,
        MEM_CGROUP_CHARGE_TYPE_FORCE,   /* used by force_empty */
};

/*
 * Always modified under the lru lock, so there is no need to
 * preempt_disable().
 */
static void mem_cgroup_charge_statistics(struct mem_cgroup *mem, int flags,
                                        bool charge)
{
        int val = (charge) ? 1 : -1;
        struct mem_cgroup_stat *stat = &mem->stat;

        VM_BUG_ON(!irqs_disabled());
        if (flags & PAGE_CGROUP_FLAG_CACHE)
                __mem_cgroup_stat_add_safe(stat, MEM_CGROUP_STAT_CACHE, val);
        else
                __mem_cgroup_stat_add_safe(stat, MEM_CGROUP_STAT_RSS, val);

        if (charge)
                __mem_cgroup_stat_add_safe(stat,
                                MEM_CGROUP_STAT_PGPGIN_COUNT, 1);
        else
                __mem_cgroup_stat_add_safe(stat,
                                MEM_CGROUP_STAT_PGPGOUT_COUNT, 1);
}

static struct mem_cgroup_per_zone *
mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid)
{
        return &mem->info.nodeinfo[nid]->zoneinfo[zid];
}

static struct mem_cgroup_per_zone *
page_cgroup_zoneinfo(struct page_cgroup *pc)
{
        struct mem_cgroup *mem = pc->mem_cgroup;
        int nid = page_cgroup_nid(pc);
        int zid = page_cgroup_zid(pc);

        return mem_cgroup_zoneinfo(mem, nid, zid);
}

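/*
 * Sum one of the per-zone LRU statistics over every zone of every
 * online node.
 */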
static unsigned long mem_cgroup_get_all_zonestat(struct mem_cgroup *mem,
                                        enum mem_cgroup_zstat_index idx)
{
        int nid, zid;
        struct mem_cgroup_per_zone *mz;
        u64 total = 0;

        for_each_online_node(nid)
                for (zid = 0; zid < MAX_NR_ZONES; zid++) {
                        mz = mem_cgroup_zoneinfo(mem, nid, zid);
                        total += MEM_CGROUP_ZSTAT(mz, idx);
                }
        return total;
}

static struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
{
        return container_of(cgroup_subsys_state(cont,
                                mem_cgroup_subsys_id), struct mem_cgroup,
                                css);
}

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
{
        return container_of(task_subsys_state(p, mem_cgroup_subsys_id),
                                struct mem_cgroup, css);
}

static inline int page_cgroup_locked(struct page *page)
{
        return bit_spin_is_locked(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
}

static void page_assign_page_cgroup(struct page *page, struct page_cgroup *pc)
{
        VM_BUG_ON(!page_cgroup_locked(page));
        page->page_cgroup = ((unsigned long)pc | PAGE_CGROUP_LOCK);
}

struct page_cgroup *page_get_page_cgroup(struct page *page)
{
        return (struct page_cgroup *) (page->page_cgroup & ~PAGE_CGROUP_LOCK);
}

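/*
 * Lock helpers for the bit spin lock kept in the low bit of
 * page->page_cgroup (see PAGE_CGROUP_LOCK_BIT above).
 */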
static void lock_page_cgroup(struct page *page)
{
        bit_spin_lock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
}

static int try_lock_page_cgroup(struct page *page)
{
        return bit_spin_trylock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
}

static void unlock_page_cgroup(struct page *page)
{
        bit_spin_unlock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
}

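/*
 * Per-zone LRU bookkeeping.  Callers of the two helpers below must hold
 * the mem_cgroup_per_zone's lru_lock with interrupts disabled.
 */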
static void __mem_cgroup_remove_list(struct mem_cgroup_per_zone *mz,
                        struct page_cgroup *pc)
{
        int from = pc->flags & PAGE_CGROUP_FLAG_ACTIVE;

        if (from)
                MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) -= 1;
        else
                MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) -= 1;

        mem_cgroup_charge_statistics(pc->mem_cgroup, pc->flags, false);
        list_del(&pc->lru);
}

static void __mem_cgroup_add_list(struct mem_cgroup_per_zone *mz,
                                struct page_cgroup *pc)
{
        int to = pc->flags & PAGE_CGROUP_FLAG_ACTIVE;

        if (!to) {
                MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) += 1;
                list_add(&pc->lru, &mz->inactive_list);
        } else {
                MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) += 1;
                list_add(&pc->lru, &mz->active_list);
        }
        mem_cgroup_charge_statistics(pc->mem_cgroup, pc->flags, true);
}

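/*
 * Move a page_cgroup between the active and inactive lists of its zone
 * and keep the zone statistics in sync.  The caller must hold the
 * corresponding mz->lru_lock.
 */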
static void __mem_cgroup_move_lists(struct page_cgroup *pc, bool active)
{
        int from = pc->flags & PAGE_CGROUP_FLAG_ACTIVE;
        struct mem_cgroup_per_zone *mz = page_cgroup_zoneinfo(pc);

        if (from)
                MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) -= 1;
        else
                MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) -= 1;

        if (active) {
                MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) += 1;
                pc->flags |= PAGE_CGROUP_FLAG_ACTIVE;
                list_move(&pc->lru, &mz->active_list);
        } else {
                MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) += 1;
                pc->flags &= ~PAGE_CGROUP_FLAG_ACTIVE;
                list_move(&pc->lru, &mz->inactive_list);
        }
}

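/*
 * Return non-zero if @task's mm is charged to the memory cgroup @mem.
 */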
int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
{
        int ret;

        task_lock(task);
        ret = task->mm && mm_match_cgroup(task->mm, mem);
        task_unlock(task);
        return ret;
}

/*
 * This routine assumes that the appropriate zone's lru lock is already held.
 */
void mem_cgroup_move_lists(struct page *page, bool active)
{
        struct page_cgroup *pc;
        struct mem_cgroup_per_zone *mz;
        unsigned long flags;

        if (mem_cgroup_subsys.disabled)
                return;

        /*
         * We cannot lock_page_cgroup while holding zone's lru_lock,
         * because other holders of lock_page_cgroup can be interrupted
         * with an attempt to rotate_reclaimable_page.  But we cannot
         * safely get to page_cgroup without it, so just try_lock it:
         * mem_cgroup_isolate_pages allows for pages left on the wrong list.
         */
        if (!try_lock_page_cgroup(page))
                return;

        pc = page_get_page_cgroup(page);
        if (pc) {
                mz = page_cgroup_zoneinfo(pc);
                spin_lock_irqsave(&mz->lru_lock, flags);
                __mem_cgroup_move_lists(pc, active);
                spin_unlock_irqrestore(&mz->lru_lock, flags);
        }
        unlock_page_cgroup(page);
}

/*
 * Calculate mapped_ratio under the memory controller. This will be used in
 * vmscan.c for determining whether we have to reclaim mapped pages.
 */
int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem)
{
        long total, rss;

        /*
         * usage is recorded in bytes. But, here, we assume the number of
         * physical pages can be represented by "long" on any arch.
         */
        total = (long) (mem->res.usage >> PAGE_SHIFT) + 1L;
        rss = (long)mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_RSS);
        return (int)((rss * 100L) / total);
}

/*
 * This function is called from vmscan.c, in the page reclaiming loop, where
 * the balance between the active and inactive lists is calculated. For
 * memory controller page reclaiming, we should use the mem_cgroup's
 * imbalance rather than the zone's global lru imbalance.
 */
long mem_cgroup_reclaim_imbalance(struct mem_cgroup *mem)
{
        unsigned long active, inactive;
        /* active and inactive are the number of pages. 'long' is ok. */
        active = mem_cgroup_get_all_zonestat(mem, MEM_CGROUP_ZSTAT_ACTIVE);
        inactive = mem_cgroup_get_all_zonestat(mem, MEM_CGROUP_ZSTAT_INACTIVE);
        return (long) (active / (inactive + 1));
}

/*
 * prev_priority control: this will be used in the memory reclaim path.
 */
int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem)
{
        return mem->prev_priority;
}

void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem, int priority)
{
        if (priority < mem->prev_priority)
                mem->prev_priority = priority;
}

void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem, int priority)
{
        mem->prev_priority = priority;
}

/*
 * Calculate the number of pages to be scanned in this priority/zone.
 * See also vmscan.c
 *
 * priority starts from "DEF_PRIORITY" and is decremented in each loop.
 * (see include/linux/mmzone.h)
 */
long mem_cgroup_calc_reclaim_active(struct mem_cgroup *mem,
                                   struct zone *zone, int priority)
{
        long nr_active;
        int nid = zone->zone_pgdat->node_id;
        int zid = zone_idx(zone);
        struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(mem, nid, zid);

        nr_active = MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE);
        return (nr_active >> priority);
}

long mem_cgroup_calc_reclaim_inactive(struct mem_cgroup *mem,
                                        struct zone *zone, int priority)
{
        long nr_inactive;
        int nid = zone->zone_pgdat->node_id;
        int zid = zone_idx(zone);
        struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(mem, nid, zid);

        nr_inactive = MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE);
        return (nr_inactive >> priority);
}

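/*
 * Scan the per-zone LRU lists of @mem_cont on behalf of vmscan: examine up
 * to @nr_to_scan page_cgroups on the requested list and move the pages that
 * __isolate_lru_page() accepts to @dst, mirroring what isolate_lru_pages()
 * does for the global LRU.
 */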
unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
                                        struct list_head *dst,
                                        unsigned long *scanned, int order,
                                        int mode, struct zone *z,
                                        struct mem_cgroup *mem_cont,
                                        int active)
{
        unsigned long nr_taken = 0;
        struct page *page;
        unsigned long scan;
        LIST_HEAD(pc_list);
        struct list_head *src;
        struct page_cgroup *pc, *tmp;
        int nid = z->zone_pgdat->node_id;
        int zid = zone_idx(z);
        struct mem_cgroup_per_zone *mz;

        BUG_ON(!mem_cont);
        mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
        if (active)
                src = &mz->active_list;
        else
                src = &mz->inactive_list;

        spin_lock(&mz->lru_lock);
        scan = 0;
        list_for_each_entry_safe_reverse(pc, tmp, src, lru) {
                if (scan >= nr_to_scan)
                        break;
                page = pc->page;

                if (unlikely(!PageLRU(page)))
                        continue;

                if (PageActive(page) && !active) {
                        __mem_cgroup_move_lists(pc, true);
                        continue;
                }
                if (!PageActive(page) && active) {
                        __mem_cgroup_move_lists(pc, false);
                        continue;
                }

                scan++;
                list_move(&pc->lru, &pc_list);

                if (__isolate_lru_page(page, mode) == 0) {
                        list_move(&page->lru, dst);
                        nr_taken++;
                }
        }

        list_splice(&pc_list, src);
        spin_unlock(&mz->lru_lock);

        *scanned = scan;
        return nr_taken;
}

/*
 * Charge the memory controller for page usage.
 * Return
 * 0 if the charge was successful
 * < 0 if the cgroup is over its limit
 */
static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
                                gfp_t gfp_mask, enum charge_type ctype,
                                struct mem_cgroup *memcg)
{
        struct mem_cgroup *mem;
        struct page_cgroup *pc;
        unsigned long flags;
        unsigned long nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
        struct mem_cgroup_per_zone *mz;

        pc = kmem_cache_alloc(page_cgroup_cache, gfp_mask);
        if (unlikely(pc == NULL))
                goto err;

        /*
         * We always charge the cgroup the mm_struct belongs to.
         * The mm_struct's mem_cgroup changes on task migration if the
         * thread group leader migrates. It's possible that mm is not
         * set, if so charge the init_mm (happens for pagecache usage).
         */
        if (likely(!memcg)) {
                rcu_read_lock();
                mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
                /*
                 * For every charge from the cgroup, increment reference count
                 */
                css_get(&mem->css);
                rcu_read_unlock();
        } else {
                mem = memcg;
                css_get(&memcg->css);
        }

        while (res_counter_charge(&mem->res, PAGE_SIZE)) {
                if (!(gfp_mask & __GFP_WAIT))
                        goto out;

                if (try_to_free_mem_cgroup_pages(mem, gfp_mask))
                        continue;

                /*
                 * try_to_free_mem_cgroup_pages() might not give us a full
                 * picture of reclaim. Some pages are reclaimed and might be
                 * moved to swap cache or just unmapped from the cgroup.
                 * Check the limit again to see if the reclaim reduced the
                 * current usage of the cgroup before giving up
                 */
                if (res_counter_check_under_limit(&mem->res))
                        continue;

                if (!nr_retries--) {
                        mem_cgroup_out_of_memory(mem, gfp_mask);
                        goto out;
                }
        }
| Balbir Singh | 8a9f3cc | 2008-02-07 00:13:53 -0800 | [diff] [blame] | 585 | 	pc->mem_cgroup = mem; | 
 | 586 | 	pc->page = page; | 
| KAMEZAWA Hiroyuki | 508b7be | 2008-07-25 01:47:09 -0700 | [diff] [blame] | 587 | 	/* | 
 | 588 | 	 * If a page is accounted as a page cache, insert to inactive list. | 
 | 589 | 	 * If anon, insert to active list. | 
 | 590 | 	 */ | 
| KAMEZAWA Hiroyuki | 217bc31 | 2008-02-07 00:14:17 -0800 | [diff] [blame] | 591 | 	if (ctype == MEM_CGROUP_CHARGE_TYPE_CACHE) | 
| Balbir Singh | 4a56d02 | 2008-04-29 01:00:23 -0700 | [diff] [blame] | 592 | 		pc->flags = PAGE_CGROUP_FLAG_CACHE; | 
| KAMEZAWA Hiroyuki | 508b7be | 2008-07-25 01:47:09 -0700 | [diff] [blame] | 593 | 	else | 
 | 594 | 		pc->flags = PAGE_CGROUP_FLAG_ACTIVE; | 
| Hugh Dickins | 3be9127 | 2008-02-07 00:14:19 -0800 | [diff] [blame] | 595 |  | 
| Hugh Dickins | 7e924aa | 2008-03-04 14:29:08 -0800 | [diff] [blame] | 596 | 	lock_page_cgroup(page); | 
| KAMEZAWA Hiroyuki | b76734e | 2008-07-25 01:47:16 -0700 | [diff] [blame] | 597 | 	if (unlikely(page_get_page_cgroup(page))) { | 
| Hugh Dickins | 7e924aa | 2008-03-04 14:29:08 -0800 | [diff] [blame] | 598 | 		unlock_page_cgroup(page); | 
| KAMEZAWA Hiroyuki | 9175e03 | 2008-02-07 00:14:08 -0800 | [diff] [blame] | 599 | 		res_counter_uncharge(&mem->res, PAGE_SIZE); | 
 | 600 | 		css_put(&mem->css); | 
| Balbir Singh | b6ac57d | 2008-04-29 01:00:19 -0700 | [diff] [blame] | 601 | 		kmem_cache_free(page_cgroup_cache, pc); | 
| KAMEZAWA Hiroyuki | accf163 | 2008-07-25 01:47:17 -0700 | [diff] [blame] | 602 | 		goto done; | 
| KAMEZAWA Hiroyuki | 9175e03 | 2008-02-07 00:14:08 -0800 | [diff] [blame] | 603 | 	} | 
| Hugh Dickins | 7e924aa | 2008-03-04 14:29:08 -0800 | [diff] [blame] | 604 | 	page_assign_page_cgroup(page, pc); | 
| Balbir Singh | 8a9f3cc | 2008-02-07 00:13:53 -0800 | [diff] [blame] | 605 |  | 
| KAMEZAWA Hiroyuki | 072c56c1 | 2008-02-07 00:14:39 -0800 | [diff] [blame] | 606 | 	mz = page_cgroup_zoneinfo(pc); | 
 | 607 | 	spin_lock_irqsave(&mz->lru_lock, flags); | 
| KAMEZAWA Hiroyuki | 3eae90c | 2008-04-29 01:00:22 -0700 | [diff] [blame] | 608 | 	__mem_cgroup_add_list(mz, pc); | 
| KAMEZAWA Hiroyuki | 072c56c1 | 2008-02-07 00:14:39 -0800 | [diff] [blame] | 609 | 	spin_unlock_irqrestore(&mz->lru_lock, flags); | 
| Balbir Singh | 66e1707 | 2008-02-07 00:13:56 -0800 | [diff] [blame] | 610 |  | 
| Hugh Dickins | fb59e9f | 2008-03-04 14:29:16 -0800 | [diff] [blame] | 611 | 	unlock_page_cgroup(page); | 
| Balbir Singh | 8a9f3cc | 2008-02-07 00:13:53 -0800 | [diff] [blame] | 612 | done: | 
| Balbir Singh | 8a9f3cc | 2008-02-07 00:13:53 -0800 | [diff] [blame] | 613 | 	return 0; | 
| Hugh Dickins | 3be9127 | 2008-02-07 00:14:19 -0800 | [diff] [blame] | 614 | out: | 
 | 615 | 	css_put(&mem->css); | 
| Balbir Singh | b6ac57d | 2008-04-29 01:00:19 -0700 | [diff] [blame] | 616 | 	kmem_cache_free(page_cgroup_cache, pc); | 
| Balbir Singh | 8a9f3cc | 2008-02-07 00:13:53 -0800 | [diff] [blame] | 617 | err: | 
| Balbir Singh | 8a9f3cc | 2008-02-07 00:13:53 -0800 | [diff] [blame] | 618 | 	return -ENOMEM; | 
 | 619 | } | 
 | 620 |  | 
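/*
 * Charge a page that is being (or is about to be) mapped into an address
 * space; the charge type used is MEM_CGROUP_CHARGE_TYPE_MAPPED.
 */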
int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask)
{
        if (mem_cgroup_subsys.disabled)
                return 0;

        /*
         * If the page is already mapped, we don't have to account for it.
         * If it is page cache, page->mapping holds an address_space.
         * But page->mapping may still hold an out-of-use anon_vma pointer;
         * detect that with a PageAnon() check.  A newly-mapped anonymous
         * page's page->mapping is NULL.
         */
        if (page_mapped(page) || (page->mapping && !PageAnon(page)))
                return 0;
        if (unlikely(!mm))
                mm = &init_mm;
        return mem_cgroup_charge_common(page, mm, gfp_mask,
                                MEM_CGROUP_CHARGE_TYPE_MAPPED, NULL);
}

int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
                                gfp_t gfp_mask)
{
        if (mem_cgroup_subsys.disabled)
                return 0;

        /*
         * Corner case handling. This is usually called from
         * add_to_page_cache(), but some filesystems (shmem) precharge the
         * page before calling it and then call add_to_page_cache() with
         * GFP_NOWAIT.
         *
         * In the GFP_NOWAIT case, the page may have been pre-charged before
         * add_to_page_cache() was called (see shmem.c); check for that here
         * and avoid charging twice. (It works, but at a slightly larger
         * cost.)
         */
        if (!(gfp_mask & __GFP_WAIT)) {
                struct page_cgroup *pc;

                lock_page_cgroup(page);
                pc = page_get_page_cgroup(page);
                if (pc) {
                        VM_BUG_ON(pc->page != page);
                        VM_BUG_ON(!pc->mem_cgroup);
                        unlock_page_cgroup(page);
                        return 0;
                }
                unlock_page_cgroup(page);
        }

        if (unlikely(!mm))
                mm = &init_mm;

        return mem_cgroup_charge_common(page, mm, gfp_mask,
                                MEM_CGROUP_CHARGE_TYPE_CACHE, NULL);
}

/*
 * uncharge if !page_mapped(page)
 */
static void
__mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
{
        struct page_cgroup *pc;
        struct mem_cgroup *mem;
        struct mem_cgroup_per_zone *mz;
        unsigned long flags;

        if (mem_cgroup_subsys.disabled)
                return;

        /*
         * Check if our page_cgroup is valid
         */
        lock_page_cgroup(page);
        pc = page_get_page_cgroup(page);
        if (unlikely(!pc))
                goto unlock;

        VM_BUG_ON(pc->page != page);

        if ((ctype == MEM_CGROUP_CHARGE_TYPE_MAPPED)
            && ((pc->flags & PAGE_CGROUP_FLAG_CACHE)
                || page_mapped(page)))
                goto unlock;

        mz = page_cgroup_zoneinfo(pc);
        spin_lock_irqsave(&mz->lru_lock, flags);
        __mem_cgroup_remove_list(mz, pc);
        spin_unlock_irqrestore(&mz->lru_lock, flags);

        page_assign_page_cgroup(page, NULL);
        unlock_page_cgroup(page);

        mem = pc->mem_cgroup;
        res_counter_uncharge(&mem->res, PAGE_SIZE);
        css_put(&mem->css);

        kmem_cache_free(page_cgroup_cache, pc);
        return;
unlock:
        unlock_page_cgroup(page);
}

void mem_cgroup_uncharge_page(struct page *page)
{
        __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_MAPPED);
}

void mem_cgroup_uncharge_cache_page(struct page *page)
{
        VM_BUG_ON(page_mapped(page));
        __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE);
}

/*
 * Before starting migration, account against the new page.
 */
int mem_cgroup_prepare_migration(struct page *page, struct page *newpage)
{
        struct page_cgroup *pc;
        struct mem_cgroup *mem = NULL;
        enum charge_type ctype = MEM_CGROUP_CHARGE_TYPE_MAPPED;
        int ret = 0;

        if (mem_cgroup_subsys.disabled)
                return 0;

        lock_page_cgroup(page);
        pc = page_get_page_cgroup(page);
        if (pc) {
                mem = pc->mem_cgroup;
                css_get(&mem->css);
                if (pc->flags & PAGE_CGROUP_FLAG_CACHE)
                        ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
        }
        unlock_page_cgroup(page);
        if (mem) {
                ret = mem_cgroup_charge_common(newpage, NULL, GFP_KERNEL,
                        ctype, mem);
                css_put(&mem->css);
        }
        return ret;
}

/* Remove the redundant charge if migration failed. */
void mem_cgroup_end_migration(struct page *newpage)
{
        /*
         * At success, page->mapping is not NULL.
         * Special rollback care is necessary when
         * 1. migration fails (newpage->mapping is cleared in this case), or
         * 2. the newpage was moved but not remapped again because the task
         *    exits and the newpage is obsolete. In this case, the new page
         *    may be a swapcache. So, we just call mem_cgroup_uncharge_page()
         *    always, to avoid a mess; the page_cgroup will be removed if it
         *    is unnecessary. File cache pages are still on the radix-tree,
         *    so don't worry about them.
         */
        if (!newpage->mapping)
                __mem_cgroup_uncharge_common(newpage,
                                         MEM_CGROUP_CHARGE_TYPE_FORCE);
        else if (PageAnon(newpage))
                mem_cgroup_uncharge_page(newpage);
}

/*
 * A call to try to shrink memory usage under the specified resource
 * controller. This is typically used for page reclaiming of shmem, to
 * reduce the side effect of page allocation from shmem, which is used by
 * some mem_cgroups.
 */
int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask)
{
        struct mem_cgroup *mem;
        int progress = 0;
        int retry = MEM_CGROUP_RECLAIM_RETRIES;

        if (mem_cgroup_subsys.disabled)
                return 0;

        rcu_read_lock();
        mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
        css_get(&mem->css);
        rcu_read_unlock();

        do {
                progress = try_to_free_mem_cgroup_pages(mem, gfp_mask);
        } while (!progress && --retry);

        css_put(&mem->css);
        if (!retry)
                return -ENOMEM;
        return 0;
}

/*
 * This routine traverses the page_cgroups on the given list and drops them
 * all. It does not reclaim the pages themselves; it only removes the
 * page_cgroups.
 */
#define FORCE_UNCHARGE_BATCH	(128)
static void mem_cgroup_force_empty_list(struct mem_cgroup *mem,
                            struct mem_cgroup_per_zone *mz,
                            int active)
{
        struct page_cgroup *pc;
        struct page *page;
        int count = FORCE_UNCHARGE_BATCH;
        unsigned long flags;
        struct list_head *list;

        if (active)
                list = &mz->active_list;
        else
                list = &mz->inactive_list;

        spin_lock_irqsave(&mz->lru_lock, flags);
        while (!list_empty(list)) {
                pc = list_entry(list->prev, struct page_cgroup, lru);
                page = pc->page;
                get_page(page);
                spin_unlock_irqrestore(&mz->lru_lock, flags);
                /*
                 * Check if this page is on the LRU. A !LRU page can be
                 * found here if it is under page migration.
                 */
                if (PageLRU(page)) {
                        __mem_cgroup_uncharge_common(page,
                                        MEM_CGROUP_CHARGE_TYPE_FORCE);
                        put_page(page);
                        if (--count <= 0) {
                                count = FORCE_UNCHARGE_BATCH;
                                cond_resched();
                        }
                } else
                        cond_resched();
                spin_lock_irqsave(&mz->lru_lock, flags);
        }
        spin_unlock_irqrestore(&mz->lru_lock, flags);
}

 | 860 | /* | 
 | 861 |  * make mem_cgroup's charge to be 0 if there is no task. | 
 | 862 |  * This enables deleting this mem_cgroup. | 
 | 863 |  */ | 
static int mem_cgroup_force_empty(struct mem_cgroup *mem)
{
	int ret = -EBUSY;
	int node, zid;

	css_get(&mem->css);
	/*
	 * The page reclaim code (kswapd etc.) moves pages between the
	 * active and inactive lists without taking our lock, so we have
	 * to loop here until all the lists are empty.
	 */
	while (mem->res.usage > 0) {
		if (atomic_read(&mem->css.cgroup->count) > 0)
			goto out;
		for_each_node_state(node, N_POSSIBLE)
			for (zid = 0; zid < MAX_NR_ZONES; zid++) {
				struct mem_cgroup_per_zone *mz;
				mz = mem_cgroup_zoneinfo(mem, node, zid);
				/* drop all page_cgroups on the active list */
				mem_cgroup_force_empty_list(mem, mz, 1);
				/* drop all page_cgroups on the inactive list */
				mem_cgroup_force_empty_list(mem, mz, 0);
			}
	}
	ret = 0;
out:
	css_put(&mem->css);
	return ret;
}
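
/*
 * Illustrative userspace usage (not part of this file): force_empty is
 * exposed as a trigger file, so writing any value drops the remaining
 * charges once the group has no tasks left, e.g.:
 *
 *	# echo 1 > /cgroups/0/memory.force_empty
 *	# rmdir /cgroups/0
 *
 * The mount path above is an example only.
 */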

static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft)
{
	return res_counter_read_u64(&mem_cgroup_from_cont(cont)->res,
				    cft->private);
}

static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
			    const char *buffer)
{
	return res_counter_write(&mem_cgroup_from_cont(cont)->res,
				 cft->private, buffer,
				 res_counter_memparse_write_strategy);
}
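
/*
 * Illustrative example: limits are written through this handler and
 * parsed by res_counter_memparse_write_strategy(), which accepts plain
 * byte counts as well as memparse-style K/M/G suffixes:
 *
 *	# echo 4M > /cgroups/0/memory.limit_in_bytes
 *	# cat /cgroups/0/memory.limit_in_bytes
 *	4194304
 *
 * Paths and values are examples only.
 */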

static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
{
	struct mem_cgroup *mem;

	mem = mem_cgroup_from_cont(cont);
	switch (event) {
	case RES_MAX_USAGE:
		res_counter_reset_max(&mem->res);
		break;
	case RES_FAILCNT:
		res_counter_reset_failcnt(&mem->res);
		break;
	}
	return 0;
}
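
/*
 * Illustrative example: both counters are reset through trigger files,
 * and the written value itself is ignored:
 *
 *	# echo 0 > /cgroups/0/memory.max_usage_in_bytes
 *	# echo 0 > /cgroups/0/memory.failcnt
 */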

static const struct mem_cgroup_stat_desc {
	const char *msg;
	u64 unit;
} mem_cgroup_stat_desc[] = {
	[MEM_CGROUP_STAT_CACHE] = { "cache", PAGE_SIZE, },
	[MEM_CGROUP_STAT_RSS] = { "rss", PAGE_SIZE, },
	[MEM_CGROUP_STAT_PGPGIN_COUNT] = { "pgpgin", 1, },
	[MEM_CGROUP_STAT_PGPGOUT_COUNT] = { "pgpgout", 1, },
};

static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
				 struct cgroup_map_cb *cb)
{
	struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont);
	struct mem_cgroup_stat *stat = &mem_cont->stat;
	int i;

	for (i = 0; i < ARRAY_SIZE(stat->cpustat[0].count); i++) {
		s64 val;

		val = mem_cgroup_read_stat(stat, i);
		val *= mem_cgroup_stat_desc[i].unit;
		cb->fill(cb, mem_cgroup_stat_desc[i].msg, val);
	}
	/* show the sizes of the active and inactive lists, in bytes */
	{
		unsigned long active, inactive;

		inactive = mem_cgroup_get_all_zonestat(mem_cont,
						MEM_CGROUP_ZSTAT_INACTIVE);
		active = mem_cgroup_get_all_zonestat(mem_cont,
						MEM_CGROUP_ZSTAT_ACTIVE);
		cb->fill(cb, "active", active * PAGE_SIZE);
		cb->fill(cb, "inactive", inactive * PAGE_SIZE);
	}
	return 0;
}
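
/*
 * The resulting memory.stat file contains one "<name> <value>" pair per
 * cb->fill() call above; the values below are purely illustrative:
 *
 *	cache 8192
 *	rss 69632
 *	pgpgin 401
 *	pgpgout 382
 *	active 65536
 *	inactive 12288
 */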

static struct cftype mem_cgroup_files[] = {
	{
		.name = "usage_in_bytes",
		.private = RES_USAGE,
		.read_u64 = mem_cgroup_read,
	},
	{
		.name = "max_usage_in_bytes",
		.private = RES_MAX_USAGE,
		.trigger = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read,
	},
	{
		.name = "limit_in_bytes",
		.private = RES_LIMIT,
		.write_string = mem_cgroup_write,
		.read_u64 = mem_cgroup_read,
	},
	{
		.name = "failcnt",
		.private = RES_FAILCNT,
		.trigger = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read,
	},
	{
		.name = "force_empty",
		.trigger = mem_force_empty_write,
	},
	{
		.name = "stat",
		.read_map = mem_control_stat_show,
	},
};
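
/*
 * Illustrative end-to-end use of the control files above (mount point
 * and sizes are examples only):
 *
 *	# mount -t cgroup -o memory none /cgroups
 *	# mkdir /cgroups/0
 *	# echo $$ > /cgroups/0/tasks
 *	# echo 4M > /cgroups/0/memory.limit_in_bytes
 *	# cat /cgroups/0/memory.usage_in_bytes
 *	# cat /cgroups/0/memory.failcnt
 */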

static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
{
	struct mem_cgroup_per_node *pn;
	struct mem_cgroup_per_zone *mz;
	int zone, tmp = node;
	/*
	 * This routine is called against possible nodes, but it's a BUG
	 * to call kmalloc() against an offline node.
	 *
	 * TODO: this routine can waste a lot of memory for nodes that
	 *       will never be onlined. It's better to use a memory
	 *       hotplug callback function.
	 */
	if (!node_state(node, N_NORMAL_MEMORY))
		tmp = -1;
	pn = kmalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
	if (!pn)
		return 1;

	mem->info.nodeinfo[node] = pn;
	memset(pn, 0, sizeof(*pn));

	for (zone = 0; zone < MAX_NR_ZONES; zone++) {
		mz = &pn->zoneinfo[zone];
		INIT_LIST_HEAD(&mz->active_list);
		INIT_LIST_HEAD(&mz->inactive_list);
		spin_lock_init(&mz->lru_lock);
	}
	return 0;
}

static void free_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
{
	kfree(mem->info.nodeinfo[node]);
}

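/*
 * struct mem_cgroup can grow beyond a page once per-cpu statistics are
 * sized for a large NR_CPUS, so kmalloc() is used while the structure
 * still fits in a page and vmalloc() otherwise, since vmalloc() does
 * not require physically contiguous pages.
 */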
static struct mem_cgroup *mem_cgroup_alloc(void)
{
	struct mem_cgroup *mem;

	if (sizeof(*mem) < PAGE_SIZE)
		mem = kmalloc(sizeof(*mem), GFP_KERNEL);
	else
		mem = vmalloc(sizeof(*mem));

	if (mem)
		memset(mem, 0, sizeof(*mem));
	return mem;
}

static void mem_cgroup_free(struct mem_cgroup *mem)
{
	if (sizeof(*mem) < PAGE_SIZE)
		kfree(mem);
	else
		vfree(mem);
}
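/*
 * The root cgroup is backed by the statically allocated init_mem_cgroup
 * (and is also where the page_cgroup cache is created); every child
 * group gets a dynamically allocated mem_cgroup instead.
 */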
static struct cgroup_subsys_state *
mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
{
	struct mem_cgroup *mem;
	int node;

	if (unlikely(cont->parent == NULL)) {
		mem = &init_mem_cgroup;
		page_cgroup_cache = KMEM_CACHE(page_cgroup, SLAB_PANIC);
	} else {
		mem = mem_cgroup_alloc();
		if (!mem)
			return ERR_PTR(-ENOMEM);
	}

	res_counter_init(&mem->res);

	for_each_node_state(node, N_POSSIBLE)
		if (alloc_mem_cgroup_per_zone_info(mem, node))
			goto free_out;

	return &mem->css;
free_out:
	for_each_node_state(node, N_POSSIBLE)
		free_mem_cgroup_per_zone_info(mem, node);
	if (cont->parent != NULL)
		mem_cgroup_free(mem);
	return ERR_PTR(-ENOMEM);
}

static void mem_cgroup_pre_destroy(struct cgroup_subsys *ss,
					struct cgroup *cont)
{
	struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
	mem_cgroup_force_empty(mem);
}
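
/*
 * ->pre_destroy runs on rmdir before ->destroy, so the forced uncharge
 * above brings res.usage to zero while the group is still fully set up;
 * ->destroy then only has to free the per-node structures.
 */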

static void mem_cgroup_destroy(struct cgroup_subsys *ss,
				struct cgroup *cont)
{
	int node;
	struct mem_cgroup *mem = mem_cgroup_from_cont(cont);

	for_each_node_state(node, N_POSSIBLE)
		free_mem_cgroup_per_zone_info(mem, node);

	mem_cgroup_free(mem);
}

static int mem_cgroup_populate(struct cgroup_subsys *ss,
				struct cgroup *cont)
{
	return cgroup_add_files(cont, ss, mem_cgroup_files,
					ARRAY_SIZE(mem_cgroup_files));
}

static void mem_cgroup_move_task(struct cgroup_subsys *ss,
				struct cgroup *cont,
				struct cgroup *old_cont,
				struct task_struct *p)
{
	struct mm_struct *mm;
	struct mem_cgroup *mem, *old_mem;

	mm = get_task_mm(p);
	if (mm == NULL)
		return;

	mem = mem_cgroup_from_cont(cont);
	old_mem = mem_cgroup_from_cont(old_cont);

	if (mem == old_mem)
		goto out;

	/*
	 * Only thread group leaders are allowed to migrate; the
	 * mm_struct is in effect owned by the leader.
	 */
	if (!thread_group_leader(p))
		goto out;

out:
	mmput(mm);
}
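
/*
 * Note: charges stay with the pages that hold them, so moving a task
 * does not move its charges; after the checks above this handler
 * intentionally does nothing beyond dropping the mm reference.
 */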

struct cgroup_subsys mem_cgroup_subsys = {
	.name = "memory",
	.subsys_id = mem_cgroup_subsys_id,
	.create = mem_cgroup_create,
	.pre_destroy = mem_cgroup_pre_destroy,
	.destroy = mem_cgroup_destroy,
	.populate = mem_cgroup_populate,
	.attach = mem_cgroup_move_task,
	.early_init = 0,
};