/* mm/page_cgroup.c */

#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/bit_spinlock.h>
#include <linux/page_cgroup.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/memory.h>
#include <linux/vmalloc.h>
#include <linux/cgroup.h>
#include <linux/swapops.h>

/*
 * Initialize one page_cgroup descriptor: clear its state flags, drop any
 * mem_cgroup association, bind it to its struct page, and reset the
 * per-memcg LRU linkage.
 */
static void __meminit
__init_page_cgroup(struct page_cgroup *pc, unsigned long pfn)
{
	pc->flags = 0;
	pc->mem_cgroup = NULL;
	pc->page = pfn_to_page(pfn);
	INIT_LIST_HEAD(&pc->lru);
}

static unsigned long total_usage;

#if !defined(CONFIG_SPARSEMEM)

void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat)
{
	pgdat->node_page_cgroup = NULL;
}

struct page_cgroup *lookup_page_cgroup(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	unsigned long offset;
	struct page_cgroup *base;

	base = NODE_DATA(page_to_nid(page))->node_page_cgroup;
	if (unlikely(!base))
		return NULL;

	offset = pfn - NODE_DATA(page_to_nid(page))->node_start_pfn;
	return base + offset;
}
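
/*
 * Usage sketch (a hedged illustration, not code from this file): callers
 * normally guard the returned descriptor with the bit-spinlock helpers
 * declared in <linux/page_cgroup.h> before touching its fields:
 *
 *	struct page_cgroup *pc = lookup_page_cgroup(page);
 *
 *	if (pc) {
 *		lock_page_cgroup(pc);
 *		... read or update pc->mem_cgroup and pc->flags here ...
 *		unlock_page_cgroup(pc);
 *	}
 */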

static int __init alloc_node_page_cgroup(int nid)
{
	struct page_cgroup *base, *pc;
	unsigned long table_size;
	unsigned long start_pfn, nr_pages, index;
	struct page *page;
	unsigned int order;

	start_pfn = NODE_DATA(nid)->node_start_pfn;
	nr_pages = NODE_DATA(nid)->node_spanned_pages;

	if (!nr_pages)
		return 0;

	table_size = sizeof(struct page_cgroup) * nr_pages;
	order = get_order(table_size);
	page = alloc_pages_node(nid, GFP_NOWAIT | __GFP_ZERO, order);
	if (!page)	/* fall back to any node (nid == -1) */
		page = alloc_pages_node(-1, GFP_NOWAIT | __GFP_ZERO, order);
	if (!page)
		return -ENOMEM;
	base = page_address(page);
	for (index = 0; index < nr_pages; index++) {
		pc = base + index;
		__init_page_cgroup(pc, start_pfn + index);
	}
	NODE_DATA(nid)->node_page_cgroup = base;
	total_usage += table_size;
	return 0;
}
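
/*
 * Rough cost of the flat table above (illustrative numbers, assuming a
 * 64-bit build where struct page_cgroup, i.e. flags, two pointers and a
 * list_head, is 40 bytes): a node spanning 1GiB has 262144 pages, so
 * table_size = 262144 * 40 = 10MiB, taken as one high-order allocation.
 */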

void __init page_cgroup_init(void)
{
	int nid, fail;

	if (mem_cgroup_disabled())
		return;

	for_each_online_node(nid) {
		fail = alloc_node_page_cgroup(nid);
		if (fail)
			goto fail;
	}
	printk(KERN_INFO "allocated %lu bytes of page_cgroup\n", total_usage);
	printk(KERN_INFO "please try 'cgroup_disable=memory' option if you"
	       " don't want memory cgroups\n");
	return;
fail:
	printk(KERN_CRIT "allocation of page_cgroup failed\n");
	printk(KERN_CRIT "please try 'cgroup_disable=memory' boot option\n");
	panic("Out of memory");
}

#else /* CONFIG_SPARSEMEM */

struct page_cgroup *lookup_page_cgroup(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	struct mem_section *section = __pfn_to_section(pfn);

	/*
	 * ->page_cgroup is stored with the section start pfn already
	 * subtracted (see init_section_page_cgroup()), so indexing by
	 * the absolute pfn lands on the right entry.
	 */
	return section->page_cgroup + pfn;
}

/* __alloc_bootmem...() is protected by !slab_is_available() */
static int __init_refok init_section_page_cgroup(unsigned long pfn)
{
	struct mem_section *section = __pfn_to_section(pfn);
	struct page_cgroup *base, *pc;
	unsigned long table_size;
	int nid, index;

	if (!section->page_cgroup) {
		nid = page_to_nid(pfn_to_page(pfn));
		table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION;
		if (slab_is_available()) {
			base = kmalloc_node(table_size,
					GFP_KERNEL | __GFP_NOWARN, nid);
			if (!base)
				base = vmalloc_node(table_size, nid);
		} else {
			base = __alloc_bootmem_node_nopanic(NODE_DATA(nid),
				table_size,
				PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
		}
	} else {
		/*
		 * We don't have to allocate page_cgroup again, but the
		 * address of the memmap may have changed, so we have to
		 * reinitialize the entries.
		 */
		base = section->page_cgroup + pfn;
		table_size = 0;
		/* check whether the address of the memmap has changed */
		if (base->page == pfn_to_page(pfn))
			return 0;
	}

	if (!base) {
		printk(KERN_ERR "page cgroup allocation failure\n");
		return -ENOMEM;
	}

	for (index = 0; index < PAGES_PER_SECTION; index++) {
		pc = base + index;
		__init_page_cgroup(pc, pfn + index);
	}

	/* store the base biased by pfn so lookup can index by absolute pfn */
	section->page_cgroup = base - pfn;
	total_usage += table_size;
	return 0;
}
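
/*
 * Rough per-section cost (illustrative, assuming x86_64 defaults where a
 * mem_section spans 128MiB, PAGES_PER_SECTION == 32768, and a 64-bit
 * struct page_cgroup is 40 bytes): table_size = 32768 * 40 = 1.25MiB per
 * section, i.e. roughly 1% of the memory being described.
 */
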
#ifdef CONFIG_MEMORY_HOTPLUG
void __free_page_cgroup(unsigned long pfn)
{
	struct mem_section *ms;
	struct page_cgroup *base;

	ms = __pfn_to_section(pfn);
	if (!ms || !ms->page_cgroup)
		return;
	base = ms->page_cgroup + pfn;
	if (is_vmalloc_addr(base)) {
		vfree(base);
		ms->page_cgroup = NULL;
	} else {
		struct page *page = virt_to_page(base);
		/* bootmem allocations stay PageReserved and are never freed */
		if (!PageReserved(page)) {
			kfree(base);
			ms->page_cgroup = NULL;
		}
	}
}

int __meminit online_page_cgroup(unsigned long start_pfn,
			unsigned long nr_pages,
			int nid)
{
	unsigned long start, end, pfn;
	int fail = 0;

	/* round the range out to whole sections */
	start = start_pfn & ~(PAGES_PER_SECTION - 1);
	end = ALIGN(start_pfn + nr_pages, PAGES_PER_SECTION);

	for (pfn = start; !fail && pfn < end; pfn += PAGES_PER_SECTION) {
		if (!pfn_present(pfn))
			continue;
		fail = init_section_page_cgroup(pfn);
	}
	if (!fail)
		return 0;

	/* rollback */
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
		__free_page_cgroup(pfn);

	return -ENOMEM;
}
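
/*
 * Worked example of the rounding above (illustrative, assuming
 * PAGES_PER_SECTION == 32768): onlining start_pfn = 40000 with
 * nr_pages = 10000 gives start = 32768 and end = 65536, i.e. the two
 * whole sections that overlap the requested range.
 */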

int __meminit offline_page_cgroup(unsigned long start_pfn,
		unsigned long nr_pages, int nid)
{
	unsigned long start, end, pfn;

	start = start_pfn & ~(PAGES_PER_SECTION - 1);
	end = ALIGN(start_pfn + nr_pages, PAGES_PER_SECTION);

	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
		__free_page_cgroup(pfn);
	return 0;
}

static int __meminit page_cgroup_callback(struct notifier_block *self,
			       unsigned long action, void *arg)
{
	struct memory_notify *mn = arg;
	int ret = 0;

	switch (action) {
	case MEM_GOING_ONLINE:
		ret = online_page_cgroup(mn->start_pfn,
				   mn->nr_pages, mn->status_change_nid);
		break;
	case MEM_OFFLINE:
		offline_page_cgroup(mn->start_pfn,
				mn->nr_pages, mn->status_change_nid);
		break;
	case MEM_CANCEL_ONLINE:
	case MEM_GOING_OFFLINE:
		break;
	case MEM_ONLINE:
	case MEM_CANCEL_OFFLINE:
		break;
	}

	if (ret)
		ret = notifier_from_errno(ret);
	else
		ret = NOTIFY_OK;

	return ret;
}

#endif /* CONFIG_MEMORY_HOTPLUG */

void __init page_cgroup_init(void)
{
	unsigned long pfn;
	int fail = 0;

	if (mem_cgroup_disabled())
		return;

	for (pfn = 0; !fail && pfn < max_pfn; pfn += PAGES_PER_SECTION) {
		if (!pfn_present(pfn))
			continue;
		fail = init_section_page_cgroup(pfn);
	}
	if (fail) {
		printk(KERN_CRIT "try 'cgroup_disable=memory' boot option\n");
		panic("Out of memory");
	} else {
		hotplug_memory_notifier(page_cgroup_callback, 0);
	}
	printk(KERN_INFO "allocated %lu bytes of page_cgroup\n", total_usage);
	printk(KERN_INFO "please try 'cgroup_disable=memory' option if you"
	       " don't want memory cgroups\n");
}

void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat)
{
	return;
}

#endif /* CONFIG_SPARSEMEM */

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP

static DEFINE_MUTEX(swap_cgroup_mutex);
struct swap_cgroup_ctrl {
	struct page **map;
	unsigned long length;
};

struct swap_cgroup_ctrl swap_cgroup_ctrl[MAX_SWAPFILES];

struct swap_cgroup {
	unsigned short		id;
};
#define SC_PER_PAGE	(PAGE_SIZE/sizeof(struct swap_cgroup))
#define SC_POS_MASK	(SC_PER_PAGE - 1)
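
/*
 * Index math sketch (illustrative, assuming PAGE_SIZE == 4096 and a
 * 2-byte struct swap_cgroup, so SC_PER_PAGE == 2048): swap offset 5000
 * lives in map page idx = 5000 / 2048 = 2, at slot pos = 5000 & 2047
 * = 904 within that page.
 */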

/*
 * SwapCgroup implements "lookup" and "exchange" operations.
 * In typical usage, this swap_cgroup is accessed via memcg's charge/uncharge
 * against SwapCache. At swap_free(), it is accessed directly from swap.
 *
 * This means:
 *  - we have no race in "exchange" when we're accessed via SwapCache because
 *    the SwapCache (and its swp_entry) is under lock.
 *  - when called via swap_free(), there is no user of this entry and no race.
 * Therefore, we don't need a lock around "exchange".
 *
 * TODO: we can push these buffers out to HIGHMEM.
 */

/*
 * allocate buffer for swap_cgroup.
 */
static int swap_cgroup_prepare(int type)
{
	struct page *page;
	struct swap_cgroup_ctrl *ctrl;
	unsigned long idx, max;

	if (!do_swap_account)
		return 0;
	ctrl = &swap_cgroup_ctrl[type];

	for (idx = 0; idx < ctrl->length; idx++) {
		page = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (!page)
			goto not_enough_page;
		ctrl->map[idx] = page;
	}
	return 0;
not_enough_page:
	/* roll back the pages allocated so far */
	max = idx;
	for (idx = 0; idx < max; idx++)
		__free_page(ctrl->map[idx]);

	return -ENOMEM;
}

/**
 * swap_cgroup_record - record mem_cgroup for this swp_entry.
 * @ent: swap entry to be recorded into
 * @id: css ID of the mem_cgroup to be recorded
 *
 * Returns the old value on success, 0 on failure.
 * (Of course, the old value can be 0.)
 */
unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id)
{
	int type = swp_type(ent);
	unsigned long offset = swp_offset(ent);
	unsigned long idx = offset / SC_PER_PAGE;
	unsigned long pos = offset & SC_POS_MASK;
	struct swap_cgroup_ctrl *ctrl;
	struct page *mappage;
	struct swap_cgroup *sc;
	unsigned short old;

	if (!do_swap_account)
		return 0;

	ctrl = &swap_cgroup_ctrl[type];

	mappage = ctrl->map[idx];
	sc = page_address(mappage);
	sc += pos;
	old = sc->id;
	sc->id = id;

	return old;
}
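
/*
 * Usage sketch (hedged; css_id() and "memcg" describe a typical caller,
 * not code in this file): a swap-out charge path records the owning
 * cgroup and remembers the previous owner in one step:
 *
 *	unsigned short old;
 *
 *	old = swap_cgroup_record(ent, css_id(&memcg->css));
 *
 * A later lookup_swap_cgroup(ent) returns that ID, 0 meaning that no
 * cgroup was recorded for the entry.
 */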

/**
 * lookup_swap_cgroup - lookup mem_cgroup tied to swap entry
 * @ent: swap entry to be looked up.
 *
 * Returns the CSS ID of the mem_cgroup on success, 0 on failure
 * (0 is an invalid ID).
 */
unsigned short lookup_swap_cgroup(swp_entry_t ent)
{
	int type = swp_type(ent);
	unsigned long offset = swp_offset(ent);
	unsigned long idx = offset / SC_PER_PAGE;
	unsigned long pos = offset & SC_POS_MASK;
	struct swap_cgroup_ctrl *ctrl;
	struct page *mappage;
	struct swap_cgroup *sc;
	unsigned short ret;

	if (!do_swap_account)
		return 0;

	ctrl = &swap_cgroup_ctrl[type];
	mappage = ctrl->map[idx];
	sc = page_address(mappage);
	sc += pos;
	ret = sc->id;
	return ret;
}

int swap_cgroup_swapon(int type, unsigned long max_pages)
{
	void *array;
	unsigned long array_size;
	unsigned long length;
	struct swap_cgroup_ctrl *ctrl;

	if (!do_swap_account)
		return 0;

	length = ((max_pages/SC_PER_PAGE) + 1);
	array_size = length * sizeof(void *);

	array = vmalloc(array_size);
	if (!array)
		goto nomem;

	memset(array, 0, array_size);
	ctrl = &swap_cgroup_ctrl[type];
	mutex_lock(&swap_cgroup_mutex);
	ctrl->length = length;
	ctrl->map = array;
	if (swap_cgroup_prepare(type)) {
		/* memory shortage */
		ctrl->map = NULL;
		ctrl->length = 0;
		vfree(array);
		mutex_unlock(&swap_cgroup_mutex);
		goto nomem;
	}
	mutex_unlock(&swap_cgroup_mutex);

	return 0;
nomem:
	printk(KERN_INFO "couldn't allocate enough memory for swap_cgroup\n");
	printk(KERN_INFO
		"swap_cgroup can be disabled by noswapaccount boot option\n");
	return -ENOMEM;
}
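
/*
 * Sizing sketch for swap_cgroup_swapon() (illustrative, assuming
 * PAGE_SIZE == 4096 and a 2-byte struct swap_cgroup, so SC_PER_PAGE ==
 * 2048): a 1GiB swap device has max_pages = 262144, giving length =
 * 262144 / 2048 + 1 = 129 map pages, about 516KiB of swap_cgroup data
 * plus a 129-entry pointer array from vmalloc().
 */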

void swap_cgroup_swapoff(int type)
{
	unsigned long i;
	struct swap_cgroup_ctrl *ctrl;

	if (!do_swap_account)
		return;

	mutex_lock(&swap_cgroup_mutex);
	ctrl = &swap_cgroup_ctrl[type];
	if (ctrl->map) {
		for (i = 0; i < ctrl->length; i++) {
			struct page *page = ctrl->map[i];
			if (page)
				__free_page(page);
		}
		vfree(ctrl->map);
		ctrl->map = NULL;
		ctrl->length = 0;
	}
	mutex_unlock(&swap_cgroup_mutex);
}

#endif /* CONFIG_CGROUP_MEM_RES_CTLR_SWAP */