mm: add VM counters for transparent hugepages
mm/vmstat.c
/*
 *  linux/mm/vmstat.c
 *
 *  Manages VM statistics
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  zoned VM statistics
 *  Copyright (C) 2006 Silicon Graphics, Inc.,
 *              Christoph Lameter <christoph@lameter.com>
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/vmstat.h>
#include <linux/sched.h>
#include <linux/math64.h>
#include <linux/writeback.h>
#include <linux/compaction.h>

#ifdef CONFIG_VM_EVENT_COUNTERS
DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};
EXPORT_PER_CPU_SYMBOL(vm_event_states);

static void sum_vm_events(unsigned long *ret)
{
        int cpu;
        int i;

        memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long));

        for_each_online_cpu(cpu) {
                struct vm_event_state *this = &per_cpu(vm_event_states, cpu);

                for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
                        ret[i] += this->event[i];
        }
}

/*
 * Accumulate the vm event counters across all CPUs.
 * The result is unavoidably approximate - it can change
 * during and after execution of this function.
 */
void all_vm_events(unsigned long *ret)
{
        get_online_cpus();
        sum_vm_events(ret);
        put_online_cpus();
}
EXPORT_SYMBOL_GPL(all_vm_events);

#ifdef CONFIG_HOTPLUG
/*
 * Fold the foreign cpu events into our own.
 *
 * This is adding to the events on one processor
 * but keeps the global counts constant.
 */
void vm_events_fold_cpu(int cpu)
{
        struct vm_event_state *fold_state = &per_cpu(vm_event_states, cpu);
        int i;

        for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
                count_vm_events(i, fold_state->event[i]);
                fold_state->event[i] = 0;
        }
}
#endif /* CONFIG_HOTPLUG */

#endif /* CONFIG_VM_EVENT_COUNTERS */

/*
 * Manage combined zone based / global counters
 *
 * vm_stat contains the global counters
 */
atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
EXPORT_SYMBOL(vm_stat);

#ifdef CONFIG_SMP

int calculate_pressure_threshold(struct zone *zone)
{
        int threshold;
        int watermark_distance;

        /*
         * As vmstats are not up to date, there is drift between the estimated
         * and real values. For high thresholds and a high number of CPUs, it
         * is possible for the min watermark to be breached while the estimated
         * value looks fine. The pressure threshold is a reduced value such
         * that even the maximum amount of drift will not accidentally breach
         * the min watermark
         */
        watermark_distance = low_wmark_pages(zone) - min_wmark_pages(zone);
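        /*
         * Worked example (illustrative numbers): a watermark gap of 1024
         * pages on a 64-CPU machine gives a threshold of 1024 / 64 = 16,
         * so the summed per-cpu drift can never exceed the gap itself.
         */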
        threshold = max(1, (int)(watermark_distance / num_online_cpus()));

        /*
         * Maximum threshold is 125
         */
        threshold = min(125, threshold);

        return threshold;
}

int calculate_normal_threshold(struct zone *zone)
{
        int threshold;
        int mem;        /* memory in 128 MB units */

        /*
         * The threshold scales with the number of processors and the amount
         * of memory per zone. More memory means that we can defer updates for
         * longer, more processors could lead to more contention.
         * fls() is used to have a cheap way of logarithmic scaling.
         *
         * Some sample thresholds:
         *
         * Threshold    Processors      (fls)   Zonesize        fls(mem+1)
         * ------------------------------------------------------------------
         * 8            1               1       0.9-1 GB        4
         * 16           2               2       0.9-1 GB        4
         * 20           2               2       1-2 GB          5
         * 24           2               2       2-4 GB          6
         * 28           2               2       4-8 GB          7
         * 32           2               2       8-16 GB         8
         * 4            2               2       <128M           1
         * 30           4               3       2-4 GB          5
         * 48           4               3       8-16 GB         8
         * 32           8               4       1-2 GB          4
         * 32           8               4       0.9-1GB         4
         * 10           16              5       <128M           1
         * 40           16              5       900M            4
         * 70           64              7       2-4 GB          5
         * 84           64              7       4-8 GB          6
         * 108          512             9       4-8 GB          6
         * 125          1024            10      8-16 GB         8
         * 125          1024            10      16-32 GB        9
         */

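        /*
         * 2^27 bytes is 128MB, so shifting present_pages right by
         * (27 - PAGE_SHIFT) converts a page count into 128MB units
         * (with 4K pages, PAGE_SHIFT == 12, this is a shift by 15).
         */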
        mem = zone->present_pages >> (27 - PAGE_SHIFT);

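        /*
         * Checking one table row above: 4 processors give fls(4) == 3, and
         * a 2-4 GB zone gives mem around 16..31 in 128MB units, so
         * fls(mem + 1) is typically 5 and the threshold is 2 * 3 * 5 == 30.
         */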
        threshold = 2 * fls(num_online_cpus()) * (1 + fls(mem));

        /*
         * Maximum threshold is 125
         */
        threshold = min(125, threshold);

        return threshold;
}

/*
 * Refresh the thresholds for each zone.
 */
static void refresh_zone_stat_thresholds(void)
{
        struct zone *zone;
        int cpu;
        int threshold;

        for_each_populated_zone(zone) {
                unsigned long max_drift, tolerate_drift;

                threshold = calculate_normal_threshold(zone);

                for_each_online_cpu(cpu)
                        per_cpu_ptr(zone->pageset, cpu)->stat_threshold
                                                        = threshold;

                /*
                 * Only set percpu_drift_mark if there is a danger that
                 * NR_FREE_PAGES reports the low watermark is ok when in fact
                 * the min watermark could be breached by an allocation
                 */
                tolerate_drift = low_wmark_pages(zone) - min_wmark_pages(zone);
                max_drift = num_online_cpus() * threshold;
                if (max_drift > tolerate_drift)
                        zone->percpu_drift_mark = high_wmark_pages(zone) +
                                        max_drift;
        }
}

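/*
 * Switch the stat thresholds of every zone in @pgdat that has a
 * percpu_drift_mark set, using the supplied callback (for example
 * calculate_pressure_threshold() above) to pick the new value.
 */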
void set_pgdat_percpu_threshold(pg_data_t *pgdat,
                                int (*calculate_pressure)(struct zone *))
{
        struct zone *zone;
        int cpu;
        int threshold;
        int i;

        for (i = 0; i < pgdat->nr_zones; i++) {
                zone = &pgdat->node_zones[i];
                if (!zone->percpu_drift_mark)
                        continue;

                threshold = (*calculate_pressure)(zone);
                for_each_possible_cpu(cpu)
                        per_cpu_ptr(zone->pageset, cpu)->stat_threshold
                                                        = threshold;
        }
}

/*
 * For use when we know that interrupts are disabled.
 */
void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
                                int delta)
{
        struct per_cpu_pageset __percpu *pcp = zone->pageset;
        s8 __percpu *p = pcp->vm_stat_diff + item;
        long x;
        long t;

        x = delta + __this_cpu_read(*p);

        t = __this_cpu_read(pcp->stat_threshold);

        if (unlikely(x > t || x < -t)) {
                zone_page_state_add(x, zone, item);
                x = 0;
        }
        __this_cpu_write(*p, x);
}
EXPORT_SYMBOL(__mod_zone_page_state);

/*
 * Optimized increment and decrement functions.
 *
 * These are only for a single page and therefore can take a struct page *
 * argument instead of struct zone *. This allows the inclusion of the code
 * generated for page_zone(page) into the optimized functions.
 *
 * No overflow check is necessary and therefore the differential can be
 * incremented or decremented in place which may allow the compilers to
 * generate better code.
 * The increment or decrement is known and therefore one boundary check can
 * be omitted.
 *
 * NOTE: These functions are very performance sensitive. Change only
 * with care.
 *
 * Some processors have inc/dec instructions that are atomic vs an interrupt.
 * However, the code must first determine the differential location in a zone
 * based on the processor number and then inc/dec the counter. There is no
 * guarantee without disabling preemption that the processor will not change
 * in between and therefore the atomicity vs. interrupt cannot be exploited
 * in a useful way here.
 */
void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
        struct per_cpu_pageset __percpu *pcp = zone->pageset;
        s8 __percpu *p = pcp->vm_stat_diff + item;
        s8 v, t;

        v = __this_cpu_inc_return(*p);
        t = __this_cpu_read(pcp->stat_threshold);
        if (unlikely(v > t)) {
                s8 overstep = t >> 1;

                zone_page_state_add(v + overstep, zone, item);
                __this_cpu_write(*p, -overstep);
        }
}

void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
        __inc_zone_state(page_zone(page), item);
}
EXPORT_SYMBOL(__inc_zone_page_state);

void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
        struct per_cpu_pageset __percpu *pcp = zone->pageset;
        s8 __percpu *p = pcp->vm_stat_diff + item;
        s8 v, t;

        v = __this_cpu_dec_return(*p);
        t = __this_cpu_read(pcp->stat_threshold);
        if (unlikely(v < -t)) {
                s8 overstep = t >> 1;

                zone_page_state_add(v - overstep, zone, item);
                __this_cpu_write(*p, overstep);
        }
}

void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
        __dec_zone_state(page_zone(page), item);
}
EXPORT_SYMBOL(__dec_zone_page_state);

#ifdef CONFIG_CMPXCHG_LOCAL
/*
 * If we have cmpxchg_local support then we do not need to incur the overhead
 * that comes with local_irq_save/restore if we use this_cpu_cmpxchg.
 *
 * mod_state() modifies the zone counter state through atomic per cpu
 * operations.
 *
 * Overstep mode specifies how overstep should be handled:
 *     0       No overstepping
 *     1       Overstepping half of threshold
 *     -1      Overstepping minus half of threshold
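 *
 * For example, with a threshold of 124 an increment that pushes the
 * per-cpu diff past the threshold folds diff + 62 into the zone counter
 * and resets the per-cpu diff to -62, halving how often folding happens.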
 */
static inline void mod_state(struct zone *zone,
       enum zone_stat_item item, int delta, int overstep_mode)
{
        struct per_cpu_pageset __percpu *pcp = zone->pageset;
        s8 __percpu *p = pcp->vm_stat_diff + item;
        long o, n, t, z;

        do {
                z = 0;  /* overflow to zone counters */

                /*
                 * The fetching of the stat_threshold is racy. We may apply
                 * a counter threshold to the wrong cpu if we get
                 * rescheduled while executing here. However, the next
                 * counter update will apply the threshold again and
                 * therefore bring the counter under the threshold again.
                 *
                 * Most of the time the thresholds are the same anyway
                 * for all cpus in a zone.
                 */
                t = this_cpu_read(pcp->stat_threshold);

                o = this_cpu_read(*p);
                n = delta + o;

                if (n > t || n < -t) {
                        int os = overstep_mode * (t >> 1);

                        /* Overflow must be added to zone counters */
                        z = n + os;
                        n = -os;
                }
        } while (this_cpu_cmpxchg(*p, o, n) != o);

        if (z)
                zone_page_state_add(z, zone, item);
}

void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
                                        int delta)
{
        mod_state(zone, item, delta, 0);
}
EXPORT_SYMBOL(mod_zone_page_state);

void inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
        mod_state(zone, item, 1, 1);
}

void inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
        mod_state(page_zone(page), item, 1, 1);
}
EXPORT_SYMBOL(inc_zone_page_state);

void dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
        mod_state(page_zone(page), item, -1, -1);
}
EXPORT_SYMBOL(dec_zone_page_state);
#else
/*
 * Use interrupt disable to serialize counter updates
 */
void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
                                        int delta)
{
        unsigned long flags;

        local_irq_save(flags);
        __mod_zone_page_state(zone, item, delta);
        local_irq_restore(flags);
}
EXPORT_SYMBOL(mod_zone_page_state);

void inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
        unsigned long flags;

        local_irq_save(flags);
        __inc_zone_state(zone, item);
        local_irq_restore(flags);
}

void inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
        unsigned long flags;
        struct zone *zone;

        zone = page_zone(page);
        local_irq_save(flags);
        __inc_zone_state(zone, item);
        local_irq_restore(flags);
}
EXPORT_SYMBOL(inc_zone_page_state);

void dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
        unsigned long flags;

        local_irq_save(flags);
        __dec_zone_page_state(page, item);
        local_irq_restore(flags);
}
EXPORT_SYMBOL(dec_zone_page_state);
#endif

/*
 * Update the zone counters for one cpu.
 *
 * The cpu specified must be either the current cpu or a processor that
 * is not online. If it is the current cpu then the execution thread must
 * be pinned to the current cpu.
 *
 * Note that refresh_cpu_vm_stats strives to only access
 * node local memory. The per cpu pagesets on remote zones are placed
 * in the memory local to the processor using that pageset. So the
 * loop over all zones will access a series of cachelines local to
 * the processor.
 *
 * The call to zone_page_state_add updates the cachelines with the
 * statistics in the remote zone struct as well as the global cachelines
 * with the global counters. These could cause remote node cache line
 * bouncing and will have to be only done when necessary.
 */
void refresh_cpu_vm_stats(int cpu)
{
        struct zone *zone;
        int i;
        int global_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };

        for_each_populated_zone(zone) {
                struct per_cpu_pageset *p;

                p = per_cpu_ptr(zone->pageset, cpu);

                for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
                        if (p->vm_stat_diff[i]) {
                                unsigned long flags;
                                int v;

                                local_irq_save(flags);
                                v = p->vm_stat_diff[i];
                                p->vm_stat_diff[i] = 0;
                                local_irq_restore(flags);
                                atomic_long_add(v, &zone->vm_stat[i]);
                                global_diff[i] += v;
#ifdef CONFIG_NUMA
                                /* 3 seconds idle till flush */
                                p->expire = 3;
#endif
                        }
                cond_resched();
#ifdef CONFIG_NUMA
                /*
                 * Deal with draining the remote pageset of this
                 * processor
                 *
                 * Check if there are pages remaining in this pageset
                 * if not then there is nothing to expire.
                 */
                if (!p->expire || !p->pcp.count)
                        continue;

                /*
                 * We never drain zones local to this processor.
                 */
                if (zone_to_nid(zone) == numa_node_id()) {
                        p->expire = 0;
                        continue;
                }

                p->expire--;
                if (p->expire)
                        continue;

                if (p->pcp.count)
                        drain_zone_pages(zone, &p->pcp);
#endif
        }

        for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
                if (global_diff[i])
                        atomic_long_add(global_diff[i], &vm_stat[i]);
}

#endif

#ifdef CONFIG_NUMA
/*
 * zonelist = the list of zones passed to the allocator
 * z        = the zone from which the allocation occurred.
 *
 * Must be called with interrupts disabled.
 *
 * When __GFP_OTHER_NODE is set assume the node of the preferred
 * zone is the local node. This is useful for daemons who allocate
 * memory on behalf of other processes.
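 *
 * NUMA_HIT/NUMA_MISS/NUMA_FOREIGN record whether the allocation was
 * satisfied on the node it was intended for, while NUMA_LOCAL/NUMA_OTHER
 * record whether it was satisfied on the node of the running (or, with
 * __GFP_OTHER_NODE, the intended) process.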
 */
void zone_statistics(struct zone *preferred_zone, struct zone *z, gfp_t flags)
{
        if (z->zone_pgdat == preferred_zone->zone_pgdat) {
                __inc_zone_state(z, NUMA_HIT);
        } else {
                __inc_zone_state(z, NUMA_MISS);
                __inc_zone_state(preferred_zone, NUMA_FOREIGN);
        }
        if (z->node == ((flags & __GFP_OTHER_NODE) ?
                        preferred_zone->node : numa_node_id()))
                __inc_zone_state(z, NUMA_LOCAL);
        else
                __inc_zone_state(z, NUMA_OTHER);
}
#endif

#ifdef CONFIG_COMPACTION

struct contig_page_info {
        unsigned long free_pages;
        unsigned long free_blocks_total;
        unsigned long free_blocks_suitable;
};

/*
 * Calculate the number of free pages in a zone, how many contiguous
 * pages are free and how many are large enough to satisfy an allocation of
 * the target size. Note that this function makes no attempt to estimate
 * how many suitable free blocks there *might* be if MOVABLE pages were
 * migrated. Calculating that is possible, but expensive and can be
 * figured out from userspace
 */
static void fill_contig_page_info(struct zone *zone,
                                unsigned int suitable_order,
                                struct contig_page_info *info)
{
        unsigned int order;

        info->free_pages = 0;
        info->free_blocks_total = 0;
        info->free_blocks_suitable = 0;

        for (order = 0; order < MAX_ORDER; order++) {
                unsigned long blocks;

                /* Count number of free blocks */
                blocks = zone->free_area[order].nr_free;
                info->free_blocks_total += blocks;

                /* Count free base pages */
                info->free_pages += blocks << order;

                /* Count the suitable free blocks */
                if (order >= suitable_order)
                        info->free_blocks_suitable += blocks <<
                                                (order - suitable_order);
        }
}

/*
 * A fragmentation index only makes sense if an allocation of a requested
 * size would fail. If that is true, the fragmentation index indicates
 * whether external fragmentation or a lack of memory was the problem.
 * The value can be used to determine if page reclaim or compaction
 * should be used
 */
static int __fragmentation_index(unsigned int order, struct contig_page_info *info)
{
        unsigned long requested = 1UL << order;

        if (!info->free_blocks_total)
                return 0;

        /* Fragmentation index only makes sense when a request would fail */
        if (info->free_blocks_suitable)
                return -1000;

        /*
         * Index is between 0 and 1 so return within 3 decimal places
         *
         * 0 => allocation would fail due to lack of memory
         * 1 => allocation would fail due to fragmentation
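         *
         * Illustrative example: 1000 free base pages all sitting in
         * order-0 blocks with an order-4 (16 page) request gives
         * 1000 - (1000 + 1000 * 1000 / 16) / 1000 = 937, i.e. 0.937:
         * plenty of memory, but far too fragmented to use.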
         */
        return 1000 - div_u64((1000 + div_u64(info->free_pages * 1000ULL, requested)), info->free_blocks_total);
}

/* Same as __fragmentation index but allocs contig_page_info on stack */
int fragmentation_index(struct zone *zone, unsigned int order)
{
        struct contig_page_info info;

        fill_contig_page_info(zone, order, &info);
        return __fragmentation_index(order, &info);
}
#endif

#if defined(CONFIG_PROC_FS) || defined(CONFIG_COMPACTION)
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static char * const migratetype_names[MIGRATE_TYPES] = {
        "Unmovable",
        "Reclaimable",
        "Movable",
        "Reserve",
        "Isolate",
};

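/* Position the iterator at the *pos'th online node, or return NULL. */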
static void *frag_start(struct seq_file *m, loff_t *pos)
{
        pg_data_t *pgdat;
        loff_t node = *pos;
        for (pgdat = first_online_pgdat();
             pgdat && node;
             pgdat = next_online_pgdat(pgdat))
                --node;

        return pgdat;
}

static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)
{
        pg_data_t *pgdat = (pg_data_t *)arg;

        (*pos)++;
        return next_online_pgdat(pgdat);
}

static void frag_stop(struct seq_file *m, void *arg)
{
}

/* Walk all the zones in a node and print using a callback */
static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat,
                void (*print)(struct seq_file *m, pg_data_t *, struct zone *))
{
        struct zone *zone;
        struct zone *node_zones = pgdat->node_zones;
        unsigned long flags;

        for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
                if (!populated_zone(zone))
                        continue;

                spin_lock_irqsave(&zone->lock, flags);
                print(m, pgdat, zone);
                spin_unlock_irqrestore(&zone->lock, flags);
        }
}
#endif

#ifdef CONFIG_PROC_FS
static void frag_show_print(struct seq_file *m, pg_data_t *pgdat,
                                                struct zone *zone)
{
        int order;

        seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
        for (order = 0; order < MAX_ORDER; ++order)
                seq_printf(m, "%6lu ", zone->free_area[order].nr_free);
        seq_putc(m, '\n');
}

/*
 * This walks the free areas for each zone.
 */
static int frag_show(struct seq_file *m, void *arg)
{
        pg_data_t *pgdat = (pg_data_t *)arg;
        walk_zones_in_node(m, pgdat, frag_show_print);
        return 0;
}

static void pagetypeinfo_showfree_print(struct seq_file *m,
                                        pg_data_t *pgdat, struct zone *zone)
{
        int order, mtype;

        for (mtype = 0; mtype < MIGRATE_TYPES; mtype++) {
                seq_printf(m, "Node %4d, zone %8s, type %12s ",
                                        pgdat->node_id,
                                        zone->name,
                                        migratetype_names[mtype]);
                for (order = 0; order < MAX_ORDER; ++order) {
                        unsigned long freecount = 0;
                        struct free_area *area;
                        struct list_head *curr;

                        area = &(zone->free_area[order]);

                        list_for_each(curr, &area->free_list[mtype])
                                freecount++;
                        seq_printf(m, "%6lu ", freecount);
                }
                seq_putc(m, '\n');
        }
}

/* Print out the free pages at each order for each migratetype */
static int pagetypeinfo_showfree(struct seq_file *m, void *arg)
{
        int order;
        pg_data_t *pgdat = (pg_data_t *)arg;

        /* Print header */
        seq_printf(m, "%-43s ", "Free pages count per migrate type at order");
        for (order = 0; order < MAX_ORDER; ++order)
                seq_printf(m, "%6d ", order);
        seq_putc(m, '\n');

        walk_zones_in_node(m, pgdat, pagetypeinfo_showfree_print);

        return 0;
}

static void pagetypeinfo_showblockcount_print(struct seq_file *m,
                                        pg_data_t *pgdat, struct zone *zone)
{
        int mtype;
        unsigned long pfn;
        unsigned long start_pfn = zone->zone_start_pfn;
        unsigned long end_pfn = start_pfn + zone->spanned_pages;
        unsigned long count[MIGRATE_TYPES] = { 0, };

        for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
                struct page *page;

                if (!pfn_valid(pfn))
                        continue;

                page = pfn_to_page(pfn);

                /* Watch for unexpected holes punched in the memmap */
                if (!memmap_valid_within(pfn, page, zone))
                        continue;

                mtype = get_pageblock_migratetype(page);

                if (mtype < MIGRATE_TYPES)
                        count[mtype]++;
        }

        /* Print counts */
        seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
        for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
                seq_printf(m, "%12lu ", count[mtype]);
        seq_putc(m, '\n');
}

/* Print out the number of pageblocks for each migratetype */
static int pagetypeinfo_showblockcount(struct seq_file *m, void *arg)
{
        int mtype;
        pg_data_t *pgdat = (pg_data_t *)arg;

        seq_printf(m, "\n%-23s", "Number of blocks type ");
        for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
                seq_printf(m, "%12s ", migratetype_names[mtype]);
        seq_putc(m, '\n');
        walk_zones_in_node(m, pgdat, pagetypeinfo_showblockcount_print);

        return 0;
}

/*
 * This prints out statistics in relation to grouping pages by mobility.
 * It is expensive to collect so do not constantly read the file.
 */
static int pagetypeinfo_show(struct seq_file *m, void *arg)
{
        pg_data_t *pgdat = (pg_data_t *)arg;

        /* check memoryless node */
        if (!node_state(pgdat->node_id, N_HIGH_MEMORY))
                return 0;

        seq_printf(m, "Page block order: %d\n", pageblock_order);
        seq_printf(m, "Pages per block:  %lu\n", pageblock_nr_pages);
        seq_putc(m, '\n');
        pagetypeinfo_showfree(m, pgdat);
        pagetypeinfo_showblockcount(m, pgdat);

        return 0;
}

static const struct seq_operations fragmentation_op = {
        .start  = frag_start,
        .next   = frag_next,
        .stop   = frag_stop,
        .show   = frag_show,
};

static int fragmentation_open(struct inode *inode, struct file *file)
{
        return seq_open(file, &fragmentation_op);
}

static const struct file_operations fragmentation_file_operations = {
        .open           = fragmentation_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release,
};

static const struct seq_operations pagetypeinfo_op = {
        .start  = frag_start,
        .next   = frag_next,
        .stop   = frag_stop,
        .show   = pagetypeinfo_show,
};

static int pagetypeinfo_open(struct inode *inode, struct file *file)
{
        return seq_open(file, &pagetypeinfo_op);
}

static const struct file_operations pagetypeinfo_file_ops = {
        .open           = pagetypeinfo_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release,
};

#ifdef CONFIG_ZONE_DMA
#define TEXT_FOR_DMA(xx) xx "_dma",
#else
#define TEXT_FOR_DMA(xx)
#endif

#ifdef CONFIG_ZONE_DMA32
#define TEXT_FOR_DMA32(xx) xx "_dma32",
#else
#define TEXT_FOR_DMA32(xx)
#endif

#ifdef CONFIG_HIGHMEM
#define TEXT_FOR_HIGHMEM(xx) xx "_high",
#else
#define TEXT_FOR_HIGHMEM(xx)
#endif

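/*
 * Expand one event name into an entry per configured zone type, e.g.
 * TEXTS_FOR_ZONES("pgalloc") becomes "pgalloc_dma", "pgalloc_dma32",
 * "pgalloc_normal", "pgalloc_high", "pgalloc_movable" on a fully
 * configured build.
 */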
#define TEXTS_FOR_ZONES(xx) TEXT_FOR_DMA(xx) TEXT_FOR_DMA32(xx) xx "_normal", \
                                        TEXT_FOR_HIGHMEM(xx) xx "_movable",

static const char * const vmstat_text[] = {
        /* Zoned VM counters */
        "nr_free_pages",
        "nr_inactive_anon",
        "nr_active_anon",
        "nr_inactive_file",
        "nr_active_file",
        "nr_unevictable",
        "nr_mlock",
        "nr_anon_pages",
        "nr_mapped",
        "nr_file_pages",
        "nr_dirty",
        "nr_writeback",
        "nr_slab_reclaimable",
        "nr_slab_unreclaimable",
        "nr_page_table_pages",
        "nr_kernel_stack",
        "nr_unstable",
        "nr_bounce",
        "nr_vmscan_write",
        "nr_writeback_temp",
        "nr_isolated_anon",
        "nr_isolated_file",
        "nr_shmem",
        "nr_dirtied",
        "nr_written",

#ifdef CONFIG_NUMA
        "numa_hit",
        "numa_miss",
        "numa_foreign",
        "numa_interleave",
        "numa_local",
        "numa_other",
#endif
        "nr_anon_transparent_hugepages",
        "nr_dirty_threshold",
        "nr_dirty_background_threshold",

#ifdef CONFIG_VM_EVENT_COUNTERS
        "pgpgin",
        "pgpgout",
        "pswpin",
        "pswpout",

        TEXTS_FOR_ZONES("pgalloc")

        "pgfree",
        "pgactivate",
        "pgdeactivate",

        "pgfault",
        "pgmajfault",

        TEXTS_FOR_ZONES("pgrefill")
        TEXTS_FOR_ZONES("pgsteal")
        TEXTS_FOR_ZONES("pgscan_kswapd")
        TEXTS_FOR_ZONES("pgscan_direct")

#ifdef CONFIG_NUMA
        "zone_reclaim_failed",
#endif
        "pginodesteal",
        "slabs_scanned",
        "kswapd_steal",
        "kswapd_inodesteal",
        "kswapd_low_wmark_hit_quickly",
        "kswapd_high_wmark_hit_quickly",
        "kswapd_skip_congestion_wait",
        "pageoutrun",
        "allocstall",

        "pgrotated",

#ifdef CONFIG_COMPACTION
        "compact_blocks_moved",
        "compact_pages_moved",
        "compact_pagemigrate_failed",
        "compact_stall",
        "compact_fail",
        "compact_success",
#endif

#ifdef CONFIG_HUGETLB_PAGE
        "htlb_buddy_alloc_success",
        "htlb_buddy_alloc_fail",
#endif
        "unevictable_pgs_culled",
        "unevictable_pgs_scanned",
        "unevictable_pgs_rescued",
        "unevictable_pgs_mlocked",
        "unevictable_pgs_munlocked",
        "unevictable_pgs_cleared",
        "unevictable_pgs_stranded",
        "unevictable_pgs_mlockfreed",

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
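        /*
         * The transparent hugepage event counters added by this patch:
         * faults served by (or falling back from) a huge page, huge page
         * allocations by khugepaged when collapsing ranges (and allocation
         * failures), and splits of huge pages back into base pages.
         */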
        "thp_fault_alloc",
        "thp_fault_fallback",
        "thp_collapse_alloc",
        "thp_collapse_alloc_failed",
        "thp_split",
#endif

#endif /* CONFIG_VM_EVENT_COUNTERS */
};

static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
                                                        struct zone *zone)
{
        int i;
        seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name);
        seq_printf(m,
                   "\n  pages free     %lu"
                   "\n        min      %lu"
                   "\n        low      %lu"
                   "\n        high     %lu"
                   "\n        scanned  %lu"
                   "\n        spanned  %lu"
                   "\n        present  %lu",
                   zone_page_state(zone, NR_FREE_PAGES),
                   min_wmark_pages(zone),
                   low_wmark_pages(zone),
                   high_wmark_pages(zone),
                   zone->pages_scanned,
                   zone->spanned_pages,
                   zone->present_pages);

        for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
                seq_printf(m, "\n    %-12s %lu", vmstat_text[i],
                                zone_page_state(zone, i));

        seq_printf(m,
                   "\n        protection: (%lu",
                   zone->lowmem_reserve[0]);
        for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++)
                seq_printf(m, ", %lu", zone->lowmem_reserve[i]);
        seq_printf(m,
                   ")"
                   "\n  pagesets");
        for_each_online_cpu(i) {
                struct per_cpu_pageset *pageset;

                pageset = per_cpu_ptr(zone->pageset, i);
                seq_printf(m,
                           "\n    cpu: %i"
                           "\n              count: %i"
                           "\n              high:  %i"
                           "\n              batch: %i",
                           i,
                           pageset->pcp.count,
                           pageset->pcp.high,
                           pageset->pcp.batch);
#ifdef CONFIG_SMP
                seq_printf(m, "\n  vm stats threshold: %d",
                                pageset->stat_threshold);
#endif
        }
        seq_printf(m,
                   "\n  all_unreclaimable: %u"
                   "\n  start_pfn:         %lu"
                   "\n  inactive_ratio:    %u",
                   zone->all_unreclaimable,
                   zone->zone_start_pfn,
                   zone->inactive_ratio);
        seq_putc(m, '\n');
}

/*
 * Output information about zones in @pgdat.
 */
static int zoneinfo_show(struct seq_file *m, void *arg)
{
        pg_data_t *pgdat = (pg_data_t *)arg;
        walk_zones_in_node(m, pgdat, zoneinfo_show_print);
        return 0;
}

static const struct seq_operations zoneinfo_op = {
        .start  = frag_start, /* iterate over all zones. The same as in
                               * fragmentation. */
        .next   = frag_next,
        .stop   = frag_stop,
        .show   = zoneinfo_show,
};

static int zoneinfo_open(struct inode *inode, struct file *file)
{
        return seq_open(file, &zoneinfo_op);
}

static const struct file_operations proc_zoneinfo_file_operations = {
        .open           = zoneinfo_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release,
};

enum writeback_stat_item {
        NR_DIRTY_THRESHOLD,
        NR_DIRTY_BG_THRESHOLD,
        NR_VM_WRITEBACK_STAT_ITEMS,
};

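/*
 * /proc/vmstat snapshots everything into one flat array: the zone
 * counters first, then the two writeback thresholds, then (if enabled)
 * the VM event counters. The layout must match vmstat_text[] above.
 */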
static void *vmstat_start(struct seq_file *m, loff_t *pos)
{
        unsigned long *v;
        int i, stat_items_size;

        if (*pos >= ARRAY_SIZE(vmstat_text))
                return NULL;
        stat_items_size = NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long) +
                          NR_VM_WRITEBACK_STAT_ITEMS * sizeof(unsigned long);

#ifdef CONFIG_VM_EVENT_COUNTERS
        stat_items_size += sizeof(struct vm_event_state);
#endif

        v = kmalloc(stat_items_size, GFP_KERNEL);
        m->private = v;
        if (!v)
                return ERR_PTR(-ENOMEM);
        for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
                v[i] = global_page_state(i);
        v += NR_VM_ZONE_STAT_ITEMS;

        global_dirty_limits(v + NR_DIRTY_BG_THRESHOLD,
                            v + NR_DIRTY_THRESHOLD);
        v += NR_VM_WRITEBACK_STAT_ITEMS;

#ifdef CONFIG_VM_EVENT_COUNTERS
        all_vm_events(v);
        v[PGPGIN] /= 2;         /* sectors -> kbytes */
        v[PGPGOUT] /= 2;
#endif
        return (unsigned long *)m->private + *pos;
}

static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
{
        (*pos)++;
        if (*pos >= ARRAY_SIZE(vmstat_text))
                return NULL;
        return (unsigned long *)m->private + *pos;
}

static int vmstat_show(struct seq_file *m, void *arg)
{
        unsigned long *l = arg;
        unsigned long off = l - (unsigned long *)m->private;

        seq_printf(m, "%s %lu\n", vmstat_text[off], *l);
        return 0;
}

static void vmstat_stop(struct seq_file *m, void *arg)
{
        kfree(m->private);
        m->private = NULL;
}

static const struct seq_operations vmstat_op = {
        .start  = vmstat_start,
        .next   = vmstat_next,
        .stop   = vmstat_stop,
        .show   = vmstat_show,
};

static int vmstat_open(struct inode *inode, struct file *file)
{
        return seq_open(file, &vmstat_op);
}

static const struct file_operations proc_vmstat_file_operations = {
        .open           = vmstat_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release,
};
#endif /* CONFIG_PROC_FS */

#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct delayed_work, vmstat_work);
int sysctl_stat_interval __read_mostly = HZ;

static void vmstat_update(struct work_struct *w)
{
        refresh_cpu_vm_stats(smp_processor_id());
        schedule_delayed_work(&__get_cpu_var(vmstat_work),
                round_jiffies_relative(sysctl_stat_interval));
}

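/*
 * The per-cpu work is deferrable, so an otherwise idle CPU is not
 * woken up just to fold its vmstat counters.
 */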
static void __cpuinit start_cpu_timer(int cpu)
{
        struct delayed_work *work = &per_cpu(vmstat_work, cpu);

        INIT_DELAYED_WORK_DEFERRABLE(work, vmstat_update);
        schedule_delayed_work_on(cpu, work, __round_jiffies_relative(HZ, cpu));
}

/*
 * Use the cpu notifier to ensure that the thresholds are recalculated
 * when necessary.
 */
static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb,
                unsigned long action,
                void *hcpu)
{
        long cpu = (long)hcpu;

        switch (action) {
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
                refresh_zone_stat_thresholds();
                start_cpu_timer(cpu);
                node_set_state(cpu_to_node(cpu), N_CPU);
                break;
        case CPU_DOWN_PREPARE:
        case CPU_DOWN_PREPARE_FROZEN:
                cancel_delayed_work_sync(&per_cpu(vmstat_work, cpu));
                per_cpu(vmstat_work, cpu).work.func = NULL;
                break;
        case CPU_DOWN_FAILED:
        case CPU_DOWN_FAILED_FROZEN:
                start_cpu_timer(cpu);
                break;
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                refresh_zone_stat_thresholds();
                break;
        default:
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata vmstat_notifier =
        { &vmstat_cpuup_callback, NULL, 0 };
#endif

static int __init setup_vmstat(void)
{
#ifdef CONFIG_SMP
        int cpu;

        refresh_zone_stat_thresholds();
        register_cpu_notifier(&vmstat_notifier);

        for_each_online_cpu(cpu)
                start_cpu_timer(cpu);
#endif
#ifdef CONFIG_PROC_FS
        proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
        proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
        proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
        proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
#endif
        return 0;
}
module_init(setup_vmstat)

#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)
#include <linux/debugfs.h>

static struct dentry *extfrag_debug_root;

/*
 * Return an index indicating how much of the available free memory is
 * unusable for an allocation of the requested size.
 */
static int unusable_free_index(unsigned int order,
                                struct contig_page_info *info)
{
        /* No free memory is interpreted as all free memory is unusable */
        if (info->free_pages == 0)
                return 1000;

        /*
         * Index should be a value between 0 and 1. Return a value to 3
         * decimal places.
         *
         * 0 => no fragmentation
         * 1 => high fragmentation
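         *
         * Illustrative example: 1000 free pages of which only ten order-4
         * blocks (160 pages) can serve an order-4 request gives
         * (1000 - 160) * 1000 / 1000 = 840, i.e. 0.840 of free memory
         * is unusable at that order.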
         */
        return div_u64((info->free_pages - (info->free_blocks_suitable << order)) * 1000ULL, info->free_pages);

}

static void unusable_show_print(struct seq_file *m,
                                        pg_data_t *pgdat, struct zone *zone)
{
        unsigned int order;
        int index;
        struct contig_page_info info;

        seq_printf(m, "Node %d, zone %8s ",
                                pgdat->node_id,
                                zone->name);
        for (order = 0; order < MAX_ORDER; ++order) {
                fill_contig_page_info(zone, order, &info);
                index = unusable_free_index(order, &info);
                seq_printf(m, "%d.%03d ", index / 1000, index % 1000);
        }

        seq_putc(m, '\n');
}

/*
 * Display unusable free space index
 *
 * The unusable free space index measures how much of the available free
 * memory cannot be used to satisfy an allocation of a given size and is a
 * value between 0 and 1. The higher the value, the more of free memory is
 * unusable and by implication, the worse the external fragmentation is. This
 * can be expressed as a percentage by multiplying by 100.
 */
static int unusable_show(struct seq_file *m, void *arg)
{
        pg_data_t *pgdat = (pg_data_t *)arg;

        /* check memoryless node */
        if (!node_state(pgdat->node_id, N_HIGH_MEMORY))
                return 0;

        walk_zones_in_node(m, pgdat, unusable_show_print);

        return 0;
}

static const struct seq_operations unusable_op = {
        .start  = frag_start,
        .next   = frag_next,
        .stop   = frag_stop,
        .show   = unusable_show,
};

static int unusable_open(struct inode *inode, struct file *file)
{
        return seq_open(file, &unusable_op);
}

static const struct file_operations unusable_file_ops = {
        .open           = unusable_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release,
};

static void extfrag_show_print(struct seq_file *m,
                                        pg_data_t *pgdat, struct zone *zone)
{
        unsigned int order;
        int index;

        /* Alloc on stack as interrupts are disabled for zone walk */
        struct contig_page_info info;

        seq_printf(m, "Node %d, zone %8s ",
                                pgdat->node_id,
                                zone->name);
        for (order = 0; order < MAX_ORDER; ++order) {
                fill_contig_page_info(zone, order, &info);
                index = __fragmentation_index(order, &info);
                seq_printf(m, "%d.%03d ", index / 1000, index % 1000);
        }

        seq_putc(m, '\n');
}

/*
 * Display fragmentation index for orders that allocations would fail for
 */
static int extfrag_show(struct seq_file *m, void *arg)
{
        pg_data_t *pgdat = (pg_data_t *)arg;

        walk_zones_in_node(m, pgdat, extfrag_show_print);

        return 0;
}

static const struct seq_operations extfrag_op = {
        .start  = frag_start,
        .next   = frag_next,
        .stop   = frag_stop,
        .show   = extfrag_show,
};

static int extfrag_open(struct inode *inode, struct file *file)
{
        return seq_open(file, &extfrag_op);
}

static const struct file_operations extfrag_file_ops = {
        .open           = extfrag_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release,
};

static int __init extfrag_debug_init(void)
{
        extfrag_debug_root = debugfs_create_dir("extfrag", NULL);
        if (!extfrag_debug_root)
                return -ENOMEM;

        if (!debugfs_create_file("unusable_index", 0444,
                        extfrag_debug_root, NULL, &unusable_file_ops))
                return -ENOMEM;

        if (!debugfs_create_file("extfrag_index", 0444,
                        extfrag_debug_root, NULL, &extfrag_file_ops))
                return -ENOMEM;

        return 0;
}

module_init(extfrag_debug_init);
#endif