[PATCH] zoned vm counters: conversion of nr_slab to per zone counter
[linux-2.6.git] include/linux/vmstat.h
#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/config.h>
#include <linux/mmzone.h>
#include <asm/atomic.h>

/*
 * Global page accounting.  One instance per CPU.  Only unsigned longs are
 * allowed.
 *
 * - Fields can be modified with xxx_page_state and xxx_page_state_zone at
 *   any time safely (which protects the instance from modification by
 *   interrupts).
 * - The __xxx_page_state variants can be used safely when interrupts are
 *   disabled.
 * - The __xxx_page_state variants can also be used if the field is only
 *   modified from process context and protected from preemption, or only
 *   modified from interrupt context.  In this case, the field should be
 *   commented here.
 *
 * A usage sketch follows the accessor macros below.
 */
struct page_state {
        unsigned long nr_dirty;         /* Dirty writeable pages */
        unsigned long nr_writeback;     /* Pages under writeback */
        unsigned long nr_unstable;      /* NFS unstable pages */
        unsigned long nr_page_table_pages;/* Pages used for pagetables */
#define GET_PAGE_STATE_LAST nr_page_table_pages

        /*
         * The below are zeroed by get_page_state().  Use get_full_page_state()
         * to add up all these.
         */
        unsigned long pgpgin;           /* Disk reads */
        unsigned long pgpgout;          /* Disk writes */
        unsigned long pswpin;           /* swap reads */
        unsigned long pswpout;          /* swap writes */

        unsigned long pgalloc_high;     /* page allocations */
        unsigned long pgalloc_normal;
        unsigned long pgalloc_dma32;
        unsigned long pgalloc_dma;

        unsigned long pgfree;           /* page freeings */
        unsigned long pgactivate;       /* pages moved inactive->active */
        unsigned long pgdeactivate;     /* pages moved active->inactive */

        unsigned long pgfault;          /* faults (major+minor) */
        unsigned long pgmajfault;       /* faults (major only) */

        unsigned long pgrefill_high;    /* inspected in refill_inactive_zone */
        unsigned long pgrefill_normal;
        unsigned long pgrefill_dma32;
        unsigned long pgrefill_dma;

        unsigned long pgsteal_high;     /* total highmem pages reclaimed */
        unsigned long pgsteal_normal;
        unsigned long pgsteal_dma32;
        unsigned long pgsteal_dma;

        unsigned long pgscan_kswapd_high;/* highmem pages scanned by kswapd */
        unsigned long pgscan_kswapd_normal;
        unsigned long pgscan_kswapd_dma32;
        unsigned long pgscan_kswapd_dma;

        unsigned long pgscan_direct_high;/* highmem pages scanned by direct reclaim */
        unsigned long pgscan_direct_normal;
        unsigned long pgscan_direct_dma32;
        unsigned long pgscan_direct_dma;

        unsigned long pginodesteal;     /* pages reclaimed via inode freeing */
        unsigned long slabs_scanned;    /* slab objects scanned */
        unsigned long kswapd_steal;     /* pages reclaimed by kswapd */
        unsigned long kswapd_inodesteal;/* reclaimed via kswapd inode freeing */
        unsigned long pageoutrun;       /* kswapd's calls to page reclaim */
        unsigned long allocstall;       /* direct reclaim calls */

        unsigned long pgrotated;        /* pages rotated to tail of the LRU */
        unsigned long nr_bounce;        /* pages for bounce buffers */
};

extern void get_page_state(struct page_state *ret);
extern void get_page_state_node(struct page_state *ret, int node);
extern void get_full_page_state(struct page_state *ret);
extern unsigned long read_page_state_offset(unsigned long offset);
extern void mod_page_state_offset(unsigned long offset, unsigned long delta);
extern void __mod_page_state_offset(unsigned long offset, unsigned long delta);

#define read_page_state(member) \
        read_page_state_offset(offsetof(struct page_state, member))

#define mod_page_state(member, delta)   \
        mod_page_state_offset(offsetof(struct page_state, member), (delta))

#define __mod_page_state(member, delta) \
        __mod_page_state_offset(offsetof(struct page_state, member), (delta))

#define inc_page_state(member)          mod_page_state(member, 1UL)
#define dec_page_state(member)          mod_page_state(member, 0UL - 1)
#define add_page_state(member,delta)    mod_page_state(member, (delta))
#define sub_page_state(member,delta)    mod_page_state(member, 0UL - (delta))

#define __inc_page_state(member)        __mod_page_state(member, 1UL)
#define __dec_page_state(member)        __mod_page_state(member, 0UL - 1)
#define __add_page_state(member,delta)  __mod_page_state(member, (delta))
#define __sub_page_state(member,delta)  __mod_page_state(member, 0UL - (delta))
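
/*
 * Usage sketch (illustrative, not part of the original header): the plain
 * accessors are interrupt safe and may be called from any context, while
 * the __xxx variants assume interrupts are already disabled:
 *
 *	unsigned long flags;
 *
 *	inc_page_state(pgactivate);		(safe in any context)
 *
 *	local_irq_save(flags);
 *	__add_page_state(pgrotated, 1);		(irqs known to be off)
 *	local_irq_restore(flags);
 */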

#define page_state(member) (*__page_state(offsetof(struct page_state, member)))

#define state_zone_offset(zone, member)                                 \
({                                                                      \
        unsigned offset;                                                \
        if (is_highmem(zone))                                           \
                offset = offsetof(struct page_state, member##_high);    \
        else if (is_normal(zone))                                       \
                offset = offsetof(struct page_state, member##_normal);  \
        else if (is_dma32(zone))                                        \
                offset = offsetof(struct page_state, member##_dma32);   \
        else                                                            \
                offset = offsetof(struct page_state, member##_dma);     \
        offset;                                                         \
})

#define __mod_page_state_zone(zone, member, delta)                      \
 do {                                                                   \
        __mod_page_state_offset(state_zone_offset(zone, member), (delta)); \
 } while (0)

#define mod_page_state_zone(zone, member, delta)                        \
 do {                                                                   \
        mod_page_state_offset(state_zone_offset(zone, member), (delta)); \
 } while (0)
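
/*
 * Sketch (illustrative, not in the original): state_zone_offset() selects
 * the per zone-type field by pasting a suffix onto the member name, so
 *
 *	mod_page_state_zone(zone, pgrefill, 1);
 *
 * increments pgrefill_high, pgrefill_normal, pgrefill_dma32 or
 * pgrefill_dma depending on the type of `zone'.
 */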

DECLARE_PER_CPU(struct page_state, page_states);

/*
 * Zone based page accounting with per cpu differentials.
 */
extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];

static inline void zone_page_state_add(long x, struct zone *zone,
                                 enum zone_stat_item item)
{
        atomic_long_add(x, &zone->vm_stat[item]);
        atomic_long_add(x, &vm_stat[item]);
}
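
/*
 * Both counters move together: zone_page_state_add() updates the per zone
 * counter and the matching global vm_stat[] entry.  Illustrative sketch,
 * assuming NR_SLAB is the zone_stat_item this patch adds to mmzone.h:
 *
 *	zone_page_state_add(nr_pages, page_zone(page), NR_SLAB);
 */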

static inline unsigned long global_page_state(enum zone_stat_item item)
{
        long x = atomic_long_read(&vm_stat[item]);
#ifdef CONFIG_SMP
        if (x < 0)
                x = 0;
#endif
        return x;
}

static inline unsigned long zone_page_state(struct zone *zone,
                                        enum zone_stat_item item)
{
        long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
        if (x < 0)
                x = 0;
#endif
        return x;
}
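
/*
 * Readers clamp negative sums to zero: on SMP, per cpu differentials that
 * have not yet been folded back can leave the atomic counter transiently
 * negative.  Illustrative sketch, again assuming the NR_SLAB item from
 * this patch:
 *
 *	unsigned long slab_pages = global_page_state(NR_SLAB);
 *	unsigned long slab_in_zone = zone_page_state(zone, NR_SLAB);
 */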

#ifdef CONFIG_NUMA
/*
 * Determine the per node value of a stat item. This function
 * is called frequently in a NUMA machine, so try to be as
 * frugal as possible.
 */
static inline unsigned long node_page_state(int node,
                                 enum zone_stat_item item)
{
        struct zone *zones = NODE_DATA(node)->node_zones;

        return
#ifndef CONFIG_DMA_IS_NORMAL
#if !defined(CONFIG_DMA_IS_DMA32) && BITS_PER_LONG >= 64
                zone_page_state(&zones[ZONE_DMA32], item) +
#endif
                zone_page_state(&zones[ZONE_NORMAL], item) +
#endif
#ifdef CONFIG_HIGHMEM
                zone_page_state(&zones[ZONE_HIGHMEM], item) +
#endif
                zone_page_state(&zones[ZONE_DMA], item);
}
#else
#define node_page_state(node, item) global_page_state(item)
#endif
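
/*
 * Sketch: per node slab pages on a NUMA box, using the NR_SLAB item this
 * patch introduces (illustrative, not part of the original):
 *
 *	unsigned long node_slab = node_page_state(numa_node_id(), NR_SLAB);
 *
 * On !NUMA kernels this collapses to global_page_state(NR_SLAB).
 */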

#define __add_zone_page_state(__z, __i, __d)    \
                __mod_zone_page_state(__z, __i, __d)
#define __sub_zone_page_state(__z, __i, __d)    \
                __mod_zone_page_state(__z, __i, -(__d))

#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))
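
/*
 * These wrappers suit conversions like the nr_slab one in this patch.
 * Hedged sketch of how slab pages might be accounted with them (names
 * approximate what a slab allocator would do, not copied from mm/slab.c):
 *
 *	add_zone_page_state(page_zone(page), NR_SLAB, nr_pages);
 *	...
 *	sub_zone_page_state(page_zone(page), NR_SLAB, nr_pages);
 */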

static inline void zap_zone_vm_stats(struct zone *zone)
{
        memset(zone->vm_stat, 0, sizeof(zone->vm_stat));
}
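
/*
 * zap_zone_vm_stats() resets every counter in zone->vm_stat[]; presumably
 * it is meant for zone bringup (zone initialization in page_alloc.c is an
 * assumption, not shown in this header):
 *
 *	zap_zone_vm_stats(zone);	(all zone->vm_stat[] now read 0)
 */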

#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, int);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

extern void inc_zone_state(struct zone *, enum zone_stat_item);

void refresh_cpu_vm_stats(int);
void refresh_vm_stats(void);

#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
                        enum zone_stat_item item, int delta)
{
        zone_page_state_add(delta, zone, item);
}

static inline void __inc_zone_page_state(struct page *page,
                        enum zone_stat_item item)
{
        atomic_long_inc(&page_zone(page)->vm_stat[item]);
        atomic_long_inc(&vm_stat[item]);
}

static inline void __dec_zone_page_state(struct page *page,
                        enum zone_stat_item item)
{
        atomic_long_dec(&page_zone(page)->vm_stat[item]);
        atomic_long_dec(&vm_stat[item]);
}

/*
 * We only use atomic operations to update counters. So there is no need to
 * disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

static inline void refresh_cpu_vm_stats(int cpu) { }
static inline void refresh_vm_stats(void) { }
#endif

#endif /* _LINUX_VMSTAT_H */