diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index ff4696c..65efb92 100644
@@ -5,52 +5,8 @@
 #include <linux/percpu.h>
 #include <linux/mm.h>
 #include <linux/mmzone.h>
-#include <asm/atomic.h>
-
-#ifdef CONFIG_ZONE_DMA
-#define DMA_ZONE(xx) xx##_DMA,
-#else
-#define DMA_ZONE(xx)
-#endif
-
-#ifdef CONFIG_ZONE_DMA32
-#define DMA32_ZONE(xx) xx##_DMA32,
-#else
-#define DMA32_ZONE(xx)
-#endif
-
-#ifdef CONFIG_HIGHMEM
-#define HIGHMEM_ZONE(xx) , xx##_HIGH
-#else
-#define HIGHMEM_ZONE(xx)
-#endif
-
-
-#define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL HIGHMEM_ZONE(xx) , xx##_MOVABLE
-
-enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
-               FOR_ALL_ZONES(PGALLOC),
-               PGFREE, PGACTIVATE, PGDEACTIVATE,
-               PGFAULT, PGMAJFAULT,
-               FOR_ALL_ZONES(PGREFILL),
-               FOR_ALL_ZONES(PGSTEAL),
-               FOR_ALL_ZONES(PGSCAN_KSWAPD),
-               FOR_ALL_ZONES(PGSCAN_DIRECT),
-               PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL,
-               PAGEOUTRUN, ALLOCSTALL, PGROTATED,
-#ifdef CONFIG_HUGETLB_PAGE
-               HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL,
-#endif
-               UNEVICTABLE_PGCULLED,   /* culled to noreclaim list */
-               UNEVICTABLE_PGSCANNED,  /* scanned for reclaimability */
-               UNEVICTABLE_PGRESCUED,  /* rescued from noreclaim list */
-               UNEVICTABLE_PGMLOCKED,
-               UNEVICTABLE_PGMUNLOCKED,
-               UNEVICTABLE_PGCLEARED,  /* on COW, page truncate */
-               UNEVICTABLE_PGSTRANDED, /* unable to isolate on unlock */
-               UNEVICTABLE_MLOCKFREED,
-               NR_VM_EVENT_ITEMS
-};
+#include <linux/vm_event_item.h>
+#include <linux/atomic.h>
 
 extern int sysctl_stat_interval;
 
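
Hunk 1 moves the vm_event_item enum out to <linux/vm_event_item.h> unchanged and replaces the deprecated <asm/atomic.h> include with <linux/atomic.h>. Existing callers keep compiling because vmstat.h now pulls the new header in transitively; a minimal sketch (the helper name is hypothetical):

    #include <linux/vmstat.h>       /* now pulls in linux/vm_event_item.h */

    /* Hypothetical caller: the event IDs resolve exactly as before. */
    static void account_major_fault(void)
    {
            count_vm_event(PGMAJFAULT);
    }
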
@@ -73,24 +29,22 @@ DECLARE_PER_CPU(struct vm_event_state, vm_event_states);
 
 static inline void __count_vm_event(enum vm_event_item item)
 {
-       __get_cpu_var(vm_event_states).event[item]++;
+       __this_cpu_inc(vm_event_states.event[item]);
 }
 
 static inline void count_vm_event(enum vm_event_item item)
 {
-       get_cpu_var(vm_event_states).event[item]++;
-       put_cpu();
+       this_cpu_inc(vm_event_states.event[item]);
 }
 
 static inline void __count_vm_events(enum vm_event_item item, long delta)
 {
-       __get_cpu_var(vm_event_states).event[item] += delta;
+       __this_cpu_add(vm_event_states.event[item], delta);
 }
 
 static inline void count_vm_events(enum vm_event_item item, long delta)
 {
-       get_cpu_var(vm_event_states).event[item] += delta;
-       put_cpu();
+       this_cpu_add(vm_event_states.event[item], delta);
 }
 
 extern void all_vm_events(unsigned long *);
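
Hunk 2 swaps the get_cpu_var()/put_cpu() pairs for this_cpu_inc()/this_cpu_add(), which perform the whole read-modify-write preempt-safely in one operation (a single instruction on architectures such as x86). A rough open-coded equivalent of the new count_vm_event(), assuming no arch-specific fast path:

    /* Sketch only: what this_cpu_inc() must be equivalent to. */
    static inline void count_vm_event_sketch(enum vm_event_item item)
    {
            preempt_disable();      /* pin the task to one CPU */
            __this_cpu_inc(vm_event_states.event[item]);
            preempt_enable();
    }
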
@@ -163,16 +117,31 @@ static inline unsigned long zone_page_state(struct zone *zone,
        return x;
 }
 
-extern unsigned long global_lru_pages(void);
-
-static inline unsigned long zone_lru_pages(struct zone *zone)
+/*
+ * More accurate version that also considers the currently pending
+ * deltas. For that we need to loop over all cpus to find the current
+ * deltas. There is no synchronization so the result cannot be
+ * exactly accurate either.
+ */
+static inline unsigned long zone_page_state_snapshot(struct zone *zone,
+                                       enum zone_stat_item item)
 {
-       return (zone_page_state(zone, NR_ACTIVE_ANON)
-               + zone_page_state(zone, NR_ACTIVE_FILE)
-               + zone_page_state(zone, NR_INACTIVE_ANON)
-               + zone_page_state(zone, NR_INACTIVE_FILE));
+       long x = atomic_long_read(&zone->vm_stat[item]);
+
+#ifdef CONFIG_SMP
+       int cpu;
+       for_each_online_cpu(cpu)
+               x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item];
+
+       if (x < 0)
+               x = 0;
+#endif
+       return x;
 }
 
+extern unsigned long global_reclaimable_pages(void);
+extern unsigned long zone_reclaimable_pages(struct zone *zone);
+
 #ifdef CONFIG_NUMA
 /*
  * Determine the per node value of a stat item. This function
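
The zone_page_state_snapshot() added in hunk 3 folds the pending per-cpu vm_stat_diff deltas into the atomic counter, at the cost of a walk over all online CPUs, so it only pays off where a stale reading could trigger a wrong decision. A hedged usage sketch (the function below is illustrative, not the actual in-tree caller):

    /* Illustrative: a watermark-style check that must not be fooled by
     * per-cpu counter drift when free memory is nearly exhausted. */
    static bool zone_below_min(struct zone *zone, unsigned long min_pages)
    {
            return zone_page_state_snapshot(zone, NR_FREE_PAGES) < min_pages;
    }
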
@@ -198,20 +167,15 @@ static inline unsigned long node_page_state(int node,
                zone_page_state(&zones[ZONE_MOVABLE], item);
 }
 
-extern void zone_statistics(struct zone *, struct zone *);
+extern void zone_statistics(struct zone *, struct zone *, gfp_t gfp);
 
 #else
 
 #define node_page_state(node, item) global_page_state(item)
-#define zone_statistics(_zl,_z) do { } while (0)
+#define zone_statistics(_zl, _z, gfp) do { } while (0)
 
 #endif /* CONFIG_NUMA */
 
-#define __add_zone_page_state(__z, __i, __d)   \
-               __mod_zone_page_state(__z, __i, __d)
-#define __sub_zone_page_state(__z, __i, __d)   \
-               __mod_zone_page_state(__z, __i,-(__d))
-
 #define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
 #define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))
 
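
In hunk 4, zone_statistics() gains a gfp_t argument so the NUMA hit/miss accounting can consider allocation context, e.g. pages allocated on behalf of another node; the !CONFIG_NUMA stub changes to match, and the unused __add/__sub_zone_page_state wrappers are dropped. A simplified sketch of the call-site shape (names assumed, not the exact page-allocator code):

    /* 'preferred_zone' is where the caller wanted the page; 'zone' is
     * where it actually came from. The gfp flags qualify the hit/miss. */
    static void account_numa_alloc(struct zone *preferred_zone,
                                   struct zone *zone, gfp_t gfp_flags)
    {
            zone_statistics(preferred_zone, zone, gfp_flags);
    }
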
@@ -237,6 +201,12 @@ extern void dec_zone_state(struct zone *, enum zone_stat_item);
 extern void __dec_zone_state(struct zone *, enum zone_stat_item);
 
 void refresh_cpu_vm_stats(int);
+void refresh_zone_stat_thresholds(void);
+
+int calculate_pressure_threshold(struct zone *zone);
+int calculate_normal_threshold(struct zone *zone);
+void set_pgdat_percpu_threshold(pg_data_t *pgdat,
+                               int (*calculate_pressure)(struct zone *));
 #else /* CONFIG_SMP */
 
 /*
@@ -281,7 +251,13 @@ static inline void __dec_zone_page_state(struct page *page,
 #define dec_zone_page_state __dec_zone_page_state
 #define mod_zone_page_state __mod_zone_page_state
 
+#define set_pgdat_percpu_threshold(pgdat, callback) { }
+
 static inline void refresh_cpu_vm_stats(int cpu) { }
-#endif
+static inline void refresh_zone_stat_thresholds(void) { }
+
+#endif         /* CONFIG_SMP */
+
+extern const char * const vmstat_text[];
 
 #endif /* _LINUX_VMSTAT_H */
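
Lastly, vmstat_text[] is now declared in the header: the table names every counter in enum order (zone_stat_item entries first, then the vm_event_item entries), which is how /proc/vmstat pairs names with values. A simplified sketch of that pairing (the seq_file plumbing is assumed):

    /* Simplified from the /proc/vmstat show path. */
    static void show_zone_counters(struct seq_file *m)
    {
            int i;

            for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
                    seq_printf(m, "%s %lu\n", vmstat_text[i],
                               global_page_state(i));
    }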