[PATCH] node local per-cpu-pages
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index beacd931b606f478cc188ec72bb081fc1f966632..4733d35d8223bf847bc6b31ff7e705da8adb45ce 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -63,6 +63,12 @@ struct per_cpu_pageset {
 #endif
 } ____cacheline_aligned_in_smp;
 
+#ifdef CONFIG_NUMA
+#define zone_pcp(__z, __cpu) ((__z)->pageset[(__cpu)])
+#else
+#define zone_pcp(__z, __cpu) (&(__z)->pageset[(__cpu)])
+#endif
+
 #define ZONE_DMA		0
 #define ZONE_NORMAL		1
 #define ZONE_HIGHMEM		2
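The new zone_pcp() accessor lets callers use one expression in both configurations: with CONFIG_NUMA the pageset[] entry is already a pointer to a node-local structure, without it the address of the embedded array element is taken. Below is a minimal, hypothetical sketch of a caller; the real users of the macro live in mm/page_alloc.c and are not part of this header diff.

#include <linux/mmzone.h>

/*
 * Hypothetical helper, for illustration only: read the page count of
 * one CPU's hot (cold == 0) or cold (cold == 1) per-cpu list.
 * zone_pcp() hides whether zone->pageset[cpu] is a pointer (NUMA)
 * or an embedded struct (non-NUMA).
 */
static inline int example_pcp_count(struct zone *zone, int cpu, int cold)
{
	struct per_cpu_pageset *pset = zone_pcp(zone, cpu);

	return pset->pcp[cold].count;
}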
@@ -122,8 +128,11 @@ struct zone {
 	 */
 	unsigned long		lowmem_reserve[MAX_NR_ZONES];
 
+#ifdef CONFIG_NUMA
+	struct per_cpu_pageset	*pageset[NR_CPUS];
+#else
 	struct per_cpu_pageset	pageset[NR_CPUS];
-
+#endif
 	/*
 	 * free areas of different sizes
 	 */
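With CONFIG_NUMA the zone no longer embeds the pagesets; each pageset[cpu] pointer is expected to be filled in with memory allocated on that CPU's own node. Below is a rough, hypothetical sketch of that setup step; the actual allocation code belongs to mm/page_alloc.c and is not shown in this diff, so the helper name and error handling here are assumptions.

#include <linux/slab.h>
#include <linux/mmzone.h>
#include <linux/topology.h>

/*
 * Hypothetical illustration only: allocate every CPU's pageset on that
 * CPU's own node so the hot/cold page lists are node-local. Error
 * unwinding of earlier allocations is omitted to keep the sketch short.
 */
static int example_alloc_zone_pagesets(struct zone *zone)
{
	int cpu;

	for_each_online_cpu(cpu) {
		struct per_cpu_pageset *p;

		p = kmalloc_node(sizeof(*p), GFP_KERNEL, cpu_to_node(cpu));
		if (!p)
			return -ENOMEM;
		memset(p, 0, sizeof(*p));
		zone->pageset[cpu] = p;
	}
	return 0;
}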
@@ -144,6 +153,14 @@ struct zone {
 	unsigned long		pages_scanned;	   /* since last reclaim */
 	int			all_unreclaimable; /* All pages pinned */
 
+	/*
+	 * Does the allocator try to reclaim pages from the zone as soon
+	 * as it fails a watermark_ok() in __alloc_pages?
+	 */
+	int			reclaim_pages;
+	/* A count of how many reclaimers are scanning this zone */
+	atomic_t		reclaim_in_progress;
+
 	/*
 	 * prev_priority holds the scanning priority for this zone. It is
 	 * defined as the scanning priority at which we achieved our reclaim
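The two new fields carry per-zone reclaim state: reclaim_pages records whether the allocator should attempt reclaim from this zone as soon as a watermark check fails, and reclaim_in_progress counts concurrent scanners of the zone. Below is a hypothetical sketch of how a scanner could maintain the counter; the real reclaim changes live in mm/vmscan.c and mm/page_alloc.c, not in this header, and the function name is invented for illustration.

#include <linux/mmzone.h>
#include <linux/atomic.h>

/*
 * Hypothetical illustration only: bracket a scan of the zone's LRU
 * lists with the reclaim_in_progress counter so other code can tell
 * whether someone is already reclaiming from this zone.
 */
static void example_scan_zone(struct zone *zone)
{
	atomic_inc(&zone->reclaim_in_progress);

	/* ... scan and reclaim pages from this zone ... */

	atomic_dec(&zone->reclaim_in_progress);
}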