[PATCH] VM: early zone reclaim
[linux-2.6.git] / mm / page_alloc.c
1 /*
2  *  linux/mm/page_alloc.c
3  *
4  *  Manages the free list; the system allocates free pages here.
5  *  Note that kmalloc() lives in slab.c
6  *
7  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
8  *  Swap reorganised 29.12.95, Stephen Tweedie
9  *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
10  *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
11  *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
12  *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
13  *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
14  *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
15  */
16
17 #include <linux/config.h>
18 #include <linux/stddef.h>
19 #include <linux/mm.h>
20 #include <linux/swap.h>
21 #include <linux/interrupt.h>
22 #include <linux/pagemap.h>
23 #include <linux/bootmem.h>
24 #include <linux/compiler.h>
25 #include <linux/module.h>
26 #include <linux/suspend.h>
27 #include <linux/pagevec.h>
28 #include <linux/blkdev.h>
29 #include <linux/slab.h>
30 #include <linux/notifier.h>
31 #include <linux/topology.h>
32 #include <linux/sysctl.h>
33 #include <linux/cpu.h>
34 #include <linux/cpuset.h>
35 #include <linux/nodemask.h>
36 #include <linux/vmalloc.h>
37
38 #include <asm/tlbflush.h>
39 #include "internal.h"
40
41 /*
42  * MCD - HACK: Find somewhere to initialize this EARLY, or make this
43  * initializer cleaner
44  */
45 nodemask_t node_online_map = { { [0] = 1UL } };
46 EXPORT_SYMBOL(node_online_map);
47 nodemask_t node_possible_map = NODE_MASK_ALL;
48 EXPORT_SYMBOL(node_possible_map);
49 struct pglist_data *pgdat_list;
50 unsigned long totalram_pages;
51 unsigned long totalhigh_pages;
52 long nr_swap_pages;
53
54 /*
55  * results with 256, 32 in the lowmem_reserve sysctl:
56  *      1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
57  *      1G machine -> (16M dma, 784M normal, 224M high)
58  *      NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
59  *      HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
60  *      HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
61  */
62 int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = { 256, 32 };
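
/*
 * Worked example of the ratios above (illustrative only): with the default
 * { 256, 32 } on the 1G machine described, a NORMAL allocation leaves
 * 784M/256 ~= 3M of ZONE_DMA unavailable to it, while a HIGHMEM allocation
 * leaves 224M/32 = 7M of ZONE_NORMAL and (224M+784M)/256 ~= 4M of ZONE_DMA
 * unavailable.  Raising a ratio via the sysctl shrinks the corresponding
 * reserve, since each reserve is the protected zones' pages divided by the
 * ratio.
 */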
63
64 EXPORT_SYMBOL(totalram_pages);
65 EXPORT_SYMBOL(nr_swap_pages);
66
67 /*
68  * Used by page_zone() to look up the address of the struct zone whose
69  * id is encoded in the upper bits of page->flags
70  */
71 struct zone *zone_table[1 << (ZONES_SHIFT + NODES_SHIFT)];
72 EXPORT_SYMBOL(zone_table);
73
74 static char *zone_names[MAX_NR_ZONES] = { "DMA", "Normal", "HighMem" };
75 int min_free_kbytes = 1024;
76
77 unsigned long __initdata nr_kernel_pages;
78 unsigned long __initdata nr_all_pages;
79
80 /*
81  * Temporary debugging check for pages not lying within a given zone.
82  */
83 static int bad_range(struct zone *zone, struct page *page)
84 {
85         if (page_to_pfn(page) >= zone->zone_start_pfn + zone->spanned_pages)
86                 return 1;
87         if (page_to_pfn(page) < zone->zone_start_pfn)
88                 return 1;
89 #ifdef CONFIG_HOLES_IN_ZONE
90         if (!pfn_valid(page_to_pfn(page)))
91                 return 1;
92 #endif
93         if (zone != page_zone(page))
94                 return 1;
95         return 0;
96 }
97
98 static void bad_page(const char *function, struct page *page)
99 {
100         printk(KERN_EMERG "Bad page state at %s (in process '%s', page %p)\n",
101                 function, current->comm, page);
102         printk(KERN_EMERG "flags:0x%0*lx mapping:%p mapcount:%d count:%d\n",
103                 (int)(2*sizeof(page_flags_t)), (unsigned long)page->flags,
104                 page->mapping, page_mapcount(page), page_count(page));
105         printk(KERN_EMERG "Backtrace:\n");
106         dump_stack();
107         printk(KERN_EMERG "Trying to fix it up, but a reboot is needed\n");
108         page->flags &= ~(1 << PG_private        |
109                         1 << PG_locked  |
110                         1 << PG_lru     |
111                         1 << PG_active  |
112                         1 << PG_dirty   |
113                         1 << PG_swapcache |
114                         1 << PG_writeback);
115         set_page_count(page, 0);
116         reset_page_mapcount(page);
117         page->mapping = NULL;
118         tainted |= TAINT_BAD_PAGE;
119 }
120
121 #ifndef CONFIG_HUGETLB_PAGE
122 #define prep_compound_page(page, order) do { } while (0)
123 #define destroy_compound_page(page, order) do { } while (0)
124 #else
125 /*
126  * Higher-order pages are called "compound pages".  They are structured thusly:
127  *
128  * The first PAGE_SIZE page is called the "head page".
129  *
130  * The remaining PAGE_SIZE pages are called "tail pages".
131  *
132  * All pages have PG_compound set.  All pages have their ->private pointing at
133  * the head page (even the head page has this).
134  *
135  * The first tail page's ->mapping, if non-zero, holds the address of the
136  * compound page's put_page() function.
137  *
138  * The order of the allocation is stored in the first tail page's ->index
139  * This is only for debug at present.  This usage means that zero-order pages
140  * may not be compound.
141  */
142 static void prep_compound_page(struct page *page, unsigned long order)
143 {
144         int i;
145         int nr_pages = 1 << order;
146
147         page[1].mapping = NULL;
148         page[1].index = order;
149         for (i = 0; i < nr_pages; i++) {
150                 struct page *p = page + i;
151
152                 SetPageCompound(p);
153                 p->private = (unsigned long)page;
154         }
155 }
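
/*
 * For example (sketch): after prep_compound_page(page, 2) on a 4-page block,
 *
 *      page[0]: PG_compound set, ->private = (unsigned long)page   (head page)
 *      page[1]: PG_compound set, ->private = (unsigned long)page,
 *               ->index = 2, ->mapping = NULL                      (tail page)
 *      page[2]: PG_compound set, ->private = (unsigned long)page   (tail page)
 *      page[3]: PG_compound set, ->private = (unsigned long)page   (tail page)
 *
 * so any page in the block can find the head through ->private, and the
 * order can be read back from the first tail page's ->index.
 */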
156
157 static void destroy_compound_page(struct page *page, unsigned long order)
158 {
159         int i;
160         int nr_pages = 1 << order;
161
162         if (!PageCompound(page))
163                 return;
164
165         if (page[1].index != order)
166                 bad_page(__FUNCTION__, page);
167
168         for (i = 0; i < nr_pages; i++) {
169                 struct page *p = page + i;
170
171                 if (!PageCompound(p))
172                         bad_page(__FUNCTION__, page);
173                 if (p->private != (unsigned long)page)
174                         bad_page(__FUNCTION__, page);
175                 ClearPageCompound(p);
176         }
177 }
178 #endif          /* CONFIG_HUGETLB_PAGE */
179
180 /*
181  * Functions for dealing with a page's order in the buddy system.
182  * zone->lock is already acquired when we use these.
183  * So, we don't need atomic page->flags operations here.
184  */
185 static inline unsigned long page_order(struct page *page) {
186         return page->private;
187 }
188
189 static inline void set_page_order(struct page *page, int order) {
190         page->private = order;
191         __SetPagePrivate(page);
192 }
193
194 static inline void rmv_page_order(struct page *page)
195 {
196         __ClearPagePrivate(page);
197         page->private = 0;
198 }
199
200 /*
201  * Locate the struct page for both the matching buddy in our
202  * pair (buddy1) and the combined order O+1 page they form (page).
203  *
204  * 1) Any buddy B1 will have an order O twin B2 which satisfies
205  * the following equation:
206  *     B2 = B1 ^ (1 << O)
207  * For example, if the starting buddy (B1) is #8, its order
208  * 1 buddy is #10:
209  *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
210  *
211  * 2) Any buddy B will have an order O+1 parent P which
212  * satisfies the following equation:
213  *     P = B & ~(1 << O)
214  *
215  * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
216  */
217 static inline struct page *
218 __page_find_buddy(struct page *page, unsigned long page_idx, unsigned int order)
219 {
220         unsigned long buddy_idx = page_idx ^ (1 << order);
221
222         return page + (buddy_idx - page_idx);
223 }
224
225 static inline unsigned long
226 __find_combined_index(unsigned long page_idx, unsigned int order)
227 {
228         return (page_idx & ~(1 << order));
229 }
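
/*
 * Worked example of the two helpers above (illustrative): for page_idx 8
 * and order 1,
 *
 *      buddy_idx    = 8 ^ (1 << 1)  = 10
 *      combined_idx = 8 & ~(1 << 1) =  8
 *
 * i.e. the order-1 buddy of the block at index 8 is the block at index 10,
 * and if the two merge, the resulting order-2 block starts at index 8.
 */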
230
231 /*
232  * This function checks whether a page is free && is the buddy.
233  * We can coalesce a page and its buddy if
234  * (a) the buddy is free &&
235  * (b) the buddy is on the buddy system &&
236  * (c) a page and its buddy have the same order.
237  * For recording a page's order, we use page->private and PG_private.
238  *
239  */
240 static inline int page_is_buddy(struct page *page, int order)
241 {
242        if (PagePrivate(page)           &&
243            (page_order(page) == order) &&
244            !PageReserved(page)         &&
245             page_count(page) == 0)
246                return 1;
247        return 0;
248 }
249
250 /*
251  * Freeing function for a buddy system allocator.
252  *
253  * The concept of a buddy system is to maintain a direct-mapped table
254  * (containing bit values) for memory blocks of various "orders".
255  * The bottom level table contains the map for the smallest allocatable
256  * units of memory (here, pages), and each level above it describes
257  * pairs of units from the levels below, hence, "buddies".
258  * At a high level, all that happens here is marking the table entry
259  * at the bottom level available, and propagating the changes upward
260  * as necessary, plus some accounting needed to play nicely with other
261  * parts of the VM system.
262  * At each level, we keep a list of pages, which are heads of contiguous
263  * free pages of length (1 << order), marked with PG_private.  A page's
264  * order is recorded in the page->private field.
265  * So when we are allocating or freeing one, we can derive the state of the
266  * other.  That is, if we allocate a small block, and both were   
267  * free, the remainder of the region must be split into blocks.   
268  * If a block is freed, and its buddy is also free, then this
269  * triggers coalescing into a block of larger size.            
270  *
271  * -- wli
272  */
273
274 static inline void __free_pages_bulk (struct page *page,
275                 struct zone *zone, unsigned int order)
276 {
277         unsigned long page_idx;
278         int order_size = 1 << order;
279
280         if (unlikely(order))
281                 destroy_compound_page(page, order);
282
283         page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);
284
285         BUG_ON(page_idx & (order_size - 1));
286         BUG_ON(bad_range(zone, page));
287
288         zone->free_pages += order_size;
289         while (order < MAX_ORDER-1) {
290                 unsigned long combined_idx;
291                 struct free_area *area;
292                 struct page *buddy;
293
294                 combined_idx = __find_combined_index(page_idx, order);
295                 buddy = __page_find_buddy(page, page_idx, order);
296
297                 if (bad_range(zone, buddy))
298                         break;
299                 if (!page_is_buddy(buddy, order))
300                         break;          /* Move the buddy up one level. */
301                 list_del(&buddy->lru);
302                 area = zone->free_area + order;
303                 area->nr_free--;
304                 rmv_page_order(buddy);
305                 page = page + (combined_idx - page_idx);
306                 page_idx = combined_idx;
307                 order++;
308         }
309         set_page_order(page, order);
310         list_add(&page->lru, &zone->free_area[order].free_list);
311         zone->free_area[order].nr_free++;
312 }
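
/*
 * Coalescing sketch (illustrative): freeing the order-0 page at index 9
 * while index 8 is already free finds buddy 8 (9 ^ 1) and merges to an
 * order-1 block at combined index 8 (9 & ~1); if the order-1 block at
 * index 10 is free as well, the loop repeats with buddy 10 (8 ^ 2) and
 * finally places an order-2 block at index 8 on free_area[2].free_list.
 */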
313
314 static inline void free_pages_check(const char *function, struct page *page)
315 {
316         if (    page_mapcount(page) ||
317                 page->mapping != NULL ||
318                 page_count(page) != 0 ||
319                 (page->flags & (
320                         1 << PG_lru     |
321                         1 << PG_private |
322                         1 << PG_locked  |
323                         1 << PG_active  |
324                         1 << PG_reclaim |
325                         1 << PG_slab    |
326                         1 << PG_swapcache |
327                         1 << PG_writeback )))
328                 bad_page(function, page);
329         if (PageDirty(page))
330                 ClearPageDirty(page);
331 }
332
333 /*
334  * Frees a list of pages. 
335  * Assumes all pages on list are in same zone, and of same order.
336  * count is the number of pages to free, or 0 for all on the list.
337  *
338  * If the zone was previously in an "all pages pinned" state then look to
339  * see if this freeing clears that state.
340  *
341  * And clear the zone's pages_scanned counter, to hold off the "all pages are
342  * pinned" detection logic.
343  */
344 static int
345 free_pages_bulk(struct zone *zone, int count,
346                 struct list_head *list, unsigned int order)
347 {
348         unsigned long flags;
349         struct page *page = NULL;
350         int ret = 0;
351
352         spin_lock_irqsave(&zone->lock, flags);
353         zone->all_unreclaimable = 0;
354         zone->pages_scanned = 0;
355         while (!list_empty(list) && count--) {
356                 page = list_entry(list->prev, struct page, lru);
357                 /* have to delete it as __free_pages_bulk manipulates the list */
358                 list_del(&page->lru);
359                 __free_pages_bulk(page, zone, order);
360                 ret++;
361         }
362         spin_unlock_irqrestore(&zone->lock, flags);
363         return ret;
364 }
365
366 void __free_pages_ok(struct page *page, unsigned int order)
367 {
368         LIST_HEAD(list);
369         int i;
370
371         arch_free_page(page, order);
372
373         mod_page_state(pgfree, 1 << order);
374
375 #ifndef CONFIG_MMU
376         if (order > 0)
377                 for (i = 1 ; i < (1 << order) ; ++i)
378                         __put_page(page + i);
379 #endif
380
381         for (i = 0 ; i < (1 << order) ; ++i)
382                 free_pages_check(__FUNCTION__, page + i);
383         list_add(&page->lru, &list);
384         kernel_map_pages(page, 1<<order, 0);
385         free_pages_bulk(page_zone(page), 1, &list, order);
386 }
387
388
389 /*
390  * The order of subdivision here is critical for the IO subsystem.
391  * Please do not alter this order without good reasons and regression
392  * testing. Specifically, as large blocks of memory are subdivided,
393  * the order in which smaller blocks are delivered depends on the order
394  * they're subdivided in this function. This is the primary factor
395  * influencing the order in which pages are delivered to the IO
396  * subsystem according to empirical testing, and this is also justified
397  * by considering the behavior of a buddy system containing a single
398  * large block of memory acted on by a series of small allocations.
399  * This behavior is a critical factor in sglist merging's success.
400  *
401  * -- wli
402  */
403 static inline struct page *
404 expand(struct zone *zone, struct page *page,
405         int low, int high, struct free_area *area)
406 {
407         unsigned long size = 1 << high;
408
409         while (high > low) {
410                 area--;
411                 high--;
412                 size >>= 1;
413                 BUG_ON(bad_range(zone, &page[size]));
414                 list_add(&page[size].lru, &area->free_list);
415                 area->nr_free++;
416                 set_page_order(&page[size], high);
417         }
418         return page;
419 }
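
/*
 * Subdivision sketch (illustrative): satisfying an order-0 request (low = 0)
 * from an order-3 block (high = 3), expand() splits off the upper half one
 * order at a time: pages [4..7] go back on free_area[2], pages [2..3] on
 * free_area[1], page [1] on free_area[0], and page [0] is returned to the
 * caller.
 */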
420
421 void set_page_refs(struct page *page, int order)
422 {
423 #ifdef CONFIG_MMU
424         set_page_count(page, 1);
425 #else
426         int i;
427
428         /*
429          * We need to reference all the pages for this order, otherwise if
430          * anyone accesses one of the pages with (get/put) it will be freed.
431          * - eg: access_process_vm()
432          */
433         for (i = 0; i < (1 << order); i++)
434                 set_page_count(page + i, 1);
435 #endif /* CONFIG_MMU */
436 }
437
438 /*
439  * This page is about to be returned from the page allocator
440  */
441 static void prep_new_page(struct page *page, int order)
442 {
443         if (page->mapping || page_mapcount(page) ||
444             (page->flags & (
445                         1 << PG_private |
446                         1 << PG_locked  |
447                         1 << PG_lru     |
448                         1 << PG_active  |
449                         1 << PG_dirty   |
450                         1 << PG_reclaim |
451                         1 << PG_swapcache |
452                         1 << PG_writeback )))
453                 bad_page(__FUNCTION__, page);
454
455         page->flags &= ~(1 << PG_uptodate | 1 << PG_error |
456                         1 << PG_referenced | 1 << PG_arch_1 |
457                         1 << PG_checked | 1 << PG_mappedtodisk);
458         page->private = 0;
459         set_page_refs(page, order);
460         kernel_map_pages(page, 1 << order, 1);
461 }
462
463 /* 
464  * Do the hard work of removing an element from the buddy allocator.
465  * Call me with the zone->lock already held.
466  */
467 static struct page *__rmqueue(struct zone *zone, unsigned int order)
468 {
469         struct free_area * area;
470         unsigned int current_order;
471         struct page *page;
472
473         for (current_order = order; current_order < MAX_ORDER; ++current_order) {
474                 area = zone->free_area + current_order;
475                 if (list_empty(&area->free_list))
476                         continue;
477
478                 page = list_entry(area->free_list.next, struct page, lru);
479                 list_del(&page->lru);
480                 rmv_page_order(page);
481                 area->nr_free--;
482                 zone->free_pages -= 1UL << order;
483                 return expand(zone, page, order, current_order, area);
484         }
485
486         return NULL;
487 }
488
489 /* 
490  * Obtain a specified number of elements from the buddy allocator, all under
491  * a single hold of the lock, for efficiency.  Add them to the supplied list.
492  * Returns the number of new pages which were placed at *list.
493  */
494 static int rmqueue_bulk(struct zone *zone, unsigned int order, 
495                         unsigned long count, struct list_head *list)
496 {
497         unsigned long flags;
498         int i;
499         int allocated = 0;
500         struct page *page;
501         
502         spin_lock_irqsave(&zone->lock, flags);
503         for (i = 0; i < count; ++i) {
504                 page = __rmqueue(zone, order);
505                 if (page == NULL)
506                         break;
507                 allocated++;
508                 list_add_tail(&page->lru, list);
509         }
510         spin_unlock_irqrestore(&zone->lock, flags);
511         return allocated;
512 }
513
514 #if defined(CONFIG_PM) || defined(CONFIG_HOTPLUG_CPU)
515 static void __drain_pages(unsigned int cpu)
516 {
517         struct zone *zone;
518         int i;
519
520         for_each_zone(zone) {
521                 struct per_cpu_pageset *pset;
522
523                 pset = &zone->pageset[cpu];
524                 for (i = 0; i < ARRAY_SIZE(pset->pcp); i++) {
525                         struct per_cpu_pages *pcp;
526
527                         pcp = &pset->pcp[i];
528                         pcp->count -= free_pages_bulk(zone, pcp->count,
529                                                 &pcp->list, 0);
530                 }
531         }
532 }
533 #endif /* CONFIG_PM || CONFIG_HOTPLUG_CPU */
534
535 #ifdef CONFIG_PM
536
537 void mark_free_pages(struct zone *zone)
538 {
539         unsigned long zone_pfn, flags;
540         int order;
541         struct list_head *curr;
542
543         if (!zone->spanned_pages)
544                 return;
545
546         spin_lock_irqsave(&zone->lock, flags);
547         for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn)
548                 ClearPageNosaveFree(pfn_to_page(zone_pfn + zone->zone_start_pfn));
549
550         for (order = MAX_ORDER - 1; order >= 0; --order)
551                 list_for_each(curr, &zone->free_area[order].free_list) {
552                         unsigned long start_pfn, i;
553
554                         start_pfn = page_to_pfn(list_entry(curr, struct page, lru));
555
556                         for (i=0; i < (1<<order); i++)
557                                 SetPageNosaveFree(pfn_to_page(start_pfn+i));
558         }
559         spin_unlock_irqrestore(&zone->lock, flags);
560 }
561
562 /*
563  * Spill all of this CPU's per-cpu pages back into the buddy allocator.
564  */
565 void drain_local_pages(void)
566 {
567         unsigned long flags;
568
569         local_irq_save(flags);  
570         __drain_pages(smp_processor_id());
571         local_irq_restore(flags);       
572 }
573 #endif /* CONFIG_PM */
574
575 static void zone_statistics(struct zonelist *zonelist, struct zone *z)
576 {
577 #ifdef CONFIG_NUMA
578         unsigned long flags;
579         int cpu;
580         pg_data_t *pg = z->zone_pgdat;
581         pg_data_t *orig = zonelist->zones[0]->zone_pgdat;
582         struct per_cpu_pageset *p;
583
584         local_irq_save(flags);
585         cpu = smp_processor_id();
586         p = &z->pageset[cpu];
587         if (pg == orig) {
588                 z->pageset[cpu].numa_hit++;
589         } else {
590                 p->numa_miss++;
591                 zonelist->zones[0]->pageset[cpu].numa_foreign++;
592         }
593         if (pg == NODE_DATA(numa_node_id()))
594                 p->local_node++;
595         else
596                 p->other_node++;
597         local_irq_restore(flags);
598 #endif
599 }
600
601 /*
602  * Free a 0-order page
603  */
604 static void FASTCALL(free_hot_cold_page(struct page *page, int cold));
605 static void fastcall free_hot_cold_page(struct page *page, int cold)
606 {
607         struct zone *zone = page_zone(page);
608         struct per_cpu_pages *pcp;
609         unsigned long flags;
610
611         arch_free_page(page, 0);
612
613         kernel_map_pages(page, 1, 0);
614         inc_page_state(pgfree);
615         if (PageAnon(page))
616                 page->mapping = NULL;
617         free_pages_check(__FUNCTION__, page);
618         pcp = &zone->pageset[get_cpu()].pcp[cold];
619         local_irq_save(flags);
620         if (pcp->count >= pcp->high)
621                 pcp->count -= free_pages_bulk(zone, pcp->batch, &pcp->list, 0);
622         list_add(&page->lru, &pcp->list);
623         pcp->count++;
624         local_irq_restore(flags);
625         put_cpu();
626 }
627
628 void fastcall free_hot_page(struct page *page)
629 {
630         free_hot_cold_page(page, 0);
631 }
632         
633 void fastcall free_cold_page(struct page *page)
634 {
635         free_hot_cold_page(page, 1);
636 }
637
638 static inline void prep_zero_page(struct page *page, int order, unsigned int __nocast gfp_flags)
639 {
640         int i;
641
642         BUG_ON((gfp_flags & (__GFP_WAIT | __GFP_HIGHMEM)) == __GFP_HIGHMEM);
643         for(i = 0; i < (1 << order); i++)
644                 clear_highpage(page + i);
645 }
646
647 /*
648  * Really, prep_compound_page() should be called from __rmqueue_bulk().  But
649  * we cheat by calling it from here, in the order > 0 path.  Saves a branch
650  * or two.
651  */
652 static struct page *
653 buffered_rmqueue(struct zone *zone, int order, unsigned int __nocast gfp_flags)
654 {
655         unsigned long flags;
656         struct page *page = NULL;
657         int cold = !!(gfp_flags & __GFP_COLD);
658
659         if (order == 0) {
660                 struct per_cpu_pages *pcp;
661
662                 pcp = &zone->pageset[get_cpu()].pcp[cold];
663                 local_irq_save(flags);
664                 if (pcp->count <= pcp->low)
665                         pcp->count += rmqueue_bulk(zone, 0,
666                                                 pcp->batch, &pcp->list);
667                 if (pcp->count) {
668                         page = list_entry(pcp->list.next, struct page, lru);
669                         list_del(&page->lru);
670                         pcp->count--;
671                 }
672                 local_irq_restore(flags);
673                 put_cpu();
674         }
675
676         if (page == NULL) {
677                 spin_lock_irqsave(&zone->lock, flags);
678                 page = __rmqueue(zone, order);
679                 spin_unlock_irqrestore(&zone->lock, flags);
680         }
681
682         if (page != NULL) {
683                 BUG_ON(bad_range(zone, page));
684                 mod_page_state_zone(zone, pgalloc, 1 << order);
685                 prep_new_page(page, order);
686
687                 if (gfp_flags & __GFP_ZERO)
688                         prep_zero_page(page, order, gfp_flags);
689
690                 if (order && (gfp_flags & __GFP_COMP))
691                         prep_compound_page(page, order);
692         }
693         return page;
694 }
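
/*
 * Per-cpu list behaviour (sketch, with made-up pcp values): if pcp->low = 2,
 * pcp->high = 6 and pcp->batch = 2, an order-0 allocation that finds the hot
 * list down at 2 pages pulls 2 more from the buddy lists under a single
 * zone->lock hold, and free_hot_cold_page() finding the list at 6 or more
 * pages pushes a batch of 2 back.  The zone lock is thus taken once per
 * batch rather than once per page.
 */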
695
696 /*
697  * Return 1 if free pages are above 'mark'. This takes into account the order
698  * of the allocation.
699  */
700 int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
701                       int classzone_idx, int can_try_harder, int gfp_high)
702 {
703         /* free_pages may go negative - that's OK */
704         long min = mark, free_pages = z->free_pages - (1 << order) + 1;
705         int o;
706
707         if (gfp_high)
708                 min -= min / 2;
709         if (can_try_harder)
710                 min -= min / 4;
711
712         if (free_pages <= min + z->lowmem_reserve[classzone_idx])
713                 return 0;
714         for (o = 0; o < order; o++) {
715                 /* At the next order, this order's pages become unavailable */
716                 free_pages -= z->free_area[o].nr_free << o;
717
718                 /* Require fewer higher order pages to be free */
719                 min >>= 1;
720
721                 if (free_pages <= min)
722                         return 0;
723         }
724         return 1;
725 }
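
/*
 * Worked example (illustrative): order = 2, mark = 128, no __GFP_HIGH, no
 * can_try_harder, lowmem_reserve 0, and a zone with 200 free pages of which
 * 100 are single pages and 40 are order-1 pairs:
 *
 *      start:  free_pages = 200 - (1 << 2) + 1 = 197,  min = 128  ->  ok
 *      o = 0:  free_pages = 197 - 100 * 1     =  97,   min =  64  ->  ok
 *      o = 1:  free_pages =  97 -  40 * 2     =  17,   min =  32  ->  fail
 *
 * Plenty of memory is free, but too little of it sits in order >= 2 blocks,
 * so the order-2 request is refused even though the zone looks healthy.
 */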
726
727 static inline int
728 should_reclaim_zone(struct zone *z, unsigned int gfp_mask)
729 {
730         if (!z->reclaim_pages)
731                 return 0;
732         return 1;
733 }
734
735 /*
736  * This is the 'heart' of the zoned buddy allocator.
737  */
738 struct page * fastcall
739 __alloc_pages(unsigned int __nocast gfp_mask, unsigned int order,
740                 struct zonelist *zonelist)
741 {
742         const int wait = gfp_mask & __GFP_WAIT;
743         struct zone **zones, *z;
744         struct page *page;
745         struct reclaim_state reclaim_state;
746         struct task_struct *p = current;
747         int i;
748         int classzone_idx;
749         int do_retry;
750         int can_try_harder;
751         int did_some_progress;
752
753         might_sleep_if(wait);
754
755         /*
756          * The caller may dip into page reserves a bit more if the caller
757          * cannot run direct reclaim, or if the caller has a realtime
758          * scheduling policy.
759          */
760         can_try_harder = (unlikely(rt_task(p)) && !in_interrupt()) || !wait;
761
762         zones = zonelist->zones;  /* the list of zones suitable for gfp_mask */
763
764         if (unlikely(zones[0] == NULL)) {
765                 /* Should this ever happen?? */
766                 return NULL;
767         }
768
769         classzone_idx = zone_idx(zones[0]);
770
771 restart:
772         /* Go through the zonelist once, looking for a zone with enough free */
773         for (i = 0; (z = zones[i]) != NULL; i++) {
774                 int do_reclaim = should_reclaim_zone(z, gfp_mask);
775
776                 if (!cpuset_zone_allowed(z))
777                         continue;
778
779                 /*
780                  * If the zone is to attempt early page reclaim then this loop
781                  * will try to reclaim pages and check the watermark a second
782                  * time before giving up and falling back to the next zone.
783                  */
784 zone_reclaim_retry:
785                 if (!zone_watermark_ok(z, order, z->pages_low,
786                                        classzone_idx, 0, 0)) {
787                         if (!do_reclaim)
788                                 continue;
789                         else {
790                                 zone_reclaim(z, gfp_mask, order);
791                                 /* Only try reclaim once */
792                                 do_reclaim = 0;
793                                 goto zone_reclaim_retry;
794                         }
795                 }
796
797                 page = buffered_rmqueue(z, order, gfp_mask);
798                 if (page)
799                         goto got_pg;
800         }
801
802         for (i = 0; (z = zones[i]) != NULL; i++)
803                 wakeup_kswapd(z, order);
804
805         /*
806          * Go through the zonelist again. Let __GFP_HIGH and allocations
807          * coming from realtime tasks go deeper into reserves.
808          *
809          * This is the last chance, in general, before the goto nopage.
810          * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
811          */
812         for (i = 0; (z = zones[i]) != NULL; i++) {
813                 if (!zone_watermark_ok(z, order, z->pages_min,
814                                        classzone_idx, can_try_harder,
815                                        gfp_mask & __GFP_HIGH))
816                         continue;
817
818                 if (wait && !cpuset_zone_allowed(z))
819                         continue;
820
821                 page = buffered_rmqueue(z, order, gfp_mask);
822                 if (page)
823                         goto got_pg;
824         }
825
826         /* This allocation should allow future memory freeing. */
827
828         if (((p->flags & PF_MEMALLOC) || unlikely(test_thread_flag(TIF_MEMDIE)))
829                         && !in_interrupt()) {
830                 if (!(gfp_mask & __GFP_NOMEMALLOC)) {
831                         /* go through the zonelist yet again, ignoring mins */
832                         for (i = 0; (z = zones[i]) != NULL; i++) {
833                                 if (!cpuset_zone_allowed(z))
834                                         continue;
835                                 page = buffered_rmqueue(z, order, gfp_mask);
836                                 if (page)
837                                         goto got_pg;
838                         }
839                 }
840                 goto nopage;
841         }
842
843         /* Atomic allocations - we can't balance anything */
844         if (!wait)
845                 goto nopage;
846
847 rebalance:
848         cond_resched();
849
850         /* We now go into synchronous reclaim */
851         p->flags |= PF_MEMALLOC;
852         reclaim_state.reclaimed_slab = 0;
853         p->reclaim_state = &reclaim_state;
854
855         did_some_progress = try_to_free_pages(zones, gfp_mask, order);
856
857         p->reclaim_state = NULL;
858         p->flags &= ~PF_MEMALLOC;
859
860         cond_resched();
861
862         if (likely(did_some_progress)) {
863                 /*
864                  * Reclaim made some progress: go through the zonelist yet
865                  * one more time, re-checking the normal pages_min watermark
866                  * (with the can_try_harder/__GFP_HIGH adjustments) before
867                  * falling through to the retry logic below.
868                  */
869                 for (i = 0; (z = zones[i]) != NULL; i++) {
870                         if (!zone_watermark_ok(z, order, z->pages_min,
871                                                classzone_idx, can_try_harder,
872                                                gfp_mask & __GFP_HIGH))
873                                 continue;
874
875                         if (!cpuset_zone_allowed(z))
876                                 continue;
877
878                         page = buffered_rmqueue(z, order, gfp_mask);
879                         if (page)
880                                 goto got_pg;
881                 }
882         } else if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) {
883                 /*
884                  * Go through the zonelist yet one more time, keep
885                  * very high watermark here, this is only to catch
886                  * a parallel oom killing, we must fail if we're still
887                  * under heavy pressure.
888                  */
889                 for (i = 0; (z = zones[i]) != NULL; i++) {
890                         if (!zone_watermark_ok(z, order, z->pages_high,
891                                                classzone_idx, 0, 0))
892                                 continue;
893
894                         if (!cpuset_zone_allowed(z))
895                                 continue;
896
897                         page = buffered_rmqueue(z, order, gfp_mask);
898                         if (page)
899                                 goto got_pg;
900                 }
901
902                 out_of_memory(gfp_mask);
903                 goto restart;
904         }
905
906         /*
907          * Don't let big-order allocations loop unless the caller explicitly
908          * requests that.  Wait for some write requests to complete then retry.
909          *
910          * In this implementation, __GFP_REPEAT means __GFP_NOFAIL for order
911          * <= 3, but that may not be true in other implementations.
912          */
913         do_retry = 0;
914         if (!(gfp_mask & __GFP_NORETRY)) {
915                 if ((order <= 3) || (gfp_mask & __GFP_REPEAT))
916                         do_retry = 1;
917                 if (gfp_mask & __GFP_NOFAIL)
918                         do_retry = 1;
919         }
920         if (do_retry) {
921                 blk_congestion_wait(WRITE, HZ/50);
922                 goto rebalance;
923         }
924
925 nopage:
926         if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) {
927                 printk(KERN_WARNING "%s: page allocation failure."
928                         " order:%d, mode:0x%x\n",
929                         p->comm, order, gfp_mask);
930                 dump_stack();
931         }
932         return NULL;
933 got_pg:
934         zone_statistics(zonelist, z);
935         return page;
936 }
937
938 EXPORT_SYMBOL(__alloc_pages);
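
/*
 * Typical usage (illustrative): callers normally reach __alloc_pages()
 * through the alloc_pages() wrapper, which supplies the zonelist for the
 * current node based on the gfp mask, e.g. an order-2 (four page)
 * GFP_KERNEL allocation:
 *
 *      struct page *page = alloc_pages(GFP_KERNEL, 2);
 *      if (page) {
 *              void *addr = page_address(page);
 *              ...
 *              __free_pages(page, 2);
 *      }
 */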
939
940 /*
941  * Common helper functions.
942  */
943 fastcall unsigned long __get_free_pages(unsigned int __nocast gfp_mask, unsigned int order)
944 {
945         struct page * page;
946         page = alloc_pages(gfp_mask, order);
947         if (!page)
948                 return 0;
949         return (unsigned long) page_address(page);
950 }
951
952 EXPORT_SYMBOL(__get_free_pages);
953
954 fastcall unsigned long get_zeroed_page(unsigned int __nocast gfp_mask)
955 {
956         struct page * page;
957
958         /*
959          * get_zeroed_page() returns a 32-bit address, which cannot represent
960          * a highmem page
961          */
962         BUG_ON(gfp_mask & __GFP_HIGHMEM);
963
964         page = alloc_pages(gfp_mask | __GFP_ZERO, 0);
965         if (page)
966                 return (unsigned long) page_address(page);
967         return 0;
968 }
969
970 EXPORT_SYMBOL(get_zeroed_page);
971
972 void __pagevec_free(struct pagevec *pvec)
973 {
974         int i = pagevec_count(pvec);
975
976         while (--i >= 0)
977                 free_hot_cold_page(pvec->pages[i], pvec->cold);
978 }
979
980 fastcall void __free_pages(struct page *page, unsigned int order)
981 {
982         if (!PageReserved(page) && put_page_testzero(page)) {
983                 if (order == 0)
984                         free_hot_page(page);
985                 else
986                         __free_pages_ok(page, order);
987         }
988 }
989
990 EXPORT_SYMBOL(__free_pages);
991
992 fastcall void free_pages(unsigned long addr, unsigned int order)
993 {
994         if (addr != 0) {
995                 BUG_ON(!virt_addr_valid((void *)addr));
996                 __free_pages(virt_to_page((void *)addr), order);
997         }
998 }
999
1000 EXPORT_SYMBOL(free_pages);
1001
1002 /*
1003  * Total amount of free (allocatable) RAM:
1004  */
1005 unsigned int nr_free_pages(void)
1006 {
1007         unsigned int sum = 0;
1008         struct zone *zone;
1009
1010         for_each_zone(zone)
1011                 sum += zone->free_pages;
1012
1013         return sum;
1014 }
1015
1016 EXPORT_SYMBOL(nr_free_pages);
1017
1018 #ifdef CONFIG_NUMA
1019 unsigned int nr_free_pages_pgdat(pg_data_t *pgdat)
1020 {
1021         unsigned int i, sum = 0;
1022
1023         for (i = 0; i < MAX_NR_ZONES; i++)
1024                 sum += pgdat->node_zones[i].free_pages;
1025
1026         return sum;
1027 }
1028 #endif
1029
1030 static unsigned int nr_free_zone_pages(int offset)
1031 {
1032         pg_data_t *pgdat;
1033         unsigned int sum = 0;
1034
1035         for_each_pgdat(pgdat) {
1036                 struct zonelist *zonelist = pgdat->node_zonelists + offset;
1037                 struct zone **zonep = zonelist->zones;
1038                 struct zone *zone;
1039
1040                 for (zone = *zonep++; zone; zone = *zonep++) {
1041                         unsigned long size = zone->present_pages;
1042                         unsigned long high = zone->pages_high;
1043                         if (size > high)
1044                                 sum += size - high;
1045                 }
1046         }
1047
1048         return sum;
1049 }
1050
1051 /*
1052  * Amount of free RAM allocatable within ZONE_DMA and ZONE_NORMAL
1053  */
1054 unsigned int nr_free_buffer_pages(void)
1055 {
1056         return nr_free_zone_pages(GFP_USER & GFP_ZONEMASK);
1057 }
1058
1059 /*
1060  * Amount of free RAM allocatable within all zones
1061  */
1062 unsigned int nr_free_pagecache_pages(void)
1063 {
1064         return nr_free_zone_pages(GFP_HIGHUSER & GFP_ZONEMASK);
1065 }
1066
1067 #ifdef CONFIG_HIGHMEM
1068 unsigned int nr_free_highpages (void)
1069 {
1070         pg_data_t *pgdat;
1071         unsigned int pages = 0;
1072
1073         for_each_pgdat(pgdat)
1074                 pages += pgdat->node_zones[ZONE_HIGHMEM].free_pages;
1075
1076         return pages;
1077 }
1078 #endif
1079
1080 #ifdef CONFIG_NUMA
1081 static void show_node(struct zone *zone)
1082 {
1083         printk("Node %d ", zone->zone_pgdat->node_id);
1084 }
1085 #else
1086 #define show_node(zone) do { } while (0)
1087 #endif
1088
1089 /*
1090  * Accumulate the page_state information across all CPUs.
1091  * The result is unavoidably approximate - it can change
1092  * during and after execution of this function.
1093  */
1094 static DEFINE_PER_CPU(struct page_state, page_states) = {0};
1095
1096 atomic_t nr_pagecache = ATOMIC_INIT(0);
1097 EXPORT_SYMBOL(nr_pagecache);
1098 #ifdef CONFIG_SMP
1099 DEFINE_PER_CPU(long, nr_pagecache_local) = 0;
1100 #endif
1101
1102 void __get_page_state(struct page_state *ret, int nr)
1103 {
1104         int cpu = 0;
1105
1106         memset(ret, 0, sizeof(*ret));
1107
1108         cpu = first_cpu(cpu_online_map);
1109         while (cpu < NR_CPUS) {
1110                 unsigned long *in, *out, off;
1111
1112                 in = (unsigned long *)&per_cpu(page_states, cpu);
1113
1114                 cpu = next_cpu(cpu, cpu_online_map);
1115
1116                 if (cpu < NR_CPUS)
1117                         prefetch(&per_cpu(page_states, cpu));
1118
1119                 out = (unsigned long *)ret;
1120                 for (off = 0; off < nr; off++)
1121                         *out++ += *in++;
1122         }
1123 }
1124
1125 void get_page_state(struct page_state *ret)
1126 {
1127         int nr;
1128
1129         nr = offsetof(struct page_state, GET_PAGE_STATE_LAST);
1130         nr /= sizeof(unsigned long);
1131
1132         __get_page_state(ret, nr + 1);
1133 }
1134
1135 void get_full_page_state(struct page_state *ret)
1136 {
1137         __get_page_state(ret, sizeof(*ret) / sizeof(unsigned long));
1138 }
1139
1140 unsigned long __read_page_state(unsigned offset)
1141 {
1142         unsigned long ret = 0;
1143         int cpu;
1144
1145         for_each_online_cpu(cpu) {
1146                 unsigned long in;
1147
1148                 in = (unsigned long)&per_cpu(page_states, cpu) + offset;
1149                 ret += *((unsigned long *)in);
1150         }
1151         return ret;
1152 }
1153
1154 void __mod_page_state(unsigned offset, unsigned long delta)
1155 {
1156         unsigned long flags;
1157         void* ptr;
1158
1159         local_irq_save(flags);
1160         ptr = &__get_cpu_var(page_states);
1161         *(unsigned long*)(ptr + offset) += delta;
1162         local_irq_restore(flags);
1163 }
1164
1165 EXPORT_SYMBOL(__mod_page_state);
1166
1167 void __get_zone_counts(unsigned long *active, unsigned long *inactive,
1168                         unsigned long *free, struct pglist_data *pgdat)
1169 {
1170         struct zone *zones = pgdat->node_zones;
1171         int i;
1172
1173         *active = 0;
1174         *inactive = 0;
1175         *free = 0;
1176         for (i = 0; i < MAX_NR_ZONES; i++) {
1177                 *active += zones[i].nr_active;
1178                 *inactive += zones[i].nr_inactive;
1179                 *free += zones[i].free_pages;
1180         }
1181 }
1182
1183 void get_zone_counts(unsigned long *active,
1184                 unsigned long *inactive, unsigned long *free)
1185 {
1186         struct pglist_data *pgdat;
1187
1188         *active = 0;
1189         *inactive = 0;
1190         *free = 0;
1191         for_each_pgdat(pgdat) {
1192                 unsigned long l, m, n;
1193                 __get_zone_counts(&l, &m, &n, pgdat);
1194                 *active += l;
1195                 *inactive += m;
1196                 *free += n;
1197         }
1198 }
1199
1200 void si_meminfo(struct sysinfo *val)
1201 {
1202         val->totalram = totalram_pages;
1203         val->sharedram = 0;
1204         val->freeram = nr_free_pages();
1205         val->bufferram = nr_blockdev_pages();
1206 #ifdef CONFIG_HIGHMEM
1207         val->totalhigh = totalhigh_pages;
1208         val->freehigh = nr_free_highpages();
1209 #else
1210         val->totalhigh = 0;
1211         val->freehigh = 0;
1212 #endif
1213         val->mem_unit = PAGE_SIZE;
1214 }
1215
1216 EXPORT_SYMBOL(si_meminfo);
1217
1218 #ifdef CONFIG_NUMA
1219 void si_meminfo_node(struct sysinfo *val, int nid)
1220 {
1221         pg_data_t *pgdat = NODE_DATA(nid);
1222
1223         val->totalram = pgdat->node_present_pages;
1224         val->freeram = nr_free_pages_pgdat(pgdat);
1225         val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].present_pages;
1226         val->freehigh = pgdat->node_zones[ZONE_HIGHMEM].free_pages;
1227         val->mem_unit = PAGE_SIZE;
1228 }
1229 #endif
1230
1231 #define K(x) ((x) << (PAGE_SHIFT-10))
1232
1233 /*
1234  * Show free area list (used inside shift_scroll-lock stuff)
1235  * We also calculate the percentage fragmentation. We do this by counting the
1236  * memory on each free list with the exception of the first item on the list.
1237  */
1238 void show_free_areas(void)
1239 {
1240         struct page_state ps;
1241         int cpu, temperature;
1242         unsigned long active;
1243         unsigned long inactive;
1244         unsigned long free;
1245         struct zone *zone;
1246
1247         for_each_zone(zone) {
1248                 show_node(zone);
1249                 printk("%s per-cpu:", zone->name);
1250
1251                 if (!zone->present_pages) {
1252                         printk(" empty\n");
1253                         continue;
1254                 } else
1255                         printk("\n");
1256
1257                 for (cpu = 0; cpu < NR_CPUS; ++cpu) {
1258                         struct per_cpu_pageset *pageset;
1259
1260                         if (!cpu_possible(cpu))
1261                                 continue;
1262
1263                         pageset = zone->pageset + cpu;
1264
1265                         for (temperature = 0; temperature < 2; temperature++)
1266                                 printk("cpu %d %s: low %d, high %d, batch %d\n",
1267                                         cpu,
1268                                         temperature ? "cold" : "hot",
1269                                         pageset->pcp[temperature].low,
1270                                         pageset->pcp[temperature].high,
1271                                         pageset->pcp[temperature].batch);
1272                 }
1273         }
1274
1275         get_page_state(&ps);
1276         get_zone_counts(&active, &inactive, &free);
1277
1278         printk("\nFree pages: %11ukB (%ukB HighMem)\n",
1279                 K(nr_free_pages()),
1280                 K(nr_free_highpages()));
1281
1282         printk("Active:%lu inactive:%lu dirty:%lu writeback:%lu "
1283                 "unstable:%lu free:%u slab:%lu mapped:%lu pagetables:%lu\n",
1284                 active,
1285                 inactive,
1286                 ps.nr_dirty,
1287                 ps.nr_writeback,
1288                 ps.nr_unstable,
1289                 nr_free_pages(),
1290                 ps.nr_slab,
1291                 ps.nr_mapped,
1292                 ps.nr_page_table_pages);
1293
1294         for_each_zone(zone) {
1295                 int i;
1296
1297                 show_node(zone);
1298                 printk("%s"
1299                         " free:%lukB"
1300                         " min:%lukB"
1301                         " low:%lukB"
1302                         " high:%lukB"
1303                         " active:%lukB"
1304                         " inactive:%lukB"
1305                         " present:%lukB"
1306                         " pages_scanned:%lu"
1307                         " all_unreclaimable? %s"
1308                         "\n",
1309                         zone->name,
1310                         K(zone->free_pages),
1311                         K(zone->pages_min),
1312                         K(zone->pages_low),
1313                         K(zone->pages_high),
1314                         K(zone->nr_active),
1315                         K(zone->nr_inactive),
1316                         K(zone->present_pages),
1317                         zone->pages_scanned,
1318                         (zone->all_unreclaimable ? "yes" : "no")
1319                         );
1320                 printk("lowmem_reserve[]:");
1321                 for (i = 0; i < MAX_NR_ZONES; i++)
1322                         printk(" %lu", zone->lowmem_reserve[i]);
1323                 printk("\n");
1324         }
1325
1326         for_each_zone(zone) {
1327                 unsigned long nr, flags, order, total = 0;
1328
1329                 show_node(zone);
1330                 printk("%s: ", zone->name);
1331                 if (!zone->present_pages) {
1332                         printk("empty\n");
1333                         continue;
1334                 }
1335
1336                 spin_lock_irqsave(&zone->lock, flags);
1337                 for (order = 0; order < MAX_ORDER; order++) {
1338                         nr = zone->free_area[order].nr_free;
1339                         total += nr << order;
1340                         printk("%lu*%lukB ", nr, K(1UL) << order);
1341                 }
1342                 spin_unlock_irqrestore(&zone->lock, flags);
1343                 printk("= %lukB\n", K(total));
1344         }
1345
1346         show_swap_cache_info();
1347 }
1348
1349 /*
1350  * Builds allocation fallback zone lists.
1351  */
1352 static int __init build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist, int j, int k)
1353 {
1354         switch (k) {
1355                 struct zone *zone;
1356         default:
1357                 BUG();
1358         case ZONE_HIGHMEM:
1359                 zone = pgdat->node_zones + ZONE_HIGHMEM;
1360                 if (zone->present_pages) {
1361 #ifndef CONFIG_HIGHMEM
1362                         BUG();
1363 #endif
1364                         zonelist->zones[j++] = zone;
1365                 }
1366         case ZONE_NORMAL:
1367                 zone = pgdat->node_zones + ZONE_NORMAL;
1368                 if (zone->present_pages)
1369                         zonelist->zones[j++] = zone;
1370         case ZONE_DMA:
1371                 zone = pgdat->node_zones + ZONE_DMA;
1372                 if (zone->present_pages)
1373                         zonelist->zones[j++] = zone;
1374         }
1375
1376         return j;
1377 }
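
/*
 * For example (illustrative): with k == ZONE_HIGHMEM the switch falls
 * through every case, so a node that has all three zones present ends up
 * with the fallback order
 *
 *      zonelist->zones[] = { HighMem, Normal, DMA }
 *
 * while k == ZONE_NORMAL starts at the Normal case and yields { Normal, DMA }.
 */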
1378
1379 #ifdef CONFIG_NUMA
1380 #define MAX_NODE_LOAD (num_online_nodes())
1381 static int __initdata node_load[MAX_NUMNODES];
1382 /**
1383  * find_next_best_node - find the next node that should appear in a given node's fallback list
1384  * @node: node whose fallback list we're appending
1385  * @used_node_mask: nodemask_t of already used nodes
1386  *
1387  * We use a number of factors to determine which is the next node that should
1388  * appear on a given node's fallback list.  The node should not have appeared
1389  * already in @node's fallback list, and it should be the next closest node
1390  * according to the distance array (which contains arbitrary distance values
1391  * from each node to each node in the system), and should also prefer nodes
1392  * with no CPUs, since presumably they'll have very little allocation pressure
1393  * on them otherwise.
1394  * It returns -1 if no node is found.
1395  */
1396 static int __init find_next_best_node(int node, nodemask_t *used_node_mask)
1397 {
1398         int i, n, val;
1399         int min_val = INT_MAX;
1400         int best_node = -1;
1401
1402         for_each_online_node(i) {
1403                 cpumask_t tmp;
1404
1405                 /* Start from local node */
1406                 n = (node+i) % num_online_nodes();
1407
1408                 /* Don't want a node to appear more than once */
1409                 if (node_isset(n, *used_node_mask))
1410                         continue;
1411
1412                 /* Use the local node if we haven't already */
1413                 if (!node_isset(node, *used_node_mask)) {
1414                         best_node = node;
1415                         break;
1416                 }
1417
1418                 /* Use the distance array to find the distance */
1419                 val = node_distance(node, n);
1420
1421                 /* Give preference to headless and unused nodes */
1422                 tmp = node_to_cpumask(n);
1423                 if (!cpus_empty(tmp))
1424                         val += PENALTY_FOR_NODE_WITH_CPUS;
1425
1426                 /* Slight preference for less loaded node */
1427                 val *= (MAX_NODE_LOAD*MAX_NUMNODES);
1428                 val += node_load[n];
1429
1430                 if (val < min_val) {
1431                         min_val = val;
1432                         best_node = n;
1433                 }
1434         }
1435
1436         if (best_node >= 0)
1437                 node_set(best_node, *used_node_mask);
1438
1439         return best_node;
1440 }
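
/*
 * Example (illustrative): on a four-node machine where node 0 is local,
 * nodes 1 and 2 are one hop away and node 3 is two hops away, successive
 * calls for node 0 return 0 first (the local node), then 1 and 2 (nearest
 * first, with node_load and the preference for CPU-less nodes breaking
 * ties), and node 3 last.
 */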
1441
1442 static void __init build_zonelists(pg_data_t *pgdat)
1443 {
1444         int i, j, k, node, local_node;
1445         int prev_node, load;
1446         struct zonelist *zonelist;
1447         nodemask_t used_mask;
1448
1449         /* initialize zonelists */
1450         for (i = 0; i < GFP_ZONETYPES; i++) {
1451                 zonelist = pgdat->node_zonelists + i;
1452                 zonelist->zones[0] = NULL;
1453         }
1454
1455         /* NUMA-aware ordering of nodes */
1456         local_node = pgdat->node_id;
1457         load = num_online_nodes();
1458         prev_node = local_node;
1459         nodes_clear(used_mask);
1460         while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
1461                 /*
1462                  * We don't want to pressure a particular node.
1463                  * So adding penalty to the first node in same
1464                  * So we add a penalty to the first node in the same
1465                  * distance group, to make the ordering round-robin.
1466                 if (node_distance(local_node, node) !=
1467                                 node_distance(local_node, prev_node))
1468                         node_load[node] += load;
1469                 prev_node = node;
1470                 load--;
1471                 for (i = 0; i < GFP_ZONETYPES; i++) {
1472                         zonelist = pgdat->node_zonelists + i;
1473                         for (j = 0; zonelist->zones[j] != NULL; j++);
1474
1475                         k = ZONE_NORMAL;
1476                         if (i & __GFP_HIGHMEM)
1477                                 k = ZONE_HIGHMEM;
1478                         if (i & __GFP_DMA)
1479                                 k = ZONE_DMA;
1480
1481                         j = build_zonelists_node(NODE_DATA(node), zonelist, j, k);
1482                         zonelist->zones[j] = NULL;
1483                 }
1484         }
1485 }
1486
1487 #else   /* CONFIG_NUMA */
1488
1489 static void __init build_zonelists(pg_data_t *pgdat)
1490 {
1491         int i, j, k, node, local_node;
1492
1493         local_node = pgdat->node_id;
1494         for (i = 0; i < GFP_ZONETYPES; i++) {
1495                 struct zonelist *zonelist;
1496
1497                 zonelist = pgdat->node_zonelists + i;
1498
1499                 j = 0;
1500                 k = ZONE_NORMAL;
1501                 if (i & __GFP_HIGHMEM)
1502                         k = ZONE_HIGHMEM;
1503                 if (i & __GFP_DMA)
1504                         k = ZONE_DMA;
1505
1506                 j = build_zonelists_node(pgdat, zonelist, j, k);
1507                 /*
1508                  * Now we build the zonelist so that it contains the zones
1509                  * of all the other nodes.
1510                  * We don't want to pressure a particular node, so when
1511                  * building the zones for node N, we make sure that the
1512                  * zones coming right after the local ones are those from
1513                  * node N+1 (modulo N)
1514                  */
1515                 for (node = local_node + 1; node < MAX_NUMNODES; node++) {
1516                         if (!node_online(node))
1517                                 continue;
1518                         j = build_zonelists_node(NODE_DATA(node), zonelist, j, k);
1519                 }
1520                 for (node = 0; node < local_node; node++) {
1521                         if (!node_online(node))
1522                                 continue;
1523                         j = build_zonelists_node(NODE_DATA(node), zonelist, j, k);
1524                 }
1525
1526                 zonelist->zones[j] = NULL;
1527         }
1528 }
1529
1530 #endif  /* CONFIG_NUMA */
1531
1532 void __init build_all_zonelists(void)
1533 {
1534         int i;
1535
1536         for_each_online_node(i)
1537                 build_zonelists(NODE_DATA(i));
1538         printk("Built %i zonelists\n", num_online_nodes());
1539         cpuset_init_current_mems_allowed();
1540 }
1541
1542 /*
1543  * Helper functions to size the waitqueue hash table.
1544  * Essentially these want to choose hash table sizes sufficiently
1545  * large so that collisions trying to wait on pages are rare.
1546  * But in fact, the number of active page waitqueues on typical
1547  * systems is ridiculously low, less than 200. So even this is
1548  * conservative, even though it seems large.
1549  *
1550  * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to
1551  * waitqueues, i.e. the size of the waitq table given the number of pages.
1552  */
1553 #define PAGES_PER_WAITQUEUE     256
1554
1555 static inline unsigned long wait_table_size(unsigned long pages)
1556 {
1557         unsigned long size = 1;
1558
1559         pages /= PAGES_PER_WAITQUEUE;
1560
1561         while (size < pages)
1562                 size <<= 1;
1563
1564         /*
1565          * Once we have dozens or even hundreds of threads sleeping
1566          * on IO we've got bigger problems than wait queue collision.
1567          * Limit the size of the wait table to a reasonable size.
1568          */
1569         size = min(size, 4096UL);
1570
1571         return max(size, 4UL);
1572 }
1573
1574 /*
1575  * This is an integer logarithm so that shifts can be used later
1576  * to extract the more random high bits from the multiplicative
1577  * hash function before the remainder is taken.
1578  */
1579 static inline unsigned long wait_table_bits(unsigned long size)
1580 {
1581         return ffz(~size);
1582 }
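     /*
      * For the power-of-two table sizes produced by wait_table_size(),
      * ffz(~size) is simply log2(size): e.g. for size == 1024 the lowest
      * zero bit of ~size is bit 10, so wait_table_bits() returns 10.
      */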
1583
1584 #define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
1585
1586 static void __init calculate_zone_totalpages(struct pglist_data *pgdat,
1587                 unsigned long *zones_size, unsigned long *zholes_size)
1588 {
1589         unsigned long realtotalpages, totalpages = 0;
1590         int i;
1591
1592         for (i = 0; i < MAX_NR_ZONES; i++)
1593                 totalpages += zones_size[i];
1594         pgdat->node_spanned_pages = totalpages;
1595
1596         realtotalpages = totalpages;
1597         if (zholes_size)
1598                 for (i = 0; i < MAX_NR_ZONES; i++)
1599                         realtotalpages -= zholes_size[i];
1600         pgdat->node_present_pages = realtotalpages;
1601         printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id, realtotalpages);
1602 }
1603
1604
1605 /*
1606  * Initially all pages are reserved - free ones are freed
1607  * up by free_all_bootmem() once the early boot process is
1608  * done. Non-atomic initialization, single-pass.
1609  */
1610 void __init memmap_init_zone(unsigned long size, int nid, unsigned long zone,
1611                 unsigned long start_pfn)
1612 {
1613         struct page *start = pfn_to_page(start_pfn);
1614         struct page *page;
1615
1616         for (page = start; page < (start + size); page++) {
1617                 set_page_zone(page, NODEZONE(nid, zone));
1618                 set_page_count(page, 0);
1619                 reset_page_mapcount(page);
1620                 SetPageReserved(page);
1621                 INIT_LIST_HEAD(&page->lru);
1622 #ifdef WANT_PAGE_VIRTUAL
1623                 /* The shift won't overflow because ZONE_NORMAL is below 4G. */
1624                 if (!is_highmem_idx(zone))
1625                         set_page_address(page, __va(start_pfn << PAGE_SHIFT));
1626 #endif
1627                 start_pfn++;
1628         }
1629 }
1630
1631 void zone_init_free_lists(struct pglist_data *pgdat, struct zone *zone,
1632                                 unsigned long size)
1633 {
1634         int order;
1635         for (order = 0; order < MAX_ORDER ; order++) {
1636                 INIT_LIST_HEAD(&zone->free_area[order].free_list);
1637                 zone->free_area[order].nr_free = 0;
1638         }
1639 }
1640
1641 #ifndef __HAVE_ARCH_MEMMAP_INIT
1642 #define memmap_init(size, nid, zone, start_pfn) \
1643         memmap_init_zone((size), (nid), (zone), (start_pfn))
1644 #endif
1645
1646 /*
1647  * Set up the zone data structures:
1648  *   - mark all pages reserved
1649  *   - mark all memory queues empty
1650  *   - clear the memory bitmaps
1651  */
1652 static void __init free_area_init_core(struct pglist_data *pgdat,
1653                 unsigned long *zones_size, unsigned long *zholes_size)
1654 {
1655         unsigned long i, j;
1656         const unsigned long zone_required_alignment = 1UL << (MAX_ORDER-1);
1657         int cpu, nid = pgdat->node_id;
1658         unsigned long zone_start_pfn = pgdat->node_start_pfn;
1659
1660         pgdat->nr_zones = 0;
1661         init_waitqueue_head(&pgdat->kswapd_wait);
1662         pgdat->kswapd_max_order = 0;
1663         
1664         for (j = 0; j < MAX_NR_ZONES; j++) {
1665                 struct zone *zone = pgdat->node_zones + j;
1666                 unsigned long size, realsize;
1667                 unsigned long batch;
1668
1669                 zone_table[NODEZONE(nid, j)] = zone;
1670                 realsize = size = zones_size[j];
1671                 if (zholes_size)
1672                         realsize -= zholes_size[j];
1673
1674                 if (j == ZONE_DMA || j == ZONE_NORMAL)
1675                         nr_kernel_pages += realsize;
1676                 nr_all_pages += realsize;
1677
1678                 zone->spanned_pages = size;
1679                 zone->present_pages = realsize;
1680                 zone->name = zone_names[j];
1681                 spin_lock_init(&zone->lock);
1682                 spin_lock_init(&zone->lru_lock);
1683                 zone->zone_pgdat = pgdat;
1684                 zone->free_pages = 0;
1685
1686                 zone->temp_priority = zone->prev_priority = DEF_PRIORITY;
1687
1688                 /*
1689                  * The per-cpu-pages pools are set to around 1/1000th of the
1690                  * size of the zone.  But no more than 1/4 of a meg - there's
1691                  * no point in going beyond the size of L2 cache.
1692                  *
1693                  * OK, so we don't know how big the cache is.  So guess.
1694                  */
1695                 batch = zone->present_pages / 1024;
1696                 if (batch * PAGE_SIZE > 256 * 1024)
1697                         batch = (256 * 1024) / PAGE_SIZE;
1698                 batch /= 4;             /* We effectively *= 4 below */
1699                 if (batch < 1)
1700                         batch = 1;
1701
1702                 /*
1703                  * Clamp the batch to a 2^n - 1 value. Having a power
1704                  * of 2 value was found to be more likely to have
1705                  * suboptimal cache aliasing properties in some cases.
1706                  *
1707                  * For example if 2 tasks are alternately allocating
1708                  * batches of pages, one task can end up with a lot
1709                  * of pages of one half of the possible page colors
1710                  * and the other with pages of the other colors.
1711                  */
1712                 batch = (1 << fls(batch + batch/2)) - 1;
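                     /*
                      * Worked example (illustrative, assuming 4K pages and a
                      * zone of 262144 present pages): 262144/1024 = 256, capped
                      * at 256K/4K = 64 pages, then /4 gives 16, and the clamp
                      * above yields (1 << fls(16 + 8)) - 1 = 31.  The hot pcp
                      * lists set up below then run with low = 62, high = 186,
                      * batch = 31.
                      */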
1713
1714                 for (cpu = 0; cpu < NR_CPUS; cpu++) {
1715                         struct per_cpu_pages *pcp;
1716
1717                         pcp = &zone->pageset[cpu].pcp[0];       /* hot */
1718                         pcp->count = 0;
1719                         pcp->low = 2 * batch;
1720                         pcp->high = 6 * batch;
1721                         pcp->batch = 1 * batch;
1722                         INIT_LIST_HEAD(&pcp->list);
1723
1724                         pcp = &zone->pageset[cpu].pcp[1];       /* cold */
1725                         pcp->count = 0;
1726                         pcp->low = 0;
1727                         pcp->high = 2 * batch;
1728                         pcp->batch = 1 * batch;
1729                         INIT_LIST_HEAD(&pcp->list);
1730                 }
1731                 printk(KERN_DEBUG "  %s zone: %lu pages, LIFO batch:%lu\n",
1732                                 zone_names[j], realsize, batch);
1733                 INIT_LIST_HEAD(&zone->active_list);
1734                 INIT_LIST_HEAD(&zone->inactive_list);
1735                 zone->nr_scan_active = 0;
1736                 zone->nr_scan_inactive = 0;
1737                 zone->nr_active = 0;
1738                 zone->nr_inactive = 0;
1739                 if (!size)
1740                         continue;
1741
1742                 /*
1743                  * The per-page waitqueue mechanism uses hashed waitqueues
1744                  * per zone.
1745                  */
1746                 zone->wait_table_size = wait_table_size(size);
1747                 zone->wait_table_bits =
1748                         wait_table_bits(zone->wait_table_size);
1749                 zone->wait_table = (wait_queue_head_t *)
1750                         alloc_bootmem_node(pgdat, zone->wait_table_size
1751                                                 * sizeof(wait_queue_head_t));
1752
1753                 for(i = 0; i < zone->wait_table_size; ++i)
1754                         init_waitqueue_head(zone->wait_table + i);
1755
1756                 pgdat->nr_zones = j+1;
1757
1758                 zone->zone_mem_map = pfn_to_page(zone_start_pfn);
1759                 zone->zone_start_pfn = zone_start_pfn;
1760
1761                 if ((zone_start_pfn) & (zone_required_alignment-1))
1762                         printk(KERN_CRIT "BUG: wrong zone alignment, it will crash\n");
1763
1764                 memmap_init(size, nid, j, zone_start_pfn);
1765
1766                 zone_start_pfn += size;
1767
1768                 zone_init_free_lists(pgdat, zone, zone->spanned_pages);
1769         }
1770 }
1771
1772 static void __init alloc_node_mem_map(struct pglist_data *pgdat)
1773 {
1774         unsigned long size;
1775
1776         /* Skip empty nodes */
1777         if (!pgdat->node_spanned_pages)
1778                 return;
1779
1780         /* ia64 gets its own node_mem_map, before this, without bootmem */
1781         if (!pgdat->node_mem_map) {
1782                 size = (pgdat->node_spanned_pages + 1) * sizeof(struct page);
1783                 pgdat->node_mem_map = alloc_bootmem_node(pgdat, size);
1784         }
1785 #ifndef CONFIG_DISCONTIGMEM
1786         /*
1787          * With no DISCONTIG, the global mem_map is just set as node 0's
1788          */
1789         if (pgdat == NODE_DATA(0))
1790                 mem_map = NODE_DATA(0)->node_mem_map;
1791 #endif
1792 }
1793
1794 void __init free_area_init_node(int nid, struct pglist_data *pgdat,
1795                 unsigned long *zones_size, unsigned long node_start_pfn,
1796                 unsigned long *zholes_size)
1797 {
1798         pgdat->node_id = nid;
1799         pgdat->node_start_pfn = node_start_pfn;
1800         calculate_zone_totalpages(pgdat, zones_size, zholes_size);
1801
1802         alloc_node_mem_map(pgdat);
1803
1804         free_area_init_core(pgdat, zones_size, zholes_size);
1805 }
1806
1807 #ifndef CONFIG_DISCONTIGMEM
1808 static bootmem_data_t contig_bootmem_data;
1809 struct pglist_data contig_page_data = { .bdata = &contig_bootmem_data };
1810
1811 EXPORT_SYMBOL(contig_page_data);
1812
1813 void __init free_area_init(unsigned long *zones_size)
1814 {
1815         free_area_init_node(0, &contig_page_data, zones_size,
1816                         __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
1817 }
1818 #endif
1819
1820 #ifdef CONFIG_PROC_FS
1821
1822 #include <linux/seq_file.h>
1823
1824 static void *frag_start(struct seq_file *m, loff_t *pos)
1825 {
1826         pg_data_t *pgdat;
1827         loff_t node = *pos;
1828
1829         for (pgdat = pgdat_list; pgdat && node; pgdat = pgdat->pgdat_next)
1830                 --node;
1831
1832         return pgdat;
1833 }
1834
1835 static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)
1836 {
1837         pg_data_t *pgdat = (pg_data_t *)arg;
1838
1839         (*pos)++;
1840         return pgdat->pgdat_next;
1841 }
1842
1843 static void frag_stop(struct seq_file *m, void *arg)
1844 {
1845 }
1846
1847 /* 
1848  * This walks the free areas for each zone.
1849  */
1850 static int frag_show(struct seq_file *m, void *arg)
1851 {
1852         pg_data_t *pgdat = (pg_data_t *)arg;
1853         struct zone *zone;
1854         struct zone *node_zones = pgdat->node_zones;
1855         unsigned long flags;
1856         int order;
1857
1858         for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
1859                 if (!zone->present_pages)
1860                         continue;
1861
1862                 spin_lock_irqsave(&zone->lock, flags);
1863                 seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
1864                 for (order = 0; order < MAX_ORDER; ++order)
1865                         seq_printf(m, "%6lu ", zone->free_area[order].nr_free);
1866                 spin_unlock_irqrestore(&zone->lock, flags);
1867                 seq_putc(m, '\n');
1868         }
1869         return 0;
1870 }
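     /*
      * Example of a single output line (illustrative counts, MAX_ORDER == 11):
      *
      *   Node 0, zone   Normal    145     62     33     12      5      2      1      0      0      1      3
      *
      * i.e. the number of free blocks at each order from 0 to MAX_ORDER-1.
      */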
1871
1872 struct seq_operations fragmentation_op = {
1873         .start  = frag_start,
1874         .next   = frag_next,
1875         .stop   = frag_stop,
1876         .show   = frag_show,
1877 };
1878
1879 /*
1880  * Output information about zones in @pgdat.
1881  */
1882 static int zoneinfo_show(struct seq_file *m, void *arg)
1883 {
1884         pg_data_t *pgdat = arg;
1885         struct zone *zone;
1886         struct zone *node_zones = pgdat->node_zones;
1887         unsigned long flags;
1888
1889         for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; zone++) {
1890                 int i;
1891
1892                 if (!zone->present_pages)
1893                         continue;
1894
1895                 spin_lock_irqsave(&zone->lock, flags);
1896                 seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name);
1897                 seq_printf(m,
1898                            "\n  pages free     %lu"
1899                            "\n        min      %lu"
1900                            "\n        low      %lu"
1901                            "\n        high     %lu"
1902                            "\n        active   %lu"
1903                            "\n        inactive %lu"
1904                            "\n        scanned  %lu (a: %lu i: %lu)"
1905                            "\n        spanned  %lu"
1906                            "\n        present  %lu",
1907                            zone->free_pages,
1908                            zone->pages_min,
1909                            zone->pages_low,
1910                            zone->pages_high,
1911                            zone->nr_active,
1912                            zone->nr_inactive,
1913                            zone->pages_scanned,
1914                            zone->nr_scan_active, zone->nr_scan_inactive,
1915                            zone->spanned_pages,
1916                            zone->present_pages);
1917                 seq_printf(m,
1918                            "\n        protection: (%lu",
1919                            zone->lowmem_reserve[0]);
1920                 for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++)
1921                         seq_printf(m, ", %lu", zone->lowmem_reserve[i]);
1922                 seq_printf(m,
1923                            ")"
1924                            "\n  pagesets");
1925                 for (i = 0; i < ARRAY_SIZE(zone->pageset); i++) {
1926                         struct per_cpu_pageset *pageset;
1927                         int j;
1928
1929                         pageset = &zone->pageset[i];
1930                         for (j = 0; j < ARRAY_SIZE(pageset->pcp); j++) {
1931                                 if (pageset->pcp[j].count)
1932                                         break;
1933                         }
1934                         if (j == ARRAY_SIZE(pageset->pcp))
1935                                 continue;
1936                         for (j = 0; j < ARRAY_SIZE(pageset->pcp); j++) {
1937                                 seq_printf(m,
1938                                            "\n    cpu: %i pcp: %i"
1939                                            "\n              count: %i"
1940                                            "\n              low:   %i"
1941                                            "\n              high:  %i"
1942                                            "\n              batch: %i",
1943                                            i, j,
1944                                            pageset->pcp[j].count,
1945                                            pageset->pcp[j].low,
1946                                            pageset->pcp[j].high,
1947                                            pageset->pcp[j].batch);
1948                         }
1949 #ifdef CONFIG_NUMA
1950                         seq_printf(m,
1951                                    "\n            numa_hit:       %lu"
1952                                    "\n            numa_miss:      %lu"
1953                                    "\n            numa_foreign:   %lu"
1954                                    "\n            interleave_hit: %lu"
1955                                    "\n            local_node:     %lu"
1956                                    "\n            other_node:     %lu",
1957                                    pageset->numa_hit,
1958                                    pageset->numa_miss,
1959                                    pageset->numa_foreign,
1960                                    pageset->interleave_hit,
1961                                    pageset->local_node,
1962                                    pageset->other_node);
1963 #endif
1964                 }
1965                 seq_printf(m,
1966                            "\n  all_unreclaimable: %u"
1967                            "\n  prev_priority:     %i"
1968                            "\n  temp_priority:     %i"
1969                            "\n  start_pfn:         %lu",
1970                            zone->all_unreclaimable,
1971                            zone->prev_priority,
1972                            zone->temp_priority,
1973                            zone->zone_start_pfn);
1974                 spin_unlock_irqrestore(&zone->lock, flags);
1975                 seq_putc(m, '\n');
1976         }
1977         return 0;
1978 }
1979
1980 struct seq_operations zoneinfo_op = {
1981         .start  = frag_start, /* iterate over all zones. The same as in
1982                                * fragmentation. */
1983         .next   = frag_next,
1984         .stop   = frag_stop,
1985         .show   = zoneinfo_show,
1986 };
1987
1988 static char *vmstat_text[] = {
1989         "nr_dirty",
1990         "nr_writeback",
1991         "nr_unstable",
1992         "nr_page_table_pages",
1993         "nr_mapped",
1994         "nr_slab",
1995
1996         "pgpgin",
1997         "pgpgout",
1998         "pswpin",
1999         "pswpout",
2000         "pgalloc_high",
2001
2002         "pgalloc_normal",
2003         "pgalloc_dma",
2004         "pgfree",
2005         "pgactivate",
2006         "pgdeactivate",
2007
2008         "pgfault",
2009         "pgmajfault",
2010         "pgrefill_high",
2011         "pgrefill_normal",
2012         "pgrefill_dma",
2013
2014         "pgsteal_high",
2015         "pgsteal_normal",
2016         "pgsteal_dma",
2017         "pgscan_kswapd_high",
2018         "pgscan_kswapd_normal",
2019
2020         "pgscan_kswapd_dma",
2021         "pgscan_direct_high",
2022         "pgscan_direct_normal",
2023         "pgscan_direct_dma",
2024         "pginodesteal",
2025
2026         "slabs_scanned",
2027         "kswapd_steal",
2028         "kswapd_inodesteal",
2029         "pageoutrun",
2030         "allocstall",
2031
2032         "pgrotated",
2033         "nr_bounce",
2034 };
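     /*
      * Note: vmstat_show() below looks these names up by word offset into
      * struct page_state, so the order here must match the order of the
      * counters in that structure.
      */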
2035
2036 static void *vmstat_start(struct seq_file *m, loff_t *pos)
2037 {
2038         struct page_state *ps;
2039
2040         if (*pos >= ARRAY_SIZE(vmstat_text))
2041                 return NULL;
2042
2043         ps = kmalloc(sizeof(*ps), GFP_KERNEL);
2044         m->private = ps;
2045         if (!ps)
2046                 return ERR_PTR(-ENOMEM);
2047         get_full_page_state(ps);
2048         ps->pgpgin /= 2;                /* sectors -> kbytes */
2049         ps->pgpgout /= 2;
2050         return (unsigned long *)ps + *pos;
2051 }
2052
2053 static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
2054 {
2055         (*pos)++;
2056         if (*pos >= ARRAY_SIZE(vmstat_text))
2057                 return NULL;
2058         return (unsigned long *)m->private + *pos;
2059 }
2060
2061 static int vmstat_show(struct seq_file *m, void *arg)
2062 {
2063         unsigned long *l = arg;
2064         unsigned long off = l - (unsigned long *)m->private;
2065
2066         seq_printf(m, "%s %lu\n", vmstat_text[off], *l);
2067         return 0;
2068 }
2069
2070 static void vmstat_stop(struct seq_file *m, void *arg)
2071 {
2072         kfree(m->private);
2073         m->private = NULL;
2074 }
2075
2076 struct seq_operations vmstat_op = {
2077         .start  = vmstat_start,
2078         .next   = vmstat_next,
2079         .stop   = vmstat_stop,
2080         .show   = vmstat_show,
2081 };
2082
2083 #endif /* CONFIG_PROC_FS */
2084
2085 #ifdef CONFIG_HOTPLUG_CPU
2086 static int page_alloc_cpu_notify(struct notifier_block *self,
2087                                  unsigned long action, void *hcpu)
2088 {
2089         int cpu = (unsigned long)hcpu;
2090         long *count;
2091         unsigned long *src, *dest;
2092
2093         if (action == CPU_DEAD) {
2094                 int i;
2095
2096                 /* Drain local pagecache count. */
2097                 count = &per_cpu(nr_pagecache_local, cpu);
2098                 atomic_add(*count, &nr_pagecache);
2099                 *count = 0;
2100                 local_irq_disable();
2101                 __drain_pages(cpu);
2102
2103                 /* Add dead cpu's page_states to our own. */
2104                 dest = (unsigned long *)&__get_cpu_var(page_states);
2105                 src = (unsigned long *)&per_cpu(page_states, cpu);
2106
2107                 for (i = 0; i < sizeof(struct page_state)/sizeof(unsigned long);
2108                                 i++) {
2109                         dest[i] += src[i];
2110                         src[i] = 0;
2111                 }
2112
2113                 local_irq_enable();
2114         }
2115         return NOTIFY_OK;
2116 }
2117 #endif /* CONFIG_HOTPLUG_CPU */
2118
2119 void __init page_alloc_init(void)
2120 {
2121         hotcpu_notifier(page_alloc_cpu_notify, 0);
2122 }
2123
2124 /*
2125  * setup_per_zone_lowmem_reserve - called whenever
2126  *      sysctl_lowmem_reserve_ratio changes.  Ensures that each zone
2127  *      has a correct pages reserved value, so an adequate number of
2128  *      pages are left in the zone after a successful __alloc_pages().
2129  */
2130 static void setup_per_zone_lowmem_reserve(void)
2131 {
2132         struct pglist_data *pgdat;
2133         int j, idx;
2134
2135         for_each_pgdat(pgdat) {
2136                 for (j = 0; j < MAX_NR_ZONES; j++) {
2137                         struct zone *zone = pgdat->node_zones + j;
2138                         unsigned long present_pages = zone->present_pages;
2139
2140                         zone->lowmem_reserve[j] = 0;
2141
2142                         for (idx = j-1; idx >= 0; idx--) {
2143                                 struct zone *lower_zone;
2144
2145                                 if (sysctl_lowmem_reserve_ratio[idx] < 1)
2146                                         sysctl_lowmem_reserve_ratio[idx] = 1;
2147
2148                                 lower_zone = pgdat->node_zones + idx;
2149                                 lower_zone->lowmem_reserve[j] = present_pages /
2150                                         sysctl_lowmem_reserve_ratio[idx];
2151                                 present_pages += lower_zone->present_pages;
2152                         }
2153                 }
2154         }
2155 }
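     /*
      * Worked example with illustrative zone sizes (DMA 4096, Normal 200000,
      * HighMem 60000 present pages) and the default ratios { 256, 32 }:
      * the ZONE_HIGHMEM pass sets
      * Normal->lowmem_reserve[ZONE_HIGHMEM] = 60000/32 = 1875 and
      * DMA->lowmem_reserve[ZONE_HIGHMEM] = (60000 + 200000)/256 = 1015, while
      * the ZONE_NORMAL pass sets
      * DMA->lowmem_reserve[ZONE_NORMAL] = 200000/256 = 781.
      */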
2156
2157 /*
2158  * setup_per_zone_pages_min - called when min_free_kbytes changes.  Ensures 
2159  *      that the pages_{min,low,high} values for each zone are set correctly 
2160  *      with respect to min_free_kbytes.
2161  */
2162 static void setup_per_zone_pages_min(void)
2163 {
2164         unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
2165         unsigned long lowmem_pages = 0;
2166         struct zone *zone;
2167         unsigned long flags;
2168
2169         /* Calculate total number of !ZONE_HIGHMEM pages */
2170         for_each_zone(zone) {
2171                 if (!is_highmem(zone))
2172                         lowmem_pages += zone->present_pages;
2173         }
2174
2175         for_each_zone(zone) {
2176                 spin_lock_irqsave(&zone->lru_lock, flags);
2177                 if (is_highmem(zone)) {
2178                         /*
2179                          * Often, highmem doesn't need to reserve any pages.
2180                          * But the pages_min/low/high values are also used for
2181                          * batching up page reclaim activity so we need a
2182                          * decent value here.
2183                          */
2184                         int min_pages;
2185
2186                         min_pages = zone->present_pages / 1024;
2187                         if (min_pages < SWAP_CLUSTER_MAX)
2188                                 min_pages = SWAP_CLUSTER_MAX;
2189                         if (min_pages > 128)
2190                                 min_pages = 128;
2191                         zone->pages_min = min_pages;
2192                 } else {
2193                         /* if it's a lowmem zone, reserve a number of pages
2194                          * proportionate to the zone's size.
2195                          */
2196                         zone->pages_min = (pages_min * zone->present_pages) /
2197                                            lowmem_pages;
2198                 }
2199
2200                 /*
2201                  * When interpreting these watermarks, keep in mind that
2202                  * pages_min : pages_low : pages_high scale as 4 : 5 : 6.
2203                  */
2204                 zone->pages_low   = (zone->pages_min * 5) / 4;
2205                 zone->pages_high  = (zone->pages_min * 6) / 4;
2206                 spin_unlock_irqrestore(&zone->lru_lock, flags);
2207         }
2208 }
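     /*
      * Worked example (illustrative numbers): with 4K pages and
      * min_free_kbytes == 4096, pages_min above is 4096 >> 2 = 1024 pages in
      * total, split across the lowmem zones in proportion to their size;
      * each zone then gets pages_low = pages_min * 5/4 and
      * pages_high = pages_min * 6/4.
      */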
2209
2210 /*
2211  * Initialise min_free_kbytes.
2212  *
2213  * For small machines we want it small (128k min).  For large machines
2214  * we want it large (64MB max).  But it is not linear, because network
2215  * bandwidth does not increase linearly with machine size.  We use
2216  *
2217  *      min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
2218  *      min_free_kbytes = sqrt(lowmem_kbytes * 16)
2219  *
2220  * which yields
2221  *
2222  * 16MB:        512k
2223  * 32MB:        724k
2224  * 64MB:        1024k
2225  * 128MB:       1448k
2226  * 256MB:       2048k
2227  * 512MB:       2896k
2228  * 1024MB:      4096k
2229  * 2048MB:      5792k
2230  * 4096MB:      8192k
2231  * 8192MB:      11584k
2232  * 16384MB:     16384k
2233  */
2234 static int __init init_per_zone_pages_min(void)
2235 {
2236         unsigned long lowmem_kbytes;
2237
2238         lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
2239
2240         min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
2241         if (min_free_kbytes < 128)
2242                 min_free_kbytes = 128;
2243         if (min_free_kbytes > 65536)
2244                 min_free_kbytes = 65536;
2245         setup_per_zone_pages_min();
2246         setup_per_zone_lowmem_reserve();
2247         return 0;
2248 }
2249 module_init(init_per_zone_pages_min)
2250
2251 /*
2252  * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so 
2253  *      that we can call two helper functions whenever min_free_kbytes
2254  *      changes.
2255  */
2256 int min_free_kbytes_sysctl_handler(ctl_table *table, int write, 
2257         struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
2258 {
2259         proc_dointvec(table, write, file, buffer, length, ppos);
2260         setup_per_zone_pages_min();
2261         return 0;
2262 }
2263
2264 /*
2265  * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
2266  *      proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
2267  *      whenever sysctl_lowmem_reserve_ratio changes.
2268  *
2269  * The reserve ratio obviously has absolutely no relation with the
2270  * pages_min watermarks. The lowmem reserve ratio is only meaningful
2271  * in relation to the boot-time zone sizes.
2272  */
2273 int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write,
2274         struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
2275 {
2276         proc_dointvec_minmax(table, write, file, buffer, length, ppos);
2277         setup_per_zone_lowmem_reserve();
2278         return 0;
2279 }
2280
2281 __initdata int hashdist = HASHDIST_DEFAULT;
2282
2283 #ifdef CONFIG_NUMA
2284 static int __init set_hashdist(char *str)
2285 {
2286         if (!str)
2287                 return 0;
2288         hashdist = simple_strtoul(str, &str, 0);
2289         return 1;
2290 }
2291 __setup("hashdist=", set_hashdist);
2292 #endif
2293
2294 /*
2295  * allocate a large system hash table from bootmem
2296  * - it is assumed that the hash table must contain an exact power-of-2
2297  *   quantity of entries
2298  * - limit is the number of hash buckets, not the total allocation size
2299  */
2300 void *__init alloc_large_system_hash(const char *tablename,
2301                                      unsigned long bucketsize,
2302                                      unsigned long numentries,
2303                                      int scale,
2304                                      int flags,
2305                                      unsigned int *_hash_shift,
2306                                      unsigned int *_hash_mask,
2307                                      unsigned long limit)
2308 {
2309         unsigned long long max = limit;
2310         unsigned long log2qty, size;
2311         void *table = NULL;
2312
2313         /* allow the kernel cmdline to have a say */
2314         if (!numentries) {
2315                 /* round applicable memory size up to nearest megabyte */
2316                 numentries = (flags & HASH_HIGHMEM) ? nr_all_pages : nr_kernel_pages;
2317                 numentries += (1UL << (20 - PAGE_SHIFT)) - 1;
2318                 numentries >>= 20 - PAGE_SHIFT;
2319                 numentries <<= 20 - PAGE_SHIFT;
2320
2321                 /* limit to 1 bucket per 2^scale bytes of low memory */
2322                 if (scale > PAGE_SHIFT)
2323                         numentries >>= (scale - PAGE_SHIFT);
2324                 else
2325                         numentries <<= (PAGE_SHIFT - scale);
2326         }
2327         /* rounded up to nearest power of 2 in size */
2328         numentries = 1UL << (long_log2(numentries) + 1);
2329
2330         /* limit allocation size to 1/16 total memory by default */
2331         if (max == 0) {
2332                 max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
2333                 do_div(max, bucketsize);
2334         }
2335
2336         if (numentries > max)
2337                 numentries = max;
2338
2339         log2qty = long_log2(numentries);
2340
2341         do {
2342                 size = bucketsize << log2qty;
2343                 if (flags & HASH_EARLY)
2344                         table = alloc_bootmem(size);
2345                 else if (hashdist)
2346                         table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
2347                 else {
2348                         unsigned long order;
2349                         for (order = 0; ((1UL << order) << PAGE_SHIFT) < size; order++)
2350                                 ;
2351                         table = (void*) __get_free_pages(GFP_ATOMIC, order);
2352                 }
2353         } while (!table && size > PAGE_SIZE && --log2qty);
2354
2355         if (!table)
2356                 panic("Failed to allocate %s hash table\n", tablename);
2357
2358         printk("%s hash table entries: %d (order: %d, %lu bytes)\n",
2359                tablename,
2360                (1U << log2qty),
2361                long_log2(size) - PAGE_SHIFT,
2362                size);
2363
2364         if (_hash_shift)
2365                 *_hash_shift = log2qty;
2366         if (_hash_mask)
2367                 *_hash_mask = (1 << log2qty) - 1;
2368
2369         return table;
2370 }
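     /*
      * Sizing example for alloc_large_system_hash() (illustrative numbers,
      * not taken from any particular caller): with 262144 pages of lowmem
      * (1GB of 4K pages), scale == 14 and bucketsize == 16, numentries is
      * 262144 >> (14 - 12) = 65536, rounded up to 1 << 17 = 131072 buckets,
      * which is below the 1/16-of-memory cap, so the table occupies
      * 16 << 17 bytes = 2MB and *_hash_shift is set to 17.
      */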