mm/hugetlb.c
1 /*
2  * Generic hugetlb support.
3  * (C) William Irwin, April 2004
4  */
5 #include <linux/gfp.h>
6 #include <linux/list.h>
7 #include <linux/init.h>
8 #include <linux/module.h>
9 #include <linux/mm.h>
10 #include <linux/seq_file.h>
11 #include <linux/sysctl.h>
12 #include <linux/highmem.h>
13 #include <linux/mmu_notifier.h>
14 #include <linux/nodemask.h>
15 #include <linux/pagemap.h>
16 #include <linux/mempolicy.h>
17 #include <linux/cpuset.h>
18 #include <linux/mutex.h>
19 #include <linux/bootmem.h>
20 #include <linux/sysfs.h>
21
22 #include <asm/page.h>
23 #include <asm/pgtable.h>
24 #include <asm/io.h>
25
26 #include <linux/hugetlb.h>
27 #include "internal.h"
28
29 const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
30 static gfp_t htlb_alloc_mask = GFP_HIGHUSER;
31 unsigned long hugepages_treat_as_movable;
32
33 static int max_hstate;
34 unsigned int default_hstate_idx;
35 struct hstate hstates[HUGE_MAX_HSTATE];
36
37 __initdata LIST_HEAD(huge_boot_pages);
38
39 /* for command line parsing */
40 static struct hstate * __initdata parsed_hstate;
41 static unsigned long __initdata default_hstate_max_huge_pages;
42 static unsigned long __initdata default_hstate_size;
43
44 #define for_each_hstate(h) \
45         for ((h) = hstates; (h) < &hstates[max_hstate]; (h)++)
46
47 /*
48  * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
49  */
50 static DEFINE_SPINLOCK(hugetlb_lock);
51
52 /*
53  * Region tracking -- allows tracking of reservations and instantiated pages
54  *                    across the pages in a mapping.
55  *
56  * The region data structures are protected by a combination of the mmap_sem
57  * and the hugetlb_instantiation_mutex.  To access or modify a region the caller
58  * must either hold the mmap_sem for write, or the mmap_sem for read and
59  * the hugetlb_instantiation mutex:
60  *
61  *      down_write(&mm->mmap_sem);
62  * or
63  *      down_read(&mm->mmap_sem);
64  *      mutex_lock(&hugetlb_instantiation_mutex);
65  */
66 struct file_region {
67         struct list_head link;
68         long from;
69         long to;
70 };
71
72 static long region_add(struct list_head *head, long f, long t)
73 {
74         struct file_region *rg, *nrg, *trg;
75
76         /* Locate the region we are either in or before. */
77         list_for_each_entry(rg, head, link)
78                 if (f <= rg->to)
79                         break;
80
81         /* Round our left edge to the current segment if it encloses us. */
82         if (f > rg->from)
83                 f = rg->from;
84
85         /* Check for and consume any regions we now overlap with. */
86         nrg = rg;
87         list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
88                 if (&rg->link == head)
89                         break;
90                 if (rg->from > t)
91                         break;
92
93                 /* If this area reaches higher, then extend our area to
94                  * include it completely.  If this is not the first area
95                  * which we intend to reuse, free it. */
96                 if (rg->to > t)
97                         t = rg->to;
98                 if (rg != nrg) {
99                         list_del(&rg->link);
100                         kfree(rg);
101                 }
102         }
103         nrg->from = f;
104         nrg->to = t;
105         return 0;
106 }
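/*
 * Worked example (editorial note, not part of the original source): with an
 * existing region list of [2,5) and [7,9), region_add(head, 4, 8) first
 * rounds f down to 2 because [2,5) encloses the left edge, then absorbs the
 * overlapping [7,9) entry (extending t to 9 and freeing that entry), leaving
 * a single merged region [2,9) on the list.
 */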
107
108 static long region_chg(struct list_head *head, long f, long t)
109 {
110         struct file_region *rg, *nrg;
111         long chg = 0;
112
113         /* Locate the region we are before or in. */
114         list_for_each_entry(rg, head, link)
115                 if (f <= rg->to)
116                         break;
117
118         /* If we are below the current region then a new region is required.
119          * Subtle: allocate a new region at the position but make it zero
120          * size such that we are guaranteed to record the reservation. */
121         if (&rg->link == head || t < rg->from) {
122                 nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
123                 if (!nrg)
124                         return -ENOMEM;
125                 nrg->from = f;
126                 nrg->to   = f;
127                 INIT_LIST_HEAD(&nrg->link);
128                 list_add(&nrg->link, rg->link.prev);
129
130                 return t - f;
131         }
132
133         /* Round our left edge to the current segment if it encloses us. */
134         if (f > rg->from)
135                 f = rg->from;
136         chg = t - f;
137
138         /* Check for and consume any regions we now overlap with. */
139         list_for_each_entry(rg, rg->link.prev, link) {
140                 if (&rg->link == head)
141                         break;
142                 if (rg->from > t)
143                         return chg;
144
145                 /* We overlap with this area; if it extends further than
146                  * us then we must extend ourselves.  Account for its
147                  * existing reservation. */
148                 if (rg->to > t) {
149                         chg += rg->to - t;
150                         t = rg->to;
151                 }
152                 chg -= rg->to - rg->from;
153         }
154         return chg;
155 }
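/*
 * Worked example (editorial note, not part of the original source): with
 * regions [2,5) and [7,9) already on the list, region_chg(head, 4, 8) starts
 * from chg = 8 - 2 = 6 (left edge rounded down to 2), subtracts the 3 pages
 * already covered by [2,5) and the 2 pages covered by [7,9), and returns 2,
 * i.e. exactly the offsets 5 and 6 that still need a reservation.  Note that
 * region_chg() only sizes the change; region_add() commits it.
 */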
156
157 static long region_truncate(struct list_head *head, long end)
158 {
159         struct file_region *rg, *trg;
160         long chg = 0;
161
162         /* Locate the region we are either in or before. */
163         list_for_each_entry(rg, head, link)
164                 if (end <= rg->to)
165                         break;
166         if (&rg->link == head)
167                 return 0;
168
169         /* If we are in the middle of a region then adjust it. */
170         if (end > rg->from) {
171                 chg = rg->to - end;
172                 rg->to = end;
173                 rg = list_entry(rg->link.next, typeof(*rg), link);
174         }
175
176         /* Drop any remaining regions. */
177         list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
178                 if (&rg->link == head)
179                         break;
180                 chg += rg->to - rg->from;
181                 list_del(&rg->link);
182                 kfree(rg);
183         }
184         return chg;
185 }
186
187 static long region_count(struct list_head *head, long f, long t)
188 {
189         struct file_region *rg;
190         long chg = 0;
191
192         /* Locate each segment we overlap with, and count that overlap. */
193         list_for_each_entry(rg, head, link) {
194                 long seg_from;  /* rg->from/to are long; avoid truncation */
195                 long seg_to;
196
197                 if (rg->to <= f)
198                         continue;
199                 if (rg->from >= t)
200                         break;
201
202                 seg_from = max(rg->from, f);
203                 seg_to = min(rg->to, t);
204
205                 chg += seg_to - seg_from;
206         }
207
208         return chg;
209 }
210
211 /*
212  * Convert the address within this vma to the page offset within
213  * the mapping, in pagecache page units; huge pages here.
214  */
215 static pgoff_t vma_hugecache_offset(struct hstate *h,
216                         struct vm_area_struct *vma, unsigned long address)
217 {
218         return ((address - vma->vm_start) >> huge_page_shift(h)) +
219                         (vma->vm_pgoff >> huge_page_order(h));
220 }
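/*
 * Worked example (editorial note, assuming 2MB huge pages and 4kB base
 * pages, i.e. huge_page_shift == 21 and huge_page_order == 9): for a vma
 * with vm_start == 0x40000000 and vm_pgoff == 512 (2MB into the file),
 * address 0x40400000 maps to ((0x400000 >> 21) + (512 >> 9)) == 2 + 1 == 3,
 * i.e. the fourth huge page of the underlying mapping.
 */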
221
222 /*
223  * Return the size of the pages allocated when backing a VMA. In the majority
224  * of cases this will be the same size as that used by the page table entries.
225  */
226 unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
227 {
228         struct hstate *hstate;
229
230         if (!is_vm_hugetlb_page(vma))
231                 return PAGE_SIZE;
232
233         hstate = hstate_vma(vma);
234
235         return 1UL << (hstate->order + PAGE_SHIFT);
236 }
237 EXPORT_SYMBOL_GPL(vma_kernel_pagesize);
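/*
 * Editorial example (not part of the original source): for a hugetlb vma
 * backed by an hstate of order 9 with PAGE_SHIFT == 12 this returns
 * 1UL << 21 == 2MB; any non-hugetlb vma simply reports PAGE_SIZE.
 */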
238
239 /*
240  * Return the page size being used by the MMU to back a VMA. In the majority
241  * of cases, the page size used by the kernel matches the MMU size. On
242  * architectures where it differs, an architecture-specific version of this
243  * function is required.
244  */
245 #ifndef vma_mmu_pagesize
246 unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
247 {
248         return vma_kernel_pagesize(vma);
249 }
250 #endif
251
252 /*
253  * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
254  * bits of the reservation map pointer, which are always clear due to
255  * alignment.
256  */
257 #define HPAGE_RESV_OWNER    (1UL << 0)
258 #define HPAGE_RESV_UNMAPPED (1UL << 1)
259 #define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)
260
261 /*
262  * These helpers are used to track how many pages are reserved for
263  * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
264  * is guaranteed to have its future faults succeed.
265  *
266  * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
267  * the reserve counters are updated with the hugetlb_lock held. It is safe
268  * to reset the VMA at fork() time as it is not in use yet and there is no
269  * chance of the global counters getting corrupted as a result of the values.
270  *
271  * The private mapping reservation is represented in a subtly different
272  * manner from a shared mapping.  A shared mapping has a region map associated
273  * with the underlying file; this region map represents the backing file
274  * pages which have ever had a reservation assigned, and it persists even
275  * after the page is instantiated.  A private mapping has a region map
276  * associated with the original mmap which is attached to all VMAs which
277  * reference it; this region map represents those offsets which have consumed
278  * a reservation, i.e. where pages have been instantiated.
279  */
280 static unsigned long get_vma_private_data(struct vm_area_struct *vma)
281 {
282         return (unsigned long)vma->vm_private_data;
283 }
284
285 static void set_vma_private_data(struct vm_area_struct *vma,
286                                                         unsigned long value)
287 {
288         vma->vm_private_data = (void *)value;
289 }
290
291 struct resv_map {
292         struct kref refs;
293         struct list_head regions;
294 };
295
296 static struct resv_map *resv_map_alloc(void)
297 {
298         struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
299         if (!resv_map)
300                 return NULL;
301
302         kref_init(&resv_map->refs);
303         INIT_LIST_HEAD(&resv_map->regions);
304
305         return resv_map;
306 }
307
308 static void resv_map_release(struct kref *ref)
309 {
310         struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
311
312         /* Clear out any active regions before we release the map. */
313         region_truncate(&resv_map->regions, 0);
314         kfree(resv_map);
315 }
316
317 static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
318 {
319         VM_BUG_ON(!is_vm_hugetlb_page(vma));
320         if (!(vma->vm_flags & VM_MAYSHARE))
321                 return (struct resv_map *)(get_vma_private_data(vma) &
322                                                         ~HPAGE_RESV_MASK);
323         return NULL;
324 }
325
326 static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
327 {
328         VM_BUG_ON(!is_vm_hugetlb_page(vma));
329         VM_BUG_ON(vma->vm_flags & VM_MAYSHARE);
330
331         set_vma_private_data(vma, (get_vma_private_data(vma) &
332                                 HPAGE_RESV_MASK) | (unsigned long)map);
333 }
334
335 static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
336 {
337         VM_BUG_ON(!is_vm_hugetlb_page(vma));
338         VM_BUG_ON(vma->vm_flags & VM_MAYSHARE);
339
340         set_vma_private_data(vma, get_vma_private_data(vma) | flags);
341 }
342
343 static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
344 {
345         VM_BUG_ON(!is_vm_hugetlb_page(vma));
346
347         return (get_vma_private_data(vma) & flag) != 0;
348 }
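/*
 * Illustrative sketch (not part of the original source): for a private
 * mapping, vm_private_data packs a resv_map pointer together with the
 * HPAGE_RESV_* flags in its low bits, so the two can be set and read back
 * independently with the helpers above.  The owner_map name below is
 * hypothetical.
 */
#if 0	/* example only, never compiled */
	struct resv_map *owner_map = resv_map_alloc();

	set_vma_resv_map(vma, owner_map);		/* pointer in the high bits */
	set_vma_resv_flags(vma, HPAGE_RESV_OWNER);	/* flag in the low bits    */

	/* vma_resv_map(vma) == owner_map                     */
	/* is_vma_resv_set(vma, HPAGE_RESV_OWNER) is non-zero */
#endif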
349
350 /* Decrement the reserved pages in the hugepage pool by one */
351 static void decrement_hugepage_resv_vma(struct hstate *h,
352                         struct vm_area_struct *vma)
353 {
354         if (vma->vm_flags & VM_NORESERVE)
355                 return;
356
357         if (vma->vm_flags & VM_MAYSHARE) {
358                 /* Shared mappings always use reserves */
359                 h->resv_huge_pages--;
360         } else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
361                 /*
362                  * Only the process that called mmap() has reserves for
363                  * private mappings.
364                  */
365                 h->resv_huge_pages--;
366         }
367 }
368
369 /* Reset counters to 0 and clear all HPAGE_RESV_* flags */
370 void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
371 {
372         VM_BUG_ON(!is_vm_hugetlb_page(vma));
373         if (!(vma->vm_flags & VM_MAYSHARE))
374                 vma->vm_private_data = (void *)0;
375 }
376
377 /* Returns true if the VMA has associated reserve pages */
378 static int vma_has_reserves(struct vm_area_struct *vma)
379 {
380         if (vma->vm_flags & VM_MAYSHARE)
381                 return 1;
382         if (is_vma_resv_set(vma, HPAGE_RESV_OWNER))
383                 return 1;
384         return 0;
385 }
386
387 static void clear_gigantic_page(struct page *page,
388                         unsigned long addr, unsigned long sz)
389 {
390         int i;
391         struct page *p = page;
392
393         might_sleep();
394         for (i = 0; i < sz/PAGE_SIZE; i++, p = mem_map_next(p, page, i)) {
395                 cond_resched();
396                 clear_user_highpage(p, addr + i * PAGE_SIZE);
397         }
398 }
399 static void clear_huge_page(struct page *page,
400                         unsigned long addr, unsigned long sz)
401 {
402         int i;
403
404         if (unlikely(sz/PAGE_SIZE > MAX_ORDER_NR_PAGES)) {
405                 clear_gigantic_page(page, addr, sz);
406                 return;
407         }
408
409         might_sleep();
410         for (i = 0; i < sz/PAGE_SIZE; i++) {
411                 cond_resched();
412                 clear_user_highpage(page + i, addr + i * PAGE_SIZE);
413         }
414 }
415
416 static void copy_gigantic_page(struct page *dst, struct page *src,
417                            unsigned long addr, struct vm_area_struct *vma)
418 {
419         int i;
420         struct hstate *h = hstate_vma(vma);
421         struct page *dst_base = dst;
422         struct page *src_base = src;
423         might_sleep();
424         for (i = 0; i < pages_per_huge_page(h); ) {
425                 cond_resched();
426                 copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma);
427
428                 i++;
429                 dst = mem_map_next(dst, dst_base, i);
430                 src = mem_map_next(src, src_base, i);
431         }
432 }
433 static void copy_huge_page(struct page *dst, struct page *src,
434                            unsigned long addr, struct vm_area_struct *vma)
435 {
436         int i;
437         struct hstate *h = hstate_vma(vma);
438
439         if (unlikely(pages_per_huge_page(h) > MAX_ORDER_NR_PAGES)) {
440                 copy_gigantic_page(dst, src, addr, vma);
441                 return;
442         }
443
444         might_sleep();
445         for (i = 0; i < pages_per_huge_page(h); i++) {
446                 cond_resched();
447                 copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma);
448         }
449 }
450
451 static void enqueue_huge_page(struct hstate *h, struct page *page)
452 {
453         int nid = page_to_nid(page);
454         list_add(&page->lru, &h->hugepage_freelists[nid]);
455         h->free_huge_pages++;
456         h->free_huge_pages_node[nid]++;
457 }
458
459 static struct page *dequeue_huge_page_vma(struct hstate *h,
460                                 struct vm_area_struct *vma,
461                                 unsigned long address, int avoid_reserve)
462 {
463         int nid;
464         struct page *page = NULL;
465         struct mempolicy *mpol;
466         nodemask_t *nodemask;
467         struct zonelist *zonelist = huge_zonelist(vma, address,
468                                         htlb_alloc_mask, &mpol, &nodemask);
469         struct zone *zone;
470         struct zoneref *z;
471
472         /*
473          * A child process with MAP_PRIVATE mappings created by its parent
474          * has no page reserves. This check ensures that reservations are
475          * not "stolen". The child may still get SIGKILLed.
476          */
477         if (!vma_has_reserves(vma) &&
478                         h->free_huge_pages - h->resv_huge_pages == 0)
479                 return NULL;
480
481         /* If reserves cannot be used, ensure enough pages are in the pool */
482         if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
483                 return NULL;
484
485         for_each_zone_zonelist_nodemask(zone, z, zonelist,
486                                                 MAX_NR_ZONES - 1, nodemask) {
487                 nid = zone_to_nid(zone);
488                 if (cpuset_zone_allowed_softwall(zone, htlb_alloc_mask) &&
489                     !list_empty(&h->hugepage_freelists[nid])) {
490                         page = list_entry(h->hugepage_freelists[nid].next,
491                                           struct page, lru);
492                         list_del(&page->lru);
493                         h->free_huge_pages--;
494                         h->free_huge_pages_node[nid]--;
495
496                         if (!avoid_reserve)
497                                 decrement_hugepage_resv_vma(h, vma);
498
499                         break;
500                 }
501         }
502         mpol_cond_put(mpol);
503         return page;
504 }
505
506 static void update_and_free_page(struct hstate *h, struct page *page)
507 {
508         int i;
509
510         VM_BUG_ON(h->order >= MAX_ORDER);
511
512         h->nr_huge_pages--;
513         h->nr_huge_pages_node[page_to_nid(page)]--;
514         for (i = 0; i < pages_per_huge_page(h); i++) {
515                 page[i].flags &= ~(1 << PG_locked | 1 << PG_error | 1 << PG_referenced |
516                                 1 << PG_dirty | 1 << PG_active | 1 << PG_reserved |
517                                 1 << PG_private | 1<< PG_writeback);
518         }
519         set_compound_page_dtor(page, NULL);
520         set_page_refcounted(page);
521         arch_release_hugepage(page);
522         __free_pages(page, huge_page_order(h));
523 }
524
525 struct hstate *size_to_hstate(unsigned long size)
526 {
527         struct hstate *h;
528
529         for_each_hstate(h) {
530                 if (huge_page_size(h) == size)
531                         return h;
532         }
533         return NULL;
534 }
535
536 static void free_huge_page(struct page *page)
537 {
538         /*
539          * Can't pass hstate in here because it is called from the
540          * compound page destructor.
541          */
542         struct hstate *h = page_hstate(page);
543         int nid = page_to_nid(page);
544         struct address_space *mapping;
545
546         mapping = (struct address_space *) page_private(page);
547         set_page_private(page, 0);
548         BUG_ON(page_count(page));
549         INIT_LIST_HEAD(&page->lru);
550
551         spin_lock(&hugetlb_lock);
552         if (h->surplus_huge_pages_node[nid] && huge_page_order(h) < MAX_ORDER) {
553                 update_and_free_page(h, page);
554                 h->surplus_huge_pages--;
555                 h->surplus_huge_pages_node[nid]--;
556         } else {
557                 enqueue_huge_page(h, page);
558         }
559         spin_unlock(&hugetlb_lock);
560         if (mapping)
561                 hugetlb_put_quota(mapping, 1);
562 }
563
564 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
565 {
566         set_compound_page_dtor(page, free_huge_page);
567         spin_lock(&hugetlb_lock);
568         h->nr_huge_pages++;
569         h->nr_huge_pages_node[nid]++;
570         spin_unlock(&hugetlb_lock);
571         put_page(page); /* free it into the hugepage allocator */
572 }
573
574 static void prep_compound_gigantic_page(struct page *page, unsigned long order)
575 {
576         int i;
577         int nr_pages = 1 << order;
578         struct page *p = page + 1;
579
580         /* we rely on prep_new_huge_page to set the destructor */
581         set_compound_order(page, order);
582         __SetPageHead(page);
583         for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
584                 __SetPageTail(p);
585                 p->first_page = page;
586         }
587 }
588
589 int PageHuge(struct page *page)
590 {
591         compound_page_dtor *dtor;
592
593         if (!PageCompound(page))
594                 return 0;
595
596         page = compound_head(page);
597         dtor = get_compound_page_dtor(page);
598
599         return dtor == free_huge_page;
600 }
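/*
 * Editorial note (not part of the original source): PageHuge() identifies
 * hugetlb pages purely by their compound destructor.  For any base page
 * inside, say, a 2MB hugetlb page, compound_head() yields the head page
 * whose destructor is free_huge_page, so PageHuge() returns 1; any other
 * compound page with a different destructor returns 0.
 */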
601
602 static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
603 {
604         struct page *page;
605
606         if (h->order >= MAX_ORDER)
607                 return NULL;
608
609         page = alloc_pages_exact_node(nid,
610                 htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|
611                                                 __GFP_REPEAT|__GFP_NOWARN,
612                 huge_page_order(h));
613         if (page) {
614                 if (arch_prepare_hugepage(page)) {
615                         __free_pages(page, huge_page_order(h));
616                         return NULL;
617                 }
618                 prep_new_huge_page(h, page, nid);
619         }
620
621         return page;
622 }
623
624 /*
625  * common helper function for hstate_next_node_to_{alloc|free}.
626  * return next node in node_online_map, wrapping at end.
627  */
628 static int next_node_allowed(int nid)
629 {
630         nid = next_node(nid, node_online_map);
631         if (nid == MAX_NUMNODES)
632                 nid = first_node(node_online_map);
633         VM_BUG_ON(nid >= MAX_NUMNODES);
634
635         return nid;
636 }
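/*
 * Worked example (editorial note, not part of the original source): with
 * only nodes 0 and 2 online, next_node_allowed(2) finds no higher online
 * node, next_node() returns MAX_NUMNODES, and the id wraps back to 0;
 * next_node_allowed(0) simply returns 2.
 */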
637
638 /*
639  * Use a helper variable to find the next node and then
640  * copy it back to next_nid_to_alloc afterwards:
641  * otherwise there's a window in which a racer might
642  * pass invalid nid MAX_NUMNODES to alloc_pages_exact_node.
643  * But we don't need to use a spin_lock here: it really
644  * doesn't matter if occasionally a racer chooses the
645  * same nid as we do.  Move nid forward in the mask even
646  * if we just successfully allocated a hugepage so that
647  * the next caller gets hugepages on the next node.
648  */
649 static int hstate_next_node_to_alloc(struct hstate *h)
650 {
651         int nid, next_nid;
652
653         nid = h->next_nid_to_alloc;
654         next_nid = next_node_allowed(nid);
655         h->next_nid_to_alloc = next_nid;
656         return nid;
657 }
658
659 static int alloc_fresh_huge_page(struct hstate *h)
660 {
661         struct page *page;
662         int start_nid;
663         int next_nid;
664         int ret = 0;
665
666         start_nid = hstate_next_node_to_alloc(h);
667         next_nid = start_nid;
668
669         do {
670                 page = alloc_fresh_huge_page_node(h, next_nid);
671                 if (page) {
672                         ret = 1;
673                         break;
674                 }
675                 next_nid = hstate_next_node_to_alloc(h);
676         } while (next_nid != start_nid);
677
678         if (ret)
679                 count_vm_event(HTLB_BUDDY_PGALLOC);
680         else
681                 count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
682
683         return ret;
684 }
685
686 /*
687  * helper for free_pool_huge_page() - return the next node
688  * from which to free a huge page.  Advance the next node id
689  * whether or not we find a free huge page to free so that the
690  * next attempt to free addresses the next node.
691  */
692 static int hstate_next_node_to_free(struct hstate *h)
693 {
694         int nid, next_nid;
695
696         nid = h->next_nid_to_free;
697         next_nid = next_node_allowed(nid);
698         h->next_nid_to_free = next_nid;
699         return nid;
700 }
701
702 /*
703  * Free huge page from pool from next node to free.
704  * Attempt to keep persistent huge pages more or less
705  * balanced over allowed nodes.
706  * Called with hugetlb_lock locked.
707  */
708 static int free_pool_huge_page(struct hstate *h, bool acct_surplus)
709 {
710         int start_nid;
711         int next_nid;
712         int ret = 0;
713
714         start_nid = hstate_next_node_to_free(h);
715         next_nid = start_nid;
716
717         do {
718                 /*
719                  * If we're returning unused surplus pages, only examine
720                  * nodes with surplus pages.
721                  */
722                 if ((!acct_surplus || h->surplus_huge_pages_node[next_nid]) &&
723                     !list_empty(&h->hugepage_freelists[next_nid])) {
724                         struct page *page =
725                                 list_entry(h->hugepage_freelists[next_nid].next,
726                                           struct page, lru);
727                         list_del(&page->lru);
728                         h->free_huge_pages--;
729                         h->free_huge_pages_node[next_nid]--;
730                         if (acct_surplus) {
731                                 h->surplus_huge_pages--;
732                                 h->surplus_huge_pages_node[next_nid]--;
733                         }
734                         update_and_free_page(h, page);
735                         ret = 1;
736                         break;
737                 }
738                 next_nid = hstate_next_node_to_free(h);
739         } while (next_nid != start_nid);
740
741         return ret;
742 }
743
744 static struct page *alloc_buddy_huge_page(struct hstate *h,
745                         struct vm_area_struct *vma, unsigned long address)
746 {
747         struct page *page;
748         unsigned int nid;
749
750         if (h->order >= MAX_ORDER)
751                 return NULL;
752
753         /*
754          * Assume we will successfully allocate the surplus page to
755          * prevent racing processes from causing the surplus to exceed
756          * overcommit.
757          *
758          * This, however, introduces a different race, where a process B
759          * tries to grow the static hugepage pool while alloc_pages() is
760          * called by process A. B will only examine the per-node
761          * counters in determining if surplus huge pages can be
762          * converted to normal huge pages in adjust_pool_surplus(). A
763          * won't be able to increment the per-node counter, until the
764          * lock is dropped by B, but B doesn't drop hugetlb_lock until
765          * no more huge pages can be converted from surplus to normal
766          * state (and doesn't try to convert again). Thus, we have a
767          * case where a surplus huge page exists, the pool is grown, and
768          * the surplus huge page still exists after, even though it
769          * should just have been converted to a normal huge page. This
770          * does not leak memory, though, as the hugepage will be freed
771          * once it is out of use. It also does not allow the counters to
772          * go out of whack in adjust_pool_surplus() as we don't modify
773          * the node values until we've gotten the hugepage and only the
774          * per-node value is checked there.
775          */
776         spin_lock(&hugetlb_lock);
777         if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
778                 spin_unlock(&hugetlb_lock);
779                 return NULL;
780         } else {
781                 h->nr_huge_pages++;
782                 h->surplus_huge_pages++;
783         }
784         spin_unlock(&hugetlb_lock);
785
786         page = alloc_pages(htlb_alloc_mask|__GFP_COMP|
787                                         __GFP_REPEAT|__GFP_NOWARN,
788                                         huge_page_order(h));
789
790         if (page && arch_prepare_hugepage(page)) {
791                 __free_pages(page, huge_page_order(h));
792                 return NULL;
793         }
794
795         spin_lock(&hugetlb_lock);
796         if (page) {
797                 /*
798                  * This page is now managed by the hugetlb allocator and has
799                  * no users -- drop the buddy allocator's reference.
800                  */
801                 put_page_testzero(page);
802                 VM_BUG_ON(page_count(page));
803                 nid = page_to_nid(page);
804                 set_compound_page_dtor(page, free_huge_page);
805                 /*
806                  * We incremented the global counters already
807                  */
808                 h->nr_huge_pages_node[nid]++;
809                 h->surplus_huge_pages_node[nid]++;
810                 __count_vm_event(HTLB_BUDDY_PGALLOC);
811         } else {
812                 h->nr_huge_pages--;
813                 h->surplus_huge_pages--;
814                 __count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
815         }
816         spin_unlock(&hugetlb_lock);
817
818         return page;
819 }
820
821 /*
822  * Increase the hugetlb pool such that it can accommodate a reservation
823  * of size 'delta'.
824  */
825 static int gather_surplus_pages(struct hstate *h, int delta)
826 {
827         struct list_head surplus_list;
828         struct page *page, *tmp;
829         int ret, i;
830         int needed, allocated;
831
832         needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
833         if (needed <= 0) {
834                 h->resv_huge_pages += delta;
835                 return 0;
836         }
837
838         allocated = 0;
839         INIT_LIST_HEAD(&surplus_list);
840
841         ret = -ENOMEM;
842 retry:
843         spin_unlock(&hugetlb_lock);
844         for (i = 0; i < needed; i++) {
845                 page = alloc_buddy_huge_page(h, NULL, 0);
846                 if (!page) {
847                         /*
848                          * We were not able to allocate enough pages to
849                          * satisfy the entire reservation so we free what
850                          * we've allocated so far.
851                          */
852                         spin_lock(&hugetlb_lock);
853                         needed = 0;
854                         goto free;
855                 }
856
857                 list_add(&page->lru, &surplus_list);
858         }
859         allocated += needed;
860
861         /*
862          * After retaking hugetlb_lock, we need to recalculate 'needed'
863          * because either resv_huge_pages or free_huge_pages may have changed.
864          */
865         spin_lock(&hugetlb_lock);
866         needed = (h->resv_huge_pages + delta) -
867                         (h->free_huge_pages + allocated);
868         if (needed > 0)
869                 goto retry;
870
871         /*
872          * The surplus_list now contains _at_least_ the number of extra pages
873          * needed to accommodate the reservation.  Add the appropriate number
874          * of pages to the hugetlb pool and free the extras back to the buddy
875          * allocator.  Commit the entire reservation here to prevent another
876          * process from stealing the pages as they are added to the pool but
877          * before they are reserved.
878          */
879         needed += allocated;
880         h->resv_huge_pages += delta;
881         ret = 0;
882 free:
883         /* Free the needed pages to the hugetlb pool */
884         list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
885                 if ((--needed) < 0)
886                         break;
887                 list_del(&page->lru);
888                 enqueue_huge_page(h, page);
889         }
890
891         /* Free unnecessary surplus pages to the buddy allocator */
892         if (!list_empty(&surplus_list)) {
893                 spin_unlock(&hugetlb_lock);
894                 list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
895                         list_del(&page->lru);
896                         /*
897                          * The page has a reference count of zero already, so
898                          * call free_huge_page directly instead of using
899                          * put_page.  This must be done with hugetlb_lock
900                          * unlocked which is safe because free_huge_page takes
901                          * hugetlb_lock before deciding how to free the page.
902                          */
903                         free_huge_page(page);
904                 }
905                 spin_lock(&hugetlb_lock);
906         }
907
908         return ret;
909 }
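/*
 * Worked example (editorial note, not part of the original source): with
 * resv_huge_pages == 10, free_huge_pages == 8 and delta == 4, 'needed' is
 * (10 + 4) - 8 == 6, so six surplus pages are requested from the buddy
 * allocator with hugetlb_lock dropped; once the lock is retaken 'needed' is
 * recomputed in case the free or reserved counts changed, and any pages
 * beyond the final shortfall are handed straight back via free_huge_page().
 */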
910
911 /*
912  * When releasing a hugetlb pool reservation, any surplus pages that were
913  * allocated to satisfy the reservation must be explicitly freed if they were
914  * never used.
915  * Called with hugetlb_lock held.
916  */
917 static void return_unused_surplus_pages(struct hstate *h,
918                                         unsigned long unused_resv_pages)
919 {
920         unsigned long nr_pages;
921
922         /* Uncommit the reservation */
923         h->resv_huge_pages -= unused_resv_pages;
924
925         /* Cannot return gigantic pages currently */
926         if (h->order >= MAX_ORDER)
927                 return;
928
929         nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
930
931         /*
932          * We want to release as many surplus pages as possible, spread
933          * evenly across all nodes. Iterate across all nodes until we
934          * can no longer free unreserved surplus pages. This occurs when
935          * the nodes with surplus pages have no free pages.
936          * free_pool_huge_page() will balance the frees across the
937          * on-line nodes for us and will handle the hstate accounting.
938          */
939         while (nr_pages--) {
940                 if (!free_pool_huge_page(h, 1))
941                         break;
942         }
943 }
944
945 /*
946  * Determine if the huge page at addr within the vma has an associated
947  * reservation.  Where it does not, we will need to logically increase the
948  * reservation and actually increase the quota before an allocation can occur.
949  * Where any new reservation would be required the reservation change is
950  * prepared, but not committed.  Once the page has been quota'd, allocated
951  * and instantiated, the change should be committed via vma_commit_reservation.
952  * No action is required on failure.
953  */
954 static long vma_needs_reservation(struct hstate *h,
955                         struct vm_area_struct *vma, unsigned long addr)
956 {
957         struct address_space *mapping = vma->vm_file->f_mapping;
958         struct inode *inode = mapping->host;
959
960         if (vma->vm_flags & VM_MAYSHARE) {
961                 pgoff_t idx = vma_hugecache_offset(h, vma, addr);
962                 return region_chg(&inode->i_mapping->private_list,
963                                                         idx, idx + 1);
964
965         } else if (!is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
966                 return 1;
967
968         } else  {
969                 long err;
970                 pgoff_t idx = vma_hugecache_offset(h, vma, addr);
971                 struct resv_map *reservations = vma_resv_map(vma);
972
973                 err = region_chg(&reservations->regions, idx, idx + 1);
974                 if (err < 0)
975                         return err;
976                 return 0;
977         }
978 }
979 static void vma_commit_reservation(struct hstate *h,
980                         struct vm_area_struct *vma, unsigned long addr)
981 {
982         struct address_space *mapping = vma->vm_file->f_mapping;
983         struct inode *inode = mapping->host;
984
985         if (vma->vm_flags & VM_MAYSHARE) {
986                 pgoff_t idx = vma_hugecache_offset(h, vma, addr);
987                 region_add(&inode->i_mapping->private_list, idx, idx + 1);
988
989         } else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
990                 pgoff_t idx = vma_hugecache_offset(h, vma, addr);
991                 struct resv_map *reservations = vma_resv_map(vma);
992
993                 /* Mark this page used in the map. */
994                 region_add(&reservations->regions, idx, idx + 1);
995         }
996 }
997
998 static struct page *alloc_huge_page(struct vm_area_struct *vma,
999                                     unsigned long addr, int avoid_reserve)
1000 {
1001         struct hstate *h = hstate_vma(vma);
1002         struct page *page;
1003         struct address_space *mapping = vma->vm_file->f_mapping;
1004         struct inode *inode = mapping->host;
1005         long chg;
1006
1007         /*
1008          * Processes that did not create the mapping will have no reserves and
1009          * will not have accounted against quota. Check that the quota can be
1010          * will not have accounted against quota. Check that the quota charge
1011          * can be made before satisfying the allocation.
1012          * if no reserve mapping overlaps.
1013          */
1014         chg = vma_needs_reservation(h, vma, addr);
1015         if (chg < 0)
1016                 return ERR_PTR(chg);
1017         if (chg)
1018                 if (hugetlb_get_quota(inode->i_mapping, chg))
1019                         return ERR_PTR(-ENOSPC);
1020
1021         spin_lock(&hugetlb_lock);
1022         page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve);
1023         spin_unlock(&hugetlb_lock);
1024
1025         if (!page) {
1026                 page = alloc_buddy_huge_page(h, vma, addr);
1027                 if (!page) {
1028                         hugetlb_put_quota(inode->i_mapping, chg);
1029                         return ERR_PTR(-VM_FAULT_OOM);
1030                 }
1031         }
1032
1033         set_page_refcounted(page);
1034         set_page_private(page, (unsigned long) mapping);
1035
1036         vma_commit_reservation(h, vma, addr);
1037
1038         return page;
1039 }
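/*
 * Editorial note (not part of the original source): the 'chg' returned by
 * vma_needs_reservation() drives the quota handling above.  For a shared
 * mapping whose offset was already reserved at mmap() time chg is 0 and no
 * quota is charged here; for a MAP_PRIVATE mapping in a process other than
 * the reserve owner chg is 1, so one unit of filesystem quota is taken (and
 * given back if no page can be found).
 */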
1040
1041 int __weak alloc_bootmem_huge_page(struct hstate *h)
1042 {
1043         struct huge_bootmem_page *m;
1044         int nr_nodes = nodes_weight(node_online_map);
1045
1046         while (nr_nodes) {
1047                 void *addr;
1048
1049                 addr = __alloc_bootmem_node_nopanic(
1050                                 NODE_DATA(hstate_next_node_to_alloc(h)),
1051                                 huge_page_size(h), huge_page_size(h), 0);
1052
1053                 if (addr) {
1054                         /*
1055                          * Use the beginning of the huge page to store the
1056                          * huge_bootmem_page struct (until gather_bootmem
1057                          * puts them into the mem_map).
1058                          */
1059                         m = addr;
1060                         goto found;
1061                 }
1062                 nr_nodes--;
1063         }
1064         return 0;
1065
1066 found:
1067         BUG_ON((unsigned long)virt_to_phys(m) & (huge_page_size(h) - 1));
1068         /* Put them into a private list first because mem_map is not up yet */
1069         list_add(&m->list, &huge_boot_pages);
1070         m->hstate = h;
1071         return 1;
1072 }
1073
1074 static void prep_compound_huge_page(struct page *page, int order)
1075 {
1076         if (unlikely(order > (MAX_ORDER - 1)))
1077                 prep_compound_gigantic_page(page, order);
1078         else
1079                 prep_compound_page(page, order);
1080 }
1081
1082 /* Put bootmem huge pages into the standard lists after mem_map is up */
1083 static void __init gather_bootmem_prealloc(void)
1084 {
1085         struct huge_bootmem_page *m;
1086
1087         list_for_each_entry(m, &huge_boot_pages, list) {
1088                 struct page *page = virt_to_page(m);
1089                 struct hstate *h = m->hstate;
1090                 __ClearPageReserved(page);
1091                 WARN_ON(page_count(page) != 1);
1092                 prep_compound_huge_page(page, h->order);
1093                 prep_new_huge_page(h, page, page_to_nid(page));
1094         }
1095 }
1096
1097 static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
1098 {
1099         unsigned long i;
1100
1101         for (i = 0; i < h->max_huge_pages; ++i) {
1102                 if (h->order >= MAX_ORDER) {
1103                         if (!alloc_bootmem_huge_page(h))
1104                                 break;
1105                 } else if (!alloc_fresh_huge_page(h))
1106                         break;
1107         }
1108         h->max_huge_pages = i;
1109 }
1110
1111 static void __init hugetlb_init_hstates(void)
1112 {
1113         struct hstate *h;
1114
1115         for_each_hstate(h) {
1116                 /* oversize hugepages were init'ed in early boot */
1117                 if (h->order < MAX_ORDER)
1118                         hugetlb_hstate_alloc_pages(h);
1119         }
1120 }
1121
1122 static char * __init memfmt(char *buf, unsigned long n)
1123 {
1124         if (n >= (1UL << 30))
1125                 sprintf(buf, "%lu GB", n >> 30);
1126         else if (n >= (1UL << 20))
1127                 sprintf(buf, "%lu MB", n >> 20);
1128         else
1129                 sprintf(buf, "%lu KB", n >> 10);
1130         return buf;
1131 }
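/*
 * Editorial example (not part of the original source):
 *	memfmt(buf, 2UL << 20)  -> "2 MB"
 *	memfmt(buf, 1UL << 30)  -> "1 GB"
 *	memfmt(buf, 16UL << 10) -> "16 KB"
 * The buffer must be large enough for the formatted string (32 bytes is
 * what report_hugepages() passes).
 */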
1132
1133 static void __init report_hugepages(void)
1134 {
1135         struct hstate *h;
1136
1137         for_each_hstate(h) {
1138                 char buf[32];
1139                 printk(KERN_INFO "HugeTLB registered %s page size, "
1140                                  "pre-allocated %ld pages\n",
1141                         memfmt(buf, huge_page_size(h)),
1142                         h->free_huge_pages);
1143         }
1144 }
1145
1146 #ifdef CONFIG_HIGHMEM
1147 static void try_to_free_low(struct hstate *h, unsigned long count)
1148 {
1149         int i;
1150
1151         if (h->order >= MAX_ORDER)
1152                 return;
1153
1154         for (i = 0; i < MAX_NUMNODES; ++i) {
1155                 struct page *page, *next;
1156                 struct list_head *freel = &h->hugepage_freelists[i];
1157                 list_for_each_entry_safe(page, next, freel, lru) {
1158                         if (count >= h->nr_huge_pages)
1159                                 return;
1160                         if (PageHighMem(page))
1161                                 continue;
1162                         list_del(&page->lru);
1163                         update_and_free_page(h, page);
1164                         h->free_huge_pages--;
1165                         h->free_huge_pages_node[page_to_nid(page)]--;
1166                 }
1167         }
1168 }
1169 #else
1170 static inline void try_to_free_low(struct hstate *h, unsigned long count)
1171 {
1172 }
1173 #endif
1174
1175 /*
1176  * Increment or decrement surplus_huge_pages.  Keep node-specific counters
1177  * balanced by operating on them in a round-robin fashion.
1178  * Returns 1 if an adjustment was made.
1179  */
1180 static int adjust_pool_surplus(struct hstate *h, int delta)
1181 {
1182         int start_nid, next_nid;
1183         int ret = 0;
1184
1185         VM_BUG_ON(delta != -1 && delta != 1);
1186
1187         if (delta < 0)
1188                 start_nid = hstate_next_node_to_alloc(h);
1189         else
1190                 start_nid = hstate_next_node_to_free(h);
1191         next_nid = start_nid;
1192
1193         do {
1194                 int nid = next_nid;
1195                 if (delta < 0)  {
1196                         /*
1197                          * To shrink on this node, there must be a surplus page
1198                          */
1199                         if (!h->surplus_huge_pages_node[nid]) {
1200                                 next_nid = hstate_next_node_to_alloc(h);
1201                                 continue;
1202                         }
1203                 }
1204                 if (delta > 0) {
1205                         /*
1206                          * Surplus cannot exceed the total number of pages
1207                          */
1208                         if (h->surplus_huge_pages_node[nid] >=
1209                                                 h->nr_huge_pages_node[nid]) {
1210                                 next_nid = hstate_next_node_to_free(h);
1211                                 continue;
1212                         }
1213                 }
1214
1215                 h->surplus_huge_pages += delta;
1216                 h->surplus_huge_pages_node[nid] += delta;
1217                 ret = 1;
1218                 break;
1219         } while (next_nid != start_nid);
1220
1221         return ret;
1222 }
1223
1224 #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
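/*
 * Editorial example (not part of the original source): with nr_huge_pages
 * == 12 and surplus_huge_pages == 2, persistent_huge_pages(h) is 10 -- the
 * pages that stay in the pool once all surplus pages fall out of use, which
 * is the quantity set_max_huge_pages() below drives towards 'count'.
 */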
1225 static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count)
1226 {
1227         unsigned long min_count, ret;
1228
1229         if (h->order >= MAX_ORDER)
1230                 return h->max_huge_pages;
1231
1232         /*
1233          * Increase the pool size
1234          * First take pages out of surplus state.  Then make up the
1235          * remaining difference by allocating fresh huge pages.
1236          *
1237          * We might race with alloc_buddy_huge_page() here and be unable
1238          * to convert a surplus huge page to a normal huge page. That is
1239          * not critical, though, it just means the overall size of the
1240          * pool might be one hugepage larger than it needs to be, but
1241          * within all the constraints specified by the sysctls.
1242          */
1243         spin_lock(&hugetlb_lock);
1244         while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
1245                 if (!adjust_pool_surplus(h, -1))
1246                         break;
1247         }
1248
1249         while (count > persistent_huge_pages(h)) {
1250                 /*
1251                  * If this allocation races such that we no longer need the
1252                  * page, free_huge_page will handle it by freeing the page
1253                  * and reducing the surplus.
1254                  */
1255                 spin_unlock(&hugetlb_lock);
1256                 ret = alloc_fresh_huge_page(h);
1257                 spin_lock(&hugetlb_lock);
1258                 if (!ret)
1259                         goto out;
1260
1261         }
1262
1263         /*
1264          * Decrease the pool size
1265          * First return free pages to the buddy allocator (being careful
1266          * to keep enough around to satisfy reservations).  Then place
1267          * pages into surplus state as needed so the pool will shrink
1268          * to the desired size as pages become free.
1269          *
1270          * By placing pages into the surplus state independent of the
1271          * overcommit value, we are allowing the surplus pool size to
1272          * exceed overcommit. There are few sane options here. Since
1273          * alloc_buddy_huge_page() is checking the global counter,
1274          * though, we'll note that we're not allowed to exceed surplus
1275          * and won't grow the pool anywhere else. Not until one of the
1276          * sysctls are changed, or the surplus pages go out of use.
1277          */
1278         min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
1279         min_count = max(count, min_count);
1280         try_to_free_low(h, min_count);
1281         while (min_count < persistent_huge_pages(h)) {
1282                 if (!free_pool_huge_page(h, 0))
1283                         break;
1284         }
1285         while (count < persistent_huge_pages(h)) {
1286                 if (!adjust_pool_surplus(h, 1))
1287                         break;
1288         }
1289 out:
1290         ret = persistent_huge_pages(h);
1291         spin_unlock(&hugetlb_lock);
1292         return ret;
1293 }
1294
1295 #define HSTATE_ATTR_RO(_name) \
1296         static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
1297
1298 #define HSTATE_ATTR(_name) \
1299         static struct kobj_attribute _name##_attr = \
1300                 __ATTR(_name, 0644, _name##_show, _name##_store)
1301
1302 static struct kobject *hugepages_kobj;
1303 static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
1304
1305 static struct hstate *kobj_to_hstate(struct kobject *kobj)
1306 {
1307         int i;
1308         for (i = 0; i < HUGE_MAX_HSTATE; i++)
1309                 if (hstate_kobjs[i] == kobj)
1310                         return &hstates[i];
1311         BUG();
1312         return NULL;
1313 }
1314
1315 static ssize_t nr_hugepages_show(struct kobject *kobj,
1316                                         struct kobj_attribute *attr, char *buf)
1317 {
1318         struct hstate *h = kobj_to_hstate(kobj);
1319         return sprintf(buf, "%lu\n", h->nr_huge_pages);
1320 }
1321 static ssize_t nr_hugepages_store(struct kobject *kobj,
1322                 struct kobj_attribute *attr, const char *buf, size_t count)
1323 {
1324         int err;
1325         unsigned long input;
1326         struct hstate *h = kobj_to_hstate(kobj);
1327
1328         err = strict_strtoul(buf, 10, &input);
1329         if (err)
1330                 return err;
1331
1332         h->max_huge_pages = set_max_huge_pages(h, input);
1333
1334         return count;
1335 }
1336 HSTATE_ATTR(nr_hugepages);
1337
1338 static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
1339                                         struct kobj_attribute *attr, char *buf)
1340 {
1341         struct hstate *h = kobj_to_hstate(kobj);
1342         return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
1343 }
1344 static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
1345                 struct kobj_attribute *attr, const char *buf, size_t count)
1346 {
1347         int err;
1348         unsigned long input;
1349         struct hstate *h = kobj_to_hstate(kobj);
1350
1351         err = strict_strtoul(buf, 10, &input);
1352         if (err)
1353                 return err;
1354
1355         spin_lock(&hugetlb_lock);
1356         h->nr_overcommit_huge_pages = input;
1357         spin_unlock(&hugetlb_lock);
1358
1359         return count;
1360 }
1361 HSTATE_ATTR(nr_overcommit_hugepages);
1362
1363 static ssize_t free_hugepages_show(struct kobject *kobj,
1364                                         struct kobj_attribute *attr, char *buf)
1365 {
1366         struct hstate *h = kobj_to_hstate(kobj);
1367         return sprintf(buf, "%lu\n", h->free_huge_pages);
1368 }
1369 HSTATE_ATTR_RO(free_hugepages);
1370
1371 static ssize_t resv_hugepages_show(struct kobject *kobj,
1372                                         struct kobj_attribute *attr, char *buf)
1373 {
1374         struct hstate *h = kobj_to_hstate(kobj);
1375         return sprintf(buf, "%lu\n", h->resv_huge_pages);
1376 }
1377 HSTATE_ATTR_RO(resv_hugepages);
1378
1379 static ssize_t surplus_hugepages_show(struct kobject *kobj,
1380                                         struct kobj_attribute *attr, char *buf)
1381 {
1382         struct hstate *h = kobj_to_hstate(kobj);
1383         return sprintf(buf, "%lu\n", h->surplus_huge_pages);
1384 }
1385 HSTATE_ATTR_RO(surplus_hugepages);
1386
1387 static struct attribute *hstate_attrs[] = {
1388         &nr_hugepages_attr.attr,
1389         &nr_overcommit_hugepages_attr.attr,
1390         &free_hugepages_attr.attr,
1391         &resv_hugepages_attr.attr,
1392         &surplus_hugepages_attr.attr,
1393         NULL,
1394 };
1395
1396 static struct attribute_group hstate_attr_group = {
1397         .attrs = hstate_attrs,
1398 };
1399
1400 static int __init hugetlb_sysfs_add_hstate(struct hstate *h)
1401 {
1402         int retval;
1403
1404         hstate_kobjs[h - hstates] = kobject_create_and_add(h->name,
1405                                                         hugepages_kobj);
1406         if (!hstate_kobjs[h - hstates])
1407                 return -ENOMEM;
1408
1409         retval = sysfs_create_group(hstate_kobjs[h - hstates],
1410                                                         &hstate_attr_group);
1411         if (retval)
1412                 kobject_put(hstate_kobjs[h - hstates]);
1413
1414         return retval;
1415 }
1416
1417 static void __init hugetlb_sysfs_init(void)
1418 {
1419         struct hstate *h;
1420         int err;
1421
1422         hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
1423         if (!hugepages_kobj)
1424                 return;
1425
1426         for_each_hstate(h) {
1427                 err = hugetlb_sysfs_add_hstate(h);
1428                 if (err)
1429                         printk(KERN_ERR "Hugetlb: Unable to add hstate %s\n",
1430                                                                 h->name);
1431         }
1432 }
1433
1434 static void __exit hugetlb_exit(void)
1435 {
1436         struct hstate *h;
1437
1438         for_each_hstate(h) {
1439                 kobject_put(hstate_kobjs[h - hstates]);
1440         }
1441
1442         kobject_put(hugepages_kobj);
1443 }
1444 module_exit(hugetlb_exit);
1445
1446 static int __init hugetlb_init(void)
1447 {
1448         /* Some platforms decide whether they support huge pages at boot
1449          * time. On these, such as powerpc, HPAGE_SHIFT is set to 0 when
1450          * there is no such support.
1451          */
1452         if (HPAGE_SHIFT == 0)
1453                 return 0;
1454
1455         if (!size_to_hstate(default_hstate_size)) {
1456                 default_hstate_size = HPAGE_SIZE;
1457                 if (!size_to_hstate(default_hstate_size))
1458                         hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
1459         }
1460         default_hstate_idx = size_to_hstate(default_hstate_size) - hstates;
1461         if (default_hstate_max_huge_pages)
1462                 default_hstate.max_huge_pages = default_hstate_max_huge_pages;
1463
1464         hugetlb_init_hstates();
1465
1466         gather_bootmem_prealloc();
1467
1468         report_hugepages();
1469
1470         hugetlb_sysfs_init();
1471
1472         return 0;
1473 }
1474 module_init(hugetlb_init);
1475
1476 /* Should be called on processing a hugepagesz=... option */
1477 void __init hugetlb_add_hstate(unsigned order)
1478 {
1479         struct hstate *h;
1480         unsigned long i;
1481
1482         if (size_to_hstate(PAGE_SIZE << order)) {
1483                 printk(KERN_WARNING "hugepagesz= specified twice, ignoring\n");
1484                 return;
1485         }
1486         BUG_ON(max_hstate >= HUGE_MAX_HSTATE);
1487         BUG_ON(order == 0);
1488         h = &hstates[max_hstate++];
1489         h->order = order;
1490         h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);
1491         h->nr_huge_pages = 0;
1492         h->free_huge_pages = 0;
1493         for (i = 0; i < MAX_NUMNODES; ++i)
1494                 INIT_LIST_HEAD(&h->hugepage_freelists[i]);
1495         h->next_nid_to_alloc = first_node(node_online_map);
1496         h->next_nid_to_free = first_node(node_online_map);
1497         snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
1498                                         huge_page_size(h)/1024);
1499
1500         parsed_hstate = h;
1501 }
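/*
 * Worked example (editorial note, assuming x86_64 with PAGE_SHIFT == 12):
 * a "hugepagesz=1G" boot option ends up calling hugetlb_add_hstate(18),
 * giving huge_page_size(h) == 4kB << 18 == 1GB and the hstate name
 * "hugepages-1048576kB"; "hugepagesz=2M" gives order 9 and
 * "hugepages-2048kB".
 */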
1502
1503 static int __init hugetlb_nrpages_setup(char *s)
1504 {
1505         unsigned long *mhp;
1506         static unsigned long *last_mhp;
1507
1508         /*
1509          * !max_hstate means we haven't parsed a hugepagesz= parameter yet,
1510          * so this hugepages= parameter goes to the "default hstate".
1511          */
1512         if (!max_hstate)
1513                 mhp = &default_hstate_max_huge_pages;
1514         else
1515                 mhp = &parsed_hstate->max_huge_pages;
1516
1517         if (mhp == last_mhp) {
1518                 printk(KERN_WARNING "hugepages= specified twice without "
1519                         "interleaving hugepagesz=, ignoring\n");
1520                 return 1;
1521         }
1522
1523         if (sscanf(s, "%lu", mhp) <= 0)
1524                 *mhp = 0;
1525
1526         /*
1527          * Global state is always initialized later in hugetlb_init.
1528          * But pages for hstates of order >= MAX_ORDER must be allocated
1529          * here, early, while the bootmem allocator is still usable.
1530          */
1531         if (max_hstate && parsed_hstate->order >= MAX_ORDER)
1532                 hugetlb_hstate_alloc_pages(parsed_hstate);
1533
1534         last_mhp = mhp;
1535
1536         return 1;
1537 }
1538 __setup("hugepages=", hugetlb_nrpages_setup);
1539
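     /* Parse the default_hugepagesz= command line option. */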
1540 static int __init hugetlb_default_setup(char *s)
1541 {
1542         default_hstate_size = memparse(s, &s);
1543         return 1;
1544 }
1545 __setup("default_hugepagesz=", hugetlb_default_setup);
1546
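     /*
      * Sum a per-node counter array (e.g. free_huge_pages_node[]) over the
      * nodes allowed in the current task's cpuset.
      */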
1547 static unsigned int cpuset_mems_nr(unsigned int *array)
1548 {
1549         int node;
1550         unsigned int nr = 0;
1551
1552         for_each_node_mask(node, cpuset_current_mems_allowed)
1553                 nr += array[node];
1554
1555         return nr;
1556 }
1557
1558 #ifdef CONFIG_SYSCTL
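     /*
      * Handler for the nr_hugepages sysctl: a read reports the current size
      * of the default hstate's persistent pool, a write resizes it via
      * set_max_huge_pages().
      */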
1559 int hugetlb_sysctl_handler(struct ctl_table *table, int write,
1560                            void __user *buffer,
1561                            size_t *length, loff_t *ppos)
1562 {
1563         struct hstate *h = &default_hstate;
1564         unsigned long tmp;
1565
1566         if (!write)
1567                 tmp = h->max_huge_pages;
1568
1569         table->data = &tmp;
1570         table->maxlen = sizeof(unsigned long);
1571         proc_doulongvec_minmax(table, write, buffer, length, ppos);
1572
1573         if (write)
1574                 h->max_huge_pages = set_max_huge_pages(h, tmp);
1575
1576         return 0;
1577 }
1578
1579 int hugetlb_treat_movable_handler(struct ctl_table *table, int write,
1580                         void __user *buffer,
1581                         size_t *length, loff_t *ppos)
1582 {
1583         proc_dointvec(table, write, buffer, length, ppos);
1584         if (hugepages_treat_as_movable)
1585                 htlb_alloc_mask = GFP_HIGHUSER_MOVABLE;
1586         else
1587                 htlb_alloc_mask = GFP_HIGHUSER;
1588         return 0;
1589 }
1590
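     /*
      * Handler for the nr_overcommit_hugepages sysctl: sets the limit on how
      * many surplus huge pages may be allocated from the buddy allocator on
      * top of the persistent pool.
      */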
1591 int hugetlb_overcommit_handler(struct ctl_table *table, int write,
1592                         void __user *buffer,
1593                         size_t *length, loff_t *ppos)
1594 {
1595         struct hstate *h = &default_hstate;
1596         unsigned long tmp;
1597
1598         if (!write)
1599                 tmp = h->nr_overcommit_huge_pages;
1600
1601         table->data = &tmp;
1602         table->maxlen = sizeof(unsigned long);
1603         proc_doulongvec_minmax(table, write, buffer, length, ppos);
1604
1605         if (write) {
1606                 spin_lock(&hugetlb_lock);
1607                 h->nr_overcommit_huge_pages = tmp;
1608                 spin_unlock(&hugetlb_lock);
1609         }
1610
1611         return 0;
1612 }
1613
1614 #endif /* CONFIG_SYSCTL */
1615
1616 void hugetlb_report_meminfo(struct seq_file *m)
1617 {
1618         struct hstate *h = &default_hstate;
1619         seq_printf(m,
1620                         "HugePages_Total:   %5lu\n"
1621                         "HugePages_Free:    %5lu\n"
1622                         "HugePages_Rsvd:    %5lu\n"
1623                         "HugePages_Surp:    %5lu\n"
1624                         "Hugepagesize:   %8lu kB\n",
1625                         h->nr_huge_pages,
1626                         h->free_huge_pages,
1627                         h->resv_huge_pages,
1628                         h->surplus_huge_pages,
1629                         1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
1630 }
1631
1632 int hugetlb_report_node_meminfo(int nid, char *buf)
1633 {
1634         struct hstate *h = &default_hstate;
1635         return sprintf(buf,
1636                 "Node %d HugePages_Total: %5u\n"
1637                 "Node %d HugePages_Free:  %5u\n"
1638                 "Node %d HugePages_Surp:  %5u\n",
1639                 nid, h->nr_huge_pages_node[nid],
1640                 nid, h->free_huge_pages_node[nid],
1641                 nid, h->surplus_huge_pages_node[nid]);
1642 }
1643
1644 /* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
1645 unsigned long hugetlb_total_pages(void)
1646 {
1647         struct hstate *h = &default_hstate;
1648         return h->nr_huge_pages * pages_per_huge_page(h);
1649 }
1650
1651 static int hugetlb_acct_memory(struct hstate *h, long delta)
1652 {
1653         int ret = -ENOMEM;
1654
1655         spin_lock(&hugetlb_lock);
1656         /*
1657          * When cpusets are configured, they break strict hugetlb page
1658          * reservation because the accounting is done on a global variable.
1659          * Such a reservation is meaningless in the presence of cpusets
1660          * because it is not checked against the page availability of the
1661          * current cpuset; the application can still be OOM-killed by the
1662          * kernel if the cpuset it runs in has no free hugetlb pages.
1663          * Enforcing strict accounting with cpusets is almost impossible
1664          * (or too ugly) because cpusets are too fluid: tasks and memory
1665          * nodes can be moved between cpusets dynamically.
1666          *
1667          * Changing the semantics of shared hugetlb mappings under cpusets
1668          * is undesirable. However, to preserve some of those semantics, we
1669          * fall back to checking against the current free page availability
1670          * as a best effort, hopefully minimizing the impact of the semantic
1671          * change that cpusets introduce.
1672          */
1673         if (delta > 0) {
1674                 if (gather_surplus_pages(h, delta) < 0)
1675                         goto out;
1676
1677                 if (delta > cpuset_mems_nr(h->free_huge_pages_node)) {
1678                         return_unused_surplus_pages(h, delta);
1679                         goto out;
1680                 }
1681         }
1682
1683         ret = 0;
1684         if (delta < 0)
1685                 return_unused_surplus_pages(h, (unsigned long) -delta);
1686
1687 out:
1688         spin_unlock(&hugetlb_lock);
1689         return ret;
1690 }
1691
1692 static void hugetlb_vm_op_open(struct vm_area_struct *vma)
1693 {
1694         struct resv_map *reservations = vma_resv_map(vma);
1695
1696         /*
1697          * This new VMA should share its sibling's reservation map if present.
1698          * The VMA will only ever have a valid reservation map pointer when
1699          * it is being copied from another, still-existing VMA.  As that VMA
1700          * has a reference to the reservation map it cannot disappear until
1701          * after this open call completes.  It is therefore safe to take a
1702          * new reference here without additional locking.
1703          */
1704         if (reservations)
1705                 kref_get(&reservations->refs);
1706 }
1707
1708 static void hugetlb_vm_op_close(struct vm_area_struct *vma)
1709 {
1710         struct hstate *h = hstate_vma(vma);
1711         struct resv_map *reservations = vma_resv_map(vma);
1712         unsigned long reserve;
1713         unsigned long start;
1714         unsigned long end;
1715
1716         if (reservations) {
1717                 start = vma_hugecache_offset(h, vma, vma->vm_start);
1718                 end = vma_hugecache_offset(h, vma, vma->vm_end);
1719
1720                 reserve = (end - start) -
1721                         region_count(&reservations->regions, start, end);
1722
1723                 kref_put(&reservations->refs, resv_map_release);
1724
1725                 if (reserve) {
1726                         hugetlb_acct_memory(h, -reserve);
1727                         hugetlb_put_quota(vma->vm_file->f_mapping, reserve);
1728                 }
1729         }
1730 }
1731
1732 /*
1733  * We cannot handle pagefaults against hugetlb pages at all.  They cause
1734  * handle_mm_fault() to try to instantiate regular-sized pages in the
1735  * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we get
1736  * this far.
1737  */
1738 static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1739 {
1740         BUG();
1741         return 0;
1742 }
1743
1744 const struct vm_operations_struct hugetlb_vm_ops = {
1745         .fault = hugetlb_vm_op_fault,
1746         .open = hugetlb_vm_op_open,
1747         .close = hugetlb_vm_op_close,
1748 };
1749
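     /*
      * Build a huge PTE for @page: writable mappings get a dirty, writable
      * entry, read-only mappings a write-protected one; the entry is always
      * marked young and huge.
      */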
1750 static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
1751                                 int writable)
1752 {
1753         pte_t entry;
1754
1755         if (writable) {
1756                 entry =
1757                     pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
1758         } else {
1759                 entry = huge_pte_wrprotect(mk_pte(page, vma->vm_page_prot));
1760         }
1761         entry = pte_mkyoung(entry);
1762         entry = pte_mkhuge(entry);
1763
1764         return entry;
1765 }
1766
1767 static void set_huge_ptep_writable(struct vm_area_struct *vma,
1768                                    unsigned long address, pte_t *ptep)
1769 {
1770         pte_t entry;
1771
1772         entry = pte_mkwrite(pte_mkdirty(huge_ptep_get(ptep)));
1773         if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1)) {
1774                 update_mmu_cache(vma, address, entry);
1775         }
1776 }
1777
1778
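     /*
      * Copy the huge PTEs of a VMA from the parent mm to the child at fork()
      * time.  For private writable mappings the source entries are
      * write-protected as they are copied, so the first write triggers COW.
      */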
1779 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
1780                             struct vm_area_struct *vma)
1781 {
1782         pte_t *src_pte, *dst_pte, entry;
1783         struct page *ptepage;
1784         unsigned long addr;
1785         int cow;
1786         struct hstate *h = hstate_vma(vma);
1787         unsigned long sz = huge_page_size(h);
1788
1789         cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
1790
1791         for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
1792                 src_pte = huge_pte_offset(src, addr);
1793                 if (!src_pte)
1794                         continue;
1795                 dst_pte = huge_pte_alloc(dst, addr, sz);
1796                 if (!dst_pte)
1797                         goto nomem;
1798
1799                 /* If the pagetables are shared don't copy or take references */
1800                 if (dst_pte == src_pte)
1801                         continue;
1802
1803                 spin_lock(&dst->page_table_lock);
1804                 spin_lock_nested(&src->page_table_lock, SINGLE_DEPTH_NESTING);
1805                 if (!huge_pte_none(huge_ptep_get(src_pte))) {
1806                         if (cow)
1807                                 huge_ptep_set_wrprotect(src, addr, src_pte);
1808                         entry = huge_ptep_get(src_pte);
1809                         ptepage = pte_page(entry);
1810                         get_page(ptepage);
1811                         set_huge_pte_at(dst, addr, dst_pte, entry);
1812                 }
1813                 spin_unlock(&src->page_table_lock);
1814                 spin_unlock(&dst->page_table_lock);
1815         }
1816         return 0;
1817
1818 nomem:
1819         return -ENOMEM;
1820 }
1821
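     /*
      * Unmap the huge pages in [start, end), gathering them on a local list
      * and releasing their references only after the TLB has been flushed.
      * Callers are expected to hold the file's i_mmap_lock, which
      * unmap_hugepage_range() below takes.
      */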
1822 void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
1823                             unsigned long end, struct page *ref_page)
1824 {
1825         struct mm_struct *mm = vma->vm_mm;
1826         unsigned long address;
1827         pte_t *ptep;
1828         pte_t pte;
1829         struct page *page;
1830         struct page *tmp;
1831         struct hstate *h = hstate_vma(vma);
1832         unsigned long sz = huge_page_size(h);
1833
1834         /*
1835          * A page gathering list, protected by the per-file i_mmap_lock. The
1836          * lock is used to avoid list corruption from multiple unmappings
1837          * of the same page, since we are using page->lru.
1838          */
1839         LIST_HEAD(page_list);
1840
1841         WARN_ON(!is_vm_hugetlb_page(vma));
1842         BUG_ON(start & ~huge_page_mask(h));
1843         BUG_ON(end & ~huge_page_mask(h));
1844
1845         mmu_notifier_invalidate_range_start(mm, start, end);
1846         spin_lock(&mm->page_table_lock);
1847         for (address = start; address < end; address += sz) {
1848                 ptep = huge_pte_offset(mm, address);
1849                 if (!ptep)
1850                         continue;
1851
1852                 if (huge_pmd_unshare(mm, &address, ptep))
1853                         continue;
1854
1855                 /*
1856                  * If a reference page is supplied, it is because a specific
1857                  * page is being unmapped, not a range. Ensure the page we
1858                  * are about to unmap is the actual page of interest.
1859                  */
1860                 if (ref_page) {
1861                         pte = huge_ptep_get(ptep);
1862                         if (huge_pte_none(pte))
1863                                 continue;
1864                         page = pte_page(pte);
1865                         if (page != ref_page)
1866                                 continue;
1867
1868                         /*
1869                          * Mark the VMA as having unmapped its page so that
1870                          * future faults in this VMA will fail rather than
1871                          * looking like data was lost.
1872                          */
1873                         set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
1874                 }
1875
1876                 pte = huge_ptep_get_and_clear(mm, address, ptep);
1877                 if (huge_pte_none(pte))
1878                         continue;
1879
1880                 page = pte_page(pte);
1881                 if (pte_dirty(pte))
1882                         set_page_dirty(page);
1883                 list_add(&page->lru, &page_list);
1884         }
1885         spin_unlock(&mm->page_table_lock);
1886         flush_tlb_range(vma, start, end);
1887         mmu_notifier_invalidate_range_end(mm, start, end);
1888         list_for_each_entry_safe(page, tmp, &page_list, lru) {
1889                 list_del(&page->lru);
1890                 put_page(page);
1891         }
1892 }
1893
1894 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
1895                           unsigned long end, struct page *ref_page)
1896 {
1897         spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
1898         __unmap_hugepage_range(vma, start, end, ref_page);
1899         spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
1900 }
1901
1902 /*
1903  * This is called when the original mapper fails to COW a MAP_PRIVATE
1904  * mapping it owns the reserve page for. The intention is to unmap the page
1905  * from other VMAs and let the children be SIGKILLed if they fault in the
1906  * same region.
1907  */
1908 static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
1909                                 struct page *page, unsigned long address)
1910 {
1911         struct hstate *h = hstate_vma(vma);
1912         struct vm_area_struct *iter_vma;
1913         struct address_space *mapping;
1914         struct prio_tree_iter iter;
1915         pgoff_t pgoff;
1916
1917         /*
1918          * vm_pgoff is in PAGE_SIZE units, hence the different calculation
1919          * from page cache lookup which is in HPAGE_SIZE units.
1920          */
1921         address = address & huge_page_mask(h);
1922         pgoff = ((address - vma->vm_start) >> PAGE_SHIFT)
1923                 + (vma->vm_pgoff >> PAGE_SHIFT);
1924         mapping = (struct address_space *)page_private(page);
1925
1926         vma_prio_tree_foreach(iter_vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
1927                 /* Do not unmap the current VMA */
1928                 if (iter_vma == vma)
1929                         continue;
1930
1931                 /*
1932                  * Unmap the page from other VMAs without their own reserves.
1933                  * They get marked to be SIGKILLed if they fault in these
1934                  * areas. This is because a future no-page fault on this VMA
1935                  * could insert a zeroed page instead of the data that existed
1936                  * at the time of fork, which would look like data corruption.
1937                  */
1938                 if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
1939                         unmap_hugepage_range(iter_vma,
1940                                 address, address + huge_page_size(h),
1941                                 page);
1942         }
1943
1944         return 1;
1945 }
1946
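     /*
      * Break COW on a huge page: allocate a new huge page, copy the old data
      * into it and install it in the page tables.  Called with
      * mm->page_table_lock held; the lock is dropped and re-taken around the
      * copy.
      */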
1947 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
1948                         unsigned long address, pte_t *ptep, pte_t pte,
1949                         struct page *pagecache_page)
1950 {
1951         struct hstate *h = hstate_vma(vma);
1952         struct page *old_page, *new_page;
1953         int avoidcopy;
1954         int outside_reserve = 0;
1955
1956         old_page = pte_page(pte);
1957
1958 retry_avoidcopy:
1959         /* If no-one else is actually using this page, avoid the copy
1960          * and just make the page writable */
1961         avoidcopy = (page_count(old_page) == 1);
1962         if (avoidcopy) {
1963                 set_huge_ptep_writable(vma, address, ptep);
1964                 return 0;
1965         }
1966
1967         /*
1968          * If the process that created a MAP_PRIVATE mapping is about to
1969          * perform a COW due to a shared page count, attempt to satisfy
1970          * the allocation without using the existing reserves. The pagecache
1971          * page is used to determine if the reserve at this address was
1972          * consumed or not. If reserves were used, a partial faulted mapping
1973          * consumed or not. If reserves were used, a partially faulted mapping
1974          * of the full address range.
1975          */
1976         if (!(vma->vm_flags & VM_MAYSHARE) &&
1977                         is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
1978                         old_page != pagecache_page)
1979                 outside_reserve = 1;
1980
1981         page_cache_get(old_page);
1982         new_page = alloc_huge_page(vma, address, outside_reserve);
1983
1984         if (IS_ERR(new_page)) {
1985                 page_cache_release(old_page);
1986
1987                 /*
1988                  * If a process owning a MAP_PRIVATE mapping fails to COW,
1989                  * it is due to references held by a child and an insufficient
1990                  * huge page pool. To guarantee the original mapper's
1991                  * reliability, unmap the page from child processes. The child
1992                  * may get SIGKILLed if it later faults.
1993                  */
1994                 if (outside_reserve) {
1995                         BUG_ON(huge_pte_none(pte));
1996                         if (unmap_ref_private(mm, vma, old_page, address)) {
1997                                 BUG_ON(page_count(old_page) != 1);
1998                                 BUG_ON(huge_pte_none(pte));
1999                                 goto retry_avoidcopy;
2000                         }
2001                         WARN_ON_ONCE(1);
2002                 }
2003
2004                 return -PTR_ERR(new_page);
2005         }
2006
2007         spin_unlock(&mm->page_table_lock);
2008         copy_huge_page(new_page, old_page, address, vma);
2009         __SetPageUptodate(new_page);
2010         spin_lock(&mm->page_table_lock);
2011
2012         ptep = huge_pte_offset(mm, address & huge_page_mask(h));
2013         if (likely(pte_same(huge_ptep_get(ptep), pte))) {
2014                 /* Break COW */
2015                 huge_ptep_clear_flush(vma, address, ptep);
2016                 set_huge_pte_at(mm, address, ptep,
2017                                 make_huge_pte(vma, new_page, 1));
2018                 /* Make the old page be freed below */
2019                 new_page = old_page;
2020         }
2021         page_cache_release(new_page);
2022         page_cache_release(old_page);
2023         return 0;
2024 }
2025
2026 /* Return the pagecache page at a given address within a VMA */
2027 static struct page *hugetlbfs_pagecache_page(struct hstate *h,
2028                         struct vm_area_struct *vma, unsigned long address)
2029 {
2030         struct address_space *mapping;
2031         pgoff_t idx;
2032
2033         mapping = vma->vm_file->f_mapping;
2034         idx = vma_hugecache_offset(h, vma, address);
2035
2036         return find_lock_page(mapping, idx);
2037 }
2038
2039 /*
2040  * Return whether there is a pagecache page backing the given address in the VMA.
2041  * Caller follow_hugetlb_page() holds page_table_lock, so we cannot lock_page.
2042  */
2043 static bool hugetlbfs_pagecache_present(struct hstate *h,
2044                         struct vm_area_struct *vma, unsigned long address)
2045 {
2046         struct address_space *mapping;
2047         pgoff_t idx;
2048         struct page *page;
2049
2050         mapping = vma->vm_file->f_mapping;
2051         idx = vma_hugecache_offset(h, vma, address);
2052
2053         page = find_get_page(mapping, idx);
2054         if (page)
2055                 put_page(page);
2056         return page != NULL;
2057 }
2058
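     /*
      * Handle a fault on a not-yet-mapped huge page: find (or allocate and
      * insert) the backing page in the hugetlbfs page cache and install a
      * huge PTE for it.  Called with the hugetlb_instantiation_mutex held.
      */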
2059 static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
2060                         unsigned long address, pte_t *ptep, unsigned int flags)
2061 {
2062         struct hstate *h = hstate_vma(vma);
2063         int ret = VM_FAULT_SIGBUS;
2064         pgoff_t idx;
2065         unsigned long size;
2066         struct page *page;
2067         struct address_space *mapping;
2068         pte_t new_pte;
2069
2070         /*
2071          * Currently, we are forced to kill the process in the event the
2072          * original mapper has unmapped pages from the child due to a failed
2073          * COW. Warn that such a situation has occured as it may not be obvious
2074          */
2075         if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
2076                 printk(KERN_WARNING
2077                         "PID %d killed due to inadequate hugepage pool\n",
2078                         current->pid);
2079                 return ret;
2080         }
2081
2082         mapping = vma->vm_file->f_mapping;
2083         idx = vma_hugecache_offset(h, vma, address);
2084
2085         /*
2086          * Use page lock to guard against racing truncation
2087          * before we get page_table_lock.
2088          */
2089 retry:
2090         page = find_lock_page(mapping, idx);
2091         if (!page) {
2092                 size = i_size_read(mapping->host) >> huge_page_shift(h);
2093                 if (idx >= size)
2094                         goto out;
2095                 page = alloc_huge_page(vma, address, 0);
2096                 if (IS_ERR(page)) {
2097                         ret = -PTR_ERR(page);
2098                         goto out;
2099                 }
2100                 clear_huge_page(page, address, huge_page_size(h));
2101                 __SetPageUptodate(page);
2102
2103                 if (vma->vm_flags & VM_MAYSHARE) {
2104                         int err;
2105                         struct inode *inode = mapping->host;
2106
2107                         err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
2108                         if (err) {
2109                                 put_page(page);
2110                                 if (err == -EEXIST)
2111                                         goto retry;
2112                                 goto out;
2113                         }
2114
2115                         spin_lock(&inode->i_lock);
2116                         inode->i_blocks += blocks_per_huge_page(h);
2117                         spin_unlock(&inode->i_lock);
2118                 } else
2119                         lock_page(page);
2120         }
2121
2122         /*
2123          * If we are going to COW a private mapping later, we examine the
2124          * pending reservations for this page now. This will ensure that
2125          * any allocations necessary to record that reservation occur outside
2126          * the spinlock.
2127          */
2128         if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED))
2129                 if (vma_needs_reservation(h, vma, address) < 0) {
2130                         ret = VM_FAULT_OOM;
2131                         goto backout_unlocked;
2132                 }
2133
2134         spin_lock(&mm->page_table_lock);
2135         size = i_size_read(mapping->host) >> huge_page_shift(h);
2136         if (idx >= size)
2137                 goto backout;
2138
2139         ret = 0;
2140         if (!huge_pte_none(huge_ptep_get(ptep)))
2141                 goto backout;
2142
2143         new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
2144                                 && (vma->vm_flags & VM_SHARED)));
2145         set_huge_pte_at(mm, address, ptep, new_pte);
2146
2147         if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
2148                 /* Optimization, do the COW without a second fault */
2149                 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
2150         }
2151
2152         spin_unlock(&mm->page_table_lock);
2153         unlock_page(page);
2154 out:
2155         return ret;
2156
2157 backout:
2158         spin_unlock(&mm->page_table_lock);
2159 backout_unlocked:
2160         unlock_page(page);
2161         put_page(page);
2162         goto out;
2163 }
2164
2165 int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
2166                         unsigned long address, unsigned int flags)
2167 {
2168         pte_t *ptep;
2169         pte_t entry;
2170         int ret;
2171         struct page *pagecache_page = NULL;
2172         static DEFINE_MUTEX(hugetlb_instantiation_mutex);
2173         struct hstate *h = hstate_vma(vma);
2174
2175         ptep = huge_pte_alloc(mm, address, huge_page_size(h));
2176         if (!ptep)
2177                 return VM_FAULT_OOM;
2178
2179         /*
2180          * Serialize hugepage allocation and instantiation, so that we don't
2181          * get spurious allocation failures if two CPUs race to instantiate
2182          * the same page in the page cache.
2183          */
2184         mutex_lock(&hugetlb_instantiation_mutex);
2185         entry = huge_ptep_get(ptep);
2186         if (huge_pte_none(entry)) {
2187                 ret = hugetlb_no_page(mm, vma, address, ptep, flags);
2188                 goto out_mutex;
2189         }
2190
2191         ret = 0;
2192
2193         /*
2194          * If we are going to COW the mapping later, we examine the pending
2195          * reservations for this page now. This will ensure that any
2196          * allocations necessary to record that reservation occur outside the
2197          * spinlock. For private mappings, we also lookup the pagecache
2198          * page now as it is used to determine if a reservation has been
2199          * consumed.
2200          */
2201         if ((flags & FAULT_FLAG_WRITE) && !pte_write(entry)) {
2202                 if (vma_needs_reservation(h, vma, address) < 0) {
2203                         ret = VM_FAULT_OOM;
2204                         goto out_mutex;
2205                 }
2206
2207                 if (!(vma->vm_flags & VM_MAYSHARE))
2208                         pagecache_page = hugetlbfs_pagecache_page(h,
2209                                                                 vma, address);
2210         }
2211
2212         spin_lock(&mm->page_table_lock);
2213         /* Check for a racing update before calling hugetlb_cow */
2214         if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
2215                 goto out_page_table_lock;
2216
2217
2218         if (flags & FAULT_FLAG_WRITE) {
2219                 if (!pte_write(entry)) {
2220                         ret = hugetlb_cow(mm, vma, address, ptep, entry,
2221                                                         pagecache_page);
2222                         goto out_page_table_lock;
2223                 }
2224                 entry = pte_mkdirty(entry);
2225         }
2226         entry = pte_mkyoung(entry);
2227         if (huge_ptep_set_access_flags(vma, address, ptep, entry,
2228                                                 flags & FAULT_FLAG_WRITE))
2229                 update_mmu_cache(vma, address, entry);
2230
2231 out_page_table_lock:
2232         spin_unlock(&mm->page_table_lock);
2233
2234         if (pagecache_page) {
2235                 unlock_page(pagecache_page);
2236                 put_page(pagecache_page);
2237         }
2238
2239 out_mutex:
2240         mutex_unlock(&hugetlb_instantiation_mutex);
2241
2242         return ret;
2243 }
2244
2245 /* Can be overridden by architectures */
2246 __attribute__((weak)) struct page *
2247 follow_huge_pud(struct mm_struct *mm, unsigned long address,
2248                pud_t *pud, int write)
2249 {
2250         BUG();
2251         return NULL;
2252 }
2253
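     /*
      * get_user_pages() support for hugetlb VMAs: walk the huge PTEs covering
      * the requested range, faulting in missing pages where allowed, and fill
      * @pages/@vmas one PAGE_SIZE-sized subpage at a time.
      */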
2254 int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
2255                         struct page **pages, struct vm_area_struct **vmas,
2256                         unsigned long *position, int *length, int i,
2257                         unsigned int flags)
2258 {
2259         unsigned long pfn_offset;
2260         unsigned long vaddr = *position;
2261         int remainder = *length;
2262         struct hstate *h = hstate_vma(vma);
2263
2264         spin_lock(&mm->page_table_lock);
2265         while (vaddr < vma->vm_end && remainder) {
2266                 pte_t *pte;
2267                 int absent;
2268                 struct page *page;
2269
2270                 /*
2271                  * Some archs (sparc64, sh*) have multiple pte_t entries for
2272                  * each hugepage.  We have to make sure we get the
2273                  * first, for the page indexing below to work.
2274                  */
2275                 pte = huge_pte_offset(mm, vaddr & huge_page_mask(h));
2276                 absent = !pte || huge_pte_none(huge_ptep_get(pte));
2277
2278                 /*
2279                  * When coredumping, it suits get_dump_page if we just return
2280                  * an error where there's an empty slot with no huge pagecache
2281                  * to back it.  This way, we avoid allocating a hugepage, and
2282                  * the sparse dumpfile avoids allocating disk blocks, but its
2283                  * huge holes still show up with zeroes where they need to be.
2284                  */
2285                 if (absent && (flags & FOLL_DUMP) &&
2286                     !hugetlbfs_pagecache_present(h, vma, vaddr)) {
2287                         remainder = 0;
2288                         break;
2289                 }
2290
2291                 if (absent ||
2292                     ((flags & FOLL_WRITE) && !pte_write(huge_ptep_get(pte)))) {
2293                         int ret;
2294
2295                         spin_unlock(&mm->page_table_lock);
2296                         ret = hugetlb_fault(mm, vma, vaddr,
2297                                 (flags & FOLL_WRITE) ? FAULT_FLAG_WRITE : 0);
2298                         spin_lock(&mm->page_table_lock);
2299                         if (!(ret & VM_FAULT_ERROR))
2300                                 continue;
2301
2302                         remainder = 0;
2303                         break;
2304                 }
2305
2306                 pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
2307                 page = pte_page(huge_ptep_get(pte));
2308 same_page:
2309                 if (pages) {
2310                         pages[i] = mem_map_offset(page, pfn_offset);
2311                         get_page(pages[i]);
2312                 }
2313
2314                 if (vmas)
2315                         vmas[i] = vma;
2316
2317                 vaddr += PAGE_SIZE;
2318                 ++pfn_offset;
2319                 --remainder;
2320                 ++i;
2321                 if (vaddr < vma->vm_end && remainder &&
2322                                 pfn_offset < pages_per_huge_page(h)) {
2323                         /*
2324                          * We use pfn_offset to avoid touching the pageframes
2325                          * of this compound page.
2326                          */
2327                         goto same_page;
2328                 }
2329         }
2330         spin_unlock(&mm->page_table_lock);
2331         *length = remainder;
2332         *position = vaddr;
2333
2334         return i ? i : -EFAULT;
2335 }
2336
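     /*
      * mprotect() support for hugetlb VMAs: rewrite every present huge PTE in
      * [address, end) with the new protection and flush the TLB for the range.
      */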
2337 void hugetlb_change_protection(struct vm_area_struct *vma,
2338                 unsigned long address, unsigned long end, pgprot_t newprot)
2339 {
2340         struct mm_struct *mm = vma->vm_mm;
2341         unsigned long start = address;
2342         pte_t *ptep;
2343         pte_t pte;
2344         struct hstate *h = hstate_vma(vma);
2345
2346         BUG_ON(address >= end);
2347         flush_cache_range(vma, address, end);
2348
2349         spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
2350         spin_lock(&mm->page_table_lock);
2351         for (; address < end; address += huge_page_size(h)) {
2352                 ptep = huge_pte_offset(mm, address);
2353                 if (!ptep)
2354                         continue;
2355                 if (huge_pmd_unshare(mm, &address, ptep))
2356                         continue;
2357                 if (!huge_pte_none(huge_ptep_get(ptep))) {
2358                         pte = huge_ptep_get_and_clear(mm, address, ptep);
2359                         pte = pte_mkhuge(pte_modify(pte, newprot));
2360                         set_huge_pte_at(mm, address, ptep, pte);
2361                 }
2362         }
2363         spin_unlock(&mm->page_table_lock);
2364         spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
2365
2366         flush_tlb_range(vma, start, end);
2367 }
2368
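     /*
      * Reserve huge pages (and filesystem quota) for the range [from, to) so
      * that later faults on the mapping are not expected to fail for lack of
      * huge pages, unless VM_NORESERVE was requested.
      */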
2369 int hugetlb_reserve_pages(struct inode *inode,
2370                                         long from, long to,
2371                                         struct vm_area_struct *vma,
2372                                         int acctflag)
2373 {
2374         long ret, chg;
2375         struct hstate *h = hstate_inode(inode);
2376
2377         /*
2378          * Only apply hugepage reservation if asked. At fault time, a
2379          * VM_NORESERVE mapping will attempt to allocate a page and
2380          * filesystem quota without using reserves.
2381          */
2382         if (acctflag & VM_NORESERVE)
2383                 return 0;
2384
2385         /*
2386          * Shared mappings base their reservation on the number of pages that
2387          * are already allocated on behalf of the file. Private mappings need
2388          * to reserve the full area even if read-only as mprotect() may be
2389          * called to make the mapping read-write. Assume !vma is a shm mapping.
2390          */
2391         if (!vma || vma->vm_flags & VM_MAYSHARE)
2392                 chg = region_chg(&inode->i_mapping->private_list, from, to);
2393         else {
2394                 struct resv_map *resv_map = resv_map_alloc();
2395                 if (!resv_map)
2396                         return -ENOMEM;
2397
2398                 chg = to - from;
2399
2400                 set_vma_resv_map(vma, resv_map);
2401                 set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
2402         }
2403
2404         if (chg < 0)
2405                 return chg;
2406
2407         /* There must be enough filesystem quota for the mapping */
2408         if (hugetlb_get_quota(inode->i_mapping, chg))
2409                 return -ENOSPC;
2410
2411         /*
2412          * Check that enough hugepages are available for the reservation.
2413          * Hand back the quota if there are not.
2414          */
2415         ret = hugetlb_acct_memory(h, chg);
2416         if (ret < 0) {
2417                 hugetlb_put_quota(inode->i_mapping, chg);
2418                 return ret;
2419         }
2420
2421         /*
2422          * Account for the reservations made. Shared mappings record regions
2423          * that have reservations as they are shared by multiple VMAs.
2424          * When the last VMA disappears, the region map says how much
2425          * the reservation was and the page cache tells how much of
2426          * the reservation was consumed. Private mappings are per-VMA and
2427          * only the consumed reservations are tracked. When the VMA
2428          * disappears, the original reservation is the VMA size and the
2429          * consumed reservations are stored in the map. Hence, nothing
2430          * else has to be done for private mappings here
2431          */
2432         if (!vma || vma->vm_flags & VM_MAYSHARE)
2433                 region_add(&inode->i_mapping->private_list, from, to);
2434         return 0;
2435 }
2436
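     /*
      * Release the reservations beyond @offset (typically on hugetlbfs
      * truncate), adjust the inode block count for the @freed pages and hand
      * back any unused filesystem quota.
      */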
2437 void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
2438 {
2439         struct hstate *h = hstate_inode(inode);
2440         long chg = region_truncate(&inode->i_mapping->private_list, offset);
2441
2442         spin_lock(&inode->i_lock);
2443         inode->i_blocks -= (blocks_per_huge_page(h) * freed);
2444         spin_unlock(&inode->i_lock);
2445
2446         hugetlb_put_quota(inode->i_mapping, (chg - freed));
2447         hugetlb_acct_memory(h, -(chg - freed));
2448 }