mm/hugetlb.c
1 /*
2  * Generic hugetlb support.
3  * (C) William Irwin, April 2004
4  */
5 #include <linux/gfp.h>
6 #include <linux/list.h>
7 #include <linux/init.h>
8 #include <linux/module.h>
9 #include <linux/mm.h>
10 #include <linux/sysctl.h>
11 #include <linux/highmem.h>
12 #include <linux/nodemask.h>
13 #include <linux/pagemap.h>
14 #include <linux/mempolicy.h>
15 #include <linux/cpuset.h>
16 #include <linux/mutex.h>
17
18 #include <asm/page.h>
19 #include <asm/pgtable.h>
20
21 #include <linux/hugetlb.h>
22 #include "internal.h"
23
24 const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
25 static unsigned long nr_huge_pages, free_huge_pages, resv_huge_pages;
26 unsigned long max_huge_pages;
27 static struct list_head hugepage_freelists[MAX_NUMNODES];
28 static unsigned int nr_huge_pages_node[MAX_NUMNODES];
29 static unsigned int free_huge_pages_node[MAX_NUMNODES];
30 /*
31  * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
32  */
33 static DEFINE_SPINLOCK(hugetlb_lock);
34
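/*
 * Zero a huge page one base page at a time, rescheduling between subpages
 * so that clearing a large page does not monopolize the CPU.
 */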
35 static void clear_huge_page(struct page *page, unsigned long addr)
36 {
37         int i;
38
39         might_sleep();
40         for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); i++) {
41                 cond_resched();
42                 clear_user_highpage(page + i, addr);
43         }
44 }
45
46 static void copy_huge_page(struct page *dst, struct page *src,
47                            unsigned long addr, struct vm_area_struct *vma)
48 {
49         int i;
50
51         might_sleep();
52         for (i = 0; i < HPAGE_SIZE/PAGE_SIZE; i++) {
53                 cond_resched();
54                 copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma);
55         }
56 }
57
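/*
 * Put a huge page back on its node's free list.  Caller must hold
 * hugetlb_lock.
 */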
58 static void enqueue_huge_page(struct page *page)
59 {
60         int nid = page_to_nid(page);
61         list_add(&page->lru, &hugepage_freelists[nid]);
62         free_huge_pages++;
63         free_huge_pages_node[nid]++;
64 }
65
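/*
 * Take a free huge page from the first node in the VMA's mempolicy zonelist
 * that is both allowed by the current cpuset and has free huge pages.
 * Caller must hold hugetlb_lock.
 */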
66 static struct page *dequeue_huge_page(struct vm_area_struct *vma,
67                                 unsigned long address)
68 {
69         int nid = numa_node_id();
70         struct page *page = NULL;
71         struct zonelist *zonelist = huge_zonelist(vma, address);
72         struct zone **z;
73
74         for (z = zonelist->zones; *z; z++) {
75                 nid = zone_to_nid(*z);
76                 if (cpuset_zone_allowed_softwall(*z, GFP_HIGHUSER) &&
77                     !list_empty(&hugepage_freelists[nid]))
78                         break;
79         }
80
81         if (*z) {
82                 page = list_entry(hugepage_freelists[nid].next,
83                                   struct page, lru);
84                 list_del(&page->lru);
85                 free_huge_pages--;
86                 free_huge_pages_node[nid]--;
87         }
88         return page;
89 }
90
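/*
 * Compound page destructor: called when the last reference to a huge page
 * is dropped, returning the page to its node's free list.
 */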
91 static void free_huge_page(struct page *page)
92 {
93         BUG_ON(page_count(page));
94
95         INIT_LIST_HEAD(&page->lru);
96
97         spin_lock(&hugetlb_lock);
98         enqueue_huge_page(page);
99         spin_unlock(&hugetlb_lock);
100 }
101
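/*
 * Allocate a fresh huge page from the buddy allocator, spreading allocations
 * round-robin across online nodes, and release it into the hugetlb free pool
 * through its compound destructor.  Returns 1 on success, 0 on failure.
 */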
102 static int alloc_fresh_huge_page(void)
103 {
104         static int nid = 0;
105         struct page *page;
106         page = alloc_pages_node(nid, GFP_HIGHUSER|__GFP_COMP|__GFP_NOWARN,
107                                         HUGETLB_PAGE_ORDER);
108         nid = next_node(nid, node_online_map);
109         if (nid == MAX_NUMNODES)
110                 nid = first_node(node_online_map);
111         if (page) {
112                 set_compound_page_dtor(page, free_huge_page);
113                 spin_lock(&hugetlb_lock);
114                 nr_huge_pages++;
115                 nr_huge_pages_node[page_to_nid(page)]++;
116                 spin_unlock(&hugetlb_lock);
117                 put_page(page); /* free it into the hugepage allocator */
118                 return 1;
119         }
120         return 0;
121 }
122
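/*
 * Allocate a huge page for a fault in @vma at @addr.  Shared (VM_MAYSHARE)
 * mappings consume one of their reserved pages; private mappings may only
 * take pages that are not needed to back outstanding reservations.
 */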
123 static struct page *alloc_huge_page(struct vm_area_struct *vma,
124                                     unsigned long addr)
125 {
126         struct page *page;
127
128         spin_lock(&hugetlb_lock);
129         if (vma->vm_flags & VM_MAYSHARE)
130                 resv_huge_pages--;
131         else if (free_huge_pages <= resv_huge_pages)
132                 goto fail;
133
134         page = dequeue_huge_page(vma, addr);
135         if (!page)
136                 goto fail;
137
138         spin_unlock(&hugetlb_lock);
139         set_page_refcounted(page);
140         return page;
141
142 fail:
143         if (vma->vm_flags & VM_MAYSHARE)
144                 resv_huge_pages++;
145         spin_unlock(&hugetlb_lock);
146         return NULL;
147 }
148
149 static int __init hugetlb_init(void)
150 {
151         unsigned long i;
152
153         if (HPAGE_SHIFT == 0)
154                 return 0;
155
156         for (i = 0; i < MAX_NUMNODES; ++i)
157                 INIT_LIST_HEAD(&hugepage_freelists[i]);
158
159         for (i = 0; i < max_huge_pages; ++i) {
160                 if (!alloc_fresh_huge_page())
161                         break;
162         }
163         max_huge_pages = free_huge_pages = nr_huge_pages = i;
164         printk(KERN_INFO "Total HugeTLB memory allocated, %lu\n", free_huge_pages);
165         return 0;
166 }
167 module_init(hugetlb_init);
168
169 static int __init hugetlb_setup(char *s)
170 {
171         if (sscanf(s, "%lu", &max_huge_pages) <= 0)
172                 max_huge_pages = 0;
173         return 1;
174 }
175 __setup("hugepages=", hugetlb_setup);
176
177 static unsigned int cpuset_mems_nr(unsigned int *array)
178 {
179         int node;
180         unsigned int nr = 0;
181
182         for_each_node_mask(node, cpuset_current_mems_allowed)
183                 nr += array[node];
184
185         return nr;
186 }
187
188 #ifdef CONFIG_SYSCTL
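/*
 * Hand a huge page back to the buddy allocator: drop it from the hugetlb
 * counters, clear the page flags left over from its hugetlb use and free
 * the whole HUGETLB_PAGE_ORDER block.  Caller must hold hugetlb_lock.
 */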
189 static void update_and_free_page(struct page *page)
190 {
191         int i;
192         nr_huge_pages--;
193         nr_huge_pages_node[page_to_nid(page)]--;
194         for (i = 0; i < (HPAGE_SIZE / PAGE_SIZE); i++) {
195                 page[i].flags &= ~(1 << PG_locked | 1 << PG_error | 1 << PG_referenced |
196                                 1 << PG_dirty | 1 << PG_active | 1 << PG_reserved |
197                                 1 << PG_private | 1 << PG_writeback);
198         }
199         page[1].lru.next = NULL;
200         set_page_refcounted(page);
201         __free_pages(page, HUGETLB_PAGE_ORDER);
202 }
203
204 #ifdef CONFIG_HIGHMEM
205 static void try_to_free_low(unsigned long count)
206 {
207         int i;
208
209         for (i = 0; i < MAX_NUMNODES; ++i) {
210                 struct page *page, *next;
211                 list_for_each_entry_safe(page, next, &hugepage_freelists[i], lru) {
212                         if (PageHighMem(page))
213                                 continue;
214                         list_del(&page->lru);
215                         update_and_free_page(page);
216                         free_huge_pages--;
217                         free_huge_pages_node[page_to_nid(page)]--;
218                         if (count >= nr_huge_pages)
219                                 return;
220                 }
221         }
222 }
223 #else
224 static inline void try_to_free_low(unsigned long count)
225 {
226 }
227 #endif
228
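/*
 * Resize the huge page pool to @count pages: allocate fresh huge pages to
 * grow it, or free surplus pages (never going below the reserved count) to
 * shrink it.  Returns the resulting pool size.
 */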
229 static unsigned long set_max_huge_pages(unsigned long count)
230 {
231         while (count > nr_huge_pages) {
232                 if (!alloc_fresh_huge_page())
233                         return nr_huge_pages;
234         }
235         if (count >= nr_huge_pages)
236                 return nr_huge_pages;
237
238         spin_lock(&hugetlb_lock);
239         count = max(count, resv_huge_pages);
240         try_to_free_low(count);
241         while (count < nr_huge_pages) {
242                 struct page *page = dequeue_huge_page(NULL, 0);
243                 if (!page)
244                         break;
245                 update_and_free_page(page);
246         }
247         spin_unlock(&hugetlb_lock);
248         return nr_huge_pages;
249 }
250
251 int hugetlb_sysctl_handler(struct ctl_table *table, int write,
252                            struct file *file, void __user *buffer,
253                            size_t *length, loff_t *ppos)
254 {
255         proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
256         max_huge_pages = set_max_huge_pages(max_huge_pages);
257         return 0;
258 }
259 #endif /* CONFIG_SYSCTL */
260
261 int hugetlb_report_meminfo(char *buf)
262 {
263         return sprintf(buf,
264                         "HugePages_Total: %5lu\n"
265                         "HugePages_Free:  %5lu\n"
266                         "HugePages_Rsvd:  %5lu\n"
267                         "Hugepagesize:    %5lu kB\n",
268                         nr_huge_pages,
269                         free_huge_pages,
270                         resv_huge_pages,
271                         HPAGE_SIZE/1024);
272 }
273
274 int hugetlb_report_node_meminfo(int nid, char *buf)
275 {
276         return sprintf(buf,
277                 "Node %d HugePages_Total: %5u\n"
278                 "Node %d HugePages_Free:  %5u\n",
279                 nid, nr_huge_pages_node[nid],
280                 nid, free_huge_pages_node[nid]);
281 }
282
283 /* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
284 unsigned long hugetlb_total_pages(void)
285 {
286         return nr_huge_pages * (HPAGE_SIZE / PAGE_SIZE);
287 }
288
289 /*
290  * We cannot handle pagefaults against hugetlb pages at all.  They cause
291  * handle_mm_fault() to try to instantiate regular-sized pages in the
292  * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we get
293  * this far.
294  */
295 static struct page *hugetlb_nopage(struct vm_area_struct *vma,
296                                 unsigned long address, int *unused)
297 {
298         BUG();
299         return NULL;
300 }
301
302 struct vm_operations_struct hugetlb_vm_ops = {
303         .nopage = hugetlb_nopage,
304 };
305
306 static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
307                                 int writable)
308 {
309         pte_t entry;
310
311         if (writable) {
312                 entry =
313                     pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
314         } else {
315                 entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot));
316         }
317         entry = pte_mkyoung(entry);
318         entry = pte_mkhuge(entry);
319
320         return entry;
321 }
322
323 static void set_huge_ptep_writable(struct vm_area_struct *vma,
324                                    unsigned long address, pte_t *ptep)
325 {
326         pte_t entry;
327
328         entry = pte_mkwrite(pte_mkdirty(*ptep));
329         ptep_set_access_flags(vma, address, ptep, entry, 1);
330         update_mmu_cache(vma, address, entry);
331         lazy_mmu_prot_update(entry);
332 }
333
334
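/*
 * Duplicate the huge page table entries of @vma from @src into @dst at
 * fork().  For private mappings that may be written (COW), the parent's
 * entries are write-protected so the next write in either mm forces a copy.
 */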
335 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
336                             struct vm_area_struct *vma)
337 {
338         pte_t *src_pte, *dst_pte, entry;
339         struct page *ptepage;
340         unsigned long addr;
341         int cow;
342
343         cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
344
345         for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
346                 src_pte = huge_pte_offset(src, addr);
347                 if (!src_pte)
348                         continue;
349                 dst_pte = huge_pte_alloc(dst, addr);
350                 if (!dst_pte)
351                         goto nomem;
352                 spin_lock(&dst->page_table_lock);
353                 spin_lock(&src->page_table_lock);
354                 if (!pte_none(*src_pte)) {
355                         if (cow)
356                                 ptep_set_wrprotect(src, addr, src_pte);
357                         entry = *src_pte;
358                         ptepage = pte_page(entry);
359                         get_page(ptepage);
360                         set_huge_pte_at(dst, addr, dst_pte, entry);
361                 }
362                 spin_unlock(&src->page_table_lock);
363                 spin_unlock(&dst->page_table_lock);
364         }
365         return 0;
366
367 nomem:
368         return -ENOMEM;
369 }
370
371 void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
372                             unsigned long end)
373 {
374         struct mm_struct *mm = vma->vm_mm;
375         unsigned long address;
376         pte_t *ptep;
377         pte_t pte;
378         struct page *page;
379         struct page *tmp;
380         /*
381          * A page gathering list, protected by the per-file i_mmap_lock. The
382          * lock is used to avoid list corruption from multiple unmapping
383          * of the same page since we are using page->lru.
384          */
385         LIST_HEAD(page_list);
386
387         WARN_ON(!is_vm_hugetlb_page(vma));
388         BUG_ON(start & ~HPAGE_MASK);
389         BUG_ON(end & ~HPAGE_MASK);
390
391         spin_lock(&mm->page_table_lock);
392         for (address = start; address < end; address += HPAGE_SIZE) {
393                 ptep = huge_pte_offset(mm, address);
394                 if (!ptep)
395                         continue;
396
397                 if (huge_pmd_unshare(mm, &address, ptep))
398                         continue;
399
400                 pte = huge_ptep_get_and_clear(mm, address, ptep);
401                 if (pte_none(pte))
402                         continue;
403
404                 page = pte_page(pte);
405                 if (pte_dirty(pte))
406                         set_page_dirty(page);
407                 list_add(&page->lru, &page_list);
408         }
409         spin_unlock(&mm->page_table_lock);
410         flush_tlb_range(vma, start, end);
411         list_for_each_entry_safe(page, tmp, &page_list, lru) {
412                 list_del(&page->lru);
413                 put_page(page);
414         }
415 }
416
417 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
418                           unsigned long end)
419 {
420         /*
421          * It is undesirable to test vma->vm_file as it should be non-null
422          * for a valid hugetlb area. However, vm_file will be NULL in the error
423          * cleanup path of do_mmap_pgoff. When the hugetlbfs ->mmap method fails,
424          * do_mmap_pgoff() nullifies vma->vm_file before calling this function
425          * to clean up. Since no pte has actually been set up, it is safe to
426          * do nothing in this case.
427          */
428         if (vma->vm_file) {
429                 spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
430                 __unmap_hugepage_range(vma, start, end);
431                 spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
432         }
433 }
434
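/*
 * Break copy-on-write for a huge page: if we hold the only reference, just
 * make the existing PTE writable; otherwise allocate a new huge page, copy
 * the data over and install it.  Called and returns with
 * mm->page_table_lock held (the lock is dropped around the copy).
 */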
435 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
436                         unsigned long address, pte_t *ptep, pte_t pte)
437 {
438         struct page *old_page, *new_page;
439         int avoidcopy;
440
441         old_page = pte_page(pte);
442
443         /* If no-one else is actually using this page, avoid the copy
444          * and just make the page writable */
445         avoidcopy = (page_count(old_page) == 1);
446         if (avoidcopy) {
447                 set_huge_ptep_writable(vma, address, ptep);
448                 return VM_FAULT_MINOR;
449         }
450
451         page_cache_get(old_page);
452         new_page = alloc_huge_page(vma, address);
453
454         if (!new_page) {
455                 page_cache_release(old_page);
456                 return VM_FAULT_OOM;
457         }
458
459         spin_unlock(&mm->page_table_lock);
460         copy_huge_page(new_page, old_page, address, vma);
461         spin_lock(&mm->page_table_lock);
462
463         ptep = huge_pte_offset(mm, address & HPAGE_MASK);
464         if (likely(pte_same(*ptep, pte))) {
465                 /* Break COW */
466                 set_huge_pte_at(mm, address, ptep,
467                                 make_huge_pte(vma, new_page, 1));
468                 /* Make the old page be freed below */
469                 new_page = old_page;
470         }
471         page_cache_release(new_page);
472         page_cache_release(old_page);
473         return VM_FAULT_MINOR;
474 }
475
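/*
 * Handle a fault on a huge PTE that is not yet present: find the page in
 * the page cache of the backing hugetlbfs file, or allocate and zero a new
 * huge page (adding it to the page cache for shared mappings), then install
 * the PTE.
 */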
476 int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
477                         unsigned long address, pte_t *ptep, int write_access)
478 {
479         int ret = VM_FAULT_SIGBUS;
480         unsigned long idx;
481         unsigned long size;
482         struct page *page;
483         struct address_space *mapping;
484         pte_t new_pte;
485
486         mapping = vma->vm_file->f_mapping;
487         idx = ((address - vma->vm_start) >> HPAGE_SHIFT)
488                 + (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));
489
490         /*
491          * Use page lock to guard against racing truncation
492          * before we get page_table_lock.
493          */
494 retry:
495         page = find_lock_page(mapping, idx);
496         if (!page) {
497                 size = i_size_read(mapping->host) >> HPAGE_SHIFT;
498                 if (idx >= size)
499                         goto out;
500                 if (hugetlb_get_quota(mapping))
501                         goto out;
502                 page = alloc_huge_page(vma, address);
503                 if (!page) {
504                         hugetlb_put_quota(mapping);
505                         ret = VM_FAULT_OOM;
506                         goto out;
507                 }
508                 clear_huge_page(page, address);
509
510                 if (vma->vm_flags & VM_SHARED) {
511                         int err;
512
513                         err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
514                         if (err) {
515                                 put_page(page);
516                                 hugetlb_put_quota(mapping);
517                                 if (err == -EEXIST)
518                                         goto retry;
519                                 goto out;
520                         }
521                 } else
522                         lock_page(page);
523         }
524
525         spin_lock(&mm->page_table_lock);
526         size = i_size_read(mapping->host) >> HPAGE_SHIFT;
527         if (idx >= size)
528                 goto backout;
529
530         ret = VM_FAULT_MINOR;
531         if (!pte_none(*ptep))
532                 goto backout;
533
534         new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
535                                 && (vma->vm_flags & VM_SHARED)));
536         set_huge_pte_at(mm, address, ptep, new_pte);
537
538         if (write_access && !(vma->vm_flags & VM_SHARED)) {
539                 /* Optimization, do the COW without a second fault */
540                 ret = hugetlb_cow(mm, vma, address, ptep, new_pte);
541         }
542
543         spin_unlock(&mm->page_table_lock);
544         unlock_page(page);
545 out:
546         return ret;
547
548 backout:
549         spin_unlock(&mm->page_table_lock);
550         hugetlb_put_quota(mapping);
551         unlock_page(page);
552         put_page(page);
553         goto out;
554 }
555
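/*
 * Top-level hugetlb fault handler: instantiate a missing huge page via
 * hugetlb_no_page(), or break COW when a write hits a read-only PTE.
 */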
556 int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
557                         unsigned long address, int write_access)
558 {
559         pte_t *ptep;
560         pte_t entry;
561         int ret;
562         static DEFINE_MUTEX(hugetlb_instantiation_mutex);
563
564         ptep = huge_pte_alloc(mm, address);
565         if (!ptep)
566                 return VM_FAULT_OOM;
567
568         /*
569          * Serialize hugepage allocation and instantiation, so that we don't
570          * get spurious allocation failures if two CPUs race to instantiate
571          * the same page in the page cache.
572          */
573         mutex_lock(&hugetlb_instantiation_mutex);
574         entry = *ptep;
575         if (pte_none(entry)) {
576                 ret = hugetlb_no_page(mm, vma, address, ptep, write_access);
577                 mutex_unlock(&hugetlb_instantiation_mutex);
578                 return ret;
579         }
580
581         ret = VM_FAULT_MINOR;
582
583         spin_lock(&mm->page_table_lock);
584         /* Check for a racing update before calling hugetlb_cow */
585         if (likely(pte_same(entry, *ptep)))
586                 if (write_access && !pte_write(entry))
587                         ret = hugetlb_cow(mm, vma, address, ptep, entry);
588         spin_unlock(&mm->page_table_lock);
589         mutex_unlock(&hugetlb_instantiation_mutex);
590
591         return ret;
592 }
593
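/*
 * get_user_pages() back end for hugetlb VMAs: walk the huge PTEs starting
 * at *position, faulting pages in when necessary, and return the individual
 * base pages that make up each huge page in @pages (and @vmas).
 */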
594 int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
595                         struct page **pages, struct vm_area_struct **vmas,
596                         unsigned long *position, int *length, int i)
597 {
598         unsigned long pfn_offset;
599         unsigned long vaddr = *position;
600         int remainder = *length;
601
602         spin_lock(&mm->page_table_lock);
603         while (vaddr < vma->vm_end && remainder) {
604                 pte_t *pte;
605                 struct page *page;
606
607                 /*
608                  * Some archs (sparc64, sh*) have multiple pte_ts for
609                  * each hugepage.  We have to make sure we get the
610                  * first, for the page indexing below to work.
611                  */
612                 pte = huge_pte_offset(mm, vaddr & HPAGE_MASK);
613
614                 if (!pte || pte_none(*pte)) {
615                         int ret;
616
617                         spin_unlock(&mm->page_table_lock);
618                         ret = hugetlb_fault(mm, vma, vaddr, 0);
619                         spin_lock(&mm->page_table_lock);
620                         if (ret == VM_FAULT_MINOR)
621                                 continue;
622
623                         remainder = 0;
624                         if (!i)
625                                 i = -EFAULT;
626                         break;
627                 }
628
629                 pfn_offset = (vaddr & ~HPAGE_MASK) >> PAGE_SHIFT;
630                 page = pte_page(*pte);
631 same_page:
632                 if (pages) {
633                         get_page(page);
634                         pages[i] = page + pfn_offset;
635                 }
636
637                 if (vmas)
638                         vmas[i] = vma;
639
640                 vaddr += PAGE_SIZE;
641                 ++pfn_offset;
642                 --remainder;
643                 ++i;
644                 if (vaddr < vma->vm_end && remainder &&
645                                 pfn_offset < HPAGE_SIZE/PAGE_SIZE) {
646                         /*
647                          * We use pfn_offset to avoid touching the pageframes
648                          * of this compound page.
649                          */
650                         goto same_page;
651                 }
652         }
653         spin_unlock(&mm->page_table_lock);
654         *length = remainder;
655         *position = vaddr;
656
657         return i;
658 }
659
660 void hugetlb_change_protection(struct vm_area_struct *vma,
661                 unsigned long address, unsigned long end, pgprot_t newprot)
662 {
663         struct mm_struct *mm = vma->vm_mm;
664         unsigned long start = address;
665         pte_t *ptep;
666         pte_t pte;
667
668         BUG_ON(address >= end);
669         flush_cache_range(vma, address, end);
670
671         spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
672         spin_lock(&mm->page_table_lock);
673         for (; address < end; address += HPAGE_SIZE) {
674                 ptep = huge_pte_offset(mm, address);
675                 if (!ptep)
676                         continue;
677                 if (huge_pmd_unshare(mm, &address, ptep))
678                         continue;
679                 if (!pte_none(*ptep)) {
680                         pte = huge_ptep_get_and_clear(mm, address, ptep);
681                         pte = pte_mkhuge(pte_modify(pte, newprot));
682                         set_huge_pte_at(mm, address, ptep, pte);
683                         lazy_mmu_prot_update(pte);
684                 }
685         }
686         spin_unlock(&mm->page_table_lock);
687         spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
688
689         flush_tlb_range(vma, start, end);
690 }
691
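/*
 * Huge page reservations for a hugetlbfs file are tracked as a list of
 * disjoint [from, to) ranges, in huge page units, hung off the mapping's
 * private_list.
 */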
692 struct file_region {
693         struct list_head link;
694         long from;
695         long to;
696 };
697
698 static long region_add(struct list_head *head, long f, long t)
699 {
700         struct file_region *rg, *nrg, *trg;
701
702         /* Locate the region we are either in or before. */
703         list_for_each_entry(rg, head, link)
704                 if (f <= rg->to)
705                         break;
706
707         /* Round our left edge to the current segment if it encloses us. */
708         if (f > rg->from)
709                 f = rg->from;
710
711         /* Check for and consume any regions we now overlap with. */
712         nrg = rg;
713         list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
714                 if (&rg->link == head)
715                         break;
716                 if (rg->from > t)
717                         break;
718
719                 /* If this area reaches higher, then extend our area to
720                  * include it completely.  If this is not the first area
721                  * which we intend to reuse, free it. */
722                 if (rg->to > t)
723                         t = rg->to;
724                 if (rg != nrg) {
725                         list_del(&rg->link);
726                         kfree(rg);
727                 }
728         }
729         nrg->from = f;
730         nrg->to = t;
731         return 0;
732 }
733
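/*
 * region_chg() computes how many extra huge pages a reservation of [f, t)
 * would need given the existing regions, pre-allocating a placeholder
 * region so that a following region_add() of the same range cannot fail.
 */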
734 static long region_chg(struct list_head *head, long f, long t)
735 {
736         struct file_region *rg, *nrg;
737         long chg = 0;
738
739         /* Locate the region we are before or in. */
740         list_for_each_entry(rg, head, link)
741                 if (f <= rg->to)
742                         break;
743
744         /* If we are below the current region then a new region is required.
745          * Subtle: allocate a new region at the position but make it zero
746          * size such that we can guarantee to record the reservation. */
747         if (&rg->link == head || t < rg->from) {
748                 nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
749                 if (!nrg)
750                         return -ENOMEM;
751                 nrg->from = f;
752                 nrg->to   = f;
753                 INIT_LIST_HEAD(&nrg->link);
754                 list_add(&nrg->link, rg->link.prev);
755
756                 return t - f;
757         }
758
759         /* Round our left edge to the current segment if it encloses us. */
760         if (f > rg->from)
761                 f = rg->from;
762         chg = t - f;
763
764         /* Check for and consume any regions we now overlap with. */
765         list_for_each_entry(rg, rg->link.prev, link) {
766                 if (&rg->link == head)
767                         break;
768                 if (rg->from > t)
769                         return chg;
770
771                 /* We overlap with this area; if it extends further than
772                  * us then we must extend ourselves.  Account for its
773                  * existing reservation. */
774                 if (rg->to > t) {
775                         chg += rg->to - t;
776                         t = rg->to;
777                 }
778                 chg -= rg->to - rg->from;
779         }
780         return chg;
781 }
782
783 static long region_truncate(struct list_head *head, long end)
784 {
785         struct file_region *rg, *trg;
786         long chg = 0;
787
788         /* Locate the region we are either in or before. */
789         list_for_each_entry(rg, head, link)
790                 if (end <= rg->to)
791                         break;
792         if (&rg->link == head)
793                 return 0;
794
795         /* If we are in the middle of a region then adjust it. */
796         if (end > rg->from) {
797                 chg = rg->to - end;
798                 rg->to = end;
799                 rg = list_entry(rg->link.next, typeof(*rg), link);
800         }
801
802         /* Drop any remaining regions. */
803         list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
804                 if (&rg->link == head)
805                         break;
806                 chg += rg->to - rg->from;
807                 list_del(&rg->link);
808                 kfree(rg);
809         }
810         return chg;
811 }
812
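/*
 * Charge @delta huge pages against the reserve pool.  The charge succeeds
 * only if enough free huge pages exist to back every reservation.
 */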
813 static int hugetlb_acct_memory(long delta)
814 {
815         int ret = -ENOMEM;
816
817         spin_lock(&hugetlb_lock);
818         if ((delta + resv_huge_pages) <= free_huge_pages) {
819                 resv_huge_pages += delta;
820                 ret = 0;
821         }
822         spin_unlock(&hugetlb_lock);
823         return ret;
824 }
825
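/*
 * Reserve huge pages for the range [from, to) (in huge page units) of a
 * hugetlbfs inode, so that later faults on an already-mapped range do not
 * fail for lack of huge pages.
 */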
826 int hugetlb_reserve_pages(struct inode *inode, long from, long to)
827 {
828         long ret, chg;
829
830         chg = region_chg(&inode->i_mapping->private_list, from, to);
831         if (chg < 0)
832                 return chg;
833         /*
834          * When cpusets are configured, they break the strict hugetlb page
835          * reservation because the accounting is done on a global variable.
836          * Such a reservation is effectively meaningless in the presence of
837          * cpusets, since it is not checked against page availability for
838          * the current cpuset.  An application can still be OOM-killed by
839          * the kernel if its cpuset runs out of free huge pages.
840          * Enforcing strict accounting per cpuset is nearly impossible (or
841          * too ugly) because cpusets are fluid: tasks and memory nodes can
842          * be moved between cpusets at any time.
843          *
844          * Changing the semantics of shared hugetlb mappings under cpusets
845          * is undesirable.  However, in order to preserve some of the
846          * semantics, we fall back to checking the current free page
847          * availability as a best-effort attempt, hoping to minimize the
848          * impact of the semantic change that cpusets introduce.
849          */
850         if (chg > cpuset_mems_nr(free_huge_pages_node))
851                 return -ENOMEM;
852
853         ret = hugetlb_acct_memory(chg);
854         if (ret < 0)
855                 return ret;
856         region_add(&inode->i_mapping->private_list, from, to);
857         return 0;
858 }
859
860 void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
861 {
862         long chg = region_truncate(&inode->i_mapping->private_list, offset);
863         hugetlb_acct_memory(freed - chg);
864 }