vma_adjust: fix the copying of anon_vma chains
1 /*
2  * mm/mmap.c
3  *
4  * Written by obz.
5  *
6  * Address space accounting code        <alan@lxorguk.ukuu.org.uk>
7  */
8
9 #include <linux/slab.h>
10 #include <linux/backing-dev.h>
11 #include <linux/mm.h>
12 #include <linux/shm.h>
13 #include <linux/mman.h>
14 #include <linux/pagemap.h>
15 #include <linux/swap.h>
16 #include <linux/syscalls.h>
17 #include <linux/capability.h>
18 #include <linux/init.h>
19 #include <linux/file.h>
20 #include <linux/fs.h>
21 #include <linux/personality.h>
22 #include <linux/security.h>
23 #include <linux/hugetlb.h>
24 #include <linux/profile.h>
25 #include <linux/module.h>
26 #include <linux/mount.h>
27 #include <linux/mempolicy.h>
28 #include <linux/rmap.h>
29 #include <linux/mmu_notifier.h>
30 #include <linux/perf_event.h>
31
32 #include <asm/uaccess.h>
33 #include <asm/cacheflush.h>
34 #include <asm/tlb.h>
35 #include <asm/mmu_context.h>
36
37 #include "internal.h"
38
39 #ifndef arch_mmap_check
40 #define arch_mmap_check(addr, len, flags)       (0)
41 #endif
42
43 #ifndef arch_rebalance_pgtables
44 #define arch_rebalance_pgtables(addr, len)              (addr)
45 #endif
46
47 static void unmap_region(struct mm_struct *mm,
48                 struct vm_area_struct *vma, struct vm_area_struct *prev,
49                 unsigned long start, unsigned long end);
50
51 /*
52  * WARNING: the debugging will use recursive algorithms so never enable this
53  * unless you know what you are doing.
54  */
55 #undef DEBUG_MM_RB
56
57 /* description of effects of mapping type and prot in current implementation.
58  * this is due to the limited x86 page protection hardware.  The expected
59  * behavior is in parens:
60  *
61  * map_type     prot
62  *              PROT_NONE       PROT_READ       PROT_WRITE      PROT_EXEC
63  * MAP_SHARED   r: (no) no      r: (yes) yes    r: (no) yes     r: (no) yes
64  *              w: (no) no      w: (no) no      w: (yes) yes    w: (no) no
65  *              x: (no) no      x: (no) yes     x: (no) yes     x: (yes) yes
66  *              
67  * MAP_PRIVATE  r: (no) no      r: (yes) yes    r: (no) yes     r: (no) yes
68  *              w: (no) no      w: (no) no      w: (copy) copy  w: (no) no
69  *              x: (no) no      x: (no) yes     x: (no) yes     x: (yes) yes
70  *
71  */
72 pgprot_t protection_map[16] = {
73         __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
74         __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
75 };
76
77 pgprot_t vm_get_page_prot(unsigned long vm_flags)
78 {
79         return __pgprot(pgprot_val(protection_map[vm_flags &
80                                 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
81                         pgprot_val(arch_vm_get_page_prot(vm_flags)));
82 }
83 EXPORT_SYMBOL(vm_get_page_prot);
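
/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * how the protection_map[] table above is indexed.  A private read/write
 * mapping has VM_READ|VM_WRITE set and VM_SHARED clear, so it resolves to
 * __P011; the MAP_SHARED variant would resolve to __S011.  The helper name
 * is hypothetical and the arch bits added by vm_get_page_prot() are omitted.
 */
#if 0
static pgprot_t example_prot_lookup(void)
{
	unsigned long vm_flags = VM_READ | VM_WRITE;	/* private r/w mapping */

	/* Same table lookup that vm_get_page_prot() performs. */
	return protection_map[vm_flags &
			(VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)];
}
#endif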
84
85 int sysctl_overcommit_memory = OVERCOMMIT_GUESS;  /* heuristic overcommit */
86 int sysctl_overcommit_ratio = 50;       /* default is 50% */
87 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
88 struct percpu_counter vm_committed_as;
89
90 /*
91  * Check that a process has enough memory to allocate a new virtual
92  * mapping. 0 means there is enough memory for the allocation to
93  * succeed and -ENOMEM implies there is not.
94  *
95  * We currently support three overcommit policies, which are set via the
96  * vm.overcommit_memory sysctl.  See Documentation/vm/overcommit-accounting
97  *
98  * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
99  * Additional code 2002 Jul 20 by Robert Love.
100  *
101  * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
102  *
103  * Note this is a helper function intended to be used by LSMs which
104  * wish to use this logic.
105  */
106 int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
107 {
108         unsigned long free, allowed;
109
110         vm_acct_memory(pages);
111
112         /*
113          * Sometimes we want to use more memory than we have
114          */
115         if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
116                 return 0;
117
118         if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
119                 unsigned long n;
120
121                 free = global_page_state(NR_FILE_PAGES);
122                 free += nr_swap_pages;
123
124                 /*
125                  * Any slabs which are created with the
126                  * SLAB_RECLAIM_ACCOUNT flag claim to have contents
127                  * which are reclaimable, under pressure.  The dentry
128                  * cache and most inode caches should fall into this
129                  */
130                 free += global_page_state(NR_SLAB_RECLAIMABLE);
131
132                 /*
133                  * Leave the last 3% for root
134                  */
135                 if (!cap_sys_admin)
136                         free -= free / 32;
137
138                 if (free > pages)
139                         return 0;
140
141                 /*
142                  * nr_free_pages() is very expensive on large systems,
143                  * only call if we're about to fail.
144                  */
145                 n = nr_free_pages();
146
147                 /*
148                  * Leave out the reserved pages: they are not available for anonymous pages.
149                  */
150                 if (n <= totalreserve_pages)
151                         goto error;
152                 else
153                         n -= totalreserve_pages;
154
155                 /*
156                  * Leave the last 3% for root
157                  */
158                 if (!cap_sys_admin)
159                         n -= n / 32;
160                 free += n;
161
162                 if (free > pages)
163                         return 0;
164
165                 goto error;
166         }
167
168         allowed = (totalram_pages - hugetlb_total_pages())
169                 * sysctl_overcommit_ratio / 100;
170         /*
171          * Leave the last 3% for root
172          */
173         if (!cap_sys_admin)
174                 allowed -= allowed / 32;
175         allowed += total_swap_pages;
176
177         /* Don't let a single process grow too big:
178            leave 3% of the size of this process for other processes */
179         if (mm)
180                 allowed -= mm->total_vm / 32;
181
182         if (percpu_counter_read_positive(&vm_committed_as) < allowed)
183                 return 0;
184 error:
185         vm_unacct_memory(pages);
186
187         return -ENOMEM;
188 }
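
/*
 * Illustrative sketch (editor's addition): the shape of an LSM-style caller
 * of __vm_enough_memory(), as described in the comment above.  The function
 * name is hypothetical; the real callers live under security/ and derive
 * cap_sys_admin from the task's capability set.
 */
#if 0
static int example_vm_enough_memory(long pages)
{
	int cap_sys_admin = capable(CAP_SYS_ADMIN) ? 1 : 0;

	return __vm_enough_memory(current->mm, pages, cap_sys_admin);
}
#endif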
189
190 /*
191  * Requires inode->i_mapping->i_mmap_lock
192  */
193 static void __remove_shared_vm_struct(struct vm_area_struct *vma,
194                 struct file *file, struct address_space *mapping)
195 {
196         if (vma->vm_flags & VM_DENYWRITE)
197                 atomic_inc(&file->f_path.dentry->d_inode->i_writecount);
198         if (vma->vm_flags & VM_SHARED)
199                 mapping->i_mmap_writable--;
200
201         flush_dcache_mmap_lock(mapping);
202         if (unlikely(vma->vm_flags & VM_NONLINEAR))
203                 list_del_init(&vma->shared.vm_set.list);
204         else
205                 vma_prio_tree_remove(vma, &mapping->i_mmap);
206         flush_dcache_mmap_unlock(mapping);
207 }
208
209 /*
210  * Unlink a file-based vm structure from its prio_tree, to hide
211  * vma from rmap and vmtruncate before freeing its page tables.
212  */
213 void unlink_file_vma(struct vm_area_struct *vma)
214 {
215         struct file *file = vma->vm_file;
216
217         if (file) {
218                 struct address_space *mapping = file->f_mapping;
219                 spin_lock(&mapping->i_mmap_lock);
220                 __remove_shared_vm_struct(vma, file, mapping);
221                 spin_unlock(&mapping->i_mmap_lock);
222         }
223 }
224
225 /*
226  * Close a vm structure and free it, returning the next.
227  */
228 static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
229 {
230         struct vm_area_struct *next = vma->vm_next;
231
232         might_sleep();
233         if (vma->vm_ops && vma->vm_ops->close)
234                 vma->vm_ops->close(vma);
235         if (vma->vm_file) {
236                 fput(vma->vm_file);
237                 if (vma->vm_flags & VM_EXECUTABLE)
238                         removed_exe_file_vma(vma->vm_mm);
239         }
240         mpol_put(vma_policy(vma));
241         kmem_cache_free(vm_area_cachep, vma);
242         return next;
243 }
244
245 SYSCALL_DEFINE1(brk, unsigned long, brk)
246 {
247         unsigned long rlim, retval;
248         unsigned long newbrk, oldbrk;
249         struct mm_struct *mm = current->mm;
250         unsigned long min_brk;
251
252         down_write(&mm->mmap_sem);
253
254 #ifdef CONFIG_COMPAT_BRK
255         min_brk = mm->end_code;
256 #else
257         min_brk = mm->start_brk;
258 #endif
259         if (brk < min_brk)
260                 goto out;
261
262         /*
263          * Check against rlimit here. If this check is done later after the test
264          * of oldbrk with newbrk then it can escape the test and let the data
265          * segment grow beyond its set limit in the case where the limit is
266          * not page aligned -Ram Gupta
267          */
268         rlim = rlimit(RLIMIT_DATA);
269         if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
270                         (mm->end_data - mm->start_data) > rlim)
271                 goto out;
272
273         newbrk = PAGE_ALIGN(brk);
274         oldbrk = PAGE_ALIGN(mm->brk);
275         if (oldbrk == newbrk)
276                 goto set_brk;
277
278         /* Always allow shrinking brk. */
279         if (brk <= mm->brk) {
280                 if (!do_munmap(mm, newbrk, oldbrk-newbrk))
281                         goto set_brk;
282                 goto out;
283         }
284
285         /* Check against existing mmap mappings. */
286         if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE))
287                 goto out;
288
289         /* Ok, looks good - let it rip. */
290         if (do_brk(oldbrk, newbrk-oldbrk) != oldbrk)
291                 goto out;
292 set_brk:
293         mm->brk = brk;
294 out:
295         retval = mm->brk;
296         up_write(&mm->mmap_sem);
297         return retval;
298 }
299
300 #ifdef DEBUG_MM_RB
301 static int browse_rb(struct rb_root *root)
302 {
303         int i = 0, j;
304         struct rb_node *nd, *pn = NULL;
305         unsigned long prev = 0, pend = 0;
306
307         for (nd = rb_first(root); nd; nd = rb_next(nd)) {
308                 struct vm_area_struct *vma;
309                 vma = rb_entry(nd, struct vm_area_struct, vm_rb);
310                 if (vma->vm_start < prev)
311                         printk("vm_start %lx prev %lx\n", vma->vm_start, prev), i = -1;
312                 if (vma->vm_start < pend)
313                         printk("vm_start %lx pend %lx\n", vma->vm_start, pend);
314                 if (vma->vm_start > vma->vm_end)
315                         printk("vm_end %lx < vm_start %lx\n", vma->vm_end, vma->vm_start);
316                 i++;
317                 pn = nd;
318                 prev = vma->vm_start;
319                 pend = vma->vm_end;
320         }
321         j = 0;
322         for (nd = pn; nd; nd = rb_prev(nd)) {
323                 j++;
324         }
325         if (i != j)
326                 printk("backwards %d, forwards %d\n", j, i), i = 0;
327         return i;
328 }
329
330 void validate_mm(struct mm_struct *mm)
331 {
332         int bug = 0;
333         int i = 0;
334         struct vm_area_struct *tmp = mm->mmap;
335         while (tmp) {
336                 tmp = tmp->vm_next;
337                 i++;
338         }
339         if (i != mm->map_count)
340                 printk("map_count %d vm_next %d\n", mm->map_count, i), bug = 1;
341         i = browse_rb(&mm->mm_rb);
342         if (i != mm->map_count)
343                 printk("map_count %d rb %d\n", mm->map_count, i), bug = 1;
344         BUG_ON(bug);
345 }
346 #else
347 #define validate_mm(mm) do { } while (0)
348 #endif
349
350 static struct vm_area_struct *
351 find_vma_prepare(struct mm_struct *mm, unsigned long addr,
352                 struct vm_area_struct **pprev, struct rb_node ***rb_link,
353                 struct rb_node ** rb_parent)
354 {
355         struct vm_area_struct * vma;
356         struct rb_node ** __rb_link, * __rb_parent, * rb_prev;
357
358         __rb_link = &mm->mm_rb.rb_node;
359         rb_prev = __rb_parent = NULL;
360         vma = NULL;
361
362         while (*__rb_link) {
363                 struct vm_area_struct *vma_tmp;
364
365                 __rb_parent = *__rb_link;
366                 vma_tmp = rb_entry(__rb_parent, struct vm_area_struct, vm_rb);
367
368                 if (vma_tmp->vm_end > addr) {
369                         vma = vma_tmp;
370                         if (vma_tmp->vm_start <= addr)
371                                 break;
372                         __rb_link = &__rb_parent->rb_left;
373                 } else {
374                         rb_prev = __rb_parent;
375                         __rb_link = &__rb_parent->rb_right;
376                 }
377         }
378
379         *pprev = NULL;
380         if (rb_prev)
381                 *pprev = rb_entry(rb_prev, struct vm_area_struct, vm_rb);
382         *rb_link = __rb_link;
383         *rb_parent = __rb_parent;
384         return vma;
385 }
386
387 static inline void
388 __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
389                 struct vm_area_struct *prev, struct rb_node *rb_parent)
390 {
391         if (prev) {
392                 vma->vm_next = prev->vm_next;
393                 prev->vm_next = vma;
394         } else {
395                 mm->mmap = vma;
396                 if (rb_parent)
397                         vma->vm_next = rb_entry(rb_parent,
398                                         struct vm_area_struct, vm_rb);
399                 else
400                         vma->vm_next = NULL;
401         }
402 }
403
404 void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma,
405                 struct rb_node **rb_link, struct rb_node *rb_parent)
406 {
407         rb_link_node(&vma->vm_rb, rb_parent, rb_link);
408         rb_insert_color(&vma->vm_rb, &mm->mm_rb);
409 }
410
411 static void __vma_link_file(struct vm_area_struct *vma)
412 {
413         struct file *file;
414
415         file = vma->vm_file;
416         if (file) {
417                 struct address_space *mapping = file->f_mapping;
418
419                 if (vma->vm_flags & VM_DENYWRITE)
420                         atomic_dec(&file->f_path.dentry->d_inode->i_writecount);
421                 if (vma->vm_flags & VM_SHARED)
422                         mapping->i_mmap_writable++;
423
424                 flush_dcache_mmap_lock(mapping);
425                 if (unlikely(vma->vm_flags & VM_NONLINEAR))
426                         vma_nonlinear_insert(vma, &mapping->i_mmap_nonlinear);
427                 else
428                         vma_prio_tree_insert(vma, &mapping->i_mmap);
429                 flush_dcache_mmap_unlock(mapping);
430         }
431 }
432
433 static void
434 __vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
435         struct vm_area_struct *prev, struct rb_node **rb_link,
436         struct rb_node *rb_parent)
437 {
438         __vma_link_list(mm, vma, prev, rb_parent);
439         __vma_link_rb(mm, vma, rb_link, rb_parent);
440 }
441
442 static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
443                         struct vm_area_struct *prev, struct rb_node **rb_link,
444                         struct rb_node *rb_parent)
445 {
446         struct address_space *mapping = NULL;
447
448         if (vma->vm_file)
449                 mapping = vma->vm_file->f_mapping;
450
451         if (mapping) {
452                 spin_lock(&mapping->i_mmap_lock);
453                 vma->vm_truncate_count = mapping->truncate_count;
454         }
455         anon_vma_lock(vma);
456
457         __vma_link(mm, vma, prev, rb_link, rb_parent);
458         __vma_link_file(vma);
459
460         anon_vma_unlock(vma);
461         if (mapping)
462                 spin_unlock(&mapping->i_mmap_lock);
463
464         mm->map_count++;
465         validate_mm(mm);
466 }
467
468 /*
469  * Helper for vma_adjust in the split_vma insert case:
470  * insert vm structure into list and rbtree and anon_vma,
471  * but it has already been inserted into prio_tree earlier.
472  */
473 static void __insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
474 {
475         struct vm_area_struct *__vma, *prev;
476         struct rb_node **rb_link, *rb_parent;
477
478         __vma = find_vma_prepare(mm, vma->vm_start,&prev, &rb_link, &rb_parent);
479         BUG_ON(__vma && __vma->vm_start < vma->vm_end);
480         __vma_link(mm, vma, prev, rb_link, rb_parent);
481         mm->map_count++;
482 }
483
484 static inline void
485 __vma_unlink(struct mm_struct *mm, struct vm_area_struct *vma,
486                 struct vm_area_struct *prev)
487 {
488         prev->vm_next = vma->vm_next;
489         rb_erase(&vma->vm_rb, &mm->mm_rb);
490         if (mm->mmap_cache == vma)
491                 mm->mmap_cache = prev;
492 }
493
494 /*
495  * We cannot adjust vm_start, vm_end, vm_pgoff fields of a vma that
496  * is already present in an i_mmap tree without adjusting the tree.
497  * The following helper function should be used when such adjustments
498  * are necessary.  The "insert" vma (if any) is to be inserted
499  * before we drop the necessary locks.
500  */
501 int vma_adjust(struct vm_area_struct *vma, unsigned long start,
502         unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert)
503 {
504         struct mm_struct *mm = vma->vm_mm;
505         struct vm_area_struct *next = vma->vm_next;
506         struct vm_area_struct *importer = NULL;
507         struct address_space *mapping = NULL;
508         struct prio_tree_root *root = NULL;
509         struct file *file = vma->vm_file;
510         long adjust_next = 0;
511         int remove_next = 0;
512
513         if (next && !insert) {
514                 struct vm_area_struct *exporter = NULL;
515
516                 if (end >= next->vm_end) {
517                         /*
518                          * vma expands, overlapping all the next, and
519                          * perhaps the one after too (mprotect case 6).
520                          */
521 again:                  remove_next = 1 + (end > next->vm_end);
522                         end = next->vm_end;
523                         exporter = next;
524                         importer = vma;
525                 } else if (end > next->vm_start) {
526                         /*
527                          * vma expands, overlapping part of the next:
528                          * mprotect case 5 shifting the boundary up.
529                          */
530                         adjust_next = (end - next->vm_start) >> PAGE_SHIFT;
531                         exporter = next;
532                         importer = vma;
533                 } else if (end < vma->vm_end) {
534                         /*
535                          * vma shrinks, and !insert tells it's not
536                          * split_vma inserting another: so it must be
537                          * mprotect case 4 shifting the boundary down.
538                          */
539                         adjust_next = - ((vma->vm_end - end) >> PAGE_SHIFT);
540                         exporter = vma;
541                         importer = next;
542                 }
543
544                 /*
545                  * Easily overlooked: when mprotect shifts the boundary,
546                  * make sure the expanding vma has anon_vma set if the
547                  * shrinking vma had, to cover any anon pages imported.
548                  */
549                 if (exporter && exporter->anon_vma && !importer->anon_vma) {
550                         if (anon_vma_clone(importer, exporter))
551                                 return -ENOMEM;
552                         importer->anon_vma = exporter->anon_vma;
553                 }
554         }
555
556         if (file) {
557                 mapping = file->f_mapping;
558                 if (!(vma->vm_flags & VM_NONLINEAR))
559                         root = &mapping->i_mmap;
560                 spin_lock(&mapping->i_mmap_lock);
561                 if (importer &&
562                     vma->vm_truncate_count != next->vm_truncate_count) {
563                         /*
564                          * unmap_mapping_range might be in progress:
565                          * ensure that the expanding vma is rescanned.
566                          */
567                         importer->vm_truncate_count = 0;
568                 }
569                 if (insert) {
570                         insert->vm_truncate_count = vma->vm_truncate_count;
571                         /*
572                          * Put into prio_tree now, so instantiated pages
573                          * are visible to arm/parisc __flush_dcache_page
574                          * throughout; but we cannot insert into address
575                          * space until vma start or end is updated.
576                          */
577                         __vma_link_file(insert);
578                 }
579         }
580
581         if (root) {
582                 flush_dcache_mmap_lock(mapping);
583                 vma_prio_tree_remove(vma, root);
584                 if (adjust_next)
585                         vma_prio_tree_remove(next, root);
586         }
587
588         vma->vm_start = start;
589         vma->vm_end = end;
590         vma->vm_pgoff = pgoff;
591         if (adjust_next) {
592                 next->vm_start += adjust_next << PAGE_SHIFT;
593                 next->vm_pgoff += adjust_next;
594         }
595
596         if (root) {
597                 if (adjust_next)
598                         vma_prio_tree_insert(next, root);
599                 vma_prio_tree_insert(vma, root);
600                 flush_dcache_mmap_unlock(mapping);
601         }
602
603         if (remove_next) {
604                 /*
605                  * vma_merge has merged next into vma, and needs
606                  * us to remove next before dropping the locks.
607                  */
608                 __vma_unlink(mm, next, vma);
609                 if (file)
610                         __remove_shared_vm_struct(next, file, mapping);
611         } else if (insert) {
612                 /*
613                  * split_vma has split insert from vma, and needs
614                  * us to insert it before dropping the locks
615                  * (it may either follow vma or precede it).
616                  */
617                 __insert_vm_struct(mm, insert);
618         }
619
620         if (mapping)
621                 spin_unlock(&mapping->i_mmap_lock);
622
623         if (remove_next) {
624                 if (file) {
625                         fput(file);
626                         if (next->vm_flags & VM_EXECUTABLE)
627                                 removed_exe_file_vma(mm);
628                 }
629                 if (next->anon_vma)
630                         anon_vma_merge(vma, next);
631                 mm->map_count--;
632                 mpol_put(vma_policy(next));
633                 kmem_cache_free(vm_area_cachep, next);
634                 /*
635                  * In mprotect's case 6 (see comments on vma_merge),
636                  * we must remove another next too. It would clutter
637                  * up the code too much to do both in one go.
638                  */
639                 if (remove_next == 2) {
640                         next = vma->vm_next;
641                         goto again;
642                 }
643         }
644
645         validate_mm(mm);
646
647         return 0;
648 }
649
650 /*
651  * If the vma has a ->close operation then the driver probably needs to release
652  * per-vma resources, so we don't attempt to merge those.
653  */
654 static inline int is_mergeable_vma(struct vm_area_struct *vma,
655                         struct file *file, unsigned long vm_flags)
656 {
657         /* VM_CAN_NONLINEAR may get set later by f_op->mmap() */
658         if ((vma->vm_flags ^ vm_flags) & ~VM_CAN_NONLINEAR)
659                 return 0;
660         if (vma->vm_file != file)
661                 return 0;
662         if (vma->vm_ops && vma->vm_ops->close)
663                 return 0;
664         return 1;
665 }
666
667 static inline int is_mergeable_anon_vma(struct anon_vma *anon_vma1,
668                                         struct anon_vma *anon_vma2)
669 {
670         return !anon_vma1 || !anon_vma2 || (anon_vma1 == anon_vma2);
671 }
672
673 /*
674  * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
675  * in front of (at a lower virtual address and file offset than) the vma.
676  *
677  * We cannot merge two vmas if they have differently assigned (non-NULL)
678  * anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
679  *
680  * We don't check here for the merged mmap wrapping around the end of pagecache
681  * indices (16TB on ia32) because do_mmap_pgoff() does not permit mmap's which
682  * wrap, nor mmaps which cover the final page at index -1UL.
683  */
684 static int
685 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
686         struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
687 {
688         if (is_mergeable_vma(vma, file, vm_flags) &&
689             is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
690                 if (vma->vm_pgoff == vm_pgoff)
691                         return 1;
692         }
693         return 0;
694 }
695
696 /*
697  * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
698  * beyond (at a higher virtual address and file offset than) the vma.
699  *
700  * We cannot merge two vmas if they have differently assigned (non-NULL)
701  * anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
702  */
703 static int
704 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
705         struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
706 {
707         if (is_mergeable_vma(vma, file, vm_flags) &&
708             is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
709                 pgoff_t vm_pglen;
710                 vm_pglen = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
711                 if (vma->vm_pgoff + vm_pglen == vm_pgoff)
712                         return 1;
713         }
714         return 0;
715 }
716
717 /*
718  * Given a mapping request (addr,end,vm_flags,file,pgoff), figure out
719  * whether that can be merged with its predecessor or its successor.
720  * Or both (it neatly fills a hole).
721  *
722  * In most cases - when called for mmap, brk or mremap - [addr,end) is
723  * certain not to be mapped by the time vma_merge is called; but when
724  * called for mprotect, it is certain to be already mapped (either at
725  * an offset within prev, or at the start of next), and the flags of
726  * this area are about to be changed to vm_flags - and the no-change
727  * case has already been eliminated.
728  *
729  * The following mprotect cases have to be considered, where AAAA is
730  * the area passed down from mprotect_fixup, never extending beyond one
731  * vma, PPPPPP is the prev vma specified, and NNNNNN the next vma after:
732  *
733  *     AAAA             AAAA                AAAA          AAAA
734  *    PPPPPPNNNNNN    PPPPPPNNNNNN    PPPPPPNNNNNN    PPPPNNNNXXXX
735  *    cannot merge    might become    might become    might become
736  *                    PPNNNNNNNNNN    PPPPPPPPPPNN    PPPPPPPPPPPP 6 or
737  *    mmap, brk or    case 4 below    case 5 below    PPPPPPPPXXXX 7 or
738  *    mremap move:                                    PPPPNNNNNNNN 8
739  *        AAAA
740  *    PPPP    NNNN    PPPPPPPPPPPP    PPPPPPPPNNNN    PPPPNNNNNNNN
741  *    might become    case 1 below    case 2 below    case 3 below
742  *
743  * Odd one out? Case 8, because it extends NNNN but needs flags of XXXX:
744  * mprotect_fixup updates vm_flags & vm_page_prot on successful return.
745  */
746 struct vm_area_struct *vma_merge(struct mm_struct *mm,
747                         struct vm_area_struct *prev, unsigned long addr,
748                         unsigned long end, unsigned long vm_flags,
749                         struct anon_vma *anon_vma, struct file *file,
750                         pgoff_t pgoff, struct mempolicy *policy)
751 {
752         pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
753         struct vm_area_struct *area, *next;
754         int err;
755
756         /*
757          * We later require that vma->vm_flags == vm_flags,
758          * so this tests vma->vm_flags & VM_SPECIAL, too.
759          */
760         if (vm_flags & VM_SPECIAL)
761                 return NULL;
762
763         if (prev)
764                 next = prev->vm_next;
765         else
766                 next = mm->mmap;
767         area = next;
768         if (next && next->vm_end == end)                /* cases 6, 7, 8 */
769                 next = next->vm_next;
770
771         /*
772          * Can it merge with the predecessor?
773          */
774         if (prev && prev->vm_end == addr &&
775                         mpol_equal(vma_policy(prev), policy) &&
776                         can_vma_merge_after(prev, vm_flags,
777                                                 anon_vma, file, pgoff)) {
778                 /*
779                  * OK, it can.  Can we now merge in the successor as well?
780                  */
781                 if (next && end == next->vm_start &&
782                                 mpol_equal(policy, vma_policy(next)) &&
783                                 can_vma_merge_before(next, vm_flags,
784                                         anon_vma, file, pgoff+pglen) &&
785                                 is_mergeable_anon_vma(prev->anon_vma,
786                                                       next->anon_vma)) {
787                                                         /* cases 1, 6 */
788                         err = vma_adjust(prev, prev->vm_start,
789                                 next->vm_end, prev->vm_pgoff, NULL);
790                 } else                                  /* cases 2, 5, 7 */
791                         err = vma_adjust(prev, prev->vm_start,
792                                 end, prev->vm_pgoff, NULL);
793                 if (err)
794                         return NULL;
795                 return prev;
796         }
797
798         /*
799          * Can this new request be merged in front of next?
800          */
801         if (next && end == next->vm_start &&
802                         mpol_equal(policy, vma_policy(next)) &&
803                         can_vma_merge_before(next, vm_flags,
804                                         anon_vma, file, pgoff+pglen)) {
805                 if (prev && addr < prev->vm_end)        /* case 4 */
806                         err = vma_adjust(prev, prev->vm_start,
807                                 addr, prev->vm_pgoff, NULL);
808                 else                                    /* cases 3, 8 */
809                         err = vma_adjust(area, addr, next->vm_end,
810                                 next->vm_pgoff - pglen, NULL);
811                 if (err)
812                         return NULL;
813                 return area;
814         }
815
816         return NULL;
817 }
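
/*
 * Illustrative sketch (editor's addition): how an anonymous, brk-style
 * caller might try the cases diagrammed above before allocating a fresh
 * vma.  The wrapper name is hypothetical; compare with the real call in
 * mmap_region() below.  A NULL return means no merge was possible.
 */
#if 0
static struct vm_area_struct *example_try_merge(struct mm_struct *mm,
		struct vm_area_struct *prev, unsigned long addr,
		unsigned long len, unsigned long vm_flags)
{
	/* Anonymous mapping: no file, no policy, pgoff mirrors the address. */
	return vma_merge(mm, prev, addr, addr + len, vm_flags,
			 NULL, NULL, addr >> PAGE_SHIFT, NULL);
}
#endif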
818
819 /*
820  * Rough compatibility check to quickly see if it's even worth looking
821  * at sharing an anon_vma.
822  *
823  * They need to have the same vm_file, and the flags can only differ
824  * in things that mprotect may change.
825  *
826  * NOTE! The fact that we share an anon_vma doesn't _have_ to mean that
827  * we can merge the two vma's. For example, we refuse to merge a vma if
828  * there is a vm_ops->close() function, because that indicates that the
829  * driver is doing some kind of reference counting. But that doesn't
830  * really matter for the anon_vma sharing case.
831  */
832 static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct *b)
833 {
834         return a->vm_end == b->vm_start &&
835                 mpol_equal(vma_policy(a), vma_policy(b)) &&
836                 a->vm_file == b->vm_file &&
837                 !((a->vm_flags ^ b->vm_flags) & ~(VM_READ|VM_WRITE|VM_EXEC)) &&
838                 b->vm_pgoff == a->vm_pgoff + ((b->vm_start - a->vm_start) >> PAGE_SHIFT);
839 }
840
841 /*
842  * Do some basic sanity checking to see if we can re-use the anon_vma
843  * from 'old'. The 'a'/'b' vma's are in VM order - one of them will be
844  * the same as 'old', the other will be the new one that is trying
845  * to share the anon_vma.
846  *
847  * NOTE! This runs with mm_sem held for reading, so it is possible that
848  * the anon_vma of 'old' is concurrently in the process of being set up
849  * by another page fault trying to merge _that_. But that's ok: if it
850  * is being set up, that automatically means that it will be a singleton
851  * acceptable for merging, so we can do all of this optimistically. But
852  * we do that ACCESS_ONCE() to make sure that we never re-load the pointer.
853  *
854  * IOW: that the "list_is_singular()" test on the anon_vma_chain only
855  * matters for the 'stable anon_vma' case (ie the thing we want to avoid
856  * is to return an anon_vma that is "complex" due to having gone through
857  * a fork).
858  *
859  * We also make sure that the two vma's are compatible (adjacent,
860  * and with the same memory policies). That's all stable, even with just
861  * a read lock on the mm_sem.
862  */
863 static struct anon_vma *reusable_anon_vma(struct vm_area_struct *old, struct vm_area_struct *a, struct vm_area_struct *b)
864 {
865         if (anon_vma_compatible(a, b)) {
866                 struct anon_vma *anon_vma = ACCESS_ONCE(old->anon_vma);
867
868                 if (anon_vma && list_is_singular(&old->anon_vma_chain))
869                         return anon_vma;
870         }
871         return NULL;
872 }
873
874 /*
875  * find_mergeable_anon_vma is used by anon_vma_prepare, to check
876  * neighbouring vmas for a suitable anon_vma, before it goes off
877  * to allocate a new anon_vma.  It checks because a repetitive
878  * sequence of mprotects and faults may otherwise lead to distinct
879  * anon_vmas being allocated, preventing vma merge in subsequent
880  * mprotect.
881  */
882 struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma)
883 {
884         struct anon_vma *anon_vma;
885         struct vm_area_struct *near;
886
887         near = vma->vm_next;
888         if (!near)
889                 goto try_prev;
890
891         anon_vma = reusable_anon_vma(near, vma, near);
892         if (anon_vma)
893                 return anon_vma;
894 try_prev:
895         /*
896          * It is potentially slow to have to call find_vma_prev here.
897          * But it's only on the first write fault on the vma, not
898          * every time, and we could devise a way to avoid it later
899          * (e.g. stash info in next's anon_vma_node when assigning
900          * an anon_vma, or when trying vma_merge).  Another time.
901          */
902         BUG_ON(find_vma_prev(vma->vm_mm, vma->vm_start, &near) != vma);
903         if (!near)
904                 goto none;
905
906         anon_vma = reusable_anon_vma(near, near, vma);
907         if (anon_vma)
908                 return anon_vma;
909 none:
910         /*
911          * There's no absolute need to look only at touching neighbours:
912          * we could search further afield for "compatible" anon_vmas.
913          * But it would probably just be a waste of time searching,
914          * or lead to too many vmas hanging off the same anon_vma.
915          * We're trying to allow mprotect remerging later on,
916          * not trying to minimize memory used for anon_vmas.
917          */
918         return NULL;
919 }
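
/*
 * Illustrative sketch (editor's addition): the pattern in anon_vma_prepare()
 * (mm/rmap.c) that this helper serves - try a mergeable neighbour's anon_vma
 * first, and only allocate a new one when nothing suitable is found.  The
 * wrapper name is hypothetical and anon_vma_alloc() stands in for the
 * allocation path in mm/rmap.c; locking is omitted.
 */
#if 0
static struct anon_vma *example_pick_anon_vma(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = find_mergeable_anon_vma(vma);

	return anon_vma ? anon_vma : anon_vma_alloc();
}
#endif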
920
921 #ifdef CONFIG_PROC_FS
922 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
923                                                 struct file *file, long pages)
924 {
925         const unsigned long stack_flags
926                 = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
927
928         if (file) {
929                 mm->shared_vm += pages;
930                 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
931                         mm->exec_vm += pages;
932         } else if (flags & stack_flags)
933                 mm->stack_vm += pages;
934         if (flags & (VM_RESERVED|VM_IO))
935                 mm->reserved_vm += pages;
936 }
937 #endif /* CONFIG_PROC_FS */
938
939 /*
940  * The caller must hold down_write(&current->mm->mmap_sem).
941  */
942
943 unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
944                         unsigned long len, unsigned long prot,
945                         unsigned long flags, unsigned long pgoff)
946 {
947         struct mm_struct * mm = current->mm;
948         struct inode *inode;
949         unsigned int vm_flags;
950         int error;
951         unsigned long reqprot = prot;
952
953         /*
954          * Does the application expect PROT_READ to imply PROT_EXEC?
955          *
956          * (the exception is when the underlying filesystem is noexec
957          *  mounted, in which case we don't add PROT_EXEC.)
958          */
959         if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
960                 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
961                         prot |= PROT_EXEC;
962
963         if (!len)
964                 return -EINVAL;
965
966         if (!(flags & MAP_FIXED))
967                 addr = round_hint_to_min(addr);
968
969         /* Careful about overflows.. */
970         len = PAGE_ALIGN(len);
971         if (!len)
972                 return -ENOMEM;
973
974         /* offset overflow? */
975         if ((pgoff + (len >> PAGE_SHIFT)) < pgoff)
976                return -EOVERFLOW;
977
978         /* Too many mappings? */
979         if (mm->map_count > sysctl_max_map_count)
980                 return -ENOMEM;
981
982         /* Obtain the address to map to. we verify (or select) it and ensure
983          * that it represents a valid section of the address space.
984          */
985         addr = get_unmapped_area(file, addr, len, pgoff, flags);
986         if (addr & ~PAGE_MASK)
987                 return addr;
988
989         /* Do simple checking here so the lower-level routines won't have
990          * to. we assume access permissions have been handled by the open
991          * of the memory object, so we don't do any here.
992          */
993         vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
994                         mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
995
996         if (flags & MAP_LOCKED)
997                 if (!can_do_mlock())
998                         return -EPERM;
999
1000         /* mlock MCL_FUTURE? */
1001         if (vm_flags & VM_LOCKED) {
1002                 unsigned long locked, lock_limit;
1003                 locked = len >> PAGE_SHIFT;
1004                 locked += mm->locked_vm;
1005                 lock_limit = rlimit(RLIMIT_MEMLOCK);
1006                 lock_limit >>= PAGE_SHIFT;
1007                 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
1008                         return -EAGAIN;
1009         }
1010
1011         inode = file ? file->f_path.dentry->d_inode : NULL;
1012
1013         if (file) {
1014                 switch (flags & MAP_TYPE) {
1015                 case MAP_SHARED:
1016                         if ((prot&PROT_WRITE) && !(file->f_mode&FMODE_WRITE))
1017                                 return -EACCES;
1018
1019                         /*
1020                          * Make sure we don't allow writing to an append-only
1021                          * file..
1022                          */
1023                         if (IS_APPEND(inode) && (file->f_mode & FMODE_WRITE))
1024                                 return -EACCES;
1025
1026                         /*
1027                          * Make sure there are no mandatory locks on the file.
1028                          */
1029                         if (locks_verify_locked(inode))
1030                                 return -EAGAIN;
1031
1032                         vm_flags |= VM_SHARED | VM_MAYSHARE;
1033                         if (!(file->f_mode & FMODE_WRITE))
1034                                 vm_flags &= ~(VM_MAYWRITE | VM_SHARED);
1035
1036                         /* fall through */
1037                 case MAP_PRIVATE:
1038                         if (!(file->f_mode & FMODE_READ))
1039                                 return -EACCES;
1040                         if (file->f_path.mnt->mnt_flags & MNT_NOEXEC) {
1041                                 if (vm_flags & VM_EXEC)
1042                                         return -EPERM;
1043                                 vm_flags &= ~VM_MAYEXEC;
1044                         }
1045
1046                         if (!file->f_op || !file->f_op->mmap)
1047                                 return -ENODEV;
1048                         break;
1049
1050                 default:
1051                         return -EINVAL;
1052                 }
1053         } else {
1054                 switch (flags & MAP_TYPE) {
1055                 case MAP_SHARED:
1056                         /*
1057                          * Ignore pgoff.
1058                          */
1059                         pgoff = 0;
1060                         vm_flags |= VM_SHARED | VM_MAYSHARE;
1061                         break;
1062                 case MAP_PRIVATE:
1063                         /*
1064                          * Set pgoff according to addr for anon_vma.
1065                          */
1066                         pgoff = addr >> PAGE_SHIFT;
1067                         break;
1068                 default:
1069                         return -EINVAL;
1070                 }
1071         }
1072
1073         error = security_file_mmap(file, reqprot, prot, flags, addr, 0);
1074         if (error)
1075                 return error;
1076
1077         return mmap_region(file, addr, len, flags, vm_flags, pgoff);
1078 }
1079 EXPORT_SYMBOL(do_mmap_pgoff);
1080
1081 SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
1082                 unsigned long, prot, unsigned long, flags,
1083                 unsigned long, fd, unsigned long, pgoff)
1084 {
1085         struct file *file = NULL;
1086         unsigned long retval = -EBADF;
1087
1088         if (!(flags & MAP_ANONYMOUS)) {
1089                 if (unlikely(flags & MAP_HUGETLB))
1090                         return -EINVAL;
1091                 file = fget(fd);
1092                 if (!file)
1093                         goto out;
1094         } else if (flags & MAP_HUGETLB) {
1095                 struct user_struct *user = NULL;
1096                 /*
1097                  * VM_NORESERVE is used because the reservations will be
1098                  * taken when vm_ops->mmap() is called
1099                  * A dummy user value is used because we are not locking
1100                  * memory so no accounting is necessary
1101                  */
1102                 len = ALIGN(len, huge_page_size(&default_hstate));
1103                 file = hugetlb_file_setup(HUGETLB_ANON_FILE, len, VM_NORESERVE,
1104                                                 &user, HUGETLB_ANONHUGE_INODE);
1105                 if (IS_ERR(file))
1106                         return PTR_ERR(file);
1107         }
1108
1109         flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
1110
1111         down_write(&current->mm->mmap_sem);
1112         retval = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
1113         up_write(&current->mm->mmap_sem);
1114
1115         if (file)
1116                 fput(file);
1117 out:
1118         return retval;
1119 }
1120
1121 #ifdef __ARCH_WANT_SYS_OLD_MMAP
1122 struct mmap_arg_struct {
1123         unsigned long addr;
1124         unsigned long len;
1125         unsigned long prot;
1126         unsigned long flags;
1127         unsigned long fd;
1128         unsigned long offset;
1129 };
1130
1131 SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)
1132 {
1133         struct mmap_arg_struct a;
1134
1135         if (copy_from_user(&a, arg, sizeof(a)))
1136                 return -EFAULT;
1137         if (a.offset & ~PAGE_MASK)
1138                 return -EINVAL;
1139
1140         return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
1141                               a.offset >> PAGE_SHIFT);
1142 }
1143 #endif /* __ARCH_WANT_SYS_OLD_MMAP */
1144
1145 /*
1146  * Some shared mappings will want the pages marked read-only
1147  * to track write events. If so, we'll downgrade vm_page_prot
1148  * to the private version (using protection_map[] without the
1149  * VM_SHARED bit).
1150  */
1151 int vma_wants_writenotify(struct vm_area_struct *vma)
1152 {
1153         unsigned int vm_flags = vma->vm_flags;
1154
1155         /* If it was private or non-writable, the write bit is already clear */
1156         if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
1157                 return 0;
1158
1159         /* The backer wishes to know when pages are first written to? */
1160         if (vma->vm_ops && vma->vm_ops->page_mkwrite)
1161                 return 1;
1162
1163         /* The open routine did something to the protections already? */
1164         if (pgprot_val(vma->vm_page_prot) !=
1165             pgprot_val(vm_get_page_prot(vm_flags)))
1166                 return 0;
1167
1168         /* Specialty mapping? */
1169         if (vm_flags & (VM_PFNMAP|VM_INSERTPAGE))
1170                 return 0;
1171
1172         /* Can the mapping track the dirty pages? */
1173         return vma->vm_file && vma->vm_file->f_mapping &&
1174                 mapping_cap_account_dirty(vma->vm_file->f_mapping);
1175 }
1176
1177 /*
1178  * We account for memory if it's a private writeable mapping,
1179  * not hugepages and VM_NORESERVE wasn't set.
1180  */
1181 static inline int accountable_mapping(struct file *file, unsigned int vm_flags)
1182 {
1183         /*
1184          * hugetlb has its own accounting separate from the core VM
1185          * VM_HUGETLB may not be set yet so we cannot check for that flag.
1186          */
1187         if (file && is_file_hugepages(file))
1188                 return 0;
1189
1190         return (vm_flags & (VM_NORESERVE | VM_SHARED | VM_WRITE)) == VM_WRITE;
1191 }
1192
1193 unsigned long mmap_region(struct file *file, unsigned long addr,
1194                           unsigned long len, unsigned long flags,
1195                           unsigned int vm_flags, unsigned long pgoff)
1196 {
1197         struct mm_struct *mm = current->mm;
1198         struct vm_area_struct *vma, *prev;
1199         int correct_wcount = 0;
1200         int error;
1201         struct rb_node **rb_link, *rb_parent;
1202         unsigned long charged = 0;
1203         struct inode *inode =  file ? file->f_path.dentry->d_inode : NULL;
1204
1205         /* Clear old maps */
1206         error = -ENOMEM;
1207 munmap_back:
1208         vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
1209         if (vma && vma->vm_start < addr + len) {
1210                 if (do_munmap(mm, addr, len))
1211                         return -ENOMEM;
1212                 goto munmap_back;
1213         }
1214
1215         /* Check against address space limit. */
1216         if (!may_expand_vm(mm, len >> PAGE_SHIFT))
1217                 return -ENOMEM;
1218
1219         /*
1220          * Set 'VM_NORESERVE' if we should not account for the
1221          * memory use of this mapping.
1222          */
1223         if ((flags & MAP_NORESERVE)) {
1224                 /* We honor MAP_NORESERVE if allowed to overcommit */
1225                 if (sysctl_overcommit_memory != OVERCOMMIT_NEVER)
1226                         vm_flags |= VM_NORESERVE;
1227
1228                 /* hugetlb applies strict overcommit unless MAP_NORESERVE */
1229                 if (file && is_file_hugepages(file))
1230                         vm_flags |= VM_NORESERVE;
1231         }
1232
1233         /*
1234          * Private writable mapping: check memory availability
1235          */
1236         if (accountable_mapping(file, vm_flags)) {
1237                 charged = len >> PAGE_SHIFT;
1238                 if (security_vm_enough_memory(charged))
1239                         return -ENOMEM;
1240                 vm_flags |= VM_ACCOUNT;
1241         }
1242
1243         /*
1244          * Can we just expand an old mapping?
1245          */
1246         vma = vma_merge(mm, prev, addr, addr + len, vm_flags, NULL, file, pgoff, NULL);
1247         if (vma)
1248                 goto out;
1249
1250         /*
1251          * Determine the object being mapped and call the appropriate
1252          * specific mapper. The address has already been validated, and any
1253          * old mappings in the range have been removed from the list.
1254          */
1255         vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
1256         if (!vma) {
1257                 error = -ENOMEM;
1258                 goto unacct_error;
1259         }
1260
1261         vma->vm_mm = mm;
1262         vma->vm_start = addr;
1263         vma->vm_end = addr + len;
1264         vma->vm_flags = vm_flags;
1265         vma->vm_page_prot = vm_get_page_prot(vm_flags);
1266         vma->vm_pgoff = pgoff;
1267         INIT_LIST_HEAD(&vma->anon_vma_chain);
1268
1269         if (file) {
1270                 error = -EINVAL;
1271                 if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
1272                         goto free_vma;
1273                 if (vm_flags & VM_DENYWRITE) {
1274                         error = deny_write_access(file);
1275                         if (error)
1276                                 goto free_vma;
1277                         correct_wcount = 1;
1278                 }
1279                 vma->vm_file = file;
1280                 get_file(file);
1281                 error = file->f_op->mmap(file, vma);
1282                 if (error)
1283                         goto unmap_and_free_vma;
1284                 if (vm_flags & VM_EXECUTABLE)
1285                         added_exe_file_vma(mm);
1286
1287                 /* Can addr have changed??
1288                  *
1289                  * Answer: Yes, several device drivers can do it in their
1290                  *         f_op->mmap method. -DaveM
1291                  */
1292                 addr = vma->vm_start;
1293                 pgoff = vma->vm_pgoff;
1294                 vm_flags = vma->vm_flags;
1295         } else if (vm_flags & VM_SHARED) {
1296                 error = shmem_zero_setup(vma);
1297                 if (error)
1298                         goto free_vma;
1299         }
1300
1301         if (vma_wants_writenotify(vma)) {
1302                 pgprot_t pprot = vma->vm_page_prot;
1303
1304                 /* Can vma->vm_page_prot have changed??
1305                  *
1306                  * Answer: Yes, drivers may have changed it in their
1307                  *         f_op->mmap method.
1308                  *
1309                  * Ensures that vmas marked as uncached stay that way.
1310                  */
1311                 vma->vm_page_prot = vm_get_page_prot(vm_flags & ~VM_SHARED);
1312                 if (pgprot_val(pprot) == pgprot_val(pgprot_noncached(pprot)))
1313                         vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
1314         }
1315
1316         vma_link(mm, vma, prev, rb_link, rb_parent);
1317         file = vma->vm_file;
1318
1319         /* Once vma denies write, undo our temporary denial count */
1320         if (correct_wcount)
1321                 atomic_inc(&inode->i_writecount);
1322 out:
1323         perf_event_mmap(vma);
1324
1325         mm->total_vm += len >> PAGE_SHIFT;
1326         vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
1327         if (vm_flags & VM_LOCKED) {
1328                 if (!mlock_vma_pages_range(vma, addr, addr + len))
1329                         mm->locked_vm += (len >> PAGE_SHIFT);
1330         } else if ((flags & MAP_POPULATE) && !(flags & MAP_NONBLOCK))
1331                 make_pages_present(addr, addr + len);
1332         return addr;
1333
1334 unmap_and_free_vma:
1335         if (correct_wcount)
1336                 atomic_inc(&inode->i_writecount);
1337         vma->vm_file = NULL;
1338         fput(file);
1339
1340         /* Undo any partial mapping done by a device driver. */
1341         unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
1342         charged = 0;
1343 free_vma:
1344         kmem_cache_free(vm_area_cachep, vma);
1345 unacct_error:
1346         if (charged)
1347                 vm_unacct_memory(charged);
1348         return error;
1349 }
1350
1351 /* Get an address range which is currently unmapped.
1352  * For shmat() with addr=0.
1353  *
1354  * Ugly calling convention alert:
1355  * Return value with the low bits set means error value,
1356  * ie
1357  *      if (ret & ~PAGE_MASK)
1358  *              error = ret;
1359  *
1360  * This function "knows" that -ENOMEM has the bits set.
1361  */
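
/*
 * Illustrative sketch (editor's addition): the check callers are expected to
 * perform on the return value described above; do_mmap_pgoff() uses exactly
 * this pattern.  Fragment only, not a complete function.
 */
#if 0
	addr = get_unmapped_area(file, addr, len, pgoff, flags);
	if (addr & ~PAGE_MASK)		/* low bits set: this is an -errno */
		return addr;
#endif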
1362 #ifndef HAVE_ARCH_UNMAPPED_AREA
1363 unsigned long
1364 arch_get_unmapped_area(struct file *filp, unsigned long addr,
1365                 unsigned long len, unsigned long pgoff, unsigned long flags)
1366 {
1367         struct mm_struct *mm = current->mm;
1368         struct vm_area_struct *vma;
1369         unsigned long start_addr;
1370
1371         if (len > TASK_SIZE)
1372                 return -ENOMEM;
1373
1374         if (flags & MAP_FIXED)
1375                 return addr;
1376
1377         if (addr) {
1378                 addr = PAGE_ALIGN(addr);
1379                 vma = find_vma(mm, addr);
1380                 if (TASK_SIZE - len >= addr &&
1381                     (!vma || addr + len <= vma->vm_start))
1382                         return addr;
1383         }
1384         if (len > mm->cached_hole_size) {
1385                 start_addr = addr = mm->free_area_cache;
1386         } else {
1387                 start_addr = addr = TASK_UNMAPPED_BASE;
1388                 mm->cached_hole_size = 0;
1389         }
1390
1391 full_search:
1392         for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
1393                 /* At this point:  (!vma || addr < vma->vm_end). */
1394                 if (TASK_SIZE - len < addr) {
1395                         /*
1396                          * Start a new search - just in case we missed
1397                          * some holes.
1398                          */
1399                         if (start_addr != TASK_UNMAPPED_BASE) {
1400                                 addr = TASK_UNMAPPED_BASE;
1401                                 start_addr = addr;
1402                                 mm->cached_hole_size = 0;
1403                                 goto full_search;
1404                         }
1405                         return -ENOMEM;
1406                 }
1407                 if (!vma || addr + len <= vma->vm_start) {
1408                         /*
1409                          * Remember the place where we stopped the search:
1410                          */
1411                         mm->free_area_cache = addr + len;
1412                         return addr;
1413                 }
1414                 if (addr + mm->cached_hole_size < vma->vm_start)
1415                         mm->cached_hole_size = vma->vm_start - addr;
1416                 addr = vma->vm_end;
1417         }
1418 }
1419 #endif  
1420
1421 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
1422 {
1423         /*
1424          * Is this a new hole at the lowest possible address?
1425          */
1426         if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
1427                 mm->free_area_cache = addr;
1428                 mm->cached_hole_size = ~0UL;
1429         }
1430 }
1431
1432 /*
1433  * This mmap-allocator allocates new areas top-down from below the
1434  * stack's low limit (the base):
1435  */
1436 #ifndef HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
1437 unsigned long
1438 arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
1439                           const unsigned long len, const unsigned long pgoff,
1440                           const unsigned long flags)
1441 {
1442         struct vm_area_struct *vma;
1443         struct mm_struct *mm = current->mm;
1444         unsigned long addr = addr0;
1445
1446         /* requested length too big for entire address space */
1447         if (len > TASK_SIZE)
1448                 return -ENOMEM;
1449
1450         if (flags & MAP_FIXED)
1451                 return addr;
1452
1453         /* requesting a specific address */
1454         if (addr) {
1455                 addr = PAGE_ALIGN(addr);
1456                 vma = find_vma(mm, addr);
1457                 if (TASK_SIZE - len >= addr &&
1458                                 (!vma || addr + len <= vma->vm_start))
1459                         return addr;
1460         }
1461
1462         /* check if free_area_cache is useful for us */
1463         if (len <= mm->cached_hole_size) {
1464                 mm->cached_hole_size = 0;
1465                 mm->free_area_cache = mm->mmap_base;
1466         }
1467
1468         /* either no address requested or can't fit in requested address hole */
1469         addr = mm->free_area_cache;
1470
1471         /* make sure it can fit in the remaining address space */
1472         if (addr > len) {
1473                 vma = find_vma(mm, addr-len);
1474                 if (!vma || addr <= vma->vm_start)
1475                         /* remember the address as a hint for next time */
1476                         return (mm->free_area_cache = addr-len);
1477         }
1478
1479         if (mm->mmap_base < len)
1480                 goto bottomup;
1481
1482         addr = mm->mmap_base-len;
1483
1484         do {
1485                 /*
1486                  * Lookup failure means no vma is above this address,
1487                  * else if new region fits below vma->vm_start,
1488                  * return with success:
1489                  */
1490                 vma = find_vma(mm, addr);
1491                 if (!vma || addr+len <= vma->vm_start)
1492                         /* remember the address as a hint for next time */
1493                         return (mm->free_area_cache = addr);
1494
1495                 /* remember the largest hole we saw so far */
1496                 if (addr + mm->cached_hole_size < vma->vm_start)
1497                         mm->cached_hole_size = vma->vm_start - addr;
1498
1499                 /* try just below the current vma->vm_start */
1500                 addr = vma->vm_start-len;
1501         } while (len < vma->vm_start);
1502
1503 bottomup:
1504         /*
1505          * A failed mmap() very likely causes application failure,
1506          * so fall back to the bottom-up function here. This scenario
1507          * can happen with large stack limits and large mmap()
1508          * allocations.
1509          */
1510         mm->cached_hole_size = ~0UL;
1511         mm->free_area_cache = TASK_UNMAPPED_BASE;
1512         addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
1513         /*
1514          * Restore the topdown base:
1515          */
1516         mm->free_area_cache = mm->mmap_base;
1517         mm->cached_hole_size = ~0UL;
1518
1519         return addr;
1520 }
1521 #endif
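
/*
 * A worked example of the top-down walk above, with made-up numbers:
 * with mm->mmap_base at 3GB and an existing vma covering [3GB-8MB, 3GB),
 * a 4MB request first tries 3GB-4MB, collides with that vma, steps down
 * to vma->vm_start - len = 3GB-12MB, finds that free and returns it,
 * caching it in free_area_cache.  Only when nothing fits below mmap_base
 * does the code fall back to the bottom-up allocator at "bottomup:".
 */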
1522
1523 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
1524 {
1525         /*
1526          * Is this a new hole at the highest possible address?
1527          */
1528         if (addr > mm->free_area_cache)
1529                 mm->free_area_cache = addr;
1530
1531         /* don't allow allocations above current base */
1532         if (mm->free_area_cache > mm->mmap_base)
1533                 mm->free_area_cache = mm->mmap_base;
1534 }
1535
1536 unsigned long
1537 get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
1538                 unsigned long pgoff, unsigned long flags)
1539 {
1540         unsigned long (*get_area)(struct file *, unsigned long,
1541                                   unsigned long, unsigned long, unsigned long);
1542
1543         unsigned long error = arch_mmap_check(addr, len, flags);
1544         if (error)
1545                 return error;
1546
1547         /* Careful about overflows.. */
1548         if (len > TASK_SIZE)
1549                 return -ENOMEM;
1550
1551         get_area = current->mm->get_unmapped_area;
1552         if (file && file->f_op && file->f_op->get_unmapped_area)
1553                 get_area = file->f_op->get_unmapped_area;
1554         addr = get_area(file, addr, len, pgoff, flags);
1555         if (IS_ERR_VALUE(addr))
1556                 return addr;
1557
1558         if (addr > TASK_SIZE - len)
1559                 return -ENOMEM;
1560         if (addr & ~PAGE_MASK)
1561                 return -EINVAL;
1562
1563         return arch_rebalance_pgtables(addr, len);
1564 }
1565
1566 EXPORT_SYMBOL(get_unmapped_area);
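
/*
 * A minimal sketch of the calling convention, mirroring what do_brk()
 * below does: the return value is either a page-aligned address or an
 * errno squeezed into the low bits, so callers test the low bits (or
 * use IS_ERR_VALUE()) before treating it as an address:
 *
 *	addr = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
 *	if (addr & ~PAGE_MASK)
 *		return addr;		really an error code
 */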
1567
1568 /* Look up the first VMA which satisfies  addr < vm_end,  NULL if none. */
1569 struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
1570 {
1571         struct vm_area_struct *vma = NULL;
1572
1573         if (mm) {
1574                 /* Check the cache first. */
1575                 /* (Cache hit rate is typically around 35%.) */
1576                 vma = mm->mmap_cache;
1577                 if (!(vma && vma->vm_end > addr && vma->vm_start <= addr)) {
1578                         struct rb_node * rb_node;
1579
1580                         rb_node = mm->mm_rb.rb_node;
1581                         vma = NULL;
1582
1583                         while (rb_node) {
1584                                 struct vm_area_struct * vma_tmp;
1585
1586                                 vma_tmp = rb_entry(rb_node,
1587                                                 struct vm_area_struct, vm_rb);
1588
1589                                 if (vma_tmp->vm_end > addr) {
1590                                         vma = vma_tmp;
1591                                         if (vma_tmp->vm_start <= addr)
1592                                                 break;
1593                                         rb_node = rb_node->rb_left;
1594                                 } else
1595                                         rb_node = rb_node->rb_right;
1596                         }
1597                         if (vma)
1598                                 mm->mmap_cache = vma;
1599                 }
1600         }
1601         return vma;
1602 }
1603
1604 EXPORT_SYMBOL(find_vma);
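
/*
 * Typical use, sketched: the caller holds mmap_sem (read is enough) and
 * must remember that find_vma() returns the first vma ending above
 * addr, which may also start above addr:
 *
 *	down_read(&mm->mmap_sem);
 *	vma = find_vma(mm, addr);
 *	if (vma && vma->vm_start <= addr)
 *		... addr lies inside vma ...
 *	up_read(&mm->mmap_sem);
 */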
1605
1606 /* Same as find_vma, but also return a pointer to the previous VMA in *pprev. */
1607 struct vm_area_struct *
1608 find_vma_prev(struct mm_struct *mm, unsigned long addr,
1609                         struct vm_area_struct **pprev)
1610 {
1611         struct vm_area_struct *vma = NULL, *prev = NULL;
1612         struct rb_node *rb_node;
1613         if (!mm)
1614                 goto out;
1615
1616         /* Guard against addr being lower than the first VMA */
1617         vma = mm->mmap;
1618
1619         /* Go through the RB tree quickly. */
1620         rb_node = mm->mm_rb.rb_node;
1621
1622         while (rb_node) {
1623                 struct vm_area_struct *vma_tmp;
1624                 vma_tmp = rb_entry(rb_node, struct vm_area_struct, vm_rb);
1625
1626                 if (addr < vma_tmp->vm_end) {
1627                         rb_node = rb_node->rb_left;
1628                 } else {
1629                         prev = vma_tmp;
1630                         if (!prev->vm_next || (addr < prev->vm_next->vm_end))
1631                                 break;
1632                         rb_node = rb_node->rb_right;
1633                 }
1634         }
1635
1636 out:
1637         *pprev = prev;
1638         return prev ? prev->vm_next : vma;
1639 }
1640
1641 /*
1642  * Verify that the stack growth is acceptable and
1643  * update accounting. This is shared with both the
1644  * grow-up and grow-down cases.
1645  */
1646 static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, unsigned long grow)
1647 {
1648         struct mm_struct *mm = vma->vm_mm;
1649         struct rlimit *rlim = current->signal->rlim;
1650         unsigned long new_start;
1651
1652         /* address space limit tests */
1653         if (!may_expand_vm(mm, grow))
1654                 return -ENOMEM;
1655
1656         /* Stack limit test */
1657         if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
1658                 return -ENOMEM;
1659
1660         /* mlock limit tests */
1661         if (vma->vm_flags & VM_LOCKED) {
1662                 unsigned long locked;
1663                 unsigned long limit;
1664                 locked = mm->locked_vm + grow;
1665                 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
1666                 limit >>= PAGE_SHIFT;
1667                 if (locked > limit && !capable(CAP_IPC_LOCK))
1668                         return -ENOMEM;
1669         }
1670
1671         /* Check to ensure the stack will not grow into a hugetlb-only region */
1672         new_start = (vma->vm_flags & VM_GROWSUP) ? vma->vm_start :
1673                         vma->vm_end - size;
1674         if (is_hugepage_only_range(vma->vm_mm, new_start, size))
1675                 return -EFAULT;
1676
1677         /*
1678          * Overcommit..  This must be the final test, as it will
1679          * update security statistics.
1680          */
1681         if (security_vm_enough_memory_mm(mm, grow))
1682                 return -ENOMEM;
1683
1684         /* Ok, everything looks good - let it rip */
1685         mm->total_vm += grow;
1686         if (vma->vm_flags & VM_LOCKED)
1687                 mm->locked_vm += grow;
1688         vm_stat_account(mm, vma->vm_flags, vma->vm_file, grow);
1689         return 0;
1690 }
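
/*
 * Note the mixed units in acct_stack_growth(): "size" is the stack
 * vma's new total size in bytes (checked against RLIMIT_STACK), while
 * "grow" is only the increment, in pages (checked against the mlock and
 * address-space limits and added to the mm counters).  For example, a
 * 64kB stack growing by one 4kB page passes size = 68kB and grow = 1.
 */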
1691
1692 #if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
1693 /*
1694  * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
1695  * vma is the last one with address > vma->vm_end.  Have to extend vma.
1696  */
1697 #ifndef CONFIG_IA64
1698 static
1699 #endif
1700 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
1701 {
1702         int error;
1703
1704         if (!(vma->vm_flags & VM_GROWSUP))
1705                 return -EFAULT;
1706
1707         /*
1708          * We must make sure the anon_vma is allocated
1709          * so that the anon_vma locking is not a noop.
1710          */
1711         if (unlikely(anon_vma_prepare(vma)))
1712                 return -ENOMEM;
1713         anon_vma_lock(vma);
1714
1715         /*
1716          * vma->vm_start/vm_end cannot change under us because the caller
1717          * is required to hold the mmap_sem in read mode.  We need the
1718          * anon_vma lock to serialize against concurrent expand_stacks.
1719          * Also guard against wrapping around to address 0.
1720          */
1721         if (address < PAGE_ALIGN(address+4))
1722                 address = PAGE_ALIGN(address+4);
1723         else {
1724                 anon_vma_unlock(vma);
1725                 return -ENOMEM;
1726         }
1727         error = 0;
1728
1729         /* Somebody else might have raced and expanded it already */
1730         if (address > vma->vm_end) {
1731                 unsigned long size, grow;
1732
1733                 size = address - vma->vm_start;
1734                 grow = (address - vma->vm_end) >> PAGE_SHIFT;
1735
1736                 error = acct_stack_growth(vma, size, grow);
1737                 if (!error)
1738                         vma->vm_end = address;
1739         }
1740         anon_vma_unlock(vma);
1741         return error;
1742 }
1743 #endif /* CONFIG_STACK_GROWSUP || CONFIG_IA64 */
1744
1745 /*
1746  * vma is the first one with address < vma->vm_start.  Have to extend vma.
1747  */
1748 static int expand_downwards(struct vm_area_struct *vma,
1749                                    unsigned long address)
1750 {
1751         int error;
1752
1753         /*
1754          * We must make sure the anon_vma is allocated
1755          * so that the anon_vma locking is not a noop.
1756          */
1757         if (unlikely(anon_vma_prepare(vma)))
1758                 return -ENOMEM;
1759
1760         address &= PAGE_MASK;
1761         error = security_file_mmap(NULL, 0, 0, 0, address, 1);
1762         if (error)
1763                 return error;
1764
1765         anon_vma_lock(vma);
1766
1767         /*
1768          * vma->vm_start/vm_end cannot change under us because the caller
1769          * is required to hold the mmap_sem in read mode.  We need the
1770          * anon_vma lock to serialize against concurrent expand_stacks.
1771          */
1772
1773         /* Somebody else might have raced and expanded it already */
1774         if (address < vma->vm_start) {
1775                 unsigned long size, grow;
1776
1777                 size = vma->vm_end - address;
1778                 grow = (vma->vm_start - address) >> PAGE_SHIFT;
1779
1780                 error = acct_stack_growth(vma, size, grow);
1781                 if (!error) {
1782                         vma->vm_start = address;
1783                         vma->vm_pgoff -= grow;
1784                 }
1785         }
1786         anon_vma_unlock(vma);
1787         return error;
1788 }
1789
1790 int expand_stack_downwards(struct vm_area_struct *vma, unsigned long address)
1791 {
1792         return expand_downwards(vma, address);
1793 }
1794
1795 #ifdef CONFIG_STACK_GROWSUP
1796 int expand_stack(struct vm_area_struct *vma, unsigned long address)
1797 {
1798         return expand_upwards(vma, address);
1799 }
1800
1801 struct vm_area_struct *
1802 find_extend_vma(struct mm_struct *mm, unsigned long addr)
1803 {
1804         struct vm_area_struct *vma, *prev;
1805
1806         addr &= PAGE_MASK;
1807         vma = find_vma_prev(mm, addr, &prev);
1808         if (vma && (vma->vm_start <= addr))
1809                 return vma;
1810         if (!prev || expand_stack(prev, addr))
1811                 return NULL;
1812         if (prev->vm_flags & VM_LOCKED) {
1813                 mlock_vma_pages_range(prev, addr, prev->vm_end);
1814         }
1815         return prev;
1816 }
1817 #else
1818 int expand_stack(struct vm_area_struct *vma, unsigned long address)
1819 {
1820         return expand_downwards(vma, address);
1821 }
1822
1823 struct vm_area_struct *
1824 find_extend_vma(struct mm_struct * mm, unsigned long addr)
1825 {
1826         struct vm_area_struct * vma;
1827         unsigned long start;
1828
1829         addr &= PAGE_MASK;
1830         vma = find_vma(mm,addr);
1831         if (!vma)
1832                 return NULL;
1833         if (vma->vm_start <= addr)
1834                 return vma;
1835         if (!(vma->vm_flags & VM_GROWSDOWN))
1836                 return NULL;
1837         start = vma->vm_start;
1838         if (expand_stack(vma, addr))
1839                 return NULL;
1840         if (vma->vm_flags & VM_LOCKED) {
1841                 mlock_vma_pages_range(vma, addr, start);
1842         }
1843         return vma;
1844 }
1845 #endif
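
/*
 * Whichever variant is built, callers such as get_user_pages() get one
 * helper: under mmap_sem, "find the vma covering addr, growing a stack
 * vma if addr falls just beyond it".  Sketch of the usual pattern:
 *
 *	vma = find_extend_vma(mm, start);
 *	if (!vma)
 *		... start is not mapped and cannot be grown into ...
 */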
1846
1847 /*
1848  * Ok - we have the memory areas we should free on the vma list,
1849  * so release them, and do the vma updates.
1850  *
1851  * Called with the mm semaphore held.
1852  */
1853 static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
1854 {
1855         /* Update high watermark before we lower total_vm */
1856         update_hiwater_vm(mm);
1857         do {
1858                 long nrpages = vma_pages(vma);
1859
1860                 mm->total_vm -= nrpages;
1861                 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
1862                 vma = remove_vma(vma);
1863         } while (vma);
1864         validate_mm(mm);
1865 }
1866
1867 /*
1868  * Get rid of page table information in the indicated region.
1869  *
1870  * Called with the mm semaphore held.
1871  */
1872 static void unmap_region(struct mm_struct *mm,
1873                 struct vm_area_struct *vma, struct vm_area_struct *prev,
1874                 unsigned long start, unsigned long end)
1875 {
1876         struct vm_area_struct *next = prev? prev->vm_next: mm->mmap;
1877         struct mmu_gather *tlb;
1878         unsigned long nr_accounted = 0;
1879
1880         lru_add_drain();
1881         tlb = tlb_gather_mmu(mm, 0);
1882         update_hiwater_rss(mm);
1883         unmap_vmas(&tlb, vma, start, end, &nr_accounted, NULL);
1884         vm_unacct_memory(nr_accounted);
1885         free_pgtables(tlb, vma, prev? prev->vm_end: FIRST_USER_ADDRESS,
1886                                  next? next->vm_start: 0);
1887         tlb_finish_mmu(tlb, start, end);
1888 }
1889
1890 /*
1891  * Create a list of vmas touched by the unmap, removing them from the mm's
1892  * vma list as we go.
1893  */
1894 static void
1895 detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
1896         struct vm_area_struct *prev, unsigned long end)
1897 {
1898         struct vm_area_struct **insertion_point;
1899         struct vm_area_struct *tail_vma = NULL;
1900         unsigned long addr;
1901
1902         insertion_point = (prev ? &prev->vm_next : &mm->mmap);
1903         do {
1904                 rb_erase(&vma->vm_rb, &mm->mm_rb);
1905                 mm->map_count--;
1906                 tail_vma = vma;
1907                 vma = vma->vm_next;
1908         } while (vma && vma->vm_start < end);
1909         *insertion_point = vma;
1910         tail_vma->vm_next = NULL;
1911         if (mm->unmap_area == arch_unmap_area)
1912                 addr = prev ? prev->vm_end : mm->mmap_base;
1913         else
1914                 addr = vma ?  vma->vm_start : mm->mmap_base;
1915         mm->unmap_area(mm, addr);
1916         mm->mmap_cache = NULL;          /* Kill the cache. */
1917 }
1918
1919 /*
1920  * __split_vma() bypasses sysctl_max_map_count checking.  We use this on the
1921  * munmap path where it doesn't make sense to fail.
1922  */
1923 static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
1924               unsigned long addr, int new_below)
1925 {
1926         struct mempolicy *pol;
1927         struct vm_area_struct *new;
1928         int err = -ENOMEM;
1929
1930         if (is_vm_hugetlb_page(vma) && (addr &
1931                                         ~(huge_page_mask(hstate_vma(vma)))))
1932                 return -EINVAL;
1933
1934         new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
1935         if (!new)
1936                 goto out_err;
1937
1938         /* most fields are the same, copy all, and then fixup */
1939         *new = *vma;
1940
1941         INIT_LIST_HEAD(&new->anon_vma_chain);
1942
1943         if (new_below)
1944                 new->vm_end = addr;
1945         else {
1946                 new->vm_start = addr;
1947                 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
1948         }
1949
1950         pol = mpol_dup(vma_policy(vma));
1951         if (IS_ERR(pol)) {
1952                 err = PTR_ERR(pol);
1953                 goto out_free_vma;
1954         }
1955         vma_set_policy(new, pol);
1956
1957         if (anon_vma_clone(new, vma))
1958                 goto out_free_mpol;
1959
1960         if (new->vm_file) {
1961                 get_file(new->vm_file);
1962                 if (vma->vm_flags & VM_EXECUTABLE)
1963                         added_exe_file_vma(mm);
1964         }
1965
1966         if (new->vm_ops && new->vm_ops->open)
1967                 new->vm_ops->open(new);
1968
1969         if (new_below)
1970                 err = vma_adjust(vma, addr, vma->vm_end, vma->vm_pgoff +
1971                         ((addr - new->vm_start) >> PAGE_SHIFT), new);
1972         else
1973                 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
1974
1975         /* Success. */
1976         if (!err)
1977                 return 0;
1978
1979         /* Clean everything up if vma_adjust failed. */
1980         if (new->vm_ops && new->vm_ops->close)
                     new->vm_ops->close(new);
1981         if (new->vm_file) {
1982                 if (vma->vm_flags & VM_EXECUTABLE)
1983                         removed_exe_file_vma(mm);
1984                 fput(new->vm_file);
1985         }
1986  out_free_mpol:
1987         mpol_put(pol);
1988  out_free_vma:
1989         kmem_cache_free(vm_area_cachep, new);
1990  out_err:
1991         return err;
1992 }
1993
1994 /*
1995  * Split a vma into two pieces at address 'addr', a new vma is allocated
1996  * either for the first part or the tail.
1997  */
1998 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
1999               unsigned long addr, int new_below)
2000 {
2001         if (mm->map_count >= sysctl_max_map_count)
2002                 return -ENOMEM;
2003
2004         return __split_vma(mm, vma, addr, new_below);
2005 }
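
/*
 * split_vma() is what callers like mprotect() and mlock() use when a
 * new attribute range begins or ends inside an existing vma.  A sketch
 * of the usual pattern, with mmap_sem held for writing:
 *
 *	if (start > vma->vm_start) {
 *		error = split_vma(mm, vma, start, 1);
 *		if (error)
 *			goto fail;
 *	}
 *	if (end < vma->vm_end) {
 *		error = split_vma(mm, vma, end, 0);
 *		if (error)
 *			goto fail;
 *	}
 */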
2006
2007 /* Munmap is split into 2 main parts -- this part which finds
2008  * what needs doing, and the areas themselves, which do the
2009  * work.  This now handles partial unmappings.
2010  * Jeremy Fitzhardinge <jeremy@goop.org>
2011  */
2012 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
2013 {
2014         unsigned long end;
2015         struct vm_area_struct *vma, *prev, *last;
2016
2017         if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
2018                 return -EINVAL;
2019
2020         if ((len = PAGE_ALIGN(len)) == 0)
2021                 return -EINVAL;
2022
2023         /* Find the first overlapping VMA */
2024         vma = find_vma_prev(mm, start, &prev);
2025         if (!vma)
2026                 return 0;
2027         /* we have  start < vma->vm_end  */
2028
2029         /* if it doesn't overlap, we have nothing.. */
2030         end = start + len;
2031         if (vma->vm_start >= end)
2032                 return 0;
2033
2034         /*
2035          * If we need to split any vma, do it now to save pain later.
2036          *
2037          * Note: mremap's move_vma VM_ACCOUNT handling assumes a partially
2038          * unmapped vm_area_struct will remain in use: so lower split_vma
2039          * places tmp vma above, and higher split_vma places tmp vma below.
2040          */
2041         if (start > vma->vm_start) {
2042                 int error;
2043
2044                 /*
2045                  * Make sure that map_count on return from munmap() will
2046                  * not exceed its limit; but let map_count go just above
2047                  * its limit temporarily, to help free resources as expected.
2048                  */
2049                 if (end < vma->vm_end && mm->map_count >= sysctl_max_map_count)
2050                         return -ENOMEM;
2051
2052                 error = __split_vma(mm, vma, start, 0);
2053                 if (error)
2054                         return error;
2055                 prev = vma;
2056         }
2057
2058         /* Does it split the last one? */
2059         last = find_vma(mm, end);
2060         if (last && end > last->vm_start) {
2061                 int error = __split_vma(mm, last, end, 1);
2062                 if (error)
2063                         return error;
2064         }
2065         vma = prev? prev->vm_next: mm->mmap;
2066
2067         /*
2068          * unlock any mlock()ed ranges before detaching vmas
2069          */
2070         if (mm->locked_vm) {
2071                 struct vm_area_struct *tmp = vma;
2072                 while (tmp && tmp->vm_start < end) {
2073                         if (tmp->vm_flags & VM_LOCKED) {
2074                                 mm->locked_vm -= vma_pages(tmp);
2075                                 munlock_vma_pages_all(tmp);
2076                         }
2077                         tmp = tmp->vm_next;
2078                 }
2079         }
2080
2081         /*
2082          * Remove the vmas, and unmap the actual pages
2083          */
2084         detach_vmas_to_be_unmapped(mm, vma, prev, end);
2085         unmap_region(mm, vma, prev, start, end);
2086
2087         /* Fix up all other VM information */
2088         remove_vma_list(mm, vma);
2089
2090         return 0;
2091 }
2092
2093 EXPORT_SYMBOL(do_munmap);
2094
2095 SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
2096 {
2097         int ret;
2098         struct mm_struct *mm = current->mm;
2099
2100         profile_munmap(addr);
2101
2102         down_write(&mm->mmap_sem);
2103         ret = do_munmap(mm, addr, len);
2104         up_write(&mm->mmap_sem);
2105         return ret;
2106 }
2107
2108 static inline void verify_mm_writelocked(struct mm_struct *mm)
2109 {
2110 #ifdef CONFIG_DEBUG_VM
2111         if (unlikely(down_read_trylock(&mm->mmap_sem))) {
2112                 WARN_ON(1);
2113                 up_read(&mm->mmap_sem);
2114         }
2115 #endif
2116 }
2117
2118 /*
2119  *  this is really a simplified "do_mmap".  it only handles
2120  *  anonymous maps.  eventually we may be able to do some
2121  *  brk-specific accounting here.
2122  */
2123 unsigned long do_brk(unsigned long addr, unsigned long len)
2124 {
2125         struct mm_struct * mm = current->mm;
2126         struct vm_area_struct * vma, * prev;
2127         unsigned long flags;
2128         struct rb_node ** rb_link, * rb_parent;
2129         pgoff_t pgoff = addr >> PAGE_SHIFT;
2130         int error;
2131
2132         len = PAGE_ALIGN(len);
2133         if (!len)
2134                 return addr;
2135
2136         error = security_file_mmap(NULL, 0, 0, 0, addr, 1);
2137         if (error)
2138                 return error;
2139
2140         flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
2141
2142         error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
2143         if (error & ~PAGE_MASK)
2144                 return error;
2145
2146         /*
2147          * mlock MCL_FUTURE?
2148          */
2149         if (mm->def_flags & VM_LOCKED) {
2150                 unsigned long locked, lock_limit;
2151                 locked = len >> PAGE_SHIFT;
2152                 locked += mm->locked_vm;
2153                 lock_limit = rlimit(RLIMIT_MEMLOCK);
2154                 lock_limit >>= PAGE_SHIFT;
2155                 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
2156                         return -EAGAIN;
2157         }
2158
2159         /*
2160          * mm->mmap_sem is required to protect against another thread
2161          * changing the mappings in case we sleep.
2162          */
2163         verify_mm_writelocked(mm);
2164
2165         /*
2166          * Clear old maps.  This also does some error checking for us
2167          */
2168  munmap_back:
2169         vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
2170         if (vma && vma->vm_start < addr + len) {
2171                 if (do_munmap(mm, addr, len))
2172                         return -ENOMEM;
2173                 goto munmap_back;
2174         }
2175
2176         /* Check against address space limits *after* clearing old maps... */
2177         if (!may_expand_vm(mm, len >> PAGE_SHIFT))
2178                 return -ENOMEM;
2179
2180         if (mm->map_count > sysctl_max_map_count)
2181                 return -ENOMEM;
2182
2183         if (security_vm_enough_memory(len >> PAGE_SHIFT))
2184                 return -ENOMEM;
2185
2186         /* Can we just expand an old private anonymous mapping? */
2187         vma = vma_merge(mm, prev, addr, addr + len, flags,
2188                                         NULL, NULL, pgoff, NULL);
2189         if (vma)
2190                 goto out;
2191
2192         /*
2193          * create a vma struct for an anonymous mapping
2194          */
2195         vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
2196         if (!vma) {
2197                 vm_unacct_memory(len >> PAGE_SHIFT);
2198                 return -ENOMEM;
2199         }
2200
2201         INIT_LIST_HEAD(&vma->anon_vma_chain);
2202         vma->vm_mm = mm;
2203         vma->vm_start = addr;
2204         vma->vm_end = addr + len;
2205         vma->vm_pgoff = pgoff;
2206         vma->vm_flags = flags;
2207         vma->vm_page_prot = vm_get_page_prot(flags);
2208         vma_link(mm, vma, prev, rb_link, rb_parent);
2209 out:
2210         mm->total_vm += len >> PAGE_SHIFT;
2211         if (flags & VM_LOCKED) {
2212                 if (!mlock_vma_pages_range(vma, addr, addr + len))
2213                         mm->locked_vm += (len >> PAGE_SHIFT);
2214         }
2215         return addr;
2216 }
2217
2218 EXPORT_SYMBOL(do_brk);
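
/*
 * do_brk() is also used outside sys_brk(): the binfmt loaders map bss
 * this way.  A sketch of the expected calling pattern (the caller, not
 * do_brk(), takes mmap_sem for writing):
 *
 *	down_write(&mm->mmap_sem);
 *	addr = do_brk(start, len);
 *	up_write(&mm->mmap_sem);
 *	if (addr & ~PAGE_MASK)
 *		... addr is really an error code ...
 */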
2219
2220 /* Release all mmaps. */
2221 void exit_mmap(struct mm_struct *mm)
2222 {
2223         struct mmu_gather *tlb;
2224         struct vm_area_struct *vma;
2225         unsigned long nr_accounted = 0;
2226         unsigned long end;
2227
2228         /* mm's last user has gone, and it's about to be pulled down */
2229         mmu_notifier_release(mm);
2230
2231         if (mm->locked_vm) {
2232                 vma = mm->mmap;
2233                 while (vma) {
2234                         if (vma->vm_flags & VM_LOCKED)
2235                                 munlock_vma_pages_all(vma);
2236                         vma = vma->vm_next;
2237                 }
2238         }
2239
2240         arch_exit_mmap(mm);
2241
2242         vma = mm->mmap;
2243         if (!vma)       /* Can happen if dup_mmap() received an OOM */
2244                 return;
2245
2246         lru_add_drain();
2247         flush_cache_mm(mm);
2248         tlb = tlb_gather_mmu(mm, 1);
2249         /* update_hiwater_rss(mm) here? but nobody should be looking */
2250         /* Use -1 here to ensure all VMAs in the mm are unmapped */
2251         end = unmap_vmas(&tlb, vma, 0, -1, &nr_accounted, NULL);
2252         vm_unacct_memory(nr_accounted);
2253
2254         free_pgtables(tlb, vma, FIRST_USER_ADDRESS, 0);
2255         tlb_finish_mmu(tlb, 0, end);
2256
2257         /*
2258          * Walk the list again, actually closing and freeing it,
2259          * with preemption enabled, without holding any MM locks.
2260          */
2261         while (vma)
2262                 vma = remove_vma(vma);
2263
2264         BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
2265 }
2266
2267 /* Insert vm structure into process list sorted by address
2268  * and into the inode's i_mmap tree.  If vm_file is non-NULL
2269  * then i_mmap_lock is taken here.
2270  */
2271 int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
2272 {
2273         struct vm_area_struct * __vma, * prev;
2274         struct rb_node ** rb_link, * rb_parent;
2275
2276         /*
2277          * The vm_pgoff of a purely anonymous vma should be irrelevant
2278          * until its first write fault, when page's anon_vma and index
2279          * are set.  But now set the vm_pgoff it will almost certainly
2280          * end up with (unless mremap moves it elsewhere before that
2281          * first wfault), so /proc/pid/maps tells a consistent story.
2282          *
2283          * By setting it to reflect the virtual start address of the
2284          * vma, merges and splits can happen in a seamless way, just
2285          * using the existing file pgoff checks and manipulations.
2286          * Similarly in do_mmap_pgoff and in do_brk.
2287          */
2288         if (!vma->vm_file) {
2289                 BUG_ON(vma->anon_vma);
2290                 vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT;
2291         }
2292         __vma = find_vma_prepare(mm,vma->vm_start,&prev,&rb_link,&rb_parent);
2293         if (__vma && __vma->vm_start < vma->vm_end)
2294                 return -ENOMEM;
2295         if ((vma->vm_flags & VM_ACCOUNT) &&
2296              security_vm_enough_memory_mm(mm, vma_pages(vma)))
2297                 return -ENOMEM;
2298         vma_link(mm, vma, prev, rb_link, rb_parent);
2299         return 0;
2300 }
2301
2302 /*
2303  * Copy the vma structure to a new location in the same mm,
2304  * prior to moving page table entries, to effect an mremap move.
2305  */
2306 struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
2307         unsigned long addr, unsigned long len, pgoff_t pgoff)
2308 {
2309         struct vm_area_struct *vma = *vmap;
2310         unsigned long vma_start = vma->vm_start;
2311         struct mm_struct *mm = vma->vm_mm;
2312         struct vm_area_struct *new_vma, *prev;
2313         struct rb_node **rb_link, *rb_parent;
2314         struct mempolicy *pol;
2315
2316         /*
2317          * If anonymous vma has not yet been faulted, update new pgoff
2318          * to match new location, to increase its chance of merging.
2319          */
2320         if (!vma->vm_file && !vma->anon_vma)
2321                 pgoff = addr >> PAGE_SHIFT;
2322
2323         find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
2324         new_vma = vma_merge(mm, prev, addr, addr + len, vma->vm_flags,
2325                         vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma));
2326         if (new_vma) {
2327                 /*
2328                  * Source vma may have been merged into new_vma
2329                  */
2330                 if (vma_start >= new_vma->vm_start &&
2331                     vma_start < new_vma->vm_end)
2332                         *vmap = new_vma;
2333         } else {
2334                 new_vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
2335                 if (new_vma) {
2336                         *new_vma = *vma;
2337                         pol = mpol_dup(vma_policy(vma));
2338                         if (IS_ERR(pol))
2339                                 goto out_free_vma;
2340                         INIT_LIST_HEAD(&new_vma->anon_vma_chain);
2341                         if (anon_vma_clone(new_vma, vma))
2342                                 goto out_free_mempol;
2343                         vma_set_policy(new_vma, pol);
2344                         new_vma->vm_start = addr;
2345                         new_vma->vm_end = addr + len;
2346                         new_vma->vm_pgoff = pgoff;
2347                         if (new_vma->vm_file) {
2348                                 get_file(new_vma->vm_file);
2349                                 if (vma->vm_flags & VM_EXECUTABLE)
2350                                         added_exe_file_vma(mm);
2351                         }
2352                         if (new_vma->vm_ops && new_vma->vm_ops->open)
2353                                 new_vma->vm_ops->open(new_vma);
2354                         vma_link(mm, new_vma, prev, rb_link, rb_parent);
2355                 }
2356         }
2357         return new_vma;
2358
2359  out_free_mempol:
2360         mpol_put(pol);
2361  out_free_vma:
2362         kmem_cache_free(vm_area_cachep, new_vma);
2363         return NULL;
2364 }
2365
2366 /*
2367  * Return true if the calling process may expand its vm space by the passed
2368  * number of pages
2369  */
2370 int may_expand_vm(struct mm_struct *mm, unsigned long npages)
2371 {
2372         unsigned long cur = mm->total_vm;       /* pages */
2373         unsigned long lim;
2374
2375         lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
2376
2377         if (cur + npages > lim)
2378                 return 0;
2379         return 1;
2380 }
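
/*
 * Example of the arithmetic: with RLIMIT_AS set to 1GB and 4kB pages,
 * lim is 1GB >> 12 = 262144 pages, so an mm already using 262000 pages
 * may still grow by 144 pages but not by 145.
 */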
2381
2382
2383 static int special_mapping_fault(struct vm_area_struct *vma,
2384                                 struct vm_fault *vmf)
2385 {
2386         pgoff_t pgoff;
2387         struct page **pages;
2388
2389         /*
2390          * special mappings have no vm_file, and in that case, the mm
2391          * uses vm_pgoff internally. So we have to subtract it from here.
2392          * We are allowed to do this because we are the mm; do not copy
2393          * this code into drivers!
2394          */
2395         pgoff = vmf->pgoff - vma->vm_pgoff;
2396
2397         for (pages = vma->vm_private_data; pgoff && *pages; ++pages)
2398                 pgoff--;
2399
2400         if (*pages) {
2401                 struct page *page = *pages;
2402                 get_page(page);
2403                 vmf->page = page;
2404                 return 0;
2405         }
2406
2407         return VM_FAULT_SIGBUS;
2408 }
2409
2410 /*
2411  * Having a close hook prevents vma merging regardless of flags.
2412  */
2413 static void special_mapping_close(struct vm_area_struct *vma)
2414 {
2415 }
2416
2417 static const struct vm_operations_struct special_mapping_vmops = {
2418         .close = special_mapping_close,
2419         .fault = special_mapping_fault,
2420 };
2421
2422 /*
2423  * Called with mm->mmap_sem held for writing.
2424  * Insert a new vma covering the given region, with the given flags.
2425  * Its pages are supplied by the given array of struct page *.
2426  * The array can be shorter than len >> PAGE_SHIFT if it's null-terminated.
2427  * The region past the last page supplied will always produce SIGBUS.
2428  * The array pointer and the pages it points to are assumed to stay alive
2429  * for as long as this mapping might exist.
2430  */
2431 int install_special_mapping(struct mm_struct *mm,
2432                             unsigned long addr, unsigned long len,
2433                             unsigned long vm_flags, struct page **pages)
2434 {
2435         struct vm_area_struct *vma;
2436
2437         vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
2438         if (unlikely(vma == NULL))
2439                 return -ENOMEM;
2440
2441         INIT_LIST_HEAD(&vma->anon_vma_chain);
2442         vma->vm_mm = mm;
2443         vma->vm_start = addr;
2444         vma->vm_end = addr + len;
2445
2446         vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
2447         vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
2448
2449         vma->vm_ops = &special_mapping_vmops;
2450         vma->vm_private_data = pages;
2451
2452         if (unlikely(insert_vm_struct(mm, vma))) {
2453                 kmem_cache_free(vm_area_cachep, vma);
2454                 return -ENOMEM;
2455         }
2456
2457         mm->total_vm += len >> PAGE_SHIFT;
2458
2459         perf_event_mmap(vma);
2460
2461         return 0;
2462 }
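
/*
 * The main users are arch paths installing vdso-style pages.  A sketch,
 * assuming "vdso_pages" is a NULL-terminated array of struct page *
 * that stays allocated for the lifetime of the mm:
 *
 *	down_write(&mm->mmap_sem);
 *	ret = install_special_mapping(mm, addr, PAGE_SIZE,
 *			VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
 *			vdso_pages);
 *	up_write(&mm->mmap_sem);
 */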
2463
2464 static DEFINE_MUTEX(mm_all_locks_mutex);
2465
2466 static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma)
2467 {
2468         if (!test_bit(0, (unsigned long *) &anon_vma->head.next)) {
2469                 /*
2470                  * The LSB of head.next can't change from under us
2471                  * because we hold the mm_all_locks_mutex.
2472                  */
2473                 spin_lock_nest_lock(&anon_vma->lock, &mm->mmap_sem);
2474                 /*
2475                  * We can safely modify head.next after taking the
2476                  * anon_vma->lock. If some other vma in this mm shares
2477                  * the same anon_vma we won't take it again.
2478                  *
2479                  * No need of atomic instructions here, head.next
2480                  * can't change from under us thanks to the
2481                  * anon_vma->lock.
2482                  */
2483                 if (__test_and_set_bit(0, (unsigned long *)
2484                                        &anon_vma->head.next))
2485                         BUG();
2486         }
2487 }
2488
2489 static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
2490 {
2491         if (!test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
2492                 /*
2493                  * AS_MM_ALL_LOCKS can't change from under us because
2494                  * we hold the mm_all_locks_mutex.
2495                  *
2496                  * Operations on ->flags have to be atomic because
2497                  * even if AS_MM_ALL_LOCKS is stable thanks to the
2498                  * mm_all_locks_mutex, there may be other cpus
2499                  * changing other bitflags in parallel to us.
2500                  */
2501                 if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags))
2502                         BUG();
2503                 spin_lock_nest_lock(&mapping->i_mmap_lock, &mm->mmap_sem);
2504         }
2505 }
2506
2507 /*
2508  * This operation locks against the VM for all pte/vma/mm related
2509  * operations that could ever happen on a certain mm. This includes
2510  * vmtruncate, try_to_unmap, and all page faults.
2511  *
2512  * The caller must take the mmap_sem in write mode before calling
2513  * mm_take_all_locks(). The caller isn't allowed to release the
2514  * mmap_sem until mm_drop_all_locks() returns.
2515  *
2516  * mmap_sem in write mode is required in order to block all operations
2517  * that could modify pagetables and free pages without need of
2518  * altering the vma layout (for example populate_range() with
2519  * nonlinear vmas). It's also needed in write mode to prevent new
2520  * anon_vmas from being associated with existing vmas.
2521  *
2522  * A single task can't take more than one mm_take_all_locks() in a row
2523  * or it would deadlock.
2524  *
2525  * The LSB in anon_vma->head.next and the AS_MM_ALL_LOCKS bitflag in
2526  * mapping->flags avoid taking the same lock twice, if more than one
2527  * vma in this mm is backed by the same anon_vma or address_space.
2528  *
2529  * We can take all the locks in random order because the VM code
2530  * taking i_mmap_lock or anon_vma->lock outside the mmap_sem never
2531  * takes more than one of them in a row. Secondly we're protected
2532  * against a concurrent mm_take_all_locks() by the mm_all_locks_mutex.
2533  *
2534  * mm_take_all_locks() and mm_drop_all_locks are expensive operations
2535  * that may have to take thousands of locks.
2536  *
2537  * mm_take_all_locks() can fail if it's interrupted by signals.
2538  */
2539 int mm_take_all_locks(struct mm_struct *mm)
2540 {
2541         struct vm_area_struct *vma;
2542         struct anon_vma_chain *avc;
2543         int ret = -EINTR;
2544
2545         BUG_ON(down_read_trylock(&mm->mmap_sem));
2546
2547         mutex_lock(&mm_all_locks_mutex);
2548
2549         for (vma = mm->mmap; vma; vma = vma->vm_next) {
2550                 if (signal_pending(current))
2551                         goto out_unlock;
2552                 if (vma->vm_file && vma->vm_file->f_mapping)
2553                         vm_lock_mapping(mm, vma->vm_file->f_mapping);
2554         }
2555
2556         for (vma = mm->mmap; vma; vma = vma->vm_next) {
2557                 if (signal_pending(current))
2558                         goto out_unlock;
2559                 if (vma->anon_vma)
2560                         list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
2561                                 vm_lock_anon_vma(mm, avc->anon_vma);
2562         }
2563
2564         ret = 0;
2565
2566 out_unlock:
2567         if (ret)
2568                 mm_drop_all_locks(mm);
2569
2570         return ret;
2571 }
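
/*
 * In practice mmu_notifier_register() is the caller; the pattern looks
 * roughly like this, with mm_drop_all_locks() below undoing it:
 *
 *	down_write(&mm->mmap_sem);
 *	ret = mm_take_all_locks(mm);
 *	if (!ret) {
 *		... publish the notifier ...
 *		mm_drop_all_locks(mm);
 *	}
 *	up_write(&mm->mmap_sem);
 */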
2572
2573 static void vm_unlock_anon_vma(struct anon_vma *anon_vma)
2574 {
2575         if (test_bit(0, (unsigned long *) &anon_vma->head.next)) {
2576                 /*
2577                  * The LSB of head.next can't change to 0 from under
2578                  * us because we hold the mm_all_locks_mutex.
2579                  *
2580                  * We must however clear the bitflag before unlocking
2581                  * the vma so the users using the anon_vma->head will
2582                  * never see our bitflag.
2583                  *
2584                  * No need of atomic instructions here, head.next
2585                  * can't change from under us until we release the
2586                  * anon_vma->lock.
2587                  */
2588                 if (!__test_and_clear_bit(0, (unsigned long *)
2589                                           &anon_vma->head.next))
2590                         BUG();
2591                 spin_unlock(&anon_vma->lock);
2592         }
2593 }
2594
2595 static void vm_unlock_mapping(struct address_space *mapping)
2596 {
2597         if (test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
2598                 /*
2599                  * AS_MM_ALL_LOCKS can't change to 0 from under us
2600                  * because we hold the mm_all_locks_mutex.
2601                  */
2602                 spin_unlock(&mapping->i_mmap_lock);
2603                 if (!test_and_clear_bit(AS_MM_ALL_LOCKS,
2604                                         &mapping->flags))
2605                         BUG();
2606         }
2607 }
2608
2609 /*
2610  * The mmap_sem cannot be released by the caller until
2611  * mm_drop_all_locks() returns.
2612  */
2613 void mm_drop_all_locks(struct mm_struct *mm)
2614 {
2615         struct vm_area_struct *vma;
2616         struct anon_vma_chain *avc;
2617
2618         BUG_ON(down_read_trylock(&mm->mmap_sem));
2619         BUG_ON(!mutex_is_locked(&mm_all_locks_mutex));
2620
2621         for (vma = mm->mmap; vma; vma = vma->vm_next) {
2622                 if (vma->anon_vma)
2623                         list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
2624                                 vm_unlock_anon_vma(avc->anon_vma);
2625                 if (vma->vm_file && vma->vm_file->f_mapping)
2626                         vm_unlock_mapping(vma->vm_file->f_mapping);
2627         }
2628
2629         mutex_unlock(&mm_all_locks_mutex);
2630 }
2631
2632 /*
2633  * initialise the percpu counter for committed VM (vm_committed_as)
2634  */
2635 void __init mmap_init(void)
2636 {
2637         int ret;
2638
2639         ret = percpu_counter_init(&vm_committed_as, 0);
2640         VM_BUG_ON(ret);
2641 }