mm: record MAP_NORESERVE status on vmas and fix small page mprotect reservations
index f4cfc6ac08dbbc2e4bb0e937ee256d65ea1973fa..57d3b6097deb38e3f73d3319845681207b86c293 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
 #include <asm/tlb.h>
 #include <asm/mmu_context.h>
 
+#include "internal.h"
+
 #ifndef arch_mmap_check
 #define arch_mmap_check(addr, len, flags)      (0)
 #endif
 
+#ifndef arch_rebalance_pgtables
+#define arch_rebalance_pgtables(addr, len)             (addr)
+#endif
+
 static void unmap_region(struct mm_struct *mm,
                struct vm_area_struct *vma, struct vm_area_struct *prev,
                unsigned long start, unsigned long end);
@@ -68,15 +74,16 @@ pgprot_t protection_map[16] = {
 
 pgprot_t vm_get_page_prot(unsigned long vm_flags)
 {
-       return protection_map[vm_flags &
-                               (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)];
+       return __pgprot(pgprot_val(protection_map[vm_flags &
+                               (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
+                       pgprot_val(arch_vm_get_page_prot(vm_flags)));
 }
 EXPORT_SYMBOL(vm_get_page_prot);
 
 int sysctl_overcommit_memory = OVERCOMMIT_GUESS;  /* heuristic overcommit */
 int sysctl_overcommit_ratio = 50;      /* default is 50% */
 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
-atomic_t vm_committed_space = ATOMIC_INIT(0);
+atomic_long_t vm_committed_space = ATOMIC_LONG_INIT(0);
 
 /*
  * Check that a process has enough memory to allocate a new virtual
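Reviewer note: the atomic_t to atomic_long_t switch above matters because vm_committed_space counts pages, not bytes, and a 32-bit counter gives out well within reach of large 64-bit machines. A minimal standalone illustration (userspace, not kernel code) of where the old counter wraps:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        long long pages = 1LL << 31;    /* first count a 32-bit atomic_t cannot hold */
        int32_t as32 = (int32_t)(uint32_t)pages;        /* what atomic_t would read back */

        printf("%lld pages * 4 KiB = %lld TiB of commit\n",
               pages, (pages * 4096) >> 40);
        printf("read back through a 32-bit counter: %d\n", as32);
        return 0;
}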
@@ -173,7 +180,7 @@ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
         * cast `allowed' as a signed long because vm_committed_space
         * sometimes has a negative value
         */
-       if (atomic_read(&vm_committed_space) < (long)allowed)
+       if (atomic_long_read(&vm_committed_space) < (long)allowed)
                return 0;
 error:
        vm_unacct_memory(pages);
@@ -226,9 +233,12 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
        might_sleep();
        if (vma->vm_ops && vma->vm_ops->close)
                vma->vm_ops->close(vma);
-       if (vma->vm_file)
+       if (vma->vm_file) {
                fput(vma->vm_file);
-       mpol_free(vma_policy(vma));
+               if (vma->vm_flags & VM_EXECUTABLE)
+                       removed_exe_file_vma(vma->vm_mm);
+       }
+       mpol_put(vma_policy(vma));
        kmem_cache_free(vm_area_cachep, vma);
        return next;
 }
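Reviewer note: the new removed_exe_file_vma() call (paired with added_exe_file_vma() further down) keeps mm->exe_file in step with the number of executable mappings of the binary; this bookkeeping is what backs the /proc/<pid>/exe symlink. A standalone userspace peek at the symlink it serves, illustrative only:

#include <stdio.h>
#include <unistd.h>

int main(void)
{
        char path[4096];
        ssize_t n = readlink("/proc/self/exe", path, sizeof(path) - 1);

        if (n < 0) {
                perror("readlink");
                return 1;
        }
        path[n] = '\0';
        printf("/proc/self/exe -> %s\n", path);
        return 0;
}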
@@ -238,10 +248,16 @@ asmlinkage unsigned long sys_brk(unsigned long brk)
        unsigned long rlim, retval;
        unsigned long newbrk, oldbrk;
        struct mm_struct *mm = current->mm;
+       unsigned long min_brk;
 
        down_write(&mm->mmap_sem);
 
-       if (brk < mm->end_code)
+#ifdef CONFIG_COMPAT_BRK
+       min_brk = mm->end_code;
+#else
+       min_brk = mm->start_brk;
+#endif
+       if (brk < min_brk)
                goto out;
 
        /*
@@ -251,7 +267,8 @@ asmlinkage unsigned long sys_brk(unsigned long brk)
         * not page aligned -Ram Gupta
         */
        rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
-       if (rlim < RLIM_INFINITY && brk - mm->start_data > rlim)
+       if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
+                       (mm->end_data - mm->start_data) > rlim)
                goto out;
 
        newbrk = PAGE_ALIGN(brk);
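Reviewer note: taken together, the two sys_brk() changes let heap randomization place start_brk above end_data (unless CONFIG_COMPAT_BRK keeps the old end_code lower bound for ancient binaries), and make the RLIMIT_DATA check charge brk growth plus the static data segment. A minimal userspace sketch of the limit check, with an arbitrary 1 MiB limit:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/resource.h>
#include <unistd.h>

int main(void)
{
        struct rlimit rl = { 1 << 20, 1 << 20 };        /* 1 MiB RLIMIT_DATA */

        if (setrlimit(RLIMIT_DATA, &rl) != 0) {
                perror("setrlimit");
                return 1;
        }

        /* (brk - start_brk) + (end_data - start_data) may not pass 1 MiB */
        if (sbrk(4 << 20) == (void *)-1)
                printf("sbrk(4 MiB) failed as expected: %s\n", strerror(errno));
        else
                printf("sbrk succeeded; the allocator may have raised brk already\n");
        return 0;
}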
@@ -618,10 +635,13 @@ again:                    remove_next = 1 + (end > next->vm_end);
                spin_unlock(&mapping->i_mmap_lock);
 
        if (remove_next) {
-               if (file)
+               if (file) {
                        fput(file);
+                       if (next->vm_flags & VM_EXECUTABLE)
+                               removed_exe_file_vma(mm);
+               }
                mm->map_count--;
-               mpol_free(vma_policy(next));
+               mpol_put(vma_policy(next));
                kmem_cache_free(vm_area_cachep, next);
                /*
                 * In mprotect's case 6 (see comments on vma_merge),
@@ -1063,7 +1083,6 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
                mapping_cap_account_dirty(vma->vm_file->f_mapping);
 }
 
-
 unsigned long mmap_region(struct file *file, unsigned long addr,
                          unsigned long len, unsigned long flags,
                          unsigned int vm_flags, unsigned long pgoff,
@@ -1091,6 +1110,9 @@ munmap_back:
        if (!may_expand_vm(mm, len >> PAGE_SHIFT))
                return -ENOMEM;
 
+       if (flags & MAP_NORESERVE)
+               vm_flags |= VM_NORESERVE;
+
        if (accountable && (!(flags & MAP_NORESERVE) ||
                            sysctl_overcommit_memory == OVERCOMMIT_NEVER)) {
                if (vm_flags & VM_SHARED) {
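Reviewer note: this is the titular change. MAP_NORESERVE is now recorded as VM_NORESERVE on the vma, so a later mprotect() can see that the mapping never carried a commit reservation instead of charging one after the fact. A small standalone sketch of the affected pattern (sizes arbitrary):

#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
        size_t len = 64UL << 20;        /* 64 MiB, deliberately unreserved */
        void *p = mmap(NULL, len, PROT_READ,
                       MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);

        if (p == MAP_FAILED) {
                perror("mmap");
                return 1;
        }

        /*
         * With VM_NORESERVE recorded on the vma, this protection change
         * no longer charges the full 64 MiB against overcommit accounting.
         */
        if (mprotect(p, len, PROT_READ | PROT_WRITE) != 0)
                perror("mprotect");

        munmap(p, len);
        return 0;
}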
@@ -1150,6 +1172,8 @@ munmap_back:
                error = file->f_op->mmap(file, vma);
                if (error)
                        goto unmap_and_free_vma;
+               if (vm_flags & VM_EXECUTABLE)
+                       added_exe_file_vma(mm);
        } else if (vm_flags & VM_SHARED) {
                error = shmem_zero_setup(vma);
                if (error)
@@ -1176,22 +1200,22 @@ munmap_back:
        if (vma_wants_writenotify(vma))
                vma->vm_page_prot = vm_get_page_prot(vm_flags & ~VM_SHARED);
 
-       if (!file || !vma_merge(mm, prev, addr, vma->vm_end,
+       if (file && vma_merge(mm, prev, addr, vma->vm_end,
                        vma->vm_flags, NULL, file, pgoff, vma_policy(vma))) {
-               file = vma->vm_file;
-               vma_link(mm, vma, prev, rb_link, rb_parent);
-               if (correct_wcount)
-                       atomic_inc(&inode->i_writecount);
-       } else {
-               if (file) {
-                       if (correct_wcount)
-                               atomic_inc(&inode->i_writecount);
-                       fput(file);
-               }
-               mpol_free(vma_policy(vma));
+               mpol_put(vma_policy(vma));
                kmem_cache_free(vm_area_cachep, vma);
+               fput(file);
+               if (vm_flags & VM_EXECUTABLE)
+                       removed_exe_file_vma(mm);
+       } else {
+               vma_link(mm, vma, prev, rb_link, rb_parent);
+               file = vma->vm_file;
        }
-out:   
+
+       /* Once vma denies write, undo our temporary denial count */
+       if (correct_wcount)
+               atomic_inc(&inode->i_writecount);
+out:
        mm->total_vm += len >> PAGE_SHIFT;
        vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
        if (vm_flags & VM_LOCKED) {
@@ -1423,7 +1447,7 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
        if (addr & ~PAGE_MASK)
                return -EINVAL;
 
-       return addr;
+       return arch_rebalance_pgtables(addr, len);
 }
 
 EXPORT_SYMBOL(get_unmapped_area);
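Reviewer note: get_unmapped_area() still rejects unaligned fixed requests before arch_rebalance_pgtables() ever sees the address. A standalone sketch that trips that -EINVAL path (the hint address is arbitrary):

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        long psz = sysconf(_SC_PAGESIZE);
        void *hint = (void *)((1UL << 30) + 1);        /* deliberately misaligned */
        void *p = mmap(hint, psz, PROT_READ,
                       MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);

        if (p == MAP_FAILED)
                printf("mmap(MAP_FIXED, unaligned): %s\n", strerror(errno));
        else
                munmap(p, psz);
        return 0;
}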
@@ -1620,7 +1644,7 @@ static inline int expand_downwards(struct vm_area_struct *vma,
                return -ENOMEM;
 
        address &= PAGE_MASK;
-       error = security_file_mmap(0, 0, 0, 0, address, 1);
+       error = security_file_mmap(NULL, 0, 0, 0, address, 1);
        if (error)
                return error;
 
@@ -1744,7 +1768,7 @@ static void unmap_region(struct mm_struct *mm,
        update_hiwater_rss(mm);
        unmap_vmas(&tlb, vma, start, end, &nr_accounted, NULL);
        vm_unacct_memory(nr_accounted);
-       free_pgtables(&tlb, vma, prev? prev->vm_end: FIRST_USER_ADDRESS,
+       free_pgtables(tlb, vma, prev? prev->vm_end: FIRST_USER_ADDRESS,
                                 next? next->vm_start: 0);
        tlb_finish_mmu(tlb, start, end);
 }
@@ -1808,15 +1832,18 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
                new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
        }
 
-       pol = mpol_copy(vma_policy(vma));
+       pol = mpol_dup(vma_policy(vma));
        if (IS_ERR(pol)) {
                kmem_cache_free(vm_area_cachep, new);
                return PTR_ERR(pol);
        }
        vma_set_policy(new, pol);
 
-       if (new->vm_file)
+       if (new->vm_file) {
                get_file(new->vm_file);
+               if (vma->vm_flags & VM_EXECUTABLE)
+                       added_exe_file_vma(mm);
+       }
 
        if (new->vm_ops && new->vm_ops->open)
                new->vm_ops->open(new);
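Reviewer note: split_vma() is what a partial-range munmap() or mprotect() lands on; each half now takes its own file reference, mempolicy duplicate, and exe-file count. The caller-visible effect, as a standalone sketch:

#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        long psz = sysconf(_SC_PAGESIZE);
        char *p = mmap(NULL, 3 * psz, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (p == MAP_FAILED)
                return 1;

        mprotect(p + psz, psz, PROT_READ);      /* forces split_vma() twice */
        system("cat /proc/self/maps");          /* shows three adjacent anon vmas */

        munmap(p, 3 * psz);
        return 0;
}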
@@ -1941,6 +1968,10 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
        if (is_hugepage_only_range(mm, addr, len))
                return -EINVAL;
 
+       error = security_file_mmap(NULL, 0, 0, 0, addr, 1);
+       if (error)
+               return error;
+
        flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
 
        error = arch_mmap_check(addr, len, flags);
@@ -2037,7 +2068,7 @@ void exit_mmap(struct mm_struct *mm)
        /* Use -1 here to ensure all VMAs in the mm are unmapped */
        end = unmap_vmas(&tlb, vma, 0, -1, &nr_accounted, NULL);
        vm_unacct_memory(nr_accounted);
-       free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, 0);
+       free_pgtables(tlb, vma, FIRST_USER_ADDRESS, 0);
        tlb_finish_mmu(tlb, 0, end);
 
        /*
@@ -2120,7 +2151,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
                new_vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
                if (new_vma) {
                        *new_vma = *vma;
-                       pol = mpol_copy(vma_policy(vma));
+                       pol = mpol_dup(vma_policy(vma));
                        if (IS_ERR(pol)) {
                                kmem_cache_free(vm_area_cachep, new_vma);
                                return NULL;
@@ -2129,8 +2160,11 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
                        new_vma->vm_start = addr;
                        new_vma->vm_end = addr + len;
                        new_vma->vm_pgoff = pgoff;
-                       if (new_vma->vm_file)
+                       if (new_vma->vm_file) {
                                get_file(new_vma->vm_file);
+                               if (vma->vm_flags & VM_EXECUTABLE)
+                                       added_exe_file_vma(mm);
+                       }
                        if (new_vma->vm_ops && new_vma->vm_ops->open)
                                new_vma->vm_ops->open(new_vma);
                        vma_link(mm, new_vma, prev, rb_link, rb_parent);
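Reviewer note: copy_vma() is mremap()'s helper for the move case; the duplicate now goes through mpol_dup() and bumps the exe-file count like every other vma copy. A standalone sketch of the operation it backs (assumes _GNU_SOURCE for mremap):

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        long psz = sysconf(_SC_PAGESIZE);
        char *p = mmap(NULL, psz, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (p == MAP_FAILED)
                return 1;
        strcpy(p, "moved intact");

        /*
         * Growing with MREMAP_MAYMOVE may relocate the mapping; copy_vma()
         * carries the old vma's file, policy and offsets to the new address.
         */
        char *q = mremap(p, psz, 4 * psz, MREMAP_MAYMOVE);
        if (q == MAP_FAILED)
                return 1;
        printf("%p -> %p: %s\n", (void *)p, (void *)q, q);
        munmap(q, 4 * psz);
        return 0;
}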
@@ -2156,24 +2190,31 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
 }
 
 
-static struct page *special_mapping_nopage(struct vm_area_struct *vma,
-                                          unsigned long address, int *type)
+static int special_mapping_fault(struct vm_area_struct *vma,
+                               struct vm_fault *vmf)
 {
+       pgoff_t pgoff;
        struct page **pages;
 
-       BUG_ON(address < vma->vm_start || address >= vma->vm_end);
+       /*
+        * special mappings have no vm_file, and in that case, the mm
+        * uses vm_pgoff internally. So we have to subtract it from here.
+        * We are allowed to do this because we are the mm; do not copy
+        * this code into drivers!
+        */
+       pgoff = vmf->pgoff - vma->vm_pgoff;
 
-       address -= vma->vm_start;
-       for (pages = vma->vm_private_data; address > 0 && *pages; ++pages)
-               address -= PAGE_SIZE;
+       for (pages = vma->vm_private_data; pgoff && *pages; ++pages)
+               pgoff--;
 
        if (*pages) {
                struct page *page = *pages;
                get_page(page);
-               return page;
+               vmf->page = page;
+               return 0;
        }
 
-       return NOPAGE_SIGBUS;
+       return VM_FAULT_SIGBUS;
 }
 
 /*
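Reviewer note: special_mapping_fault() walks vma->vm_private_data, a NULL-terminated page array, by the vm_pgoff-adjusted offset. A hedged kernel-side sketch of a typical caller, modeled on the era's vDSO setup code (the names here are illustrative, not from this patch):

/*
 * demo_pages[1] stays NULL as the sentinel that stops the walk in
 * special_mapping_fault() above.
 */
static struct page *demo_pages[2];

static int demo_install(struct mm_struct *mm, unsigned long addr)
{
        return install_special_mapping(mm, addr, PAGE_SIZE,
                                       VM_READ | VM_EXEC |
                                       VM_MAYREAD | VM_MAYEXEC,
                                       demo_pages);
}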
@@ -2185,7 +2226,7 @@ static void special_mapping_close(struct vm_area_struct *vma)
 
 static struct vm_operations_struct special_mapping_vmops = {
        .close = special_mapping_close,
-       .nopage = special_mapping_nopage,
+       .fault = special_mapping_fault,
 };
 
 /*
@@ -2211,7 +2252,7 @@ int install_special_mapping(struct mm_struct *mm,
        vma->vm_start = addr;
        vma->vm_end = addr + len;
 
-       vma->vm_flags = vm_flags | mm->def_flags;
+       vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
        vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
 
        vma->vm_ops = &special_mapping_vmops;