diff --git a/mm/mremap.c b/mm/mremap.c
index bbbbbf5..1de98d4 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -9,7 +9,6 @@
 
 #include <linux/mm.h>
 #include <linux/hugetlb.h>
-#include <linux/slab.h>
 #include <linux/shm.h>
 #include <linux/ksm.h>
 #include <linux/mman.h>
@@ -27,10 +26,6 @@
 
 #include "internal.h"
 
-#ifndef arch_mmap_check
-#define arch_mmap_check(addr, len, flags)      (0)
-#endif
-
 static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr)
 {
        pgd_t *pgd;
@@ -46,13 +41,15 @@ static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr)
                return NULL;
 
        pmd = pmd_offset(pud, addr);
+       split_huge_page_pmd(mm, pmd);
        if (pmd_none_or_clear_bad(pmd))
                return NULL;
 
        return pmd;
 }
 
-static pmd_t *alloc_new_pmd(struct mm_struct *mm, unsigned long addr)
+static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
+                           unsigned long addr)
 {
        pgd_t *pgd;
        pud_t *pud;
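This hunk and the next teach mremap about transparent hugepages: page tables are moved pte by pte, so a huge pmd at the source must be split back into ptes before get_old_pmd() hands it out. At the time (~2.6.38), split_huge_page_pmd() was roughly the following macro; a sketch from memory, not verbatim:

	#define split_huge_page_pmd(__mm, __pmd)			\
		do {							\
			pmd_t *____pmd = (__pmd);			\
			/* no-op unless a huge pmd is mapped here */	\
			if (unlikely(pmd_trans_huge(*____pmd)))		\
				__split_huge_page_pmd(__mm, ____pmd);	\
		} while (0)

Splitting only the source side is also why alloc_new_pmd() below can assert VM_BUG_ON(pmd_trans_huge(*pmd)): the destination pmd is freshly allocated and can never be huge.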
@@ -67,7 +64,8 @@ static pmd_t *alloc_new_pmd(struct mm_struct *mm, unsigned long addr)
        if (!pmd)
                return NULL;
 
-       if (!pmd_present(*pmd) && __pte_alloc(mm, pmd, addr))
+       VM_BUG_ON(pmd_trans_huge(*pmd));
+       if (pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, addr))
                return NULL;
 
        return pmd;
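In the same function the test tightens from !pmd_present() to pmd_none(): only a genuinely empty pmd should receive a freshly allocated pte page. __pte_alloc() grows the vma argument for the THP case, where losing the race against a huge pmd being split means waiting on the anon_vma rather than populating the pmd. A condensed sketch of the ~2.6.38 __pte_alloc() logic, abridged from memory:

	/* under mm->page_table_lock, after pte_alloc_one() */
	if (likely(pmd_none(*pmd))) {		/* has another thread populated it? */
		mm->nr_ptes++;
		pmd_populate(mm, pmd, new);
		new = NULL;			/* pte page consumed, don't free it */
	} else if (unlikely(pmd_trans_splitting(*pmd)))
		wait_split_huge_page = 1;	/* after unlock: wait_split_huge_page(vma->anon_vma, pmd) */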
@@ -96,9 +94,7 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
                 */
                mapping = vma->vm_file->f_mapping;
                spin_lock(&mapping->i_mmap_lock);
-               if (new_vma->vm_truncate_count &&
-                   new_vma->vm_truncate_count != vma->vm_truncate_count)
-                       new_vma->vm_truncate_count = 0;
+               new_vma->vm_truncate_count = 0;
        }
 
        /*
@@ -106,7 +102,7 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
         * pte locks because exclusive mmap_sem prevents deadlock.
         */
        old_pte = pte_offset_map_lock(mm, old_pmd, old_addr, &old_ptl);
-       new_pte = pte_offset_map_nested(new_pmd, new_addr);
+       new_pte = pte_offset_map(new_pmd, new_addr);
        new_ptl = pte_lockptr(mm, new_pmd);
        if (new_ptl != old_ptl)
                spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
@@ -124,7 +120,7 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
        arch_leave_lazy_mmu_mode();
        if (new_ptl != old_ptl)
                spin_unlock(new_ptl);
-       pte_unmap_nested(new_pte - 1);
+       pte_unmap(new_pte - 1);
        pte_unmap_unlock(old_pte - 1, old_ptl);
        if (mapping)
                spin_unlock(&mapping->i_mmap_lock);
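pte_offset_map_nested() and pte_unmap_nested() disappear because the 2.6.37 stack-based kmap_atomic() rework removed the per-slot KM_* bookkeeping: nested atomic mappings now simply push and pop in LIFO order, so the plain variants suffice. The ordering move_ptes() relies on, as a pseudocode sketch:

	old_pte = pte_offset_map_lock(mm, old_pmd, old_addr, &old_ptl);	/* kmap push #1 */
	new_pte = pte_offset_map(new_pmd, new_addr);			/* kmap push #2 */
	/* ... move the ptes ... */
	pte_unmap(new_pte);			/* pop #2 first */
	pte_unmap_unlock(old_pte, old_ptl);	/* then pop #1 */

On 64-bit both map calls are plain pointer arithmetic anyway; the kmap choreography only matters with CONFIG_HIGHPTE.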
@@ -152,7 +148,7 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
                old_pmd = get_old_pmd(vma->vm_mm, old_addr);
                if (!old_pmd)
                        continue;
-               new_pmd = alloc_new_pmd(vma->vm_mm, new_addr);
+               new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr);
                if (!new_pmd)
                        break;
                next = (new_addr + PMD_SIZE) & PMD_MASK;
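move_page_tables() itself only changes to pass the vma down to alloc_new_pmd(), but the extent arithmetic around it deserves a note: (new_addr + PMD_SIZE) & PMD_MASK rounds up to the next pmd boundary, so an unaligned start still ends its first chunk on a boundary. A stand-alone demonstration of the arithmetic (hypothetical userspace demo, x86-64 sizes assumed):

	#include <stdio.h>

	#define PMD_SIZE (1UL << 21)		/* 2 MiB */
	#define PMD_MASK (~(PMD_SIZE - 1))

	int main(void)
	{
		unsigned long addr = (1UL << 21) + 0x3000;	/* 2 MiB + 12 KiB */
		unsigned long next = (addr + PMD_SIZE) & PMD_MASK;

		/* prints: next pmd boundary after 0x203000 is 0x400000 */
		printf("next pmd boundary after %#lx is %#lx\n", addr, next);
		return 0;
	}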
@@ -289,7 +285,7 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
        if (vma->vm_flags & VM_LOCKED) {
                unsigned long locked, lock_limit;
                locked = mm->locked_vm << PAGE_SHIFT;
-               lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
+               lock_limit = rlimit(RLIMIT_MEMLOCK);
                locked += new_len - old_len;
                if (locked > lock_limit && !capable(CAP_IPC_LOCK))
                        goto Eagain;
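The open-coded rlimit lookup becomes the rlimit() helper introduced in 2.6.34. It reads the current task's soft limit, roughly like this (sketch, not verbatim):

	static inline unsigned long task_rlimit(const struct task_struct *tsk,
						unsigned int limit)
	{
		return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_cur);
	}

	static inline unsigned long rlimit(unsigned int limit)
	{
		return task_rlimit(current, limit);
	}

Besides being shorter, the ACCESS_ONCE() keeps the read stable against a concurrent setrlimit().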
@@ -366,9 +362,7 @@ static unsigned long mremap_to(unsigned long addr,
        map_flags = MAP_FIXED;
        if (vma->vm_flags & VM_MAYSHARE)
                map_flags |= MAP_SHARED;
-       ret = arch_mmap_check(new_addr, new_len, map_flags);
-       if (ret)
-               goto out1;
+
        ret = get_unmapped_area(vma->vm_file, new_addr, new_len, vma->vm_pgoff +
                                ((addr - vma->vm_start) >> PAGE_SHIFT),
                                map_flags);
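The explicit arch_mmap_check() call can go because, since 2.6.33, get_unmapped_area() performs the arch check itself before dispatching to the per-arch handler; that is also why the #ifndef arch_mmap_check fallback at the top of this file was deleted. An abridged sketch of mm/mmap.c after that change:

	unsigned long
	get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
			  unsigned long pgoff, unsigned long flags)
	{
		unsigned long error = arch_mmap_check(addr, len, flags);
		if (error)
			return error;

		if (len > TASK_SIZE)	/* careful about overflows */
			return -ENOMEM;

		/* ... dispatch to file->f_op->get_unmapped_area or the mm default ... */
	}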
@@ -388,12 +382,9 @@ out:
 static int vma_expandable(struct vm_area_struct *vma, unsigned long delta)
 {
        unsigned long end = vma->vm_end + delta;
-       unsigned long max_addr = TASK_SIZE;
-       if (vma->vm_next)
-               max_addr = vma->vm_next->vm_start;
-       if (max_addr < end || end < vma->vm_end)
+       if (end < vma->vm_end) /* overflow */
                return 0;
-       if (arch_mmap_check(vma->vm_start, end - vma->vm_start, MAP_FIXED))
+       if (vma->vm_next && vma->vm_next->vm_start < end) /* intersection */
                return 0;
        if (get_unmapped_area(NULL, vma->vm_start, end - vma->vm_start,
                              0, MAP_FIXED) & ~PAGE_MASK)
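vma_expandable() now spells out its two cheap tests, unsigned overflow and intersection with the next vma, and leaves the policy checks (TASK_SIZE, arch quirks) to get_unmapped_area() with MAP_FIXED, which validates the whole expanded range. The overflow idiom works because unsigned wraparound is well defined in C; a hypothetical stand-alone demo:

	#include <stdio.h>

	int main(void)
	{
		unsigned long start = ~0UL - 0x1000;	/* near the top of the address space */
		unsigned long end = start + 0x2000;	/* wraps past ULONG_MAX */

		printf("overflow detected: %d\n", end < start);	/* prints 1 */
		return 0;
	}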
@@ -469,8 +460,11 @@ unsigned long do_mremap(unsigned long addr,
                if (vma_expandable(vma, new_len - old_len)) {
                        int pages = (new_len - old_len) >> PAGE_SHIFT;
 
-                       vma_adjust(vma, vma->vm_start,
-                               addr + new_len, vma->vm_pgoff, NULL);
+                       if (vma_adjust(vma, vma->vm_start, addr + new_len,
+                                      vma->vm_pgoff, NULL)) {
+                               ret = -ENOMEM;
+                               goto out;
+                       }
 
                        mm->total_vm += pages;
                        vm_stat_account(mm, vma->vm_flags, vma->vm_file, pages);
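The final hunk adapts to the contract vma_adjust() has had since the 2.6.34 anon_vma rework: extending a vma may require allocating anon_vma chain links, so the call can fail and now returns int. Growing in place therefore has to propagate the error before any accounting is updated; a minimal sketch of the calling pattern:

	/* vma_adjust() may return -ENOMEM since it can allocate
	 * anon_vma chain links; bail out before touching counters */
	if (vma_adjust(vma, vma->vm_start, addr + new_len,
		       vma->vm_pgoff, NULL)) {
		ret = -ENOMEM;
		goto out;
	}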