mm: sync vmalloc address space page tables in alloc_vm_area()
[linux-2.6.git] / mm / mprotect.c
index 2d1bf7cf885179af5144f41998d95dc2d2b72981..5a688a2756bec54435adbd5f3c13a33fbdd2c11e 100644
@@ -78,7 +78,7 @@ static void change_pte_range(struct mm_struct *mm, pmd_t *pmd,
        pte_unmap_unlock(pte - 1, ptl);
 }
 
-static inline void change_pmd_range(struct mm_struct *mm, pud_t *pud,
+static inline void change_pmd_range(struct vm_area_struct *vma, pud_t *pud,
                unsigned long addr, unsigned long end, pgprot_t newprot,
                int dirty_accountable)
 {
@@ -88,13 +88,21 @@ static inline void change_pmd_range(struct mm_struct *mm, pud_t *pud,
        pmd = pmd_offset(pud, addr);
        do {
                next = pmd_addr_end(addr, end);
+               if (pmd_trans_huge(*pmd)) {
+                       if (next - addr != HPAGE_PMD_SIZE)
+                               split_huge_page_pmd(vma->vm_mm, pmd);
+                       else if (change_huge_pmd(vma, pmd, addr, newprot))
+                               continue;
+                       /* fall through */
+               }
                if (pmd_none_or_clear_bad(pmd))
                        continue;
-               change_pte_range(mm, pmd, addr, next, newprot, dirty_accountable);
+               change_pte_range(vma->vm_mm, pmd, addr, next, newprot,
+                                dirty_accountable);
        } while (pmd++, addr = next, addr != end);
 }
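
The hunk above is the core of the change: when the PMD walk hits a transparent huge PMD, a range that only partially covers the huge page forces a split back to ordinary PTEs, while a range that covers the whole huge page lets change_huge_pmd() update the protection in one step and skip the per-PTE loop. A minimal userspace sketch of that decision follows; the helper names (toy_split, toy_change_huge, TOY_HPAGE_SIZE) are made-up stand-ins for the kernel primitives, not real APIs.

#include <stdbool.h>
#include <stdio.h>

#define TOY_HPAGE_SIZE (2UL * 1024 * 1024)	/* pretend 2 MiB huge pages */

struct toy_pmd { bool trans_huge; };

/* Hypothetical stand-in for split_huge_page_pmd(): demote to small entries. */
static void toy_split(struct toy_pmd *pmd) { pmd->trans_huge = false; }

/* Hypothetical stand-in for change_huge_pmd(): true if it handled the change. */
static bool toy_change_huge(struct toy_pmd *pmd) { return pmd->trans_huge; }

static void toy_change_range(struct toy_pmd *pmd,
			     unsigned long addr, unsigned long end)
{
	if (pmd->trans_huge) {
		if (end - addr != TOY_HPAGE_SIZE) {
			/* Range covers only part of the huge page:
			 * split it, then fall through to the PTE path. */
			toy_split(pmd);
		} else if (toy_change_huge(pmd)) {
			/* Whole huge page changed in one shot: done. */
			printf("changed huge pmd at %#lx\n", addr);
			return;
		}
	}
	printf("walking small ptes for %#lx-%#lx\n", addr, end);
}

int main(void)
{
	struct toy_pmd fully_covered = { .trans_huge = true };
	struct toy_pmd partially_covered = { .trans_huge = true };

	toy_change_range(&fully_covered, 0, TOY_HPAGE_SIZE);
	toy_change_range(&partially_covered, 0, TOY_HPAGE_SIZE / 2);
	return 0;
}

The /* fall through */ comment in the kernel hunk is doing real work: after a split, pmd_trans_huge() is no longer true, so the existing change_pte_range() call below handles the now-ordinary page-table entries.
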
 
-static inline void change_pud_range(struct mm_struct *mm, pgd_t *pgd,
+static inline void change_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
                unsigned long addr, unsigned long end, pgprot_t newprot,
                int dirty_accountable)
 {
@@ -106,7 +114,8 @@ static inline void change_pud_range(struct mm_struct *mm, pgd_t *pgd,
                next = pud_addr_end(addr, end);
                if (pud_none_or_clear_bad(pud))
                        continue;
-               change_pmd_range(mm, pud, addr, next, newprot, dirty_accountable);
+               change_pmd_range(vma, pud, addr, next, newprot,
+                                dirty_accountable);
        } while (pud++, addr = next, addr != end);
 }
 
@@ -126,7 +135,8 @@ static void change_protection(struct vm_area_struct *vma,
                next = pgd_addr_end(addr, end);
                if (pgd_none_or_clear_bad(pgd))
                        continue;
-               change_pud_range(mm, pgd, addr, next, newprot, dirty_accountable);
+               change_pud_range(vma, pgd, addr, next, newprot,
+                                dirty_accountable);
        } while (pgd++, addr = next, addr != end);
        flush_tlb_range(vma, start, end);
 }
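
The pud- and pgd-level hunks only thread the vm_area_struct down through the walk: change_huge_pmd() needs the full vma, while change_pte_range() still takes just vma->vm_mm, so the signatures of change_pmd_range(), change_pud_range() and the call site in change_protection() switch from mm to vma with no behavioural change at those levels.
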
@@ -211,6 +221,7 @@ success:
        mmu_notifier_invalidate_range_end(mm, start, end);
        vm_stat_account(mm, oldflags, vma->vm_file, -nrpages);
        vm_stat_account(mm, newflags, vma->vm_file, nrpages);
+       perf_event_mmap(vma);
        return 0;
 
 fail:
@@ -299,7 +310,6 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
                error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
                if (error)
                        goto out;
-               perf_event_mmap(vma);
                nstart = tmp;
 
                if (nstart < prev->vm_end)
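
The final two hunks relocate the perf_event_mmap() notification from the per-vma loop in sys_mprotect() into the success path of mprotect_fixup(), next to the vm_stat_account() updates. A plausible motivation, not stated in this diff, is that mprotect_fixup() can merge or split the region it is given, so issuing the event from inside the fixup reports on the vma that was actually updated rather than on the caller's pointer.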