mm: memory: Handle VM_NONE protected page faults
Sri Krishna chowdary [Wed, 6 Apr 2016 05:14:16 +0000 (10:14 +0530)]
Allow the driver to decide whether page protections can be fixed.
This allows us to replace zapping of VMAs with mprotect-based calls.
The zap-based solution is considerably slower than the mprotect-based
one, hence this deviation from upstream.

JIRA TMM-59

Change-Id: I3d362deda2866d5db9f1cfb037a2a35e646ced8a
Signed-off-by: Sri Krishna chowdary <schowdary@nvidia.com>
Reviewed-on: http://git-master/r/1141383
(cherry picked from commit bc3555caae78981db5d1bb8a521d927be06bdd9e)
Reviewed-on: http://git-master/r/1168475
GVS: Gerrit_Virtual_Submit
Reviewed-by: Hayden Du <haydend@nvidia.com>

include/linux/mm.h
mm/memory.c

index 924c202..d792115 100644 (file)
@@ -237,6 +237,9 @@ struct vm_operations_struct {
        /* called by sys_remap_file_pages() to populate non-linear mapping */
        int (*remap_pages)(struct vm_area_struct *vma, unsigned long addr,
                           unsigned long size, pgoff_t pgoff);
+       /* called when driver allows fixing ptes with none protections */
+       bool (*fixup_prot)(struct vm_area_struct *vma, unsigned long addr,
+                         pgoff_t pgoff);
 };
 
 struct mmu_gather;
index 9861be9..6068977 100644 (file)
@@ -3796,6 +3796,7 @@ int handle_pte_fault(struct mm_struct *mm,
 {
        pte_t entry;
        spinlock_t *ptl;
+       bool fix_prot = false;
 
        entry = *pte;
        if (!pte_present(entry)) {
@@ -3816,10 +3817,24 @@ int handle_pte_fault(struct mm_struct *mm,
        if (pte_numa(entry))
                return do_numa_page(mm, vma, address, entry, pte, pmd);
 
+       if (vma->vm_ops && vma->vm_ops->fixup_prot && vma->vm_ops->fault &&
+               (entry == pte_modify(entry, vm_get_page_prot(VM_NONE)))) {
+               pgoff_t pgoff = (((address & PAGE_MASK)
+                               - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
+               if (!vma->vm_ops->fixup_prot(vma, address & PAGE_MASK, pgoff))
+                       return VM_FAULT_SIGSEGV; /* access not granted */
+               fix_prot = true;
+       }
+
        ptl = pte_lockptr(mm, pmd);
        spin_lock(ptl);
        if (unlikely(!pte_same(*pte, entry)))
                goto unlock;
+       if (fix_prot) {
+               entry = pte_modify(entry, vma->vm_page_prot);
+               vm_stat_account(mm, VM_NONE, vma->vm_file, -1);
+               vm_stat_account(mm, vma->vm_flags, vma->vm_file, 1);
+       }
        if (flags & FAULT_FLAG_WRITE) {
                if (!pte_write(entry))
                        return do_wp_page(mm, vma, address,