[PATCH] Handle spurious page fault for hugetlb region
[linux-2.6.git] / mm / memory.c
index ae8161f1f4595bd4d58afa0b20ecf4419ff23e71..8c88b973abc56a37270fb8516687e964c449372d 100644 (file)
@@ -2045,8 +2045,18 @@ int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct * vma,
 
        inc_page_state(pgfault);
 
-       if (is_vm_hugetlb_page(vma))
-               return VM_FAULT_SIGBUS; /* mapping truncation does this. */
+       if (unlikely(is_vm_hugetlb_page(vma))) {
+               if (valid_hugetlb_file_off(vma, address))
+               /* We get here only if there was a stale (zero) TLB entry
+                * (because of HW prefetching).
+                * Low-level arch code (if needed) should have already
+                * purged the stale entry as part of this fault handling.
+                * Here we just return.
+                */
+                       return VM_FAULT_MINOR; 
+               else
+                       return VM_FAULT_SIGBUS; /* mapping truncation does this. */
+       }
 
        /*
         * We need the page table lock to synchronize with kswapd