mm: fix the TLB range flushed when __tlb_remove_page() runs out of slots
diff --git a/mm/memory.c b/mm/memory.c
index 61a262b..5e50800 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1101,6 +1101,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
        spinlock_t *ptl;
        pte_t *start_pte;
        pte_t *pte;
+       unsigned long range_start = addr;
 
 again:
        init_rss_vec(rss);
@@ -1206,12 +1207,14 @@ again:
                force_flush = 0;
 
 #ifdef HAVE_GENERIC_MMU_GATHER
-               tlb->start = addr;
-               tlb->end = end;
+               tlb->start = range_start;
+               tlb->end = addr;
 #endif
                tlb_flush_mmu(tlb);
-               if (addr != end)
+               if (addr != end) {
+                       range_start = addr;
                        goto again;
+               }
        }
 
        return addr;
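
For readers without the full source in front of them, here is a hedged, self-contained C sketch of the batching loop in zap_pte_range() after this change. The point of the patch: when the mmu_gather batch fills up mid-range (__tlb_remove_page() runs out of slots), the interim flush must cover the pages already torn down, [range_start, addr), not the part still to be processed, [addr, end), which is what the old tlb->start/tlb->end assignment described. The struct mmu_gather_sketch type and the helpers unmap_one_pte() and flush_tlb_range_sketch() below are simplified stand-ins for illustration only, not the kernel's real API.

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Simplified stand-in for struct mmu_gather: only the flush range and
 * batch bookkeeping matter for this sketch. */
struct mmu_gather_sketch {
	unsigned long start;
	unsigned long end;
	unsigned int  nr;	/* pages queued in the current batch */
	unsigned int  max;	/* batch capacity */
};

/* Hypothetical helper: pretend we cleared the PTE at @addr and queued
 * its page for freeing. */
static void unmap_one_pte(unsigned long addr)
{
	(void)addr;
}

/* Hypothetical helper standing in for tlb_flush_mmu(): flush the range
 * recorded in the gather and empty the batch. */
static void flush_tlb_range_sketch(struct mmu_gather_sketch *tlb)
{
	printf("flush TLB for [%#lx, %#lx)\n", tlb->start, tlb->end);
	tlb->nr = 0;
}

/* Sketch of the corrected loop: when the batch fills mid-range, flush only
 * what has actually been unmapped so far (range_start..addr), then restart
 * the batch from addr. */
static unsigned long zap_range_sketch(struct mmu_gather_sketch *tlb,
				      unsigned long addr, unsigned long end)
{
	unsigned long range_start = addr;	/* what the patch adds */
	bool force_flush = false;

again:
	while (addr != end) {
		unmap_one_pte(addr);
		addr += PAGE_SIZE;
		if (++tlb->nr == tlb->max) {	/* ran out of batching slots */
			force_flush = true;
			break;
		}
	}

	if (force_flush) {
		force_flush = false;
		tlb->start = range_start;	/* old code used addr: start of the UNprocessed part */
		tlb->end = addr;		/* old code used end:  end of the UNprocessed part  */
		flush_tlb_range_sketch(tlb);
		if (addr != end) {
			range_start = addr;	/* next batch begins where this one stopped */
			goto again;
		}
	}

	/* Any remaining partial batch is flushed later by the caller
	 * (tlb_finish_mmu() in the real kernel). */
	return addr;
}

int main(void)
{
	struct mmu_gather_sketch tlb = { .max = 4 };

	/* Unmap 10 pages starting at 0x100000: each interim flush covers
	 * only the pages already torn down in that batch. */
	zap_range_sketch(&tlb, 0x100000UL, 0x100000UL + 10 * PAGE_SIZE);
	return 0;
}

With a batch size of 4, the sketch prints flushes for [0x100000, 0x104000) and then [0x104000, 0x108000); before the fix, the equivalent assignments would have described the not-yet-unmapped tail of the range instead, leaving stale TLB entries for pages whose PTEs had just been cleared.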