mm: don't access vm_flags as 'int'
diff --git a/mm/memory.c b/mm/memory.c
index 19b2d44..fc24f7d 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -182,7 +182,7 @@ void sync_mm_rss(struct task_struct *task, struct mm_struct *mm)
 {
        __sync_task_rss_stat(task, mm);
 }
-#else
+#else /* SPLIT_RSS_COUNTING */
 
 #define inc_mm_counter_fast(mm, member) inc_mm_counter(mm, member)
 #define dec_mm_counter_fast(mm, member) dec_mm_counter(mm, member)
@@ -191,7 +191,204 @@ static void check_sync_rss_stat(struct task_struct *task)
 {
 }
 
+#endif /* SPLIT_RSS_COUNTING */
+
+#ifdef HAVE_GENERIC_MMU_GATHER
+
+static int tlb_next_batch(struct mmu_gather *tlb)
+{
+       struct mmu_gather_batch *batch;
+
+       batch = tlb->active;
+       if (batch->next) {
+               tlb->active = batch->next;
+               return 1;
+       }
+
+       batch = (void *)__get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
+       if (!batch)
+               return 0;
+
+       batch->next = NULL;
+       batch->nr   = 0;
+       batch->max  = MAX_GATHER_BATCH;
+
+       tlb->active->next = batch;
+       tlb->active = batch;
+
+       return 1;
+}
+
+/* tlb_gather_mmu
+ *     Called to initialize an (on-stack) mmu_gather structure for page-table
+ *     tear-down from @mm. The @fullmm argument is used when @mm has no
+ *     remaining users and we are about to destroy the full address space
+ *     (exit/execve).
+ */
+void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm)
+{
+       tlb->mm = mm;
+
+       tlb->fullmm     = fullmm;
+       tlb->need_flush = 0;
+       tlb->fast_mode  = (num_possible_cpus() == 1);
+       tlb->local.next = NULL;
+       tlb->local.nr   = 0;
+       tlb->local.max  = ARRAY_SIZE(tlb->__pages);
+       tlb->active     = &tlb->local;
+
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+       tlb->batch = NULL;
 #endif
+}
+
+void tlb_flush_mmu(struct mmu_gather *tlb)
+{
+       struct mmu_gather_batch *batch;
+
+       if (!tlb->need_flush)
+               return;
+       tlb->need_flush = 0;
+       tlb_flush(tlb);
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+       tlb_table_flush(tlb);
+#endif
+
+       if (tlb_fast_mode(tlb))
+               return;
+
+       for (batch = &tlb->local; batch; batch = batch->next) {
+               free_pages_and_swap_cache(batch->pages, batch->nr);
+               batch->nr = 0;
+       }
+       tlb->active = &tlb->local;
+}
+
+/* tlb_finish_mmu
+ *     Called at the end of the shootdown operation to free up any resources
+ *     that were required.
+ */
+void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
+{
+       struct mmu_gather_batch *batch, *next;
+
+       tlb_flush_mmu(tlb);
+
+       /* keep the page table cache within bounds */
+       check_pgt_cache();
+
+       for (batch = tlb->local.next; batch; batch = next) {
+               next = batch->next;
+               free_pages((unsigned long)batch, 0);
+       }
+       tlb->local.next = NULL;
+}
+
+/* __tlb_remove_page
+ *     Must perform the equivalent of __free_pte(pte_get_and_clear(ptep)), while
+ *     handling the additional races in SMP caused by other CPUs caching valid
+ *     mappings in their TLBs. Returns the number of free page slots left;
+ *     when no slots remain, tlb_flush_mmu() must be called.
+ */
+int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
+{
+       struct mmu_gather_batch *batch;
+
+       tlb->need_flush = 1;
+
+       if (tlb_fast_mode(tlb)) {
+               free_page_and_swap_cache(page);
+               return 1; /* avoid calling tlb_flush_mmu() */
+       }
+
+       batch = tlb->active;
+       batch->pages[batch->nr++] = page;
+       if (batch->nr == batch->max) {
+               if (!tlb_next_batch(tlb))
+                       return 0;
+       }
+       VM_BUG_ON(batch->nr > batch->max);
+
+       return batch->max - batch->nr;
+}
+
+#endif /* HAVE_GENERIC_MMU_GATHER */
+
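
The generic gather API above is used roughly as follows — a minimal sketch, not code from this patch: mm, page, start and end are assumed from the surrounding unmap code, and the real callers are unmap_vmas() below and zap_page_range():

        struct mmu_gather tlb;

        tlb_gather_mmu(&tlb, mm, 0);            /* 0: partial unmap, not a full exit/execve teardown */
        /* ... walk the page tables; for each page taken out of the mapping: */
        if (!__tlb_remove_page(&tlb, page))     /* 0 means the batch is full */
                tlb_flush_mmu(&tlb);            /* flush TLBs, free the gathered pages, keep going */
        /* ... */
        tlb_finish_mmu(&tlb, start, end);       /* final flush; frees the extra batch pages */
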
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+
+/*
+ * See the comment near struct mmu_table_batch.
+ */
+
+static void tlb_remove_table_smp_sync(void *arg)
+{
+       /* Simply deliver the interrupt */
+}
+
+static void tlb_remove_table_one(void *table)
+{
+       /*
+        * This isn't an RCU grace period and hence the page-tables cannot be
+        * assumed to be actually RCU-freed.
+        *
+        * It is however sufficient for software page-table walkers that rely on
+        * IRQ disabling. See the comment near struct mmu_table_batch.
+        */
+       smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
+       __tlb_remove_table(table);
+}
+
+static void tlb_remove_table_rcu(struct rcu_head *head)
+{
+       struct mmu_table_batch *batch;
+       int i;
+
+       batch = container_of(head, struct mmu_table_batch, rcu);
+
+       for (i = 0; i < batch->nr; i++)
+               __tlb_remove_table(batch->tables[i]);
+
+       free_page((unsigned long)batch);
+}
+
+void tlb_table_flush(struct mmu_gather *tlb)
+{
+       struct mmu_table_batch **batch = &tlb->batch;
+
+       if (*batch) {
+               call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
+               *batch = NULL;
+       }
+}
+
+void tlb_remove_table(struct mmu_gather *tlb, void *table)
+{
+       struct mmu_table_batch **batch = &tlb->batch;
+
+       tlb->need_flush = 1;
+
+       /*
+        * When there are fewer than two users of this mm, there cannot be a
+        * concurrent page-table walk.
+        */
+       if (atomic_read(&tlb->mm->mm_users) < 2) {
+               __tlb_remove_table(table);
+               return;
+       }
+
+       if (*batch == NULL) {
+               *batch = (struct mmu_table_batch *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
+               if (*batch == NULL) {
+                       tlb_remove_table_one(table);
+                       return;
+               }
+               (*batch)->nr = 0;
+       }
+       (*batch)->tables[(*batch)->nr++] = table;
+       if ((*batch)->nr == MAX_TABLE_BATCH)
+               tlb_table_flush(tlb);
+}
+
+#endif /* CONFIG_HAVE_RCU_TABLE_FREE */
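
Architectures that select CONFIG_HAVE_RCU_TABLE_FREE supply __tlb_remove_table() to do the actual page-table page free and funnel their frees through tlb_remove_table(). A hypothetical wiring, assuming the asm-generic hook names (not part of this patch):

        /* arch/xxx/include/asm/tlb.h -- hypothetical example */
        extern void __tlb_remove_table(void *table);    /* arch-side final free */

        #define __pte_free_tlb(tlb, ptep, address)      \
                tlb_remove_table((tlb), (ptep))
        #define __pmd_free_tlb(tlb, pmdp, address)      \
                tlb_remove_table((tlb), (pmdp))
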
 
 /*
  * If a p?d_bad entry is found while walking page tables, report
@@ -533,7 +730,7 @@ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
        add_taint(TAINT_BAD_PAGE);
 }
 
-static inline int is_cow_mapping(unsigned int flags)
+static inline int is_cow_mapping(vm_flags_t flags)
 {
        return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
 }
@@ -909,27 +1106,24 @@ int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 static unsigned long zap_pte_range(struct mmu_gather *tlb,
                                struct vm_area_struct *vma, pmd_t *pmd,
                                unsigned long addr, unsigned long end,
-                               long *zap_work, struct zap_details *details)
+                               struct zap_details *details)
 {
        struct mm_struct *mm = tlb->mm;
        int force_flush = 0;
-       pte_t *pte;
-       spinlock_t *ptl;
        int rss[NR_MM_COUNTERS];
+       spinlock_t *ptl;
+       pte_t *pte;
 
-       init_rss_vec(rss);
 again:
+       init_rss_vec(rss);
        pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
        arch_enter_lazy_mmu_mode();
        do {
                pte_t ptent = *pte;
-               if (pte_none(ptent)) {
-                       (*zap_work)--;
+               if (pte_none(ptent))
                        continue;
-               }
 
-               (*zap_work) -= PAGE_SIZE;
-
                if (pte_present(ptent)) {
                        struct page *page;
 
@@ -998,7 +1192,7 @@ again:
                                print_bad_pte(vma, addr, ptent, NULL);
                }
                pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
-       } while (pte++, addr += PAGE_SIZE, (addr != end && *zap_work > 0));
+       } while (pte++, addr += PAGE_SIZE, addr != end);
 
        add_mm_rss_vec(mm, rss);
        arch_leave_lazy_mmu_mode();
@@ -1022,7 +1216,7 @@ again:
 static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
                                struct vm_area_struct *vma, pud_t *pud,
                                unsigned long addr, unsigned long end,
-                               long *zap_work, struct zap_details *details)
+                               struct zap_details *details)
 {
        pmd_t *pmd;
        unsigned long next;
@@ -1034,19 +1228,15 @@ static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
                        if (next-addr != HPAGE_PMD_SIZE) {
                                VM_BUG_ON(!rwsem_is_locked(&tlb->mm->mmap_sem));
                                split_huge_page_pmd(vma->vm_mm, pmd);
-                       } else if (zap_huge_pmd(tlb, vma, pmd)) {
-                               (*zap_work)--;
+                       } else if (zap_huge_pmd(tlb, vma, pmd))
                                continue;
-                       }
                        /* fall through */
                }
-               if (pmd_none_or_clear_bad(pmd)) {
-                       (*zap_work)--;
+               if (pmd_none_or_clear_bad(pmd))
                        continue;
-               }
-               next = zap_pte_range(tlb, vma, pmd, addr, next,
-                                               zap_work, details);
-       } while (pmd++, addr = next, (addr != end && *zap_work > 0));
+               next = zap_pte_range(tlb, vma, pmd, addr, next, details);
+               cond_resched();
+       } while (pmd++, addr = next, addr != end);
 
        return addr;
 }
@@ -1054,7 +1244,7 @@ static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
 static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
                                struct vm_area_struct *vma, pgd_t *pgd,
                                unsigned long addr, unsigned long end,
-                               long *zap_work, struct zap_details *details)
+                               struct zap_details *details)
 {
        pud_t *pud;
        unsigned long next;
@@ -1062,13 +1252,10 @@ static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
        pud = pud_offset(pgd, addr);
        do {
                next = pud_addr_end(addr, end);
-               if (pud_none_or_clear_bad(pud)) {
-                       (*zap_work)--;
+               if (pud_none_or_clear_bad(pud))
                        continue;
-               }
-               next = zap_pmd_range(tlb, vma, pud, addr, next,
-                                               zap_work, details);
-       } while (pud++, addr = next, (addr != end && *zap_work > 0));
+               next = zap_pmd_range(tlb, vma, pud, addr, next, details);
+       } while (pud++, addr = next, addr != end);
 
        return addr;
 }
@@ -1076,7 +1263,7 @@ static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
 static unsigned long unmap_page_range(struct mmu_gather *tlb,
                                struct vm_area_struct *vma,
                                unsigned long addr, unsigned long end,
-                               long *zap_work, struct zap_details *details)
+                               struct zap_details *details)
 {
        pgd_t *pgd;
        unsigned long next;
@@ -1090,13 +1277,10 @@ static unsigned long unmap_page_range(struct mmu_gather *tlb,
        pgd = pgd_offset(vma->vm_mm, addr);
        do {
                next = pgd_addr_end(addr, end);
-               if (pgd_none_or_clear_bad(pgd)) {
-                       (*zap_work)--;
+               if (pgd_none_or_clear_bad(pgd))
                        continue;
-               }
-               next = zap_pud_range(tlb, vma, pgd, addr, next,
-                                               zap_work, details);
-       } while (pgd++, addr = next, (addr != end && *zap_work > 0));
+               next = zap_pud_range(tlb, vma, pgd, addr, next, details);
+       } while (pgd++, addr = next, addr != end);
        tlb_end_vma(tlb, vma);
        mem_cgroup_uncharge_end();
 
@@ -1141,9 +1325,7 @@ unsigned long unmap_vmas(struct mmu_gather *tlb,
                unsigned long end_addr, unsigned long *nr_accounted,
                struct zap_details *details)
 {
-       long zap_work = ZAP_BLOCK_SIZE;
        unsigned long start = start_addr;
-       spinlock_t *i_mmap_lock = details? details->i_mmap_lock: NULL;
        struct mm_struct *mm = vma->vm_mm;
 
        mmu_notifier_invalidate_range_start(mm, start_addr, end_addr);
@@ -1176,33 +1358,15 @@ unsigned long unmap_vmas(struct mmu_gather *tlb,
                                 * Since no pte has actually been setup, it is
                                 * safe to do nothing in this case.
                                 */
-                               if (vma->vm_file) {
+                               if (vma->vm_file)
                                        unmap_hugepage_range(vma, start, end, NULL);
-                                       zap_work -= (end - start) /
-                                       pages_per_huge_page(hstate_vma(vma));
-                               }
 
                                start = end;
                        } else
-                               start = unmap_page_range(tlb, vma,
-                                               start, end, &zap_work, details);
-
-                       if (zap_work > 0) {
-                               BUG_ON(start != end);
-                               break;
-                       }
-
-                       if (need_resched() ||
-                               (i_mmap_lock && spin_needbreak(i_mmap_lock))) {
-                               if (i_mmap_lock)
-                                       goto out;
-                               cond_resched();
-                       }
-
-                       zap_work = ZAP_BLOCK_SIZE;
+                               start = unmap_page_range(tlb, vma, start, end, details);
                }
        }
-out:
+
        mmu_notifier_invalidate_range_end(mm, start_addr, end_addr);
        return start;   /* which is now the end (or restart) address */
 }
@@ -2535,96 +2699,11 @@ unwritable_page:
        return ret;
 }
 
-/*
- * Helper functions for unmap_mapping_range().
- *
- * __ Notes on dropping i_mmap_lock to reduce latency while unmapping __
- *
- * We have to restart searching the prio_tree whenever we drop the lock,
- * since the iterator is only valid while the lock is held, and anyway
- * a later vma might be split and reinserted earlier while lock dropped.
- *
- * The list of nonlinear vmas could be handled more efficiently, using
- * a placeholder, but handle it in the same way until a need is shown.
- * It is important to search the prio_tree before nonlinear list: a vma
- * may become nonlinear and be shifted from prio_tree to nonlinear list
- * while the lock is dropped; but never shifted from list to prio_tree.
- *
- * In order to make forward progress despite restarting the search,
- * vm_truncate_count is used to mark a vma as now dealt with, so we can
- * quickly skip it next time around.  Since the prio_tree search only
- * shows us those vmas affected by unmapping the range in question, we
- * can't efficiently keep all vmas in step with mapping->truncate_count:
- * so instead reset them all whenever it wraps back to 0 (then go to 1).
- * mapping->truncate_count and vma->vm_truncate_count are protected by
- * i_mmap_lock.
- *
- * In order to make forward progress despite repeatedly restarting some
- * large vma, note the restart_addr from unmap_vmas when it breaks out:
- * and restart from that address when we reach that vma again.  It might
- * have been split or merged, shrunk or extended, but never shifted: so
- * restart_addr remains valid so long as it remains in the vma's range.
- * unmap_mapping_range forces truncate_count to leap over page-aligned
- * values so we can save vma's restart_addr in its truncate_count field.
- */
-#define is_restart_addr(truncate_count) (!((truncate_count) & ~PAGE_MASK))
-
-static void reset_vma_truncate_counts(struct address_space *mapping)
-{
-       struct vm_area_struct *vma;
-       struct prio_tree_iter iter;
-
-       vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, 0, ULONG_MAX)
-               vma->vm_truncate_count = 0;
-       list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list)
-               vma->vm_truncate_count = 0;
-}
-
-static int unmap_mapping_range_vma(struct vm_area_struct *vma,
+static void unmap_mapping_range_vma(struct vm_area_struct *vma,
                unsigned long start_addr, unsigned long end_addr,
                struct zap_details *details)
 {
-       unsigned long restart_addr;
-       int need_break;
-
-       /*
-        * files that support invalidating or truncating portions of the
-        * file from under mmaped areas must have their ->fault function
-        * return a locked page (and set VM_FAULT_LOCKED in the return).
-        * This provides synchronisation against concurrent unmapping here.
-        */
-
-again:
-       restart_addr = vma->vm_truncate_count;
-       if (is_restart_addr(restart_addr) && start_addr < restart_addr) {
-               start_addr = restart_addr;
-               if (start_addr >= end_addr) {
-                       /* Top of vma has been split off since last time */
-                       vma->vm_truncate_count = details->truncate_count;
-                       return 0;
-               }
-       }
-
-       restart_addr = zap_page_range(vma, start_addr,
-                                       end_addr - start_addr, details);
-       need_break = need_resched() || spin_needbreak(details->i_mmap_lock);
-
-       if (restart_addr >= end_addr) {
-               /* We have now completed this vma: mark it so */
-               vma->vm_truncate_count = details->truncate_count;
-               if (!need_break)
-                       return 0;
-       } else {
-               /* Note restart_addr in vma's truncate_count field */
-               vma->vm_truncate_count = restart_addr;
-               if (!need_break)
-                       goto again;
-       }
-
-       spin_unlock(details->i_mmap_lock);
-       cond_resched();
-       spin_lock(details->i_mmap_lock);
-       return -EINTR;
+       zap_page_range(vma, start_addr, end_addr - start_addr, details);
 }
 
 static inline void unmap_mapping_range_tree(struct prio_tree_root *root,
@@ -2634,12 +2713,8 @@ static inline void unmap_mapping_range_tree(struct prio_tree_root *root,
        struct prio_tree_iter iter;
        pgoff_t vba, vea, zba, zea;
 
-restart:
        vma_prio_tree_foreach(vma, &iter, root,
                        details->first_index, details->last_index) {
-               /* Skip quickly over those we have already dealt with */
-               if (vma->vm_truncate_count == details->truncate_count)
-                       continue;
 
                vba = vma->vm_pgoff;
                vea = vba + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) - 1;
@@ -2651,11 +2726,10 @@ restart:
                if (zea > vea)
                        zea = vea;
 
-               if (unmap_mapping_range_vma(vma,
+               unmap_mapping_range_vma(vma,
                        ((zba - vba) << PAGE_SHIFT) + vma->vm_start,
                        ((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start,
-                               details) < 0)
-                       goto restart;
+                               details);
        }
 }
 
@@ -2670,15 +2744,9 @@ static inline void unmap_mapping_range_list(struct list_head *head,
         * across *all* the pages in each nonlinear VMA, not just the pages
         * whose virtual address lies outside the file truncation point.
         */
-restart:
        list_for_each_entry(vma, head, shared.vm_set.list) {
-               /* Skip quickly over those we have already dealt with */
-               if (vma->vm_truncate_count == details->truncate_count)
-                       continue;
                details->nonlinear_vma = vma;
-               if (unmap_mapping_range_vma(vma, vma->vm_start,
-                                       vma->vm_end, details) < 0)
-                       goto restart;
+               unmap_mapping_range_vma(vma, vma->vm_start, vma->vm_end, details);
        }
 }
 
@@ -2717,26 +2785,14 @@ void unmap_mapping_range(struct address_space *mapping,
        details.last_index = hba + hlen - 1;
        if (details.last_index < details.first_index)
                details.last_index = ULONG_MAX;
-       details.i_mmap_lock = &mapping->i_mmap_lock;
 
-       mutex_lock(&mapping->unmap_mutex);
-       spin_lock(&mapping->i_mmap_lock);
-
-       /* Protect against endless unmapping loops */
-       mapping->truncate_count++;
-       if (unlikely(is_restart_addr(mapping->truncate_count))) {
-               if (mapping->truncate_count == 0)
-                       reset_vma_truncate_counts(mapping);
-               mapping->truncate_count++;
-       }
-       details.truncate_count = mapping->truncate_count;
 
+       mutex_lock(&mapping->i_mmap_mutex);
        if (unlikely(!prio_tree_empty(&mapping->i_mmap)))
                unmap_mapping_range_tree(&mapping->i_mmap, &details);
        if (unlikely(!list_empty(&mapping->i_mmap_nonlinear)))
                unmap_mapping_range_list(&mapping->i_mmap_nonlinear, &details);
-       spin_unlock(&mapping->i_mmap_lock);
-       mutex_unlock(&mapping->unmap_mutex);
+       mutex_unlock(&mapping->i_mmap_mutex);
 }
 EXPORT_SYMBOL(unmap_mapping_range);
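
With the truncate_count/restart machinery gone, unmap_mapping_range() is a plain sleepable call under i_mmap_mutex. A minimal sketch of the classic truncate-side caller, loosely following truncate_pagecache() — inode and newsize here are assumed from the caller:

        loff_t holebegin = round_up(newsize, PAGE_SIZE);

        /* drop every user mapping from the new EOF onwards; even_cows == 1 */
        unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);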