Revert "ARM: Remove __ARCH_WANT_INTERRUPTS_ON_CTXSW on pre-ARMv6 CPUs"
Bo Yan [Fri, 15 Mar 2013 02:26:17 +0000 (19:26 -0700)]
This reverts commit b9d4d42ad901cc848ac87f1cb8923fded3645568.

Change-Id: Icdc220a988b0e6b145466148fc922b5f8e5cdba8
Signed-off-by: Bo Yan <byan@nvidia.com>
Reviewed-on: http://git-master/r/209826
GVS: Gerrit_Virtual_Submit
Reviewed-by: Krishna Reddy <vdumpa@nvidia.com>
Reviewed-by: Yu-Huan Hsu <yhsu@nvidia.com>

arch/arm/include/asm/mmu.h
arch/arm/include/asm/mmu_context.h

diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h
index 1496565..20b43d6 100644
--- a/arch/arm/include/asm/mmu.h
+++ b/arch/arm/include/asm/mmu.h
@@ -34,4 +34,13 @@ typedef struct {
 
 #endif
 
+/*
+ * switch_mm() may do a full cache flush over the context switch,
+ * so enable interrupts over the context switch to avoid high
+ * latency.
+ */
+#ifndef CONFIG_CPU_HAS_ASID
+#define __ARCH_WANT_INTERRUPTS_ON_CTXSW
+#endif
+
 #endif
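
For reference, a reconstruction of what this hunk restores at the end of arch/arm/include/asm/mmu.h once the revert is applied: on non-ASID (pre-ARMv6) CPUs, __ARCH_WANT_INTERRUPTS_ON_CTXSW is defined again, so the scheduler runs the context switch with interrupts enabled instead of deferring the MMU switch.

/*
 * switch_mm() may do a full cache flush over the context switch,
 * so enable interrupts over the context switch to avoid high
 * latency.
 */
#ifndef CONFIG_CPU_HAS_ASID
#define __ARCH_WANT_INTERRUPTS_ON_CTXSW
#endif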
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
index 0306bc6..8da4b9c 100644
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -105,41 +105,20 @@ static inline void finish_arch_post_lock_switch(void)
 
 #else  /* !CONFIG_CPU_HAS_ASID */
 
-#ifdef CONFIG_MMU
-
 static inline void check_and_switch_context(struct mm_struct *mm,
                                            struct task_struct *tsk)
 {
+#ifdef CONFIG_MMU
        if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq))
                __check_kvm_seq(mm);
-
-       if (irqs_disabled())
-               /*
-                * cpu_switch_mm() needs to flush the VIVT caches. To avoid
-                * high interrupt latencies, defer the call and continue
-                * running with the old mm. Since we only support UP systems
-                * on non-ASID CPUs, the old mm will remain valid until the
-                * finish_arch_post_lock_switch() call.
-                */
-               set_ti_thread_flag(task_thread_info(tsk), TIF_SWITCH_MM);
-       else
-               cpu_switch_mm(mm->pgd, mm);
-}
-
-#define finish_arch_post_lock_switch \
-       finish_arch_post_lock_switch
-static inline void finish_arch_post_lock_switch(void)
-{
-       if (test_and_clear_thread_flag(TIF_SWITCH_MM)) {
-               struct mm_struct *mm = current->mm;
-               cpu_switch_mm(mm->pgd, mm);
-       }
+       cpu_switch_mm(mm->pgd, mm);
+#endif
 }
 
-#endif /* CONFIG_MMU */
-
 #define init_new_context(tsk,mm)       0
 
+#define finish_arch_post_lock_switch() do { } while (0)
+
 #endif /* CONFIG_CPU_HAS_ASID */
 
 #define destroy_context(mm)            do { } while(0)
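
Likewise, a sketch of the resulting non-ASID path in arch/arm/include/asm/mmu_context.h, reconstructed from the context and '+' lines of the hunk above: check_and_switch_context() calls cpu_switch_mm() directly again (the VIVT cache flush it performs now happens with interrupts enabled, per __ARCH_WANT_INTERRUPTS_ON_CTXSW above), and finish_arch_post_lock_switch() collapses back to a no-op.

#else  /* !CONFIG_CPU_HAS_ASID */

static inline void check_and_switch_context(struct mm_struct *mm,
                                            struct task_struct *tsk)
{
#ifdef CONFIG_MMU
        if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq))
                __check_kvm_seq(mm);
        cpu_switch_mm(mm->pgd, mm);
#endif
}

#define init_new_context(tsk,mm)       0

#define finish_arch_post_lock_switch() do { } while (0)

#endif /* CONFIG_CPU_HAS_ASID */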