From b74acbf48fad2b1aa38062eea32ced3f4d9b4628 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 31 Oct 2012 12:04:11 +0100
Subject: [PATCH 239/366] arm: Add support for lazy preemption

Implement the arm pieces for lazy preemption: select HAVE_PREEMPT_LAZY,
add the preempt_lazy_count field and the TIF_NEED_RESCHED_LAZY thread
flag, and wire both into the svc IRQ return path and do_work_pending().
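
For reference, the decision the new __irq_svc path makes on return from
interrupt can be modelled in plain C roughly as below. This is an
illustrative sketch, not code from the tree: the struct and the helper
are local stand-ins, only the field and flag names mirror what this
patch adds.

    #include <stdbool.h>

    #define TIF_NEED_RESCHED        1       /* stand-in bit numbers */
    #define TIF_NEED_RESCHED_LAZY   8       /* bit added by this patch */

    /* Minimal stand-in for the thread_info fields used by the asm. */
    struct ti_model {
            unsigned long flags;
            int preempt_count;
            int preempt_lazy_count;         /* counter added by this patch */
    };

    /* Should the svc IRQ exit path call preempt_schedule_irq()? */
    static bool should_preempt_on_irq_exit(const struct ti_model *ti)
    {
            if (ti->preempt_count != 0)
                    return false;   /* preemption disabled, plain return */
            if (ti->flags & (1UL << TIF_NEED_RESCHED))
                    return true;    /* immediate reschedule requested */
            if (ti->preempt_lazy_count != 0)
                    return false;   /* lazy preemption currently held off */
            return ti->flags & (1UL << TIF_NEED_RESCHED_LAZY);
    }

The point is that a regular NEED_RESCHED still preempts even while
preempt_lazy_count is non-zero; only the lazy variant is suppressed
while the lazy count is held.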

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 arch/arm/Kconfig                   |  1 +
 arch/arm/include/asm/thread_info.h |  3 +++
 arch/arm/kernel/asm-offsets.c      |  1 +
 arch/arm/kernel/entry-armv.S       | 13 +++++++++++--
 arch/arm/kernel/signal.c           |  3 ++-
 5 files changed, 18 insertions(+), 3 deletions(-)

diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 5e24da7..cb54374 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -69,6 +69,7 @@ config ARM
         select HAVE_PERF_EVENTS
         select HAVE_PERF_REGS
         select HAVE_PERF_USER_STACK_DUMP
+        select HAVE_PREEMPT_LAZY
         select HAVE_RCU_TABLE_FREE if (SMP && ARM_LPAE)
         select HAVE_REGS_AND_STACK_ACCESS_API
         select HAVE_SYSCALL_TRACEPOINTS
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index 776757d..46cc07b 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -49,6 +49,7 @@ struct cpu_context_save {
 struct thread_info {
         unsigned long        flags;               /* low level flags */
         int                  preempt_count;       /* 0 => preemptable, <0 => bug */
+        int                  preempt_lazy_count;  /* 0 => preemptable, <0 => bug */
         mm_segment_t         addr_limit;          /* address limit */
         struct task_struct   *task;               /* main task structure */
         __u32                cpu;                 /* cpu */
@@ -143,6 +144,7 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
 #define TIF_SYSCALL_AUDIT       5       /* syscall auditing active */
 #define TIF_SYSCALL_TRACEPOINT  6       /* syscall tracepoint instrumentation */
 #define TIF_SECCOMP             7       /* seccomp syscall filtering active */
+#define TIF_NEED_RESCHED_LAZY   8

 #define TIF_NOHZ                12      /* in adaptive nohz mode */
 #define TIF_USING_IWMMXT        17
@@ -152,6 +154,7 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
 #define _TIF_SIGPENDING         (1 << TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED       (1 << TIF_NEED_RESCHED)
 #define _TIF_NOTIFY_RESUME      (1 << TIF_NOTIFY_RESUME)
+#define _TIF_NEED_RESCHED_LAZY  (1 << TIF_NEED_RESCHED_LAZY)
 #define _TIF_UPROBE             (1 << TIF_UPROBE)
 #define _TIF_SYSCALL_TRACE      (1 << TIF_SYSCALL_TRACE)
 #define _TIF_SYSCALL_AUDIT      (1 << TIF_SYSCALL_AUDIT)
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index a586cfe..8ce8606 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -55,6 +55,7 @@ int main(void)
   BLANK();
   DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
   DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
+  DEFINE(TI_PREEMPT_LAZY, offsetof(struct thread_info, preempt_lazy_count));
   DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit));
   DEFINE(TI_TASK, offsetof(struct thread_info, task));
   DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 3ce377f..d66b1ae 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -215,11 +215,18 @@ __irq_svc:
 #ifdef CONFIG_PREEMPT
         get_thread_info tsk
         ldr     r8, [tsk, #TI_PREEMPT]          @ get preempt count
-        ldr     r0, [tsk, #TI_FLAGS]            @ get flags
         teq     r8, #0                          @ if preempt count != 0
+        bne     1f                              @ return from exception
+        ldr     r0, [tsk, #TI_FLAGS]            @ get flags
+        tst     r0, #_TIF_NEED_RESCHED          @ if NEED_RESCHED is set
+        blne    svc_preempt                     @ preempt!
+
+        ldr     r8, [tsk, #TI_PREEMPT_LAZY]     @ get preempt lazy count
+        teq     r8, #0                          @ if preempt lazy count != 0
         movne   r0, #0                          @ force flags to 0
-        tst     r0, #_TIF_NEED_RESCHED
+        tst     r0, #_TIF_NEED_RESCHED_LAZY
         blne    svc_preempt
+1:
 #endif

         svc_exit r5, irq = 1                    @ return from exception
@@ -234,6 +241,8 @@ svc_preempt:
 1:      bl      preempt_schedule_irq            @ irq en/disable is done inside
         ldr     r0, [tsk, #TI_FLAGS]            @ get new tasks TI_FLAGS
         tst     r0, #_TIF_NEED_RESCHED
+        bne     1b
+        tst     r0, #_TIF_NEED_RESCHED_LAZY
         reteq   r8                              @ go again
         b       1b
 #endif
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 7b8f214..96541e0 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -572,7 +572,8 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
          */
         trace_hardirqs_off();
         do {
-                if (likely(thread_flags & _TIF_NEED_RESCHED)) {
+                if (likely(thread_flags & (_TIF_NEED_RESCHED |
+                                           _TIF_NEED_RESCHED_LAZY))) {
                         schedule();
                 } else {
                         if (unlikely(!user_mode(regs)))
--
1.9.1