m68knommu: ColdFire add support for kernel preemption
Sebastian Siewior [Thu, 1 May 2008 02:16:29 +0000 (12:16 +1000)]
As the subject says this patch adds the support for kernel preemption
on m68knommu ColdFire. I think the same changes could be applied to
68360 & 68328, but since I don't have the hardware for testing, I don't touch them.

Signed-off-by: Sebastian Siewior <bigeasy@linutronix.de>
Signed-off-by: Greg Ungerer <gerg@uclinux.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

arch/m68knommu/kernel/asm-offsets.c
arch/m68knommu/platform/coldfire/entry.S

index fd0c685..c785d07 100644 (file)
@@ -87,6 +87,7 @@ int main(void)
        DEFINE(TI_TASK, offsetof(struct thread_info, task));
        DEFINE(TI_EXECDOMAIN, offsetof(struct thread_info, exec_domain));
        DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
+       DEFINE(TI_PREEMPTCOUNT, offsetof(struct thread_info, preempt_count));
        DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
 
        return 0;
index 111b66d..1af7c1d 100644 (file)
@@ -106,6 +106,22 @@ ret_from_exception:
        btst    #5,%sp@(PT_SR)          /* check if returning to kernel */
        jeq     Luser_return            /* if so, skip resched, signals */
 
+#ifdef CONFIG_PREEMPT
+       movel   %sp,%d1                 /* get thread_info pointer */
+       andl    #-THREAD_SIZE,%d1       /* at base of kernel stack */
+       movel   %d1,%a0
+       movel   %a0@(TI_FLAGS),%d1      /* get thread_info->flags */
+       andl    #_TIF_NEED_RESCHED,%d1
+       jeq     Lkernel_return
+
+       movel   %a0@(TI_PREEMPTCOUNT),%d1
+       cmpl    #0,%d1
+       jne     Lkernel_return
+
+       pea     Lkernel_return
+       jmp     preempt_schedule_irq    /* preempt the kernel */
+#endif
+
 Lkernel_return:
        moveml  %sp@,%d1-%d5/%a0-%a2
        lea     %sp@(32),%sp            /* space for 8 regs */