diff --git a/arch/m68knommu/kernel/asm-offsets.c b/arch/m68knommu/kernel/asm-offsets.c
index fd0c685a7f1..c785d07c02c 100644
--- a/arch/m68knommu/kernel/asm-offsets.c
+++ b/arch/m68knommu/kernel/asm-offsets.c
@@ -87,6 +87,7 @@ int main(void)
 	DEFINE(TI_TASK, offsetof(struct thread_info, task));
 	DEFINE(TI_EXECDOMAIN, offsetof(struct thread_info, exec_domain));
 	DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
+	DEFINE(TI_PREEMPTCOUNT, offsetof(struct thread_info, preempt_count));
 	DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
 
 	return 0;
diff --git a/arch/m68knommu/platform/coldfire/entry.S b/arch/m68knommu/platform/coldfire/entry.S
index 111b66dc737..1af7c1d650f 100644
--- a/arch/m68knommu/platform/coldfire/entry.S
+++ b/arch/m68knommu/platform/coldfire/entry.S
@@ -106,6 +106,22 @@ ret_from_exception:
 	btst	#5,%sp@(PT_SR)		/* check if returning to kernel */
 	jeq	Luser_return		/* if so, skip resched, signals */
 
+#ifdef CONFIG_PREEMPT
+	movel	%sp,%d1			/* get thread_info pointer */
+	andl	#-THREAD_SIZE,%d1	/* at base of kernel stack */
+	movel	%d1,%a0
+	movel	%a0@(TI_FLAGS),%d1	/* get thread_info->flags */
+	andl	#_TIF_NEED_RESCHED,%d1
+	jeq	Lkernel_return
+
+	movel	%a0@(TI_PREEMPTCOUNT),%d1
+	cmpl	#0,%d1
+	jne	Lkernel_return
+
+	pea	Lkernel_return
+	jmp	preempt_schedule_irq	/* preempt the kernel */
+#endif
+
 Lkernel_return:
 	moveml	%sp@,%d1-%d5/%a0-%a2
 	lea	%sp@(32),%sp		/* space for 8 regs */
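
For readers less familiar with m68k assembly, the CONFIG_PREEMPT block added to
ret_from_exception above amounts to the following C logic.  This is only an
illustrative sketch with stubbed-out types, values and helpers (the names
kernel_return_check and the flag value are invented for the example); the real
check runs in assembly on the exception return path, before the saved
registers are restored.

	/*
	 * Illustrative sketch only -- models the CONFIG_PREEMPT block added to
	 * ret_from_exception above.  All types and helpers here are stand-ins
	 * so the example compiles on its own; the real code is the assembly
	 * in the patch.
	 */
	#include <stdio.h>

	#define _TIF_NEED_RESCHED	(1 << 1)	/* stand-in value */

	struct thread_info {
		unsigned long flags;		/* TI_FLAGS in the patch */
		int preempt_count;		/* TI_PREEMPTCOUNT in the patch */
	};

	/* Stand-in for the kernel's preempt_schedule_irq(). */
	static void preempt_schedule_irq(void)
	{
		printf("preempting the kernel\n");
	}

	/* Called (conceptually) when an exception returns to kernel context. */
	static void kernel_return_check(struct thread_info *ti)
	{
		if (!(ti->flags & _TIF_NEED_RESCHED))
			return;			/* no reschedule requested */
		if (ti->preempt_count != 0)
			return;			/* preemption disabled */
		preempt_schedule_irq();		/* same call the patch makes */
	}

	int main(void)
	{
		struct thread_info ti = {
			.flags = _TIF_NEED_RESCHED,
			.preempt_count = 0,
		};

		kernel_return_check(&ti);	/* would preempt here */
		return 0;
	}

Note the pea/jmp pair in the patch: pushing Lkernel_return and then jumping to
preempt_schedule_irq makes that call return straight to Lkernel_return, so
after a preemption the code falls back into the normal register-restore path.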