diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h
index 2824609319c..1a6320290d2 100644
--- a/arch/powerpc/include/asm/switch_to.h
+++ b/arch/powerpc/include/asm/switch_to.h
@@ -40,6 +40,7 @@ static inline void discard_lazy_cpu_state(void)
 #ifdef CONFIG_ALTIVEC
 extern void flush_altivec_to_thread(struct task_struct *);
 extern void giveup_altivec(struct task_struct *);
+extern void giveup_altivec_notask(void);
 #else
 static inline void flush_altivec_to_thread(struct task_struct *t)
 {
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 4937c969009..bb58f41fc04 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -124,7 +124,7 @@ void enable_kernel_altivec(void)
 	if (current->thread.regs && (current->thread.regs->msr & MSR_VEC))
 		giveup_altivec(current);
 	else
-		giveup_altivec(NULL);	/* just enable AltiVec for kernel - force */
+		giveup_altivec_notask();
 #else
 	giveup_altivec(last_task_used_altivec);
 #endif /* CONFIG_SMP */
diff --git a/arch/powerpc/kernel/vector.S b/arch/powerpc/kernel/vector.S
index 4d5a3edff49..e830289d2e4 100644
--- a/arch/powerpc/kernel/vector.S
+++ b/arch/powerpc/kernel/vector.S
@@ -89,6 +89,16 @@ _GLOBAL(load_up_altivec)
 	/* restore registers and return */
 	blr
 
+_GLOBAL(giveup_altivec_notask)
+	mfmsr	r3
+	andis.	r4,r3,MSR_VEC@h
+	bnelr				/* Already enabled? */
+	oris	r3,r3,MSR_VEC@h
+	SYNC
+	MTMSRD(r3)			/* enable use of VMX now */
+	isync
+	blr
+
 /*
  * giveup_altivec(tsk)
  * Disable VMX for the task given as the argument,
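
The new vector.S routine only sets the MSR_VEC bit for the current CPU and never
touches a task_struct, which is why enable_kernel_altivec() can call it instead
of the old giveup_altivec(NULL) trick. For readers less familiar with PowerPC
assembly, a rough C-level sketch of the routine follows. This is illustrative
only: the real code must stay in assembly to control the exact SYNC/MTMSRD/isync
barrier sequence around the MSR write, and mfmsr()/mtmsr() here stand in for the
asm/reg.h MSR accessors.

	/*
	 * Sketch of giveup_altivec_notask(): enable kernel use of VMX
	 * without saving any task state. Not the actual implementation,
	 * which lives in vector.S.
	 */
	static inline void giveup_altivec_notask_sketch(void)
	{
		unsigned long msr = mfmsr();

		if (msr & MSR_VEC)	/* mirrors the bnelr early exit */
			return;		/* VMX already enabled, nothing to do */

		mtmsr(msr | MSR_VEC);	/* enable use of VMX now */
	}

Two things fall out of this shape: passing NULL into giveup_altivec(), a
function that takes a struct task_struct *, is no longer needed, and the
bnelr early exit means repeated enable_kernel_altivec() calls skip the
SYNC/MTMSRD/isync sequence entirely once MSR_VEC is already set.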