MSR_VEC
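
MSR_VEC is the Machine State Register bit that makes the AltiVec/VMX vector unit available on Power; it is handled alongside MSR_FP (floating point) and MSR_VSX (VSX). Because the VSX registers overlay the FP and VMX register files, MSR_VSX is only valid when both MSR_FP and MSR_VEC are also set, which is what the repeated WARN_ON lines below assert. A minimal userspace sketch of that invariant, using the bit positions from arch/powerpc/include/asm/reg.h and a made-up helper name:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define MSR_FP  (1UL << 13)   /* floating-point unit available */
    #define MSR_VEC (1UL << 25)   /* AltiVec/VMX unit available */
    #define MSR_VSX (1UL << 23)   /* VSX unit available */

    /* Illustrative helper, not kernel API: VSX implies FP and VEC. */
    static bool msr_vsx_invariant_ok(uint64_t msr)
    {
        return !(msr & MSR_VSX) || ((msr & MSR_FP) && (msr & MSR_VEC));
    }

    int main(void)
    {
        assert(msr_vsx_invariant_ok(MSR_FP | MSR_VEC | MSR_VSX));
        assert(!msr_vsx_invariant_ok(MSR_VSX));  /* VSX alone is invalid */
        return 0;
    }
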
msr_check_and_clear(MSR_VEC);
msr_check_and_clear(MSR_FP|MSR_VEC|MSR_VSX);
mathflags |= MSR_VEC | MSR_VSX;
mathflags |= MSR_VEC;
new->thread.regs->msr &= ~(MSR_FP | MSR_VEC | MSR_VSX);
msr_diff &= MSR_FP | MSR_VEC | MSR_VSX;
if (cpu_has_feature(CPU_FTR_ALTIVEC) && msr_diff & MSR_VEC)
if (usermsr & MSR_VEC)
{MSR_VEC, "VEC"},
childregs->msr &= ~(MSR_FP|MSR_VEC|MSR_VSX);
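
The lines above are context-switch and fork bookkeeping: a switched-in or newly created thread starts with the math facility bits cleared from its user MSR, so its first FP/VMX/VSX instruction traps and the kernel reloads the unit's state lazily. A sketch of that clearing (child_user_msr() is a made-up name; MSR_* as in the sketch above):

    #include <stdint.h>
    #include <stdio.h>

    #define MSR_FP  (1UL << 13)
    #define MSR_VEC (1UL << 25)
    #define MSR_VSX (1UL << 23)

    /* Mirrors childregs->msr &= ~(MSR_FP|MSR_VEC|MSR_VSX) above: the new
     * thread faults on first use and state is restored on demand. */
    static uint64_t child_user_msr(uint64_t parent_msr)
    {
        return parent_msr & ~(MSR_FP | MSR_VEC | MSR_VSX);
    }

    int main(void)
    {
        printf("child msr = %#lx\n",
               (unsigned long)child_user_msr(MSR_FP | MSR_VEC | MSR_VSX));
        return 0;
    }
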
msr &= ~MSR_VEC;
msr_check_and_set(MSR_VEC);
msr_check_and_clear(MSR_VEC);
cpumsr = msr_check_and_set(MSR_VEC);
if (current->thread.regs && (current->thread.regs->msr & MSR_VEC)) {
if (tsk->thread.regs->msr & MSR_VEC) {
WARN_ON((msr & MSR_VSX) && !((msr & MSR_FP) && (msr & MSR_VEC)));
if (msr & MSR_VEC)
msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);
msr_check_and_clear(MSR_FP|MSR_VEC|MSR_VSX);
cpumsr = msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);
(current->thread.regs->msr & (MSR_VSX|MSR_VEC|MSR_FP))) {
if (tsk->thread.regs->msr & (MSR_VSX|MSR_VEC|MSR_FP)) {
msr_all_available |= MSR_VEC;
WARN_ON((usermsr & MSR_VSX) && !((usermsr & MSR_FP) && (usermsr & MSR_VEC)));
if (usermsr & MSR_VEC)
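
The msr_check_and_set()/msr_check_and_clear() pairs above are the kernel-internal enable-use-disable pattern used by enable_kernel_altivec()-style callers: set MSR_VEC in the live hardware MSR, flush any user vector state still in the registers (the checks against thread.regs->msr), do the work, then clear the bit again. A rough userspace model, with a global variable standing in for the hardware MSR:

    #include <stdint.h>

    #define MSR_FP  (1UL << 13)
    #define MSR_VEC (1UL << 25)
    #define MSR_VSX (1UL << 23)

    static uint64_t hw_msr;    /* stand-in for the CPU's MSR */

    /* Modeled on the kernel helpers: the real msr_check_and_set() writes
     * the MSR and returns the updated value. */
    static uint64_t msr_check_and_set(uint64_t bits)
    {
        hw_msr |= bits;
        return hw_msr;
    }

    static void msr_check_and_clear(uint64_t bits)
    {
        hw_msr &= ~bits;
    }

    static void use_vmx_in_kernel(void)    /* made-up name */
    {
        uint64_t cpumsr = msr_check_and_set(MSR_VEC);
        (void)cpumsr;
        /* ... a real caller would first save any live user vector state,
         * as flush_altivec_to_thread() does, then use the unit ... */
        msr_check_and_clear(MSR_VEC);
    }

    int main(void)
    {
        use_vmx_in_kernel();
        return 0;
    }
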
if ((!(msr & MSR_VEC)) && should_restore_altivec())
new_msr |= MSR_VEC;
if (((msr | new_msr) & (MSR_FP | MSR_VEC)) == (MSR_FP | MSR_VEC))
if (new_msr & MSR_VEC)
WARN_ON((usermsr & MSR_VSX) && !((usermsr & MSR_FP) && (usermsr & MSR_VEC)));
if (usermsr & MSR_VEC)
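
The should_restore_altivec() cluster is the return-to-userspace restore decision: a facility is restored only if it is currently off and judged worth reloading, and MSR_VSX is added only once both MSR_FP and MSR_VEC will be on (the combined (msr | new_msr) test above; the kernel additionally gates this on the CPU having VSX). A sketch of the decision as a pure function, with the should-restore heuristics passed in as flags:

    #include <stdbool.h>
    #include <stdint.h>

    #define MSR_FP  (1UL << 13)
    #define MSR_VEC (1UL << 25)
    #define MSR_VSX (1UL << 23)

    static uint64_t math_bits_to_restore(uint64_t msr, bool restore_fp,
                                         bool restore_altivec)
    {
        uint64_t new_msr = 0;

        if (!(msr & MSR_FP) && restore_fp)
            new_msr |= MSR_FP;
        if (!(msr & MSR_VEC) && restore_altivec)
            new_msr |= MSR_VEC;
        /* VSX only once both underlying register files will be live: */
        if (((msr | new_msr) & (MSR_FP | MSR_VEC)) == (MSR_FP | MSR_VEC))
            new_msr |= MSR_VSX;

        return new_msr;
    }

    int main(void)
    {
        /* FP already on, AltiVec wanted: VEC and VSX both come back. */
        return math_bits_to_restore(MSR_FP, false, true) ==
               (MSR_VEC | MSR_VSX) ? 0 : 1;
    }
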
if ((thr->ckpt_regs.msr & MSR_VEC) == 0)
msr |= MSR_VEC;
if (msr & MSR_VEC)
msr |= MSR_VEC;
if (msr & MSR_VEC)
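
The ckpt_regs lines belong to transactional memory handling: a transactional thread carries two MSR images, the live one and the checkpointed one, and the reclaim and signal paths accumulate facility bits from both so neither copy of the state is lost. A small sketch of that merging (facility_bits_to_save() is a made-up name; compare the ckpt_regs.msr OR further below):

    #include <stdint.h>

    #define MSR_FP  (1UL << 13)
    #define MSR_VEC (1UL << 25)
    #define MSR_VSX (1UL << 23)

    /* Facilities enabled in either MSR image must be saved. */
    static uint64_t facility_bits_to_save(uint64_t live_msr, uint64_t ckpt_msr)
    {
        return live_msr | (ckpt_msr & (MSR_FP | MSR_VEC | MSR_VSX));
    }

    int main(void)
    {
        return facility_bits_to_save(MSR_FP, MSR_VEC) ==
               (MSR_FP | MSR_VEC) ? 0 : 1;
    }
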
regs_set_return_msr(regs, regs->msr & ~MSR_VEC);
if (msr & MSR_VEC) {
regs_set_return_msr(regs, regs->msr & ~MSR_VEC);
if (msr & MSR_VEC) {
if (msr & MSR_VEC)
msr_check_and_set(msr & (MSR_FP | MSR_VEC));
if (msr & MSR_VEC) {
regs_set_return_msr(regs, regs->msr | MSR_VEC);
msr |= MSR_VEC;
msr |= tsk->thread.ckpt_regs.msr & (MSR_FP | MSR_VEC | MSR_VSX);
if (msr & MSR_VEC)
msr |= MSR_VEC;
if (msr & MSR_VEC)
regs_set_return_msr(regs, regs->msr & ~(MSR_FP | MSR_FE0 | MSR_FE1 | MSR_VEC | MSR_VSX));
if (v_regs != NULL && (msr & MSR_VEC) != 0) {
regs_set_return_msr(regs, regs->msr & ~(MSR_FP | MSR_FE0 | MSR_FE1 | MSR_VEC | MSR_VSX));
if (v_regs != NULL && tm_v_regs != NULL && (msr & MSR_VEC) != 0) {
msr_check_and_set(msr & (MSR_FP | MSR_VEC));
if (msr & MSR_VEC) {
regs_set_return_msr(regs, regs->msr | MSR_VEC);
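
The regs_set_return_msr() pairs are the signal path: setting up a signal frame saves the FP/vector state into the frame and clears the facility bits from the MSR the handler will run with, and sigreturn sets MSR_VEC again only when the frame actually carried vector state (the v_regs and tm_v_regs checks). A compact model of both directions, with regs_set_return_msr() reduced to a plain assignment and hypothetical wrapper names:

    #include <stdbool.h>
    #include <stdint.h>

    #define MSR_FP  (1UL << 13)
    #define MSR_VEC (1UL << 25)
    #define MSR_VSX (1UL << 23)
    #define MSR_FE0 (1UL << 11)    /* FP exception mode bits, per reg.h */
    #define MSR_FE1 (1UL << 8)

    struct regs { uint64_t msr; };

    /* The kernel helper does more bookkeeping; an assignment suffices here. */
    static void regs_set_return_msr(struct regs *regs, uint64_t msr)
    {
        regs->msr = msr;
    }

    /* Signal delivery: the handler starts with math facilities disabled. */
    static void signal_setup(struct regs *regs)
    {
        regs_set_return_msr(regs, regs->msr &
                            ~(MSR_FP | MSR_FE0 | MSR_FE1 | MSR_VEC | MSR_VSX));
    }

    /* sigreturn: re-enable MSR_VEC only if the frame held vector state. */
    static void signal_return(struct regs *regs, bool frame_had_vec)
    {
        if (frame_had_vec)
            regs_set_return_msr(regs, regs->msr | MSR_VEC);
    }

    int main(void)
    {
        struct regs r = { .msr = MSR_FP | MSR_VEC };
        signal_setup(&r);
        signal_return(&r, true);
        return (r.msr & MSR_VEC) ? 0 : 1;
    }
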
msr_mask = MSR_VEC;
msr |= MSR_VEC;
msr_needed |= MSR_VEC;
ext_msr = MSR_VEC;
kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);
kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);
(MSR_FP | MSR_VEC | MSR_VSX);
else if (ext_diff == MSR_VEC)
msr |= MSR_FP | MSR_VEC;
if (msr & MSR_VEC) {
if (current->thread.regs->msr & MSR_VEC)
msr = MSR_FP | MSR_VEC | MSR_VSX;
if (msr & MSR_VEC) {
if (lost_ext & MSR_VEC) {
if (!(current->thread.regs->msr & MSR_VEC)) {
current->thread.regs->msr |= MSR_VEC;
if (current->thread.regs->msr & MSR_VEC)
MSR_VEC);
if (!(kvmppc_get_msr(vcpu) & MSR_VEC)) {
vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VEC);
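
The kvmppc_giveup_ext() and lost_ext lines are KVM's facility accounting for Book3S PR guests: when the host needs a unit, or the vcpu is put, the guest's FP/VMX/VSX register state is flushed out and the facility is marked as no longer guest-owned; a mask of "lost" facilities is then used to reload state before the guest runs again. An illustrative model of that mask discipline (the struct and helper names are made up, not the KVM API):

    #include <stdint.h>

    #define MSR_FP  (1UL << 13)
    #define MSR_VEC (1UL << 25)
    #define MSR_VSX (1UL << 23)

    struct vcpu_ext {
        uint64_t guest_owned_ext;    /* facilities currently guest-owned */
    };

    /* Modeled on kvmppc_giveup_ext(): save the unit's registers (elided
     * here) and drop ownership of the requested facilities. */
    static void giveup_ext(struct vcpu_ext *v, uint64_t msr)
    {
        msr &= MSR_FP | MSR_VEC | MSR_VSX;
        v->guest_owned_ext &= ~msr;
    }

    /* Facilities the guest owned but the host MSR no longer has enabled
     * must be reloaded before the next guest entry. */
    static uint64_t lost_ext(const struct vcpu_ext *v, uint64_t host_msr)
    {
        return v->guest_owned_ext & ~host_msr;
    }

    int main(void)
    {
        struct vcpu_ext v = { .guest_owned_ext = MSR_FP | MSR_VEC };
        giveup_ext(&v, MSR_VEC);
        return lost_ext(&v, 0) == MSR_FP ? 0 : 1;
    }
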
if (regs->msr & MSR_VEC) {
if (!user_mode(regs) && !(regs->msr & MSR_VEC))
msrbit = MSR_VEC;
if (!user_mode(regs) && !(regs->msr & MSR_VEC))
msrbit = MSR_VEC;
if (regs->msr & MSR_VEC)
if (regs->msr & MSR_VEC)
if (regs->msr & MSR_VEC) {
regs->msr |= MSR_VEC;
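
The final lines check whether the faulting or emulated context actually has MSR_VEC enabled: instruction-emulation code refuses to touch the vector unit when the bit is off in kernel mode, while the lazy-load path loads the thread's saved vector state and sets MSR_VEC in the resumed MSR so the task continues with the unit live. A last sketch of that lazy-enable step, with made-up names for the handler and the state load:

    #include <stdbool.h>
    #include <stdint.h>

    #define MSR_VEC (1UL << 25)

    struct regs {
        uint64_t msr;
        bool user;    /* user_mode(regs) stand-in */
    };

    static void load_thread_vec_state(void)
    {
        /* ... the real path copies thread.vr_state into the VMX regs ... */
    }

    /* Made-up handler name; mirrors the pattern above: only user mode may
     * fault vector state in lazily, and the resumed MSR gains MSR_VEC. */
    static int altivec_unavailable(struct regs *regs)
    {
        if (!regs->user && !(regs->msr & MSR_VEC))
            return -1;    /* kernel touched VMX without enabling it: a bug */
        load_thread_vec_state();
        regs->msr |= MSR_VEC;    /* resume with the unit available */
        return 0;
    }

    int main(void)
    {
        struct regs r = { .msr = 0, .user = true };
        return altivec_unavailable(&r);
    }
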