MSR_TS_MASK
/*
 * MSR[TS] (transaction state) predicate: true when every bit of the TS
 * field is set.  MSR_TS_T and MSR_TS_S are distinct single encodings
 * under MSR_TS_MASK, so the all-bits-set value matches neither valid
 * state -- it is the architecturally reserved encoding.
 * NOTE(review): assumes MSR_TS_MASK covers exactly the TS field bits;
 * the mask is defined elsewhere -- confirm against reg.h.
 */
#define MSR_TM_RESV(x) (((x) & MSR_TS_MASK) == MSR_TS_MASK) /* Reserved */
/* True when the MSR TS field equals MSR_TS_T, i.e. the CPU is in the
 * Transactional state. */
#define MSR_TM_TRANSACTIONAL(x) (((x) & MSR_TS_MASK) == MSR_TS_T)
/* True when the MSR TS field equals MSR_TS_S, i.e. the CPU is in the
 * Suspended transaction state. */
#define MSR_TM_SUSPENDED(x) (((x) & MSR_TS_MASK) == MSR_TS_S)
/* True when any TS bit is nonzero -- covers both the Transactional
 * (MSR_TS_T) and Suspended (MSR_TS_S) encodings, i.e. a transaction is
 * in progress in some form. */
#define MSR_TM_ACTIVE(x) (((x) & MSR_TS_MASK) != 0) /* Transaction active? */
regs_set_return_msr(regs, regs->msr & ~MSR_TS_MASK);
regs_set_return_msr(regs, regs->msr & ~MSR_TS_MASK);
regs_set_return_msr(regs, (regs->msr & ~MSR_TS_MASK) | (msr_hi & MSR_TS_MASK));
regs_set_return_msr(regs, regs->msr | (msr & MSR_TS_MASK));
regs_set_return_msr(regs, regs->msr & ~MSR_TS_MASK);
current->thread.regs->msr & ~MSR_TS_MASK);
(((guest_msr & MSR_TS_MASK) >> (MSR_TS_S_LG - 1))
guest_msr &= ~(MSR_TS_MASK);
guest_msr &= ~(MSR_TS_MASK);
(((guest_msr & MSR_TS_MASK) >> (MSR_TS_S_LG - 1))
if ((msr & MSR_TS_MASK) == MSR_TS_MASK)
msr &= ~MSR_TS_MASK;
new_msr |= msr & MSR_TS_MASK;
if (l2_regs.msr & MSR_TS_MASK)
if (WARN_ON_ONCE(vcpu->arch.shregs.msr & MSR_TS_MASK))
vcpu->arch.shregs.msr = saved_l1_regs.msr & ~MSR_TS_MASK;
if (l2_regs.msr & MSR_TS_MASK)
vcpu->arch.shregs.msr & MSR_TS_MASK)
msr = (msr & ~MSR_TS_MASK) | MSR_TS_T;
(((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 29);
msr = (msr & ~MSR_TS_MASK) | MSR_TS_T;
msr = (msr & ~MSR_TS_MASK) | MSR_TS_S;
(((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 29);
vcpu->arch.shregs.msr &= ~MSR_TS_MASK;
(((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 29);
vcpu->arch.shregs.msr &= ~MSR_TS_MASK; /* go to N state */
msr = (msr & ~MSR_TS_MASK) | MSR_TS_T;
vcpu->arch.shregs.msr = (msr & ~MSR_TS_MASK) | MSR_TS_T;
new_msr |= msr & MSR_TS_MASK;
MSR_TM | MSR_TS_MASK;
(vcpu->arch.shadow_srr1 & (MSR_TS_MASK)) !=
(old_msr & (MSR_TS_MASK)))) {
old_msr &= ~(MSR_TS_MASK);
old_msr |= (vcpu->arch.shadow_srr1 & (MSR_TS_MASK));