MSR_DE
#define MSR_SINGLESTEP (MSR_DE)
LOAD_REG_IMMEDIATE(r11, MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)); \
1: rlwinm r9,r9,0,~MSR_DE; /* clear DE in the DSRR1 value */ \
1: rlwinm r9,r9,0,~MSR_DE; /* clear DE in the CSRR1 value */ \
mtmsr(mfmsr() & ~MSR_DE);
regs_set_return_msr(linux_regs, linux_regs->msr | MSR_DE);
{MSR_DE, "DE"},
mtmsr(mfmsr() & ~MSR_DE);
regs_set_return_msr(regs, regs->msr & ~MSR_DE);
regs_set_return_msr(regs, regs->msr | MSR_DE);
regs_set_return_msr(regs, regs->msr | MSR_DE);
regs_set_return_msr(child->thread.regs, child->thread.regs->msr | MSR_DE);
regs_set_return_msr(regs, regs->msr | MSR_DE);
regs_set_return_msr(child->thread.regs, child->thread.regs->msr | MSR_DE);
regs_set_return_msr(child->thread.regs, child->thread.regs->msr | MSR_DE);
child->thread.regs->msr & ~MSR_DE);
regs_set_return_msr(regs, regs->msr & ~MSR_DE);
new_msr |= MSR_DE;
new_msr &= ~MSR_DE;
regs_set_return_msr(regs, regs->msr | MSR_DE);
regs_set_return_msr(regs, regs->msr & ~MSR_DE);
regs_set_return_msr(regs, regs->msr | MSR_DE);
regs_set_return_msr(regs, regs->msr & ~MSR_DE);
regs_set_return_msr(regs, regs->msr | MSR_DE);
if (prot_bitmap & MSR_DE)
if (prot_bitmap & MSR_DE)
kvm_guest_protect_msr(vcpu, MSR_DE, false);
kvm_guest_protect_msr(vcpu, MSR_DE, true);
vcpu->arch.shadow_msr &= ~MSR_DE;
vcpu->arch.shadow_msr |= vcpu->arch.shared->msr & MSR_DE;
vcpu->arch.shared->msr |= MSR_DE;
vcpu->arch.shadow_msr |= MSR_DE;
vcpu->arch.shared->msr &= ~MSR_DE;
msr_mask = MSR_CE | MSR_ME | MSR_DE;
msr_mask = MSR_CE | MSR_ME | MSR_DE;
allowed = vcpu->arch.shared->msr & MSR_DE;
if (dbsr && (vcpu->arch.shared->msr & MSR_DE) &&
if ((dbsr & DBSR_TIE) && !(vcpu->arch.shared->msr & MSR_DE))
mtmsr(msr_save|MSR_WE|MSR_EE|MSR_CE|MSR_DE);
mtmsr(msr_save|MSR_WE|MSR_EE|MSR_CE|MSR_DE);
regs_set_return_msr(regs, regs->msr | MSR_DE);
if (regs->msr & MSR_DE) {