EFER_LMA ("Long Mode Active", bit 10 of the EFER MSR) — occurrences across the x86 KVM code and related x86 sources:
if (!(efer & EFER_LMA))
if (!(efer & EFER_LMA))
if (efer & EFER_LMA)
if (efer & EFER_LMA) {
if (efer & EFER_LMA) {
if ((ctxt->mode != X86EMUL_MODE_PROT64) && (efer & EFER_LMA) &&
if (efer & EFER_LMA) {
ctxt->_eip = (efer & EFER_LMA) ? msr_data : (u32)msr_data;
*reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? msr_data :
if (efer & EFER_LMA)
if (efer & EFER_LMA)
if (efer & EFER_LMA)
if (efer & EFER_LMA) {
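
The checks above gate fast-system-call emulation (SYSCALL/SYSENTER/SYSRET) and related paths on whether long mode is active; the two ternaries show the key effect, namely that outside long mode the MSR-supplied RIP/RSP values are truncated to 32 bits. A standalone sketch of that selection, using a hypothetical helper name rather than the kernel's own function:

#include <stdint.h>
#include <stdio.h>

#define EFER_LMA (1ULL << 10)	/* EFER.LMA: long mode active */

/* Outside long mode the MSR-provided target is truncated to 32 bits,
 * mirroring the "(efer & EFER_LMA) ? msr_data : (u32)msr_data" pattern. */
static uint64_t sysenter_target(uint64_t efer, uint64_t msr_data)
{
	return (efer & EFER_LMA) ? msr_data : (uint32_t)msr_data;
}

int main(void)
{
	uint64_t tgt = 0xffffffff81000000ULL;

	printf("LMA set:   %#llx\n", (unsigned long long)sysenter_target(EFER_LMA, tgt));
	printf("LMA clear: %#llx\n", (unsigned long long)sysenter_target(0, tgt));
	return 0;
}
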
BUILD_MMU_ROLE_REGS_ACCESSOR(efer, lma, EFER_LMA);
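
The macro invocation above generates a small accessor that tests one EFER bit in a captured register snapshot. A standalone model of the token-pasting pattern; the macro and struct names here are illustrative, not the kernel's exact definitions:

#include <stdint.h>
#include <stdbool.h>

#define EFER_LMA (1ULL << 10)

struct regs_snapshot { uint64_t efer; };

/* Generate is_<reg>_<name>() style predicates for single flag bits. */
#define BUILD_REGS_ACCESSOR(reg, name, flag)				\
static inline bool is_##reg##_##name(const struct regs_snapshot *r)	\
{									\
	return !!(r->reg & (flag));					\
}

BUILD_REGS_ACCESSOR(efer, lma, EFER_LMA)
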
if (__kvm_emulate_msr_write(vcpu, MSR_EFER, smstate->efer & ~EFER_LMA))
vcpu->arch.efer |= EFER_LMA;
svm->vmcb->save.efer |= EFER_LMA | EFER_LME;
vcpu->arch.efer &= ~EFER_LMA;
svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME);
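
The four SVM lines above are the two halves of the long-mode transition taken when the guest toggles CR0.PG with EFER.LME set: enabling paging activates long mode (LMA set in both the vCPU state and the VMCB copy of EFER), and disabling paging deactivates it. A condensed, standalone model of that logic; struct and helper names are illustrative:

#include <stdint.h>
#include <stdbool.h>

#define EFER_LME   (1ULL << 8)	/* long mode enable */
#define EFER_LMA   (1ULL << 10)	/* long mode active */
#define X86_CR0_PG (1ULL << 31)

struct vcpu_model { uint64_t efer, cr0, vmcb_efer; };

/* CR0 write with EFER.LME set: PG 0->1 activates long mode, PG 1->0
 * deactivates it, and the VMCB copy of EFER is kept in sync. */
static void model_set_cr0(struct vcpu_model *v, uint64_t cr0)
{
	bool was_paging = v->cr0 & X86_CR0_PG;
	bool now_paging = cr0 & X86_CR0_PG;

	if (v->efer & EFER_LME) {
		if (!was_paging && now_paging) {
			v->efer |= EFER_LMA;
			v->vmcb_efer |= EFER_LMA | EFER_LME;
		}
		if (was_paging && !now_paging) {
			v->efer &= ~EFER_LMA;
			v->vmcb_efer &= ~(EFER_LMA | EFER_LME);
		}
	}
	v->cr0 = cr0;
}
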
if (!(efer & EFER_LMA))
return vmx->vcpu.arch.efer | (EFER_LMA | EFER_LME);
return vmx->vcpu.arch.efer & ~(EFER_LMA | EFER_LME);
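
The paired return statements above derive the EFER value a nested (L2) guest runs with. Assuming the surrounding logic follows the usual nested-VMX pattern, an explicit VM-entry EFER load wins; otherwise the "IA-32e mode guest" entry control decides whether LMA and LME are forced on or stripped relative to L1's EFER. A standalone model with illustrative parameter names:

#include <stdint.h>
#include <stdbool.h>

#define EFER_LME (1ULL << 8)
#define EFER_LMA (1ULL << 10)

/* Derive L2's EFER: an explicit VM-entry EFER load wins; otherwise the
 * IA-32e-mode entry control forces LMA|LME on or off relative to L1. */
static uint64_t calc_l2_efer(bool entry_loads_efer, uint64_t vmcs12_efer,
			     bool ia32e_mode, uint64_t l1_efer)
{
	if (entry_loads_efer)
		return vmcs12_efer;
	if (ia32e_mode)
		return l1_efer | (EFER_LMA | EFER_LME);
	return l1_efer & ~(EFER_LMA | EFER_LME);
}
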
if (guest_efer & EFER_LMA)
!!(vcpu->arch.efer & EFER_LMA)))
CC(ia32e != !!(vmcs12->host_ia32_efer & EFER_LMA)) ||
CC(ia32e != !!(vmcs12->guest_ia32_efer & EFER_LMA)) ||
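
The CC() lines above are VM-entry consistency checks: the relevant IA-32e control must agree with the LMA bit of the corresponding guest or host EFER field, or the nested VM-entry fails. The "ia32e != !!(efer & EFER_LMA)" idiom is just a boolean comparison; a minimal model:

#include <stdint.h>
#include <stdbool.h>

#define EFER_LMA (1ULL << 10)

/* VM-entry consistency: the IA-32e control and EFER.LMA must agree. */
static bool ia32e_matches_efer(bool ia32e, uint64_t efer)
{
	return ia32e == !!(efer & EFER_LMA);
}
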
vcpu->arch.efer |= (EFER_LMA | EFER_LME);
vcpu->arch.efer &= ~(EFER_LMA | EFER_LME);
vcpu->arch.efer = EFER_SCE | EFER_LME | EFER_LMA | EFER_NX;
ignore_bits |= EFER_LMA | EFER_LME;
if (guest_efer & EFER_LMA)
if (!(guest_efer & EFER_LMA))
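
The ignore_bits lines above belong to host/guest EFER switching: differences in LMA and LME never force an EFER switch (those transitions are handled by hardware), and SCE is ignored only while the guest's LMA is clear, since SYSCALL is a 64-bit-mode instruction on Intel. A standalone model of that computation, with an illustrative function name:

#include <stdint.h>

#define EFER_SCE (1ULL << 0)
#define EFER_LME (1ULL << 8)
#define EFER_LMA (1ULL << 10)

/* Bits whose host/guest difference does not require switching EFER:
 * LMA/LME are handled by hardware, and SCE is meaningless outside long
 * mode, so it is ignored only while the guest's LMA is clear. */
static uint64_t efer_ignore_bits(uint64_t guest_efer)
{
	uint64_t ignore = EFER_SCE | EFER_LMA | EFER_LME;

	if (guest_efer & EFER_LMA)
		ignore &= ~(uint64_t)EFER_SCE;
	return ignore;
}
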
if (efer & EFER_LMA)
if (KVM_BUG_ON(efer & EFER_LMA, vcpu->kvm))
vmx_set_efer(vcpu, vcpu->arch.efer | EFER_LMA);
vmx_set_efer(vcpu, vcpu->arch.efer & ~EFER_LMA);
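
The hits above are from the VMX EFER plumbing: LMA selects whether the "IA-32e mode guest" VM-entry control is set, and the enter/exit helpers simply rewrite EFER with LMA toggled. A minimal model of the entry-control side:

#include <stdint.h>
#include <stdbool.h>

#define EFER_LMA (1ULL << 10)

/* Model: the VM-entry "IA-32e mode guest" control mirrors EFER.LMA. */
static bool vmentry_ia32e_control(uint64_t efer)
{
	return !!(efer & EFER_LMA);
}
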
vcpu->arch.efer | (EFER_LMA | EFER_LME));
vcpu->arch.efer & ~(EFER_LMA | EFER_LME));
u64 __read_mostly efer_reserved_bits = ~((u64)(EFER_SCE | EFER_LME | EFER_LMA));
if (!(sregs->cr4 & X86_CR4_PAE) || !(sregs->efer & EFER_LMA))
if (sregs->efer & EFER_LMA || sregs->cs.l)
!(sregs2->efer & EFER_LMA);
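
The three sregs lines above validate user-supplied register state: a vCPU claiming 64-bit mode (EFER.LMA set, or a long-mode code segment) must also have CR4.PAE set, since long mode requires PAE paging; the trailing fragment appears to compute whether the vCPU uses PAE paging without long mode, which is when PDPTRs matter. A standalone model of the 64-bit consistency check:

#include <stdint.h>
#include <stdbool.h>

#define EFER_LMA    (1ULL << 10)
#define X86_CR4_PAE (1ULL << 5)

/* 64-bit state is self-consistent only if CR4.PAE and EFER.LMA are both
 * set whenever the vCPU claims long mode (LMA set or CS.L set). */
static bool sregs_64bit_consistent(uint64_t efer, uint64_t cr4, bool cs_l)
{
	if ((efer & EFER_LMA) || cs_l)
		return (cr4 & X86_CR4_PAE) && (efer & EFER_LMA);
	return true;
}
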
if (efer & (EFER_LME | EFER_LMA) &&
efer &= ~EFER_LMA;
efer |= vcpu->arch.efer & EFER_LMA;
return !!(vcpu->arch.efer & EFER_LMA);
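
The lines above show two invariants: a guest WRMSR to EFER cannot toggle LMA directly (the incoming bit is masked out and the current one re-inserted, since LMA only changes through CR0.PG transitions), and long-mode checks reduce to testing that single bit. A standalone model:

#include <stdint.h>
#include <stdbool.h>

#define EFER_LMA (1ULL << 10)

/* A guest write to EFER cannot flip LMA: drop the incoming bit and
 * carry the current one forward, as in the "efer &= ~EFER_LMA;
 * efer |= vcpu->arch.efer & EFER_LMA;" pair above. */
static uint64_t sanitize_efer_write(uint64_t cur_efer, uint64_t new_efer)
{
	new_efer &= ~EFER_LMA;
	new_efer |= cur_efer & EFER_LMA;
	return new_efer;
}

/* Long mode is active iff EFER.LMA is set. */
static bool is_long_mode_efer(uint64_t efer)
{
	return !!(efer & EFER_LMA);
}
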
trampoline_header->efer = efer & ~EFER_LMA;
sregs.efer |= (EFER_LME | EFER_LMA | EFER_NX);
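
The final two lines sit outside the core KVM paths: one strips LMA from an EFER value saved for the real-mode trampoline (LMA is a status bit that hardware manages, set when paging is enabled while LME is set, so the trampoline must start without it), and the selftest-style line builds sregs for a guest that begins execution directly in long mode. A model of both adjustments:

#include <stdint.h>

#define EFER_SCE (1ULL << 0)
#define EFER_LME (1ULL << 8)
#define EFER_LMA (1ULL << 10)
#define EFER_NX  (1ULL << 11)

/* Trampoline-style save: LMA is re-established by hardware once paging
 * is enabled under LME, so it is stripped from the saved value. */
static uint64_t trampoline_efer(uint64_t efer)
{
	return efer & ~EFER_LMA;
}

/* Selftest-style setup: start the guest already in long mode. */
static uint64_t selftest_efer(uint64_t efer)
{
	return efer | (EFER_LME | EFER_LMA | EFER_NX);
}
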