EFER_LME — usage sites in KVM x86 (arch/x86/kvm and the KVM selftests)
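EFER.LME ("long-mode enable") is bit 8 of the IA32_EFER MSR (0xC0000080). EFER.LMA ("long-mode active", bit 10) is the read-only status bit that hardware sets once paging is enabled while LME is set, which is why nearly every site below handles the two together. The bit definitions, as in arch/x86/include/asm/msr-index.h:

#define _EFER_SCE	0	/* SYSCALL/SYSRET */
#define _EFER_LME	8	/* Long mode enable */
#define _EFER_LMA	10	/* Long mode active (read-only) */
#define _EFER_NX	11	/* No execute enable */

#define EFER_SCE	(1 << _EFER_SCE)
#define EFER_LME	(1 << _EFER_LME)
#define EFER_LMA	(1 << _EFER_LMA)
#define EFER_NX		(1 << _EFER_NX)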
#define KVM_MMU_EFER_ROLE_BITS (EFER_LME | EFER_NX)
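This MMU define names the EFER bits that feed the MMU role: LME selects the paging format and NX changes permission semantics, so flipping either must force a new MMU context. A minimal illustration of what "role bits" means here, with a hypothetical helper name:

/* Hypothetical helper, not in the kernel: only LME/NX changes in a new
 * EFER value can invalidate the cached MMU role.
 */
static inline bool efer_changes_mmu_role(u64 old_efer, u64 new_efer)
{
	return (old_efer ^ new_efer) & KVM_MMU_EFER_ROLE_BITS;
}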
if ((save->efer & EFER_LME) && (save->cr0 & X86_CR0_PG)) {
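This nested-SVM state check enforces the architectural rule for activating long mode. A minimal sketch of that rule (simplified; the kernel's check also validates CR3):

/* Entering long mode (EFER.LME=1 with CR0.PG=1) is only legal when
 * CR4.PAE=1 and CR0.PE=1; any other combination must be rejected.
 */
static bool long_mode_entry_is_legal(u64 efer, unsigned long cr0, unsigned long cr4)
{
	if ((efer & EFER_LME) && (cr0 & X86_CR0_PG))
		return (cr4 & X86_CR4_PAE) && (cr0 & X86_CR0_PE);
	return true;
}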
if (vcpu->arch.efer & EFER_LME) {
svm->vmcb->save.efer |= EFER_LMA | EFER_LME;
svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME);
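These two SVM branches are KVM's half of the LMA rule: LMA tracks LME && CR0.PG and is re-evaluated whenever the guest toggles CR0.PG. A minimal sketch of the invariant they maintain (simplified, illustrative name):

/* Sketch only: recompute LMA from the new EFER/CR0 pair. */
static u64 efer_fixup_lma(u64 efer, unsigned long cr0)
{
	if ((efer & EFER_LME) && (cr0 & X86_CR0_PG))
		efer |= EFER_LMA;
	else
		efer &= ~EFER_LMA;
	return efer;
}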
efer &= ~EFER_LME;
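This strip belongs to SVM's shadow-paging path: there hardware runs with CR0.PG=1 regardless of the guest's CR0, so a guest LME that has not yet taken effect (LMA still clear) is masked out of the EFER value programmed into hardware; otherwise hardware would treat the vCPU as already in long mode.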
return vmx->vcpu.arch.efer | (EFER_LMA | EFER_LME);
return vmx->vcpu.arch.efer & ~(EFER_LMA | EFER_LME);
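These two returns are the nested-VMX entry-side EFER calculation: if vmcs12 does not load EFER explicitly, L2's LMA/LME are forced to match the "IA-32e mode guest" VM-entry control. A simplified sketch of that pattern (illustrative names):

static u64 calc_l2_efer(u64 l1_efer, bool load_guest_efer,
			u64 vmcs12_guest_efer, bool ia32e_entry)
{
	if (load_guest_efer)		/* VM_ENTRY_LOAD_IA32_EFER set */
		return vmcs12_guest_efer;
	return ia32e_entry ? (l1_efer | (EFER_LMA | EFER_LME))
			   : (l1_efer & ~(EFER_LMA | EFER_LME));
}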
CC(ia32e != !!(vmcs12->host_ia32_efer & EFER_LME)))
ia32e != !!(vmcs12->guest_ia32_efer & EFER_LME))))
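These CC() checks implement the SDM's consistency rules: an EFER value loaded on VM-entry or VM-exit must agree with the corresponding IA-32e mode control in both LMA and LME. A minimal sketch (simplified; in the full guest-side rule the LME comparison applies when CR0.PG=1):

static bool efer_matches_ia32e(u64 efer, bool ia32e)
{
	return ia32e == !!(efer & EFER_LMA) &&
	       ia32e == !!(efer & EFER_LME);
}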
vcpu->arch.efer |= (EFER_LMA | EFER_LME);
vcpu->arch.efer &= ~(EFER_LMA | EFER_LME);
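The exit-side mirror of the entry calculation above: when vmcs12 does not load the host EFER on VM-exit, LMA and LME are forced to match the host address-space-size exit control, using exactly the or-in/clear pattern shown.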
vcpu->arch.efer = EFER_SCE | EFER_LME | EFER_LMA | EFER_NX;
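A one-shot 64-bit initialization: this form appears where a guest's initial state is already long mode (no real-mode boot), so SCE, LME, LMA and NX are seeded together rather than being built up by guest writes.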
ignore_bits |= EFER_LMA | EFER_LME;
guest_efer &= ~EFER_LME;
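On VMX, hardware itself switches LMA and LME across VM-entry/VM-exit, so those bits are deliberately ignored when deciding whether guest and host need distinct EFER images; and, as on SVM, a guest LME without LMA is dropped before the value reaches hardware. A sketch of the comparison the ignore mask feeds (illustrative name):

static bool need_efer_switch(u64 guest_efer, u64 host_efer, u64 ignore_bits)
{
	/* Only differences hardware won't fix up on its own matter. */
	return ((guest_efer ^ host_efer) & ~ignore_bits) != 0;
}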
if (vcpu->arch.efer & EFER_LME) {
vcpu->arch.efer | (EFER_LMA | EFER_LME));
vcpu->arch.efer & ~(EFER_LMA | EFER_LME));
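The VMX counterpart of the SVM CR0 branch earlier: with LME set, toggling CR0.PG moves the vCPU into or out of long mode, and the two continuation fragments above show the new EFER being computed with LMA|LME ored in on entry to long mode and cleared on exit.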
u64 __read_mostly efer_reserved_bits = ~((u64)(EFER_SCE | EFER_LME | EFER_LMA));
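The reserved-bit mask starts out with everything except SCE, LME and LMA treated as reserved; further bits are opened up during init once support is established. x86.c pairs the variable with a small helper for that, roughly:

void kvm_enable_efer_bits(u64 mask)
{
	efer_reserved_bits &= ~mask;
}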
if ((vcpu->arch.efer & EFER_LME) && !is_paging(vcpu) &&
if (!(vcpu->arch.efer & EFER_LME) && (cr0 & X86_CR0_PG) &&
if ((sregs->efer & EFER_LME) && (sregs->cr0 & X86_CR0_PG)) {
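These three x86.c sites apply the same validity rules at different boundaries: the CR0-write path refuses to activate paging with LME set unless CR4.PAE is on (and CS is not already a 64-bit segment), the !LME branch handles the guest enabling legacy (non-long-mode) paging, and the sregs check repeats the long-mode rule for state supplied by userspace via KVM_SET_SREGS.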
if (efer & (EFER_LME | EFER_LMA) &&
(vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
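The WRMSR path enforces that LME cannot change while paging is enabled, which architecturally raises #GP(0); LMA then only ever flips through the CR0.PG transition paths above. A minimal sketch of the rule (illustrative name):

static bool efer_lme_toggle_is_legal(u64 old_efer, u64 new_efer, bool paging)
{
	/* EFER.LME must not change while CR0.PG = 1. */
	return !paging || !((old_efer ^ new_efer) & EFER_LME);
}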
sregs.efer |= (EFER_LME | EFER_LMA | EFER_NX);
(run->s.regs.sregs.efer & EFER_LME),
!!(run->s.regs.sregs.efer & EFER_LME));
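The selftest lines close the loop from userspace: the selftest library boots its x86 guests directly in long mode, so the synthetic sregs are seeded with LME|LMA|NX, and the two comparison fragments above check that LME in the synchronized sregs (run->s.regs.sregs) matches the expected guest mode.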