VCPU_SREG_GS
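For context, VCPU_SREG_GS is one index in KVM's segment-register enum (arch/x86/include/asm/kvm_host.h). The ordering matters for several of the hits below: because the six user-visible segment registers sort before TR and LDTR, a check like `seg <= VCPU_SREG_GS` means "any of ES/CS/SS/DS/FS/GS".

enum {
	VCPU_SREG_ES,
	VCPU_SREG_CS,
	VCPU_SREG_SS,
	VCPU_SREG_DS,
	VCPU_SREG_FS,
	VCPU_SREG_GS,
	VCPU_SREG_TR,	/* TR and LDTR deliberately sort after GS */
	VCPU_SREG_LDTR,
};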
} else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
if (seg <= VCPU_SREG_GS && !seg_desc.s) {
tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
				X86_TRANSFER_TASK_SWITCH, NULL);
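The emulate.c hits above trace the 32-bit TSS task-switch path: save_state_to_tss32() snapshots the GS selector, and load_state_from_tss32() writes all selectors back before loading any descriptor, since per the SDM selectors are loaded first so a fault during descriptor loading still sees consistent selector state. A compile-alone sketch of that ordering; the types and helper names are mocks for illustration, and the real code spells each segment out rather than looping:

#include <stdint.h>

enum { SEG_ES, SEG_CS, SEG_SS, SEG_DS, SEG_FS, SEG_GS, SEG_NR };

struct mock_ctxt { uint16_t sel[SEG_NR]; };

static void set_selector(struct mock_ctxt *c, uint16_t v, int seg)
{
	c->sel[seg] = v;			/* selector register only */
}

static int load_descriptor(struct mock_ctxt *c, uint16_t v, int seg)
{
	(void)c; (void)v; (void)seg;
	return 0;				/* real code validates the GDT/LDT entry and can fault */
}

static int load_from_tss32(struct mock_ctxt *c, const uint16_t tss_sel[SEG_NR])
{
	int seg, ret;

	for (seg = SEG_ES; seg <= SEG_GS; seg++)	/* selectors first */
		set_selector(c, tss_sel[seg], seg);

	for (seg = SEG_ES; seg <= SEG_GS; seg++) {	/* then descriptors */
		ret = load_descriptor(c, tss_sel[seg], seg);
		if (ret)
			return ret;
	}
	return 0;
}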
if (segment > VCPU_SREG_GS &&
if (ctxt->modrm_reg > VCPU_SREG_GS)
if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
op->val = VCPU_SREG_GS;
ctxt->seg_override = VCPU_SREG_GS;
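The two modrm_reg tests above come from the MOV-to/from-segment handlers and encode fixed x86 rules: segment encodings above GS (6 and 7) are reserved and raise #UD, and MOV may read CS but never write it. The other two hits are decode plumbing, where the operand decoder materializes the GS index and the 0x65 prefix records a GS segment override. A standalone sketch of the two validity checks (helper names are illustrative, not KVM's):

#include <stdbool.h>

enum { SREG_ES, SREG_CS, SREG_SS, SREG_DS, SREG_FS, SREG_GS };

/* MOV Sreg, r/m: loading CS this way is undefined (#UD), and so is
 * any modrm_reg encoding above GS. */
static bool mov_to_sreg_ok(int modrm_reg)
{
	return modrm_reg != SREG_CS && modrm_reg <= SREG_GS;
}

/* MOV r/m, Sreg: reading CS is allowed; only reserved encodings fault. */
static bool mov_from_sreg_ok(int modrm_reg)
{
	return modrm_reg <= SREG_GS;
}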
enter_smm_save_seg_32(vcpu, &smram->gs, &smram->gs_sel, VCPU_SREG_GS);
enter_smm_save_seg_64(vcpu, &smram->gs, VCPU_SREG_GS);
kvm_set_segment(vcpu, &ds, VCPU_SREG_GS);
rsm_load_seg_32(vcpu, &smstate->gs, smstate->gs_sel, VCPU_SREG_GS);
rsm_load_seg_64(vcpu, &smstate->gs, VCPU_SREG_GS);
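The smm.c hits cover both directions of SMM: enter_smm_save_seg_32/64() capture GS into the SMRAM state image (the 32-bit layout keeps selector and descriptor in separate fields, the 64-bit one packs them), the data segments are then all reset to one flat descriptor, which is why the same `ds` template is passed for GS, and rsm_load_seg_32/64() restore from the image on RSM. A minimal sketch of the flat reset, assuming a mock segment type rather than struct kvm_segment:

#include <stdint.h>

struct mock_segment {
	uint64_t base;
	uint32_t limit;
	uint16_t selector;
	uint8_t  type, s, dpl, present, g;
};

/* On SMM entry every data segment gets the same flat 4 GiB segment;
 * indices follow the VCPU_SREG_* order (ES..GS). */
static void smm_reset_data_segs(struct mock_segment seg[6])
{
	struct mock_segment ds = {
		.base = 0, .limit = 0xffffffff, .selector = 0,
		.type = 3,	/* read/write data, accessed */
		.s = 1, .dpl = 0, .present = 1, .g = 1,
	};

	seg[0] = ds;	/* ES */
	seg[2] = ds;	/* SS */
	seg[3] = ds;	/* DS */
	seg[4] = ds;	/* FS */
	seg[5] = ds;	/* GS */
}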
case VCPU_SREG_GS: return &save01->gs;
case VCPU_SREG_GS:
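The svm.c `case` above is from the helper that maps a VCPU_SREG_* index to its vmcb_seg field; note that GS resolves into the vmcb01 save area rather than the current vmcb, since KVM tracks FS/GS/TR/LDTR there (they are handled via VMLOAD/VMSAVE against vmcb01). A sketch of that dispatch shape with mock, trimmed struct layouts:

#include <stddef.h>

struct vmcb_seg { unsigned short selector; unsigned short attrib;
		  unsigned int limit; unsigned long base; };
struct save_area { struct vmcb_seg es, cs, ss, ds, fs, gs, tr, ldtr; };

enum { VCPU_SREG_ES, VCPU_SREG_CS, VCPU_SREG_SS, VCPU_SREG_DS,
       VCPU_SREG_FS, VCPU_SREG_GS, VCPU_SREG_TR, VCPU_SREG_LDTR };

static struct vmcb_seg *seg_ptr(struct save_area *save,
				struct save_area *save01, int seg)
{
	switch (seg) {
	case VCPU_SREG_ES:   return &save->es;
	case VCPU_SREG_CS:   return &save->cs;
	case VCPU_SREG_SS:   return &save->ss;
	case VCPU_SREG_DS:   return &save->ds;
	case VCPU_SREG_FS:   return &save01->fs;	/* kept in vmcb01 */
	case VCPU_SREG_GS:   return &save01->gs;	/* kept in vmcb01 */
	case VCPU_SREG_TR:   return &save01->tr;
	case VCPU_SREG_LDTR: return &save01->ldtr;
	}
	return NULL;
}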
__vmx_set_segment(vcpu, &seg, VCPU_SREG_GS);
if (seg_reg == VCPU_SREG_FS || seg_reg == VCPU_SREG_GS)
vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS);
fix_pmode_seg(vcpu, VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]);
vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS);
fix_rmode_seg(VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]);
if (!rmode_segment_valid(vcpu, VCPU_SREG_GS))
if (!data_segment_valid(vcpu, VCPU_SREG_GS))
seg_setup(VCPU_SREG_GS);
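The vmx.c hits are the real-mode emulation dance on hardware without "unrestricted guest": segments are cached in vmx->rmode.segs across the pmode/rmode transitions and fixed up on each side, and the guest-state validity checks insist that in real mode every segment still looks like a classic real-mode segment. A sketch of that last invariant, assuming a mock segment type; the 0xf3 access-rights value corresponds to a present, DPL-3, read/write expand-up data segment:

#include <stdbool.h>
#include <stdint.h>

struct mock_segment {
	uint64_t base;
	uint32_t limit;
	uint32_t ar;		/* packed access rights */
	uint16_t selector;
};

/* Shape of an rmode_segment_valid()-style check: base, limit and
 * attributes must all be derivable from the 16-bit selector. */
static bool looks_like_rmode_seg(const struct mock_segment *s)
{
	if (s->base != (uint64_t)s->selector << 4)
		return false;		/* base == selector * 16 */
	if (s->limit != 0xffff)
		return false;		/* 64 KiB limit */
	return s->ar == 0xf3;
}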
kvm_get_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
kvm_set_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
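Finally, the x86.c hits are the KVM_GET_SREGS/KVM_SET_SREGS plumbing, copying each architectural segment between the vcpu and the userspace-visible struct kvm_sregs. A minimal userspace round-trip over that same state; error handling is omitted and it needs a Linux host with /dev/kvm and the kvm uapi headers:

#include <fcntl.h>
#include <linux/kvm.h>
#include <stdio.h>
#include <sys/ioctl.h>

int main(void)
{
	int kvm  = open("/dev/kvm", O_RDWR);
	int vm   = ioctl(kvm, KVM_CREATE_VM, 0);
	int vcpu = ioctl(vm, KVM_CREATE_VCPU, 0);
	struct kvm_sregs sregs;

	/* read side ends in kvm_get_segment(vcpu, &sregs->gs, VCPU_SREG_GS) */
	ioctl(vcpu, KVM_GET_SREGS, &sregs);
	printf("GS: selector=%#x base=%#llx limit=%#x\n",
	       sregs.gs.selector,
	       (unsigned long long)sregs.gs.base, sregs.gs.limit);

	/* write side goes through kvm_set_segment() */
	sregs.gs.base = 0x10000;	/* arbitrary demo value */
	ioctl(vcpu, KVM_SET_SREGS, &sregs);
	return 0;
}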