Call sites of to_tdx(vcpu)
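Every line in this listing converts a generic struct kvm_vcpu pointer into the TDX-specific vCPU structure via to_tdx(). The helper's definition is not part of the listing itself; below is a minimal sketch assuming the usual KVM embedding pattern, in which struct vcpu_tdx contains the generic vCPU and container_of() recovers the outer structure. The field list shown is illustrative, not the real layout.

/*
 * Sketch, assuming the common KVM container_of() pattern. Only fields
 * referenced by the hits below are suggested here.
 */
struct vcpu_tdx {
	struct kvm_vcpu vcpu;
	/* ... TDX state such as vp, vp_enter_ret, vp_enter_args ... */
};

static inline struct vcpu_tdx *to_tdx(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_tdx, vcpu);
}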
u64 vp_enter_ret = to_tdx(vcpu)->vp_enter_ret;
struct vcpu_tdx *tdx = to_tdx(vcpu);
td_management_write8(to_tdx(vcpu), TD_VCPU_PEND_NMI, 1);
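The TD_VCPU_PEND_NMI write is how KVM requests NMI injection for a TDX guest: it cannot modify guest interrupt state directly, so it sets a management field that the TDX module consumes on the next TD entry. A hedged sketch of such a call site; the function name and the stat update are assumptions:

/*
 * Sketch: ask the TDX module to inject an NMI on the next TD entry.
 * KVM's involvement ends once PEND_NMI is set.
 */
static void tdx_inject_nmi_sketch(struct kvm_vcpu *vcpu)
{
	++vcpu->stat.nmi_injections;
	td_management_write8(to_tdx(vcpu), TD_VCPU_PEND_NMI, 1);
}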
kvm_rax_write(vcpu, to_tdx(vcpu)->vp_enter_args.r10);
kvm_rbx_write(vcpu, to_tdx(vcpu)->vp_enter_args.r11);
kvm_rcx_write(vcpu, to_tdx(vcpu)->vp_enter_args.r12);
kvm_rdx_write(vcpu, to_tdx(vcpu)->vp_enter_args.r13);
kvm_rsi_write(vcpu, to_tdx(vcpu)->vp_enter_args.r14);
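These five writes copy TDVMCALL arguments, which cross the TDH.VP.ENTER boundary in the r10–r14 slots of vp_enter_args, into KVM's cached general-purpose registers so common code can consume them through the usual accessors. A sketch that folds them into one helper and caches the to_tdx() conversion once; the helper name is an assumption:

/*
 * Sketch: mirror TDVMCALL arguments into KVM's GPR cache.
 */
static void tdx_mirror_tdvmcall_args(struct kvm_vcpu *vcpu)
{
	struct vcpu_tdx *tdx = to_tdx(vcpu);

	kvm_rax_write(vcpu, tdx->vp_enter_args.r10);
	kvm_rbx_write(vcpu, tdx->vp_enter_args.r11);
	kvm_rcx_write(vcpu, tdx->vp_enter_args.r12);
	kvm_rdx_write(vcpu, tdx->vp_enter_args.r13);
	kvm_rsi_write(vcpu, tdx->vp_enter_args.r14);
}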
struct vcpu_tdx *tdx = to_tdx(vcpu);	/* identical declaration, repeated across ten consecutive hits */
td_vmcs_write64(to_tdx(vcpu), SHARED_EPT_POINTER, root_hpa);
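This write installs a new root for the shared EPT. Under TDX, KVM manages only the shared half of the guest physical address space; the private half is mapped by the TDX module, which is why only SHARED_EPT_POINTER is touched. A sketch of a plausible call site; the name and signature are assumptions:

/*
 * Sketch: point the shared EPT at the new root page table.
 */
static void tdx_load_mmu_pgd_sketch(struct kvm_vcpu *vcpu, hpa_t root_hpa)
{
	td_vmcs_write64(to_tdx(vcpu), SHARED_EPT_POINTER, root_hpa);
}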
struct vcpu_tdx *tdx = to_tdx(vcpu);
u64 eeq_type = to_tdx(vcpu)->ext_exit_qualification & TDX_EXT_EXIT_QUAL_TYPE_MASK;
gpa_t gpa = to_tdx(vcpu)->exit_gpa;
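These two reads recover saved exit information: ext_exit_qualification carries a TYPE field in its extended exit qualification, and exit_gpa holds the faulting guest physical address. A hedged sketch of a decode helper built from the same two accesses; the helper itself is hypothetical:

/*
 * Sketch: extract the extended-exit-qualification type and the
 * faulting GPA for an EPT-violation-style exit.
 */
static gpa_t tdx_fault_info_sketch(struct kvm_vcpu *vcpu, u64 *eeq_type)
{
	struct vcpu_tdx *tdx = to_tdx(vcpu);

	*eeq_type = tdx->ext_exit_qualification & TDX_EXT_EXIT_QUAL_TYPE_MASK;
	return tdx->exit_gpa;
}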
struct vcpu_tdx *tdx = to_tdx(vcpu);
struct vcpu_tdx *tdx = to_tdx(vcpu);
return to_tdx(vcpu)->vp_enter_args.r10;
return to_tdx(vcpu)->vp_enter_args.r11;
to_tdx(vcpu)->vp_enter_args.r10 = val;
to_tdx(vcpu)->vp_enter_args.r11 = val;
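The four lines above are the bodies of tiny accessors around the r10/r11 slots of the TDVMCALL argument area: read on exit to decode the call, written before re-entry to report results back to the guest. A sketch of the read/write pair for r10 (r11 is analogous); the wrapper names are assumptions:

/*
 * Sketch: accessor pair for the r10 TDVMCALL slot.
 */
static __always_inline unsigned long tdvmcall_r10_read(struct kvm_vcpu *vcpu)
{
	return to_tdx(vcpu)->vp_enter_args.r10;
}

static __always_inline void tdvmcall_r10_write(struct kvm_vcpu *vcpu,
					       unsigned long val)
{
	to_tdx(vcpu)->vp_enter_args.r10 = val;
}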
list_del(&to_tdx(vcpu)->cpu_list);
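The list_del() removes the vCPU from a per-physical-CPU list, presumably the list of TDX vCPUs whose state is cached on that CPU, when the vCPU is disassociated from it. A sketch; the vcpu->cpu reset and the caller's locking context are assumptions:

/*
 * Sketch: detach the vCPU from the pCPU it was loaded on.
 */
static void tdx_disassociate_vp_sketch(struct kvm_vcpu *vcpu)
{
	list_del(&to_tdx(vcpu)->cpu_list);
	vcpu->cpu = -1;
}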
struct vcpu_tdx *tdx = to_tdx(vcpu);
struct vcpu_tdx *tdx = to_tdx(vcpu);
struct vcpu_tdx *tdx = to_tdx(vcpu);
if (to_tdx(vcpu)->state != VCPU_TD_STATE_UNINITIALIZED) {
err = tdh_vp_flush(&to_tdx(vcpu)->vp);
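These two lines gate teardown work on vCPU state: a vCPU still in VCPU_TD_STATE_UNINITIALIZED has no TDX-module context, so tdh_vp_flush() is issued only once initialization has happened. A sketch with illustrative error handling:

/*
 * Sketch: flush the vCPU's TDX-module state before freeing it.
 */
static void tdx_flush_vp_sketch(struct kvm_vcpu *vcpu)
{
	u64 err;

	if (to_tdx(vcpu)->state != VCPU_TD_STATE_UNINITIALIZED) {
		err = tdh_vp_flush(&to_tdx(vcpu)->vp);
		if (err)
			pr_err("tdh_vp_flush failed: 0x%llx\n", err);
	}
}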
struct vcpu_tdx *tdx = to_tdx(vcpu);
struct vcpu_tdx *tdx = to_tdx(vcpu);
!to_tdx(vcpu)->vp_enter_args.r12;
to_tdx(vcpu)->vp_enter_args.r12)
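The r12 tests come from interrupt handling: KVM cannot read a TDX guest's RFLAGS, so the guest reports whether interrupts were blocked in r12 of a HLT TDVMCALL, and KVM otherwise assumes delivery is allowed. A sketch, with the exit-classification helper left explicitly hypothetical:

/*
 * Sketch: decide whether an interrupt may be delivered to the guest.
 * last_exit_was_hlt_tdvmcall() is a hypothetical helper.
 */
static bool tdx_interrupt_allowed_sketch(struct kvm_vcpu *vcpu)
{
	if (!last_exit_was_hlt_tdvmcall(vcpu))	/* hypothetical */
		return true;

	return !to_tdx(vcpu)->vp_enter_args.r12;
}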
td_state_non_arch_read64(to_tdx(vcpu), TD_VCPU_STATE_DETAILS_NON_ARCH);
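This read fetches non-architectural vCPU state details from the TDX module, for example to learn whether an interrupt is already pending when deciding how to handle a halted vCPU. A sketch; the bit layout is an assumption:

/*
 * Sketch: query the TDX module's vCPU state details. The bit position
 * of the "interrupt pending" flag is illustrative only.
 */
static bool tdx_intr_pending_sketch(struct kvm_vcpu *vcpu)
{
	u64 details = td_state_non_arch_read64(to_tdx(vcpu),
					       TD_VCPU_STATE_DETAILS_NON_ARCH);

	return details & BIT_ULL(0);	/* assumed flag position */
}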
struct vcpu_tdx *tdx = to_tdx(vcpu);
if (unlikely(to_tdx(vcpu)->state != VCPU_TD_STATE_INITIALIZED ||
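The unlikely() check gates guest entry on vCPU state; the truncated second condition of the hit above is not reproduced here. A sketch of such a pre-run gate; the positive-return convention is an assumption:

/*
 * Sketch: refuse to enter the guest until the vCPU is initialized.
 */
static int tdx_vcpu_pre_run_sketch(struct kvm_vcpu *vcpu)
{
	if (unlikely(to_tdx(vcpu)->state != VCPU_TD_STATE_INITIALIZED))
		return -EINVAL;

	return 1;	/* assumed: positive means "proceed to enter" */
}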
struct vcpu_tdx *tdx = to_tdx(vcpu);
struct vcpu_tdx *tdx = to_tdx(vcpu);
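Taken together, the dominant idiom in this listing is to call to_tdx(vcpu) once at function entry and cache the result in a local tdx pointer. The call sites that instead repeat to_tdx(vcpu) inline for every field access are the natural candidates for the same cleanup: the conversion is a constant-offset cast, so caching it costs nothing and shortens every dereference chain.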