kvm_tdx
struct kvm_tdx *kvm_tdx = to_kvm_tdx(vcpu->kvm);
if (kvm_host.xcr0 != (kvm_tdx->xfam & kvm_caps.supported_xcr0))
if (kvm_host.xss != (kvm_tdx->xfam & kvm_caps.supported_xss))
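/*
 * Hedged sketch (illustrative, not verbatim kernel code) of how the two
 * xfam comparisons above are typically used: after exiting a TD, the TDX
 * module may leave guest extended-state controls loaded, so the host's
 * XCR0/XSS are restored only when they differ from what the TD's xfam
 * allows.  The function name and the xsetbv()/wrmsrl() calls are
 * assumptions.
 */
static void tdx_restore_host_xsave_state_sketch(struct kvm_vcpu *vcpu)
{
	struct kvm_tdx *kvm_tdx = to_kvm_tdx(vcpu->kvm);

	if (kvm_host.xcr0 != (kvm_tdx->xfam & kvm_caps.supported_xcr0))
		xsetbv(XCR_XFEATURE_ENABLED_MASK, kvm_host.xcr0);
	if (kvm_host.xss != (kvm_tdx->xfam & kvm_caps.supported_xss))
		wrmsrl(MSR_IA32_XSS, kvm_host.xss);
}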
struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
if (KVM_BUG_ON(!kvm_tdx->page_add_src, kvm))
err = tdh_mem_page_add(&kvm_tdx->td, gpa, pfn_to_page(pfn),
kvm_tdx->page_add_src, &entry, &level_state);
struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
err = tdh_mem_page_aug(&kvm_tdx->td, gpa, tdx_level, page, &entry, &level_state);
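/*
 * Hedged sketch: the two SEAMCALL wrappers above are the two ways private
 * memory enters a TD.  TDH.MEM.PAGE.ADD copies initial contents from a
 * staged source page and is only valid before the TD is finalized;
 * TDH.MEM.PAGE.AUG installs a zeroed page at runtime.  The dispatch below
 * is illustrative; the kernel's real fault path is structured differently.
 */
static int tdx_map_private_page_sketch(struct kvm *kvm, gpa_t gpa,
				       kvm_pfn_t pfn, int tdx_level)
{
	struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
	struct page *page = pfn_to_page(pfn);
	u64 entry, level_state, err;

	if (kvm_tdx->state != TD_STATE_RUNNABLE) {
		/* Pre-finalize: contents come from the staged source page. */
		if (KVM_BUG_ON(!kvm_tdx->page_add_src, kvm))
			return -EIO;
		err = tdh_mem_page_add(&kvm_tdx->td, gpa, page,
				       kvm_tdx->page_add_src, &entry,
				       &level_state);
	} else {
		/* Post-finalize: the TDX module zeroes the page. */
		err = tdh_mem_page_aug(&kvm_tdx->td, gpa, tdx_level, page,
				       &entry, &level_state);
	}
	return err ? -EIO : 0;
}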
struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
if (unlikely(kvm_tdx->state != TD_STATE_RUNNABLE))
struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
if (unlikely(kvm_tdx->state != TD_STATE_RUNNABLE))
err = tdh_do_no_vcpus(tdh_mem_track, kvm, &kvm_tdx->td);
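/*
 * Hedged sketch: TDH.MEM.TRACK advances the TD's TLB-tracking epoch so
 * stale private translations can be shot down.  tdh_do_no_vcpus() is taken
 * from the fragment above and assumed to be a wrapper that retries the
 * SEAMCALL with all vCPUs kicked out of the TD when it contends on
 * TD-wide resources.
 */
static void tdx_track_sketch(struct kvm *kvm)
{
	struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
	u64 err;

	if (unlikely(kvm_tdx->state != TD_STATE_RUNNABLE))
		return;

	err = tdh_do_no_vcpus(tdh_mem_track, kvm, &kvm_tdx->td);
	if (KVM_BUG_ON(err, kvm))
		return;

	/* Make every vCPU re-enter so it observes the new epoch. */
	kvm_make_all_cpus_request(kvm, KVM_REQ_OUTSIDE_GUEST_MODE);
}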
struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
if (KVM_BUG_ON(is_hkid_assigned(kvm_tdx), kvm))
struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
err = tdh_do_no_vcpus(tdh_mem_range_block, kvm, &kvm_tdx->td, gpa,
err = tdh_do_no_vcpus(tdh_mem_page_remove, kvm, &kvm_tdx->td, gpa,
err = tdh_phymem_page_wbinvd_hkid((u16)kvm_tdx->hkid, page);
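/*
 * Hedged sketch of the teardown sequence the three fragments above belong
 * to: a private page is blocked (TDH.MEM.RANGE.BLOCK), TLBs are tracked,
 * the mapping is removed (TDH.MEM.PAGE.REMOVE), and the page's cache lines
 * are flushed under the TD's HKID before the page may be reused with
 * another key.  Signature and error handling are assumptions.
 */
static int tdx_remove_private_page_sketch(struct kvm *kvm, gpa_t gpa,
					  int tdx_level, struct page *page)
{
	struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
	u64 entry, level_state, err;

	err = tdh_do_no_vcpus(tdh_mem_range_block, kvm, &kvm_tdx->td, gpa,
			      tdx_level, &entry, &level_state);
	if (err)
		return -EIO;

	tdx_track_sketch(kvm);		/* see the TDH.MEM.TRACK sketch */

	err = tdh_do_no_vcpus(tdh_mem_page_remove, kvm, &kvm_tdx->td, gpa,
			      tdx_level, &entry, &level_state);
	if (err)
		return -EIO;

	/* Write back and invalidate lines tagged with the TD's key. */
	err = tdh_phymem_page_wbinvd_hkid((u16)kvm_tdx->hkid, page);
	return err ? -EIO : 0;
}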
struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
kvm_tdx->hkid = ret;
kvm_tdx->misc_cg = get_current_misc_cg();
ret = misc_cg_try_charge(MISC_CG_RES_TDX, kvm_tdx->misc_cg, 1);
kvm_tdx->td.tdcs_nr_pages = tdx_sysinfo->td_ctrl.tdcs_base_size / PAGE_SIZE;
kvm_tdx->td.tdcx_nr_pages = tdx_sysinfo->td_ctrl.tdvps_base_size / PAGE_SIZE - 1;
tdcs_pages = kcalloc(kvm_tdx->td.tdcs_nr_pages, sizeof(*kvm_tdx->td.tdcs_pages),
		     GFP_KERNEL);
for (i = 0; i < kvm_tdx->td.tdcs_nr_pages; i++) {
kvm_tdx->td.tdr_page = tdr_page;
err = tdh_mng_create(&kvm_tdx->td, kvm_tdx->hkid);
kvm_tdx, true);
kvm_tdx->td.tdcs_pages = tdcs_pages;
for (i = 0; i < kvm_tdx->td.tdcs_nr_pages; i++) {
err = tdh_mng_addcx(&kvm_tdx->td, tdcs_pages[i]);
err = tdh_mng_init(&kvm_tdx->td, __pa(td_params), &rcx);
for (; i < kvm_tdx->td.tdcs_nr_pages; i++) {
if (!kvm_tdx->td.tdcs_pages)
for (i = 0; i < kvm_tdx->td.tdcs_nr_pages; i++) {
kvm_tdx->td.tdcs_pages = NULL;
kvm_tdx->td.tdr_page = NULL;
tdx_hkid_free(kvm_tdx);
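/*
 * Hedged sketch of the creation order the fragments above come from:
 * allocate a host key ID and charge it to the misc cgroup, size the TDCS
 * array from the TDX module's system info, create the TDR root page
 * (TDH.MNG.CREATE), configure the key on each package, add the TDCS pages
 * (TDH.MNG.ADDCX), then initialize the TD (TDH.MNG.INIT).  Unwinding runs
 * in reverse.  Allocation details and the function name are assumptions.
 */
static int tdx_td_setup_sketch(struct kvm *kvm, struct td_params *td_params)
{
	struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
	u64 rcx, err;
	int i;

	if (tdh_mng_create(&kvm_tdx->td, kvm_tdx->hkid))
		goto free_hkid;

	/*
	 * Program the private key on one CPU of each package, e.g. via
	 * smp_call_on_cpu(..., tdx_do_tdh_mng_key_config, kvm_tdx, true).
	 */

	for (i = 0; i < kvm_tdx->td.tdcs_nr_pages; i++) {
		err = tdh_mng_addcx(&kvm_tdx->td, kvm_tdx->td.tdcs_pages[i]);
		if (err)
			goto free_pages;
	}

	err = tdh_mng_init(&kvm_tdx->td, __pa(td_params), &rcx);
	if (err)
		goto free_pages;
	return 0;

free_pages:
	/* Real code must reclaim pages already owned by the TDX module. */
free_hkid:
	tdx_hkid_free(kvm_tdx);
	return -EIO;
}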
static u64 tdx_td_metadata_field_read(struct kvm_tdx *tdx, u64 field_id,
struct kvm_tdx *kvm_tdx = to_kvm_tdx(vcpu->kvm);
err = tdx_td_metadata_field_read(kvm_tdx, field_id, &ebx_eax);
err = tdx_td_metadata_field_read(kvm_tdx, field_id, &edx_ecx);
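/*
 * Hedged sketch: guest CPUID values configured into the TD are read back
 * from TDCS metadata as two packed 64-bit fields, EBX:EAX and EDX:ECX,
 * then split into the four 32-bit registers.  Treating the second field ID
 * as the successor of the first is an assumption for illustration.
 */
static int tdx_read_cpuid_sketch(struct kvm_vcpu *vcpu, u64 field_id,
				 struct kvm_cpuid_entry2 *e)
{
	struct kvm_tdx *kvm_tdx = to_kvm_tdx(vcpu->kvm);
	u64 ebx_eax, edx_ecx, err;

	err = tdx_td_metadata_field_read(kvm_tdx, field_id, &ebx_eax);
	if (err)
		return -EIO;
	err = tdx_td_metadata_field_read(kvm_tdx, field_id + 1, &edx_ecx);
	if (err)
		return -EIO;

	e->eax = lower_32_bits(ebx_eax);
	e->ebx = upper_32_bits(ebx_eax);
	e->ecx = lower_32_bits(edx_ecx);
	e->edx = upper_32_bits(edx_ecx);
	return 0;
}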
static inline void tdx_hkid_free(struct kvm_tdx *kvm_tdx)
tdx_guest_keyid_free(kvm_tdx->hkid);
kvm_tdx->hkid = -1;
struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
misc_cg_uncharge(MISC_CG_RES_TDX, kvm_tdx->misc_cg, 1);
if (kvm_tdx->state != TD_STATE_UNINITIALIZED)
put_misc_cg(kvm_tdx->misc_cg);
kvm_tdx->misc_cg = NULL;
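/*
 * Hedged sketch assembling the free path above: releasing the TD's host
 * key ID returns it to the global keyid allocator and drops the misc
 * cgroup charge taken at creation.  Ordering is illustrative.
 */
static inline void tdx_hkid_free_sketch(struct kvm_tdx *kvm_tdx)
{
	tdx_guest_keyid_free(kvm_tdx->hkid);
	kvm_tdx->hkid = -1;
	misc_cg_uncharge(MISC_CG_RES_TDX, kvm_tdx->misc_cg, 1);
	put_misc_cg(kvm_tdx->misc_cg);
	kvm_tdx->misc_cg = NULL;
}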
kvm_tdx->tsc_offset = td_tdcs_exec_read64(kvm_tdx, TD_TDCS_EXEC_TSC_OFFSET);
kvm_tdx->tsc_multiplier = td_tdcs_exec_read64(kvm_tdx, TD_TDCS_EXEC_TSC_MULTIPLIER);
kvm_tdx->attributes = td_params->attributes;
kvm_tdx->xfam = td_params->xfam;
kvm_tdx->state = TD_STATE_INITIALIZED;
static inline bool is_hkid_assigned(struct kvm_tdx *kvm_tdx)
return kvm_tdx->hkid > 0;
struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
if (!is_hkid_assigned(kvm_tdx) || kvm_tdx->state == TD_STATE_RUNNABLE)
cmd->hw_error = tdh_mr_finalize(&kvm_tdx->td);
kvm_tdx->state = TD_STATE_RUNNABLE;
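/*
 * Hedged sketch: after TDH.MNG.INIT the fragments above cache the TSC
 * offset/multiplier and the immutable attributes/xfam, and the TD only
 * becomes runnable once its initial measurement is sealed with
 * TDH.MR.FINALIZE.  Locking and ioctl plumbing are omitted.
 */
static int tdx_td_finalize_sketch(struct kvm *kvm, struct kvm_tdx_cmd *cmd)
{
	struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);

	if (!is_hkid_assigned(kvm_tdx) || kvm_tdx->state == TD_STATE_RUNNABLE)
		return -EINVAL;

	cmd->hw_error = tdh_mr_finalize(&kvm_tdx->td);
	if (cmd->hw_error)
		return -EIO;

	kvm_tdx->state = TD_STATE_RUNNABLE;
	return 0;
}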
struct kvm_tdx *kvm_tdx = to_kvm_tdx(vcpu->kvm);
tdx->vp.tdcx_pages = kcalloc(kvm_tdx->td.tdcx_nr_pages, sizeof(*tdx->vp.tdcx_pages),
for (i = 0; i < kvm_tdx->td.tdcx_nr_pages; i++) {
err = tdh_vp_create(&kvm_tdx->td, &tdx->vp);
for (i = 0; i < kvm_tdx->td.tdcx_nr_pages; i++) {
for (; i < kvm_tdx->td.tdcx_nr_pages; i++) {
for (i = 0; i < kvm_tdx->td.tdcx_nr_pages; i++) {
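/*
 * Hedged sketch of vCPU control-page setup from the loops above: allocate
 * tdcx_nr_pages TDCX pages and bind them to the vCPU with TDH.VP.CREATE.
 * The error unwinding here is an assumption; real code cannot simply free
 * pages already donated to the TDX module.
 */
static int tdx_vcpu_create_sketch(struct kvm_vcpu *vcpu, struct vcpu_tdx *tdx)
{
	struct kvm_tdx *kvm_tdx = to_kvm_tdx(vcpu->kvm);
	int ret = -ENOMEM;
	u64 err;
	int i;

	tdx->vp.tdcx_pages = kcalloc(kvm_tdx->td.tdcx_nr_pages,
				     sizeof(*tdx->vp.tdcx_pages), GFP_KERNEL);
	if (!tdx->vp.tdcx_pages)
		return -ENOMEM;

	for (i = 0; i < kvm_tdx->td.tdcx_nr_pages; i++) {
		tdx->vp.tdcx_pages[i] = alloc_page(GFP_KERNEL);
		if (!tdx->vp.tdcx_pages[i])
			goto free;
	}

	err = tdh_vp_create(&kvm_tdx->td, &tdx->vp);
	if (err) {
		ret = -EIO;
		goto free;
	}
	return 0;

free:
	for (i = 0; i < kvm_tdx->td.tdcx_nr_pages; i++)
		if (tdx->vp.tdcx_pages[i])
			__free_page(tdx->vp.tdcx_pages[i]);
	kfree(tdx->vp.tdcx_pages);
	tdx->vp.tdcx_pages = NULL;
	return ret;
}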
struct kvm_tdx *__kvm_tdx = to_kvm_tdx(kvm); \
struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
if (KVM_BUG_ON(kvm_tdx->page_add_src, kvm))
kvm_tdx->page_add_src = src_page;
kvm_tdx->page_add_src = NULL;
err = tdh_mr_extend(&kvm_tdx->td, gpa + i, &entry, &level_state);
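/*
 * Hedged sketch: when userspace seeds initial TD memory, the source page
 * is staged in kvm_tdx->page_add_src so the fault path can feed it to
 * TDH.MEM.PAGE.ADD (see the mapping sketch earlier); the page may then be
 * folded into the TD's measurement with TDH.MR.EXTEND, assumed here to
 * operate on 256-byte chunks.
 */
static int tdx_extend_measurement_sketch(struct kvm *kvm, gpa_t gpa)
{
	struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
	u64 entry, level_state, err;
	int i;

	for (i = 0; i < PAGE_SIZE; i += 256) {
		err = tdh_mr_extend(&kvm_tdx->td, gpa + i, &entry,
				    &level_state);
		if (err)
			return -EIO;
	}
	return 0;
}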
struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
if (kvm_tdx->state == TD_STATE_RUNNABLE)
struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
if (!is_hkid_assigned(kvm_tdx) || kvm_tdx->state == TD_STATE_RUNNABLE)
struct kvm_tdx *kvm_tdx = to_kvm_tdx(vcpu->kvm);
if (!is_hkid_assigned(kvm_tdx) || kvm_tdx->state == TD_STATE_RUNNABLE)
KVM_SANITY_CHECK_VM_STRUCT_SIZE(kvm_tdx);
vt_x86_ops.vm_size = max_t(unsigned int, vt_x86_ops.vm_size, sizeof(struct kvm_tdx));
struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
if (!is_hkid_assigned(kvm_tdx))
err = tdh_mng_vpflushdone(&kvm_tdx->td);
kvm_tdx->hkid);
err = tdh_mng_key_freeid(&kvm_tdx->td);
kvm_tdx->hkid);
tdx_hkid_free(kvm_tdx);
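/*
 * Hedged sketch of key release from the fragments above: once no vCPU can
 * still be associated with the TD, TDH.MNG.VPFLUSHDONE confirms per-vCPU
 * state is flushed, TDH.MNG.KEY.FREEID returns the HKID to the TDX module,
 * and the host-side allocator entry is released last.
 */
static void tdx_release_hkid_sketch(struct kvm *kvm)
{
	struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
	u64 err;

	if (!is_hkid_assigned(kvm_tdx))
		return;

	err = tdh_mng_vpflushdone(&kvm_tdx->td);
	if (err) {
		pr_err("TDH_MNG_VPFLUSHDONE failed for hkid %d\n",
		       kvm_tdx->hkid);
		return;
	}

	err = tdh_mng_key_freeid(&kvm_tdx->td);
	if (err) {
		pr_err("TDH_MNG_KEY_FREEID failed for hkid %d\n",
		       kvm_tdx->hkid);
		return;
	}

	tdx_hkid_free(kvm_tdx);
}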
struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
if (is_hkid_assigned(kvm_tdx))
if (kvm_tdx->td.tdcs_pages) {
for (i = 0; i < kvm_tdx->td.tdcs_nr_pages; i++) {
if (!kvm_tdx->td.tdcs_pages[i])
tdx_reclaim_control_page(kvm_tdx->td.tdcs_pages[i]);
kfree(kvm_tdx->td.tdcs_pages);
kvm_tdx->td.tdcs_pages = NULL;
if (!kvm_tdx->td.tdr_page)
if (__tdx_reclaim_page(kvm_tdx->td.tdr_page))
err = tdh_phymem_page_wbinvd_tdr(&kvm_tdx->td);
tdx_quirk_reset_page(kvm_tdx->td.tdr_page);
__free_page(kvm_tdx->td.tdr_page);
kvm_tdx->td.tdr_page = NULL;
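/*
 * Hedged sketch of the final teardown above: control pages can only be
 * reclaimed after the HKID is released; the TDR page additionally needs a
 * WBINVD under the TDX-global key and a quirk reset before it goes back
 * to the page allocator.
 */
static void tdx_vm_free_sketch(struct kvm *kvm)
{
	struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
	int i;

	if (is_hkid_assigned(kvm_tdx))
		return;		/* the key must be released first */

	if (kvm_tdx->td.tdcs_pages) {
		for (i = 0; i < kvm_tdx->td.tdcs_nr_pages; i++)
			if (kvm_tdx->td.tdcs_pages[i])
				tdx_reclaim_control_page(kvm_tdx->td.tdcs_pages[i]);
		kfree(kvm_tdx->td.tdcs_pages);
		kvm_tdx->td.tdcs_pages = NULL;
	}

	if (!kvm_tdx->td.tdr_page)
		return;

	if (__tdx_reclaim_page(kvm_tdx->td.tdr_page))
		return;
	if (tdh_phymem_page_wbinvd_tdr(&kvm_tdx->td))
		return;

	tdx_quirk_reset_page(kvm_tdx->td.tdr_page);
	__free_page(kvm_tdx->td.tdr_page);
	kvm_tdx->td.tdr_page = NULL;
}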
struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
kvm_tdx->state = TD_STATE_UNINITIALIZED;
struct kvm_tdx *kvm_tdx = param;
err = tdh_mng_key_config(&kvm_tdx->td);
if (TDX_BUG_ON(err, TDH_MNG_KEY_CONFIG, &kvm_tdx->kvm))
struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
kvm_tdx->state = TD_STATE_UNINITIALIZED;
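/*
 * Hedged sketch: the callback above is meant to run once per package (via
 * smp_call_on_cpu()) to program the TD's ephemeral key with
 * TDH.MNG.KEY.CONFIG.  The retry on key-generation failure is an
 * assumption about how transient entropy errors are handled.
 */
static int tdx_do_tdh_mng_key_config_sketch(void *param)
{
	struct kvm_tdx *kvm_tdx = param;
	u64 err;

	do {
		err = tdh_mng_key_config(&kvm_tdx->td);
	} while (err == TDX_KEY_GENERATION_FAILED);

	if (TDX_BUG_ON(err, TDH_MNG_KEY_CONFIG, &kvm_tdx->kvm))
		return -EIO;
	return 0;
}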
struct kvm_tdx *kvm_tdx = to_kvm_tdx(vcpu->kvm);
if (kvm_tdx->state != TD_STATE_INITIALIZED)
vcpu->arch.tsc_offset = kvm_tdx->tsc_offset;
vcpu->arch.tsc_scaling_ratio = kvm_tdx->tsc_multiplier;
vcpu->arch.l1_tsc_scaling_ratio = kvm_tdx->tsc_multiplier;
if ((kvm_tdx->xfam & XFEATURE_MASK_XTILE) == XFEATURE_MASK_XTILE)
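/*
 * Hedged sketch: vCPU init pulls the TSC offset/multiplier cached at TD
 * init time into the generic vCPU state (both are fixed for the TD's
 * lifetime), and a fully-enabled XTILE in xfam means the guest may use
 * AMX, so the XFD write intercept is dropped.  The function name is an
 * assumption.
 */
static int tdx_vcpu_init_sketch(struct kvm_vcpu *vcpu)
{
	struct kvm_tdx *kvm_tdx = to_kvm_tdx(vcpu->kvm);

	if (kvm_tdx->state != TD_STATE_INITIALIZED)
		return -EIO;

	vcpu->arch.tsc_offset = kvm_tdx->tsc_offset;
	vcpu->arch.l1_tsc_offset = vcpu->arch.tsc_offset;
	vcpu->arch.tsc_scaling_ratio = kvm_tdx->tsc_multiplier;
	vcpu->arch.l1_tsc_scaling_ratio = kvm_tdx->tsc_multiplier;

	if ((kvm_tdx->xfam & XFEATURE_MASK_XTILE) == XFEATURE_MASK_XTILE)
		vcpu->arch.xfd_no_write_intercept = true;
	return 0;
}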
static __always_inline struct kvm_tdx *to_kvm_tdx(struct kvm *kvm)
return container_of(kvm, struct kvm_tdx, kvm);
struct kvm_tdx *kvm_tdx = to_kvm_tdx(vcpu->kvm);
if (is_hkid_assigned(kvm_tdx))
for (i = 0; i < kvm_tdx->td.tdcx_nr_pages; i++) {
static __always_inline u64 td_tdcs_exec_read64(struct kvm_tdx *kvm_tdx, u32 field)
err = tdh_mng_rd(&kvm_tdx->td, TDCS_EXEC(field), &data);
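/*
 * Hedged sketch completing the accessor above: TDH.MNG.RD pulls a 64-bit
 * execution-control field out of the TDCS; returning 0 on failure is an
 * assumption about the error convention.
 */
static __always_inline u64 td_tdcs_exec_read64_sketch(struct kvm_tdx *kvm_tdx,
						      u32 field)
{
	u64 err, data;

	err = tdh_mng_rd(&kvm_tdx->td, TDCS_EXEC(field), &data);
	if (unlikely(err)) {
		pr_err("TDH_MNG_RD[EXEC.0x%x] failed: 0x%llx\n", field, err);
		return 0;
	}
	return data;
}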