Symbol: kvm_tdx
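struct kvm_tdx is KVM's per-VM state for a TDX guest: it embeds struct kvm, and to_kvm_tdx() recovers it via container_of() (arch/x86/kvm/vmx/tdx.c:80). Each entry below gives file:line followed by the referencing source line, sorted by line number within each file.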
arch/x86/kvm/vmx/tdx.c:80: static __always_inline struct kvm_tdx *to_kvm_tdx(struct kvm *kvm)
arch/x86/kvm/vmx/tdx.c:82: return container_of(kvm, struct kvm_tdx, kvm);
arch/x86/kvm/vmx/tdx.c:266: static inline void tdx_hkid_free(struct kvm_tdx *kvm_tdx)
arch/x86/kvm/vmx/tdx.c:268: tdx_guest_keyid_free(kvm_tdx->hkid);
arch/x86/kvm/vmx/tdx.c:269: kvm_tdx->hkid = -1;
arch/x86/kvm/vmx/tdx.c:271: misc_cg_uncharge(MISC_CG_RES_TDX, kvm_tdx->misc_cg, 1);
arch/x86/kvm/vmx/tdx.c:272: put_misc_cg(kvm_tdx->misc_cg);
arch/x86/kvm/vmx/tdx.c:273: kvm_tdx->misc_cg = NULL;
arch/x86/kvm/vmx/tdx.c:276: static inline bool is_hkid_assigned(struct kvm_tdx *kvm_tdx)
arch/x86/kvm/vmx/tdx.c:278: return kvm_tdx->hkid > 0;
arch/x86/kvm/vmx/tdx.c:309: struct kvm_tdx *__kvm_tdx = to_kvm_tdx(kvm); \
arch/x86/kvm/vmx/tdx.c:494: struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
arch/x86/kvm/vmx/tdx.c:501: if (!is_hkid_assigned(kvm_tdx))
arch/x86/kvm/vmx/tdx.c:524: err = tdh_mng_vpflushdone(&kvm_tdx->td);
arch/x86/kvm/vmx/tdx.c:529: kvm_tdx->hkid);
arch/x86/kvm/vmx/tdx.c:549: err = tdh_mng_key_freeid(&kvm_tdx->td);
arch/x86/kvm/vmx/tdx.c:552: kvm_tdx->hkid);
arch/x86/kvm/vmx/tdx.c:554: tdx_hkid_free(kvm_tdx);
arch/x86/kvm/vmx/tdx.c:566: struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
arch/x86/kvm/vmx/tdx.c:575: if (is_hkid_assigned(kvm_tdx))
arch/x86/kvm/vmx/tdx.c:578: if (kvm_tdx->td.tdcs_pages) {
arch/x86/kvm/vmx/tdx.c:579: for (i = 0; i < kvm_tdx->td.tdcs_nr_pages; i++) {
arch/x86/kvm/vmx/tdx.c:580: if (!kvm_tdx->td.tdcs_pages[i])
arch/x86/kvm/vmx/tdx.c:583: tdx_reclaim_control_page(kvm_tdx->td.tdcs_pages[i]);
arch/x86/kvm/vmx/tdx.c:585: kfree(kvm_tdx->td.tdcs_pages);
arch/x86/kvm/vmx/tdx.c:586: kvm_tdx->td.tdcs_pages = NULL;
arch/x86/kvm/vmx/tdx.c:589: if (!kvm_tdx->td.tdr_page)
arch/x86/kvm/vmx/tdx.c:592: if (__tdx_reclaim_page(kvm_tdx->td.tdr_page))
arch/x86/kvm/vmx/tdx.c:600: err = tdh_phymem_page_wbinvd_tdr(&kvm_tdx->td);
arch/x86/kvm/vmx/tdx.c:604: tdx_quirk_reset_page(kvm_tdx->td.tdr_page);
arch/x86/kvm/vmx/tdx.c:606: __free_page(kvm_tdx->td.tdr_page);
arch/x86/kvm/vmx/tdx.c:607: kvm_tdx->td.tdr_page = NULL;
arch/x86/kvm/vmx/tdx.c:612: struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
arch/x86/kvm/vmx/tdx.c:616: kvm_tdx->state = TD_STATE_UNINITIALIZED;
arch/x86/kvm/vmx/tdx.c:621: struct kvm_tdx *kvm_tdx = param;
arch/x86/kvm/vmx/tdx.c:625: err = tdh_mng_key_config(&kvm_tdx->td);
arch/x86/kvm/vmx/tdx.c:626: if (TDX_BUG_ON(err, TDH_MNG_KEY_CONFIG, &kvm_tdx->kvm))
arch/x86/kvm/vmx/tdx.c:634: struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
arch/x86/kvm/vmx/tdx.c:671: kvm_tdx->state = TD_STATE_UNINITIALIZED;
arch/x86/kvm/vmx/tdx.c:678: struct kvm_tdx *kvm_tdx = to_kvm_tdx(vcpu->kvm);
arch/x86/kvm/vmx/tdx.c:681: if (kvm_tdx->state != TD_STATE_INITIALIZED)
arch/x86/kvm/vmx/tdx.c:704: vcpu->arch.tsc_offset = kvm_tdx->tsc_offset;
arch/x86/kvm/vmx/tdx.c:706: vcpu->arch.tsc_scaling_ratio = kvm_tdx->tsc_multiplier;
arch/x86/kvm/vmx/tdx.c:707: vcpu->arch.l1_tsc_scaling_ratio = kvm_tdx->tsc_multiplier;
arch/x86/kvm/vmx/tdx.c:712: if ((kvm_tdx->xfam & XFEATURE_MASK_XTILE) == XFEATURE_MASK_XTILE)
arch/x86/kvm/vmx/tdx.c:873: struct kvm_tdx *kvm_tdx = to_kvm_tdx(vcpu->kvm);
arch/x86/kvm/vmx/tdx.c:888: if (is_hkid_assigned(kvm_tdx))
arch/x86/kvm/vmx/tdx.c:892: for (i = 0; i < kvm_tdx->td.tdcx_nr_pages; i++) {
arch/x86/kvm/vmx/tdx.c:1040: struct kvm_tdx *kvm_tdx = to_kvm_tdx(vcpu->kvm);
arch/x86/kvm/vmx/tdx.c:1050: if (kvm_host.xcr0 != (kvm_tdx->xfam & kvm_caps.supported_xcr0))
arch/x86/kvm/vmx/tdx.c:1057: if (kvm_host.xss != (kvm_tdx->xfam & kvm_caps.supported_xss))
arch/x86/kvm/vmx/tdx.c:1627: struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
arch/x86/kvm/vmx/tdx.c:1634: if (KVM_BUG_ON(!kvm_tdx->page_add_src, kvm))
arch/x86/kvm/vmx/tdx.c:1637: err = tdh_mem_page_add(&kvm_tdx->td, gpa, pfn_to_page(pfn),
arch/x86/kvm/vmx/tdx.c:1638: kvm_tdx->page_add_src, &entry, &level_state);
arch/x86/kvm/vmx/tdx.c:1652: struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
arch/x86/kvm/vmx/tdx.c:1658: err = tdh_mem_page_aug(&kvm_tdx->td, gpa, tdx_level, page, &entry, &level_state);
arch/x86/kvm/vmx/tdx.c:1671: struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
arch/x86/kvm/vmx/tdx.c:1693: if (unlikely(kvm_tdx->state != TD_STATE_RUNNABLE))
arch/x86/kvm/vmx/tdx.c:1744: struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
arch/x86/kvm/vmx/tdx.c:1748: if (unlikely(kvm_tdx->state != TD_STATE_RUNNABLE))
arch/x86/kvm/vmx/tdx.c:1758: err = tdh_do_no_vcpus(tdh_mem_track, kvm, &kvm_tdx->td);
arch/x86/kvm/vmx/tdx.c:1767: struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
arch/x86/kvm/vmx/tdx.c:1777: if (KVM_BUG_ON(is_hkid_assigned(kvm_tdx), kvm))
arch/x86/kvm/vmx/tdx.c:1792: struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
arch/x86/kvm/vmx/tdx.c:1810: err = tdh_do_no_vcpus(tdh_mem_range_block, kvm, &kvm_tdx->td, gpa,
arch/x86/kvm/vmx/tdx.c:1826: err = tdh_do_no_vcpus(tdh_mem_page_remove, kvm, &kvm_tdx->td, gpa,
arch/x86/kvm/vmx/tdx.c:1831: err = tdh_phymem_page_wbinvd_hkid((u16)kvm_tdx->hkid, page);
arch/x86/kvm/vmx/tdx.c:2381: struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
arch/x86/kvm/vmx/tdx.c:2392: kvm_tdx->hkid = ret;
arch/x86/kvm/vmx/tdx.c:2393: kvm_tdx->misc_cg = get_current_misc_cg();
arch/x86/kvm/vmx/tdx.c:2394: ret = misc_cg_try_charge(MISC_CG_RES_TDX, kvm_tdx->misc_cg, 1);
arch/x86/kvm/vmx/tdx.c:2406: kvm_tdx->td.tdcs_nr_pages = tdx_sysinfo->td_ctrl.tdcs_base_size / PAGE_SIZE;
arch/x86/kvm/vmx/tdx.c:2408: kvm_tdx->td.tdcx_nr_pages = tdx_sysinfo->td_ctrl.tdvps_base_size / PAGE_SIZE - 1;
arch/x86/kvm/vmx/tdx.c:2409: tdcs_pages = kzalloc_objs(*kvm_tdx->td.tdcs_pages,
arch/x86/kvm/vmx/tdx.c:2410: kvm_tdx->td.tdcs_nr_pages);
arch/x86/kvm/vmx/tdx.c:2414: for (i = 0; i < kvm_tdx->td.tdcs_nr_pages; i++) {
arch/x86/kvm/vmx/tdx.c:2450: kvm_tdx->td.tdr_page = tdr_page;
arch/x86/kvm/vmx/tdx.c:2451: err = tdh_mng_create(&kvm_tdx->td, kvm_tdx->hkid);
arch/x86/kvm/vmx/tdx.c:2481: kvm_tdx, true);
arch/x86/kvm/vmx/tdx.c:2492: kvm_tdx->td.tdcs_pages = tdcs_pages;
arch/x86/kvm/vmx/tdx.c:2493: for (i = 0; i < kvm_tdx->td.tdcs_nr_pages; i++) {
arch/x86/kvm/vmx/tdx.c:2494: err = tdh_mng_addcx(&kvm_tdx->td, tdcs_pages[i]);
arch/x86/kvm/vmx/tdx.c:2506: err = tdh_mng_init(&kvm_tdx->td, __pa(td_params), &rcx);
arch/x86/kvm/vmx/tdx.c:2532: for (; i < kvm_tdx->td.tdcs_nr_pages; i++) {
arch/x86/kvm/vmx/tdx.c:2538: if (!kvm_tdx->td.tdcs_pages)
arch/x86/kvm/vmx/tdx.c:2551: for (i = 0; i < kvm_tdx->td.tdcs_nr_pages; i++) {
arch/x86/kvm/vmx/tdx.c:2556: kvm_tdx->td.tdcs_pages = NULL;
arch/x86/kvm/vmx/tdx.c:2561: kvm_tdx->td.tdr_page = NULL;
arch/x86/kvm/vmx/tdx.c:2564: tdx_hkid_free(kvm_tdx);
arch/x86/kvm/vmx/tdx.c:2569: static u64 tdx_td_metadata_field_read(struct kvm_tdx *tdx, u64 field_id,
arch/x86/kvm/vmx/tdx.c:2586: struct kvm_tdx *kvm_tdx = to_kvm_tdx(vcpu->kvm);
arch/x86/kvm/vmx/tdx.c:2619: err = tdx_td_metadata_field_read(kvm_tdx, field_id, &ebx_eax);
arch/x86/kvm/vmx/tdx.c:2627: err = tdx_td_metadata_field_read(kvm_tdx, field_id, &edx_ecx);
arch/x86/kvm/vmx/tdx.c:2709: struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
arch/x86/kvm/vmx/tdx.c:2718: if (kvm_tdx->state != TD_STATE_UNINITIALIZED)
arch/x86/kvm/vmx/tdx.c:2759: kvm_tdx->tsc_offset = td_tdcs_exec_read64(kvm_tdx, TD_TDCS_EXEC_TSC_OFFSET);
arch/x86/kvm/vmx/tdx.c:2760: kvm_tdx->tsc_multiplier = td_tdcs_exec_read64(kvm_tdx, TD_TDCS_EXEC_TSC_MULTIPLIER);
arch/x86/kvm/vmx/tdx.c:2761: kvm_tdx->attributes = td_params->attributes;
arch/x86/kvm/vmx/tdx.c:2762: kvm_tdx->xfam = td_params->xfam;
arch/x86/kvm/vmx/tdx.c:2769: kvm_tdx->state = TD_STATE_INITIALIZED;
arch/x86/kvm/vmx/tdx.c:2815: struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
arch/x86/kvm/vmx/tdx.c:2817: if (!is_hkid_assigned(kvm_tdx) || kvm_tdx->state == TD_STATE_RUNNABLE)
arch/x86/kvm/vmx/tdx.c:2820: cmd->hw_error = tdh_mr_finalize(&kvm_tdx->td);
arch/x86/kvm/vmx/tdx.c:2826: kvm_tdx->state = TD_STATE_RUNNABLE;
arch/x86/kvm/vmx/tdx.c:2884: struct kvm_tdx *kvm_tdx = to_kvm_tdx(vcpu->kvm);
arch/x86/kvm/vmx/tdx.c:2902: tdx->vp.tdcx_pages = kcalloc(kvm_tdx->td.tdcx_nr_pages, sizeof(*tdx->vp.tdcx_pages),
arch/x86/kvm/vmx/tdx.c:2909: for (i = 0; i < kvm_tdx->td.tdcx_nr_pages; i++) {
arch/x86/kvm/vmx/tdx.c:2918: err = tdh_vp_create(&kvm_tdx->td, &tdx->vp);
arch/x86/kvm/vmx/tdx.c:2924: for (i = 0; i < kvm_tdx->td.tdcx_nr_pages; i++) {
arch/x86/kvm/vmx/tdx.c:2931: for (; i < kvm_tdx->td.tdcx_nr_pages; i++) {
arch/x86/kvm/vmx/tdx.c:2959: for (i = 0; i < kvm_tdx->td.tdcx_nr_pages; i++) {
arch/x86/kvm/vmx/tdx.c:3123: struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
arch/x86/kvm/vmx/tdx.c:3128: if (KVM_BUG_ON(kvm_tdx->page_add_src, kvm))
arch/x86/kvm/vmx/tdx.c:3134: kvm_tdx->page_add_src = src_page;
arch/x86/kvm/vmx/tdx.c:3136: kvm_tdx->page_add_src = NULL;
arch/x86/kvm/vmx/tdx.c:3149: err = tdh_mr_extend(&kvm_tdx->td, gpa + i, &entry, &level_state);
arch/x86/kvm/vmx/tdx.c:3161: struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
arch/x86/kvm/vmx/tdx.c:3171: if (kvm_tdx->state == TD_STATE_RUNNABLE)
arch/x86/kvm/vmx/tdx.c:3226: struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
arch/x86/kvm/vmx/tdx.c:3238: if (!is_hkid_assigned(kvm_tdx) || kvm_tdx->state == TD_STATE_RUNNABLE)
arch/x86/kvm/vmx/tdx.c:3262: struct kvm_tdx *kvm_tdx = to_kvm_tdx(vcpu->kvm);
arch/x86/kvm/vmx/tdx.c:3266: if (!is_hkid_assigned(kvm_tdx) || kvm_tdx->state == TD_STATE_RUNNABLE)
arch/x86/kvm/vmx/tdx.c:3589: KVM_SANITY_CHECK_VM_STRUCT_SIZE(kvm_tdx);
arch/x86/kvm/vmx/tdx.c:3595: vt_x86_ops.vm_size = max_t(unsigned int, vt_x86_ops.vm_size, sizeof(struct kvm_tdx));
arch/x86/kvm/vmx/tdx.h:83: static __always_inline u64 td_tdcs_exec_read64(struct kvm_tdx *kvm_tdx, u32 field)
arch/x86/kvm/vmx/tdx.h:87: err = tdh_mng_rd(&kvm_tdx->td, TDCS_EXEC(field), &data);
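The helpers that most of these references go through are small enough to spell out. The block below is a minimal sketch reassembled from the fragments listed above (tdx.c:80-82, 266-273, 276-278); the struct kvm_tdx layout shown is illustrative only, an assumption standing in for the real definition in tdx.h, which carries many more fields.

/*
 * Minimal sketch, reconstructed from the listing above. The struct layout
 * is an illustrative assumption; the real struct kvm_tdx also holds the TD
 * control pages, xfam, attributes, state, tsc_offset, and more.
 */
struct kvm_tdx {
	struct kvm kvm;			/* embedded VM; container_of() anchor for to_kvm_tdx() */
	int hkid;			/* TDX guest key ID, -1 once freed */
	struct misc_cg *misc_cg;	/* misc cgroup charged one TDX key ID */
	/* ... */
};

/* tdx.c:80-82: recover the TDX wrapper from a plain struct kvm. */
static __always_inline struct kvm_tdx *to_kvm_tdx(struct kvm *kvm)
{
	return container_of(kvm, struct kvm_tdx, kvm);
}

/* tdx.c:276-278: a valid key ID is strictly positive. */
static inline bool is_hkid_assigned(struct kvm_tdx *kvm_tdx)
{
	return kvm_tdx->hkid > 0;
}

/* tdx.c:266-273: release the key ID and uncharge the misc cgroup. */
static inline void tdx_hkid_free(struct kvm_tdx *kvm_tdx)
{
	tdx_guest_keyid_free(kvm_tdx->hkid);
	kvm_tdx->hkid = -1;
	misc_cg_uncharge(MISC_CG_RES_TDX, kvm_tdx->misc_cg, 1);
	put_misc_cg(kvm_tdx->misc_cg);
	kvm_tdx->misc_cg = NULL;
}

The state field referenced throughout moves through TD_STATE_UNINITIALIZED (tdx.c:616, 671), TD_STATE_INITIALIZED (tdx.c:2769) and TD_STATE_RUNNABLE (tdx.c:2826); several of the checks above gate SEAMCALL paths on is_hkid_assigned() or on that state.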