kvm_svm — per-VM state container for KVM's AMD SVM backend (usage fragments, primarily from the AVIC code)
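All of the fragments below revolve around struct kvm_svm, which embeds the generic struct kvm and appends AVIC-specific per-VM state. A minimal sketch of the layout, showing only the fields these fragments touch (field types are inferred from usage here, not copied from svm.h):

struct kvm_svm {
	struct kvm kvm;			/* must stay first; see to_kvm_svm() and vm_size below */

	/* AVIC (Advanced Virtual Interrupt Controller) per-VM state */
	u32 avic_vm_id;			/* key into the global svm_vm_data_hash */
	u32 *avic_logical_id_table;	/* one zeroed page, 32-bit entries */
	u64 *avic_physical_id_table;	/* multi-page, one 64-bit entry per vCPU */
	struct hlist_node hnode;	/* links the VM into svm_vm_data_hash */
};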
struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm);

/* WRITE_ONCE(): hardware reads this table concurrently; avoid torn stores. */
WRITE_ONCE(kvm_svm->avic_physical_id_table[vcpu->vcpu_id], entry);
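For context, a hedged sketch of how such an entry might be composed before the store; backing_page is a stand-in for the vCPU's APIC backing page, and the AVIC_PHYSICAL_ID_ENTRY_* masks are the ones defined in asm/svm.h:

u64 entry;

entry = __sme_set(page_to_phys(backing_page)) &
	AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK;	/* where the CPU writes APIC state */
entry |= AVIC_PHYSICAL_ID_ENTRY_VALID_MASK;		/* bit 63: entry is in use */

WRITE_ONCE(kvm_svm->avic_physical_id_table[vcpu->vcpu_id], entry);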
struct kvm_svm *kvm_svm;

hash_for_each_possible(svm_vm_data_hash, kvm_svm, hnode, vm_id) {
	if (kvm_svm->avic_vm_id != vm_id)
		continue;	/* hash bucket collision, not our VM */
	vcpu = kvm_get_vcpu(&kvm_svm->kvm, vcpu_idx);
	break;
}
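The hash in question uses the generic linux/hashtable.h API. A self-contained sketch of the same publish/lookup pattern (the names vm_node, vm_hash, vm_publish, and vm_lookup are illustrative, not from avic.c):

#include <linux/hashtable.h>
#include <linux/types.h>

struct vm_node {
	u32 id;
	struct hlist_node hnode;
};

static DEFINE_HASHTABLE(vm_hash, 8);	/* 2^8 buckets */

static void vm_publish(struct vm_node *n, u32 id)
{
	n->id = id;
	hash_add(vm_hash, &n->hnode, n->id);	/* key picks the bucket */
}

static struct vm_node *vm_lookup(u32 id)
{
	struct vm_node *n;

	/* One bucket may hold several keys, hence the explicit id check. */
	hash_for_each_possible(vm_hash, n, hnode, id)
		if (n->id == id)
			return n;
	return NULL;
}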
struct kvm_svm *kvm_svm = to_kvm_svm(kvm);

if (kvm_svm->avic_physical_id_table)	/* already allocated, nothing to do */
	return 0;
kvm_svm->avic_physical_id_table = (void *)__get_free_pages(GFP_KERNEL_ACCOUNT | __GFP_ZERO,
							    order);	/* order truncated in the source */
if (!kvm_svm->avic_physical_id_table)
	return -ENOMEM;
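A hedged, self-contained sketch of the __get_free_pages()/free_pages() pairing used for a zeroed multi-page table (table_alloc/table_free and the size parameter are assumptions for illustration):

#include <linux/mm.h>

/* Allocate a zeroed, physically contiguous table of at least 'size' bytes. */
static void *table_alloc(size_t size, unsigned int *order)
{
	*order = get_order(size);	/* smallest page order covering 'size' */
	return (void *)__get_free_pages(GFP_KERNEL_ACCOUNT | __GFP_ZERO, *order);
}

static void table_free(void *table, unsigned int order)
{
	/* free_pages() ignores address 0, so a failed alloc is safe to pass. */
	free_pages((unsigned long)table, order);
}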
struct kvm_svm *kvm_svm = to_kvm_svm(kvm);

/* VM teardown: release both AVIC ID tables, then unhash the VM. */
free_page((unsigned long)kvm_svm->avic_logical_id_table);
free_pages((unsigned long)kvm_svm->avic_physical_id_table,
	   order);	/* allocation order truncated in the source */
hash_del(&kvm_svm->hnode);
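The unhash step must not race with the lookup loop above; in avic.c both sides serialize on a spinlock (svm_vm_data_hash_lock), roughly:

unsigned long flags;

spin_lock_irqsave(&svm_vm_data_hash_lock, flags);
hash_del(&kvm_svm->hnode);
spin_unlock_irqrestore(&svm_vm_data_hash_lock, flags);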
struct kvm_svm *kvm_svm = to_kvm_svm(kvm);
struct kvm_svm *k2;	/* scratch pointer for the vm_id collision probe */

kvm_svm->avic_logical_id_table = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
if (!kvm_svm->avic_logical_id_table)
	return -ENOMEM;	/* error path abbreviated */

/* Publish the VM under its vm_id so the GA-log notifier can find it. */
kvm_svm->avic_vm_id = vm_id;
hash_add(svm_vm_data_hash, &kvm_svm->hnode, kvm_svm->avic_vm_id);
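A hedged sketch of where k2 comes in: before hash_add(), avic.c probes the hash to make sure the candidate vm_id is not already taken once the id counter has wrapped (next_vm_id, next_vm_id_wrapped, and AVIC_VM_ID_MASK are reconstructed from memory, not copied):

again:
	vm_id = next_vm_id = (next_vm_id + 1) & AVIC_VM_ID_MASK;
	if (vm_id == 0) {	/* id 0 is reserved; skip it */
		next_vm_id_wrapped = 1;
		goto again;
	}
	/* Collisions are only possible once the counter has wrapped. */
	if (next_vm_id_wrapped) {
		hash_for_each_possible(svm_vm_data_hash, k2, hnode, vm_id) {
			if (k2->avic_vm_id == vm_id)
				goto again;
		}
	}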
struct kvm_svm *kvm_svm = to_kvm_svm(svm->vcpu.kvm);

/* Hand the per-VM tables to hardware as (SME-tagged) physical addresses. */
vmcb->control.avic_logical_id = __sme_set(__pa(kvm_svm->avic_logical_id_table));
vmcb->control.avic_physical_id = __sme_set(__pa(kvm_svm->avic_physical_id_table));
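__sme_set() just ORs in the SME encryption bit so the hardware walks the tables through encrypted memory when SME is active; its definition in linux/mem_encrypt.h is essentially:

#define __sme_set(x)	((x) | sme_me_mask)	/* sme_me_mask is 0 when SME is off */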
struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm);

/* Install the updated entry at index 'id'; WRITE_ONCE() again to avoid torn stores. */
WRITE_ONCE(kvm_svm->avic_physical_id_table[id], new_entry);
struct kvm_svm *kvm_svm = to_kvm_svm(kvm);

avic_logical_id_table = kvm_svm->avic_logical_id_table;	/* local alias for the per-VM table */
struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm);

/* Hand back a pointer into the logical ID table for the caller to update. */
return &kvm_svm->avic_logical_id_table[index];
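How index gets computed is worth a sketch; the following is a hedged reconstruction of avic.c's flat/cluster handling of the logical destination ID (dlid, flat, and index are locals of the surrounding function):

	if (flat) {
		/* Flat mode: one bit per CPU in the 8-bit logical ID. */
		index = ffs(dlid) - 1;
		if (index > 7)
			return NULL;
	} else {
		/* Cluster mode: 4-bit cluster number plus 4-bit member bitmap. */
		int cluster = (dlid & 0xf0) >> 4;
		int apic = ffs(dlid & 0x0f) - 1;

		if (apic < 0 || cluster >= 0xf)
			return NULL;
		index = (cluster << 2) + apic;
	}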
/* Common KVM allocates vm_size bytes per VM: the whole SVM container. */
.vm_size = sizeof(struct kvm_svm),

KVM_SANITY_CHECK_VM_STRUCT_SIZE(kvm_svm);
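The vm_size hookup is also why struct kvm must sit first in the container: the common x86 code allocates vm_size bytes and treats the result as a struct kvm pointer, roughly:

static inline struct kvm *kvm_arch_alloc_vm(void)
{
	return __vmalloc(kvm_x86_ops.vm_size, GFP_KERNEL_ACCOUNT | __GFP_ZERO);
}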
static __always_inline struct kvm_svm *to_kvm_svm(struct kvm *kvm)
{
	return container_of(kvm, struct kvm_svm, kvm);	/* kvm is the embedded member */
}
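container_of() recovers the enclosing structure from a pointer to one of its members; with kvm at offset 0 the subtraction folds into a plain cast. A toy userspace rendering (the kernel macro adds a type check on top of this):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct inner { int x; };

struct outer {
	struct inner in;	/* at offset 0, like 'kvm' in struct kvm_svm */
	int tag;
};

int main(void)
{
	struct outer o = { .tag = 42 };
	struct inner *p = &o.in;

	/* Recover the enclosing object from the member pointer. */
	printf("%d\n", container_of(p, struct outer, in)->tag);	/* prints 42 */
	return 0;
}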