// SPDX-License-Identifier: GPL-2.0
/*
 * Hyper-V Isolation VM interface with paravisor and hypervisor
 *
 * Author:
 *  Tianyu Lan <Tianyu.Lan@microsoft.com>
 */

#include <linux/bitfield.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/export.h>
#include <asm/svm.h>
#include <asm/sev.h>
#include <asm/io.h>
#include <asm/coco.h>
#include <asm/mem_encrypt.h>
#include <asm/set_memory.h>
#include <asm/mshyperv.h>
#include <asm/hypervisor.h>
#include <asm/mtrr.h>
#include <asm/io_apic.h>
#include <asm/realmode.h>
#include <asm/e820/api.h>
#include <asm/desc.h>
#include <asm/msr.h>
#include <asm/segment.h>
#include <uapi/asm/vmx.h>

#ifdef CONFIG_AMD_MEM_ENCRYPT

#define GHCB_USAGE_HYPERV_CALL  1

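/*
 * GHCB page layout used for Hyper-V hypercalls. The hypercall view overlays
 * the standard GHCB: 509 * 8 bytes of hypercall data, an 8-byte output GPA,
 * an 8-byte input/output control word and 8 reserved bytes add up to exactly
 * 4096 bytes, which the BUILD_BUG_ON() in hv_ghcb_msr_read() verifies
 * against HV_HYP_PAGE_SIZE.
 */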
union hv_ghcb {
        struct ghcb ghcb;
        struct {
                u64 hypercalldata[509];
                u64 outputgpa;
                union {
                        union {
                                struct {
                                        u32 callcode        : 16;
                                        u32 isfast          : 1;
                                        u32 reserved1       : 14;
                                        u32 isnested        : 1;
                                        u32 countofelements : 12;
                                        u32 reserved2       : 4;
                                        u32 repstartindex   : 12;
                                        u32 reserved3       : 4;
                                };
                                u64 asuint64;
                        } hypercallinput;
                        union {
                                struct {
                                        u16 callstatus;
                                        u16 reserved1;
                                        u32 elementsprocessed : 12;
                                        u32 reserved2         : 20;
                                };
                                u64 asuint64;
                        } hypercalloutput;
                };
                u64 reserved2;
        } hypercall;
} __packed __aligned(HV_HYP_PAGE_SIZE);

/* Only used in an SNP VM with the paravisor */
static u16 hv_ghcb_version __ro_after_init;

/* Functions only used in an SNP VM with the paravisor go here. */
u64 hv_ghcb_hypercall(u64 control, void *input, void *output, u32 input_size)
{
        union hv_ghcb *hv_ghcb;
        void **ghcb_base;
        unsigned long flags;
        u64 status;

        if (!hv_ghcb_pg)
                return -EFAULT;

        WARN_ON(in_nmi());

        local_irq_save(flags);
        ghcb_base = (void **)this_cpu_ptr(hv_ghcb_pg);
        hv_ghcb = (union hv_ghcb *)*ghcb_base;
        if (!hv_ghcb) {
                local_irq_restore(flags);
                return -EFAULT;
        }

        hv_ghcb->ghcb.protocol_version = GHCB_PROTOCOL_MAX;
        hv_ghcb->ghcb.ghcb_usage = GHCB_USAGE_HYPERV_CALL;

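        /*
         * 'output' is stored directly into outputgpa, so callers are
         * expected to pass a guest physical address (or NULL).
         */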
        hv_ghcb->hypercall.outputgpa = (u64)output;
        hv_ghcb->hypercall.hypercallinput.asuint64 = 0;
        hv_ghcb->hypercall.hypercallinput.callcode = control;

        if (input_size)
                memcpy(hv_ghcb->hypercall.hypercalldata, input, input_size);

        VMGEXIT();

        hv_ghcb->ghcb.ghcb_usage = 0xffffffff;
        memset(hv_ghcb->ghcb.save.valid_bitmap, 0,
               sizeof(hv_ghcb->ghcb.save.valid_bitmap));

        status = hv_ghcb->hypercall.hypercalloutput.callstatus;

        local_irq_restore(flags);

        return status;
}

static inline u64 rd_ghcb_msr(void)
{
        return native_rdmsrq(MSR_AMD64_SEV_ES_GHCB);
}

static inline void wr_ghcb_msr(u64 val)
{
        native_wrmsrq(MSR_AMD64_SEV_ES_GHCB, val);
}

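/*
 * Issue a GHCB-protocol VMGEXIT. The low 32 bits of SW_EXITINFO1 carry the
 * hypervisor's completion status, so any non-zero value there is reported
 * as a VMM error.
 */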
static enum es_result hv_ghcb_hv_call(struct ghcb *ghcb, u64 exit_code,
                                      u64 exit_info_1, u64 exit_info_2)
{
        /* Fill in protocol and format specifiers */
        ghcb->protocol_version = hv_ghcb_version;
        ghcb->ghcb_usage       = GHCB_DEFAULT_USAGE;

        ghcb_set_sw_exit_code(ghcb, exit_code);
        ghcb_set_sw_exit_info_1(ghcb, exit_info_1);
        ghcb_set_sw_exit_info_2(ghcb, exit_info_2);

        VMGEXIT();

        if (ghcb->save.sw_exit_info_1 & GENMASK_ULL(31, 0))
                return ES_VMM_ERROR;
        else
                return ES_OK;
}

void __noreturn hv_ghcb_terminate(unsigned int set, unsigned int reason)
{
        u64 val = GHCB_MSR_TERM_REQ;

        /* Tell the hypervisor what went wrong. */
        val |= GHCB_SEV_TERM_REASON(set, reason);

        /* Request Guest Termination from Hypervisor */
        wr_ghcb_msr(val);
        VMGEXIT();

        while (true)
                asm volatile("hlt\n" : : : "memory");
}

bool hv_ghcb_negotiate_protocol(void)
{
        u64 ghcb_gpa;
        u64 val;

        /* Save ghcb page gpa. */
        ghcb_gpa = rd_ghcb_msr();

        /* Do the GHCB protocol version negotiation */
        wr_ghcb_msr(GHCB_MSR_SEV_INFO_REQ);
        VMGEXIT();
        val = rd_ghcb_msr();

        if (GHCB_MSR_INFO(val) != GHCB_MSR_SEV_INFO_RESP)
                return false;

        if (GHCB_MSR_PROTO_MAX(val) < GHCB_PROTOCOL_MIN ||
            GHCB_MSR_PROTO_MIN(val) > GHCB_PROTOCOL_MAX)
                return false;

        hv_ghcb_version = min_t(size_t, GHCB_MSR_PROTO_MAX(val),
                                GHCB_PROTOCOL_MAX);

        /* Write ghcb page back after negotiating protocol. */
        wr_ghcb_msr(ghcb_gpa);
        VMGEXIT();

        return true;
}

static void hv_ghcb_msr_write(u64 msr, u64 value)
{
        union hv_ghcb *hv_ghcb;
        void **ghcb_base;
        unsigned long flags;

        if (!hv_ghcb_pg)
                return;

        WARN_ON(in_nmi());

        local_irq_save(flags);
        ghcb_base = (void **)this_cpu_ptr(hv_ghcb_pg);
        hv_ghcb = (union hv_ghcb *)*ghcb_base;
        if (!hv_ghcb) {
                local_irq_restore(flags);
                return;
        }

        ghcb_set_rcx(&hv_ghcb->ghcb, msr);
        ghcb_set_rax(&hv_ghcb->ghcb, lower_32_bits(value));
        ghcb_set_rdx(&hv_ghcb->ghcb, upper_32_bits(value));

        if (hv_ghcb_hv_call(&hv_ghcb->ghcb, SVM_EXIT_MSR, 1, 0))
                pr_warn("Fail to write msr via ghcb %llx.\n", msr);

        local_irq_restore(flags);
}

static void hv_ghcb_msr_read(u64 msr, u64 *value)
{
        union hv_ghcb *hv_ghcb;
        void **ghcb_base;
        unsigned long flags;

        /* Check size of union hv_ghcb here. */
        BUILD_BUG_ON(sizeof(union hv_ghcb) != HV_HYP_PAGE_SIZE);

        if (!hv_ghcb_pg)
                return;

        WARN_ON(in_nmi());

        local_irq_save(flags);
        ghcb_base = (void **)this_cpu_ptr(hv_ghcb_pg);
        hv_ghcb = (union hv_ghcb *)*ghcb_base;
        if (!hv_ghcb) {
                local_irq_restore(flags);
                return;
        }

        ghcb_set_rcx(&hv_ghcb->ghcb, msr);
        if (hv_ghcb_hv_call(&hv_ghcb->ghcb, SVM_EXIT_MSR, 0, 0))
                pr_warn("Fail to read msr via ghcb %llx.\n", msr);
        else
                *value = (u64)lower_32_bits(hv_ghcb->ghcb.save.rax)
                        | ((u64)lower_32_bits(hv_ghcb->ghcb.save.rdx) << 32);
        local_irq_restore(flags);
}

/* Only used in a fully enlightened SNP VM, i.e. without the paravisor */
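/*
 * The AP startup hypercall input page is read by the hypervisor, so it must
 * live in shared (decrypted) memory, hence __bss_decrypted. The AP startup
 * stack is only touched by the guest and stays encrypted.
 */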
static u8 ap_start_input_arg[PAGE_SIZE] __bss_decrypted __aligned(PAGE_SIZE);
static u8 ap_start_stack[PAGE_SIZE] __aligned(PAGE_SIZE);
static DEFINE_PER_CPU(struct sev_es_save_area *, hv_sev_vmsa);

/* Functions only used in an SNP VM without the paravisor go here. */

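/*
 * Build a VMSA segment register from its GDT descriptor. The u16 read at
 * offset (selector + 5) picks up the descriptor's access byte plus the
 * following byte, whose high nibble holds the G/DB/L/AVL flags. The VMSA
 * "attrib" format packs the access byte into bits 0-7 and the flags into
 * bits 8-11, hence the shift-and-mask. For example, a 64-bit code segment
 * with access byte 0x9B and descriptor byte 6 of 0xAF yields attrib 0xA9B.
 */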
#define hv_populate_vmcb_seg(seg, gdtr_base)                    \
do {                                                            \
        if (seg.selector) {                                     \
                seg.base = 0;                                   \
                seg.limit = HV_AP_SEGMENT_LIMIT;                \
                seg.attrib = *(u16 *)(gdtr_base + seg.selector + 5);    \
                seg.attrib = (seg.attrib & 0xFF) | ((seg.attrib >> 4) & 0xF00); \
        }                                                       \
} while (0)

static int snp_set_vmsa(void *va, bool vmsa)
{
        u64 attrs;

        /*
         * Running at VMPL0 allows the kernel to change the VMSA bit for a page
         * using the RMPADJUST instruction. However, for the instruction to
         * succeed it must target the permissions of a lesser privileged
         * (higher numbered) VMPL level, so use VMPL1 (refer to the RMPADJUST
         * instruction in the AMD64 APM Volume 3).
         */
        attrs = 1;
        if (vmsa)
                attrs |= RMPADJUST_VMSA_PAGE_BIT;

        return rmpadjust((unsigned long)va, RMP_PG_SIZE_4K, attrs);
}

static void snp_cleanup_vmsa(struct sev_es_save_area *vmsa)
{
        int err;

        err = snp_set_vmsa(vmsa, false);
        if (err)
                pr_err("clear VMSA page failed (%u), leaking page\n", err);
        else
                free_page((unsigned long)vmsa);
}

int hv_snp_boot_ap(u32 apic_id, unsigned long start_ip, unsigned int cpu)
{
        struct sev_es_save_area *vmsa = (struct sev_es_save_area *)
                __get_free_page(GFP_KERNEL | __GFP_ZERO);
        struct sev_es_save_area *cur_vmsa;
        struct desc_ptr gdtr;
        u64 ret, retry = 5;
        struct hv_enable_vp_vtl *start_vp_input;
        unsigned long flags;
        int vp_index;

        if (!vmsa)
                return -ENOMEM;

        /* Find the Hyper-V VP index, which might not be the same as the APIC ID */
        vp_index = hv_apicid_to_vp_index(apic_id);
        if (vp_index < 0 || vp_index > ms_hyperv.max_vp_index) {
                free_page((unsigned long)vmsa);
                return -EINVAL;
        }

        native_store_gdt(&gdtr);

        vmsa->gdtr.base = gdtr.address;
        vmsa->gdtr.limit = gdtr.size;

        savesegment(es, vmsa->es.selector);
        hv_populate_vmcb_seg(vmsa->es, vmsa->gdtr.base);

        savesegment(cs, vmsa->cs.selector);
        hv_populate_vmcb_seg(vmsa->cs, vmsa->gdtr.base);

        savesegment(ss, vmsa->ss.selector);
        hv_populate_vmcb_seg(vmsa->ss, vmsa->gdtr.base);

        savesegment(ds, vmsa->ds.selector);
        hv_populate_vmcb_seg(vmsa->ds, vmsa->gdtr.base);

        vmsa->efer = native_read_msr(MSR_EFER);

        vmsa->cr4 = native_read_cr4();
        vmsa->cr3 = __native_read_cr3();
        vmsa->cr0 = native_read_cr0();

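        /* XCR0 = 1 enables only legacy x87 state, the architectural reset value. */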
        vmsa->xcr0 = 1;
        vmsa->g_pat = HV_AP_INIT_GPAT_DEFAULT;
        vmsa->rip = (u64)secondary_startup_64_no_verify;
        vmsa->rsp = (u64)&ap_start_stack[PAGE_SIZE];

        /*
         * Set the SNP-specific fields for this VMSA:
         *   VMPL level
         *   SEV_FEATURES (matches the SEV STATUS MSR right shifted 2 bits)
         */
        vmsa->vmpl = 0;
        vmsa->sev_features = sev_status >> 2;

        ret = snp_set_vmsa(vmsa, true);
        if (ret) {
                pr_err("RMPADJUST(%llx) failed: %llx\n", (u64)vmsa, ret);
                free_page((u64)vmsa);
                return ret;
        }

        local_irq_save(flags);
        start_vp_input = (struct hv_enable_vp_vtl *)ap_start_input_arg;
        memset(start_vp_input, 0, sizeof(*start_vp_input));
        start_vp_input->partition_id = -1;
        start_vp_input->vp_index = vp_index;
        start_vp_input->target_vtl.target_vtl = ms_hyperv.vtl;
        *(u64 *)&start_vp_input->vp_context = __pa(vmsa) | 1;

        do {
                ret = hv_do_hypercall(HVCALL_START_VP,
                                      start_vp_input, NULL);
        } while (hv_result(ret) == HV_STATUS_TIME_OUT && retry--);

        local_irq_restore(flags);

        if (!hv_result_success(ret)) {
                pr_err("HvCallStartVirtualProcessor failed: %llx\n", ret);
                snp_cleanup_vmsa(vmsa);
                vmsa = NULL;
        }

        cur_vmsa = per_cpu(hv_sev_vmsa, cpu);
        /* Free up any previous VMSA page */
        if (cur_vmsa)
                snp_cleanup_vmsa(cur_vmsa);

        /* Record the current VMSA page */
        per_cpu(hv_sev_vmsa, cpu) = vmsa;

        return ret;
}

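/*
 * Direct hypercall for a fully enlightened SNP VM: without a paravisor, the
 * VMMCALL goes straight to Hyper-V. Per the Hyper-V hypercall ABI, the
 * control word goes in RCX, the parameters in RDX and R8, and the status
 * comes back in RAX; R9-R11 are listed as clobbers since the hypervisor may
 * modify them.
 */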
u64 hv_snp_hypercall(u64 control, u64 param1, u64 param2)
{
        u64 hv_status;

        register u64 __r8 asm("r8") = param2;
        asm volatile("vmmcall"
                     : "=a" (hv_status),
                       "+c" (control), "+d" (param1), "+r" (__r8)
                     : : "cc", "memory", "r9", "r10", "r11");

        return hv_status;
}

#else
static inline void hv_ghcb_msr_write(u64 msr, u64 value) {}
static inline void hv_ghcb_msr_read(u64 msr, u64 *value) {}
u64 hv_snp_hypercall(u64 control, u64 param1, u64 param2) { return U64_MAX; }
#endif /* CONFIG_AMD_MEM_ENCRYPT */

#ifdef CONFIG_INTEL_TDX_GUEST
static void hv_tdx_msr_write(u64 msr, u64 val)
{
        struct tdx_module_args args = {
                .r10 = TDX_HYPERCALL_STANDARD,
                .r11 = EXIT_REASON_MSR_WRITE,
                .r12 = msr,
                .r13 = val,
        };

        u64 ret = __tdx_hypercall(&args);

        WARN_ONCE(ret, "Failed to emulate MSR write: %lld\n", ret);
}

static void hv_tdx_msr_read(u64 msr, u64 *val)
{
        struct tdx_module_args args = {
                .r10 = TDX_HYPERCALL_STANDARD,
                .r11 = EXIT_REASON_MSR_READ,
                .r12 = msr,
        };

        u64 ret = __tdx_hypercall(&args);

        if (WARN_ONCE(ret, "Failed to emulate MSR read: %lld\n", ret))
                *val = 0;
        else
                *val = args.r11;
}

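/*
 * Direct hypercall for a TDX VM without a paravisor: the TDVMCALL carries
 * the Hyper-V control word in R10, the parameters in RDX and R8, and the
 * hypercall status is returned in R11.
 */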
u64 hv_tdx_hypercall(u64 control, u64 param1, u64 param2)
{
        struct tdx_module_args args = { };

        args.r10 = control;
        args.rdx = param1;
        args.r8  = param2;

        (void)__tdx_hypercall(&args);

        return args.r11;
}

#else
static inline void hv_tdx_msr_write(u64 msr, u64 value) {}
static inline void hv_tdx_msr_read(u64 msr, u64 *value) {}
u64 hv_tdx_hypercall(u64 control, u64 param1, u64 param2) { return U64_MAX; }
#endif /* CONFIG_INTEL_TDX_GUEST */

#if defined(CONFIG_AMD_MEM_ENCRYPT) || defined(CONFIG_INTEL_TDX_GUEST)
void hv_ivm_msr_write(u64 msr, u64 value)
{
        if (!ms_hyperv.paravisor_present)
                return;

        if (hv_isolation_type_tdx())
                hv_tdx_msr_write(msr, value);
        else if (hv_isolation_type_snp())
                hv_ghcb_msr_write(msr, value);
}

void hv_ivm_msr_read(u64 msr, u64 *value)
{
        if (!ms_hyperv.paravisor_present)
                return;

        if (hv_isolation_type_tdx())
                hv_tdx_msr_read(msr, value);
        else if (hv_isolation_type_snp())
                hv_ghcb_msr_read(msr, value);
}

/*
 * Keep track of the PFN regions which were shared with the host. The access
 * must be revoked upon kexec/kdump (see hv_ivm_clear_host_access()).
 */
struct hv_enc_pfn_region {
        struct list_head list;
        u64 pfn;
        int count;
};

static LIST_HEAD(hv_list_enc);
static DEFINE_RAW_SPINLOCK(hv_list_enc_lock);

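/*
 * The list stores runs of consecutive shared PFNs as (pfn, count) pairs
 * rather than individual PFNs. For example, sharing PFNs 100, 101 and 102
 * results in a single {.pfn = 100, .count = 3} entry; unsharing 101 then
 * splits it into {.pfn = 100, .count = 1} and {.pfn = 102, .count = 1}.
 */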
static int hv_list_enc_add(const u64 *pfn_list, int count)
{
        struct hv_enc_pfn_region *ent;
        unsigned long flags;
        u64 pfn;
        int i;

        for (i = 0; i < count; i++) {
                pfn = pfn_list[i];

                raw_spin_lock_irqsave(&hv_list_enc_lock, flags);
                /* Check if the PFN already exists in some region first */
                list_for_each_entry(ent, &hv_list_enc, list) {
                        if ((ent->pfn <= pfn) && (ent->pfn + ent->count - 1 >= pfn))
                                /* Nothing to do - pfn is already in the list */
                                goto unlock_done;
                }

                /*
                 * Check if the PFN is adjacent to an existing region. Growing
                 * a region can make it adjacent to another one but merging is
                 * not (yet) implemented for simplicity. A PFN cannot be added
                 * to two regions to keep the logic in hv_list_enc_remove()
                 * correct.
                 */
                list_for_each_entry(ent, &hv_list_enc, list) {
                        if (ent->pfn + ent->count == pfn) {
                                /* Grow existing region up */
                                ent->count++;
                                goto unlock_done;
                        } else if (pfn + 1 == ent->pfn) {
                                /* Grow existing region down */
                                ent->pfn--;
                                ent->count++;
                                goto unlock_done;
                        }
                }
                raw_spin_unlock_irqrestore(&hv_list_enc_lock, flags);

                /* No adjacent region found -- create a new one */
                ent = kzalloc(sizeof(*ent), GFP_KERNEL);
                if (!ent)
                        return -ENOMEM;

                ent->pfn = pfn;
                ent->count = 1;

                raw_spin_lock_irqsave(&hv_list_enc_lock, flags);
                list_add(&ent->list, &hv_list_enc);

unlock_done:
                raw_spin_unlock_irqrestore(&hv_list_enc_lock, flags);
        }

        return 0;
}

static int hv_list_enc_remove(const u64 *pfn_list, int count)
{
        struct hv_enc_pfn_region *ent, *t;
        struct hv_enc_pfn_region new_region;
        unsigned long flags;
        u64 pfn;
        int i;

        for (i = 0; i < count; i++) {
                pfn = pfn_list[i];

                raw_spin_lock_irqsave(&hv_list_enc_lock, flags);
                list_for_each_entry_safe(ent, t, &hv_list_enc, list) {
                        if (pfn == ent->pfn + ent->count - 1) {
                                /* Removing tail pfn */
                                ent->count--;
                                if (!ent->count) {
                                        list_del(&ent->list);
                                        kfree(ent);
                                }
                                goto unlock_done;
                        } else if (pfn == ent->pfn) {
                                /* Removing head pfn */
                                ent->count--;
                                ent->pfn++;
                                if (!ent->count) {
                                        list_del(&ent->list);
                                        kfree(ent);
                                }
                                goto unlock_done;
                        } else if (pfn > ent->pfn && pfn < ent->pfn + ent->count - 1) {
                                /*
                                 * Removing a pfn in the middle. Cut off the tail
                                 * of the existing region and create a template for
                                 * the new one.
                                 */
                                new_region.pfn = pfn + 1;
                                new_region.count = ent->count - (pfn - ent->pfn + 1);
                                ent->count = pfn - ent->pfn;
                                goto unlock_split;
                        }

                }
unlock_done:
                raw_spin_unlock_irqrestore(&hv_list_enc_lock, flags);
                continue;

unlock_split:
                raw_spin_unlock_irqrestore(&hv_list_enc_lock, flags);

                ent = kzalloc(sizeof(*ent), GFP_KERNEL);
                if (!ent)
                        return -ENOMEM;

                ent->pfn = new_region.pfn;
                ent->count = new_region.count;

                raw_spin_lock_irqsave(&hv_list_enc_lock, flags);
                list_add(&ent->list, &hv_list_enc);
                raw_spin_unlock_irqrestore(&hv_list_enc_lock, flags);
        }

        return 0;
}

/* Stop new private<->shared conversions */
static void hv_vtom_kexec_begin(void)
{
        if (!IS_ENABLED(CONFIG_KEXEC_CORE))
                return;

        /*
         * The crash kernel reaches here with interrupts disabled: it can't
         * wait for conversions to finish.
         *
         * If a race happened, just report and proceed.
         */
        if (!set_memory_enc_stop_conversion())
                pr_warn("Failed to stop shared<->private conversions\n");
}

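/*
 * Revoke the host's access to every PFN region recorded in hv_list_enc.
 * The hypercall input page holds at most HV_MAX_MODIFY_GPA_REP_COUNT PFNs,
 * so larger regions are unshared in batches. Conversions were stopped in
 * hv_vtom_kexec_begin(), so the list can be walked without taking
 * hv_list_enc_lock.
 */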
static void hv_vtom_kexec_finish(void)
{
        struct hv_gpa_range_for_visibility *input;
        struct hv_enc_pfn_region *ent;
        unsigned long flags;
        u64 hv_status;
        int cur, i;

        local_irq_save(flags);
        input = *this_cpu_ptr(hyperv_pcpu_input_arg);

        if (unlikely(!input))
                goto out;

        list_for_each_entry(ent, &hv_list_enc, list) {
                for (i = 0, cur = 0; i < ent->count; i++) {
                        input->gpa_page_list[cur] = ent->pfn + i;
                        cur++;

                        if (cur == HV_MAX_MODIFY_GPA_REP_COUNT || i == ent->count - 1) {
                                input->partition_id = HV_PARTITION_ID_SELF;
                                input->host_visibility = VMBUS_PAGE_NOT_VISIBLE;
                                input->reserved0 = 0;
                                input->reserved1 = 0;
                                hv_status = hv_do_rep_hypercall(
                                        HVCALL_MODIFY_SPARSE_GPA_PAGE_HOST_VISIBILITY,
                                        cur, 0, input, NULL);
                                WARN_ON_ONCE(!hv_result_success(hv_status));
                                cur = 0;
                        }
                }

        }

out:
        local_irq_restore(flags);
}

/*
 * hv_mark_gpa_visibility - Set pages visible to the host via hypercall.
 *
 * In an Isolation VM, all guest memory is encrypted from the host, so the
 * guest must make memory visible to the host via a hypercall before
 * sharing it.
 */
static int hv_mark_gpa_visibility(u16 count, const u64 pfn[],
                           enum hv_mem_host_visibility visibility)
{
        struct hv_gpa_range_for_visibility *input;
        u64 hv_status;
        unsigned long flags;
        int ret;

        /* no-op if partition isolation is not enabled */
        if (!hv_is_isolation_supported())
                return 0;

        if (count > HV_MAX_MODIFY_GPA_REP_COUNT) {
                pr_err("Hyper-V: GPA count:%d exceeds supported:%lu\n", count,
                        HV_MAX_MODIFY_GPA_REP_COUNT);
                return -EINVAL;
        }

        if (visibility == VMBUS_PAGE_NOT_VISIBLE)
                ret = hv_list_enc_remove(pfn, count);
        else
                ret = hv_list_enc_add(pfn, count);
        if (ret)
                return ret;

        local_irq_save(flags);
        input = *this_cpu_ptr(hyperv_pcpu_input_arg);

        if (unlikely(!input)) {
                local_irq_restore(flags);
                return -EINVAL;
        }

        input->partition_id = HV_PARTITION_ID_SELF;
        input->host_visibility = visibility;
        input->reserved0 = 0;
        input->reserved1 = 0;
        memcpy((void *)input->gpa_page_list, pfn, count * sizeof(*pfn));
        hv_status = hv_do_rep_hypercall(
                        HVCALL_MODIFY_SPARSE_GPA_PAGE_HOST_VISIBILITY, count,
                        0, input, NULL);
        local_irq_restore(flags);

        if (hv_result_success(hv_status))
                return 0;

        if (visibility == VMBUS_PAGE_NOT_VISIBLE)
                ret = hv_list_enc_add(pfn, count);
        else
                ret = hv_list_enc_remove(pfn, count);
        /*
         * There's no good way to recover from -ENOMEM here, the accounting is
         * wrong either way.
         */
        WARN_ON_ONCE(ret);

        return -EFAULT;
}

/*
 * When transitioning memory between encrypted and decrypted, the caller
 * of set_memory_encrypted() or set_memory_decrypted() is responsible for
 * ensuring that the memory isn't in use and isn't referenced while the
 * transition is in progress.  The transition has multiple steps, and the
 * memory is in an inconsistent state until all steps are complete. A
 * reference while the state is inconsistent could result in an exception
 * that can't be cleanly fixed up.
 *
 * But the Linux kernel load_unaligned_zeropad() mechanism could cause a
 * stray reference that can't be prevented by the caller, so Linux has
 * specific code to handle this case. But when the #VC and #VE exceptions
 * are routed to a paravisor, the specific code doesn't work. To avoid this
 * problem, mark the pages as "not present" while the transition is in
 * progress. If load_unaligned_zeropad() causes a stray reference, a normal
 * page fault is generated instead of #VC or #VE, and the page-fault-based
 * handlers for load_unaligned_zeropad() resolve the reference.  When the
 * transition is complete, hv_vtom_set_host_visibility() marks the pages
 * as "present" again.
 */
static int hv_vtom_clear_present(unsigned long kbuffer, int pagecount, bool enc)
{
        return set_memory_np(kbuffer, pagecount);
}

/*
 * hv_vtom_set_host_visibility - Set specified memory visible to the host.
 *
 * In an Isolation VM, all guest memory is encrypted from the host, and the
 * guest must make memory visible to the host via a hypercall before sharing
 * it. This function is a wrapper around hv_mark_gpa_visibility() that takes
 * a memory base and size.
 */
static int hv_vtom_set_host_visibility(unsigned long kbuffer, int pagecount, bool enc)
{
        enum hv_mem_host_visibility visibility = enc ?
                        VMBUS_PAGE_NOT_VISIBLE : VMBUS_PAGE_VISIBLE_READ_WRITE;
        u64 *pfn_array;
        phys_addr_t paddr;
        int i, pfn, err;
        void *vaddr;
        int ret = 0;

        pfn_array = kmalloc(HV_HYP_PAGE_SIZE, GFP_KERNEL);
        if (!pfn_array) {
                ret = -ENOMEM;
                goto err_set_memory_p;
        }

        for (i = 0, pfn = 0; i < pagecount; i++) {
                /*
                 * Use slow_virt_to_phys() because the PRESENT bit has been
                 * temporarily cleared in the PTEs.  slow_virt_to_phys() works
                 * without the PRESENT bit while virt_to_hvpfn() or similar
                 * does not.
                 */
                vaddr = (void *)kbuffer + (i * HV_HYP_PAGE_SIZE);
                paddr = slow_virt_to_phys(vaddr);
                pfn_array[pfn] = paddr >> HV_HYP_PAGE_SHIFT;
                pfn++;

                if (pfn == HV_MAX_MODIFY_GPA_REP_COUNT || i == pagecount - 1) {
                        ret = hv_mark_gpa_visibility(pfn, pfn_array,
                                                     visibility);
                        if (ret)
                                goto err_free_pfn_array;
                        pfn = 0;
                }
        }

err_free_pfn_array:
        kfree(pfn_array);

err_set_memory_p:
        /*
         * Set the PTE PRESENT bits again to revert what hv_vtom_clear_present()
         * did. Do this even if there is an error earlier in this function in
         * order to avoid leaving the memory range in a "broken" state. Setting
         * the PRESENT bits shouldn't fail, but return an error if it does.
         */
        err = set_memory_p(kbuffer, pagecount);
        if (err && !ret)
                ret = err;

        return ret;
}

static bool hv_vtom_tlb_flush_required(bool private)
{
        /*
         * Since hv_vtom_clear_present() marks the PTEs as "not present"
         * and flushes the TLB, they can't be in the TLB. That makes the
         * flush controlled by this function redundant, so return "false".
         */
        return false;
}

static bool hv_vtom_cache_flush_required(void)
{
        return false;
}

static bool hv_is_private_mmio(u64 addr)
{
        /*
         * Hyper-V always provides a single IO-APIC in a guest VM.
         * When a paravisor is used, it is emulated by the paravisor
         * in the guest context and must be mapped private.
         */
        if (addr >= HV_IOAPIC_BASE_ADDRESS &&
            addr < (HV_IOAPIC_BASE_ADDRESS + PAGE_SIZE))
                return true;

        /* Same with a vTPM */
        if (addr >= VTPM_BASE_ADDRESS &&
            addr < (VTPM_BASE_ADDRESS + PAGE_SIZE))
                return true;

        return false;
}

void __init hv_vtom_init(void)
{
        enum hv_isolation_type type = hv_get_isolation_type();

        switch (type) {
        case HV_ISOLATION_TYPE_VBS:
                fallthrough;
        /*
         * By design, a VM using vTOM doesn't see the SEV setting,
         * so SEV initialization is bypassed and sev_status isn't set.
         * Set it here to indicate a vTOM VM.
         *
         * Note: if CONFIG_AMD_MEM_ENCRYPT is not set, sev_status is
         * defined as 0ULL, to which we can't assign a value.
         */
#ifdef CONFIG_AMD_MEM_ENCRYPT
        case HV_ISOLATION_TYPE_SNP:
                sev_status = MSR_AMD64_SNP_VTOM;
                cc_vendor = CC_VENDOR_AMD;
                break;
#endif

        case HV_ISOLATION_TYPE_TDX:
                cc_vendor = CC_VENDOR_INTEL;
                break;

        default:
                panic("hv_vtom_init: unsupported isolation type %d\n", type);
        }

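        /*
         * With vTOM, the shared GPA boundary bit selects between the two
         * views of memory: addresses with the bit set are shared with the
         * host, addresses below it are private. Record the bit as the CoCo
         * encryption mask and trim physical_mask so the guest only forms
         * physical addresses below the boundary.
         */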
        cc_set_mask(ms_hyperv.shared_gpa_boundary);
        physical_mask &= ms_hyperv.shared_gpa_boundary - 1;

        x86_platform.hyper.is_private_mmio = hv_is_private_mmio;
        x86_platform.guest.enc_cache_flush_required = hv_vtom_cache_flush_required;
        x86_platform.guest.enc_tlb_flush_required = hv_vtom_tlb_flush_required;
        x86_platform.guest.enc_status_change_prepare = hv_vtom_clear_present;
        x86_platform.guest.enc_status_change_finish = hv_vtom_set_host_visibility;
        x86_platform.guest.enc_kexec_begin = hv_vtom_kexec_begin;
        x86_platform.guest.enc_kexec_finish = hv_vtom_kexec_finish;

        /* Set WB as the default cache mode. */
        guest_force_mtrr_state(NULL, 0, MTRR_TYPE_WRBACK);
}

#endif /* defined(CONFIG_AMD_MEM_ENCRYPT) || defined(CONFIG_INTEL_TDX_GUEST) */

enum hv_isolation_type hv_get_isolation_type(void)
{
        if (!(ms_hyperv.priv_high & HV_ISOLATION))
                return HV_ISOLATION_TYPE_NONE;
        return FIELD_GET(HV_ISOLATION_TYPE, ms_hyperv.isolation_config_b);
}
EXPORT_SYMBOL_GPL(hv_get_isolation_type);

/*
 * hv_is_isolation_supported - Check if the system runs in a Hyper-V
 * isolation VM.
 */
bool hv_is_isolation_supported(void)
{
        if (!cpu_feature_enabled(X86_FEATURE_HYPERVISOR))
                return false;

        if (!hypervisor_is_type(X86_HYPER_MS_HYPERV))
                return false;

        return hv_get_isolation_type() != HV_ISOLATION_TYPE_NONE;
}

DEFINE_STATIC_KEY_FALSE(isolation_type_snp);

/*
 * hv_isolation_type_snp - Check if the system runs in an AMD SEV-SNP based
 * isolation VM.
 */
bool hv_isolation_type_snp(void)
{
        return static_branch_unlikely(&isolation_type_snp);
}

DEFINE_STATIC_KEY_FALSE(isolation_type_tdx);
/*
 * hv_isolation_type_tdx - Check if the system runs in an Intel TDX based
 * isolation VM.
 */
bool hv_isolation_type_tdx(void)
{
        return static_branch_unlikely(&isolation_type_tdx);
}