/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <drm/drm_drv.h>

#include "amdgpu.h"
#include "amdgpu_vcn.h"
#include "soc15.h"
#include "soc15d.h"
#include "amdgpu_pm.h"
#include "amdgpu_psp.h"
#include "mmsch_v2_0.h"
#include "vcn_v2_0.h"

#include "vcn/vcn_2_0_0_offset.h"
#include "vcn/vcn_2_0_0_sh_mask.h"
#include "ivsrcid/vcn/irqsrcs_vcn_2_0.h"

#define VCN_VID_SOC_ADDRESS_2_0                                 0x1fa00
#define VCN1_VID_SOC_ADDRESS_3_0                                0x48200
#define VCN1_AON_SOC_ADDRESS_3_0                                0x48000

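/*
 * Firmware-internal register offsets.  These index the VCPU's own register
 * space and are paired with the MMIO ("external") offsets set up in sw_init,
 * so that common VCN code can program registers either through decode ring
 * packets or directly over MMIO.
 */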
#define mmUVD_CONTEXT_ID_INTERNAL_OFFSET                        0x1fd
#define mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET                    0x503
#define mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET                  0x504
#define mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET                  0x505
#define mmUVD_NO_OP_INTERNAL_OFFSET                             0x53f
#define mmUVD_GP_SCRATCH8_INTERNAL_OFFSET                       0x54a
#define mmUVD_SCRATCH9_INTERNAL_OFFSET                          0xc01d

#define mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET                   0x1e1
#define mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET         0x5a6
#define mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET          0x5a7
#define mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET                       0x1e2

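/* Registers captured by the debug register dump set up in sw_init */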
static const struct amdgpu_hwip_reg_entry vcn_reg_list_2_0[] = {
        SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_POWER_STATUS),
        SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_STATUS),
        SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_CONTEXT_ID),
        SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_CONTEXT_ID2),
        SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_GPCOM_VCPU_DATA0),
        SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_GPCOM_VCPU_DATA1),
        SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_GPCOM_VCPU_CMD),
        SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI),
        SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO),
        SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI2),
        SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO2),
        SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI3),
        SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO3),
        SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI4),
        SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO4),
        SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR),
        SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR),
        SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR2),
        SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR2),
        SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR3),
        SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR3),
        SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR4),
        SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR4),
        SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE),
        SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE2),
        SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE3),
        SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE4),
        SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_PGFSM_CONFIG),
        SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_PGFSM_STATUS),
        SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_LMA_CTL),
        SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_LMA_DATA),
        SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_LMA_MASK),
        SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_PAUSE)
};

static void vcn_v2_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_0_set_enc_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_0_set_irq_funcs(struct amdgpu_device *adev);
static int vcn_v2_0_set_pg_state(struct amdgpu_vcn_inst *vinst,
                                 enum amd_powergating_state state);
static int vcn_v2_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
                                   struct dpg_pause_state *new_state);
static int vcn_v2_0_start_sriov(struct amdgpu_device *adev);
static int vcn_v2_0_reset(struct amdgpu_vcn_inst *vinst);

/**
 * vcn_v2_0_early_init - set function pointers and load microcode
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Set ring and irq function pointers
 * Load microcode from filesystem
 */
static int vcn_v2_0_early_init(struct amdgpu_ip_block *ip_block)
{
        struct amdgpu_device *adev = ip_block->adev;

        if (amdgpu_sriov_vf(adev))
                adev->vcn.inst[0].num_enc_rings = 1;
        else
                adev->vcn.inst[0].num_enc_rings = 2;

        adev->vcn.inst->set_pg_state = vcn_v2_0_set_pg_state;
        vcn_v2_0_set_dec_ring_funcs(adev);
        vcn_v2_0_set_enc_ring_funcs(adev);
        vcn_v2_0_set_irq_funcs(adev);

        return amdgpu_vcn_early_init(adev, 0);
}

/**
 * vcn_v2_0_sw_init - sw init for VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Load firmware and perform sw initialization
 */
static int vcn_v2_0_sw_init(struct amdgpu_ip_block *ip_block)
{
        struct amdgpu_ring *ring;
        int i, r;
        struct amdgpu_device *adev = ip_block->adev;
        struct amdgpu_fw_shared *fw_shared;

        /* VCN DEC TRAP */
        r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
                              VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT,
                              &adev->vcn.inst->irq);
        if (r)
                return r;

        /* VCN ENC TRAP */
        for (i = 0; i < adev->vcn.inst[0].num_enc_rings; ++i) {
                r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
                                      i + VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE,
                                      &adev->vcn.inst->irq);
                if (r)
                        return r;
        }

        r = amdgpu_vcn_sw_init(adev, 0);
        if (r)
                return r;

        amdgpu_vcn_setup_ucode(adev, 0);

        r = amdgpu_vcn_resume(adev, 0);
        if (r)
                return r;

        ring = &adev->vcn.inst->ring_dec;

        ring->use_doorbell = true;
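        /*
         * Doorbell slots for VCN are allocated in 64-bit units, while
         * ring->doorbell_index counts 32-bit doorbells, hence the shift.
         */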
        ring->doorbell_index = adev->doorbell_index.vcn.vcn_ring0_1 << 1;
        ring->vm_hub = AMDGPU_MMHUB0(0);

        snprintf(ring->name, sizeof(ring->name), "vcn_dec");
        r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
                             AMDGPU_RING_PRIO_DEFAULT, NULL);
        if (r)
                return r;

        adev->vcn.inst[0].internal.context_id = mmUVD_CONTEXT_ID_INTERNAL_OFFSET;
        adev->vcn.inst[0].internal.ib_vmid = mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET;
        adev->vcn.inst[0].internal.ib_bar_low = mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET;
        adev->vcn.inst[0].internal.ib_bar_high = mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET;
        adev->vcn.inst[0].internal.ib_size = mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET;
        adev->vcn.inst[0].internal.gp_scratch8 = mmUVD_GP_SCRATCH8_INTERNAL_OFFSET;

        adev->vcn.inst[0].internal.scratch9 = mmUVD_SCRATCH9_INTERNAL_OFFSET;
        adev->vcn.inst->external.scratch9 = SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9);
        adev->vcn.inst[0].internal.data0 = mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET;
        adev->vcn.inst->external.data0 = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0);
        adev->vcn.inst[0].internal.data1 = mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET;
        adev->vcn.inst->external.data1 = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1);
        adev->vcn.inst[0].internal.cmd = mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET;
        adev->vcn.inst->external.cmd = SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD);
        adev->vcn.inst[0].internal.nop = mmUVD_NO_OP_INTERNAL_OFFSET;
        adev->vcn.inst->external.nop = SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP);

        for (i = 0; i < adev->vcn.inst[0].num_enc_rings; ++i) {
                enum amdgpu_ring_priority_level hw_prio = amdgpu_vcn_get_enc_ring_prio(i);

                ring = &adev->vcn.inst->ring_enc[i];
                ring->use_doorbell = true;
                ring->vm_hub = AMDGPU_MMHUB0(0);
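                /*
                 * On bare metal the encode doorbells follow the decode
                 * ring's two 32-bit slots; under SR-IOV the decode queue is
                 * not used (see hw_init), so they start one slot earlier.
                 */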
                if (!amdgpu_sriov_vf(adev))
                        ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + i;
                else
                        ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1 + i;
                snprintf(ring->name, sizeof(ring->name), "vcn_enc%d", i);
                r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
                                     hw_prio, NULL);
                if (r)
                        return r;
        }

        adev->vcn.inst[0].pause_dpg_mode = vcn_v2_0_pause_dpg_mode;
        adev->vcn.inst[0].reset = vcn_v2_0_reset;

        adev->vcn.supported_reset =
                amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
        if (!amdgpu_sriov_vf(adev))
                adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;

        r = amdgpu_virt_alloc_mm_table(adev);
        if (r)
                return r;

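        /*
         * Advertise multi-queue support in the fw shared memory; the
         * per-queue *_queue_mode reset flags toggled around the ring
         * programming below rely on it.
         */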
        fw_shared = adev->vcn.inst->fw_shared.cpu_addr;
        fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_VCN_MULTI_QUEUE_FLAG);

        if (amdgpu_vcnfw_log)
                amdgpu_vcn_fwlog_init(adev->vcn.inst);

        r = amdgpu_vcn_reg_dump_init(adev, vcn_reg_list_2_0, ARRAY_SIZE(vcn_reg_list_2_0));
        if (r)
                return r;

        r = amdgpu_vcn_sysfs_reset_mask_init(adev);
        if (r)
                return r;

        return 0;
}

/**
 * vcn_v2_0_sw_fini - sw fini for VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Suspend VCN and free up sw allocations
 */
static int vcn_v2_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
        int r, idx;
        struct amdgpu_device *adev = ip_block->adev;
        struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr;

        if (drm_dev_enter(adev_to_drm(adev), &idx)) {
                fw_shared->present_flag_0 = 0;
                drm_dev_exit(idx);
        }

        amdgpu_virt_free_mm_table(adev);

        r = amdgpu_vcn_suspend(adev, 0);
        if (r)
                return r;

        amdgpu_vcn_sysfs_reset_mask_fini(adev);

        amdgpu_vcn_sw_fini(adev, 0);

        return 0;
}

/**
 * vcn_v2_0_hw_init - start and test VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int vcn_v2_0_hw_init(struct amdgpu_ip_block *ip_block)
{
        struct amdgpu_device *adev = ip_block->adev;
        struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
        int i, r;

        adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
                                             ring->doorbell_index, 0);

        if (amdgpu_sriov_vf(adev))
                vcn_v2_0_start_sriov(adev);

        r = amdgpu_ring_test_helper(ring);
        if (r)
                return r;

        /* Disable VCN decode for SR-IOV */
        if (amdgpu_sriov_vf(adev))
                ring->sched.ready = false;

        for (i = 0; i < adev->vcn.inst[0].num_enc_rings; ++i) {
                ring = &adev->vcn.inst->ring_enc[i];
                r = amdgpu_ring_test_helper(ring);
                if (r)
                        return r;
        }

        return 0;
}

/**
 * vcn_v2_0_hw_fini - stop the hardware block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Stop the VCN block and mark the rings as no longer ready
 */
static int vcn_v2_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
        struct amdgpu_device *adev = ip_block->adev;
        struct amdgpu_vcn_inst *vinst = adev->vcn.inst;

        cancel_delayed_work_sync(&vinst->idle_work);

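        /*
         * Gate the block if DPG is in use, or if it is still un-gated and
         * the firmware reports a non-idle status.
         */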
        if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
            (vinst->cur_state != AMD_PG_STATE_GATE &&
             RREG32_SOC15(VCN, 0, mmUVD_STATUS)))
                vinst->set_pg_state(vinst, AMD_PG_STATE_GATE);

        return 0;
}

/**
 * vcn_v2_0_suspend - suspend VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * HW fini and suspend VCN block
 */
static int vcn_v2_0_suspend(struct amdgpu_ip_block *ip_block)
{
        int r;

        r = vcn_v2_0_hw_fini(ip_block);
        if (r)
                return r;

        r = amdgpu_vcn_suspend(ip_block->adev, 0);

        return r;
}

/**
 * vcn_v2_0_resume - resume VCN block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Resume firmware and re-initialize the VCN block hardware
 */
static int vcn_v2_0_resume(struct amdgpu_ip_block *ip_block)
{
        int r;

        r = amdgpu_vcn_resume(ip_block->adev, 0);
        if (r)
                return r;

        r = vcn_v2_0_hw_init(ip_block);

        return r;
}

/**
 * vcn_v2_0_mc_resume - memory controller programming
 *
 * @vinst: Pointer to the VCN instance structure
 *
 * Let the VCN memory controller know its offsets
 */
static void vcn_v2_0_mc_resume(struct amdgpu_vcn_inst *vinst)
{
        struct amdgpu_device *adev = vinst->adev;
        uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[0].fw->size + 4);
        uint32_t offset;

        if (amdgpu_sriov_vf(adev))
                return;

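        /*
         * The VCPU sees three cached windows backed by a single BO:
         * firmware image, stack and context, in that order.  When the PSP
         * has loaded the firmware into the TMR region, window 0 points
         * there and the BO starts directly with the stack (offset 0);
         * otherwise the firmware sits at the start of the BO and the stack
         * follows it.
         */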
        /* cache window 0: fw */
        if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
                WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
                        (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo));
                WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
                        (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi));
                WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, 0);
                offset = 0;
        } else {
                WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
                        lower_32_bits(adev->vcn.inst->gpu_addr));
                WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
                        upper_32_bits(adev->vcn.inst->gpu_addr));
                offset = size;
                WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0,
                        AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
        }

        WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size);

        /* cache window 1: stack */
        WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
                lower_32_bits(adev->vcn.inst->gpu_addr + offset));
        WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
                upper_32_bits(adev->vcn.inst->gpu_addr + offset));
        WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, 0);
        WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);

        /* cache window 2: context */
        WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
                lower_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
        WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
                upper_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
        WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0);
        WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);

        /* non-cache window */
        WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW,
                lower_32_bits(adev->vcn.inst->fw_shared.gpu_addr));
        WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH,
                upper_32_bits(adev->vcn.inst->fw_shared.gpu_addr));
        WREG32_SOC15(UVD, 0, mmUVD_VCPU_NONCACHE_OFFSET0, 0);
        WREG32_SOC15(UVD, 0, mmUVD_VCPU_NONCACHE_SIZE0,
                AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)));

        WREG32_SOC15(UVD, 0, mmUVD_GFX10_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
}

static void vcn_v2_0_mc_resume_dpg_mode(struct amdgpu_vcn_inst *vinst,
                                        bool indirect)
{
        struct amdgpu_device *adev = vinst->adev;
        uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[0].fw->size + 4);
        uint32_t offset;

        /* cache window 0: fw */
        if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
                if (!indirect) {
                        WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
                                UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
                                (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo), 0, indirect);
                        WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
                                UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
                                (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi), 0, indirect);
                        WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
                                UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
                } else {
                        WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
                                UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
                        WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
                                UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
                        WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
                                UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
                }
                offset = 0;
        } else {
                WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
                        UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
                        lower_32_bits(adev->vcn.inst->gpu_addr), 0, indirect);
                WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
                        UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
                        upper_32_bits(adev->vcn.inst->gpu_addr), 0, indirect);
                offset = size;
                WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
                        UVD, 0, mmUVD_VCPU_CACHE_OFFSET0),
                        AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect);
        }

        if (!indirect)
                WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
                        UVD, 0, mmUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
        else
                WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
                        UVD, 0, mmUVD_VCPU_CACHE_SIZE0), 0, 0, indirect);

        /* cache window 1: stack */
        if (!indirect) {
                WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
                        UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
                        lower_32_bits(adev->vcn.inst->gpu_addr + offset), 0, indirect);
                WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
                        UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
                        upper_32_bits(adev->vcn.inst->gpu_addr + offset), 0, indirect);
                WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
                        UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
        } else {
                WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
                        UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect);
                WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
                        UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect);
                WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
                        UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
        }
        WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
                UVD, 0, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect);

        /* cache window 2: context */
        WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
                UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
                lower_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
        WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
                UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
                upper_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
        WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
                UVD, 0, mmUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
        WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
                UVD, 0, mmUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect);

        /* non-cache window */
        WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
                UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
                lower_32_bits(adev->vcn.inst->fw_shared.gpu_addr), 0, indirect);
        WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
                UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
                upper_32_bits(adev->vcn.inst->fw_shared.gpu_addr), 0, indirect);
        WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
                UVD, 0, mmUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
        WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
                UVD, 0, mmUVD_VCPU_NONCACHE_SIZE0),
                AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)), 0, indirect);

        /* VCN global tiling registers */
        WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
                UVD, 0, mmUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
}

/**
 * vcn_v2_0_disable_clock_gating - disable VCN clock gating
 *
 * @vinst: VCN instance
 *
 * Disable clock gating for VCN block
 */
static void vcn_v2_0_disable_clock_gating(struct amdgpu_vcn_inst *vinst)
{
        struct amdgpu_device *adev = vinst->adev;
        uint32_t data;

        if (amdgpu_sriov_vf(adev))
                return;

        /* UVD disable CGC */
        data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
        if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
                data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
        else
                data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
        data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
        data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
        WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);

        data = RREG32_SOC15(VCN, 0, mmUVD_CGC_GATE);
        data &= ~(UVD_CGC_GATE__SYS_MASK
                | UVD_CGC_GATE__UDEC_MASK
                | UVD_CGC_GATE__MPEG2_MASK
                | UVD_CGC_GATE__REGS_MASK
                | UVD_CGC_GATE__RBC_MASK
                | UVD_CGC_GATE__LMI_MC_MASK
                | UVD_CGC_GATE__LMI_UMC_MASK
                | UVD_CGC_GATE__IDCT_MASK
                | UVD_CGC_GATE__MPRD_MASK
                | UVD_CGC_GATE__MPC_MASK
                | UVD_CGC_GATE__LBSI_MASK
                | UVD_CGC_GATE__LRBBM_MASK
                | UVD_CGC_GATE__UDEC_RE_MASK
                | UVD_CGC_GATE__UDEC_CM_MASK
                | UVD_CGC_GATE__UDEC_IT_MASK
                | UVD_CGC_GATE__UDEC_DB_MASK
                | UVD_CGC_GATE__UDEC_MP_MASK
                | UVD_CGC_GATE__WCB_MASK
                | UVD_CGC_GATE__VCPU_MASK
                | UVD_CGC_GATE__SCPU_MASK);
        WREG32_SOC15(VCN, 0, mmUVD_CGC_GATE, data);

        data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
        data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK
                | UVD_CGC_CTRL__UDEC_CM_MODE_MASK
                | UVD_CGC_CTRL__UDEC_IT_MODE_MASK
                | UVD_CGC_CTRL__UDEC_DB_MODE_MASK
                | UVD_CGC_CTRL__UDEC_MP_MODE_MASK
                | UVD_CGC_CTRL__SYS_MODE_MASK
                | UVD_CGC_CTRL__UDEC_MODE_MASK
                | UVD_CGC_CTRL__MPEG2_MODE_MASK
                | UVD_CGC_CTRL__REGS_MODE_MASK
                | UVD_CGC_CTRL__RBC_MODE_MASK
                | UVD_CGC_CTRL__LMI_MC_MODE_MASK
                | UVD_CGC_CTRL__LMI_UMC_MODE_MASK
                | UVD_CGC_CTRL__IDCT_MODE_MASK
                | UVD_CGC_CTRL__MPRD_MODE_MASK
                | UVD_CGC_CTRL__MPC_MODE_MASK
                | UVD_CGC_CTRL__LBSI_MODE_MASK
                | UVD_CGC_CTRL__LRBBM_MODE_MASK
                | UVD_CGC_CTRL__WCB_MODE_MASK
                | UVD_CGC_CTRL__VCPU_MODE_MASK
                | UVD_CGC_CTRL__SCPU_MODE_MASK);
        WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);

        /* turn on SUVD clock gating */
        data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_GATE);
        data |= (UVD_SUVD_CGC_GATE__SRE_MASK
                | UVD_SUVD_CGC_GATE__SIT_MASK
                | UVD_SUVD_CGC_GATE__SMP_MASK
                | UVD_SUVD_CGC_GATE__SCM_MASK
                | UVD_SUVD_CGC_GATE__SDB_MASK
                | UVD_SUVD_CGC_GATE__SRE_H264_MASK
                | UVD_SUVD_CGC_GATE__SRE_HEVC_MASK
                | UVD_SUVD_CGC_GATE__SIT_H264_MASK
                | UVD_SUVD_CGC_GATE__SIT_HEVC_MASK
                | UVD_SUVD_CGC_GATE__SCM_H264_MASK
                | UVD_SUVD_CGC_GATE__SCM_HEVC_MASK
                | UVD_SUVD_CGC_GATE__SDB_H264_MASK
                | UVD_SUVD_CGC_GATE__SDB_HEVC_MASK
                | UVD_SUVD_CGC_GATE__SCLR_MASK
                | UVD_SUVD_CGC_GATE__UVD_SC_MASK
                | UVD_SUVD_CGC_GATE__ENT_MASK
                | UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK
                | UVD_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK
                | UVD_SUVD_CGC_GATE__SITE_MASK
                | UVD_SUVD_CGC_GATE__SRE_VP9_MASK
                | UVD_SUVD_CGC_GATE__SCM_VP9_MASK
                | UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK
                | UVD_SUVD_CGC_GATE__SDB_VP9_MASK
                | UVD_SUVD_CGC_GATE__IME_HEVC_MASK);
        WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_GATE, data);

        data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL);
        data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
                | UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
                | UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
                | UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
                | UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
                | UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
                | UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
                | UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
                | UVD_SUVD_CGC_CTRL__IME_MODE_MASK
                | UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
        WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL, data);
}

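/*
 * Program sw-controlled clock gating for DPG mode.  Writes go through
 * WREG32_SOC15_DPG_MODE, which either hits the registers directly or, for
 * indirect mode, stages them in the DPG scratch buffer for the PSP to apply
 * in one batch (see vcn_v2_0_start_dpg_mode()).
 */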
static void vcn_v2_0_clock_gating_dpg_mode(struct amdgpu_vcn_inst *vinst,
                uint8_t sram_sel, uint8_t indirect)
{
        struct amdgpu_device *adev = vinst->adev;
        uint32_t reg_data = 0;

        /* enable sw clock gating control */
        if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
                reg_data = 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
        else
                reg_data = 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
        reg_data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
        reg_data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
        reg_data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
                 UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
                 UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
                 UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
                 UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
                 UVD_CGC_CTRL__SYS_MODE_MASK |
                 UVD_CGC_CTRL__UDEC_MODE_MASK |
                 UVD_CGC_CTRL__MPEG2_MODE_MASK |
                 UVD_CGC_CTRL__REGS_MODE_MASK |
                 UVD_CGC_CTRL__RBC_MODE_MASK |
                 UVD_CGC_CTRL__LMI_MC_MODE_MASK |
                 UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
                 UVD_CGC_CTRL__IDCT_MODE_MASK |
                 UVD_CGC_CTRL__MPRD_MODE_MASK |
                 UVD_CGC_CTRL__MPC_MODE_MASK |
                 UVD_CGC_CTRL__LBSI_MODE_MASK |
                 UVD_CGC_CTRL__LRBBM_MODE_MASK |
                 UVD_CGC_CTRL__WCB_MODE_MASK |
                 UVD_CGC_CTRL__VCPU_MODE_MASK |
                 UVD_CGC_CTRL__SCPU_MODE_MASK);
        WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
                UVD, 0, mmUVD_CGC_CTRL), reg_data, sram_sel, indirect);

        /* turn off clock gating */
        WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
                UVD, 0, mmUVD_CGC_GATE), 0, sram_sel, indirect);

        /* turn on SUVD clock gating */
        WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
                UVD, 0, mmUVD_SUVD_CGC_GATE), 1, sram_sel, indirect);

        /* turn on sw mode in UVD_SUVD_CGC_CTRL */
        WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
                UVD, 0, mmUVD_SUVD_CGC_CTRL), 0, sram_sel, indirect);
}

/**
 * vcn_v2_0_enable_clock_gating - enable VCN clock gating
 *
 * @vinst: VCN instance
 *
 * Enable clock gating for VCN block
 */
static void vcn_v2_0_enable_clock_gating(struct amdgpu_vcn_inst *vinst)
{
        struct amdgpu_device *adev = vinst->adev;
        uint32_t data = 0;

        if (amdgpu_sriov_vf(adev))
                return;

        /* enable UVD CGC */
        data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
        if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
                data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
        else
                data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
        data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
        data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
        WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);

        data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
        data |= (UVD_CGC_CTRL__UDEC_RE_MODE_MASK
                | UVD_CGC_CTRL__UDEC_CM_MODE_MASK
                | UVD_CGC_CTRL__UDEC_IT_MODE_MASK
                | UVD_CGC_CTRL__UDEC_DB_MODE_MASK
                | UVD_CGC_CTRL__UDEC_MP_MODE_MASK
                | UVD_CGC_CTRL__SYS_MODE_MASK
                | UVD_CGC_CTRL__UDEC_MODE_MASK
                | UVD_CGC_CTRL__MPEG2_MODE_MASK
                | UVD_CGC_CTRL__REGS_MODE_MASK
                | UVD_CGC_CTRL__RBC_MODE_MASK
                | UVD_CGC_CTRL__LMI_MC_MODE_MASK
                | UVD_CGC_CTRL__LMI_UMC_MODE_MASK
                | UVD_CGC_CTRL__IDCT_MODE_MASK
                | UVD_CGC_CTRL__MPRD_MODE_MASK
                | UVD_CGC_CTRL__MPC_MODE_MASK
                | UVD_CGC_CTRL__LBSI_MODE_MASK
                | UVD_CGC_CTRL__LRBBM_MODE_MASK
                | UVD_CGC_CTRL__WCB_MODE_MASK
                | UVD_CGC_CTRL__VCPU_MODE_MASK
                | UVD_CGC_CTRL__SCPU_MODE_MASK);
        WREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL, data);

        data = RREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL);
        data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
                | UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
                | UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
                | UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
                | UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
                | UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
                | UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
                | UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
                | UVD_SUVD_CGC_CTRL__IME_MODE_MASK
                | UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
        WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL, data);
}

static void vcn_v2_0_disable_static_power_gating(struct amdgpu_vcn_inst *vinst)
{
        struct amdgpu_device *adev = vinst->adev;
        uint32_t data = 0;

        if (amdgpu_sriov_vf(adev))
                return;

        if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
                data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
                        | 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
                        | 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
                        | 2 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
                        | 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
                        | 2 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
                        | 2 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
                        | 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
                        | 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
                        | 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT);

                WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);
                SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS,
                        UVD_PGFSM_STATUS__UVDM_UVDU_PWR_ON_2_0, 0xFFFFF);
        } else {
                data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
                        | 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
                        | 1 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
                        | 1 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
                        | 1 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
                        | 1 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
                        | 1 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
                        | 1 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
                        | 1 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
                        | 1 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT);
                WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);
                SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, 0,  0xFFFFF);
        }

        /* the polls above confirm UVDM_PWR_STATUS and UVDU_PWR_STATUS
         * are 0 (powered on) */

        data = RREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS);
        data &= ~0x103;
        if (adev->pg_flags & AMD_PG_SUPPORT_VCN)
                data |= UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON |
                        UVD_POWER_STATUS__UVD_PG_EN_MASK;

        WREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS, data);
}

static void vcn_v2_0_enable_static_power_gating(struct amdgpu_vcn_inst *vinst)
{
        struct amdgpu_device *adev = vinst->adev;
        uint32_t data = 0;

        if (amdgpu_sriov_vf(adev))
                return;

        if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
                /* Before power off, this indicator has to be turned on */
                data = RREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS);
                data &= ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK;
                data |= UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF;
                WREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS, data);


                data = (2 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
                        | 2 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
                        | 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
                        | 2 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT
                        | 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
                        | 2 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT
                        | 2 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT
                        | 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
                        | 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
                        | 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT);

                WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);

                data = (2 << UVD_PGFSM_STATUS__UVDM_PWR_STATUS__SHIFT
                        | 2 << UVD_PGFSM_STATUS__UVDU_PWR_STATUS__SHIFT
                        | 2 << UVD_PGFSM_STATUS__UVDF_PWR_STATUS__SHIFT
                        | 2 << UVD_PGFSM_STATUS__UVDC_PWR_STATUS__SHIFT
                        | 2 << UVD_PGFSM_STATUS__UVDB_PWR_STATUS__SHIFT
                        | 2 << UVD_PGFSM_STATUS__UVDIL_PWR_STATUS__SHIFT
                        | 2 << UVD_PGFSM_STATUS__UVDIR_PWR_STATUS__SHIFT
                        | 2 << UVD_PGFSM_STATUS__UVDTD_PWR_STATUS__SHIFT
                        | 2 << UVD_PGFSM_STATUS__UVDTE_PWR_STATUS__SHIFT
                        | 2 << UVD_PGFSM_STATUS__UVDE_PWR_STATUS__SHIFT);
                SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, data, 0xFFFFF);
        }
}

static int vcn_v2_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect)
{
        struct amdgpu_device *adev = vinst->adev;
        struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr;
        struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
        uint32_t rb_bufsz, tmp;
        int ret;

        vcn_v2_0_enable_static_power_gating(vinst);

        /* enable dynamic power gating mode */
        tmp = RREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS);
        tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
        tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK;
        WREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS, tmp);

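        /*
         * In indirect mode the register writes below are staged in the DPG
         * scratch buffer and later handed to the PSP in one batch via
         * amdgpu_vcn_psp_update_sram().
         */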
        if (indirect)
                adev->vcn.inst->dpg_sram_curr_addr = (uint32_t *)adev->vcn.inst->dpg_sram_cpu_addr;

        /* enable clock gating */
        vcn_v2_0_clock_gating_dpg_mode(vinst, 0, indirect);

        /* enable VCPU clock */
        tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
        tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
        tmp |= UVD_VCPU_CNTL__MIF_WR_LOW_THRESHOLD_BP_MASK;
        WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
                UVD, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect);

        /* disable master interrupt */
        WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
                UVD, 0, mmUVD_MASTINT_EN), 0, 0, indirect);

        /* setup mmUVD_LMI_CTRL */
        tmp = (UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
                UVD_LMI_CTRL__REQ_MODE_MASK |
                UVD_LMI_CTRL__CRC_RESET_MASK |
                UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
                UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
                UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
                (8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
                0x00100000L);
        WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
                UVD, 0, mmUVD_LMI_CTRL), tmp, 0, indirect);

        WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
                UVD, 0, mmUVD_MPC_CNTL),
                0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0, indirect);

        WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
                UVD, 0, mmUVD_MPC_SET_MUXA0),
                ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
                 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
                 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
                 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0, indirect);

        WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
                UVD, 0, mmUVD_MPC_SET_MUXB0),
                ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
                 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
                 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
                 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0, indirect);

        WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
                UVD, 0, mmUVD_MPC_SET_MUX),
                ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
                 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
                 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect);

        vcn_v2_0_mc_resume_dpg_mode(vinst, indirect);

        WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
                UVD, 0, mmUVD_REG_XX_MASK), 0x10, 0, indirect);
        WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
                UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK), 0x3, 0, indirect);

        /* release VCPU reset to boot */
        WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
                UVD, 0, mmUVD_SOFT_RESET), 0, 0, indirect);

        /* enable LMI MC and UMC channels */
        WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
                UVD, 0, mmUVD_LMI_CTRL2),
                0x1F << UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM__SHIFT, 0, indirect);

        /* enable master interrupt */
        WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
                UVD, 0, mmUVD_MASTINT_EN),
                UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);

        if (indirect) {
                ret = amdgpu_vcn_psp_update_sram(adev, 0, 0);
                if (ret) {
                        dev_err(adev->dev, "vcn sram load failed %d\n", ret);
                        return ret;
                }
        }

        /* force RBC into idle state */
        rb_bufsz = order_base_2(ring->ring_size);
        tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
        WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp);

        /* Stall DPG before WPTR/RPTR reset */
        WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS),
                UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
                ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
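        /*
         * Flag the decode queue as being reset so the firmware picks up the
         * new ring state programmed below.
         */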
        fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;

        /* set the write pointer delay */
        WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR_CNTL, 0);

        /* set the wb address */
        WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR_ADDR,
                (upper_32_bits(ring->gpu_addr) >> 2));

        /* program the RB_BASE for ring buffer */
        WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
                lower_32_bits(ring->gpu_addr));
        WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
                upper_32_bits(ring->gpu_addr));

        /* Initialize the ring buffer's read and write pointers */
        WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR, 0);

        WREG32_SOC15(UVD, 0, mmUVD_SCRATCH2, 0);

        ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
        WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
                lower_32_bits(ring->wptr));

        fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET;
        /* Unstall DPG */
        WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS),
                0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);

        /* Keeping one read-back to ensure all register writes are done,
         * otherwise it may introduce race conditions.
         */
        RREG32_SOC15(UVD, 0, mmUVD_STATUS);

        return 0;
}

static int vcn_v2_0_start(struct amdgpu_vcn_inst *vinst)
{
        struct amdgpu_device *adev = vinst->adev;
        struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr;
        struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
        uint32_t rb_bufsz, tmp;
        uint32_t lmi_swap_cntl;
        int i, j, r;

        if (adev->pm.dpm_enabled)
                amdgpu_dpm_enable_vcn(adev, true, 0);

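        /* dynamic power gating has its own dedicated bring-up path */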
        if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
                return vcn_v2_0_start_dpg_mode(vinst, adev->vcn.inst->indirect_sram);

        vcn_v2_0_disable_static_power_gating(vinst);

        /* set uvd status busy */
        tmp = RREG32_SOC15(UVD, 0, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY;
        WREG32_SOC15(UVD, 0, mmUVD_STATUS, tmp);

        /* SW clock gating */
        vcn_v2_0_disable_clock_gating(vinst);

        /* enable VCPU clock */
        WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CNTL),
                UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);

        /* disable master interrupt */
        WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN), 0,
                ~UVD_MASTINT_EN__VCPU_EN_MASK);

        /* setup mmUVD_LMI_CTRL */
        tmp = RREG32_SOC15(UVD, 0, mmUVD_LMI_CTRL);
        WREG32_SOC15(UVD, 0, mmUVD_LMI_CTRL, tmp |
                UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
                UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
                UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
                UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);

        /* setup mmUVD_MPC_CNTL */
        tmp = RREG32_SOC15(UVD, 0, mmUVD_MPC_CNTL);
        tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
        tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
        WREG32_SOC15(VCN, 0, mmUVD_MPC_CNTL, tmp);

        /* setup UVD_MPC_SET_MUXA0 */
        WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXA0,
                ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
                (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
                (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
                (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));

        /* setup UVD_MPC_SET_MUXB0 */
        WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXB0,
                ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
                (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
                (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
                (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));

        /* setup mmUVD_MPC_SET_MUX */
        WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUX,
                ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
                (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
                (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));

        vcn_v2_0_mc_resume(vinst);

        /* release VCPU reset to boot */
        WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0,
                ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);

        /* enable LMI MC and UMC channels */
        WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), 0,
                ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

        tmp = RREG32_SOC15(VCN, 0, mmUVD_SOFT_RESET);
        tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
        tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
        WREG32_SOC15(VCN, 0, mmUVD_SOFT_RESET, tmp);

        /* disable byte swapping */
        lmi_swap_cntl = 0;
#ifdef __BIG_ENDIAN
        /* swap (8 in 32) RB and IB */
        lmi_swap_cntl = 0xa;
#endif
        WREG32_SOC15(UVD, 0, mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);

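        /*
         * Poll UVD_STATUS until the VCPU reports that it has booted (the
         * same report bit cleared as "busy" further down); retry up to 10
         * times, pulsing a VCPU soft reset between attempts.
         */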
        for (i = 0; i < 10; ++i) {
                uint32_t status;

                for (j = 0; j < 100; ++j) {
                        status = RREG32_SOC15(UVD, 0, mmUVD_STATUS);
                        if (status & 2)
                                break;
                        mdelay(10);
                }
                r = 0;
                if (status & 2)
                        break;

                DRM_ERROR("VCN decode not responding, trying to reset the VCPU!!!\n");
                WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
                        UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
                        ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
                mdelay(10);
                WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0,
                        ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
                mdelay(10);
                r = -1;
        }

        if (r) {
                DRM_ERROR("VCN decode not responding, giving up!!!\n");
                return r;
        }

        /* enable master interrupt */
        WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN),
                UVD_MASTINT_EN__VCPU_EN_MASK,
                ~UVD_MASTINT_EN__VCPU_EN_MASK);

        /* clear the busy bit of VCN_STATUS */
        WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS), 0,
                ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

        WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_VMID, 0);

        /* force RBC into idle state */
        rb_bufsz = order_base_2(ring->ring_size);
        tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
        WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp);

        fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;
        /* program the RB_BASE for ring buffer */
        WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
                lower_32_bits(ring->gpu_addr));
        WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
                upper_32_bits(ring->gpu_addr));

        /* Initialize the ring buffer's read and write pointers */
        WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR, 0);

        ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
        WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
                        lower_32_bits(ring->wptr));
        fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET;

        fw_shared->multi_queue.encode_generalpurpose_queue_mode |= FW_QUEUE_RING_RESET;
        ring = &adev->vcn.inst->ring_enc[0];
        WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
        WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
        WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
        WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
        WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);
        fw_shared->multi_queue.encode_generalpurpose_queue_mode &= ~FW_QUEUE_RING_RESET;

        fw_shared->multi_queue.encode_lowlatency_queue_mode |= FW_QUEUE_RING_RESET;
        ring = &adev->vcn.inst->ring_enc[1];
        WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
        WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
        WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
        WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
        WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);
        fw_shared->multi_queue.encode_lowlatency_queue_mode &= ~FW_QUEUE_RING_RESET;

        /* Keeping one read-back to ensure all register writes are done,
         * otherwise it may introduce race conditions.
         */
        RREG32_SOC15(UVD, 0, mmUVD_STATUS);

        return 0;
}

static int vcn_v2_0_stop_dpg_mode(struct amdgpu_vcn_inst *vinst)
{
        struct amdgpu_device *adev = vinst->adev;
        struct dpg_pause_state state = {.fw_based = VCN_DPG_STATE__UNPAUSE};
        uint32_t tmp;

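        /* unpause first so the firmware can drain the rings before power-off */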
        vcn_v2_0_pause_dpg_mode(vinst, &state);
        /* Wait for power status to be 1 */
        SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS, 1,
                UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

        /* wait for read ptr to be equal to write ptr */
        tmp = RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR);
        SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RB_RPTR, tmp, 0xFFFFFFFF);

        tmp = RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2);
        SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF);

        tmp = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF;
        SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF);

        SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS, 1,
                UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

        /* disable dynamic power gating mode */
        WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS), 0,
                        ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);

        /* Keeping one read-back to ensure all register writes are done,
         * otherwise it may introduce race conditions.
         */
        RREG32_SOC15(UVD, 0, mmUVD_STATUS);

        return 0;
}

static int vcn_v2_0_stop(struct amdgpu_vcn_inst *vinst)
{
        struct amdgpu_device *adev = vinst->adev;
        uint32_t tmp;
        int r;

        if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
                r = vcn_v2_0_stop_dpg_mode(vinst);
                if (r)
                        return r;
                goto power_off;
        }

        /* wait for uvd idle */
        r = SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7);
        if (r)
                return r;

        tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
                UVD_LMI_STATUS__READ_CLEAN_MASK |
                UVD_LMI_STATUS__WRITE_CLEAN_MASK |
                UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
        r = SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_LMI_STATUS, tmp, tmp);
        if (r)
                return r;

        /* stall UMC channel */
        tmp = RREG32_SOC15(VCN, 0, mmUVD_LMI_CTRL2);
        tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
        WREG32_SOC15(VCN, 0, mmUVD_LMI_CTRL2, tmp);

        tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK|
                UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
        r = SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_LMI_STATUS, tmp, tmp);
        if (r)
                return r;

        /* disable VCPU clock */
        WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CNTL), 0,
                ~(UVD_VCPU_CNTL__CLK_EN_MASK));

        /* reset LMI UMC */
        WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
                UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK,
                ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);

        /* reset LMI */
        WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
                UVD_SOFT_RESET__LMI_SOFT_RESET_MASK,
                ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK);

        /* reset VCPU */
        WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
                UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
                ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);

        /* clear status */
        WREG32_SOC15(VCN, 0, mmUVD_STATUS, 0);

        vcn_v2_0_enable_clock_gating(vinst);
        vcn_v2_0_enable_static_power_gating(vinst);

        /* Keeping one read-back to ensure all register writes are done,
         * otherwise it may introduce race conditions.
         */
        RREG32_SOC15(VCN, 0, mmUVD_STATUS);

power_off:
        if (adev->pm.dpm_enabled)
                amdgpu_dpm_enable_vcn(adev, false, 0);

        return 0;
}

static int vcn_v2_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
                                   struct dpg_pause_state *new_state)
{
        struct amdgpu_device *adev = vinst->adev;
        int inst_idx = vinst->inst;
        struct amdgpu_ring *ring;
        uint32_t reg_data = 0;
        int ret_code;

        /* pause/unpause if state is changed */
        if (adev->vcn.inst[inst_idx].pause_state.fw_based != new_state->fw_based) {
                DRM_DEBUG("dpg pause state changed %d -> %d",
                        adev->vcn.inst[inst_idx].pause_state.fw_based,  new_state->fw_based);
                reg_data = RREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE) &
                        (~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);

                if (new_state->fw_based == VCN_DPG_STATE__PAUSE) {
                        ret_code = SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS, 0x1,
                                UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

                        if (!ret_code) {
                                struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr;
                                /* pause DPG */
                                reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
                                WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);

                                /* wait for ACK */
                                SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_DPG_PAUSE,
                                           UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
                                           UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);

                                /* Stall DPG before WPTR/RPTR reset */
                                WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS),
                                           UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
                                           ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
                                /* Restore the encode ring registers */
                                fw_shared->multi_queue.encode_generalpurpose_queue_mode |= FW_QUEUE_RING_RESET;
                                ring = &adev->vcn.inst->ring_enc[0];
                                ring->wptr = 0;
                                WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
                                WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
                                WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);
                                WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
                                WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
                                fw_shared->multi_queue.encode_generalpurpose_queue_mode &= ~FW_QUEUE_RING_RESET;

                                fw_shared->multi_queue.encode_lowlatency_queue_mode |= FW_QUEUE_RING_RESET;
                                ring = &adev->vcn.inst->ring_enc[1];
                                ring->wptr = 0;
                                WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
                                WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
                                WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);
                                WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
                                WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
                                fw_shared->multi_queue.encode_lowlatency_queue_mode &= ~FW_QUEUE_RING_RESET;

                                fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;
                                WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
                                           RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF);
                                fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET;
                                /* Unstall DPG */
                                WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS),
                                           0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);

                                SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
                                           UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON,
                                           UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
                        }
                } else {
                        /* unpause DPG, no need to wait */
                        reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
                        WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
                }
                adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based;
        }

        return 0;
}

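/**
 * vcn_v2_0_reset - reset the VCN instance
 *
 * @vinst: VCN instance
 *
 * Stop the VCN instance, then start it again.
 */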
static int vcn_v2_0_reset(struct amdgpu_vcn_inst *vinst)
{
        int r;

        r = vcn_v2_0_stop(vinst);
        if (r)
                return r;
        return vcn_v2_0_start(vinst);
}

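/**
 * vcn_v2_0_is_idle - check VCN idle status
 *
 * @ip_block: amdgpu_ip_block pointer
 *
 * Check whether UVD_STATUS reports the idle state.
 */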
static bool vcn_v2_0_is_idle(struct amdgpu_ip_block *ip_block)
{
        struct amdgpu_device *adev = ip_block->adev;

        return (RREG32_SOC15(VCN, 0, mmUVD_STATUS) == UVD_STATUS__IDLE);
}

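/**
 * vcn_v2_0_wait_for_idle - wait for VCN to become idle
 *
 * @ip_block: amdgpu_ip_block pointer
 *
 * Poll UVD_STATUS until the idle bit is set or the wait times out.
 */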
static int vcn_v2_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
        struct amdgpu_device *adev = ip_block->adev;

        return SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_STATUS, UVD_STATUS__IDLE,
                UVD_STATUS__IDLE);
}

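/**
 * vcn_v2_0_set_clockgating_state - set VCN clock gating state
 *
 * @ip_block: amdgpu_ip_block pointer
 * @state: clock gating state
 *
 * Enable or disable VCN clock gating; gating requires the block
 * to be idle.
 */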
static int vcn_v2_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
                                          enum amd_clockgating_state state)
{
        struct amdgpu_device *adev = ip_block->adev;
        bool enable = (state == AMD_CG_STATE_GATE);

        if (amdgpu_sriov_vf(adev))
                return 0;

        if (enable) {
                /* wait for STATUS to clear */
                if (!vcn_v2_0_is_idle(ip_block))
                        return -EBUSY;
                vcn_v2_0_enable_clock_gating(&adev->vcn.inst[0]);
        } else {
                /* disable HW gating and enable SW gating */
                vcn_v2_0_disable_clock_gating(&adev->vcn.inst[0]);
        }
        return 0;
}

/**
 * vcn_v2_0_dec_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t vcn_v2_0_dec_ring_get_rptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        return RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
}

/**
 * vcn_v2_0_dec_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t vcn_v2_0_dec_ring_get_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        if (ring->use_doorbell)
                return *ring->wptr_cpu_addr;
        else
                return RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR);
}

/**
 * vcn_v2_0_dec_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void vcn_v2_0_dec_ring_set_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
                WREG32_SOC15(UVD, 0, mmUVD_SCRATCH2,
                        lower_32_bits(ring->wptr) | 0x80000000);

        if (ring->use_doorbell) {
                *ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
                WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
        } else {
                WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
        }
}

/**
 * vcn_v2_0_dec_ring_insert_start - insert a start command
 *
 * @ring: amdgpu_ring pointer
 *
 * Write a start command to the ring.
 */
void vcn_v2_0_dec_ring_insert_start(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.data0, 0));
        amdgpu_ring_write(ring, 0);
        amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.cmd, 0));
        amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_PACKET_START << 1));
}

/**
 * vcn_v2_0_dec_ring_insert_end - insert an end command
 *
 * @ring: amdgpu_ring pointer
 *
 * Write an end command to the ring.
 */
void vcn_v2_0_dec_ring_insert_end(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[0].internal.cmd, 0));
        amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_PACKET_END << 1));
}

/**
 * vcn_v2_0_dec_ring_insert_nop - insert a nop command
 *
 * @ring: amdgpu_ring pointer
 * @count: the number of dwords of NOP commands to insert (must be even)
 *
 * Write a nop command to the ring.
 */
void vcn_v2_0_dec_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
        struct amdgpu_device *adev = ring->adev;
        int i;

        WARN_ON(ring->wptr % 2 || count % 2);

        for (i = 0; i < count / 2; i++) {
                amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.nop, 0));
                amdgpu_ring_write(ring, 0);
        }
}

/**
 * vcn_v2_0_dec_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address
 * @seq: sequence number
 * @flags: fence related flags
 *
 * Write a fence and a trap command to the ring.
 */
void vcn_v2_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
                                unsigned flags)
{
        struct amdgpu_device *adev = ring->adev;

        WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
        amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.context_id, 0));
        amdgpu_ring_write(ring, seq);

        amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.data0, 0));
        amdgpu_ring_write(ring, addr & 0xffffffff);

        amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.data1, 0));
        amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);

        amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.cmd, 0));
        amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_FENCE << 1));

        amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.data0, 0));
        amdgpu_ring_write(ring, 0);

        amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.data1, 0));
        amdgpu_ring_write(ring, 0);

        amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.cmd, 0));

        amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_TRAP << 1));
}

/**
 * vcn_v2_0_dec_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job to retrieve vmid from
 * @ib: indirect buffer to execute
 * @flags: unused
 *
 * Write ring commands to execute the indirect buffer
 */
void vcn_v2_0_dec_ring_emit_ib(struct amdgpu_ring *ring,
                               struct amdgpu_job *job,
                               struct amdgpu_ib *ib,
                               uint32_t flags)
{
        struct amdgpu_device *adev = ring->adev;
        unsigned vmid = AMDGPU_JOB_GET_VMID(job);

        amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.ib_vmid, 0));
        amdgpu_ring_write(ring, vmid);

        amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.ib_bar_low, 0));
        amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
        amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.ib_bar_high, 0));
        amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
        amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.ib_size, 0));
        amdgpu_ring_write(ring, ib->length_dw);
}

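/**
 * vcn_v2_0_dec_ring_emit_reg_wait - emit a register wait command
 *
 * @ring: amdgpu_ring pointer
 * @reg: register offset
 * @val: value to wait for
 * @mask: mask to apply to the register value
 *
 * Emit commands that make the firmware poll the register until
 * the masked value matches @val.
 */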
void vcn_v2_0_dec_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
                                uint32_t val, uint32_t mask)
{
        struct amdgpu_device *adev = ring->adev;

        amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.data0, 0));
        amdgpu_ring_write(ring, reg << 2);

        amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.data1, 0));
        amdgpu_ring_write(ring, val);

        amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.gp_scratch8, 0));
        amdgpu_ring_write(ring, mask);

        amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.cmd, 0));

        amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_REG_READ_COND_WAIT << 1));
}

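/**
 * vcn_v2_0_dec_ring_emit_vm_flush - emit a VM TLB flush
 *
 * @ring: amdgpu_ring pointer
 * @vmid: VM ID to flush
 * @pd_addr: page directory address
 *
 * Flush the GPU TLB for @vmid and wait for the page table base
 * address register write to land.
 */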
void vcn_v2_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
                                unsigned vmid, uint64_t pd_addr)
{
        struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];
        uint32_t data0, data1, mask;

        pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

        /* wait for register write */
        data0 = hub->ctx0_ptb_addr_lo32 + vmid * hub->ctx_addr_distance;
        data1 = lower_32_bits(pd_addr);
        mask = 0xffffffff;
        vcn_v2_0_dec_ring_emit_reg_wait(ring, data0, data1, mask);
}

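/**
 * vcn_v2_0_dec_ring_emit_wreg - emit a register write command
 *
 * @ring: amdgpu_ring pointer
 * @reg: register offset
 * @val: value to write
 *
 * Emit commands that make the firmware write @val to the register.
 */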
void vcn_v2_0_dec_ring_emit_wreg(struct amdgpu_ring *ring,
                                uint32_t reg, uint32_t val)
{
        struct amdgpu_device *adev = ring->adev;

        amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.data0, 0));
        amdgpu_ring_write(ring, reg << 2);

        amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.data1, 0));
        amdgpu_ring_write(ring, val);

        amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.cmd, 0));

        amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_WRITE_REG << 1));
}

/**
 * vcn_v2_0_enc_ring_get_rptr - get enc read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc read pointer
 */
static uint64_t vcn_v2_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        if (ring == &adev->vcn.inst->ring_enc[0])
                return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR);
        else
                return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2);
}

/**
 * vcn_v2_0_enc_ring_get_wptr - get enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc write pointer
 */
static uint64_t vcn_v2_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        if (ring == &adev->vcn.inst->ring_enc[0]) {
                if (ring->use_doorbell)
                        return *ring->wptr_cpu_addr;
                else
                        return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR);
        } else {
                if (ring->use_doorbell)
                        return *ring->wptr_cpu_addr;
                else
                        return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2);
        }
}

/**
 * vcn_v2_0_enc_ring_set_wptr - set enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the enc write pointer to the hardware
 */
static void vcn_v2_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        if (ring == &adev->vcn.inst->ring_enc[0]) {
                if (ring->use_doorbell) {
                        *ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
                        WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
                } else {
                        WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
                }
        } else {
                if (ring->use_doorbell) {
                        *ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
                        WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
                } else {
                        WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
                }
        }
}

/**
 * vcn_v2_0_enc_ring_emit_fence - emit an enc fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address
 * @seq: sequence number
 * @flags: fence related flags
 *
 * Write an enc fence and a trap command to the ring.
 */
void vcn_v2_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
                                u64 seq, unsigned flags)
{
        WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

        amdgpu_ring_write(ring, VCN_ENC_CMD_FENCE);
        amdgpu_ring_write(ring, addr);
        amdgpu_ring_write(ring, upper_32_bits(addr));
        amdgpu_ring_write(ring, seq);
        amdgpu_ring_write(ring, VCN_ENC_CMD_TRAP);
}

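/**
 * vcn_v2_0_enc_ring_insert_end - insert an enc end command
 *
 * @ring: amdgpu_ring pointer
 *
 * Write an end command to the enc ring.
 */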
void vcn_v2_0_enc_ring_insert_end(struct amdgpu_ring *ring)
{
        amdgpu_ring_write(ring, VCN_ENC_CMD_END);
}

/**
 * vcn_v2_0_enc_ring_emit_ib - enc execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job to retrieve vmid from
 * @ib: indirect buffer to execute
 * @flags: unused
 *
 * Write enc ring commands to execute the indirect buffer
 */
void vcn_v2_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
                               struct amdgpu_job *job,
                               struct amdgpu_ib *ib,
                               uint32_t flags)
{
        unsigned vmid = AMDGPU_JOB_GET_VMID(job);

        amdgpu_ring_write(ring, VCN_ENC_CMD_IB);
        amdgpu_ring_write(ring, vmid);
        amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
        amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
        amdgpu_ring_write(ring, ib->length_dw);
}

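/**
 * vcn_v2_0_enc_ring_emit_reg_wait - emit an enc register wait command
 *
 * @ring: amdgpu_ring pointer
 * @reg: register offset
 * @val: value to wait for
 * @mask: mask to apply to the register value
 *
 * Emit an enc command that polls the register until the masked
 * value matches @val.
 */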
void vcn_v2_0_enc_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
                                uint32_t val, uint32_t mask)
{
        amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WAIT);
        amdgpu_ring_write(ring, reg << 2);
        amdgpu_ring_write(ring, mask);
        amdgpu_ring_write(ring, val);
}

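/**
 * vcn_v2_0_enc_ring_emit_vm_flush - emit an enc VM TLB flush
 *
 * @ring: amdgpu_ring pointer
 * @vmid: VM ID to flush
 * @pd_addr: page directory address
 *
 * Flush the GPU TLB for @vmid and wait for the page table base
 * address register write to land.
 */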
void vcn_v2_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
                                unsigned int vmid, uint64_t pd_addr)
{
        struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];

        pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

        /* wait for reg writes */
        vcn_v2_0_enc_ring_emit_reg_wait(ring, hub->ctx0_ptb_addr_lo32 +
                                        vmid * hub->ctx_addr_distance,
                                        lower_32_bits(pd_addr), 0xffffffff);
}

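/**
 * vcn_v2_0_enc_ring_emit_wreg - emit an enc register write command
 *
 * @ring: amdgpu_ring pointer
 * @reg: register offset
 * @val: value to write
 *
 * Emit an enc command that writes @val to the register.
 */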
void vcn_v2_0_enc_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val)
{
        amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WRITE);
        amdgpu_ring_write(ring, reg << 2);
        amdgpu_ring_write(ring, val);
}

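/**
 * vcn_v2_0_set_interrupt_state - set VCN interrupt state
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source
 * @type: interrupt type
 * @state: requested interrupt state
 *
 * No per-type programming is required on VCN 2.0; this is a stub.
 */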
static int vcn_v2_0_set_interrupt_state(struct amdgpu_device *adev,
                                        struct amdgpu_irq_src *source,
                                        unsigned type,
                                        enum amdgpu_interrupt_state state)
{
        return 0;
}

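/**
 * vcn_v2_0_process_interrupt - process a VCN interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source
 * @entry: interrupt vector entry
 *
 * Dispatch fence processing to the ring that raised the interrupt.
 */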
static int vcn_v2_0_process_interrupt(struct amdgpu_device *adev,
                                      struct amdgpu_irq_src *source,
                                      struct amdgpu_iv_entry *entry)
{
        DRM_DEBUG("IH: VCN TRAP\n");

        switch (entry->src_id) {
        case VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT:
                amdgpu_fence_process(&adev->vcn.inst->ring_dec);
                break;
        case VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
                amdgpu_fence_process(&adev->vcn.inst->ring_enc[0]);
                break;
        case VCN_2_0__SRCID__UVD_ENC_LOW_LATENCY:
                amdgpu_fence_process(&adev->vcn.inst->ring_enc[1]);
                break;
        default:
                DRM_ERROR("Unhandled interrupt: %d %d\n",
                          entry->src_id, entry->src_data[0]);
                break;
        }

        return 0;
}

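/**
 * vcn_v2_0_dec_ring_test_ring - basic decode ring test
 *
 * @ring: amdgpu_ring pointer
 *
 * Emit a scratch register write through the ring and poll until the
 * hardware writes the test pattern back, or the test times out.
 * Skipped under SR-IOV.
 */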
int vcn_v2_0_dec_ring_test_ring(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        uint32_t tmp = 0;
        unsigned i;
        int r;

        if (amdgpu_sriov_vf(adev))
                return 0;

        WREG32(adev->vcn.inst[ring->me].external.scratch9, 0xCAFEDEAD);
        r = amdgpu_ring_alloc(ring, 4);
        if (r)
                return r;
        amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.cmd, 0));
        amdgpu_ring_write(ring, VCN_DEC_KMD_CMD | (VCN_DEC_CMD_PACKET_START << 1));
        amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.scratch9, 0));
        amdgpu_ring_write(ring, 0xDEADBEEF);
        amdgpu_ring_commit(ring);
        for (i = 0; i < adev->usec_timeout; i++) {
                tmp = RREG32(adev->vcn.inst[ring->me].external.scratch9);
                if (tmp == 0xDEADBEEF)
                        break;
                udelay(1);
        }

        if (i >= adev->usec_timeout)
                r = -ETIMEDOUT;

        return r;
}

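/**
 * vcn_v2_0_set_pg_state - set VCN power gating state
 *
 * @vinst: VCN instance
 * @state: power gating state
 *
 * Stop or start the VCN instance to match the requested state; the
 * actual power gating is handled by the DPM code.
 */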
static int vcn_v2_0_set_pg_state(struct amdgpu_vcn_inst *vinst,
                                 enum amd_powergating_state state)
{
        /* This doesn't actually powergate the VCN block.
         * That's done in the dpm code via the SMC.  This
         * just re-inits the block as necessary.  The actual
         * gating still happens in the dpm code.  We should
         * revisit this when there is a cleaner line between
         * the smc and the hw blocks
         */
        int ret;
        struct amdgpu_device *adev = vinst->adev;

        if (amdgpu_sriov_vf(adev)) {
                vinst->cur_state = AMD_PG_STATE_UNGATE;
                return 0;
        }

        if (state == vinst->cur_state)
                return 0;

        if (state == AMD_PG_STATE_GATE)
                ret = vcn_v2_0_stop(vinst);
        else
                ret = vcn_v2_0_start(vinst);

        if (!ret)
                vinst->cur_state = state;

        return ret;
}

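/**
 * vcn_v2_0_start_mmsch - hand the init table to the MMSCH
 *
 * @adev: amdgpu_device pointer
 * @table: memory descriptor table holding the init header
 *
 * Point the MM scheduler at the descriptor, reset the ring write
 * pointers, kick off initialization via the mailbox and poll the
 * response register for completion.
 */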
static int vcn_v2_0_start_mmsch(struct amdgpu_device *adev,
                                struct amdgpu_mm_table *table)
{
        uint32_t data = 0, loop;
        uint64_t addr = table->gpu_addr;
        struct mmsch_v2_0_init_header *header;
        uint32_t size;
        int i;

        header = (struct mmsch_v2_0_init_header *)table->cpu_addr;
        size = header->header_size + header->vcn_table_size;

        /* 1, write to mmsch_vf_ctx_addr_lo/hi register with GPU mc addr
         * of memory descriptor location
         */
        WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_ADDR_LO, lower_32_bits(addr));
        WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_ADDR_HI, upper_32_bits(addr));

        /* 2, update vmid of descriptor */
        data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_VMID);
        data &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK;
        /* use domain0 for MM scheduler */
        data |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT);
        WREG32_SOC15(UVD, 0, mmMMSCH_VF_VMID, data);

        /* 3, notify mmsch about the size of this descriptor */
        WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_SIZE, size);

        /* 4, set resp to zero */
        WREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP, 0);

        adev->vcn.inst->ring_dec.wptr = 0;
        adev->vcn.inst->ring_dec.wptr_old = 0;
        vcn_v2_0_dec_ring_set_wptr(&adev->vcn.inst->ring_dec);

        for (i = 0; i < adev->vcn.inst[0].num_enc_rings; ++i) {
                adev->vcn.inst->ring_enc[i].wptr = 0;
                adev->vcn.inst->ring_enc[i].wptr_old = 0;
                vcn_v2_0_enc_ring_set_wptr(&adev->vcn.inst->ring_enc[i]);
        }

        /* 5, kick off the initialization and wait until
         * MMSCH_VF_MAILBOX_RESP becomes non-zero
         */
        WREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_HOST, 0x10000001);

        data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP);
        loop = 1000;
        while ((data & 0x10000002) != 0x10000002) {
                udelay(10);
                data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP);
                loop--;
                if (!loop)
                        break;
        }

        if (!loop) {
                DRM_ERROR("failed to init MMSCH, " \
                        "mmMMSCH_VF_MAILBOX_RESP = 0x%08x\n", data);
                return -EBUSY;
        }

        return 0;
}

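/**
 * vcn_v2_0_start_sriov - start VCN under SR-IOV
 *
 * @adev: amdgpu_device pointer
 *
 * Build the MMSCH init table (firmware location, VCPU caches and
 * ring setup for instance 0) and submit it to the MM scheduler.
 */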
static int vcn_v2_0_start_sriov(struct amdgpu_device *adev)
{
        int r;
        uint32_t tmp;
        struct amdgpu_ring *ring;
        uint32_t offset, size;
        uint32_t table_size = 0;
        struct mmsch_v2_0_cmd_direct_write direct_wt = { {0} };
        struct mmsch_v2_0_cmd_direct_read_modify_write direct_rd_mod_wt = { {0} };
        struct mmsch_v2_0_cmd_end end = { {0} };
        struct mmsch_v2_0_init_header *header;
        uint32_t *init_table = adev->virt.mm_table.cpu_addr;

        /* This path only programs VCN instance 0. */

        header = (struct mmsch_v2_0_init_header *)init_table;
        direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE;
        direct_rd_mod_wt.cmd_header.command_type =
                MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
        end.cmd_header.command_type = MMSCH_COMMAND__END;

        if (header->vcn_table_offset == 0 && header->vcn_table_size == 0) {
                header->version = MMSCH_VERSION;
                header->header_size = sizeof(struct mmsch_v2_0_init_header) >> 2;

                header->vcn_table_offset = header->header_size;

                init_table += header->vcn_table_offset;

                size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[0].fw->size + 4);

                MMSCH_V2_0_INSERT_DIRECT_RD_MOD_WT(
                        SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS),
                        0xFFFFFFFF, 0x00000004);

                /* mc resume */
                if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
                        MMSCH_V2_0_INSERT_DIRECT_WT(
                                SOC15_REG_OFFSET(UVD, 0,
                                        mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
                                adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo);
                        MMSCH_V2_0_INSERT_DIRECT_WT(
                                SOC15_REG_OFFSET(UVD, 0,
                                        mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
                                adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi);
                        offset = 0;
                } else {
                        MMSCH_V2_0_INSERT_DIRECT_WT(
                                SOC15_REG_OFFSET(UVD, 0,
                                        mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
                                lower_32_bits(adev->vcn.inst->gpu_addr));
                        MMSCH_V2_0_INSERT_DIRECT_WT(
                                SOC15_REG_OFFSET(UVD, 0,
                                        mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
                                upper_32_bits(adev->vcn.inst->gpu_addr));
                        offset = size;
                }

                MMSCH_V2_0_INSERT_DIRECT_WT(
                        SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0),
                        0);
                MMSCH_V2_0_INSERT_DIRECT_WT(
                        SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_SIZE0),
                        size);

                MMSCH_V2_0_INSERT_DIRECT_WT(
                        SOC15_REG_OFFSET(UVD, 0,
                                mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
                        lower_32_bits(adev->vcn.inst->gpu_addr + offset));
                MMSCH_V2_0_INSERT_DIRECT_WT(
                        SOC15_REG_OFFSET(UVD, 0,
                                mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
                        upper_32_bits(adev->vcn.inst->gpu_addr + offset));
                MMSCH_V2_0_INSERT_DIRECT_WT(
                        SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1),
                        0);
                MMSCH_V2_0_INSERT_DIRECT_WT(
                        SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_SIZE1),
                        AMDGPU_VCN_STACK_SIZE);

                MMSCH_V2_0_INSERT_DIRECT_WT(
                        SOC15_REG_OFFSET(UVD, 0,
                                mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
                        lower_32_bits(adev->vcn.inst->gpu_addr + offset +
                                AMDGPU_VCN_STACK_SIZE));
                MMSCH_V2_0_INSERT_DIRECT_WT(
                        SOC15_REG_OFFSET(UVD, 0,
                                mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
                        upper_32_bits(adev->vcn.inst->gpu_addr + offset +
                                AMDGPU_VCN_STACK_SIZE));
                MMSCH_V2_0_INSERT_DIRECT_WT(
                        SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2),
                        0);
                MMSCH_V2_0_INSERT_DIRECT_WT(
                        SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_SIZE2),
                        AMDGPU_VCN_CONTEXT_SIZE);

                for (r = 0; r < adev->vcn.inst[0].num_enc_rings; ++r) {
                        ring = &adev->vcn.inst->ring_enc[r];
                        ring->wptr = 0;
                        MMSCH_V2_0_INSERT_DIRECT_WT(
                                SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_BASE_LO),
                                lower_32_bits(ring->gpu_addr));
                        MMSCH_V2_0_INSERT_DIRECT_WT(
                                SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_BASE_HI),
                                upper_32_bits(ring->gpu_addr));
                        MMSCH_V2_0_INSERT_DIRECT_WT(
                                SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_SIZE),
                                ring->ring_size / 4);
                }

                ring = &adev->vcn.inst->ring_dec;
                ring->wptr = 0;
                MMSCH_V2_0_INSERT_DIRECT_WT(
                        SOC15_REG_OFFSET(UVD, 0,
                                mmUVD_LMI_RBC_RB_64BIT_BAR_LOW),
                        lower_32_bits(ring->gpu_addr));
                MMSCH_V2_0_INSERT_DIRECT_WT(
                        SOC15_REG_OFFSET(UVD, 0,
                                mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH),
                        upper_32_bits(ring->gpu_addr));
                /* force RBC into idle state */
                tmp = order_base_2(ring->ring_size);
                tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, tmp);
                tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
                tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
                tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
                tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
                MMSCH_V2_0_INSERT_DIRECT_WT(
                        SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), tmp);

                /* add end packet */
                tmp = sizeof(struct mmsch_v2_0_cmd_end);
                memcpy((void *)init_table, &end, tmp);
                table_size += (tmp / 4);
                header->vcn_table_size = table_size;
        }
        return vcn_v2_0_start_mmsch(adev, &adev->virt.mm_table);
}

static const struct amd_ip_funcs vcn_v2_0_ip_funcs = {
        .name = "vcn_v2_0",
        .early_init = vcn_v2_0_early_init,
        .sw_init = vcn_v2_0_sw_init,
        .sw_fini = vcn_v2_0_sw_fini,
        .hw_init = vcn_v2_0_hw_init,
        .hw_fini = vcn_v2_0_hw_fini,
        .suspend = vcn_v2_0_suspend,
        .resume = vcn_v2_0_resume,
        .is_idle = vcn_v2_0_is_idle,
        .wait_for_idle = vcn_v2_0_wait_for_idle,
        .set_clockgating_state = vcn_v2_0_set_clockgating_state,
        .set_powergating_state = vcn_set_powergating_state,
        .dump_ip_state = amdgpu_vcn_dump_ip_state,
        .print_ip_state = amdgpu_vcn_print_ip_state,
};

static const struct amdgpu_ring_funcs vcn_v2_0_dec_ring_vm_funcs = {
        .type = AMDGPU_RING_TYPE_VCN_DEC,
        .align_mask = 0xf,
        .secure_submission_supported = true,
        .get_rptr = vcn_v2_0_dec_ring_get_rptr,
        .get_wptr = vcn_v2_0_dec_ring_get_wptr,
        .set_wptr = vcn_v2_0_dec_ring_set_wptr,
        .emit_frame_size =
                SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
                SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
                8 + /* vcn_v2_0_dec_ring_emit_vm_flush */
                14 + 14 + /* vcn_v2_0_dec_ring_emit_fence x2 vm fence */
                6,
        .emit_ib_size = 8, /* vcn_v2_0_dec_ring_emit_ib */
        .emit_ib = vcn_v2_0_dec_ring_emit_ib,
        .emit_fence = vcn_v2_0_dec_ring_emit_fence,
        .emit_vm_flush = vcn_v2_0_dec_ring_emit_vm_flush,
        .test_ring = vcn_v2_0_dec_ring_test_ring,
        .test_ib = amdgpu_vcn_dec_ring_test_ib,
        .insert_nop = vcn_v2_0_dec_ring_insert_nop,
        .insert_start = vcn_v2_0_dec_ring_insert_start,
        .insert_end = vcn_v2_0_dec_ring_insert_end,
        .pad_ib = amdgpu_ring_generic_pad_ib,
        .begin_use = amdgpu_vcn_ring_begin_use,
        .end_use = amdgpu_vcn_ring_end_use,
        .emit_wreg = vcn_v2_0_dec_ring_emit_wreg,
        .emit_reg_wait = vcn_v2_0_dec_ring_emit_reg_wait,
        .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
        .reset = amdgpu_vcn_ring_reset,
};

static const struct amdgpu_ring_funcs vcn_v2_0_enc_ring_vm_funcs = {
        .type = AMDGPU_RING_TYPE_VCN_ENC,
        .align_mask = 0x3f,
        .nop = VCN_ENC_CMD_NO_OP,
        .get_rptr = vcn_v2_0_enc_ring_get_rptr,
        .get_wptr = vcn_v2_0_enc_ring_get_wptr,
        .set_wptr = vcn_v2_0_enc_ring_set_wptr,
        .emit_frame_size =
                SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
                SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
                4 + /* vcn_v2_0_enc_ring_emit_vm_flush */
                5 + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
                1, /* vcn_v2_0_enc_ring_insert_end */
        .emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */
        .emit_ib = vcn_v2_0_enc_ring_emit_ib,
        .emit_fence = vcn_v2_0_enc_ring_emit_fence,
        .emit_vm_flush = vcn_v2_0_enc_ring_emit_vm_flush,
        .test_ring = amdgpu_vcn_enc_ring_test_ring,
        .test_ib = amdgpu_vcn_enc_ring_test_ib,
        .insert_nop = amdgpu_ring_insert_nop,
        .insert_end = vcn_v2_0_enc_ring_insert_end,
        .pad_ib = amdgpu_ring_generic_pad_ib,
        .begin_use = amdgpu_vcn_ring_begin_use,
        .end_use = amdgpu_vcn_ring_end_use,
        .emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
        .emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
        .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
        .reset = amdgpu_vcn_ring_reset,
};

static void vcn_v2_0_set_dec_ring_funcs(struct amdgpu_device *adev)
{
        adev->vcn.inst->ring_dec.funcs = &vcn_v2_0_dec_ring_vm_funcs;
}

static void vcn_v2_0_set_enc_ring_funcs(struct amdgpu_device *adev)
{
        int i;

        for (i = 0; i < adev->vcn.inst[0].num_enc_rings; ++i)
                adev->vcn.inst->ring_enc[i].funcs = &vcn_v2_0_enc_ring_vm_funcs;
}

static const struct amdgpu_irq_src_funcs vcn_v2_0_irq_funcs = {
        .set = vcn_v2_0_set_interrupt_state,
        .process = vcn_v2_0_process_interrupt,
};

static void vcn_v2_0_set_irq_funcs(struct amdgpu_device *adev)
{
        adev->vcn.inst->irq.num_types = adev->vcn.inst[0].num_enc_rings + 1;
        adev->vcn.inst->irq.funcs = &vcn_v2_0_irq_funcs;
}

const struct amdgpu_ip_block_version vcn_v2_0_ip_block = {
        .type = AMD_IP_BLOCK_TYPE_VCN,
        .major = 2,
        .minor = 0,
        .rev = 0,
        .funcs = &vcn_v2_0_ip_funcs,
};