/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "soc15.h"
#include "soc15d.h"
#include "amdgpu_atomfirmware.h"
#include "amdgpu_pm.h"

#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"

#include "vega10_enum.h"

#include "soc15_common.h"
#include "clearstate_gfx9.h"
#include "v9_structs.h"

#include "ivsrcid/gfx/irqsrcs_gfx_9_0.h"

#include "amdgpu_ras.h"

#include "amdgpu_ring_mux.h"
#include "gfx_v9_4.h"
#include "gfx_v9_0.h"
#include "gfx_v9_0_cleaner_shader.h"
#include "gfx_v9_4_2.h"

#include "asic_reg/pwr/pwr_10_0_offset.h"
#include "asic_reg/pwr/pwr_10_0_sh_mask.h"
#include "asic_reg/gc/gc_9_0_default.h"

#define GFX9_NUM_GFX_RINGS     1
#define GFX9_NUM_SW_GFX_RINGS  2
#define GFX9_MEC_HPD_SIZE 4096
#define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L
#define RLC_SAVE_RESTORE_ADDR_STARTING_OFFSET 0x00000000L

#define mmGCEA_PROBE_MAP                        0x070c
#define mmGCEA_PROBE_MAP_BASE_IDX               0

MODULE_FIRMWARE("amdgpu/vega10_ce.bin");
MODULE_FIRMWARE("amdgpu/vega10_pfp.bin");
MODULE_FIRMWARE("amdgpu/vega10_me.bin");
MODULE_FIRMWARE("amdgpu/vega10_mec.bin");
MODULE_FIRMWARE("amdgpu/vega10_mec2.bin");
MODULE_FIRMWARE("amdgpu/vega10_rlc.bin");

MODULE_FIRMWARE("amdgpu/vega12_ce.bin");
MODULE_FIRMWARE("amdgpu/vega12_pfp.bin");
MODULE_FIRMWARE("amdgpu/vega12_me.bin");
MODULE_FIRMWARE("amdgpu/vega12_mec.bin");
MODULE_FIRMWARE("amdgpu/vega12_mec2.bin");
MODULE_FIRMWARE("amdgpu/vega12_rlc.bin");

MODULE_FIRMWARE("amdgpu/vega20_ce.bin");
MODULE_FIRMWARE("amdgpu/vega20_pfp.bin");
MODULE_FIRMWARE("amdgpu/vega20_me.bin");
MODULE_FIRMWARE("amdgpu/vega20_mec.bin");
MODULE_FIRMWARE("amdgpu/vega20_mec2.bin");
MODULE_FIRMWARE("amdgpu/vega20_rlc.bin");

MODULE_FIRMWARE("amdgpu/raven_ce.bin");
MODULE_FIRMWARE("amdgpu/raven_pfp.bin");
MODULE_FIRMWARE("amdgpu/raven_me.bin");
MODULE_FIRMWARE("amdgpu/raven_mec.bin");
MODULE_FIRMWARE("amdgpu/raven_mec2.bin");
MODULE_FIRMWARE("amdgpu/raven_rlc.bin");

MODULE_FIRMWARE("amdgpu/picasso_ce.bin");
MODULE_FIRMWARE("amdgpu/picasso_pfp.bin");
MODULE_FIRMWARE("amdgpu/picasso_me.bin");
MODULE_FIRMWARE("amdgpu/picasso_mec.bin");
MODULE_FIRMWARE("amdgpu/picasso_mec2.bin");
MODULE_FIRMWARE("amdgpu/picasso_rlc.bin");
MODULE_FIRMWARE("amdgpu/picasso_rlc_am4.bin");

MODULE_FIRMWARE("amdgpu/raven2_ce.bin");
MODULE_FIRMWARE("amdgpu/raven2_pfp.bin");
MODULE_FIRMWARE("amdgpu/raven2_me.bin");
MODULE_FIRMWARE("amdgpu/raven2_mec.bin");
MODULE_FIRMWARE("amdgpu/raven2_mec2.bin");
MODULE_FIRMWARE("amdgpu/raven2_rlc.bin");
MODULE_FIRMWARE("amdgpu/raven_kicker_rlc.bin");

MODULE_FIRMWARE("amdgpu/arcturus_mec.bin");
MODULE_FIRMWARE("amdgpu/arcturus_rlc.bin");

MODULE_FIRMWARE("amdgpu/renoir_ce.bin");
MODULE_FIRMWARE("amdgpu/renoir_pfp.bin");
MODULE_FIRMWARE("amdgpu/renoir_me.bin");
MODULE_FIRMWARE("amdgpu/renoir_mec.bin");
MODULE_FIRMWARE("amdgpu/renoir_rlc.bin");

MODULE_FIRMWARE("amdgpu/green_sardine_ce.bin");
MODULE_FIRMWARE("amdgpu/green_sardine_pfp.bin");
MODULE_FIRMWARE("amdgpu/green_sardine_me.bin");
MODULE_FIRMWARE("amdgpu/green_sardine_mec.bin");
MODULE_FIRMWARE("amdgpu/green_sardine_mec2.bin");
MODULE_FIRMWARE("amdgpu/green_sardine_rlc.bin");

MODULE_FIRMWARE("amdgpu/aldebaran_mec.bin");
MODULE_FIRMWARE("amdgpu/aldebaran_mec2.bin");
MODULE_FIRMWARE("amdgpu/aldebaran_rlc.bin");
MODULE_FIRMWARE("amdgpu/aldebaran_sjt_mec.bin");
MODULE_FIRMWARE("amdgpu/aldebaran_sjt_mec2.bin");

#define mmTCP_CHAN_STEER_0_ARCT                                                         0x0b03
#define mmTCP_CHAN_STEER_0_ARCT_BASE_IDX                                                        0
#define mmTCP_CHAN_STEER_1_ARCT                                                         0x0b04
#define mmTCP_CHAN_STEER_1_ARCT_BASE_IDX                                                        0
#define mmTCP_CHAN_STEER_2_ARCT                                                         0x0b09
#define mmTCP_CHAN_STEER_2_ARCT_BASE_IDX                                                        0
#define mmTCP_CHAN_STEER_3_ARCT                                                         0x0b0a
#define mmTCP_CHAN_STEER_3_ARCT_BASE_IDX                                                        0
#define mmTCP_CHAN_STEER_4_ARCT                                                         0x0b0b
#define mmTCP_CHAN_STEER_4_ARCT_BASE_IDX                                                        0
#define mmTCP_CHAN_STEER_5_ARCT                                                         0x0b0c
#define mmTCP_CHAN_STEER_5_ARCT_BASE_IDX                                                        0

#define mmGOLDEN_TSC_COUNT_UPPER_Renoir                0x0025
#define mmGOLDEN_TSC_COUNT_UPPER_Renoir_BASE_IDX       1
#define mmGOLDEN_TSC_COUNT_LOWER_Renoir                0x0026
#define mmGOLDEN_TSC_COUNT_LOWER_Renoir_BASE_IDX       1

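/*
 * GC registers captured in the IP dump on a GPU hang. The *_HEADER_DUMP
 * entries repeat on purpose: each read returns the next entry of the CP
 * packet-header FIFO, so listing the register several times snapshots
 * the whole FIFO.
 */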
static const struct amdgpu_hwip_reg_entry gc_reg_list_9[] = {
        SOC15_REG_ENTRY_STR(GC, 0, mmGRBM_STATUS),
        SOC15_REG_ENTRY_STR(GC, 0, mmGRBM_STATUS2),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_STALLED_STAT1),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_STALLED_STAT2),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_CPC_STALLED_STAT1),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_CPF_STALLED_STAT1),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_BUSY_STAT),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_CPC_BUSY_STAT),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_CPF_BUSY_STAT),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_CPF_STATUS),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_GFX_ERROR),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB_BASE),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB_RPTR),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB_WPTR),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB0_BASE),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB0_RPTR),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB0_WPTR),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB1_BASE),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB1_RPTR),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB1_WPTR),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB2_BASE),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB2_RPTR),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_RB2_WPTR),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_IB1_CMD_BUFSZ),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_IB2_CMD_BUFSZ),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_IB1_CMD_BUFSZ),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_IB2_CMD_BUFSZ),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_IB1_BASE_LO),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_IB1_BASE_HI),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_IB1_BUFSZ),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_IB2_BASE_LO),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_IB2_BASE_HI),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_IB2_BUFSZ),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_IB1_BASE_LO),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_IB1_BASE_HI),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_IB1_BUFSZ),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_IB2_BASE_LO),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_IB2_BASE_HI),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_IB2_BUFSZ),
        SOC15_REG_ENTRY_STR(GC, 0, mmCPF_UTCL1_STATUS),
        SOC15_REG_ENTRY_STR(GC, 0, mmCPC_UTCL1_STATUS),
        SOC15_REG_ENTRY_STR(GC, 0, mmCPG_UTCL1_STATUS),
        SOC15_REG_ENTRY_STR(GC, 0, mmGDS_PROTECTION_FAULT),
        SOC15_REG_ENTRY_STR(GC, 0, mmGDS_VM_PROTECTION_FAULT),
        SOC15_REG_ENTRY_STR(GC, 0, mmIA_UTCL1_STATUS),
        SOC15_REG_ENTRY_STR(GC, 0, mmIA_UTCL1_CNTL),
        SOC15_REG_ENTRY_STR(GC, 0, mmPA_CL_CNTL_STATUS),
        SOC15_REG_ENTRY_STR(GC, 0, mmRLC_UTCL1_STATUS),
        SOC15_REG_ENTRY_STR(GC, 0, mmRMI_UTCL1_STATUS),
        SOC15_REG_ENTRY_STR(GC, 0, mmSQC_DCACHE_UTCL1_STATUS),
        SOC15_REG_ENTRY_STR(GC, 0, mmSQC_ICACHE_UTCL1_STATUS),
        SOC15_REG_ENTRY_STR(GC, 0, mmSQ_UTCL1_STATUS),
        SOC15_REG_ENTRY_STR(GC, 0, mmTCP_UTCL1_STATUS),
        SOC15_REG_ENTRY_STR(GC, 0, mmWD_UTCL1_STATUS),
        SOC15_REG_ENTRY_STR(GC, 0, mmVM_L2_PROTECTION_FAULT_CNTL),
        SOC15_REG_ENTRY_STR(GC, 0, mmVM_L2_PROTECTION_FAULT_STATUS),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_DEBUG),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_CNTL),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_INSTR_PNTR),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC1_INSTR_PNTR),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC2_INSTR_PNTR),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_INSTR_PNTR),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_INSTR_PNTR),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_CPC_STATUS),
        SOC15_REG_ENTRY_STR(GC, 0, mmRLC_STAT),
        SOC15_REG_ENTRY_STR(GC, 0, mmRLC_SMU_COMMAND),
        SOC15_REG_ENTRY_STR(GC, 0, mmRLC_SMU_MESSAGE),
        SOC15_REG_ENTRY_STR(GC, 0, mmRLC_SMU_ARGUMENT_1),
        SOC15_REG_ENTRY_STR(GC, 0, mmRLC_SMU_ARGUMENT_2),
        SOC15_REG_ENTRY_STR(GC, 0, mmSMU_RLC_RESPONSE),
        SOC15_REG_ENTRY_STR(GC, 0, mmRLC_SAFE_MODE),
        SOC15_REG_ENTRY_STR(GC, 0, mmRLC_SMU_SAFE_MODE),
        SOC15_REG_ENTRY_STR(GC, 0, mmRLC_INT_STAT),
        SOC15_REG_ENTRY_STR(GC, 0, mmRLC_GPM_GENERAL_6),
        /* SE status registers */
        SOC15_REG_ENTRY_STR(GC, 0, mmGRBM_STATUS_SE0),
        SOC15_REG_ENTRY_STR(GC, 0, mmGRBM_STATUS_SE1),
        SOC15_REG_ENTRY_STR(GC, 0, mmGRBM_STATUS_SE2),
        SOC15_REG_ENTRY_STR(GC, 0, mmGRBM_STATUS_SE3),
        /* packet headers */
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_CE_HEADER_DUMP),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_PFP_HEADER_DUMP),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_ME_HEADER_DUMP)
};

static const struct amdgpu_hwip_reg_entry gc_cp_reg_list_9[] = {
        /* compute queue registers */
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_VMID),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_ACTIVE),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_PERSISTENT_STATE),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_PIPE_PRIORITY),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_QUEUE_PRIORITY),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_QUANTUM),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_PQ_BASE),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_PQ_BASE_HI),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_PQ_RPTR),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_PQ_CONTROL),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_IB_BASE_ADDR),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_IB_BASE_ADDR_HI),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_IB_RPTR),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_IB_CONTROL),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_DEQUEUE_REQUEST),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_EOP_BASE_ADDR),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_EOP_BASE_ADDR_HI),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_EOP_CONTROL),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_EOP_RPTR),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_EOP_WPTR),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_EOP_EVENTS),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_CTX_SAVE_BASE_ADDR_LO),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_CTX_SAVE_BASE_ADDR_HI),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_CTX_SAVE_CONTROL),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_CNTL_STACK_OFFSET),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_CNTL_STACK_SIZE),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_WG_STATE_OFFSET),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_CTX_SAVE_SIZE),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_GDS_RESOURCE_STATE),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_ERROR),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_EOP_WPTR_MEM),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_PQ_WPTR_LO),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_PQ_WPTR_HI),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_GFX_STATUS),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP),
        SOC15_REG_ENTRY_STR(GC, 0, mmCP_MEC_ME1_HEADER_DUMP)
};

enum ta_ras_gfx_subblock {
        /*CPC*/
        TA_RAS_BLOCK__GFX_CPC_INDEX_START = 0,
        TA_RAS_BLOCK__GFX_CPC_SCRATCH = TA_RAS_BLOCK__GFX_CPC_INDEX_START,
        TA_RAS_BLOCK__GFX_CPC_UCODE,
        TA_RAS_BLOCK__GFX_DC_STATE_ME1,
        TA_RAS_BLOCK__GFX_DC_CSINVOC_ME1,
        TA_RAS_BLOCK__GFX_DC_RESTORE_ME1,
        TA_RAS_BLOCK__GFX_DC_STATE_ME2,
        TA_RAS_BLOCK__GFX_DC_CSINVOC_ME2,
        TA_RAS_BLOCK__GFX_DC_RESTORE_ME2,
        TA_RAS_BLOCK__GFX_CPC_INDEX_END = TA_RAS_BLOCK__GFX_DC_RESTORE_ME2,
        /* CPF*/
        TA_RAS_BLOCK__GFX_CPF_INDEX_START,
        TA_RAS_BLOCK__GFX_CPF_ROQ_ME2 = TA_RAS_BLOCK__GFX_CPF_INDEX_START,
        TA_RAS_BLOCK__GFX_CPF_ROQ_ME1,
        TA_RAS_BLOCK__GFX_CPF_TAG,
        TA_RAS_BLOCK__GFX_CPF_INDEX_END = TA_RAS_BLOCK__GFX_CPF_TAG,
        /* CPG*/
        TA_RAS_BLOCK__GFX_CPG_INDEX_START,
        TA_RAS_BLOCK__GFX_CPG_DMA_ROQ = TA_RAS_BLOCK__GFX_CPG_INDEX_START,
        TA_RAS_BLOCK__GFX_CPG_DMA_TAG,
        TA_RAS_BLOCK__GFX_CPG_TAG,
        TA_RAS_BLOCK__GFX_CPG_INDEX_END = TA_RAS_BLOCK__GFX_CPG_TAG,
        /* GDS*/
        TA_RAS_BLOCK__GFX_GDS_INDEX_START,
        TA_RAS_BLOCK__GFX_GDS_MEM = TA_RAS_BLOCK__GFX_GDS_INDEX_START,
        TA_RAS_BLOCK__GFX_GDS_INPUT_QUEUE,
        TA_RAS_BLOCK__GFX_GDS_OA_PHY_CMD_RAM_MEM,
        TA_RAS_BLOCK__GFX_GDS_OA_PHY_DATA_RAM_MEM,
        TA_RAS_BLOCK__GFX_GDS_OA_PIPE_MEM,
        TA_RAS_BLOCK__GFX_GDS_INDEX_END = TA_RAS_BLOCK__GFX_GDS_OA_PIPE_MEM,
        /* SPI*/
        TA_RAS_BLOCK__GFX_SPI_SR_MEM,
        /* SQ*/
        TA_RAS_BLOCK__GFX_SQ_INDEX_START,
        TA_RAS_BLOCK__GFX_SQ_SGPR = TA_RAS_BLOCK__GFX_SQ_INDEX_START,
        TA_RAS_BLOCK__GFX_SQ_LDS_D,
        TA_RAS_BLOCK__GFX_SQ_LDS_I,
        TA_RAS_BLOCK__GFX_SQ_VGPR, /* VGPR = SP*/
        TA_RAS_BLOCK__GFX_SQ_INDEX_END = TA_RAS_BLOCK__GFX_SQ_VGPR,
        /* SQC (3 ranges)*/
        TA_RAS_BLOCK__GFX_SQC_INDEX_START,
        /* SQC range 0*/
        TA_RAS_BLOCK__GFX_SQC_INDEX0_START = TA_RAS_BLOCK__GFX_SQC_INDEX_START,
        TA_RAS_BLOCK__GFX_SQC_INST_UTCL1_LFIFO =
                TA_RAS_BLOCK__GFX_SQC_INDEX0_START,
        TA_RAS_BLOCK__GFX_SQC_DATA_CU0_WRITE_DATA_BUF,
        TA_RAS_BLOCK__GFX_SQC_DATA_CU0_UTCL1_LFIFO,
        TA_RAS_BLOCK__GFX_SQC_DATA_CU1_WRITE_DATA_BUF,
        TA_RAS_BLOCK__GFX_SQC_DATA_CU1_UTCL1_LFIFO,
        TA_RAS_BLOCK__GFX_SQC_DATA_CU2_WRITE_DATA_BUF,
        TA_RAS_BLOCK__GFX_SQC_DATA_CU2_UTCL1_LFIFO,
        TA_RAS_BLOCK__GFX_SQC_INDEX0_END =
                TA_RAS_BLOCK__GFX_SQC_DATA_CU2_UTCL1_LFIFO,
        /* SQC range 1*/
        TA_RAS_BLOCK__GFX_SQC_INDEX1_START,
        TA_RAS_BLOCK__GFX_SQC_INST_BANKA_TAG_RAM =
                TA_RAS_BLOCK__GFX_SQC_INDEX1_START,
        TA_RAS_BLOCK__GFX_SQC_INST_BANKA_UTCL1_MISS_FIFO,
        TA_RAS_BLOCK__GFX_SQC_INST_BANKA_MISS_FIFO,
        TA_RAS_BLOCK__GFX_SQC_INST_BANKA_BANK_RAM,
        TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_TAG_RAM,
        TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_HIT_FIFO,
        TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_MISS_FIFO,
        TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_DIRTY_BIT_RAM,
        TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_BANK_RAM,
        TA_RAS_BLOCK__GFX_SQC_INDEX1_END =
                TA_RAS_BLOCK__GFX_SQC_DATA_BANKA_BANK_RAM,
        /* SQC range 2*/
        TA_RAS_BLOCK__GFX_SQC_INDEX2_START,
        TA_RAS_BLOCK__GFX_SQC_INST_BANKB_TAG_RAM =
                TA_RAS_BLOCK__GFX_SQC_INDEX2_START,
        TA_RAS_BLOCK__GFX_SQC_INST_BANKB_UTCL1_MISS_FIFO,
        TA_RAS_BLOCK__GFX_SQC_INST_BANKB_MISS_FIFO,
        TA_RAS_BLOCK__GFX_SQC_INST_BANKB_BANK_RAM,
        TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_TAG_RAM,
        TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_HIT_FIFO,
        TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_MISS_FIFO,
        TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_DIRTY_BIT_RAM,
        TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_BANK_RAM,
        TA_RAS_BLOCK__GFX_SQC_INDEX2_END =
                TA_RAS_BLOCK__GFX_SQC_DATA_BANKB_BANK_RAM,
        TA_RAS_BLOCK__GFX_SQC_INDEX_END = TA_RAS_BLOCK__GFX_SQC_INDEX2_END,
        /* TA*/
        TA_RAS_BLOCK__GFX_TA_INDEX_START,
        TA_RAS_BLOCK__GFX_TA_FS_DFIFO = TA_RAS_BLOCK__GFX_TA_INDEX_START,
        TA_RAS_BLOCK__GFX_TA_FS_AFIFO,
        TA_RAS_BLOCK__GFX_TA_FL_LFIFO,
        TA_RAS_BLOCK__GFX_TA_FX_LFIFO,
        TA_RAS_BLOCK__GFX_TA_FS_CFIFO,
        TA_RAS_BLOCK__GFX_TA_INDEX_END = TA_RAS_BLOCK__GFX_TA_FS_CFIFO,
        /* TCA*/
        TA_RAS_BLOCK__GFX_TCA_INDEX_START,
        TA_RAS_BLOCK__GFX_TCA_HOLE_FIFO = TA_RAS_BLOCK__GFX_TCA_INDEX_START,
        TA_RAS_BLOCK__GFX_TCA_REQ_FIFO,
        TA_RAS_BLOCK__GFX_TCA_INDEX_END = TA_RAS_BLOCK__GFX_TCA_REQ_FIFO,
        /* TCC (5 sub-ranges)*/
        TA_RAS_BLOCK__GFX_TCC_INDEX_START,
        /* TCC range 0*/
        TA_RAS_BLOCK__GFX_TCC_INDEX0_START = TA_RAS_BLOCK__GFX_TCC_INDEX_START,
        TA_RAS_BLOCK__GFX_TCC_CACHE_DATA = TA_RAS_BLOCK__GFX_TCC_INDEX0_START,
        TA_RAS_BLOCK__GFX_TCC_CACHE_DATA_BANK_0_1,
        TA_RAS_BLOCK__GFX_TCC_CACHE_DATA_BANK_1_0,
        TA_RAS_BLOCK__GFX_TCC_CACHE_DATA_BANK_1_1,
        TA_RAS_BLOCK__GFX_TCC_CACHE_DIRTY_BANK_0,
        TA_RAS_BLOCK__GFX_TCC_CACHE_DIRTY_BANK_1,
        TA_RAS_BLOCK__GFX_TCC_HIGH_RATE_TAG,
        TA_RAS_BLOCK__GFX_TCC_LOW_RATE_TAG,
        TA_RAS_BLOCK__GFX_TCC_INDEX0_END = TA_RAS_BLOCK__GFX_TCC_LOW_RATE_TAG,
        /* TCC range 1*/
        TA_RAS_BLOCK__GFX_TCC_INDEX1_START,
        TA_RAS_BLOCK__GFX_TCC_IN_USE_DEC = TA_RAS_BLOCK__GFX_TCC_INDEX1_START,
        TA_RAS_BLOCK__GFX_TCC_IN_USE_TRANSFER,
        TA_RAS_BLOCK__GFX_TCC_INDEX1_END =
                TA_RAS_BLOCK__GFX_TCC_IN_USE_TRANSFER,
        /* TCC range 2*/
        TA_RAS_BLOCK__GFX_TCC_INDEX2_START,
        TA_RAS_BLOCK__GFX_TCC_RETURN_DATA = TA_RAS_BLOCK__GFX_TCC_INDEX2_START,
        TA_RAS_BLOCK__GFX_TCC_RETURN_CONTROL,
        TA_RAS_BLOCK__GFX_TCC_UC_ATOMIC_FIFO,
        TA_RAS_BLOCK__GFX_TCC_WRITE_RETURN,
        TA_RAS_BLOCK__GFX_TCC_WRITE_CACHE_READ,
        TA_RAS_BLOCK__GFX_TCC_SRC_FIFO,
        TA_RAS_BLOCK__GFX_TCC_SRC_FIFO_NEXT_RAM,
        TA_RAS_BLOCK__GFX_TCC_CACHE_TAG_PROBE_FIFO,
        TA_RAS_BLOCK__GFX_TCC_INDEX2_END =
                TA_RAS_BLOCK__GFX_TCC_CACHE_TAG_PROBE_FIFO,
        /* TCC range 3*/
        TA_RAS_BLOCK__GFX_TCC_INDEX3_START,
        TA_RAS_BLOCK__GFX_TCC_LATENCY_FIFO = TA_RAS_BLOCK__GFX_TCC_INDEX3_START,
        TA_RAS_BLOCK__GFX_TCC_LATENCY_FIFO_NEXT_RAM,
        TA_RAS_BLOCK__GFX_TCC_INDEX3_END =
                TA_RAS_BLOCK__GFX_TCC_LATENCY_FIFO_NEXT_RAM,
        /* TCC range 4*/
        TA_RAS_BLOCK__GFX_TCC_INDEX4_START,
        TA_RAS_BLOCK__GFX_TCC_WRRET_TAG_WRITE_RETURN =
                TA_RAS_BLOCK__GFX_TCC_INDEX4_START,
        TA_RAS_BLOCK__GFX_TCC_ATOMIC_RETURN_BUFFER,
        TA_RAS_BLOCK__GFX_TCC_INDEX4_END =
                TA_RAS_BLOCK__GFX_TCC_ATOMIC_RETURN_BUFFER,
        TA_RAS_BLOCK__GFX_TCC_INDEX_END = TA_RAS_BLOCK__GFX_TCC_INDEX4_END,
        /* TCI*/
        TA_RAS_BLOCK__GFX_TCI_WRITE_RAM,
        /* TCP*/
        TA_RAS_BLOCK__GFX_TCP_INDEX_START,
        TA_RAS_BLOCK__GFX_TCP_CACHE_RAM = TA_RAS_BLOCK__GFX_TCP_INDEX_START,
        TA_RAS_BLOCK__GFX_TCP_LFIFO_RAM,
        TA_RAS_BLOCK__GFX_TCP_CMD_FIFO,
        TA_RAS_BLOCK__GFX_TCP_VM_FIFO,
        TA_RAS_BLOCK__GFX_TCP_DB_RAM,
        TA_RAS_BLOCK__GFX_TCP_UTCL1_LFIFO0,
        TA_RAS_BLOCK__GFX_TCP_UTCL1_LFIFO1,
        TA_RAS_BLOCK__GFX_TCP_INDEX_END = TA_RAS_BLOCK__GFX_TCP_UTCL1_LFIFO1,
        /* TD*/
        TA_RAS_BLOCK__GFX_TD_INDEX_START,
        TA_RAS_BLOCK__GFX_TD_SS_FIFO_LO = TA_RAS_BLOCK__GFX_TD_INDEX_START,
        TA_RAS_BLOCK__GFX_TD_SS_FIFO_HI,
        TA_RAS_BLOCK__GFX_TD_CS_FIFO,
        TA_RAS_BLOCK__GFX_TD_INDEX_END = TA_RAS_BLOCK__GFX_TD_CS_FIFO,
        /* EA (3 sub-ranges)*/
        TA_RAS_BLOCK__GFX_EA_INDEX_START,
        /* EA range 0*/
        TA_RAS_BLOCK__GFX_EA_INDEX0_START = TA_RAS_BLOCK__GFX_EA_INDEX_START,
        TA_RAS_BLOCK__GFX_EA_DRAMRD_CMDMEM = TA_RAS_BLOCK__GFX_EA_INDEX0_START,
        TA_RAS_BLOCK__GFX_EA_DRAMWR_CMDMEM,
        TA_RAS_BLOCK__GFX_EA_DRAMWR_DATAMEM,
        TA_RAS_BLOCK__GFX_EA_RRET_TAGMEM,
        TA_RAS_BLOCK__GFX_EA_WRET_TAGMEM,
        TA_RAS_BLOCK__GFX_EA_GMIRD_CMDMEM,
        TA_RAS_BLOCK__GFX_EA_GMIWR_CMDMEM,
        TA_RAS_BLOCK__GFX_EA_GMIWR_DATAMEM,
        TA_RAS_BLOCK__GFX_EA_INDEX0_END = TA_RAS_BLOCK__GFX_EA_GMIWR_DATAMEM,
        /* EA range 1*/
        TA_RAS_BLOCK__GFX_EA_INDEX1_START,
        TA_RAS_BLOCK__GFX_EA_DRAMRD_PAGEMEM = TA_RAS_BLOCK__GFX_EA_INDEX1_START,
        TA_RAS_BLOCK__GFX_EA_DRAMWR_PAGEMEM,
        TA_RAS_BLOCK__GFX_EA_IORD_CMDMEM,
        TA_RAS_BLOCK__GFX_EA_IOWR_CMDMEM,
        TA_RAS_BLOCK__GFX_EA_IOWR_DATAMEM,
        TA_RAS_BLOCK__GFX_EA_GMIRD_PAGEMEM,
        TA_RAS_BLOCK__GFX_EA_GMIWR_PAGEMEM,
        TA_RAS_BLOCK__GFX_EA_INDEX1_END = TA_RAS_BLOCK__GFX_EA_GMIWR_PAGEMEM,
        /* EA range 2*/
        TA_RAS_BLOCK__GFX_EA_INDEX2_START,
        TA_RAS_BLOCK__GFX_EA_MAM_D0MEM = TA_RAS_BLOCK__GFX_EA_INDEX2_START,
        TA_RAS_BLOCK__GFX_EA_MAM_D1MEM,
        TA_RAS_BLOCK__GFX_EA_MAM_D2MEM,
        TA_RAS_BLOCK__GFX_EA_MAM_D3MEM,
        TA_RAS_BLOCK__GFX_EA_INDEX2_END = TA_RAS_BLOCK__GFX_EA_MAM_D3MEM,
        TA_RAS_BLOCK__GFX_EA_INDEX_END = TA_RAS_BLOCK__GFX_EA_INDEX2_END,
        /* UTC VM L2 bank*/
        TA_RAS_BLOCK__UTC_VML2_BANK_CACHE,
        /* UTC VM walker*/
        TA_RAS_BLOCK__UTC_VML2_WALKER,
        /* UTC ATC L2 2MB cache*/
        TA_RAS_BLOCK__UTC_ATCL2_CACHE_2M_BANK,
        /* UTC ATC L2 4KB cache*/
        TA_RAS_BLOCK__UTC_ATCL2_CACHE_4K_BANK,
        TA_RAS_BLOCK__GFX_MAX
};

struct ras_gfx_subblock {
        unsigned char *name;
        int ta_subblock;
        int hw_supported_error_type;
        int sw_supported_error_type;
};

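/*
 * Pack the eight capability flags into the two bitfields consumed by the
 * RAS TA: a..d form hw_supported_error_type (a -> bit0 ... d -> bit3),
 * while e..h form sw_supported_error_type (g -> bit0, e -> bit1,
 * h -> bit2, f -> bit3).
 */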
#define AMDGPU_RAS_SUB_BLOCK(subblock, a, b, c, d, e, f, g, h)                             \
        [AMDGPU_RAS_BLOCK__##subblock] = {                                     \
                #subblock,                                                     \
                TA_RAS_BLOCK__##subblock,                                      \
                ((a) | ((b) << 1) | ((c) << 2) | ((d) << 3)),                  \
                (((e) << 1) | ((f) << 3) | (g) | ((h) << 2)),                  \
        }

static const struct ras_gfx_subblock ras_gfx_subblocks[] = {
        AMDGPU_RAS_SUB_BLOCK(GFX_CPC_SCRATCH, 0, 1, 1, 1, 1, 0, 0, 1),
        AMDGPU_RAS_SUB_BLOCK(GFX_CPC_UCODE, 0, 1, 1, 1, 1, 0, 0, 1),
        AMDGPU_RAS_SUB_BLOCK(GFX_DC_STATE_ME1, 1, 0, 0, 1, 0, 0, 1, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_DC_CSINVOC_ME1, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_DC_RESTORE_ME1, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_DC_STATE_ME2, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_DC_CSINVOC_ME2, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_DC_RESTORE_ME2, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_CPF_ROQ_ME2, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_CPF_ROQ_ME1, 1, 0, 0, 1, 0, 0, 1, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_CPF_TAG, 0, 1, 1, 1, 1, 0, 0, 1),
        AMDGPU_RAS_SUB_BLOCK(GFX_CPG_DMA_ROQ, 1, 0, 0, 1, 0, 0, 1, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_CPG_DMA_TAG, 0, 1, 1, 1, 0, 1, 0, 1),
        AMDGPU_RAS_SUB_BLOCK(GFX_CPG_TAG, 0, 1, 1, 1, 1, 1, 0, 1),
        AMDGPU_RAS_SUB_BLOCK(GFX_GDS_MEM, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_GDS_INPUT_QUEUE, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_GDS_OA_PHY_CMD_RAM_MEM, 0, 1, 1, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_GDS_OA_PHY_DATA_RAM_MEM, 1, 0, 0, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_GDS_OA_PIPE_MEM, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SPI_SR_MEM, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQ_SGPR, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQ_LDS_D, 0, 1, 1, 1, 1, 0, 0, 1),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQ_LDS_I, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQ_VGPR, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_UTCL1_LFIFO, 0, 1, 1, 1, 0, 0, 0, 1),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU0_WRITE_DATA_BUF, 0, 1, 1, 1, 0, 0,
                             0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU0_UTCL1_LFIFO, 0, 1, 1, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU1_WRITE_DATA_BUF, 0, 1, 1, 1, 0, 0,
                             0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU1_UTCL1_LFIFO, 0, 1, 1, 1, 1, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU2_WRITE_DATA_BUF, 0, 1, 1, 1, 0, 0,
                             0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_CU2_UTCL1_LFIFO, 0, 1, 1, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKA_TAG_RAM, 0, 1, 1, 1, 1, 0, 0,
                             1),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKA_UTCL1_MISS_FIFO, 1, 0, 0, 1, 0,
                             0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKA_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKA_BANK_RAM, 0, 1, 1, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_TAG_RAM, 0, 1, 1, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_HIT_FIFO, 1, 0, 0, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_DIRTY_BIT_RAM, 1, 0, 0, 1, 0, 0,
                             0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKA_BANK_RAM, 0, 1, 1, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKB_TAG_RAM, 0, 1, 1, 1, 1, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKB_UTCL1_MISS_FIFO, 1, 0, 0, 1, 0,
                             0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKB_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_INST_BANKB_BANK_RAM, 0, 1, 1, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_TAG_RAM, 0, 1, 1, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_HIT_FIFO, 1, 0, 0, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_DIRTY_BIT_RAM, 1, 0, 0, 1, 0, 0,
                             0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_SQC_DATA_BANKB_BANK_RAM, 0, 1, 1, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TA_FS_DFIFO, 0, 1, 1, 1, 1, 0, 0, 1),
        AMDGPU_RAS_SUB_BLOCK(GFX_TA_FS_AFIFO, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TA_FL_LFIFO, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TA_FX_LFIFO, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TA_FS_CFIFO, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCA_HOLE_FIFO, 1, 0, 0, 1, 0, 1, 1, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCA_REQ_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DATA, 0, 1, 1, 1, 1, 0, 0, 1),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DATA_BANK_0_1, 0, 1, 1, 1, 1, 0, 0,
                             1),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DATA_BANK_1_0, 0, 1, 1, 1, 1, 0, 0,
                             1),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DATA_BANK_1_1, 0, 1, 1, 1, 1, 0, 0,
                             1),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DIRTY_BANK_0, 0, 1, 1, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_DIRTY_BANK_1, 0, 1, 1, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_HIGH_RATE_TAG, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_LOW_RATE_TAG, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_IN_USE_DEC, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_IN_USE_TRANSFER, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_RETURN_DATA, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_RETURN_CONTROL, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_UC_ATOMIC_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_WRITE_RETURN, 1, 0, 0, 1, 0, 1, 1, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_WRITE_CACHE_READ, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_SRC_FIFO, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_SRC_FIFO_NEXT_RAM, 1, 0, 0, 1, 0, 0, 1, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_CACHE_TAG_PROBE_FIFO, 1, 0, 0, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_LATENCY_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_LATENCY_FIFO_NEXT_RAM, 1, 0, 0, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_WRRET_TAG_WRITE_RETURN, 1, 0, 0, 1, 0, 0,
                             0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCC_ATOMIC_RETURN_BUFFER, 1, 0, 0, 1, 0, 0, 0,
                             0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCI_WRITE_RAM, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCP_CACHE_RAM, 0, 1, 1, 1, 1, 0, 0, 1),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCP_LFIFO_RAM, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCP_CMD_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCP_VM_FIFO, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCP_DB_RAM, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCP_UTCL1_LFIFO0, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TCP_UTCL1_LFIFO1, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TD_SS_FIFO_LO, 0, 1, 1, 1, 1, 0, 0, 1),
        AMDGPU_RAS_SUB_BLOCK(GFX_TD_SS_FIFO_HI, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_TD_CS_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMRD_CMDMEM, 0, 1, 1, 1, 1, 0, 0, 1),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMWR_CMDMEM, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMWR_DATAMEM, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_RRET_TAGMEM, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_WRET_TAGMEM, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIRD_CMDMEM, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIWR_CMDMEM, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIWR_DATAMEM, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMRD_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_DRAMWR_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_IORD_CMDMEM, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_IOWR_CMDMEM, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_IOWR_DATAMEM, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIRD_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_GMIWR_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_MAM_D0MEM, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_MAM_D1MEM, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_MAM_D2MEM, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(GFX_EA_MAM_D3MEM, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(UTC_VML2_BANK_CACHE, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(UTC_VML2_WALKER, 0, 1, 1, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(UTC_ATCL2_CACHE_2M_BANK, 1, 0, 0, 1, 0, 0, 0, 0),
        AMDGPU_RAS_SUB_BLOCK(UTC_ATCL2_CACHE_4K_BANK, 0, 1, 1, 1, 0, 0, 0, 0),
};

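/*
 * Each SOC15_REG_GOLDEN_VALUE() entry is a (register, and_mask, or_value)
 * tuple: soc15_program_register_sequence() read-modify-writes the register,
 * clearing the and_mask bits and ORing in or_value (a full 0xffffffff mask
 * writes or_value outright).
 */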
static const struct soc15_reg_golden golden_settings_gc_9_0[] =
{
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000400),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0x80000000, 0x80000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSH_MEM_CONFIG, 0x00001000, 0x00001000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_0, 0x0007ffff, 0x00000800),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_1, 0x0007ffff, 0x00000800),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_0, 0x01ffffff, 0x00ffff87),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_1, 0x01ffffff, 0x00ffff8f),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQC_CONFIG, 0x03000000, 0x020a2000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x4a2c0e68),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0xb5d3f197),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000800, 0x00000800),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000800, 0x00000800),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00008000, 0x00008000)
};

static const struct soc15_reg_golden golden_settings_gc_9_0_vg10[] =
{
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0x0000f000, 0x00012107),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x2a114042),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x2a114042),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0x00008000, 0x00048000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00020000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0x0000000f, 0x01000107),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x00001800, 0x00000800),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080)
};

static const struct soc15_reg_golden golden_settings_gc_9_0_vg20[] =
{
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_DCC_CONFIG, 0x0f000080, 0x04000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0x0f000000, 0x0a000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xf3e777ff, 0x22014042),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xf3e777ff, 0x22014042),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0x00003e00, 0x00000400),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xff840000, 0x04040000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00030000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0xffff010f, 0x01000107),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0x000b0000, 0x000b0000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01000000, 0x01000000)
};

static const struct soc15_reg_golden golden_settings_gc_9_1[] =
{
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000420),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00003120),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000000ff),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000800, 0x00000800),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000800, 0x00000800),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00008000, 0x00008000)
};

static const struct soc15_reg_golden golden_settings_gc_9_1_rv1[] =
{
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x24000042),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x24000042),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04048000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_MODE_CNTL_1, 0x06000000, 0x06000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00020000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x00000800)
};

static const struct soc15_reg_golden golden_settings_gc_9_1_rv2[] =
{
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_DCC_CONFIG, 0xff7fffff, 0x04000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0xff7fffff, 0x0a000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x7f0fffff, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0xff8fffff, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x7f8fffff, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x26013041),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x26013041),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x3f8fffff, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04040000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0xff0fffff, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0xff0fffff, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0xff0fffff, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0xff0fffff, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0xff0fffff, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00000010),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x01000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x3f8fffff, 0x08000080),
};

static const struct soc15_reg_golden golden_settings_gc_9_1_rn[] =
{
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0xff7fffff, 0x0a000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000400),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xf3e777ff, 0x24000042),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xf3e777ff, 0x24000042),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04040000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00003120),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCEA_PROBE_MAP, 0xffffffff, 0x0000cccc),
};

static const struct soc15_reg_golden golden_settings_gc_9_x_common[] =
{
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_SD_CNTL, 0xffffffff, 0x000001ff),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_INDEX, 0xffffffff, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_DATA, 0xffffffff, 0x2544c382)
};

static const struct soc15_reg_golden golden_settings_gc_9_2_1[] =
{
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000420),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSH_MEM_CONFIG, 0x00001000, 0x00001000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_0, 0x0007ffff, 0x00000800),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_1, 0x0007ffff, 0x00000800),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_0, 0x01ffffff, 0x0000ff87),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_1, 0x01ffffff, 0x0000ff8f),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQC_CONFIG, 0x03000000, 0x020a2000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x4a2c0e68),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0xb5d3f197),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff)
};

static const struct soc15_reg_golden golden_settings_gc_9_2_1_vg12[] =
{
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_DCC_CONFIG, 0x00000080, 0x04000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0x0f000000, 0x0a000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x24104041),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x24104041),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04040000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0xffff03ff, 0x01000107),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x76325410),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x01000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000800, 0x00000800),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000800, 0x00000800),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00008000, 0x00008000)
};

static const struct soc15_reg_golden golden_settings_gc_9_4_1_arct[] =
{
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x2a114042),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x10b0000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_0_ARCT, 0x3fffffff, 0x346f0a4e),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_1_ARCT, 0x3fffffff, 0x1c642ca),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_2_ARCT, 0x3fffffff, 0x26f45098),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_3_ARCT, 0x3fffffff, 0x2ebd9fe3),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_4_ARCT, 0x3fffffff, 0xb90f5b1),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_5_ARCT, 0x3ff, 0x135),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_CONFIG, 0xffffffff, 0x011A0000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_FIFO_SIZES, 0xffffffff, 0x00000f00),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_UTCL1_CNTL1, 0x30000000, 0x30000000)
};

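/* Registers that go through the RLCG indirect access path (e.g. under SR-IOV) */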
static const struct soc15_reg_rlcg rlcg_access_gc_9_0[] = {
        {SOC15_REG_ENTRY(GC, 0, mmGRBM_GFX_INDEX)},
        {SOC15_REG_ENTRY(GC, 0, mmSQ_IND_INDEX)},
};

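/* Offsets of RLC_SRM_INDEX_CNTL_ADDR_n / _DATA_n relative to instance 0 */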
static const u32 GFX_RLC_SRM_INDEX_CNTL_ADDR_OFFSETS[] =
{
        mmRLC_SRM_INDEX_CNTL_ADDR_0 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
        mmRLC_SRM_INDEX_CNTL_ADDR_1 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
        mmRLC_SRM_INDEX_CNTL_ADDR_2 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
        mmRLC_SRM_INDEX_CNTL_ADDR_3 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
        mmRLC_SRM_INDEX_CNTL_ADDR_4 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
        mmRLC_SRM_INDEX_CNTL_ADDR_5 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
        mmRLC_SRM_INDEX_CNTL_ADDR_6 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
        mmRLC_SRM_INDEX_CNTL_ADDR_7 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
};

static const u32 GFX_RLC_SRM_INDEX_CNTL_DATA_OFFSETS[] =
{
        mmRLC_SRM_INDEX_CNTL_DATA_0 - mmRLC_SRM_INDEX_CNTL_DATA_0,
        mmRLC_SRM_INDEX_CNTL_DATA_1 - mmRLC_SRM_INDEX_CNTL_DATA_0,
        mmRLC_SRM_INDEX_CNTL_DATA_2 - mmRLC_SRM_INDEX_CNTL_DATA_0,
        mmRLC_SRM_INDEX_CNTL_DATA_3 - mmRLC_SRM_INDEX_CNTL_DATA_0,
        mmRLC_SRM_INDEX_CNTL_DATA_4 - mmRLC_SRM_INDEX_CNTL_DATA_0,
        mmRLC_SRM_INDEX_CNTL_DATA_5 - mmRLC_SRM_INDEX_CNTL_DATA_0,
        mmRLC_SRM_INDEX_CNTL_DATA_6 - mmRLC_SRM_INDEX_CNTL_DATA_0,
        mmRLC_SRM_INDEX_CNTL_DATA_7 - mmRLC_SRM_INDEX_CNTL_DATA_0,
};

#define VEGA10_GB_ADDR_CONFIG_GOLDEN 0x2a114042
#define VEGA12_GB_ADDR_CONFIG_GOLDEN 0x24104041
#define RAVEN_GB_ADDR_CONFIG_GOLDEN 0x24000042
#define RAVEN2_GB_ADDR_CONFIG_GOLDEN 0x26013041

static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev);
static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev);
static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
                                struct amdgpu_cu_info *cu_info);
static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev);
static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume, bool usegds);
static u64 gfx_v9_0_ring_get_rptr_compute(struct amdgpu_ring *ring);
static void gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
                                          void *ras_error_status);
static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev,
                                     void *inject_if, uint32_t instance_mask);
static void gfx_v9_0_reset_ras_error_count(struct amdgpu_device *adev);
static void gfx_v9_0_update_spm_vmid_internal(struct amdgpu_device *adev,
                                              unsigned int vmid);
static void gfx_v9_0_set_safe_mode(struct amdgpu_device *adev, int xcc_id);
static void gfx_v9_0_unset_safe_mode(struct amdgpu_device *adev, int xcc_id);

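/*
 * Build a PACKET3_SET_RESOURCES packet telling the CP which compute
 * queues the KIQ owns (@queue_mask) and where the cleaner shader lives.
 */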
static void gfx_v9_0_kiq_set_resources(struct amdgpu_ring *kiq_ring,
                                uint64_t queue_mask)
{
        struct amdgpu_device *adev = kiq_ring->adev;
        u64 shader_mc_addr;

        /* Cleaner shader MC address */
        shader_mc_addr = adev->gfx.cleaner_shader_gpu_addr >> 8;

        amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
        amdgpu_ring_write(kiq_ring,
                PACKET3_SET_RESOURCES_VMID_MASK(0) |
                /* vmid_mask: 0, queue_type: 0 (KIQ) */
                PACKET3_SET_RESOURCES_QUEUE_TYPE(0));
        amdgpu_ring_write(kiq_ring,
                        lower_32_bits(queue_mask));     /* queue mask lo */
        amdgpu_ring_write(kiq_ring,
                        upper_32_bits(queue_mask));     /* queue mask hi */
        amdgpu_ring_write(kiq_ring, lower_32_bits(shader_mc_addr)); /* cleaner shader addr lo */
        amdgpu_ring_write(kiq_ring, upper_32_bits(shader_mc_addr)); /* cleaner shader addr hi */
        amdgpu_ring_write(kiq_ring, 0); /* oac mask */
        amdgpu_ring_write(kiq_ring, 0); /* gds heap base:0, gds heap size:0 */
}

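/* Ask the KIQ to map (activate) @ring's hardware queue from its MQD */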
static void gfx_v9_0_kiq_map_queues(struct amdgpu_ring *kiq_ring,
                                 struct amdgpu_ring *ring)
{
        uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
        uint64_t wptr_addr = ring->wptr_gpu_addr;
        uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

        amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
        amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
                         PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
                         PACKET3_MAP_QUEUES_VMID(0) | /* VMID */
                         PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
                         PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
                         PACKET3_MAP_QUEUES_ME((ring->me == 1 ? 0 : 1)) |
                         /*queue_type: normal compute queue */
                         PACKET3_MAP_QUEUES_QUEUE_TYPE(0) |
                         /* alloc format: all_on_one_pipe */
                         PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) |
                         PACKET3_MAP_QUEUES_ENGINE_SEL(eng_sel) |
                         /* num_queues: must be 1 */
                         PACKET3_MAP_QUEUES_NUM_QUEUES(1));
        amdgpu_ring_write(kiq_ring,
                        PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
        amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
        amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
        amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
        amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
}

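/*
 * Ask the KIQ to unmap @ring's hardware queue. For
 * PREEMPT_QUEUES_NO_UNMAP the masked wptr is passed along so the queue
 * can later be resumed from the preemption point.
 */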
static void gfx_v9_0_kiq_unmap_queues(struct amdgpu_ring *kiq_ring,
                                   struct amdgpu_ring *ring,
                                   enum amdgpu_unmap_queues_action action,
                                   u64 gpu_addr, u64 seq)
{
        uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

        amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
        amdgpu_ring_write(kiq_ring, /* queue_sel: 0, num_queues: 1 */
                          PACKET3_UNMAP_QUEUES_ACTION(action) |
                          PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
                          PACKET3_UNMAP_QUEUES_ENGINE_SEL(eng_sel) |
                          PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
        amdgpu_ring_write(kiq_ring,
                        PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));

        if (action == PREEMPT_QUEUES_NO_UNMAP) {
                amdgpu_ring_write(kiq_ring, lower_32_bits(ring->wptr & ring->buf_mask));
                amdgpu_ring_write(kiq_ring, 0);
                amdgpu_ring_write(kiq_ring, 0);

        } else {
                amdgpu_ring_write(kiq_ring, 0);
                amdgpu_ring_write(kiq_ring, 0);
                amdgpu_ring_write(kiq_ring, 0);
        }
}

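/*
 * Ask the KIQ to query @ring's queue status; the CP writes @seq to
 * @addr on completion so the caller can poll or fence on it.
 */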
static void gfx_v9_0_kiq_query_status(struct amdgpu_ring *kiq_ring,
                                   struct amdgpu_ring *ring,
                                   u64 addr,
                                   u64 seq)
{
        uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

        amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_QUERY_STATUS, 5));
        amdgpu_ring_write(kiq_ring,
                          PACKET3_QUERY_STATUS_CONTEXT_ID(0) |
                          PACKET3_QUERY_STATUS_INTERRUPT_SEL(0) |
                          PACKET3_QUERY_STATUS_COMMAND(2));
        /* doorbell offset selects the queue; eng_sel picks gfx vs compute */
        amdgpu_ring_write(kiq_ring,
                        PACKET3_QUERY_STATUS_DOORBELL_OFFSET(ring->doorbell_index) |
                        PACKET3_QUERY_STATUS_ENG_SEL(eng_sel));
        amdgpu_ring_write(kiq_ring, lower_32_bits(addr));
        amdgpu_ring_write(kiq_ring, upper_32_bits(addr));
        amdgpu_ring_write(kiq_ring, lower_32_bits(seq));
        amdgpu_ring_write(kiq_ring, upper_32_bits(seq));
}

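/* Invalidate the TLB entries of @pasid (on all VM hubs if @all_hub) */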
static void gfx_v9_0_kiq_invalidate_tlbs(struct amdgpu_ring *kiq_ring,
                                uint16_t pasid, uint32_t flush_type,
                                bool all_hub)
{
        amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0));
        amdgpu_ring_write(kiq_ring,
                        PACKET3_INVALIDATE_TLBS_DST_SEL(1) |
                        PACKET3_INVALIDATE_TLBS_ALL_HUB(all_hub) |
                        PACKET3_INVALIDATE_TLBS_PASID(pasid) |
                        PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(flush_type));
}

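/*
 * Directly reset a compute HQD: select the queue via SRBM, request a
 * dequeue and an SPI queue reset, then poll CP_HQD_ACTIVE until the
 * queue goes idle (bounded by adev->usec_timeout).
 */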
static void gfx_v9_0_kiq_reset_hw_queue(struct amdgpu_ring *kiq_ring, uint32_t queue_type,
                                        uint32_t me_id, uint32_t pipe_id, uint32_t queue_id,
                                        uint32_t xcc_id, uint32_t vmid)
{
        struct amdgpu_device *adev = kiq_ring->adev;
        unsigned i;

        /* enter safe mode */
        amdgpu_gfx_rlc_enter_safe_mode(adev, xcc_id);
        mutex_lock(&adev->srbm_mutex);
        soc15_grbm_select(adev, me_id, pipe_id, queue_id, 0, 0);

        if (queue_type == AMDGPU_RING_TYPE_COMPUTE) {
                WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 0x2);
                WREG32_SOC15(GC, 0, mmSPI_COMPUTE_QUEUE_RESET, 0x1);
                /* wait until the dequeue request takes effect */
                for (i = 0; i < adev->usec_timeout; i++) {
                        if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
                                break;
                        udelay(1);
                }
                if (i >= adev->usec_timeout)
                        dev_err(adev->dev, "fail to wait on hqd deactive\n");
        } else {
                dev_err(adev->dev, "reset queue_type(%d) not supported\n", queue_type);
        }

        soc15_grbm_select(adev, 0, 0, 0, 0, 0);
        mutex_unlock(&adev->srbm_mutex);
        /* exit safe mode */
        amdgpu_gfx_rlc_exit_safe_mode(adev, xcc_id);
}

static const struct kiq_pm4_funcs gfx_v9_0_kiq_pm4_funcs = {
        .kiq_set_resources = gfx_v9_0_kiq_set_resources,
        .kiq_map_queues = gfx_v9_0_kiq_map_queues,
        .kiq_unmap_queues = gfx_v9_0_kiq_unmap_queues,
        .kiq_query_status = gfx_v9_0_kiq_query_status,
        .kiq_invalidate_tlbs = gfx_v9_0_kiq_invalidate_tlbs,
        .kiq_reset_hw_queue = gfx_v9_0_kiq_reset_hw_queue,
        .set_resources_size = 8,
        .map_queues_size = 7,
        .unmap_queues_size = 6,
        .query_status_size = 7,
        .invalidate_tlbs_size = 2,
};

static void gfx_v9_0_set_kiq_pm4_funcs(struct amdgpu_device *adev)
{
        adev->gfx.kiq[0].pmf = &gfx_v9_0_kiq_pm4_funcs;
}

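/*
 * Program the per-ASIC "golden" register settings for the GC block;
 * most ASICs also get the common gc_9_x settings applied afterwards.
 */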
static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
{
        switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
        case IP_VERSION(9, 0, 1):
                soc15_program_register_sequence(adev,
                                                golden_settings_gc_9_0,
                                                ARRAY_SIZE(golden_settings_gc_9_0));
                soc15_program_register_sequence(adev,
                                                golden_settings_gc_9_0_vg10,
                                                ARRAY_SIZE(golden_settings_gc_9_0_vg10));
                break;
        case IP_VERSION(9, 2, 1):
                soc15_program_register_sequence(adev,
                                                golden_settings_gc_9_2_1,
                                                ARRAY_SIZE(golden_settings_gc_9_2_1));
                soc15_program_register_sequence(adev,
                                                golden_settings_gc_9_2_1_vg12,
                                                ARRAY_SIZE(golden_settings_gc_9_2_1_vg12));
                break;
        case IP_VERSION(9, 4, 0):
                soc15_program_register_sequence(adev,
                                                golden_settings_gc_9_0,
                                                ARRAY_SIZE(golden_settings_gc_9_0));
                soc15_program_register_sequence(adev,
                                                golden_settings_gc_9_0_vg20,
                                                ARRAY_SIZE(golden_settings_gc_9_0_vg20));
                break;
        case IP_VERSION(9, 4, 1):
                soc15_program_register_sequence(adev,
                                                golden_settings_gc_9_4_1_arct,
                                                ARRAY_SIZE(golden_settings_gc_9_4_1_arct));
                break;
        case IP_VERSION(9, 2, 2):
        case IP_VERSION(9, 1, 0):
                soc15_program_register_sequence(adev, golden_settings_gc_9_1,
                                                ARRAY_SIZE(golden_settings_gc_9_1));
                if (adev->apu_flags & AMD_APU_IS_RAVEN2)
                        soc15_program_register_sequence(adev,
                                                        golden_settings_gc_9_1_rv2,
                                                        ARRAY_SIZE(golden_settings_gc_9_1_rv2));
                else
                        soc15_program_register_sequence(adev,
                                                        golden_settings_gc_9_1_rv1,
                                                        ARRAY_SIZE(golden_settings_gc_9_1_rv1));
                break;
        case IP_VERSION(9, 3, 0):
                soc15_program_register_sequence(adev,
                                                golden_settings_gc_9_1_rn,
                                                ARRAY_SIZE(golden_settings_gc_9_1_rn));
                return; /* Renoir does not need the common golden settings */
        case IP_VERSION(9, 4, 2):
                gfx_v9_4_2_init_golden_registers(adev,
                                                 adev->smuio.funcs->get_die_id(adev));
                break;
        default:
                break;
        }

        if ((amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 1)) &&
            (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 2)))
                soc15_program_register_sequence(adev, golden_settings_gc_9_x_common,
                                                (const u32)ARRAY_SIZE(golden_settings_gc_9_x_common));
}

static void gfx_v9_0_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel,
                                       bool wc, uint32_t reg, uint32_t val)
{
        amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
        amdgpu_ring_write(ring, WRITE_DATA_ENGINE_SEL(eng_sel) |
                                WRITE_DATA_DST_SEL(0) |
                                (wc ? WR_CONFIRM : 0));
        amdgpu_ring_write(ring, reg);
        amdgpu_ring_write(ring, 0);
        amdgpu_ring_write(ring, val);
}

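/*
 * Emit a PM4 WAIT_REG_MEM packet: stall the selected engine until the
 * masked value at addr0/addr1 (a register offset or a memory address,
 * per @mem_space) compares equal to @ref.
 */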
static void gfx_v9_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
                                  int mem_space, int opt, uint32_t addr0,
                                  uint32_t addr1, uint32_t ref, uint32_t mask,
                                  uint32_t inv)
{
        amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
        amdgpu_ring_write(ring,
                                 /* memory (1) or register (0) */
                                 (WAIT_REG_MEM_MEM_SPACE(mem_space) |
                                 WAIT_REG_MEM_OPERATION(opt) | /* wait */
                                 WAIT_REG_MEM_FUNCTION(3) |  /* equal */
                                 WAIT_REG_MEM_ENGINE(eng_sel)));

        if (mem_space)
                BUG_ON(addr0 & 0x3); /* Dword align */
        amdgpu_ring_write(ring, addr0);
        amdgpu_ring_write(ring, addr1);
        amdgpu_ring_write(ring, ref);
        amdgpu_ring_write(ring, mask);
        amdgpu_ring_write(ring, inv); /* poll interval */
}

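/*
 * Basic ring test: push a SET_UCONFIG_REG write of a magic value to
 * SCRATCH_REG0 through the ring and poll the register until the value
 * appears or the timeout expires.
 */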
static int gfx_v9_0_ring_test_ring(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        uint32_t scratch = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG0);
        uint32_t tmp = 0;
        unsigned i;
        int r;

        WREG32(scratch, 0xCAFEDEAD);
        r = amdgpu_ring_alloc(ring, 3);
        if (r)
                return r;

        amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
        amdgpu_ring_write(ring, scratch - PACKET3_SET_UCONFIG_REG_START);
        amdgpu_ring_write(ring, 0xDEADBEEF);
        amdgpu_ring_commit(ring);

        for (i = 0; i < adev->usec_timeout; i++) {
                tmp = RREG32(scratch);
                if (tmp == 0xDEADBEEF)
                        break;
                udelay(1);
        }

        if (i >= adev->usec_timeout)
                r = -ETIMEDOUT;
        return r;
}

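/*
 * Basic IB test: submit an indirect buffer that writes a magic value
 * to a writeback slot, wait on the resulting fence and verify that
 * the value landed.
 */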
static int gfx_v9_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_ib ib;
        struct dma_fence *f = NULL;

        unsigned index;
        uint64_t gpu_addr;
        uint32_t tmp;
        long r;

        r = amdgpu_device_wb_get(adev, &index);
        if (r)
                return r;

        gpu_addr = adev->wb.gpu_addr + (index * 4);
        adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
        memset(&ib, 0, sizeof(ib));

        r = amdgpu_ib_get(adev, NULL, 20, AMDGPU_IB_POOL_DIRECT, &ib);
        if (r)
                goto err1;

        ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
        ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
        ib.ptr[2] = lower_32_bits(gpu_addr);
        ib.ptr[3] = upper_32_bits(gpu_addr);
        ib.ptr[4] = 0xDEADBEEF;
        ib.length_dw = 5;

        r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
        if (r)
                goto err2;

        r = dma_fence_wait_timeout(f, false, timeout);
        if (r == 0) {
                r = -ETIMEDOUT;
                goto err2;
        } else if (r < 0) {
                goto err2;
        }

        tmp = adev->wb.wb[index];
        if (tmp == 0xDEADBEEF)
                r = 0;
        else
                r = -EINVAL;

err2:
        amdgpu_ib_free(&ib, NULL);
        dma_fence_put(f);
err1:
        amdgpu_device_wb_free(adev, index);
        return r;
}

static void gfx_v9_0_free_microcode(struct amdgpu_device *adev)
{
        amdgpu_ucode_release(&adev->gfx.pfp_fw);
        amdgpu_ucode_release(&adev->gfx.me_fw);
        amdgpu_ucode_release(&adev->gfx.ce_fw);
        amdgpu_ucode_release(&adev->gfx.rlc_fw);
        amdgpu_ucode_release(&adev->gfx.mec_fw);
        amdgpu_ucode_release(&adev->gfx.mec2_fw);

        kfree(adev->gfx.rlc.register_list_format);
}

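/*
 * Decide, from the loaded ME/MEC/PFP firmware versions, whether the
 * CP firmware supports the write-then-wait register sequence
 * (me/mec_fw_write_wait), and warn once when the firmware is too old.
 */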
static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev)
{
        adev->gfx.me_fw_write_wait = false;
        adev->gfx.mec_fw_write_wait = false;

        if ((amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 1)) &&
            (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 2)) &&
            ((adev->gfx.mec_fw_version < 0x000001a5) ||
             (adev->gfx.mec_feature_version < 46) ||
             (adev->gfx.pfp_fw_version < 0x000000b7) ||
             (adev->gfx.pfp_feature_version < 46)))
                drm_warn_once(adev_to_drm(adev),
                              "CP firmware version too old, please update!");

        switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
        case IP_VERSION(9, 0, 1):
                if ((adev->gfx.me_fw_version >= 0x0000009c) &&
                    (adev->gfx.me_feature_version >= 42) &&
                    (adev->gfx.pfp_fw_version >=  0x000000b1) &&
                    (adev->gfx.pfp_feature_version >= 42))
                        adev->gfx.me_fw_write_wait = true;

                if ((adev->gfx.mec_fw_version >=  0x00000193) &&
                    (adev->gfx.mec_feature_version >= 42))
                        adev->gfx.mec_fw_write_wait = true;
                break;
        case IP_VERSION(9, 2, 1):
                if ((adev->gfx.me_fw_version >= 0x0000009c) &&
                    (adev->gfx.me_feature_version >= 44) &&
                    (adev->gfx.pfp_fw_version >=  0x000000b2) &&
                    (adev->gfx.pfp_feature_version >= 44))
                        adev->gfx.me_fw_write_wait = true;

                if ((adev->gfx.mec_fw_version >=  0x00000196) &&
                    (adev->gfx.mec_feature_version >= 44))
                        adev->gfx.mec_fw_write_wait = true;
                break;
        case IP_VERSION(9, 4, 0):
                if ((adev->gfx.me_fw_version >= 0x0000009c) &&
                    (adev->gfx.me_feature_version >= 44) &&
                    (adev->gfx.pfp_fw_version >=  0x000000b2) &&
                    (adev->gfx.pfp_feature_version >= 44))
                        adev->gfx.me_fw_write_wait = true;

                if ((adev->gfx.mec_fw_version >=  0x00000197) &&
                    (adev->gfx.mec_feature_version >= 44))
                        adev->gfx.mec_fw_write_wait = true;
                break;
        case IP_VERSION(9, 1, 0):
        case IP_VERSION(9, 2, 2):
                if ((adev->gfx.me_fw_version >= 0x0000009c) &&
                    (adev->gfx.me_feature_version >= 42) &&
                    (adev->gfx.pfp_fw_version >=  0x000000b1) &&
                    (adev->gfx.pfp_feature_version >= 42))
                        adev->gfx.me_fw_write_wait = true;

                if ((adev->gfx.mec_fw_version >=  0x00000192) &&
                    (adev->gfx.mec_feature_version >= 42))
                        adev->gfx.mec_fw_write_wait = true;
                break;
        default:
                adev->gfx.me_fw_write_wait = true;
                adev->gfx.mec_fw_write_wait = true;
                break;
        }
}

struct amdgpu_gfxoff_quirk {
        u16 chip_vendor;
        u16 chip_device;
        u16 subsys_vendor;
        u16 subsys_device;
        u8 revision;
};

static const struct amdgpu_gfxoff_quirk amdgpu_gfxoff_quirk_list[] = {
        /* https://bugzilla.kernel.org/show_bug.cgi?id=204689 */
        { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
        /* https://bugzilla.kernel.org/show_bug.cgi?id=207171 */
        { 0x1002, 0x15dd, 0x103c, 0x83e7, 0xd3 },
        /* GFXOFF is unstable on C6 parts with a VBIOS 113-RAVEN-114 */
        { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc6 },
        /* Apple MacBook Pro (15-inch, 2019) Radeon Pro Vega 20 4 GB */
        { 0x1002, 0x69af, 0x106b, 0x019a, 0xc0 },
        /* https://bbs.openkylin.top/t/topic/171497 */
        { 0x1002, 0x15d8, 0x19e5, 0x3e14, 0xc2 },
        /* HP 705G4 DM with R5 2400G */
        { 0x1002, 0x15dd, 0x103c, 0x8464, 0xd6 },
        { 0, 0, 0, 0, 0 },
};

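/*
 * Return true if this PCI device matches an entry in the GFXOFF quirk
 * list above and GFXOFF should therefore be disabled.
 */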
static bool gfx_v9_0_should_disable_gfxoff(struct pci_dev *pdev)
{
        const struct amdgpu_gfxoff_quirk *p = amdgpu_gfxoff_quirk_list;

        while (p && p->chip_device != 0) {
                if (pdev->vendor == p->chip_vendor &&
                    pdev->device == p->chip_device &&
                    pdev->subsystem_vendor == p->subsys_vendor &&
                    pdev->subsystem_device == p->subsys_device &&
                    pdev->revision == p->revision) {
                        return true;
                }
                ++p;
        }
        return false;
}

static bool is_raven_kicker(struct amdgpu_device *adev)
{
        return adev->pm.fw_version >= 0x41e2b;
}

static bool check_if_enlarge_doorbell_range(struct amdgpu_device *adev)
{
        return (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 3, 0)) &&
               (adev->gfx.me_fw_version >= 0x000000a5) &&
               (adev->gfx.me_feature_version >= 52);
}

static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
{
        if (gfx_v9_0_should_disable_gfxoff(adev->pdev))
                adev->pm.pp_feature &= ~PP_GFXOFF_MASK;

        switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
        case IP_VERSION(9, 0, 1):
        case IP_VERSION(9, 2, 1):
        case IP_VERSION(9, 4, 0):
                break;
        case IP_VERSION(9, 2, 2):
        case IP_VERSION(9, 1, 0):
                if (!((adev->apu_flags & AMD_APU_IS_RAVEN2) ||
                      (adev->apu_flags & AMD_APU_IS_PICASSO)) &&
                    ((!is_raven_kicker(adev) &&
                      adev->gfx.rlc_fw_version < 531) ||
                     (adev->gfx.rlc_feature_version < 1) ||
                     !adev->gfx.rlc.is_rlc_v2_1))
                        adev->pm.pp_feature &= ~PP_GFXOFF_MASK;

                if (adev->pm.pp_feature & PP_GFXOFF_MASK)
                        adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
                                AMD_PG_SUPPORT_CP |
                                AMD_PG_SUPPORT_RLC_SMU_HS;
                break;
        case IP_VERSION(9, 3, 0):
                if (adev->pm.pp_feature & PP_GFXOFF_MASK)
                        adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
                                AMD_PG_SUPPORT_CP |
                                AMD_PG_SUPPORT_RLC_SMU_HS;
                break;
        default:
                break;
        }
}

static int gfx_v9_0_init_cp_gfx_microcode(struct amdgpu_device *adev,
                                          char *chip_name)
{
        int err;

        err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw,
                                   AMDGPU_UCODE_REQUIRED,
                                   "amdgpu/%s_pfp.bin", chip_name);
        if (err)
                goto out;
        amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_PFP);

        err = amdgpu_ucode_request(adev, &adev->gfx.me_fw,
                                   AMDGPU_UCODE_REQUIRED,
                                   "amdgpu/%s_me.bin", chip_name);
        if (err)
                goto out;
        amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_ME);

        err = amdgpu_ucode_request(adev, &adev->gfx.ce_fw,
                                   AMDGPU_UCODE_REQUIRED,
                                   "amdgpu/%s_ce.bin", chip_name);
        if (err)
                goto out;
        amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_CE);

out:
        if (err) {
                amdgpu_ucode_release(&adev->gfx.pfp_fw);
                amdgpu_ucode_release(&adev->gfx.me_fw);
                amdgpu_ucode_release(&adev->gfx.ce_fw);
        }
        return err;
}

static int gfx_v9_0_init_rlc_microcode(struct amdgpu_device *adev,
                                       char *chip_name)
{
        int err;
        const struct rlc_firmware_header_v2_0 *rlc_hdr;
        uint16_t version_major;
        uint16_t version_minor;
        uint32_t smu_version;

        /*
         * For Picasso on an AM4 socket board, use picasso_rlc_am4.bin
         * instead of picasso_rlc.bin.
         * Detection: PCO AM4 has revision 0xC8..0xCF or 0xD8..0xDF;
         * anything else is PCO FP5.
         */
        if (!strcmp(chip_name, "picasso") &&
                (((adev->pdev->revision >= 0xC8) && (adev->pdev->revision <= 0xCF)) ||
                ((adev->pdev->revision >= 0xD8) && (adev->pdev->revision <= 0xDF))))
                err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
                                           AMDGPU_UCODE_REQUIRED,
                                           "amdgpu/%s_rlc_am4.bin", chip_name);
        else if (!strcmp(chip_name, "raven") && (amdgpu_pm_load_smu_firmware(adev, &smu_version) == 0) &&
                (smu_version >= 0x41e2b))
                /*
                 * The SMC is loaded by the SBIOS on APUs, so the SMU
                 * version can be queried directly.
                 */
                err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
                                           AMDGPU_UCODE_REQUIRED,
                                           "amdgpu/%s_kicker_rlc.bin", chip_name);
        else
                err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
                                           AMDGPU_UCODE_REQUIRED,
                                           "amdgpu/%s_rlc.bin", chip_name);
        if (err)
                goto out;

        rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
        version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
        version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
        err = amdgpu_gfx_rlc_init_microcode(adev, version_major, version_minor);
out:
        if (err)
                amdgpu_ucode_release(&adev->gfx.rlc_fw);

        return err;
}

static bool gfx_v9_0_load_mec2_fw_bin_support(struct amdgpu_device *adev)
{
        if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2) ||
            amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 1) ||
            amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 3, 0))
                return false;

        return true;
}

static int gfx_v9_0_init_cp_compute_microcode(struct amdgpu_device *adev,
                                              char *chip_name)
{
        int err;

        if (amdgpu_sriov_vf(adev) && (adev->asic_type == CHIP_ALDEBARAN))
                err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
                                   AMDGPU_UCODE_REQUIRED,
                                   "amdgpu/%s_sjt_mec.bin", chip_name);
        else
                err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
                                           AMDGPU_UCODE_REQUIRED,
                                           "amdgpu/%s_mec.bin", chip_name);
        if (err)
                goto out;

        amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1);
        amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1_JT);

        if (gfx_v9_0_load_mec2_fw_bin_support(adev)) {
                if (amdgpu_sriov_vf(adev) && (adev->asic_type == CHIP_ALDEBARAN))
                        err = amdgpu_ucode_request(adev, &adev->gfx.mec2_fw,
                                                   AMDGPU_UCODE_REQUIRED,
                                                   "amdgpu/%s_sjt_mec2.bin", chip_name);
                else
                        err = amdgpu_ucode_request(adev, &adev->gfx.mec2_fw,
                                                   AMDGPU_UCODE_REQUIRED,
                                                   "amdgpu/%s_mec2.bin", chip_name);
                if (!err) {
                        amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC2);
                        amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC2_JT);
                } else {
                        err = 0;
                        amdgpu_ucode_release(&adev->gfx.mec2_fw);
                }
        } else {
                adev->gfx.mec2_fw_version = adev->gfx.mec_fw_version;
                adev->gfx.mec2_feature_version = adev->gfx.mec_feature_version;
        }

        gfx_v9_0_check_if_need_gfxoff(adev);
        gfx_v9_0_check_fw_write_wait(adev);

out:
        if (err)
                amdgpu_ucode_release(&adev->gfx.mec_fw);
        return err;
}

static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
{
        char ucode_prefix[30];
        int r;

        DRM_DEBUG("\n");
        amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));

        /* No CPG in Arcturus */
        if (adev->gfx.num_gfx_rings) {
                r = gfx_v9_0_init_cp_gfx_microcode(adev, ucode_prefix);
                if (r)
                        return r;
        }

        r = gfx_v9_0_init_rlc_microcode(adev, ucode_prefix);
        if (r)
                return r;

        return gfx_v9_0_init_cp_compute_microcode(adev, ucode_prefix);
}

static u32 gfx_v9_0_get_csb_size(struct amdgpu_device *adev)
{
        u32 count = 0;
        const struct cs_section_def *sect = NULL;
        const struct cs_extent_def *ext = NULL;

        /* begin clear state */
        count += 2;
        /* context control state */
        count += 3;

        for (sect = gfx9_cs_data; sect->section != NULL; ++sect) {
                for (ext = sect->section; ext->extent != NULL; ++ext) {
                        if (sect->id == SECT_CONTEXT)
                                count += 2 + ext->reg_count;
                        else
                                return 0;
                }
        }

        /* end clear state */
        count += 2;
        /* clear state */
        count += 2;

        return count;
}

static void gfx_v9_0_get_csb_buffer(struct amdgpu_device *adev, u32 *buffer)
{
        u32 count = 0;

        if (adev->gfx.rlc.cs_data == NULL)
                return;
        if (buffer == NULL)
                return;

        count = amdgpu_gfx_csb_preamble_start(buffer);
        count = amdgpu_gfx_csb_data_parser(adev, buffer, count);
        amdgpu_gfx_csb_preamble_end(buffer, count);
}

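/*
 * Program the per-SE/SH always-on CU masks (RLC_PG_ALWAYS_ON_CU_MASK
 * and RLC_LB_ALWAYS_ACTIVE_CU_MASK) and cache the result in
 * cu_info->ao_cu_bitmap. APUs keep 4 CUs always on, Vega12 8, other
 * dGPUs 12.
 */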
static void gfx_v9_0_init_always_on_cu_mask(struct amdgpu_device *adev)
{
        struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info;
        uint32_t pg_always_on_cu_num = 2;
        uint32_t always_on_cu_num;
        uint32_t i, j, k;
        uint32_t mask, cu_bitmap, counter;

        if (adev->flags & AMD_IS_APU)
                always_on_cu_num = 4;
        else if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 2, 1))
                always_on_cu_num = 8;
        else
                always_on_cu_num = 12;

        mutex_lock(&adev->grbm_idx_mutex);
        for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
                for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
                        mask = 1;
                        cu_bitmap = 0;
                        counter = 0;
                        amdgpu_gfx_select_se_sh(adev, i, j, 0xffffffff, 0);

                        for (k = 0; k < adev->gfx.config.max_cu_per_sh; k ++) {
                                if (cu_info->bitmap[0][i][j] & mask) {
                                        if (counter == pg_always_on_cu_num)
                                                WREG32_SOC15(GC, 0, mmRLC_PG_ALWAYS_ON_CU_MASK, cu_bitmap);
                                        if (counter < always_on_cu_num)
                                                cu_bitmap |= mask;
                                        else
                                                break;
                                        counter++;
                                }
                                mask <<= 1;
                        }

                        WREG32_SOC15(GC, 0, mmRLC_LB_ALWAYS_ACTIVE_CU_MASK, cu_bitmap);
                        cu_info->ao_cu_bitmap[i][j] = cu_bitmap;
                }
        }
        amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
        mutex_unlock(&adev->grbm_idx_mutex);
}

static void gfx_v9_0_init_lbpw(struct amdgpu_device *adev)
{
        uint32_t data;

        /* set mmRLC_LB_THR_CONFIG_1/2/3/4 */
        WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_1, 0x0000007F);
        WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_2, 0x0333A5A7);
        WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_3, 0x00000077);
        WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_4, (0x30 | 0x40 << 8 | 0x02FA << 16));

        /* set mmRLC_LB_CNTR_INIT = 0x0000_0000 */
        WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_INIT, 0x00000000);

        /* set mmRLC_LB_CNTR_MAX = 0x0000_0500 */
        WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_MAX, 0x00000500);

        mutex_lock(&adev->grbm_idx_mutex);
        /* set mmRLC_LB_INIT_CU_MASK through broadcast mode to enable all SE/SH */
        amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
        WREG32_SOC15(GC, 0, mmRLC_LB_INIT_CU_MASK, 0xffffffff);

        /* set mmRLC_LB_PARAMS = 0x003F_1006 */
        data = REG_SET_FIELD(0, RLC_LB_PARAMS, FIFO_SAMPLES, 0x0003);
        data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLES, 0x0010);
        data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLE_INTERVAL, 0x033F);
        WREG32_SOC15(GC, 0, mmRLC_LB_PARAMS, data);

        /* set mmRLC_GPM_GENERAL_7[31-16] = 0x00C0 */
        data = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7);
        data &= 0x0000FFFF;
        data |= 0x00C00000;
        WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7, data);

        /*
         * RLC_LB_ALWAYS_ACTIVE_CU_MASK = 0xF (4 CUs AON for Raven),
         * programmed in gfx_v9_0_init_always_on_cu_mask()
         */

        /*
         * set RLC_LB_CNTL = 0x8000_0095; bit 31 is reserved but is
         * used as part of the RLC_LB_CNTL configuration
         */
        data = RLC_LB_CNTL__LB_CNT_SPIM_ACTIVE_MASK;
        data |= REG_SET_FIELD(data, RLC_LB_CNTL, CU_MASK_USED_OFF_HYST, 0x09);
        data |= REG_SET_FIELD(data, RLC_LB_CNTL, RESERVED, 0x80000);
        WREG32_SOC15(GC, 0, mmRLC_LB_CNTL, data);
        mutex_unlock(&adev->grbm_idx_mutex);

        gfx_v9_0_init_always_on_cu_mask(adev);
}

static void gfx_v9_4_init_lbpw(struct amdgpu_device *adev)
{
        uint32_t data;

        /* set mmRLC_LB_THR_CONFIG_1/2/3/4 */
        WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_1, 0x0000007F);
        WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_2, 0x033388F8);
        WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_3, 0x00000077);
        WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_4, (0x10 | 0x27 << 8 | 0x02FA << 16));

        /* set mmRLC_LB_CNTR_INIT = 0x0000_0000 */
        WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_INIT, 0x00000000);

        /* set mmRLC_LB_CNTR_MAX = 0x0000_0500 */
        WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_MAX, 0x00000800);

        mutex_lock(&adev->grbm_idx_mutex);
        /* set mmRLC_LB_INIT_CU_MASK through broadcast mode to enable all SE/SH */
        amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
        WREG32_SOC15(GC, 0, mmRLC_LB_INIT_CU_MASK, 0xffffffff);

        /* set mmRLC_LB_PARAMS = 0x003F_1006 */
        data = REG_SET_FIELD(0, RLC_LB_PARAMS, FIFO_SAMPLES, 0x0003);
        data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLES, 0x0010);
        data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLE_INTERVAL, 0x033F);
        WREG32_SOC15(GC, 0, mmRLC_LB_PARAMS, data);

        /* set mmRLC_GPM_GENERAL_7[31-16] = 0x00C0 */
        data = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7);
        data &= 0x0000FFFF;
        data |= 0x00C00000;
        WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7, data);

        /*
         * RLC_LB_ALWAYS_ACTIVE_CU_MASK = 0xFFF (12 CUs AON),
         * programmed in gfx_v9_0_init_always_on_cu_mask()
         */

        /*
         * set RLC_LB_CNTL = 0x8000_0095; bit 31 is reserved but is
         * used as part of the RLC_LB_CNTL configuration
         */
        data = RLC_LB_CNTL__LB_CNT_SPIM_ACTIVE_MASK;
        data |= REG_SET_FIELD(data, RLC_LB_CNTL, CU_MASK_USED_OFF_HYST, 0x09);
        data |= REG_SET_FIELD(data, RLC_LB_CNTL, RESERVED, 0x80000);
        WREG32_SOC15(GC, 0, mmRLC_LB_CNTL, data);
        mutex_unlock(&adev->grbm_idx_mutex);

        gfx_v9_0_init_always_on_cu_mask(adev);
}

static void gfx_v9_0_enable_lbpw(struct amdgpu_device *adev, bool enable)
{
        WREG32_FIELD15(GC, 0, RLC_LB_CNTL, LOAD_BALANCE_ENABLE, enable ? 1 : 0);
}

static int gfx_v9_0_cp_jump_table_num(struct amdgpu_device *adev)
{
        if (gfx_v9_0_load_mec2_fw_bin_support(adev))
                return 5;
        else
                return 4;
}

static void gfx_v9_0_init_rlcg_reg_access_ctrl(struct amdgpu_device *adev)
{
        struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl;

        reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl[0];
        reg_access_ctrl->scratch_reg0 = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG0);
        reg_access_ctrl->scratch_reg1 = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG1);
        reg_access_ctrl->scratch_reg2 = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG2);
        reg_access_ctrl->scratch_reg3 = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG3);
        reg_access_ctrl->grbm_cntl = SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_CNTL);
        reg_access_ctrl->grbm_idx = SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_INDEX);
        reg_access_ctrl->spare_int = SOC15_REG_OFFSET(GC, 0, mmRLC_SPARE_INT);
        adev->gfx.rlc.rlcg_reg_access_supported = true;
}

static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
{
        const struct cs_section_def *cs_data;
        int r;

        adev->gfx.rlc.cs_data = gfx9_cs_data;

        cs_data = adev->gfx.rlc.cs_data;

        if (cs_data) {
                /* init clear state block */
                r = amdgpu_gfx_rlc_init_csb(adev);
                if (r)
                        return r;
        }

        if (adev->flags & AMD_IS_APU) {
                /* TODO: double check the cp_table_size for RV */
                adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */
                r = amdgpu_gfx_rlc_init_cpt(adev);
                if (r)
                        return r;
        }

        return 0;
}

static void gfx_v9_0_mec_fini(struct amdgpu_device *adev)
{
        amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
        amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL);
}

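/*
 * Allocate the MEC HPD EOP buffer (one GFX9_MEC_HPD_SIZE slot per
 * compute ring) and a GTT BO for the MEC firmware image, then copy
 * the ucode into it.
 */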
static int gfx_v9_0_mec_init(struct amdgpu_device *adev)
{
        int r;
        u32 *hpd;
        const __le32 *fw_data;
        unsigned fw_size;
        u32 *fw;
        size_t mec_hpd_size;

        const struct gfx_firmware_header_v1_0 *mec_hdr;

        bitmap_zero(adev->gfx.mec_bitmap[0].queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);

        /* take ownership of the relevant compute queues */
        amdgpu_gfx_compute_queue_acquire(adev);
        mec_hpd_size = adev->gfx.num_compute_rings * GFX9_MEC_HPD_SIZE;
        if (mec_hpd_size) {
                r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
                                              AMDGPU_GEM_DOMAIN_VRAM |
                                              AMDGPU_GEM_DOMAIN_GTT,
                                              &adev->gfx.mec.hpd_eop_obj,
                                              &adev->gfx.mec.hpd_eop_gpu_addr,
                                              (void **)&hpd);
                if (r) {
                        dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);
                        gfx_v9_0_mec_fini(adev);
                        return r;
                }

                memset(hpd, 0, mec_hpd_size);

                amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
                amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
        }

        mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;

        fw_data = (const __le32 *)
                (adev->gfx.mec_fw->data +
                 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
        fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes);

        r = amdgpu_bo_create_reserved(adev, mec_hdr->header.ucode_size_bytes,
                                      PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
                                      &adev->gfx.mec.mec_fw_obj,
                                      &adev->gfx.mec.mec_fw_gpu_addr,
                                      (void **)&fw);
        if (r) {
                dev_warn(adev->dev, "(%d) create mec firmware bo failed\n", r);
                gfx_v9_0_mec_fini(adev);
                return r;
        }

        memcpy(fw, fw_data, fw_size);

        amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
        amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);

        return 0;
}

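/*
 * Read SQ per-wave state through the SQ_IND_INDEX/SQ_IND_DATA indirect
 * register pair; wave_read_regs() uses auto-increment for bulk
 * SGPR/VGPR reads.
 */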
static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address)
{
        WREG32_SOC15_RLC(GC, 0, mmSQ_IND_INDEX,
                (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
                (simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
                (address << SQ_IND_INDEX__INDEX__SHIFT) |
                (SQ_IND_INDEX__FORCE_READ_MASK));
        return RREG32_SOC15(GC, 0, mmSQ_IND_DATA);
}

static void wave_read_regs(struct amdgpu_device *adev, uint32_t simd,
                           uint32_t wave, uint32_t thread,
                           uint32_t regno, uint32_t num, uint32_t *out)
{
        WREG32_SOC15_RLC(GC, 0, mmSQ_IND_INDEX,
                (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
                (simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
                (regno << SQ_IND_INDEX__INDEX__SHIFT) |
                (thread << SQ_IND_INDEX__THREAD_ID__SHIFT) |
                (SQ_IND_INDEX__FORCE_READ_MASK) |
                (SQ_IND_INDEX__AUTO_INCR_MASK));
        while (num--)
                *(out++) = RREG32_SOC15(GC, 0, mmSQ_IND_DATA);
}

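/*
 * Snapshot the state of a single wave into @dst using the type-1
 * wave-data layout (status, PC, EXEC, HW_ID, instruction words, GPR
 * and LDS allocation, trap/IB status, M0 and mode).
 */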
static void gfx_v9_0_read_wave_data(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
{
        /* type 1 wave data */
        dst[(*no_fields)++] = 1;
        dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_STATUS);
        dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_LO);
        dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_HI);
        dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_LO);
        dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_HI);
        dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_HW_ID);
        dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW0);
        dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW1);
        dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_GPR_ALLOC);
        dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_LDS_ALLOC);
        dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TRAPSTS);
        dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_STS);
        dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_DBG0);
        dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_M0);
        dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_MODE);
}

static void gfx_v9_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
                                     uint32_t wave, uint32_t start,
                                     uint32_t size, uint32_t *dst)
{
        wave_read_regs(
                adev, simd, wave, 0,
                start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
}

static void gfx_v9_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
                                     uint32_t wave, uint32_t thread,
                                     uint32_t start, uint32_t size,
                                     uint32_t *dst)
{
        wave_read_regs(
                adev, simd, wave, thread,
                start + SQIND_WAVE_VGPRS_OFFSET, size, dst);
}

static void gfx_v9_0_select_me_pipe_q(struct amdgpu_device *adev,
                                  u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id)
{
        soc15_grbm_select(adev, me, pipe, q, vm, 0);
}

static const struct amdgpu_gfx_funcs gfx_v9_0_gfx_funcs = {
        .get_gpu_clock_counter = &gfx_v9_0_get_gpu_clock_counter,
        .select_se_sh = &gfx_v9_0_select_se_sh,
        .read_wave_data = &gfx_v9_0_read_wave_data,
        .read_wave_sgprs = &gfx_v9_0_read_wave_sgprs,
        .read_wave_vgprs = &gfx_v9_0_read_wave_vgprs,
        .select_me_pipe_q = &gfx_v9_0_select_me_pipe_q,
        .get_hdp_flush_mask = &amdgpu_gfx_get_hdp_flush_mask,
};

const struct amdgpu_ras_block_hw_ops  gfx_v9_0_ras_ops = {
                .ras_error_inject = &gfx_v9_0_ras_error_inject,
                .query_ras_error_count = &gfx_v9_0_query_ras_error_count,
                .reset_ras_error_count = &gfx_v9_0_reset_ras_error_count,
};

static struct amdgpu_gfx_ras gfx_v9_0_ras = {
        .ras_block = {
                .hw_ops = &gfx_v9_0_ras_ops,
        },
};

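/*
 * Set the per-ASIC gfx configuration (context count, SC FIFO sizes,
 * GB_ADDR_CONFIG) and decode GB_ADDR_CONFIG into its individual
 * fields.
 */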
static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
{
        u32 gb_addr_config;
        int err;

        switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
        case IP_VERSION(9, 0, 1):
                adev->gfx.config.max_hw_contexts = 8;
                adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
                adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
                adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
                adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
                gb_addr_config = VEGA10_GB_ADDR_CONFIG_GOLDEN;
                break;
        case IP_VERSION(9, 2, 1):
                adev->gfx.config.max_hw_contexts = 8;
                adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
                adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
                adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
                adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
                gb_addr_config = VEGA12_GB_ADDR_CONFIG_GOLDEN;
                drm_info(adev_to_drm(adev), "fix gfx.config for vega12\n");
                break;
        case IP_VERSION(9, 4, 0):
                adev->gfx.ras = &gfx_v9_0_ras;
                adev->gfx.config.max_hw_contexts = 8;
                adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
                adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
                adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
                adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
                gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG);
                gb_addr_config &= ~0xf3e777ff;
                gb_addr_config |= 0x22014042;
                /* check vbios table if gpu info is not available */
                err = amdgpu_atomfirmware_get_gfx_info(adev);
                if (err)
                        return err;
                break;
        case IP_VERSION(9, 2, 2):
        case IP_VERSION(9, 1, 0):
                adev->gfx.config.max_hw_contexts = 8;
                adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
                adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
                adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
                adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
                if (adev->apu_flags & AMD_APU_IS_RAVEN2)
                        gb_addr_config = RAVEN2_GB_ADDR_CONFIG_GOLDEN;
                else
                        gb_addr_config = RAVEN_GB_ADDR_CONFIG_GOLDEN;
                break;
        case IP_VERSION(9, 4, 1):
                adev->gfx.ras = &gfx_v9_4_ras;
                adev->gfx.config.max_hw_contexts = 8;
                adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
                adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
                adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
                adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
                gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG);
                gb_addr_config &= ~0xf3e777ff;
                gb_addr_config |= 0x22014042;
                break;
        case IP_VERSION(9, 3, 0):
                adev->gfx.config.max_hw_contexts = 8;
                adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
                adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
                adev->gfx.config.sc_hiz_tile_fifo_size = 0x80;
                adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
                gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG);
                gb_addr_config &= ~0xf3e777ff;
                gb_addr_config |= 0x22010042;
                break;
        case IP_VERSION(9, 4, 2):
                adev->gfx.ras = &gfx_v9_4_2_ras;
                adev->gfx.config.max_hw_contexts = 8;
                adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
                adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
                adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
                adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
                gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG);
                gb_addr_config &= ~0xf3e777ff;
                gb_addr_config |= 0x22014042;
                /* check vbios table if gpu info is not available */
                err = amdgpu_atomfirmware_get_gfx_info(adev);
                if (err)
                        return err;
                break;
        default:
                BUG();
                break;
        }

        adev->gfx.config.gb_addr_config = gb_addr_config;

        adev->gfx.config.gb_addr_config_fields.num_pipes = 1 <<
                        REG_GET_FIELD(
                                        adev->gfx.config.gb_addr_config,
                                        GB_ADDR_CONFIG,
                                        NUM_PIPES);

        adev->gfx.config.max_tile_pipes =
                adev->gfx.config.gb_addr_config_fields.num_pipes;

        adev->gfx.config.gb_addr_config_fields.num_banks = 1 <<
                        REG_GET_FIELD(
                                        adev->gfx.config.gb_addr_config,
                                        GB_ADDR_CONFIG,
                                        NUM_BANKS);
        adev->gfx.config.gb_addr_config_fields.max_compress_frags = 1 <<
                        REG_GET_FIELD(
                                        adev->gfx.config.gb_addr_config,
                                        GB_ADDR_CONFIG,
                                        MAX_COMPRESSED_FRAGS);
        adev->gfx.config.gb_addr_config_fields.num_rb_per_se = 1 <<
                        REG_GET_FIELD(
                                        adev->gfx.config.gb_addr_config,
                                        GB_ADDR_CONFIG,
                                        NUM_RB_PER_SE);
        adev->gfx.config.gb_addr_config_fields.num_se = 1 <<
                        REG_GET_FIELD(
                                        adev->gfx.config.gb_addr_config,
                                        GB_ADDR_CONFIG,
                                        NUM_SHADER_ENGINES);
        adev->gfx.config.gb_addr_config_fields.pipe_interleave_size = 1 << (8 +
                        REG_GET_FIELD(
                                        adev->gfx.config.gb_addr_config,
                                        GB_ADDR_CONFIG,
                                        PIPE_INTERLEAVE_SIZE));

        return 0;
}

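/*
 * Initialize one compute ring: assign me/pipe/queue, doorbell and EOP
 * addresses, pick the EOP interrupt source and hardware priority, and
 * register the ring.
 */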
static int gfx_v9_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
                                      int mec, int pipe, int queue)
{
        unsigned int irq_type;
        struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];
        unsigned int hw_prio;

        /* mec0 is me1 */
        ring->me = mec + 1;
        ring->pipe = pipe;
        ring->queue = queue;

        ring->ring_obj = NULL;
        ring->use_doorbell = true;
        ring->doorbell_index = (adev->doorbell_index.mec_ring0 + ring_id) << 1;
        ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr
                                + (ring_id * GFX9_MEC_HPD_SIZE);
        ring->vm_hub = AMDGPU_GFXHUB(0);
        sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);

        irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
                + ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
                + ring->pipe;
        hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ?
                        AMDGPU_RING_PRIO_2 : AMDGPU_RING_PRIO_DEFAULT;
        /* type-2 packets are deprecated on MEC, use type-3 instead */
        return amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type,
                                hw_prio, NULL);
}

static void gfx_v9_0_alloc_ip_dump(struct amdgpu_device *adev)
{
        uint32_t reg_count = ARRAY_SIZE(gc_reg_list_9);
        uint32_t *ptr;
        uint32_t inst;

        ptr = kcalloc(reg_count, sizeof(uint32_t), GFP_KERNEL);
        if (!ptr) {
                DRM_ERROR("Failed to allocate memory for GFX IP Dump\n");
                adev->gfx.ip_dump_core = NULL;
        } else {
                adev->gfx.ip_dump_core = ptr;
        }

        /* Allocate memory for compute queue registers for all the instances */
        reg_count = ARRAY_SIZE(gc_cp_reg_list_9);
        inst = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe_per_mec *
                adev->gfx.mec.num_queue_per_pipe;

        ptr = kcalloc(reg_count * inst, sizeof(uint32_t), GFP_KERNEL);
        if (!ptr) {
                DRM_ERROR("Failed to allocate memory for Compute Queues IP Dump\n");
                adev->gfx.ip_dump_compute_queues = NULL;
        } else {
                adev->gfx.ip_dump_compute_queues = ptr;
        }
}

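/*
 * sw_init: register the CP interrupt sources, initialize the RLC, MEC,
 * KIQ and MQD BOs, and create the gfx, software (muxed) and compute
 * rings.
 */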
static int gfx_v9_0_sw_init(struct amdgpu_ip_block *ip_block)
{
        int i, j, k, r, ring_id;
        int xcc_id = 0;
        struct amdgpu_ring *ring;
        struct amdgpu_device *adev = ip_block->adev;
        unsigned int hw_prio;

        switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
        case IP_VERSION(9, 0, 1):
        case IP_VERSION(9, 2, 1):
        case IP_VERSION(9, 4, 0):
        case IP_VERSION(9, 2, 2):
        case IP_VERSION(9, 1, 0):
        case IP_VERSION(9, 4, 1):
        case IP_VERSION(9, 3, 0):
        case IP_VERSION(9, 4, 2):
                adev->gfx.mec.num_mec = 2;
                break;
        default:
                adev->gfx.mec.num_mec = 1;
                break;
        }

        switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
        case IP_VERSION(9, 0, 1):
        case IP_VERSION(9, 2, 1):
        case IP_VERSION(9, 4, 0):
        case IP_VERSION(9, 2, 2):
        case IP_VERSION(9, 1, 0):
        case IP_VERSION(9, 3, 0):
                adev->gfx.cleaner_shader_ptr = gfx_9_4_2_cleaner_shader_hex;
                adev->gfx.cleaner_shader_size = sizeof(gfx_9_4_2_cleaner_shader_hex);
                if (adev->gfx.me_fw_version  >= 167 &&
                    adev->gfx.pfp_fw_version >= 196 &&
                    adev->gfx.mec_fw_version >= 474) {
                        adev->gfx.enable_cleaner_shader = true;
                        r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size);
                        if (r) {
                                adev->gfx.enable_cleaner_shader = false;
                                dev_err(adev->dev, "Failed to initialize cleaner shader\n");
                        }
                }
                break;
        case IP_VERSION(9, 4, 2):
                adev->gfx.cleaner_shader_ptr = gfx_9_4_2_cleaner_shader_hex;
                adev->gfx.cleaner_shader_size = sizeof(gfx_9_4_2_cleaner_shader_hex);
                if (adev->gfx.mec_fw_version >= 88) {
                        adev->gfx.enable_cleaner_shader = true;
                        r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size);
                        if (r) {
                                adev->gfx.enable_cleaner_shader = false;
                                dev_err(adev->dev, "Failed to initialize cleaner shader\n");
                        }
                }
                break;
        default:
                adev->gfx.enable_cleaner_shader = false;
                break;
        }

        adev->gfx.mec.num_pipe_per_mec = 4;
        adev->gfx.mec.num_queue_per_pipe = 8;

        /* EOP Event */
        r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_EOP_INTERRUPT, &adev->gfx.eop_irq);
        if (r)
                return r;

        /* Bad opcode Event */
        r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP,
                              GFX_9_0__SRCID__CP_BAD_OPCODE_ERROR,
                              &adev->gfx.bad_op_irq);
        if (r)
                return r;

        /* Privileged reg */
        r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_REG_FAULT,
                              &adev->gfx.priv_reg_irq);
        if (r)
                return r;

        /* Privileged inst */
        r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_INSTR_FAULT,
                              &adev->gfx.priv_inst_irq);
        if (r)
                return r;

        /* ECC error */
        r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_ECC_ERROR,
                              &adev->gfx.cp_ecc_error_irq);
        if (r)
                return r;

        /* FUE error */
        r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_FUE_ERROR,
                              &adev->gfx.cp_ecc_error_irq);
        if (r)
                return r;

        adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;

        if (adev->gfx.rlc.funcs) {
                if (adev->gfx.rlc.funcs->init) {
                        r = adev->gfx.rlc.funcs->init(adev);
                        if (r) {
                                dev_err(adev->dev, "Failed to init rlc BOs!\n");
                                return r;
                        }
                }
        }

        r = gfx_v9_0_mec_init(adev);
        if (r) {
                DRM_ERROR("Failed to init MEC BOs!\n");
                return r;
        }

        /* set up the gfx ring */
        for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
                ring = &adev->gfx.gfx_ring[i];
                ring->ring_obj = NULL;
                if (!i)
                        sprintf(ring->name, "gfx");
                else
                        sprintf(ring->name, "gfx_%d", i);
                ring->use_doorbell = true;
                ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1;

                /* disable scheduler on the real ring */
                ring->no_scheduler = adev->gfx.mcbp;
                ring->vm_hub = AMDGPU_GFXHUB(0);
                r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq,
                                     AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP,
                                     AMDGPU_RING_PRIO_DEFAULT, NULL);
                if (r)
                        return r;
        }

        /* set up the software rings */
        if (adev->gfx.mcbp && adev->gfx.num_gfx_rings) {
                for (i = 0; i < GFX9_NUM_SW_GFX_RINGS; i++) {
                        ring = &adev->gfx.sw_gfx_ring[i];
                        ring->ring_obj = NULL;
                        sprintf(ring->name, "%s", amdgpu_sw_ring_name(i));
                        ring->use_doorbell = true;
                        ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1;
                        ring->is_sw_ring = true;
                        hw_prio = amdgpu_sw_ring_priority(i);
                        ring->vm_hub = AMDGPU_GFXHUB(0);
                        r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq,
                                             AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP, hw_prio,
                                             NULL);
                        if (r)
                                return r;
                        ring->wptr = 0;
                }

                /* init the muxer and add software rings */
                r = amdgpu_ring_mux_init(&adev->gfx.muxer, &adev->gfx.gfx_ring[0],
                                         GFX9_NUM_SW_GFX_RINGS);
                if (r) {
                        DRM_ERROR("amdgpu_ring_mux_init failed(%d)\n", r);
                        return r;
                }
                for (i = 0; i < GFX9_NUM_SW_GFX_RINGS; i++) {
                        r = amdgpu_ring_mux_add_sw_ring(&adev->gfx.muxer,
                                                        &adev->gfx.sw_gfx_ring[i]);
                        if (r) {
                                DRM_ERROR("amdgpu_ring_mux_add_sw_ring failed(%d)\n", r);
                                return r;
                        }
                }
        }

        /* set up the compute queues - allocate horizontally across pipes */
        ring_id = 0;
        for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
                for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
                        for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
                                if (!amdgpu_gfx_is_mec_queue_enabled(adev, 0, i,
                                                                     k, j))
                                        continue;

                                r = gfx_v9_0_compute_ring_init(adev,
                                                               ring_id,
                                                               i, k, j);
                                if (r)
                                        return r;

                                ring_id++;
                        }
                }
        }

        /* TODO: Add queue reset mask when FW fully supports it */
        adev->gfx.gfx_supported_reset =
                amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]);
        adev->gfx.compute_supported_reset =
                amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]);
        if (!amdgpu_sriov_vf(adev) && !adev->debug_disable_gpu_ring_reset) {
                adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
                adev->gfx.gfx_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
        }

        r = amdgpu_gfx_kiq_init(adev, GFX9_MEC_HPD_SIZE, 0);
        if (r) {
                DRM_ERROR("Failed to init KIQ BOs!\n");
                return r;
        }

        r = amdgpu_gfx_kiq_init_ring(adev, xcc_id);
        if (r)
                return r;

        /* create MQD for all compute queues as well as KIQ for SRIOV case */
        r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct v9_mqd_allocation), 0);
        if (r)
                return r;

        adev->gfx.ce_ram_size = 0x8000;

        r = gfx_v9_0_gpu_early_init(adev);
        if (r)
                return r;

        if (amdgpu_gfx_ras_sw_init(adev)) {
                dev_err(adev->dev, "Failed to initialize gfx ras block!\n");
                return -EINVAL;
        }

        gfx_v9_0_alloc_ip_dump(adev);

        r = amdgpu_gfx_sysfs_init(adev);
        if (r)
                return r;

        return 0;
}

static int gfx_v9_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
        int i;
        struct amdgpu_device *adev = ip_block->adev;

        if (adev->gfx.mcbp && adev->gfx.num_gfx_rings) {
                for (i = 0; i < GFX9_NUM_SW_GFX_RINGS; i++)
                        amdgpu_ring_fini(&adev->gfx.sw_gfx_ring[i]);
                amdgpu_ring_mux_fini(&adev->gfx.muxer);
        }

        for (i = 0; i < adev->gfx.num_gfx_rings; i++)
                amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
        for (i = 0; i < adev->gfx.num_compute_rings; i++)
                amdgpu_ring_fini(&adev->gfx.compute_ring[i]);

        amdgpu_gfx_mqd_sw_fini(adev, 0);
        amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[0].ring);
        amdgpu_gfx_kiq_fini(adev, 0);

        amdgpu_gfx_cleaner_shader_sw_fini(adev);

        gfx_v9_0_mec_fini(adev);
        amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
                                &adev->gfx.rlc.clear_state_gpu_addr,
                                (void **)&adev->gfx.rlc.cs_ptr);
        if (adev->flags & AMD_IS_APU) {
                amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
                                &adev->gfx.rlc.cp_table_gpu_addr,
                                (void **)&adev->gfx.rlc.cp_table_ptr);
        }
        gfx_v9_0_free_microcode(adev);

        amdgpu_gfx_sysfs_fini(adev);

        kfree(adev->gfx.ip_dump_core);
        kfree(adev->gfx.ip_dump_compute_queues);

        return 0;
}

static void gfx_v9_0_tiling_mode_table_init(struct amdgpu_device *adev)
{
        /* TODO */
}

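/**
 * gfx_v9_0_select_se_sh - steer GRBM register access to a SE/SH/instance
 * @adev: amdgpu device pointer
 * @se_num: shader engine index, or 0xffffffff to broadcast to all SEs
 * @sh_num: shader array index, or 0xffffffff to broadcast to all SHs
 * @instance: instance index, or 0xffffffff to broadcast to all instances
 * @xcc_id: XCC instance (unused here)
 *
 * Programs GRBM_GFX_INDEX; callers serialize updates through
 * adev->grbm_idx_mutex (see gfx_v9_0_setup_rb()).
 */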
void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num,
                           u32 instance, int xcc_id)
{
        u32 data;

        if (instance == 0xffffffff)
                data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1);
        else
                data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance);

        if (se_num == 0xffffffff)
                data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1);
        else
                data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);

        if (sh_num == 0xffffffff)
                data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1);
        else
                data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);

        WREG32_SOC15_RLC_SHADOW(GC, 0, mmGRBM_GFX_INDEX, data);
}

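/*
 * Returns the bitmap of active render backends for the currently
 * selected SE/SH: the disable bits from CC_RB_BACKEND_DISABLE (fused
 * off) and GC_USER_RB_BACKEND_DISABLE (user-disabled) are OR'd
 * together and inverted under the per-SH backend mask.
 */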
static u32 gfx_v9_0_get_rb_active_bitmap(struct amdgpu_device *adev)
{
        u32 data, mask;

        data = RREG32_SOC15(GC, 0, mmCC_RB_BACKEND_DISABLE);
        data |= RREG32_SOC15(GC, 0, mmGC_USER_RB_BACKEND_DISABLE);

        data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
        data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT;

        mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se /
                                         adev->gfx.config.max_sh_per_se);

        return (~data) & mask;
}

static void gfx_v9_0_setup_rb(struct amdgpu_device *adev)
{
        int i, j;
        u32 data;
        u32 active_rbs = 0;
        u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
                                        adev->gfx.config.max_sh_per_se;

        mutex_lock(&adev->grbm_idx_mutex);
        for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
                for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
                        amdgpu_gfx_select_se_sh(adev, i, j, 0xffffffff, 0);
                        data = gfx_v9_0_get_rb_active_bitmap(adev);
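                        /* pack each SH's RB bitmap into one global
                         * mask, rb_bitmap_width_per_sh bits per SH
                         */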
                        active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
                                               rb_bitmap_width_per_sh);
                }
        }
        amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
        mutex_unlock(&adev->grbm_idx_mutex);

        adev->gfx.config.backend_enable_mask = active_rbs;
        adev->gfx.config.num_rbs = hweight32(active_rbs);
}

static void gfx_v9_0_debug_trap_config_init(struct amdgpu_device *adev,
                                uint32_t first_vmid,
                                uint32_t last_vmid)
{
        uint32_t data;
        uint32_t trap_config_vmid_mask = 0;
        int i;

        /* Calculate trap config vmid mask */
        for (i = first_vmid; i < last_vmid; i++)
                trap_config_vmid_mask |= (1 << i);

        data = REG_SET_FIELD(0, SPI_GDBG_TRAP_CONFIG,
                        VMID_SEL, trap_config_vmid_mask);
        data = REG_SET_FIELD(data, SPI_GDBG_TRAP_CONFIG,
                        TRAP_EN, 1);
        WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_CONFIG), data);
        WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_MASK), 0);

        WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_DATA0), 0);
        WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_DATA1), 0);
}

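/*
 * Writing 0x6000 into both the PRIVATE_BASE and SHARED_BASE fields of
 * SH_MEM_BASES programs bits 63:48 of those apertures, i.e. a base of
 * 0x6000'0000'0000'0000, matching the layout described below.
 */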
#define DEFAULT_SH_MEM_BASES    (0x6000)
static void gfx_v9_0_init_compute_vmid(struct amdgpu_device *adev)
{
        int i;
        uint32_t sh_mem_config;
        uint32_t sh_mem_bases;

        /*
         * Configure apertures:
         * LDS:         0x60000000'00000000 - 0x60000001'00000000 (4GB)
         * Scratch:     0x60000001'00000000 - 0x60000002'00000000 (4GB)
         * GPUVM:       0x60010000'00000000 - 0x60020000'00000000 (1TB)
         */
        sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);

        sh_mem_config = SH_MEM_ADDRESS_MODE_64 |
                        SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
                        SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;

        mutex_lock(&adev->srbm_mutex);
        for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
                soc15_grbm_select(adev, 0, 0, 0, i, 0);
                /* CP and shaders */
                WREG32_SOC15_RLC(GC, 0, mmSH_MEM_CONFIG, sh_mem_config);
                WREG32_SOC15_RLC(GC, 0, mmSH_MEM_BASES, sh_mem_bases);
        }
        soc15_grbm_select(adev, 0, 0, 0, 0, 0);
        mutex_unlock(&adev->srbm_mutex);

        /*
         * Initialize all compute VMIDs to have no GDS, GWS, or OA
         * access. These should be enabled by FW for target VMIDs.
         */
        for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
                WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_BASE, 2 * i, 0);
                WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_SIZE, 2 * i, 0);
                WREG32_SOC15_OFFSET(GC, 0, mmGDS_GWS_VMID0, i, 0);
                WREG32_SOC15_OFFSET(GC, 0, mmGDS_OA_VMID0, i, 0);
        }
}

static void gfx_v9_0_init_gds_vmid(struct amdgpu_device *adev)
{
        int vmid;

        /*
         * Initialize all compute and user-gfx VMIDs to have no GDS, GWS, or OA
         * access. Compute VMIDs should be enabled by FW for target VMIDs,
         * the driver can enable them for graphics. VMID0 should maintain
         * access so that HWS firmware can save/restore entries.
         */
        for (vmid = 1; vmid < AMDGPU_NUM_VMID; vmid++) {
                WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_BASE, 2 * vmid, 0);
                WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_SIZE, 2 * vmid, 0);
                WREG32_SOC15_OFFSET(GC, 0, mmGDS_GWS_VMID0, vmid, 0);
                WREG32_SOC15_OFFSET(GC, 0, mmGDS_OA_VMID0, vmid, 0);
        }
}

static void gfx_v9_0_init_sq_config(struct amdgpu_device *adev)
{
        uint32_t tmp;

        switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
        case IP_VERSION(9, 4, 1):
                tmp = RREG32_SOC15(GC, 0, mmSQ_CONFIG);
                tmp = REG_SET_FIELD(tmp, SQ_CONFIG, DISABLE_BARRIER_WAITCNT,
                                !READ_ONCE(adev->barrier_has_auto_waitcnt));
                WREG32_SOC15(GC, 0, mmSQ_CONFIG, tmp);
                break;
        case IP_VERSION(9, 4, 2):
                gfx_v9_4_2_init_sq(adev);
                break;
        default:
                break;
        }
}

static void gfx_v9_0_constants_init(struct amdgpu_device *adev)
{
        u32 tmp;
        int i;

        if (!amdgpu_sriov_vf(adev) ||
            amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 2)) {
                WREG32_FIELD15_RLC(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff);
        }

        gfx_v9_0_tiling_mode_table_init(adev);

        if (adev->gfx.num_gfx_rings)
                gfx_v9_0_setup_rb(adev);
        gfx_v9_0_get_cu_info(adev, &adev->gfx.cu_info);
        adev->gfx.config.db_debug2 = RREG32_SOC15(GC, 0, mmDB_DEBUG2);

        /* XXX SH_MEM regs */
        /* where to put LDS, scratch, GPUVM in FSA64 space */
        mutex_lock(&adev->srbm_mutex);
        for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB(0)].num_ids; i++) {
                soc15_grbm_select(adev, 0, 0, 0, i, 0);
                /* CP and shaders */
                if (i == 0) {
                        tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
                                            SH_MEM_ALIGNMENT_MODE_UNALIGNED);
                        tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE,
                                            !!adev->gmc.noretry);
                        WREG32_SOC15_RLC(GC, 0, mmSH_MEM_CONFIG, tmp);
                        WREG32_SOC15_RLC(GC, 0, mmSH_MEM_BASES, 0);
                } else {
                        tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
                                            SH_MEM_ALIGNMENT_MODE_UNALIGNED);
                        tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE,
                                            !!adev->gmc.noretry);
                        WREG32_SOC15_RLC(GC, 0, mmSH_MEM_CONFIG, tmp);
                        tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE,
                                (adev->gmc.private_aperture_start >> 48));
                        tmp = REG_SET_FIELD(tmp, SH_MEM_BASES, SHARED_BASE,
                                (adev->gmc.shared_aperture_start >> 48));
                        WREG32_SOC15_RLC(GC, 0, mmSH_MEM_BASES, tmp);
                }
        }
        soc15_grbm_select(adev, 0, 0, 0, 0, 0);
        mutex_unlock(&adev->srbm_mutex);

        gfx_v9_0_init_compute_vmid(adev);
        gfx_v9_0_init_gds_vmid(adev);
        gfx_v9_0_init_sq_config(adev);
}

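/*
 * Poll until the RLC serdes masters report idle: first the per-CU
 * masters for every SE/SH, then the non-CU (SE/GC/TC) masters. Each
 * poll is bounded by adev->usec_timeout.
 */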
static void gfx_v9_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
{
        u32 i, j, k;
        u32 mask;

        mutex_lock(&adev->grbm_idx_mutex);
        for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
                for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
                        amdgpu_gfx_select_se_sh(adev, i, j, 0xffffffff, 0);
                        for (k = 0; k < adev->usec_timeout; k++) {
                                if (RREG32_SOC15(GC, 0, mmRLC_SERDES_CU_MASTER_BUSY) == 0)
                                        break;
                                udelay(1);
                        }
                        if (k == adev->usec_timeout) {
                                amdgpu_gfx_select_se_sh(adev, 0xffffffff,
                                                      0xffffffff, 0xffffffff, 0);
                                mutex_unlock(&adev->grbm_idx_mutex);
                                drm_info(adev_to_drm(adev), "Timeout waiting for RLC serdes %u,%u\n",
                                         i, j);
                                return;
                        }
                }
        }
        amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
        mutex_unlock(&adev->grbm_idx_mutex);

        mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
                RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY_MASK |
                RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY_MASK |
                RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY_MASK;
        for (k = 0; k < adev->usec_timeout; k++) {
                if ((RREG32_SOC15(GC, 0, mmRLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0)
                        break;
                udelay(1);
        }
}

static void gfx_v9_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
                                               bool enable)
{
        u32 tmp;

        /* These interrupts should be enabled to drive DS clock */

        tmp = RREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0);

        tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, enable ? 1 : 0);
        tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, enable ? 1 : 0);
        tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, enable ? 1 : 0);
        if (adev->gfx.num_gfx_rings)
                tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE, enable ? 1 : 0);

        WREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0, tmp);
}

static void gfx_v9_0_init_csb(struct amdgpu_device *adev)
{
        adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr);
        /* csib (clear state indirect buffer) */
        WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_HI),
                        adev->gfx.rlc.clear_state_gpu_addr >> 32);
        WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_LO),
                        adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
        WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_LENGTH),
                        adev->gfx.rlc.clear_state_size);
}

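/*
 * Walk the indirect portion of the RLC register list format: record
 * the start offset of each 0xFFFFFFFF-delimited sub-list and collect
 * the set of unique indirect register offsets it references.
 */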
static void gfx_v9_1_parse_ind_reg_list(int *register_list_format,
                                int indirect_offset,
                                int list_size,
                                int *unique_indirect_regs,
                                int unique_indirect_reg_count,
                                int *indirect_start_offsets,
                                int *indirect_start_offsets_count,
                                int max_start_offsets_count)
{
        int idx;

        for (; indirect_offset < list_size; indirect_offset++) {
                WARN_ON(*indirect_start_offsets_count >= max_start_offsets_count);
                indirect_start_offsets[*indirect_start_offsets_count] = indirect_offset;
                *indirect_start_offsets_count = *indirect_start_offsets_count + 1;

                while (register_list_format[indirect_offset] != 0xFFFFFFFF) {
                        indirect_offset += 2;

                        /* look for the matching index */
                        for (idx = 0; idx < unique_indirect_reg_count; idx++) {
                                if (unique_indirect_regs[idx] ==
                                        register_list_format[indirect_offset] ||
                                        !unique_indirect_regs[idx])
                                        break;
                        }

                        BUG_ON(idx >= unique_indirect_reg_count);

                        if (!unique_indirect_regs[idx])
                                unique_indirect_regs[idx] = register_list_format[indirect_offset];

                        indirect_offset++;
                }
        }
}

static int gfx_v9_1_init_rlc_save_restore_list(struct amdgpu_device *adev)
{
        int unique_indirect_regs[] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
        int unique_indirect_reg_count = 0;

        int indirect_start_offsets[] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
        int indirect_start_offsets_count = 0;

        int list_size = 0;
        int i = 0, j = 0;
        u32 tmp = 0;

        u32 *register_list_format =
                kmemdup(adev->gfx.rlc.register_list_format,
                        adev->gfx.rlc.reg_list_format_size_bytes, GFP_KERNEL);
        if (!register_list_format)
                return -ENOMEM;

        /* setup unique_indirect_regs array and indirect_start_offsets array */
        unique_indirect_reg_count = ARRAY_SIZE(unique_indirect_regs);
        gfx_v9_1_parse_ind_reg_list(register_list_format,
                                    adev->gfx.rlc.reg_list_format_direct_reg_list_length,
                                    adev->gfx.rlc.reg_list_format_size_bytes >> 2,
                                    unique_indirect_regs,
                                    unique_indirect_reg_count,
                                    indirect_start_offsets,
                                    &indirect_start_offsets_count,
                                    ARRAY_SIZE(indirect_start_offsets));

        /* enable auto inc in case it is disabled */
        tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL));
        tmp |= RLC_SRM_CNTL__AUTO_INCR_ADDR_MASK;
        WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL), tmp);

        /* write register_restore table to offset 0x0 using RLC_SRM_ARAM_ADDR/DATA */
        WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_ADDR),
                RLC_SAVE_RESTORE_ADDR_STARTING_OFFSET);
        for (i = 0; i < adev->gfx.rlc.reg_list_size_bytes >> 2; i++)
                WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_DATA),
                        adev->gfx.rlc.register_restore[i]);

        /* load indirect register */
        WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
                adev->gfx.rlc.reg_list_format_start);

        /* direct register portion */
        for (i = 0; i < adev->gfx.rlc.reg_list_format_direct_reg_list_length; i++)
                WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA),
                        register_list_format[i]);

        /* indirect register portion */
        while (i < (adev->gfx.rlc.reg_list_format_size_bytes >> 2)) {
                if (register_list_format[i] == 0xFFFFFFFF) {
                        WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, register_list_format[i++]);
                        continue;
                }

                WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, register_list_format[i++]);
                WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, register_list_format[i++]);

                for (j = 0; j < unique_indirect_reg_count; j++) {
                        if (register_list_format[i] == unique_indirect_regs[j]) {
                                WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, j);
                                break;
                        }
                }

                BUG_ON(j >= unique_indirect_reg_count);

                i++;
        }

        /*
         * set save/restore list size: the list holds reg/value pairs,
         * so the programmed size is half the dword count
         */
        list_size = adev->gfx.rlc.reg_list_size_bytes >> 2;
        list_size = list_size >> 1;
        WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
                adev->gfx.rlc.reg_restore_list_size);
        WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA), list_size);

        /* write the starting offsets to RLC scratch ram */
        WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
                adev->gfx.rlc.starting_offsets_start);
        for (i = 0; i < ARRAY_SIZE(indirect_start_offsets); i++)
                WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA),
                       indirect_start_offsets[i]);

        /* load unique indirect regs */
        for (i = 0; i < ARRAY_SIZE(unique_indirect_regs); i++) {
                if (unique_indirect_regs[i] != 0) {
                        WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_ADDR_0)
                               + GFX_RLC_SRM_INDEX_CNTL_ADDR_OFFSETS[i],
                               unique_indirect_regs[i] & 0x3FFFF);

                        WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_DATA_0)
                               + GFX_RLC_SRM_INDEX_CNTL_DATA_OFFSETS[i],
                               unique_indirect_regs[i] >> 20);
                }
        }

        kfree(register_list_format);
        return 0;
}

static void gfx_v9_0_enable_save_restore_machine(struct amdgpu_device *adev)
{
        WREG32_FIELD15(GC, 0, RLC_SRM_CNTL, SRM_ENABLE, 1);
}

static void pwr_10_0_gfxip_control_over_cgpg(struct amdgpu_device *adev,
                                             bool enable)
{
        uint32_t data = 0;
        uint32_t default_data = 0;

        default_data = data = RREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS));
        if (enable) {
                /* enable GFXIP control over CGPG */
                data |= PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK;
                if (default_data != data)
                        WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data);

                /* update status */
                data &= ~PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK;
                data |= (2 << PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT);
                if (default_data != data)
                        WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data);
        } else {
                /* restore GFXIP control over CGPG */
                data &= ~PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK;
                if (default_data != data)
                        WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data);
        }
}

static void gfx_v9_0_init_gfx_power_gating(struct amdgpu_device *adev)
{
        uint32_t data = 0;

        if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
                              AMD_PG_SUPPORT_GFX_SMG |
                              AMD_PG_SUPPORT_GFX_DMG)) {
                /* init IDLE_POLL_COUNT = 0x60 */
                data = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB_WPTR_POLL_CNTL));
                data &= ~CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK;
                data |= (0x60 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
                WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB_WPTR_POLL_CNTL), data);

                /* init RLC PG Delay */
                data = 0;
                data |= (0x10 << RLC_PG_DELAY__POWER_UP_DELAY__SHIFT);
                data |= (0x10 << RLC_PG_DELAY__POWER_DOWN_DELAY__SHIFT);
                data |= (0x10 << RLC_PG_DELAY__CMD_PROPAGATE_DELAY__SHIFT);
                data |= (0x40 << RLC_PG_DELAY__MEM_SLEEP_DELAY__SHIFT);
                WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY), data);

                data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_2));
                data &= ~RLC_PG_DELAY_2__SERDES_CMD_DELAY_MASK;
                data |= (0x4 << RLC_PG_DELAY_2__SERDES_CMD_DELAY__SHIFT);
                WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_2), data);

                data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_3));
                data &= ~RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG_MASK;
                data |= (0xff << RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG__SHIFT);
                WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_3), data);

                data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_AUTO_PG_CTRL));
                data &= ~RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD_MASK;

                /* program GRBM_REG_SAVE_GFX_IDLE_THRESHOLD to 0x55f0 */
                data |= (0x55f0 << RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD__SHIFT);
                WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_AUTO_PG_CTRL), data);
                if (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 3, 0))
                        pwr_10_0_gfxip_control_over_cgpg(adev, true);
        }
}

static void gfx_v9_0_enable_sck_slow_down_on_power_up(struct amdgpu_device *adev,
                                                bool enable)
{
        uint32_t data = 0;
        uint32_t default_data = 0;

        default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
        data = REG_SET_FIELD(data, RLC_PG_CNTL,
                             SMU_CLK_SLOWDOWN_ON_PU_ENABLE,
                             enable ? 1 : 0);
        if (default_data != data)
                WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
}

static void gfx_v9_0_enable_sck_slow_down_on_power_down(struct amdgpu_device *adev,
                                                bool enable)
{
        uint32_t data = 0;
        uint32_t default_data = 0;

        default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
        data = REG_SET_FIELD(data, RLC_PG_CNTL,
                             SMU_CLK_SLOWDOWN_ON_PD_ENABLE,
                             enable ? 1 : 0);
        if (default_data != data)
                WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
}

static void gfx_v9_0_enable_cp_power_gating(struct amdgpu_device *adev,
                                        bool enable)
{
        uint32_t data = 0;
        uint32_t default_data = 0;

        default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
        data = REG_SET_FIELD(data, RLC_PG_CNTL,
                             CP_PG_DISABLE,
                             enable ? 0 : 1);
        if (default_data != data)
                WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
}

static void gfx_v9_0_enable_gfx_cg_power_gating(struct amdgpu_device *adev,
                                                bool enable)
{
        uint32_t data, default_data;

        default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
        data = REG_SET_FIELD(data, RLC_PG_CNTL,
                             GFX_POWER_GATING_ENABLE,
                             enable ? 1 : 0);
        if (default_data != data)
                WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
}

static void gfx_v9_0_enable_gfx_pipeline_powergating(struct amdgpu_device *adev,
                                                bool enable)
{
        uint32_t data, default_data;

        default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
        data = REG_SET_FIELD(data, RLC_PG_CNTL,
                             GFX_PIPELINE_PG_ENABLE,
                             enable ? 1 : 0);
        if (default_data != data)
                WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);

        if (!enable)
                /* read any GFX register to wake up GFX */
                data = RREG32(SOC15_REG_OFFSET(GC, 0, mmDB_RENDER_CONTROL));
}

static void gfx_v9_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev,
                                                       bool enable)
{
        uint32_t data, default_data;

        default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
        data = REG_SET_FIELD(data, RLC_PG_CNTL,
                             STATIC_PER_CU_PG_ENABLE,
                             enable ? 1 : 0);
        if (default_data != data)
                WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
}

static void gfx_v9_0_enable_gfx_dynamic_mg_power_gating(struct amdgpu_device *adev,
                                                bool enable)
{
        uint32_t data, default_data;

        default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
        data = REG_SET_FIELD(data, RLC_PG_CNTL,
                             DYN_PER_CU_PG_ENABLE,
                             enable ? 1 : 0);
        if (default_data != data)
                WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
}

static void gfx_v9_0_init_pg(struct amdgpu_device *adev)
{
        gfx_v9_0_init_csb(adev);

        /*
         * The RLC save/restore list works since RLC v2.1,
         * and it is required by the gfxoff feature.
         */
        if (adev->gfx.rlc.is_rlc_v2_1) {
                if (amdgpu_ip_version(adev, GC_HWIP, 0) ==
                            IP_VERSION(9, 2, 1) ||
                    (adev->apu_flags & AMD_APU_IS_RAVEN2))
                        gfx_v9_1_init_rlc_save_restore_list(adev);
                gfx_v9_0_enable_save_restore_machine(adev);
        }

        if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
                              AMD_PG_SUPPORT_GFX_SMG |
                              AMD_PG_SUPPORT_GFX_DMG |
                              AMD_PG_SUPPORT_CP |
                              AMD_PG_SUPPORT_GDS |
                              AMD_PG_SUPPORT_RLC_SMU_HS)) {
                WREG32_SOC15(GC, 0, mmRLC_JUMP_TABLE_RESTORE,
                             adev->gfx.rlc.cp_table_gpu_addr >> 8);
                gfx_v9_0_init_gfx_power_gating(adev);
        }
}

static void gfx_v9_0_rlc_stop(struct amdgpu_device *adev)
{
        WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 0);
        gfx_v9_0_enable_gui_idle_interrupt(adev, false);
        gfx_v9_0_wait_for_rlc_serdes(adev);
}

static void gfx_v9_0_rlc_reset(struct amdgpu_device *adev)
{
        WREG32_FIELD15(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
        udelay(50);
        WREG32_FIELD15(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
        udelay(50);
}

static void gfx_v9_0_rlc_start(struct amdgpu_device *adev)
{
#ifdef AMDGPU_RLC_DEBUG_RETRY
        u32 rlc_ucode_ver;
#endif

        WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 1);
        udelay(50);

        /* APUs (historically Carrizo) enable the CP interrupt only after the CP is initialized */
        if (!(adev->flags & AMD_IS_APU)) {
                gfx_v9_0_enable_gui_idle_interrupt(adev, true);
                udelay(50);
        }

#ifdef AMDGPU_RLC_DEBUG_RETRY
        /* RLC_GPM_GENERAL_6 : RLC Ucode version */
        rlc_ucode_ver = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_6);
        if (rlc_ucode_ver == 0x108) {
                drm_info(adev_to_drm(adev), "Using rlc debug ucode. mmRLC_GPM_GENERAL_6 == 0x%08x / fw_ver == %i\n",
                                rlc_ucode_ver, adev->gfx.rlc_fw_version);
                /* RLC_GPM_TIMER_INT_3 : Timer interval in RefCLK cycles,
                 * default is 0x9C4 to create a 100us interval */
                WREG32_SOC15(GC, 0, mmRLC_GPM_TIMER_INT_3, 0x9C4);
                /* RLC_GPM_GENERAL_12 : Minimum gap between wptr and rptr
                 * to disable the page fault retry interrupts, default is
                 * 0x100 (256) */
                WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_12, 0x100);
        }
#endif
}

static int gfx_v9_0_rlc_load_microcode(struct amdgpu_device *adev)
{
        const struct rlc_firmware_header_v2_0 *hdr;
        const __le32 *fw_data;
        unsigned i, fw_size;

        if (!adev->gfx.rlc_fw)
                return -EINVAL;

        hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
        amdgpu_ucode_print_rlc_hdr(&hdr->header);

        fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
                           le32_to_cpu(hdr->header.ucode_array_offset_bytes));
        fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;

        WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_ADDR,
                        RLCG_UCODE_LOADING_START_ADDRESS);
        for (i = 0; i < fw_size; i++)
                WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++));
        WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);

        return 0;
}

static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev)
{
        int r;

        if (amdgpu_sriov_vf(adev)) {
                gfx_v9_0_init_csb(adev);
                return 0;
        }

        adev->gfx.rlc.funcs->stop(adev);

        /* disable CG */
        WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, 0);

        gfx_v9_0_init_pg(adev);

        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
                /* legacy rlc firmware loading */
                r = gfx_v9_0_rlc_load_microcode(adev);
                if (r)
                        return r;
        }

        switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
        case IP_VERSION(9, 2, 2):
        case IP_VERSION(9, 1, 0):
                gfx_v9_0_init_lbpw(adev);
                if (amdgpu_lbpw == 0)
                        gfx_v9_0_enable_lbpw(adev, false);
                else
                        gfx_v9_0_enable_lbpw(adev, true);
                break;
        case IP_VERSION(9, 4, 0):
                gfx_v9_4_init_lbpw(adev);
                if (amdgpu_lbpw > 0)
                        gfx_v9_0_enable_lbpw(adev, true);
                else
                        gfx_v9_0_enable_lbpw(adev, false);
                break;
        default:
                break;
        }

        gfx_v9_0_update_spm_vmid_internal(adev, 0xf);

        adev->gfx.rlc.funcs->start(adev);

        return 0;
}

static void gfx_v9_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
{
        u32 tmp = RREG32_SOC15(GC, 0, mmCP_ME_CNTL);

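        /* enable: release all halt/reset/invalidate bits; disable: halt
         * PFP/ME/CE, reset both pipes of each and invalidate their icaches
         */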
        tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_INVALIDATE_ICACHE, enable ? 0 : 1);
        tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_INVALIDATE_ICACHE, enable ? 0 : 1);
        tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_INVALIDATE_ICACHE, enable ? 0 : 1);
        tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_PIPE0_RESET, enable ? 0 : 1);
        tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_PIPE1_RESET, enable ? 0 : 1);
        tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE0_RESET, enable ? 0 : 1);
        tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE1_RESET, enable ? 0 : 1);
        tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE0_RESET, enable ? 0 : 1);
        tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE1_RESET, enable ? 0 : 1);
        tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, enable ? 0 : 1);
        tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1);
        tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, enable ? 0 : 1);
        WREG32_SOC15_RLC(GC, 0, mmCP_ME_CNTL, tmp);
        udelay(50);
}

static int gfx_v9_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
{
        const struct gfx_firmware_header_v1_0 *pfp_hdr;
        const struct gfx_firmware_header_v1_0 *ce_hdr;
        const struct gfx_firmware_header_v1_0 *me_hdr;
        const __le32 *fw_data;
        unsigned i, fw_size;

        if (!adev->gfx.me_fw || !adev->gfx.pfp_fw || !adev->gfx.ce_fw)
                return -EINVAL;

        pfp_hdr = (const struct gfx_firmware_header_v1_0 *)
                adev->gfx.pfp_fw->data;
        ce_hdr = (const struct gfx_firmware_header_v1_0 *)
                adev->gfx.ce_fw->data;
        me_hdr = (const struct gfx_firmware_header_v1_0 *)
                adev->gfx.me_fw->data;

        amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
        amdgpu_ucode_print_gfx_hdr(&ce_hdr->header);
        amdgpu_ucode_print_gfx_hdr(&me_hdr->header);

        gfx_v9_0_cp_gfx_enable(adev, false);

        /* PFP */
        fw_data = (const __le32 *)
                (adev->gfx.pfp_fw->data +
                 le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
        fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4;
        WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_ADDR, 0);
        for (i = 0; i < fw_size; i++)
                WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_DATA, le32_to_cpup(fw_data++));
        WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_ADDR, adev->gfx.pfp_fw_version);

        /* CE */
        fw_data = (const __le32 *)
                (adev->gfx.ce_fw->data +
                 le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes));
        fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4;
        WREG32_SOC15(GC, 0, mmCP_CE_UCODE_ADDR, 0);
        for (i = 0; i < fw_size; i++)
                WREG32_SOC15(GC, 0, mmCP_CE_UCODE_DATA, le32_to_cpup(fw_data++));
        WREG32_SOC15(GC, 0, mmCP_CE_UCODE_ADDR, adev->gfx.ce_fw_version);

        /* ME */
        fw_data = (const __le32 *)
                (adev->gfx.me_fw->data +
                 le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
        fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4;
        WREG32_SOC15(GC, 0, mmCP_ME_RAM_WADDR, 0);
        for (i = 0; i < fw_size; i++)
                WREG32_SOC15(GC, 0, mmCP_ME_RAM_DATA, le32_to_cpup(fw_data++));
        WREG32_SOC15(GC, 0, mmCP_ME_RAM_WADDR, adev->gfx.me_fw_version);

        return 0;
}

static int gfx_v9_0_cp_gfx_start(struct amdgpu_device *adev)
{
        struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
        const struct cs_section_def *sect = NULL;
        const struct cs_extent_def *ext = NULL;
        int r, i, tmp;

        /* init the CP */
        WREG32_SOC15(GC, 0, mmCP_MAX_CONTEXT, adev->gfx.config.max_hw_contexts - 1);
        WREG32_SOC15(GC, 0, mmCP_DEVICE_ID, 1);

        gfx_v9_0_cp_gfx_enable(adev, true);

        /* For now, limit this quirk to the gfx9 APU series; it has
         * already been confirmed that gfx10/gfx11 APUs do not need
         * such an update.
         */
        if (adev->flags & AMD_IS_APU &&
                        adev->in_s3 && !pm_resume_via_firmware()) {
                drm_info(adev_to_drm(adev), "Will skip the CSB packet resubmit\n");
                return 0;
        }
        r = amdgpu_ring_alloc(ring, gfx_v9_0_get_csb_size(adev) + 4 + 3);
        if (r) {
                drm_err(adev_to_drm(adev), "cp failed to lock ring (%d).\n", r);
                return r;
        }

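        /* emit the init sequence: begin-clear-state preamble, context
         * control, the gfx9 clear-state context registers,
         * end-clear-state, CLEAR_STATE itself, the CE partition bases
         * and a VGT_INDEX_TYPE reset
         */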
        amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
        amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

        amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
        amdgpu_ring_write(ring, 0x80000000);
        amdgpu_ring_write(ring, 0x80000000);

        for (sect = gfx9_cs_data; sect->section != NULL; ++sect) {
                for (ext = sect->section; ext->extent != NULL; ++ext) {
                        if (sect->id == SECT_CONTEXT) {
                                amdgpu_ring_write(ring,
                                       PACKET3(PACKET3_SET_CONTEXT_REG,
                                               ext->reg_count));
                                amdgpu_ring_write(ring,
                                       ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
                                for (i = 0; i < ext->reg_count; i++)
                                        amdgpu_ring_write(ring, ext->extent[i]);
                        }
                }
        }

        amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
        amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);

        amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
        amdgpu_ring_write(ring, 0);

        amdgpu_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
        amdgpu_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
        amdgpu_ring_write(ring, 0x8000);
        amdgpu_ring_write(ring, 0x8000);

        amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
        tmp = (PACKET3_SET_UCONFIG_REG_INDEX_TYPE |
                (SOC15_REG_OFFSET(GC, 0, mmVGT_INDEX_TYPE) - PACKET3_SET_UCONFIG_REG_START));
        amdgpu_ring_write(ring, tmp);
        amdgpu_ring_write(ring, 0);

        amdgpu_ring_commit(ring);

        return 0;
}

static int gfx_v9_0_cp_gfx_resume(struct amdgpu_device *adev)
{
        struct amdgpu_ring *ring;
        u32 tmp;
        u32 rb_bufsz;
        u64 rb_addr, rptr_addr, wptr_gpu_addr;

        /* Set the write pointer delay */
        WREG32_SOC15(GC, 0, mmCP_RB_WPTR_DELAY, 0);

        /* set the RB to use vmid 0 */
        WREG32_SOC15(GC, 0, mmCP_RB_VMID, 0);

        /* Set ring buffer size */
        ring = &adev->gfx.gfx_ring[0];
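        /* RB_BUFSZ is log2 of the ring buffer size in qwords (8 bytes) */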
        rb_bufsz = order_base_2(ring->ring_size / 8);
        tmp = REG_SET_FIELD(0, CP_RB0_CNTL, RB_BUFSZ, rb_bufsz);
        tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, RB_BLKSZ, rb_bufsz - 2);
#ifdef __BIG_ENDIAN
        tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, BUF_SWAP, 1);
#endif
        WREG32_SOC15(GC, 0, mmCP_RB0_CNTL, tmp);

        /* Initialize the ring buffer's write pointers */
        ring->wptr = 0;
        WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
        WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));

        /* set the wb address whether it's enabled or not */
        rptr_addr = ring->rptr_gpu_addr;
        WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
        WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & CP_RB_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK);

        wptr_gpu_addr = ring->wptr_gpu_addr;
        WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_LO, lower_32_bits(wptr_gpu_addr));
        WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_HI, upper_32_bits(wptr_gpu_addr));

        mdelay(1);
        WREG32_SOC15(GC, 0, mmCP_RB0_CNTL, tmp);

        rb_addr = ring->gpu_addr >> 8;
        WREG32_SOC15(GC, 0, mmCP_RB0_BASE, rb_addr);
        WREG32_SOC15(GC, 0, mmCP_RB0_BASE_HI, upper_32_bits(rb_addr));

        tmp = RREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL);
        if (ring->use_doorbell) {
                tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
                                    DOORBELL_OFFSET, ring->doorbell_index);
                tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
                                    DOORBELL_EN, 1);
        } else {
                tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, DOORBELL_EN, 0);
        }
        WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL, tmp);

        tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER,
                        DOORBELL_RANGE_LOWER, ring->doorbell_index);
        WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_LOWER, tmp);

        WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_UPPER,
                       CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK);

        /* start the ring */
        gfx_v9_0_cp_gfx_start(adev);

        return 0;
}

static void gfx_v9_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
{
        if (enable) {
                WREG32_SOC15_RLC(GC, 0, mmCP_MEC_CNTL, 0);
        } else {
                WREG32_SOC15_RLC(GC, 0, mmCP_MEC_CNTL,
                                 (CP_MEC_CNTL__MEC_INVALIDATE_ICACHE_MASK |
                                  CP_MEC_CNTL__MEC_ME1_PIPE0_RESET_MASK |
                                  CP_MEC_CNTL__MEC_ME1_PIPE1_RESET_MASK |
                                  CP_MEC_CNTL__MEC_ME1_PIPE2_RESET_MASK |
                                  CP_MEC_CNTL__MEC_ME1_PIPE3_RESET_MASK |
                                  CP_MEC_CNTL__MEC_ME2_PIPE0_RESET_MASK |
                                  CP_MEC_CNTL__MEC_ME2_PIPE1_RESET_MASK |
                                  CP_MEC_CNTL__MEC_ME1_HALT_MASK |
                                  CP_MEC_CNTL__MEC_ME2_HALT_MASK));
                adev->gfx.kiq[0].ring.sched.ready = false;
        }
        udelay(50);
}

static int gfx_v9_0_cp_compute_load_microcode(struct amdgpu_device *adev)
{
        const struct gfx_firmware_header_v1_0 *mec_hdr;
        const __le32 *fw_data;
        unsigned i;
        u32 tmp;

        if (!adev->gfx.mec_fw)
                return -EINVAL;

        gfx_v9_0_cp_compute_enable(adev, false);

        mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
        amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);

        fw_data = (const __le32 *)
                (adev->gfx.mec_fw->data +
                 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
        tmp = 0;
        tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
        tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
        WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_CNTL, tmp);

        WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_LO,
                adev->gfx.mec.mec_fw_gpu_addr & 0xFFFFF000);
        WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_HI,
                upper_32_bits(adev->gfx.mec.mec_fw_gpu_addr));

        /* MEC1 */
        WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_ADDR,
                         mec_hdr->jt_offset);
        for (i = 0; i < mec_hdr->jt_size; i++)
                WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_DATA,
                        le32_to_cpup(fw_data + mec_hdr->jt_offset + i));

        WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_ADDR,
                        adev->gfx.mec_fw_version);
        /*
         * TODO: Loading MEC2 firmware is only necessary if MEC2 should run
         * different microcode than MEC1.
         */

        return 0;
}

/* KIQ functions */
static void gfx_v9_0_kiq_setting(struct amdgpu_ring *ring)
{
        uint32_t tmp;
        struct amdgpu_device *adev = ring->adev;

        /* tell RLC which queue is the KIQ */
        tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS);
        tmp &= 0xffffff00;
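        /* the low byte encodes the queue: queue in bits [2:0], pipe in
         * [4:3] and me in [6:5]; bit 7 presumably marks the entry valid
         */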
        tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
        WREG32_SOC15_RLC(GC, 0, mmRLC_CP_SCHEDULERS, tmp | 0x80);
}

static void gfx_v9_0_mqd_set_priority(struct amdgpu_ring *ring, struct v9_mqd *mqd)
{
        struct amdgpu_device *adev = ring->adev;

        if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
                if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring)) {
                        mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
                        mqd->cp_hqd_queue_priority =
                                AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
                }
        }
}

static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        struct v9_mqd *mqd = ring->mqd_ptr;
        uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr;
        uint32_t tmp;

        mqd->header = 0xC0310800;
        mqd->compute_pipelinestat_enable = 0x00000001;
        mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
        mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
        mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
        mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
        mqd->compute_static_thread_mgmt_se4 = 0xffffffff;
        mqd->compute_static_thread_mgmt_se5 = 0xffffffff;
        mqd->compute_static_thread_mgmt_se6 = 0xffffffff;
        mqd->compute_static_thread_mgmt_se7 = 0xffffffff;
        mqd->compute_misc_reserved = 0x00000003;

        mqd->dynamic_cu_mask_addr_lo =
                lower_32_bits(ring->mqd_gpu_addr
                              + offsetof(struct v9_mqd_allocation, dynamic_cu_mask));
        mqd->dynamic_cu_mask_addr_hi =
                upper_32_bits(ring->mqd_gpu_addr
                              + offsetof(struct v9_mqd_allocation, dynamic_cu_mask));

        eop_base_addr = ring->eop_gpu_addr >> 8;
        mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
        mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);

        /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
        tmp = RREG32_SOC15(GC, 0, mmCP_HQD_EOP_CONTROL);
        tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
                        (order_base_2(GFX9_MEC_HPD_SIZE / 4) - 1));
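        /* e.g. GFX9_MEC_HPD_SIZE = 4096 bytes = 1024 dwords:
         * order_base_2(1024) - 1 = 9, and 2^(9 + 1) = 1024 dwords
         */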

        mqd->cp_hqd_eop_control = tmp;

        /* enable doorbell? */
        tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL);

        if (ring->use_doorbell) {
                tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
                                    DOORBELL_OFFSET, ring->doorbell_index);
                tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
                                    DOORBELL_EN, 1);
                tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
                                    DOORBELL_SOURCE, 0);
                tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
                                    DOORBELL_HIT, 0);
        } else {
                tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
                                         DOORBELL_EN, 0);
        }

        mqd->cp_hqd_pq_doorbell_control = tmp;

        /* disable the queue if it's active */
        ring->wptr = 0;
        mqd->cp_hqd_dequeue_request = 0;
        mqd->cp_hqd_pq_rptr = 0;
        mqd->cp_hqd_pq_wptr_lo = 0;
        mqd->cp_hqd_pq_wptr_hi = 0;

        /* set the pointer to the MQD */
        mqd->cp_mqd_base_addr_lo = ring->mqd_gpu_addr & 0xfffffffc;
        mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr);

        /* set MQD vmid to 0 */
        tmp = RREG32_SOC15(GC, 0, mmCP_MQD_CONTROL);
        tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
        mqd->cp_mqd_control = tmp;

        /* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
        hqd_gpu_addr = ring->gpu_addr >> 8;
        mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
        mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);

        /* set up the HQD, this is similar to CP_RB0_CNTL */
        tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_CONTROL);
        tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
                            (order_base_2(ring->ring_size / 4) - 1));
        tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
                        (order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1));
#ifdef __BIG_ENDIAN
        tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1);
#endif
        tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
        tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ROQ_PQ_IB_FLIP, 0);
        tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
        tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
        mqd->cp_hqd_pq_control = tmp;

        /* set the wb address whether it's enabled or not */
        wb_gpu_addr = ring->rptr_gpu_addr;
        mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
        mqd->cp_hqd_pq_rptr_report_addr_hi =
                upper_32_bits(wb_gpu_addr) & 0xffff;

        /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
        wb_gpu_addr = ring->wptr_gpu_addr;
        mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
        mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;

        /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
        ring->wptr = 0;
        mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR);

        /* set the vmid for the queue */
        mqd->cp_hqd_vmid = 0;

        tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE);
        tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x53);
        mqd->cp_hqd_persistent_state = tmp;

        /* set MIN_IB_AVAIL_SIZE */
        tmp = RREG32_SOC15(GC, 0, mmCP_HQD_IB_CONTROL);
        tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
        mqd->cp_hqd_ib_control = tmp;

        /* set static priority for a queue/ring */
        gfx_v9_0_mqd_set_priority(ring, mqd);
        mqd->cp_hqd_quantum = RREG32_SOC15(GC, 0, mmCP_HQD_QUANTUM);

        /* the map_queues packet doesn't need to activate the queue,
         * so only the KIQ needs to set this field.
         */
        if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
                mqd->cp_hqd_active = 1;

        return 0;
}

static int gfx_v9_0_kiq_init_register(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        struct v9_mqd *mqd = ring->mqd_ptr;
        int j;

        /* disable wptr polling */
        WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);

        WREG32_SOC15_RLC(GC, 0, mmCP_HQD_EOP_BASE_ADDR,
               mqd->cp_hqd_eop_base_addr_lo);
        WREG32_SOC15_RLC(GC, 0, mmCP_HQD_EOP_BASE_ADDR_HI,
               mqd->cp_hqd_eop_base_addr_hi);

        /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
        WREG32_SOC15_RLC(GC, 0, mmCP_HQD_EOP_CONTROL,
               mqd->cp_hqd_eop_control);

        /* enable doorbell? */
        WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
               mqd->cp_hqd_pq_doorbell_control);

        /* disable the queue if it's active */
        if (RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1) {
                WREG32_SOC15_RLC(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 1);
                for (j = 0; j < adev->usec_timeout; j++) {
                        if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
                                break;
                        udelay(1);
                }
                WREG32_SOC15_RLC(GC, 0, mmCP_HQD_DEQUEUE_REQUEST,
                       mqd->cp_hqd_dequeue_request);
                WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_RPTR,
                       mqd->cp_hqd_pq_rptr);
                WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_LO,
                       mqd->cp_hqd_pq_wptr_lo);
                WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_HI,
                       mqd->cp_hqd_pq_wptr_hi);
        }

        /* set the pointer to the MQD */
        WREG32_SOC15_RLC(GC, 0, mmCP_MQD_BASE_ADDR,
               mqd->cp_mqd_base_addr_lo);
        WREG32_SOC15_RLC(GC, 0, mmCP_MQD_BASE_ADDR_HI,
               mqd->cp_mqd_base_addr_hi);

        /* set MQD vmid to 0 */
        WREG32_SOC15_RLC(GC, 0, mmCP_MQD_CONTROL,
               mqd->cp_mqd_control);

        /* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
        WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_BASE,
               mqd->cp_hqd_pq_base_lo);
        WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_BASE_HI,
               mqd->cp_hqd_pq_base_hi);

        /* set up the HQD, this is similar to CP_RB0_CNTL */
        WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_CONTROL,
               mqd->cp_hqd_pq_control);

        /* set the wb address whether it's enabled or not */
        WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR,
                                mqd->cp_hqd_pq_rptr_report_addr_lo);
        WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
                                mqd->cp_hqd_pq_rptr_report_addr_hi);

        /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
        WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR,
               mqd->cp_hqd_pq_wptr_poll_addr_lo);
        WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI,
               mqd->cp_hqd_pq_wptr_poll_addr_hi);

        /* enable the doorbell if requested */
        if (ring->use_doorbell) {
                WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER,
                                        (adev->doorbell_index.kiq * 2) << 2);
                /* If GC has entered CGPG, ringing a doorbell beyond the
                 * first page doesn't wake GC up. Enlarge
                 * CP_MEC_DOORBELL_RANGE_UPPER to work around this issue;
                 * the change has to align with the firmware update.
                 */
                if (check_if_enlarge_doorbell_range(adev))
                        WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
                                        (adev->doorbell.size - 4));
                else
                        WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
                                        (adev->doorbell_index.userqueue_end * 2) << 2);
        }

        WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
               mqd->cp_hqd_pq_doorbell_control);

        /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
        WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_LO,
               mqd->cp_hqd_pq_wptr_lo);
        WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_HI,
               mqd->cp_hqd_pq_wptr_hi);

        /* set the vmid for the queue */
        WREG32_SOC15_RLC(GC, 0, mmCP_HQD_VMID, mqd->cp_hqd_vmid);

        WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PERSISTENT_STATE,
               mqd->cp_hqd_persistent_state);

        /* activate the queue */
        WREG32_SOC15_RLC(GC, 0, mmCP_HQD_ACTIVE,
               mqd->cp_hqd_active);

        if (ring->use_doorbell)
                WREG32_FIELD15(GC, 0, CP_PQ_STATUS, DOORBELL_ENABLE, 1);

        return 0;
}

static int gfx_v9_0_kiq_fini_register(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        int j;

        /* disable the queue if it's active */
        if (RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1) {

                WREG32_SOC15_RLC(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 1);

                for (j = 0; j < adev->usec_timeout; j++) {
                        if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
                                break;
                        udelay(1);
                }

                if (j == adev->usec_timeout) {
                        DRM_DEBUG("KIQ dequeue request failed.\n");

                        /* Manual disable if dequeue request times out */
                        WREG32_SOC15_RLC(GC, 0, mmCP_HQD_ACTIVE, 0);
                }

                WREG32_SOC15_RLC(GC, 0, mmCP_HQD_DEQUEUE_REQUEST,
                      0);
        }

        WREG32_SOC15_RLC(GC, 0, mmCP_HQD_IQ_TIMER, 0);
        WREG32_SOC15_RLC(GC, 0, mmCP_HQD_IB_CONTROL, 0);
        WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PERSISTENT_STATE, 0);
        WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, 0x40000000);
        WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, 0);
        WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_RPTR, 0);
        WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_HI, 0);
        WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_WPTR_LO, 0);

        return 0;
}

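/*
 * Initialize the KIQ MQD.  During a GPU reset the backup MQD (validated
 * via its cp_hqd_pq_control field) is restored and only the HQD
 * registers are reprogrammed; otherwise the MQD is built from scratch
 * and a backup copy is saved.
 */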
static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        struct v9_mqd *mqd = ring->mqd_ptr;
        struct v9_mqd *tmp_mqd;

        gfx_v9_0_kiq_setting(ring);

        /* The GPU may be in a bad state during probe: the driver triggers
         * the reset after loading the SMU, in which case the MQD has not
         * been initialized and needs to be re-initialized here. Check
         * mqd->cp_hqd_pq_control, since a valid MQD never has it as 0.
         */
        tmp_mqd = (struct v9_mqd *)adev->gfx.kiq[0].mqd_backup;
        if (amdgpu_in_reset(adev) && tmp_mqd->cp_hqd_pq_control) {
                /* for the GPU_RESET case, reset the MQD to a clean state */
                if (adev->gfx.kiq[0].mqd_backup)
                        memcpy(mqd, adev->gfx.kiq[0].mqd_backup, sizeof(struct v9_mqd_allocation));

                /* reset ring buffer */
                ring->wptr = 0;
                amdgpu_ring_clear_ring(ring);

                mutex_lock(&adev->srbm_mutex);
                soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, 0);
                gfx_v9_0_kiq_init_register(ring);
                soc15_grbm_select(adev, 0, 0, 0, 0, 0);
                mutex_unlock(&adev->srbm_mutex);
        } else {
                memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
                ((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
                ((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
                if (amdgpu_sriov_vf(adev) && adev->in_suspend)
                        amdgpu_ring_clear_ring(ring);
                mutex_lock(&adev->srbm_mutex);
                soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, 0);
                gfx_v9_0_mqd_init(ring);
                gfx_v9_0_kiq_init_register(ring);
                soc15_grbm_select(adev, 0, 0, 0, 0, 0);
                mutex_unlock(&adev->srbm_mutex);

                if (adev->gfx.kiq[0].mqd_backup)
                        memcpy(adev->gfx.kiq[0].mqd_backup, mqd, sizeof(struct v9_mqd_allocation));
        }

        return 0;
}

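/*
 * Initialize a compute-queue (KCQ) MQD.  On first init the MQD is built
 * from scratch and backed up; on reset/resume, or when a restore is
 * explicitly requested, the backup is copied back and the ring state is
 * cleared instead.
 */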
static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring, bool restore)
{
        struct amdgpu_device *adev = ring->adev;
        struct v9_mqd *mqd = ring->mqd_ptr;
        int mqd_idx = ring - &adev->gfx.compute_ring[0];
        struct v9_mqd *tmp_mqd;

        /* As with the KIQ init above, the driver needs to re-init the MQD
         * if mqd->cp_hqd_pq_control shows it was never initialized.
         */
        tmp_mqd = (struct v9_mqd *)adev->gfx.mec.mqd_backup[mqd_idx];

        if (!restore && (!tmp_mqd->cp_hqd_pq_control ||
            (!amdgpu_in_reset(adev) && !adev->in_suspend))) {
                memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
                ((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
                ((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
                mutex_lock(&adev->srbm_mutex);
                soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, 0);
                gfx_v9_0_mqd_init(ring);
                soc15_grbm_select(adev, 0, 0, 0, 0, 0);
                mutex_unlock(&adev->srbm_mutex);

                if (adev->gfx.mec.mqd_backup[mqd_idx])
                        memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation));
        } else {
                /* restore MQD to a clean status */
                if (adev->gfx.mec.mqd_backup[mqd_idx])
                        memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));
                /* reset ring buffer */
                ring->wptr = 0;
                atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 0);
                amdgpu_ring_clear_ring(ring);
        }

        return 0;
}

static int gfx_v9_0_kiq_resume(struct amdgpu_device *adev)
{
        gfx_v9_0_kiq_init_queue(&adev->gfx.kiq[0].ring);
        return 0;
}

static int gfx_v9_0_kcq_resume(struct amdgpu_device *adev)
{
        int i, r;

        gfx_v9_0_cp_compute_enable(adev, true);

        for (i = 0; i < adev->gfx.num_compute_rings; i++) {
                r = gfx_v9_0_kcq_init_queue(&adev->gfx.compute_ring[i], false);
                if (r)
                        return r;
        }

        return amdgpu_gfx_enable_kcq(adev, 0);
}

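/*
 * Bring up the CP block: load the CE/PFP/ME/MEC microcode when PSP
 * front-door loading isn't used, halt the gfx and compute pipes, resume
 * the KIQ first (it is needed to map the KCQs), then the gfx ring and
 * the KCQs, and finally ring-test everything.
 */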
static int gfx_v9_0_cp_resume(struct amdgpu_device *adev)
{
        int r, i;
        struct amdgpu_ring *ring;

        if (!(adev->flags & AMD_IS_APU))
                gfx_v9_0_enable_gui_idle_interrupt(adev, false);

        if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
                if (adev->gfx.num_gfx_rings) {
                        /* legacy firmware loading */
                        r = gfx_v9_0_cp_gfx_load_microcode(adev);
                        if (r)
                                return r;
                }

                r = gfx_v9_0_cp_compute_load_microcode(adev);
                if (r)
                        return r;
        }

        if (adev->gfx.num_gfx_rings)
                gfx_v9_0_cp_gfx_enable(adev, false);
        gfx_v9_0_cp_compute_enable(adev, false);

        r = gfx_v9_0_kiq_resume(adev);
        if (r)
                return r;

        if (adev->gfx.num_gfx_rings) {
                r = gfx_v9_0_cp_gfx_resume(adev);
                if (r)
                        return r;
        }

        r = gfx_v9_0_kcq_resume(adev);
        if (r)
                return r;

        if (adev->gfx.num_gfx_rings) {
                ring = &adev->gfx.gfx_ring[0];
                r = amdgpu_ring_test_helper(ring);
                if (r)
                        return r;
        }

        for (i = 0; i < adev->gfx.num_compute_rings; i++) {
                ring = &adev->gfx.compute_ring[i];
                amdgpu_ring_test_helper(ring);
        }

        gfx_v9_0_enable_gui_idle_interrupt(adev, true);

        return 0;
}

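/*
 * Mirror the DF memory-hashing status into TCP_ADDR_CONFIG so the
 * texture cache decodes addresses the same way the data fabric hashes
 * them; only GC 9.4.1 (Arcturus) and 9.4.2 (Aldebaran) need this.
 */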
static void gfx_v9_0_init_tcp_config(struct amdgpu_device *adev)
{
        u32 tmp;

        if (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 1) &&
            amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 2))
                return;

        tmp = RREG32_SOC15(GC, 0, mmTCP_ADDR_CONFIG);
        tmp = REG_SET_FIELD(tmp, TCP_ADDR_CONFIG, ENABLE64KHASH,
                                adev->df.hash_status.hash_64k);
        tmp = REG_SET_FIELD(tmp, TCP_ADDR_CONFIG, ENABLE2MHASH,
                                adev->df.hash_status.hash_2m);
        tmp = REG_SET_FIELD(tmp, TCP_ADDR_CONFIG, ENABLE1GHASH,
                                adev->df.hash_status.hash_1g);
        WREG32_SOC15(GC, 0, mmTCP_ADDR_CONFIG, tmp);
}

static void gfx_v9_0_cp_enable(struct amdgpu_device *adev, bool enable)
{
        if (adev->gfx.num_gfx_rings)
                gfx_v9_0_cp_gfx_enable(adev, enable);
        gfx_v9_0_cp_compute_enable(adev, enable);
}

static int gfx_v9_0_hw_init(struct amdgpu_ip_block *ip_block)
{
        int r;
        struct amdgpu_device *adev = ip_block->adev;

        amdgpu_gfx_cleaner_shader_init(adev, adev->gfx.cleaner_shader_size,
                                       adev->gfx.cleaner_shader_ptr);

        if (!amdgpu_sriov_vf(adev))
                gfx_v9_0_init_golden_registers(adev);

        gfx_v9_0_constants_init(adev);

        gfx_v9_0_init_tcp_config(adev);

        r = adev->gfx.rlc.funcs->resume(adev);
        if (r)
                return r;

        r = gfx_v9_0_cp_resume(adev);
        if (r)
                return r;

        if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2) &&
            !amdgpu_sriov_vf(adev))
                gfx_v9_4_2_set_power_brake_sequence(adev);

        return r;
}

static int gfx_v9_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
        struct amdgpu_device *adev = ip_block->adev;

        if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
                amdgpu_irq_put(adev, &adev->gfx.cp_ecc_error_irq, 0);
        amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
        amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
        amdgpu_irq_put(adev, &adev->gfx.bad_op_irq, 0);

        /* DF freeze and KCQ disable will fail once a RAS fatal interrupt
         * has been triggered, so skip them in that case.
         */
        if (!amdgpu_ras_intr_triggered())
                /* disable KCQs so the CPC stops touching memory that is
                 * no longer valid
                 */
                amdgpu_gfx_disable_kcq(adev, 0);

        if (amdgpu_sriov_vf(adev)) {
                gfx_v9_0_cp_gfx_enable(adev, false);
                /* Polling must be disabled for SR-IOV when the hw is done;
                 * otherwise the CPC engine may keep fetching the WB address,
                 * which is no longer valid after sw teardown, and trigger a
                 * DMAR read error on the hypervisor side.
                 */
                WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
                return 0;
        }

        /* Use the deinitialize sequence from CAIL when unbinding the device
         * from the driver; otherwise KIQ hangs when the device is bound back.
         */
        if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
                mutex_lock(&adev->srbm_mutex);
                soc15_grbm_select(adev, adev->gfx.kiq[0].ring.me,
                                adev->gfx.kiq[0].ring.pipe,
                                adev->gfx.kiq[0].ring.queue, 0, 0);
                gfx_v9_0_kiq_fini_register(&adev->gfx.kiq[0].ring);
                soc15_grbm_select(adev, 0, 0, 0, 0, 0);
                mutex_unlock(&adev->srbm_mutex);
        }

        gfx_v9_0_cp_enable(adev, false);

        /* Skip stopping RLC with A+A reset or when RLC controls GFX clock */
        if ((adev->gmc.xgmi.connected_to_cpu && amdgpu_in_reset(adev)) ||
            (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(9, 4, 2))) {
                dev_dbg(adev->dev, "Skipping RLC halt\n");
                return 0;
        }

        adev->gfx.rlc.funcs->stop(adev);
        return 0;
}

static int gfx_v9_0_suspend(struct amdgpu_ip_block *ip_block)
{
        return gfx_v9_0_hw_fini(ip_block);
}

static int gfx_v9_0_resume(struct amdgpu_ip_block *ip_block)
{
        return gfx_v9_0_hw_init(ip_block);
}

static bool gfx_v9_0_is_idle(struct amdgpu_ip_block *ip_block)
{
        struct amdgpu_device *adev = ip_block->adev;

        return !REG_GET_FIELD(RREG32_SOC15(GC, 0, mmGRBM_STATUS),
                              GRBM_STATUS, GUI_ACTIVE);
}

static int gfx_v9_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
        unsigned i;
        struct amdgpu_device *adev = ip_block->adev;

        for (i = 0; i < adev->usec_timeout; i++) {
                if (gfx_v9_0_is_idle(ip_block))
                        return 0;
                udelay(1);
        }
        return -ETIMEDOUT;
}

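/*
 * Build a GRBM_SOFT_RESET mask from the busy bits in GRBM_STATUS and
 * GRBM_STATUS2, halt the RLC and the CP front ends, then pulse the
 * reset bits with a ~50us settle delay on either side.
 */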
static int gfx_v9_0_soft_reset(struct amdgpu_ip_block *ip_block)
{
        u32 grbm_soft_reset = 0;
        u32 tmp;
        struct amdgpu_device *adev = ip_block->adev;

        /* GRBM_STATUS */
        tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS);
        if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
                   GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
                   GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK |
                   GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK |
                   GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK |
                   GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK)) {
                grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
                                                GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
                grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
                                                GRBM_SOFT_RESET, SOFT_RESET_GFX, 1);
        }

        if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) {
                grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
                                                GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
        }

        /* GRBM_STATUS2 */
        tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS2);
        if (REG_GET_FIELD(tmp, GRBM_STATUS2, RLC_BUSY))
                grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
                                                GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);

        if (grbm_soft_reset) {
                /* stop the rlc */
                adev->gfx.rlc.funcs->stop(adev);

                if (adev->gfx.num_gfx_rings)
                        /* Disable GFX parsing/prefetching */
                        gfx_v9_0_cp_gfx_enable(adev, false);

                /* Disable MEC parsing/prefetching */
                gfx_v9_0_cp_compute_enable(adev, false);

                tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
                tmp |= grbm_soft_reset;
                dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
                WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
                tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);

                udelay(50);

                tmp &= ~grbm_soft_reset;
                WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
                tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);

                /* Wait a little for things to settle down */
                udelay(50);
        }
        return 0;
}

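/*
 * Read the 64-bit RLC GPU clock counter through the KIQ: emit a
 * COPY_DATA packet that copies the counter into a writeback slot, then
 * poll the fence (with retries, bailing out early during GPU reset).
 * Used on Vega10 under SR-IOV, see gfx_v9_0_get_gpu_clock_counter().
 */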
static uint64_t gfx_v9_0_kiq_read_clock(struct amdgpu_device *adev)
{
        signed long r, cnt = 0;
        unsigned long flags;
        uint32_t seq, reg_val_offs = 0;
        uint64_t value = 0;
        struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
        struct amdgpu_ring *ring = &kiq->ring;

        BUG_ON(!ring->funcs->emit_rreg);

        spin_lock_irqsave(&kiq->ring_lock, flags);
        if (amdgpu_device_wb_get(adev, &reg_val_offs)) {
                pr_err("critical bug! too many kiq readers\n");
                goto failed_unlock;
        }
        amdgpu_ring_alloc(ring, 32);
        amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
        amdgpu_ring_write(ring, 9 |     /* src: gpu clock count */
                                (5 << 8) |      /* dst: memory */
                                (1 << 16) |     /* count select: 64-bit */
                                (1 << 20));     /* write confirm */
        amdgpu_ring_write(ring, 0);
        amdgpu_ring_write(ring, 0);
        amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
                                reg_val_offs * 4));
        amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
                                reg_val_offs * 4));
        r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
        if (r)
                goto failed_undo;

        amdgpu_ring_commit(ring);
        spin_unlock_irqrestore(&kiq->ring_lock, flags);

        r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);

        /* Don't keep waiting in the GPU-reset case, since that may block
         * the gpu_recover() routine forever: e.g. if this KIQ read is
         * triggered from TTM, ttm_bo_lock_delayed_workqueue() will never
         * return while we keep waiting here, which hangs gpu_recover().
         *
         * Also don't keep waiting when called from IRQ context.
         */
        if (r < 1 && amdgpu_in_reset(adev))
                goto failed_kiq_read;

        might_sleep();
        while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
                msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
                r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
        }

        if (cnt > MAX_KIQ_REG_TRY)
                goto failed_kiq_read;

        mb();
        value = (uint64_t)adev->wb.wb[reg_val_offs] |
                (uint64_t)adev->wb.wb[reg_val_offs + 1] << 32ULL;
        amdgpu_device_wb_free(adev, reg_val_offs);
        return value;

failed_undo:
        amdgpu_ring_undo(ring);
failed_unlock:
        spin_unlock_irqrestore(&kiq->ring_lock, flags);
failed_kiq_read:
        if (reg_val_offs)
                amdgpu_device_wb_free(adev, reg_val_offs);
        pr_err("failed to read gpu clock\n");
        return ~0;
}

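/*
 * Sample the GPU clock counter.  Renoir (GC 9.3.0) reads the SMUIO
 * golden TSC with an upper/lower/upper sequence to catch 32-bit
 * rollover; everything else latches the RLC counter, with GFXOFF
 * temporarily disabled so the RLC registers remain accessible.
 */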
static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev)
{
        uint64_t clock, clock_lo, clock_hi, hi_check;

        switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
        case IP_VERSION(9, 3, 0):
                preempt_disable();
                clock_hi = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Renoir);
                clock_lo = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Renoir);
                hi_check = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Renoir);
                /* The SMUIO TSC clock runs at 100 MHz, so the 32-bit lower
                 * half rolls over roughly every 42 seconds.
                 */
                if (hi_check != clock_hi) {
                        clock_lo = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Renoir);
                        clock_hi = hi_check;
                }
                preempt_enable();
                clock = clock_lo | (clock_hi << 32ULL);
                break;
        default:
                amdgpu_gfx_off_ctrl(adev, false);
                mutex_lock(&adev->gfx.gpu_clock_mutex);
                if (amdgpu_ip_version(adev, GC_HWIP, 0) ==
                            IP_VERSION(9, 0, 1) &&
                    amdgpu_sriov_runtime(adev)) {
                        clock = gfx_v9_0_kiq_read_clock(adev);
                } else {
                        WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
                        clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) |
                                ((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
                }
                mutex_unlock(&adev->gfx.gpu_clock_mutex);
                amdgpu_gfx_off_ctrl(adev, true);
                break;
        }
        return clock;
}

static void gfx_v9_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
                                          uint32_t vmid,
                                          uint32_t gds_base, uint32_t gds_size,
                                          uint32_t gws_base, uint32_t gws_size,
                                          uint32_t oa_base, uint32_t oa_size)
{
        struct amdgpu_device *adev = ring->adev;

        /* GDS Base */
        gfx_v9_0_write_data_to_reg(ring, 0, false,
                                   SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_BASE) + 2 * vmid,
                                   gds_base);

        /* GDS Size */
        gfx_v9_0_write_data_to_reg(ring, 0, false,
                                   SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE) + 2 * vmid,
                                   gds_size);

        /* GWS */
        gfx_v9_0_write_data_to_reg(ring, 0, false,
                                   SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID0) + vmid,
                                   gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);

        /* OA */
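        /* e.g. oa_base = 4, oa_size = 4 yields
         * (1 << 8) - (1 << 4) = 0xf0: a contiguous mask of oa_size bits
         * starting at bit oa_base
         */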
        gfx_v9_0_write_data_to_reg(ring, 0, false,
                                   SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID0) + vmid,
                                   (1 << (oa_size + oa_base)) - (1 << oa_base));
}

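/*
 * Raw GFX9 ISA binaries used by the EDC workarounds below: each shader
 * writes every VGPR (or SGPR) so the register files' ECC state starts
 * out initialized.  The Arcturus variant also covers the accumulation
 * VGPRs.
 */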
static const u32 vgpr_init_compute_shader[] =
{
        0xb07c0000, 0xbe8000ff,
        0x000000f8, 0xbf110800,
        0x7e000280, 0x7e020280,
        0x7e040280, 0x7e060280,
        0x7e080280, 0x7e0a0280,
        0x7e0c0280, 0x7e0e0280,
        0x80808800, 0xbe803200,
        0xbf84fff5, 0xbf9c0000,
        0xd28c0001, 0x0001007f,
        0xd28d0001, 0x0002027e,
        0x10020288, 0xb8810904,
        0xb7814000, 0xd1196a01,
        0x00000301, 0xbe800087,
        0xbefc00c1, 0xd89c4000,
        0x00020201, 0xd89cc080,
        0x00040401, 0x320202ff,
        0x00000800, 0x80808100,
        0xbf84fff8, 0x7e020280,
        0xbf810000, 0x00000000,
};

static const u32 sgpr_init_compute_shader[] =
{
        0xb07c0000, 0xbe8000ff,
        0x0000005f, 0xbee50080,
        0xbe812c65, 0xbe822c65,
        0xbe832c65, 0xbe842c65,
        0xbe852c65, 0xb77c0005,
        0x80808500, 0xbf84fff8,
        0xbe800080, 0xbf810000,
};

static const u32 vgpr_init_compute_shader_arcturus[] = {
        0xd3d94000, 0x18000080, 0xd3d94001, 0x18000080, 0xd3d94002, 0x18000080,
        0xd3d94003, 0x18000080, 0xd3d94004, 0x18000080, 0xd3d94005, 0x18000080,
        0xd3d94006, 0x18000080, 0xd3d94007, 0x18000080, 0xd3d94008, 0x18000080,
        0xd3d94009, 0x18000080, 0xd3d9400a, 0x18000080, 0xd3d9400b, 0x18000080,
        0xd3d9400c, 0x18000080, 0xd3d9400d, 0x18000080, 0xd3d9400e, 0x18000080,
        0xd3d9400f, 0x18000080, 0xd3d94010, 0x18000080, 0xd3d94011, 0x18000080,
        0xd3d94012, 0x18000080, 0xd3d94013, 0x18000080, 0xd3d94014, 0x18000080,
        0xd3d94015, 0x18000080, 0xd3d94016, 0x18000080, 0xd3d94017, 0x18000080,
        0xd3d94018, 0x18000080, 0xd3d94019, 0x18000080, 0xd3d9401a, 0x18000080,
        0xd3d9401b, 0x18000080, 0xd3d9401c, 0x18000080, 0xd3d9401d, 0x18000080,
        0xd3d9401e, 0x18000080, 0xd3d9401f, 0x18000080, 0xd3d94020, 0x18000080,
        0xd3d94021, 0x18000080, 0xd3d94022, 0x18000080, 0xd3d94023, 0x18000080,
        0xd3d94024, 0x18000080, 0xd3d94025, 0x18000080, 0xd3d94026, 0x18000080,
        0xd3d94027, 0x18000080, 0xd3d94028, 0x18000080, 0xd3d94029, 0x18000080,
        0xd3d9402a, 0x18000080, 0xd3d9402b, 0x18000080, 0xd3d9402c, 0x18000080,
        0xd3d9402d, 0x18000080, 0xd3d9402e, 0x18000080, 0xd3d9402f, 0x18000080,
        0xd3d94030, 0x18000080, 0xd3d94031, 0x18000080, 0xd3d94032, 0x18000080,
        0xd3d94033, 0x18000080, 0xd3d94034, 0x18000080, 0xd3d94035, 0x18000080,
        0xd3d94036, 0x18000080, 0xd3d94037, 0x18000080, 0xd3d94038, 0x18000080,
        0xd3d94039, 0x18000080, 0xd3d9403a, 0x18000080, 0xd3d9403b, 0x18000080,
        0xd3d9403c, 0x18000080, 0xd3d9403d, 0x18000080, 0xd3d9403e, 0x18000080,
        0xd3d9403f, 0x18000080, 0xd3d94040, 0x18000080, 0xd3d94041, 0x18000080,
        0xd3d94042, 0x18000080, 0xd3d94043, 0x18000080, 0xd3d94044, 0x18000080,
        0xd3d94045, 0x18000080, 0xd3d94046, 0x18000080, 0xd3d94047, 0x18000080,
        0xd3d94048, 0x18000080, 0xd3d94049, 0x18000080, 0xd3d9404a, 0x18000080,
        0xd3d9404b, 0x18000080, 0xd3d9404c, 0x18000080, 0xd3d9404d, 0x18000080,
        0xd3d9404e, 0x18000080, 0xd3d9404f, 0x18000080, 0xd3d94050, 0x18000080,
        0xd3d94051, 0x18000080, 0xd3d94052, 0x18000080, 0xd3d94053, 0x18000080,
        0xd3d94054, 0x18000080, 0xd3d94055, 0x18000080, 0xd3d94056, 0x18000080,
        0xd3d94057, 0x18000080, 0xd3d94058, 0x18000080, 0xd3d94059, 0x18000080,
        0xd3d9405a, 0x18000080, 0xd3d9405b, 0x18000080, 0xd3d9405c, 0x18000080,
        0xd3d9405d, 0x18000080, 0xd3d9405e, 0x18000080, 0xd3d9405f, 0x18000080,
        0xd3d94060, 0x18000080, 0xd3d94061, 0x18000080, 0xd3d94062, 0x18000080,
        0xd3d94063, 0x18000080, 0xd3d94064, 0x18000080, 0xd3d94065, 0x18000080,
        0xd3d94066, 0x18000080, 0xd3d94067, 0x18000080, 0xd3d94068, 0x18000080,
        0xd3d94069, 0x18000080, 0xd3d9406a, 0x18000080, 0xd3d9406b, 0x18000080,
        0xd3d9406c, 0x18000080, 0xd3d9406d, 0x18000080, 0xd3d9406e, 0x18000080,
        0xd3d9406f, 0x18000080, 0xd3d94070, 0x18000080, 0xd3d94071, 0x18000080,
        0xd3d94072, 0x18000080, 0xd3d94073, 0x18000080, 0xd3d94074, 0x18000080,
        0xd3d94075, 0x18000080, 0xd3d94076, 0x18000080, 0xd3d94077, 0x18000080,
        0xd3d94078, 0x18000080, 0xd3d94079, 0x18000080, 0xd3d9407a, 0x18000080,
        0xd3d9407b, 0x18000080, 0xd3d9407c, 0x18000080, 0xd3d9407d, 0x18000080,
        0xd3d9407e, 0x18000080, 0xd3d9407f, 0x18000080, 0xd3d94080, 0x18000080,
        0xd3d94081, 0x18000080, 0xd3d94082, 0x18000080, 0xd3d94083, 0x18000080,
        0xd3d94084, 0x18000080, 0xd3d94085, 0x18000080, 0xd3d94086, 0x18000080,
        0xd3d94087, 0x18000080, 0xd3d94088, 0x18000080, 0xd3d94089, 0x18000080,
        0xd3d9408a, 0x18000080, 0xd3d9408b, 0x18000080, 0xd3d9408c, 0x18000080,
        0xd3d9408d, 0x18000080, 0xd3d9408e, 0x18000080, 0xd3d9408f, 0x18000080,
        0xd3d94090, 0x18000080, 0xd3d94091, 0x18000080, 0xd3d94092, 0x18000080,
        0xd3d94093, 0x18000080, 0xd3d94094, 0x18000080, 0xd3d94095, 0x18000080,
        0xd3d94096, 0x18000080, 0xd3d94097, 0x18000080, 0xd3d94098, 0x18000080,
        0xd3d94099, 0x18000080, 0xd3d9409a, 0x18000080, 0xd3d9409b, 0x18000080,
        0xd3d9409c, 0x18000080, 0xd3d9409d, 0x18000080, 0xd3d9409e, 0x18000080,
        0xd3d9409f, 0x18000080, 0xd3d940a0, 0x18000080, 0xd3d940a1, 0x18000080,
        0xd3d940a2, 0x18000080, 0xd3d940a3, 0x18000080, 0xd3d940a4, 0x18000080,
        0xd3d940a5, 0x18000080, 0xd3d940a6, 0x18000080, 0xd3d940a7, 0x18000080,
        0xd3d940a8, 0x18000080, 0xd3d940a9, 0x18000080, 0xd3d940aa, 0x18000080,
        0xd3d940ab, 0x18000080, 0xd3d940ac, 0x18000080, 0xd3d940ad, 0x18000080,
        0xd3d940ae, 0x18000080, 0xd3d940af, 0x18000080, 0xd3d940b0, 0x18000080,
        0xd3d940b1, 0x18000080, 0xd3d940b2, 0x18000080, 0xd3d940b3, 0x18000080,
        0xd3d940b4, 0x18000080, 0xd3d940b5, 0x18000080, 0xd3d940b6, 0x18000080,
        0xd3d940b7, 0x18000080, 0xd3d940b8, 0x18000080, 0xd3d940b9, 0x18000080,
        0xd3d940ba, 0x18000080, 0xd3d940bb, 0x18000080, 0xd3d940bc, 0x18000080,
        0xd3d940bd, 0x18000080, 0xd3d940be, 0x18000080, 0xd3d940bf, 0x18000080,
        0xd3d940c0, 0x18000080, 0xd3d940c1, 0x18000080, 0xd3d940c2, 0x18000080,
        0xd3d940c3, 0x18000080, 0xd3d940c4, 0x18000080, 0xd3d940c5, 0x18000080,
        0xd3d940c6, 0x18000080, 0xd3d940c7, 0x18000080, 0xd3d940c8, 0x18000080,
        0xd3d940c9, 0x18000080, 0xd3d940ca, 0x18000080, 0xd3d940cb, 0x18000080,
        0xd3d940cc, 0x18000080, 0xd3d940cd, 0x18000080, 0xd3d940ce, 0x18000080,
        0xd3d940cf, 0x18000080, 0xd3d940d0, 0x18000080, 0xd3d940d1, 0x18000080,
        0xd3d940d2, 0x18000080, 0xd3d940d3, 0x18000080, 0xd3d940d4, 0x18000080,
        0xd3d940d5, 0x18000080, 0xd3d940d6, 0x18000080, 0xd3d940d7, 0x18000080,
        0xd3d940d8, 0x18000080, 0xd3d940d9, 0x18000080, 0xd3d940da, 0x18000080,
        0xd3d940db, 0x18000080, 0xd3d940dc, 0x18000080, 0xd3d940dd, 0x18000080,
        0xd3d940de, 0x18000080, 0xd3d940df, 0x18000080, 0xd3d940e0, 0x18000080,
        0xd3d940e1, 0x18000080, 0xd3d940e2, 0x18000080, 0xd3d940e3, 0x18000080,
        0xd3d940e4, 0x18000080, 0xd3d940e5, 0x18000080, 0xd3d940e6, 0x18000080,
        0xd3d940e7, 0x18000080, 0xd3d940e8, 0x18000080, 0xd3d940e9, 0x18000080,
        0xd3d940ea, 0x18000080, 0xd3d940eb, 0x18000080, 0xd3d940ec, 0x18000080,
        0xd3d940ed, 0x18000080, 0xd3d940ee, 0x18000080, 0xd3d940ef, 0x18000080,
        0xd3d940f0, 0x18000080, 0xd3d940f1, 0x18000080, 0xd3d940f2, 0x18000080,
        0xd3d940f3, 0x18000080, 0xd3d940f4, 0x18000080, 0xd3d940f5, 0x18000080,
        0xd3d940f6, 0x18000080, 0xd3d940f7, 0x18000080, 0xd3d940f8, 0x18000080,
        0xd3d940f9, 0x18000080, 0xd3d940fa, 0x18000080, 0xd3d940fb, 0x18000080,
        0xd3d940fc, 0x18000080, 0xd3d940fd, 0x18000080, 0xd3d940fe, 0x18000080,
        0xd3d940ff, 0x18000080, 0xb07c0000, 0xbe8a00ff, 0x000000f8, 0xbf11080a,
        0x7e000280, 0x7e020280, 0x7e040280, 0x7e060280, 0x7e080280, 0x7e0a0280,
        0x7e0c0280, 0x7e0e0280, 0x808a880a, 0xbe80320a, 0xbf84fff5, 0xbf9c0000,
        0xd28c0001, 0x0001007f, 0xd28d0001, 0x0002027e, 0x10020288, 0xb88b0904,
        0xb78b4000, 0xd1196a01, 0x00001701, 0xbe8a0087, 0xbefc00c1, 0xd89c4000,
        0x00020201, 0xd89cc080, 0x00040401, 0x320202ff, 0x00000800, 0x808a810a,
        0xbf84fff8, 0xbf810000,
};

/* When the register arrays below change, please update gpr_reg_size and
 * sec_ded_counter_reg_size in gfx_v9_0_do_edc_gpr_workarounds() so that
 * all gfx9 ASICs stay covered.
 */
static const struct soc15_reg_entry vgpr_init_regs[] = {
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 },
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 4 },
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 },
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x3f },
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x400000 },  /* 64KB LDS */
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0xffffffff },
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0xffffffff },
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0xffffffff },
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0xffffffff },
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE4), 0xffffffff },
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE5), 0xffffffff },
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE6), 0xffffffff },
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0xffffffff },
};

static const struct soc15_reg_entry vgpr_init_regs_arcturus[] = {
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 },
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 4 },
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 },
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0xbf },
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x400000 },  /* 64KB LDS */
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0xffffffff },
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0xffffffff },
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0xffffffff },
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0xffffffff },
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE4), 0xffffffff },
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE5), 0xffffffff },
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE6), 0xffffffff },
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0xffffffff },
};

static const struct soc15_reg_entry sgpr1_init_regs[] = {
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 },
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 8 },
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 },
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x240 }, /* (80 GPRS) */
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x0 },
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0x000000ff },
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0x000000ff },
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0x000000ff },
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0x000000ff },
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE4), 0x000000ff },
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE5), 0x000000ff },
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE6), 0x000000ff },
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0x000000ff },
};

static const struct soc15_reg_entry sgpr2_init_regs[] = {
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 },
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 8 },
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 },
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x240 }, /* (80 GPRS) */
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x0 },
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0x0000ff00 },
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0x0000ff00 },
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0x0000ff00 },
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0x0000ff00 },
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE4), 0x0000ff00 },
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE5), 0x0000ff00 },
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE6), 0x0000ff00 },
   { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0x0000ff00 },
};

static const struct soc15_reg_entry gfx_v9_0_edc_counter_regs[] = {
   { SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_SCRATCH_CNT), 0, 1, 1},
   { SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_UCODE_CNT), 0, 1, 1},
   { SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT), 0, 1, 1},
   { SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_TAG_CNT), 0, 1, 1},
   { SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_DMA_CNT), 0, 1, 1},
   { SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_TAG_CNT), 0, 1, 1},
   { SOC15_REG_ENTRY(GC, 0, mmDC_EDC_CSINVOC_CNT), 0, 1, 1},
   { SOC15_REG_ENTRY(GC, 0, mmDC_EDC_RESTORE_CNT), 0, 1, 1},
   { SOC15_REG_ENTRY(GC, 0, mmDC_EDC_STATE_CNT), 0, 1, 1},
   { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT), 0, 1, 1},
   { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_GRBM_CNT), 0, 1, 1},
   { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_DED), 0, 1, 1},
   { SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT), 0, 4, 1},
   { SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), 0, 4, 6},
   { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_DED_CNT), 0, 4, 16},
   { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_INFO), 0, 4, 16},
   { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_SEC_CNT), 0, 4, 16},
   { SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 1, 16},
   { SOC15_REG_ENTRY(GC, 0, mmTCP_ATC_EDC_GATCL1_CNT), 0, 4, 16},
   { SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT), 0, 4, 16},
   { SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 0, 4, 16},
   { SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT), 0, 4, 16},
   { SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 0, 4, 6},
   { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 0, 4, 16},
   { SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), 0, 4, 16},
   { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT), 0, 1, 1},
   { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT), 0, 1, 1},
   { SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 1, 32},
   { SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 1, 32},
   { SOC15_REG_ENTRY(GC, 0, mmTCI_EDC_CNT), 0, 1, 72},
   { SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 0, 1, 16},
   { SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT), 0, 1, 2},
   { SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 0, 4, 6},
};

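/*
 * Initialize GDS ECC state: open the full GDS aperture for VMID0, clear
 * it with a CPDMA DMA_DATA packet (immediate zero data, GDS
 * destination), busy-wait until the CP consumes the packet, then close
 * the aperture again.
 */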
static int gfx_v9_0_do_edc_gds_workarounds(struct amdgpu_device *adev)
{
        struct amdgpu_ring *ring = &adev->gfx.compute_ring[0];
        int i, r;

        /* only support when RAS is enabled */
        if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
                return 0;

        r = amdgpu_ring_alloc(ring, 7);
        if (r) {
                drm_err(adev_to_drm(adev), "GDS workarounds failed to lock ring %s (%d).\n",
                        ring->name, r);
                return r;
        }

        WREG32_SOC15(GC, 0, mmGDS_VMID0_BASE, 0x00000000);
        WREG32_SOC15(GC, 0, mmGDS_VMID0_SIZE, adev->gds.gds_size);

        amdgpu_ring_write(ring, PACKET3(PACKET3_DMA_DATA, 5));
        amdgpu_ring_write(ring, (PACKET3_DMA_DATA_CP_SYNC |
                                PACKET3_DMA_DATA_DST_SEL(1) |
                                PACKET3_DMA_DATA_SRC_SEL(2) |
                                PACKET3_DMA_DATA_ENGINE(0)));
        amdgpu_ring_write(ring, 0);
        amdgpu_ring_write(ring, 0);
        amdgpu_ring_write(ring, 0);
        amdgpu_ring_write(ring, 0);
        amdgpu_ring_write(ring, PACKET3_DMA_DATA_CMD_RAW_WAIT |
                                adev->gds.gds_size);

        amdgpu_ring_commit(ring);

        for (i = 0; i < adev->usec_timeout; i++) {
                if (ring->wptr == gfx_v9_0_ring_get_rptr_compute(ring))
                        break;
                udelay(1);
        }

        if (i >= adev->usec_timeout)
                r = -ETIMEDOUT;

        WREG32_SOC15(GC, 0, mmGDS_VMID0_SIZE, 0x00000000);

        return r;
}

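/*
 * Initialize GPR ECC state: build one IB that dispatches the init
 * shaders three times (once for the VGPRs, twice for the SGPRs with one
 * half of the CUs per pass), sized to land a wave on every CU, then
 * wait for the fence.
 */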
static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
{
        struct amdgpu_ring *ring = &adev->gfx.compute_ring[0];
        struct amdgpu_ib ib;
        struct dma_fence *f = NULL;
        int r, i;
        unsigned total_size, vgpr_offset, sgpr_offset;
        u64 gpu_addr;

        int compute_dim_x = adev->gfx.config.max_shader_engines *
                                                adev->gfx.config.max_cu_per_sh *
                                                adev->gfx.config.max_sh_per_se;
        int sgpr_work_group_size = 5;
        int gpr_reg_size = adev->gfx.config.max_shader_engines + 6;
        int vgpr_init_shader_size;
        const u32 *vgpr_init_shader_ptr;
        const struct soc15_reg_entry *vgpr_init_regs_ptr;

        /* only supported when RAS is enabled */
        if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
                return 0;

        /* bail if the compute ring is not ready */
        if (!ring->sched.ready)
                return 0;

        if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 1)) {
                vgpr_init_shader_ptr = vgpr_init_compute_shader_arcturus;
                vgpr_init_shader_size = sizeof(vgpr_init_compute_shader_arcturus);
                vgpr_init_regs_ptr = vgpr_init_regs_arcturus;
        } else {
                vgpr_init_shader_ptr = vgpr_init_compute_shader;
                vgpr_init_shader_size = sizeof(vgpr_init_compute_shader);
                vgpr_init_regs_ptr = vgpr_init_regs;
        }

        total_size =
                (gpr_reg_size * 3 + 4 + 5 + 2) * 4; /* VGPRS */
        total_size +=
                (gpr_reg_size * 3 + 4 + 5 + 2) * 4; /* SGPRS1 */
        total_size +=
                (gpr_reg_size * 3 + 4 + 5 + 2) * 4; /* SGPRS2 */
        total_size = ALIGN(total_size, 256);
        vgpr_offset = total_size;
        total_size += ALIGN(vgpr_init_shader_size, 256);
        sgpr_offset = total_size;
        total_size += sizeof(sgpr_init_compute_shader);

        /* allocate an indirect buffer to put the commands in */
        memset(&ib, 0, sizeof(ib));
        r = amdgpu_ib_get(adev, NULL, total_size,
                                        AMDGPU_IB_POOL_DIRECT, &ib);
        if (r) {
                drm_err(adev_to_drm(adev), "failed to get ib (%d).\n", r);
                return r;
        }

        /* load the compute shaders */
        for (i = 0; i < vgpr_init_shader_size/sizeof(u32); i++)
                ib.ptr[i + (vgpr_offset / 4)] = vgpr_init_shader_ptr[i];

        for (i = 0; i < ARRAY_SIZE(sgpr_init_compute_shader); i++)
                ib.ptr[i + (sgpr_offset / 4)] = sgpr_init_compute_shader[i];

        /* init the ib length to 0 */
        ib.length_dw = 0;

        /* VGPR */
        /* write the register state for the compute dispatch */
        for (i = 0; i < gpr_reg_size; i++) {
                ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
                ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(vgpr_init_regs_ptr[i])
                                                                - PACKET3_SET_SH_REG_START;
                ib.ptr[ib.length_dw++] = vgpr_init_regs_ptr[i].reg_value;
        }
        /* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
        gpu_addr = (ib.gpu_addr + (u64)vgpr_offset) >> 8;
        ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
        ib.ptr[ib.length_dw++] = SOC15_REG_OFFSET(GC, 0, mmCOMPUTE_PGM_LO)
                                                        - PACKET3_SET_SH_REG_START;
        ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
        ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);

        /* write dispatch packet */
        ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
        ib.ptr[ib.length_dw++] = compute_dim_x * 2; /* x */
        ib.ptr[ib.length_dw++] = 1; /* y */
        ib.ptr[ib.length_dw++] = 1; /* z */
        ib.ptr[ib.length_dw++] =
                REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);

        /* write CS partial flush packet */
        ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
        ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);

        /* SGPR1 */
        /* write the register state for the compute dispatch */
        for (i = 0; i < gpr_reg_size; i++) {
                ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
                ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(sgpr1_init_regs[i])
                                                                - PACKET3_SET_SH_REG_START;
                ib.ptr[ib.length_dw++] = sgpr1_init_regs[i].reg_value;
        }
        /* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
        gpu_addr = (ib.gpu_addr + (u64)sgpr_offset) >> 8;
        ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
        ib.ptr[ib.length_dw++] = SOC15_REG_OFFSET(GC, 0, mmCOMPUTE_PGM_LO)
                                                        - PACKET3_SET_SH_REG_START;
        ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
        ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);

        /* write dispatch packet */
        ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
        ib.ptr[ib.length_dw++] = compute_dim_x / 2 * sgpr_work_group_size; /* x */
        ib.ptr[ib.length_dw++] = 1; /* y */
        ib.ptr[ib.length_dw++] = 1; /* z */
        ib.ptr[ib.length_dw++] =
                REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);

        /* write CS partial flush packet */
        ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
        ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);

        /* SGPR2 */
        /* write the register state for the compute dispatch */
        for (i = 0; i < gpr_reg_size; i++) {
                ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
                ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(sgpr2_init_regs[i])
                                                                - PACKET3_SET_SH_REG_START;
                ib.ptr[ib.length_dw++] = sgpr2_init_regs[i].reg_value;
        }
        /* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
        gpu_addr = (ib.gpu_addr + (u64)sgpr_offset) >> 8;
        ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
        ib.ptr[ib.length_dw++] = SOC15_REG_OFFSET(GC, 0, mmCOMPUTE_PGM_LO)
                                                        - PACKET3_SET_SH_REG_START;
        ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
        ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);

        /* write dispatch packet */
        ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
        ib.ptr[ib.length_dw++] = compute_dim_x / 2 * sgpr_work_group_size; /* x */
        ib.ptr[ib.length_dw++] = 1; /* y */
        ib.ptr[ib.length_dw++] = 1; /* z */
        ib.ptr[ib.length_dw++] =
                REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);

        /* write CS partial flush packet */
        ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
        ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);

        /* schedule the ib on the ring */
        r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
        if (r) {
                drm_err(adev_to_drm(adev), "ib schedule failed (%d).\n", r);
                goto fail;
        }

        /* wait for the GPU to finish processing the IB */
        r = dma_fence_wait(f, false);
        if (r) {
                drm_err(adev_to_drm(adev), "fence wait failed (%d).\n", r);
                goto fail;
        }

fail:
        amdgpu_ib_free(&ib, NULL);
        dma_fence_put(f);

        return r;
}

static int gfx_v9_0_early_init(struct amdgpu_ip_block *ip_block)
{
        struct amdgpu_device *adev = ip_block->adev;

        adev->gfx.funcs = &gfx_v9_0_gfx_funcs;

        if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 1) ||
            amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2))
                adev->gfx.num_gfx_rings = 0;
        else
                adev->gfx.num_gfx_rings = GFX9_NUM_GFX_RINGS;
        adev->gfx.xcc_mask = 1;
        adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev),
                                          AMDGPU_MAX_COMPUTE_RINGS);
        gfx_v9_0_set_kiq_pm4_funcs(adev);
        gfx_v9_0_set_ring_funcs(adev);
        gfx_v9_0_set_irq_funcs(adev);
        gfx_v9_0_set_gds_init(adev);
        gfx_v9_0_set_rlc_funcs(adev);

        /* init rlcg reg access ctrl */
        gfx_v9_0_init_rlcg_reg_access_ctrl(adev);

        return gfx_v9_0_init_microcode(adev);
}

static int gfx_v9_0_ecc_late_init(struct amdgpu_ip_block *ip_block)
{
        struct amdgpu_device *adev = ip_block->adev;
        int r;

        /*
         * Temporary workaround for CP firmware failing to update the read
         * pointer when CPDMA writes the clearing operation to GDS during
         * the suspend/resume sequence on several cards, so limit this
         * operation to the cold-boot sequence.
         */
        if (!adev->in_suspend && adev->gds.gds_size) {
                r = gfx_v9_0_do_edc_gds_workarounds(adev);
                if (r)
                        return r;
        }

        /* requires IBs so do in late init after IB pool is initialized */
        if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2))
                r = gfx_v9_4_2_do_edc_gpr_workarounds(adev);
        else
                r = gfx_v9_0_do_edc_gpr_workarounds(adev);

        if (r)
                return r;

        if (adev->gfx.ras &&
            adev->gfx.ras->enable_watchdog_timer)
                adev->gfx.ras->enable_watchdog_timer(adev);

        return 0;
}

static int gfx_v9_0_late_init(struct amdgpu_ip_block *ip_block)
{
        struct amdgpu_device *adev = ip_block->adev;
        int r;

        r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
        if (r)
                return r;

        r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
        if (r)
                return r;

        r = amdgpu_irq_get(adev, &adev->gfx.bad_op_irq, 0);
        if (r)
                return r;

        r = gfx_v9_0_ecc_late_init(ip_block);
        if (r)
                return r;

        if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2))
                gfx_v9_4_2_debug_trap_config_init(adev,
                        adev->vm_manager.first_kfd_vmid, AMDGPU_NUM_VMID);
        else
                gfx_v9_0_debug_trap_config_init(adev,
                        adev->vm_manager.first_kfd_vmid, AMDGPU_NUM_VMID);

        return 0;
}

static bool gfx_v9_0_is_rlc_enabled(struct amdgpu_device *adev)
{
        uint32_t rlc_setting;

        /* RLC is considered enabled only when its F32 core is running */
        rlc_setting = RREG32_SOC15(GC, 0, mmRLC_CNTL);
        if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
                return false;

        return true;
}

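/*
 * Enter RLC safe mode by writing CMD | MESSAGE to RLC_SAFE_MODE and
 * polling until the RLC acknowledges by clearing the CMD bit; exiting
 * writes CMD alone and does not wait.
 */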
static void gfx_v9_0_set_safe_mode(struct amdgpu_device *adev, int xcc_id)
{
        uint32_t data;
        unsigned i;

        data = RLC_SAFE_MODE__CMD_MASK;
        data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
        WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);

        /* wait for RLC_SAFE_MODE */
        for (i = 0; i < adev->usec_timeout; i++) {
                if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
                        break;
                udelay(1);
        }
}

static void gfx_v9_0_unset_safe_mode(struct amdgpu_device *adev, int xcc_id)
{
        uint32_t data;

        data = RLC_SAFE_MODE__CMD_MASK;
        WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
}

static void gfx_v9_0_update_gfx_cg_power_gating(struct amdgpu_device *adev,
                                                bool enable)
{
        amdgpu_gfx_rlc_enter_safe_mode(adev, 0);

        if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) && enable) {
                gfx_v9_0_enable_gfx_cg_power_gating(adev, true);
                if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PIPELINE)
                        gfx_v9_0_enable_gfx_pipeline_powergating(adev, true);
        } else {
                gfx_v9_0_enable_gfx_cg_power_gating(adev, false);
                if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PIPELINE)
                        gfx_v9_0_enable_gfx_pipeline_powergating(adev, false);
        }

        amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
}

static void gfx_v9_0_update_gfx_mg_power_gating(struct amdgpu_device *adev,
                                                bool enable)
{
        /* TODO: double check if we need to perform under safe mode */
        /* gfx_v9_0_enter_rlc_safe_mode(adev); */

        if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable)
                gfx_v9_0_enable_gfx_static_mg_power_gating(adev, true);
        else
                gfx_v9_0_enable_gfx_static_mg_power_gating(adev, false);

        if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG) && enable)
                gfx_v9_0_enable_gfx_dynamic_mg_power_gating(adev, true);
        else
                gfx_v9_0_enable_gfx_dynamic_mg_power_gating(adev, false);

        /* gfx_v9_0_exit_rlc_safe_mode(adev); */
}

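/*
 * MGCG enable clears the relevant RLC_CGTT_MGCG_OVERRIDE bits, then
 * opts the RLC and CP memories into light sleep; disable restores the
 * override bits first and then turns light sleep back off.
 */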
static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
                                                      bool enable)
{
        uint32_t data, def;

        /* It is disabled by HW by default */
        if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
                /* 1 - RLC_CGTT_MGCG_OVERRIDE */
                def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);

                if (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 2, 1))
                        data &= ~RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK;

                data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
                          RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
                          RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);

                /* only for Vega10 & Raven1 */
                data |= RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK;

                if (def != data)
                        WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);

                /* MGLS is a global flag to control all MGLS in GFX */
                if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
                        /* 2 - RLC memory Light sleep */
                        if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) {
                                def = data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
                                data |= RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
                                if (def != data)
                                        WREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL, data);
                        }
                        /* 3 - CP memory Light sleep */
                        if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
                                def = data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
                                data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
                                if (def != data)
                                        WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
                        }
                }
        } else {
                /* 1 - MGCG_OVERRIDE */
                def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);

                if (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 2, 1))
                        data |= RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK;

                data |= (RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
                         RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
                         RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
                         RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);

                if (def != data)
                        WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);

                /* 2 - disable MGLS in RLC */
                data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
                if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) {
                        data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
                        WREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL, data);
                }

                /* 3 - disable MGLS in CP */
                data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
                if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) {
                        data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
                        WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
                }
        }
}

static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
                                           bool enable)
{
        uint32_t data, def;

        if (!adev->gfx.num_gfx_rings)
                return;

        /* Enable 3D CGCG/CGLS */
        if (enable) {
                /* write cmd to clear cgcg/cgls ov */
                def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
                /* unset CGCG override */
                data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_GFX3D_CG_OVERRIDE_MASK;
                /* update CGCG and CGLS override bits */
                if (def != data)
                        WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);

                /* enable 3Dcgcg FSM(0x0000363f) */
                def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);

                if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)
                        data = (0x36 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
                                RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
                else
                        data = 0x0 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT;

                if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS)
                        data |= (0x000F << RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
                                RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK;
                if (def != data)
                        WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data);

                /* set IDLE_POLL_COUNT(0x00900100) */
                def = RREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL);
                data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
                        (0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
                if (def != data)
                        WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL, data);
        } else {
                /* Disable CGCG/CGLS */
                def = data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
                /* disable cgcg, cgls should be disabled */
                data &= ~(RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK |
                          RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK);
                /* disable cgcg and cgls in FSM */
                if (def != data)
                        WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data);
        }
}

static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
                                                      bool enable)
{
        uint32_t def, data;

        if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
                def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
                /* unset CGCG override */
                data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK;
                if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
                        data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
                else
                        data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
                /* update CGCG and CGLS override bits */
                if (def != data)
                        WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);

                /* enable cgcg FSM(0x0000363F) */
                def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);

                if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 1))
                        data = (0x2000 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
                                RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
                else
                        data = (0x36 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
                                RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
                if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
                        data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
                                RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
                if (def != data)
                        WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data);

                /* set IDLE_POLL_COUNT(0x00900100) */
                def = RREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL);
                data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
                        (0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
                if (def != data)
                        WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL, data);
        } else {
                def = data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
                /* reset CGCG/CGLS bits */
                data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
                /* disable cgcg and cgls in FSM */
                if (def != data)
                        WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data);
        }
}

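/*
 * Ordering matters here: on enable, MGCG/MGLS must be programmed before
 * CGCG/CGLS; on disable the sequence is reversed. The whole update runs
 * with the RLC in safe mode so clock state does not change while the
 * gating registers are reprogrammed.
 */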
static int gfx_v9_0_update_gfx_clock_gating(struct amdgpu_device *adev,
                                            bool enable)
{
        amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
        if (enable) {
                /* CGCG/CGLS should be enabled after MGCG/MGLS
                 * ===  MGCG + MGLS ===
                 */
                gfx_v9_0_update_medium_grain_clock_gating(adev, enable);
                /* ===  CGCG/CGLS for GFX 3D only === */
                gfx_v9_0_update_3d_clock_gating(adev, enable);
                /* ===  CGCG + CGLS === */
                gfx_v9_0_update_coarse_grain_clock_gating(adev, enable);
        } else {
                /* CGCG/CGLS should be disabled before MGCG/MGLS
                 * ===  CGCG + CGLS ===
                 */
                gfx_v9_0_update_coarse_grain_clock_gating(adev, enable);
                /* ===  CGCG/CGLS for GFX 3D only === */
                gfx_v9_0_update_3d_clock_gating(adev, enable);
                /* ===  MGCG + MGLS === */
                gfx_v9_0_update_medium_grain_clock_gating(adev, enable);
        }
        amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
        return 0;
}

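/*
 * Route RLC SPM (streaming performance monitor) memory traffic to @vmid.
 * In SR-IOV one-VF mode the register is accessed directly instead of
 * going through the KIQ.
 */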
static void gfx_v9_0_update_spm_vmid_internal(struct amdgpu_device *adev,
                                              unsigned int vmid)
{
        u32 reg, data;

        reg = SOC15_REG_OFFSET(GC, 0, mmRLC_SPM_MC_CNTL);
        if (amdgpu_sriov_is_pp_one_vf(adev))
                data = RREG32_NO_KIQ(reg);
        else
                data = RREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL);

        data &= ~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK;
        data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT;

        if (amdgpu_sriov_is_pp_one_vf(adev))
                WREG32_SOC15_NO_KIQ(GC, 0, mmRLC_SPM_MC_CNTL, data);
        else
                WREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL, data);
}

static void gfx_v9_0_update_spm_vmid(struct amdgpu_device *adev, int xcc_id,
                struct amdgpu_ring *ring, unsigned int vmid)
{
        amdgpu_gfx_off_ctrl(adev, false);

        gfx_v9_0_update_spm_vmid_internal(adev, vmid);

        amdgpu_gfx_off_ctrl(adev, true);
}

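/*
 * Return true if @offset matches one of the RLCG-protected registers in
 * @entries. Each entry's absolute offset is reconstructed from the
 * per-hwip/instance/segment offset tables before comparing.
 */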
static bool gfx_v9_0_check_rlcg_range(struct amdgpu_device *adev,
                                        uint32_t offset,
                                        struct soc15_reg_rlcg *entries, int arr_size)
{
        int i;
        uint32_t reg;

        if (!entries)
                return false;

        for (i = 0; i < arr_size; i++) {
                const struct soc15_reg_rlcg *entry;

                entry = &entries[i];
                reg = adev->reg_offset[entry->hwip][entry->instance][entry->segment] + entry->reg;
                if (offset == reg)
                        return true;
        }

        return false;
}

static bool gfx_v9_0_is_rlcg_access_range(struct amdgpu_device *adev, u32 offset)
{
        return gfx_v9_0_check_rlcg_range(adev, offset,
                                        (void *)rlcg_access_gc_9_0,
                                        ARRAY_SIZE(rlcg_access_gc_9_0));
}

static const struct amdgpu_rlc_funcs gfx_v9_0_rlc_funcs = {
        .is_rlc_enabled = gfx_v9_0_is_rlc_enabled,
        .set_safe_mode = gfx_v9_0_set_safe_mode,
        .unset_safe_mode = gfx_v9_0_unset_safe_mode,
        .init = gfx_v9_0_rlc_init,
        .get_csb_size = gfx_v9_0_get_csb_size,
        .get_csb_buffer = gfx_v9_0_get_csb_buffer,
        .get_cp_table_num = gfx_v9_0_cp_jump_table_num,
        .resume = gfx_v9_0_rlc_resume,
        .stop = gfx_v9_0_rlc_stop,
        .reset = gfx_v9_0_rlc_reset,
        .start = gfx_v9_0_rlc_start,
        .update_spm_vmid = gfx_v9_0_update_spm_vmid,
        .is_rlcg_access_range = gfx_v9_0_is_rlcg_access_range,
};

static int gfx_v9_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
                                          enum amd_powergating_state state)
{
        struct amdgpu_device *adev = ip_block->adev;
        bool enable = (state == AMD_PG_STATE_GATE);

        switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
        case IP_VERSION(9, 2, 2):
        case IP_VERSION(9, 1, 0):
        case IP_VERSION(9, 3, 0):
                if (!enable)
                        amdgpu_gfx_off_ctrl_immediate(adev, false);

                if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) {
                        gfx_v9_0_enable_sck_slow_down_on_power_up(adev, true);
                        gfx_v9_0_enable_sck_slow_down_on_power_down(adev, true);
                } else {
                        gfx_v9_0_enable_sck_slow_down_on_power_up(adev, false);
                        gfx_v9_0_enable_sck_slow_down_on_power_down(adev, false);
                }

                if (adev->pg_flags & AMD_PG_SUPPORT_CP)
                        gfx_v9_0_enable_cp_power_gating(adev, true);
                else
                        gfx_v9_0_enable_cp_power_gating(adev, false);

                /* update gfx cgpg state */
                gfx_v9_0_update_gfx_cg_power_gating(adev, enable);

                /* update mgcg state */
                gfx_v9_0_update_gfx_mg_power_gating(adev, enable);

                if (enable)
                        amdgpu_gfx_off_ctrl_immediate(adev, true);
                break;
        case IP_VERSION(9, 2, 1):
                amdgpu_gfx_off_ctrl_immediate(adev, enable);
                break;
        default:
                break;
        }

        return 0;
}

static int gfx_v9_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
                                          enum amd_clockgating_state state)
{
        struct amdgpu_device *adev = ip_block->adev;

        if (amdgpu_sriov_vf(adev))
                return 0;

        switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
        case IP_VERSION(9, 0, 1):
        case IP_VERSION(9, 2, 1):
        case IP_VERSION(9, 4, 0):
        case IP_VERSION(9, 2, 2):
        case IP_VERSION(9, 1, 0):
        case IP_VERSION(9, 4, 1):
        case IP_VERSION(9, 3, 0):
        case IP_VERSION(9, 4, 2):
                gfx_v9_0_update_gfx_clock_gating(adev,
                                                 state == AMD_CG_STATE_GATE);
                break;
        default:
                break;
        }
        return 0;
}

static void gfx_v9_0_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags)
{
        struct amdgpu_device *adev = ip_block->adev;
        int data;

        if (amdgpu_sriov_vf(adev))
                *flags = 0;

        /* AMD_CG_SUPPORT_GFX_MGCG */
        data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE));
        if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK))
                *flags |= AMD_CG_SUPPORT_GFX_MGCG;

        /* AMD_CG_SUPPORT_GFX_CGCG */
        data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_CGCG_CGLS_CTRL));
        if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK)
                *flags |= AMD_CG_SUPPORT_GFX_CGCG;

        /* AMD_CG_SUPPORT_GFX_CGLS */
        if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK)
                *flags |= AMD_CG_SUPPORT_GFX_CGLS;

        /* AMD_CG_SUPPORT_GFX_RLC_LS */
        data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_MEM_SLP_CNTL));
        if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK)
                *flags |= AMD_CG_SUPPORT_GFX_RLC_LS | AMD_CG_SUPPORT_GFX_MGLS;

        /* AMD_CG_SUPPORT_GFX_CP_LS */
        data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmCP_MEM_SLP_CNTL));
        if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK)
                *flags |= AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_MGLS;

        if (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 1)) {
                /* AMD_CG_SUPPORT_GFX_3D_CGCG */
                data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D));
                if (data & RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK)
                        *flags |= AMD_CG_SUPPORT_GFX_3D_CGCG;

                /* AMD_CG_SUPPORT_GFX_3D_CGLS */
                if (data & RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK)
                        *flags |= AMD_CG_SUPPORT_GFX_3D_CGLS;
        }
}

static u64 gfx_v9_0_ring_get_rptr_gfx(struct amdgpu_ring *ring)
{
        return *ring->rptr_cpu_addr; /* gfx9 is 32bit rptr */
}

static u64 gfx_v9_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        u64 wptr;

        /* XXX check if swapping is necessary on BE */
        if (ring->use_doorbell) {
                wptr = atomic64_read((atomic64_t *)ring->wptr_cpu_addr);
        } else {
                wptr = RREG32_SOC15(GC, 0, mmCP_RB0_WPTR);
                wptr += (u64)RREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI) << 32;
        }

        return wptr;
}

static void gfx_v9_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        if (ring->use_doorbell) {
                /* XXX check if swapping is necessary on BE */
                atomic64_set((atomic64_t *)ring->wptr_cpu_addr, ring->wptr);
                WDOORBELL64(ring->doorbell_index, ring->wptr);
        } else {
                WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
                WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));
        }
}

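/*
 * Flush HDP writes from the ring: poke the NBIO HDP flush request
 * register and wait on the matching done bit with WAIT_REG_MEM.
 */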
static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        u32 ref_and_mask, reg_mem_engine;

        if (!adev->gfx.funcs->get_hdp_flush_mask) {
                dev_err(adev->dev, "%s: gfx hdp flush is not supported.\n", __func__);
                return;
        }

        adev->gfx.funcs->get_hdp_flush_mask(ring, &ref_and_mask, &reg_mem_engine);
        gfx_v9_0_wait_reg_mem(ring, reg_mem_engine, 0, 1,
                              adev->nbio.funcs->get_hdp_flush_req_offset(adev),
                              adev->nbio.funcs->get_hdp_flush_done_offset(adev),
                              ref_and_mask, ref_and_mask, 0x20);
}

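/*
 * Emit an INDIRECT_BUFFER packet for a gfx IB. The packet body is:
 *   dw1: IB GPU address bits [31:0] (must be dword aligned)
 *   dw2: IB GPU address bits [63:32]
 *   dw3: control - IB length in dwords, vmid in [31:24] and the MCBP
 *        preemption enable/resume bits
 * CE IBs use INDIRECT_BUFFER_CONST instead, and a DE meta packet is
 * emitted first when the IB is preemptible.
 */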
static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
                                        struct amdgpu_job *job,
                                        struct amdgpu_ib *ib,
                                        uint32_t flags)
{
        unsigned vmid = AMDGPU_JOB_GET_VMID(job);
        u32 header, control = 0;

        if (ib->flags & AMDGPU_IB_FLAG_CE)
                header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
        else
                header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);

        control |= ib->length_dw | (vmid << 24);

        if (ib->flags & AMDGPU_IB_FLAG_PREEMPT) {
                control |= INDIRECT_BUFFER_PRE_ENB(1);

                if (flags & AMDGPU_IB_PREEMPTED)
                        control |= INDIRECT_BUFFER_PRE_RESUME(1);

                if (!(ib->flags & AMDGPU_IB_FLAG_CE) && vmid)
                        gfx_v9_0_ring_emit_de_meta(ring,
                                                   (!amdgpu_sriov_vf(ring->adev) &&
                                                   flags & AMDGPU_IB_PREEMPTED) ?
                                                   true : false,
                                                   job->gds_size > 0 && job->gds_base != 0);
        }

        amdgpu_ring_write(ring, header);
        BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
        amdgpu_ring_write(ring,
#ifdef __BIG_ENDIAN
                (2 << 0) |
#endif
                lower_32_bits(ib->gpu_addr));
        amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
        amdgpu_ring_ib_on_emit_cntl(ring);
        amdgpu_ring_write(ring, control);
}

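/* Patch a previously emitted IB control dword to mark it as a resume. */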
static void gfx_v9_0_ring_patch_cntl(struct amdgpu_ring *ring,
                                     unsigned offset)
{
        u32 control = ring->ring[offset];

        control |= INDIRECT_BUFFER_PRE_RESUME(1);
        ring->ring[offset] = control;
}

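/*
 * Re-copy the CE meta payload from the CSA into the ring at @offset when
 * a preempted IB is resubmitted. The copy is split in two when the
 * payload wraps past the end of the ring buffer.
 */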
static void gfx_v9_0_ring_patch_ce_meta(struct amdgpu_ring *ring,
                                        unsigned offset)
{
        struct amdgpu_device *adev = ring->adev;
        void *ce_payload_cpu_addr;
        uint64_t payload_offset, payload_size;

        payload_size = sizeof(struct v9_ce_ib_state);

        payload_offset = offsetof(struct v9_gfx_meta_data, ce_payload);
        ce_payload_cpu_addr = adev->virt.csa_cpu_addr + payload_offset;

        if (offset + (payload_size >> 2) <= ring->buf_mask + 1) {
                memcpy((void *)&ring->ring[offset], ce_payload_cpu_addr, payload_size);
        } else {
                memcpy((void *)&ring->ring[offset], ce_payload_cpu_addr,
                       (ring->buf_mask + 1 - offset) << 2);
                payload_size -= (ring->buf_mask + 1 - offset) << 2;
                memcpy((void *)&ring->ring[0],
                       ce_payload_cpu_addr + ((ring->buf_mask + 1 - offset) << 2),
                       payload_size);
        }
}

static void gfx_v9_0_ring_patch_de_meta(struct amdgpu_ring *ring,
                                        unsigned offset)
{
        struct amdgpu_device *adev = ring->adev;
        void *de_payload_cpu_addr;
        uint64_t payload_offset, payload_size;

        payload_size = sizeof(struct v9_de_ib_state);

        payload_offset = offsetof(struct v9_gfx_meta_data, de_payload);
        de_payload_cpu_addr = adev->virt.csa_cpu_addr + payload_offset;

        ((struct v9_de_ib_state *)de_payload_cpu_addr)->ib_completion_status =
                IB_COMPLETION_STATUS_PREEMPTED;

        if (offset + (payload_size >> 2) <= ring->buf_mask + 1) {
                memcpy((void *)&ring->ring[offset], de_payload_cpu_addr, payload_size);
        } else {
                memcpy((void *)&ring->ring[offset], de_payload_cpu_addr,
                       (ring->buf_mask + 1 - offset) << 2);
                payload_size -= (ring->buf_mask + 1 - offset) << 2;
                memcpy((void *)&ring->ring[0],
                       de_payload_cpu_addr + ((ring->buf_mask + 1 - offset) << 2),
                       payload_size);
        }
}

static void gfx_v9_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
                                          struct amdgpu_job *job,
                                          struct amdgpu_ib *ib,
                                          uint32_t flags)
{
        unsigned vmid = AMDGPU_JOB_GET_VMID(job);
        u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);

        /* Currently there is a high likelihood of a wave ID mismatch
         * between ME and GDS, leading to a HW deadlock, because ME
         * generates different wave IDs than the GDS expects. This happens
         * randomly when at least 5 compute pipes use GDS ordered append.
         * The wave IDs generated by ME are also wrong after suspend/resume.
         * Those are probably bugs somewhere else in the kernel driver.
         *
         * Writing GDS_COMPUTE_MAX_WAVE_ID resets wave ID counters in ME and
         * GDS to 0 for this ring (me/pipe).
         */
        if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) {
                amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
                amdgpu_ring_write(ring, mmGDS_COMPUTE_MAX_WAVE_ID);
                amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id);
        }

        amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
        BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
        amdgpu_ring_write(ring,
#ifdef __BIG_ENDIAN
                                (2 << 0) |
#endif
                                lower_32_bits(ib->gpu_addr));
        amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
        amdgpu_ring_write(ring, control);
}

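/*
 * Emit an EOP fence using RELEASE_MEM: flush/invalidate the TC caches
 * (write-back only when AMDGPU_FENCE_FLAG_TC_WB_ONLY is set), then write
 * the 32- or 64-bit @seq to @addr and optionally raise an interrupt.
 */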
static void gfx_v9_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
                                     u64 seq, unsigned flags)
{
        bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
        bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
        bool writeback = flags & AMDGPU_FENCE_FLAG_TC_WB_ONLY;
        bool exec = flags & AMDGPU_FENCE_FLAG_EXEC;
        uint32_t dw2 = 0;

        /* RELEASE_MEM - flush caches, send int */
        amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6));

        if (writeback) {
                dw2 = EOP_TC_NC_ACTION_EN;
        } else {
                dw2 = EOP_TCL1_ACTION_EN | EOP_TC_ACTION_EN |
                                EOP_TC_MD_ACTION_EN;
        }
        dw2 |= EOP_TC_WB_ACTION_EN | EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
                                EVENT_INDEX(5);
        if (exec)
                dw2 |= EOP_EXEC;

        amdgpu_ring_write(ring, dw2);
        amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));

        /*
         * The address must be qword aligned for a 64-bit write, and dword
         * aligned when only the low 32 bits of data are sent (data high
         * is discarded).
         */
        if (write64bit)
                BUG_ON(addr & 0x7);
        else
                BUG_ON(addr & 0x3);
        amdgpu_ring_write(ring, lower_32_bits(addr));
        amdgpu_ring_write(ring, upper_32_bits(addr));
        amdgpu_ring_write(ring, lower_32_bits(seq));
        amdgpu_ring_write(ring, upper_32_bits(seq));
        amdgpu_ring_write(ring, 0);
}

static void gfx_v9_0_ring_emit_event_write(struct amdgpu_ring *ring,
                                           uint32_t event_type,
                                           uint32_t event_index)
{
        amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0));
        amdgpu_ring_write(ring, EVENT_TYPE(event_type) |
                          EVENT_INDEX(event_index));
}

static void gfx_v9_0_emit_mem_sync(struct amdgpu_ring *ring)
{
        const unsigned int cp_coher_cntl =
                        PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_ICACHE_ACTION_ENA(1) |
                        PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_KCACHE_ACTION_ENA(1) |
                        PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_ACTION_ENA(1) |
                        PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TCL1_ACTION_ENA(1) |
                        PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_WB_ACTION_ENA(1);

        /* ACQUIRE_MEM - make one or more surfaces valid for use by subsequent operations */
        amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 5));
        amdgpu_ring_write(ring, cp_coher_cntl); /* CP_COHER_CNTL */
        amdgpu_ring_write(ring, 0xffffffff);  /* CP_COHER_SIZE */
        amdgpu_ring_write(ring, 0xffffff);  /* CP_COHER_SIZE_HI */
        amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */
        amdgpu_ring_write(ring, 0);  /* CP_COHER_BASE_HI */
        amdgpu_ring_write(ring, 0x0000000A); /* POLL_INTERVAL */
}

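/*
 * Drain in-flight work: partial flush events for VS/PS (gfx rings only)
 * and CS, followed by an ACQUIRE_MEM to flush/invalidate the shader and
 * TC caches.
 */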
static void gfx_v9_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
        if (ring->funcs->type == AMDGPU_RING_TYPE_GFX) {
                gfx_v9_0_ring_emit_event_write(ring, VS_PARTIAL_FLUSH, 4);
                gfx_v9_0_ring_emit_event_write(ring, PS_PARTIAL_FLUSH, 4);
        }
        gfx_v9_0_ring_emit_event_write(ring, CS_PARTIAL_FLUSH, 4);
        gfx_v9_0_emit_mem_sync(ring);
}

static void gfx_v9_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
                                        unsigned vmid, uint64_t pd_addr)
{
        amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

        /* compute doesn't have PFP */
        if (ring->funcs->type == AMDGPU_RING_TYPE_GFX) {
                /* sync PFP to ME, otherwise we might get invalid PFP reads */
                amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
                amdgpu_ring_write(ring, 0x0);
        }
}

static u64 gfx_v9_0_ring_get_rptr_compute(struct amdgpu_ring *ring)
{
        return *ring->rptr_cpu_addr; /* gfx9 hardware is 32bit rptr */
}

static u64 gfx_v9_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
{
        u64 wptr;

        /* XXX check if swapping is necessary on BE */
        if (ring->use_doorbell)
                wptr = atomic64_read((atomic64_t *)ring->wptr_cpu_addr);
        else
                BUG();
        return wptr;
}

static void gfx_v9_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        /* XXX check if swapping is necessary on BE */
        if (ring->use_doorbell) {
                atomic64_set((atomic64_t *)ring->wptr_cpu_addr, ring->wptr);
                WDOORBELL64(ring->doorbell_index, ring->wptr);
        } else {
                BUG(); /* only DOORBELL method supported on gfx9 now */
        }
}

static void gfx_v9_0_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr,
                                         u64 seq, unsigned int flags)
{
        struct amdgpu_device *adev = ring->adev;

        /* we only allocate 32 bits for each seq wb address */
        BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

        /* write fence seq to the "addr" */
        amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
        amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
                                 WRITE_DATA_DST_SEL(5) | WR_CONFIRM));
        amdgpu_ring_write(ring, lower_32_bits(addr));
        amdgpu_ring_write(ring, upper_32_bits(addr));
        amdgpu_ring_write(ring, lower_32_bits(seq));

        if (flags & AMDGPU_FENCE_FLAG_INT) {
                /* set register to trigger INT */
                amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
                amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
                                         WRITE_DATA_DST_SEL(0) | WR_CONFIRM));
                amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, 0, mmCPC_INT_STATUS));
                amdgpu_ring_write(ring, 0);
                amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */
        }
}

static void gfx_v9_ring_emit_sb(struct amdgpu_ring *ring)
{
        amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
        amdgpu_ring_write(ring, 0);
}

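/*
 * Write the CE meta payload into the CSA with WRITE_DATA on the CE
 * engine. On a preemption resume the payload saved in the CSA CPU
 * mapping is replayed; otherwise a zeroed payload is written.
 */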
static void gfx_v9_0_ring_emit_ce_meta(struct amdgpu_ring *ring, bool resume)
{
        struct amdgpu_device *adev = ring->adev;
        struct v9_ce_ib_state ce_payload = {0};
        uint64_t offset, ce_payload_gpu_addr;
        void *ce_payload_cpu_addr;
        int cnt;

        cnt = (sizeof(ce_payload) >> 2) + 4 - 2;

        offset = offsetof(struct v9_gfx_meta_data, ce_payload);
        ce_payload_gpu_addr = amdgpu_csa_vaddr(ring->adev) + offset;
        ce_payload_cpu_addr = adev->virt.csa_cpu_addr + offset;

        amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
        amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) |
                                 WRITE_DATA_DST_SEL(8) |
                                 WR_CONFIRM) |
                                 WRITE_DATA_CACHE_POLICY(0));
        amdgpu_ring_write(ring, lower_32_bits(ce_payload_gpu_addr));
        amdgpu_ring_write(ring, upper_32_bits(ce_payload_gpu_addr));

        amdgpu_ring_ib_on_emit_ce(ring);

        if (resume)
                amdgpu_ring_write_multiple(ring, ce_payload_cpu_addr,
                                           sizeof(ce_payload) >> 2);
        else
                amdgpu_ring_write_multiple(ring, (void *)&ce_payload,
                                           sizeof(ce_payload) >> 2);
}

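/*
 * Mid-command-buffer preemption: assert the preempt condition, emit a
 * trailing fence with the EXEC flag set, ask the KIQ to preempt the
 * queue without unmapping it, then poll the trailing fence (up to
 * usec_timeout) to confirm the CP reached the preemption point.
 */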
static int gfx_v9_0_ring_preempt_ib(struct amdgpu_ring *ring)
{
        int i, r = 0;
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
        struct amdgpu_ring *kiq_ring = &kiq->ring;
        unsigned long flags;

        if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
                return -EINVAL;

        spin_lock_irqsave(&kiq->ring_lock, flags);

        if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) {
                spin_unlock_irqrestore(&kiq->ring_lock, flags);
                return -ENOMEM;
        }

        /* assert preemption condition */
        amdgpu_ring_set_preempt_cond_exec(ring, false);

        ring->trail_seq += 1;
        amdgpu_ring_alloc(ring, 13);
        gfx_v9_0_ring_emit_fence(ring, ring->trail_fence_gpu_addr,
                                 ring->trail_seq, AMDGPU_FENCE_FLAG_EXEC | AMDGPU_FENCE_FLAG_INT);

        /* assert IB preemption, emit the trailing fence */
        kiq->pmf->kiq_unmap_queues(kiq_ring, ring, PREEMPT_QUEUES_NO_UNMAP,
                                   ring->trail_fence_gpu_addr,
                                   ring->trail_seq);

        amdgpu_ring_commit(kiq_ring);
        spin_unlock_irqrestore(&kiq->ring_lock, flags);

        /* poll the trailing fence */
        for (i = 0; i < adev->usec_timeout; i++) {
                if (ring->trail_seq ==
                        le32_to_cpu(*ring->trail_fence_cpu_addr))
                        break;
                udelay(1);
        }

        if (i >= adev->usec_timeout) {
                r = -EINVAL;
                drm_warn(adev_to_drm(adev), "ring %d timed out preempting IB\n", ring->idx);
        }

        /* reset CP_VMID_PREEMPT after the trailing fence */
        amdgpu_ring_emit_wreg(ring,
                              SOC15_REG_OFFSET(GC, 0, mmCP_VMID_PREEMPT),
                              0x0);
        amdgpu_ring_commit(ring);

        /* deassert preemption condition */
        amdgpu_ring_set_preempt_cond_exec(ring, true);
        return r;
}

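/*
 * Write the DE meta payload into the CSA, including the GDS backup
 * address (carved out at the top of the CSA) when the job uses GDS.
 */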
static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume, bool usegds)
{
        struct amdgpu_device *adev = ring->adev;
        struct v9_de_ib_state de_payload = {0};
        uint64_t offset, gds_addr, de_payload_gpu_addr;
        void *de_payload_cpu_addr;
        int cnt;

        offset = offsetof(struct v9_gfx_meta_data, de_payload);
        de_payload_gpu_addr = amdgpu_csa_vaddr(ring->adev) + offset;
        de_payload_cpu_addr = adev->virt.csa_cpu_addr + offset;

        gds_addr = ALIGN(amdgpu_csa_vaddr(ring->adev) +
                         AMDGPU_CSA_SIZE - adev->gds.gds_size,
                         PAGE_SIZE);

        if (usegds) {
                de_payload.gds_backup_addrlo = lower_32_bits(gds_addr);
                de_payload.gds_backup_addrhi = upper_32_bits(gds_addr);
        }

        cnt = (sizeof(de_payload) >> 2) + 4 - 2;
        amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
        amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
                                 WRITE_DATA_DST_SEL(8) |
                                 WR_CONFIRM) |
                                 WRITE_DATA_CACHE_POLICY(0));
        amdgpu_ring_write(ring, lower_32_bits(de_payload_gpu_addr));
        amdgpu_ring_write(ring, upper_32_bits(de_payload_gpu_addr));

        amdgpu_ring_ib_on_emit_de(ring);
        if (resume)
                amdgpu_ring_write_multiple(ring, de_payload_cpu_addr,
                                           sizeof(de_payload) >> 2);
        else
                amdgpu_ring_write_multiple(ring, (void *)&de_payload,
                                           sizeof(de_payload) >> 2);
}

static void gfx_v9_0_ring_emit_frame_cntl(struct amdgpu_ring *ring, bool start,
                                   bool secure)
{
        uint32_t v = secure ? FRAME_TMZ : 0;

        amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0));
        amdgpu_ring_write(ring, v | FRAME_CMD(start ? 0 : 1));
}

static void gfx_v9_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
{
        uint32_t dw2 = 0;

        gfx_v9_0_ring_emit_ce_meta(ring,
                                   (!amdgpu_sriov_vf(ring->adev) &&
                                   flags & AMDGPU_IB_PREEMPTED) ? true : false);

        dw2 |= 0x80000000; /* set load_enable otherwise this packet is just NOPs */
        if (flags & AMDGPU_HAVE_CTX_SWITCH) {
                /* set load_global_config & load_global_uconfig */
                dw2 |= 0x8001;
                /* set load_cs_sh_regs */
                dw2 |= 0x01000000;
                /* set load_per_context_state & load_gfx_sh_regs for GFX */
                dw2 |= 0x10002;

                /* set load_ce_ram if a preamble is present */
                if (AMDGPU_PREAMBLE_IB_PRESENT & flags)
                        dw2 |= 0x10000000;
        } else {
                /* still load_ce_ram the first time a preamble is presented,
                 * even though no context switch happens.
                 */
                if (AMDGPU_PREAMBLE_IB_PRESENT_FIRST & flags)
                        dw2 |= 0x10000000;
        }

        amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
        amdgpu_ring_write(ring, dw2);
        amdgpu_ring_write(ring, 0);
}

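/*
 * Emit a COND_EXEC packet and return the ring offset of its count dword,
 * which the caller patches later with the number of dwords to skip when
 * the value at @addr reads back as zero.
 */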
static unsigned gfx_v9_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring,
                                                  uint64_t addr)
{
        unsigned ret;

        amdgpu_ring_write(ring, PACKET3(PACKET3_COND_EXEC, 3));
        amdgpu_ring_write(ring, lower_32_bits(addr));
        amdgpu_ring_write(ring, upper_32_bits(addr));
        /* discard following DWs if *cond_exec_gpu_addr == 0 */
        amdgpu_ring_write(ring, 0);
        ret = ring->wptr & ring->buf_mask;
        /* patch dummy value later */
        amdgpu_ring_write(ring, 0);
        return ret;
}

static void gfx_v9_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg,
                                    uint32_t reg_val_offs)
{
        struct amdgpu_device *adev = ring->adev;

        amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
        amdgpu_ring_write(ring, 0 |     /* src: register*/
                                (5 << 8) |      /* dst: memory */
                                (1 << 20));     /* write confirm */
        amdgpu_ring_write(ring, reg);
        amdgpu_ring_write(ring, 0);
        amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
                                reg_val_offs * 4));
        amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
                                reg_val_offs * 4));
}

static void gfx_v9_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
                                    uint32_t val)
{
        uint32_t cmd = 0;

        switch (ring->funcs->type) {
        case AMDGPU_RING_TYPE_GFX:
                cmd = WRITE_DATA_ENGINE_SEL(1) | WR_CONFIRM;
                break;
        case AMDGPU_RING_TYPE_KIQ:
                cmd = (1 << 16); /* no inc addr */
                break;
        default:
                cmd = WR_CONFIRM;
                break;
        }
        amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
        amdgpu_ring_write(ring, cmd);
        amdgpu_ring_write(ring, reg);
        amdgpu_ring_write(ring, 0);
        amdgpu_ring_write(ring, val);
}

static void gfx_v9_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
                                        uint32_t val, uint32_t mask)
{
        gfx_v9_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
}

static void gfx_v9_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
                                                  uint32_t reg0, uint32_t reg1,
                                                  uint32_t ref, uint32_t mask)
{
        int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
        struct amdgpu_device *adev = ring->adev;
        bool fw_version_ok = (ring->funcs->type == AMDGPU_RING_TYPE_GFX) ?
                adev->gfx.me_fw_write_wait : adev->gfx.mec_fw_write_wait;

        if (fw_version_ok)
                gfx_v9_0_wait_reg_mem(ring, usepfp, 0, 1, reg0, reg1,
                                      ref, mask, 0x20);
        else
                amdgpu_ring_emit_reg_write_reg_wait_helper(ring, reg0, reg1,
                                                           ref, mask);
}

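/*
 * Soft recovery: issue an SQ_CMD (CMD 0x3, a wave kill) restricted to
 * waves of @vmid, with the RLC held in safe mode while the command is
 * written.
 */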
static void gfx_v9_0_ring_soft_recovery(struct amdgpu_ring *ring, unsigned vmid)
{
        struct amdgpu_device *adev = ring->adev;
        uint32_t value = 0;

        value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03);
        value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
        value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
        value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
        amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
        WREG32_SOC15(GC, 0, mmSQ_CMD, value);
        amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
}

static void gfx_v9_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
                                                 enum amdgpu_interrupt_state state)
{
        switch (state) {
        case AMDGPU_IRQ_STATE_DISABLE:
        case AMDGPU_IRQ_STATE_ENABLE:
                WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
                               TIME_STAMP_INT_ENABLE,
                               state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
                break;
        default:
                break;
        }
}

static void gfx_v9_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
                                                     int me, int pipe,
                                                     enum amdgpu_interrupt_state state)
{
        u32 mec_int_cntl, mec_int_cntl_reg;

        /*
         * amdgpu controls only the first MEC. That's why this function only
         * handles the setting of interrupts for this specific MEC. All other
         * pipes' interrupts are set by amdkfd.
         */

        if (me == 1) {
                switch (pipe) {
                case 0:
                        mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE0_INT_CNTL);
                        break;
                case 1:
                        mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE1_INT_CNTL);
                        break;
                case 2:
                        mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE2_INT_CNTL);
                        break;
                case 3:
                        mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE3_INT_CNTL);
                        break;
                default:
                        DRM_DEBUG("invalid pipe %d\n", pipe);
                        return;
                }
        } else {
                DRM_DEBUG("invalid me %d\n", me);
                return;
        }

        switch (state) {
        case AMDGPU_IRQ_STATE_DISABLE:
                mec_int_cntl = RREG32_SOC15_IP(GC, mec_int_cntl_reg);
                mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
                                             TIME_STAMP_INT_ENABLE, 0);
                WREG32_SOC15_IP(GC, mec_int_cntl_reg, mec_int_cntl);
                break;
        case AMDGPU_IRQ_STATE_ENABLE:
                mec_int_cntl = RREG32_SOC15_IP(GC, mec_int_cntl_reg);
                mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
                                             TIME_STAMP_INT_ENABLE, 1);
                WREG32_SOC15_IP(GC, mec_int_cntl_reg, mec_int_cntl);
                break;
        default:
                break;
        }
}

static u32 gfx_v9_0_get_cpc_int_cntl(struct amdgpu_device *adev,
                                     int me, int pipe)
{
        /*
         * amdgpu controls only the first MEC. That's why this function only
         * handles the setting of interrupts for this specific MEC. All other
         * pipes' interrupts are set by amdkfd.
         */
        if (me != 1)
                return 0;

        switch (pipe) {
        case 0:
                return SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE0_INT_CNTL);
        case 1:
                return SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE1_INT_CNTL);
        case 2:
                return SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE2_INT_CNTL);
        case 3:
                return SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE3_INT_CNTL);
        default:
                return 0;
        }
}

static int gfx_v9_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
                                             struct amdgpu_irq_src *source,
                                             unsigned type,
                                             enum amdgpu_interrupt_state state)
{
        u32 cp_int_cntl_reg, cp_int_cntl;
        int i, j;

        switch (state) {
        case AMDGPU_IRQ_STATE_DISABLE:
        case AMDGPU_IRQ_STATE_ENABLE:
                WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
                               PRIV_REG_INT_ENABLE,
                               state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
                for (i = 0; i < adev->gfx.mec.num_mec; i++) {
                        for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) {
                                /* MECs start at 1 */
                                cp_int_cntl_reg = gfx_v9_0_get_cpc_int_cntl(adev, i + 1, j);

                                if (cp_int_cntl_reg) {
                                        cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
                                        cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_ME1_PIPE0_INT_CNTL,
                                                                    PRIV_REG_INT_ENABLE,
                                                                    state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
                                        WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
                                }
                        }
                }
                break;
        default:
                break;
        }

        return 0;
}

static int gfx_v9_0_set_bad_op_fault_state(struct amdgpu_device *adev,
                                           struct amdgpu_irq_src *source,
                                           unsigned type,
                                           enum amdgpu_interrupt_state state)
{
        u32 cp_int_cntl_reg, cp_int_cntl;
        int i, j;

        switch (state) {
        case AMDGPU_IRQ_STATE_DISABLE:
        case AMDGPU_IRQ_STATE_ENABLE:
                WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
                               OPCODE_ERROR_INT_ENABLE,
                               state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
                for (i = 0; i < adev->gfx.mec.num_mec; i++) {
                        for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) {
                                /* MECs start at 1 */
                                cp_int_cntl_reg = gfx_v9_0_get_cpc_int_cntl(adev, i + 1, j);

                                if (cp_int_cntl_reg) {
                                        cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
                                        cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_ME1_PIPE0_INT_CNTL,
                                                                    OPCODE_ERROR_INT_ENABLE,
                                                                    state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
                                        WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
                                }
                        }
                }
                break;
        default:
                break;
        }

        return 0;
}

static int gfx_v9_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
                                              struct amdgpu_irq_src *source,
                                              unsigned type,
                                              enum amdgpu_interrupt_state state)
{
        switch (state) {
        case AMDGPU_IRQ_STATE_DISABLE:
        case AMDGPU_IRQ_STATE_ENABLE:
                WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
                               PRIV_INSTR_INT_ENABLE,
                               state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
                break;
        default:
                break;
        }

        return 0;
}

#define ENABLE_ECC_ON_ME_PIPE(me, pipe)                         \
        WREG32_FIELD15(GC, 0, CP_ME##me##_PIPE##pipe##_INT_CNTL,\
                        CP_ECC_ERROR_INT_ENABLE, 1)

#define DISABLE_ECC_ON_ME_PIPE(me, pipe)                        \
        WREG32_FIELD15(GC, 0, CP_ME##me##_PIPE##pipe##_INT_CNTL,\
                        CP_ECC_ERROR_INT_ENABLE, 0)

static int gfx_v9_0_set_cp_ecc_error_state(struct amdgpu_device *adev,
                                              struct amdgpu_irq_src *source,
                                              unsigned type,
                                              enum amdgpu_interrupt_state state)
{
        switch (state) {
        case AMDGPU_IRQ_STATE_DISABLE:
                WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
                                CP_ECC_ERROR_INT_ENABLE, 0);
                DISABLE_ECC_ON_ME_PIPE(1, 0);
                DISABLE_ECC_ON_ME_PIPE(1, 1);
                DISABLE_ECC_ON_ME_PIPE(1, 2);
                DISABLE_ECC_ON_ME_PIPE(1, 3);
                break;

        case AMDGPU_IRQ_STATE_ENABLE:
                WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
                                CP_ECC_ERROR_INT_ENABLE, 1);
                ENABLE_ECC_ON_ME_PIPE(1, 0);
                ENABLE_ECC_ON_ME_PIPE(1, 1);
                ENABLE_ECC_ON_ME_PIPE(1, 2);
                ENABLE_ECC_ON_ME_PIPE(1, 3);
                break;
        default:
                break;
        }

        return 0;
}


static int gfx_v9_0_set_eop_interrupt_state(struct amdgpu_device *adev,
                                            struct amdgpu_irq_src *src,
                                            unsigned type,
                                            enum amdgpu_interrupt_state state)
{
        switch (type) {
        case AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP:
                gfx_v9_0_set_gfx_eop_interrupt_state(adev, state);
                break;
        case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
                gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 0, state);
                break;
        case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
                gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 1, state);
                break;
        case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
                gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 2, state);
                break;
        case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
                gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 3, state);
                break;
        case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP:
                gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 0, state);
                break;
        case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP:
                gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 1, state);
                break;
        case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP:
                gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 2, state);
                break;
        case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP:
                gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 3, state);
                break;
        default:
                break;
        }
        return 0;
}

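/*
 * EOP interrupt handler. The IV ring_id encodes the source queue:
 * bits [1:0] pipe, bits [3:2] me, bits [6:4] queue. me 0 is the gfx
 * ring (demuxed onto the software rings when MCBP is enabled); me 1/2
 * are the compute MECs.
 */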
static int gfx_v9_0_eop_irq(struct amdgpu_device *adev,
                            struct amdgpu_irq_src *source,
                            struct amdgpu_iv_entry *entry)
{
        int i;
        u8 me_id, pipe_id, queue_id;
        struct amdgpu_ring *ring;

        DRM_DEBUG("IH: CP EOP\n");
        me_id = (entry->ring_id & 0x0c) >> 2;
        pipe_id = (entry->ring_id & 0x03) >> 0;
        queue_id = (entry->ring_id & 0x70) >> 4;

        switch (me_id) {
        case 0:
                if (adev->gfx.num_gfx_rings) {
                        if (!adev->gfx.mcbp) {
                                amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
                        } else if (!amdgpu_mcbp_handle_trailing_fence_irq(&adev->gfx.muxer)) {
                                /* Fence signals are handled on the software rings */
                                for (i = 0; i < GFX9_NUM_SW_GFX_RINGS; i++)
                                        amdgpu_fence_process(&adev->gfx.sw_gfx_ring[i]);
                        }
                }
                break;
        case 1:
        case 2:
                for (i = 0; i < adev->gfx.num_compute_rings; i++) {
                        ring = &adev->gfx.compute_ring[i];
                        /* Per-queue interrupt is supported for MEC starting from VI.
                         * The interrupt can only be enabled/disabled per pipe instead
                         * of per queue.
                         */
                        if ((ring->me == me_id) && (ring->pipe == pipe_id) && (ring->queue == queue_id))
                                amdgpu_fence_process(ring);
                }
                break;
        }
        return 0;
}

static void gfx_v9_0_fault(struct amdgpu_device *adev,
                           struct amdgpu_iv_entry *entry)
{
        u8 me_id, pipe_id, queue_id;
        struct amdgpu_ring *ring;
        int i;

        me_id = (entry->ring_id & 0x0c) >> 2;
        pipe_id = (entry->ring_id & 0x03) >> 0;
        queue_id = (entry->ring_id & 0x70) >> 4;

        switch (me_id) {
        case 0:
                drm_sched_fault(&adev->gfx.gfx_ring[0].sched);
                break;
        case 1:
        case 2:
                for (i = 0; i < adev->gfx.num_compute_rings; i++) {
                        ring = &adev->gfx.compute_ring[i];
                        if (ring->me == me_id && ring->pipe == pipe_id &&
                            ring->queue == queue_id)
                                drm_sched_fault(&ring->sched);
                }
                break;
        }
}

static int gfx_v9_0_priv_reg_irq(struct amdgpu_device *adev,
                                 struct amdgpu_irq_src *source,
                                 struct amdgpu_iv_entry *entry)
{
        DRM_ERROR("Illegal register access in command stream\n");
        gfx_v9_0_fault(adev, entry);
        return 0;
}

static int gfx_v9_0_bad_op_irq(struct amdgpu_device *adev,
                               struct amdgpu_irq_src *source,
                               struct amdgpu_iv_entry *entry)
{
        DRM_ERROR("Illegal opcode in command stream\n");
        gfx_v9_0_fault(adev, entry);
        return 0;
}

static int gfx_v9_0_priv_inst_irq(struct amdgpu_device *adev,
                                  struct amdgpu_irq_src *source,
                                  struct amdgpu_iv_entry *entry)
{
        DRM_ERROR("Illegal instruction in command stream\n");
        gfx_v9_0_fault(adev, entry);
        return 0;
}


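/*
 * RAS EDC counter map: one entry per on-chip memory, naming the counter
 * register and its SEC (single error corrected) and DED (double error
 * detected) fields. Entries with a zero second field pair expose only a
 * single SED-style (parity) counter.
 */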
static const struct soc15_ras_field_entry gfx_v9_0_ras_fields[] = {
        { "CPC_SCRATCH", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_SCRATCH_CNT),
          SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, SEC_COUNT),
          SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, DED_COUNT)
        },
        { "CPC_UCODE", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_UCODE_CNT),
          SOC15_REG_FIELD(CPC_EDC_UCODE_CNT, SEC_COUNT),
          SOC15_REG_FIELD(CPC_EDC_UCODE_CNT, DED_COUNT)
        },
        { "CPF_ROQ_ME1", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT),
          SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, COUNT_ME1),
          0, 0
        },
        { "CPF_ROQ_ME2", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT),
          SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, COUNT_ME2),
          0, 0
        },
        { "CPF_TAG", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_TAG_CNT),
          SOC15_REG_FIELD(CPF_EDC_TAG_CNT, SEC_COUNT),
          SOC15_REG_FIELD(CPF_EDC_TAG_CNT, DED_COUNT)
        },
        { "CPG_DMA_ROQ", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_DMA_CNT),
          SOC15_REG_FIELD(CPG_EDC_DMA_CNT, ROQ_COUNT),
          0, 0
        },
        { "CPG_DMA_TAG", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_DMA_CNT),
          SOC15_REG_FIELD(CPG_EDC_DMA_CNT, TAG_SEC_COUNT),
          SOC15_REG_FIELD(CPG_EDC_DMA_CNT, TAG_DED_COUNT)
        },
        { "CPG_TAG", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_TAG_CNT),
          SOC15_REG_FIELD(CPG_EDC_TAG_CNT, SEC_COUNT),
          SOC15_REG_FIELD(CPG_EDC_TAG_CNT, DED_COUNT)
        },
        { "DC_CSINVOC", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_CSINVOC_CNT),
          SOC15_REG_FIELD(DC_EDC_CSINVOC_CNT, COUNT_ME1),
          0, 0
        },
        { "DC_RESTORE", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_RESTORE_CNT),
          SOC15_REG_FIELD(DC_EDC_RESTORE_CNT, COUNT_ME1),
          0, 0
        },
        { "DC_STATE", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_STATE_CNT),
          SOC15_REG_FIELD(DC_EDC_STATE_CNT, COUNT_ME1),
          0, 0
        },
        { "GDS_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT),
          SOC15_REG_FIELD(GDS_EDC_CNT, GDS_MEM_SEC),
          SOC15_REG_FIELD(GDS_EDC_CNT, GDS_MEM_DED)
        },
        { "GDS_INPUT_QUEUE", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT),
          SOC15_REG_FIELD(GDS_EDC_CNT, GDS_INPUT_QUEUE_SED),
          0, 0
        },
        { "GDS_ME0_CS_PIPE_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT),
          SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, ME0_CS_PIPE_MEM_SEC),
          SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, ME0_CS_PIPE_MEM_DED)
        },
        { "GDS_OA_PHY_PHY_CMD_RAM_MEM",
          SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT),
          SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_CMD_RAM_MEM_SEC),
          SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_CMD_RAM_MEM_DED)
        },
        { "GDS_OA_PHY_PHY_DATA_RAM_MEM",
          SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT),
          SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_DATA_RAM_MEM_SED),
          0, 0
        },
        { "GDS_OA_PIPE_ME1_PIPE0_PIPE_MEM",
          SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
          SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE0_PIPE_MEM_SEC),
          SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE0_PIPE_MEM_DED)
        },
        { "GDS_OA_PIPE_ME1_PIPE1_PIPE_MEM",
          SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
          SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE1_PIPE_MEM_SEC),
          SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE1_PIPE_MEM_DED)
        },
        { "GDS_OA_PIPE_ME1_PIPE2_PIPE_MEM",
          SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
          SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE2_PIPE_MEM_SEC),
          SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE2_PIPE_MEM_DED)
        },
        { "GDS_OA_PIPE_ME1_PIPE3_PIPE_MEM",
          SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT),
          SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE3_PIPE_MEM_SEC),
          SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE3_PIPE_MEM_DED)
        },
        { "SPI_SR_MEM", SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT),
          SOC15_REG_FIELD(SPI_EDC_CNT, SPI_SR_MEM_SED_COUNT),
          0, 0
        },
        { "TA_FS_DFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
          SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_DFIFO_SEC_COUNT),
          SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_DFIFO_DED_COUNT)
        },
        { "TA_FS_AFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
          SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_AFIFO_SED_COUNT),
          0, 0
        },
        { "TA_FL_LFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
          SOC15_REG_FIELD(TA_EDC_CNT, TA_FL_LFIFO_SED_COUNT),
          0, 0
        },
        { "TA_FX_LFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
          SOC15_REG_FIELD(TA_EDC_CNT, TA_FX_LFIFO_SED_COUNT),
          0, 0
        },
        { "TA_FS_CFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT),
          SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_CFIFO_SED_COUNT),
          0, 0
        },
        { "TCA_HOLE_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT),
          SOC15_REG_FIELD(TCA_EDC_CNT, HOLE_FIFO_SED_COUNT),
          0, 0
        },
        { "TCA_REQ_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT),
          SOC15_REG_FIELD(TCA_EDC_CNT, REQ_FIFO_SED_COUNT),
          0, 0
        },
        { "TCC_CACHE_DATA", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
          SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DATA_SEC_COUNT),
          SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DATA_DED_COUNT)
        },
        { "TCC_CACHE_DIRTY", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
          SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DIRTY_SEC_COUNT),
          SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DIRTY_DED_COUNT)
        },
        { "TCC_HIGH_RATE_TAG", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
          SOC15_REG_FIELD(TCC_EDC_CNT, HIGH_RATE_TAG_SEC_COUNT),
          SOC15_REG_FIELD(TCC_EDC_CNT, HIGH_RATE_TAG_DED_COUNT)
        },
        { "TCC_LOW_RATE_TAG", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
          SOC15_REG_FIELD(TCC_EDC_CNT, LOW_RATE_TAG_SEC_COUNT),
          SOC15_REG_FIELD(TCC_EDC_CNT, LOW_RATE_TAG_DED_COUNT)
        },
        { "TCC_SRC_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
          SOC15_REG_FIELD(TCC_EDC_CNT, SRC_FIFO_SEC_COUNT),
          SOC15_REG_FIELD(TCC_EDC_CNT, SRC_FIFO_DED_COUNT)
        },
        { "TCC_IN_USE_DEC", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
          SOC15_REG_FIELD(TCC_EDC_CNT, IN_USE_DEC_SED_COUNT),
          0, 0
        },
        { "TCC_IN_USE_TRANSFER", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
          SOC15_REG_FIELD(TCC_EDC_CNT, IN_USE_TRANSFER_SED_COUNT),
          0, 0
        },
        { "TCC_LATENCY_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
          SOC15_REG_FIELD(TCC_EDC_CNT, LATENCY_FIFO_SED_COUNT),
          0, 0
        },
        { "TCC_RETURN_DATA", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
          SOC15_REG_FIELD(TCC_EDC_CNT, RETURN_DATA_SED_COUNT),
          0, 0
        },
        { "TCC_RETURN_CONTROL", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
          SOC15_REG_FIELD(TCC_EDC_CNT, RETURN_CONTROL_SED_COUNT),
          0, 0
        },
        { "TCC_UC_ATOMIC_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT),
          SOC15_REG_FIELD(TCC_EDC_CNT, UC_ATOMIC_FIFO_SED_COUNT),
          0, 0
        },
        { "TCC_WRITE_RETURN", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
          SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_RETURN_SED_COUNT),
          0, 0
        },
        { "TCC_WRITE_CACHE_READ", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
          SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_CACHE_READ_SED_COUNT),
          0, 0
        },
        { "TCC_SRC_FIFO_NEXT_RAM", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
          SOC15_REG_FIELD(TCC_EDC_CNT2, SRC_FIFO_NEXT_RAM_SED_COUNT),
          0, 0
        },
        { "TCC_LATENCY_FIFO_NEXT_RAM", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
          SOC15_REG_FIELD(TCC_EDC_CNT2, LATENCY_FIFO_NEXT_RAM_SED_COUNT),
          0, 0
        },
        { "TCC_CACHE_TAG_PROBE_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
          SOC15_REG_FIELD(TCC_EDC_CNT2, CACHE_TAG_PROBE_FIFO_SED_COUNT),
          0, 0
        },
        { "TCC_WRRET_TAG_WRITE_RETURN", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
          SOC15_REG_FIELD(TCC_EDC_CNT2, WRRET_TAG_WRITE_RETURN_SED_COUNT),
          0, 0
        },
        { "TCC_ATOMIC_RETURN_BUFFER", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2),
          SOC15_REG_FIELD(TCC_EDC_CNT2, ATOMIC_RETURN_BUFFER_SED_COUNT),
          0, 0
        },
        { "TCI_WRITE_RAM", SOC15_REG_ENTRY(GC, 0, mmTCI_EDC_CNT),
          SOC15_REG_FIELD(TCI_EDC_CNT, WRITE_RAM_SED_COUNT),
          0, 0
        },
        { "TCP_CACHE_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
          SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CACHE_RAM_SEC_COUNT),
          SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CACHE_RAM_DED_COUNT)
        },
        { "TCP_LFIFO_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
          SOC15_REG_FIELD(TCP_EDC_CNT_NEW, LFIFO_RAM_SEC_COUNT),
          SOC15_REG_FIELD(TCP_EDC_CNT_NEW, LFIFO_RAM_DED_COUNT)
        },
        { "TCP_CMD_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
          SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CMD_FIFO_SED_COUNT),
          0, 0
        },
        { "TCP_VM_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
          SOC15_REG_FIELD(TCP_EDC_CNT_NEW, VM_FIFO_SEC_COUNT),
          0, 0
        },
        { "TCP_DB_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
          SOC15_REG_FIELD(TCP_EDC_CNT_NEW, DB_RAM_SED_COUNT),
          0, 0
        },
        { "TCP_UTCL1_LFIFO0", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
          SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO0_SEC_COUNT),
          SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO0_DED_COUNT)
        },
        { "TCP_UTCL1_LFIFO1", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW),
          SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO1_SEC_COUNT),
          SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO1_DED_COUNT)
        },
        { "TD_SS_FIFO_LO", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT),
          SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_LO_SEC_COUNT),
          SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_LO_DED_COUNT)
        },
        { "TD_SS_FIFO_HI", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT),
          SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_HI_SEC_COUNT),
          SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_HI_DED_COUNT)
        },
        { "TD_CS_FIFO", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT),
          SOC15_REG_FIELD(TD_EDC_CNT, CS_FIFO_SED_COUNT),
          0, 0
        },
        { "SQ_LDS_D", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
          SOC15_REG_FIELD(SQ_EDC_CNT, LDS_D_SEC_COUNT),
          SOC15_REG_FIELD(SQ_EDC_CNT, LDS_D_DED_COUNT)
        },
        { "SQ_LDS_I", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
          SOC15_REG_FIELD(SQ_EDC_CNT, LDS_I_SEC_COUNT),
          SOC15_REG_FIELD(SQ_EDC_CNT, LDS_I_DED_COUNT)
        },
        { "SQ_SGPR", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
          SOC15_REG_FIELD(SQ_EDC_CNT, SGPR_SEC_COUNT),
          SOC15_REG_FIELD(SQ_EDC_CNT, SGPR_DED_COUNT)
        },
        { "SQ_VGPR0", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
          SOC15_REG_FIELD(SQ_EDC_CNT, VGPR0_SEC_COUNT),
          SOC15_REG_FIELD(SQ_EDC_CNT, VGPR0_DED_COUNT)
        },
        { "SQ_VGPR1", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
          SOC15_REG_FIELD(SQ_EDC_CNT, VGPR1_SEC_COUNT),
          SOC15_REG_FIELD(SQ_EDC_CNT, VGPR1_DED_COUNT)
        },
        { "SQ_VGPR2", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
          SOC15_REG_FIELD(SQ_EDC_CNT, VGPR2_SEC_COUNT),
          SOC15_REG_FIELD(SQ_EDC_CNT, VGPR2_DED_COUNT)
        },
        { "SQ_VGPR3", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT),
          SOC15_REG_FIELD(SQ_EDC_CNT, VGPR3_SEC_COUNT),
          SOC15_REG_FIELD(SQ_EDC_CNT, VGPR3_DED_COUNT)
        },
        { "SQC_DATA_CU0_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
          SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_WRITE_DATA_BUF_SEC_COUNT),
          SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_WRITE_DATA_BUF_DED_COUNT)
        },
        { "SQC_DATA_CU0_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
          SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_UTCL1_LFIFO_SEC_COUNT),
          SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_UTCL1_LFIFO_DED_COUNT)
        },
        { "SQC_DATA_CU1_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
          SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_WRITE_DATA_BUF_SEC_COUNT),
          SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_WRITE_DATA_BUF_DED_COUNT)
        },
        { "SQC_DATA_CU1_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
          SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_UTCL1_LFIFO_SEC_COUNT),
          SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_UTCL1_LFIFO_DED_COUNT)
        },
        { "SQC_DATA_CU2_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
          SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_WRITE_DATA_BUF_SEC_COUNT),
          SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_WRITE_DATA_BUF_DED_COUNT)
        },
        { "SQC_DATA_CU2_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT),
          SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_UTCL1_LFIFO_SEC_COUNT),
          SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_UTCL1_LFIFO_DED_COUNT)
        },
        { "SQC_INST_BANKA_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
          SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_TAG_RAM_SEC_COUNT),
          SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_TAG_RAM_DED_COUNT)
        },
        { "SQC_INST_BANKA_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
          SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_BANK_RAM_SEC_COUNT),
          SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_BANK_RAM_DED_COUNT)
        },
        { "SQC_DATA_BANKA_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
          SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_TAG_RAM_SEC_COUNT),
          SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_TAG_RAM_DED_COUNT)
        },
        { "SQC_DATA_BANKA_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
          SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_BANK_RAM_SEC_COUNT),
          SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_BANK_RAM_DED_COUNT)
        },
        { "SQC_INST_BANKA_UTCL1_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
          SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_UTCL1_MISS_FIFO_SED_COUNT),
          0, 0
        },
        { "SQC_INST_BANKA_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
          SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_MISS_FIFO_SED_COUNT),
          0, 0
        },
        { "SQC_DATA_BANKA_HIT_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
          SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_HIT_FIFO_SED_COUNT),
          0, 0
        },
        { "SQC_DATA_BANKA_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
          SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_MISS_FIFO_SED_COUNT),
          0, 0
        },
        { "SQC_DATA_BANKA_DIRTY_BIT_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
          SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_DIRTY_BIT_RAM_SED_COUNT),
          0, 0
        },
        { "SQC_INST_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2),
          SOC15_REG_FIELD(SQC_EDC_CNT2, INST_UTCL1_LFIFO_SEC_COUNT),
          SOC15_REG_FIELD(SQC_EDC_CNT2, INST_UTCL1_LFIFO_DED_COUNT)
        },
        { "SQC_INST_BANKB_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
          SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_TAG_RAM_SEC_COUNT),
          SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_TAG_RAM_DED_COUNT)
        },
        { "SQC_INST_BANKB_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
          SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_BANK_RAM_SEC_COUNT),
          SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_BANK_RAM_DED_COUNT)
        },
        { "SQC_DATA_BANKB_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
          SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_TAG_RAM_SEC_COUNT),
          SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_TAG_RAM_DED_COUNT)
        },
        { "SQC_DATA_BANKB_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
          SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_BANK_RAM_SEC_COUNT),
          SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_BANK_RAM_DED_COUNT)
        },
        { "SQC_INST_BANKB_UTCL1_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
          SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_UTCL1_MISS_FIFO_SED_COUNT),
          0, 0
        },
        { "SQC_INST_BANKB_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
          SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_MISS_FIFO_SED_COUNT),
          0, 0
        },
        { "SQC_DATA_BANKB_HIT_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
          SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_HIT_FIFO_SED_COUNT),
          0, 0
        },
        { "SQC_DATA_BANKB_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
          SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_MISS_FIFO_SED_COUNT),
          0, 0
        },
        { "SQC_DATA_BANKB_DIRTY_BIT_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3),
          SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_DIRTY_BIT_RAM_SED_COUNT),
          0, 0
        },
        { "EA_DRAMRD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
          SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
          SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT)
        },
        { "EA_DRAMWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
          SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT),
          SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT)
        },
        { "EA_DRAMWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
          SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT),
          SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT)
        },
        { "EA_RRET_TAGMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
          SOC15_REG_FIELD(GCEA_EDC_CNT, RRET_TAGMEM_SEC_COUNT),
          SOC15_REG_FIELD(GCEA_EDC_CNT, RRET_TAGMEM_DED_COUNT)
        },
        { "EA_WRET_TAGMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
          SOC15_REG_FIELD(GCEA_EDC_CNT, WRET_TAGMEM_SEC_COUNT),
          SOC15_REG_FIELD(GCEA_EDC_CNT, WRET_TAGMEM_DED_COUNT)
        },
        { "EA_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
          SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT),
          0, 0
        },
        { "EA_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
          SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT),
          0, 0
        },
        { "EA_IORD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
          SOC15_REG_FIELD(GCEA_EDC_CNT, IORD_CMDMEM_SED_COUNT),
          0, 0
        },
        { "EA_IOWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
          SOC15_REG_FIELD(GCEA_EDC_CNT, IOWR_CMDMEM_SED_COUNT),
          0, 0
        },
        { "EA_IOWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT),
          SOC15_REG_FIELD(GCEA_EDC_CNT, IOWR_DATAMEM_SED_COUNT),
          0, 0
        },
        { "GMIRD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
          SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT),
          SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT)
        },
        { "GMIWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
          SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT),
          SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT)
        },
        { "GMIWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
          SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT),
          SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT)
        },
        { "GMIRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
          SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT),
          0, 0
        },
        { "GMIWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
          SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT),
          0, 0
        },
        { "MAM_D0MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
          SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D0MEM_SED_COUNT),
          0, 0
        },
        { "MAM_D1MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
          SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D1MEM_SED_COUNT),
          0, 0
        },
        { "MAM_D2MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
          SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D2MEM_SED_COUNT),
          0, 0
        },
        { "MAM_D3MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2),
          SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D3MEM_SED_COUNT),
          0, 0
        }
};

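/*
 * Inject a RAS error into a gfx sub-block. The request is validated
 * against the ras_gfx_subblocks table (both the HW- and SW-supported
 * error types) and then handed to the RAS TA through the PSP.
 */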
static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev,
                                     void *inject_if, uint32_t instance_mask)
{
        struct ras_inject_if *info = (struct ras_inject_if *)inject_if;
        int ret;
        struct ta_ras_trigger_error_input block_info = { 0 };

        if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
                return -EINVAL;

        if (info->head.sub_block_index >= ARRAY_SIZE(ras_gfx_subblocks))
                return -EINVAL;

        if (!ras_gfx_subblocks[info->head.sub_block_index].name)
                return -EPERM;

        if (!(ras_gfx_subblocks[info->head.sub_block_index].hw_supported_error_type &
              info->head.type)) {
                DRM_ERROR("GFX Subblock %s, hardware do not support type 0x%x\n",
                        ras_gfx_subblocks[info->head.sub_block_index].name,
                        info->head.type);
                return -EPERM;
        }

        if (!(ras_gfx_subblocks[info->head.sub_block_index].sw_supported_error_type &
              info->head.type)) {
                DRM_ERROR("GFX Subblock %s, driver do not support type 0x%x\n",
                        ras_gfx_subblocks[info->head.sub_block_index].name,
                        info->head.type);
                return -EPERM;
        }

        block_info.block_id = amdgpu_ras_block_to_ta(info->head.block);
        block_info.sub_block_index =
                ras_gfx_subblocks[info->head.sub_block_index].ta_subblock;
        block_info.inject_error_type = amdgpu_ras_error_to_ta(info->head.type);
        block_info.address = info->address;
        block_info.value = info->value;

        mutex_lock(&adev->grbm_idx_mutex);
        ret = psp_ras_trigger_error(&adev->psp, &block_info, instance_mask);
        mutex_unlock(&adev->grbm_idx_mutex);

        return ret;
}

static const char * const vml2_mems[] = {
        "UTC_VML2_BANK_CACHE_0_BIGK_MEM0",
        "UTC_VML2_BANK_CACHE_0_BIGK_MEM1",
        "UTC_VML2_BANK_CACHE_0_4K_MEM0",
        "UTC_VML2_BANK_CACHE_0_4K_MEM1",
        "UTC_VML2_BANK_CACHE_1_BIGK_MEM0",
        "UTC_VML2_BANK_CACHE_1_BIGK_MEM1",
        "UTC_VML2_BANK_CACHE_1_4K_MEM0",
        "UTC_VML2_BANK_CACHE_1_4K_MEM1",
        "UTC_VML2_BANK_CACHE_2_BIGK_MEM0",
        "UTC_VML2_BANK_CACHE_2_BIGK_MEM1",
        "UTC_VML2_BANK_CACHE_2_4K_MEM0",
        "UTC_VML2_BANK_CACHE_2_4K_MEM1",
        "UTC_VML2_BANK_CACHE_3_BIGK_MEM0",
        "UTC_VML2_BANK_CACHE_3_BIGK_MEM1",
        "UTC_VML2_BANK_CACHE_3_4K_MEM0",
        "UTC_VML2_BANK_CACHE_3_4K_MEM1",
};

static const char * const vml2_walker_mems[] = {
        "UTC_VML2_CACHE_PDE0_MEM0",
        "UTC_VML2_CACHE_PDE0_MEM1",
        "UTC_VML2_CACHE_PDE1_MEM0",
        "UTC_VML2_CACHE_PDE1_MEM1",
        "UTC_VML2_CACHE_PDE2_MEM0",
        "UTC_VML2_CACHE_PDE2_MEM1",
        "UTC_VML2_RDIF_LOG_FIFO",
};

static const char * const atc_l2_cache_2m_mems[] = {
        "UTC_ATCL2_CACHE_2M_BANK0_WAY0_MEM",
        "UTC_ATCL2_CACHE_2M_BANK0_WAY1_MEM",
        "UTC_ATCL2_CACHE_2M_BANK1_WAY0_MEM",
        "UTC_ATCL2_CACHE_2M_BANK1_WAY1_MEM",
};

static const char * const atc_l2_cache_4k_mems[] = {
        "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM0",
        "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM1",
        "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM2",
        "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM3",
        "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM4",
        "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM5",
        "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM6",
        "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM7",
        "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM0",
        "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM1",
        "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM2",
        "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM3",
        "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM4",
        "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM5",
        "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM6",
        "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM7",
        "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM0",
        "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM1",
        "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM2",
        "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM3",
        "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM4",
        "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM5",
        "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM6",
        "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM7",
        "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM0",
        "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM1",
        "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM2",
        "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM3",
        "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM4",
        "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM5",
        "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM6",
        "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM7",
};

static int gfx_v9_0_query_utc_edc_status(struct amdgpu_device *adev,
                                         struct ras_err_data *err_data)
{
        uint32_t i, data;
        uint32_t sec_count, ded_count;

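        /* reset the EDC/ECC counters to zero before sampling them below */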
        WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255);
        WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT, 0);
        WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255);
        WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT, 0);
        WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255);
        WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT, 0);
        WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255);
        WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT, 0);

        for (i = 0; i < ARRAY_SIZE(vml2_mems); i++) {
                WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, i);
                data = RREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT);

                sec_count = REG_GET_FIELD(data, VM_L2_MEM_ECC_CNT, SEC_COUNT);
                if (sec_count) {
                        dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
                                "SEC %d\n", i, vml2_mems[i], sec_count);
                        err_data->ce_count += sec_count;
                }

                ded_count = REG_GET_FIELD(data, VM_L2_MEM_ECC_CNT, DED_COUNT);
                if (ded_count) {
                        dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
                                "DED %d\n", i, vml2_mems[i], ded_count);
                        err_data->ue_count += ded_count;
                }
        }

        for (i = 0; i < ARRAY_SIZE(vml2_walker_mems); i++) {
                WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, i);
                data = RREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT);

                sec_count = REG_GET_FIELD(data, VM_L2_WALKER_MEM_ECC_CNT,
                                                SEC_COUNT);
                if (sec_count) {
                        dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
                                "SEC %d\n", i, vml2_walker_mems[i], sec_count);
                        err_data->ce_count += sec_count;
                }

                ded_count = REG_GET_FIELD(data, VM_L2_WALKER_MEM_ECC_CNT,
                                                DED_COUNT);
                if (ded_count) {
                        dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
                                "DED %d\n", i, vml2_walker_mems[i], ded_count);
                        err_data->ue_count += ded_count;
                }
        }

        for (i = 0; i < ARRAY_SIZE(atc_l2_cache_2m_mems); i++) {
                WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, i);
                data = RREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT);

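                /* SEC count is in bits [14:13] (mask 0x00006000) */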
                sec_count = (data & 0x00006000L) >> 0xd;
                if (sec_count) {
                        dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
                                "SEC %d\n", i, atc_l2_cache_2m_mems[i],
                                sec_count);
                        err_data->ce_count += sec_count;
                }
        }

        for (i = 0; i < ARRAY_SIZE(atc_l2_cache_4k_mems); i++) {
                WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, i);
                data = RREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT);

                sec_count = (data & 0x00006000L) >> 0xd;
                if (sec_count) {
                        dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
                                "SEC %d\n", i, atc_l2_cache_4k_mems[i],
                                sec_count);
                        err_data->ce_count += sec_count;
                }

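                /* DED count is in bits [16:15] (mask 0x00018000) */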
                ded_count = (data & 0x00018000L) >> 0xf;
                if (ded_count) {
                        dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
                                "DED %d\n", i, atc_l2_cache_4k_mems[i],
                                ded_count);
                        err_data->ue_count += ded_count;
                }
        }

        WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255);
        WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255);
        WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255);
        WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255);

        return 0;
}

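/*
 * Match @reg against the gfx_v9_0_ras_fields table and accumulate the SEC
 * (correctable) and DED (uncorrectable) counts encoded in @value.
 */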
static int gfx_v9_0_ras_error_count(struct amdgpu_device *adev,
        const struct soc15_reg_entry *reg,
        uint32_t se_id, uint32_t inst_id, uint32_t value,
        uint32_t *sec_count, uint32_t *ded_count)
{
        uint32_t i;
        uint32_t sec_cnt, ded_cnt;

        for (i = 0; i < ARRAY_SIZE(gfx_v9_0_ras_fields); i++) {
                if (gfx_v9_0_ras_fields[i].reg_offset != reg->reg_offset ||
                        gfx_v9_0_ras_fields[i].seg != reg->seg ||
                        gfx_v9_0_ras_fields[i].inst != reg->inst)
                        continue;

                sec_cnt = (value &
                                gfx_v9_0_ras_fields[i].sec_count_mask) >>
                                gfx_v9_0_ras_fields[i].sec_count_shift;
                if (sec_cnt) {
                        dev_info(adev->dev, "GFX SubBlock %s, "
                                "Instance[%d][%d], SEC %d\n",
                                gfx_v9_0_ras_fields[i].name,
                                se_id, inst_id,
                                sec_cnt);
                        *sec_count += sec_cnt;
                }

                ded_cnt = (value &
                                gfx_v9_0_ras_fields[i].ded_count_mask) >>
                                gfx_v9_0_ras_fields[i].ded_count_shift;
                if (ded_cnt) {
                        dev_info(adev->dev, "GFX SubBlock %s, "
                                "Instance[%d][%d], DED %d\n",
                                gfx_v9_0_ras_fields[i].name,
                                se_id, inst_id,
                                ded_cnt);
                        *ded_count += ded_cnt;
                }
        }

        return 0;
}

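/*
 * The EDC counters are cleared on read, so reading every counter register
 * once per SE/instance is enough to reset them.
 */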
static void gfx_v9_0_reset_ras_error_count(struct amdgpu_device *adev)
{
        int i, j, k;

        if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
                return;

        /* read back registers to clear the counters */
        mutex_lock(&adev->grbm_idx_mutex);
        for (i = 0; i < ARRAY_SIZE(gfx_v9_0_edc_counter_regs); i++) {
                for (j = 0; j < gfx_v9_0_edc_counter_regs[i].se_num; j++) {
                        for (k = 0; k < gfx_v9_0_edc_counter_regs[i].instance; k++) {
                                amdgpu_gfx_select_se_sh(adev, j, 0x0, k, 0);
                                RREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_0_edc_counter_regs[i]));
                        }
                }
        }
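        /* restore GRBM_GFX_INDEX to broadcast (all SEs/SHs/instances) */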
        WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xe0000000);
        mutex_unlock(&adev->grbm_idx_mutex);

        WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255);
        WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT, 0);
        WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255);
        WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT, 0);
        WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255);
        WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT, 0);
        WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255);
        WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT, 0);

        for (i = 0; i < ARRAY_SIZE(vml2_mems); i++) {
                WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, i);
                RREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT);
        }

        for (i = 0; i < ARRAY_SIZE(vml2_walker_mems); i++) {
                WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, i);
                RREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT);
        }

        for (i = 0; i < ARRAY_SIZE(atc_l2_cache_2m_mems); i++) {
                WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, i);
                RREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT);
        }

        for (i = 0; i < ARRAY_SIZE(atc_l2_cache_4k_mems); i++) {
                WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, i);
                RREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT);
        }

        WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255);
        WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255);
        WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255);
        WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255);
}

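/*
 * Walk all EDC counter registers across every SE/instance, fold SEC counts
 * into ce_count and DED counts into ue_count, then add the UTC/ATC L2
 * status on top.
 */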
static void gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
                                          void *ras_error_status)
{
        struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
        uint32_t sec_count = 0, ded_count = 0;
        uint32_t i, j, k;
        uint32_t reg_value;

        if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
                return;

        err_data->ue_count = 0;
        err_data->ce_count = 0;

        mutex_lock(&adev->grbm_idx_mutex);

        for (i = 0; i < ARRAY_SIZE(gfx_v9_0_edc_counter_regs); i++) {
                for (j = 0; j < gfx_v9_0_edc_counter_regs[i].se_num; j++) {
                        for (k = 0; k < gfx_v9_0_edc_counter_regs[i].instance; k++) {
                                amdgpu_gfx_select_se_sh(adev, j, 0, k, 0);
                                reg_value =
                                        RREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_0_edc_counter_regs[i]));
                                if (reg_value)
                                        gfx_v9_0_ras_error_count(adev,
                                                &gfx_v9_0_edc_counter_regs[i],
                                                j, k, reg_value,
                                                &sec_count, &ded_count);
                        }
                }
        }

        err_data->ce_count += sec_count;
        err_data->ue_count += ded_count;

        amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
        mutex_unlock(&adev->grbm_idx_mutex);

        gfx_v9_0_query_utc_edc_status(adev, err_data);
}

static void gfx_v9_0_emit_wave_limit_cs(struct amdgpu_ring *ring,
                                        uint32_t pipe, bool enable)
{
        struct amdgpu_device *adev = ring->adev;
        uint32_t val;
        uint32_t wcl_cs_reg;

        /* mmSPI_WCL_PIPE_PERCENT_CS[0-7]_DEFAULT values are all the same */
        val = enable ? 0x1 : mmSPI_WCL_PIPE_PERCENT_CS0_DEFAULT;

        switch (pipe) {
        case 0:
                wcl_cs_reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_CS0);
                break;
        case 1:
                wcl_cs_reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_CS1);
                break;
        case 2:
                wcl_cs_reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_CS2);
                break;
        case 3:
                wcl_cs_reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_CS3);
                break;
        default:
                DRM_DEBUG("invalid pipe %d\n", pipe);
                return;
        }

        amdgpu_ring_emit_wreg(ring, wcl_cs_reg, val);
}

static void gfx_v9_0_emit_wave_limit(struct amdgpu_ring *ring, bool enable)
{
        struct amdgpu_device *adev = ring->adev;
        uint32_t val;
        int i;

        /* mmSPI_WCL_PIPE_PERCENT_GFX is a 7-bit multiplier register used to
         * limit the number of gfx waves. Writing 0x1f (the low 5 bits set)
         * restricts gfx to roughly 25% of the GPU's resources.
         */
        val = enable ? 0x1f : mmSPI_WCL_PIPE_PERCENT_GFX_DEFAULT;
        amdgpu_ring_emit_wreg(ring,
                              SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_GFX),
                              val);

        /* Restrict waves for normal/low priority compute queues as well
         * to get the best QoS for high priority compute jobs.
         *
         * amdgpu controls only the first ME (CS pipes 0-3).
         */
        for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) {
                if (i != ring->pipe)
                        gfx_v9_0_emit_wave_limit_cs(ring, i, enable);
        }
}

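/*
 * Insert @num_nop NOP dwords. The PACKET3_NOP header itself counts as one
 * dword and its count field lets the CP skip up to 0x3ffe following dwords
 * in one shot, so large paddings cost a single packet.
 */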
static void gfx_v9_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop)
{
        /* Header itself is a NOP packet */
        if (num_nop == 1) {
                amdgpu_ring_write(ring, ring->funcs->nop);
                return;
        }

        /* Max HW optimization till 0x3ffe, followed by remaining NOPs one at a time */
        amdgpu_ring_write(ring, PACKET3(PACKET3_NOP, min(num_nop - 2, 0x3ffe)));

        /* Header is at index 0, followed by num_nop - 1 NOP packets */
        amdgpu_ring_insert_nop(ring, num_nop - 1);
}

static void gfx_v9_0_ring_emit_wreg_me(struct amdgpu_ring *ring,
                                       uint32_t reg,
                                       uint32_t val)
{
        uint32_t cmd = 0;

        switch (ring->funcs->type) {
        case AMDGPU_RING_TYPE_KIQ:
                cmd = (1 << 16); /* no inc addr */
                break;
        default:
                cmd = WR_CONFIRM;
                break;
        }
        amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
        amdgpu_ring_write(ring, cmd);
        amdgpu_ring_write(ring, reg);
        amdgpu_ring_write(ring, 0);
        amdgpu_ring_write(ring, val);
}

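/*
 * Reset a gfx kernel queue: have the KIQ assert CP_VMID_RESET for @vmid,
 * then emit fence/wait pairs on the ring itself to drain it, clear
 * mmCP_VMID_RESET and re-enable the legacy pipeline before signalling
 * completion through the reset helpers.
 */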
static int gfx_v9_0_reset_kgq(struct amdgpu_ring *ring,
                              unsigned int vmid,
                              struct amdgpu_fence *timedout_fence)
{
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
        struct amdgpu_ring *kiq_ring = &kiq->ring;
        unsigned long flags;
        u32 tmp;
        int r;

        amdgpu_ring_reset_helper_begin(ring, timedout_fence);

        spin_lock_irqsave(&kiq->ring_lock, flags);

        if (amdgpu_ring_alloc(kiq_ring, 5)) {
                spin_unlock_irqrestore(&kiq->ring_lock, flags);
                return -ENOMEM;
        }

        /* send the reset - 5 */
        tmp = REG_SET_FIELD(0, CP_VMID_RESET, RESET_REQUEST, 1 << vmid);
        gfx_v9_0_ring_emit_wreg(kiq_ring,
                                SOC15_REG_OFFSET(GC, 0, mmCP_VMID_RESET), tmp);
        amdgpu_ring_commit(kiq_ring);
        r = amdgpu_ring_test_ring(kiq_ring);
        spin_unlock_irqrestore(&kiq->ring_lock, flags);
        if (r)
                return r;

        if (amdgpu_ring_alloc(ring, 8 + 7 + 5 + 2 + 8 + 7))
                return -ENOMEM;
        /* emit the fence to finish the reset - 8 */
        ring->trail_seq++;
        gfx_v9_0_ring_emit_fence(ring, ring->trail_fence_gpu_addr,
                                 ring->trail_seq, AMDGPU_FENCE_FLAG_EXEC);
        /* wait for the fence - 7 */
        gfx_v9_0_wait_reg_mem(ring, 0, 1, 0,
                              lower_32_bits(ring->trail_fence_gpu_addr),
                              upper_32_bits(ring->trail_fence_gpu_addr),
                              ring->trail_seq, 0xffffffff, 4);
        /* clear mmCP_VMID_RESET - 5 */
        gfx_v9_0_ring_emit_wreg_me(ring,
                                   SOC15_REG_OFFSET(GC, 0, mmCP_VMID_RESET), 0);
        /* event write ENABLE_LEGACY_PIPELINE - 2 */
        gfx_v9_0_ring_emit_event_write(ring, ENABLE_LEGACY_PIPELINE, 0);
        /* emit a regular fence - 8 */
        ring->trail_seq++;
        gfx_v9_0_ring_emit_fence(ring, ring->trail_fence_gpu_addr,
                                 ring->trail_seq, AMDGPU_FENCE_FLAG_EXEC);
        /* wait for the fence - 7 */
        gfx_v9_0_wait_reg_mem(ring, 1, 1, 0,
                              lower_32_bits(ring->trail_fence_gpu_addr),
                              upper_32_bits(ring->trail_fence_gpu_addr),
                              ring->trail_seq, 0xffffffff, 4);
        amdgpu_ring_commit(ring);
        /* wait for the commands to complete */
        r = amdgpu_ring_test_ring(ring);
        if (r)
                return r;

        return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}

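/*
 * Reset a compute kernel queue: unmap the queue through the KIQ, wait for
 * the HQD to deactivate, reinitialize the MQD and then map the queue again.
 */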
static int gfx_v9_0_reset_kcq(struct amdgpu_ring *ring,
                              unsigned int vmid,
                              struct amdgpu_fence *timedout_fence)
{
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
        struct amdgpu_ring *kiq_ring = &kiq->ring;
        unsigned long flags;
        int i, r;

        if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
                return -EINVAL;

        amdgpu_ring_reset_helper_begin(ring, timedout_fence);

        spin_lock_irqsave(&kiq->ring_lock, flags);

        if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) {
                spin_unlock_irqrestore(&kiq->ring_lock, flags);
                return -ENOMEM;
        }

        kiq->pmf->kiq_unmap_queues(kiq_ring, ring, RESET_QUEUES,
                                   0, 0);
        amdgpu_ring_commit(kiq_ring);

        spin_unlock_irqrestore(&kiq->ring_lock, flags);

        r = amdgpu_ring_test_ring(kiq_ring);
        if (r)
                return r;

        /* make sure dequeue is complete */
        amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
        mutex_lock(&adev->srbm_mutex);
        soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, 0);
        for (i = 0; i < adev->usec_timeout; i++) {
                if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
                        break;
                udelay(1);
        }
        if (i >= adev->usec_timeout)
                r = -ETIMEDOUT;
        soc15_grbm_select(adev, 0, 0, 0, 0, 0);
        mutex_unlock(&adev->srbm_mutex);
        amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
        if (r) {
                dev_err(adev->dev, "fail to wait on hqd deactive\n");
                return r;
        }

        r = gfx_v9_0_kcq_init_queue(ring, true);
        if (r) {
                dev_err(adev->dev, "fail to init kcq\n");
                return r;
        }
        spin_lock_irqsave(&kiq->ring_lock, flags);
        r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size);
        if (r) {
                spin_unlock_irqrestore(&kiq->ring_lock, flags);
                return -ENOMEM;
        }
        kiq->pmf->kiq_map_queues(kiq_ring, ring);
        amdgpu_ring_commit(kiq_ring);
        r = amdgpu_ring_test_ring(kiq_ring);
        spin_unlock_irqrestore(&kiq->ring_lock, flags);
        if (r) {
                DRM_ERROR("fail to remap queue\n");
                return r;
        }
        return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}

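/*
 * Print the register state captured by gfx_v9_ip_dump(): the core register
 * list first, then the saved registers of every mec/pipe/queue instance.
 */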
static void gfx_v9_ip_print(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
{
        struct amdgpu_device *adev = ip_block->adev;
        uint32_t i, j, k, reg, index = 0;
        uint32_t reg_count = ARRAY_SIZE(gc_reg_list_9);

        if (!adev->gfx.ip_dump_core)
                return;

        for (i = 0; i < reg_count; i++)
                drm_printf(p, "%-50s \t 0x%08x\n",
                           gc_reg_list_9[i].reg_name,
                           adev->gfx.ip_dump_core[i]);

        /* print compute queue registers for all instances */
        if (!adev->gfx.ip_dump_compute_queues)
                return;

        reg_count = ARRAY_SIZE(gc_cp_reg_list_9);
        drm_printf(p, "\nnum_mec: %d num_pipe: %d num_queue: %d\n",
                   adev->gfx.mec.num_mec,
                   adev->gfx.mec.num_pipe_per_mec,
                   adev->gfx.mec.num_queue_per_pipe);

        for (i = 0; i < adev->gfx.mec.num_mec; i++) {
                for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) {
                        for (k = 0; k < adev->gfx.mec.num_queue_per_pipe; k++) {
                                drm_printf(p, "\nmec %d, pipe %d, queue %d\n", i, j, k);
                                for (reg = 0; reg < reg_count; reg++) {
                                        if (i && gc_cp_reg_list_9[reg].reg_offset == mmCP_MEC_ME1_HEADER_DUMP)
                                                drm_printf(p, "%-50s \t 0x%08x\n",
                                                           "mmCP_MEC_ME2_HEADER_DUMP",
                                                           adev->gfx.ip_dump_compute_queues[index + reg]);
                                        else
                                                drm_printf(p, "%-50s \t 0x%08x\n",
                                                           gc_cp_reg_list_9[reg].reg_name,
                                                           adev->gfx.ip_dump_compute_queues[index + reg]);
                                }
                                index += reg_count;
                        }
                }
        }
}

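/*
 * Capture the core and per-queue compute registers. GFXOFF is disabled
 * around the reads so the GC register space stays accessible.
 */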
static void gfx_v9_ip_dump(struct amdgpu_ip_block *ip_block)
{
        struct amdgpu_device *adev = ip_block->adev;
        uint32_t i, j, k, reg, index = 0;
        uint32_t reg_count = ARRAY_SIZE(gc_reg_list_9);

        if (!adev->gfx.ip_dump_core || !adev->gfx.num_gfx_rings)
                return;

        amdgpu_gfx_off_ctrl(adev, false);
        for (i = 0; i < reg_count; i++)
                adev->gfx.ip_dump_core[i] = RREG32(SOC15_REG_ENTRY_OFFSET(gc_reg_list_9[i]));
        amdgpu_gfx_off_ctrl(adev, true);

        /* dump compute queue registers for all instances */
        if (!adev->gfx.ip_dump_compute_queues)
                return;

        reg_count = ARRAY_SIZE(gc_cp_reg_list_9);
        amdgpu_gfx_off_ctrl(adev, false);
        mutex_lock(&adev->srbm_mutex);
        for (i = 0; i < adev->gfx.mec.num_mec; i++) {
                for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) {
                        for (k = 0; k < adev->gfx.mec.num_queue_per_pipe; k++) {
                                /* ME0 is for GFX so start from 1 for CP */
                                soc15_grbm_select(adev, 1 + i, j, k, 0, 0);

                                for (reg = 0; reg < reg_count; reg++) {
                                        if (i && gc_cp_reg_list_9[reg].reg_offset == mmCP_MEC_ME1_HEADER_DUMP)
                                                adev->gfx.ip_dump_compute_queues[index + reg] =
                                                        RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_MEC_ME2_HEADER_DUMP));
                                        else
                                                adev->gfx.ip_dump_compute_queues[index + reg] =
                                                        RREG32(SOC15_REG_ENTRY_OFFSET(
                                                                       gc_cp_reg_list_9[reg]));
                                }
                                index += reg_count;
                        }
                }
        }
        soc15_grbm_select(adev, 0, 0, 0, 0, 0);
        mutex_unlock(&adev->srbm_mutex);
        amdgpu_gfx_off_ctrl(adev, true);
}

static void gfx_v9_0_ring_emit_cleaner_shader(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        /* Emit the cleaner shader */
        if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2))
                amdgpu_ring_write(ring, PACKET3(PACKET3_RUN_CLEANER_SHADER, 0));
        else
                amdgpu_ring_write(ring, PACKET3(PACKET3_RUN_CLEANER_SHADER_9_0, 0));

        amdgpu_ring_write(ring, 0);  /* RESERVED field, programmed to zero */
}

static void gfx_v9_0_ring_begin_use_compute(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_ip_block *gfx_block =
                amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);

        amdgpu_gfx_enforce_isolation_ring_begin_use(ring);

        /* Raven and PCO APUs seem to have stability issues
         * with compute when gfxoff and gfx pg are enabled.  Disable gfx pg
         * during submission and enable it again afterwards.
         */
        if (gfx_block && amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 1, 0))
                gfx_v9_0_set_powergating_state(gfx_block, AMD_PG_STATE_UNGATE);
}

static void gfx_v9_0_ring_end_use_compute(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_ip_block *gfx_block =
                amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);

        /* Raven and PCO APUs seem to have stability issues
         * with compute when gfxoff and gfx pg are enabled.  Disable gfx pg
         * during submission and enable it again afterwards.
         */
        if (gfx_block && amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 1, 0))
                gfx_v9_0_set_powergating_state(gfx_block, AMD_PG_STATE_GATE);

        amdgpu_gfx_enforce_isolation_ring_end_use(ring);
}

static const struct amd_ip_funcs gfx_v9_0_ip_funcs = {
        .name = "gfx_v9_0",
        .early_init = gfx_v9_0_early_init,
        .late_init = gfx_v9_0_late_init,
        .sw_init = gfx_v9_0_sw_init,
        .sw_fini = gfx_v9_0_sw_fini,
        .hw_init = gfx_v9_0_hw_init,
        .hw_fini = gfx_v9_0_hw_fini,
        .suspend = gfx_v9_0_suspend,
        .resume = gfx_v9_0_resume,
        .is_idle = gfx_v9_0_is_idle,
        .wait_for_idle = gfx_v9_0_wait_for_idle,
        .soft_reset = gfx_v9_0_soft_reset,
        .set_clockgating_state = gfx_v9_0_set_clockgating_state,
        .set_powergating_state = gfx_v9_0_set_powergating_state,
        .get_clockgating_state = gfx_v9_0_get_clockgating_state,
        .dump_ip_state = gfx_v9_ip_dump,
        .print_ip_state = gfx_v9_ip_print,
};

static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = {
        .type = AMDGPU_RING_TYPE_GFX,
        .align_mask = 0xff,
        .nop = PACKET3(PACKET3_NOP, 0x3FFF),
        .support_64bit_ptrs = true,
        .secure_submission_supported = true,
        .get_rptr = gfx_v9_0_ring_get_rptr_gfx,
        .get_wptr = gfx_v9_0_ring_get_wptr_gfx,
        .set_wptr = gfx_v9_0_ring_set_wptr_gfx,
        .emit_frame_size = /* totally 242 maximum if 16 IBs */
                5 +  /* COND_EXEC */
                13 +  /* PIPELINE_SYNC */
                SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
                SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
                2 + /* VM_FLUSH */
                8 +  /* FENCE for VM_FLUSH */
                20 + /* GDS switch */
                4 + /* double SWITCH_BUFFER,
                     * the first COND_EXEC jumps to the place just
                     * prior to this double SWITCH_BUFFER
                     */
                5 + /* COND_EXEC */
                7 +      /*     HDP_flush */
                4 +      /*     VGT_flush */
                14 + /* CE_META */
                31 + /* DE_META */
                3 + /* CNTX_CTRL */
                5 + /* HDP_INVL */
                8 + 8 + /* FENCE x2 */
                2 + /* SWITCH_BUFFER */
                7 + /* gfx_v9_0_emit_mem_sync */
                2, /* gfx_v9_0_ring_emit_cleaner_shader */
        .emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_gfx */
        .emit_ib = gfx_v9_0_ring_emit_ib_gfx,
        .emit_fence = gfx_v9_0_ring_emit_fence,
        .emit_pipeline_sync = gfx_v9_0_ring_emit_pipeline_sync,
        .emit_vm_flush = gfx_v9_0_ring_emit_vm_flush,
        .emit_gds_switch = gfx_v9_0_ring_emit_gds_switch,
        .emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
        .test_ring = gfx_v9_0_ring_test_ring,
        .insert_nop = gfx_v9_ring_insert_nop,
        .pad_ib = amdgpu_ring_generic_pad_ib,
        .emit_switch_buffer = gfx_v9_ring_emit_sb,
        .emit_cntxcntl = gfx_v9_ring_emit_cntxcntl,
        .init_cond_exec = gfx_v9_0_ring_emit_init_cond_exec,
        .preempt_ib = gfx_v9_0_ring_preempt_ib,
        .emit_frame_cntl = gfx_v9_0_ring_emit_frame_cntl,
        .emit_wreg = gfx_v9_0_ring_emit_wreg,
        .emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
        .emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
        .emit_mem_sync = gfx_v9_0_emit_mem_sync,
        .emit_cleaner_shader = gfx_v9_0_ring_emit_cleaner_shader,
        .reset = gfx_v9_0_reset_kgq,
        .begin_use = amdgpu_gfx_enforce_isolation_ring_begin_use,
        .end_use = amdgpu_gfx_enforce_isolation_ring_end_use,
};

static const struct amdgpu_ring_funcs gfx_v9_0_sw_ring_funcs_gfx = {
        .type = AMDGPU_RING_TYPE_GFX,
        .align_mask = 0xff,
        .nop = PACKET3(PACKET3_NOP, 0x3FFF),
        .support_64bit_ptrs = true,
        .secure_submission_supported = true,
        .get_rptr = amdgpu_sw_ring_get_rptr_gfx,
        .get_wptr = amdgpu_sw_ring_get_wptr_gfx,
        .set_wptr = amdgpu_sw_ring_set_wptr_gfx,
        .emit_frame_size = /* totally 242 maximum if 16 IBs */
                5 +  /* COND_EXEC */
                13 +  /* PIPELINE_SYNC */
                SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
                SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
                2 + /* VM_FLUSH */
                8 +  /* FENCE for VM_FLUSH */
                20 + /* GDS switch */
                4 + /* double SWITCH_BUFFER,
                     * the first COND_EXEC jumps to the place just
                     * prior to this double SWITCH_BUFFER
                     */
                5 + /* COND_EXEC */
                7 +      /*     HDP_flush */
                4 +      /*     VGT_flush */
                14 + /* CE_META */
                31 + /* DE_META */
                3 + /* CNTX_CTRL */
                5 + /* HDP_INVL */
                8 + 8 + /* FENCE x2 */
                2 + /* SWITCH_BUFFER */
                7 + /* gfx_v9_0_emit_mem_sync */
                2, /* gfx_v9_0_ring_emit_cleaner_shader */
        .emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_gfx */
        .emit_ib = gfx_v9_0_ring_emit_ib_gfx,
        .emit_fence = gfx_v9_0_ring_emit_fence,
        .emit_pipeline_sync = gfx_v9_0_ring_emit_pipeline_sync,
        .emit_vm_flush = gfx_v9_0_ring_emit_vm_flush,
        .emit_gds_switch = gfx_v9_0_ring_emit_gds_switch,
        .emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
        .test_ring = gfx_v9_0_ring_test_ring,
        .test_ib = gfx_v9_0_ring_test_ib,
        .insert_nop = gfx_v9_ring_insert_nop,
        .pad_ib = amdgpu_ring_generic_pad_ib,
        .emit_switch_buffer = gfx_v9_ring_emit_sb,
        .emit_cntxcntl = gfx_v9_ring_emit_cntxcntl,
        .init_cond_exec = gfx_v9_0_ring_emit_init_cond_exec,
        .emit_frame_cntl = gfx_v9_0_ring_emit_frame_cntl,
        .emit_wreg = gfx_v9_0_ring_emit_wreg,
        .emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
        .emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
        .soft_recovery = gfx_v9_0_ring_soft_recovery,
        .emit_mem_sync = gfx_v9_0_emit_mem_sync,
        .patch_cntl = gfx_v9_0_ring_patch_cntl,
        .patch_de = gfx_v9_0_ring_patch_de_meta,
        .patch_ce = gfx_v9_0_ring_patch_ce_meta,
        .emit_cleaner_shader = gfx_v9_0_ring_emit_cleaner_shader,
        .begin_use = amdgpu_gfx_enforce_isolation_ring_begin_use,
        .end_use = amdgpu_gfx_enforce_isolation_ring_end_use,
};

static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
        .type = AMDGPU_RING_TYPE_COMPUTE,
        .align_mask = 0xff,
        .nop = PACKET3(PACKET3_NOP, 0x3FFF),
        .support_64bit_ptrs = true,
        .get_rptr = gfx_v9_0_ring_get_rptr_compute,
        .get_wptr = gfx_v9_0_ring_get_wptr_compute,
        .set_wptr = gfx_v9_0_ring_set_wptr_compute,
        .emit_frame_size =
                20 + /* gfx_v9_0_ring_emit_gds_switch */
                7 + /* gfx_v9_0_ring_emit_hdp_flush */
                5 + /* hdp invalidate */
                9 + /* gfx_v9_0_ring_emit_pipeline_sync */
                SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
                SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
                8 + 8 + 8 + /* gfx_v9_0_ring_emit_fence x3 for user fence, vm fence */
                7 + /* gfx_v9_0_emit_mem_sync */
                5 + /* gfx_v9_0_emit_wave_limit for updating mmSPI_WCL_PIPE_PERCENT_GFX register */
                15 + /* for updating 3 mmSPI_WCL_PIPE_PERCENT_CS registers */
                2, /* gfx_v9_0_ring_emit_cleaner_shader */
        .emit_ib_size = 7, /* gfx_v9_0_ring_emit_ib_compute */
        .emit_ib = gfx_v9_0_ring_emit_ib_compute,
        .emit_fence = gfx_v9_0_ring_emit_fence,
        .emit_pipeline_sync = gfx_v9_0_ring_emit_pipeline_sync,
        .emit_vm_flush = gfx_v9_0_ring_emit_vm_flush,
        .emit_gds_switch = gfx_v9_0_ring_emit_gds_switch,
        .emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
        .test_ring = gfx_v9_0_ring_test_ring,
        .test_ib = gfx_v9_0_ring_test_ib,
        .insert_nop = gfx_v9_ring_insert_nop,
        .pad_ib = amdgpu_ring_generic_pad_ib,
        .emit_wreg = gfx_v9_0_ring_emit_wreg,
        .emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
        .emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
        .emit_mem_sync = gfx_v9_0_emit_mem_sync,
        .emit_wave_limit = gfx_v9_0_emit_wave_limit,
        .reset = gfx_v9_0_reset_kcq,
        .emit_cleaner_shader = gfx_v9_0_ring_emit_cleaner_shader,
        .begin_use = gfx_v9_0_ring_begin_use_compute,
        .end_use = gfx_v9_0_ring_end_use_compute,
};

static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
        .type = AMDGPU_RING_TYPE_KIQ,
        .align_mask = 0xff,
        .nop = PACKET3(PACKET3_NOP, 0x3FFF),
        .support_64bit_ptrs = true,
        .get_rptr = gfx_v9_0_ring_get_rptr_compute,
        .get_wptr = gfx_v9_0_ring_get_wptr_compute,
        .set_wptr = gfx_v9_0_ring_set_wptr_compute,
        .emit_frame_size =
                20 + /* gfx_v9_0_ring_emit_gds_switch */
                7 + /* gfx_v9_0_ring_emit_hdp_flush */
                5 + /* hdp invalidate */
                SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
                SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
                8 + 8 + 8, /* gfx_v9_0_ring_emit_fence_kiq x3 for user fence, vm fence */
        .emit_ib_size = 7, /* gfx_v9_0_ring_emit_ib_compute */
        .emit_fence = gfx_v9_0_ring_emit_fence_kiq,
        .test_ring = gfx_v9_0_ring_test_ring,
        .insert_nop = amdgpu_ring_insert_nop,
        .pad_ib = amdgpu_ring_generic_pad_ib,
        .emit_rreg = gfx_v9_0_ring_emit_rreg,
        .emit_wreg = gfx_v9_0_ring_emit_wreg,
        .emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
        .emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
        .emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
};

static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev)
{
        int i;

        adev->gfx.kiq[0].ring.funcs = &gfx_v9_0_ring_funcs_kiq;

        for (i = 0; i < adev->gfx.num_gfx_rings; i++)
                adev->gfx.gfx_ring[i].funcs = &gfx_v9_0_ring_funcs_gfx;

        if (adev->gfx.mcbp && adev->gfx.num_gfx_rings) {
                for (i = 0; i < GFX9_NUM_SW_GFX_RINGS; i++)
                        adev->gfx.sw_gfx_ring[i].funcs = &gfx_v9_0_sw_ring_funcs_gfx;
        }

        for (i = 0; i < adev->gfx.num_compute_rings; i++)
                adev->gfx.compute_ring[i].funcs = &gfx_v9_0_ring_funcs_compute;
}

static const struct amdgpu_irq_src_funcs gfx_v9_0_eop_irq_funcs = {
        .set = gfx_v9_0_set_eop_interrupt_state,
        .process = gfx_v9_0_eop_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v9_0_priv_reg_irq_funcs = {
        .set = gfx_v9_0_set_priv_reg_fault_state,
        .process = gfx_v9_0_priv_reg_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v9_0_bad_op_irq_funcs = {
        .set = gfx_v9_0_set_bad_op_fault_state,
        .process = gfx_v9_0_bad_op_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v9_0_priv_inst_irq_funcs = {
        .set = gfx_v9_0_set_priv_inst_fault_state,
        .process = gfx_v9_0_priv_inst_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v9_0_cp_ecc_error_irq_funcs = {
        .set = gfx_v9_0_set_cp_ecc_error_state,
        .process = amdgpu_gfx_cp_ecc_error_irq,
};

static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev)
{
        adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
        adev->gfx.eop_irq.funcs = &gfx_v9_0_eop_irq_funcs;

        adev->gfx.priv_reg_irq.num_types = 1;
        adev->gfx.priv_reg_irq.funcs = &gfx_v9_0_priv_reg_irq_funcs;

        adev->gfx.bad_op_irq.num_types = 1;
        adev->gfx.bad_op_irq.funcs = &gfx_v9_0_bad_op_irq_funcs;

        adev->gfx.priv_inst_irq.num_types = 1;
        adev->gfx.priv_inst_irq.funcs = &gfx_v9_0_priv_inst_irq_funcs;

        adev->gfx.cp_ecc_error_irq.num_types = 2; /*C5 ECC error and C9 FUE error*/
        adev->gfx.cp_ecc_error_irq.funcs = &gfx_v9_0_cp_ecc_error_irq_funcs;
}

static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev)
{
        switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
        case IP_VERSION(9, 0, 1):
        case IP_VERSION(9, 2, 1):
        case IP_VERSION(9, 4, 0):
        case IP_VERSION(9, 2, 2):
        case IP_VERSION(9, 1, 0):
        case IP_VERSION(9, 4, 1):
        case IP_VERSION(9, 3, 0):
        case IP_VERSION(9, 4, 2):
                adev->gfx.rlc.funcs = &gfx_v9_0_rlc_funcs;
                break;
        default:
                break;
        }
}

static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev)
{
        /* init asic gds info */
        switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
        case IP_VERSION(9, 0, 1):
        case IP_VERSION(9, 2, 1):
        case IP_VERSION(9, 4, 0):
                adev->gds.gds_size = 0x10000;
                break;
        case IP_VERSION(9, 2, 2):
        case IP_VERSION(9, 1, 0):
        case IP_VERSION(9, 4, 1):
                adev->gds.gds_size = 0x1000;
                break;
        case IP_VERSION(9, 4, 2):
                /* aldebaran removed all the GDS internal memory,
                 * only GWS opcodes such as barrier and semaphore
                 * are supported in the kernel.
                 */
                adev->gds.gds_size = 0;
                break;
        default:
                adev->gds.gds_size = 0x10000;
                break;
        }

        switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
        case IP_VERSION(9, 0, 1):
        case IP_VERSION(9, 4, 0):
                adev->gds.gds_compute_max_wave_id = 0x7ff;
                break;
        case IP_VERSION(9, 2, 1):
                adev->gds.gds_compute_max_wave_id = 0x27f;
                break;
        case IP_VERSION(9, 2, 2):
        case IP_VERSION(9, 1, 0):
                if (adev->apu_flags & AMD_APU_IS_RAVEN2)
                        adev->gds.gds_compute_max_wave_id = 0x77; /* raven2 */
                else
                        adev->gds.gds_compute_max_wave_id = 0x15f; /* raven1 */
                break;
        case IP_VERSION(9, 4, 1):
                adev->gds.gds_compute_max_wave_id = 0xfff;
                break;
        case IP_VERSION(9, 4, 2):
                /* deprecated for Aldebaran, no usage at all */
                adev->gds.gds_compute_max_wave_id = 0;
                break;
        default:
                /* this really depends on the chip */
                adev->gds.gds_compute_max_wave_id = 0x7ff;
                break;
        }

        adev->gds.gws_size = 64;
        adev->gds.oa_size = 16;
}

static void gfx_v9_0_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
                                                 u32 bitmap)
{
        u32 data;

        if (!bitmap)
                return;

        data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
        data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;

        WREG32_SOC15(GC, 0, mmGC_USER_SHADER_ARRAY_CONFIG, data);
}

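/*
 * Combine the fuse (CC) and driver (GC_USER) inactive-CU masks and return
 * the bitmap of active CUs for the currently selected SE/SH.
 */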
static u32 gfx_v9_0_get_cu_active_bitmap(struct amdgpu_device *adev)
{
        u32 data, mask;

        data = RREG32_SOC15(GC, 0, mmCC_GC_SHADER_ARRAY_CONFIG);
        data |= RREG32_SOC15(GC, 0, mmGC_USER_SHADER_ARRAY_CONFIG);

        data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
        data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;

        mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh);

        return (~data) & mask;
}

static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
                                 struct amdgpu_cu_info *cu_info)
{
        int i, j, k, counter, active_cu_number = 0;
        u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
        unsigned int disable_masks[4 * 4];

        if (!adev || !cu_info)
                return -EINVAL;

        /*
         * 16 comes from the 4*4 bitmap array size, which covers all gfx9 ASICs
         */
        if (adev->gfx.config.max_shader_engines *
                adev->gfx.config.max_sh_per_se > 16)
                return -EINVAL;

        amdgpu_gfx_parse_disable_cu(adev, disable_masks,
                                    adev->gfx.config.max_shader_engines,
                                    adev->gfx.config.max_sh_per_se);

        mutex_lock(&adev->grbm_idx_mutex);
        for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
                for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
                        mask = 1;
                        ao_bitmap = 0;
                        counter = 0;
                        amdgpu_gfx_select_se_sh(adev, i, j, 0xffffffff, 0);
                        gfx_v9_0_set_user_cu_inactive_bitmap(
                                adev, disable_masks[i * adev->gfx.config.max_sh_per_se + j]);
                        bitmap = gfx_v9_0_get_cu_active_bitmap(adev);

                        /*
                         * The bitmap (and ao_cu_bitmap) in the cu_info
                         * structure is a 4x4 array, which suits Vega ASICs
                         * with their 4*2 SE/SH layout.
                         * For Arcturus, the SE/SH layout changed to 8*1.
                         * To minimize the impact, we keep it compatible
                         * with the current bitmap array as below:
                         *    SE4,SH0 --> bitmap[0][1]
                         *    SE5,SH0 --> bitmap[1][1]
                         *    SE6,SH0 --> bitmap[2][1]
                         *    SE7,SH0 --> bitmap[3][1]
                         */
                        cu_info->bitmap[0][i % 4][j + i / 4] = bitmap;

                        for (k = 0; k < adev->gfx.config.max_cu_per_sh; k ++) {
                                if (bitmap & mask) {
                                        if (counter < adev->gfx.config.max_cu_per_sh)
                                                ao_bitmap |= mask;
                                        counter++;
                                }
                                mask <<= 1;
                        }
                        active_cu_number += counter;
                        if (i < 2 && j < 2)
                                ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
                        cu_info->ao_cu_bitmap[i % 4][j + i / 4] = ao_bitmap;
                }
        }
        amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
        mutex_unlock(&adev->grbm_idx_mutex);

        cu_info->number = active_cu_number;
        cu_info->ao_cu_mask = ao_cu_mask;
        cu_info->simd_per_cu = NUM_SIMD_PER_CU;

        return 0;
}

const struct amdgpu_ip_block_version gfx_v9_0_ip_block = {
        .type = AMD_IP_BLOCK_TYPE_GFX,
        .major = 9,
        .minor = 0,
        .rev = 0,
        .funcs = &gfx_v9_0_ip_funcs,
};