#include <linux/firmware.h>
#include <linux/pci.h>

#include <drm/drm_cache.h>

#include "amdgpu.h"
#include "amdgpu_atomfirmware.h"
#include "gmc_v12_0.h"
#include "gmc_v12_1.h"
#include "athub/athub_4_1_0_sh_mask.h"
#include "athub/athub_4_1_0_offset.h"
#include "oss/osssys_7_0_0_offset.h"
#include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
#include "soc24_enum.h"
#include "soc24.h"
#include "soc15d.h"
#include "soc15_common.h"
#include "nbif_v6_3_1.h"
#include "gfxhub_v12_0.h"
#include "gfxhub_v12_1.h"
#include "mmhub_v4_1_0.h"
#include "mmhub_v4_2_0.h"
#include "athub_v4_1_0.h"
#include "umc_v8_14.h"
static int gmc_v12_0_ecc_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
unsigned type,
enum amdgpu_interrupt_state state)
{
return 0;
}
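
/* Enable or disable VM fault reporting on the MMHUB and GFXHUB. */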
static int gmc_v12_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src, unsigned type,
enum amdgpu_interrupt_state state)
{
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB0(0), false);
if (!adev->in_s0ix)
amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB(0), false);
break;
case AMDGPU_IRQ_STATE_ENABLE:
amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB0(0), true);
if (!adev->in_s0ix)
amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB(0), true);
break;
default:
break;
}
return 0;
}
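
/* Decode a VM protection fault interrupt and report it. */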
static int gmc_v12_0_process_interrupt(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
struct amdgpu_vmhub *hub;
bool retry_fault = !!(entry->src_data[1] &
AMDGPU_GMC9_FAULT_SOURCE_DATA_RETRY);
bool write_fault = !!(entry->src_data[1] &
AMDGPU_GMC9_FAULT_SOURCE_DATA_WRITE);
uint32_t status = 0;
u64 addr;
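/* src_data[0] holds the faulting 4K page number, src_data[1] bits 3:0 the upper address bits. */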
addr = (u64)entry->src_data[0] << 12;
addr |= ((u64)entry->src_data[1] & 0xf) << 44;
if (entry->client_id == SOC21_IH_CLIENTID_VMC)
hub = &adev->vmhub[AMDGPU_MMHUB0(0)];
else
hub = &adev->vmhub[AMDGPU_GFXHUB(0)];
if (retry_fault) {
int ret = amdgpu_gmc_handle_retry_fault(adev, entry, addr, 0, 0,
write_fault);
if (ret == 1)
return 1;
}
if (!amdgpu_sriov_vf(adev)) {
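/*
 * Issue a dummy read to wait for the status register to
 * be updated to avoid reading an incorrect value due to
 * the new fast GRBM interface.
 */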
if (entry->vmid_src == AMDGPU_GFXHUB(0))
RREG32(hub->vm_l2_pro_fault_status);
status = RREG32(hub->vm_l2_pro_fault_status);
WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
amdgpu_vm_update_fault_cache(adev, entry->pasid, addr, status,
entry->vmid_src ? AMDGPU_MMHUB0(0) : AMDGPU_GFXHUB(0));
}
if (printk_ratelimit()) {
struct amdgpu_task_info *task_info;
dev_err(adev->dev,
"[%s] page fault (src_id:%u ring:%u vmid:%u pasid:%u)\n",
entry->vmid_src ? "mmhub" : "gfxhub",
entry->src_id, entry->ring_id, entry->vmid, entry->pasid);
task_info = amdgpu_vm_get_task_info_pasid(adev, entry->pasid);
if (task_info) {
amdgpu_vm_print_task_info(adev, task_info);
amdgpu_vm_put_task_info(task_info);
}
dev_err(adev->dev, " in page starting at address 0x%016llx from client %d\n",
addr, entry->client_id);
if (status != 0)
hub->vmhub_funcs->print_l2_protection_fault_status(adev, status);
}
return 0;
}

static const struct amdgpu_irq_src_funcs gmc_v12_0_irq_funcs = {
.set = gmc_v12_0_vm_fault_interrupt_state,
.process = gmc_v12_0_process_interrupt,
};

static const struct amdgpu_irq_src_funcs gmc_v12_0_ecc_funcs = {
.set = gmc_v12_0_ecc_interrupt_state,
.process = amdgpu_umc_process_ecc_irq,
};

static void gmc_v12_0_set_irq_funcs(struct amdgpu_device *adev)
{
adev->gmc.vm_fault.num_types = 1;
adev->gmc.vm_fault.funcs = &gmc_v12_0_irq_funcs;
if (!amdgpu_sriov_vf(adev)) {
adev->gmc.ecc_irq.num_types = 1;
adev->gmc.ecc_irq.funcs = &gmc_v12_0_ecc_funcs;
}
}
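
/* Only the MMHUB uses the invalidation semaphore, and only on bare metal. */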
static bool gmc_v12_0_use_invalidate_semaphore(struct amdgpu_device *adev,
uint32_t vmhub)
{
return ((vmhub == AMDGPU_MMHUB0(0)) &&
(!amdgpu_sriov_vf(adev)));
}
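
/* Look up the PASID currently mapped to @vmid through the IH VMID LUT. */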
static bool gmc_v12_0_get_vmid_pasid_mapping_info(
struct amdgpu_device *adev,
uint8_t vmid, uint16_t *p_pasid)
{
*p_pasid = RREG32(SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT) + vmid) & 0xffff;
return !!(*p_pasid);
}
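
/* Flush the TLB for @vmid on a single hub using direct MMIO register access. */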
static void gmc_v12_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
unsigned int vmhub, uint32_t flush_type)
{
bool use_semaphore = gmc_v12_0_use_invalidate_semaphore(adev, vmhub);
struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
u32 tmp;
const unsigned eng = 17;
unsigned int i;
unsigned char hub_ip = 0;
hub_ip = (vmhub == AMDGPU_GFXHUB(0)) ?
GC_HWIP : MMHUB_HWIP;
spin_lock(&adev->gmc.invalidate_lock);
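/*
 * Acquire the per-hub invalidation semaphore first so the flush is not
 * lost if the hub is power gated between request and acknowledge.
 */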
if (use_semaphore) {
for (i = 0; i < adev->usec_timeout; i++) {
tmp = RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_sem +
hub->eng_distance * eng, hub_ip);
if (tmp & 0x1)
break;
udelay(1);
}
if (i >= adev->usec_timeout)
dev_err(adev->dev,
"Timeout waiting for sem acquire in VM flush!\n");
}
WREG32_RLC_NO_KIQ(hub->vm_inv_eng0_req + hub->eng_distance * eng, inv_req, hub_ip);
for (i = 0; i < adev->usec_timeout; i++) {
tmp = RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_ack +
hub->eng_distance * eng, hub_ip);
tmp &= 1 << vmid;
if (tmp)
break;
udelay(1);
}
if (use_semaphore)
WREG32_RLC_NO_KIQ(hub->vm_inv_eng0_sem +
hub->eng_distance * eng, 0, hub_ip);
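/* Issue an additional private VM invalidation to the MMHUB. */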
if ((vmhub != AMDGPU_GFXHUB(0)) &&
(hub->vm_l2_bank_select_reserved_cid2) &&
!amdgpu_sriov_vf(adev)) {
inv_req = RREG32_NO_KIQ(hub->vm_l2_bank_select_reserved_cid2);
inv_req |= (1 << 25);
WREG32_NO_KIQ(hub->vm_l2_bank_select_reserved_cid2, inv_req);
RREG32_NO_KIQ(hub->vm_l2_bank_select_reserved_cid2);
}
spin_unlock(&adev->gmc.invalidate_lock);
if (i < adev->usec_timeout)
return;
dev_err(adev->dev, "Timeout waiting for VM flush ACK!\n");
}
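
/**
 * gmc_v12_0_flush_gpu_tlb - gart tlb flush callback
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 * @vmhub: which hub to flush
 * @flush_type: the flush type
 *
 * Flush the TLB for the requested page table.
 */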
static void gmc_v12_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
uint32_t vmhub, uint32_t flush_type)
{
if ((vmhub == AMDGPU_GFXHUB(0)) && !adev->gfx.is_poweron)
return;
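/* flush hdp cache */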
amdgpu_device_flush_hdp(adev, NULL);
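/*
 * Use the KIQ or MES firmware to issue the invalidation when a ring is
 * ready; direct MMIO access is not allowed under SRIOV at runtime.
 */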
if ((adev->gfx.kiq[0].ring.sched.ready || adev->mes.ring[0].sched.ready) &&
(amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev))) {
struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
const unsigned eng = 17;
u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
u32 req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
u32 ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;
amdgpu_gmc_fw_reg_write_reg_wait(adev, req, ack, inv_req,
1 << vmid, GET_INST(GC, 0));
return;
}
gmc_v12_0_flush_vm_hub(adev, vmid, vmhub, 0);
}
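
/**
 * gmc_v12_0_flush_gpu_tlb_pasid - tlb flush via pasid
 *
 * @adev: amdgpu_device pointer
 * @pasid: pasid to be flushed
 * @flush_type: the flush type
 * @all_hub: flush all hubs
 * @inst: which instance to use for the invalidation
 *
 * Flush the TLB for the requested pasid.
 */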
static void gmc_v12_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
uint16_t pasid, uint32_t flush_type,
bool all_hub, uint32_t inst)
{
uint16_t queried;
int vmid, i;
if (adev->enable_uni_mes && adev->mes.ring[AMDGPU_MES_SCHED_PIPE].sched.ready &&
(adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) >= 0x84) {
struct mes_inv_tlbs_pasid_input input = {0};
input.pasid = pasid;
input.flush_type = flush_type;
input.hub_id = AMDGPU_GFXHUB(0);
adev->mes.funcs->invalidate_tlbs_pasid(&adev->mes, &input);
if (all_hub) {
input.hub_id = AMDGPU_MMHUB0(0);
adev->mes.funcs->invalidate_tlbs_pasid(&adev->mes, &input);
}
return;
}
for (vmid = 1; vmid < 16; vmid++) {
bool valid;
valid = gmc_v12_0_get_vmid_pasid_mapping_info(adev, vmid,
&queried);
if (!valid || queried != pasid)
continue;
if (all_hub) {
for_each_set_bit(i, adev->vmhubs_mask,
AMDGPU_MAX_VMHUBS)
gmc_v12_0_flush_gpu_tlb(adev, vmid, i,
flush_type);
} else {
gmc_v12_0_flush_gpu_tlb(adev, vmid, AMDGPU_GFXHUB(0),
flush_type);
}
}
}
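
/* Emit the register writes that flush the TLB for @vmid as part of a ring's command stream. */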
static uint64_t gmc_v12_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
unsigned vmid, uint64_t pd_addr)
{
bool use_semaphore = gmc_v12_0_use_invalidate_semaphore(ring->adev, ring->vm_hub);
struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];
uint32_t req = hub->vmhub_funcs->get_invalidate_req(vmid, 0);
unsigned eng = ring->vm_inv_eng;
if (use_semaphore)
amdgpu_ring_emit_reg_wait(ring,
hub->vm_inv_eng0_sem +
hub->eng_distance * eng, 0x1, 0x1);
amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 +
(hub->ctx_addr_distance * vmid),
lower_32_bits(pd_addr));
amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 +
(hub->ctx_addr_distance * vmid),
upper_32_bits(pd_addr));
amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req +
hub->eng_distance * eng,
hub->vm_inv_eng0_ack +
hub->eng_distance * eng,
req, 1 << vmid);
if (use_semaphore)
amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem +
hub->eng_distance * eng, 0);
return pd_addr;
}
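
/* Emit the VMID-to-PASID mapping update for the ring's hub. */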
static void gmc_v12_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
unsigned pasid)
{
struct amdgpu_device *adev = ring->adev;
uint32_t reg;
if (ring->vm_hub == AMDGPU_GFXHUB(0))
reg = SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT) + vmid;
else
reg = SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT_MM) + vmid;
amdgpu_ring_emit_wreg(ring, reg, pasid);
}
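
/* Adjust a PDE address and its flags for the given page-table level. */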
static void gmc_v12_0_get_vm_pde(struct amdgpu_device *adev, int level,
uint64_t *addr, uint64_t *flags)
{
if (!(*flags & AMDGPU_PDE_PTE_GFX12) && !(*flags & AMDGPU_PTE_SYSTEM))
*addr = adev->vm_manager.vram_base_offset + *addr -
adev->gmc.vram_start;
BUG_ON(*addr & 0xFFFF00000000003FULL);
if (!adev->gmc.translate_further)
return;
if (level == AMDGPU_VM_PDB1) {
if (!(*flags & AMDGPU_PDE_PTE_GFX12))
*flags |= AMDGPU_PDE_BFS_GFX12(0x9);
} else if (level == AMDGPU_VM_PDB0) {
if (*flags & AMDGPU_PDE_PTE_GFX12)
*flags &= ~AMDGPU_PDE_PTE_GFX12;
}
}
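
/* Translate generic VM mapping flags into GFX12 PTE bits. */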
static void gmc_v12_0_get_vm_pte(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
struct amdgpu_bo *bo,
uint32_t vm_flags,
uint64_t *flags)
{
if (vm_flags & AMDGPU_VM_PAGE_EXECUTABLE)
*flags |= AMDGPU_PTE_EXECUTABLE;
else
*flags &= ~AMDGPU_PTE_EXECUTABLE;
switch (vm_flags & AMDGPU_VM_MTYPE_MASK) {
case AMDGPU_VM_MTYPE_DEFAULT:
*flags = AMDGPU_PTE_MTYPE_GFX12(*flags, MTYPE_NC);
break;
case AMDGPU_VM_MTYPE_NC:
default:
*flags = AMDGPU_PTE_MTYPE_GFX12(*flags, MTYPE_NC);
break;
case AMDGPU_VM_MTYPE_UC:
*flags = AMDGPU_PTE_MTYPE_GFX12(*flags, MTYPE_UC);
break;
}
if (vm_flags & AMDGPU_VM_PAGE_NOALLOC)
*flags |= AMDGPU_PTE_NOALLOC;
else
*flags &= ~AMDGPU_PTE_NOALLOC;
if (vm_flags & AMDGPU_VM_PAGE_PRT) {
*flags |= AMDGPU_PTE_PRT_GFX12;
*flags |= AMDGPU_PTE_SNOOPED;
*flags |= AMDGPU_PTE_SYSTEM;
*flags |= AMDGPU_PTE_IS_PTE;
*flags &= ~AMDGPU_PTE_VALID;
}
if (bo && bo->flags & AMDGPU_GEM_CREATE_GFX12_DCC)
*flags |= AMDGPU_PTE_DCC;
if (bo && bo->flags & AMDGPU_GEM_CREATE_UNCACHED)
*flags = AMDGPU_PTE_MTYPE_GFX12(*flags, MTYPE_UC);
}
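
/* No VBIOS framebuffer needs to be reserved on GFX12. */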
static unsigned gmc_v12_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
return 0;
}
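
/* DCC buffers must be aligned according to the number of texture channel caches. */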
static unsigned int gmc_v12_0_get_dcc_alignment(struct amdgpu_device *adev)
{
unsigned int max_tex_channel_caches, alignment;
if (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(12, 0, 0) &&
amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(12, 0, 1))
return 0;
max_tex_channel_caches = adev->gfx.config.max_texture_channel_caches;
if (is_power_of_2(max_tex_channel_caches))
alignment = (unsigned int)(max_tex_channel_caches / SZ_4);
else
alignment = roundup_pow_of_two(max_tex_channel_caches);
return (unsigned int)(alignment * max_tex_channel_caches * SZ_1K);
}

static const struct amdgpu_gmc_funcs gmc_v12_0_gmc_funcs = {
.flush_gpu_tlb = gmc_v12_0_flush_gpu_tlb,
.flush_gpu_tlb_pasid = gmc_v12_0_flush_gpu_tlb_pasid,
.emit_flush_gpu_tlb = gmc_v12_0_emit_flush_gpu_tlb,
.emit_pasid_mapping = gmc_v12_0_emit_pasid_mapping,
.get_vm_pde = gmc_v12_0_get_vm_pde,
.get_vm_pte = gmc_v12_0_get_vm_pte,
.get_vbios_fb_size = gmc_v12_0_get_vbios_fb_size,
.get_dcc_alignment = gmc_v12_0_get_dcc_alignment,
};

static void gmc_v12_0_set_gmc_funcs(struct amdgpu_device *adev)
{
adev->gmc.gmc_funcs = &gmc_v12_0_gmc_funcs;
}

static void gmc_v12_0_set_umc_funcs(struct amdgpu_device *adev)
{
switch (amdgpu_ip_version(adev, UMC_HWIP, 0)) {
case IP_VERSION(8, 14, 0):
adev->umc.channel_inst_num = UMC_V8_14_CHANNEL_INSTANCE_NUM;
adev->umc.umc_inst_num = UMC_V8_14_UMC_INSTANCE_NUM(adev);
adev->umc.node_inst_num = 0;
adev->umc.max_ras_err_cnt_per_query = UMC_V8_14_TOTAL_CHANNEL_NUM(adev);
adev->umc.channel_offs = UMC_V8_14_PER_CHANNEL_OFFSET;
adev->umc.ras = &umc_v8_14_ras;
break;
default:
break;
}
}

static void gmc_v12_0_set_mmhub_funcs(struct amdgpu_device *adev)
{
switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
case IP_VERSION(4, 1, 0):
adev->mmhub.funcs = &mmhub_v4_1_0_funcs;
break;
case IP_VERSION(4, 2, 0):
adev->mmhub.funcs = &mmhub_v4_2_0_funcs;
break;
default:
break;
}
}

static void gmc_v12_0_set_gfxhub_funcs(struct amdgpu_device *adev)
{
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
case IP_VERSION(12, 0, 0):
case IP_VERSION(12, 0, 1):
adev->gfxhub.funcs = &gfxhub_v12_0_funcs;
break;
case IP_VERSION(12, 1, 0):
adev->gfxhub.funcs = &gfxhub_v12_1_funcs;
break;
default:
break;
}
}

static int gmc_v12_0_early_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
case IP_VERSION(12, 1, 0):
gmc_v12_1_set_gmc_funcs(adev);
gmc_v12_1_set_irq_funcs(adev);
adev->gmc.init_pte_flags = AMDGPU_PTE_IS_PTE;
break;
default:
gmc_v12_0_set_gmc_funcs(adev);
gmc_v12_0_set_irq_funcs(adev);
break;
}
gmc_v12_0_set_gfxhub_funcs(adev);
gmc_v12_0_set_mmhub_funcs(adev);
gmc_v12_0_set_umc_funcs(adev);
adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
adev->gmc.shared_aperture_end =
adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
adev->gmc.private_aperture_start = 0x1000000000000000ULL;
adev->gmc.private_aperture_end =
adev->gmc.private_aperture_start + (4ULL << 30) - 1;
adev->gmc.noretry_flags = AMDGPU_VM_NORETRY_FLAGS_TF;
return 0;
}

static int gmc_v12_0_late_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
int r;
r = amdgpu_gmc_allocate_vm_inv_eng(adev);
if (r)
return r;
r = amdgpu_gmc_ras_late_init(adev);
if (r)
return r;
return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
}
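
/* Place VRAM, GART and (optionally) the AGP aperture within the GPU address space. */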
static void gmc_v12_0_vram_gtt_location(struct amdgpu_device *adev,
struct amdgpu_gmc *mc)
{
u64 base = 0;
base = adev->mmhub.funcs->get_fb_location(adev);
amdgpu_gmc_set_agp_default(adev, mc);
amdgpu_gmc_vram_location(adev, &adev->gmc, base);
amdgpu_gmc_gart_location(adev, mc, AMDGPU_GART_PLACEMENT_LOW);
if (!amdgpu_sriov_vf(adev) && (amdgpu_agp == 1))
amdgpu_gmc_agp_location(adev, mc);
if (amdgpu_sriov_vf(adev))
adev->vm_manager.vram_base_offset = 0;
else
adev->vm_manager.vram_base_offset = adev->mmhub.funcs->get_mc_fb_offset(adev);
}
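
/* Look up the amount of VRAM and decide on the GART size and placement. */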
static int gmc_v12_0_mc_init(struct amdgpu_device *adev)
{
int r;
adev->gmc.mc_vram_size =
adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
adev->gmc.real_vram_size = adev->gmc.mc_vram_size;
if (!(adev->flags & AMD_IS_APU)) {
r = amdgpu_device_resize_fb_bar(adev);
if (r)
return r;
}
adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
#ifdef CONFIG_X86_64
if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) {
adev->gmc.aper_base = adev->mmhub.funcs->get_mc_fb_offset(adev);
adev->gmc.aper_size = adev->gmc.real_vram_size;
}
#endif
adev->gmc.visible_vram_size = adev->gmc.aper_size;
if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
adev->gmc.visible_vram_size = adev->gmc.real_vram_size;
if (amdgpu_gart_size == -1)
adev->gmc.gart_size = 512ULL << 20;
else
adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
gmc_v12_0_vram_gtt_location(adev, &adev->gmc);
return 0;
}
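
/* Allocate the GART table and set the default GART PTE flags. */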
static int gmc_v12_0_gart_init(struct amdgpu_device *adev)
{
int r;
if (adev->gart.bo) {
WARN(1, "PCIE GART already initialized\n");
return 0;
}
r = amdgpu_gart_init(adev);
if (r)
return r;
adev->gart.table_size = adev->gart.num_gpu_pages * 8;
adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_GFX12(0ULL, MTYPE_UC) |
AMDGPU_PTE_EXECUTABLE |
AMDGPU_PTE_IS_PTE;
return amdgpu_gart_table_vram_alloc(adev);
}
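
/* Set up VM hubs, fault interrupts, the memory controller, GART and the VM manager. */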
static int gmc_v12_0_sw_init(struct amdgpu_ip_block *ip_block)
{
int r, vram_width = 0, vram_type = 0, vram_vendor = 0;
struct amdgpu_device *adev = ip_block->adev;
int i;
adev->mmhub.funcs->init(adev);
adev->gfxhub.funcs->init(adev);
spin_lock_init(&adev->gmc.invalidate_lock);
if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(12, 1, 0)) {
gmc_v12_1_init_vram_info(adev);
} else {
r = amdgpu_atomfirmware_get_vram_info(adev,
&vram_width, &vram_type, &vram_vendor);
adev->gmc.vram_width = vram_width;
adev->gmc.vram_type = vram_type;
adev->gmc.vram_vendor = vram_vendor;
}
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
case IP_VERSION(12, 0, 0):
case IP_VERSION(12, 0, 1):
set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask);
set_bit(AMDGPU_MMHUB0(0), adev->vmhubs_mask);
amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
break;
case IP_VERSION(12, 1, 0):
bitmap_set(adev->vmhubs_mask, AMDGPU_GFXHUB(0),
NUM_XCC(adev->gfx.xcc_mask));
for (i = 0; i < hweight32(adev->aid_mask); i++)
set_bit(AMDGPU_MMHUB0(i), adev->vmhubs_mask);
amdgpu_vm_adjust_size(adev, 128 * 1024 * 1024, 9, 4, 57);
break;
default:
break;
}
r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_VMC,
VMC_1_0__SRCID__VM_FAULT,
&adev->gmc.vm_fault);
if (r)
return r;
if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(12, 1, 0)) {
r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_UTCL2,
UTCL2_1_0__SRCID__FAULT,
&adev->gmc.vm_fault);
if (r)
return r;
r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_UTCL2,
UTCL2_1_0__SRCID__RETRY,
&adev->gmc.vm_fault);
if (r)
return r;
r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_VMC,
VMC_1_0__SRCID__VM_RETRY,
&adev->gmc.vm_fault);
} else {
r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GFX,
UTCL2_1_0__SRCID__FAULT,
&adev->gmc.vm_fault);
}
if (r)
return r;
if ((amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(12, 1, 0)) &&
!amdgpu_sriov_vf(adev)) {
r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_DF, 0,
&adev->gmc.ecc_irq);
if (r)
return r;
}
adev->gmc.mc_mask = AMDGPU_GMC_HOLE_MASK;
r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
if (r) {
drm_warn(adev_to_drm(adev), "No suitable DMA available.\n");
return r;
}
adev->need_swiotlb = drm_need_swiotlb(44);
r = gmc_v12_0_mc_init(adev);
if (r)
return r;
amdgpu_gmc_get_vbios_allocations(adev);
#ifdef HAVE_ACPI_DEV_GET_FIRST_MATCH_DEV
if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(12, 1, 0)) {
r = amdgpu_gmc_init_mem_ranges(adev);
if (r)
return r;
}
#endif
r = amdgpu_bo_init(adev);
if (r)
return r;
r = gmc_v12_0_gart_init(adev);
if (r)
return r;
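/*
 * VMID 0 is reserved for the system; KFD uses VMIDs starting at 3 on
 * GC 12.1.0 and at 8 otherwise, or at 1 when kernel queues are disabled.
 */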
if (adev->gfx.disable_kq)
adev->vm_manager.first_kfd_vmid = 1;
else if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(12, 1, 0))
adev->vm_manager.first_kfd_vmid = 3;
else
adev->vm_manager.first_kfd_vmid = 8;
amdgpu_vm_manager_init(adev);
r = amdgpu_gmc_ras_sw_init(adev);
if (r)
return r;
return 0;
}

static void gmc_v12_0_gart_fini(struct amdgpu_device *adev)
{
amdgpu_gart_table_vram_free(adev);
}

static int gmc_v12_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
amdgpu_vm_manager_fini(adev);
gmc_v12_0_gart_fini(adev);
amdgpu_gem_force_release(adev);
amdgpu_bo_fini(adev);
return 0;
}

static void gmc_v12_0_init_golden_registers(struct amdgpu_device *adev)
{
}
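
/* Re-bind GTT memory, program the GART through the MMHUB and enable VM translation. */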
static int gmc_v12_0_gart_enable(struct amdgpu_device *adev)
{
int r;
bool value;
if (adev->gart.bo == NULL) {
dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
return -EINVAL;
}
amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
r = adev->mmhub.funcs->gart_enable(adev);
if (r)
return r;
amdgpu_device_flush_hdp(adev, NULL);
value = amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS;
adev->mmhub.funcs->set_fault_enable_default(adev, value);
adev->gmc.gmc_funcs->flush_gpu_tlb(adev, 0, AMDGPU_MMHUB0(0), 0);
drm_info(adev_to_drm(adev), "PCIE GART of %uM enabled (table at 0x%016llX).\n",
(unsigned)(adev->gmc.gart_size >> 20),
(unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));
return 0;
}

static int gmc_v12_0_hw_init(struct amdgpu_ip_block *ip_block)
{
int r;
struct amdgpu_device *adev = ip_block->adev;
gmc_v12_0_init_golden_registers(adev);
r = gmc_v12_0_gart_enable(adev);
if (r)
return r;
if (adev->umc.funcs && adev->umc.funcs->init_registers)
adev->umc.funcs->init_registers(adev);
return 0;
}

static void gmc_v12_0_gart_disable(struct amdgpu_device *adev)
{
adev->mmhub.funcs->gart_disable(adev);
}

static int gmc_v12_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
if (amdgpu_sriov_vf(adev)) {
DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
return 0;
}
amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
if (adev->gmc.ecc_irq.funcs &&
amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC))
amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
gmc_v12_0_gart_disable(adev);
return 0;
}

static int gmc_v12_0_suspend(struct amdgpu_ip_block *ip_block)
{
gmc_v12_0_hw_fini(ip_block);
return 0;
}

static int gmc_v12_0_resume(struct amdgpu_ip_block *ip_block)
{
int r;
r = gmc_v12_0_hw_init(ip_block);
if (r)
return r;
amdgpu_vmid_reset_all(ip_block->adev);
return 0;
}

static bool gmc_v12_0_is_idle(struct amdgpu_ip_block *ip_block)
{
return true;
}

static int gmc_v12_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
return 0;
}

static int gmc_v12_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
int r;
struct amdgpu_device *adev = ip_block->adev;
r = adev->mmhub.funcs->set_clockgating(adev, state);
if (r)
return r;
if (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(12, 1, 0))
return athub_v4_1_0_set_clockgating(adev, state);

return 0;
}

static void gmc_v12_0_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags)
{
struct amdgpu_device *adev = ip_block->adev;
adev->mmhub.funcs->get_clockgating(adev, flags);
if (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(12, 1, 0))
athub_v4_1_0_get_clockgating(adev, flags);
}

static int gmc_v12_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
}

const struct amd_ip_funcs gmc_v12_0_ip_funcs = {
.name = "gmc_v12_0",
.early_init = gmc_v12_0_early_init,
.sw_init = gmc_v12_0_sw_init,
.hw_init = gmc_v12_0_hw_init,
.late_init = gmc_v12_0_late_init,
.sw_fini = gmc_v12_0_sw_fini,
.hw_fini = gmc_v12_0_hw_fini,
.suspend = gmc_v12_0_suspend,
.resume = gmc_v12_0_resume,
.is_idle = gmc_v12_0_is_idle,
.wait_for_idle = gmc_v12_0_wait_for_idle,
.set_clockgating_state = gmc_v12_0_set_clockgating_state,
.set_powergating_state = gmc_v12_0_set_powergating_state,
.get_clockgating_state = gmc_v12_0_get_clockgating_state,
};

const struct amdgpu_ip_block_version gmc_v12_0_ip_block = {
.type = AMD_IP_BLOCK_TYPE_GMC,
.major = 12,
.minor = 0,
.rev = 0,
.funcs = &gmc_v12_0_ip_funcs,
};