#include "amdgpu.h"
#include "gmc_v12_1.h"
#include "soc15_common.h"
#include "soc_v1_0_enum.h"
#include "oss/osssys_7_1_0_offset.h"
#include "oss/osssys_7_1_0_sh_mask.h"
#include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
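
/*
 * Toggle the per-context VM fault interrupt enable bits on every active
 * VM hub. The sixteen VM contexts of a hub are controlled through
 * consecutive VM_CONTEXT*_CNTL registers starting at vm_context0_cntl.
 */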
static int gmc_v12_1_vm_fault_interrupt_state(struct amdgpu_device *adev,
					      struct amdgpu_irq_src *src,
					      unsigned int type,
					      enum amdgpu_interrupt_state state)
{
	struct amdgpu_vmhub *hub;
	u32 tmp, reg, i, j;

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		for_each_set_bit(j, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS) {
			hub = &adev->vmhub[j];
			for (i = 0; i < 16; i++) {
				reg = hub->vm_context0_cntl + i;

				/* GFX hub registers must not be touched in S0ix */
				if (adev->in_s0ix && (j == AMDGPU_GFXHUB(0)))
					continue;

				if (j >= AMDGPU_MMHUB0(0))
					tmp = RREG32_SOC15_IP(MMHUB, reg);
				else
					tmp = RREG32_XCC(reg, j);

				tmp &= ~hub->vm_cntx_cntl_vm_fault;

				if (j >= AMDGPU_MMHUB0(0))
					WREG32_SOC15_IP(MMHUB, reg, tmp);
				else
					WREG32_XCC(reg, tmp, j);
			}
		}
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		for_each_set_bit(j, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS) {
			hub = &adev->vmhub[j];
			for (i = 0; i < 16; i++) {
				reg = hub->vm_context0_cntl + i;

				/* GFX hub registers must not be touched in S0ix */
				if (adev->in_s0ix && (j == AMDGPU_GFXHUB(0)))
					continue;

				if (j >= AMDGPU_MMHUB0(0))
					tmp = RREG32_SOC15_IP(MMHUB, reg);
				else
					tmp = RREG32_XCC(reg, j);

				tmp |= hub->vm_cntx_cntl_vm_fault;

				if (j >= AMDGPU_MMHUB0(0))
					WREG32_SOC15_IP(MMHUB, reg, tmp);
				else
					WREG32_XCC(reg, tmp, j);
			}
		}
		break;
	default:
		break;
	}

	return 0;
}
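
/*
 * Process a VM fault interrupt: reconstruct the faulting address and the
 * originating hub from the IV entry, give the retry-fault machinery a
 * chance to service recoverable faults, and log everything else along
 * with the L2 protection fault status.
 */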
static int gmc_v12_1_process_interrupt(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *source,
				       struct amdgpu_iv_entry *entry)
{
	struct amdgpu_task_info *task_info;
	bool retry_fault = false, write_fault = false;
	unsigned int vmhub, node_id;
	struct amdgpu_vmhub *hub;
	uint32_t cam_index = 0;
	const char *hub_name;
	int ret, xcc_id = 0;
	uint32_t status = 0;
	u64 addr;

	node_id = entry->node_id;

	/* Reassemble the page-aligned 57-bit fault address */
	addr = (u64)entry->src_data[0] << 12;
	addr |= ((u64)entry->src_data[1] & 0x1fff) << 44;

	if (entry->src_id == UTCL2_1_0__SRCID__RETRY) {
		retry_fault = true;
		write_fault = !!(entry->src_data[1] & 0x200000);
	}

	if (entry->client_id == SOC_V1_0_IH_CLIENTID_VMC) {
		hub_name = "mmhub0";
		vmhub = AMDGPU_MMHUB0(node_id / 4);
	} else {
		hub_name = "gfxhub0";
		if (adev->gfx.funcs->ih_node_to_logical_xcc) {
			xcc_id = adev->gfx.funcs->ih_node_to_logical_xcc(adev,
				node_id);
			if (xcc_id < 0)
				xcc_id = 0;
		}
		vmhub = AMDGPU_GFXHUB(xcc_id);
	}
	hub = &adev->vmhub[vmhub];

	if (retry_fault) {
		if (adev->irq.retry_cam_enabled) {
			/* Delegate it to a different ring if the hardware
			 * hasn't already done it.
			 */
			if (entry->ih == &adev->irq.ih) {
				amdgpu_irq_delegate(adev, entry, 8);
				return 1;
			}

			cam_index = entry->src_data[3] & 0x3ff;

			ret = amdgpu_vm_handle_fault(adev, entry->pasid, entry->vmid, node_id,
						     addr, entry->timestamp, write_fault);
			WDOORBELL32(adev->irq.retry_cam_doorbell_index, cam_index);
			if (ret)
				return 1;
		} else {
			/* Process it only if it's the first fault for this address */
			if (entry->ih != &adev->irq.ih_soft &&
			    amdgpu_gmc_filter_faults(adev, entry->ih, addr, entry->pasid,
						     entry->timestamp))
				return 1;

			/* Delegate it to a different ring if the hardware
			 * hasn't already done it.
			 */
			if (entry->ih == &adev->irq.ih) {
				amdgpu_irq_delegate(adev, entry, 8);
				return 1;
			}

			/* Try to handle the recoverable page faults by filling
			 * page tables.
			 */
			if (amdgpu_vm_handle_fault(adev, entry->pasid, entry->vmid, node_id,
						   addr, entry->timestamp, write_fault))
				return 1;
		}
	}

	if (kgd2kfd_vmfault_fast_path(adev, entry, retry_fault))
		return 1;

	if (!printk_ratelimit())
		return 0;

	dev_err(adev->dev,
		"[%s] %s page fault (src_id:%u ring:%u vmid:%u pasid:%u)\n", hub_name,
		retry_fault ? "retry" : "no-retry",
		entry->src_id, entry->ring_id, entry->vmid, entry->pasid);

	task_info = amdgpu_vm_get_task_info_pasid(adev, entry->pasid);
	if (task_info) {
		amdgpu_vm_print_task_info(adev, task_info);
		amdgpu_vm_put_task_info(task_info);
	}

	dev_err(adev->dev, " in page starting at address 0x%016llx from IH client %d (%s)\n",
		addr, entry->client_id, soc_v1_0_ih_clientid_name[entry->client_id]);

	if (amdgpu_sriov_vf(adev))
		return 0;

	/* Issue a dummy read to wait for the status register to
	 * be updated to avoid reading an incorrect value due to
	 * the new fast GRBM interface.
	 */
	if (entry->vmid_src == AMDGPU_GFXHUB(0))
		RREG32(hub->vm_l2_pro_fault_status);

	status = RREG32(hub->vm_l2_pro_fault_status);
	if (!status)
		return 0;

	/* Write 1 to clear the fault status for the next fault */
	WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);

	amdgpu_vm_update_fault_cache(adev, entry->pasid, addr, status, vmhub);

	hub->vmhub_funcs->print_l2_protection_fault_status(adev, status);

	return 0;
}
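
/*
 * Look up the PASID currently mapped to @vmid. The IH block keeps one
 * VMID-to-PASID LUT per instance, selected through IH_VMID_LUT_INDEX
 * before the read. Returns true if a non-zero PASID mapping exists.
 */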
static bool gmc_v12_1_get_vmid_pasid_mapping_info(struct amdgpu_device *adev,
						  uint8_t vmid, uint8_t inst,
						  uint16_t *p_pasid)
{
	uint16_t index;

	/* Instances 0..3 use LUT indices 0x2..0x5, instances 4..7
	 * use 0xa..0xd.
	 */
	if (inst / 4)
		index = 0xa + inst % 4;
	else
		index = 0x2 + inst % 4;

	WREG32(SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_LUT_INDEX), index);
	*p_pasid = RREG32(SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT) + vmid) & 0xffff;

	return !!(*p_pasid);
}
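
/*
 * Flush the TLB of one VMID on a single hub by direct MMIO, using a
 * dedicated invalidation engine (17) that is not handed out to the
 * rings.
 */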
static void gmc_v12_1_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
				   unsigned int vmhub, uint32_t flush_type)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
	u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
	const unsigned int eng = 17;
	unsigned char hub_ip;
	unsigned int i;
	u32 tmp;

	hub_ip = AMDGPU_IS_GFXHUB(vmhub) ? GC_HWIP : MMHUB_HWIP;

	spin_lock(&adev->gmc.invalidate_lock);

	WREG32_RLC_NO_KIQ(hub->vm_inv_eng0_req + hub->eng_distance * eng, inv_req, hub_ip);

	/* Wait for the ack bit of this VMID to come back */
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_ack +
					hub->eng_distance * eng, hub_ip);
		tmp &= 1 << vmid;
		if (tmp)
			break;

		udelay(1);
	}

	if (!AMDGPU_IS_GFXHUB(vmhub) &&
	    hub->vm_l2_bank_select_reserved_cid2 &&
	    !amdgpu_sriov_vf(adev)) {
		inv_req = RREG32_NO_KIQ(hub->vm_l2_bank_select_reserved_cid2);
		/* Issue a private invalidation of the reserved cache */
		inv_req |= (1 << 25);
		WREG32_NO_KIQ(hub->vm_l2_bank_select_reserved_cid2, inv_req);
		/* Read back to ensure the invalidation has completed */
		RREG32_NO_KIQ(hub->vm_l2_bank_select_reserved_cid2);
	}

	spin_unlock(&adev->gmc.invalidate_lock);

	if (i < adev->usec_timeout)
		return;

	dev_err(adev->dev, "Timeout waiting for VM flush ACK!\n");
}
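
/**
 * gmc_v12_1_flush_gpu_tlb - tlb flush with certain type
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 * @vmhub: which hub to flush
 * @flush_type: the flush type
 *
 * Flush the TLB for the requested page table. Uses the KIQ/MES
 * register write-wait path when a scheduler is ready, and falls back
 * to a direct MMIO flush otherwise.
 */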
static void gmc_v12_1_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
				    uint32_t vmhub, uint32_t flush_type)
{
	u32 inst;

	if (AMDGPU_IS_GFXHUB(vmhub) && !adev->gfx.is_poweron)
		return;

	/* MM hub invalidations go through the first KIQ/MES instance */
	if (vmhub >= AMDGPU_MMHUB0(0))
		inst = 0;
	else
		inst = vmhub;

	/* Prefer the KIQ/MES path whenever a scheduler is up, it is safe
	 * against concurrent register access by the firmware.
	 */
	if ((adev->gfx.kiq[inst].ring.sched.ready ||
	     adev->mes.ring[MES_PIPE_INST(inst, 0)].sched.ready) &&
	    (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev))) {
		struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
		const unsigned int eng = 17;
		u32 inv_req = hub->vmhub_funcs->get_invalidate_req(vmid, flush_type);
		u32 req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
		u32 ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;

		amdgpu_gmc_fw_reg_write_reg_wait(adev, req, ack, inv_req,
						 1 << vmid, inst);
		return;
	}

	mutex_lock(&adev->mman.gtt_window_lock);
	gmc_v12_1_flush_vm_hub(adev, vmid, vmhub, 0);
	mutex_unlock(&adev->mman.gtt_window_lock);
}
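
/**
 * gmc_v12_1_flush_gpu_tlb_pasid - tlb flush via pasid
 *
 * @adev: amdgpu_device pointer
 * @pasid: pasid to be flushed
 * @flush_type: the flush type
 * @all_hub: flush all hubs
 * @inst: the instance the flush is triggered on
 *
 * Flush the TLB for the requested pasid, either through the MES
 * firmware on recent scheduler versions or by scanning the
 * VMID-to-PASID mappings and flushing every matching VMID.
 */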
static void gmc_v12_1_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
					  uint16_t pasid, uint32_t flush_type,
					  bool all_hub, uint32_t inst)
{
	uint16_t queried;
	int vmid, i;

	if (adev->enable_uni_mes && adev->mes.ring[0].sched.ready &&
	    (adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) >= 0x6f) {
		struct mes_inv_tlbs_pasid_input input = {0};

		input.xcc_id = inst;
		input.pasid = pasid;
		input.flush_type = flush_type;

		/* Only the master XCC submits the flush for all instances */
		if (!amdgpu_gfx_is_master_xcc(adev, inst))
			return;

		input.hub_id = AMDGPU_GFXHUB(0);
		adev->mes.funcs->invalidate_tlbs_pasid(&adev->mes, &input);

		if (all_hub) {
			if (test_bit(AMDGPU_MMHUB0(0), adev->vmhubs_mask)) {
				input.hub_id = AMDGPU_MMHUB0(0);
				adev->mes.funcs->invalidate_tlbs_pasid(&adev->mes, &input);
			}

			if (test_bit(AMDGPU_MMHUB1(0), adev->vmhubs_mask)) {
				input.hub_id = AMDGPU_MMHUB1(0);
				adev->mes.funcs->invalidate_tlbs_pasid(&adev->mes, &input);
			}
		}
		return;
	}

	/* VMID 0 is reserved for the kernel context, start at 1 */
	for (vmid = 1; vmid < 16; vmid++) {
		bool valid;

		valid = gmc_v12_1_get_vmid_pasid_mapping_info(adev, vmid, inst,
							      &queried);
		if (!valid || queried != pasid)
			continue;

		if (all_hub) {
			for_each_set_bit(i, adev->vmhubs_mask,
					 AMDGPU_MAX_VMHUBS)
				gmc_v12_1_flush_gpu_tlb(adev, vmid, i,
							flush_type);
		} else {
			gmc_v12_1_flush_gpu_tlb(adev, vmid, AMDGPU_GFXHUB(inst),
						flush_type);
		}
	}
}
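
/*
 * Emit a TLB flush on a ring: write the page directory base for @vmid
 * into the hub's context registers, then trigger the ring's own
 * invalidation engine and wait for the per-VMID ack bit.
 */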
static uint64_t gmc_v12_1_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
					     unsigned int vmid, uint64_t pd_addr)
{
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub];
	uint32_t req = hub->vmhub_funcs->get_invalidate_req(vmid, 0);
	unsigned int eng = ring->vm_inv_eng;

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 +
			      (hub->ctx_addr_distance * vmid),
			      lower_32_bits(pd_addr));

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 +
			      (hub->ctx_addr_distance * vmid),
			      upper_32_bits(pd_addr));

	amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req +
					    hub->eng_distance * eng,
					    hub->vm_inv_eng0_ack +
					    hub->eng_distance * eng,
					    req, 1 << vmid);

	return pd_addr;
}
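
/*
 * Emit the VMID-to-PASID LUT update on a ring. The GFX hub and the MM
 * hubs are mapped through separate IH LUTs.
 */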
static void gmc_v12_1_emit_pasid_mapping(struct amdgpu_ring *ring,
					 unsigned int vmid, unsigned int pasid)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t reg;

	if (ring->vm_hub == AMDGPU_GFXHUB(0))
		reg = SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT) + vmid;
	else
		reg = SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT_MM) + vmid;

	amdgpu_ring_emit_wreg(ring, reg, pasid);
}
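
/*
 * Adjust a PDE address and its flags for the hardware: relocate VRAM
 * addresses by the MC base offset, mark the entry snooped and apply the
 * block-fragment-size encoding when further translation is enabled.
 */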
static void gmc_v12_1_get_vm_pde(struct amdgpu_device *adev, int level,
				 uint64_t *addr, uint64_t *flags)
{
	if (!(*flags & AMDGPU_PDE_PTE_GFX12) && !(*flags & AMDGPU_PTE_SYSTEM))
		*addr = adev->vm_manager.vram_base_offset + *addr -
			adev->gmc.vram_start;

	BUG_ON(*addr & 0xFFFF00000000003FULL);

	*flags |= AMDGPU_PTE_SNOOPED;

	if (!adev->gmc.translate_further)
		return;

	if (level == AMDGPU_VM_PDB1) {
		/* Set the block fragment size */
		if (!(*flags & AMDGPU_PDE_PTE_GFX12))
			*flags |= AMDGPU_PDE_BFS_GFX12(0x9);
	} else if (level == AMDGPU_VM_PDB0) {
		if (*flags & AMDGPU_PDE_PTE_GFX12)
			*flags &= ~AMDGPU_PDE_PTE_GFX12;
	}
}
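
/*
 * Derive MTYPE and snoop settings from the BO's placement and creation
 * flags: uncached BOs get MTYPE_UC, extended-coherent BOs get MTYPE_UC
 * unless they are local, and local VRAM uses the (tunable) local MTYPE.
 */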
static void gmc_v12_1_get_coherence_flags(struct amdgpu_device *adev,
					  struct amdgpu_bo *bo,
					  uint64_t *flags)
{
	struct amdgpu_device *bo_adev = amdgpu_ttm_adev(bo->tbo.bdev);
	bool is_vram = bo->tbo.resource &&
		       bo->tbo.resource->mem_type == TTM_PL_VRAM;
	bool coherent = bo->flags & (AMDGPU_GEM_CREATE_COHERENT |
				     AMDGPU_GEM_CREATE_EXT_COHERENT);
	bool ext_coherent = bo->flags & AMDGPU_GEM_CREATE_EXT_COHERENT;
	uint32_t gc_ip_version = amdgpu_ip_version(adev, GC_HWIP, 0);
	bool uncached = bo->flags & AMDGPU_GEM_CREATE_UNCACHED;
	unsigned int mtype, mtype_local;
	bool snoop = false;
	bool is_local = false;

	switch (gc_ip_version) {
	case IP_VERSION(12, 1, 0):
		/* The amdgpu_mtype_local module parameter overrides the
		 * default MTYPE for local memory.
		 */
		mtype_local = MTYPE_RW;
		if (amdgpu_mtype_local == 1) {
			DRM_INFO_ONCE("Using MTYPE_NC for local memory\n");
			mtype_local = MTYPE_NC;
		} else if (amdgpu_mtype_local == 2) {
			DRM_INFO_ONCE("MTYPE_CC not supported, using MTYPE_RW instead for local memory\n");
		} else {
			DRM_INFO_ONCE("Using MTYPE_RW for local memory\n");
		}

		is_local = (is_vram && adev == bo_adev);
		snoop = true;

		if (uncached) {
			mtype = MTYPE_UC;
		} else if (ext_coherent) {
			mtype = is_local ? mtype_local : MTYPE_UC;
		} else {
			if (is_local)
				mtype = mtype_local;
			else
				mtype = MTYPE_NC;
		}
		break;
	default:
		if (uncached || coherent)
			mtype = MTYPE_UC;
		else
			mtype = MTYPE_NC;
		break;
	}

	if (mtype != MTYPE_NC)
		*flags = AMDGPU_PTE_MTYPE_GFX12(*flags, mtype);

	if (is_local || adev->have_atomics_support)
		*flags |= AMDGPU_PTE_BUS_ATOMICS;

	*flags |= snoop ? AMDGPU_PTE_SNOOPED : 0;
}
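
/*
 * Build the PTE flags for a mapping from the VM flags requested by
 * userspace, then refine the MTYPE from the BO's coherence properties
 * for valid mappings backed by a BO.
 */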
static void gmc_v12_1_get_vm_pte(struct amdgpu_device *adev,
				 struct amdgpu_vm *vm,
				 struct amdgpu_bo *bo,
				 uint32_t vm_flags,
				 uint64_t *flags)
{
	if (vm_flags & AMDGPU_VM_PAGE_EXECUTABLE)
		*flags |= AMDGPU_PTE_EXECUTABLE;
	else
		*flags &= ~AMDGPU_PTE_EXECUTABLE;

	switch (vm_flags & AMDGPU_VM_MTYPE_MASK) {
	case AMDGPU_VM_MTYPE_DEFAULT:
		*flags = AMDGPU_PTE_MTYPE_GFX12(*flags, MTYPE_NC);
		break;
	case AMDGPU_VM_MTYPE_NC:
	default:
		*flags = AMDGPU_PTE_MTYPE_GFX12(*flags, MTYPE_NC);
		break;
	case AMDGPU_VM_MTYPE_RW:
		*flags = AMDGPU_PTE_MTYPE_GFX12(*flags, MTYPE_RW);
		break;
	case AMDGPU_VM_MTYPE_UC:
		*flags = AMDGPU_PTE_MTYPE_GFX12(*flags, MTYPE_UC);
		break;
	}

	if ((*flags & AMDGPU_PTE_VALID) && bo)
		gmc_v12_1_get_coherence_flags(adev, bo, flags);
}

static const struct amdgpu_gmc_funcs gmc_v12_1_gmc_funcs = {
	.flush_gpu_tlb = gmc_v12_1_flush_gpu_tlb,
	.flush_gpu_tlb_pasid = gmc_v12_1_flush_gpu_tlb_pasid,
	.emit_flush_gpu_tlb = gmc_v12_1_emit_flush_gpu_tlb,
	.emit_pasid_mapping = gmc_v12_1_emit_pasid_mapping,
	.get_vm_pde = gmc_v12_1_get_vm_pde,
	.get_vm_pte = gmc_v12_1_get_vm_pte,
	.query_mem_partition_mode = &amdgpu_gmc_query_memory_partition,
	.request_mem_partition_mode = &amdgpu_gmc_request_memory_partition,
};

void gmc_v12_1_set_gmc_funcs(struct amdgpu_device *adev)
{
	adev->gmc.gmc_funcs = &gmc_v12_1_gmc_funcs;
}

static const struct amdgpu_irq_src_funcs gmc_v12_1_irq_funcs = {
	.set = gmc_v12_1_vm_fault_interrupt_state,
	.process = gmc_v12_1_process_interrupt,
};

void gmc_v12_1_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gmc.vm_fault.num_types = 1;
	adev->gmc.vm_fault.funcs = &gmc_v12_1_irq_funcs;
}
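
/*
 * Static VRAM parameters for this IP: HBM4 on a 384 * 64-bit wide
 * interface.
 */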
void gmc_v12_1_init_vram_info(struct amdgpu_device *adev)
{
	adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM4;
	adev->gmc.vram_width = 384 * 64;
}