#include "amdgpu.h"
#include "amdgpu_ip.h"
/**
 * amdgpu_logical_to_dev_inst - translate a logical instance to a device one
 * @adev: amdgpu device
 * @block: HW IP block type
 * @inst: logical instance number
 *
 * Only GC, SDMA0 and VCN are remapped through the ip_map.dev_inst table;
 * every other IP block uses identical logical and device numbering, so the
 * input instance is returned unchanged for them.
 */
static int8_t amdgpu_logical_to_dev_inst(struct amdgpu_device *adev,
					 enum amd_hw_ip_block_type block,
					 int8_t inst)
{
	switch (block) {
	case GC_HWIP:
	case SDMA0_HWIP:
	case VCN_HWIP:
		return adev->ip_map.dev_inst[block][inst];
	default:
		/* No remapping table for this block type. */
		return inst;
	}
}
/**
 * amdgpu_logical_to_dev_mask - translate a logical instance mask to device one
 * @adev: amdgpu device
 * @block: HW IP block type
 * @mask: bitmask of logical instances
 *
 * Translates each set bit (a logical instance) in @mask to its device
 * instance via amdgpu_logical_to_dev_inst() and returns the resulting
 * device-instance bitmask.
 *
 * Fix: use unsigned (1U) shift constants — left-shifting signed 1 into
 * bit 31 (instance 31) is undefined behavior in C.
 */
static uint32_t amdgpu_logical_to_dev_mask(struct amdgpu_device *adev,
					   enum amd_hw_ip_block_type block,
					   uint32_t mask)
{
	uint32_t dev_mask = 0;
	int8_t log_inst, dev_inst;

	while (mask) {
		log_inst = ffs(mask) - 1;
		dev_inst = amdgpu_logical_to_dev_inst(adev, block, log_inst);
		/* NOTE(review): assumes every set bit in @mask has a valid
		 * mapping; a dev_inst of -1 (unmapped slot) would make the
		 * shift below undefined — confirm callers only pass mapped
		 * instances.
		 */
		dev_mask |= (1U << dev_inst);
		mask &= ~(1U << log_inst);
	}

	return dev_mask;
}
/**
 * amdgpu_populate_ip_map - fill the logical-to-device instance table
 * @adev: amdgpu device
 * @ip_block: HW IP block type the table row belongs to
 * @inst_mask: bitmask of present device instances
 *
 * Packs the set bits of @inst_mask, in ascending order, into consecutive
 * logical slots of ip_map.dev_inst[@ip_block], then marks the remaining
 * slots as invalid (-1).
 *
 * Fixes: bound the fill loop by HWIP_MAX_INSTANCE so a mask with more set
 * bits than table slots cannot overrun the row, and use an unsigned shift
 * constant (1 << 31 on signed int is undefined behavior).
 */
static void amdgpu_populate_ip_map(struct amdgpu_device *adev,
				   enum amd_hw_ip_block_type ip_block,
				   uint32_t inst_mask)
{
	int l = 0, i;

	while (inst_mask && l < HWIP_MAX_INSTANCE) {
		i = ffs(inst_mask) - 1;
		adev->ip_map.dev_inst[ip_block][l++] = i;
		inst_mask &= ~(1U << i);
	}
	/* Invalidate the unused tail of the row. */
	for (; l < HWIP_MAX_INSTANCE; l++)
		adev->ip_map.dev_inst[ip_block][l] = -1;
}
void amdgpu_ip_map_init(struct amdgpu_device *adev)
{
u32 ip_map[][2] = {
{ GC_HWIP, adev->gfx.xcc_mask },
{ SDMA0_HWIP, adev->sdma.sdma_mask },
{ VCN_HWIP, adev->vcn.inst_mask },
};
int i;
for (i = 0; i < ARRAY_SIZE(ip_map); ++i)
amdgpu_populate_ip_map(adev, ip_map[i][0], ip_map[i][1]);
adev->ip_map.logical_to_dev_inst = amdgpu_logical_to_dev_inst;
adev->ip_map.logical_to_dev_mask = amdgpu_logical_to_dev_mask;
}
/**
 * amdgpu_ip_block_suspend - suspend a single IP block
 * @ip_block: the IP block to suspend
 *
 * Invokes the block's suspend callback if one is provided. On success (or
 * when no callback exists) the block is marked as no longer in a hardware
 * initialized state.
 *
 * Returns 0 on success, or the callback's negative error code on failure.
 */
int amdgpu_ip_block_suspend(struct amdgpu_ip_block *ip_block)
{
	int r = 0;

	if (ip_block->version->funcs->suspend)
		r = ip_block->version->funcs->suspend(ip_block);

	if (r) {
		dev_err(ip_block->adev->dev,
			"suspend of IP block <%s> failed %d\n",
			ip_block->version->funcs->name, r);
		return r;
	}

	ip_block->status.hw = false;
	return 0;
}
/**
 * amdgpu_ip_block_resume - resume a single IP block
 * @ip_block: the IP block to resume
 *
 * Invokes the block's resume callback if one is provided. On success (or
 * when no callback exists) the block is marked as hardware initialized.
 *
 * Returns 0 on success, or the callback's negative error code on failure.
 */
int amdgpu_ip_block_resume(struct amdgpu_ip_block *ip_block)
{
	int r = 0;

	if (ip_block->version->funcs->resume)
		r = ip_block->version->funcs->resume(ip_block);

	if (r) {
		dev_err(ip_block->adev->dev,
			"resume of IP block <%s> failed %d\n",
			ip_block->version->funcs->name, r);
		return r;
	}

	ip_block->status.hw = true;
	return 0;
}
struct amdgpu_ip_block *
amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
enum amd_ip_block_type type)
{
int i;
for (i = 0; i < adev->num_ip_blocks; i++)
if (adev->ip_blocks[i].version->type == type)
return &adev->ip_blocks[i];
return NULL;
}
/**
 * amdgpu_device_ip_block_version_cmp - check an IP block's minimum version
 * @adev: amdgpu device
 * @type: IP block type to check
 * @major: minimum major version required
 * @minor: minimum minor version required (only compared when majors match)
 *
 * Returns 0 when a block of @type exists with version >= @major.@minor,
 * 1 otherwise (including when the block is absent).
 */
int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
				       enum amd_ip_block_type type, u32 major,
				       u32 minor)
{
	struct amdgpu_ip_block *ip_block;

	ip_block = amdgpu_device_ip_get_ip_block(adev, type);
	if (!ip_block)
		return 1;

	if (ip_block->version->major > major)
		return 0;
	if (ip_block->version->major == major &&
	    ip_block->version->minor >= minor)
		return 0;

	return 1;
}
/* Human-readable names for amd_ip_block_type values, indexed by the enum.
 * Uses designated initializers, so any enum value without an entry here is
 * implicitly NULL — lookups must treat NULL entries as unknown.
 */
static const char *const ip_block_names[] = {
[AMD_IP_BLOCK_TYPE_COMMON] = "common",
[AMD_IP_BLOCK_TYPE_GMC] = "gmc",
[AMD_IP_BLOCK_TYPE_IH] = "ih",
[AMD_IP_BLOCK_TYPE_SMC] = "smu",
[AMD_IP_BLOCK_TYPE_PSP] = "psp",
[AMD_IP_BLOCK_TYPE_DCE] = "dce",
[AMD_IP_BLOCK_TYPE_GFX] = "gfx",
[AMD_IP_BLOCK_TYPE_SDMA] = "sdma",
[AMD_IP_BLOCK_TYPE_UVD] = "uvd",
[AMD_IP_BLOCK_TYPE_VCE] = "vce",
[AMD_IP_BLOCK_TYPE_ACP] = "acp",
[AMD_IP_BLOCK_TYPE_VCN] = "vcn",
[AMD_IP_BLOCK_TYPE_MES] = "mes",
[AMD_IP_BLOCK_TYPE_JPEG] = "jpeg",
[AMD_IP_BLOCK_TYPE_VPE] = "vpe",
[AMD_IP_BLOCK_TYPE_UMSCH_MM] = "umsch_mm",
[AMD_IP_BLOCK_TYPE_ISP] = "isp",
[AMD_IP_BLOCK_TYPE_RAS] = "ras",
};
/**
 * ip_block_name - map an IP block type to its printable name
 * @adev: amdgpu device (unused, kept for interface symmetry)
 * @type: IP block type
 *
 * Returns a static string naming @type, or "unknown" when the type is out
 * of range or has no entry in the table.
 *
 * Fix: ip_block_names[] is built with designated initializers, so an
 * in-range enum value without an entry yields a NULL pointer; the original
 * code returned that NULL straight into a "%s" format. Guard NULL entries
 * and make the (previously implicit, via signed/unsigned conversion)
 * negative-index check explicit.
 */
static const char *ip_block_name(struct amdgpu_device *adev,
				 enum amd_ip_block_type type)
{
	int idx = (int)type;

	if (idx < 0 || idx >= (int)ARRAY_SIZE(ip_block_names))
		return "unknown";

	return ip_block_names[idx] ? ip_block_names[idx] : "unknown";
}
/**
 * amdgpu_device_ip_block_add - register an IP block with the device
 * @adev: amdgpu device
 * @ip_block_version: version descriptor of the block to add
 *
 * Appends @ip_block_version to the device's IP block list. VCN and JPEG
 * blocks are silently skipped (returning 0) when the corresponding harvest
 * bit is set in adev->harvest_ip_mask.
 *
 * Returns 0 on success or skip, -EINVAL when @ip_block_version is NULL.
 */
int amdgpu_device_ip_block_add(
	struct amdgpu_device *adev,
	const struct amdgpu_ip_block_version *ip_block_version)
{
	int idx;

	if (!ip_block_version)
		return -EINVAL;

	/* Harvested media engines are not registered at all. */
	if (ip_block_version->type == AMD_IP_BLOCK_TYPE_VCN &&
	    (adev->harvest_ip_mask & AMD_HARVEST_IP_VCN_MASK))
		return 0;
	if (ip_block_version->type == AMD_IP_BLOCK_TYPE_JPEG &&
	    (adev->harvest_ip_mask & AMD_HARVEST_IP_JPEG_MASK))
		return 0;

	idx = adev->num_ip_blocks;
	dev_info(adev->dev, "detected ip block number %d <%s_v%d_%d_%d> (%s)\n",
		 idx, ip_block_name(adev, ip_block_version->type),
		 ip_block_version->major, ip_block_version->minor,
		 ip_block_version->rev, ip_block_version->funcs->name);

	adev->ip_blocks[idx].adev = adev;
	adev->ip_blocks[idx].version = ip_block_version;
	adev->num_ip_blocks = idx + 1;

	return 0;
}
int amdgpu_device_ip_set_clockgating_state(struct amdgpu_device *adev,
enum amd_ip_block_type block_type,
enum amd_clockgating_state state)
{
int i, r = 0;
for (i = 0; i < adev->num_ip_blocks; i++) {
if (!adev->ip_blocks[i].status.valid)
continue;
if (adev->ip_blocks[i].version->type != block_type)
continue;
if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
continue;
r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
&adev->ip_blocks[i], state);
if (r)
dev_err(adev->dev,
"set_clockgating_state of IP block <%s> failed %d\n",
adev->ip_blocks[i].version->funcs->name, r);
}
return r;
}
int amdgpu_device_ip_set_powergating_state(struct amdgpu_device *adev,
enum amd_ip_block_type block_type,
enum amd_powergating_state state)
{
int i, r = 0;
for (i = 0; i < adev->num_ip_blocks; i++) {
if (!adev->ip_blocks[i].status.valid)
continue;
if (adev->ip_blocks[i].version->type != block_type)
continue;
if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
continue;
r = adev->ip_blocks[i].version->funcs->set_powergating_state(
&adev->ip_blocks[i], state);
if (r)
dev_err(adev->dev,
"set_powergating_state of IP block <%s> failed %d\n",
adev->ip_blocks[i].version->funcs->name, r);
}
return r;
}
/**
 * amdgpu_device_ip_get_clockgating_state - collect CG flags from all blocks
 * @adev: amdgpu device
 * @flags: accumulator the callbacks OR their clockgating flags into
 *
 * Walks every valid IP block and lets each one that implements
 * get_clockgating_state report its state into @flags.
 */
void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
					    u64 *flags)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		struct amdgpu_ip_block *block = &adev->ip_blocks[i];

		if (!block->status.valid)
			continue;
		if (block->version->funcs->get_clockgating_state)
			block->version->funcs->get_clockgating_state(block,
								     flags);
	}
}
/**
 * amdgpu_device_ip_wait_for_idle - wait for an IP block to become idle
 * @adev: amdgpu device
 * @block_type: IP block type to wait on
 *
 * Delegates to the block's wait_for_idle callback. Absent, invalid, or
 * callback-less blocks are treated as already idle.
 *
 * Returns 0 when idle (or nothing to wait for), a negative error code on
 * failure.
 */
int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
				   enum amd_ip_block_type block_type)
{
	struct amdgpu_ip_block *ip_block =
		amdgpu_device_ip_get_ip_block(adev, block_type);

	if (!ip_block || !ip_block->status.valid ||
	    !ip_block->version->funcs->wait_for_idle)
		return 0;

	return ip_block->version->funcs->wait_for_idle(ip_block);
}
/**
 * amdgpu_device_ip_is_hw - query whether an IP block is hardware initialized
 * @adev: amdgpu device
 * @block_type: IP block type to query
 *
 * Returns the block's status.hw flag, or false when no block of
 * @block_type is registered.
 */
bool amdgpu_device_ip_is_hw(struct amdgpu_device *adev,
			    enum amd_ip_block_type block_type)
{
	struct amdgpu_ip_block *ip_block =
		amdgpu_device_ip_get_ip_block(adev, block_type);

	return ip_block ? ip_block->status.hw : false;
}
/**
 * amdgpu_device_ip_is_valid - query whether an IP block is valid
 * @adev: amdgpu device
 * @block_type: IP block type to query
 *
 * Returns the block's status.valid flag, or false when no block of
 * @block_type is registered.
 */
bool amdgpu_device_ip_is_valid(struct amdgpu_device *adev,
			       enum amd_ip_block_type block_type)
{
	struct amdgpu_ip_block *ip_block =
		amdgpu_device_ip_get_ip_block(adev, block_type);

	return ip_block ? ip_block->status.valid : false;
}