#include <linux/firmware.h>
#include <linux/mfd/core.h>
#include "amdgpu.h"
#include "amdgpu_isp.h"
#include "isp_v4_1_0.h"
#include "isp_v4_1_1.h"
#define ISP_MC_ADDR_ALIGN (1024 * 32)
/**
 * isp_hw_init - Bring up the ISP hardware via the IP-version-specific hook.
 * @ip_block: amdgpu IP block instance for the ISP
 *
 * Dispatches to the hw_init callback installed by isp_early_init().
 *
 * Return: result of the version hook, or -ENODEV when no hook is set.
 */
static int isp_hw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct amdgpu_isp *isp = &adev->isp;

	/* No explicit NULL comparison (kernel style, matches isp_resume/suspend). */
	if (isp->funcs->hw_init)
		return isp->funcs->hw_init(isp);

	return -ENODEV;
}
/**
 * isp_hw_fini - Tear down the ISP hardware via the IP-version-specific hook.
 * @ip_block: amdgpu IP block instance for the ISP
 *
 * Return: result of the version hook, or -ENODEV when no hook is set.
 */
static int isp_hw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_isp *isp = &ip_block->adev->isp;

	/* No explicit NULL comparison (kernel style, matches isp_resume/suspend). */
	if (isp->funcs->hw_fini)
		return isp->funcs->hw_fini(isp);

	return -ENODEV;
}
/**
 * isp_load_fw_by_psp - Request the optional ISP firmware and register it
 *                      for loading through the PSP.
 * @adev: amdgpu device
 *
 * Derives the firmware file name from the ISP IP version, requests the
 * image, and accounts for it in the PSP firmware table. On request
 * failure the firmware handle is released before returning.
 *
 * Return: 0 on success, negative error code from amdgpu_ucode_request().
 */
static int isp_load_fw_by_psp(struct amdgpu_device *adev)
{
	const struct common_firmware_header *hdr;
	char ucode_prefix[10];
	int ret;

	/* Build "amdgpu/<prefix>.bin" from the detected ISP IP version. */
	amdgpu_ucode_ip_version_decode(adev, ISP_HWIP, ucode_prefix,
				       sizeof(ucode_prefix));

	ret = amdgpu_ucode_request(adev, &adev->isp.fw, AMDGPU_UCODE_OPTIONAL,
				   "amdgpu/%s.bin", ucode_prefix);
	if (ret) {
		amdgpu_ucode_release(&adev->isp.fw);
		return ret;
	}

	hdr = (const struct common_firmware_header *)adev->isp.fw->data;

	/* Register the image in the PSP ucode table and grow the total size. */
	adev->firmware.ucode[AMDGPU_UCODE_ID_ISP].ucode_id = AMDGPU_UCODE_ID_ISP;
	adev->firmware.ucode[AMDGPU_UCODE_ID_ISP].fw = adev->isp.fw;
	adev->firmware.fw_size +=
		ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);

	return 0;
}
/**
 * isp_early_init - Select the ISP version hooks and load the firmware.
 * @ip_block: amdgpu IP block instance for the ISP
 *
 * Installs the function table matching the detected ISP IP version,
 * wires up the back-pointers, and requests the ISP firmware.
 *
 * Return: 0 on success, -EINVAL for an unsupported ISP version, or the
 * error returned by isp_load_fw_by_psp().
 */
static int isp_early_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct amdgpu_isp *isp = &adev->isp;
	int r;

	switch (amdgpu_ip_version(adev, ISP_HWIP, 0)) {
	case IP_VERSION(4, 1, 0):
		isp_v4_1_0_set_isp_funcs(isp);
		break;
	case IP_VERSION(4, 1, 1):
		isp_v4_1_1_set_isp_funcs(isp);
		break;
	default:
		return -EINVAL;
	}

	isp->adev = adev;
	isp->parent = adev->dev;

	/*
	 * Propagate the real firmware-load error instead of masking every
	 * failure (e.g. -ENOMEM) as -ENOENT.
	 */
	r = isp_load_fw_by_psp(adev);
	if (r) {
		DRM_DEBUG_DRIVER("%s: isp fw load failed (%d)\n", __func__, r);
		return r;
	}

	return 0;
}
/* The ISP block exposes no idle status to poll here; always report idle. */
static bool isp_is_idle(struct amdgpu_ip_block *ip_block)
{
	return true;
}
/* Clockgating is not managed from this IP block; accept any state. */
static int isp_set_clockgating_state(struct amdgpu_ip_block *ip_block,
				     enum amd_clockgating_state state)
{
	return 0;
}
/* Powergating is not managed from this IP block; accept any state. */
static int isp_set_powergating_state(struct amdgpu_ip_block *ip_block,
				     enum amd_powergating_state state)
{
	return 0;
}
/**
 * is_valid_isp_device - Verify the ISP platform device belongs to this GPU.
 * @isp_parent: parent device of the ISP platform device
 * @amdgpu_dev: device of the owning amdgpu instance
 *
 * Return: 0 when the ISP device hangs off @amdgpu_dev, -EINVAL otherwise.
 */
static int is_valid_isp_device(struct device *isp_parent, struct device *amdgpu_dev)
{
	return (isp_parent == amdgpu_dev) ? 0 : -EINVAL;
}
/**
 * isp_user_buffer_alloc - Import a dma-buf and map it into the GPU GART.
 * @dev: ISP platform device (MFD child of the amdgpu device)
 * @dmabuf: dma-buf to import; ownership stays with the caller
 * @buf_obj: out, opaque buffer handle; release with isp_user_buffer_free()
 * @buf_addr: out, GPU (GART) address of the mapped buffer
 *
 * Return: 0 on success, -ENODEV/-EINVAL on bad device wiring or arguments,
 * or the error from amdgpu_bo_create_isp_user().
 */
int isp_user_buffer_alloc(struct device *dev, void *dmabuf,
			  void **buf_obj, u64 *buf_addr)
{
	struct platform_device *ispdev = to_platform_device(dev);
	const struct isp_platform_data *isp_pdata;
	struct amdgpu_device *adev;
	struct mfd_cell *mfd_cell;
	struct amdgpu_bo *bo;
	u64 gpu_addr;
	int ret;

	if (WARN_ON(!ispdev))
		return -ENODEV;

	if (WARN_ON(!buf_obj))
		return -EINVAL;

	if (WARN_ON(!buf_addr))
		return -EINVAL;

	/* &mfd_cell[0] is just the array base; test the pointer directly. */
	mfd_cell = ispdev->mfd_cell;
	if (!mfd_cell)
		return -ENODEV;

	/* Guard against an MFD cell registered without platform data. */
	isp_pdata = mfd_cell->platform_data;
	if (!isp_pdata)
		return -ENODEV;

	adev = isp_pdata->adev;

	ret = is_valid_isp_device(ispdev->dev.parent, adev->dev);
	if (ret)
		return ret;

	ret = amdgpu_bo_create_isp_user(adev, dmabuf,
					AMDGPU_GEM_DOMAIN_GTT, &bo, &gpu_addr);
	if (ret) {
		drm_err(&adev->ddev, "failed to alloc gart user buffer (%d)", ret);
		return ret;
	}

	*buf_obj = (void *)bo;
	*buf_addr = gpu_addr;

	return 0;
}
EXPORT_SYMBOL(isp_user_buffer_alloc);
/**
 * isp_user_buffer_free - Release a buffer from isp_user_buffer_alloc().
 * @buf_obj: opaque handle returned via the @buf_obj out-parameter
 */
void isp_user_buffer_free(void *buf_obj)
{
	amdgpu_bo_free_isp_user(buf_obj);
}
EXPORT_SYMBOL(isp_user_buffer_free);
/**
 * isp_kernel_buffer_alloc - Allocate a kernel-mapped GART buffer for the ISP.
 * @dev: ISP platform device (MFD child of the amdgpu device)
 * @size: requested buffer size in bytes
 * @buf_obj: out, opaque buffer handle; release with isp_kernel_buffer_free()
 * @gpu_addr: out, GPU (GART) address of the buffer
 * @cpu_addr: out, kernel virtual address of the buffer
 *
 * Return: 0 on success, -ENODEV/-EINVAL on bad device wiring or arguments,
 * or the error from amdgpu_bo_create_kernel().
 */
int isp_kernel_buffer_alloc(struct device *dev, u64 size,
			    void **buf_obj, u64 *gpu_addr, void **cpu_addr)
{
	struct platform_device *ispdev = to_platform_device(dev);
	struct amdgpu_bo **bo = (struct amdgpu_bo **)buf_obj;
	const struct isp_platform_data *isp_pdata;
	struct amdgpu_device *adev;
	struct mfd_cell *mfd_cell;
	int ret;

	if (WARN_ON(!ispdev))
		return -ENODEV;

	if (WARN_ON(!buf_obj))
		return -EINVAL;

	if (WARN_ON(!gpu_addr))
		return -EINVAL;

	if (WARN_ON(!cpu_addr))
		return -EINVAL;

	/* &mfd_cell[0] is just the array base; test the pointer directly. */
	mfd_cell = ispdev->mfd_cell;
	if (!mfd_cell)
		return -ENODEV;

	/* Guard against an MFD cell registered without platform data. */
	isp_pdata = mfd_cell->platform_data;
	if (!isp_pdata)
		return -ENODEV;

	adev = isp_pdata->adev;

	ret = is_valid_isp_device(ispdev->dev.parent, adev->dev);
	if (ret)
		return ret;

	*bo = NULL;

	/*
	 * cpu_addr was already validated non-NULL above, so checking it again
	 * here (as "!cpu_addr || ret" used to) was dead code that could even
	 * have returned 0 after logging an error; check only ret.
	 */
	ret = amdgpu_bo_create_kernel(adev,
				      size,
				      ISP_MC_ADDR_ALIGN,
				      AMDGPU_GEM_DOMAIN_GTT,
				      bo,
				      gpu_addr,
				      cpu_addr);
	if (ret) {
		drm_err(&adev->ddev, "failed to alloc gart kernel buffer (%d)", ret);
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL(isp_kernel_buffer_alloc);
/**
 * isp_kernel_buffer_free - Free a buffer from isp_kernel_buffer_alloc().
 * @buf_obj: opaque handle filled in by isp_kernel_buffer_alloc()
 * @gpu_addr: GPU address out-parameter passed at allocation time
 * @cpu_addr: CPU address out-parameter passed at allocation time
 */
void isp_kernel_buffer_free(void **buf_obj, u64 *gpu_addr, void **cpu_addr)
{
	/* Recover the amdgpu_bo handle hidden behind the opaque pointer. */
	struct amdgpu_bo **bo = (struct amdgpu_bo **)buf_obj;
	amdgpu_bo_free_kernel(bo, gpu_addr, cpu_addr);
}
EXPORT_SYMBOL(isp_kernel_buffer_free);
/**
 * isp_resume - Resume the ISP via the IP-version-specific hook.
 * @ip_block: amdgpu IP block instance for the ISP
 *
 * Return: result of the version hook, or -ENODEV when no hook is set.
 */
static int isp_resume(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_isp *isp = &ip_block->adev->isp;

	if (!isp->funcs->hw_resume)
		return -ENODEV;

	return isp->funcs->hw_resume(isp);
}
/**
 * isp_suspend - Suspend the ISP via the IP-version-specific hook.
 * @ip_block: amdgpu IP block instance for the ISP
 *
 * Return: result of the version hook, or -ENODEV when no hook is set.
 */
static int isp_suspend(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_isp *isp = &ip_block->adev->isp;

	if (!isp->funcs->hw_suspend)
		return -ENODEV;

	return isp->funcs->hw_suspend(isp);
}
/* amd_ip_funcs table shared by all ISP IP versions in this file. */
static const struct amd_ip_funcs isp_ip_funcs = {
	.name = "isp_ip",
	.early_init = isp_early_init,
	.hw_init = isp_hw_init,
	.hw_fini = isp_hw_fini,
	.is_idle = isp_is_idle,
	.suspend = isp_suspend,
	.resume = isp_resume,
	.set_clockgating_state = isp_set_clockgating_state,
	.set_powergating_state = isp_set_powergating_state,
};
/* IP block descriptor for ISP v4.1.0; hooks are shared via isp_ip_funcs. */
const struct amdgpu_ip_block_version isp_v4_1_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_ISP,
	.major = 4,
	.minor = 1,
	.rev = 0,
	.funcs = &isp_ip_funcs,
};
/* IP block descriptor for ISP v4.1.1; hooks are shared via isp_ip_funcs. */
const struct amdgpu_ip_block_version isp_v4_1_1_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_ISP,
	.major = 4,
	.minor = 1,
	.rev = 1,
	.funcs = &isp_ip_funcs,
};