#include "vsp1_vspx.h"

#include <linux/cleanup.h>
#include <linux/container_of.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <media/media-entity.h>
#include <media/v4l2-subdev.h>
#include <media/vsp1.h>

#include "vsp1_dl.h"
#include "vsp1_iif.h"
#include "vsp1_pipe.h"
#include "vsp1_rwpf.h"
/**
 * struct vsp1_vspx_pipeline - State of the VSPX pipeline driven by the ISP
 * @pipe: the VSP1 pipeline
 * @partition: the single pre-computed partition used by the pipeline
 * @mutex: serializes vsp1_isp_start_streaming() and vsp1_isp_stop_streaming()
 * @lock: protects @enabled (taken with the irq/irqsave variants, so the flag
 *        may also be accessed from atomic context)
 * @enabled: whether streaming has been started and not yet stopped
 * @vspx_frame_end: ISP callback invoked on pipeline frame-end completion
 * @frame_end_data: opaque context passed back through @vspx_frame_end
 */
struct vsp1_vspx_pipeline {
	struct vsp1_pipeline pipe;
	struct vsp1_partition partition;
	struct mutex mutex;
	spinlock_t lock;
	bool enabled;

	void (*vspx_frame_end)(void *frame_end_data);
	void *frame_end_data;
};
/* Return the VSPX pipeline that embeds the generic @pipe. */
static inline struct vsp1_vspx_pipeline *
to_vsp1_vspx_pipeline(struct vsp1_pipeline *pipe)
{
	struct vsp1_vspx_pipeline *vspx_pipe;

	vspx_pipe = container_of(pipe, struct vsp1_vspx_pipeline, pipe);

	return vspx_pipe;
}
/**
 * struct vsp1_vspx - VSPX device context
 * @vsp1: backpointer to the owning VSP1 device
 * @pipe: the single VSPX pipeline instance used for ISP jobs
 */
struct vsp1_vspx {
	struct vsp1_device *vsp1;
	struct vsp1_vspx_pipeline pipe;
};
/*
 * Translate the ISP pixel format to an equivalent VSPX one and apply it,
 * along with the frame size, to the sink pad of the given [RW]PF entity.
 *
 * Returns 0 on success or -EINVAL for an unsupported ISP format.
 */
static int vsp1_vspx_rwpf_set_subdev_fmt(struct vsp1_device *vsp1,
					 struct vsp1_rwpf *rwpf,
					 u32 isp_fourcc,
					 unsigned int width,
					 unsigned int height)
{
	struct vsp1_entity *ent = &rwpf->entity;
	struct v4l2_subdev_format format;
	u32 vspx_fourcc;

	/*
	 * Map each supported ISP format to the VSPX format of identical
	 * bytes-per-pixel used to move the data through the pipeline.
	 */
	switch (isp_fourcc) {
	case V4L2_PIX_FMT_GREY:
		vspx_fourcc = V4L2_PIX_FMT_RGB332;
		break;
	case V4L2_PIX_FMT_Y10:
	case V4L2_PIX_FMT_Y12:
	case V4L2_PIX_FMT_Y16:
		vspx_fourcc = V4L2_PIX_FMT_RGB565;
		break;
	case V4L2_META_FMT_GENERIC_8:
		vspx_fourcc = V4L2_PIX_FMT_XBGR32;
		break;
	default:
		return -EINVAL;
	}

	rwpf->fmtinfo = vsp1_get_format_info(vsp1, vspx_fourcc);

	format = (struct v4l2_subdev_format) {
		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
		.pad = RWPF_PAD_SINK,
		.format = {
			.width = width,
			.height = height,
			.field = V4L2_FIELD_NONE,
			.code = rwpf->fmtinfo->mbus,
		},
	};

	return v4l2_subdev_call(&ent->subdev, pad, set_fmt, NULL, &format);
}
/*
 * Configure the VSPX pipeline for one DMA transfer: program the formats of
 * RPF0 and the WPF output, compute the single partition, set up the RPF0
 * memory address and routing (towards the IIF pad selected by
 * @iif_sink_pad), and fill the display list @dl / body @dlb accordingly.
 *
 * Returns 0 on success or a negative error from the subdev format setup.
 */
static int vsp1_vspx_pipeline_configure(struct vsp1_device *vsp1,
					dma_addr_t addr, u32 isp_fourcc,
					unsigned int width, unsigned int height,
					unsigned int stride,
					unsigned int iif_sink_pad,
					struct vsp1_dl_list *dl,
					struct vsp1_dl_body *dlb)
{
	struct vsp1_vspx_pipeline *vspx_pipe = &vsp1->vspx->pipe;
	struct vsp1_pipeline *pipe = &vspx_pipe->pipe;
	struct vsp1_rwpf *rpf0 = pipe->inputs[0];
	int ret;

	/* Apply the same frame format to the pipeline input and output. */
	ret = vsp1_vspx_rwpf_set_subdev_fmt(vsp1, rpf0, isp_fourcc, width,
					    height);
	if (ret)
		return ret;

	ret = vsp1_vspx_rwpf_set_subdev_fmt(vsp1, pipe->output, isp_fourcc,
					    width, height);
	if (ret)
		return ret;

	/* The pipeline uses a single partition covering the whole width. */
	vsp1_pipeline_calculate_partition(pipe, &pipe->part_table[0], width, 0);

	/* Single-plane source buffer at @addr with the given line stride. */
	rpf0->format.plane_fmt[0].bytesperline = stride;
	rpf0->format.num_planes = 1;
	rpf0->mem.addr[0] = addr;

	/* Route RPF0 to the requested IIF sink pad (image or config). */
	rpf0->entity.sink_pad = iif_sink_pad;

	vsp1_entity_route_setup(&rpf0->entity, pipe, dlb);
	vsp1_entity_configure_stream(&rpf0->entity, rpf0->entity.state, pipe,
				     dl, dlb);
	vsp1_entity_configure_partition(&rpf0->entity, pipe,
					&pipe->part_table[0], dl, dlb);

	return 0;
}
/*
 * Pipeline frame-end handler: mark the pipeline as stopped and forward the
 * completion to the ISP through the callback registered at start-streaming
 * time. The @completion argument is not used here.
 *
 * NOTE(review): @vspx_frame_end is read without holding vspx_pipe->lock;
 * this presumably relies on vsp1_isp_stop_streaming() quiescing the
 * hardware before clearing the callback - confirm against the stop path.
 */
static void vsp1_vspx_pipeline_frame_end(struct vsp1_pipeline *pipe,
					 unsigned int completion)
{
	struct vsp1_vspx_pipeline *vspx_pipe = to_vsp1_vspx_pipeline(pipe);

	/* irqsave variant: this handler may run in atomic context - TODO
	 * confirm caller context.
	 */
	scoped_guard(spinlock_irqsave, &pipe->irqlock) {
		pipe->state = VSP1_PIPELINE_STOPPED;
	}

	if (vspx_pipe->vspx_frame_end)
		vspx_pipe->vspx_frame_end(vspx_pipe->frame_end_data);
}
/**
 * vsp1_isp_init - Check that the VSP1 device is probed and usable by the ISP
 * @dev: the VSP1 device
 *
 * Return: 0 when the VSP1 driver data is available, -EPROBE_DEFER otherwise.
 */
int vsp1_isp_init(struct device *dev)
{
	if (!dev_get_drvdata(dev))
		return -EPROBE_DEFER;

	return 0;
}
EXPORT_SYMBOL_GPL(vsp1_isp_init);
/**
 * vsp1_isp_get_bus_master - Retrieve the device to use for DMA operations
 * @dev: the VSP1 device
 *
 * Return: the VSP1 bus master device, or ERR_PTR(-ENODEV) when the VSP1
 * driver data is not available.
 */
struct device *vsp1_isp_get_bus_master(struct device *dev)
{
	struct vsp1_device *vsp1 = dev_get_drvdata(dev);

	return vsp1 ? vsp1->bus_master : ERR_PTR(-ENODEV);
}
EXPORT_SYMBOL_GPL(vsp1_isp_get_bus_master);
/**
 * vsp1_isp_alloc_buffer - Allocate a DMA-coherent buffer on behalf of the ISP
 * @dev: the VSP1 device
 * @size: the size of the buffer to allocate, in bytes
 * @buffer_desc: descriptor filled in with the CPU address, the DMA address
 *		 and the size of the allocated buffer
 *
 * The allocation is performed on the VSP1 bus master device so the buffer is
 * usable for VSPX DMA. Release it with vsp1_isp_free_buffer().
 *
 * Return: 0 on success, -ENODEV when no bus master device is available, or
 * -ENOMEM on allocation failure.
 */
int vsp1_isp_alloc_buffer(struct device *dev, size_t size,
			  struct vsp1_isp_buffer_desc *buffer_desc)
{
	struct device *bus_master = vsp1_isp_get_bus_master(dev);

	if (IS_ERR_OR_NULL(bus_master))
		return -ENODEV;

	buffer_desc->cpu_addr = dma_alloc_coherent(bus_master, size,
						   &buffer_desc->dma_addr,
						   GFP_KERNEL);
	if (!buffer_desc->cpu_addr)
		return -ENOMEM;

	buffer_desc->size = size;

	return 0;
}
EXPORT_SYMBOL_GPL(vsp1_isp_alloc_buffer);
/**
 * vsp1_isp_free_buffer - Release a buffer from vsp1_isp_alloc_buffer()
 * @dev: the VSP1 device
 * @buffer_desc: the descriptor of the buffer to release, as filled in by
 *		 vsp1_isp_alloc_buffer()
 *
 * Silently does nothing when the bus master device cannot be retrieved.
 */
void vsp1_isp_free_buffer(struct device *dev,
			  struct vsp1_isp_buffer_desc *buffer_desc)
{
	struct device *bus_master = vsp1_isp_get_bus_master(dev);

	if (IS_ERR_OR_NULL(bus_master))
		return;

	dma_free_coherent(bus_master, buffer_desc->size, buffer_desc->cpu_addr,
			  buffer_desc->dma_addr);
}
EXPORT_SYMBOL_GPL(vsp1_isp_free_buffer);
/**
 * vsp1_isp_start_streaming - Prepare the VSPX pipeline for streaming
 * @dev: the VSP1 device
 * @frame_end: the frame-end completion callback and its context data
 *
 * Register the ISP frame-end callback, power the device up and verify that
 * WPF0 is idle and not already reserved. On success the pipeline is flagged
 * as enabled; pair with vsp1_isp_stop_streaming().
 *
 * Return: 0 on success, -EINVAL when @frame_end is NULL, -EBUSY when
 * streaming is already enabled or WPF0 is unavailable, or a negative error
 * from vsp1_device_get().
 */
int vsp1_isp_start_streaming(struct device *dev,
			     struct vsp1_vspx_frame_end *frame_end)
{
	struct vsp1_device *vsp1 = dev_get_drvdata(dev);
	struct vsp1_vspx_pipeline *vspx_pipe = &vsp1->vspx->pipe;
	struct vsp1_pipeline *pipe = &vspx_pipe->pipe;
	u32 value;
	int ret;

	if (!frame_end)
		return -EINVAL;

	/* Serialize against vsp1_isp_stop_streaming(). */
	guard(mutex)(&vspx_pipe->mutex);

	/* Refuse a second start while already streaming. */
	scoped_guard(spinlock_irq, &vspx_pipe->lock) {
		if (vspx_pipe->enabled)
			return -EBUSY;
	}

	vspx_pipe->vspx_frame_end = frame_end->vspx_frame_end;
	vspx_pipe->frame_end_data = frame_end->frame_end_data;

	vsp1_pipeline_dump(pipe, "VSPX job");

	/* Power the device up before touching registers. */
	ret = vsp1_device_get(vsp1);
	if (ret < 0)
		return ret;

	/* WPF0 must not have a start command pending. */
	value = vsp1_read(vsp1, VI6_CMD(0));
	if (value & VI6_CMD_STRCMD) {
		dev_err(vsp1->dev,
			"%s: Starting of WPF0 already reserved\n", __func__);
		ret = -EBUSY;
		goto error_put;
	}

	/* WPF0 must be idle before the pipeline can be used. */
	value = vsp1_read(vsp1, VI6_STATUS);
	if (value & VI6_STATUS_SYS_ACT(0)) {
		dev_err(vsp1->dev,
			"%s: WPF0 has not entered idle state\n", __func__);
		ret = -EBUSY;
		goto error_put;
	}

	scoped_guard(spinlock_irq, &vspx_pipe->lock) {
		vspx_pipe->enabled = true;
	}

	return 0;

error_put:
	vsp1_device_put(vsp1);
	return ret;
}
EXPORT_SYMBOL_GPL(vsp1_isp_start_streaming);
/**
 * vsp1_isp_stop_streaming - Stop the VSPX pipeline
 * @dev: the VSP1 device
 *
 * Disable the pipeline, stop it, clear the frame-end callback, reset the
 * display list manager of the output WPF and release the device reference
 * taken by vsp1_isp_start_streaming(). A no-op when streaming was never
 * enabled.
 */
void vsp1_isp_stop_streaming(struct device *dev)
{
	struct vsp1_device *vsp1 = dev_get_drvdata(dev);
	struct vsp1_vspx_pipeline *vspx_pipe = &vsp1->vspx->pipe;
	struct vsp1_pipeline *pipe = &vspx_pipe->pipe;

	/* Serialize against vsp1_isp_start_streaming(). */
	guard(mutex)(&vspx_pipe->mutex);

	/* Clear the enable flag first so no new job can be committed. */
	scoped_guard(spinlock_irq, &vspx_pipe->lock) {
		if (!vspx_pipe->enabled)
			return;

		vspx_pipe->enabled = false;
	}

	WARN_ON_ONCE(vsp1_pipeline_stop(pipe));

	vspx_pipe->vspx_frame_end = NULL;
	vsp1_dlm_reset(pipe->output->dlm);
	vsp1_device_put(vsp1);
}
EXPORT_SYMBOL_GPL(vsp1_isp_stop_streaming);
/**
 * vsp1_isp_job_prepare - Build the display list(s) for an ISP job
 * @dev: the VSP1 device
 * @job: the job descriptor; on success job->dl holds the prepared display
 *	 list (or list chain), on failure it is set to NULL
 *
 * Program the IIF and WPF routing/stream configuration into the first
 * display list. When the job carries a list of configuration pairs, a first
 * transfer for the configuration data is programmed and a second display
 * list is allocated and chained for the image transfer; otherwise the single
 * list handles the image directly.
 *
 * Return: 0 on success, -ENOMEM when a display list cannot be allocated,
 * -EINVAL for an unsupported format or configuration size, or a negative
 * error from the pipeline configuration.
 */
int vsp1_isp_job_prepare(struct device *dev, struct vsp1_isp_job_desc *job)
{
	struct vsp1_device *vsp1 = dev_get_drvdata(dev);
	struct vsp1_vspx_pipeline *vspx_pipe = &vsp1->vspx->pipe;
	struct vsp1_pipeline *pipe = &vspx_pipe->pipe;
	const struct v4l2_pix_format_mplane *pix_mp;
	struct vsp1_dl_list *second_dl = NULL;
	struct vsp1_dl_body *dlb;
	struct vsp1_dl_list *dl;
	int ret;

	job->dl = vsp1_dl_list_get(pipe->output->dlm);
	if (!job->dl)
		return -ENOMEM;

	dl = job->dl;
	dlb = vsp1_dl_list_get_body0(dl);

	/* The IIF and WPF setup is shared by both transfers. */
	vsp1_entity_route_setup(pipe->iif, pipe, dlb);
	vsp1_entity_configure_stream(pipe->iif, pipe->iif->state, pipe,
				     dl, dlb);

	vsp1_entity_route_setup(&pipe->output->entity, pipe, dlb);
	vsp1_entity_configure_stream(&pipe->output->entity,
				     pipe->output->entity.state, pipe,
				     dl, dlb);

	if (job->config.pairs) {
		/*
		 * Configuration lists of 16 or fewer pairs are rejected -
		 * presumably a hardware constraint on the minimum transfer
		 * size; confirm against the VSPX documentation.
		 */
		if (job->config.pairs <= 16) {
			ret = -EINVAL;
			goto error_put_dl;
		}

		/*
		 * The config data is transferred as a single line of
		 * (pairs * 2 + 2) bytes through the IIF config pad.
		 */
		ret = vsp1_vspx_pipeline_configure(vsp1, job->config.mem,
						   V4L2_META_FMT_GENERIC_8,
						   job->config.pairs * 2 + 2, 1,
						   job->config.pairs * 2 + 2,
						   VSPX_IIF_SINK_PAD_CONFIG,
						   dl, dlb);
		if (ret)
			goto error_put_dl;

		/* The image transfer goes into a second, chained list. */
		second_dl = vsp1_dl_list_get(pipe->output->dlm);
		if (!second_dl) {
			ret = -ENOMEM;
			goto error_put_dl;
		}

		dl = second_dl;
		dlb = vsp1_dl_list_get_body0(dl);
	}

	pix_mp = &job->img.fmt;
	ret = vsp1_vspx_pipeline_configure(vsp1, job->img.mem,
					   pix_mp->pixelformat,
					   pix_mp->width, pix_mp->height,
					   pix_mp->plane_fmt[0].bytesperline,
					   VSPX_IIF_SINK_PAD_IMG, dl, dlb);
	if (ret)
		goto error_put_dl;

	if (second_dl)
		vsp1_dl_list_add_chain(job->dl, second_dl);

	return 0;

error_put_dl:
	if (second_dl)
		vsp1_dl_list_put(second_dl);
	vsp1_dl_list_put(job->dl);
	job->dl = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(vsp1_isp_job_prepare);
/**
 * vsp1_isp_job_run - Commit and run a prepared ISP job
 * @dev: the VSP1 device
 * @job: the job descriptor prepared by vsp1_isp_job_prepare(); job->dl is
 *	 consumed (set to NULL) on success
 *
 * Return: 0 on success, -EBUSY when WPF0 already has a command pending, or
 * -EINVAL when streaming is not enabled.
 */
int vsp1_isp_job_run(struct device *dev, struct vsp1_isp_job_desc *job)
{
	struct vsp1_device *vsp1 = dev_get_drvdata(dev);
	struct vsp1_vspx_pipeline *vspx_pipe = &vsp1->vspx->pipe;
	struct vsp1_pipeline *pipe = &vspx_pipe->pipe;
	u32 value;

	/* WPF0 must have no pending command before committing a new job. */
	value = vsp1_read(vsp1, VI6_CMD(0));
	if (value) {
		dev_err(vsp1->dev,
			"%s: Starting of WPF0 already reserved\n", __func__);
		return -EBUSY;
	}

	/*
	 * Commit under vspx_pipe->lock so the enabled check and the display
	 * list commit cannot race with vsp1_isp_stop_streaming() clearing
	 * the flag.
	 */
	scoped_guard(spinlock_irqsave, &vspx_pipe->lock) {
		if (!vspx_pipe->enabled)
			return -EINVAL;

		vsp1_dl_list_commit(job->dl, 0);
		job->dl = NULL;
	}

	scoped_guard(spinlock_irqsave, &pipe->irqlock) {
		vsp1_pipeline_run(pipe);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(vsp1_isp_job_run);
/**
 * vsp1_isp_job_release - Release a prepared job that will not be run
 * @dev: the VSP1 device (unused)
 * @job: the job whose display list is released
 *
 * NOTE(review): job->dl is NULL after a successful vsp1_isp_job_run();
 * presumably vsp1_dl_list_put() tolerates a NULL list - confirm.
 */
void vsp1_isp_job_release(struct device *dev,
			  struct vsp1_isp_job_desc *job)
{
	vsp1_dl_list_put(job->dl);
}
EXPORT_SYMBOL_GPL(vsp1_isp_job_release);
/**
 * vsp1_vspx_init - Allocate and initialize the VSPX context
 * @vsp1: the VSP1 device
 *
 * Build the fixed RPF0 -> IIF -> WPF0 pipeline used for all ISP jobs and
 * initialize its locks and single-partition table. The allocation is
 * device-managed, so only the mutex needs explicit cleanup (see
 * vsp1_vspx_cleanup()).
 *
 * Return: 0 on success or -ENOMEM on allocation failure.
 */
int vsp1_vspx_init(struct vsp1_device *vsp1)
{
	struct vsp1_vspx_pipeline *vspx_pipe;
	struct vsp1_pipeline *pipe;

	vsp1->vspx = devm_kzalloc(vsp1->dev, sizeof(*vsp1->vspx), GFP_KERNEL);
	if (!vsp1->vspx)
		return -ENOMEM;

	vsp1->vspx->vsp1 = vsp1;

	vspx_pipe = &vsp1->vspx->pipe;
	vspx_pipe->enabled = false;

	pipe = &vspx_pipe->pipe;

	vsp1_pipeline_init(pipe);

	/* A single partition spanning the whole frame is always used. */
	pipe->partitions = 1;
	pipe->part_table = &vspx_pipe->partition;
	pipe->interlaced = false;
	pipe->frame_end = vsp1_vspx_pipeline_frame_end;

	mutex_init(&vspx_pipe->mutex);
	spin_lock_init(&vspx_pipe->lock);

	/* Hard-wire the pipeline: RPF0 feeds the IIF. */
	pipe->inputs[0] = vsp1->rpf[0];
	pipe->inputs[0]->entity.pipe = pipe;
	pipe->inputs[0]->entity.sink = &vsp1->iif->entity;
	list_add_tail(&pipe->inputs[0]->entity.list_pipe, &pipe->entities);

	/* The IIF feeds the WPF0 sink pad. */
	pipe->iif = &vsp1->iif->entity;
	pipe->iif->pipe = pipe;
	pipe->iif->sink = &vsp1->wpf[0]->entity;
	pipe->iif->sink_pad = RWPF_PAD_SINK;
	list_add_tail(&pipe->iif->list_pipe, &pipe->entities);

	/* WPF0 is the pipeline output. */
	pipe->output = vsp1->wpf[0];
	pipe->output->entity.pipe = pipe;
	list_add_tail(&pipe->output->entity.list_pipe, &pipe->entities);

	return 0;
}
/**
 * vsp1_vspx_cleanup - Release VSPX resources
 * @vsp1: the VSP1 device
 *
 * Only the mutex needs destroying; the context itself was allocated with
 * devm_kzalloc() in vsp1_vspx_init() and is freed by the device core.
 */
void vsp1_vspx_cleanup(struct vsp1_device *vsp1)
{
	struct vsp1_vspx_pipeline *vspx_pipe = &vsp1->vspx->pipe;

	mutex_destroy(&vspx_pipe->mutex);
}