#ifdef CONFIG_ARM_ARCH_TIMER
#include <asm/arch_timer.h>
#endif
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/pagemap.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/time64.h>
#include <drm/drm_auth.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_drv.h>
#include <drm/drm_exec.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_print.h>
#include <drm/drm_syncobj.h>
#include <drm/drm_utils.h>
#include <drm/gpu_scheduler.h>
#include <drm/panthor_drm.h>
#include "panthor_devfreq.h"
#include "panthor_device.h"
#include "panthor_fw.h"
#include "panthor_gem.h"
#include "panthor_gpu.h"
#include "panthor_heap.h"
#include "panthor_mmu.h"
#include "panthor_regs.h"
#include "panthor_sched.h"
/*
 * panthor_set_uobj() - Copy a kernel object to a userspace-provided object.
 * @usr_ptr: User pointer of the destination object.
 * @usr_size: Size of the userspace object.
 * @min_size: Minimum size callers must provide (mandatory fields).
 * @kern_size: Size of the kernel-side object.
 * @in: Kernel object to copy out.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
static int
panthor_set_uobj(u64 usr_ptr, u32 usr_size, u32 min_size, u32 kern_size, const void *in)
{
	void __user *out = u64_to_user_ptr(usr_ptr);
	u32 copy_len = min_t(u32, usr_size, kern_size);

	/* The user buffer must at least cover the mandatory fields. */
	if (usr_size < min_size)
		return -EINVAL;

	if (copy_to_user(out, in, copy_len))
		return -EFAULT;

	/*
	 * If the user object is bigger than the kernel one, zero out the
	 * trailing bytes so userspace never sees stale data.
	 */
	if (usr_size > kern_size &&
	    clear_user(u64_to_user_ptr(usr_ptr + kern_size), usr_size - kern_size))
		return -EFAULT;

	return 0;
}
/*
 * panthor_get_uobj_array() - Copy a userspace object array to kernel memory.
 * @in: User object array descriptor (pointer, count, stride).
 * @min_stride: Minimum array stride (mandatory-field coverage).
 * @obj_size: Size of the kernel-side element.
 *
 * Return: a kvmalloc'ed array the caller must kvfree(), NULL for an empty
 * array, or an ERR_PTR() on failure.
 */
static void *
panthor_get_uobj_array(const struct drm_panthor_obj_array *in, u32 min_stride,
		       u32 obj_size)
{
	int ret = 0;
	void *out_alloc;

	/* Empty array: nothing to copy, callers treat NULL as "no elements". */
	if (!in->count)
		return NULL;

	/* The user stride must cover at least the mandatory fields. */
	if (in->stride < min_stride)
		return ERR_PTR(-EINVAL);

	/* kvmalloc_array() checks count * obj_size for overflow. */
	out_alloc = kvmalloc_array(in->count, obj_size, GFP_KERNEL);
	if (!out_alloc)
		return ERR_PTR(-ENOMEM);

	if (obj_size == in->stride) {
		/* Matching strides: copy the whole array in one shot. */
		if (copy_from_user(out_alloc, u64_to_user_ptr(in->array),
				   (unsigned long)obj_size * in->count))
			ret = -EFAULT;
	} else {
		/*
		 * Mismatching strides: copy element by element, letting
		 * copy_struct_from_user() zero-extend short elements and
		 * reject non-zero trailing bytes in larger ones.
		 */
		void __user *in_ptr = u64_to_user_ptr(in->array);
		void *out_ptr = out_alloc;

		for (u32 i = 0; i < in->count; i++) {
			ret = copy_struct_from_user(out_ptr, obj_size, in_ptr, in->stride);
			if (ret)
				break;

			out_ptr += obj_size;
			in_ptr += in->stride;
		}
	}

	if (ret) {
		kvfree(out_alloc);
		return ERR_PTR(ret);
	}

	return out_alloc;
}
/*
 * Minimum size of a uAPI object: offset of its last mandatory field plus
 * the size of that field. Everything past it is optional extension space.
 */
#define PANTHOR_UOBJ_MIN_SIZE_INTERNAL(_typename, _last_mandatory_field) \
	(offsetof(_typename, _last_mandatory_field) + \
	 sizeof(((_typename *)NULL)->_last_mandatory_field))

/* _Generic() association entry mapping a uAPI type to its minimum size. */
#define PANTHOR_UOBJ_DECL(_typename, _last_mandatory_field) \
	_typename : PANTHOR_UOBJ_MIN_SIZE_INTERNAL(_typename, _last_mandatory_field)

/*
 * Compile-time lookup of an object's minimum uAPI size, dispatched on the
 * object's static type via _Generic().
 */
#define PANTHOR_UOBJ_MIN_SIZE(_obj_name) \
	_Generic(_obj_name, \
		 PANTHOR_UOBJ_DECL(struct drm_panthor_gpu_info, tiler_present), \
		 PANTHOR_UOBJ_DECL(struct drm_panthor_csif_info, pad), \
		 PANTHOR_UOBJ_DECL(struct drm_panthor_timestamp_info, current_timestamp), \
		 PANTHOR_UOBJ_DECL(struct drm_panthor_group_priorities_info, pad), \
		 PANTHOR_UOBJ_DECL(struct drm_panthor_sync_op, timeline_value), \
		 PANTHOR_UOBJ_DECL(struct drm_panthor_queue_submit, syncs), \
		 PANTHOR_UOBJ_DECL(struct drm_panthor_queue_create, ringbuf_size), \
		 PANTHOR_UOBJ_DECL(struct drm_panthor_vm_bind_op, syncs), \
		 PANTHOR_UOBJ_DECL(struct drm_panthor_bo_sync_op, size))

/* Copy a kernel object to a userspace object, size-checked. */
#define PANTHOR_UOBJ_SET(_dest_usr_ptr, _usr_size, _src_obj) \
	panthor_set_uobj(_dest_usr_ptr, _usr_size, \
			 PANTHOR_UOBJ_MIN_SIZE(_src_obj), \
			 sizeof(_src_obj), &(_src_obj))

/*
 * Copy a userspace object array into a freshly allocated kernel array,
 * assigning _dest_array on success. Evaluates to 0 or a negative error code.
 */
#define PANTHOR_UOBJ_GET_ARRAY(_dest_array, _uobj_array) \
	({ \
		typeof(_dest_array) _tmp; \
		_tmp = panthor_get_uobj_array(_uobj_array, \
					      PANTHOR_UOBJ_MIN_SIZE((_dest_array)[0]), \
					      sizeof((_dest_array)[0])); \
		if (!IS_ERR(_tmp)) \
			_dest_array = _tmp; \
		PTR_ERR_OR_ZERO(_tmp); \
	})
/**
 * struct panthor_sync_signal - Sync signal tracked by a submit context.
 */
struct panthor_sync_signal {
	/** @node: list_head to track signal ops within a submit operation. */
	struct list_head node;

	/** @handle: Syncobj handle this signal targets. */
	u32 handle;

	/** @point: Timeline point; 0 for binary syncobjs. */
	u64 point;

	/** @syncobj: Syncobj retrieved from the handle. */
	struct drm_syncobj *syncobj;

	/**
	 * @chain: Pre-allocated fence chain, used to append the job fence at
	 * @point; NULL for binary syncobjs.
	 */
	struct dma_fence_chain *chain;

	/** @fence: Fence to assign to the syncobj (or chain point). */
	struct dma_fence *fence;
};
/**
 * struct panthor_job_ctx - Job context, one per job in a submit batch.
 */
struct panthor_job_ctx {
	/** @job: Scheduler job backing this submission. */
	struct drm_sched_job *job;

	/** @syncops: Array of sync operations attached to the job. */
	struct drm_panthor_sync_op *syncops;

	/** @syncop_count: Number of entries in @syncops. */
	u32 syncop_count;
};
/**
 * struct panthor_submit_ctx - Transient context used while submitting a
 * batch of jobs (group submit or VM bind).
 */
struct panthor_submit_ctx {
	/** @file: DRM file this submission comes from. */
	struct drm_file *file;

	/** @signals: List of struct panthor_sync_signal collected from jobs. */
	struct list_head signals;

	/** @jobs: Array of job contexts, one per submitted job. */
	struct panthor_job_ctx *jobs;

	/** @job_count: Number of entries in @jobs. */
	u32 job_count;

	/** @exec: drm_exec context used to lock/prepare GEM reservations. */
	struct drm_exec exec;
};
/* All sync-op flag bits the driver understands; anything else is rejected. */
#define PANTHOR_SYNC_OP_FLAGS_MASK \
	(DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_MASK | DRM_PANTHOR_SYNC_OP_SIGNAL)
/* Return true if @sync_op is a signal operation. */
static bool sync_op_is_signal(const struct drm_panthor_sync_op *sync_op)
{
	return (sync_op->flags & DRM_PANTHOR_SYNC_OP_SIGNAL) != 0;
}
/* Return true if @sync_op is a wait operation (SIGNAL flag clear). */
static bool sync_op_is_wait(const struct drm_panthor_sync_op *sync_op)
{
	/* A sync op is either a signal or a wait: no dedicated wait flag. */
	return (sync_op->flags & DRM_PANTHOR_SYNC_OP_SIGNAL) == 0;
}
/*
 * panthor_check_sync_op() - Validate a sync operation.
 * @sync_op: Operation to validate.
 *
 * Return: 0 if the operation is valid, -EINVAL otherwise.
 */
static int
panthor_check_sync_op(const struct drm_panthor_sync_op *sync_op)
{
	/* Reject flags we don't know about. */
	if (sync_op->flags & ~PANTHOR_SYNC_OP_FLAGS_MASK)
		return -EINVAL;

	switch (sync_op->flags & DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_MASK) {
	case DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_SYNCOBJ:
		/* Binary syncobjs have no timeline point. */
		return sync_op->timeline_value ? -EINVAL : 0;

	case DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_TIMELINE_SYNCOBJ:
		return 0;

	default:
		return -EINVAL;
	}
}
/*
 * panthor_sync_signal_free() - Release a sync signal and what it holds.
 * @sig_sync: Signal to free. May be NULL or only partially initialized.
 *
 * This is called on error paths where the object may not be fully set up:
 * in panthor_submit_ctx_add_sync_signal(), the fence chain is allocated
 * before the syncobj lookup, so @syncobj can still be NULL here.
 */
static void
panthor_sync_signal_free(struct panthor_sync_signal *sig_sync)
{
	if (!sig_sync)
		return;

	/* drm_syncobj_put() is not NULL-safe, unlike the helpers below. */
	if (sig_sync->syncobj)
		drm_syncobj_put(sig_sync->syncobj);

	dma_fence_chain_free(sig_sync->chain);
	dma_fence_put(sig_sync->fence);
	kfree(sig_sync);
}
/*
 * panthor_submit_ctx_add_sync_signal() - Add a new sync signal to the context.
 * @ctx: Submit context.
 * @handle: Syncobj handle.
 * @point: Timeline point (0 for binary syncobjs).
 *
 * On success the signal is appended to @ctx->signals and freed later by
 * panthor_submit_ctx_cleanup().
 *
 * Return: 0 on success, a negative error code otherwise.
 */
static int
panthor_submit_ctx_add_sync_signal(struct panthor_submit_ctx *ctx, u32 handle, u64 point)
{
	struct panthor_sync_signal *sig_sync;
	struct dma_fence *cur_fence;
	int ret;

	sig_sync = kzalloc_obj(*sig_sync);
	if (!sig_sync)
		return -ENOMEM;

	sig_sync->handle = handle;
	sig_sync->point = point;

	/* Timeline syncobjs need a chain link pre-allocated for the new point. */
	if (point > 0) {
		sig_sync->chain = dma_fence_chain_alloc();
		if (!sig_sync->chain) {
			ret = -ENOMEM;
			goto err_free_sig_sync;
		}
	}

	/* Takes a syncobj ref, released in panthor_sync_signal_free(). */
	sig_sync->syncobj = drm_syncobj_find(ctx->file, handle);
	if (!sig_sync->syncobj) {
		ret = -EINVAL;
		goto err_free_sig_sync;
	}

	/*
	 * Retrieve the current fence, if any, so waits on this point keep
	 * working until the job fence replaces it. Best-effort: a missing
	 * fence is not an error at this stage.
	 */
	if (!drm_syncobj_find_fence(ctx->file, handle, point, 0, &cur_fence))
		sig_sync->fence = cur_fence;

	list_add_tail(&sig_sync->node, &ctx->signals);

	return 0;

err_free_sig_sync:
	panthor_sync_signal_free(sig_sync);
	return ret;
}
/*
 * panthor_submit_ctx_search_sync_signal() - Look up a sync signal in the
 * submit context.
 * @ctx: Submit context.
 * @handle: Syncobj handle.
 * @point: Timeline point.
 *
 * Return: the matching signal, or NULL if none was registered.
 */
static struct panthor_sync_signal *
panthor_submit_ctx_search_sync_signal(struct panthor_submit_ctx *ctx, u32 handle, u64 point)
{
	struct panthor_sync_signal *entry;

	/* Linear scan: submissions only carry a handful of signals. */
	list_for_each_entry(entry, &ctx->signals, node) {
		if (entry->handle != handle)
			continue;

		if (entry->point == point)
			return entry;
	}

	return NULL;
}
/*
 * panthor_submit_ctx_add_job() - Register a job at slot @idx of the context.
 * @ctx: Submit context.
 * @idx: Job slot index.
 * @job: Scheduler job (ownership transferred to the context even on error,
 *	 so panthor_submit_ctx_cleanup() releases it).
 * @syncs: User array of sync operations for this job.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
static int
panthor_submit_ctx_add_job(struct panthor_submit_ctx *ctx, u32 idx,
			   struct drm_sched_job *job,
			   const struct drm_panthor_obj_array *syncs)
{
	int ret;

	/* Store the job first so cleanup can release it even if the copy fails. */
	ctx->jobs[idx].job = job;

	ret = PANTHOR_UOBJ_GET_ARRAY(ctx->jobs[idx].syncops, syncs);
	if (ret)
		return ret;

	ctx->jobs[idx].syncop_count = syncs->count;
	return 0;
}
/*
 * panthor_submit_ctx_get_sync_signal() - Fetch or create a sync signal.
 * @ctx: Submit context.
 * @handle: Syncobj handle.
 * @point: Timeline point.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
static int
panthor_submit_ctx_get_sync_signal(struct panthor_submit_ctx *ctx, u32 handle, u64 point)
{
	/* Reuse an already-registered signal if one matches. */
	if (panthor_submit_ctx_search_sync_signal(ctx, handle, point))
		return 0;

	return panthor_submit_ctx_add_sync_signal(ctx, handle, point);
}
/*
 * panthor_submit_ctx_update_job_sync_signal_fences() - Update the signal
 * fences of a job's signal operations to point at the job's done fence.
 * @ctx: Submit context.
 * @job_idx: Index of the job to process.
 *
 * Must be called after drm_sched_job_arm(), which creates the finished fence.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
static int
panthor_submit_ctx_update_job_sync_signal_fences(struct panthor_submit_ctx *ctx,
						 u32 job_idx)
{
	struct panthor_device *ptdev = container_of(ctx->file->minor->dev,
						    struct panthor_device,
						    base);
	struct dma_fence *done_fence = &ctx->jobs[job_idx].job->s_fence->finished;
	const struct drm_panthor_sync_op *sync_ops = ctx->jobs[job_idx].syncops;
	u32 sync_op_count = ctx->jobs[job_idx].syncop_count;

	for (u32 i = 0; i < sync_op_count; i++) {
		struct dma_fence *old_fence;
		struct panthor_sync_signal *sig_sync;

		if (!sync_op_is_signal(&sync_ops[i]))
			continue;

		/* Must exist: collected earlier by collect_job_signal_ops(). */
		sig_sync = panthor_submit_ctx_search_sync_signal(ctx, sync_ops[i].handle,
								 sync_ops[i].timeline_value);
		if (drm_WARN_ON(&ptdev->base, !sig_sync))
			return -EINVAL;

		/* Swap in the job fence, dropping the previous one. */
		old_fence = sig_sync->fence;
		sig_sync->fence = dma_fence_get(done_fence);
		dma_fence_put(old_fence);

		if (drm_WARN_ON(&ptdev->base, !sig_sync->fence))
			return -EINVAL;
	}

	return 0;
}
/*
 * panthor_submit_ctx_collect_job_signal_ops() - Validate a job's signal ops
 * and register the corresponding sync signals in the context.
 * @ctx: Submit context.
 * @job_idx: Index of the job to process.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
static int
panthor_submit_ctx_collect_job_signal_ops(struct panthor_submit_ctx *ctx,
					  u32 job_idx)
{
	const struct drm_panthor_sync_op *sync_ops = ctx->jobs[job_idx].syncops;
	u32 sync_op_count = ctx->jobs[job_idx].syncop_count;

	for (u32 i = 0; i < sync_op_count; i++) {
		int ret;

		/* Wait operations are validated later, when deps are added. */
		if (!sync_op_is_signal(&sync_ops[i]))
			continue;

		ret = panthor_check_sync_op(&sync_ops[i]);
		if (ret)
			return ret;

		ret = panthor_submit_ctx_get_sync_signal(ctx,
							 sync_ops[i].handle,
							 sync_ops[i].timeline_value);
		if (ret)
			return ret;
	}

	return 0;
}
/*
 * panthor_submit_ctx_push_fences() - Install the collected fences into their
 * target syncobjs.
 * @ctx: Submit context.
 *
 * Called at the very end of a submission, once all jobs are pushed, so that
 * userspace only ever observes the final fences.
 */
static void
panthor_submit_ctx_push_fences(struct panthor_submit_ctx *ctx)
{
	struct panthor_sync_signal *sig_sync;

	list_for_each_entry(sig_sync, &ctx->signals, node) {
		if (sig_sync->chain) {
			/* Timeline syncobj: append the fence at the new point. */
			drm_syncobj_add_point(sig_sync->syncobj, sig_sync->chain,
					      sig_sync->fence, sig_sync->point);
			/* Ownership moved to the syncobj; don't free it in cleanup. */
			sig_sync->chain = NULL;
		} else {
			/* Binary syncobj: simply replace the current fence. */
			drm_syncobj_replace_fence(sig_sync->syncobj, sig_sync->fence);
		}
	}
}
/*
 * panthor_submit_ctx_add_sync_deps_to_job() - Turn a job's wait operations
 * into scheduler dependencies.
 * @ctx: Submit context.
 * @job_idx: Index of the job to process.
 *
 * Waits on points signaled by earlier jobs in the same batch resolve to the
 * in-context fence rather than re-querying the syncobj.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
static int
panthor_submit_ctx_add_sync_deps_to_job(struct panthor_submit_ctx *ctx,
					u32 job_idx)
{
	struct panthor_device *ptdev = container_of(ctx->file->minor->dev,
						    struct panthor_device,
						    base);
	const struct drm_panthor_sync_op *sync_ops = ctx->jobs[job_idx].syncops;
	struct drm_sched_job *job = ctx->jobs[job_idx].job;
	u32 sync_op_count = ctx->jobs[job_idx].syncop_count;
	int ret = 0;

	for (u32 i = 0; i < sync_op_count; i++) {
		struct panthor_sync_signal *sig_sync;
		struct dma_fence *fence;

		if (!sync_op_is_wait(&sync_ops[i]))
			continue;

		ret = panthor_check_sync_op(&sync_ops[i]);
		if (ret)
			return ret;

		/* Prefer the batch-local fence if this point is signaled here. */
		sig_sync = panthor_submit_ctx_search_sync_signal(ctx, sync_ops[i].handle,
								 sync_ops[i].timeline_value);
		if (sig_sync) {
			if (drm_WARN_ON(&ptdev->base, !sig_sync->fence))
				return -EINVAL;

			fence = dma_fence_get(sig_sync->fence);
		} else {
			ret = drm_syncobj_find_fence(ctx->file, sync_ops[i].handle,
						     sync_ops[i].timeline_value,
						     0, &fence);
			if (ret)
				return ret;
		}

		/* The scheduler takes ownership of our fence reference. */
		ret = drm_sched_job_add_dependency(job, fence);
		if (ret)
			return ret;
	}

	return 0;
}
/*
 * panthor_submit_ctx_collect_jobs_signal_ops() - Collect signal operations
 * for all jobs in the submit context.
 * @ctx: Submit context.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
static int
panthor_submit_ctx_collect_jobs_signal_ops(struct panthor_submit_ctx *ctx)
{
	u32 idx;
	int ret;

	for (idx = 0; idx < ctx->job_count; idx++) {
		ret = panthor_submit_ctx_collect_job_signal_ops(ctx, idx);
		if (ret)
			return ret;
	}

	return 0;
}
/*
 * panthor_submit_ctx_add_deps_and_arm_jobs() - Add dependencies to and arm
 * every job in the submit context.
 * @ctx: Submit context.
 *
 * Dependencies must be added before drm_sched_job_arm(); signal fences are
 * updated right after, since arming creates the finished fence.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
static int
panthor_submit_ctx_add_deps_and_arm_jobs(struct panthor_submit_ctx *ctx)
{
	for (u32 i = 0; i < ctx->job_count; i++) {
		int ret;

		ret = panthor_submit_ctx_add_sync_deps_to_job(ctx, i);
		if (ret)
			return ret;

		drm_sched_job_arm(ctx->jobs[i].job);

		/* Point the signal ops at the freshly-created finished fence. */
		ret = panthor_submit_ctx_update_job_sync_signal_fences(ctx, i);
		if (ret)
			return ret;
	}

	return 0;
}
/*
 * panthor_submit_ctx_push_jobs() - Push all armed jobs to their entities and
 * publish the signal fences.
 * @ctx: Submit context.
 * @upd_resvs: Callback updating a job's GEM reservations before the push.
 *
 * No failure allowed past this point: jobs are handed over to the scheduler.
 */
static void
panthor_submit_ctx_push_jobs(struct panthor_submit_ctx *ctx,
			     void (*upd_resvs)(struct drm_exec *, struct drm_sched_job *))
{
	for (u32 i = 0; i < ctx->job_count; i++) {
		upd_resvs(&ctx->exec, ctx->jobs[i].job);
		drm_sched_entity_push_job(ctx->jobs[i].job);

		/* Ownership transferred to the scheduler; don't put it in cleanup. */
		ctx->jobs[i].job = NULL;
	}

	panthor_submit_ctx_push_fences(ctx);
}
/*
 * panthor_submit_ctx_init() - Initialize a submission context.
 * @ctx: Context to initialize.
 * @file: DRM file this submission belongs to.
 * @job_count: Number of jobs in the batch.
 *
 * Must be balanced with panthor_submit_ctx_cleanup() even on failure of a
 * later step.
 *
 * Return: 0 on success, -ENOMEM otherwise.
 */
static int panthor_submit_ctx_init(struct panthor_submit_ctx *ctx,
				   struct drm_file *file, u32 job_count)
{
	/* Zeroed so cleanup can safely walk partially-filled slots. */
	ctx->jobs = kvmalloc_objs(*ctx->jobs, job_count,
				  GFP_KERNEL | __GFP_ZERO);
	if (!ctx->jobs)
		return -ENOMEM;

	ctx->file = file;
	ctx->job_count = job_count;
	INIT_LIST_HEAD(&ctx->signals);
	drm_exec_init(&ctx->exec,
		      DRM_EXEC_INTERRUPTIBLE_WAIT | DRM_EXEC_IGNORE_DUPLICATES,
		      0);
	return 0;
}
/*
 * panthor_submit_ctx_cleanup() - Cleanup a submission context.
 * @ctx: Context to clean up.
 * @job_put: Driver-specific put() for the jobs still owned by the context
 *	     (slots pushed to the scheduler were set to NULL; @job_put must
 *	     tolerate NULL).
 */
static void panthor_submit_ctx_cleanup(struct panthor_submit_ctx *ctx,
				       void (*job_put)(struct drm_sched_job *))
{
	struct panthor_sync_signal *sig_sync, *tmp;
	unsigned long i;

	drm_exec_fini(&ctx->exec);

	/* _safe variant: entries are freed while walking the list. */
	list_for_each_entry_safe(sig_sync, tmp, &ctx->signals, node)
		panthor_sync_signal_free(sig_sync);

	for (i = 0; i < ctx->job_count; i++) {
		job_put(ctx->jobs[i].job);
		kvfree(ctx->jobs[i].syncops);
	}

	kvfree(ctx->jobs);
}
/*
 * panthor_query_timestamp_info() - Fill the DEV_QUERY_TIMESTAMP_INFO reply.
 * @ptdev: Device.
 * @arg: Structure to fill.
 *
 * Resumes the device to read the timestamp registers, then drops the
 * runtime-PM reference.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
static int panthor_query_timestamp_info(struct panthor_device *ptdev,
					struct drm_panthor_timestamp_info *arg)
{
	int ret;

	ret = panthor_device_resume_and_get(ptdev);
	if (ret)
		return ret;

#ifdef CONFIG_ARM_ARCH_TIMER
	arg->timestamp_frequency = arch_timer_get_cntfrq();
#else
	/* No arch timer: frequency unknown, report 0. */
	arg->timestamp_frequency = 0;
#endif
	arg->current_timestamp = gpu_read64_counter(ptdev, GPU_TIMESTAMP);
	arg->timestamp_offset = gpu_read64(ptdev, GPU_TIMESTAMP_OFFSET);

	pm_runtime_put(ptdev->base.dev);
	return 0;
}
/*
 * group_priority_permit() - Check whether the caller may use @priority.
 * @file: DRM file issuing the request.
 * @priority: Requested group priority.
 *
 * Return: 0 if allowed, -EINVAL for an out-of-range priority, -EACCES if the
 * caller lacks the privilege for a high priority.
 */
static int group_priority_permit(struct drm_file *file,
				 u8 priority)
{
	/* Priorities above REALTIME don't exist. */
	if (priority > PANTHOR_GROUP_PRIORITY_REALTIME)
		return -EINVAL;

	/* Medium or low requires no privilege. */
	if (priority <= PANTHOR_GROUP_PRIORITY_MEDIUM)
		return 0;

	/* High/realtime needs CAP_SYS_NICE or DRM-master status. */
	if (!capable(CAP_SYS_NICE) && !drm_is_current_master(file))
		return -EACCES;

	return 0;
}
/*
 * panthor_query_group_priorities_info() - Build the mask of group priorities
 * the calling file is allowed to use.
 * @file: DRM file issuing the query.
 * @arg: Structure to fill.
 */
static void panthor_query_group_priorities_info(struct drm_file *file,
						struct drm_panthor_group_priorities_info *arg)
{
	int prio;

	/* Zero everything (including padding) before setting bits. */
	memset(arg, 0, sizeof(*arg));

	for (prio = PANTHOR_GROUP_PRIORITY_REALTIME; prio >= 0; prio--) {
		if (!group_priority_permit(file, prio))
			arg->allowed_mask |= BIT(prio);
	}
}
/*
 * panthor_ioctl_dev_query() - DEV_QUERY ioctl handler.
 * @ddev: DRM device.
 * @data: &struct drm_panthor_dev_query passed by userspace.
 * @file: DRM file.
 *
 * With a NULL pointer, only the size of the requested object is returned so
 * userspace can size its buffer; otherwise the object itself is copied out.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
static int panthor_ioctl_dev_query(struct drm_device *ddev, void *data, struct drm_file *file)
{
	struct panthor_device *ptdev = container_of(ddev, struct panthor_device, base);
	struct drm_panthor_dev_query *args = data;
	struct drm_panthor_timestamp_info timestamp_info;
	struct drm_panthor_group_priorities_info priorities_info;
	int ret;

	if (!args->pointer) {
		/* Size probe: report the object size, copy nothing. */
		switch (args->type) {
		case DRM_PANTHOR_DEV_QUERY_GPU_INFO:
			args->size = sizeof(ptdev->gpu_info);
			return 0;

		case DRM_PANTHOR_DEV_QUERY_CSIF_INFO:
			args->size = sizeof(ptdev->csif_info);
			return 0;

		case DRM_PANTHOR_DEV_QUERY_TIMESTAMP_INFO:
			args->size = sizeof(timestamp_info);
			return 0;

		case DRM_PANTHOR_DEV_QUERY_GROUP_PRIORITIES_INFO:
			args->size = sizeof(priorities_info);
			return 0;

		default:
			return -EINVAL;
		}
	}

	switch (args->type) {
	case DRM_PANTHOR_DEV_QUERY_GPU_INFO:
		return PANTHOR_UOBJ_SET(args->pointer, args->size, ptdev->gpu_info);

	case DRM_PANTHOR_DEV_QUERY_CSIF_INFO:
		return PANTHOR_UOBJ_SET(args->pointer, args->size, ptdev->csif_info);

	case DRM_PANTHOR_DEV_QUERY_TIMESTAMP_INFO:
		/*
		 * Fixed a mangled token here: the address-of operator in the
		 * call below had been corrupted into an HTML entity.
		 */
		ret = panthor_query_timestamp_info(ptdev, &timestamp_info);
		if (ret)
			return ret;

		return PANTHOR_UOBJ_SET(args->pointer, args->size, timestamp_info);

	case DRM_PANTHOR_DEV_QUERY_GROUP_PRIORITIES_INFO:
		panthor_query_group_priorities_info(file, &priorities_info);
		return PANTHOR_UOBJ_SET(args->pointer, args->size, priorities_info);

	default:
		return -EINVAL;
	}
}
/* No VM_CREATE flags defined yet; userspace must pass zero. */
#define PANTHOR_VM_CREATE_FLAGS 0
/*
 * panthor_ioctl_vm_create() - VM_CREATE ioctl handler.
 *
 * Creates a VM in the file's pool and returns its id in @args->id.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
static int panthor_ioctl_vm_create(struct drm_device *ddev, void *data,
				   struct drm_file *file)
{
	struct panthor_device *ptdev = container_of(ddev, struct panthor_device, base);
	struct panthor_file *pfile = file->driver_priv;
	struct drm_panthor_vm_create *args = data;
	int cookie, ret;

	/* Bail out if the device was unplugged. */
	if (!drm_dev_enter(ddev, &cookie))
		return -ENODEV;

	ret = panthor_vm_pool_create_vm(ptdev, pfile->vms, args);
	if (ret >= 0) {
		/* Positive return is the new VM id. */
		args->id = ret;
		ret = 0;
	}

	drm_dev_exit(cookie);
	return ret;
}
/*
 * panthor_ioctl_vm_destroy() - VM_DESTROY ioctl handler.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
static int panthor_ioctl_vm_destroy(struct drm_device *ddev, void *data,
				    struct drm_file *file)
{
	struct panthor_file *pfile = file->driver_priv;
	struct drm_panthor_vm_destroy *args = data;

	/* Padding must be zero for future extensibility. */
	if (args->pad)
		return -EINVAL;

	return panthor_vm_pool_destroy_vm(pfile->vms, args->id);
}
/* All BO creation flags userspace may pass. */
#define PANTHOR_BO_FLAGS (DRM_PANTHOR_BO_NO_MMAP | \
			  DRM_PANTHOR_BO_WB_MMAP)
/*
 * panthor_ioctl_bo_create() - BO_CREATE ioctl handler.
 *
 * Creates a GEM object, optionally bound exclusively to one VM, and returns
 * its handle in @args->handle.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
static int panthor_ioctl_bo_create(struct drm_device *ddev, void *data,
				   struct drm_file *file)
{
	struct panthor_file *pfile = file->driver_priv;
	struct drm_panthor_bo_create *args = data;
	struct panthor_vm *vm = NULL;
	int cookie, ret;

	if (!drm_dev_enter(ddev, &cookie))
		return -ENODEV;

	if (!args->size || args->pad ||
	    (args->flags & ~PANTHOR_BO_FLAGS)) {
		ret = -EINVAL;
		goto out_dev_exit;
	}

	/* WB_MMAP is meaningless on a BO that can't be mmap'ed. */
	if ((args->flags & DRM_PANTHOR_BO_NO_MMAP) &&
	    (args->flags & DRM_PANTHOR_BO_WB_MMAP)) {
		ret = -EINVAL;
		goto out_dev_exit;
	}

	if (args->exclusive_vm_id) {
		vm = panthor_vm_pool_get_vm(pfile->vms, args->exclusive_vm_id);
		if (!vm) {
			ret = -EINVAL;
			goto out_dev_exit;
		}
	}

	ret = panthor_gem_create_with_handle(file, ddev, vm, &args->size,
					     args->flags, &args->handle);

	/* panthor_vm_put() is NULL-safe; drops the ref taken above. */
	panthor_vm_put(vm);

out_dev_exit:
	drm_dev_exit(cookie);
	return ret;
}
/*
 * panthor_ioctl_bo_mmap_offset() - BO_MMAP_OFFSET ioctl handler.
 *
 * Returns the fake offset userspace must pass to mmap() for this BO.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
static int panthor_ioctl_bo_mmap_offset(struct drm_device *ddev, void *data,
					struct drm_file *file)
{
	struct drm_panthor_bo_mmap_offset *args = data;
	struct panthor_gem_object *bo;
	struct drm_gem_object *obj;
	int ret;

	if (args->pad)
		return -EINVAL;

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	bo = to_panthor_bo(obj);

	/* Honor the NO_MMAP flag set at creation time. */
	if (bo->flags & DRM_PANTHOR_BO_NO_MMAP) {
		ret = -EPERM;
		goto out;
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto out;

	args->offset = drm_vma_node_offset_addr(&obj->vma_node);

out:
	drm_gem_object_put(obj);
	return ret;
}
/*
 * panthor_ioctl_group_submit() - GROUP_SUBMIT ioctl handler.
 *
 * Submits a batch of queue jobs to a scheduling group: create the jobs,
 * collect signal ops, lock the VM's BO reservations, add dependencies, arm
 * and push the jobs, then publish the signal fences.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
static int panthor_ioctl_group_submit(struct drm_device *ddev, void *data,
				      struct drm_file *file)
{
	struct panthor_file *pfile = file->driver_priv;
	struct drm_panthor_group_submit *args = data;
	struct drm_panthor_queue_submit *jobs_args;
	struct panthor_submit_ctx ctx;
	int ret = 0, cookie;

	if (args->pad)
		return -EINVAL;

	if (!drm_dev_enter(ddev, &cookie))
		return -ENODEV;

	ret = PANTHOR_UOBJ_GET_ARRAY(jobs_args, &args->queue_submits);
	if (ret)
		goto out_dev_exit;

	ret = panthor_submit_ctx_init(&ctx, file, args->queue_submits.count);
	if (ret)
		goto out_free_jobs_args;

	/* Create and register all jobs before touching any syncobj. */
	for (u32 i = 0; i < args->queue_submits.count; i++) {
		const struct drm_panthor_queue_submit *qsubmit = &jobs_args[i];
		struct drm_sched_job *job;

		job = panthor_job_create(pfile, args->group_handle, qsubmit,
					 file->client_id);
		if (IS_ERR(job)) {
			ret = PTR_ERR(job);
			goto out_cleanup_submit_ctx;
		}

		/* Takes ownership of @job, even on failure. */
		ret = panthor_submit_ctx_add_job(&ctx, i, job, &qsubmit->syncs);
		if (ret)
			goto out_cleanup_submit_ctx;
	}

	ret = panthor_submit_ctx_collect_jobs_signal_ops(&ctx);
	if (ret)
		goto out_cleanup_submit_ctx;

	if (args->queue_submits.count > 0) {
		/* All jobs target the same group, hence the same VM. */
		struct panthor_vm *vm = panthor_job_vm(ctx.jobs[0].job);

		drm_exec_until_all_locked(&ctx.exec) {
			ret = panthor_vm_prepare_mapped_bos_resvs(&ctx.exec, vm,
								  args->queue_submits.count);
		}

		if (ret)
			goto out_cleanup_submit_ctx;
	}

	ret = panthor_submit_ctx_add_deps_and_arm_jobs(&ctx);
	if (ret)
		goto out_cleanup_submit_ctx;

	/* Past this point, submission cannot fail. */
	panthor_submit_ctx_push_jobs(&ctx, panthor_job_update_resvs);

out_cleanup_submit_ctx:
	panthor_submit_ctx_cleanup(&ctx, panthor_job_put);

out_free_jobs_args:
	kvfree(jobs_args);

out_dev_exit:
	drm_dev_exit(cookie);
	return ret;
}
/*
 * panthor_ioctl_group_destroy() - GROUP_DESTROY ioctl handler.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
static int panthor_ioctl_group_destroy(struct drm_device *ddev, void *data,
				       struct drm_file *file)
{
	struct panthor_file *pfile = file->driver_priv;
	struct drm_panthor_group_destroy *args = data;

	if (args->pad)
		return -EINVAL;

	return panthor_group_destroy(pfile, args->group_handle);
}
/*
 * panthor_ioctl_group_create() - GROUP_CREATE ioctl handler.
 *
 * Creates a scheduling group with the requested queues and priority, and
 * returns the group handle in @args->group_handle.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
static int panthor_ioctl_group_create(struct drm_device *ddev, void *data,
				      struct drm_file *file)
{
	struct panthor_file *pfile = file->driver_priv;
	struct drm_panthor_group_create *args = data;
	struct drm_panthor_queue_create *queue_args;
	int ret;

	/* A group needs at least one queue, and at most MAX_CS_PER_CSG. */
	if (!args->queues.count || args->queues.count > MAX_CS_PER_CSG)
		return -EINVAL;

	ret = PANTHOR_UOBJ_GET_ARRAY(queue_args, &args->queues);
	if (ret)
		return ret;

	/* High priorities are privileged; check before creating anything. */
	ret = group_priority_permit(file, args->priority);
	if (ret)
		goto out;

	ret = panthor_group_create(pfile, args, queue_args, file->client_id);
	if (ret < 0)
		goto out;

	/* Positive return is the new group handle. */
	args->group_handle = ret;
	ret = 0;

out:
	kvfree(queue_args);
	return ret;
}
/*
 * panthor_ioctl_group_get_state() - GROUP_GET_STATE ioctl handler.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
static int panthor_ioctl_group_get_state(struct drm_device *ddev, void *data,
					 struct drm_file *file)
{
	struct panthor_file *pfile = file->driver_priv;
	struct drm_panthor_group_get_state *args = data;

	return panthor_group_get_state(pfile, args);
}
/*
 * panthor_ioctl_tiler_heap_create() - TILER_HEAP_CREATE ioctl handler.
 *
 * Creates a tiler heap in the VM's heap pool. The returned handle encodes
 * the VM id in the upper 16 bits and the heap id in the lower 16 bits.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
static int panthor_ioctl_tiler_heap_create(struct drm_device *ddev, void *data,
					   struct drm_file *file)
{
	struct panthor_file *pfile = file->driver_priv;
	struct drm_panthor_tiler_heap_create *args = data;
	struct panthor_heap_pool *pool;
	struct panthor_vm *vm;
	int ret;

	vm = panthor_vm_pool_get_vm(pfile->vms, args->vm_id);
	if (!vm)
		return -EINVAL;

	/* create=true: instantiate the pool on first use. */
	pool = panthor_vm_get_heap_pool(vm, true);
	if (IS_ERR(pool)) {
		ret = PTR_ERR(pool);
		goto out_put_vm;
	}

	ret = panthor_heap_create(pool,
				  args->initial_chunk_count,
				  args->chunk_size,
				  args->max_chunks,
				  args->target_in_flight,
				  &args->tiler_heap_ctx_gpu_va,
				  &args->first_heap_chunk_gpu_va);
	if (ret < 0)
		goto out_put_heap_pool;

	/* Pack VM id (high 16 bits) and heap id (low 16 bits). */
	args->handle = (args->vm_id << 16) | ret;
	ret = 0;

out_put_heap_pool:
	panthor_heap_pool_put(pool);

out_put_vm:
	panthor_vm_put(vm);
	return ret;
}
/*
 * panthor_ioctl_tiler_heap_destroy() - TILER_HEAP_DESTROY ioctl handler.
 *
 * The handle encodes the VM id in its upper 16 bits and the heap id in the
 * lower 16 bits (see panthor_ioctl_tiler_heap_create()).
 *
 * Return: 0 on success, a negative error code otherwise.
 */
static int panthor_ioctl_tiler_heap_destroy(struct drm_device *ddev, void *data,
					    struct drm_file *file)
{
	struct panthor_file *pfile = file->driver_priv;
	struct drm_panthor_tiler_heap_destroy *args = data;
	struct panthor_heap_pool *pool;
	struct panthor_vm *vm;
	int ret;

	if (args->pad)
		return -EINVAL;

	vm = panthor_vm_pool_get_vm(pfile->vms, args->handle >> 16);
	if (!vm)
		return -EINVAL;

	/* create=false: destroying a heap must not instantiate a pool. */
	pool = panthor_vm_get_heap_pool(vm, false);
	if (IS_ERR(pool)) {
		ret = PTR_ERR(pool);
		goto out_put_vm;
	}

	ret = panthor_heap_destroy(pool, args->handle & GENMASK(15, 0));
	panthor_heap_pool_put(pool);

out_put_vm:
	panthor_vm_put(vm);
	return ret;
}
/*
 * panthor_ioctl_vm_bind_async() - Asynchronous VM_BIND path.
 * @ddev: DRM device.
 * @args: VM_BIND arguments.
 * @file: DRM file.
 *
 * Each bind op becomes a scheduler job, following the same flow as
 * panthor_ioctl_group_submit(): create jobs, collect signal ops, lock
 * reservations, add deps, arm, push.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
static int panthor_ioctl_vm_bind_async(struct drm_device *ddev,
				       struct drm_panthor_vm_bind *args,
				       struct drm_file *file)
{
	struct panthor_file *pfile = file->driver_priv;
	struct drm_panthor_vm_bind_op *jobs_args;
	struct panthor_submit_ctx ctx;
	struct panthor_vm *vm;
	int ret = 0;

	vm = panthor_vm_pool_get_vm(pfile->vms, args->vm_id);
	if (!vm)
		return -EINVAL;

	ret = PANTHOR_UOBJ_GET_ARRAY(jobs_args, &args->ops);
	if (ret)
		goto out_put_vm;

	ret = panthor_submit_ctx_init(&ctx, file, args->ops.count);
	if (ret)
		goto out_free_jobs_args;

	for (u32 i = 0; i < args->ops.count; i++) {
		struct drm_panthor_vm_bind_op *op = &jobs_args[i];
		struct drm_sched_job *job;

		job = panthor_vm_bind_job_create(file, vm, op);
		if (IS_ERR(job)) {
			ret = PTR_ERR(job);
			goto out_cleanup_submit_ctx;
		}

		/* Takes ownership of @job, even on failure. */
		ret = panthor_submit_ctx_add_job(&ctx, i, job, &op->syncs);
		if (ret)
			goto out_cleanup_submit_ctx;
	}

	ret = panthor_submit_ctx_collect_jobs_signal_ops(&ctx);
	if (ret)
		goto out_cleanup_submit_ctx;

	/* Lock/prepare the reservations touched by every bind job. */
	drm_exec_until_all_locked(&ctx.exec) {
		for (u32 i = 0; i < ctx.job_count; i++) {
			ret = panthor_vm_bind_job_prepare_resvs(&ctx.exec, ctx.jobs[i].job);
			drm_exec_retry_on_contention(&ctx.exec);
			if (ret)
				goto out_cleanup_submit_ctx;
		}
	}

	ret = panthor_submit_ctx_add_deps_and_arm_jobs(&ctx);
	if (ret)
		goto out_cleanup_submit_ctx;

	/* Past this point, submission cannot fail. */
	panthor_submit_ctx_push_jobs(&ctx, panthor_vm_bind_job_update_resvs);

out_cleanup_submit_ctx:
	panthor_submit_ctx_cleanup(&ctx, panthor_vm_bind_job_put);

out_free_jobs_args:
	kvfree(jobs_args);

out_put_vm:
	panthor_vm_put(vm);
	return ret;
}
/*
 * panthor_ioctl_vm_bind_sync() - Synchronous VM_BIND path.
 * @ddev: DRM device.
 * @args: VM_BIND arguments.
 * @file: DRM file.
 *
 * Executes the bind ops immediately, in order. On failure, @args->ops.count
 * is updated to the number of ops that completed so userspace can resume.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
static int panthor_ioctl_vm_bind_sync(struct drm_device *ddev,
				      struct drm_panthor_vm_bind *args,
				      struct drm_file *file)
{
	struct panthor_file *pfile = file->driver_priv;
	struct drm_panthor_vm_bind_op *jobs_args;
	struct panthor_vm *vm;
	int ret;

	vm = panthor_vm_pool_get_vm(pfile->vms, args->vm_id);
	if (!vm)
		return -EINVAL;

	ret = PANTHOR_UOBJ_GET_ARRAY(jobs_args, &args->ops);
	if (ret)
		goto out_put_vm;

	for (u32 i = 0; i < args->ops.count; i++) {
		ret = panthor_vm_bind_exec_sync_op(file, vm, &jobs_args[i]);
		if (ret) {
			/* Update ops.count so userspace knows where we failed. */
			args->ops.count = i;
			break;
		}
	}

	kvfree(jobs_args);

out_put_vm:
	panthor_vm_put(vm);
	return ret;
}
/* All top-level VM_BIND flags userspace may pass. */
#define PANTHOR_VM_BIND_FLAGS DRM_PANTHOR_VM_BIND_ASYNC
static int panthor_ioctl_vm_bind(struct drm_device *ddev, void *data,
struct drm_file *file)
{
struct drm_panthor_vm_bind *args = data;
int cookie, ret;
if (!drm_dev_enter(ddev, &cookie))
return -ENODEV;
if (args->flags & DRM_PANTHOR_VM_BIND_ASYNC)
ret = panthor_ioctl_vm_bind_async(ddev, args, file);
else
ret = panthor_ioctl_vm_bind_sync(ddev, args, file);
drm_dev_exit(cookie);
return ret;
}
/*
 * panthor_ioctl_vm_get_state() - VM_GET_STATE ioctl handler.
 *
 * Reports whether the VM is still usable or was wedged by a fault.
 *
 * Return: 0 on success, -EINVAL on a bad VM id.
 */
static int panthor_ioctl_vm_get_state(struct drm_device *ddev, void *data,
				      struct drm_file *file)
{
	struct panthor_file *pfile = file->driver_priv;
	struct drm_panthor_vm_get_state *args = data;
	struct panthor_vm *vm;

	vm = panthor_vm_pool_get_vm(pfile->vms, args->vm_id);
	if (!vm)
		return -EINVAL;

	args->state = panthor_vm_is_unusable(vm) ?
		      DRM_PANTHOR_VM_STATE_UNUSABLE :
		      DRM_PANTHOR_VM_STATE_USABLE;

	panthor_vm_put(vm);
	return 0;
}
/*
 * panthor_ioctl_bo_set_label() - BO_SET_LABEL ioctl handler.
 *
 * Attaches a user-provided debug label to a BO; a NULL label clears it.
 * Ownership of the duplicated string moves to panthor_gem_bo_set_label().
 *
 * Return: 0 on success, a negative error code otherwise.
 */
static int panthor_ioctl_bo_set_label(struct drm_device *ddev, void *data,
				      struct drm_file *file)
{
	struct drm_panthor_bo_set_label *args = data;
	struct drm_gem_object *obj;
	const char *label = NULL;
	int ret = 0;

	if (args->pad)
		return -EINVAL;

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	if (args->label) {
		label = strndup_user((const char __user *)(uintptr_t)args->label,
				     PANTHOR_BO_LABEL_MAXLEN);
		if (IS_ERR(label)) {
			ret = PTR_ERR(label);
			/* strndup_user() says -EINVAL for "too long"; map to -E2BIG. */
			if (ret == -EINVAL)
				ret = -E2BIG;
			goto err_put_obj;
		}
	}

	panthor_gem_bo_set_label(obj, label);

err_put_obj:
	drm_gem_object_put(obj);
	return ret;
}
/*
 * panthor_ioctl_set_user_mmio_offset() - SET_USER_MMIO_OFFSET ioctl handler.
 *
 * Lets userspace pick the 32-bit or 64-bit user-MMIO mmap offset.
 *
 * Return: 0 on success, -EINVAL on an unknown offset.
 */
static int panthor_ioctl_set_user_mmio_offset(struct drm_device *ddev,
					      void *data, struct drm_file *file)
{
	struct drm_panthor_set_user_mmio_offset *args = data;
	struct panthor_file *pfile = file->driver_priv;

	switch (args->offset) {
	case DRM_PANTHOR_USER_MMIO_OFFSET_32BIT:
	case DRM_PANTHOR_USER_MMIO_OFFSET_64BIT:
		break;

	default:
		return -EINVAL;
	}

	/* Paired with the READ_ONCE() in panthor_mmap(). */
	WRITE_ONCE(pfile->user_mmio.offset, args->offset);
	return 0;
}
/*
 * panthor_ioctl_bo_sync() - BO_SYNC ioctl handler.
 *
 * Executes an array of cache-maintenance operations on BO ranges, stopping
 * at the first failure.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
static int panthor_ioctl_bo_sync(struct drm_device *ddev, void *data,
				 struct drm_file *file)
{
	struct drm_panthor_bo_sync *args = data;
	struct drm_panthor_bo_sync_op *ops;
	struct drm_gem_object *obj;
	int ret;

	/* An empty op array is a no-op, not an error. */
	if (!args->ops.count)
		return 0;

	ret = PANTHOR_UOBJ_GET_ARRAY(ops, &args->ops);
	if (ret)
		return ret;

	for (u32 i = 0; i < args->ops.count; i++) {
		obj = drm_gem_object_lookup(file, ops[i].handle);
		if (!obj) {
			ret = -ENOENT;
			goto err_ops;
		}

		ret = panthor_gem_sync(obj, ops[i].type, ops[i].offset,
				       ops[i].size);

		drm_gem_object_put(obj);
		if (ret)
			goto err_ops;
	}

err_ops:
	kvfree(ops);
	return ret;
}
/*
 * panthor_ioctl_bo_query_info() - BO_QUERY_INFO ioctl handler.
 *
 * Reports the creation flags of a BO plus extra state flags (imported BO).
 *
 * Return: 0 on success, -ENOENT on a bad handle.
 */
static int panthor_ioctl_bo_query_info(struct drm_device *ddev, void *data,
				       struct drm_file *file)
{
	struct drm_panthor_bo_query_info *args = data;
	struct panthor_gem_object *bo;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	bo = to_panthor_bo(obj);

	/* Clear the output fields; pad must go back to userspace zeroed. */
	args->pad = 0;
	args->create_flags = bo->flags;
	args->extra_flags = 0;

	if (drm_gem_is_imported(&bo->base.base))
		args->extra_flags |= DRM_PANTHOR_BO_IS_IMPORTED;

	drm_gem_object_put(obj);
	return 0;
}
/*
 * panthor_open() - DRM file open hook.
 *
 * Allocates the per-file context and its VM/group pools. 32-bit processes
 * on arm64 default to the 32-bit user-MMIO offset.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
static int
panthor_open(struct drm_device *ddev, struct drm_file *file)
{
	struct panthor_device *ptdev = container_of(ddev, struct panthor_device, base);
	struct panthor_file *pfile;
	int ret;

	pfile = kzalloc_obj(*pfile);
	if (!pfile)
		return -ENOMEM;

	pfile->ptdev = ptdev;
	pfile->user_mmio.offset = DRM_PANTHOR_USER_MMIO_OFFSET;

#ifdef CONFIG_ARM64
	/*
	 * Compat (32-bit) tasks can't address the default 64-bit offset,
	 * so start them on the 32-bit one.
	 */
	if (test_tsk_thread_flag(current, TIF_32BIT))
		pfile->user_mmio.offset = DRM_PANTHOR_USER_MMIO_OFFSET_32BIT;
#endif

	ret = panthor_vm_pool_create(pfile);
	if (ret)
		goto err_free_file;

	ret = panthor_group_pool_create(pfile);
	if (ret)
		goto err_destroy_vm_pool;

	file->driver_priv = pfile;
	return 0;

err_destroy_vm_pool:
	panthor_vm_pool_destroy(pfile);

err_free_file:
	kfree(pfile);
	return ret;
}
/*
 * panthor_postclose() - DRM file close hook.
 *
 * Tears down the per-file pools (groups first, since they reference VMs).
 */
static void
panthor_postclose(struct drm_device *ddev, struct drm_file *file)
{
	struct panthor_file *pfile = file->driver_priv;

	panthor_group_pool_destroy(pfile);
	panthor_vm_pool_destroy(pfile);

	kfree(pfile);
}
/* Panthor ioctl table; all entries are render-node accessible. */
static const struct drm_ioctl_desc panthor_drm_driver_ioctls[] = {
#define PANTHOR_IOCTL(n, func, flags) \
	DRM_IOCTL_DEF_DRV(PANTHOR_##n, panthor_ioctl_##func, flags)

	PANTHOR_IOCTL(DEV_QUERY, dev_query, DRM_RENDER_ALLOW),
	PANTHOR_IOCTL(VM_CREATE, vm_create, DRM_RENDER_ALLOW),
	PANTHOR_IOCTL(VM_DESTROY, vm_destroy, DRM_RENDER_ALLOW),
	PANTHOR_IOCTL(VM_BIND, vm_bind, DRM_RENDER_ALLOW),
	PANTHOR_IOCTL(VM_GET_STATE, vm_get_state, DRM_RENDER_ALLOW),
	PANTHOR_IOCTL(BO_CREATE, bo_create, DRM_RENDER_ALLOW),
	PANTHOR_IOCTL(BO_MMAP_OFFSET, bo_mmap_offset, DRM_RENDER_ALLOW),
	PANTHOR_IOCTL(GROUP_CREATE, group_create, DRM_RENDER_ALLOW),
	PANTHOR_IOCTL(GROUP_DESTROY, group_destroy, DRM_RENDER_ALLOW),
	PANTHOR_IOCTL(GROUP_GET_STATE, group_get_state, DRM_RENDER_ALLOW),
	PANTHOR_IOCTL(TILER_HEAP_CREATE, tiler_heap_create, DRM_RENDER_ALLOW),
	PANTHOR_IOCTL(TILER_HEAP_DESTROY, tiler_heap_destroy, DRM_RENDER_ALLOW),
	PANTHOR_IOCTL(GROUP_SUBMIT, group_submit, DRM_RENDER_ALLOW),
	PANTHOR_IOCTL(BO_SET_LABEL, bo_set_label, DRM_RENDER_ALLOW),
	PANTHOR_IOCTL(SET_USER_MMIO_OFFSET, set_user_mmio_offset, DRM_RENDER_ALLOW),
	PANTHOR_IOCTL(BO_SYNC, bo_sync, DRM_RENDER_ALLOW),
	PANTHOR_IOCTL(BO_QUERY_INFO, bo_query_info, DRM_RENDER_ALLOW),
};
/*
 * panthor_mmap() - mmap file operation.
 *
 * Offsets at or above the file's user-MMIO offset map device I/O regions;
 * everything below is a regular GEM mmap.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
static int panthor_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file = filp->private_data;
	struct panthor_file *pfile = file->driver_priv;
	struct panthor_device *ptdev = pfile->ptdev;
	u64 offset = (u64)vma->vm_pgoff << PAGE_SHIFT;
	u64 user_mmio_offset;
	int ret, cookie;

	if (!drm_dev_enter(file->minor->dev, &cookie))
		return -ENODEV;

	/* Paired with the WRITE_ONCE() in the SET_USER_MMIO_OFFSET ioctl. */
	user_mmio_offset = READ_ONCE(pfile->user_mmio.offset);
	if (offset >= user_mmio_offset) {
		/*
		 * Rebase the per-file offset onto the canonical user-MMIO
		 * offset before handing the vma to the device mmap helper.
		 */
		offset -= user_mmio_offset;
		offset += DRM_PANTHOR_USER_MMIO_OFFSET;
		vma->vm_pgoff = offset >> PAGE_SHIFT;
		ret = panthor_device_mmap_io(ptdev, vma);
	} else {
		ret = drm_gem_mmap(filp, vma);
	}

	drm_dev_exit(cookie);
	return ret;
}
/*
 * panthor_gpu_show_fdinfo() - Print GPU usage stats for fdinfo.
 * @ptdev: Device.
 * @pfile: File whose stats are reported.
 * @p: Output printer.
 */
static void panthor_gpu_show_fdinfo(struct panthor_device *ptdev,
				    struct panthor_file *pfile,
				    struct drm_printer *p)
{
	if (ptdev->profile_mask & PANTHOR_DEVICE_PROFILING_ALL)
		panthor_fdinfo_gather_group_samples(pfile);

	if (ptdev->profile_mask & PANTHOR_DEVICE_PROFILING_TIMESTAMP) {
#ifdef CONFIG_ARM_ARCH_TIMER
		/* Convert timer ticks to nanoseconds using the arch timer rate. */
		drm_printf(p, "drm-engine-panthor:\t%llu ns\n",
			   DIV_ROUND_UP_ULL((pfile->stats.time * NSEC_PER_SEC),
					    arch_timer_get_cntfrq()));
#endif
	}
	if (ptdev->profile_mask & PANTHOR_DEVICE_PROFILING_CYCLES)
		drm_printf(p, "drm-cycles-panthor:\t%llu\n", pfile->stats.cycles);

	drm_printf(p, "drm-maxfreq-panthor:\t%lu Hz\n", ptdev->fast_rate);
	drm_printf(p, "drm-curfreq-panthor:\t%lu Hz\n",
		   panthor_devfreq_get_freq(ptdev));
}
/*
 * panthor_show_internal_memory_stats() - Print driver-internal memory usage
 * (group buffers, heap chunks) for fdinfo.
 * @p: Output printer.
 * @file: DRM file whose stats are reported.
 */
static void panthor_show_internal_memory_stats(struct drm_printer *p, struct drm_file *file)
{
	char *drv_name = file->minor->dev->driver->name;
	struct panthor_file *pfile = file->driver_priv;
	struct drm_memory_stats stats = {0};

	panthor_fdinfo_gather_group_mem_info(pfile, &stats);
	panthor_vm_heaps_sizes(pfile, &stats);

	drm_fdinfo_print_size(p, drv_name, "resident", "memory", stats.resident);
	drm_fdinfo_print_size(p, drv_name, "active", "memory", stats.active);
}
/*
 * panthor_show_fdinfo() - DRM fdinfo hook: GPU usage, internal memory, and
 * generic GEM memory stats.
 */
static void panthor_show_fdinfo(struct drm_printer *p, struct drm_file *file)
{
	struct drm_device *dev = file->minor->dev;
	struct panthor_device *ptdev = container_of(dev, struct panthor_device, base);

	panthor_gpu_show_fdinfo(ptdev, file->driver_priv, p);
	panthor_show_internal_memory_stats(p, file);
	drm_show_memory_stats(p, file);
}
/* File operations: standard DRM hooks plus a custom mmap for user MMIO. */
static const struct file_operations panthor_drm_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
	.compat_ioctl = drm_compat_ioctl,
	.poll = drm_poll,
	.read = drm_read,
	.llseek = noop_llseek,
	.mmap = panthor_mmap,
	.get_unmapped_area = drm_gem_get_unmapped_area,
	.show_fdinfo = drm_show_fdinfo,
	.fop_flags = FOP_UNSIGNED_OFFSET,
};
#ifdef CONFIG_DEBUG_FS
/* debugfs "gems" file: dump the device's GEM objects. */
static int panthor_gems_show(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct panthor_device *ptdev = container_of(dev, struct panthor_device, base);

	panthor_gem_debugfs_print_bos(ptdev, m);

	return 0;
}
/* debugfs entries exposed under the DRM minor's debugfs directory. */
static struct drm_info_list panthor_debugfs_list[] = {
	{"gems", panthor_gems_show, 0, NULL},
};
/* Register the GEM debugfs files for @minor. */
static int panthor_gems_debugfs_init(struct drm_minor *minor)
{
	drm_debugfs_create_files(panthor_debugfs_list,
				 ARRAY_SIZE(panthor_debugfs_list),
				 minor->debugfs_root, minor);
	return 0;
}
/* DRM debugfs_init hook: register MMU and GEM debugfs entries. */
static void panthor_debugfs_init(struct drm_minor *minor)
{
	panthor_mmu_debugfs_init(minor);
	panthor_gems_debugfs_init(minor);
}
#endif
/*
 * PanCSF driver version:
 * - 1.0 - initial interface
 * (see the uAPI header for the full changelog)
 */
static const struct drm_driver panthor_drm_driver = {
	.driver_features = DRIVER_RENDER | DRIVER_GEM | DRIVER_SYNCOBJ |
			   DRIVER_SYNCOBJ_TIMELINE | DRIVER_GEM_GPUVA,
	.open = panthor_open,
	.postclose = panthor_postclose,
	.show_fdinfo = panthor_show_fdinfo,
	.ioctls = panthor_drm_driver_ioctls,
	.num_ioctls = ARRAY_SIZE(panthor_drm_driver_ioctls),
	.fops = &panthor_drm_driver_fops,
	.name = "panthor",
	.desc = "Panthor DRM driver",
	.major = 1,
	.minor = 7,

	.gem_create_object = panthor_gem_create_object,
	.gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table,
	.gem_prime_import = panthor_gem_prime_import,
#ifdef CONFIG_DEBUG_FS
	.debugfs_init = panthor_debugfs_init,
#endif
};
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* Read-only module param selecting a THP-enabled tmpfs mount for GEM backing. */
bool panthor_transparent_hugepage = true;
module_param_named(transparent_hugepage, panthor_transparent_hugepage, bool, 0400);
MODULE_PARM_DESC(transparent_hugepage, "Use a dedicated tmpfs mount point with Transparent Hugepage enabled (true = default)");
#endif
/*
 * panthor_probe() - Platform-device probe.
 *
 * Allocates the DRM/panthor device and initializes it.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
static int panthor_probe(struct platform_device *pdev)
{
	struct panthor_device *ptdev;

	ptdev = devm_drm_dev_alloc(&pdev->dev, &panthor_drm_driver,
				   struct panthor_device, base);
	if (IS_ERR(ptdev))
		/* Propagate the real error instead of forcing -ENOMEM. */
		return PTR_ERR(ptdev);

	platform_set_drvdata(pdev, ptdev);

	return panthor_device_init(ptdev);
}
/* Platform-device remove: unplug the device; teardown is refcount-driven. */
static void panthor_remove(struct platform_device *pdev)
{
	struct panthor_device *ptdev = platform_get_drvdata(pdev);

	panthor_device_unplug(ptdev);
}
/*
 * profiling_show() - sysfs "profiling" read: current profiling mask.
 *
 * Return: number of bytes written to @buf.
 */
static ssize_t profiling_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct panthor_device *ptdev = dev_get_drvdata(dev);

	/* profile_mask is unsigned (parsed with kstrtou32), so print with %u. */
	return sysfs_emit(buf, "%u\n", ptdev->profile_mask);
}
/*
 * profiling_store() - sysfs "profiling" write: set the profiling mask.
 *
 * Return: @len on success, a negative error code otherwise.
 */
static ssize_t profiling_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t len)
{
	struct panthor_device *ptdev = dev_get_drvdata(dev);
	u32 value;
	int err;

	err = kstrtou32(buf, 0, &value);
	if (err)
		return err;

	/* Only known profiling bits are accepted. */
	if ((value & ~PANTHOR_DEVICE_PROFILING_ALL) != 0)
		return -EINVAL;

	ptdev->profile_mask = value;

	return len;
}
static DEVICE_ATTR_RW(profiling);

/* sysfs attributes exposed on the platform device. */
static struct attribute *panthor_attrs[] = {
	&dev_attr_profiling.attr,
	NULL,
};

ATTRIBUTE_GROUPS(panthor);
/* MT8196 needs a non-default ASN hash configuration. */
static const struct panthor_soc_data soc_data_mediatek_mt8196 = {
	.asn_hash_enable = true,
	.asn_hash = { 0xb, 0xe, 0x0, },
};

/* Supported devicetree compatibles; .data carries SoC quirks when needed. */
static const struct of_device_id dt_match[] = {
	{ .compatible = "mediatek,mt8196-mali", .data = &soc_data_mediatek_mt8196, },
	{ .compatible = "rockchip,rk3588-mali" },
	{ .compatible = "arm,mali-valhall-csf" },
	{}
};
MODULE_DEVICE_TABLE(of, dt_match);
/* Runtime-PM only; no autosuspend callback. */
static DEFINE_RUNTIME_DEV_PM_OPS(panthor_pm_ops,
				 panthor_device_suspend,
				 panthor_device_resume,
				 NULL);
static struct platform_driver panthor_driver = {
	.probe = panthor_probe,
	.remove = panthor_remove,
	.driver = {
		.name = "panthor",
		.pm = pm_ptr(&panthor_pm_ops),
		.of_match_table = dt_match,
		.dev_groups = panthor_groups,
	},
};

/*
 * Workqueue used to cleanup stuff. Shared across drivers instances so
 * object destruction can be flushed module-wide.
 */
struct workqueue_struct *panthor_cleanup_wq;
/*
 * panthor_init() - Module init: MMU page-table cache, cleanup workqueue,
 * then the platform driver. Unwinds in reverse order on failure.
 */
static int __init panthor_init(void)
{
	int ret;

	ret = panthor_mmu_pt_cache_init();
	if (ret)
		return ret;

	panthor_cleanup_wq = alloc_workqueue("panthor-cleanup", WQ_UNBOUND, 0);
	if (!panthor_cleanup_wq) {
		pr_err("panthor: Failed to allocate the workqueues");
		ret = -ENOMEM;
		goto err_mmu_pt_cache_fini;
	}

	ret = platform_driver_register(&panthor_driver);
	if (ret)
		goto err_destroy_cleanup_wq;

	return 0;

err_destroy_cleanup_wq:
	destroy_workqueue(panthor_cleanup_wq);

err_mmu_pt_cache_fini:
	panthor_mmu_pt_cache_fini();
	return ret;
}
module_init(panthor_init);
/* Module exit: tear down in reverse order of panthor_init(). */
static void __exit panthor_exit(void)
{
	platform_driver_unregister(&panthor_driver);
	destroy_workqueue(panthor_cleanup_wq);
	panthor_mmu_pt_cache_fini();
}
module_exit(panthor_exit);

MODULE_AUTHOR("Panthor Project Developers");
MODULE_DESCRIPTION("Panthor DRM Driver");
MODULE_LICENSE("Dual MIT/GPL");