// root/drivers/accel/amdxdna/amdxdna_ctx.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2022-2024, Advanced Micro Devices, Inc.
 */

#include <drm/amdxdna_accel.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_print.h>
#include <drm/gpu_scheduler.h>
#include <linux/xarray.h>
#include <trace/events/amdxdna.h>

#include "amdxdna_ctx.h"
#include "amdxdna_gem.h"
#include "amdxdna_pci_drv.h"
#include "amdxdna_pm.h"

#define MAX_HWCTX_ID            255
#define MAX_ARG_COUNT           4095

/* Driver-private fence; the timeline name comes from the owning hwctx. */
struct amdxdna_fence {
        struct dma_fence        base;
        spinlock_t              lock; /* for base */
        struct amdxdna_hwctx    *hwctx; /* owning context; source of timeline name */
};

/* All fences created by this driver report the module name. */
static const char *amdxdna_fence_get_driver_name(struct dma_fence *fence)
{
        return KBUILD_MODNAME;
}

/* The fence timeline is named after the hardware context it belongs to. */
static const char *amdxdna_fence_get_timeline_name(struct dma_fence *fence)
{
        struct amdxdna_fence *f = container_of(fence, struct amdxdna_fence, base);

        return f->hwctx->name;
}

/* Minimal dma_fence ops: only the naming callbacks are provided. */
static const struct dma_fence_ops fence_ops = {
        .get_driver_name = amdxdna_fence_get_driver_name,
        .get_timeline_name = amdxdna_fence_get_timeline_name,
};

/*
 * Allocate and initialize a fence on the hwctx's timeline (context id is
 * used as the fence context, seqno starts at 0).  Returns NULL on OOM.
 */
static struct dma_fence *amdxdna_fence_create(struct amdxdna_hwctx *hwctx)
{
        struct amdxdna_fence *f = kzalloc_obj(*f);

        if (!f)
                return NULL;

        spin_lock_init(&f->lock);
        f->hwctx = hwctx;
        dma_fence_init(&f->base, &fence_ops, &f->lock, hwctx->id, 0);

        return &f->base;
}

/*
 * Tear down a hwctx after it has been removed from the client's xarray.
 * The SRCU grace period guarantees that no exec-command path (which reads
 * the context under srcu_read_lock) can still reference it before the
 * device layer finalizes it and the memory is freed.
 */
static void amdxdna_hwctx_destroy_rcu(struct amdxdna_hwctx *hwctx,
                                      struct srcu_struct *ss)
{
        struct amdxdna_dev *xdna = hwctx->client->xdna;

        /* Wait out all SRCU readers that may still see this hwctx. */
        synchronize_srcu(ss);

        /* At this point, user is not able to submit new commands */
        xdna->dev_info->ops->hwctx_fini(hwctx);

        kfree(hwctx->name);
        kfree(hwctx);
}

/*
 * Invoke @walk on every hwctx of @client under SRCU read protection.
 * Iteration stops at the first non-zero return from @walk, which is then
 * propagated to the caller; returns 0 if all contexts were visited.
 */
int amdxdna_hwctx_walk(struct amdxdna_client *client, void *arg,
                       int (*walk)(struct amdxdna_hwctx *hwctx, void *arg))
{
        struct amdxdna_hwctx *hwctx;
        unsigned long id;
        int srcu_idx;
        int ret = 0;

        srcu_idx = srcu_read_lock(&client->hwctx_srcu);
        amdxdna_for_each_hwctx(client, id, hwctx) {
                ret = walk(hwctx, arg);
                if (ret)
                        break;
        }
        srcu_read_unlock(&client->hwctx_srcu, srcu_idx);

        return ret;
}

/*
 * Return a pointer to the payload words of a command BO: the u32 data that
 * follows the CU mask words (a chain command has no CU masks, so its payload
 * starts at data[0]).  If @size is non-NULL, the payload size in bytes is
 * stored there after validating the header's count field; on a malformed
 * header (count not past the masks, or payload overrunning the BO) *size is
 * set to 0 and NULL is returned.  NOTE: when @size is NULL no bounds check
 * is performed and the raw payload pointer is returned unconditionally.
 */
void *amdxdna_cmd_get_payload(struct amdxdna_gem_obj *abo, u32 *size)
{
        struct amdxdna_cmd *cmd = abo->mem.kva;
        u32 num_masks, count;

        if (amdxdna_cmd_get_op(abo) == ERT_CMD_CHAIN)
                num_masks = 0;
        else
                num_masks = 1 + FIELD_GET(AMDXDNA_CMD_EXTRA_CU_MASK, cmd->header);

        if (size) {
                count = FIELD_GET(AMDXDNA_CMD_COUNT, cmd->header);
                /* count must cover the masks, and the words must fit in the BO */
                if (unlikely(count <= num_masks ||
                             count * sizeof(u32) +
                             offsetof(struct amdxdna_cmd, data[0]) >
                             abo->mem.size)) {
                        *size = 0;
                        return NULL;
                }
                *size = (count - num_masks) * sizeof(u32);
        }
        return &cmd->data[num_masks];
}

/*
 * Return the index of the first CU selected in the command's CU mask words,
 * or INVALID_CU_IDX if no bit is set or the command is a chain (chains
 * carry no CU masks).
 */
u32 amdxdna_cmd_get_cu_idx(struct amdxdna_gem_obj *abo)
{
        struct amdxdna_cmd *cmd = abo->mem.kva;
        u32 nmasks, idx;
        u32 *masks;

        if (amdxdna_cmd_get_op(abo) == ERT_CMD_CHAIN)
                return INVALID_CU_IDX;

        nmasks = 1 + FIELD_GET(AMDXDNA_CMD_EXTRA_CU_MASK, cmd->header);
        masks = cmd->data;
        for (idx = 0; idx < nmasks; idx++)
                if (masks[idx])
                        return ffs(masks[idx]) - 1;

        return INVALID_CU_IDX;
}

/*
 * Record @error_state in the command header and poison the payload with
 * 0xff.  For a chain command, the chain header's error_index is set to the
 * failing sub-command (or 0 if @cmd_idx is out of range) and the poisoning
 * is applied to the first chained command BO instead of the chain BO itself.
 * Returns 0 on success, -EINVAL if the chained command BO cannot be looked
 * up.
 */
int amdxdna_cmd_set_error(struct amdxdna_gem_obj *abo,
                          struct amdxdna_sched_job *job, u32 cmd_idx,
                          enum ert_cmd_state error_state)
{
        struct amdxdna_client *client = job->hwctx->client;
        struct amdxdna_cmd *cmd = abo->mem.kva;
        struct amdxdna_cmd_chain *cc = NULL;

        /* Replace the state field, keep the rest of the header intact. */
        cmd->header &= ~AMDXDNA_CMD_STATE;
        cmd->header |= FIELD_PREP(AMDXDNA_CMD_STATE, error_state);

        if (amdxdna_cmd_get_op(abo) == ERT_CMD_CHAIN) {
                cc = amdxdna_cmd_get_payload(abo, NULL);
                cc->error_index = (cmd_idx < cc->command_count) ? cmd_idx : 0;
                /* Redirect poisoning to the first command in the chain. */
                abo = amdxdna_gem_get_obj(client, cc->data[0], AMDXDNA_BO_CMD);
                if (!abo)
                        return -EINVAL;
                cmd = abo->mem.kva;
        }

        /* NOTE(review): assumes mem.size >= sizeof(*cmd) for a CMD BO — confirm */
        memset(cmd->data, 0xff, abo->mem.size - sizeof(*cmd));
        if (cc)
                amdxdna_gem_put_obj(abo);

        return 0;
}

/*
 * This should be called in close() and remove(). DO NOT call in other syscalls.
 * It guarantees that all hwctx and their resources are released even if the
 * user never called amdxdna_drm_destroy_hwctx_ioctl.
 */
void amdxdna_hwctx_remove_all(struct amdxdna_client *client)
{
        struct amdxdna_hwctx *hwctx;
        unsigned long hwctx_id;

        amdxdna_for_each_hwctx(client, hwctx_id, hwctx) {
                XDNA_DBG(client->xdna, "PID %d close HW context %d",
                         client->pid, hwctx->id);
                /* Unpublish first so no new lookup can find this hwctx. */
                xa_erase(&client->hwctx_xa, hwctx->id);
                /* Waits for SRCU readers, then finalizes and frees. */
                amdxdna_hwctx_destroy_rcu(hwctx, &client->hwctx_srcu);
        }
}

/*
 * DRM ioctl: create a hardware context for the calling client.
 * Copies the QoS block from userspace, initializes the context in the
 * device layer, publishes it in the client's xarray (id returned in
 * args->handle along with the syncobj handle), all under dev_lock.
 * Returns 0 on success or a negative errno.
 */
int amdxdna_drm_create_hwctx_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
        struct amdxdna_client *client = filp->driver_priv;
        struct amdxdna_drm_create_hwctx *args = data;
        struct amdxdna_dev *xdna = to_xdna_dev(dev);
        struct amdxdna_hwctx *hwctx;
        int ret, idx;

        /* Extension fields are not supported yet; must be zero. */
        if (args->ext || args->ext_flags)
                return -EINVAL;

        hwctx = kzalloc_obj(*hwctx);
        if (!hwctx)
                return -ENOMEM;

        if (copy_from_user(&hwctx->qos, u64_to_user_ptr(args->qos_p), sizeof(hwctx->qos))) {
                XDNA_ERR(xdna, "Access QoS info failed");
                kfree(hwctx);
                return -EFAULT;
        }

        hwctx->client = client;
        hwctx->fw_ctx_id = -1; /* not yet assigned by firmware */
        hwctx->num_tiles = args->num_tiles;
        hwctx->mem_size = args->mem_size;
        hwctx->max_opc = args->max_opc;

        /* Scope-based lock: released automatically on any return below. */
        guard(mutex)(&xdna->dev_lock);

        if (!drm_dev_enter(dev, &idx)) {
                ret = -ENODEV;
                goto free_hwctx;
        }

        ret = xdna->dev_info->ops->hwctx_init(hwctx);
        if (ret) {
                XDNA_ERR(xdna, "Init hwctx failed, ret %d", ret);
                goto dev_exit;
        }

        /* Name built from creator PID and the firmware-assigned context id. */
        hwctx->name = kasprintf(GFP_KERNEL, "hwctx.%d.%d", client->pid, hwctx->fw_ctx_id);
        if (!hwctx->name) {
                ret = -ENOMEM;
                goto fini_hwctx;
        }

        ret = xa_alloc_cyclic(&client->hwctx_xa, &hwctx->id, hwctx,
                              XA_LIMIT(AMDXDNA_INVALID_CTX_HANDLE + 1, MAX_HWCTX_ID),
                              &client->next_hwctxid, GFP_KERNEL);
        if (ret < 0) {
                XDNA_ERR(xdna, "Allocate hwctx ID failed, ret %d", ret);
                goto free_name;
        }

        args->handle = hwctx->id;
        args->syncobj_handle = hwctx->syncobj_hdl;

        atomic64_set(&hwctx->job_submit_cnt, 0);
        atomic64_set(&hwctx->job_free_cnt, 0);
        XDNA_DBG(xdna, "PID %d create HW context %d, ret %d", client->pid, args->handle, ret);
        drm_dev_exit(idx);
        return 0;

/* Unwind in reverse order of acquisition. */
free_name:
        kfree(hwctx->name);
fini_hwctx:
        xdna->dev_info->ops->hwctx_fini(hwctx);
dev_exit:
        drm_dev_exit(idx);
free_hwctx:
        kfree(hwctx);
        return ret;
}

/*
 * DRM ioctl: destroy the hardware context identified by args->handle.
 * The context is atomically unpublished from the client's xarray under
 * dev_lock, then destroyed after an SRCU grace period.  Returns -EINVAL
 * if the handle does not name a live context of this client.
 */
int amdxdna_drm_destroy_hwctx_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
        struct amdxdna_client *client = filp->driver_priv;
        struct amdxdna_drm_destroy_hwctx *args = data;
        struct amdxdna_dev *xdna = to_xdna_dev(dev);
        struct amdxdna_hwctx *hwctx;
        int ret = 0, idx;

        /* Reserved padding must be zero. */
        if (XDNA_MBZ_DBG(xdna, &args->pad, sizeof(args->pad)))
                return -EINVAL;

        if (!drm_dev_enter(dev, &idx))
                return -ENODEV;

        mutex_lock(&xdna->dev_lock);
        /* xa_erase returns the entry, so lookup and unpublish are one step. */
        hwctx = xa_erase(&client->hwctx_xa, args->handle);
        if (!hwctx) {
                ret = -EINVAL;
                XDNA_DBG(xdna, "PID %d HW context %d not exist",
                         client->pid, args->handle);
                goto out;
        }

        /*
         * The pushed jobs are handled by DRM scheduler during destroy.
         * SRCU to synchronize with exec command ioctls.
         */
        amdxdna_hwctx_destroy_rcu(hwctx, &client->hwctx_srcu);

        XDNA_DBG(xdna, "PID %d destroyed HW context %d", client->pid, args->handle);
out:
        mutex_unlock(&xdna->dev_lock);
        drm_dev_exit(idx);
        return ret;
}

/*
 * DRM ioctl: configure an existing hardware context.
 * Depending on param_type, args->param_val is either a userspace pointer
 * (CONFIG_CU: payload copied into a kernel buffer of up to PAGE_SIZE) or a
 * plain value (debug-BO assign/remove).  The actual configuration is
 * delegated to the device layer's hwctx_config op under dev_lock.
 */
int amdxdna_drm_config_hwctx_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
        struct amdxdna_client *client = filp->driver_priv;
        struct amdxdna_drm_config_hwctx *args = data;
        struct amdxdna_dev *xdna = to_xdna_dev(dev);
        struct amdxdna_hwctx *hwctx;
        u32 buf_size;
        void *buf;
        int ret;
        u64 val;

        /* Reserved padding must be zero. */
        if (XDNA_MBZ_DBG(xdna, &args->pad, sizeof(args->pad)))
                return -EINVAL;

        if (!xdna->dev_info->ops->hwctx_config)
                return -EOPNOTSUPP;

        val = args->param_val;
        buf_size = args->param_val_size;

        switch (args->param_type) {
        case DRM_AMDXDNA_HWCTX_CONFIG_CU:
                /* For those types that param_val is pointer */
                if (buf_size > PAGE_SIZE) {
                        XDNA_ERR(xdna, "Config CU param buffer too large");
                        return -E2BIG;
                }

                /* Hwctx needs to keep buf */
                buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
                if (!buf)
                        return -ENOMEM;

                if (copy_from_user(buf, u64_to_user_ptr(val), buf_size)) {
                        kfree(buf);
                        return -EFAULT;
                }

                break;
        case DRM_AMDXDNA_HWCTX_ASSIGN_DBG_BUF:
        case DRM_AMDXDNA_HWCTX_REMOVE_DBG_BUF:
                /* For those types that param_val is a value */
                buf = NULL;
                buf_size = 0;
                break;
        default:
                XDNA_DBG(xdna, "Unknown HW context config type %d", args->param_type);
                return -EINVAL;
        }

        /* Scope-based lock: held for lookup and the config call. */
        guard(mutex)(&xdna->dev_lock);
        hwctx = xa_load(&client->hwctx_xa, args->handle);
        if (!hwctx) {
                XDNA_DBG(xdna, "PID %d failed to get hwctx %d", client->pid, args->handle);
                ret = -EINVAL;
                goto free_buf;
        }

        ret = xdna->dev_info->ops->hwctx_config(hwctx, args->param_type, val, buf, buf_size);

free_buf:
        /* NOTE(review): buf freed here even on success — device layer is
         * presumed to have copied what it needs; confirm hwctx_config contract. */
        kfree(buf);
        return ret;
}

/*
 * Sync the debug BO identified by @debug_bo_hdl with the hwctx it was
 * assigned to, via the device layer's hwctx_sync_debug_bo op.
 * Returns -EOPNOTSUPP when the op is absent, -EINVAL on a bad handle or
 * when the BO's assigned hwctx no longer exists.
 */
int amdxdna_hwctx_sync_debug_bo(struct amdxdna_client *client, u32 debug_bo_hdl)
{
        struct amdxdna_dev *xdna = client->xdna;
        struct amdxdna_hwctx *hwctx;
        struct amdxdna_gem_obj *abo;
        struct drm_gem_object *gobj;
        int ret;

        if (!xdna->dev_info->ops->hwctx_sync_debug_bo)
                return -EOPNOTSUPP;

        gobj = drm_gem_object_lookup(client->filp, debug_bo_hdl);
        if (!gobj)
                return -EINVAL;

        abo = to_xdna_obj(gobj);
        /* Scope-based lock: released at function exit, after put_obj. */
        guard(mutex)(&xdna->dev_lock);
        hwctx = xa_load(&client->hwctx_xa, abo->assigned_hwctx);
        if (!hwctx) {
                ret = -EINVAL;
                goto put_obj;
        }

        ret = xdna->dev_info->ops->hwctx_sync_debug_bo(hwctx, debug_bo_hdl);

put_obj:
        drm_gem_object_put(gobj);
        return ret;
}

/*
 * Drop the references on all argument BOs attached to @job.  The bos[]
 * array is filled front to back, so the first NULL entry marks the end
 * of the populated range.
 */
static void
amdxdna_arg_bos_put(struct amdxdna_sched_job *job)
{
        int idx;

        for (idx = 0; idx < job->bo_cnt && job->bos[idx]; idx++)
                drm_gem_object_put(job->bos[idx]);
}

/*
 * Resolve @bo_cnt argument BO handles into GEM object references in
 * job->bos[], pinning each BO that is not already pinned.  On failure the
 * references taken so far are dropped and a negative errno is returned
 * (-ENOENT for a bad handle, or the pin error).  Note: BOs pinned here
 * stay pinned (abo->pinned remains true); unpinning is not done on error.
 */
static int
amdxdna_arg_bos_lookup(struct amdxdna_client *client,
                       struct amdxdna_sched_job *job,
                       u32 *bo_hdls, u32 bo_cnt)
{
        struct drm_gem_object *gobj;
        int i, ret;

        job->bo_cnt = bo_cnt;
        for (i = 0; i < job->bo_cnt; i++) {
                struct amdxdna_gem_obj *abo;

                gobj = drm_gem_object_lookup(client->filp, bo_hdls[i]);
                if (!gobj) {
                        ret = -ENOENT;
                        goto put_shmem_bo;
                }
                abo = to_xdna_obj(gobj);

                /* Check-and-pin must be atomic w.r.t. other pinners. */
                mutex_lock(&abo->lock);
                if (abo->pinned) {
                        mutex_unlock(&abo->lock);
                        job->bos[i] = gobj;
                        continue;
                }

                ret = amdxdna_gem_pin_nolock(abo);
                if (ret) {
                        mutex_unlock(&abo->lock);
                        drm_gem_object_put(gobj);
                        goto put_shmem_bo;
                }
                abo->pinned = true;
                mutex_unlock(&abo->lock);

                job->bos[i] = gobj;
        }

        return 0;

put_shmem_bo:
        /* Drops only the references already stored in job->bos[]. */
        amdxdna_arg_bos_put(job);
        return ret;
}

/*
 * Release everything a submitted job holds: the PM reference, the argument
 * BO references, the command BO, and the job's fence.  Called when the job
 * is released; does not free the job struct itself.
 */
void amdxdna_sched_job_cleanup(struct amdxdna_sched_job *job)
{
        trace_amdxdna_debug_point(job->hwctx->name, job->seq, "job release");
        amdxdna_pm_suspend_put(job->hwctx->client->xdna);
        amdxdna_arg_bos_put(job);
        amdxdna_gem_put_obj(job->cmd_bo);
        dma_fence_put(job->fence);
}

/*
 * Build a scheduler job from the command BO and argument BOs and hand it
 * to the device layer for submission on @hwctx_hdl.  The sequence number
 * of the submitted command is returned through @seq.  On success the job
 * (with its PM reference, BO references and fence) is owned by the queue;
 * on failure everything acquired here is unwound.  Returns 0 or a
 * negative errno.
 */
int amdxdna_cmd_submit(struct amdxdna_client *client,
                       struct amdxdna_drv_cmd *drv_cmd,
                       u32 cmd_bo_hdl, u32 *arg_bo_hdls, u32 arg_bo_cnt,
                       u32 hwctx_hdl, u64 *seq)
{
        struct amdxdna_dev *xdna = client->xdna;
        struct amdxdna_sched_job *job;
        struct amdxdna_hwctx *hwctx;
        int ret, idx;

        XDNA_DBG(xdna, "Command BO hdl %d, Arg BO count %d", cmd_bo_hdl, arg_bo_cnt);
        /* Flexible allocation: job plus arg_bo_cnt bos[] slots. */
        job = kzalloc_flex(*job, bos, arg_bo_cnt);
        if (!job)
                return -ENOMEM;

        job->drv_cmd = drv_cmd;

        /* A command BO is optional (driver-internal commands pass none). */
        if (cmd_bo_hdl != AMDXDNA_INVALID_BO_HANDLE) {
                job->cmd_bo = amdxdna_gem_get_obj(client, cmd_bo_hdl, AMDXDNA_BO_CMD);
                if (!job->cmd_bo) {
                        XDNA_ERR(xdna, "Failed to get cmd bo from %d", cmd_bo_hdl);
                        ret = -EINVAL;
                        goto free_job;
                }
        }

        ret = amdxdna_arg_bos_lookup(client, job, arg_bo_hdls, arg_bo_cnt);
        if (ret) {
                XDNA_ERR(xdna, "Argument BOs lookup failed, ret %d", ret);
                goto cmd_put;
        }

        /* Take a PM reference for the lifetime of the job. */
        ret = amdxdna_pm_resume_get(xdna);
        if (ret) {
                XDNA_ERR(xdna, "Resume failed, ret %d", ret);
                goto put_bos;
        }

        /* SRCU read lock keeps the hwctx alive across submission. */
        idx = srcu_read_lock(&client->hwctx_srcu);
        hwctx = xa_load(&client->hwctx_xa, hwctx_hdl);
        if (!hwctx) {
                XDNA_DBG(xdna, "PID %d failed to get hwctx %d",
                         client->pid, hwctx_hdl);
                ret = -EINVAL;
                goto unlock_srcu;
        }


        job->hwctx = hwctx;
        job->mm = current->mm;

        job->fence = amdxdna_fence_create(hwctx);
        if (!job->fence) {
                XDNA_ERR(xdna, "Failed to create fence");
                ret = -ENOMEM;
                goto unlock_srcu;
        }
        kref_init(&job->refcnt);

        ret = xdna->dev_info->ops->cmd_submit(hwctx, job, seq);
        if (ret)
                goto put_fence;

        /*
         * The amdxdna_hwctx_destroy_rcu() will release hwctx and associated
         * resource after synchronize_srcu(). The submitted jobs should be
         * handled by the queue, for example DRM scheduler, in device layer.
         * For here we can unlock SRCU.
         */
        srcu_read_unlock(&client->hwctx_srcu, idx);
        trace_amdxdna_debug_point(hwctx->name, *seq, "job pushed");

        return 0;

/* Unwind in reverse order of acquisition. */
put_fence:
        dma_fence_put(job->fence);
unlock_srcu:
        srcu_read_unlock(&client->hwctx_srcu, idx);
        amdxdna_pm_suspend_put(xdna);
put_bos:
        amdxdna_arg_bos_put(job);
cmd_put:
        amdxdna_gem_put_obj(job->cmd_bo);
free_job:
        kfree(job);
        return ret;
}

/*
 * The submit command ioctl submits a command to firmware. One firmware command
 * may contain multiple command BOs for processing as a whole.
 * The command sequence number is returned which can be used for wait command ioctl.
 */
/*
 * Handle AMDXDNA_CMD_SUBMIT_EXEC_BUF: validate counts, copy the argument
 * BO handle array from userspace, and submit the (single) command BO.
 * The assigned sequence number is written back through args->seq for use
 * with the wait-command ioctl.  Returns 0 or a negative errno.
 *
 * Fixes: the cleanup label was misnamed free_cmd_bo_hdls although it frees
 * arg_bo_hdls, and copy_from_user()'s unsigned long result was stored into
 * the signed int ret (truncating) before being replaced — test it directly.
 */
static int amdxdna_drm_submit_execbuf(struct amdxdna_client *client,
                                      struct amdxdna_drm_exec_cmd *args)
{
        struct amdxdna_dev *xdna = client->xdna;
        u32 *arg_bo_hdls = NULL;
        u32 cmd_bo_hdl;
        int ret;

        if (args->arg_count > MAX_ARG_COUNT) {
                XDNA_ERR(xdna, "Invalid arg bo count %d", args->arg_count);
                return -EINVAL;
        }

        /* Only support single command for now. */
        if (args->cmd_count != 1) {
                XDNA_ERR(xdna, "Invalid cmd bo count %d", args->cmd_count);
                return -EINVAL;
        }

        cmd_bo_hdl = (u32)args->cmd_handles;
        if (args->arg_count) {
                arg_bo_hdls = kcalloc(args->arg_count, sizeof(u32), GFP_KERNEL);
                if (!arg_bo_hdls)
                        return -ENOMEM;
                if (copy_from_user(arg_bo_hdls, u64_to_user_ptr(args->args),
                                   args->arg_count * sizeof(u32))) {
                        ret = -EFAULT;
                        goto free_arg_bo_hdls;
                }
        }

        ret = amdxdna_cmd_submit(client, NULL, cmd_bo_hdl, arg_bo_hdls,
                                 args->arg_count, args->hwctx, &args->seq);
        if (ret)
                XDNA_DBG(xdna, "Submit cmds failed, ret %d", ret);
        else
                XDNA_DBG(xdna, "Pushed cmd %lld to scheduler", args->seq);

free_arg_bo_hdls:
        kfree(arg_bo_hdls);
        return ret;
}

/*
 * DRM ioctl: dispatch a command submission by type.  Only
 * AMDXDNA_CMD_SUBMIT_EXEC_BUF is recognized; extension fields must be zero.
 */
int amdxdna_drm_submit_cmd_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
        struct amdxdna_drm_exec_cmd *args = data;
        struct amdxdna_client *client = filp->driver_priv;

        if (args->ext || args->ext_flags)
                return -EINVAL;

        if (args->type == AMDXDNA_CMD_SUBMIT_EXEC_BUF)
                return amdxdna_drm_submit_execbuf(client, args);

        XDNA_ERR(client->xdna, "Invalid command type %d", args->type);
        return -EINVAL;
}