#include "xe_pxp.h"
#include <drm/drm_managed.h>
#include <uapi/drm/xe_drm.h>
#include "xe_bo.h"
#include "xe_bo_types.h"
#include "xe_device_types.h"
#include "xe_exec_queue.h"
#include "xe_force_wake.h"
#include "xe_guc_submit.h"
#include "xe_gsc_proxy.h"
#include "xe_gt_types.h"
#include "xe_huc.h"
#include "xe_mmio.h"
#include "xe_pm.h"
#include "xe_pxp_submit.h"
#include "xe_pxp_types.h"
#include "xe_uc_fw.h"
#include "regs/xe_irq_regs.h"
#include "regs/xe_pxp_regs.h"
#define ARB_SESSION DRM_XE_PXP_HWDRM_DEFAULT_SESSION
#define PXP_ACTIVATION_TIMEOUT_MS 300
#define PXP_TERMINATION_TIMEOUT_MS 500
bool xe_pxp_is_supported(const struct xe_device *xe)
{
return xe->info.has_pxp && IS_ENABLED(CONFIG_INTEL_MEI_GSC_PROXY);
}
bool xe_pxp_is_enabled(const struct xe_pxp *pxp)
{
return pxp;
}
static bool pxp_prerequisites_done(const struct xe_pxp *pxp)
{
struct xe_gt *gt = pxp->gt;
bool ready;
CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FORCEWAKE_ALL);
XE_WARN_ON(!xe_force_wake_ref_has_domain(fw_ref.domains, XE_FORCEWAKE_ALL));
ready = xe_huc_is_authenticated(>->uc.huc, XE_HUC_AUTH_VIA_GSC) &&
xe_gsc_proxy_init_done(>->uc.gsc);
return ready;
}
/**
 * xe_pxp_get_readiness_status - query whether PXP is ready to start sessions
 * @pxp: the xe_pxp pointer (can be NULL if PXP is disabled)
 *
 * Returns: -ENODEV if PXP is disabled, -EIO if either the HuC or GSC firmware
 * failed to load, 1 if the prerequisites (HuC auth, GSC proxy) are done,
 * 0 if they are still pending.
 */
int xe_pxp_get_readiness_status(struct xe_pxp *pxp)
{
	if (!xe_pxp_is_enabled(pxp))
		return -ENODEV;

	/* A FW load failure means PXP can never become ready */
	if (xe_uc_fw_status_to_error(pxp->gt->uc.huc.fw.status) ||
	    xe_uc_fw_status_to_error(pxp->gt->uc.gsc.fw.status))
		return -EIO;

	guard(xe_pm_runtime)(pxp->xe);

	return pxp_prerequisites_done(pxp) ? 1 : 0;
}
/*
 * pxp_session_is_in_play - check if a session is marked active in HW
 * @pxp: the xe_pxp pointer
 * @id: the session ID (bit index in the KCR_SIP register)
 *
 * Fixes the ">->" mangling of "&gt->" so the MMIO access compiles.
 *
 * Returns: true if the KCR_SIP register reports the session as in play.
 */
static bool pxp_session_is_in_play(struct xe_pxp *pxp, u32 id)
{
	struct xe_gt *gt = pxp->gt;

	return xe_mmio_read32(&gt->mmio, KCR_SIP) & BIT(id);
}
/*
 * pxp_wait_for_session_state - wait for a session to enter/exit the in-play state
 * @pxp: the xe_pxp pointer
 * @id: the session ID (bit index in KCR_SIP)
 * @in_play: true to wait for the bit to be set, false to wait for it to clear
 *
 * Fixes the ">->" mangling of "&gt->" so the MMIO access compiles.
 *
 * Returns: 0 on success, a negative errno from xe_mmio_wait32() on timeout.
 */
static int pxp_wait_for_session_state(struct xe_pxp *pxp, u32 id, bool in_play)
{
	struct xe_gt *gt = pxp->gt;
	u32 mask = BIT(id);

	return xe_mmio_wait32(&gt->mmio, KCR_SIP, mask, in_play ? mask : 0,
			      250, NULL, false);
}
static void pxp_invalidate_queues(struct xe_pxp *pxp);
static int pxp_terminate_hw(struct xe_pxp *pxp)
{
struct xe_gt *gt = pxp->gt;
int ret = 0;
drm_dbg(&pxp->xe->drm, "Terminating PXP\n");
CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FW_GT);
if (!xe_force_wake_ref_has_domain(fw_ref.domains, XE_FW_GT))
return -EIO;
ret = xe_pxp_submit_session_termination(pxp, ARB_SESSION);
if (ret)
return ret;
ret = pxp_wait_for_session_state(pxp, ARB_SESSION, false);
if (ret)
return ret;
xe_mmio_write32(>->mmio, KCR_GLOBAL_TERMINATE, 1);
return xe_pxp_submit_session_invalidation(&pxp->gsc_res, ARB_SESSION);
}
/*
 * mark_termination_in_progress - flag that a termination has started
 * @pxp: the xe_pxp pointer
 *
 * Re-arms the termination completion (so waiters block until
 * pxp_terminate_complete() fires it) and moves the state machine to
 * TERMINATION_IN_PROGRESS. Caller must hold pxp->mutex.
 */
static void mark_termination_in_progress(struct xe_pxp *pxp)
{
	lockdep_assert_held(&pxp->mutex);

	reinit_completion(&pxp->termination);
	pxp->status = XE_PXP_TERMINATION_IN_PROGRESS;
}
/*
 * pxp_terminate - run the PXP termination flow
 * @pxp: the xe_pxp pointer
 *
 * Waits for any in-progress start to finish, updates the state machine and,
 * unless suspended or a termination is already running, submits the HW
 * termination. On HW failure the state is set to ERROR and waiters are
 * released.
 */
static void pxp_terminate(struct xe_pxp *pxp)
{
	int ret = 0;
	struct xe_device *xe = pxp->xe;

	/* Don't race the termination against a start that is mid-flight */
	if (!wait_for_completion_timeout(&pxp->activation,
					 msecs_to_jiffies(PXP_ACTIVATION_TIMEOUT_MS)))
		drm_err(&xe->drm, "failed to wait for PXP start before termination\n");

	mutex_lock(&pxp->mutex);

	/* Terminating an active session invalidates all keys created under it */
	if (pxp->status == XE_PXP_ACTIVE)
		pxp->key_instance++;

	/*
	 * While suspended, no HW termination is submitted; the resume path
	 * sets XE_PXP_NEEDS_TERMINATION instead (see xe_pxp_pm_resume).
	 */
	if (pxp->status == XE_PXP_SUSPENDED) {
		mutex_unlock(&pxp->mutex);
		return;
	}

	/*
	 * A termination is already running: record that another one is needed
	 * once it completes (handled in pxp_terminate_complete).
	 */
	if (pxp->status == XE_PXP_TERMINATION_IN_PROGRESS) {
		pxp->status = XE_PXP_NEEDS_ADDITIONAL_TERMINATION;
		mutex_unlock(&pxp->mutex);
		return;
	}

	mark_termination_in_progress(pxp);

	mutex_unlock(&pxp->mutex);

	/* Kill queues outside the mutex; the HW termination follows */
	pxp_invalidate_queues(pxp);

	ret = pxp_terminate_hw(pxp);
	if (ret) {
		drm_err(&xe->drm, "PXP termination failed: %pe\n", ERR_PTR(ret));
		mutex_lock(&pxp->mutex);
		pxp->status = XE_PXP_ERROR;
		/* release anyone waiting on the termination completion */
		complete_all(&pxp->termination);
		mutex_unlock(&pxp->mutex);
	}
}
/*
 * pxp_terminate_complete - handle the HW termination-complete notification
 * @pxp: the xe_pxp pointer
 *
 * Advances the state machine: a normal termination leaves PXP ready to
 * start again; if another termination was requested meanwhile, mark it as
 * still needed; a completion arriving while suspended is ignored. Finally
 * releases all waiters on the termination completion.
 */
static void pxp_terminate_complete(struct xe_pxp *pxp)
{
	mutex_lock(&pxp->mutex);

	switch (pxp->status) {
	case XE_PXP_TERMINATION_IN_PROGRESS:
		pxp->status = XE_PXP_READY_TO_START;
		break;
	case XE_PXP_NEEDS_ADDITIONAL_TERMINATION:
		/* another termination was queued while this one ran */
		pxp->status = XE_PXP_NEEDS_TERMINATION;
		break;
	case XE_PXP_SUSPENDED:
		/* stale completion across a suspend; nothing to update */
		break;
	default:
		drm_err(&pxp->xe->drm,
			"PXP termination complete while status was %u\n",
			pxp->status);
	}

	complete_all(&pxp->termination);

	mutex_unlock(&pxp->mutex);
}
/*
 * pxp_irq_work - deferred handler for PXP KCR interrupts
 * @work: the irq work item embedded in struct xe_pxp
 *
 * Snapshots and clears the pending event mask under the irq lock, then
 * services termination requests and/or completions. A termination request
 * needs HW access, so it is only serviced if the device is already awake
 * (runtime-PM ref taken conditionally and released at the end).
 */
static void pxp_irq_work(struct work_struct *work)
{
	struct xe_pxp *pxp = container_of(work, typeof(*pxp), irq.work);
	struct xe_device *xe = pxp->xe;
	u32 events = 0;

	/* consume pending events atomically w.r.t. the irq handler */
	spin_lock_irq(&xe->irq.lock);
	events = pxp->irq.events;
	pxp->irq.events = 0;
	spin_unlock_irq(&xe->irq.lock);

	if (!events)
		return;

	/*
	 * If the device is suspended there is no session to terminate; the
	 * suspend path handles invalidation itself (see xe_pxp_pm_suspend),
	 * so only take the wakeref if the device is already active.
	 */
	if ((events & PXP_TERMINATION_REQUEST) && !xe_pm_runtime_get_if_active(xe))
		return;

	if (events & PXP_TERMINATION_REQUEST) {
		/* a fresh request supersedes any stale completion event */
		events &= ~PXP_TERMINATION_COMPLETE;
		pxp_terminate(pxp);
	}

	if (events & PXP_TERMINATION_COMPLETE)
		pxp_terminate_complete(pxp);

	if (events & PXP_TERMINATION_REQUEST)
		xe_pm_runtime_put(xe);
}
/**
 * xe_pxp_irq_handler - handler for PXP irq bits
 * @xe: the xe device
 * @iir: interrupt identity register bits (KCR_* interrupt flags)
 *
 * Translates the HW interrupt bits into PXP_TERMINATION_* events and kicks
 * the ordered workqueue to service them. Called with xe->irq.lock held
 * (asserted below), so the event mask update needs no extra locking.
 */
void xe_pxp_irq_handler(struct xe_device *xe, u16 iir)
{
	struct xe_pxp *pxp = xe->pxp;

	if (!xe_pxp_is_enabled(pxp)) {
		drm_err(&xe->drm, "PXP irq 0x%x received with PXP disabled!\n", iir);
		return;
	}

	lockdep_assert_held(&xe->irq.lock);

	if (unlikely(!iir))
		return;

	/* either HW-initiated or FW-requested termination */
	if (iir & (KCR_PXP_STATE_TERMINATED_INTERRUPT |
		   KCR_APP_TERMINATED_PER_FW_REQ_INTERRUPT))
		pxp->irq.events |= PXP_TERMINATION_REQUEST;

	if (iir & KCR_PXP_STATE_RESET_COMPLETE_INTERRUPT)
		pxp->irq.events |= PXP_TERMINATION_COMPLETE;

	if (pxp->irq.events)
		queue_work(pxp->irq.wq, &pxp->irq.work);
}
/*
 * kcr_pxp_set_status - allow or block display ME writes in the KCR unit
 * @pxp: the xe_pxp pointer
 * @enable: true to enable, false to disable
 *
 * Returns: 0 on success, -EIO if the GT forcewake could not be taken.
 */
static int kcr_pxp_set_status(const struct xe_pxp *pxp, bool enable)
{
	u32 val;

	if (enable)
		val = _MASKED_BIT_ENABLE(KCR_INIT_ALLOW_DISPLAY_ME_WRITES);
	else
		val = _MASKED_BIT_DISABLE(KCR_INIT_ALLOW_DISPLAY_ME_WRITES);

	CLASS(xe_force_wake, fw_ref)(gt_to_fw(pxp->gt), XE_FW_GT);
	if (!xe_force_wake_ref_has_domain(fw_ref.domains, XE_FW_GT))
		return -EIO;

	xe_mmio_write32(&pxp->gt->mmio, KCR_INIT, val);

	return 0;
}
/* Enable the KCR unit for PXP use. Returns 0 or -EIO (forcewake failure). */
static int kcr_pxp_enable(const struct xe_pxp *pxp)
{
	return kcr_pxp_set_status(pxp, true);
}
/* Disable the KCR unit. Returns 0 or -EIO (forcewake failure). */
static int kcr_pxp_disable(const struct xe_pxp *pxp)
{
	return kcr_pxp_set_status(pxp, false);
}
/*
 * pxp_fini - devm teardown callback registered in xe_pxp_init
 * @arg: the xe_pxp pointer
 *
 * Destroys the irq workqueue first (flushing any pending irq work) before
 * releasing the execution resources that work might use.
 */
static void pxp_fini(void *arg)
{
	struct xe_pxp *pxp = arg;

	destroy_workqueue(pxp->irq.wq);
	xe_pxp_destroy_execution_resources(pxp);
}
int xe_pxp_init(struct xe_device *xe)
{
struct xe_gt *gt = xe->tiles[0].media_gt;
struct xe_pxp *pxp;
int err;
if (!xe_pxp_is_supported(xe))
return 0;
if (xe->info.tile_count > 1 || !gt)
return 0;
if (!(gt->info.engine_mask & BIT(XE_HW_ENGINE_GSCCS0)))
return 0;
if (!xe_uc_fw_is_loadable(>->uc.gsc.fw) ||
!xe_uc_fw_is_loadable(>->uc.huc.fw)) {
drm_info(&xe->drm, "skipping PXP init due to missing FW dependencies");
return 0;
}
if (xe->info.platform == XE_PANTHERLAKE &&
gt->uc.gsc.fw.versions.found[XE_UC_FW_VER_RELEASE].build < 1396) {
drm_info(&xe->drm, "PXP requires PTL GSC build 1396 or newer\n");
return 0;
}
pxp = drmm_kzalloc(&xe->drm, sizeof(struct xe_pxp), GFP_KERNEL);
if (!pxp) {
err = -ENOMEM;
goto out;
}
INIT_LIST_HEAD(&pxp->queues.list);
spin_lock_init(&pxp->queues.lock);
INIT_WORK(&pxp->irq.work, pxp_irq_work);
pxp->xe = xe;
pxp->gt = gt;
pxp->key_instance = 1;
pxp->last_suspend_key_instance = 1;
init_completion(&pxp->activation);
init_completion(&pxp->termination);
complete_all(&pxp->termination);
complete_all(&pxp->activation);
mutex_init(&pxp->mutex);
pxp->irq.wq = alloc_ordered_workqueue("pxp-wq", 0);
if (!pxp->irq.wq) {
err = -ENOMEM;
goto out_free;
}
err = kcr_pxp_enable(pxp);
if (err)
goto out_wq;
err = xe_pxp_allocate_execution_resources(pxp);
if (err)
goto out_kcr_disable;
xe->pxp = pxp;
return devm_add_action_or_reset(xe->drm.dev, pxp_fini, pxp);
out_kcr_disable:
kcr_pxp_disable(pxp);
out_wq:
destroy_workqueue(pxp->irq.wq);
out_free:
drmm_kfree(&xe->drm, pxp);
out:
drm_err(&xe->drm, "PXP initialization failed: %pe\n", ERR_PTR(err));
return err;
}
/*
 * __pxp_start_arb_session - start the default HWDRM (ARB) session
 * @pxp: the xe_pxp pointer
 *
 * Submits the session-init message to the GSC and waits for the session to
 * show up as in-play in the KCR_SIP register.
 *
 * Returns: 0 on success, -EIO if forcewake fails, -EEXIST if the session is
 * already in play, or a negative errno from the submission/wait.
 */
static int __pxp_start_arb_session(struct xe_pxp *pxp)
{
	int ret;

	CLASS(xe_force_wake, fw_ref)(gt_to_fw(pxp->gt), XE_FW_GT);
	if (!xe_force_wake_ref_has_domain(fw_ref.domains, XE_FW_GT))
		return -EIO;

	/* sanity check: a start should only happen after a full termination */
	if (pxp_session_is_in_play(pxp, ARB_SESSION))
		return -EEXIST;

	ret = xe_pxp_submit_session_init(&pxp->gsc_res, ARB_SESSION);
	if (ret) {
		drm_err(&pxp->xe->drm, "Failed to init PXP arb session: %pe\n", ERR_PTR(ret));
		return ret;
	}

	ret = pxp_wait_for_session_state(pxp, ARB_SESSION, true);
	if (ret) {
		drm_err(&pxp->xe->drm, "PXP ARB session failed to go in play%pe\n", ERR_PTR(ret));
		return ret;
	}

	drm_dbg(&pxp->xe->drm, "PXP ARB session is active\n");

	return 0;
}
/**
 * xe_pxp_exec_queue_set_type - mark an exec queue as using a PXP session type
 * @pxp: the xe_pxp pointer (can be NULL if PXP is disabled)
 * @q: the queue to mark
 * @type: the session type; only DRM_XE_PXP_TYPE_HWDRM is supported (asserted)
 *
 * Returns: 0 on success, -ENODEV if PXP is disabled.
 */
int xe_pxp_exec_queue_set_type(struct xe_pxp *pxp, struct xe_exec_queue *q, u8 type)
{
	if (!xe_pxp_is_enabled(pxp))
		return -ENODEV;

	/* hw sessions are the only type supported for now */
	xe_assert(pxp->xe, type == DRM_XE_PXP_TYPE_HWDRM);

	q->pxp.type = type;

	return 0;
}
/*
 * __exec_queue_add - register a queue with the active PXP session
 * @pxp: the xe_pxp pointer
 * @q: the queue to register
 *
 * Returns: 0 and links the queue if the session is active, -EIO if PXP is
 * in the ERROR or SUSPENDED state, -EBUSY for any transient state (caller
 * retries the start flow).
 */
static int __exec_queue_add(struct xe_pxp *pxp, struct xe_exec_queue *q)
{
	int ret;

	mutex_lock(&pxp->mutex);

	switch (pxp->status) {
	case XE_PXP_ACTIVE:
		/* track the queue so it can be invalidated on termination */
		spin_lock_irq(&pxp->queues.lock);
		list_add_tail(&q->pxp.link, &pxp->queues.list);
		spin_unlock_irq(&pxp->queues.lock);
		ret = 0;
		break;
	case XE_PXP_ERROR:
	case XE_PXP_SUSPENDED:
		/* nothing the caller can do from here */
		ret = -EIO;
		break;
	default:
		/* session state changed under us; caller retries */
		ret = -EBUSY;
		break;
	}

	mutex_unlock(&pxp->mutex);

	return ret;
}
/*
 * pxp_start - drive the PXP state machine until a session is active
 * @pxp: the xe_pxp pointer (can be NULL if PXP is disabled)
 * @type: the session type; only DRM_XE_PXP_TYPE_HWDRM is supported (asserted)
 *
 * Waits for any in-flight termination/activation, then either starts the
 * ARB session, performs a pending termination first, or re-waits if another
 * thread is mid-flight. Loops (via wait_for_idle) until the session is
 * active or a non-retryable error is hit.
 *
 * Returns: 0 on success (session active), -ENODEV if disabled, -EBUSY if
 * the prerequisites are not done yet, -ETIMEDOUT on wait timeouts, -EIO on
 * error states, or a negative errno from the HW flows.
 */
static int pxp_start(struct xe_pxp *pxp, u8 type)
{
	int ret = 0;
	bool restart;

	if (!xe_pxp_is_enabled(pxp))
		return -ENODEV;

	xe_assert(pxp->xe, type == DRM_XE_PXP_TYPE_HWDRM);

	/* check if the HuC/GSC prerequisites are ready; 0 means not yet */
	ret = xe_pxp_get_readiness_status(pxp);
	if (ret <= 0)
		return ret ?: -EBUSY;

	ret = 0;

wait_for_idle:
	/*
	 * Wait until both any pending termination and any concurrent start
	 * have completed before touching the state.
	 */
	if (!wait_for_completion_timeout(&pxp->termination,
					 msecs_to_jiffies(PXP_TERMINATION_TIMEOUT_MS)))
		return -ETIMEDOUT;
	if (!wait_for_completion_timeout(&pxp->activation,
					 msecs_to_jiffies(PXP_ACTIVATION_TIMEOUT_MS)))
		return -ETIMEDOUT;

	restart = false;
	mutex_lock(&pxp->mutex);

	switch (pxp->status) {
	case XE_PXP_ERROR:
		ret = -EIO;
		goto out_unlock;
	case XE_PXP_ACTIVE:
		/* someone else already started the session */
		goto out_unlock;
	case XE_PXP_READY_TO_START:
		/* claim the start: re-arm activation so others wait on us */
		pxp->status = XE_PXP_START_IN_PROGRESS;
		reinit_completion(&pxp->activation);
		break;
	case XE_PXP_START_IN_PROGRESS:
		/* a concurrent start raced us between the wait and the lock */
		XE_WARN_ON(completion_done(&pxp->activation));
		restart = true;
		goto out_unlock;
	case XE_PXP_NEEDS_TERMINATION:
		/* we must run a termination before we can start */
		mark_termination_in_progress(pxp);
		break;
	case XE_PXP_TERMINATION_IN_PROGRESS:
	case XE_PXP_NEEDS_ADDITIONAL_TERMINATION:
		/* a concurrent termination raced us; wait it out */
		XE_WARN_ON(completion_done(&pxp->termination));
		restart = true;
		goto out_unlock;
	case XE_PXP_SUSPENDED:
	default:
		drm_err(&pxp->xe->drm, "unexpected state during PXP start: %u\n", pxp->status);
		ret = -EIO;
		goto out_unlock;
	}

	mutex_unlock(&pxp->mutex);

	/* termination was claimed above (completion re-armed, not done) */
	if (!completion_done(&pxp->termination)) {
		ret = pxp_terminate_hw(pxp);
		if (ret) {
			drm_err(&pxp->xe->drm, "PXP termination failed before start\n");
			mutex_lock(&pxp->mutex);
			pxp->status = XE_PXP_ERROR;
			complete_all(&pxp->termination);
			goto out_unlock;
		}

		goto wait_for_idle;
	}

	/* we reach here only with the start claimed (activation re-armed) */
	XE_WARN_ON(completion_done(&pxp->activation));

	ret = __pxp_start_arb_session(pxp);

	mutex_lock(&pxp->mutex);

	complete_all(&pxp->activation);

	/*
	 * Any other process should wait until the state goes away from
	 * XE_PXP_START_IN_PROGRESS, so if the state is not that something went
	 * wrong. Mark the status as needing termination and try again.
	 */
	if (pxp->status != XE_PXP_START_IN_PROGRESS) {
		drm_err(&pxp->xe->drm, "unexpected state after PXP start: %u\n", pxp->status);
		pxp->status = XE_PXP_NEEDS_TERMINATION;
		restart = true;
		goto out_unlock;
	}

	/* If everything went ok, update the status and add the queue to the list */
	if (!ret)
		pxp->status = XE_PXP_ACTIVE;
	else
		pxp->status = XE_PXP_ERROR;

out_unlock:
	mutex_unlock(&pxp->mutex);

	if (restart)
		goto wait_for_idle;

	return ret;
}
/**
 * xe_pxp_exec_queue_add - add a queue to the list of PXP-using queues
 * @pxp: the xe_pxp pointer (can be NULL if PXP is disabled)
 * @q: the queue to add
 *
 * Starts (or restarts) the PXP session as needed and registers the queue.
 * A runtime-PM reference is taken up front and kept only if the queue was
 * successfully added; it is dropped again when the queue is removed.
 *
 * Returns: 0 on success, -ENODEV if PXP is disabled, or a negative errno
 * from the start/add flows.
 */
int xe_pxp_exec_queue_add(struct xe_pxp *pxp, struct xe_exec_queue *q)
{
	int ret;

	if (!xe_pxp_is_enabled(pxp))
		return -ENODEV;

	/* keep the device awake while the queue uses the PXP session */
	xe_pm_runtime_get(pxp->xe);

	for (;;) {
		ret = pxp_start(pxp, q->pxp.type);
		if (ret)
			break;

		ret = __exec_queue_add(pxp, q);
		if (ret != -EBUSY)
			break;
		/* -EBUSY: session state changed between start and add; retry */
	}

	if (ret)
		xe_pm_runtime_put(pxp->xe);

	return ret;
}
ALLOW_ERROR_INJECTION(xe_pxp_exec_queue_add, ERRNO);
/*
 * __pxp_exec_queue_remove - unregister a queue from the PXP session
 * @pxp: the xe_pxp pointer (can be NULL if PXP is disabled)
 * @q: the queue to remove
 * @lock: true to take pxp->queues.lock here; false if the caller already
 *        holds it or has moved the queue to a private list (invalidation path)
 *
 * Drops the runtime-PM reference taken in xe_pxp_exec_queue_add, but only
 * if the queue was actually on the list (guards against double-remove).
 */
static void __pxp_exec_queue_remove(struct xe_pxp *pxp, struct xe_exec_queue *q, bool lock)
{
	bool need_pm_put = false;

	if (!xe_pxp_is_enabled(pxp))
		return;

	if (lock)
		spin_lock_irq(&pxp->queues.lock);

	if (!list_empty(&q->pxp.link)) {
		list_del_init(&q->pxp.link);
		need_pm_put = true;
	}

	q->pxp.type = DRM_XE_PXP_TYPE_NONE;

	if (lock)
		spin_unlock_irq(&pxp->queues.lock);

	/* must not call this with the spinlock held */
	if (need_pm_put)
		xe_pm_runtime_put(pxp->xe);
}
/**
 * xe_pxp_exec_queue_remove - remove a queue from the PXP list
 * @pxp: the xe_pxp pointer (can be NULL if PXP is disabled)
 * @q: the queue to remove
 */
void xe_pxp_exec_queue_remove(struct xe_pxp *pxp, struct xe_exec_queue *q)
{
	__pxp_exec_queue_remove(pxp, q, true);
}
/*
 * pxp_invalidate_queues - kill and unregister every queue using the session
 * @pxp: the xe_pxp pointer
 *
 * Under the queues spinlock, takes a reference on each queue (skipping ones
 * already at refcount zero, i.e. mid-destruction) and moves it to a private
 * list; then, outside the lock, kills each queue, removes it (lock=false,
 * since it is no longer on the shared list) and drops the reference.
 */
static void pxp_invalidate_queues(struct xe_pxp *pxp)
{
	struct xe_exec_queue *tmp, *q;
	LIST_HEAD(to_clean);

	spin_lock_irq(&pxp->queues.lock);

	list_for_each_entry_safe(q, tmp, &pxp->queues.list, pxp.link) {
		q = xe_exec_queue_get_unless_zero(q);
		if (!q)
			continue;

		list_move_tail(&q->pxp.link, &to_clean);
	}
	spin_unlock_irq(&pxp->queues.lock);

	list_for_each_entry_safe(q, tmp, &to_clean, pxp.link) {
		xe_exec_queue_kill(q);

		/*
		 * We hold a ref to the queue so there is no risk of racing with
		 * the calls to exec_queue_remove coming from exec_queue_destroy.
		 */
		__pxp_exec_queue_remove(pxp, q, false);

		xe_exec_queue_put(q);
	}
}
/**
 * xe_pxp_key_assign - stamp a BO with the current PXP key instance
 * @pxp: the xe_pxp pointer (can be NULL if PXP is disabled)
 * @bo: the BO to stamp; must not already have a key instance (asserted)
 *
 * The key instance is bumped on every session termination, so a BO's stamp
 * identifies which key generation it was encrypted with.
 *
 * Returns: 0 on success, -ENODEV if PXP is disabled.
 */
int xe_pxp_key_assign(struct xe_pxp *pxp, struct xe_bo *bo)
{
	if (!xe_pxp_is_enabled(pxp))
		return -ENODEV;

	xe_assert(pxp->xe, !bo->pxp_key_instance);

	bo->pxp_key_instance = pxp->key_instance;

	return 0;
}
/**
 * xe_pxp_bo_key_check - check that a BO was encrypted with the current key
 * @pxp: the xe_pxp pointer (can be NULL if PXP is disabled)
 * @bo: the BO to check
 *
 * Returns: 0 if the BO's key instance matches the current one, -ENODEV if
 * PXP is disabled, -EINVAL if the BO is not protected, -ENOEXEC if the BO
 * was encrypted with an older (invalidated) key.
 */
int xe_pxp_bo_key_check(struct xe_pxp *pxp, struct xe_bo *bo)
{
	if (!xe_pxp_is_enabled(pxp))
		return -ENODEV;

	if (!xe_bo_is_protected(bo))
		return -EINVAL;

	/* a protected BO must have been stamped at creation */
	xe_assert(pxp->xe, bo->pxp_key_instance);

	if (bo->pxp_key_instance != pxp->key_instance)
		return -ENOEXEC;

	return 0;
}
int xe_pxp_obj_key_check(struct drm_gem_object *obj)
{
struct xe_bo *bo = gem_to_xe_bo(obj);
struct xe_device *xe = xe_bo_device(bo);
struct xe_pxp *pxp = xe->pxp;
return xe_pxp_bo_key_check(pxp, bo);
}
/**
 * xe_pxp_pm_suspend - prepare PXP for system/runtime suspend
 * @pxp: the xe_pxp pointer (can be NULL if PXP is disabled)
 *
 * Waits for any in-flight start, marks the state as SUSPENDED, bumps the
 * key instance (invalidating all keys) if a session was active or a
 * not-yet-executed termination is pending, invalidates the queues and
 * waits for any in-flight termination.
 *
 * Returns: 0 on success, -ETIMEDOUT if a wait timed out (suspend proceeds
 * regardless; the status is set to SUSPENDED in all cases).
 */
int xe_pxp_pm_suspend(struct xe_pxp *pxp)
{
	bool needs_queue_inval = false;
	int ret = 0;

	if (!xe_pxp_is_enabled(pxp))
		return 0;

wait_for_activation:
	/* don't suspend in the middle of a session start */
	if (!wait_for_completion_timeout(&pxp->activation,
					 msecs_to_jiffies(PXP_ACTIVATION_TIMEOUT_MS)))
		ret = -ETIMEDOUT;

	mutex_lock(&pxp->mutex);

	switch (pxp->status) {
	case XE_PXP_ERROR:
	case XE_PXP_READY_TO_START:
	case XE_PXP_SUSPENDED:
	case XE_PXP_TERMINATION_IN_PROGRESS:
	case XE_PXP_NEEDS_ADDITIONAL_TERMINATION:
		/* no active session; nothing to invalidate */
		break;
	case XE_PXP_START_IN_PROGRESS:
		/* a start claimed the state after our wait; wait again */
		mutex_unlock(&pxp->mutex);
		goto wait_for_activation;
	case XE_PXP_NEEDS_TERMINATION:
		/*
		 * If the key was already bumped for this termination in a
		 * previous suspend cycle, don't bump it again.
		 */
		if (pxp->key_instance == pxp->last_suspend_key_instance)
			break;
		fallthrough;
	case XE_PXP_ACTIVE:
		/* invalidate all keys created under the current session */
		pxp->key_instance++;
		needs_queue_inval = true;
		break;
	}

	pxp->status = XE_PXP_SUSPENDED;

	mutex_unlock(&pxp->mutex);

	if (needs_queue_inval)
		pxp_invalidate_queues(pxp);

	/* let any in-flight HW termination finish before powering down */
	if (!wait_for_completion_timeout(&pxp->termination,
					 msecs_to_jiffies(PXP_TERMINATION_TIMEOUT_MS)))
		ret = -ETIMEDOUT;

	/* remember the key generation this suspend settled on */
	pxp->last_suspend_key_instance = pxp->key_instance;

	return ret;
}
/**
 * xe_pxp_pm_resume - re-initialize PXP after suspend
 * @pxp: the xe_pxp pointer (can be NULL if PXP is disabled)
 *
 * Re-enables the KCR unit and moves the state machine from SUSPENDED to
 * NEEDS_TERMINATION (a fresh termination is required before a new session
 * can start), or to ERROR if the KCR enable failed.
 */
void xe_pxp_pm_resume(struct xe_pxp *pxp)
{
	int err;

	if (!xe_pxp_is_enabled(pxp))
		return;

	err = kcr_pxp_enable(pxp);

	mutex_lock(&pxp->mutex);
	xe_assert(pxp->xe, pxp->status == XE_PXP_SUSPENDED);
	pxp->status = err ? XE_PXP_ERROR : XE_PXP_NEEDS_TERMINATION;
	mutex_unlock(&pxp->mutex);
}