#include <drm/drm_managed.h>
#include "abi/guc_actions_sriov_abi.h"
#include "xe_gt.h"
#include "xe_gt_sriov_pf_helpers.h"
#include "xe_gt_sriov_pf_policy.h"
#include "xe_gt_sriov_printk.h"
#include "xe_guc.h"
#include "xe_guc_buf.h"
#include "xe_guc_ct.h"
#include "xe_guc_klv_helpers.h"
#include "xe_guc_submit.h"
#include "xe_pm.h"
/*
 * Send the PF2GUC_UPDATE_VGT_POLICY action to the GuC over CTB.
 * @addr: GGTT address of the buffer that holds the policy KLVs
 * @size: size of the KLV data, in dwords
 *
 * Return: result of the blocking CTB send; callers compare it against the
 * number of KLVs pushed (negative value on error).
 */
static int guc_action_update_vgt_policy(struct xe_guc *guc, u64 addr, u32 size)
{
	u32 msg[] = {
		GUC_ACTION_PF2GUC_UPDATE_VGT_POLICY,
		lower_32_bits(addr),
		upper_32_bits(addr),
		size,
	};

	return xe_guc_ct_send_block(&guc->ct, msg, ARRAY_SIZE(msg));
}
/*
 * Flush the CPU-side KLV buffer and hand its GGTT address to the GuC.
 * Fix: restore the address-of operator in "&gt->uc.guc" (lost to an
 * HTML-entity mangling of "&gt").
 */
static int pf_send_policy_klvs(struct xe_gt *gt, struct xe_guc_buf buf, u32 num_dwords)
{
	struct xe_guc *guc = &gt->uc.guc;

	return guc_action_update_vgt_policy(guc, xe_guc_buf_flush(buf), num_dwords);
}
/*
 * Push @num_klvs policy KLVs from @buf to the GuC and normalize the result:
 * 0 on full success, -ENOKEY if the GuC accepted fewer KLVs than sent,
 * -EPROTO on an unexpected reply, or the negative send error.
 * On failure the offending KLVs are dumped at info level.
 */
static int pf_push_policy_buf_klvs(struct xe_gt *gt, u32 num_klvs,
				   struct xe_guc_buf buf, u32 num_dwords)
{
	struct drm_printer p;
	int ret, err;

	ret = pf_send_policy_klvs(gt, buf, num_dwords);
	if (ret == num_klvs)
		return 0;

	err = ret < 0 ? ret : ret < num_klvs ? -ENOKEY : -EPROTO;
	p = xe_gt_info_printer(gt);

	xe_gt_sriov_notice(gt, "Failed to push %u policy KLV%s (%pe)\n",
			   num_klvs, str_plural(num_klvs), ERR_PTR(err));
	xe_guc_klv_print(xe_guc_buf_cpu_ptr(buf), num_dwords, &p);

	return err;
}
/*
 * Copy caller-provided KLV data into a GuC buffer and push it.
 * Fix: restore the address-of operator in "&gt->uc.guc.buf" (lost to an
 * HTML-entity mangling of "&gt").
 */
static int pf_push_policy_klvs(struct xe_gt *gt, u32 num_klvs,
			       const u32 *klvs, u32 num_dwords)
{
	/* scoped buffer: released automatically at end of function */
	CLASS(xe_guc_buf_from_data, buf)(&gt->uc.guc.buf, klvs, num_dwords * sizeof(u32));

	xe_gt_assert(gt, num_klvs == xe_guc_klv_count(klvs, num_dwords));

	if (!xe_guc_buf_is_valid(buf))
		return -ENOBUFS;

	return pf_push_policy_buf_klvs(gt, num_klvs, buf, num_dwords);
}
/* Push a single KLV whose payload is one u32 value. */
static int pf_push_policy_u32(struct xe_gt *gt, u16 key, u32 value)
{
	u32 data[] = { PREP_GUC_KLV(key, 1), value };

	return pf_push_policy_klvs(gt, 1, data, ARRAY_SIZE(data));
}
/*
 * Push a single KLV with an arbitrary dword payload (may be empty).
 * Fix: restore the address-of operator in "&gt->uc.guc.buf" (lost to an
 * HTML-entity mangling of "&gt").
 */
static int pf_push_policy_payload(struct xe_gt *gt, u16 key, void *payload, u32 num_dwords)
{
	/* scoped buffer sized for the KLV header plus the payload */
	CLASS(xe_guc_buf, buf)(&gt->uc.guc.buf, GUC_KLV_LEN_MIN + num_dwords);
	u32 *klv;

	if (!xe_guc_buf_is_valid(buf))
		return -ENOBUFS;

	klv = xe_guc_buf_cpu_ptr(buf);
	klv[0] = PREP_GUC_KLV(key, num_dwords);
	if (num_dwords)
		memcpy(&klv[1], payload, num_dwords * sizeof(u32));

	return pf_push_policy_buf_klvs(gt, 1, buf, GUC_KLV_LEN_MIN + num_dwords);
}
/*
 * Push a boolean policy KLV to the GuC and, on success, mirror the new
 * value into the PF's cached @policy. Logs the outcome either way.
 */
static int pf_update_policy_bool(struct xe_gt *gt, u16 key, bool *policy, bool value)
{
	int err = pf_push_policy_u32(gt, key, value);

	if (likely(!err)) {
		xe_gt_sriov_dbg(gt, "policy key %#x '%s' updated to '%s'\n",
				key, xe_guc_klv_key_to_string(key),
				str_enabled_disabled(value));
		*policy = value;
		return 0;
	}

	xe_gt_sriov_notice(gt, "Failed to update policy %#x '%s' to '%s' (%pe)\n",
			   key, xe_guc_klv_key_to_string(key),
			   str_enabled_disabled(value), ERR_PTR(err));
	return err;
}
/*
 * Push a u32 policy KLV to the GuC and, on success, mirror the new value
 * into the PF's cached @policy.
 *
 * Fix: the failure message printed the u32 @value through
 * str_enabled_disabled()/'%s' (copy-paste from the bool variant), which
 * misreports e.g. a sample period of 50 as "enabled"; print %u like the
 * success path does.
 */
static int pf_update_policy_u32(struct xe_gt *gt, u16 key, u32 *policy, u32 value)
{
	int err;

	err = pf_push_policy_u32(gt, key, value);
	if (unlikely(err)) {
		xe_gt_sriov_notice(gt, "Failed to update policy %#x '%s' to %u (%pe)\n",
				   key, xe_guc_klv_key_to_string(key),
				   value, ERR_PTR(err));
		return err;
	}

	xe_gt_sriov_dbg(gt, "policy key %#x '%s' updated to %u\n",
			key, xe_guc_klv_key_to_string(key), value);

	*policy = value;
	return 0;
}
/*
 * Reset the cached scheduling priority of the PF (index 0) and of every VF
 * to @priority. Caller must hold the PF master mutex.
 */
static void pf_bulk_reset_sched_priority(struct xe_gt *gt, u32 priority)
{
	unsigned int count = 1 + xe_gt_sriov_pf_get_totalvfs(gt);
	unsigned int idx;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	for (idx = 0; idx < count; idx++)
		gt->sriov.pf.vfs[idx].config.sched_priority = priority;
}
/*
 * Update the SCHED_IF_IDLE policy and, on success, bulk-reset every
 * function's cached scheduling priority to match the new policy.
 * Fix: restore the address-of operator in "&gt->sriov..." (lost to an
 * HTML-entity mangling of "&gt").
 */
static int pf_provision_sched_if_idle(struct xe_gt *gt, bool enable)
{
	int err;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	err = pf_update_policy_bool(gt, GUC_KLV_VGT_POLICY_SCHED_IF_IDLE_KEY,
				    &gt->sriov.pf.policy.guc.sched_if_idle,
				    enable);

	if (!err)
		pf_bulk_reset_sched_priority(gt, enable ? GUC_SCHED_PRIORITY_NORMAL :
					     GUC_SCHED_PRIORITY_LOW);
	return err;
}
static int pf_reprovision_sched_if_idle(struct xe_gt *gt)
{
xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
return pf_provision_sched_if_idle(gt, gt->sriov.pf.policy.guc.sched_if_idle);
}
/* Forget the cached SCHED_IF_IDLE value (back to the disabled default). */
static void pf_sanitize_sched_if_idle(struct xe_gt *gt)
{
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	gt->sriov.pf.policy.guc.sched_if_idle = false;
}
/**
 * xe_gt_sriov_pf_policy_set_sched_if_idle - Control the 'sched_if_idle' policy.
 * @gt: the &xe_gt where the policy should be applied
 * @enable: the requested policy value
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_policy_set_sched_if_idle(struct xe_gt *gt, bool enable)
{
	guard(mutex)(xe_gt_sriov_pf_master_mutex(gt));

	return pf_provision_sched_if_idle(gt, enable);
}
bool xe_gt_sriov_pf_policy_get_sched_if_idle(struct xe_gt *gt)
{
bool enable;
xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
enable = gt->sriov.pf.policy.guc.sched_if_idle;
mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
return enable;
}
/*
 * Update the RESET_AFTER_VF_SWITCH (reset_engine) policy in the GuC.
 * Fix: restore the address-of operator in "&gt->sriov..." (lost to an
 * HTML-entity mangling of "&gt").
 */
static int pf_provision_reset_engine(struct xe_gt *gt, bool enable)
{
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	return pf_update_policy_bool(gt, GUC_KLV_VGT_POLICY_RESET_AFTER_VF_SWITCH_KEY,
				     &gt->sriov.pf.policy.guc.reset_engine, enable);
}
static int pf_reprovision_reset_engine(struct xe_gt *gt)
{
xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
return pf_provision_reset_engine(gt, gt->sriov.pf.policy.guc.reset_engine);
}
/* Forget the cached reset_engine value (back to the disabled default). */
static void pf_sanitize_reset_engine(struct xe_gt *gt)
{
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	gt->sriov.pf.policy.guc.reset_engine = false;
}
/**
 * xe_gt_sriov_pf_policy_set_reset_engine - Control the 'reset_engine' policy.
 * @gt: the &xe_gt where the policy should be applied
 * @enable: the requested policy value
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_policy_set_reset_engine(struct xe_gt *gt, bool enable)
{
	guard(mutex)(xe_gt_sriov_pf_master_mutex(gt));

	return pf_provision_reset_engine(gt, enable);
}
bool xe_gt_sriov_pf_policy_get_reset_engine(struct xe_gt *gt)
{
bool enable;
xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
enable = gt->sriov.pf.policy.guc.reset_engine;
mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
return enable;
}
/*
 * Update the ADVERSE_SAMPLE_PERIOD policy in the GuC.
 * Fix: restore the address-of operator in "&gt->sriov..." (lost to an
 * HTML-entity mangling of "&gt").
 */
static int pf_provision_sample_period(struct xe_gt *gt, u32 value)
{
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	return pf_update_policy_u32(gt, GUC_KLV_VGT_POLICY_ADVERSE_SAMPLE_PERIOD_KEY,
				    &gt->sriov.pf.policy.guc.sample_period, value);
}
static int pf_reprovision_sample_period(struct xe_gt *gt)
{
xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
return pf_provision_sample_period(gt, gt->sriov.pf.policy.guc.sample_period);
}
/* Forget the cached sample_period value (0 means disabled). */
static void pf_sanitize_sample_period(struct xe_gt *gt)
{
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	gt->sriov.pf.policy.guc.sample_period = 0;
}
/**
 * xe_gt_sriov_pf_policy_set_sample_period - Control the 'sample_period' policy.
 * @gt: the &xe_gt where the policy should be applied
 * @value: the requested policy value (0 disables sampling)
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_gt_sriov_pf_policy_set_sample_period(struct xe_gt *gt, u32 value)
{
	guard(mutex)(xe_gt_sriov_pf_master_mutex(gt));

	return pf_provision_sample_period(gt, value);
}
u32 xe_gt_sriov_pf_policy_get_sample_period(struct xe_gt *gt)
{
u32 value;
xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
value = gt->sriov.pf.policy.guc.sample_period;
mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
return value;
}
/*
 * Build the per-media-slice scheduling group layout.
 * @gt: the media &xe_gt
 * @groups: output - drmm-allocated array of groups (untouched on failure)
 * @num_groups: output - number of groups (untouched on failure)
 *
 * Walks the fused-off engine masks to map each populated media slice to a
 * group, then records per-group engine masks (indexed by GuC engine class,
 * using logical instance bits). Bails out silently when grouping is not
 * possible or not useful (single slice, too many groups, OOM).
 *
 * Fix: restore the address-of operator in "&gt_to_xe(gt)->drm" (lost to an
 * HTML-entity mangling of "&gt"); add the kernel-style break in the final
 * switch case.
 */
static void pf_sched_group_media_slices(struct xe_gt *gt, struct guc_sched_group **groups,
					u32 *num_groups)
{
	u8 slice_to_group[MAX_MEDIA_SLICES];
	u32 vecs_mask = VECS_INSTANCES(gt);
	u32 gsc_mask = GSCCS_INSTANCES(gt);
	u32 vcs_mask = VCS_INSTANCES(gt);
	struct guc_sched_group *values;
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;
	int group = 0;
	int slice;

	xe_gt_assert(gt, xe_gt_is_media_type(gt));

	/* NOTE(review): this slice layout is only applied up to BMG */
	if (gt_to_xe(gt)->info.platform > XE_BATTLEMAGE)
		return;

	/* a slice forms a group if it has any VCS (2 per slice), VECS or GSCCS */
	for (slice = 0; slice < MAX_MEDIA_SLICES; slice++) {
		if ((vcs_mask & 0x3) || (vecs_mask & 0x1) || (gsc_mask & 0x1))
			slice_to_group[slice] = group++;
		vcs_mask >>= 2;
		vecs_mask >>= 1;
		gsc_mask >>= 1;
	}
	/* all instance bits must have been consumed by the slice walk */
	xe_gt_assert(gt, !vcs_mask);
	xe_gt_assert(gt, !vecs_mask);
	xe_gt_assert(gt, !gsc_mask);

	/* a single group would be pointless */
	if (group < 2)
		return;

	if (group > gt->sriov.pf.policy.guc.sched_groups.max_groups) {
		xe_gt_sriov_notice(gt, "media_slice mode has too many groups: %u vs %u\n",
				   group, gt->sriov.pf.policy.guc.sched_groups.max_groups);
		return;
	}

	values = drmm_kcalloc(&gt_to_xe(gt)->drm, group, sizeof(struct guc_sched_group),
			      GFP_KERNEL);
	if (!values)
		return;

	for_each_hw_engine(hwe, gt, id) {
		u8 guc_class = xe_engine_class_to_guc_class(hwe->class);

		switch (hwe->class) {
		case XE_ENGINE_CLASS_VIDEO_DECODE:
			/* two VCS engines per media slice */
			slice = hwe->instance / 2;
			break;
		case XE_ENGINE_CLASS_VIDEO_ENHANCE:
			slice = hwe->instance;
			break;
		case XE_ENGINE_CLASS_OTHER:
			slice = 0;
			break;
		default:
			xe_gt_assert_msg(gt, false,
					 "unknown media gt class %u (%s) during EGS setup\n",
					 hwe->class, hwe->name);
			slice = 0;
			break;
		}
		values[slice_to_group[slice]].engines[guc_class] |= BIT(hwe->logical_instance);
	}

	*groups = values;
	*num_groups = group;
}
bool xe_sriov_gt_pf_policy_has_sched_groups_support(struct xe_gt *gt)
{
xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
return GUC_FIRMWARE_VER_AT_LEAST(>->uc.guc, 70, 55, 1) &&
gt_to_xe(gt)->info.platform >= XE_BATTLEMAGE;
}
/*
 * Probe every scheduling-group mode and record which ones this GT supports.
 * A mode is marked supported when its builder produced a non-zero group
 * count.
 * Fix: restore the address-of operators in "&gt->sriov..." (lost to an
 * HTML-entity mangling of "&gt").
 */
static void pf_init_sched_groups(struct xe_gt *gt)
{
	enum xe_sriov_sched_group_modes m;

	if (!xe_sriov_gt_pf_policy_has_sched_groups_support(gt))
		return;

	gt->sriov.pf.policy.guc.sched_groups.max_groups = 2;

	for (m = XE_SRIOV_SCHED_GROUPS_DISABLED + 1; m < XE_SRIOV_SCHED_GROUPS_MODES_COUNT; m++) {
		u32 *num_groups = &gt->sriov.pf.policy.guc.sched_groups.modes[m].num_groups;
		struct guc_sched_group **groups =
			&gt->sriov.pf.policy.guc.sched_groups.modes[m].groups;

		switch (m) {
		case XE_SRIOV_SCHED_GROUPS_MEDIA_SLICES:
			if (xe_gt_is_media_type(gt))
				pf_sched_group_media_slices(gt, groups, num_groups);
			break;
		case XE_SRIOV_SCHED_GROUPS_DISABLED:
		case XE_SRIOV_SCHED_GROUPS_MODES_COUNT:
			/* excluded by the loop bounds */
			xe_gt_assert(gt, false);
			break;
		}

		xe_gt_assert(gt, *num_groups < GUC_MAX_SCHED_GROUPS);

		if (*num_groups)
			gt->sriov.pf.policy.guc.sched_groups.supported_modes |= BIT(m);
	}
}
/**
 * xe_sriov_gt_pf_policy_has_multi_group_modes - Check for usable group modes.
 * @gt: the &xe_gt to check
 *
 * Return: true if at least one non-disabled scheduling-group mode is
 * supported on this GT.
 */
bool xe_sriov_gt_pf_policy_has_multi_group_modes(struct xe_gt *gt)
{
	return gt->sriov.pf.policy.guc.sched_groups.supported_modes != 0;
}
/**
 * xe_sriov_gt_pf_policy_has_sched_group_mode - Check one group mode.
 * @gt: the &xe_gt to check
 * @mode: the scheduling-group mode to query
 *
 * The DISABLED mode is always available; any other mode must have been
 * marked supported during init.
 *
 * Return: true if @mode can be selected on this GT.
 */
bool xe_sriov_gt_pf_policy_has_sched_group_mode(struct xe_gt *gt,
						enum xe_sriov_sched_group_modes mode)
{
	if (mode == XE_SRIOV_SCHED_GROUPS_DISABLED)
		return true;

	return (gt->sriov.pf.policy.guc.sched_groups.supported_modes & BIT(mode)) != 0;
}
/*
 * Push the ENGINE_GROUP_CONFIG KLV for @mode; the payload is one
 * guc_sched_group (GUC_MAX_ENGINE_CLASSES dwords) per group.
 */
static int __pf_provision_sched_groups(struct xe_gt *gt, enum xe_sriov_sched_group_modes mode)
{
	u32 count = gt->sriov.pf.policy.guc.sched_groups.modes[mode].num_groups;

	return pf_push_policy_payload(gt, GUC_KLV_VGT_POLICY_ENGINE_GROUP_CONFIG_KEY,
				      gt->sriov.pf.policy.guc.sched_groups.modes[mode].groups,
				      count * GUC_MAX_ENGINE_CLASSES);
}
/*
 * Switch the scheduling-group mode, rejecting unsupported modes (-EINVAL),
 * changes while VFs are enabled (-EBUSY) and enabling groups while MLRC
 * queues are registered (-EPERM). No-op if @mode is already current.
 * Fix: restore the address-of operator in "&gt->uc.guc" (lost to an
 * HTML-entity mangling of "&gt").
 */
static int pf_provision_sched_groups(struct xe_gt *gt, enum xe_sriov_sched_group_modes mode)
{
	int err;

	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	if (!xe_sriov_gt_pf_policy_has_sched_group_mode(gt, mode))
		return -EINVAL;

	if (gt->sriov.pf.policy.guc.sched_groups.current_mode == mode)
		return 0;

	if (xe_sriov_pf_num_vfs(gt_to_xe(gt)))
		return -EBUSY;

	if (mode != XE_SRIOV_SCHED_GROUPS_DISABLED &&
	    xe_guc_has_registered_mlrc_queues(&gt->uc.guc)) {
		xe_gt_sriov_notice(gt, "can't enable sched groups with active MLRC queues\n");
		return -EPERM;
	}

	err = __pf_provision_sched_groups(gt, mode);
	if (err)
		return err;

	gt->sriov.pf.policy.guc.sched_groups.current_mode = mode;
	return 0;
}
/* Re-push the current scheduling-group mode, if any mode is supported. */
static int pf_reprovision_sched_groups(struct xe_gt *gt)
{
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	if (!xe_sriov_gt_pf_policy_has_multi_group_modes(gt))
		return 0;

	return __pf_provision_sched_groups(gt, gt->sriov.pf.policy.guc.sched_groups.current_mode);
}
/* Forget the cached scheduling-group mode (back to DISABLED). */
static void pf_sanitize_sched_groups(struct xe_gt *gt)
{
	xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
	lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));

	gt->sriov.pf.policy.guc.sched_groups.current_mode = XE_SRIOV_SCHED_GROUPS_DISABLED;
}
/**
 * xe_gt_sriov_pf_policy_set_sched_groups_mode - Select a sched groups mode.
 * @gt: the &xe_gt where the policy should be applied
 * @mode: the requested scheduling-group mode
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success, -ENODEV if no group modes are supported, or another
 * negative error code on failure.
 */
int xe_gt_sriov_pf_policy_set_sched_groups_mode(struct xe_gt *gt,
						enum xe_sriov_sched_group_modes mode)
{
	int err;

	if (!xe_sriov_gt_pf_policy_has_multi_group_modes(gt))
		return -ENODEV;

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	err = pf_provision_sched_groups(gt, mode);
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	return err;
}
/**
 * xe_gt_sriov_pf_policy_sched_groups_enabled - Check if groups are active.
 * @gt: the &xe_gt to check
 *
 * Return: true if a mode other than DISABLED is currently selected.
 */
bool xe_gt_sriov_pf_policy_sched_groups_enabled(struct xe_gt *gt)
{
	return gt->sriov.pf.policy.guc.sched_groups.current_mode !=
	       XE_SRIOV_SCHED_GROUPS_DISABLED;
}
/* Reset every cached GuC policy value to its default. */
static void pf_sanitize_guc_policies(struct xe_gt *gt)
{
	pf_sanitize_sched_if_idle(gt);
	pf_sanitize_reset_engine(gt);
	pf_sanitize_sample_period(gt);
	pf_sanitize_sched_groups(gt);
}
void xe_gt_sriov_pf_policy_sanitize(struct xe_gt *gt)
{
mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
pf_sanitize_guc_policies(gt);
mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
}
/**
 * xe_gt_sriov_pf_policy_reprovision - Reprovision (and optionally reset) policies.
 * @gt: the &xe_gt
 * @reset: reset the cached values to defaults before pushing them
 *
 * Pushes every policy to the GuC, attempting all of them even if some fail.
 * This function can only be called on PF.
 *
 * Return: 0 on success or -ENXIO if any reprovisioning step failed.
 */
int xe_gt_sriov_pf_policy_reprovision(struct xe_gt *gt, bool reset)
{
	bool failed = false;

	xe_pm_runtime_get_noresume(gt_to_xe(gt));

	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
	if (reset)
		pf_sanitize_guc_policies(gt);
	failed |= pf_reprovision_sched_if_idle(gt) != 0;
	failed |= pf_reprovision_reset_engine(gt) != 0;
	failed |= pf_reprovision_sample_period(gt) != 0;
	failed |= pf_reprovision_sched_groups(gt) != 0;
	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));

	xe_pm_runtime_put(gt_to_xe(gt));

	return failed ? -ENXIO : 0;
}
/**
 * xe_gt_sriov_pf_policy_init - Initialize PF policy data.
 * @gt: the &xe_gt
 *
 * Discovers the supported scheduling-group modes for this GT.
 */
void xe_gt_sriov_pf_policy_init(struct xe_gt *gt)
{
	pf_init_sched_groups(gt);
}
static void print_guc_policies(struct drm_printer *p, struct xe_gt_sriov_guc_policies *policy)
{
drm_printf(p, "%s:\t%s\n",
xe_guc_klv_key_to_string(GUC_KLV_VGT_POLICY_SCHED_IF_IDLE_KEY),
str_enabled_disabled(policy->sched_if_idle));
drm_printf(p, "%s:\t%s\n",
xe_guc_klv_key_to_string(GUC_KLV_VGT_POLICY_RESET_AFTER_VF_SWITCH_KEY),
str_enabled_disabled(policy->reset_engine));
drm_printf(p, "%s:\t%u %s\n",
xe_guc_klv_key_to_string(GUC_KLV_VGT_POLICY_ADVERSE_SAMPLE_PERIOD_KEY),
policy->sample_period, policy->sample_period ? "ms" : "(disabled)");
}
int xe_gt_sriov_pf_policy_print(struct xe_gt *gt, struct drm_printer *p)
{
xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
print_guc_policies(p, >->sriov.pf.policy.guc);
mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
return 0;
}