#include "xe_assert.h"
#include "xe_device.h"
#include "xe_gt_sriov_pf_config.h"
#include "xe_gt_sriov_pf_policy.h"
#include "xe_sriov.h"
#include "xe_sriov_pf_helpers.h"
#include "xe_sriov_pf_provision.h"
#include "xe_sriov_pf_provision_types.h"
#include "xe_sriov_printk.h"
/*
 * Translate a provisioning mode into a human-readable string for use in
 * debug messages.  Any unrecognized value maps to "<invalid>".
 */
static const char *mode_to_string(enum xe_sriov_provisioning_mode mode)
{
	if (mode == XE_SRIOV_PROVISIONING_MODE_AUTO)
		return "auto";
	if (mode == XE_SRIOV_PROVISIONING_MODE_CUSTOM)
		return "custom";
	return "<invalid>";
}
/*
 * Return true if the PF is in the "auto" provisioning mode, in which VF
 * resources are assigned automatically when VFs are enabled.
 * May only be called on a PF (asserted).
 */
static bool pf_auto_provisioning_mode(struct xe_device *xe)
{
	xe_assert(xe, IS_SRIOV_PF(xe));

	return xe->sriov.pf.provision.mode == XE_SRIOV_PROVISIONING_MODE_AUTO;
}
/*
 * Check whether VFs 1..@num_vfs on @gt all have empty configurations,
 * i.e. whether fresh auto-provisioning is required.  Returns false as
 * soon as any non-empty VF configuration is found.
 */
static bool pf_needs_provisioning(struct xe_gt *gt, unsigned int num_vfs)
{
	unsigned int vfid;

	for (vfid = 1; vfid <= num_vfs; vfid++) {
		if (!xe_gt_sriov_pf_config_is_empty(gt, vfid))
			return false;
	}

	return true;
}
/*
 * Auto-provision @num_vfs VFs with a fair split of resources on every GT.
 *
 * Bails out immediately with -EUCLEAN if any VF already holds a non-empty
 * configuration on some GT.  A per-GT provisioning failure does not stop
 * the remaining GTs; the first error encountered is returned.
 */
static int pf_provision_vfs(struct xe_device *xe, unsigned int num_vfs)
{
	unsigned int id;
	struct xe_gt *gt;
	int first_err = 0;

	for_each_gt(gt, xe, id) {
		int err;

		if (!pf_needs_provisioning(gt, num_vfs))
			return -EUCLEAN;

		err = xe_gt_sriov_pf_config_set_fair(gt, VFID(1), num_vfs);
		if (err && !first_err)
			first_err = err;
	}

	return first_err;
}
static void pf_unprovision_vfs(struct xe_device *xe, unsigned int num_vfs)
{
struct xe_gt *gt;
unsigned int id;
unsigned int n;
for_each_gt(gt, xe, id)
for (n = 1; n <= num_vfs; n++)
xe_gt_sriov_pf_config_release(gt, n, true);
}
/* Release the configurations of all possible VFs (totalvfs) on every GT. */
static void pf_unprovision_all_vfs(struct xe_device *xe)
{
	pf_unprovision_vfs(xe, xe_sriov_pf_get_totalvfs(xe));
}
/**
 * xe_sriov_pf_provision_vfs() - Provision VFs with a fair set of resources.
 * @xe: the &xe_device
 * @num_vfs: the number of VFs to provision
 *
 * Only applies in the "auto" provisioning mode; in "custom" mode this is
 * a no-op and the configuration is left entirely to the admin.
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_sriov_pf_provision_vfs(struct xe_device *xe, unsigned int num_vfs)
{
	xe_assert(xe, IS_SRIOV_PF(xe));

	if (pf_auto_provisioning_mode(xe))
		return pf_provision_vfs(xe, num_vfs);

	return 0;
}
/**
 * xe_sriov_pf_unprovision_vfs() - Unprovision VFs.
 * @xe: the &xe_device
 * @num_vfs: the number of VFs to unprovision
 *
 * Only applies in the "auto" provisioning mode; in "custom" mode VF
 * configurations are preserved and this is a no-op.
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int xe_sriov_pf_unprovision_vfs(struct xe_device *xe, unsigned int num_vfs)
{
	xe_assert(xe, IS_SRIOV_PF(xe));

	if (pf_auto_provisioning_mode(xe))
		pf_unprovision_vfs(xe, num_vfs);

	return 0;
}
/**
 * xe_sriov_pf_provision_set_mode() - Change the VF provisioning mode.
 * @xe: the &xe_device
 * @mode: the new &xe_sriov_provisioning_mode
 *
 * Setting the same mode again is a successful no-op.  Restoring the "auto"
 * mode is rejected while any VFs are enabled, and otherwise drops all
 * leftover VF configurations so that auto-provisioning starts clean.
 *
 * This function can only be called on PF.
 *
 * Return: 0 on success or -EBUSY if restoring the "auto" mode while VFs
 *	   are still enabled.
 */
int xe_sriov_pf_provision_set_mode(struct xe_device *xe, enum xe_sriov_provisioning_mode mode)
{
	xe_assert(xe, IS_SRIOV_PF(xe));

	if (mode == xe->sriov.pf.provision.mode)
		return 0;

	if (mode == XE_SRIOV_PROVISIONING_MODE_AUTO) {
		/* can't discard custom configs while VFs may be using them */
		if (xe_sriov_pf_num_vfs(xe)) {
			xe_sriov_dbg(xe, "can't restore %s: VFs must be disabled!\n",
				     mode_to_string(mode));
			return -EBUSY;
		}
		/* drop any custom provisioning before auto mode takes over */
		pf_unprovision_all_vfs(xe);
	}

	/* log the old mode before overwriting it; %ps identifies the caller */
	xe_sriov_dbg(xe, "mode %s changed to %s by %ps\n",
		     mode_to_string(xe->sriov.pf.provision.mode),
		     mode_to_string(mode), __builtin_return_address(0));
	xe->sriov.pf.provision.mode = mode;

	return 0;
}
/**
 * xe_sriov_pf_provision_bulk_apply_eq() - Apply an execution quantum to all VFs.
 * @xe: the &xe_device
 * @eq: the execution quantum value
 *
 * Applies @eq to the VF configurations on every GT while holding the PF
 * master mutex.  A failure on one GT does not stop the remaining GTs.
 *
 * Return: 0 on success or the first error encountered.
 */
int xe_sriov_pf_provision_bulk_apply_eq(struct xe_device *xe, u32 eq)
{
	unsigned int id;
	struct xe_gt *gt;
	int first_err = 0;

	guard(mutex)(xe_sriov_pf_master_mutex(xe));

	for_each_gt(gt, xe, id) {
		int err = xe_gt_sriov_pf_config_bulk_set_exec_quantum_locked(gt, eq);

		if (err && !first_err)
			first_err = err;
	}

	return first_err;
}
/**
 * xe_sriov_pf_provision_apply_vf_eq() - Apply an execution quantum to one VF.
 * @xe: the &xe_device
 * @vfid: the VF identifier
 * @eq: the execution quantum value
 *
 * Applies @eq to @vfid's configuration on every GT while holding the PF
 * master mutex.  A failure on one GT does not stop the remaining GTs.
 *
 * Return: 0 on success or the first error encountered.
 */
int xe_sriov_pf_provision_apply_vf_eq(struct xe_device *xe, unsigned int vfid, u32 eq)
{
	unsigned int id;
	struct xe_gt *gt;
	int first_err = 0;

	guard(mutex)(xe_sriov_pf_master_mutex(xe));

	for_each_gt(gt, xe, id) {
		int err = xe_gt_sriov_pf_config_set_exec_quantum_locked(gt, vfid, eq);

		if (err && !first_err)
			first_err = err;
	}

	return first_err;
}
/*
 * Log a debug message about a VF attribute (@what) whose value differs
 * between GTs — @found on this @gt vs @expected seen earlier — and return
 * -EUCLEAN so callers can report the inconsistent provisioning state.
 */
static int pf_report_unclean(struct xe_gt *gt, unsigned int vfid,
			     const char *what, u32 found, u32 expected)
{
	char name[8];	/* scratch buffer for the function name string */

	xe_sriov_dbg(gt_to_xe(gt), "%s on GT%u has %s=%u (expected %u)\n",
		     xe_sriov_function_name(vfid, name, sizeof(name)),
		     gt->info.id, what, found, expected);
	return -EUCLEAN;
}
/**
 * xe_sriov_pf_provision_query_vf_eq() - Query the execution quantum of a VF.
 * @xe: the &xe_device
 * @vfid: the VF identifier
 * @eq: output for the execution quantum value
 *
 * Reads @vfid's execution quantum from every GT, under the PF master mutex,
 * and requires all GTs to agree on the value.
 *
 * Return: 0 on success, -ENODATA if there are no GTs, or -EUCLEAN if the
 *	   value differs between GTs.
 */
int xe_sriov_pf_provision_query_vf_eq(struct xe_device *xe, unsigned int vfid, u32 *eq)
{
	unsigned int id;
	struct xe_gt *gt;
	bool first = true;

	guard(mutex)(xe_sriov_pf_master_mutex(xe));

	for_each_gt(gt, xe, id) {
		u32 value = xe_gt_sriov_pf_config_get_exec_quantum_locked(gt, vfid);

		if (first) {
			*eq = value;
			first = false;
		} else if (value != *eq) {
			return pf_report_unclean(gt, vfid, "EQ", value, *eq);
		}
	}

	return first ? -ENODATA : 0;
}
/**
 * xe_sriov_pf_provision_bulk_apply_pt() - Apply a preempt timeout to all VFs.
 * @xe: the &xe_device
 * @pt: the preempt timeout value
 *
 * Applies @pt to the VF configurations on every GT while holding the PF
 * master mutex.  A failure on one GT does not stop the remaining GTs.
 *
 * Return: 0 on success or the first error encountered.
 */
int xe_sriov_pf_provision_bulk_apply_pt(struct xe_device *xe, u32 pt)
{
	unsigned int id;
	struct xe_gt *gt;
	int first_err = 0;

	guard(mutex)(xe_sriov_pf_master_mutex(xe));

	for_each_gt(gt, xe, id) {
		int err = xe_gt_sriov_pf_config_bulk_set_preempt_timeout_locked(gt, pt);

		if (err && !first_err)
			first_err = err;
	}

	return first_err;
}
/**
 * xe_sriov_pf_provision_apply_vf_pt() - Apply a preempt timeout to one VF.
 * @xe: the &xe_device
 * @vfid: the VF identifier
 * @pt: the preempt timeout value
 *
 * Applies @pt to @vfid's configuration on every GT while holding the PF
 * master mutex.  A failure on one GT does not stop the remaining GTs.
 *
 * Return: 0 on success or the first error encountered.
 */
int xe_sriov_pf_provision_apply_vf_pt(struct xe_device *xe, unsigned int vfid, u32 pt)
{
	unsigned int id;
	struct xe_gt *gt;
	int first_err = 0;

	guard(mutex)(xe_sriov_pf_master_mutex(xe));

	for_each_gt(gt, xe, id) {
		int err = xe_gt_sriov_pf_config_set_preempt_timeout_locked(gt, vfid, pt);

		if (err && !first_err)
			first_err = err;
	}

	return first_err;
}
/**
 * xe_sriov_pf_provision_query_vf_pt() - Query the preempt timeout of a VF.
 * @xe: the &xe_device
 * @vfid: the VF identifier
 * @pt: output for the preempt timeout value
 *
 * Reads @vfid's preempt timeout from every GT, under the PF master mutex,
 * and requires all GTs to agree on the value.
 *
 * Return: 0 on success, -ENODATA if there are no GTs, or -EUCLEAN if the
 *	   value differs between GTs.
 */
int xe_sriov_pf_provision_query_vf_pt(struct xe_device *xe, unsigned int vfid, u32 *pt)
{
	unsigned int id;
	struct xe_gt *gt;
	bool first = true;

	guard(mutex)(xe_sriov_pf_master_mutex(xe));

	for_each_gt(gt, xe, id) {
		u32 value = xe_gt_sriov_pf_config_get_preempt_timeout_locked(gt, vfid);

		if (first) {
			*pt = value;
			first = false;
		} else if (value != *pt) {
			return pf_report_unclean(gt, vfid, "PT", value, *pt);
		}
	}

	return first ? -ENODATA : 0;
}
/**
 * xe_sriov_pf_provision_bulk_apply_priority() - Apply a scheduling priority to all VFs.
 * @xe: the &xe_device
 * @prio: the GuC scheduling priority (must be below GUC_SCHED_PRIORITY_HIGH,
 *	  asserted)
 *
 * Maps @prio onto the per-GT "schedule if idle" policy: normal priority
 * enables it, any other accepted priority disables it.  A failure on one
 * GT does not stop the remaining GTs.
 *
 * Return: 0 on success or the first error encountered.
 */
int xe_sriov_pf_provision_bulk_apply_priority(struct xe_device *xe, u32 prio)
{
	bool sched_if_idle = prio == GUC_SCHED_PRIORITY_NORMAL;
	unsigned int id;
	struct xe_gt *gt;
	int first_err = 0;

	xe_assert(xe, prio < GUC_SCHED_PRIORITY_HIGH);

	for_each_gt(gt, xe, id) {
		int err = xe_gt_sriov_pf_policy_set_sched_if_idle(gt, sched_if_idle);

		if (err && !first_err)
			first_err = err;
	}

	return first_err;
}
/**
 * xe_sriov_pf_provision_apply_vf_priority() - Apply a scheduling priority to one VF.
 * @xe: the &xe_device
 * @vfid: the VF identifier
 * @prio: the scheduling priority value
 *
 * Applies @prio to @vfid's configuration on every GT.  A failure on one GT
 * does not stop the remaining GTs.
 *
 * NOTE(review): unlike the EQ/PT helpers, no PF master mutex is taken here;
 * presumably the config helper locks internally — confirm.
 *
 * Return: 0 on success or the first error encountered.
 */
int xe_sriov_pf_provision_apply_vf_priority(struct xe_device *xe, unsigned int vfid, u32 prio)
{
	unsigned int id;
	struct xe_gt *gt;
	int first_err = 0;

	for_each_gt(gt, xe, id) {
		int err = xe_gt_sriov_pf_config_set_sched_priority(gt, vfid, prio);

		if (err && !first_err)
			first_err = err;
	}

	return first_err;
}
/**
 * xe_sriov_pf_provision_query_vf_priority() - Query the scheduling priority of a VF.
 * @xe: the &xe_device
 * @vfid: the VF identifier
 * @prio: output for the scheduling priority value
 *
 * Reads @vfid's scheduling priority from every GT and requires all GTs to
 * agree on the value.
 *
 * Return: 0 on success, -ENODATA if there are no GTs, or -EUCLEAN if the
 *	   value differs between GTs.
 */
int xe_sriov_pf_provision_query_vf_priority(struct xe_device *xe, unsigned int vfid, u32 *prio)
{
	unsigned int id;
	struct xe_gt *gt;
	bool first = true;

	for_each_gt(gt, xe, id) {
		u32 value = xe_gt_sriov_pf_config_get_sched_priority(gt, vfid);

		if (first) {
			*prio = value;
			first = false;
		} else if (value != *prio) {
			return pf_report_unclean(gt, vfid, "priority", value, *prio);
		}
	}

	return first ? -ENODATA : 0;
}