#include "xe_pm.h"
#include <linux/fault-inject.h>
#include <linux/pm_runtime.h>
#include <linux/suspend.h>
#include <linux/dmi.h>
#include <drm/drm_managed.h>
#include <drm/ttm/ttm_placement.h>
#include "display/xe_display.h"
#include "xe_bo.h"
#include "xe_bo_evict.h"
#include "xe_device.h"
#include "xe_ggtt.h"
#include "xe_gt.h"
#include "xe_gt_idle.h"
#include "xe_i2c.h"
#include "xe_irq.h"
#include "xe_late_bind_fw.h"
#include "xe_pcode.h"
#include "xe_pxp.h"
#include "xe_sriov_vf_ccs.h"
#include "xe_trace.h"
#include "xe_vm.h"
#include "xe_wa.h"
#ifdef CONFIG_LOCKDEP
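/*
 * Fake lockdep maps used to annotate the runtime PM callbacks: the d3cold
 * map covers callbacks that may take dma_resv locks (BO eviction), the
 * nod3cold map covers callbacks that must remain safe under memory reclaim,
 * and the pm_block map flags waits that could deadlock against a pending
 * system suspend.
 */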
static struct lockdep_map xe_pm_runtime_d3cold_map = {
.name = "xe_rpm_d3cold_map"
};
static struct lockdep_map xe_pm_runtime_nod3cold_map = {
.name = "xe_rpm_nod3cold_map"
};
static struct lockdep_map xe_pm_block_lockdep_map = {
.name = "xe_pm_block_map",
};
#endif
static void xe_pm_block_begin_signalling(void)
{
lock_acquire_shared_recursive(&xe_pm_block_lockdep_map, 0, 1, NULL, _RET_IP_);
}
static void xe_pm_block_end_signalling(void)
{
lock_release(&xe_pm_block_lockdep_map, _RET_IP_);
}
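
/**
 * xe_pm_might_block_on_suspend() - Annotate that the caller might block on
 * an ongoing system suspend.
 *
 * Lockdep annotation only; pairs with the signalling sections entered from
 * xe_pm_suspend() and the PM notifier.
 */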
void xe_pm_might_block_on_suspend(void)
{
lock_map_acquire(&xe_pm_block_lockdep_map);
lock_map_release(&xe_pm_block_lockdep_map);
}
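
/**
 * xe_pm_block_on_suspend() - Block until a pending system suspend completes
 * @xe: The xe device.
 *
 * Waits on @xe->pm_block, which is reinitialized when a system suspend or
 * hibernation is being prepared and completed again on resume.
 *
 * Return: 0 on success, -ERESTARTSYS if interrupted by a signal.
 */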
int xe_pm_block_on_suspend(struct xe_device *xe)
{
xe_pm_might_block_on_suspend();
return wait_for_completion_interruptible(&xe->pm_block);
}
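
/**
 * xe_rpm_reclaim_safe() - Whether runtime resume can be done from reclaim context
 * @xe: The xe device.
 *
 * Return: true if it is safe to runtime resume from reclaim context.
 * false otherwise.
 */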
bool xe_rpm_reclaim_safe(const struct xe_device *xe)
{
return !xe->d3cold.capable;
}
static void xe_rpm_lockmap_acquire(const struct xe_device *xe)
{
lock_map_acquire(xe_rpm_reclaim_safe(xe) ?
&xe_pm_runtime_nod3cold_map :
&xe_pm_runtime_d3cold_map);
}
static void xe_rpm_lockmap_release(const struct xe_device *xe)
{
lock_map_release(xe_rpm_reclaim_safe(xe) ?
&xe_pm_runtime_nod3cold_map :
&xe_pm_runtime_d3cold_map);
}
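
/**
 * xe_pm_suspend - Helper for System suspend, i.e. S0->S3 / S0->S2idle
 * @xe: xe device instance
 *
 * Return: 0 on success
 */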
int xe_pm_suspend(struct xe_device *xe)
{
struct xe_gt *gt;
u8 id;
int err;
drm_dbg(&xe->drm, "Suspending device\n");
xe_pm_block_begin_signalling();
trace_xe_pm_suspend(xe, __builtin_return_address(0));
err = xe_pxp_pm_suspend(xe->pxp);
if (err)
goto err;
xe_late_bind_wait_for_worker_completion(&xe->late_bind);
for_each_gt(gt, xe, id)
xe_gt_suspend_prepare(gt);
xe_display_pm_suspend(xe);
err = xe_bo_evict_all(xe);
if (err)
goto err_display;
for_each_gt(gt, xe, id) {
err = xe_gt_suspend(gt);
if (err)
goto err_display;
}
xe_irq_suspend(xe);
xe_display_pm_suspend_late(xe);
xe_i2c_pm_suspend(xe);
drm_dbg(&xe->drm, "Device suspended\n");
xe_pm_block_end_signalling();
return 0;
err_display:
xe_display_pm_resume(xe);
xe_pxp_pm_resume(xe->pxp);
err:
drm_dbg(&xe->drm, "Device suspend failed %d\n", err);
xe_pm_block_end_signalling();
return err;
}
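
/**
 * xe_pm_resume - Helper for System resume S3->S0 / S2idle->S0
 * @xe: xe device instance
 *
 * Return: 0 on success
 */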
int xe_pm_resume(struct xe_device *xe)
{
struct xe_tile *tile;
struct xe_gt *gt;
u8 id;
int err;
xe_pm_block_begin_signalling();
drm_dbg(&xe->drm, "Resuming device\n");
trace_xe_pm_resume(xe, __builtin_return_address(0));
for_each_gt(gt, xe, id)
xe_gt_idle_disable_c6(gt);
for_each_tile(tile, xe, id)
xe_wa_apply_tile_workarounds(tile);
err = xe_pcode_ready(xe, true);
if (err)
return err;
xe_display_pm_resume_early(xe);
err = xe_bo_restore_early(xe);
if (err)
goto err;
xe_i2c_pm_resume(xe, true);
xe_irq_resume(xe);
for_each_gt(gt, xe, id) {
err = xe_gt_resume(gt);
if (err)
break;
}
xe_display_pm_resume(xe);
if (err)
goto err;
err = xe_bo_restore_late(xe);
if (err)
goto err;
xe_pxp_pm_resume(xe->pxp);
if (IS_VF_CCS_READY(xe))
xe_sriov_vf_ccs_register_context(xe);
xe_late_bind_fw_load(&xe->late_bind);
drm_dbg(&xe->drm, "Device resumed\n");
xe_pm_block_end_signalling();
return 0;
err:
drm_dbg(&xe->drm, "Device resume failed %d\n", err);
xe_pm_block_end_signalling();
return err;
}
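
/*
 * D3Cold is only usable when the PCIe root port can generate PME# from
 * D3cold and exposes an ACPI _PR3 power resource.
 */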
static bool xe_pm_pci_d3cold_capable(struct xe_device *xe)
{
struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
struct pci_dev *root_pdev;
root_pdev = pcie_find_root_port(pdev);
if (!root_pdev)
return false;
if (!pci_pme_capable(root_pdev, PCI_D3cold)) {
drm_dbg(&xe->drm, "d3cold: PME# not supported\n");
return false;
}
if (!pci_pr3_present(root_pdev)) {
drm_dbg(&xe->drm, "d3cold: ACPI _PR3 not present\n");
return false;
}
return true;
}
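
/*
 * Enable runtime PM with autosuspend (1 s delay). Skipped on SR-IOV VFs.
 * On discrete devices, DPM_FLAG_NO_DIRECT_COMPLETE keeps the PM core from
 * applying the direct-complete optimization, so our system suspend/resume
 * callbacks always run.
 */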
static void xe_pm_runtime_init(struct xe_device *xe)
{
struct device *dev = xe->drm.dev;
if (IS_SRIOV_VF(xe))
return;
if (IS_DGFX(xe))
dev_pm_set_driver_flags(dev, DPM_FLAG_NO_DIRECT_COMPLETE);
pm_runtime_use_autosuspend(dev);
pm_runtime_set_autosuspend_delay(dev, 1000);
pm_runtime_set_active(dev);
pm_runtime_allow(dev);
pm_runtime_mark_last_busy(dev);
pm_runtime_put(dev);
}
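
/**
 * xe_pm_init_early - Initialize the VRAM-userfault list and the D3Cold lock
 * @xe: xe device instance
 *
 * Return: 0 on success, negative error code on failure.
 */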
int xe_pm_init_early(struct xe_device *xe)
{
int err;
INIT_LIST_HEAD(&xe->mem_access.vram_userfault.list);
err = drmm_mutex_init(&xe->drm, &xe->mem_access.vram_userfault.lock);
if (err)
return err;
err = drmm_mutex_init(&xe->drm, &xe->d3cold.lock);
if (err)
return err;
xe->d3cold.capable = xe_pm_pci_d3cold_capable(xe);
return 0;
}
ALLOW_ERROR_INJECTION(xe_pm_init_early, ERRNO);
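
/*
 * Returning a threshold of 0 keeps d3cold.allowed false on boards where
 * D3Cold is known to be broken, since the "VRAM used below threshold" check
 * in xe_pm_d3cold_allowed_toggle() can then never pass.
 */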
static u32 vram_threshold_value(struct xe_device *xe)
{
if (xe->info.platform == XE_BATTLEMAGE) {
const char *product_name;
product_name = dmi_get_system_info(DMI_PRODUCT_NAME);
if (product_name && strstr(product_name, "NUC13RNG")) {
drm_warn(&xe->drm, "BMG + D3Cold not supported on this platform\n");
return 0;
}
}
return DEFAULT_VRAM_THRESHOLD;
}
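
/* Wake all VM rebind workers that were parked while a system suspend was pending. */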
static void xe_pm_wake_rebind_workers(struct xe_device *xe)
{
struct xe_vm *vm, *next;
mutex_lock(&xe->rebind_resume_lock);
list_for_each_entry_safe(vm, next, &xe->rebind_resume_list,
preempt.pm_activate_link) {
list_del_init(&vm->preempt.pm_activate_link);
xe_vm_resume_rebind_worker(vm);
}
mutex_unlock(&xe->rebind_resume_lock);
}
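
/*
 * PM notifier: on suspend/hibernate prepare, block new activity on @pm_block,
 * evict user BOs and prepare pinned BOs for the pending save; undo all of it
 * once the transition is over.
 */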
static int xe_pm_notifier_callback(struct notifier_block *nb,
unsigned long action, void *data)
{
struct xe_device *xe = container_of(nb, struct xe_device, pm_notifier);
int err = 0;
switch (action) {
case PM_HIBERNATION_PREPARE:
case PM_SUSPEND_PREPARE:
{
struct xe_validation_ctx ctx;
reinit_completion(&xe->pm_block);
xe_pm_block_begin_signalling();
xe_pm_runtime_get(xe);
(void)xe_validation_ctx_init(&ctx, &xe->val, NULL,
(struct xe_val_flags) {.exclusive = true});
err = xe_bo_evict_all_user(xe);
xe_validation_ctx_fini(&ctx);
if (err)
drm_dbg(&xe->drm, "Notifier evict user failed (%d)\n", err);
err = xe_bo_notifier_prepare_all_pinned(xe);
if (err)
drm_dbg(&xe->drm, "Notifier prepare pin failed (%d)\n", err);
xe_pm_block_end_signalling();
break;
}
case PM_POST_HIBERNATION:
case PM_POST_SUSPEND:
complete_all(&xe->pm_block);
xe_pm_wake_rebind_workers(xe);
xe_bo_notifier_unprepare_all_pinned(xe);
xe_pm_runtime_put(xe);
break;
}
return NOTIFY_DONE;
}
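
/**
 * xe_pm_init - Initialize Xe Power Management
 * @xe: xe device instance
 *
 * This component is responsible for System and Device sleep states.
 *
 * Returns 0 for success, negative error code otherwise.
 */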
int xe_pm_init(struct xe_device *xe)
{
u32 vram_threshold;
int err;
xe->pm_notifier.notifier_call = xe_pm_notifier_callback;
err = register_pm_notifier(&xe->pm_notifier);
if (err)
return err;
err = drmm_mutex_init(&xe->drm, &xe->rebind_resume_lock);
if (err)
goto err_unregister;
init_completion(&xe->pm_block);
complete_all(&xe->pm_block);
INIT_LIST_HEAD(&xe->rebind_resume_list);
if (!xe_device_uc_enabled(xe))
return 0;
if (xe->d3cold.capable) {
vram_threshold = vram_threshold_value(xe);
err = xe_pm_set_vram_threshold(xe, vram_threshold);
if (err)
goto err_unregister;
}
xe_pm_runtime_init(xe);
return 0;
err_unregister:
unregister_pm_notifier(&xe->pm_notifier);
return err;
}
static void xe_pm_runtime_fini(struct xe_device *xe)
{
struct device *dev = xe->drm.dev;
if (IS_SRIOV_VF(xe))
return;
pm_runtime_get_sync(dev);
pm_runtime_forbid(dev);
}
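
/**
 * xe_pm_fini - Tear down Xe Power Management
 * @xe: xe device instance
 */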
void xe_pm_fini(struct xe_device *xe)
{
if (xe_device_uc_enabled(xe))
xe_pm_runtime_fini(xe);
unregister_pm_notifier(&xe->pm_notifier);
}
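
/*
 * Record the task currently executing a runtime PM callback so that inner
 * callers can detect recursion. The barrier pairs with the one in
 * xe_pm_read_callback_task().
 */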
static void xe_pm_write_callback_task(struct xe_device *xe,
struct task_struct *task)
{
WRITE_ONCE(xe->pm_callback_task, task);
smp_mb();
}
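
/**
 * xe_pm_read_callback_task - Read the PM callback task
 * @xe: xe device instance
 *
 * Intended for inner code that may run from within a runtime PM callback
 * and needs to detect that case.
 *
 * Return: The task executing a PM callback for @xe, or NULL.
 */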
struct task_struct *xe_pm_read_callback_task(struct xe_device *xe)
{
smp_mb();
return READ_ONCE(xe->pm_callback_task);
}
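
/**
 * xe_pm_runtime_suspended - Check if runtime_pm state is suspended
 * @xe: xe device instance
 *
 * This does not provide any guarantee that the device is going to remain
 * suspended as it might be racing with the runtime state transitions.
 * It can be used only as a non-reliable assertion, to ensure that we are not
 * in the sleep state while trying to access some memory for instance.
 *
 * Returns true if PCI device is suspended, false otherwise.
 */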
bool xe_pm_runtime_suspended(struct xe_device *xe)
{
return pm_runtime_suspended(xe->drm.dev);
}
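
/**
 * xe_pm_runtime_suspend - Prepare our device for D3hot/D3Cold
 * @xe: xe device instance
 *
 * Returns 0 for success, negative error code otherwise.
 */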
int xe_pm_runtime_suspend(struct xe_device *xe)
{
struct xe_bo *bo, *on;
struct xe_gt *gt;
u8 id;
int err = 0;
trace_xe_pm_runtime_suspend(xe, __builtin_return_address(0));
xe_pm_write_callback_task(xe, current);
xe_rpm_lockmap_acquire(xe);
err = xe_pxp_pm_suspend(xe->pxp);
if (err)
goto out;
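
/*
 * Hold the lock across the whole list walk: the BO destroy and move-notify
 * paths also delete entries from the vram_userfault list.
 */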
mutex_lock(&xe->mem_access.vram_userfault.lock);
list_for_each_entry_safe(bo, on,
&xe->mem_access.vram_userfault.list, vram_userfault_link)
xe_bo_runtime_pm_release_mmap_offset(bo);
mutex_unlock(&xe->mem_access.vram_userfault.lock);
xe_display_pm_runtime_suspend(xe);
if (xe->d3cold.allowed) {
err = xe_bo_evict_all(xe);
if (err)
goto out_resume;
}
for_each_gt(gt, xe, id) {
err = xe->d3cold.allowed ? xe_gt_suspend(gt) : xe_gt_runtime_suspend(gt);
if (err)
goto out_resume;
}
xe_irq_suspend(xe);
xe_display_pm_runtime_suspend_late(xe);
xe_i2c_pm_suspend(xe);
xe_rpm_lockmap_release(xe);
xe_pm_write_callback_task(xe, NULL);
return 0;
out_resume:
xe_display_pm_runtime_resume(xe);
xe_pxp_pm_resume(xe->pxp);
out:
xe_rpm_lockmap_release(xe);
xe_pm_write_callback_task(xe, NULL);
return err;
}
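
/**
 * xe_pm_runtime_resume - Waking up from D3hot/D3Cold
 * @xe: xe device instance
 *
 * Returns 0 for success, negative error code otherwise.
 */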
int xe_pm_runtime_resume(struct xe_device *xe)
{
struct xe_gt *gt;
u8 id;
int err = 0;
trace_xe_pm_runtime_resume(xe, __builtin_return_address(0));
xe_pm_write_callback_task(xe, current);
xe_rpm_lockmap_acquire(xe);
if (xe->d3cold.allowed) {
for_each_gt(gt, xe, id)
xe_gt_idle_disable_c6(gt);
err = xe_pcode_ready(xe, true);
if (err)
goto out;
xe_display_pm_resume_early(xe);
err = xe_bo_restore_early(xe);
if (err)
goto out;
}
xe_i2c_pm_resume(xe, xe->d3cold.allowed);
xe_irq_resume(xe);
for_each_gt(gt, xe, id) {
err = xe->d3cold.allowed ? xe_gt_resume(gt) : xe_gt_runtime_resume(gt);
if (err)
break;
}
xe_display_pm_runtime_resume(xe);
if (err)
goto out;
if (xe->d3cold.allowed) {
err = xe_bo_restore_late(xe);
if (err)
goto out;
}
xe_pxp_pm_resume(xe->pxp);
if (IS_VF_CCS_READY(xe))
xe_sriov_vf_ccs_register_context(xe);
if (xe->d3cold.allowed)
xe_late_bind_fw_load(&xe->late_bind);
out:
xe_rpm_lockmap_release(xe);
xe_pm_write_callback_task(xe, NULL);
return err;
}
static void xe_rpm_might_enter_cb(const struct xe_device *xe)
{
xe_rpm_lockmap_acquire(xe);
xe_rpm_lockmap_release(xe);
}
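
/*
 * Prime the lockdep maps for known locking orders that need to
 * be supported but that may not always occur on all systems.
 */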
static void xe_pm_runtime_lockdep_prime(void)
{
struct dma_resv lockdep_resv;
dma_resv_init(&lockdep_resv);
lock_map_acquire(&xe_pm_runtime_d3cold_map);
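/* D3Cold takes the dma_resv locks to evict bos */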
dma_resv_lock(&lockdep_resv, NULL);
dma_resv_unlock(&lockdep_resv);
lock_map_release(&xe_pm_runtime_d3cold_map);
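
/* Shrinkers might like to wake up the device under reclaim. */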
fs_reclaim_acquire(GFP_KERNEL);
lock_map_acquire(&xe_pm_runtime_nod3cold_map);
lock_map_release(&xe_pm_runtime_nod3cold_map);
fs_reclaim_release(GFP_KERNEL);
}
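
/**
 * xe_pm_runtime_get - Get a runtime_pm reference and resume synchronously
 * @xe: xe device instance
 */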
void xe_pm_runtime_get(struct xe_device *xe)
{
trace_xe_pm_runtime_get(xe, __builtin_return_address(0));
pm_runtime_get_noresume(xe->drm.dev);
if (xe_pm_read_callback_task(xe) == current)
return;
xe_rpm_might_enter_cb(xe);
pm_runtime_resume(xe->drm.dev);
}
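
/**
 * xe_pm_runtime_put - Drop a runtime_pm reference and mark the device as idle
 * @xe: xe device instance
 */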
void xe_pm_runtime_put(struct xe_device *xe)
{
trace_xe_pm_runtime_put(xe, __builtin_return_address(0));
if (xe_pm_read_callback_task(xe) == current) {
pm_runtime_put_noidle(xe->drm.dev);
} else {
pm_runtime_mark_last_busy(xe->drm.dev);
pm_runtime_put(xe->drm.dev);
}
}
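
/**
 * xe_pm_runtime_get_ioctl - Get a runtime_pm reference before ioctl
 * @xe: xe device instance
 *
 * Returns: Any number greater than or equal to 0 for success, negative error
 * code otherwise.
 */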
int xe_pm_runtime_get_ioctl(struct xe_device *xe)
{
trace_xe_pm_runtime_get_ioctl(xe, __builtin_return_address(0));
if (WARN_ON(xe_pm_read_callback_task(xe) == current))
return -ELOOP;
xe_rpm_might_enter_cb(xe);
return pm_runtime_get_sync(xe->drm.dev);
}
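
/**
 * xe_pm_runtime_get_if_active - Get a runtime_pm reference if device active
 * @xe: xe device instance
 *
 * Return: True if device is awake (regardless of the previous number of
 * references) and a new reference was taken, false otherwise.
 */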
bool xe_pm_runtime_get_if_active(struct xe_device *xe)
{
return pm_runtime_get_if_active(xe->drm.dev) > 0;
}
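
/**
 * xe_pm_runtime_get_if_in_use - Get a new reference if device is active with previous ref taken
 * @xe: xe device instance
 *
 * Return: True if device is awake, a previous reference had already been
 * taken, and a new reference was now taken, false otherwise.
 */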
bool xe_pm_runtime_get_if_in_use(struct xe_device *xe)
{
if (xe_pm_read_callback_task(xe) == current) {
pm_runtime_get_noresume(xe->drm.dev);
return true;
}
return pm_runtime_get_if_in_use(xe->drm.dev) > 0;
}
static bool xe_pm_suspending_or_resuming(struct xe_device *xe)
{
#ifdef CONFIG_PM
struct device *dev = xe->drm.dev;
return dev->power.runtime_status == RPM_SUSPENDING ||
dev->power.runtime_status == RPM_RESUMING ||
pm_suspend_in_progress();
#else
return false;
#endif
}
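
/**
 * xe_pm_runtime_get_noresume - Bump runtime PM usage counter without resuming
 * @xe: xe device instance
 *
 * This function should be used in inner places where it is surely already
 * protected by outer-bound callers of xe_pm_runtime_get(), and it will warn
 * if that protection appears to be missing. The reference must still be put
 * back, since the usage counter is always bumped.
 */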
void xe_pm_runtime_get_noresume(struct xe_device *xe)
{
bool ref;
ref = xe_pm_runtime_get_if_in_use(xe);
if (!ref) {
pm_runtime_get_noresume(xe->drm.dev);
drm_WARN(&xe->drm, !xe_pm_suspending_or_resuming(xe),
"Missing outer runtime PM protection\n");
}
}
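
/**
 * xe_pm_runtime_resume_and_get - Resume, then get a runtime_pm ref if awake.
 * @xe: xe device instance
 *
 * Returns: True if device is awake and the reference was taken, false otherwise.
 */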
bool xe_pm_runtime_resume_and_get(struct xe_device *xe)
{
if (xe_pm_read_callback_task(xe) == current) {
pm_runtime_get_noresume(xe->drm.dev);
return true;
}
xe_rpm_might_enter_cb(xe);
return pm_runtime_resume_and_get(xe->drm.dev) >= 0;
}
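
/**
 * xe_pm_assert_unbounded_bridge - Disable PM on an unbound pcie parent bridge
 * @xe: xe device instance
 */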
void xe_pm_assert_unbounded_bridge(struct xe_device *xe)
{
struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
struct pci_dev *bridge = pci_upstream_bridge(pdev);
if (!bridge)
return;
if (!bridge->driver) {
drm_warn(&xe->drm, "unbounded parent pci bridge, device won't support any PM support.\n");
device_set_pm_not_required(&pdev->dev);
}
}
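
/**
 * xe_pm_set_vram_threshold - Set a VRAM threshold for allowing/blocking D3Cold
 * @xe: xe device instance
 * @threshold: VRAM size in MB for the D3cold threshold
 *
 * Return:
 * * 0		- success
 * * -EINVAL	- invalid argument
 */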
int xe_pm_set_vram_threshold(struct xe_device *xe, u32 threshold)
{
struct ttm_resource_manager *man;
u32 vram_total_mb = 0;
int i;
for (i = XE_PL_VRAM0; i <= XE_PL_VRAM1; ++i) {
man = ttm_manager_type(&xe->ttm, i);
if (man)
vram_total_mb += DIV_ROUND_UP_ULL(man->size, 1024 * 1024);
}
drm_dbg(&xe->drm, "Total vram %u mb\n", vram_total_mb);
if (threshold > vram_total_mb)
return -EINVAL;
mutex_lock(&xe->d3cold.lock);
xe->d3cold.vram_threshold = threshold;
mutex_unlock(&xe->d3cold.lock);
return 0;
}
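
/**
 * xe_pm_d3cold_allowed_toggle - Check conditions to toggle d3cold.allowed
 * @xe: xe device instance
 *
 * To be called during runtime_pm idle callback.
 * Check for all the D3Cold conditions ahead of runtime suspend.
 */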
void xe_pm_d3cold_allowed_toggle(struct xe_device *xe)
{
struct ttm_resource_manager *man;
u32 total_vram_used_mb = 0;
u64 vram_used;
int i;
if (!xe->d3cold.capable) {
xe->d3cold.allowed = false;
return;
}
for (i = XE_PL_VRAM0; i <= XE_PL_VRAM1; ++i) {
man = ttm_manager_type(&xe->ttm, i);
if (man) {
vram_used = ttm_resource_manager_usage(man);
total_vram_used_mb += DIV_ROUND_UP_ULL(vram_used, 1024 * 1024);
}
}
mutex_lock(&xe->d3cold.lock);
xe->d3cold.allowed = total_vram_used_mb < xe->d3cold.vram_threshold;
mutex_unlock(&xe->d3cold.lock);
}
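
/**
 * xe_pm_module_init() - Perform xe_pm specific module initialization.
 *
 * Return: 0 on success. Currently doesn't fail.
 */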
int __init xe_pm_module_init(void)
{
xe_pm_runtime_lockdep_prime();
return 0;
}