#include <linux/kernel.h>
#include <drm/drm_print.h>
#include "intel_de.h"
#include "intel_display_regs.h"
#include "intel_dmc_regs.h"
#include "intel_dmc_wl.h"
/*
 * Timeout for the hardware ack on DMC_WAKELOCK1_CTL, in microseconds --
 * the waits below use the atomic microsecond-granularity MMIO wait
 * (intel_de_wait_fw_us_atomic()).
 */
#define DMC_WAKELOCK_CTL_TIMEOUT_US 5000
/* How long to keep the wakelock held after the last put, in milliseconds. */
#define DMC_WAKELOCK_HOLD_TIME 50

/* Accepted values of the enable_dmc_wl display parameter. */
enum {
	ENABLE_DMC_WL_DISABLED,		/* wakelock mechanism unused */
	ENABLE_DMC_WL_ENABLED,		/* lock around tracked register ranges */
	ENABLE_DMC_WL_ANY_REGISTER,	/* lock around every MMIO access */
	ENABLE_DMC_WL_ALWAYS_LOCKED,	/* hold one permanent reference */
	ENABLE_DMC_WL_MAX,		/* number of valid values (bound check) */
};

/*
 * An inclusive MMIO offset range.  A zero .end denotes the single
 * offset .start; a zero .start terminates a range list (see
 * intel_dmc_wl_reg_in_range()).
 */
struct intel_dmc_wl_range {
	u32 start;
	u32 end;
};
/*
 * Register ranges that always require the wakelock, independent of
 * which DC state is enabled (checked unconditionally in
 * intel_dmc_wl_check_range()) -- presumably registers powered off
 * while in DC states.
 */
static const struct intel_dmc_wl_range powered_off_ranges[] = {
	{ .start = 0x44400, .end = 0x4447f },
	{ .start = 0x60000, .end = 0x7ffff },
	{},	/* sentinel: .start == 0 ends the list */
};
/*
 * Registers that need the wakelock while DC5/DC6 is enabled (selected
 * for DC_STATE_EN_UPTO_DC5/DC6 in intel_dmc_wl_check_range()) --
 * presumably registers touched by the DMC in those states.  Entries
 * with no .end cover a single offset.
 */
static const struct intel_dmc_wl_range xe3lpd_dc5_dc6_dmc_ranges[] = {
	{ .start = 0x45500 },
	{ .start = 0x457a0, .end = 0x457b0 },
	{ .start = 0x45504 },
	{ .start = 0x45400, .end = 0x4540c },
	{ .start = 0x454f0 },
	{ .start = 0x44300 },
	{ .start = 0x44304 },
	{ .start = 0x44f00 },
	{ .start = 0x44f04 },
	{ .start = 0x44fe8 },
	{ .start = 0x45008 },
	{ .start = 0x46070 },
	{ .start = 0x46000 },
	{ .start = 0x46008 },
	{ .start = 0x6fa88 },
	{ .start = 0x6fb88 },
	{ .start = 0x46430 },
	{ .start = 0x46434 },
	{ .start = 0x454a0 },
	{ .start = 0x42084 },
	{ .start = 0x42088 },
	{ .start = 0x46160 },
	{ .start = 0x8f000, .end = 0x8ffff },
	{ .start = 0x45230 },
	{},	/* sentinel */
};
/*
 * Registers that need the wakelock while DC3CO is enabled (selected
 * for DC_STATE_EN_DC3CO in intel_dmc_wl_check_range()) -- presumably
 * registers touched by the DMC in that state.
 */
static const struct intel_dmc_wl_range xe3lpd_dc3co_dmc_ranges[] = {
	{ .start = 0x454a0 },
	{ .start = 0x45504 },
	{ .start = 0x44300 },
	{ .start = 0x44304 },
	{ .start = 0x44f00 },
	{ .start = 0x44f04 },
	{ .start = 0x44fe8 },
	{ .start = 0x45008 },
	{ .start = 0x46070 },
	{ .start = 0x46000 },
	{ .start = 0x46008 },
	{ .start = 0x8f000, .end = 0x8ffff },
	{ .start = 0x70000 },
	{ .start = 0x70004 },
	{ .start = 0x70014 },
	{ .start = 0x70018 },
	{ .start = 0x71000 },
	{ .start = 0x71004 },
	{ .start = 0x71014 },
	{ .start = 0x71018 },
	{ .start = 0x72000 },
	{ .start = 0x72004 },
	{ .start = 0x72014 },
	{ .start = 0x72018 },
	{ .start = 0x73000 },
	{ .start = 0x73004 },
	{ .start = 0x73014 },
	{ .start = 0x73018 },
	{ .start = 0x7b000 },
	{ .start = 0x7b004 },
	{ .start = 0x7b014 },
	{ .start = 0x7b018 },
	{ .start = 0x7c000 },
	{ .start = 0x7c004 },
	{ .start = 0x7c014 },
	{ .start = 0x7c018 },
	{},	/* sentinel */
};
/*
 * Schedule the deferred release of the wakelock.  The hardware release
 * happens later, in intel_dmc_wl_work(), after DMC_WAKELOCK_HOLD_TIME
 * msecs -- so back-to-back put/get cycles don't bounce the lock in
 * hardware.  Called with wl->lock held (from intel_dmc_wl_put()) and
 * with the refcount already at zero, which the WARN_ON asserts.
 */
static void __intel_dmc_wl_release(struct intel_display *display)
{
	struct intel_dmc_wl *wl = &display->wl;

	WARN_ON(refcount_read(&wl->refcount));

	queue_delayed_work(display->wq.unordered, &wl->work,
			   msecs_to_jiffies(DMC_WAKELOCK_HOLD_TIME));
}
/*
 * Deferred-work handler that performs the actual hardware release of
 * the wakelock, DMC_WAKELOCK_HOLD_TIME msecs after the last reference
 * was dropped (see __intel_dmc_wl_release()).
 */
static void intel_dmc_wl_work(struct work_struct *work)
{
	struct intel_dmc_wl *wl =
		container_of(work, struct intel_dmc_wl, work.work);
	struct intel_display *display =
		container_of(wl, struct intel_display, wl);
	unsigned long flags;

	spin_lock_irqsave(&wl->lock, flags);

	/*
	 * Bail out if a new reference was taken while this work was
	 * waiting for the hold time to expire -- the lock must stay
	 * held in hardware in that case.
	 */
	if (refcount_read(&wl->refcount))
		goto out_unlock;

	/* Clear the request bit, then wait for the hardware to drop ACK. */
	intel_de_rmw_fw(display, DMC_WAKELOCK1_CTL, DMC_WAKELOCK_CTL_REQ, 0);

	if (intel_de_wait_fw_us_atomic(display, DMC_WAKELOCK1_CTL,
				       DMC_WAKELOCK_CTL_ACK, 0,
				       DMC_WAKELOCK_CTL_TIMEOUT_US, NULL)) {
		/* Leave wl->taken set so a later take knows it's still held. */
		WARN_RATELIMIT(1, "DMC wakelock release timed out");
		goto out_unlock;
	}

	wl->taken = false;

out_unlock:
	spin_unlock_irqrestore(&wl->lock, flags);
}
/*
 * Take the wakelock in hardware: set the request bit and wait for the
 * hardware to ack it.  Called with wl->lock held.  No-op when already
 * marked taken -- the lock may still be held in hardware from a
 * previous use whose deferred release (intel_dmc_wl_work()) has not
 * run or did not complete.
 */
static void __intel_dmc_wl_take(struct intel_display *display)
{
	struct intel_dmc_wl *wl = &display->wl;

	if (wl->taken)
		return;

	intel_de_rmw_fw(display, DMC_WAKELOCK1_CTL, 0, DMC_WAKELOCK_CTL_REQ);

	if (intel_de_wait_fw_us_atomic(display, DMC_WAKELOCK1_CTL,
				       DMC_WAKELOCK_CTL_ACK,
				       DMC_WAKELOCK_CTL_ACK,
				       DMC_WAKELOCK_CTL_TIMEOUT_US, NULL)) {
		WARN_RATELIMIT(1, "DMC wakelock ack timed out");
		return;
	}

	wl->taken = true;
}
/*
 * Return true if @reg's offset falls inside one of the entries of
 * @ranges.  The list ends at the first entry with .start == 0; an
 * entry with .end == 0 describes the single offset .start.
 */
static bool intel_dmc_wl_reg_in_range(i915_reg_t reg,
				      const struct intel_dmc_wl_range ranges[])
{
	u32 offset = i915_mmio_reg_offset(reg);
	const struct intel_dmc_wl_range *r;

	for (r = ranges; r->start; r++) {
		u32 last = r->end ? r->end : r->start;

		if (offset >= r->start && offset <= last)
			return true;
	}

	return false;
}
/*
 * Decide whether an access to @reg needs the wakelock given the
 * currently enabled DC state @dc_state.
 */
static bool intel_dmc_wl_check_range(struct intel_display *display,
				     i915_reg_t reg,
				     u32 dc_state)
{
	const struct intel_dmc_wl_range *ranges = NULL;

	/* In "any register" mode, every access takes the lock. */
	if (display->params.enable_dmc_wl == ENABLE_DMC_WL_ANY_REGISTER)
		return true;

	/* These ranges need the lock regardless of the DC state. */
	if (intel_dmc_wl_reg_in_range(reg, powered_off_ranges))
		return true;

	/* Otherwise, only the ranges tied to the enabled DC state matter. */
	switch (dc_state) {
	case DC_STATE_EN_DC3CO:
		ranges = xe3lpd_dc3co_dmc_ranges;
		break;
	case DC_STATE_EN_UPTO_DC5:
	case DC_STATE_EN_UPTO_DC6:
		ranges = xe3lpd_dc5_dc6_dmc_ranges;
		break;
	default:
		break;
	}

	return ranges && intel_dmc_wl_reg_in_range(reg, ranges);
}
static bool __intel_dmc_wl_supported(struct intel_display *display)
{
return display->params.enable_dmc_wl;
}
/*
 * Clamp the enable_dmc_wl parameter to a valid value: force it off on
 * hardware without DMC wakelock support, resolve the negative "auto"
 * value per display version, and map out-of-range values to plain
 * enabled.  Logs the sanitized result.
 */
static void intel_dmc_wl_sanitize_param(struct intel_display *display)
{
	static const char * const descs[ENABLE_DMC_WL_MAX] = {
		[ENABLE_DMC_WL_DISABLED] = "disabled",
		[ENABLE_DMC_WL_ENABLED] = "enabled",
		[ENABLE_DMC_WL_ANY_REGISTER] = "match any register",
		[ENABLE_DMC_WL_ALWAYS_LOCKED] = "always locked",
	};
	const char *desc = "unknown";
	int val = display->params.enable_dmc_wl;

	if (!HAS_DMC_WAKELOCK(display)) {
		val = ENABLE_DMC_WL_DISABLED;
	} else if (val < 0) {
		/* Negative means "auto": default on for display ver 30+. */
		val = DISPLAY_VER(display) >= 30 ? ENABLE_DMC_WL_ENABLED :
						   ENABLE_DMC_WL_DISABLED;
	} else if (val >= ENABLE_DMC_WL_MAX) {
		val = ENABLE_DMC_WL_ENABLED;
	}

	display->params.enable_dmc_wl = val;

	drm_WARN_ON(display->drm, val < 0 || val >= ENABLE_DMC_WL_MAX);

	if (val >= 0 && val < ENABLE_DMC_WL_MAX && descs[val])
		desc = descs[val];

	drm_dbg_kms(display->drm, "Sanitized enable_dmc_wl value: %d (%s)\n",
		    display->params.enable_dmc_wl, desc);
}
void intel_dmc_wl_init(struct intel_display *display)
{
struct intel_dmc_wl *wl = &display->wl;
intel_dmc_wl_sanitize_param(display);
if (!display->params.enable_dmc_wl)
return;
INIT_DELAYED_WORK(&wl->work, intel_dmc_wl_work);
spin_lock_init(&wl->lock);
refcount_set(&wl->refcount,
display->params.enable_dmc_wl == ENABLE_DMC_WL_ALWAYS_LOCKED ? 1 : 0);
}
/*
 * Enable the wakelock mechanism in hardware for the given DC state and
 * immediately take the lock if references are already outstanding
 * (e.g. the permanent reference of "always locked" mode, or gets that
 * arrived while the mechanism was disabled).
 */
void intel_dmc_wl_enable(struct intel_display *display, u32 dc_state)
{
	struct intel_dmc_wl *wl = &display->wl;
	unsigned long flags;

	if (!__intel_dmc_wl_supported(display))
		return;

	spin_lock_irqsave(&wl->lock, flags);

	/* Remember the DC state for intel_dmc_wl_check_range(). */
	wl->dc_state = dc_state;

	if (drm_WARN_ON(display->drm, wl->enabled))
		goto out_unlock;

	intel_de_rmw_fw(display, DMC_WAKELOCK_CFG, 0, DMC_WAKELOCK_CFG_ENABLE);

	wl->enabled = true;

	/* Catch up with references taken while the mechanism was off. */
	if (refcount_read(&wl->refcount))
		__intel_dmc_wl_take(display);

out_unlock:
	spin_unlock_irqrestore(&wl->lock, flags);
}
/*
 * Disable the wakelock mechanism in hardware.  Any pending deferred
 * release is flushed first (outside wl->lock -- the work item takes
 * the lock itself), then the config enable bit and any outstanding
 * lock request are cleared.
 */
void intel_dmc_wl_disable(struct intel_display *display)
{
	struct intel_dmc_wl *wl = &display->wl;
	unsigned long flags;

	if (!__intel_dmc_wl_supported(display))
		return;

	intel_dmc_wl_flush_release_work(display);

	spin_lock_irqsave(&wl->lock, flags);

	if (drm_WARN_ON(display->drm, !wl->enabled))
		goto out_unlock;

	intel_de_rmw_fw(display, DMC_WAKELOCK_CFG, DMC_WAKELOCK_CFG_ENABLE, 0);

	wl->enabled = false;

	/*
	 * Clear a request still left in DMC_WAKELOCK1_CTL; note no ack
	 * wait here, unlike the release path in intel_dmc_wl_work().
	 */
	intel_de_rmw_fw(display, DMC_WAKELOCK1_CTL, DMC_WAKELOCK_CTL_REQ, 0);
	wl->taken = false;

out_unlock:
	spin_unlock_irqrestore(&wl->lock, flags);
}
/*
 * Wait for a pending deferred wakelock release (intel_dmc_wl_work())
 * to finish, if one is queued or running.
 */
void intel_dmc_wl_flush_release_work(struct intel_display *display)
{
	struct intel_dmc_wl *wl = &display->wl;

	if (__intel_dmc_wl_supported(display))
		flush_delayed_work(&wl->work);
}
/*
 * Take a reference on the wakelock for an access to @reg, or an
 * unconditional reference when @reg is INVALID_MMIO_REG (see
 * intel_dmc_wl_get_noreg()).  The first reference while the mechanism
 * is enabled takes the lock in hardware.
 */
void intel_dmc_wl_get(struct intel_display *display, i915_reg_t reg)
{
	struct intel_dmc_wl *wl = &display->wl;
	unsigned long flags;

	if (!__intel_dmc_wl_supported(display))
		return;

	spin_lock_irqsave(&wl->lock, flags);

	/* Only registers in the tracked ranges need the lock. */
	if (i915_mmio_reg_valid(reg) &&
	    !intel_dmc_wl_check_range(display, reg, wl->dc_state))
		goto out_unlock;

	if (!wl->enabled) {
		/*
		 * Mechanism not enabled yet: only track the refcount,
		 * so intel_dmc_wl_enable() can take the lock later if
		 * references are still outstanding.
		 */
		if (!refcount_inc_not_zero(&wl->refcount))
			refcount_set(&wl->refcount, 1);
		goto out_unlock;
	}

	/* The lock is in use again: stop a pending deferred release. */
	cancel_delayed_work(&wl->work);

	if (refcount_inc_not_zero(&wl->refcount))
		goto out_unlock;

	/* First reference: take the lock in hardware. */
	refcount_set(&wl->refcount, 1);

	__intel_dmc_wl_take(display);

out_unlock:
	spin_unlock_irqrestore(&wl->lock, flags);
}
/*
 * Drop a reference on the wakelock for an access to @reg, or an
 * unconditional reference when @reg is INVALID_MMIO_REG (see
 * intel_dmc_wl_put_noreg()).  Dropping the last reference while the
 * mechanism is enabled schedules the deferred hardware release.
 */
void intel_dmc_wl_put(struct intel_display *display, i915_reg_t reg)
{
	struct intel_dmc_wl *wl = &display->wl;
	unsigned long flags;

	if (!__intel_dmc_wl_supported(display))
		return;

	spin_lock_irqsave(&wl->lock, flags);

	/* Must mirror the filtering done in intel_dmc_wl_get(). */
	if (i915_mmio_reg_valid(reg) &&
	    !intel_dmc_wl_check_range(display, reg, wl->dc_state))
		goto out_unlock;

	if (WARN_RATELIMIT(!refcount_read(&wl->refcount),
			   "Tried to put wakelock with refcount zero\n"))
		goto out_unlock;

	if (refcount_dec_and_test(&wl->refcount)) {
		/* Nothing to release in hardware if not enabled yet. */
		if (!wl->enabled)
			goto out_unlock;

		__intel_dmc_wl_release(display);

		goto out_unlock;
	}

out_unlock:
	spin_unlock_irqrestore(&wl->lock, flags);
}
/*
 * Take a wakelock reference unconditionally, without tying it to a
 * specific register: INVALID_MMIO_REG bypasses the range check in
 * intel_dmc_wl_get().
 */
void intel_dmc_wl_get_noreg(struct intel_display *display)
{
	intel_dmc_wl_get(display, INVALID_MMIO_REG);
}
/*
 * Drop a wakelock reference taken with intel_dmc_wl_get_noreg():
 * INVALID_MMIO_REG bypasses the range check in intel_dmc_wl_put().
 */
void intel_dmc_wl_put_noreg(struct intel_display *display)
{
	intel_dmc_wl_put(display, INVALID_MMIO_REG);
}