root/drivers/gpu/drm/i915/display/intel_psr.c
/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <linux/debugfs.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_print.h>
#include <drm/drm_vblank.h>

#include "i915_reg.h"
#include "intel_alpm.h"
#include "intel_atomic.h"
#include "intel_crtc.h"
#include "intel_cursor_regs.h"
#include "intel_ddi.h"
#include "intel_de.h"
#include "intel_display_irq.h"
#include "intel_display_regs.h"
#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_display_utils.h"
#include "intel_dmc.h"
#include "intel_dp.h"
#include "intel_dp_aux.h"
#include "intel_dsb.h"
#include "intel_frontbuffer.h"
#include "intel_hdmi.h"
#include "intel_psr.h"
#include "intel_psr_regs.h"
#include "intel_snps_phy.h"
#include "intel_step.h"
#include "intel_vblank.h"
#include "intel_vdsc.h"
#include "intel_vrr.h"
#include "skl_universal_plane.h"

/**
 * DOC: Panel Self Refresh (PSR/SRD)
 *
 * Since Haswell the display controller supports Panel Self-Refresh on display
 * panels which have a remote frame buffer (RFB) implemented according to the
 * PSR spec in eDP 1.3. PSR allows the display to go to lower standby states
 * when the system is idle but the display is on, as it completely eliminates
 * display refresh requests to DDR memory as long as the frame buffer for that
 * display is unchanged.
 *
 * Panel Self Refresh must be supported by both Hardware (source) and
 * Panel (sink).
 *
 * PSR saves power by caching the framebuffer in the panel RFB, which allows us
 * to power down the link and memory controller. For DSI panels the same idea
 * is called "manual mode".
 *
 * The implementation uses the hardware-based PSR support which automatically
 * enters/exits self-refresh mode. The hardware takes care of sending the
 * required DP aux message and could even retrain the link (that part isn't
 * enabled yet though). The hardware also keeps track of any frontbuffer
 * changes to know when to exit self-refresh mode again. Unfortunately that
 * part doesn't work too well, which is why the i915 PSR support uses the
 * software frontbuffer tracking to make sure it doesn't miss a screen
 * update. For this integration intel_psr_invalidate() and intel_psr_flush()
 * get called by the frontbuffer tracking code. Note that because of locking
 * issues the self-refresh re-enable code is done from a work queue, which
 * must be correctly synchronized/cancelled when shutting down the pipe.
 *
 * DC3CO (DC3 clock off)
 *
 * On top of PSR2, GEN12 adds an intermediate power savings state that turns
 * the clock off automatically during the PSR2 idle state.
 * The smaller overhead of DC3co entry/exit vs. the overhead of PSR2 deep sleep
 * entry/exit allows the HW to enter a low-power state even when page flipping
 * periodically (for instance a 30fps video playback scenario).
 *
 * Every time a flip occurs PSR2 will get out of deep sleep state (if it was
 * in it), DC3CO is enabled and tgl_dc3co_disable_work is scheduled to run
 * after 6 frames. If no other flip occurs and that work executes, DC3CO is
 * disabled and PSR2 is configured to enter deep sleep, resetting again in
 * case of another flip.
 * Front buffer modifications do not trigger DC3CO activation on purpose as
 * that would bring a lot of complexity and most modern systems will only
 * use page flips.
 */

/*
 * Description of PSR mask bits:
 *
 * EDP_PSR_DEBUG[16]/EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (hsw-skl):
 *
 *  When unmasked (nearly) all display register writes (e.g. even
 *  SWF) trigger a PSR exit. Some registers are excluded from this
 *  and they have a more specific mask (described below). On icl+
 *  this bit no longer exists and is effectively always set.
 *
 * PIPE_MISC[21]/PIPE_MISC_PSR_MASK_PIPE_REG_WRITE (skl+):
 *
 *  When unmasked (nearly) all pipe/plane register writes
 *  trigger a PSR exit. Some plane registers are excluded from this
 *  and they have a more specific mask (described below).
 *
 * CHICKEN_PIPESL_1[11]/SKL_PSR_MASK_PLANE_FLIP (skl+):
 * PIPE_MISC[23]/PIPE_MISC_PSR_MASK_PRIMARY_FLIP (bdw):
 * EDP_PSR_DEBUG[23]/EDP_PSR_DEBUG_MASK_PRIMARY_FLIP (hsw):
 *
 *  When unmasked PRI_SURF/PLANE_SURF writes trigger a PSR exit.
 *  SPR_SURF/CURBASE are not included in this and instead are
 *  controlled by PIPE_MISC_PSR_MASK_PIPE_REG_WRITE (skl+) or
 *  EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (hsw/bdw).
 *
 * PIPE_MISC[22]/PIPE_MISC_PSR_MASK_SPRITE_ENABLE (bdw):
 * EDP_PSR_DEBUG[21]/EDP_PSR_DEBUG_MASK_SPRITE_ENABLE (hsw):
 *
 *  When unmasked PSR is blocked as long as the sprite
 *  plane is enabled. skl+ with their universal planes no
 *  longer have a mask bit like this, and no plane being
 *  enabled blocks PSR.
 *
 * PIPE_MISC[21]/PIPE_MISC_PSR_MASK_CURSOR_MOVE (bdw):
 * EDP_PSR_DEBUG[20]/EDP_PSR_DEBUG_MASK_CURSOR_MOVE (hsw):
 *
 *  When unmasked CURPOS writes trigger a PSR exit. On skl+
 *  this bit doesn't exist, but CURPOS is included in the
 *  PIPE_MISC_PSR_MASK_PIPE_REG_WRITE mask.
 *
 * PIPE_MISC[20]/PIPE_MISC_PSR_MASK_VBLANK_VSYNC_INT (bdw+):
 * EDP_PSR_DEBUG[19]/EDP_PSR_DEBUG_MASK_VBLANK_VSYNC_INT (hsw):
 *
 *  When unmasked PSR is blocked as long as vblank and/or vsync
 *  interrupt is unmasked in IMR *and* enabled in IER.
 *
 * CHICKEN_TRANS[30]/SKL_UNMASK_VBL_TO_PIPE_IN_SRD (skl+):
 * CHICKEN_PAR1_1[15]/HSW_MASK_VBL_TO_PIPE_IN_SRD (hsw/bdw):
 *
 *  Selects whether PSR exit generates an extra vblank before
 *  the first frame is transmitted. Also note the opposite polarity
 *  of the bit on hsw/bdw vs. skl+ (masked==generate the extra vblank,
 *  unmasked==do not generate the extra vblank).
 *
 *  With DC states enabled the extra vblank happens after link training,
 *  with DC states disabled it happens immediately upon PSR exit trigger.
 *  No idea as of now why there is a difference. HSW/BDW (which don't
 *  even have DMC) always generate it after link training. Go figure.
 *
 *  Unfortunately CHICKEN_TRANS itself seems to be double buffered
 *  and thus won't latch until the first vblank. So with DC states
 *  enabled the register effectively uses the reset value during DC5
 *  exit+PSR exit sequence, and thus the bit does nothing until
 *  latched by the vblank that it was trying to prevent from being
 *  generated in the first place. So we should probably call this
 *  one a chicken/egg bit instead on skl+.
 *
 *  In standby mode (as opposed to link-off) this makes no difference
 *  as the timing generator keeps running the whole time generating
 *  normal periodic vblanks.
 *
 *  WaPsrDPAMaskVBlankInSRD asks us to set the bit on hsw/bdw,
 *  and doing so makes the behaviour match the skl+ reset value.
 *
 * CHICKEN_PIPESL_1[0]/BDW_UNMASK_VBL_TO_REGS_IN_SRD (bdw):
 * CHICKEN_PIPESL_1[15]/HSW_UNMASK_VBL_TO_REGS_IN_SRD (hsw):
 *
 *  On BDW without this bit set no vblanks whatsoever are
 *  generated after PSR exit. On HSW this has no apparent effect.
 *  WaPsrDPRSUnmaskVBlankInSRD says to set this.
 *
 * The rest of the bits are more self-explanatory and/or
 * irrelevant for normal operation.
 *
 * Description of intel_crtc_state variables. has_psr, has_panel_replay and
 * has_sel_update:
 *
 *  has_psr (alone):                                    PSR1
 *  has_psr + has_sel_update:                           PSR2
 *  has_psr + has_panel_replay:                         Panel Replay
 *  has_psr + has_panel_replay + has_sel_update:        Panel Replay Selective Update
 *
 * Description of some intel_psr variables. enabled, panel_replay_enabled,
 * sel_update_enabled
 *
 *  enabled (alone):                                            PSR1
 *  enabled + sel_update_enabled:                               PSR2
 *  enabled + panel_replay_enabled:                             Panel Replay
 *  enabled + panel_replay_enabled + sel_update_enabled:        Panel Replay SU
 */

#define CAN_PSR(intel_dp) ((intel_dp)->psr.sink_support && \
                           (intel_dp)->psr.source_support)

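/*
 * Returns true for DP/eDP (including DP MST) encoders whose sink and source
 * both support PSR or Panel Replay.
 */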
bool intel_encoder_can_psr(struct intel_encoder *encoder)
{
        if (intel_encoder_is_dp(encoder) || encoder->type == INTEL_OUTPUT_DP_MST)
                return CAN_PSR(enc_to_intel_dp(encoder)) ||
                       CAN_PANEL_REPLAY(enc_to_intel_dp(encoder));
        else
                return false;
}

bool intel_psr_needs_aux_io_power(struct intel_encoder *encoder,
                                  const struct intel_crtc_state *crtc_state)
{
        /*
         * For PSR/PR modes only eDP requires the AUX IO power to be enabled whenever
         * the output is enabled. For non-eDP outputs the main link is always
         * on, hence it doesn't require the HW initiated AUX wake-up signaling used
         * for eDP.
         *
         * TODO:
         * - Consider leaving AUX IO disabled for eDP / PR as well, in case
         *   the ALPM with main-link off mode is not enabled.
         * - Leave AUX IO enabled for DP / PR, once support for ALPM with
         *   main-link off mode is added for it and this mode gets enabled.
         */
        return intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) &&
               intel_encoder_can_psr(encoder);
}

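/*
 * Global enable checks (this and the two helpers below): they honour the
 * I915_PSR_DEBUG_* overrides in intel_dp->psr.debug on top of the VBT PSR
 * default (for eDP) and the enable_panel_replay module parameter.
 */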
static bool psr_global_enabled(struct intel_dp *intel_dp)
{
        struct intel_connector *connector = intel_dp->attached_connector;

        switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
        case I915_PSR_DEBUG_DEFAULT:
                return intel_dp_is_edp(intel_dp) ?
                        connector->panel.vbt.psr.enable : true;
        case I915_PSR_DEBUG_DISABLE:
                return false;
        default:
                return true;
        }
}

static bool sel_update_global_enabled(struct intel_dp *intel_dp)
{
        switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
        case I915_PSR_DEBUG_DISABLE:
        case I915_PSR_DEBUG_FORCE_PSR1:
                return false;
        default:
                return true;
        }
}

static bool panel_replay_global_enabled(struct intel_dp *intel_dp)
{
        struct intel_display *display = to_intel_display(intel_dp);

        return !(intel_dp->psr.debug & I915_PSR_DEBUG_PANEL_REPLAY_DISABLE) &&
                display->params.enable_panel_replay;
}

static u32 psr_irq_psr_error_bit_get(struct intel_dp *intel_dp)
{
        struct intel_display *display = to_intel_display(intel_dp);

        return DISPLAY_VER(display) >= 12 ? TGL_PSR_ERROR :
                EDP_PSR_ERROR(intel_dp->psr.transcoder);
}

static u32 psr_irq_post_exit_bit_get(struct intel_dp *intel_dp)
{
        struct intel_display *display = to_intel_display(intel_dp);

        return DISPLAY_VER(display) >= 12 ? TGL_PSR_POST_EXIT :
                EDP_PSR_POST_EXIT(intel_dp->psr.transcoder);
}

static u32 psr_irq_pre_entry_bit_get(struct intel_dp *intel_dp)
{
        struct intel_display *display = to_intel_display(intel_dp);

        return DISPLAY_VER(display) >= 12 ? TGL_PSR_PRE_ENTRY :
                EDP_PSR_PRE_ENTRY(intel_dp->psr.transcoder);
}

static u32 psr_irq_mask_get(struct intel_dp *intel_dp)
{
        struct intel_display *display = to_intel_display(intel_dp);

        return DISPLAY_VER(display) >= 12 ? TGL_PSR_MASK :
                EDP_PSR_MASK(intel_dp->psr.transcoder);
}

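/*
 * PSR register helpers: on HSW the SRD (PSR) registers live at fixed
 * HSW_SRD_* offsets, while BDW and later use the transcoder-based EDP_PSR_*
 * variants. The PSR IMR/IIR only became per-transcoder registers on
 * display version 12 (TGL).
 */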
static i915_reg_t psr_ctl_reg(struct intel_display *display,
                              enum transcoder cpu_transcoder)
{
        if (DISPLAY_VER(display) >= 8)
                return EDP_PSR_CTL(display, cpu_transcoder);
        else
                return HSW_SRD_CTL;
}

static i915_reg_t psr_debug_reg(struct intel_display *display,
                                enum transcoder cpu_transcoder)
{
        if (DISPLAY_VER(display) >= 8)
                return EDP_PSR_DEBUG(display, cpu_transcoder);
        else
                return HSW_SRD_DEBUG;
}

static i915_reg_t psr_perf_cnt_reg(struct intel_display *display,
                                   enum transcoder cpu_transcoder)
{
        if (DISPLAY_VER(display) >= 8)
                return EDP_PSR_PERF_CNT(display, cpu_transcoder);
        else
                return HSW_SRD_PERF_CNT;
}

static i915_reg_t psr_status_reg(struct intel_display *display,
                                 enum transcoder cpu_transcoder)
{
        if (DISPLAY_VER(display) >= 8)
                return EDP_PSR_STATUS(display, cpu_transcoder);
        else
                return HSW_SRD_STATUS;
}

static i915_reg_t psr_imr_reg(struct intel_display *display,
                              enum transcoder cpu_transcoder)
{
        if (DISPLAY_VER(display) >= 12)
                return TRANS_PSR_IMR(display, cpu_transcoder);
        else
                return EDP_PSR_IMR;
}

static i915_reg_t psr_iir_reg(struct intel_display *display,
                              enum transcoder cpu_transcoder)
{
        if (DISPLAY_VER(display) >= 12)
                return TRANS_PSR_IIR(display, cpu_transcoder);
        else
                return EDP_PSR_IIR;
}

static i915_reg_t psr_aux_ctl_reg(struct intel_display *display,
                                  enum transcoder cpu_transcoder)
{
        if (DISPLAY_VER(display) >= 8)
                return EDP_PSR_AUX_CTL(display, cpu_transcoder);
        else
                return HSW_SRD_AUX_CTL;
}

static i915_reg_t psr_aux_data_reg(struct intel_display *display,
                                   enum transcoder cpu_transcoder, int i)
{
        if (DISPLAY_VER(display) >= 8)
                return EDP_PSR_AUX_DATA(display, cpu_transcoder, i);
        else
                return HSW_SRD_AUX_DATA(i);
}

static void psr_irq_control(struct intel_dp *intel_dp)
{
        struct intel_display *display = to_intel_display(intel_dp);
        enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
        u32 mask;

        if (intel_dp->psr.panel_replay_enabled)
                return;

        mask = psr_irq_psr_error_bit_get(intel_dp);
        if (intel_dp->psr.debug & I915_PSR_DEBUG_IRQ)
                mask |= psr_irq_post_exit_bit_get(intel_dp) |
                        psr_irq_pre_entry_bit_get(intel_dp);

        intel_de_rmw(display, psr_imr_reg(display, cpu_transcoder),
                     psr_irq_mask_get(intel_dp), ~mask);
}

static void psr_event_print(struct intel_display *display,
                            u32 val, bool sel_update_enabled)
{
        drm_dbg_kms(display->drm, "PSR exit events: 0x%x\n", val);
        if (val & PSR_EVENT_PSR2_WD_TIMER_EXPIRE)
                drm_dbg_kms(display->drm, "\tPSR2 watchdog timer expired\n");
        if ((val & PSR_EVENT_PSR2_DISABLED) && sel_update_enabled)
                drm_dbg_kms(display->drm, "\tPSR2 disabled\n");
        if (val & PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN)
                drm_dbg_kms(display->drm, "\tSU dirty FIFO underrun\n");
        if (val & PSR_EVENT_SU_CRC_FIFO_UNDERRUN)
                drm_dbg_kms(display->drm, "\tSU CRC FIFO underrun\n");
        if (val & PSR_EVENT_GRAPHICS_RESET)
                drm_dbg_kms(display->drm, "\tGraphics reset\n");
        if (val & PSR_EVENT_PCH_INTERRUPT)
                drm_dbg_kms(display->drm, "\tPCH interrupt\n");
        if (val & PSR_EVENT_MEMORY_UP)
                drm_dbg_kms(display->drm, "\tMemory up\n");
        if (val & PSR_EVENT_FRONT_BUFFER_MODIFY)
                drm_dbg_kms(display->drm, "\tFront buffer modification\n");
        if (val & PSR_EVENT_WD_TIMER_EXPIRE)
                drm_dbg_kms(display->drm, "\tPSR watchdog timer expired\n");
        if (val & PSR_EVENT_PIPE_REGISTERS_UPDATE)
                drm_dbg_kms(display->drm, "\tPIPE registers updated\n");
        if (val & PSR_EVENT_REGISTER_UPDATE)
                drm_dbg_kms(display->drm, "\tRegister updated\n");
        if (val & PSR_EVENT_HDCP_ENABLE)
                drm_dbg_kms(display->drm, "\tHDCP enabled\n");
        if (val & PSR_EVENT_KVMR_SESSION_ENABLE)
                drm_dbg_kms(display->drm, "\tKVMR session enabled\n");
        if (val & PSR_EVENT_VBI_ENABLE)
                drm_dbg_kms(display->drm, "\tVBI enabled\n");
        if (val & PSR_EVENT_LPSP_MODE_EXIT)
                drm_dbg_kms(display->drm, "\tLPSP mode exited\n");
        if ((val & PSR_EVENT_PSR_DISABLE) && !sel_update_enabled)
                drm_dbg_kms(display->drm, "\tPSR disabled\n");
}

void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
{
        struct intel_display *display = to_intel_display(intel_dp);
        enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
        ktime_t time_ns = ktime_get();

        if (psr_iir & psr_irq_pre_entry_bit_get(intel_dp)) {
                intel_dp->psr.last_entry_attempt = time_ns;
                drm_dbg_kms(display->drm,
                            "[transcoder %s] PSR entry attempt in 2 vblanks\n",
                            transcoder_name(cpu_transcoder));
        }

        if (psr_iir & psr_irq_post_exit_bit_get(intel_dp)) {
                intel_dp->psr.last_exit = time_ns;
                drm_dbg_kms(display->drm,
                            "[transcoder %s] PSR exit completed\n",
                            transcoder_name(cpu_transcoder));

                if (DISPLAY_VER(display) >= 9) {
                        u32 val;

                        val = intel_de_rmw(display,
                                           PSR_EVENT(display, cpu_transcoder),
                                           0, 0);

                        psr_event_print(display, val, intel_dp->psr.sel_update_enabled);
                }
        }

        if (psr_iir & psr_irq_psr_error_bit_get(intel_dp)) {
                drm_warn(display->drm, "[transcoder %s] PSR aux error\n",
                         transcoder_name(cpu_transcoder));

                intel_dp->psr.irq_aux_error = true;

                /*
                 * If this interrupt is not masked it will keep firing so
                 * fast that it prevents the scheduled work from running.
                 * Also, after a PSR error we don't want to arm PSR again,
                 * so we don't care about unmasking the interrupt or
                 * clearing irq_aux_error.
                 */
                intel_de_rmw(display, psr_imr_reg(display, cpu_transcoder),
                             0, psr_irq_psr_error_bit_get(intel_dp));

                queue_work(display->wq.unordered, &intel_dp->psr.work);
        }
}

static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
{
        struct intel_display *display = to_intel_display(intel_dp);
        u8 val = 8; /* assume the worst if we can't read the value */

        if (drm_dp_dpcd_readb(&intel_dp->aux,
                              DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1)
                val &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
        else
                drm_dbg_kms(display->drm,
                            "Unable to get sink synchronization latency, assuming 8 frames\n");
        return val;
}

static void _psr_compute_su_granularity(struct intel_dp *intel_dp,
                                        struct intel_connector *connector)
{
        struct intel_display *display = to_intel_display(intel_dp);
        ssize_t r;
        __le16 w;
        u8 y;

        /*
         * If the sink doesn't have specific granularity requirements, set
         * legacy ones.
         */
        if (!(connector->dp.psr_caps.dpcd[1] & DP_PSR2_SU_GRANULARITY_REQUIRED)) {
                /* As PSR2 HW sends full lines, we do not care about x granularity */
                w = cpu_to_le16(4);
                y = 4;
                goto exit;
        }

        r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_X_GRANULARITY, &w, sizeof(w));
        if (r != sizeof(w))
                drm_dbg_kms(display->drm,
                            "Unable to read selective update x granularity\n");
        /*
         * Spec says that if the value read is 0 the default granularity should
         * be used instead.
         */
        if (r != sizeof(w) || w == 0)
                w = cpu_to_le16(4);

        r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_Y_GRANULARITY, &y, 1);
        if (r != 1) {
                drm_dbg_kms(display->drm,
                            "Unable to read selective update y granularity\n");
                y = 4;
        }
        if (y == 0)
                y = 1;

exit:
        connector->dp.psr_caps.su_w_granularity = le16_to_cpu(w);
        connector->dp.psr_caps.su_y_granularity = y;
}

static enum intel_panel_replay_dsc_support
compute_pr_dsc_support(struct intel_connector *connector)
{
        u8 pr_dsc_mode;
        u8 val;

        val = connector->dp.panel_replay_caps.dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_CAPABILITY)];
        pr_dsc_mode = REG_FIELD_GET8(DP_PANEL_REPLAY_DSC_DECODE_CAPABILITY_IN_PR_MASK, val);

        switch (pr_dsc_mode) {
        case DP_DSC_DECODE_CAPABILITY_IN_PR_FULL_FRAME_ONLY:
                return INTEL_DP_PANEL_REPLAY_DSC_FULL_FRAME_ONLY;
        case DP_DSC_DECODE_CAPABILITY_IN_PR_SUPPORTED:
                return INTEL_DP_PANEL_REPLAY_DSC_SELECTIVE_UPDATE;
        default:
                MISSING_CASE(pr_dsc_mode);
                fallthrough;
        case DP_DSC_DECODE_CAPABILITY_IN_PR_NOT_SUPPORTED:
        case DP_DSC_DECODE_CAPABILITY_IN_PR_RESERVED:
                return INTEL_DP_PANEL_REPLAY_DSC_NOT_SUPPORTED;
        }
}

static const char *panel_replay_dsc_support_str(enum intel_panel_replay_dsc_support dsc_support)
{
        switch (dsc_support) {
        case INTEL_DP_PANEL_REPLAY_DSC_NOT_SUPPORTED:
                return "not supported";
        case INTEL_DP_PANEL_REPLAY_DSC_FULL_FRAME_ONLY:
                return "full frame only";
        case INTEL_DP_PANEL_REPLAY_DSC_SELECTIVE_UPDATE:
                return "selective update";
        default:
                MISSING_CASE(dsc_support);
                return "n/a";
        }
}

static void _panel_replay_compute_su_granularity(struct intel_connector *connector)
{
        u16 w;
        u8 y;

        if (!(connector->dp.panel_replay_caps.dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_CAPABILITY)] &
               DP_PANEL_REPLAY_SU_GRANULARITY_REQUIRED)) {
                w = 4;
                y = 4;
                goto exit;
        }

        /*
         * Spec says that if the value read is 0 the default granularity should
         * be used instead.
         */
        w = le16_to_cpu(*(__le16 *)&connector->dp.panel_replay_caps.dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_X_GRANULARITY)]) ? : 4;
        y = connector->dp.panel_replay_caps.dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_Y_GRANULARITY)] ? : 1;

exit:
        connector->dp.panel_replay_caps.su_w_granularity = w;
        connector->dp.panel_replay_caps.su_y_granularity = y;
}

static void _panel_replay_init_dpcd(struct intel_dp *intel_dp, struct intel_connector *connector)
{
        struct intel_display *display = to_intel_display(intel_dp);
        int ret;

        /* TODO: Enable Panel Replay on MST once it's properly implemented. */
        if (intel_dp->mst_detect == DRM_DP_MST)
                return;

        ret = drm_dp_dpcd_read_data(&intel_dp->aux, DP_PANEL_REPLAY_CAP_SUPPORT,
                                    &connector->dp.panel_replay_caps.dpcd,
                                    sizeof(connector->dp.panel_replay_caps.dpcd));
        if (ret < 0)
                return;

        if (!(connector->dp.panel_replay_caps.dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_SUPPORT)] &
              DP_PANEL_REPLAY_SUPPORT))
                return;

        if (intel_dp_is_edp(intel_dp)) {
                if (!intel_alpm_aux_less_wake_supported(intel_dp)) {
                        drm_dbg_kms(display->drm,
                                    "Panel doesn't support AUX-less ALPM, eDP Panel Replay not possible\n");
                        return;
                }

                if (!(connector->dp.panel_replay_caps.dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_SUPPORT)] &
                      DP_PANEL_REPLAY_EARLY_TRANSPORT_SUPPORT)) {
                        drm_dbg_kms(display->drm,
                                    "Panel doesn't support early transport, eDP Panel Replay not possible\n");
                        return;
                }
        }

        connector->dp.panel_replay_caps.support = true;
        intel_dp->psr.sink_panel_replay_support = true;

        if (connector->dp.panel_replay_caps.dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_SUPPORT)] &
            DP_PANEL_REPLAY_SU_SUPPORT) {
                connector->dp.panel_replay_caps.su_support = true;

                _panel_replay_compute_su_granularity(connector);
        }

        connector->dp.panel_replay_caps.dsc_support = compute_pr_dsc_support(connector);

        drm_dbg_kms(display->drm,
                    "Panel replay %sis supported by panel (in DSC mode: %s)\n",
                    connector->dp.panel_replay_caps.su_support ?
                    "selective_update " : "",
                    panel_replay_dsc_support_str(connector->dp.panel_replay_caps.dsc_support));
}

static void _psr_init_dpcd(struct intel_dp *intel_dp, struct intel_connector *connector)
{
        struct intel_display *display = to_intel_display(intel_dp);
        int ret;

        ret = drm_dp_dpcd_read_data(&intel_dp->aux, DP_PSR_SUPPORT, connector->dp.psr_caps.dpcd,
                                    sizeof(connector->dp.psr_caps.dpcd));
        if (ret < 0)
                return;

        if (!connector->dp.psr_caps.dpcd[0])
                return;

        drm_dbg_kms(display->drm, "eDP panel supports PSR version %x\n",
                    connector->dp.psr_caps.dpcd[0]);

        if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
                drm_dbg_kms(display->drm,
                            "PSR support not currently available for this panel\n");
                return;
        }

        if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
                drm_dbg_kms(display->drm,
                            "Panel lacks power state control, PSR cannot be enabled\n");
                return;
        }

        connector->dp.psr_caps.support = true;
        intel_dp->psr.sink_support = true;

        connector->dp.psr_caps.sync_latency = intel_dp_get_sink_sync_latency(intel_dp);

        if (DISPLAY_VER(display) >= 9 &&
            connector->dp.psr_caps.dpcd[0] >= DP_PSR2_WITH_Y_COORD_IS_SUPPORTED) {
                bool y_req = connector->dp.psr_caps.dpcd[1] &
                             DP_PSR2_SU_Y_COORDINATE_REQUIRED;

                /*
                 * All panels that support PSR version 03h (PSR2 +
                 * Y-coordinate) can handle Y-coordinates in the VSC, but we
                 * are only sure that it is going to be used when required by
                 * the panel. This way the panel is capable of doing selective
                 * updates without an AUX frame sync.
                 *
                 * To support panels with PSR version 02h, or 03h without the
                 * Y-coordinate requirement, we would need to enable GTC
                 * first.
                 */
                connector->dp.psr_caps.su_support = y_req &&
                        intel_alpm_aux_wake_supported(intel_dp);
                drm_dbg_kms(display->drm, "PSR2 %ssupported\n",
                            connector->dp.psr_caps.su_support ? "" : "not ");
        }

        if (connector->dp.psr_caps.su_support)
                _psr_compute_su_granularity(intel_dp, connector);
}

void intel_psr_init_dpcd(struct intel_dp *intel_dp, struct intel_connector *connector)
{
        _psr_init_dpcd(intel_dp, connector);

        _panel_replay_init_dpcd(intel_dp, connector);
}

static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
{
        struct intel_display *display = to_intel_display(intel_dp);
        enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
        u32 aux_clock_divider, aux_ctl;
        /* write DP_SET_POWER=D0 */
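        /*
         * Native AUX write header: command in the high nibble and address
         * bits 19:16 in the low nibble of byte 0, address bits 15:8 and 7:0
         * in bytes 1-2, then (payload length - 1), followed by the payload.
         */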
        static const u8 aux_msg[] = {
                [0] = (DP_AUX_NATIVE_WRITE << 4) | ((DP_SET_POWER >> 16) & 0xf),
                [1] = (DP_SET_POWER >> 8) & 0xff,
                [2] = DP_SET_POWER & 0xff,
                [3] = 1 - 1,
                [4] = DP_SET_POWER_D0,
        };
        int i;

        BUILD_BUG_ON(sizeof(aux_msg) > 20);
        for (i = 0; i < sizeof(aux_msg); i += 4)
                intel_de_write(display,
                               psr_aux_data_reg(display, cpu_transcoder, i >> 2),
                               intel_dp_aux_pack(&aux_msg[i], sizeof(aux_msg) - i));

        aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);

        /* Start with bits set for DDI_AUX_CTL register */
        aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, sizeof(aux_msg),
                                             aux_clock_divider);

        /* Select only valid bits for SRD_AUX_CTL */
        aux_ctl &= EDP_PSR_AUX_CTL_TIME_OUT_MASK |
                EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK |
                EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK |
                EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK;

        intel_de_write(display, psr_aux_ctl_reg(display, cpu_transcoder),
                       aux_ctl);
}

static bool psr2_su_region_et_valid(struct intel_connector *connector, bool panel_replay)
{
        struct intel_dp *intel_dp = intel_attached_dp(connector);
        struct intel_display *display = to_intel_display(intel_dp);

        if (DISPLAY_VER(display) < 20 || !intel_dp_is_edp(intel_dp) ||
            intel_dp->psr.debug & I915_PSR_DEBUG_SU_REGION_ET_DISABLE)
                return false;

        return panel_replay ?
                connector->dp.panel_replay_caps.dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_SUPPORT)] &
                DP_PANEL_REPLAY_EARLY_TRANSPORT_SUPPORT :
                connector->dp.psr_caps.dpcd[0] == DP_PSR2_WITH_Y_COORD_ET_SUPPORTED;
}

static void _panel_replay_enable_sink(struct intel_dp *intel_dp,
                                      const struct intel_crtc_state *crtc_state)
{
        u8 val = DP_PANEL_REPLAY_ENABLE |
                DP_PANEL_REPLAY_VSC_SDP_CRC_EN |
                DP_PANEL_REPLAY_UNRECOVERABLE_ERROR_EN |
                DP_PANEL_REPLAY_RFB_STORAGE_ERROR_EN |
                DP_PANEL_REPLAY_ACTIVE_FRAME_CRC_ERROR_EN;
        u8 panel_replay_config2 = DP_PANEL_REPLAY_CRC_VERIFICATION;

        if (crtc_state->has_sel_update)
                val |= DP_PANEL_REPLAY_SU_ENABLE;

        if (crtc_state->enable_psr2_su_region_et)
                val |= DP_PANEL_REPLAY_ENABLE_SU_REGION_ET;

        if (crtc_state->req_psr2_sdp_prior_scanline)
                panel_replay_config2 |=
                        DP_PANEL_REPLAY_SU_REGION_SCANLINE_CAPTURE;

        drm_dp_dpcd_writeb(&intel_dp->aux, PANEL_REPLAY_CONFIG, val);

        drm_dp_dpcd_writeb(&intel_dp->aux, PANEL_REPLAY_CONFIG2,
                           panel_replay_config2);
}

static void _psr_enable_sink(struct intel_dp *intel_dp,
                             const struct intel_crtc_state *crtc_state)
{
        struct intel_display *display = to_intel_display(intel_dp);
        u8 val = 0;

        if (crtc_state->has_sel_update) {
                val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS;
        } else {
                if (intel_dp->psr.link_standby)
                        val |= DP_PSR_MAIN_LINK_ACTIVE;

                if (DISPLAY_VER(display) >= 8)
                        val |= DP_PSR_CRC_VERIFICATION;
        }

        if (crtc_state->req_psr2_sdp_prior_scanline)
                val |= DP_PSR_SU_REGION_SCANLINE_CAPTURE;

        if (crtc_state->enable_psr2_su_region_et)
                val |= DP_PANEL_REPLAY_ENABLE_SU_REGION_ET;

        if (intel_dp->psr.entry_setup_frames > 0)
                val |= DP_PSR_FRAME_CAPTURE;
        drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, val);

        val |= DP_PSR_ENABLE;
        drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, val);
}

static void intel_psr_enable_sink(struct intel_dp *intel_dp,
                                  const struct intel_crtc_state *crtc_state)
{
        intel_alpm_enable_sink(intel_dp, crtc_state);

        crtc_state->has_panel_replay ?
                _panel_replay_enable_sink(intel_dp, crtc_state) :
                _psr_enable_sink(intel_dp, crtc_state);

        if (intel_dp_is_edp(intel_dp))
                drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
}

void intel_psr_panel_replay_enable_sink(struct intel_dp *intel_dp)
{
        /*
         * NOTE: We might want to trigger mode set when
         * disabling/enabling Panel Replay via debugfs interface to
         * ensure this bit is cleared/set accordingly.
         */
        if (CAN_PANEL_REPLAY(intel_dp) && panel_replay_global_enabled(intel_dp))
                drm_dp_dpcd_writeb(&intel_dp->aux, PANEL_REPLAY_CONFIG,
                                   DP_PANEL_REPLAY_ENABLE);
}

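/*
 * Build the EDP_PSR_CTL TP1/TP2/TP3 training pattern time fields from the VBT
 * wakeup times, rounding each value up to the next supported setting, and
 * select TP1->TP3 vs. TP1->TP2 based on source/sink TPS3 support.
 */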
static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
{
        struct intel_display *display = to_intel_display(intel_dp);
        struct intel_connector *connector = intel_dp->attached_connector;
        u32 val = 0;

        if (DISPLAY_VER(display) >= 11)
                val |= EDP_PSR_TP4_TIME_0us;

        if (display->params.psr_safest_params) {
                val |= EDP_PSR_TP1_TIME_2500us;
                val |= EDP_PSR_TP2_TP3_TIME_2500us;
                goto check_tp3_sel;
        }

        if (connector->panel.vbt.psr.tp1_wakeup_time_us == 0)
                val |= EDP_PSR_TP1_TIME_0us;
        else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 100)
                val |= EDP_PSR_TP1_TIME_100us;
        else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 500)
                val |= EDP_PSR_TP1_TIME_500us;
        else
                val |= EDP_PSR_TP1_TIME_2500us;

        if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us == 0)
                val |= EDP_PSR_TP2_TP3_TIME_0us;
        else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 100)
                val |= EDP_PSR_TP2_TP3_TIME_100us;
        else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 500)
                val |= EDP_PSR_TP2_TP3_TIME_500us;
        else
                val |= EDP_PSR_TP2_TP3_TIME_2500us;

        /*
         * WA 0479: hsw,bdw
         * "Do not skip both TP1 and TP2/TP3"
         */
        if (DISPLAY_VER(display) < 9 &&
            connector->panel.vbt.psr.tp1_wakeup_time_us == 0 &&
            connector->panel.vbt.psr.tp2_tp3_wakeup_time_us == 0)
                val |= EDP_PSR_TP2_TP3_TIME_100us;

check_tp3_sel:
        if (intel_dp_source_supports_tps3(display) &&
            drm_dp_tps3_supported(intel_dp->dpcd))
                val |= EDP_PSR_TP_TP1_TP3;
        else
                val |= EDP_PSR_TP_TP1_TP2;

        return val;
}

static u8 psr_compute_idle_frames(struct intel_dp *intel_dp)
{
        struct intel_display *display = to_intel_display(intel_dp);
        struct intel_connector *connector = intel_dp->attached_connector;
        int idle_frames;

        /*
         * Let's use 6 as the minimum to cover all known cases including the
         * off-by-one issue that HW has in some cases.
         */
        idle_frames = max(6, connector->panel.vbt.psr.idle_frames);
        idle_frames = max(idle_frames, connector->dp.psr_caps.sync_latency + 1);

        if (drm_WARN_ON(display->drm, idle_frames > 0xf))
                idle_frames = 0xf;

        return idle_frames;
}

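/*
 * Used by the Wa_16025596647 handling below: DC5/DC6 entry is considered
 * blocked when the current DC state is not DC5/DC6, when other non-PSR pipes
 * are active, or when vblank interrupts are enabled on the PSR pipe.
 */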
static bool is_dc5_dc6_blocked(struct intel_dp *intel_dp)
{
        struct intel_display *display = to_intel_display(intel_dp);
        u32 current_dc_state = intel_display_power_get_current_dc_state(display);
        struct intel_crtc *crtc = intel_crtc_for_pipe(display, intel_dp->psr.pipe);
        struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(&crtc->base);

        return (current_dc_state != DC_STATE_EN_UPTO_DC5 &&
                current_dc_state != DC_STATE_EN_UPTO_DC6) ||
                intel_dp->psr.active_non_psr_pipes ||
                READ_ONCE(vblank->enabled);
}

static void hsw_activate_psr1(struct intel_dp *intel_dp)
{
        struct intel_display *display = to_intel_display(intel_dp);
        enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
        u32 max_sleep_time = 0x1f;
        u32 val = EDP_PSR_ENABLE;

        val |= EDP_PSR_IDLE_FRAMES(psr_compute_idle_frames(intel_dp));

        if (DISPLAY_VER(display) < 20)
                val |= EDP_PSR_MAX_SLEEP_TIME(max_sleep_time);

        if (display->platform.haswell)
                val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;

        if (intel_dp->psr.link_standby)
                val |= EDP_PSR_LINK_STANDBY;

        val |= intel_psr1_get_tp_time(intel_dp);

        if (DISPLAY_VER(display) >= 8)
                val |= EDP_PSR_CRC_ENABLE;

        if (DISPLAY_VER(display) >= 20)
                val |= LNL_EDP_PSR_ENTRY_SETUP_FRAMES(intel_dp->psr.entry_setup_frames);

        intel_de_rmw(display, psr_ctl_reg(display, cpu_transcoder),
                     ~EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK, val);

        /* Wa_16025596647 */
        if ((DISPLAY_VER(display) == 20 ||
             IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) &&
            is_dc5_dc6_blocked(intel_dp) && intel_dp->psr.pkg_c_latency_used)
                intel_dmc_start_pkgc_exit_at_start_of_undelayed_vblank(display,
                                                                       intel_dp->psr.pipe,
                                                                       true);
}

static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp)
{
        struct intel_display *display = to_intel_display(intel_dp);
        struct intel_connector *connector = intel_dp->attached_connector;
        u32 val = 0;

        if (display->params.psr_safest_params)
                return EDP_PSR2_TP2_TIME_2500us;

        if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 &&
            connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 50)
                val |= EDP_PSR2_TP2_TIME_50us;
        else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 100)
                val |= EDP_PSR2_TP2_TIME_100us;
        else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 500)
                val |= EDP_PSR2_TP2_TIME_500us;
        else
                val |= EDP_PSR2_TP2_TIME_2500us;

        return val;
}

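/*
 * PSR2 block count, expressed in lines: 8 lines is enough when both the IO
 * and fast wake times fit in fewer than 9 lines, otherwise the larger 12 line
 * block count is needed. psr2_block_count() converts this to the number of
 * 4-line blocks.
 */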
static int
psr2_block_count_lines(u8 io_wake_lines, u8 fast_wake_lines)
{
        return io_wake_lines < 9 && fast_wake_lines < 9 ? 8 : 12;
}

static int psr2_block_count(struct intel_dp *intel_dp)
{
        return psr2_block_count_lines(intel_dp->psr.io_wake_lines,
                                      intel_dp->psr.fast_wake_lines) / 4;
}

static u8 frames_before_su_entry(struct intel_dp *intel_dp)
{
        struct intel_connector *connector = intel_dp->attached_connector;
        u8 frames_before_su_entry;

        frames_before_su_entry = max_t(u8,
                                       connector->dp.psr_caps.sync_latency + 1,
                                       2);

        /* Entry setup frames must be at least 1 less than frames before SU entry */
        if (intel_dp->psr.entry_setup_frames >= frames_before_su_entry)
                frames_before_su_entry = intel_dp->psr.entry_setup_frames + 1;

        return frames_before_su_entry;
}

static void dg2_activate_panel_replay(struct intel_dp *intel_dp)
{
        struct intel_display *display = to_intel_display(intel_dp);
        struct intel_psr *psr = &intel_dp->psr;
        enum transcoder cpu_transcoder = intel_dp->psr.transcoder;

        if (intel_dp_is_edp(intel_dp) && psr->sel_update_enabled) {
                u32 val = psr->su_region_et_enabled ?
                        LNL_EDP_PSR2_SU_REGION_ET_ENABLE : 0;

                if (intel_dp->psr.req_psr2_sdp_prior_scanline)
                        val |= EDP_PSR2_SU_SDP_SCANLINE;

                intel_de_write(display, EDP_PSR2_CTL(display, cpu_transcoder),
                               val);
        }

        intel_de_rmw(display,
                     PSR2_MAN_TRK_CTL(display, intel_dp->psr.transcoder),
                     0, ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME);

        intel_de_rmw(display, TRANS_DP2_CTL(intel_dp->psr.transcoder), 0,
                     TRANS_DP2_PANEL_REPLAY_ENABLE);
}

static void hsw_activate_psr2(struct intel_dp *intel_dp)
{
        struct intel_display *display = to_intel_display(intel_dp);
        enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
        u32 val = EDP_PSR2_ENABLE;
        u32 psr_val = 0;
        u8 idle_frames;

        /* Wa_16025596647 */
        if ((DISPLAY_VER(display) == 20 ||
             IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) &&
            is_dc5_dc6_blocked(intel_dp) && intel_dp->psr.pkg_c_latency_used)
                idle_frames = 0;
        else
                idle_frames = psr_compute_idle_frames(intel_dp);
        val |= EDP_PSR2_IDLE_FRAMES(idle_frames);

        if (DISPLAY_VER(display) < 14 && !display->platform.alderlake_p)
                val |= EDP_SU_TRACK_ENABLE;

        if (DISPLAY_VER(display) >= 10 && DISPLAY_VER(display) < 13)
                val |= EDP_Y_COORDINATE_ENABLE;

        val |= EDP_PSR2_FRAME_BEFORE_SU(frames_before_su_entry(intel_dp));

        val |= intel_psr2_get_tp_time(intel_dp);

        if (DISPLAY_VER(display) >= 12 && DISPLAY_VER(display) < 20) {
                if (psr2_block_count(intel_dp) > 2)
                        val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_3;
                else
                        val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2;
        }

        /* Wa_22012278275:adl-p */
        if (display->platform.alderlake_p && IS_DISPLAY_STEP(display, STEP_A0, STEP_E0)) {
                static const u8 map[] = {
                        2, /* 5 lines */
                        1, /* 6 lines */
                        0, /* 7 lines */
                        3, /* 8 lines */
                        6, /* 9 lines */
                        5, /* 10 lines */
                        4, /* 11 lines */
                        7, /* 12 lines */
                };
                /*
                 * Still using the default IO_BUFFER_WAKE and FAST_WAKE, see
                 * comments below for more information
                 */
                int tmp;

                tmp = map[intel_dp->psr.io_wake_lines -
                          TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES];
                val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(tmp + TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES);

                tmp = map[intel_dp->psr.fast_wake_lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES];
                val |= TGL_EDP_PSR2_FAST_WAKE(tmp + TGL_EDP_PSR2_FAST_WAKE_MIN_LINES);
        } else if (DISPLAY_VER(display) >= 20) {
                val |= LNL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines);
        } else if (DISPLAY_VER(display) >= 12) {
                val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines);
                val |= TGL_EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines);
        } else if (DISPLAY_VER(display) >= 9) {
                val |= EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines);
                val |= EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines);
        }

        if (intel_dp->psr.req_psr2_sdp_prior_scanline)
                val |= EDP_PSR2_SU_SDP_SCANLINE;

        if (DISPLAY_VER(display) >= 20)
                psr_val |= LNL_EDP_PSR_ENTRY_SETUP_FRAMES(intel_dp->psr.entry_setup_frames);

        if (intel_dp->psr.psr2_sel_fetch_enabled) {
                u32 tmp;

                tmp = intel_de_read(display,
                                    PSR2_MAN_TRK_CTL(display, cpu_transcoder));
                drm_WARN_ON(display->drm, !(tmp & PSR2_MAN_TRK_CTL_ENABLE));
        } else if (HAS_PSR2_SEL_FETCH(display)) {
                intel_de_write(display,
                               PSR2_MAN_TRK_CTL(display, cpu_transcoder), 0);
        }

        if (intel_dp->psr.su_region_et_enabled)
                val |= LNL_EDP_PSR2_SU_REGION_ET_ENABLE;

        /*
         * PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec
         * recommends keeping this bit unset while PSR2 is enabled.
         */
        intel_de_write(display, psr_ctl_reg(display, cpu_transcoder), psr_val);

        intel_de_write(display, EDP_PSR2_CTL(display, cpu_transcoder), val);
}

static bool
transcoder_has_psr2(struct intel_display *display, enum transcoder cpu_transcoder)
{
        if (display->platform.alderlake_p || DISPLAY_VER(display) >= 14)
                return cpu_transcoder == TRANSCODER_A || cpu_transcoder == TRANSCODER_B;
        else if (DISPLAY_VER(display) >= 12)
                return cpu_transcoder == TRANSCODER_A;
        else if (DISPLAY_VER(display) >= 9)
                return cpu_transcoder == TRANSCODER_EDP;
        else
                return false;
}

static u32 intel_get_frame_time_us(const struct intel_crtc_state *crtc_state)
{
        if (!crtc_state->hw.active)
                return 0;

        return DIV_ROUND_UP(1000 * 1000,
                            drm_mode_vrefresh(&crtc_state->hw.adjusted_mode));
}

static void psr2_program_idle_frames(struct intel_dp *intel_dp,
                                     u32 idle_frames)
{
        struct intel_display *display = to_intel_display(intel_dp);
        enum transcoder cpu_transcoder = intel_dp->psr.transcoder;

        intel_de_rmw(display, EDP_PSR2_CTL(display, cpu_transcoder),
                     EDP_PSR2_IDLE_FRAMES_MASK,
                     EDP_PSR2_IDLE_FRAMES(idle_frames));
}

static void tgl_psr2_enable_dc3co(struct intel_dp *intel_dp)
{
        struct intel_display *display = to_intel_display(intel_dp);

        psr2_program_idle_frames(intel_dp, 0);
        intel_display_power_set_target_dc_state(display, DC_STATE_EN_DC3CO);
}

static void tgl_psr2_disable_dc3co(struct intel_dp *intel_dp)
{
        struct intel_display *display = to_intel_display(intel_dp);

        intel_display_power_set_target_dc_state(display, DC_STATE_EN_UPTO_DC6);
        psr2_program_idle_frames(intel_dp, psr_compute_idle_frames(intel_dp));
}

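/*
 * Delayed work scheduled on each flip (see the DC3CO description at the top
 * of this file): if no new flip has re-armed it in the meantime, switch the
 * target DC state back from DC3CO and restore the normal PSR2 idle frame
 * count so deep sleep can be entered again.
 */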
static void tgl_dc3co_disable_work(struct work_struct *work)
{
        struct intel_dp *intel_dp =
                container_of(work, typeof(*intel_dp), psr.dc3co_work.work);

        mutex_lock(&intel_dp->psr.lock);
        /* If delayed work is pending, it is not idle */
        if (delayed_work_pending(&intel_dp->psr.dc3co_work))
                goto unlock;

        tgl_psr2_disable_dc3co(intel_dp);
unlock:
        mutex_unlock(&intel_dp->psr.lock);
}

static void tgl_disallow_dc3co_on_psr2_exit(struct intel_dp *intel_dp)
{
        if (!intel_dp->psr.dc3co_exitline)
                return;

        cancel_delayed_work(&intel_dp->psr.dc3co_work);
        /* Before PSR2 exit disallow dc3co */
        tgl_psr2_disable_dc3co(intel_dp);
}

static bool
dc3co_is_pipe_port_compatible(struct intel_dp *intel_dp,
                              struct intel_crtc_state *crtc_state)
{
        struct intel_display *display = to_intel_display(intel_dp);
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
        enum port port = dig_port->base.port;

        if (display->platform.alderlake_p || DISPLAY_VER(display) >= 14)
                return pipe <= PIPE_B && port <= PORT_B;
        else
                return pipe == PIPE_A && port == PORT_A;
}

static void
tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
                                  struct intel_crtc_state *crtc_state)
{
        struct intel_display *display = to_intel_display(intel_dp);
        const u32 crtc_vdisplay = crtc_state->uapi.adjusted_mode.crtc_vdisplay;
        struct i915_power_domains *power_domains = &display->power.domains;
        u32 exit_scanlines;

        /*
         * FIXME: The sequence for activating/deactivating DC3CO has changed;
         * keep DC3CO disabled until the new activation/deactivation sequence
         * is implemented. Bspec: 49196
         */
        return;

        /*
         * DMC's DC3CO exit mechanism has an issue with Selective Fetch
         * TODO: when the issue is addressed, this restriction should be removed.
         */
        if (crtc_state->enable_psr2_sel_fetch)
                return;

        if (!(power_domains->allowed_dc_mask & DC_STATE_EN_DC3CO))
                return;

        if (!dc3co_is_pipe_port_compatible(intel_dp, crtc_state))
                return;

        /* Wa_16011303918:adl-p */
        if (display->platform.alderlake_p && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0))
                return;

        /*
         * DC3CO Exit time 200us B.Spec 49196
         * PSR2 transcoder Early Exit scanlines = ROUNDUP(200 / line time) + 1
         */
        exit_scanlines =
                intel_usecs_to_scanlines(&crtc_state->uapi.adjusted_mode, 200) + 1;

        if (drm_WARN_ON(display->drm, exit_scanlines > crtc_vdisplay))
                return;

        crtc_state->dc3co_exitline = crtc_vdisplay - exit_scanlines;
}

static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp,
                                              struct intel_crtc_state *crtc_state)
{
        struct intel_display *display = to_intel_display(intel_dp);

        if (!display->params.enable_psr2_sel_fetch &&
            intel_dp->psr.debug != I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
                drm_dbg_kms(display->drm,
                            "PSR2 sel fetch not enabled, disabled by parameter\n");
                return false;
        }

        return crtc_state->enable_psr2_sel_fetch = true;
}

static bool psr2_granularity_check(struct intel_crtc_state *crtc_state,
                                   struct intel_connector *connector)
{
        struct intel_dp *intel_dp = intel_attached_dp(connector);
        struct intel_display *display = to_intel_display(intel_dp);
        const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
        const int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
        const int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
        u16 y_granularity = 0;
        u16 sink_y_granularity = crtc_state->has_panel_replay ?
                connector->dp.panel_replay_caps.su_y_granularity :
                connector->dp.psr_caps.su_y_granularity;
        u16 sink_w_granularity;

        if (crtc_state->has_panel_replay)
                sink_w_granularity = connector->dp.panel_replay_caps.su_w_granularity ==
                        DP_PANEL_REPLAY_FULL_LINE_GRANULARITY ?
                        crtc_hdisplay : connector->dp.panel_replay_caps.su_w_granularity;
        else
                sink_w_granularity = connector->dp.psr_caps.su_w_granularity;

        /* PSR2 HW only sends full lines, so we only need to validate the width */
        if (crtc_hdisplay % sink_w_granularity)
                return false;

        if (crtc_vdisplay % sink_y_granularity)
                return false;

        /* HW tracking is only aligned to 4 lines */
        if (!crtc_state->enable_psr2_sel_fetch)
                return sink_y_granularity == 4;

        /*
         * adl-p and mtl platforms have 1 line granularity.
         * For other platforms with SW tracking we can adjust the Y coordinates
         * to match the sink requirement if it is a multiple of 4.
         */
        if (display->platform.alderlake_p || DISPLAY_VER(display) >= 14)
                y_granularity = sink_y_granularity;
        else if (sink_y_granularity <= 2)
                y_granularity = 4;
        else if ((sink_y_granularity % 4) == 0)
                y_granularity = sink_y_granularity;

        if (y_granularity == 0 || crtc_vdisplay % y_granularity)
                return false;

        if (crtc_state->dsc.compression_enable &&
            vdsc_cfg->slice_height % y_granularity)
                return false;

        crtc_state->su_y_granularity = y_granularity;
        return true;
}

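/*
 * Check whether the hblank period is long enough to transmit the PSR2 SDP;
 * if it is not (with a 100 ns margin), fall back to requesting that the SDP
 * is sent one scanline earlier, which requires display version 14+ and an
 * eDP 1.4b+ sink.
 */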
static bool _compute_psr2_sdp_prior_scanline_indication(struct intel_dp *intel_dp,
                                                        struct intel_crtc_state *crtc_state)
{
        struct intel_display *display = to_intel_display(intel_dp);
        const struct drm_display_mode *adjusted_mode = &crtc_state->uapi.adjusted_mode;
        u32 hblank_total, hblank_ns, req_ns;

        hblank_total = adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start;
        hblank_ns = div_u64(1000000ULL * hblank_total, adjusted_mode->crtc_clock);

        /* From spec: ((60 / number of lanes) + 11) * 1000 / symbol clock frequency MHz */
        req_ns = ((60 / crtc_state->lane_count) + 11) * 1000 / (crtc_state->port_clock / 1000);

        if ((hblank_ns - req_ns) > 100)
                return true;

        /* Not supported <13 / Wa_22012279113:adl-p */
        if (DISPLAY_VER(display) < 14 || intel_dp->edp_dpcd[0] < DP_EDP_14b)
                return false;

        crtc_state->req_psr2_sdp_prior_scanline = true;
        return true;
}

static int intel_psr_entry_setup_frames(struct intel_dp *intel_dp,
                                        struct drm_connector_state *conn_state,
                                        const struct drm_display_mode *adjusted_mode)
{
        struct intel_display *display = to_intel_display(intel_dp);
        struct intel_connector *connector = to_intel_connector(conn_state->connector);
        int psr_setup_time = drm_dp_psr_setup_time(connector->dp.psr_caps.dpcd);
        int entry_setup_frames = 0;

        if (psr_setup_time < 0) {
                drm_dbg_kms(display->drm,
                            "PSR condition failed: Invalid PSR setup time (0x%02x)\n",
                            connector->dp.psr_caps.dpcd[1]);
                return -ETIME;
        }

        if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
            adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
                if (DISPLAY_VER(display) >= 20) {
                        /* setup entry frames can be up to 3 frames */
                        entry_setup_frames = 1;
                        drm_dbg_kms(display->drm,
                                    "PSR setup entry frames %d\n",
                                    entry_setup_frames);
                } else {
                        drm_dbg_kms(display->drm,
                                    "PSR condition failed: PSR setup time (%d us) too long\n",
                                    psr_setup_time);
                        return -ETIME;
                }
        }

        return entry_setup_frames;
}

static
int _intel_psr_min_set_context_latency(const struct intel_crtc_state *crtc_state,
                                       bool needs_panel_replay,
                                       bool needs_sel_update)
{
        struct intel_display *display = to_intel_display(crtc_state);

        if (!crtc_state->has_psr)
                return 0;

        /* Wa_14015401596 */
        if (intel_vrr_possible(crtc_state) && IS_DISPLAY_VER(display, 13, 14))
                return 1;

        /* The rest is for SRD_STATUS, needed on LunarLake and onwards */
        if (DISPLAY_VER(display) < 20)
                return 0;

        /*
         * Comment on SRD_STATUS register in Bspec for LunarLake and onwards:
         *
         * To deterministically capture the transition of the state machine
         * going from SRDOFFACK to IDLE, the delayed V. Blank should be at least
         * one line after the non-delayed V. Blank.
         *
         * Legacy TG: TRANS_SET_CONTEXT_LATENCY > 0
         * VRR TG: TRANS_VRR_CTL[ VRR Guardband ] < (TRANS_VRR_VMAX[ VRR Vmax ]
         * - TRANS_VTOTAL[ Vertical Active ])
         *
         * SRD_STATUS is used only by PSR1 on PantherLake.
         * SRD_STATUS is used by PSR1 and Panel Replay DP on LunarLake.
         */

        if (DISPLAY_VER(display) >= 30 && (needs_panel_replay ||
                                           needs_sel_update))
                return 0;
        else if (DISPLAY_VER(display) < 30 && (needs_sel_update ||
                                               intel_crtc_has_type(crtc_state,
                                                                   INTEL_OUTPUT_EDP)))
                return 0;
        else
                return 1;
}

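/*
 * Check whether @wake_lines fit into the given vblank length, reserving one
 * extra line when the PSR2 SDP has to be sent prior to the scanline.
 */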
static bool _wake_lines_fit_into_vblank(const struct intel_crtc_state *crtc_state,
                                        int vblank,
                                        int wake_lines)
{
        if (crtc_state->req_psr2_sdp_prior_scanline)
                vblank -= 1;

        /* Vblank >= PSR2_CTL Block Count Number maximum line count */
        if (vblank < wake_lines)
                return false;

        return true;
}

static bool wake_lines_fit_into_vblank(struct intel_dp *intel_dp,
                                       const struct intel_crtc_state *crtc_state,
                                       bool aux_less,
                                       bool needs_panel_replay,
                                       bool needs_sel_update)
{
        struct intel_display *display = to_intel_display(intel_dp);
        int vblank = crtc_state->hw.adjusted_mode.crtc_vblank_end -
                crtc_state->hw.adjusted_mode.crtc_vblank_start;
        int wake_lines;
        int scl = _intel_psr_min_set_context_latency(crtc_state,
                                                     needs_panel_replay,
                                                     needs_sel_update);
        vblank -= scl;

        if (aux_less)
                wake_lines = crtc_state->alpm_state.aux_less_wake_lines;
        else
                wake_lines = DISPLAY_VER(display) < 20 ?
                        psr2_block_count_lines(crtc_state->alpm_state.io_wake_lines,
                                               crtc_state->alpm_state.fast_wake_lines) :
                        crtc_state->alpm_state.io_wake_lines;

        /*
         * Guardband has not been computed yet, so we conservatively check if the
         * full vblank duration is sufficient to accommodate wake line requirements
         * for PSR features like Panel Replay and Selective Update.
         *
         * Once the actual guardband is available, a more accurate validation is
         * performed in intel_psr_compute_config_late(), and PSR features are
         * disabled if wake lines exceed the available guardband.
         */
        return _wake_lines_fit_into_vblank(crtc_state, vblank, wake_lines);
}

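/*
 * Validate the ALPM configuration for PSR2/Panel Replay: compute the ALPM
 * parameters and check that the wake lines fit into the available vblank.
 */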
static bool alpm_config_valid(struct intel_dp *intel_dp,
                              struct intel_crtc_state *crtc_state,
                              bool aux_less,
                              bool needs_panel_replay,
                              bool needs_sel_update)
{
        struct intel_display *display = to_intel_display(intel_dp);

        if (!intel_alpm_compute_params(intel_dp, crtc_state)) {
                drm_dbg_kms(display->drm,
                            "PSR2/Panel Replay  not enabled, Unable to use long enough wake times\n");
                return false;
        }

        if (!wake_lines_fit_into_vblank(intel_dp, crtc_state, aux_less,
                                        needs_panel_replay, needs_sel_update)) {
                drm_dbg_kms(display->drm,
                            "PSR2/Panel Replay not enabled, too short vblank time\n");
                return false;
        }

        return true;
}

static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
                                    struct intel_crtc_state *crtc_state,
                                    struct drm_connector_state *conn_state)
{
        struct intel_display *display = to_intel_display(intel_dp);
        struct intel_connector *connector = to_intel_connector(conn_state->connector);
        int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
        int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
        int psr_max_h = 0, psr_max_v = 0, max_bpp = 0;

        if (!connector->dp.psr_caps.su_support || display->params.enable_psr == 1)
                return false;

        /* JSL and EHL only support eDP 1.3 */
        if (display->platform.jasperlake || display->platform.elkhartlake) {
                drm_dbg_kms(display->drm, "PSR2 not supported by phy\n");
                return false;
        }

        /* Wa_16011181250 */
        if (display->platform.rocketlake || display->platform.alderlake_s ||
            display->platform.dg2) {
                drm_dbg_kms(display->drm,
                            "PSR2 is defeatured for this platform\n");
                return false;
        }

        if (display->platform.alderlake_p && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0)) {
                drm_dbg_kms(display->drm,
                            "PSR2 not completely functional in this stepping\n");
                return false;
        }

        if (!transcoder_has_psr2(display, crtc_state->cpu_transcoder)) {
                drm_dbg_kms(display->drm,
                            "PSR2 not supported in transcoder %s\n",
                            transcoder_name(crtc_state->cpu_transcoder));
                return false;
        }

        /*
         * DSC and PSR2 cannot be enabled simultaneously. If a requested
         * resolution requires DSC to be enabled, priority is given to DSC
         * over PSR2.
         */
        if (crtc_state->dsc.compression_enable &&
            (DISPLAY_VER(display) < 14 && !display->platform.alderlake_p)) {
                drm_dbg_kms(display->drm,
                            "PSR2 cannot be enabled since DSC is enabled\n");
                return false;
        }

        if (DISPLAY_VER(display) >= 20) {
                psr_max_h = crtc_hdisplay;
                psr_max_v = crtc_vdisplay;
                max_bpp = crtc_state->pipe_bpp;
        } else if (IS_DISPLAY_VER(display, 12, 14)) {
                psr_max_h = 5120;
                psr_max_v = 3200;
                max_bpp = 30;
        } else if (IS_DISPLAY_VER(display, 10, 11)) {
                psr_max_h = 4096;
                psr_max_v = 2304;
                max_bpp = 24;
        } else if (DISPLAY_VER(display) == 9) {
                psr_max_h = 3640;
                psr_max_v = 2304;
                max_bpp = 24;
        }

        if (crtc_state->pipe_bpp > max_bpp) {
                drm_dbg_kms(display->drm,
                            "PSR2 not enabled, pipe bpp %d > max supported %d\n",
                            crtc_state->pipe_bpp, max_bpp);
                return false;
        }

        /* Wa_16011303918:adl-p */
        if (crtc_state->vrr.enable &&
            display->platform.alderlake_p && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0)) {
                drm_dbg_kms(display->drm,
                            "PSR2 not enabled, not compatible with HW stepping + VRR\n");
                return false;
        }

        if (!alpm_config_valid(intel_dp, crtc_state, false, false, true))
                return false;

        if (!crtc_state->enable_psr2_sel_fetch &&
            (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v)) {
                drm_dbg_kms(display->drm,
                            "PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
                            crtc_hdisplay, crtc_vdisplay,
                            psr_max_h, psr_max_v);
                return false;
        }

        tgl_dc3co_exitline_compute_config(intel_dp, crtc_state);

        return true;
}

static bool intel_sel_update_config_valid(struct intel_crtc_state *crtc_state,
                                          struct drm_connector_state *conn_state)
{
        struct intel_connector *connector = to_intel_connector(conn_state->connector);
        struct intel_dp *intel_dp = intel_attached_dp(connector);
        struct intel_display *display = to_intel_display(intel_dp);

        if (HAS_PSR2_SEL_FETCH(display) &&
            !intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) &&
            !HAS_PSR_HW_TRACKING(display)) {
                drm_dbg_kms(display->drm,
                            "Selective update not enabled, selective fetch not valid and no HW tracking available\n");
                goto unsupported;
        }

        if (!sel_update_global_enabled(intel_dp)) {
                drm_dbg_kms(display->drm,
                            "Selective update disabled by flag\n");
                goto unsupported;
        }

        if (!crtc_state->has_panel_replay && !intel_psr2_config_valid(intel_dp, crtc_state,
                                                                      conn_state))
                goto unsupported;

        if (!_compute_psr2_sdp_prior_scanline_indication(intel_dp, crtc_state)) {
                drm_dbg_kms(display->drm,
                            "Selective update not enabled, SDP indication do not fit in hblank\n");
                goto unsupported;
        }

        if (crtc_state->has_panel_replay) {
                if (DISPLAY_VER(display) < 14)
                        goto unsupported;

                if (!connector->dp.panel_replay_caps.su_support)
                        goto unsupported;

                if (intel_dsc_enabled_on_link(crtc_state) &&
                    connector->dp.panel_replay_caps.dsc_support !=
                    INTEL_DP_PANEL_REPLAY_DSC_SELECTIVE_UPDATE) {
                        drm_dbg_kms(display->drm,
                                    "Selective update with Panel Replay not enabled because it's not supported with DSC\n");
                        goto unsupported;
                }
        }

        if (crtc_state->crc_enabled) {
                drm_dbg_kms(display->drm,
                            "Selective update not enabled because it would inhibit pipe CRC calculation\n");
                goto unsupported;
        }

        if (!psr2_granularity_check(crtc_state, connector)) {
                drm_dbg_kms(display->drm,
                            "Selective update not enabled, SU granularity not compatible\n");
                goto unsupported;
        }

        crtc_state->enable_psr2_su_region_et = psr2_su_region_et_valid(connector,
                                                                       crtc_state->has_panel_replay);

        return true;

unsupported:
        crtc_state->enable_psr2_sel_fetch = false;
        return false;
}

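/*
 * Compute the PSR (as opposed to Panel Replay) part of the crtc state.
 * Returns false if PSR cannot be enabled for this state.
 */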
static bool _psr_compute_config(struct intel_dp *intel_dp,
                                struct intel_crtc_state *crtc_state,
                                struct drm_connector_state *conn_state)
{
        struct intel_display *display = to_intel_display(intel_dp);
        const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
        int entry_setup_frames;

        if (!CAN_PSR(intel_dp) || !display->params.enable_psr)
                return false;

        /*
         * Currently PSR doesn't work reliably with VRR enabled.
         */
        if (crtc_state->vrr.enable)
                return false;

        entry_setup_frames = intel_psr_entry_setup_frames(intel_dp, conn_state, adjusted_mode);

        if (entry_setup_frames >= 0) {
                crtc_state->entry_setup_frames = entry_setup_frames;
        } else {
                crtc_state->no_psr_reason = "PSR setup timing not met";
                drm_dbg_kms(display->drm,
                            "PSR condition failed: PSR setup timing not met\n");
                return false;
        }

        return true;
}

static inline bool compute_link_off_after_as_sdp_when_pr_active(struct intel_connector *connector)
{
        return (connector->dp.panel_replay_caps.dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_CAPABILITY)] &
                DP_PANEL_REPLAY_LINK_OFF_SUPPORTED_IN_PR_AFTER_ADAPTIVE_SYNC_SDP);
}

static inline bool compute_disable_as_sdp_when_pr_active(struct intel_connector *connector)
{
        return !(connector->dp.panel_replay_caps.dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_CAPABILITY)] &
                 DP_PANEL_REPLAY_ASYNC_VIDEO_TIMING_NOT_SUPPORTED_IN_PR);
}

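/*
 * Compute the Panel Replay part of the crtc state. Returns false if Panel
 * Replay cannot be enabled for this state.
 */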
static bool _panel_replay_compute_config(struct intel_crtc_state *crtc_state,
                                         const struct drm_connector_state *conn_state)
{
        struct intel_connector *connector =
                to_intel_connector(conn_state->connector);
        struct intel_dp *intel_dp = intel_attached_dp(connector);
        struct intel_display *display = to_intel_display(intel_dp);
        struct intel_hdcp *hdcp = &connector->hdcp;

        if (!CAN_PANEL_REPLAY(intel_dp))
                return false;

        if (!connector->dp.panel_replay_caps.support)
                return false;

        if (!panel_replay_global_enabled(intel_dp)) {
                drm_dbg_kms(display->drm, "Panel Replay disabled by flag\n");
                return false;
        }

        if (crtc_state->crc_enabled) {
                drm_dbg_kms(display->drm,
                            "Panel Replay not enabled because it would inhibit pipe CRC calculation\n");
                return false;
        }

        if (intel_dsc_enabled_on_link(crtc_state) &&
            connector->dp.panel_replay_caps.dsc_support ==
            INTEL_DP_PANEL_REPLAY_DSC_NOT_SUPPORTED) {
                drm_dbg_kms(display->drm,
                            "Panel Replay not enabled because it's not supported with DSC\n");
                return false;
        }

        crtc_state->link_off_after_as_sdp_when_pr_active = compute_link_off_after_as_sdp_when_pr_active(connector);
        crtc_state->disable_as_sdp_when_pr_active = compute_disable_as_sdp_when_pr_active(connector);

        if (!intel_dp_is_edp(intel_dp))
                return true;

        /* Remaining checks are for eDP only */

        if (to_intel_crtc(crtc_state->uapi.crtc)->pipe != PIPE_A &&
            to_intel_crtc(crtc_state->uapi.crtc)->pipe != PIPE_B)
                return false;

        /* 128b/132b Panel Replay is not supported on eDP */
        if (intel_dp_is_uhbr(crtc_state)) {
                drm_dbg_kms(display->drm,
                            "Panel Replay is not supported with 128b/132b\n");
                return false;
        }

        /* HW will not allow Panel Replay on eDP when HDCP enabled */
        if (conn_state->content_protection ==
            DRM_MODE_CONTENT_PROTECTION_DESIRED ||
            (conn_state->content_protection ==
             DRM_MODE_CONTENT_PROTECTION_ENABLED && hdcp->value ==
             DRM_MODE_CONTENT_PROTECTION_UNDESIRED)) {
                drm_dbg_kms(display->drm,
                            "Panel Replay is not supported with HDCP\n");
                return false;
        }

        if (!alpm_config_valid(intel_dp, crtc_state, true, true, false))
                return false;

        return true;
}

static bool intel_psr_needs_wa_18037818876(struct intel_dp *intel_dp,
                                           struct intel_crtc_state *crtc_state)
{
        struct intel_display *display = to_intel_display(intel_dp);

        return (DISPLAY_VER(display) == 20 && crtc_state->entry_setup_frames > 0 &&
                !crtc_state->has_sel_update);
}

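/*
 * Record the active pipes other than the PSR pipe into the crtc state for
 * Wa_16025596647. Not needed for Panel Replay.
 */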
static
void intel_psr_set_non_psr_pipes(struct intel_dp *intel_dp,
                                 struct intel_crtc_state *crtc_state)
{
        struct intel_display *display = to_intel_display(intel_dp);
        struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
        struct intel_crtc *crtc;
        u8 active_pipes = 0;

        /* Wa_16025596647 */
        if (DISPLAY_VER(display) != 20 &&
            !IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0))
                return;

        /* Not needed by Panel Replay */
        if (crtc_state->has_panel_replay)
                return;

        /* We ignore possible secondary PSR/Panel Replay capable eDP */
        for_each_intel_crtc(display->drm, crtc)
                active_pipes |= crtc->active ? BIT(crtc->pipe) : 0;

        active_pipes = intel_calc_active_pipes(state, active_pipes);

        crtc_state->active_non_psr_pipes = active_pipes &
                ~BIT(to_intel_crtc(crtc_state->uapi.crtc)->pipe);
}

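/**
 * intel_psr_compute_config - Compute the PSR/Panel Replay configuration
 * @intel_dp: Intel DP
 * @crtc_state: CRTC state
 * @conn_state: connector state
 *
 * Compute the PSR, Panel Replay and Selective Update bits of @crtc_state
 * during atomic check.
 */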
void intel_psr_compute_config(struct intel_dp *intel_dp,
                              struct intel_crtc_state *crtc_state,
                              struct drm_connector_state *conn_state)
{
        struct intel_display *display = to_intel_display(intel_dp);
        struct intel_connector *connector = to_intel_connector(conn_state->connector);
        const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;

        if (!psr_global_enabled(intel_dp)) {
                drm_dbg_kms(display->drm, "PSR disabled by flag\n");
                return;
        }

        if (intel_dp->psr.sink_not_reliable) {
                drm_dbg_kms(display->drm,
                            "PSR sink implementation is not reliable\n");
                return;
        }

        if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
                drm_dbg_kms(display->drm,
                            "PSR condition failed: Interlaced mode enabled\n");
                return;
        }

        /*
         * FIXME figure out what is wrong with PSR+joiner and
         * fix it. Presumably something related to the fact that
         * PSR is a transcoder level feature.
         */
        if (crtc_state->joiner_pipes) {
                drm_dbg_kms(display->drm,
                            "PSR disabled due to joiner\n");
                return;
        }

        /* Only used for state verification. */
        crtc_state->panel_replay_dsc_support = connector->dp.panel_replay_caps.dsc_support;
        crtc_state->has_panel_replay = _panel_replay_compute_config(crtc_state, conn_state);

        crtc_state->has_psr = crtc_state->has_panel_replay ? true :
                _psr_compute_config(intel_dp, crtc_state, conn_state);

        if (!crtc_state->has_psr)
                return;

        crtc_state->has_sel_update = intel_sel_update_config_valid(crtc_state, conn_state);
}

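/**
 * intel_psr_get_config - Read out the PSR/Panel Replay state
 * @encoder: Intel encoder
 * @pipe_config: CRTC state to fill in
 *
 * Read the current PSR/Panel Replay configuration from the hardware and the
 * software tracking into @pipe_config, for state readout and verification.
 */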
void intel_psr_get_config(struct intel_encoder *encoder,
                          struct intel_crtc_state *pipe_config)
{
        struct intel_display *display = to_intel_display(encoder);
        struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
        enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
        struct intel_dp *intel_dp;
        u32 val;

        if (!dig_port)
                return;

        intel_dp = &dig_port->dp;
        if (!(CAN_PSR(intel_dp) || CAN_PANEL_REPLAY(intel_dp)))
                return;

        mutex_lock(&intel_dp->psr.lock);
        if (!intel_dp->psr.enabled)
                goto unlock;

        if (intel_dp->psr.panel_replay_enabled) {
                pipe_config->has_psr = pipe_config->has_panel_replay = true;
        } else {
                /*
                 * Not possible to read back EDP_PSR/PSR2_CTL reliably, as they
                 * are enabled/disabled at runtime by frontbuffer tracking and
                 * other code paths.
                 */
                pipe_config->has_psr = true;
        }

        pipe_config->has_sel_update = intel_dp->psr.sel_update_enabled;
        pipe_config->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);

        if (!intel_dp->psr.sel_update_enabled)
                goto unlock;

        if (HAS_PSR2_SEL_FETCH(display)) {
                val = intel_de_read(display,
                                    PSR2_MAN_TRK_CTL(display, cpu_transcoder));
                if (val & PSR2_MAN_TRK_CTL_ENABLE)
                        pipe_config->enable_psr2_sel_fetch = true;
        }

        pipe_config->enable_psr2_su_region_et = intel_dp->psr.su_region_et_enabled;

        if (DISPLAY_VER(display) >= 12) {
                val = intel_de_read(display,
                                    TRANS_EXITLINE(display, cpu_transcoder));
                pipe_config->dc3co_exitline = REG_FIELD_GET(EXITLINE_MASK, val);
        }
unlock:
        mutex_unlock(&intel_dp->psr.lock);
}

static void intel_psr_activate(struct intel_dp *intel_dp)
{
        struct intel_display *display = to_intel_display(intel_dp);
        enum transcoder cpu_transcoder = intel_dp->psr.transcoder;

        drm_WARN_ON(display->drm,
                    transcoder_has_psr2(display, cpu_transcoder) &&
                    intel_de_read(display, EDP_PSR2_CTL(display, cpu_transcoder)) & EDP_PSR2_ENABLE);

        drm_WARN_ON(display->drm,
                    intel_de_read(display, psr_ctl_reg(display, cpu_transcoder)) & EDP_PSR_ENABLE);

        drm_WARN_ON(display->drm, intel_dp->psr.active);

        drm_WARN_ON(display->drm, !intel_dp->psr.enabled);

        lockdep_assert_held(&intel_dp->psr.lock);

        /* psr1, psr2 and panel-replay are mutually exclusive. */
        if (intel_dp->psr.panel_replay_enabled)
                dg2_activate_panel_replay(intel_dp);
        else if (intel_dp->psr.sel_update_enabled)
                hsw_activate_psr2(intel_dp);
        else
                hsw_activate_psr1(intel_dp);

        intel_dp->psr.active = true;
        intel_dp->psr.no_psr_reason = NULL;
}

/*
 * Wa_16013835468
 * Wa_14015648006
 */
static void wm_optimization_wa(struct intel_dp *intel_dp,
                               const struct intel_crtc_state *crtc_state)
{
        struct intel_display *display = to_intel_display(intel_dp);
        enum pipe pipe = intel_dp->psr.pipe;
        bool activate = false;

        /* Wa_14015648006 */
        if (IS_DISPLAY_VER(display, 11, 14) && crtc_state->wm_level_disabled)
                activate = true;

        /* Wa_16013835468 */
        if (DISPLAY_VER(display) == 12 &&
            crtc_state->hw.adjusted_mode.crtc_vblank_start !=
            crtc_state->hw.adjusted_mode.crtc_vdisplay)
                activate = true;

        if (activate)
                intel_de_rmw(display, GEN8_CHICKEN_DCPR_1,
                             0, LATENCY_REPORTING_REMOVED(pipe));
        else
                intel_de_rmw(display, GEN8_CHICKEN_DCPR_1,
                             LATENCY_REPORTING_REMOVED(pipe), 0);
}

static void intel_psr_enable_source(struct intel_dp *intel_dp,
                                    const struct intel_crtc_state *crtc_state)
{
        struct intel_display *display = to_intel_display(intel_dp);
        enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
        u32 mask = 0;

        /*
         * Only HSW and BDW have PSR AUX registers that need to be set up.
         * SKL+ use hardcoded values for PSR AUX transactions.
         */
        if (DISPLAY_VER(display) < 9)
                hsw_psr_setup_aux(intel_dp);

        /*
         * Per Spec: Avoid continuous PSR exit by masking MEMUP and HPD. Also
         * mask LPSP to avoid a dependency on other drivers that might block
         * runtime_pm, besides preventing other hw tracking issues now that we
         * can rely on frontbuffer tracking.
         *
         * From bspec prior to LunarLake:
         * Only PSR_MASK[Mask FBC modify] and PSR_MASK[Mask Hotplug] are used in
         * panel replay mode.
         *
         * From bspec beyond LunarLake:
         * Panel Replay on DP: No bits are applicable
         * Panel Replay on eDP: All bits are applicable
         */
        if (DISPLAY_VER(display) < 20 || intel_dp_is_edp(intel_dp))
                mask = EDP_PSR_DEBUG_MASK_HPD;

        if (intel_dp_is_edp(intel_dp)) {
                mask |= EDP_PSR_DEBUG_MASK_MEMUP;

                /*
                 * For some unknown reason on HSW non-ULT (or at least on
                 * Dell Latitude E6540) external displays start to flicker
                 * when PSR is enabled on the eDP. SR/PC6 residency is much
                 * higher than should be possible with an external display.
                 * As a workaround leave LPSP unmasked to prevent PSR entry
                 * when external displays are active.
                 */
                if (DISPLAY_VER(display) >= 8 || display->platform.haswell_ult)
                        mask |= EDP_PSR_DEBUG_MASK_LPSP;

                if (DISPLAY_VER(display) < 20)
                        mask |= EDP_PSR_DEBUG_MASK_MAX_SLEEP;

                /*
                 * No separate pipe reg write mask on hsw/bdw, so have to unmask all
                 * registers in order to keep the CURSURFLIVE tricks working :(
                 */
                if (IS_DISPLAY_VER(display, 9, 10))
                        mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;

                /* allow PSR with sprite enabled */
                if (display->platform.haswell)
                        mask |= EDP_PSR_DEBUG_MASK_SPRITE_ENABLE;
        }

        intel_de_write(display, psr_debug_reg(display, cpu_transcoder), mask);

        psr_irq_control(intel_dp);

        /*
         * TODO: if future platforms supports DC3CO in more than one
         * transcoder, EXITLINE will need to be unset when disabling PSR
         */
        if (intel_dp->psr.dc3co_exitline)
                intel_de_rmw(display,
                             TRANS_EXITLINE(display, cpu_transcoder),
                             EXITLINE_MASK,
                             intel_dp->psr.dc3co_exitline << EXITLINE_SHIFT | EXITLINE_ENABLE);

        if (HAS_PSR_HW_TRACKING(display) && HAS_PSR2_SEL_FETCH(display))
                intel_de_rmw(display, CHICKEN_PAR1_1, IGNORE_PSR2_HW_TRACKING,
                             intel_dp->psr.psr2_sel_fetch_enabled ?
                             IGNORE_PSR2_HW_TRACKING : 0);

        /*
         * Wa_16013835468
         * Wa_14015648006
         */
        wm_optimization_wa(intel_dp, crtc_state);

        if (intel_dp->psr.sel_update_enabled) {
                if (DISPLAY_VER(display) == 9)
                        intel_de_rmw(display, CHICKEN_TRANS(display, cpu_transcoder), 0,
                                     PSR2_VSC_ENABLE_PROG_HEADER |
                                     PSR2_ADD_VERTICAL_LINE_COUNT);

                /*
                 * Wa_16014451276:adlp,mtl[a0,b0]
                 * All supported adlp panels have 1-based X granularity; this may
                 * cause issues if non-supported panels are used.
                 */
                if (!intel_dp->psr.panel_replay_enabled &&
                    (IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_B0) ||
                     display->platform.alderlake_p))
                        intel_de_rmw(display, CHICKEN_TRANS(display, cpu_transcoder),
                                     0, ADLP_1_BASED_X_GRANULARITY);

                /* Wa_16012604467:adlp,mtl[a0,b0] */
                if (!intel_dp->psr.panel_replay_enabled &&
                    IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_B0))
                        intel_de_rmw(display,
                                     MTL_CLKGATE_DIS_TRANS(display, cpu_transcoder),
                                     0,
                                     MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS);
                else if (display->platform.alderlake_p)
                        intel_de_rmw(display, CLKGATE_DIS_MISC, 0,
                                     CLKGATE_DIS_MISC_DMASC_GATING_DIS);
        }

        /* Wa_16025596647 */
        if ((DISPLAY_VER(display) == 20 ||
             IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) &&
            !intel_dp->psr.panel_replay_enabled)
                intel_dmc_block_pkgc(display, intel_dp->psr.pipe, true);

        intel_alpm_configure(intel_dp, crtc_state);
}

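/*
 * Check whether a stale PSR error is still latched in the IIR; if so, mark
 * the sink as not reliable and report failure so that PSR is not enabled.
 */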
static bool psr_interrupt_error_check(struct intel_dp *intel_dp)
{
        struct intel_display *display = to_intel_display(intel_dp);
        enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
        u32 val;

        if (intel_dp->psr.panel_replay_enabled)
                goto no_err;

        /*
         * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
         * will still keep the error set even after the reset done in the
         * irq_preinstall and irq_uninstall hooks.
         * Enabling PSR in this situation causes the screen to freeze the
         * first time the PSR HW tries to activate, so keep PSR disabled
         * to avoid any rendering problems.
         */
        val = intel_de_read(display, psr_iir_reg(display, cpu_transcoder));
        val &= psr_irq_psr_error_bit_get(intel_dp);
        if (val) {
                intel_dp->psr.sink_not_reliable = true;
                drm_dbg_kms(display->drm,
                            "PSR interruption error set, not enabling PSR\n");
                return false;
        }

no_err:
        return true;
}

static void intel_psr_enable_locked(struct intel_dp *intel_dp,
                                    const struct intel_crtc_state *crtc_state)
{
        struct intel_display *display = to_intel_display(intel_dp);
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        u32 val;

        drm_WARN_ON(display->drm, intel_dp->psr.enabled);

        intel_dp->psr.sel_update_enabled = crtc_state->has_sel_update;
        intel_dp->psr.panel_replay_enabled = crtc_state->has_panel_replay;
        intel_dp->psr.busy_frontbuffer_bits = 0;
        intel_dp->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
        intel_dp->psr.transcoder = crtc_state->cpu_transcoder;
        /* DC5/DC6 requires at least 6 idle frames */
        val = usecs_to_jiffies(intel_get_frame_time_us(crtc_state) * 6);
        intel_dp->psr.dc3co_exit_delay = val;
        intel_dp->psr.dc3co_exitline = crtc_state->dc3co_exitline;
        intel_dp->psr.psr2_sel_fetch_enabled = crtc_state->enable_psr2_sel_fetch;
        intel_dp->psr.su_region_et_enabled = crtc_state->enable_psr2_su_region_et;
        intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
        intel_dp->psr.req_psr2_sdp_prior_scanline =
                crtc_state->req_psr2_sdp_prior_scanline;
        intel_dp->psr.active_non_psr_pipes = crtc_state->active_non_psr_pipes;
        intel_dp->psr.pkg_c_latency_used = crtc_state->pkg_c_latency_used;
        intel_dp->psr.io_wake_lines = crtc_state->alpm_state.io_wake_lines;
        intel_dp->psr.fast_wake_lines = crtc_state->alpm_state.fast_wake_lines;
        intel_dp->psr.entry_setup_frames = crtc_state->entry_setup_frames;

        if (!psr_interrupt_error_check(intel_dp))
                return;

        if (intel_dp->psr.panel_replay_enabled)
                drm_dbg_kms(display->drm, "Enabling Panel Replay\n");
        else
                drm_dbg_kms(display->drm, "Enabling PSR%s\n",
                            intel_dp->psr.sel_update_enabled ? "2" : "1");

        /*
         * The sink PSR enable is written here only for PSR; the Panel Replay
         * enable bit has already been written at this point. Sink ALPM is
         * enabled here for both PSR and Panel Replay, see
         * intel_psr_panel_replay_enable_sink. Modifiers/options:
         *  - Selective Update
         *  - Region Early Transport
         *  - Selective Update Region Scanline Capture
         *  - VSC_SDP_CRC
         *  - HPD on different Errors
         *  - CRC verification
         * are written for PSR and Panel Replay here.
         */
        intel_psr_enable_sink(intel_dp, crtc_state);

        if (intel_dp_is_edp(intel_dp))
                intel_snps_phy_update_psr_power_state(&dig_port->base, true);

        intel_psr_enable_source(intel_dp, crtc_state);
        intel_dp->psr.enabled = true;
        intel_dp->psr.pause_counter = 0;

        /*
         * Link_ok is sticky and set here on PSR enable. We can assume link
         * training is complete as we never continue to PSR enable with an
         * untrained link. Link_ok is kept set until the first short pulse
         * interrupt. This is targeted to work around panels reporting a bad
         * link after PSR is enabled.
         */
        intel_dp->psr.link_ok = true;

        intel_psr_activate(intel_dp);
}

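/*
 * Deactivate PSR/PSR2/Panel Replay in the hardware. psr.enabled is left
 * untouched so that PSR can be re-activated later.
 */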
static void intel_psr_exit(struct intel_dp *intel_dp)
{
        struct intel_display *display = to_intel_display(intel_dp);
        enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
        u32 val;

        if (!intel_dp->psr.active) {
                if (transcoder_has_psr2(display, cpu_transcoder)) {
                        val = intel_de_read(display,
                                            EDP_PSR2_CTL(display, cpu_transcoder));
                        drm_WARN_ON(display->drm, val & EDP_PSR2_ENABLE);
                }

                val = intel_de_read(display,
                                    psr_ctl_reg(display, cpu_transcoder));
                drm_WARN_ON(display->drm, val & EDP_PSR_ENABLE);

                return;
        }

        if (intel_dp->psr.panel_replay_enabled) {
                intel_de_rmw(display, TRANS_DP2_CTL(intel_dp->psr.transcoder),
                             TRANS_DP2_PANEL_REPLAY_ENABLE, 0);
        } else if (intel_dp->psr.sel_update_enabled) {
                tgl_disallow_dc3co_on_psr2_exit(intel_dp);

                val = intel_de_rmw(display,
                                   EDP_PSR2_CTL(display, cpu_transcoder),
                                   EDP_PSR2_ENABLE, 0);

                drm_WARN_ON(display->drm, !(val & EDP_PSR2_ENABLE));
        } else {
                if ((DISPLAY_VER(display) == 20 ||
                     IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) &&
                        intel_dp->psr.pkg_c_latency_used)
                        intel_dmc_start_pkgc_exit_at_start_of_undelayed_vblank(display,
                                                                       intel_dp->psr.pipe,
                                                                       false);

                val = intel_de_rmw(display,
                                   psr_ctl_reg(display, cpu_transcoder),
                                   EDP_PSR_ENABLE, 0);

                drm_WARN_ON(display->drm, !(val & EDP_PSR_ENABLE));
        }
        intel_dp->psr.active = false;
}

static void intel_psr_wait_exit_locked(struct intel_dp *intel_dp)
{
        struct intel_display *display = to_intel_display(intel_dp);
        enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
        i915_reg_t psr_status;
        u32 psr_status_mask;

        if (intel_dp_is_edp(intel_dp) && (intel_dp->psr.sel_update_enabled ||
                                          intel_dp->psr.panel_replay_enabled)) {
                psr_status = EDP_PSR2_STATUS(display, cpu_transcoder);
                psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
        } else {
                psr_status = psr_status_reg(display, cpu_transcoder);
                psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
        }

        /* Wait till PSR is idle */
        if (intel_de_wait_for_clear_ms(display, psr_status,
                                       psr_status_mask, 2000))
                drm_err(display->drm, "Timed out waiting PSR idle state\n");
}

static void intel_psr_disable_locked(struct intel_dp *intel_dp)
{
        struct intel_display *display = to_intel_display(intel_dp);
        enum transcoder cpu_transcoder = intel_dp->psr.transcoder;

        lockdep_assert_held(&intel_dp->psr.lock);

        if (!intel_dp->psr.enabled)
                return;

        if (intel_dp->psr.panel_replay_enabled)
                drm_dbg_kms(display->drm, "Disabling Panel Replay\n");
        else
                drm_dbg_kms(display->drm, "Disabling PSR%s\n",
                            intel_dp->psr.sel_update_enabled ? "2" : "1");

        intel_psr_exit(intel_dp);
        intel_psr_wait_exit_locked(intel_dp);

        /*
         * Wa_16013835468
         * Wa_14015648006
         */
        if (DISPLAY_VER(display) >= 11)
                intel_de_rmw(display, GEN8_CHICKEN_DCPR_1,
                             LATENCY_REPORTING_REMOVED(intel_dp->psr.pipe), 0);

        if (intel_dp->psr.sel_update_enabled) {
                /* Wa_16012604467:adlp,mtl[a0,b0] */
                if (!intel_dp->psr.panel_replay_enabled &&
                    IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_B0))
                        intel_de_rmw(display,
                                     MTL_CLKGATE_DIS_TRANS(display, cpu_transcoder),
                                     MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS, 0);
                else if (display->platform.alderlake_p)
                        intel_de_rmw(display, CLKGATE_DIS_MISC,
                                     CLKGATE_DIS_MISC_DMASC_GATING_DIS, 0);
        }

        if (intel_dp_is_edp(intel_dp))
                intel_snps_phy_update_psr_power_state(&dp_to_dig_port(intel_dp)->base, false);

        if (intel_dp->psr.panel_replay_enabled && intel_dp_is_edp(intel_dp))
                intel_alpm_disable(intel_dp);

        /* Disable PSR on Sink */
        if (!intel_dp->psr.panel_replay_enabled) {
                drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);

                if (intel_dp->psr.sel_update_enabled)
                        drm_dp_dpcd_writeb(&intel_dp->aux,
                                           DP_RECEIVER_ALPM_CONFIG, 0);
        }

        /* Wa_16025596647 */
        if ((DISPLAY_VER(display) == 20 ||
             IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) &&
            !intel_dp->psr.panel_replay_enabled)
                intel_dmc_block_pkgc(display, intel_dp->psr.pipe, false);

        intel_dp->psr.enabled = false;
        intel_dp->psr.panel_replay_enabled = false;
        intel_dp->psr.sel_update_enabled = false;
        intel_dp->psr.psr2_sel_fetch_enabled = false;
        intel_dp->psr.su_region_et_enabled = false;
        intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
        intel_dp->psr.active_non_psr_pipes = 0;
        intel_dp->psr.pkg_c_latency_used = 0;
}

/**
 * intel_psr_disable - Disable PSR
 * @intel_dp: Intel DP
 * @old_crtc_state: old CRTC state
 *
 * This function needs to be called before disabling pipe.
 */
void intel_psr_disable(struct intel_dp *intel_dp,
                       const struct intel_crtc_state *old_crtc_state)
{
        struct intel_display *display = to_intel_display(intel_dp);

        if (!old_crtc_state->has_psr)
                return;

        if (drm_WARN_ON(display->drm, !CAN_PSR(intel_dp) &&
                        !CAN_PANEL_REPLAY(intel_dp)))
                return;

        mutex_lock(&intel_dp->psr.lock);

        intel_psr_disable_locked(intel_dp);

        intel_dp->psr.link_ok = false;

        mutex_unlock(&intel_dp->psr.lock);
        cancel_work_sync(&intel_dp->psr.work);
        cancel_delayed_work_sync(&intel_dp->psr.dc3co_work);
}

/**
 * intel_psr_pause - Pause PSR
 * @intel_dp: Intel DP
 *
 * This function needs to be called after enabling PSR.
 */
void intel_psr_pause(struct intel_dp *intel_dp)
{
        struct intel_psr *psr = &intel_dp->psr;

        if (!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp))
                return;

        mutex_lock(&psr->lock);

        if (!psr->enabled) {
                mutex_unlock(&psr->lock);
                return;
        }

        if (intel_dp->psr.pause_counter++ == 0) {
                intel_psr_exit(intel_dp);
                intel_psr_wait_exit_locked(intel_dp);
        }

        mutex_unlock(&psr->lock);

        cancel_work_sync(&psr->work);
        cancel_delayed_work_sync(&psr->dc3co_work);
}

/**
 * intel_psr_resume - Resume PSR
 * @intel_dp: Intel DP
 *
 * This function needs to be called after pausing PSR.
 */
void intel_psr_resume(struct intel_dp *intel_dp)
{
        struct intel_display *display = to_intel_display(intel_dp);
        struct intel_psr *psr = &intel_dp->psr;

        if (!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp))
                return;

        mutex_lock(&psr->lock);

        if (!psr->enabled)
                goto out;

        if (!psr->pause_counter) {
                drm_warn(display->drm, "Unbalanced PSR pause/resume!\n");
                goto out;
        }

        if (--intel_dp->psr.pause_counter == 0)
                intel_psr_activate(intel_dp);

out:
        mutex_unlock(&psr->lock);
}

/**
 * intel_psr_needs_vblank_notification - Check if PSR needs vblank enable/disable
 * notification.
 * @crtc_state: CRTC state
 *
 * We need to block DC6 entry in case of Panel Replay as enabling VBI doesn't
 * prevent it. Panel Replay switches the main link off on DC entry, which means
 * vblank interrupts are not fired and that is a problem if user-space is
 * polling for vblank events. Also Wa_16025596647 needs to know when vblank is
 * enabled/disabled.
 */
bool intel_psr_needs_vblank_notification(const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct intel_display *display = to_intel_display(crtc_state);
        struct intel_encoder *encoder;

        for_each_encoder_on_crtc(crtc->base.dev, &crtc->base, encoder) {
                struct intel_dp *intel_dp;

                if (!intel_encoder_is_dp(encoder))
                        continue;

                intel_dp = enc_to_intel_dp(encoder);

                if (!intel_dp_is_edp(intel_dp))
                        continue;

                if (CAN_PANEL_REPLAY(intel_dp))
                        return true;

                if ((DISPLAY_VER(display) == 20 ||
                     IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) &&
                    CAN_PSR(intel_dp))
                        return true;
        }

        return false;
}

/**
 * intel_psr_trigger_frame_change_event - Trigger "Frame Change" event
 * @dsb: DSB context
 * @state: the atomic state
 * @crtc: the CRTC
 *
 * Generate PSR "Frame Change" event.
 */
void intel_psr_trigger_frame_change_event(struct intel_dsb *dsb,
                                          struct intel_atomic_state *state,
                                          struct intel_crtc *crtc)
{
        const struct intel_crtc_state *crtc_state =
                intel_pre_commit_crtc_state(state, crtc);
        struct intel_display *display = to_intel_display(crtc);

        if (crtc_state->has_psr)
                intel_de_write_dsb(display, dsb,
                                   CURSURFLIVE(display, crtc->pipe), 0);
}

/**
 * intel_psr_min_set_context_latency - Minimum 'set context latency' lines needed by PSR
 * @crtc_state: the crtc state
 *
 * Return minimum SCL lines/delay needed by PSR.
 */
int intel_psr_min_set_context_latency(const struct intel_crtc_state *crtc_state)
{
        return _intel_psr_min_set_context_latency(crtc_state,
                                                  crtc_state->has_panel_replay,
                                                  crtc_state->has_sel_update);
}

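/*
 * The PSR2_MAN_TRK_CTL bit layout differs between ADL-P / display 14+ and
 * older platforms; these helpers return the bits for the running platform.
 */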
static u32 man_trk_ctl_enable_bit_get(struct intel_display *display)
{
        return display->platform.alderlake_p || DISPLAY_VER(display) >= 14 ? 0 :
                PSR2_MAN_TRK_CTL_ENABLE;
}

static u32 man_trk_ctl_single_full_frame_bit_get(struct intel_display *display)
{
        return display->platform.alderlake_p || DISPLAY_VER(display) >= 14 ?
               ADLP_PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME :
               PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME;
}

static u32 man_trk_ctl_partial_frame_bit_get(struct intel_display *display)
{
        return display->platform.alderlake_p || DISPLAY_VER(display) >= 14 ?
               ADLP_PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE :
               PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE;
}

static u32 man_trk_ctl_continuos_full_frame(struct intel_display *display)
{
        return display->platform.alderlake_p || DISPLAY_VER(display) >= 14 ?
               ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME :
               PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME;
}

static void intel_psr_force_update(struct intel_dp *intel_dp)
{
        struct intel_display *display = to_intel_display(intel_dp);

        /*
         * Display WA #0884: skl+
         * This documented WA for bxt can be safely applied
         * broadly so we can force HW tracking to exit PSR
         * instead of disabling and re-enabling.
         * The workaround tells us to write 0 to CUR_SURFLIVE_A,
         * but it makes more sense to write to the currently active
         * pipe.
         *
         * This workaround does not exist for platforms with display 10 or
         * newer, but testing proved that it works up to display 13; for
         * anything newer, testing will be needed.
         */
        intel_de_write(display, CURSURFLIVE(display, intel_dp->psr.pipe), 0);
}

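/**
 * intel_psr2_program_trans_man_trk_ctl - Program PSR2 manual tracking
 * @dsb: DSB context, or NULL to write the registers directly
 * @crtc_state: the CRTC state
 *
 * Write the precomputed PSR2 manual tracking (selective fetch) configuration,
 * and the early transport pipe source size when SU region early transport is
 * enabled.
 */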
void intel_psr2_program_trans_man_trk_ctl(struct intel_dsb *dsb,
                                          const struct intel_crtc_state *crtc_state)
{
        struct intel_display *display = to_intel_display(crtc_state);
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
        struct intel_encoder *encoder;

        if (!crtc_state->enable_psr2_sel_fetch)
                return;

        for_each_intel_encoder_mask_with_psr(display->drm, encoder,
                                             crtc_state->uapi.encoder_mask) {
                struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

                if (!dsb)
                        lockdep_assert_held(&intel_dp->psr.lock);

                if (DISPLAY_VER(display) < 20 && intel_dp->psr.psr2_sel_fetch_cff_enabled)
                        return;
                break;
        }

        intel_de_write_dsb(display, dsb,
                           PSR2_MAN_TRK_CTL(display, cpu_transcoder),
                           crtc_state->psr2_man_track_ctl);

        if (!crtc_state->enable_psr2_su_region_et)
                return;

        intel_de_write_dsb(display, dsb, PIPE_SRCSZ_ERLY_TPT(crtc->pipe),
                           crtc_state->pipe_srcsz_early_tpt);

        if (!crtc_state->dsc.compression_enable)
                return;

        intel_dsc_su_et_parameters_configure(dsb, encoder, crtc_state,
                                             drm_rect_height(&crtc_state->psr2_su_area));
}

static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
                                  bool full_update)
{
        struct intel_display *display = to_intel_display(crtc_state);
        u32 val = man_trk_ctl_enable_bit_get(display);

        /* SF partial frame enable has to be set even on full update */
        val |= man_trk_ctl_partial_frame_bit_get(display);

        if (full_update) {
                val |= man_trk_ctl_continuos_full_frame(display);
                goto exit;
        }

        if (crtc_state->psr2_su_area.y1 == -1)
                goto exit;

        if (display->platform.alderlake_p || DISPLAY_VER(display) >= 14) {
                val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(crtc_state->psr2_su_area.y1);
                val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(crtc_state->psr2_su_area.y2 - 1);
        } else {
                drm_WARN_ON(crtc_state->uapi.crtc->dev,
                            crtc_state->psr2_su_area.y1 % 4 ||
                            crtc_state->psr2_su_area.y2 % 4);

                val |= PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(
                        crtc_state->psr2_su_area.y1 / 4 + 1);
                val |= PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(
                        crtc_state->psr2_su_area.y2 / 4 + 1);
        }
exit:
        crtc_state->psr2_man_track_ctl = val;
}

static u32 psr2_pipe_srcsz_early_tpt_calc(struct intel_crtc_state *crtc_state,
                                          bool full_update)
{
        int width, height;

        if (!crtc_state->enable_psr2_su_region_et || full_update)
                return 0;

        width = drm_rect_width(&crtc_state->psr2_su_area);
        height = drm_rect_height(&crtc_state->psr2_su_area);

        return PIPESRC_WIDTH(width - 1) | PIPESRC_HEIGHT(height - 1);
}

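/*
 * Grow @overlap_damage_area vertically so that it also covers @damage_area,
 * after clipping @damage_area to @pipe_src.
 */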
static void clip_area_update(struct drm_rect *overlap_damage_area,
                             struct drm_rect *damage_area,
                             struct drm_rect *pipe_src)
{
        if (!drm_rect_intersect(damage_area, pipe_src))
                return;

        if (overlap_damage_area->y1 == -1) {
                overlap_damage_area->y1 = damage_area->y1;
                overlap_damage_area->y2 = damage_area->y2;
                return;
        }

        if (damage_area->y1 < overlap_damage_area->y1)
                overlap_damage_area->y1 = damage_area->y1;

        if (damage_area->y2 > overlap_damage_area->y2)
                overlap_damage_area->y2 = damage_area->y2;
}

static bool intel_psr2_sel_fetch_pipe_alignment(struct intel_crtc_state *crtc_state)
{
        struct intel_display *display = to_intel_display(crtc_state);
        const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
        u16 y_alignment;
        bool su_area_changed = false;

        /* ADLP aligns the SU region to vdsc slice height in case dsc is enabled */
        if (crtc_state->dsc.compression_enable &&
            (display->platform.alderlake_p || DISPLAY_VER(display) >= 14))
                y_alignment = vdsc_cfg->slice_height;
        else
                y_alignment = crtc_state->su_y_granularity;

        if (crtc_state->psr2_su_area.y1 % y_alignment) {
                crtc_state->psr2_su_area.y1 -= crtc_state->psr2_su_area.y1 % y_alignment;
                su_area_changed = true;
        }

        if (crtc_state->psr2_su_area.y2 % y_alignment) {
                crtc_state->psr2_su_area.y2 = ((crtc_state->psr2_su_area.y2 /
                                                y_alignment) + 1) * y_alignment;
                su_area_changed = true;
        }

        return su_area_changed;
}

/*
 * When early transport is in use we need to extend SU area to cover
 * cursor fully when cursor is in SU area.
 */
static void
intel_psr2_sel_fetch_et_alignment(struct intel_atomic_state *state,
                                  struct intel_crtc *crtc,
                                  bool *cursor_in_su_area)
{
        struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
        struct intel_plane_state *new_plane_state;
        struct intel_plane *plane;
        int i;

        if (!crtc_state->enable_psr2_su_region_et)
                return;

        for_each_new_intel_plane_in_state(state, plane, new_plane_state, i) {
                struct drm_rect inter;

                if (new_plane_state->hw.crtc != crtc_state->uapi.crtc)
                        continue;

                if (plane->id != PLANE_CURSOR)
                        continue;

                if (!new_plane_state->uapi.visible)
                        continue;

                inter = crtc_state->psr2_su_area;
                if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst))
                        continue;

                clip_area_update(&crtc_state->psr2_su_area, &new_plane_state->uapi.dst,
                                 &crtc_state->pipe_src);
                *cursor_in_su_area = true;
        }
}

/*
 * TODO: Not clear how to handle planes with a negative position; also,
 * planes are not updated if they have a negative X position, so for now
 * do a full update in these cases.
 *
 * Plane scaling and rotation are not supported by selective fetch and both
 * properties can change without a modeset, so they need to be checked at
 * every atomic commit.
 */
static bool psr2_sel_fetch_plane_state_supported(const struct intel_plane_state *plane_state)
{
        if (plane_state->uapi.dst.y1 < 0 ||
            plane_state->uapi.dst.x1 < 0 ||
            plane_state->scaler_id >= 0 ||
            plane_state->hw.rotation != DRM_MODE_ROTATE_0)
                return false;

        return true;
}

/*
 * Check for pipe properties that are not supported by selective fetch.
 *
 * TODO: pipe scaling causes a modeset but skl_update_scaler_crtc() is executed
 * after intel_psr_compute_config(), so for now keeping PSR2 selective fetch
 * enabled and going to the full update path.
 */
static bool psr2_sel_fetch_pipe_state_supported(const struct intel_crtc_state *crtc_state)
{
        if (crtc_state->scaler_state.scaler_id >= 0 ||
            crtc_state->async_flip_planes)
                return false;

        return true;
}

/* Wa 14019834836 */
static void intel_psr_apply_pr_link_on_su_wa(struct intel_crtc_state *crtc_state)
{
        struct intel_display *display = to_intel_display(crtc_state);
        struct intel_encoder *encoder;
        int hactive_limit;

        if (crtc_state->psr2_su_area.y1 != 0 ||
            crtc_state->psr2_su_area.y2 != 0)
                return;

        if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
                hactive_limit = intel_dp_is_uhbr(crtc_state) ? 1230 : 546;
        else
                hactive_limit = intel_dp_is_uhbr(crtc_state) ? 615 : 273;

        if (crtc_state->hw.adjusted_mode.hdisplay < hactive_limit)
                return;

        for_each_intel_encoder_mask_with_psr(display->drm, encoder,
                                             crtc_state->uapi.encoder_mask) {
                struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

                if (!intel_dp_is_edp(intel_dp) &&
                    intel_dp->psr.panel_replay_enabled &&
                    intel_dp->psr.sel_update_enabled) {
                        crtc_state->psr2_su_area.y2++;
                        return;
                }
        }
}

static void
intel_psr_apply_su_area_workarounds(struct intel_crtc_state *crtc_state)
{
        struct intel_display *display = to_intel_display(crtc_state);

        /* Wa_14014971492 */
        if (!crtc_state->has_panel_replay &&
            ((IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_B0) ||
              display->platform.alderlake_p || display->platform.tigerlake)) &&
            crtc_state->splitter.enable)
                crtc_state->psr2_su_area.y1 = 0;

        /* Wa 14019834836 */
        if (DISPLAY_VER(display) == 30)
                intel_psr_apply_pr_link_on_su_wa(crtc_state);
}

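/**
 * intel_psr2_sel_fetch_update - Compute the PSR2 selective fetch configuration
 * @state: the atomic state
 * @crtc: the CRTC
 *
 * Compute the selective update area for @crtc during atomic check, falling
 * back to a full frame update when selective fetch cannot be used.
 *
 * Returns: 0 on success or a negative error code.
 */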
int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
                                struct intel_crtc *crtc)
{
        struct intel_display *display = to_intel_display(state);
        struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
        struct intel_plane_state *new_plane_state, *old_plane_state;
        struct intel_plane *plane;
        bool full_update = false, su_area_changed;
        int i, ret;

        if (!crtc_state->enable_psr2_sel_fetch)
                return 0;

        if (!psr2_sel_fetch_pipe_state_supported(crtc_state)) {
                full_update = true;
                goto skip_sel_fetch_set_loop;
        }

        crtc_state->psr2_su_area.x1 = 0;
        crtc_state->psr2_su_area.y1 = -1;
        crtc_state->psr2_su_area.x2 = drm_rect_width(&crtc_state->pipe_src);
        crtc_state->psr2_su_area.y2 = -1;

        /*
         * Calculate minimal selective fetch area of each plane and calculate
         * the pipe damaged area.
         * In the next loop the plane selective fetch area will actually be set
         * using whole pipe damaged area.
         */
        for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
                                             new_plane_state, i) {
                struct drm_rect src, damaged_area = { .x1 = 0, .y1 = -1,
                                                      .x2 = INT_MAX };

                if (new_plane_state->hw.crtc != crtc_state->uapi.crtc)
                        continue;

                if (!new_plane_state->uapi.visible &&
                    !old_plane_state->uapi.visible)
                        continue;

                if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) {
                        full_update = true;
                        break;
                }

                /*
                 * If visibility changed or the plane moved, mark the whole
                 * plane area as damaged, as it needs a complete redraw in both
                 * the new and the old position.
                 */
                if (new_plane_state->uapi.visible != old_plane_state->uapi.visible ||
                    !drm_rect_equals(&new_plane_state->uapi.dst,
                                     &old_plane_state->uapi.dst)) {
                        if (old_plane_state->uapi.visible) {
                                damaged_area.y1 = old_plane_state->uapi.dst.y1;
                                damaged_area.y2 = old_plane_state->uapi.dst.y2;
                                clip_area_update(&crtc_state->psr2_su_area, &damaged_area,
                                                 &crtc_state->pipe_src);
                        }

                        if (new_plane_state->uapi.visible) {
                                damaged_area.y1 = new_plane_state->uapi.dst.y1;
                                damaged_area.y2 = new_plane_state->uapi.dst.y2;
                                clip_area_update(&crtc_state->psr2_su_area, &damaged_area,
                                                 &crtc_state->pipe_src);
                        }
                        continue;
                } else if (new_plane_state->uapi.alpha != old_plane_state->uapi.alpha) {
                        /* If alpha changed mark the whole plane area as damaged */
                        damaged_area.y1 = new_plane_state->uapi.dst.y1;
                        damaged_area.y2 = new_plane_state->uapi.dst.y2;
                        clip_area_update(&crtc_state->psr2_su_area, &damaged_area,
                                         &crtc_state->pipe_src);
                        continue;
                }

                src = drm_plane_state_src(&new_plane_state->uapi);
                drm_rect_fp_to_int(&src, &src);

                if (!drm_atomic_helper_damage_merged(&old_plane_state->uapi,
                                                     &new_plane_state->uapi, &damaged_area))
                        continue;

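                /*
                 * Translate the merged damage rect from framebuffer
                 * coordinates to pipe coordinates using the plane dst/src
                 * offsets.
                 */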
                damaged_area.y1 += new_plane_state->uapi.dst.y1 - src.y1;
                damaged_area.y2 += new_plane_state->uapi.dst.y1 - src.y1;
                damaged_area.x1 += new_plane_state->uapi.dst.x1 - src.x1;
                damaged_area.x2 += new_plane_state->uapi.dst.x1 - src.x1;

                clip_area_update(&crtc_state->psr2_su_area, &damaged_area, &crtc_state->pipe_src);
        }

        /*
         * TODO: For now we are just using a full update in case the
         * selective fetch area calculation fails. To optimize this we
         * should identify cases where this happens and fix the area
         * calculation for those.
         */
        if (crtc_state->psr2_su_area.y1 == -1) {
                drm_info_once(display->drm,
                              "Selective fetch area calculation failed in pipe %c\n",
                              pipe_name(crtc->pipe));
                full_update = true;
        }

        if (full_update)
                goto skip_sel_fetch_set_loop;

        intel_psr_apply_su_area_workarounds(crtc_state);

        ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
        if (ret)
                return ret;

        do {
                bool cursor_in_su_area;

                /*
                 * Adjust the SU area to fully cover the cursor as necessary
                 * (early transport). This needs to be done after
                 * drm_atomic_add_affected_planes() to ensure a visible cursor
                 * is added to the affected planes even when the cursor itself
                 * is not updated.
                 */
                intel_psr2_sel_fetch_et_alignment(state, crtc, &cursor_in_su_area);

                su_area_changed = intel_psr2_sel_fetch_pipe_alignment(crtc_state);

                /*
                 * If the cursor was outside the SU area before
                 * alignment, the alignment step (which only expands
                 * SU) may pull the cursor partially inside, so we
                 * must run ET alignment again to fully cover it. But
                 * if the cursor was already fully inside before
                 * alignment, expanding the SU area won't change that,
                 * so no further work is needed.
                 */
                if (cursor_in_su_area)
                        break;
        } while (su_area_changed);

        /*
         * Now that we have the pipe damaged area, check whether it intersects
         * with each plane; if it does, set the plane's selective fetch area.
         */
        for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
                                             new_plane_state, i) {
                struct drm_rect *sel_fetch_area, inter;
                struct intel_plane *linked = new_plane_state->planar_linked_plane;

                if (new_plane_state->hw.crtc != crtc_state->uapi.crtc ||
                    !new_plane_state->uapi.visible)
                        continue;

                inter = crtc_state->psr2_su_area;
                sel_fetch_area = &new_plane_state->psr2_sel_fetch_area;
                if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst)) {
                        sel_fetch_area->y1 = -1;
                        sel_fetch_area->y2 = -1;
                        /*
                         * If plane selective fetch was previously enabled,
                         * disable it.
                         */
                        if (drm_rect_height(&old_plane_state->psr2_sel_fetch_area) > 0)
                                crtc_state->update_planes |= BIT(plane->id);

                        continue;
                }

                if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) {
                        full_update = true;
                        break;
                }

                sel_fetch_area = &new_plane_state->psr2_sel_fetch_area;
                sel_fetch_area->y1 = inter.y1 - new_plane_state->uapi.dst.y1;
                sel_fetch_area->y2 = inter.y2 - new_plane_state->uapi.dst.y1;
                crtc_state->update_planes |= BIT(plane->id);

                /*
                 * The selective fetch area is calculated for the UV plane.
                 * Use the same area for the Y plane as well.
                 */
                if (linked) {
                        struct intel_plane_state *linked_new_plane_state;
                        struct drm_rect *linked_sel_fetch_area;

                        linked_new_plane_state = intel_atomic_get_plane_state(state, linked);
                        if (IS_ERR(linked_new_plane_state))
                                return PTR_ERR(linked_new_plane_state);

                        linked_sel_fetch_area = &linked_new_plane_state->psr2_sel_fetch_area;
                        linked_sel_fetch_area->y1 = sel_fetch_area->y1;
                        linked_sel_fetch_area->y2 = sel_fetch_area->y2;
                        crtc_state->update_planes |= BIT(linked->id);
                }
        }

skip_sel_fetch_set_loop:
        if (full_update)
                clip_area_update(&crtc_state->psr2_su_area, &crtc_state->pipe_src,
                                 &crtc_state->pipe_src);

        psr2_man_trk_ctl_calc(crtc_state, full_update);
        crtc_state->pipe_srcsz_early_tpt =
                psr2_pipe_srcsz_early_tpt_calc(crtc_state, full_update);
        return 0;
}

void intel_psr2_panic_force_full_update(const struct intel_crtc_state *crtc_state)
{
        struct intel_display *display = to_intel_display(crtc_state);
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
        u32 val = man_trk_ctl_enable_bit_get(display);

        /* SF partial frame enable has to be set even on full update */
        val |= man_trk_ctl_partial_frame_bit_get(display);
        val |= man_trk_ctl_continuos_full_frame(display);

        /* Directly write the register */
        intel_de_write_fw(display, PSR2_MAN_TRK_CTL(display, cpu_transcoder), val);

        if (!crtc_state->enable_psr2_su_region_et)
                return;

        intel_de_write_fw(display, PIPE_SRCSZ_ERLY_TPT(crtc->pipe), 0);
}

void intel_psr_pre_plane_update(struct intel_atomic_state *state,
                                struct intel_crtc *crtc)
{
        struct intel_display *display = to_intel_display(state);
        const struct intel_crtc_state *old_crtc_state =
                intel_atomic_get_old_crtc_state(state, crtc);
        const struct intel_crtc_state *new_crtc_state =
                intel_atomic_get_new_crtc_state(state, crtc);
        struct intel_encoder *encoder;

        if (!HAS_PSR(display))
                return;

        for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
                                             old_crtc_state->uapi.encoder_mask) {
                struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
                struct intel_psr *psr = &intel_dp->psr;

                mutex_lock(&psr->lock);

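                /* Record why PSR can't be used, for the debugfs status */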
                if (!new_crtc_state->has_psr)
                        psr->no_psr_reason = new_crtc_state->no_psr_reason;

                if (psr->enabled) {
                        /*
                         * Reasons to disable:
                         * - PSR disabled in new state
                         * - All planes will go inactive
                         * - Changing between PSR versions
                         * - Region Early Transport changing
                         * - Display WA #1136: skl, bxt
                         */
                        if (intel_crtc_needs_modeset(new_crtc_state) ||
                            new_crtc_state->update_m_n ||
                            new_crtc_state->update_lrr ||
                            !new_crtc_state->has_psr ||
                            !new_crtc_state->active_planes ||
                            new_crtc_state->has_sel_update != psr->sel_update_enabled ||
                            new_crtc_state->enable_psr2_su_region_et != psr->su_region_et_enabled ||
                            new_crtc_state->has_panel_replay != psr->panel_replay_enabled ||
                            (DISPLAY_VER(display) < 11 && new_crtc_state->wm_level_disabled))
                                intel_psr_disable_locked(intel_dp);
                        else if (new_crtc_state->wm_level_disabled)
                                /* Wa_14015648006 */
                                wm_optimization_wa(intel_dp, new_crtc_state);
                }

                mutex_unlock(&psr->lock);
        }
}

static void
verify_panel_replay_dsc_state(const struct intel_crtc_state *crtc_state)
{
        struct intel_display *display = to_intel_display(crtc_state);

        if (!crtc_state->has_panel_replay)
                return;

        drm_WARN_ON(display->drm,
                    intel_dsc_enabled_on_link(crtc_state) &&
                    crtc_state->panel_replay_dsc_support ==
                    INTEL_DP_PANEL_REPLAY_DSC_NOT_SUPPORTED);
}

void intel_psr_post_plane_update(struct intel_atomic_state *state,
                                 struct intel_crtc *crtc)
{
        struct intel_display *display = to_intel_display(state);
        const struct intel_crtc_state *crtc_state =
                intel_atomic_get_new_crtc_state(state, crtc);
        struct intel_encoder *encoder;

        if (!crtc_state->has_psr)
                return;

        verify_panel_replay_dsc_state(crtc_state);

        for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
                                             crtc_state->uapi.encoder_mask) {
                struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
                struct intel_psr *psr = &intel_dp->psr;
                bool keep_disabled = false;

                mutex_lock(&psr->lock);

                drm_WARN_ON(display->drm,
                            psr->enabled && !crtc_state->active_planes);

                if (psr->sink_not_reliable)
                        keep_disabled = true;

                if (!crtc_state->active_planes) {
                        psr->no_psr_reason = "All planes inactive";
                        keep_disabled = true;
                }

                /* Display WA #1136: skl, bxt */
                if (DISPLAY_VER(display) < 11 && crtc_state->wm_level_disabled) {
                        psr->no_psr_reason = "Workaround #1136 for skl, bxt";
                        keep_disabled = true;
                }

                if (!psr->enabled && !keep_disabled)
                        intel_psr_enable_locked(intel_dp, crtc_state);
                else if (psr->enabled && !crtc_state->wm_level_disabled)
                        /* Wa_14015648006 */
                        wm_optimization_wa(intel_dp, crtc_state);

                /* Force a PSR exit when enabling CRC to avoid CRC timeouts */
                if (crtc_state->crc_enabled && psr->enabled)
                        intel_psr_force_update(intel_dp);

                /*
                 * Clear possible busy bits in case we have an
                 * invalidate -> flip -> flush sequence.
                 */
                intel_dp->psr.busy_frontbuffer_bits = 0;

                mutex_unlock(&psr->lock);
        }
}

/*
 * From bspec: Panel Self Refresh (BDW+)
 * Max. time for PSR to idle = Inverse of the refresh rate + 6 ms of
 * exit training time + 1.5 ms of aux channel handshake. 50 ms is
 * defensive enough to cover everything.
 */
#define PSR_IDLE_TIMEOUT_MS 50

static int
_psr2_ready_for_pipe_update_locked(const struct intel_crtc_state *new_crtc_state,
                                   struct intel_dsb *dsb)
{
        struct intel_display *display = to_intel_display(new_crtc_state);
        enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;

        /*
         * Any state lower than EDP_PSR2_STATUS_STATE_DEEP_SLEEP is enough.
         * As all higher states have bit 4 of the PSR2 state set, we can just
         * wait for EDP_PSR2_STATUS_STATE_DEEP_SLEEP to be cleared.
         */
        if (dsb) {
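                /*
                 * Queue the status poll into the DSB instead of waiting on the
                 * CPU; the poll interval and count are chosen so that the
                 * total wait matches PSR_IDLE_TIMEOUT_MS.
                 */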
                intel_dsb_poll(dsb, EDP_PSR2_STATUS(display, cpu_transcoder),
                               EDP_PSR2_STATUS_STATE_DEEP_SLEEP, 0, 200,
                               PSR_IDLE_TIMEOUT_MS * 1000 / 200);
                return true;
        }

        return intel_de_wait_for_clear_ms(display,
                                       EDP_PSR2_STATUS(display, cpu_transcoder),
                                       EDP_PSR2_STATUS_STATE_DEEP_SLEEP,
                                       PSR_IDLE_TIMEOUT_MS);
}

static int
_psr1_ready_for_pipe_update_locked(const struct intel_crtc_state *new_crtc_state,
                                   struct intel_dsb *dsb)
{
        struct intel_display *display = to_intel_display(new_crtc_state);
        enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;

        if (dsb) {
                intel_dsb_poll(dsb, psr_status_reg(display, cpu_transcoder),
                               EDP_PSR_STATUS_STATE_MASK, 0, 200,
                               PSR_IDLE_TIMEOUT_MS * 1000 / 200);
                return true;
        }

        return intel_de_wait_for_clear_ms(display,
                                       psr_status_reg(display, cpu_transcoder),
                                       EDP_PSR_STATUS_STATE_MASK,
                                       PSR_IDLE_TIMEOUT_MS);
}

/**
 * intel_psr_wait_for_idle_locked - wait for PSR to be ready for a pipe update
 * @new_crtc_state: new CRTC state
 *
 * This function is expected to be called from pipe_update_start() where it is
 * not expected to race with PSR enable or disable.
 */
void intel_psr_wait_for_idle_locked(const struct intel_crtc_state *new_crtc_state)
{
        struct intel_display *display = to_intel_display(new_crtc_state);
        struct intel_encoder *encoder;

        if (!new_crtc_state->has_psr)
                return;

        for_each_intel_encoder_mask_with_psr(display->drm, encoder,
                                             new_crtc_state->uapi.encoder_mask) {
                struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
                int ret;

                lockdep_assert_held(&intel_dp->psr.lock);

                if (!intel_dp->psr.enabled || intel_dp->psr.panel_replay_enabled)
                        continue;

                if (intel_dp->psr.sel_update_enabled)
                        ret = _psr2_ready_for_pipe_update_locked(new_crtc_state,
                                                                 NULL);
                else
                        ret = _psr1_ready_for_pipe_update_locked(new_crtc_state,
                                                                 NULL);

                if (ret)
                        drm_err(display->drm,
                                "PSR wait timed out, atomic update may fail\n");
        }
}

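/*
 * DSB variant of the PSR idle wait: the poll is emitted into the DSB so it
 * executes as part of a DSB-based pipe update. Panel Replay is skipped,
 * matching intel_psr_wait_for_idle_locked().
 */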
void intel_psr_wait_for_idle_dsb(struct intel_dsb *dsb,
                                 const struct intel_crtc_state *new_crtc_state)
{
        if (!new_crtc_state->has_psr || new_crtc_state->has_panel_replay)
                return;

        if (new_crtc_state->has_sel_update)
                _psr2_ready_for_pipe_update_locked(new_crtc_state, dsb);
        else
                _psr1_ready_for_pipe_update_locked(new_crtc_state, dsb);
}

static bool __psr_wait_for_idle_locked(struct intel_dp *intel_dp)
{
        struct intel_display *display = to_intel_display(intel_dp);
        enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
        i915_reg_t reg;
        u32 mask;
        int err;

        if (!intel_dp->psr.enabled)
                return false;

        if (intel_dp_is_edp(intel_dp) && (intel_dp->psr.sel_update_enabled ||
                                          intel_dp->psr.panel_replay_enabled)) {
                reg = EDP_PSR2_STATUS(display, cpu_transcoder);
                mask = EDP_PSR2_STATUS_STATE_MASK;
        } else {
                reg = psr_status_reg(display, cpu_transcoder);
                mask = EDP_PSR_STATUS_STATE_MASK;
        }

        mutex_unlock(&intel_dp->psr.lock);

        err = intel_de_wait_for_clear_ms(display, reg, mask, 50);
        if (err)
                drm_err(display->drm,
                        "Timed out waiting for PSR Idle for re-enable\n");

        /* After the unlocked wait, verify that PSR is still wanted! */
        mutex_lock(&intel_dp->psr.lock);
        return err == 0 && intel_dp->psr.enabled && !intel_dp->psr.pause_counter;
}

static int intel_psr_fastset_force(struct intel_display *display)
{
        struct drm_connector_list_iter conn_iter;
        struct drm_modeset_acquire_ctx ctx;
        struct drm_atomic_state *state;
        struct drm_connector *conn;
        int err = 0;

        state = drm_atomic_state_alloc(display->drm);
        if (!state)
                return -ENOMEM;

        drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);

        state->acquire_ctx = &ctx;
        to_intel_atomic_state(state)->internal = true;

retry:
        drm_connector_list_iter_begin(display->drm, &conn_iter);
        drm_for_each_connector_iter(conn, &conn_iter) {
                struct drm_connector_state *conn_state;
                struct drm_crtc_state *crtc_state;

                if (conn->connector_type != DRM_MODE_CONNECTOR_eDP)
                        continue;

                conn_state = drm_atomic_get_connector_state(state, conn);
                if (IS_ERR(conn_state)) {
                        err = PTR_ERR(conn_state);
                        break;
                }

                if (!conn_state->crtc)
                        continue;

                crtc_state = drm_atomic_get_crtc_state(state, conn_state->crtc);
                if (IS_ERR(crtc_state)) {
                        err = PTR_ERR(crtc_state);
                        break;
                }

                /* Mark mode as changed to trigger a pipe->update() */
                crtc_state->mode_changed = true;
        }
        drm_connector_list_iter_end(&conn_iter);

        if (err == 0)
                err = drm_atomic_commit(state);

        if (err == -EDEADLK) {
                drm_atomic_state_clear(state);
                err = drm_modeset_backoff(&ctx);
                if (!err)
                        goto retry;
        }

        drm_modeset_drop_locks(&ctx);
        drm_modeset_acquire_fini(&ctx);
        drm_atomic_state_put(state);

        return err;
}

int intel_psr_debug_set(struct intel_dp *intel_dp, u64 val)
{
        struct intel_display *display = to_intel_display(intel_dp);
        const u32 mode = val & I915_PSR_DEBUG_MODE_MASK;
        const u32 disable_bits = val & (I915_PSR_DEBUG_SU_REGION_ET_DISABLE |
                                        I915_PSR_DEBUG_PANEL_REPLAY_DISABLE);
        u32 old_mode, old_disable_bits;
        int ret;

        if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_SU_REGION_ET_DISABLE |
                    I915_PSR_DEBUG_PANEL_REPLAY_DISABLE |
                    I915_PSR_DEBUG_MODE_MASK) ||
            mode > I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
                drm_dbg_kms(display->drm, "Invalid debug mask %llx\n", val);
                return -EINVAL;
        }

        ret = mutex_lock_interruptible(&intel_dp->psr.lock);
        if (ret)
                return ret;

        old_mode = intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK;
        old_disable_bits = intel_dp->psr.debug &
                (I915_PSR_DEBUG_SU_REGION_ET_DISABLE |
                 I915_PSR_DEBUG_PANEL_REPLAY_DISABLE);

        intel_dp->psr.debug = val;

        /*
         * Do it right away if it's already enabled, otherwise it will be done
         * when enabling the source.
         */
        if (intel_dp->psr.enabled)
                psr_irq_control(intel_dp);

        mutex_unlock(&intel_dp->psr.lock);

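        /* The new debug mode only takes effect after forcing a new commit */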
        if (old_mode != mode || old_disable_bits != disable_bits)
                ret = intel_psr_fastset_force(display);

        return ret;
}

static void intel_psr_handle_irq(struct intel_dp *intel_dp)
{
        struct intel_psr *psr = &intel_dp->psr;

        intel_psr_disable_locked(intel_dp);
        psr->sink_not_reliable = true;
        /* let's make sure that the sink is awake */
        drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
}

static void intel_psr_work(struct work_struct *work)
{
        struct intel_dp *intel_dp =
                container_of(work, typeof(*intel_dp), psr.work);

        mutex_lock(&intel_dp->psr.lock);

        if (!intel_dp->psr.enabled)
                goto unlock;

        if (READ_ONCE(intel_dp->psr.irq_aux_error)) {
                intel_psr_handle_irq(intel_dp);
                goto unlock;
        }

        if (intel_dp->psr.pause_counter)
                goto unlock;

        /*
         * We have to make sure PSR is ready for re-enable, otherwise it stays
         * disabled until the next full enable/disable cycle. PSR might take
         * some time to get fully disabled and be ready for re-enable.
         */
        if (!__psr_wait_for_idle_locked(intel_dp))
                goto unlock;

        /*
         * The delayed work can race with an invalidate hence we need to
         * recheck. Since psr_flush first clears this and then reschedules we
         * won't ever miss a flush when bailing out here.
         */
        if (intel_dp->psr.busy_frontbuffer_bits || intel_dp->psr.active)
                goto unlock;

        intel_psr_activate(intel_dp);
unlock:
        mutex_unlock(&intel_dp->psr.lock);
}

static void intel_psr_configure_full_frame_update(struct intel_dp *intel_dp)
{
        struct intel_display *display = to_intel_display(intel_dp);
        enum transcoder cpu_transcoder = intel_dp->psr.transcoder;

        if (!intel_dp->psr.psr2_sel_fetch_enabled)
                return;

        if (DISPLAY_VER(display) >= 20)
                intel_de_write(display, LNL_SFF_CTL(cpu_transcoder),
                               LNL_SFF_CTL_SF_SINGLE_FULL_FRAME);
        else
                intel_de_write(display,
                               PSR2_MAN_TRK_CTL(display, cpu_transcoder),
                               man_trk_ctl_enable_bit_get(display) |
                               man_trk_ctl_partial_frame_bit_get(display) |
                               man_trk_ctl_single_full_frame_bit_get(display) |
                               man_trk_ctl_continuos_full_frame(display));
}

static void _psr_invalidate_handle(struct intel_dp *intel_dp)
{
        struct intel_display *display = to_intel_display(intel_dp);

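        /*
         * With selective fetch prior to LNL, keep PSR enabled but switch to
         * continuous full frame (CFF) fetches while frontbuffer rendering is
         * in flight; otherwise force a PSR exit.
         */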
        if (DISPLAY_VER(display) < 20 && intel_dp->psr.psr2_sel_fetch_enabled) {
                if (!intel_dp->psr.psr2_sel_fetch_cff_enabled) {
                        intel_dp->psr.psr2_sel_fetch_cff_enabled = true;
                        intel_psr_configure_full_frame_update(intel_dp);
                }

                intel_psr_force_update(intel_dp);
        } else {
                intel_psr_exit(intel_dp);
        }
}

/**
 * intel_psr_invalidate - Invalidate PSR
 * @display: display device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 * @origin: which operation caused the invalidate
 *
 * Since the hardware frontbuffer tracking has gaps we need to integrate
 * with the software frontbuffer tracking. This function gets called every
 * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
 * disabled if the frontbuffer mask contains a buffer relevant to PSR.
 *
 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
 */
void intel_psr_invalidate(struct intel_display *display,
                          unsigned frontbuffer_bits, enum fb_op_origin origin)
{
        struct intel_encoder *encoder;

        if (origin == ORIGIN_FLIP)
                return;

        for_each_intel_encoder_with_psr(display->drm, encoder) {
                unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
                struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

                mutex_lock(&intel_dp->psr.lock);
                if (!intel_dp->psr.enabled) {
                        mutex_unlock(&intel_dp->psr.lock);
                        continue;
                }

                pipe_frontbuffer_bits &=
                        INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
                intel_dp->psr.busy_frontbuffer_bits |= pipe_frontbuffer_bits;

                if (pipe_frontbuffer_bits)
                        _psr_invalidate_handle(intel_dp);

                mutex_unlock(&intel_dp->psr.lock);
        }
}

/*
 * When we completely rely on PSR2 S/W tracking in the future,
 * intel_psr_flush() will invalidate and flush PSR for ORIGIN_FLIP events as
 * well, therefore tgl_dc3co_flush_locked() will need to be changed
 * accordingly.
 */
static void
tgl_dc3co_flush_locked(struct intel_dp *intel_dp, unsigned int frontbuffer_bits,
                       enum fb_op_origin origin)
{
        struct intel_display *display = to_intel_display(intel_dp);

        if (!intel_dp->psr.dc3co_exitline || !intel_dp->psr.sel_update_enabled ||
            !intel_dp->psr.active)
                return;

        /*
         * Every frontbuffer flush caused by a flip event modifies the delay of
         * the delayed work; when the delayed work finally runs it means the
         * display has been idle.
         */
        if (!(frontbuffer_bits &
            INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe)))
                return;

        tgl_psr2_enable_dc3co(intel_dp);
        mod_delayed_work(display->wq.unordered, &intel_dp->psr.dc3co_work,
                         intel_dp->psr.dc3co_exit_delay);
}

static void _psr_flush_handle(struct intel_dp *intel_dp)
{
        struct intel_display *display = to_intel_display(intel_dp);

        if (DISPLAY_VER(display) < 20 && intel_dp->psr.psr2_sel_fetch_enabled) {
                /* Selective fetch prior to LNL */
                if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
                        /* can we turn CFF off? */
                        if (intel_dp->psr.busy_frontbuffer_bits == 0)
                                intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
                }

                /*
                 * Still keep the CFF bit enabled as we don't have a proper SU
                 * configuration in case an update is sent for any reason after
                 * the SFF bit gets cleared by the HW on the next vblank.
                 *
                 * NOTE: Setting the CFF bit is not needed from LunarLake
                 * onwards as we have a dedicated register for the SFF bit and
                 * we are not overwriting the existing SU configuration.
                 */
                intel_psr_configure_full_frame_update(intel_dp);

                intel_psr_force_update(intel_dp);
        } else if (!intel_dp->psr.psr2_sel_fetch_enabled) {
                /*
                 * PSR1 on all platforms
                 * PSR2 HW tracking
                 * Panel Replay Full frame update
                 */
                intel_psr_force_update(intel_dp);
        } else {
                /* Selective update LNL onwards */
                intel_psr_exit(intel_dp);
        }

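        /* Re-activate PSR from the work queue once the HW has fully idled */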
        if (!intel_dp->psr.active && !intel_dp->psr.busy_frontbuffer_bits)
                queue_work(display->wq.unordered, &intel_dp->psr.work);
}

/**
 * intel_psr_flush - Flush PSR
 * @display: display device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 * @origin: which operation caused the flush
 *
 * Since the hardware frontbuffer tracking has gaps we need to integrate
 * with the software frontbuffer tracking. This function gets called every
 * time frontbuffer rendering has completed and flushed out to memory. PSR
 * can be enabled again if no other frontbuffer relevant to PSR is dirty.
 *
 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
 */
void intel_psr_flush(struct intel_display *display,
                     unsigned frontbuffer_bits, enum fb_op_origin origin)
{
        struct intel_encoder *encoder;

        for_each_intel_encoder_with_psr(display->drm, encoder) {
                unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
                struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

                mutex_lock(&intel_dp->psr.lock);
                if (!intel_dp->psr.enabled) {
                        mutex_unlock(&intel_dp->psr.lock);
                        continue;
                }

                pipe_frontbuffer_bits &=
                        INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
                intel_dp->psr.busy_frontbuffer_bits &= ~pipe_frontbuffer_bits;

                /*
                 * If the PSR is paused by an explicit intel_psr_paused() call,
                 * we have to ensure that the PSR is not activated until
                 * intel_psr_resume() is called.
                 */
                if (intel_dp->psr.pause_counter)
                        goto unlock;

                if (origin == ORIGIN_FLIP ||
                    (origin == ORIGIN_CURSOR_UPDATE &&
                     !intel_dp->psr.psr2_sel_fetch_enabled)) {
                        tgl_dc3co_flush_locked(intel_dp, frontbuffer_bits, origin);
                        goto unlock;
                }

                if (pipe_frontbuffer_bits == 0)
                        goto unlock;

                /* By definition flush = invalidate + flush */
                _psr_flush_handle(intel_dp);
unlock:
                mutex_unlock(&intel_dp->psr.lock);
        }
}

/**
 * intel_psr_init - Init basic PSR work and mutex.
 * @intel_dp: Intel DP
 *
 * This function is called after connector initialization (which takes care of
 * handling the connector capabilities) and initializes the basic PSR state for
 * each DP encoder.
 */
void intel_psr_init(struct intel_dp *intel_dp)
{
        struct intel_display *display = to_intel_display(intel_dp);
        struct intel_connector *connector = intel_dp->attached_connector;
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);

        if (!(HAS_PSR(display) || HAS_DP20(display)))
                return;

        /*
         * The HSW spec explicitly says PSR is tied to port A.
         * BDW+ platforms have an instance of PSR registers per transcoder, but
         * on BDW, GEN9 and GEN11 the HW team has only validated the eDP
         * transcoder. For now only one instance of PSR is supported on those
         * platforms, so let's keep it hardcoded to PORT_A for BDW, GEN9 and
         * GEN11. GEN12 onwards supports a PSR register instance per
         * transcoder.
         */
        if (DISPLAY_VER(display) < 12 && dig_port->base.port != PORT_A) {
                drm_dbg_kms(display->drm,
                            "PSR condition failed: Port not supported\n");
                return;
        }

        if ((HAS_DP20(display) && !intel_dp_is_edp(intel_dp)) ||
            DISPLAY_VER(display) >= 20)
                intel_dp->psr.source_panel_replay_support = true;

        if (HAS_PSR(display) && intel_dp_is_edp(intel_dp))
                intel_dp->psr.source_support = true;

        /* Set link_standby vs. link_off defaults */
        if (DISPLAY_VER(display) < 12)
                /* For platforms up to TGL let's respect the VBT again */
                intel_dp->psr.link_standby = connector->panel.vbt.psr.full_link;

        INIT_WORK(&intel_dp->psr.work, intel_psr_work);
        INIT_DELAYED_WORK(&intel_dp->psr.dc3co_work, tgl_dc3co_disable_work);
        mutex_init(&intel_dp->psr.lock);
}

static int psr_get_status_and_error_status(struct intel_dp *intel_dp,
                                           u8 *status, u8 *error_status)
{
        struct drm_dp_aux *aux = &intel_dp->aux;
        int ret;
        unsigned int offset;

        offset = intel_dp->psr.panel_replay_enabled ?
                 DP_SINK_DEVICE_PR_AND_FRAME_LOCK_STATUS : DP_PSR_STATUS;

        ret = drm_dp_dpcd_readb(aux, offset, status);
        if (ret != 1)
                return ret;

        offset = intel_dp->psr.panel_replay_enabled ?
                 DP_PANEL_REPLAY_ERROR_STATUS : DP_PSR_ERROR_STATUS;

        ret = drm_dp_dpcd_readb(aux, offset, error_status);
        if (ret != 1)
                return ret;

        *status = *status & DP_PSR_SINK_STATE_MASK;

        return 0;
}

static void psr_alpm_check(struct intel_dp *intel_dp)
{
        struct intel_psr *psr = &intel_dp->psr;

        if (!psr->sel_update_enabled)
                return;

        if (intel_alpm_get_error(intel_dp)) {
                intel_psr_disable_locked(intel_dp);
                psr->sink_not_reliable = true;
        }
}

static void psr_capability_changed_check(struct intel_dp *intel_dp)
{
        struct intel_display *display = to_intel_display(intel_dp);
        struct intel_psr *psr = &intel_dp->psr;
        u8 val;
        int r;

        r = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ESI, &val);
        if (r != 1) {
                drm_err(display->drm, "Error reading DP_PSR_ESI\n");
                return;
        }

        if (val & DP_PSR_CAPS_CHANGE) {
                intel_psr_disable_locked(intel_dp);
                psr->sink_not_reliable = true;
                drm_dbg_kms(display->drm,
                            "Sink PSR capability changed, disabling PSR\n");

                /* Clearing it */
                drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ESI, val);
        }
}

/*
 * As the common error bits are identical:
 * DP_PSR_RFB_STORAGE_ERROR == DP_PANEL_REPLAY_RFB_STORAGE_ERROR
 * DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR == DP_PANEL_REPLAY_VSC_SDP_UNCORRECTABLE_ERROR
 * DP_PSR_LINK_CRC_ERROR == DP_PANEL_REPLAY_LINK_CRC_ERROR
 * this function relies on the PSR definitions.
 */
void intel_psr_short_pulse(struct intel_dp *intel_dp)
{
        struct intel_display *display = to_intel_display(intel_dp);
        struct intel_psr *psr = &intel_dp->psr;
        u8 status, error_status;
        const u8 errors = DP_PSR_RFB_STORAGE_ERROR |
                          DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
                          DP_PSR_LINK_CRC_ERROR;

        if (!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp))
                return;

        mutex_lock(&psr->lock);

        psr->link_ok = false;

        if (!psr->enabled)
                goto exit;

        if (psr_get_status_and_error_status(intel_dp, &status, &error_status)) {
                drm_err(display->drm,
                        "Error reading PSR status or error status\n");
                goto exit;
        }

        if ((!psr->panel_replay_enabled && status == DP_PSR_SINK_INTERNAL_ERROR) ||
            (error_status & errors)) {
                intel_psr_disable_locked(intel_dp);
                psr->sink_not_reliable = true;
        }

        if (!psr->panel_replay_enabled && status == DP_PSR_SINK_INTERNAL_ERROR &&
            !error_status)
                drm_dbg_kms(display->drm,
                            "PSR sink internal error, disabling PSR\n");
        if (error_status & DP_PSR_RFB_STORAGE_ERROR)
                drm_dbg_kms(display->drm,
                            "PSR RFB storage error, disabling PSR\n");
        if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
                drm_dbg_kms(display->drm,
                            "PSR VSC SDP uncorrectable error, disabling PSR\n");
        if (error_status & DP_PSR_LINK_CRC_ERROR)
                drm_dbg_kms(display->drm,
                            "PSR Link CRC error, disabling PSR\n");

        if (error_status & ~errors)
                drm_err(display->drm,
                        "PSR_ERROR_STATUS unhandled errors %x\n",
                        error_status & ~errors);
        /* clear status register */
        drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, error_status);

        if (!psr->panel_replay_enabled) {
                psr_alpm_check(intel_dp);
                psr_capability_changed_check(intel_dp);
        }

exit:
        mutex_unlock(&psr->lock);
}

bool intel_psr_enabled(struct intel_dp *intel_dp)
{
        bool ret;

        if (!CAN_PSR(intel_dp))
                return false;

        mutex_lock(&intel_dp->psr.lock);
        ret = intel_dp->psr.enabled;
        mutex_unlock(&intel_dp->psr.lock);

        return ret;
}

/**
 * intel_psr_link_ok - return psr->link_ok
 * @intel_dp: struct intel_dp
 *
 * We are seeing unexpected link re-trainings with some panels. This is caused
 * by the panel reporting a bad link status after PSR is enabled. Code checking
 * the link status can call this to decide whether to ignore a bad link status
 * reported by the panel: if the panel reports a bad link but
 * intel_psr_link_ok() says the link is ok, the caller should rely on the
 * latter.
 *
 * Returns the value of link_ok.
 */
bool intel_psr_link_ok(struct intel_dp *intel_dp)
{
        bool ret;

        if ((!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp)) ||
            !intel_dp_is_edp(intel_dp))
                return false;

        mutex_lock(&intel_dp->psr.lock);
        ret = intel_dp->psr.link_ok;
        mutex_unlock(&intel_dp->psr.lock);

        return ret;
}

/**
 * intel_psr_lock - grab PSR lock
 * @crtc_state: the crtc state
 *
 * This is initially meant to be used around a CRTC update, when vblank
 * sensitive registers are updated and we need to grab the lock before that
 * to avoid vblank evasion.
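 *
 * A typical caller (hypothetical sketch) looks like:
 *
 *	intel_psr_lock(crtc_state);
 *	... program vblank sensitive registers ...
 *	intel_psr_unlock(crtc_state);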
 */
void intel_psr_lock(const struct intel_crtc_state *crtc_state)
{
        struct intel_display *display = to_intel_display(crtc_state);
        struct intel_encoder *encoder;

        if (!crtc_state->has_psr)
                return;

        for_each_intel_encoder_mask_with_psr(display->drm, encoder,
                                             crtc_state->uapi.encoder_mask) {
                struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

                mutex_lock(&intel_dp->psr.lock);
                break;
        }
}

/**
 * intel_psr_unlock - release PSR lock
 * @crtc_state: the crtc state
 *
 * Release the PSR lock that was held during pipe update.
 */
void intel_psr_unlock(const struct intel_crtc_state *crtc_state)
{
        struct intel_display *display = to_intel_display(crtc_state);
        struct intel_encoder *encoder;

        if (!crtc_state->has_psr)
                return;

        for_each_intel_encoder_mask_with_psr(display->drm, encoder,
                                             crtc_state->uapi.encoder_mask) {
                struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

                mutex_unlock(&intel_dp->psr.lock);
                break;
        }
}

/* Wa_16025596647 */
static void intel_psr_apply_underrun_on_idle_wa_locked(struct intel_dp *intel_dp)
{
        struct intel_display *display = to_intel_display(intel_dp);
        bool dc5_dc6_blocked;

        if (!intel_dp->psr.active || !intel_dp->psr.pkg_c_latency_used)
                return;

        dc5_dc6_blocked = is_dc5_dc6_blocked(intel_dp);

        if (intel_dp->psr.sel_update_enabled)
                psr2_program_idle_frames(intel_dp, dc5_dc6_blocked ? 0 :
                                         psr_compute_idle_frames(intel_dp));
        else
                intel_dmc_start_pkgc_exit_at_start_of_undelayed_vblank(display,
                                                                       intel_dp->psr.pipe,
                                                                       dc5_dc6_blocked);
}

static void psr_dc5_dc6_wa_work(struct work_struct *work)
{
        struct intel_display *display = container_of(work, typeof(*display),
                                                     psr_dc5_dc6_wa_work);
        struct intel_encoder *encoder;

        for_each_intel_encoder_with_psr(display->drm, encoder) {
                struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

                mutex_lock(&intel_dp->psr.lock);

                if (intel_dp->psr.enabled && !intel_dp->psr.panel_replay_enabled &&
                    !intel_dp->psr.pkg_c_latency_used)
                        intel_psr_apply_underrun_on_idle_wa_locked(intel_dp);

                mutex_unlock(&intel_dp->psr.lock);
        }
}

/**
 * intel_psr_notify_dc5_dc6 - Notify PSR about enabling/disabling of DC5/DC6
 * @display: intel display struct
 *
 * This is targeted for underrun on idle PSR HW bug (Wa_16025596647) to schedule
 * psr_dc5_dc6_wa_work used for applying/removing the workaround.
 */
void intel_psr_notify_dc5_dc6(struct intel_display *display)
{
        if (DISPLAY_VER(display) != 20 &&
            !IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0))
                return;

        schedule_work(&display->psr_dc5_dc6_wa_work);
}

/**
 * intel_psr_dc5_dc6_wa_init - Init work for underrun on idle PSR HW bug wa
 * @display: intel display struct
 *
 * This is targeted for underrun on idle PSR HW bug (Wa_16025596647) to init
 * psr_dc5_dc6_wa_work used for applying the workaround.
 */
void intel_psr_dc5_dc6_wa_init(struct intel_display *display)
{
        if (DISPLAY_VER(display) != 20 &&
            !IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0))
                return;

        INIT_WORK(&display->psr_dc5_dc6_wa_work, psr_dc5_dc6_wa_work);
}

/**
 * intel_psr_notify_pipe_change - Notify PSR about enable/disable of a pipe
 * @state: intel atomic state
 * @crtc: intel crtc
 * @enable: enable/disable
 *
 * This is targeted for the underrun on idle PSR HW bug (Wa_16025596647) to
 * apply/remove the workaround when a pipe is getting enabled/disabled.
 */
void intel_psr_notify_pipe_change(struct intel_atomic_state *state,
                                  struct intel_crtc *crtc, bool enable)
{
        struct intel_display *display = to_intel_display(state);
        struct intel_encoder *encoder;

        if (DISPLAY_VER(display) != 20 &&
            !IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0))
                return;

        for_each_intel_encoder_with_psr(display->drm, encoder) {
                struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
                u8 active_non_psr_pipes;

                mutex_lock(&intel_dp->psr.lock);

                if (!intel_dp->psr.enabled || intel_dp->psr.panel_replay_enabled)
                        goto unlock;

                active_non_psr_pipes = intel_dp->psr.active_non_psr_pipes;

                if (enable)
                        active_non_psr_pipes |= BIT(crtc->pipe);
                else
                        active_non_psr_pipes &= ~BIT(crtc->pipe);

                if (active_non_psr_pipes == intel_dp->psr.active_non_psr_pipes)
                        goto unlock;

                if ((enable && intel_dp->psr.active_non_psr_pipes) ||
                    (!enable && !intel_dp->psr.active_non_psr_pipes) ||
                    !intel_dp->psr.pkg_c_latency_used) {
                        intel_dp->psr.active_non_psr_pipes = active_non_psr_pipes;
                        goto unlock;
                }

                intel_dp->psr.active_non_psr_pipes = active_non_psr_pipes;

                intel_psr_apply_underrun_on_idle_wa_locked(intel_dp);
unlock:
                mutex_unlock(&intel_dp->psr.lock);
        }
}

/**
 * intel_psr_notify_vblank_enable_disable - Notify PSR about enable/disable of vblank
 * @display: intel display struct
 * @enable: enable/disable
 *
 * This is targeted for the underrun on idle PSR HW bug (Wa_16025596647) to
 * apply/remove the workaround when vblank is getting enabled/disabled.
 */
void intel_psr_notify_vblank_enable_disable(struct intel_display *display,
                                            bool enable)
{
        struct intel_encoder *encoder;

        for_each_intel_encoder_with_psr(display->drm, encoder) {
                struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

                mutex_lock(&intel_dp->psr.lock);
                if (intel_dp->psr.panel_replay_enabled) {
                        mutex_unlock(&intel_dp->psr.lock);
                        break;
                }

                if (intel_dp->psr.enabled && intel_dp->psr.pkg_c_latency_used)
                        intel_psr_apply_underrun_on_idle_wa_locked(intel_dp);

                mutex_unlock(&intel_dp->psr.lock);
                return;
        }

        /*
         * NOTE: intel_display_power_set_target_dc_state() is used only by PSR
         * code for DC3CO handling. The DC3CO target state is currently
         * disabled in PSR code. If DC3CO is taken into use we need to take
         * that into account here as well.
         */
        intel_display_power_set_target_dc_state(display, enable ? DC_STATE_DISABLE :
                                                DC_STATE_EN_UPTO_DC6);
}

static void
psr_source_status(struct intel_dp *intel_dp, struct seq_file *m)
{
        struct intel_display *display = to_intel_display(intel_dp);
        enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
        const char *status = "unknown";
        u32 val, status_val;

        if ((intel_dp_is_edp(intel_dp) || DISPLAY_VER(display) >= 30) &&
            (intel_dp->psr.sel_update_enabled || intel_dp->psr.panel_replay_enabled)) {
                static const char * const live_status[] = {
                        "IDLE",
                        "CAPTURE",
                        "CAPTURE_FS",
                        "SLEEP",
                        "BUFON_FW",
                        "ML_UP",
                        "SU_STANDBY",
                        "FAST_SLEEP",
                        "DEEP_SLEEP",
                        "BUF_ON",
                        "TG_ON"
                };
                val = intel_de_read(display,
                                    EDP_PSR2_STATUS(display, cpu_transcoder));
                status_val = REG_FIELD_GET(EDP_PSR2_STATUS_STATE_MASK, val);
                if (status_val < ARRAY_SIZE(live_status))
                        status = live_status[status_val];
        } else {
                static const char * const live_status[] = {
                        "IDLE",
                        "SRDONACK",
                        "SRDENT",
                        "BUFOFF",
                        "BUFON",
                        "AUXACK",
                        "SRDOFFACK",
                        "SRDENT_ON",
                };
                val = intel_de_read(display,
                                    psr_status_reg(display, cpu_transcoder));
                status_val = REG_FIELD_GET(EDP_PSR_STATUS_STATE_MASK, val);
                if (status_val < ARRAY_SIZE(live_status))
                        status = live_status[status_val];
        }

        seq_printf(m, "Source PSR/PanelReplay status: %s [0x%08x]\n", status, val);
}

static void intel_psr_sink_capability(struct intel_connector *connector,
                                      struct seq_file *m)
{
        seq_printf(m, "Sink support: PSR = %s",
                   str_yes_no(connector->dp.psr_caps.support));

        if (connector->dp.psr_caps.support)
                seq_printf(m, " [0x%02x]", connector->dp.psr_caps.dpcd[0]);
        if (connector->dp.psr_caps.dpcd[0] == DP_PSR2_WITH_Y_COORD_ET_SUPPORTED)
                seq_printf(m, " (Early Transport)");
        seq_printf(m, ", Panel Replay = %s", str_yes_no(connector->dp.panel_replay_caps.support));
        seq_printf(m, ", Panel Replay Selective Update = %s",
                   str_yes_no(connector->dp.panel_replay_caps.su_support));
        seq_printf(m, ", Panel Replay DSC support = %s",
                   panel_replay_dsc_support_str(connector->dp.panel_replay_caps.dsc_support));
        if (connector->dp.panel_replay_caps.dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_SUPPORT)] &
            DP_PANEL_REPLAY_EARLY_TRANSPORT_SUPPORT)
                seq_printf(m, " (Early Transport)");
        seq_printf(m, "\n");
}

static void intel_psr_print_mode(struct intel_dp *intel_dp,
                                 struct seq_file *m)
{
        struct intel_psr *psr = &intel_dp->psr;
        const char *status, *mode, *region_et;

        if (psr->enabled)
                status = " enabled";
        else
                status = "disabled";

        if (psr->panel_replay_enabled && psr->sel_update_enabled)
                mode = "Panel Replay Selective Update";
        else if (psr->panel_replay_enabled)
                mode = "Panel Replay";
        else if (psr->sel_update_enabled)
                mode = "PSR2";
        else if (psr->enabled)
                mode = "PSR1";
        else
                mode = "";

        if (psr->su_region_et_enabled)
                region_et = " (Early Transport)";
        else
                region_et = "";

        seq_printf(m, "PSR mode: %s%s%s\n", mode, status, region_et);
        if (psr->no_psr_reason)
                seq_printf(m, "  %s\n", psr->no_psr_reason);
}

static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp,
                            struct intel_connector *connector)
{
        struct intel_display *display = to_intel_display(intel_dp);
        enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
        struct intel_psr *psr = &intel_dp->psr;
        struct ref_tracker *wakeref;
        bool enabled;
        u32 val, psr2_ctl;

        intel_psr_sink_capability(connector, m);

        if (!(connector->dp.psr_caps.support || connector->dp.panel_replay_caps.support))
                return 0;

        wakeref = intel_display_rpm_get(display);
        mutex_lock(&psr->lock);

        intel_psr_print_mode(intel_dp, m);

        if (!psr->enabled) {
                seq_printf(m, "PSR sink not reliable: %s\n",
                           str_yes_no(psr->sink_not_reliable));

                goto unlock;
        }

        if (psr->panel_replay_enabled) {
                val = intel_de_read(display, TRANS_DP2_CTL(cpu_transcoder));

                if (intel_dp_is_edp(intel_dp))
                        psr2_ctl = intel_de_read(display,
                                                 EDP_PSR2_CTL(display,
                                                              cpu_transcoder));

                enabled = val & TRANS_DP2_PANEL_REPLAY_ENABLE;
        } else if (psr->sel_update_enabled) {
                val = intel_de_read(display,
                                    EDP_PSR2_CTL(display, cpu_transcoder));
                enabled = val & EDP_PSR2_ENABLE;
        } else {
                val = intel_de_read(display, psr_ctl_reg(display, cpu_transcoder));
                enabled = val & EDP_PSR_ENABLE;
        }
        seq_printf(m, "Source PSR/PanelReplay ctl: %s [0x%08x]\n",
                   str_enabled_disabled(enabled), val);
        if (psr->panel_replay_enabled && intel_dp_is_edp(intel_dp))
                seq_printf(m, "PSR2_CTL: 0x%08x\n",
                           psr2_ctl);
        psr_source_status(intel_dp, m);
        seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
                   psr->busy_frontbuffer_bits);

        /*
         * The SKL+ performance counter is reset to 0 every time a DC state is
         * entered.
         */
        val = intel_de_read(display, psr_perf_cnt_reg(display, cpu_transcoder));
        seq_printf(m, "Performance counter: %u\n",
                   REG_FIELD_GET(EDP_PSR_PERF_CNT_MASK, val));

        if (psr->debug & I915_PSR_DEBUG_IRQ) {
                seq_printf(m, "Last attempted entry at: %lld\n",
                           psr->last_entry_attempt);
                seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
        }

        if (psr->sel_update_enabled) {
                u32 su_frames_val[3];
                int frame;

                /*
                 * PSR2_SU_STATUS register has been tied-off since DG2/ADL-P
                 * (it returns zeros only) and it has been removed on Xe2_LPD.
                 */
                if (DISPLAY_VER(display) < 13) {
                        /*
                         * Read all 3 registers beforehand to minimize crossing
                         * a frame boundary between register reads.
                         */
                        for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
                                val = intel_de_read(display,
                                                    PSR2_SU_STATUS(display, cpu_transcoder, frame));
                                su_frames_val[frame / 3] = val;
                        }

                        seq_puts(m, "Frame:\tPSR2 SU blocks:\n");

                        for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
                                u32 su_blocks;

                                su_blocks = su_frames_val[frame / 3] &
                                        PSR2_SU_STATUS_MASK(frame);
                                su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
                                seq_printf(m, "%d\t%d\n", frame, su_blocks);
                        }
                }

                seq_printf(m, "PSR2 selective fetch: %s\n",
                           str_enabled_disabled(psr->psr2_sel_fetch_enabled));
        }

unlock:
        mutex_unlock(&psr->lock);
        intel_display_rpm_put(display, wakeref);

        return 0;
}

static int i915_edp_psr_status_show(struct seq_file *m, void *data)
{
        struct intel_display *display = m->private;
        struct intel_dp *intel_dp = NULL;
        struct intel_encoder *encoder;

        if (!HAS_PSR(display))
                return -ENODEV;

        /* Find the first EDP which supports PSR */
        for_each_intel_encoder_with_psr(display->drm, encoder) {
                intel_dp = enc_to_intel_dp(encoder);
                break;
        }

        if (!intel_dp)
                return -ENODEV;

        return intel_psr_status(m, intel_dp, intel_dp->attached_connector);
}
DEFINE_SHOW_ATTRIBUTE(i915_edp_psr_status);

static int
i915_edp_psr_debug_set(void *data, u64 val)
{
        struct intel_display *display = data;
        struct intel_encoder *encoder;
        int ret = -ENODEV;

        if (!HAS_PSR(display))
                return ret;

        for_each_intel_encoder_with_psr(display->drm, encoder) {
                struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

                drm_dbg_kms(display->drm, "Setting PSR debug to %llx\n", val);

                // TODO: split to each transcoder's PSR debug state
                with_intel_display_rpm(display)
                        ret = intel_psr_debug_set(intel_dp, val);
        }

        return ret;
}

static int
i915_edp_psr_debug_get(void *data, u64 *val)
{
        struct intel_display *display = data;
        struct intel_encoder *encoder;

        if (!HAS_PSR(display))
                return -ENODEV;

        for_each_intel_encoder_with_psr(display->drm, encoder) {
                struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

                /* TODO: split into per-transcoder PSR debug state */
                *val = READ_ONCE(intel_dp->psr.debug);
                return 0;
        }

        return -ENODEV;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
                        i915_edp_psr_debug_get, i915_edp_psr_debug_set,
                        "%llu\n");

void intel_psr_debugfs_register(struct intel_display *display)
{
        struct dentry *debugfs_root = display->drm->debugfs_root;

        debugfs_create_file("i915_edp_psr_debug", 0644, debugfs_root,
                            display, &i915_edp_psr_debug_fops);

        debugfs_create_file("i915_edp_psr_status", 0444, debugfs_root,
                            display, &i915_edp_psr_status_fops);
}

static const char *psr_mode_str(struct intel_dp *intel_dp)
{
        if (intel_dp->psr.panel_replay_enabled)
                return "PANEL-REPLAY";
        else if (intel_dp->psr.enabled)
                return "PSR";

        return "unknown";
}

static int i915_psr_sink_status_show(struct seq_file *m, void *data)
{
        struct intel_connector *connector = m->private;
        struct intel_dp *intel_dp = intel_attached_dp(connector);
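        /* Sink self-refresh states from the sink's PSR status DPCD register */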
        static const char * const sink_status[] = {
                "inactive",
                "transition to active, capture and display",
                "active, display from RFB",
                "active, capture and display on sink device timings",
                "transition to inactive, capture and display, timing re-sync",
                "reserved",
                "reserved",
                "sink internal error",
        };
        const char *str;
        int ret;
        u8 status, error_status;

        if (!(CAN_PSR(intel_dp) || CAN_PANEL_REPLAY(intel_dp))) {
                seq_puts(m, "PSR/Panel-Replay Unsupported\n");
                return -ENODEV;
        }

        if (connector->base.status != connector_status_connected)
                return -ENODEV;

        ret = psr_get_status_and_error_status(intel_dp, &status, &error_status);
        if (ret)
                return ret;

        status &= DP_PSR_SINK_STATE_MASK;
        if (status < ARRAY_SIZE(sink_status))
                str = sink_status[status];
        else
                str = "unknown";

        seq_printf(m, "Sink %s status: 0x%x [%s]\n", psr_mode_str(intel_dp), status, str);

        seq_printf(m, "Sink %s error status: 0x%x", psr_mode_str(intel_dp), error_status);

        if (error_status & (DP_PSR_RFB_STORAGE_ERROR |
                            DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
                            DP_PSR_LINK_CRC_ERROR))
                seq_puts(m, ":\n");
        else
                seq_puts(m, "\n");
        if (error_status & DP_PSR_RFB_STORAGE_ERROR)
                seq_printf(m, "\t%s RFB storage error\n", psr_mode_str(intel_dp));
        if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
                seq_printf(m, "\t%s VSC SDP uncorrectable error\n", psr_mode_str(intel_dp));
        if (error_status & DP_PSR_LINK_CRC_ERROR)
                seq_printf(m, "\t%s Link CRC error\n", psr_mode_str(intel_dp));

        return ret;
}
DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);

static int i915_psr_status_show(struct seq_file *m, void *data)
{
        struct intel_connector *connector = m->private;
        struct intel_dp *intel_dp = intel_attached_dp(connector);

        return intel_psr_status(m, intel_dp, connector);
}
DEFINE_SHOW_ATTRIBUTE(i915_psr_status);

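/* Create the per-connector PSR debugfs files for eDP and DP connectors */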
void intel_psr_connector_debugfs_add(struct intel_connector *connector)
{
        struct intel_display *display = to_intel_display(connector);
        struct dentry *root = connector->base.debugfs_entry;

        if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP &&
            connector->base.connector_type != DRM_MODE_CONNECTOR_DisplayPort)
                return;

        debugfs_create_file("i915_psr_sink_status", 0444, root,
                            connector, &i915_psr_sink_status_fops);

        if (HAS_PSR(display) || HAS_DP20(display))
                debugfs_create_file("i915_psr_status", 0444, root,
                                    connector, &i915_psr_status_fops);
}

bool intel_psr_needs_alpm(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state)
{
        /*
         * eDP Panel Replay always uses ALPM.
         * PSR2 uses ALPM, but PSR1 doesn't.
         */
        return intel_dp_is_edp(intel_dp) && (crtc_state->has_sel_update ||
                                             crtc_state->has_panel_replay);
}

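/* AUX-less ALPM is only used with eDP Panel Replay */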
bool intel_psr_needs_alpm_aux_less(struct intel_dp *intel_dp,
                                   const struct intel_crtc_state *crtc_state)
{
        return intel_dp_is_edp(intel_dp) && crtc_state->has_panel_replay;
}

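/*
 * Late PSR/Panel Replay config computation: check that the ALPM wake lines
 * fit into the available vblank and drop the PSR/PR features that cannot be
 * supported, apply Wa_18037818876 and record the non-PSR pipes.
 */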
void intel_psr_compute_config_late(struct intel_dp *intel_dp,
                                   struct intel_crtc_state *crtc_state)
{
        struct intel_display *display = to_intel_display(intel_dp);
        int vblank = intel_crtc_vblank_length(crtc_state);
        int wake_lines;

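        /*
         * Before display version 20 the number of wake lines is taken from
         * the PSR2 block count computed from the IO and fast wake lines;
         * from version 20 on the IO wake lines are used directly.
         */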
        if (intel_psr_needs_alpm_aux_less(intel_dp, crtc_state))
                wake_lines = crtc_state->alpm_state.aux_less_wake_lines;
        else if (intel_psr_needs_alpm(intel_dp, crtc_state))
                wake_lines = DISPLAY_VER(display) < 20 ?
                             psr2_block_count_lines(crtc_state->alpm_state.io_wake_lines,
                                                    crtc_state->alpm_state.fast_wake_lines) :
                             crtc_state->alpm_state.io_wake_lines;
        else
                wake_lines = 0;

        /*
         * Disable the PSR features if the wake lines exceed the available
         * vblank. Note that the set context latency (SCL) was computed based
         * on these PSR features; it is deliberately not reset when the
         * features are disabled, to avoid changing the vblank start at this
         * stage.
         */
        if (wake_lines && !_wake_lines_fit_into_vblank(crtc_state, vblank, wake_lines)) {
                drm_dbg_kms(display->drm,
                            "Adjusting PSR/PR mode: vblank too short for wake lines = %d\n",
                            wake_lines);

                if (crtc_state->has_panel_replay) {
                        crtc_state->has_panel_replay = false;
                        /*
                         * TODO: Fall back to PSR/PSR2.
                         * Since Panel Replay cannot be supported here, we could fall back
                         * to PSR/PSR2. This would require re-running compute_config for
                         * PSR/PSR2 with a check against the actual guardband instead of
                         * vblank_length.
                         */
                        crtc_state->has_psr = false;
                }

                crtc_state->has_sel_update = false;
                crtc_state->enable_psr2_su_region_et = false;
                crtc_state->enable_psr2_sel_fetch = false;
        }

        /* Wa_18037818876 */
        if (intel_psr_needs_wa_18037818876(intel_dp, crtc_state)) {
                crtc_state->has_psr = false;
                drm_dbg_kms(display->drm,
                            "PSR disabled to workaround PSR FSM hang issue\n");
        }

        intel_psr_set_non_psr_pipes(intel_dp, crtc_state);
}

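/*
 * Minimum guardband (in lines) required by PSR/Panel Replay: the ALPM wake
 * lines plus the set context latency, plus one extra line when the PSR2 SDP
 * must be transmitted prior to the scanline. Returns 0 when PSR does not
 * constrain the guardband.
 */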
int intel_psr_min_guardband(struct intel_crtc_state *crtc_state)
{
        struct intel_display *display = to_intel_display(crtc_state);
        int psr_min_guardband;
        int wake_lines;

        if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
                return 0;

        if (crtc_state->has_panel_replay)
                wake_lines = crtc_state->alpm_state.aux_less_wake_lines;
        else if (crtc_state->has_sel_update)
                wake_lines = DISPLAY_VER(display) < 20 ?
                             psr2_block_count_lines(crtc_state->alpm_state.io_wake_lines,
                                                    crtc_state->alpm_state.fast_wake_lines) :
                             crtc_state->alpm_state.io_wake_lines;
        else
                return 0;

        psr_min_guardband = wake_lines + crtc_state->set_context_latency;

        if (crtc_state->req_psr2_sdp_prior_scanline)
                psr_min_guardband++;

        return psr_min_guardband;
}