#include <drm/display/drm_dp_tunnel.h>
#include <drm/drm_print.h>
#include "intel_atomic.h"
#include "intel_display_core.h"
#include "intel_display_limits.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_link_training.h"
#include "intel_dp_mst.h"
#include "intel_dp_tunnel.h"
#include "intel_link_bw.h"
/*
 * Tunnel state inherited from a previous commit/boot: one tunnel reference
 * per pipe, recorded while computing a new atomic state so the BW already
 * allocated for still-active streams can be accounted for.
 */
struct intel_dp_tunnel_inherited_state {
	/* ref[pipe].tunnel is NULL if the pipe has no inherited tunnel */
	struct drm_dp_tunnel_ref ref[I915_MAX_PIPES];
};
/**
 * intel_dp_tunnel_disconnect - Disconnect a DP tunnel from a port
 * @intel_dp: DP port the tunnel is connected to
 *
 * Drop the port's reference to its tunnel object and clear the pointer to
 * it. NOTE(review): callers reach this with no tunnel detected as well, so
 * this relies on drm_dp_tunnel_destroy() accepting a NULL tunnel - confirm
 * in the DRM tunnel helper.
 */
void intel_dp_tunnel_disconnect(struct intel_dp *intel_dp)
{
	struct drm_dp_tunnel *tunnel = intel_dp->tunnel;

	intel_dp->tunnel = NULL;
	drm_dp_tunnel_destroy(tunnel);
}
/**
 * intel_dp_tunnel_destroy - Destroy a port's DP tunnel
 * @intel_dp: DP port the tunnel is connected to
 *
 * Disable the tunnel's BW allocation mode - if it was enabled - and drop
 * the port's reference to the tunnel object.
 */
void intel_dp_tunnel_destroy(struct intel_dp *intel_dp)
{
	if (intel_dp_tunnel_bw_alloc_is_enabled(intel_dp))
		drm_dp_tunnel_disable_bw_alloc(intel_dp->tunnel);

	intel_dp_tunnel_disconnect(intel_dp);
}
/* Convert a BW value in kBytes/sec units to MBits/sec, rounding up. */
static int kbytes_to_mbits(int kbytes)
{
	int kbits = kbytes * 8;

	return DIV_ROUND_UP(kbits, 1000);
}
/*
 * Return the link data rate usable with the current max common link
 * rate/lane count. *@below_dprx_bw is set to whether this is below the max
 * data rate the sink's DPRX could carry at those link parameters (e.g.
 * because the tunnel BW limits it).
 */
static int get_current_link_bw(struct intel_dp *intel_dp,
			       bool *below_dprx_bw)
{
	int max_rate = intel_dp_max_common_rate(intel_dp);
	int max_lanes = intel_dp_max_common_lane_count(intel_dp);
	int link_bw = intel_dp_max_link_data_rate(intel_dp, max_rate, max_lanes);

	*below_dprx_bw = link_bw < drm_dp_max_dprx_data_rate(max_rate, max_lanes);

	return link_bw;
}
/*
 * Update the state of an already detected tunnel, re-reading the sink caps
 * if the tunnel state changed (since a tunnel BW change can also change the
 * usable link parameters).
 *
 * Returns:
 *   < 0 : the state update failed
 *     0 : no change users of the tunnel need to be notified about
 *     1 : the BW available through the tunnel changed in a way users must
 *         be notified about
 */
static int update_tunnel_state(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	bool old_bw_below_dprx;
	bool new_bw_below_dprx;
	int old_bw;
	int new_bw;
	int ret;

	old_bw = get_current_link_bw(intel_dp, &old_bw_below_dprx);

	ret = drm_dp_tunnel_update_state(intel_dp->tunnel);
	if (ret < 0) {
		drm_dbg_kms(display->drm,
			    "[DPTUN %s][ENCODER:%d:%s] State update failed (err %pe)\n",
			    drm_dp_tunnel_name(intel_dp->tunnel),
			    encoder->base.base.id, encoder->base.name,
			    ERR_PTR(ret));

		return ret;
	}

	/*
	 * ret == 0: the tunnel state didn't change. Without BW allocation
	 * mode there is also no tunnel BW limit that could have changed.
	 * NOTE(review): this relies on drm_dp_tunnel_update_state()
	 * returning a positive value on change - confirm in the DRM helper.
	 */
	if (ret == 0 ||
	    !drm_dp_tunnel_bw_alloc_is_enabled(intel_dp->tunnel))
		return 0;

	intel_dp_update_sink_caps(intel_dp);

	new_bw = get_current_link_bw(intel_dp, &new_bw_below_dprx);

	/* No notification needed if the link BW stayed at the DPRX maximum. */
	if (old_bw_below_dprx == new_bw_below_dprx &&
	    !new_bw_below_dprx)
		return 0;

	drm_dbg_kms(display->drm,
		    "[DPTUN %s][ENCODER:%d:%s] Notify users about BW change: %d -> %d\n",
		    drm_dp_tunnel_name(intel_dp->tunnel),
		    encoder->base.base.id, encoder->base.name,
		    kbytes_to_mbits(old_bw), kbytes_to_mbits(new_bw));

	return 1;
}
/*
 * Allocate the BW required by all the streams (the pipes in @pipe_mask)
 * already active through @intel_dp's tunnel, then update the tunnel state.
 *
 * Returns a negative error code on failure, otherwise the return value of
 * update_tunnel_state() (1 meaning users must be notified of a BW change).
 */
static int allocate_initial_tunnel_bw_for_pipes(struct intel_dp *intel_dp, u8 pipe_mask)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	struct intel_crtc *crtc;
	int tunnel_bw = 0;
	int err;

	/* Sum up the BW required by each stream driven through the tunnel. */
	for_each_intel_crtc_in_pipe_mask(display->drm, crtc, pipe_mask) {
		const struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		int stream_bw = intel_dp_config_required_rate(crtc_state);

		tunnel_bw += stream_bw;

		drm_dbg_kms(display->drm,
			    "[DPTUN %s][ENCODER:%d:%s][CRTC:%d:%s] Initial BW for stream %d: %d/%d Mb/s\n",
			    drm_dp_tunnel_name(intel_dp->tunnel),
			    encoder->base.base.id, encoder->base.name,
			    crtc->base.base.id, crtc->base.name,
			    crtc->pipe,
			    kbytes_to_mbits(stream_bw), kbytes_to_mbits(tunnel_bw));
	}

	err = drm_dp_tunnel_alloc_bw(intel_dp->tunnel, tunnel_bw);
	if (err) {
		drm_dbg_kms(display->drm,
			    "[DPTUN %s][ENCODER:%d:%s] Initial BW allocation failed (err %pe)\n",
			    drm_dp_tunnel_name(intel_dp->tunnel),
			    encoder->base.base.id, encoder->base.name,
			    ERR_PTR(err));

		return err;
	}

	return update_tunnel_state(intel_dp);
}
/*
 * Allocate the initial tunnel BW for all the pipes currently active on
 * @intel_dp's port. See allocate_initial_tunnel_bw_for_pipes() for the
 * return value semantics.
 */
static int allocate_initial_tunnel_bw(struct intel_dp *intel_dp,
				      struct drm_modeset_acquire_ctx *ctx)
{
	u8 pipe_mask;
	int err;

	err = intel_dp_get_active_pipes(intel_dp, ctx, &pipe_mask);
	if (err)
		return err;

	return allocate_initial_tunnel_bw_for_pipes(intel_dp, pipe_mask);
}
/*
 * Detect a new tunnel on @intel_dp's AUX channel and try to enable its BW
 * allocation mode, allocating the BW needed by any already active streams.
 *
 * Returns:
 *   < 0 : detection failed, or the initial BW allocation failed (in which
 *         case the just-detected tunnel is destroyed again)
 *     0 : no tunnel, or the tunnel doesn't support / failed to enable BW
 *         allocation mode (the tunnel object is kept in the latter cases)
 *     1 : tunnel detected and users must be notified about a BW change
 */
static int detect_new_tunnel(struct intel_dp *intel_dp, struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	struct drm_dp_tunnel *tunnel;
	int ret;

	tunnel = drm_dp_tunnel_detect(display->dp_tunnel_mgr,
				      &intel_dp->aux);
	if (IS_ERR(tunnel))
		return PTR_ERR(tunnel);

	intel_dp->tunnel = tunnel;

	ret = drm_dp_tunnel_enable_bw_alloc(intel_dp->tunnel);
	if (ret) {
		/* BW allocation mode is optional; not an error for the port. */
		if (ret == -EOPNOTSUPP)
			return 0;

		drm_dbg_kms(display->drm,
			    "[DPTUN %s][ENCODER:%d:%s] Failed to enable BW allocation mode (ret %pe)\n",
			    drm_dp_tunnel_name(intel_dp->tunnel),
			    encoder->base.base.id, encoder->base.name,
			    ERR_PTR(ret));

		/* Keep the tunnel, but continue without BW allocation mode. */
		return 0;
	}

	ret = allocate_initial_tunnel_bw(intel_dp, ctx);
	if (ret < 0)
		intel_dp_tunnel_destroy(intel_dp);

	return ret;
}
/**
 * intel_dp_tunnel_detect - Detect a DP tunnel on a port
 * @intel_dp: DP port to detect the tunnel on
 * @ctx: modeset acquire context, used to look up the port's active pipes
 *
 * Detect a DP tunnel on @intel_dp, either updating the state of an already
 * detected tunnel or - if that fails or no tunnel was detected yet -
 * detecting a new one. eDP ports never have a tunnel.
 *
 * Returns 0 if there is no change users need to be notified about, 1 if the
 * tunnel BW changed in a notifiable way, or a negative error code.
 */
int intel_dp_tunnel_detect(struct intel_dp *intel_dp, struct drm_modeset_acquire_ctx *ctx)
{
	int ret;

	if (intel_dp_is_edp(intel_dp))
		return 0;

	if (intel_dp->tunnel) {
		ret = update_tunnel_state(intel_dp);
		if (ret >= 0)
			return ret;

		/* The state update failed; drop the tunnel and re-detect. */
		intel_dp_tunnel_destroy(intel_dp);
	}

	return detect_new_tunnel(intel_dp, ctx);
}
/**
 * intel_dp_tunnel_bw_alloc_is_enabled - Query if BW allocation mode is enabled
 * @intel_dp: DP port the tunnel is connected to
 *
 * Return %true if the BW allocation mode is enabled on @intel_dp's tunnel.
 * NOTE(review): callers may get here with no tunnel detected; this relies on
 * drm_dp_tunnel_bw_alloc_is_enabled() accepting a NULL tunnel - confirm in
 * the DRM tunnel helper.
 */
bool intel_dp_tunnel_bw_alloc_is_enabled(struct intel_dp *intel_dp)
{
	return drm_dp_tunnel_bw_alloc_is_enabled(intel_dp->tunnel);
}
/**
 * intel_dp_tunnel_suspend - Suspend a DP tunnel
 * @intel_dp: DP port the tunnel is connected to
 *
 * Disable the tunnel's BW allocation mode ahead of a suspend, marking the
 * tunnel as suspended so intel_dp_tunnel_resume() knows to re-enable it.
 * No-op if BW allocation mode isn't enabled.
 */
void intel_dp_tunnel_suspend(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_connector *connector = intel_dp->attached_connector;
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;

	if (!intel_dp_tunnel_bw_alloc_is_enabled(intel_dp))
		return;

	drm_dbg_kms(display->drm,
		    "[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s] Suspend\n",
		    drm_dp_tunnel_name(intel_dp->tunnel),
		    connector->base.base.id, connector->base.name,
		    encoder->base.base.id, encoder->base.name);

	drm_dp_tunnel_disable_bw_alloc(intel_dp->tunnel);

	intel_dp->tunnel_suspended = true;
}
/**
 * intel_dp_tunnel_resume - Resume a DP tunnel
 * @intel_dp: DP port the tunnel is connected to
 * @crtc_state: CRTC state of the stream active through the port, or NULL
 * @dpcd_updated: whether the caller already re-read the sink's DPRX caps
 *
 * Re-enable the BW allocation mode of a tunnel suspended via
 * intel_dp_tunnel_suspend() and re-allocate the BW of the stream - if any -
 * that is active through the port. On failure the tunnel is only flagged
 * here (with an I/O error in the DPCD-read case); per the log message it is
 * expected to be dropped/rejected by a later detection.
 */
void intel_dp_tunnel_resume(struct intel_dp *intel_dp,
			    const struct intel_crtc_state *crtc_state,
			    bool dpcd_updated)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_connector *connector = intel_dp->attached_connector;
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	u8 dpcd[DP_RECEIVER_CAP_SIZE];
	u8 pipe_mask;
	int err = 0;

	if (!intel_dp->tunnel_suspended)
		return;

	intel_dp->tunnel_suspended = false;

	drm_dbg_kms(display->drm,
		    "[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s] Resume\n",
		    drm_dp_tunnel_name(intel_dp->tunnel),
		    connector->base.base.id, connector->base.name,
		    encoder->base.base.id, encoder->base.name);

	/*
	 * Verify the sink is still accessible, unless the caller already
	 * refreshed the DPRX caps; only the success of the read is used
	 * here, the caps themselves are discarded.
	 */
	if (!dpcd_updated) {
		err = intel_dp_read_dprx_caps(intel_dp, dpcd);

		if (err) {
			drm_dp_tunnel_set_io_error(intel_dp->tunnel);
			goto out_err;
		}
	}

	err = drm_dp_tunnel_enable_bw_alloc(intel_dp->tunnel);
	if (err)
		goto out_err;

	/* Re-allocate BW for the (single) stream active through the port. */
	pipe_mask = 0;
	if (crtc_state) {
		struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

		pipe_mask |= BIT(crtc->pipe);
	}

	err = allocate_initial_tunnel_bw_for_pipes(intel_dp, pipe_mask);
	if (err < 0)
		goto out_err;

	return;

out_err:
	drm_dbg_kms(display->drm,
		    "[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s] Tunnel can't be resumed, will drop and reject it (err %pe)\n",
		    drm_dp_tunnel_name(intel_dp->tunnel),
		    connector->base.base.id, connector->base.name,
		    encoder->base.base.id, encoder->base.name,
		    ERR_PTR(err));
}
static struct drm_dp_tunnel *
get_inherited_tunnel(struct intel_atomic_state *state, struct intel_crtc *crtc)
{
if (!state->inherited_dp_tunnels)
return NULL;
return state->inherited_dp_tunnels->ref[crtc->pipe].tunnel;
}
/*
 * Record @tunnel as the inherited tunnel for @crtc's pipe, taking a
 * reference on it. The per-state container is allocated lazily on first
 * use. A pipe can only inherit a single tunnel, hence the WARN if a
 * different tunnel was already recorded for it.
 *
 * Returns 0 on success or -ENOMEM if the container allocation fails.
 */
static int
add_inherited_tunnel(struct intel_atomic_state *state,
		     struct drm_dp_tunnel *tunnel,
		     struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(state);
	struct drm_dp_tunnel *old_tunnel;

	old_tunnel = get_inherited_tunnel(state, crtc);
	if (old_tunnel) {
		drm_WARN_ON(display->drm, old_tunnel != tunnel);
		return 0;
	}

	if (!state->inherited_dp_tunnels) {
		state->inherited_dp_tunnels = kzalloc_obj(*state->inherited_dp_tunnels);
		if (!state->inherited_dp_tunnels)
			return -ENOMEM;
	}

	drm_dp_tunnel_ref_get(tunnel, &state->inherited_dp_tunnels->ref[crtc->pipe]);

	return 0;
}
/*
 * Add the current tunnel as inherited state for the pipe in
 * @old_conn_state, if that pipe is active but its old CRTC state doesn't
 * reference the current tunnel yet - presumably because the tunnel was
 * (re)detected after the CRTC state was last computed (TODO: confirm the
 * exact scenarios against the commit history).
 *
 * Returns 0 on success or a negative error code.
 */
static int check_inherited_tunnel_state(struct intel_atomic_state *state,
					struct intel_dp *intel_dp,
					const struct intel_digital_connector_state *old_conn_state)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	struct intel_connector *connector =
		to_intel_connector(old_conn_state->base.connector);
	struct intel_crtc *old_crtc;
	const struct intel_crtc_state *old_crtc_state;

	if (!intel_dp_tunnel_bw_alloc_is_enabled(intel_dp))
		return 0;

	if (!old_conn_state->base.crtc)
		return 0;

	old_crtc = to_intel_crtc(old_conn_state->base.crtc);
	old_crtc_state = intel_atomic_get_old_crtc_state(state, old_crtc);

	/* Nothing to inherit if the CRTC state already tracks this tunnel. */
	if (!old_crtc_state->hw.active ||
	    old_crtc_state->dp_tunnel_ref.tunnel == intel_dp->tunnel)
		return 0;

	drm_dbg_kms(display->drm,
		    "[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s][CRTC:%d:%s] Adding state for inherited tunnel %p\n",
		    drm_dp_tunnel_name(intel_dp->tunnel),
		    connector->base.base.id, connector->base.name,
		    encoder->base.base.id, encoder->base.name,
		    old_crtc->base.base.id, old_crtc->base.name,
		    intel_dp->tunnel);

	return add_inherited_tunnel(state, intel_dp->tunnel, old_crtc);
}
void intel_dp_tunnel_atomic_cleanup_inherited_state(struct intel_atomic_state *state)
{
struct intel_display *display = to_intel_display(state);
enum pipe pipe;
if (!state->inherited_dp_tunnels)
return;
for_each_pipe(display, pipe)
if (state->inherited_dp_tunnels->ref[pipe].tunnel)
drm_dp_tunnel_ref_put(&state->inherited_dp_tunnels->ref[pipe]);
kfree(state->inherited_dp_tunnels);
state->inherited_dp_tunnels = NULL;
}
/*
 * Add to the atomic state all the pipes with a stream through any tunnel in
 * @tunnel's group, so the BW of the whole group can be recomputed together.
 *
 * Returns 0 on success or a negative error code.
 */
static int intel_dp_tunnel_atomic_add_group_state(struct intel_atomic_state *state,
						  struct drm_dp_tunnel *tunnel)
{
	struct intel_display *display = to_intel_display(state);
	u32 pipe_mask;
	int err;

	err = drm_dp_tunnel_atomic_get_group_streams_in_state(&state->base,
							      tunnel, &pipe_mask);
	if (err)
		return err;

	/* The stream IDs are expected to map 1:1 to valid pipes. */
	drm_WARN_ON(display->drm, pipe_mask & ~((1 << I915_MAX_PIPES) - 1));

	return intel_modeset_pipes_in_mask_early(state, "DPTUN", pipe_mask);
}
/**
 * intel_dp_tunnel_atomic_add_state_for_crtc - Add the DP tunnel state for a CRTC
 * @state: Atomic state
 * @crtc: CRTC to add the tunnel state for
 *
 * Add to @state the atomic state of the tunnel - if any - driving a stream
 * through @crtc.
 *
 * Returns 0 on success or a negative error code.
 */
int intel_dp_tunnel_atomic_add_state_for_crtc(struct intel_atomic_state *state,
					      struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	const struct drm_dp_tunnel_state *tunnel_state;
	struct drm_dp_tunnel *tunnel = new_crtc_state->dp_tunnel_ref.tunnel;

	if (!tunnel)
		return 0;

	tunnel_state = drm_dp_tunnel_atomic_get_state(&state->base, tunnel);
	if (IS_ERR(tunnel_state))
		return PTR_ERR(tunnel_state);

	return 0;
}
/*
 * If @crtc drives a stream through a tunnel, add the atomic state for all
 * pipes in that tunnel's group. No-op for CRTCs without a tunnel.
 *
 * Returns 0 on success or a negative error code.
 */
static int check_group_state(struct intel_atomic_state *state,
			     struct intel_dp *intel_dp,
			     struct intel_connector *connector,
			     struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	const struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	if (!crtc_state->dp_tunnel_ref.tunnel)
		return 0;

	drm_dbg_kms(display->drm,
		    "[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s][CRTC:%d:%s] Adding group state for tunnel %p\n",
		    drm_dp_tunnel_name(intel_dp->tunnel),
		    connector->base.base.id, connector->base.name,
		    encoder->base.base.id, encoder->base.name,
		    crtc->base.base.id, crtc->base.name,
		    crtc_state->dp_tunnel_ref.tunnel);

	return intel_dp_tunnel_atomic_add_group_state(state, crtc_state->dp_tunnel_ref.tunnel);
}
/**
 * intel_dp_tunnel_atomic_check_state - Check a connector's DP tunnel state
 * @state: Atomic state
 * @intel_dp: DP port the connector belongs to
 * @connector: Connector to check
 *
 * Add the tunnel group state for both the connector's old and new CRTC (if
 * they differ), and record any tunnel the old CRTC inherited from a
 * previous commit.
 *
 * Returns 0 on success or a negative error code.
 */
int intel_dp_tunnel_atomic_check_state(struct intel_atomic_state *state,
				       struct intel_dp *intel_dp,
				       struct intel_connector *connector)
{
	const struct intel_digital_connector_state *old_conn_state =
		intel_atomic_get_old_connector_state(state, connector);
	const struct intel_digital_connector_state *new_conn_state =
		intel_atomic_get_new_connector_state(state, connector);
	int err;

	if (old_conn_state->base.crtc) {
		err = check_group_state(state, intel_dp, connector,
					to_intel_crtc(old_conn_state->base.crtc));
		if (err)
			return err;
	}

	if (new_conn_state->base.crtc &&
	    new_conn_state->base.crtc != old_conn_state->base.crtc) {
		err = check_group_state(state, intel_dp, connector,
					to_intel_crtc(new_conn_state->base.crtc));
		if (err)
			return err;
	}

	return check_inherited_tunnel_state(state, intel_dp, old_conn_state);
}
/**
 * intel_dp_tunnel_atomic_compute_stream_bw - Compute a stream's tunnel BW requirement
 * @state: Atomic state
 * @intel_dp: DP port the stream goes through
 * @connector: Connector of the stream
 * @crtc_state: CRTC state of the stream
 *
 * Compute the BW required by @crtc_state's stream, record it in the
 * tunnel's atomic state and take a reference on the tunnel from
 * @crtc_state (dropped again by intel_dp_tunnel_atomic_clear_stream_bw()).
 * No-op if BW allocation mode isn't enabled for the tunnel.
 *
 * Returns 0 on success or a negative error code.
 */
int intel_dp_tunnel_atomic_compute_stream_bw(struct intel_atomic_state *state,
					     struct intel_dp *intel_dp,
					     const struct intel_connector *connector,
					     struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	int required_rate = intel_dp_config_required_rate(crtc_state);
	int ret;

	if (!intel_dp_tunnel_bw_alloc_is_enabled(intel_dp))
		return 0;

	drm_dbg_kms(display->drm,
		    "[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s][CRTC:%d:%s] Stream %d required BW %d Mb/s\n",
		    drm_dp_tunnel_name(intel_dp->tunnel),
		    connector->base.base.id, connector->base.name,
		    encoder->base.base.id, encoder->base.name,
		    crtc->base.base.id, crtc->base.name,
		    crtc->pipe,
		    kbytes_to_mbits(required_rate));

	ret = drm_dp_tunnel_atomic_set_stream_bw(&state->base, intel_dp->tunnel,
						 crtc->pipe, required_rate);
	if (ret < 0)
		return ret;

	drm_dp_tunnel_ref_get(intel_dp->tunnel,
			      &crtc_state->dp_tunnel_ref);

	return 0;
}
/**
 * intel_dp_tunnel_atomic_clear_stream_bw - Clear a stream's tunnel BW requirement
 * @state: Atomic state
 * @crtc_state: CRTC state of the stream
 *
 * Zero the stream BW recorded for @crtc_state's pipe in its tunnel's atomic
 * state and drop the CRTC state's reference on the tunnel. No-op if the
 * CRTC state tracks no tunnel.
 *
 * Returns 0 on success or a negative error code.
 */
int intel_dp_tunnel_atomic_clear_stream_bw(struct intel_atomic_state *state,
					   struct intel_crtc_state *crtc_state)
{
	struct drm_dp_tunnel *tunnel = crtc_state->dp_tunnel_ref.tunnel;
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	int ret;

	if (!tunnel)
		return 0;

	ret = drm_dp_tunnel_atomic_set_stream_bw(&state->base, tunnel,
						 crtc->pipe, 0);
	if (ret)
		return ret;

	drm_dp_tunnel_ref_put(&crtc_state->dp_tunnel_ref);

	return 0;
}
/**
 * intel_dp_tunnel_atomic_check_link - Check the DP tunnel BW requirements in an atomic state
 * @state: Atomic state
 * @limits: link BW limits to adjust on a BW overcommit
 *
 * Check whether the BW required by all tunnels in @state fits; on an
 * overcommit (-ENOSPC) try reducing the bpp of the affected streams.
 *
 * Returns:
 *   0       : all tunnel BW requirements are met
 *   -EAGAIN : bpp of some stream was reduced, the state must be recomputed
 *   other   : negative error code (incl. bpp couldn't be reduced further)
 */
int intel_dp_tunnel_atomic_check_link(struct intel_atomic_state *state,
				      struct intel_link_bw_limits *limits)
{
	u32 failed_stream_mask;
	int err;

	err = drm_dp_tunnel_atomic_check_stream_bws(&state->base,
						    &failed_stream_mask);
	/* Pass through success (0) and any error other than a BW overcommit. */
	if (err != -ENOSPC)
		return err;

	err = intel_link_bw_reduce_bpp(state, limits,
				       failed_stream_mask, "DP tunnel link BW");

	return err ? : -EAGAIN;
}
/*
 * First phase of committing the new tunnel BWs: shrink the allocation of
 * every tunnel whose required BW decreased, freeing up BW in its group
 * before atomic_increase_bw() grows the others. For pipes with no tunnel in
 * their old CRTC state the tunnel is looked up from the inherited state.
 *
 * NOTE(review): the drm_dp_tunnel_alloc_bw() return value is ignored here,
 * presumably because decreasing an allocation is best-effort - confirm
 * against the DRM tunnel helper.
 */
static void atomic_decrease_bw(struct intel_atomic_state *state)
{
	struct intel_crtc *crtc;
	const struct intel_crtc_state *old_crtc_state;
	const struct intel_crtc_state *new_crtc_state;
	int i;

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		const struct drm_dp_tunnel_state *new_tunnel_state;
		struct drm_dp_tunnel *tunnel;
		int old_bw;
		int new_bw;

		if (!intel_crtc_needs_modeset(new_crtc_state))
			continue;

		tunnel = get_inherited_tunnel(state, crtc);
		if (!tunnel)
			tunnel = old_crtc_state->dp_tunnel_ref.tunnel;

		if (!tunnel)
			continue;

		old_bw = drm_dp_tunnel_get_allocated_bw(tunnel);
		new_tunnel_state = drm_dp_tunnel_atomic_get_new_state(&state->base, tunnel);
		new_bw = drm_dp_tunnel_atomic_get_required_bw(new_tunnel_state);

		/* Only decreases are handled here; increases happen later. */
		if (new_bw >= old_bw)
			continue;

		drm_dp_tunnel_alloc_bw(tunnel, new_bw);
	}
}
/*
 * BW allocation failed for a stream: if the sink is still connected, queue
 * a modeset retry so the link gets reconfigured with parameters the tunnel
 * can carry. If the sink got disconnected, presumably the pending hotplug
 * handling takes care of things instead - hence the early return.
 */
static void queue_retry_work(struct intel_atomic_state *state,
			     struct drm_dp_tunnel *tunnel,
			     const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_encoder *encoder;

	encoder = intel_get_crtc_new_encoder(state, crtc_state);

	if (!intel_digital_port_connected(encoder))
		return;

	drm_dbg_kms(display->drm,
		    "[DPTUN %s][ENCODER:%d:%s] BW allocation failed on a connected sink\n",
		    drm_dp_tunnel_name(tunnel),
		    encoder->base.base.id,
		    encoder->base.name);

	intel_dp_queue_modeset_retry_for_link(state, encoder, crtc_state);
}
/*
 * Second phase of committing the new tunnel BWs: allocate the BW required
 * by each modeset pipe's tunnel (after atomic_decrease_bw() freed up BW in
 * the groups). A failed allocation on a still-connected sink triggers a
 * modeset retry via queue_retry_work().
 */
static void atomic_increase_bw(struct intel_atomic_state *state)
{
	struct intel_crtc *crtc;
	const struct intel_crtc_state *crtc_state;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		struct drm_dp_tunnel_state *tunnel_state;
		struct drm_dp_tunnel *tunnel = crtc_state->dp_tunnel_ref.tunnel;
		int bw;

		if (!intel_crtc_needs_modeset(crtc_state))
			continue;

		if (!tunnel)
			continue;

		tunnel_state = drm_dp_tunnel_atomic_get_new_state(&state->base, tunnel);

		bw = drm_dp_tunnel_atomic_get_required_bw(tunnel_state);

		if (drm_dp_tunnel_alloc_bw(tunnel, bw) != 0)
			queue_retry_work(state, tunnel, crtc_state);
	}
}
/**
 * intel_dp_tunnel_atomic_alloc_bw - Allocate the BW for all modeset tunnels
 * @state: Atomic state
 *
 * Commit the new BW requirements of all tunnels in @state. Decreases are
 * applied first so the BW freed up in a tunnel group can be used by the
 * subsequent increases.
 */
void intel_dp_tunnel_atomic_alloc_bw(struct intel_atomic_state *state)
{
	atomic_decrease_bw(state);
	atomic_increase_bw(state);
}
int intel_dp_tunnel_mgr_init(struct intel_display *display)
{
struct drm_dp_tunnel_mgr *tunnel_mgr;
struct drm_connector_list_iter connector_list_iter;
struct intel_connector *connector;
int dp_connectors = 0;
drm_connector_list_iter_begin(display->drm, &connector_list_iter);
for_each_intel_connector_iter(connector, &connector_list_iter) {
if (connector->base.connector_type != DRM_MODE_CONNECTOR_DisplayPort)
continue;
dp_connectors++;
}
drm_connector_list_iter_end(&connector_list_iter);
tunnel_mgr = drm_dp_tunnel_mgr_create(display->drm, dp_connectors);
if (IS_ERR(tunnel_mgr))
return PTR_ERR(tunnel_mgr);
display->dp_tunnel_mgr = tunnel_mgr;
return 0;
}
/**
 * intel_dp_tunnel_mgr_cleanup - Clean up the DP tunnel manager
 * @display: display device
 *
 * Destroy the DP tunnel manager created by intel_dp_tunnel_mgr_init() and
 * clear the pointer to it.
 */
void intel_dp_tunnel_mgr_cleanup(struct intel_display *display)
{
	struct drm_dp_tunnel_mgr *mgr = display->dp_tunnel_mgr;

	display->dp_tunnel_mgr = NULL;
	drm_dp_tunnel_mgr_destroy(mgr);
}