Diffstat (limited to 'drivers/gpu/drm/amd/display')
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 223
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | 3
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c | 6
-rw-r--r-- drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c | 8
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.h | 7
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc.c | 31
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_link.c | 84
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 171
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c | 20
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c | 53
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 16
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_sink.c | 10
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc.h | 7
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c | 37
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_link.h | 7
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/dce_audio.c | 6
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/dce_audio.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c | 14
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | 29
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c | 14
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 84
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c | 20
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c | 15
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c | 14
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c | 39
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c | 21
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c | 18
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c | 17
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c | 45
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c | 5
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dml_wrapper.c | 1889
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dml_wrapper_translation.c | 284
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.c | 31
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.h | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c | 14
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c | 28
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c | 6
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/dml_wrapper.h | 34
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/link_enc_cfg.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dmub/dmub_srv.h | 42
-rw-r--r-- drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h | 30
-rw-r--r-- drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c | 89
-rw-r--r-- drivers/gpu/drm/amd/display/include/ddc_service_types.h | 4
-rw-r--r-- drivers/gpu/drm/amd/display/include/logger_types.h | 4
67 files changed, 3118 insertions(+), 419 deletions(-)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 4595c59f2bf0..3fcf911c8b61 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -51,6 +51,7 @@
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"
+#include "amdgpu_atombios.h"
#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
@@ -789,8 +790,7 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params)
plink = adev->dm.dc->links[notify.link_index];
if (plink) {
plink->hpd_status =
- notify.hpd_status ==
- DP_HPD_PLUG ? true : false;
+ notify.hpd_status == DP_HPD_PLUG;
}
}
queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
@@ -1455,6 +1455,12 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
init_data.flags.power_down_display_on_boot = true;
+ if (check_seamless_boot_capability(adev)) {
+ init_data.flags.power_down_display_on_boot = false;
+ init_data.flags.allow_seamless_boot_optimization = true;
+ DRM_INFO("Seamless boot condition check passed\n");
+ }
+
INIT_LIST_HEAD(&adev->dm.da_list);
/* Display Core create. */
adev->dm.dc = dc_create(&init_data);
@@ -1479,8 +1485,10 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
adev->dm.dc->debug.disable_stutter = true;
- if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
+ if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) {
adev->dm.dc->debug.disable_dsc = true;
+ adev->dm.dc->debug.disable_dsc_edp = true;
+ }
if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
adev->dm.dc->debug.disable_clock_gate = true;
@@ -2303,14 +2311,6 @@ static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
goto fail;
}
-
- res = dc_validate_global_state(dc, context, false);
-
- if (res != DC_OK) {
- DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
- goto fail;
- }
-
res = dc_commit_state(dc, context);
fail:
@@ -2561,6 +2561,22 @@ static int dm_resume(void *handle)
if (amdgpu_in_reset(adev)) {
dc_state = dm->cached_dc_state;
+ /*
+ * The dc->current_state is backed up into dm->cached_dc_state
+ * before we commit 0 streams.
+ *
+ * DC will clear link encoder assignments on the real state
+ * but the changes won't propagate over to the copy we made
+ * before the 0 streams commit.
+ *
+ * DC expects that link encoder assignments are *not* valid
+ * when committing a state, so as a workaround it needs to be
+ * cleared here.
+ */
+ link_enc_cfg_init(dm->dc, dc_state);
+
+ amdgpu_dm_outbox_init(adev);
+
r = dm_dmub_hw_init(adev);
if (r)
DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
@@ -2572,20 +2588,11 @@ static int dm_resume(void *handle)
for (i = 0; i < dc_state->stream_count; i++) {
dc_state->streams[i]->mode_changed = true;
- for (j = 0; j < dc_state->stream_status->plane_count; j++) {
- dc_state->stream_status->plane_states[j]->update_flags.raw
+ for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
+ dc_state->stream_status[i].plane_states[j]->update_flags.raw
= 0xffffffff;
}
}
-#if defined(CONFIG_DRM_AMD_DC_DCN)
- /*
- * Resource allocation happens for link encoders for newer ASIC in
- * dc_validate_global_state, so we need to revalidate it.
- *
- * This shouldn't fail (it passed once before), so warn if it does.
- */
- WARN_ON(dc_validate_global_state(dm->dc, dc_state, false) != DC_OK);
-#endif
WARN_ON(!dc_commit_state(dm->dc, dc_state));
@@ -3909,6 +3916,9 @@ static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
caps = dm->backlight_caps[bl_idx];
dm->brightness[bl_idx] = user_brightness;
+ /* update scratch register */
+ if (bl_idx == 0)
+ amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]);
brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
link = (struct dc_link *)dm->backlight_link[bl_idx];
@@ -4251,6 +4261,14 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
}
+ /*
+ * Disable vblank IRQs aggressively for power-saving.
+ *
+ * TODO: Fix vblank control helpers to delay PSR entry to allow this when PSR
+ * is also supported.
+ */
+ adev_to_drm(adev)->vblank_disable_immediate = !psr_feature_enabled;
+
/* Software is initialized. Now we can register interrupt handlers. */
switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
@@ -6036,7 +6054,8 @@ static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
{
stream->timing.flags.DSC = 0;
- if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
+ if (aconnector->dc_link && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
+ sink->sink_signal == SIGNAL_TYPE_EDP)) {
dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
@@ -6044,6 +6063,64 @@ static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
}
}
+static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
+ struct dc_sink *sink, struct dc_stream_state *stream,
+ struct dsc_dec_dpcd_caps *dsc_caps,
+ uint32_t max_dsc_target_bpp_limit_override)
+{
+ const struct dc_link_settings *verified_link_cap = NULL;
+ uint32_t link_bw_in_kbps;
+ uint32_t edp_min_bpp_x16, edp_max_bpp_x16;
+ struct dc *dc = sink->ctx->dc;
+ struct dc_dsc_bw_range bw_range = {0};
+ struct dc_dsc_config dsc_cfg = {0};
+
+ verified_link_cap = dc_link_get_link_cap(stream->link);
+ link_bw_in_kbps = dc_link_bandwidth_kbps(stream->link, verified_link_cap);
+ edp_min_bpp_x16 = 8 * 16;
+ edp_max_bpp_x16 = 8 * 16;
+
+ if (edp_max_bpp_x16 > dsc_caps->edp_max_bits_per_pixel)
+ edp_max_bpp_x16 = dsc_caps->edp_max_bits_per_pixel;
+
+ if (edp_max_bpp_x16 < edp_min_bpp_x16)
+ edp_min_bpp_x16 = edp_max_bpp_x16;
+
+ if (dc_dsc_compute_bandwidth_range(dc->res_pool->dscs[0],
+ dc->debug.dsc_min_slice_height_override,
+ edp_min_bpp_x16, edp_max_bpp_x16,
+ dsc_caps,
+ &stream->timing,
+ &bw_range)) {
+
+ if (bw_range.max_kbps < link_bw_in_kbps) {
+ if (dc_dsc_compute_config(dc->res_pool->dscs[0],
+ dsc_caps,
+ dc->debug.dsc_min_slice_height_override,
+ max_dsc_target_bpp_limit_override,
+ 0,
+ &stream->timing,
+ &dsc_cfg)) {
+ stream->timing.dsc_cfg = dsc_cfg;
+ stream->timing.flags.DSC = 1;
+ stream->timing.dsc_cfg.bits_per_pixel = edp_max_bpp_x16;
+ }
+ return;
+ }
+ }
+
+ if (dc_dsc_compute_config(dc->res_pool->dscs[0],
+ dsc_caps,
+ dc->debug.dsc_min_slice_height_override,
+ max_dsc_target_bpp_limit_override,
+ link_bw_in_kbps,
+ &stream->timing,
+ &dsc_cfg)) {
+ stream->timing.dsc_cfg = dsc_cfg;
+ stream->timing.flags.DSC = 1;
+ }
+}
+
static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
struct dc_sink *sink, struct dc_stream_state *stream,
struct dsc_dec_dpcd_caps *dsc_caps)
@@ -6051,6 +6128,7 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
struct drm_connector *drm_connector = &aconnector->base;
uint32_t link_bandwidth_kbps;
uint32_t max_dsc_target_bpp_limit_override = 0;
+ struct dc *dc = sink->ctx->dc;
link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
dc_link_get_link_cap(aconnector->dc_link));
@@ -6063,7 +6141,12 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
dc_dsc_policy_set_enable_dsc_when_not_needed(
aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
- if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
+ if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_EDP && !dc->debug.disable_dsc_edp &&
+ dc->caps.edp_dsc_support && aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE) {
+
+ apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override);
+
+ } else if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
dsc_caps,
@@ -10759,8 +10842,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
trace_amdgpu_dm_atomic_check_begin(state);
ret = drm_atomic_helper_check_modeset(dev, state);
- if (ret)
+ if (ret) {
+ DRM_DEBUG_DRIVER("drm_atomic_helper_check_modeset() failed\n");
goto fail;
+ }
/* Check connector changes */
for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
@@ -10776,6 +10861,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
if (IS_ERR(new_crtc_state)) {
+ DRM_DEBUG_DRIVER("drm_atomic_get_crtc_state() failed\n");
ret = PTR_ERR(new_crtc_state);
goto fail;
}
@@ -10790,8 +10876,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
ret = add_affected_mst_dsc_crtcs(state, crtc);
- if (ret)
+ if (ret) {
+ DRM_DEBUG_DRIVER("add_affected_mst_dsc_crtcs() failed\n");
goto fail;
+ }
}
}
}
@@ -10806,19 +10894,25 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
continue;
ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
- if (ret)
+ if (ret) {
+ DRM_DEBUG_DRIVER("amdgpu_dm_verify_lut_sizes() failed\n");
goto fail;
+ }
if (!new_crtc_state->enable)
continue;
ret = drm_atomic_add_affected_connectors(state, crtc);
- if (ret)
+ if (ret) {
+ DRM_DEBUG_DRIVER("drm_atomic_add_affected_connectors() failed\n");
goto fail;
+ }
ret = drm_atomic_add_affected_planes(state, crtc);
- if (ret)
+ if (ret) {
+ DRM_DEBUG_DRIVER("drm_atomic_add_affected_planes() failed\n");
goto fail;
+ }
if (dm_old_crtc_state->dsc_force_changed)
new_crtc_state->mode_changed = true;
@@ -10855,6 +10949,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
if (IS_ERR(new_plane_state)) {
ret = PTR_ERR(new_plane_state);
+ DRM_DEBUG_DRIVER("new_plane_state is BAD\n");
goto fail;
}
}
@@ -10867,8 +10962,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
new_plane_state,
false,
&lock_and_validation_needed);
- if (ret)
+ if (ret) {
+ DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
goto fail;
+ }
}
/* Disable all crtcs which require disable */
@@ -10878,8 +10975,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
new_crtc_state,
false,
&lock_and_validation_needed);
- if (ret)
+ if (ret) {
+ DRM_DEBUG_DRIVER("DISABLE: dm_update_crtc_state() failed\n");
goto fail;
+ }
}
/* Enable all crtcs which require enable */
@@ -10889,8 +10988,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
new_crtc_state,
true,
&lock_and_validation_needed);
- if (ret)
+ if (ret) {
+ DRM_DEBUG_DRIVER("ENABLE: dm_update_crtc_state() failed\n");
goto fail;
+ }
}
/* Add new/modified planes */
@@ -10900,20 +11001,26 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
new_plane_state,
true,
&lock_and_validation_needed);
- if (ret)
+ if (ret) {
+ DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
goto fail;
+ }
}
/* Run this here since we want to validate the streams we created */
ret = drm_atomic_helper_check_planes(dev, state);
- if (ret)
+ if (ret) {
+ DRM_DEBUG_DRIVER("drm_atomic_helper_check_planes() failed\n");
goto fail;
+ }
/* Check cursor planes scaling */
for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
- if (ret)
+ if (ret) {
+ DRM_DEBUG_DRIVER("dm_check_crtc_cursor() failed\n");
goto fail;
+ }
}
if (state->legacy_cursor_update) {
@@ -11000,20 +11107,28 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
*/
if (lock_and_validation_needed) {
ret = dm_atomic_get_state(state, &dm_state);
- if (ret)
+ if (ret) {
+ DRM_DEBUG_DRIVER("dm_atomic_get_state() failed\n");
goto fail;
+ }
ret = do_aquire_global_lock(dev, state);
- if (ret)
+ if (ret) {
+ DRM_DEBUG_DRIVER("do_aquire_global_lock() failed\n");
goto fail;
+ }
#if defined(CONFIG_DRM_AMD_DC_DCN)
- if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars))
+ if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars)) {
+ DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
goto fail;
+ }
ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
- if (ret)
+ if (ret) {
+ DRM_DEBUG_DRIVER("dm_update_mst_vcpi_slots_for_dsc() failed\n");
goto fail;
+ }
#endif
/*
@@ -11023,12 +11138,13 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
* to get stuck in an infinite loop and hang eventually.
*/
ret = drm_dp_mst_atomic_check(state);
- if (ret)
+ if (ret) {
+ DRM_DEBUG_DRIVER("drm_dp_mst_atomic_check() failed\n");
goto fail;
- status = dc_validate_global_state(dc, dm_state->context, false);
+ }
+ status = dc_validate_global_state(dc, dm_state->context, true);
if (status != DC_OK) {
- drm_dbg_atomic(dev,
- "DC global validation failure: %s (%d)",
+ DRM_DEBUG_DRIVER("DC global validation failure: %s (%d)",
dc_status_to_str(status), status);
ret = -EINVAL;
goto fail;
@@ -11529,3 +11645,24 @@ int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, struct dc_context
ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
(uint32_t *)operation_result);
}
+
+/*
+ * Check whether seamless boot is supported.
+ *
+ * So far we only support seamless boot on CHIP_VANGOGH.
+ * If everything goes well, we may consider expanding
+ * seamless boot to other ASICs.
+ */
+bool check_seamless_boot_capability(struct amdgpu_device *adev)
+{
+ switch (adev->asic_type) {
+ case CHIP_VANGOGH:
+ if (!adev->mman.keep_stolen_vga_memory)
+ return true;
+ break;
+ default:
+ break;
+ }
+
+ return false;
+}
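
A note on the bpp clamp in apply_dsc_policy_for_edp() above: when even the maximum compressed bpp fits under the link budget, DSC runs at best quality instead of the minimum that would merely fit. A hedged, illustrative calculation (the figures below are hypothetical examples, not values from this patch):

/* Illustrative only: a hypothetical 4-lane HBR2 eDP panel driving 4K60. */
uint32_t link_bw_in_kbps = 17280000; /* 4 lanes x 5.4 Gbps x 8b/10b = 17.28 Gbps */
uint32_t edp_max_bpp_x16 = 8 * 16;   /* policy caps the DSC target at 8 bpp */

/*
 * Suppose dc_dsc_compute_bandwidth_range() reports bw_range.max_kbps =
 * 4266000 (533.25 MHz pixel clock x 8 bpp) for this timing. Since
 * 4266000 < 17280000, the stream fits even at the best-quality setting,
 * so the code pins stream->timing.dsc_cfg.bits_per_pixel at
 * edp_max_bpp_x16. Only when bw_range.max_kbps >= link_bw_in_kbps does
 * the fallback dc_dsc_compute_config() call target the link bandwidth
 * and trade quality for fit.
 */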
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 37e61a88d49e..bb65f41d1a59 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -731,4 +731,7 @@ extern const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs;
int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux,
struct dc_context *ctx, unsigned int link_index,
void *payload, void *operation_result);
+
+bool check_seamless_boot_capability(struct amdgpu_device *adev);
+
#endif /* __AMDGPU_DM_H__ */
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index 8cbeeb7c986d..72a2e84645df 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -584,7 +584,7 @@ bool dm_helpers_dp_write_dsc_enable(
ret = drm_dp_dpcd_write(aconnector->dsc_aux, DP_DSC_ENABLE, &enable_dsc, 1);
}
- if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT) {
+ if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT || stream->signal == SIGNAL_TYPE_EDP) {
ret = dm_helpers_dp_write_dpcd(ctx, stream->link, DP_DSC_ENABLE, &enable_dsc, 1);
DC_LOG_DC("Send DSC %s to sst display\n", enable_dsc ? "enable" : "disable");
}
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index a4bef4364afd..1e385d55e7fb 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -2995,7 +2995,7 @@ static bool bios_parser2_construct(
&bp->object_info_tbl.revision);
if (bp->object_info_tbl.revision.major == 1
- && bp->object_info_tbl.revision.minor >= 4) {
+ && bp->object_info_tbl.revision.minor == 4) {
struct display_object_info_table_v1_4 *tbl_v1_4;
tbl_v1_4 = GET_IMAGE(struct display_object_info_table_v1_4,
@@ -3004,8 +3004,10 @@ static bool bios_parser2_construct(
return false;
bp->object_info_tbl.v1_4 = tbl_v1_4;
- } else
+ } else {
+ ASSERT(0);
return false;
+ }
dal_firmware_parser_init_cmd_tbl(bp);
dal_bios_parser_init_cmd_tbl_helper2(&bp->cmd_helper, dce_version);
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
index 6b248cd2a461..c8b0a2f05b4d 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
@@ -763,7 +763,7 @@ unsigned int get_highest_allowed_voltage_level(uint32_t chip_family, uint32_t hw
return 4;
}
-bool dcn_validate_bandwidth(
+bool dcn10_validate_bandwidth(
struct dc *dc,
struct dc_state *context,
bool fast_validate)
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
index 26f96ee32472..9200c8ce02ba 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
@@ -308,8 +308,7 @@ void dc_destroy_clk_mgr(struct clk_mgr *clk_mgr_base)
case FAMILY_NV:
if (ASICREV_IS_SIENNA_CICHLID_P(clk_mgr_base->ctx->asic_id.hw_internal_rev)) {
dcn3_clk_mgr_destroy(clk_mgr);
- }
- if (ASICREV_IS_DIMGREY_CAVEFISH_P(clk_mgr_base->ctx->asic_id.hw_internal_rev)) {
+ } else if (ASICREV_IS_DIMGREY_CAVEFISH_P(clk_mgr_base->ctx->asic_id.hw_internal_rev)) {
dcn3_clk_mgr_destroy(clk_mgr);
}
if (ASICREV_IS_BEIGE_GOBY_P(clk_mgr_base->ctx->asic_id.hw_internal_rev)) {
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
index f4c9a458ace8..a13ff1783b9b 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
@@ -66,7 +66,7 @@
#define TO_CLK_MGR_DCN31(clk_mgr)\
container_of(clk_mgr, struct clk_mgr_dcn31, base)
-int dcn31_get_active_display_cnt_wa(
+static int dcn31_get_active_display_cnt_wa(
struct dc *dc,
struct dc_state *context)
{
@@ -118,7 +118,7 @@ static void dcn31_disable_otg_wa(struct clk_mgr *clk_mgr_base, bool disable)
}
}
-static void dcn31_update_clocks(struct clk_mgr *clk_mgr_base,
+void dcn31_update_clocks(struct clk_mgr *clk_mgr_base,
struct dc_state *context,
bool safe_to_lower)
{
@@ -284,7 +284,7 @@ static void dcn31_enable_pme_wa(struct clk_mgr *clk_mgr_base)
dcn31_smu_enable_pme_wa(clk_mgr);
}
-static void dcn31_init_clocks(struct clk_mgr *clk_mgr)
+void dcn31_init_clocks(struct clk_mgr *clk_mgr)
{
memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks));
// Assumption is that boot state always supports pstate
@@ -294,7 +294,7 @@ static void dcn31_init_clocks(struct clk_mgr *clk_mgr)
clk_mgr->clks.zstate_support = DCN_ZSTATE_SUPPORT_UNKNOWN;
}
-static bool dcn31_are_clock_states_equal(struct dc_clocks *a,
+bool dcn31_are_clock_states_equal(struct dc_clocks *a,
struct dc_clocks *b)
{
if (a->dispclk_khz != b->dispclk_khz)
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.h
index f8f100535526..961b10a49486 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.h
@@ -39,6 +39,13 @@ struct clk_mgr_dcn31 {
struct dcn31_smu_watermark_set smu_wm_set;
};
+bool dcn31_are_clock_states_equal(struct dc_clocks *a,
+ struct dc_clocks *b);
+void dcn31_init_clocks(struct clk_mgr *clk_mgr);
+void dcn31_update_clocks(struct clk_mgr *clk_mgr_base,
+ struct dc_state *context,
+ bool safe_to_lower);
+
void dcn31_clk_mgr_construct(struct dc_context *ctx,
struct clk_mgr_dcn31 *clk_mgr,
struct pp_smu_funcs *pp_smu,
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 0ded4decee05..17b7408d84b7 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -221,9 +221,9 @@ static bool create_links(
link = link_create(&link_init_params);
if (link) {
- dc->links[dc->link_count] = link;
- link->dc = dc;
- ++dc->link_count;
+ dc->links[dc->link_count] = link;
+ link->dc = dc;
+ ++dc->link_count;
}
}
@@ -808,6 +808,10 @@ void dc_stream_set_static_screen_params(struct dc *dc,
static void dc_destruct(struct dc *dc)
{
+ // reset link encoder assignment table on destruct
+ if (dc->res_pool->funcs->link_encs_assign)
+ link_enc_cfg_init(dc, dc->current_state);
+
if (dc->current_state) {
dc_release_state(dc->current_state);
dc->current_state = NULL;
@@ -1016,8 +1020,6 @@ static bool dc_construct(struct dc *dc,
goto fail;
}
- dc_resource_state_construct(dc, dc->current_state);
-
if (!create_links(dc, init_params->num_virtual_links))
goto fail;
@@ -1027,8 +1029,7 @@ static bool dc_construct(struct dc *dc,
if (!create_link_encoders(dc))
goto fail;
- /* Initialise DIG link encoder resource tracking variables. */
- link_enc_cfg_init(dc, dc->current_state);
+ dc_resource_state_construct(dc, dc->current_state);
return true;
@@ -1830,6 +1831,19 @@ bool dc_commit_state(struct dc *dc, struct dc_state *context)
dc_stream_log(dc, stream);
}
+ /*
+ * Previous validation was performed with fast_validate = true and
+ * the full DML state required for hardware programming was skipped.
+ *
+ * Re-validate here to calculate these parameters / watermarks.
+ */
+ result = dc_validate_global_state(dc, context, false);
+ if (result != DC_OK) {
+ DC_LOG_ERROR("DC commit global validation failure: %s (%d)",
+ dc_status_to_str(result), result);
+ return result;
+ }
+
result = dc_commit_state_no_check(dc, context);
return (result == DC_OK);
@@ -2870,7 +2884,8 @@ static void commit_planes_for_stream(struct dc *dc,
#endif
if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
- if (top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
+ if (top_pipe_to_program &&
+ top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
if (should_use_dmub_lock(stream->link)) {
union dmub_hw_lock_flags hw_locks = { 0 };
struct dmub_hw_lock_inst_flags inst_flags = { 0 };
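
Taken together with the amdgpu_dm.c hunk that switches amdgpu_dm_atomic_check() to fast validation, the dc_commit_state() change above splits validation into two phases: a cheap feasibility pass at check time and a full DML pass at commit time. A minimal sketch of the resulting flow, assuming a hypothetical wrapper (validate_then_commit() is not a function in this patch):

/* Hypothetical wrapper illustrating the two-phase validation flow. */
static enum dc_status validate_then_commit(struct dc *dc, struct dc_state *ctx)
{
	enum dc_status status;

	/* Check time: feasibility only; DML watermark math is skipped. */
	status = dc_validate_global_state(dc, ctx, true /* fast_validate */);
	if (status != DC_OK)
		return status;

	/* Commit time: full validation computes the HW programming state. */
	status = dc_validate_global_state(dc, ctx, false /* fast_validate */);
	if (status != DC_OK)
		return status;

	return dc_commit_state_no_check(dc, ctx);
}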
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 60544788e911..3d08f8eba402 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -270,10 +270,10 @@ bool dc_link_detect_sink(struct dc_link *link, enum dc_connection_type *type)
/* Link may not have physical HPD pin. */
if (link->ep_type != DISPLAY_ENDPOINT_PHY) {
- if (link->hpd_status)
- *type = dc_connection_single;
- else
+ if (link->is_hpd_pending || !link->hpd_status)
*type = dc_connection_none;
+ else
+ *type = dc_connection_single;
return true;
}
@@ -1999,6 +1999,57 @@ static enum dc_status enable_link_dp_mst(
return enable_link_dp(state, pipe_ctx);
}
+void dc_link_blank_all_dp_displays(struct dc *dc)
+{
+ unsigned int i;
+ uint8_t dpcd_power_state = '\0';
+ enum dc_status status = DC_ERROR_UNEXPECTED;
+
+ for (i = 0; i < dc->link_count; i++) {
+ if ((dc->links[i]->connector_signal != SIGNAL_TYPE_DISPLAY_PORT) ||
+ (dc->links[i]->priv == NULL) || (dc->links[i]->local_sink == NULL))
+ continue;
+
+ /* DP 2.0 spec requires that we read LTTPR caps first */
+ dp_retrieve_lttpr_cap(dc->links[i]);
+ /* if any of the displays are lit up, turn them off */
+ status = core_link_read_dpcd(dc->links[i], DP_SET_POWER,
+ &dpcd_power_state, sizeof(dpcd_power_state));
+
+ if (status == DC_OK && dpcd_power_state == DP_POWER_STATE_D0)
+ dc_link_blank_dp_stream(dc->links[i], true);
+ }
+
+}
+
+void dc_link_blank_dp_stream(struct dc_link *link, bool hw_init)
+{
+ unsigned int j;
+ struct dc *dc = link->ctx->dc;
+ enum signal_type signal = link->connector_signal;
+
+ if ((signal == SIGNAL_TYPE_EDP) ||
+ (signal == SIGNAL_TYPE_DISPLAY_PORT)) {
+ if (link->ep_type == DISPLAY_ENDPOINT_PHY &&
+ link->link_enc->funcs->get_dig_frontend &&
+ link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
+ unsigned int fe = link->link_enc->funcs->get_dig_frontend(link->link_enc);
+
+ if (fe != ENGINE_ID_UNKNOWN)
+ for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
+ if (fe == dc->res_pool->stream_enc[j]->id) {
+ dc->res_pool->stream_enc[j]->funcs->dp_blank(link,
+ dc->res_pool->stream_enc[j]);
+ break;
+ }
+ }
+ }
+
+ if ((!link->wa_flags.dp_keep_receiver_powered) || hw_init)
+ dp_receiver_power_ctrl(link, false);
+ }
+}
+
static bool get_ext_hdmi_settings(struct pipe_ctx *pipe_ctx,
enum engine_id eng_id,
struct ext_hdmi_settings *settings)
@@ -2946,7 +2997,7 @@ bool dc_link_set_psr_allow_active(struct dc_link *link, const bool *allow_active
link->psr_settings.psr_power_opt = *power_opts;
if (psr != NULL && link->psr_settings.psr_feature_enabled && psr->funcs->psr_set_power_opt)
- psr->funcs->psr_set_power_opt(psr, link->psr_settings.psr_power_opt);
+ psr->funcs->psr_set_power_opt(psr, link->psr_settings.psr_power_opt, panel_inst);
}
/* Enable or Disable PSR */
@@ -3913,9 +3964,6 @@ static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off)
struct cp_psp *cp_psp = &pipe_ctx->stream->ctx->cp_psp;
#if defined(CONFIG_DRM_AMD_DC_DCN)
struct link_encoder *link_enc = NULL;
- struct dc_state *state = pipe_ctx->stream->ctx->dc->current_state;
- struct link_enc_assignment link_enc_assign;
- int i;
#endif
if (cp_psp && cp_psp->funcs.update_stream_config) {
@@ -3943,18 +3991,15 @@ static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off)
pipe_ctx->stream->ctx->dc,
pipe_ctx->stream);
}
+ ASSERT(link_enc);
+
// Initialize PHY ID with ABCDE - 01234 mapping except when it is B0
config.phy_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A;
- //look up the link_enc_assignment for the current pipe_ctx
- for (i = 0; i < state->stream_count; i++) {
- if (pipe_ctx->stream == state->streams[i]) {
- link_enc_assign = state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i];
- }
- }
// Add flag to guard new A0 DIG mapping
- if (pipe_ctx->stream->ctx->dc->enable_c20_dtm_b0 == true) {
- config.dig_be = link_enc_assign.eng_id;
+ if (pipe_ctx->stream->ctx->dc->enable_c20_dtm_b0 == true &&
+ pipe_ctx->stream->link->dc->ctx->dce_version == DCN_VERSION_3_1) {
+ config.dig_be = link_enc->preferred_engine;
config.dio_output_type = pipe_ctx->stream->link->ep_type;
config.dio_output_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A;
} else {
@@ -3966,10 +4011,8 @@ static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off)
if (pipe_ctx->stream->ctx->dc->enable_c20_dtm_b0 == true &&
link_enc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0) {
if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
- link_enc = link_enc_assign.stream->link_enc;
-
// enum ID 1-4 maps to DPIA PHY ID 0-3
- config.phy_idx = link_enc_assign.ep_id.link_id.enum_id - ENUM_ID_1;
+ config.phy_idx = pipe_ctx->stream->link->link_id.enum_id - ENUM_ID_1;
} else { // for non DPIA mode over B0, ABCDE maps to 01564
switch (link_enc->transmitter) {
@@ -4242,7 +4285,8 @@ void core_link_enable_stream(
/* eDP lit up by bios already, no need to enable again. */
if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP &&
apply_edp_fast_boot_optimization &&
- !pipe_ctx->stream->timing.flags.DSC) {
+ !pipe_ctx->stream->timing.flags.DSC &&
+ !pipe_ctx->next_odm_pipe) {
pipe_ctx->stream->dpms_off = false;
#if defined(CONFIG_DRM_AMD_DC_HDCP)
update_psp_stream_config(pipe_ctx, false);
@@ -4749,6 +4793,8 @@ bool dc_link_should_enable_fec(const struct dc_link *link)
link->local_sink &&
link->local_sink->edid_caps.panel_patch.disable_fec) ||
(link->connector_signal == SIGNAL_TYPE_EDP
+ // for eDP, keep FEC disabled unless the panel supports DSC
+ && link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT == false
))
is_fec_disable = true;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
index 60539b1f2a80..24dc662ec3e4 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
@@ -626,7 +626,7 @@ bool dal_ddc_submit_aux_command(struct ddc_service *ddc,
do {
struct aux_payload current_payload;
bool is_end_of_payload = (retrieved + DEFAULT_AUX_MAX_DATA_SIZE) >=
- payload->length ? true : false;
+ payload->length;
uint32_t payload_length = is_end_of_payload ?
payload->length - retrieved : DEFAULT_AUX_MAX_DATA_SIZE;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index cb7bf9148904..84f3545c3032 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -430,7 +430,7 @@ enum dc_status dpcd_set_link_settings(
status = core_link_write_dpcd(link, DP_LANE_COUNT_SET,
&lane_count_set.raw, 1);
- if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_14 &&
+ if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_13 &&
lt_settings->link_settings.use_link_rate_set == true) {
rate = 0;
/* WA for some MUX chips that will power down with eDP and lose supported
@@ -3346,6 +3346,148 @@ bool decide_edp_link_settings(struct dc_link *link, struct dc_link_settings *lin
return false;
}
+static bool decide_edp_link_settings_with_dsc(struct dc_link *link,
+ struct dc_link_settings *link_setting,
+ uint32_t req_bw,
+ enum dc_link_rate max_link_rate)
+{
+ struct dc_link_settings initial_link_setting;
+ struct dc_link_settings current_link_setting;
+ uint32_t link_bw;
+
+ unsigned int policy = 0;
+
+ policy = link->ctx->dc->debug.force_dsc_edp_policy;
+ if (max_link_rate == LINK_RATE_UNKNOWN)
+ max_link_rate = link->verified_link_cap.link_rate;
+ /*
+ * edp_supported_link_rates_count is only valid for eDP v1.4 or higher.
+ * Per VESA eDP spec, "The DPCD revision for eDP v1.4 is 13h"
+ */
+ if ((link->dpcd_caps.dpcd_rev.raw < DPCD_REV_13 ||
+ link->dpcd_caps.edp_supported_link_rates_count == 0)) {
+ /* for DSC enabled case, we search for minimum lane count */
+ memset(&initial_link_setting, 0, sizeof(initial_link_setting));
+ initial_link_setting.lane_count = LANE_COUNT_ONE;
+ initial_link_setting.link_rate = LINK_RATE_LOW;
+ initial_link_setting.link_spread = LINK_SPREAD_DISABLED;
+ initial_link_setting.use_link_rate_set = false;
+ initial_link_setting.link_rate_set = 0;
+ current_link_setting = initial_link_setting;
+ if (req_bw > dc_link_bandwidth_kbps(link, &link->verified_link_cap))
+ return false;
+
+ /* search for the minimum link setting that:
+ * 1. is supported according to the link training result
+ * 2. could support the b/w requested by the timing
+ */
+ while (current_link_setting.link_rate <=
+ max_link_rate) {
+ link_bw = dc_link_bandwidth_kbps(
+ link,
+ &current_link_setting);
+ if (req_bw <= link_bw) {
+ *link_setting = current_link_setting;
+ return true;
+ }
+ if (policy) {
+ /* minimize lane */
+ if (current_link_setting.link_rate < max_link_rate) {
+ current_link_setting.link_rate =
+ increase_link_rate(
+ current_link_setting.link_rate);
+ } else {
+ if (current_link_setting.lane_count <
+ link->verified_link_cap.lane_count) {
+ current_link_setting.lane_count =
+ increase_lane_count(
+ current_link_setting.lane_count);
+ current_link_setting.link_rate = initial_link_setting.link_rate;
+ } else
+ break;
+ }
+ } else {
+ /* minimize link rate */
+ if (current_link_setting.lane_count <
+ link->verified_link_cap.lane_count) {
+ current_link_setting.lane_count =
+ increase_lane_count(
+ current_link_setting.lane_count);
+ } else {
+ current_link_setting.link_rate =
+ increase_link_rate(
+ current_link_setting.link_rate);
+ current_link_setting.lane_count =
+ initial_link_setting.lane_count;
+ }
+ }
+ }
+ return false;
+ }
+
+ /* if the optimized eDP link rate table (ILR) is supported */
+ memset(&initial_link_setting, 0, sizeof(initial_link_setting));
+ initial_link_setting.lane_count = LANE_COUNT_ONE;
+ initial_link_setting.link_rate = link->dpcd_caps.edp_supported_link_rates[0];
+ initial_link_setting.link_spread = LINK_SPREAD_DISABLED;
+ initial_link_setting.use_link_rate_set = true;
+ initial_link_setting.link_rate_set = 0;
+ current_link_setting = initial_link_setting;
+
+ /* search for the minimum link setting that:
+ * 1. is supported according to the link training result
+ * 2. could support the b/w requested by the timing
+ */
+ while (current_link_setting.link_rate <=
+ max_link_rate) {
+ link_bw = dc_link_bandwidth_kbps(
+ link,
+ &current_link_setting);
+ if (req_bw <= link_bw) {
+ *link_setting = current_link_setting;
+ return true;
+ }
+ if (policy) {
+ /* minimize lane */
+ if (current_link_setting.link_rate_set <
+ link->dpcd_caps.edp_supported_link_rates_count
+ && current_link_setting.link_rate < max_link_rate) {
+ current_link_setting.link_rate_set++;
+ current_link_setting.link_rate =
+ link->dpcd_caps.edp_supported_link_rates[current_link_setting.link_rate_set];
+ } else {
+ if (current_link_setting.lane_count < link->verified_link_cap.lane_count) {
+ current_link_setting.lane_count =
+ increase_lane_count(
+ current_link_setting.lane_count);
+ current_link_setting.link_rate_set = initial_link_setting.link_rate_set;
+ current_link_setting.link_rate =
+ link->dpcd_caps.edp_supported_link_rates[current_link_setting.link_rate_set];
+ } else
+ break;
+ }
+ } else {
+ /* minimize link rate */
+ if (current_link_setting.lane_count <
+ link->verified_link_cap.lane_count) {
+ current_link_setting.lane_count =
+ increase_lane_count(
+ current_link_setting.lane_count);
+ } else {
+ if (current_link_setting.link_rate_set < link->dpcd_caps.edp_supported_link_rates_count) {
+ current_link_setting.link_rate_set++;
+ current_link_setting.link_rate =
+ link->dpcd_caps.edp_supported_link_rates[current_link_setting.link_rate_set];
+ current_link_setting.lane_count =
+ initial_link_setting.lane_count;
+ } else
+ break;
+ }
+ }
+ }
+ return false;
+}
+
static bool decide_mst_link_settings(const struct dc_link *link, struct dc_link_settings *link_setting)
{
*link_setting = link->verified_link_cap;
@@ -3380,7 +3522,25 @@ void decide_link_settings(struct dc_stream_state *stream,
if (decide_mst_link_settings(link, link_setting))
return;
} else if (link->connector_signal == SIGNAL_TYPE_EDP) {
- if (decide_edp_link_settings(link, link_setting, req_bw))
+ /* enable edp link optimization for DSC eDP case */
+ if (stream->timing.flags.DSC) {
+ enum dc_link_rate max_link_rate = LINK_RATE_UNKNOWN;
+
+ if (link->ctx->dc->debug.force_dsc_edp_policy) {
+ /* calculate the link's max link rate cap */
+ struct dc_link_settings tmp_link_setting;
+ struct dc_crtc_timing tmp_timing = stream->timing;
+ uint32_t orig_req_bw;
+
+ tmp_link_setting.link_rate = LINK_RATE_UNKNOWN;
+ tmp_timing.flags.DSC = 0;
+ orig_req_bw = dc_bandwidth_in_kbps_from_timing(&tmp_timing);
+ decide_edp_link_settings(link, &tmp_link_setting, orig_req_bw);
+ max_link_rate = tmp_link_setting.link_rate;
+ }
+ if (decide_edp_link_settings_with_dsc(link, link_setting, req_bw, max_link_rate))
+ return;
+ } else if (decide_edp_link_settings(link, link_setting, req_bw))
return;
} else if (decide_dp_link_settings(link, link_setting, req_bw))
return;
@@ -4454,7 +4614,7 @@ bool dp_retrieve_lttpr_cap(struct dc_link *link)
lttpr_dpcd_data,
sizeof(lttpr_dpcd_data));
if (status != DC_OK) {
- dm_error("%s: Read LTTPR caps data failed.\n", __func__);
+ DC_LOG_DP2("%s: Read LTTPR caps data failed.\n", __func__);
return false;
}
@@ -5885,7 +6045,10 @@ bool is_edp_ilr_optimization_required(struct dc_link *link, struct dc_crtc_timin
req_bw = dc_bandwidth_in_kbps_from_timing(crtc_timing);
- decide_edp_link_settings(link, &link_setting, req_bw);
+ if (!crtc_timing->flags.DSC)
+ decide_edp_link_settings(link, &link_setting, req_bw);
+ else
+ decide_edp_link_settings_with_dsc(link, &link_setting, req_bw, LINK_RATE_UNKNOWN);
if (link->dpcd_caps.edp_supported_link_rates[link_rate_set] != link_setting.link_rate ||
lane_count_set.bits.LANE_COUNT_SET != link_setting.lane_count) {
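
The two branches of decide_edp_link_settings_with_dsc() walk the same (lane count, link rate) grid in opposite orders; the first combination whose dc_link_bandwidth_kbps() covers req_bw wins. For a panel verified at 4 lanes x HBR2, the walks look like this (rates named for illustration):

/*
 * force_dsc_edp_policy != 0 -- minimize lane count, rate grows first:
 *   (1, RBR) -> (1, HBR) -> (1, HBR2) ->
 *   (2, RBR) -> (2, HBR) -> (2, HBR2) ->
 *   (4, RBR) -> (4, HBR) -> (4, HBR2)
 *
 * force_dsc_edp_policy == 0 -- minimize link rate, lanes grow first:
 *   (1, RBR) -> (2, RBR) -> (4, RBR) ->
 *   (1, HBR) -> (2, HBR) -> (4, HBR) ->
 *   (1, HBR2) -> (2, HBR2) -> (4, HBR2)
 */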
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c
index b1c9f77d6bf4..d72122593959 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c
@@ -94,17 +94,17 @@ static enum link_training_result dpia_configure_link(struct dc_link *link,
lt_settings);
status = dpcd_configure_channel_coding(link, lt_settings);
- if (status != DC_OK && !link->hpd_status)
+ if (status != DC_OK && link->is_hpd_pending)
return LINK_TRAINING_ABORT;
/* Configure lttpr mode */
status = dpcd_configure_lttpr_mode(link, lt_settings);
- if (status != DC_OK && !link->hpd_status)
+ if (status != DC_OK && link->is_hpd_pending)
return LINK_TRAINING_ABORT;
/* Set link rate, lane count and spread. */
status = dpcd_set_link_settings(link, lt_settings);
- if (status != DC_OK && !link->hpd_status)
+ if (status != DC_OK && link->is_hpd_pending)
return LINK_TRAINING_ABORT;
if (link->preferred_training_settings.fec_enable)
@@ -112,7 +112,7 @@ static enum link_training_result dpia_configure_link(struct dc_link *link,
else
fec_enable = true;
status = dp_set_fec_ready(link, fec_enable);
- if (status != DC_OK && !link->hpd_status)
+ if (status != DC_OK && link->is_hpd_pending)
return LINK_TRAINING_ABORT;
return LINK_TRAINING_SUCCESS;
@@ -388,7 +388,7 @@ static enum link_training_result dpia_training_cr_non_transparent(struct dc_link
}
/* Abort link training if clock recovery failed due to HPD unplug. */
- if (!link->hpd_status)
+ if (link->is_hpd_pending)
result = LINK_TRAINING_ABORT;
DC_LOG_HW_LINK_TRAINING("%s\n DPIA(%d) clock recovery\n"
@@ -490,7 +490,7 @@ static enum link_training_result dpia_training_cr_transparent(struct dc_link *li
}
/* Abort link training if clock recovery failed due to HPD unplug. */
- if (!link->hpd_status)
+ if (link->is_hpd_pending)
result = LINK_TRAINING_ABORT;
DC_LOG_HW_LINK_TRAINING("%s\n DPIA(%d) clock recovery\n"
@@ -675,7 +675,7 @@ static enum link_training_result dpia_training_eq_non_transparent(struct dc_link
}
/* Abort link training if equalization failed due to HPD unplug. */
- if (!link->hpd_status)
+ if (link->is_hpd_pending)
result = LINK_TRAINING_ABORT;
DC_LOG_HW_LINK_TRAINING("%s\n DPIA(%d) equalization\n"
@@ -758,7 +758,7 @@ static enum link_training_result dpia_training_eq_transparent(struct dc_link *li
}
/* Abort link training if equalization failed due to HPD unplug. */
- if (!link->hpd_status)
+ if (link->is_hpd_pending)
result = LINK_TRAINING_ABORT;
DC_LOG_HW_LINK_TRAINING("%s\n DPIA(%d) equalization\n"
@@ -892,10 +892,10 @@ static void dpia_training_abort(struct dc_link *link, uint32_t hop)
__func__,
link->link_id.enum_id - ENUM_ID_1,
link->lttpr_mode,
- link->hpd_status);
+ link->is_hpd_pending);
/* Abandon clean-up if sink unplugged. */
- if (!link->hpd_status)
+ if (link->is_hpd_pending)
return;
if (hop != DPRX)
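
For context on the condition flip above (from !link->hpd_status to link->is_hpd_pending): the new flag, added to dc_link.h later in this patch, distinguishes an unprocessed HPD event from the recorded plug state. A comment sketch of the distinction:

/*
 * link->hpd_status     - last known plug state of the link
 * link->is_hpd_pending - an HPD event arrived but has not been handled yet
 *
 * Aborting DPIA link training on is_hpd_pending lets an in-flight
 * unplug cancel training as soon as the event is queued, without
 * waiting for the handler to update hpd_status itself.
 */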
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c
index 25e48a8cbb78..a55944da8d53 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c
@@ -118,7 +118,10 @@ static void remove_link_enc_assignment(
*/
if (get_stream_using_link_enc(state, eng_id) == NULL)
state->res_ctx.link_enc_cfg_ctx.link_enc_avail[eng_idx] = eng_id;
+
stream->link_enc = NULL;
+ state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].eng_id = ENGINE_ID_UNKNOWN;
+ state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].stream = NULL;
break;
}
}
@@ -148,6 +151,7 @@ static void add_link_enc_assignment(
.ep_type = stream->link->ep_type},
.eng_id = eng_id,
.stream = stream};
+ dc_stream_retain(stream);
state->res_ctx.link_enc_cfg_ctx.link_enc_avail[eng_idx] = ENGINE_ID_UNKNOWN;
stream->link_enc = stream->ctx->dc->res_pool->link_encoders[eng_idx];
break;
@@ -227,7 +231,7 @@ static struct link_encoder *get_link_enc_used_by_link(
.link_id = link->link_id,
.ep_type = link->ep_type};
- for (i = 0; i < state->stream_count; i++) {
+ for (i = 0; i < MAX_PIPES; i++) {
struct link_enc_assignment assignment = state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i];
if (assignment.valid == true && are_ep_ids_equal(&assignment.ep_id, &ep_id))
@@ -237,28 +241,18 @@ static struct link_encoder *get_link_enc_used_by_link(
return link_enc;
}
/* Clear all link encoder assignments. */
-static void clear_enc_assignments(struct dc_state *state)
+static void clear_enc_assignments(const struct dc *dc, struct dc_state *state)
{
int i;
- enum engine_id eng_id;
- struct dc_stream_state *stream;
for (i = 0; i < MAX_PIPES; i++) {
state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].valid = false;
- eng_id = state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].eng_id;
- stream = state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].stream;
- if (eng_id != ENGINE_ID_UNKNOWN)
- state->res_ctx.link_enc_cfg_ctx.link_enc_avail[eng_id - ENGINE_ID_DIGA] = eng_id;
- if (stream)
- stream->link_enc = NULL;
+ state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].eng_id = ENGINE_ID_UNKNOWN;
+ if (state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].stream != NULL) {
+ dc_stream_release(state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].stream);
+ state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].stream = NULL;
+ }
}
-}
-
-void link_enc_cfg_init(
- struct dc *dc,
- struct dc_state *state)
-{
- int i;
for (i = 0; i < dc->res_pool->res_cap->num_dig_link_enc; i++) {
if (dc->res_pool->link_encoders[i])
@@ -266,8 +260,13 @@ void link_enc_cfg_init(
else
state->res_ctx.link_enc_cfg_ctx.link_enc_avail[i] = ENGINE_ID_UNKNOWN;
}
+}
- clear_enc_assignments(state);
+void link_enc_cfg_init(
+ const struct dc *dc,
+ struct dc_state *state)
+{
+ clear_enc_assignments(dc, state);
state->res_ctx.link_enc_cfg_ctx.mode = LINK_ENC_CFG_STEADY;
}
@@ -284,12 +283,9 @@ void link_enc_cfg_link_encs_assign(
ASSERT(state->stream_count == stream_count);
- if (stream_count == 0)
- clear_enc_assignments(state);
-
/* Release DIG link encoder resources before running assignment algorithm. */
- for (i = 0; i < stream_count; i++)
- dc->res_pool->funcs->link_enc_unassign(state, streams[i]);
+ for (i = 0; i < dc->current_state->stream_count; i++)
+ dc->res_pool->funcs->link_enc_unassign(state, dc->current_state->streams[i]);
for (i = 0; i < MAX_PIPES; i++)
ASSERT(state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].valid == false);
@@ -544,6 +540,7 @@ bool link_enc_cfg_validate(struct dc *dc, struct dc_state *state)
uint8_t dig_stream_count = 0;
int matching_stream_ptrs = 0;
int eng_ids_per_ep_id[MAX_PIPES] = {0};
+ int valid_bitmap = 0;
/* (1) No. valid entries same as stream count. */
for (i = 0; i < MAX_PIPES; i++) {
@@ -625,5 +622,15 @@ bool link_enc_cfg_validate(struct dc *dc, struct dc_state *state)
is_valid = valid_entries && valid_stream_ptrs && valid_uniqueness && valid_avail && valid_streams;
ASSERT(is_valid);
+ if (is_valid == false) {
+ valid_bitmap =
+ (valid_entries & 0x1) |
+ ((valid_stream_ptrs & 0x1) << 1) |
+ ((valid_uniqueness & 0x1) << 2) |
+ ((valid_avail & 0x1) << 3) |
+ ((valid_streams & 0x1) << 4);
+ dm_error("Invalid link encoder assignments: 0x%x\n", valid_bitmap);
+ }
+
return is_valid;
}
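
The valid_bitmap added to link_enc_cfg_validate() packs the five check results, with a set bit meaning the check passed. A hedged decoding aid (the per-bit summary paraphrases the check names in the function; only check (1) is visible in this hunk):

/*
 * Example: "Invalid link encoder assignments: 0x17" (0b10111)
 *   bit 0 (0x01) valid_entries     = 1  (entry count matches stream count)
 *   bit 1 (0x02) valid_stream_ptrs = 1
 *   bit 2 (0x04) valid_uniqueness  = 1
 *   bit 3 (0x08) valid_avail       = 0  <- the failing check
 *   bit 4 (0x10) valid_streams     = 1
 */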
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index c32fdccd4d92..1da91f250afa 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -2078,7 +2078,6 @@ static void mark_seamless_boot_stream(
{
struct dc_bios *dcb = dc->ctx->dc_bios;
- /* TODO: Check Linux */
if (dc->config.allow_seamless_boot_optimization &&
!dcb->funcs->is_accelerated_mode(dcb)) {
if (dc_validate_seamless_boot_timing(dc, stream->sink, &stream->timing))
@@ -2224,6 +2223,9 @@ void dc_resource_state_construct(
struct dc_state *dst_ctx)
{
dst_ctx->clk_mgr = dc->clk_mgr;
+
+ /* Initialise DIG link encoder resource tracking variables. */
+ link_enc_cfg_init(dc, dst_ctx);
}
@@ -2506,17 +2508,7 @@ static void set_avi_info_frame(
/* TODO : We should handle YCC quantization */
/* but we do not have matrix calculation */
- if (stream->qy_bit == 1) {
- if (color_space == COLOR_SPACE_SRGB ||
- color_space == COLOR_SPACE_2020_RGB_FULLRANGE)
- hdmi_info.bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE;
- else if (color_space == COLOR_SPACE_SRGB_LIMITED ||
- color_space == COLOR_SPACE_2020_RGB_LIMITEDRANGE)
- hdmi_info.bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE;
- else
- hdmi_info.bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE;
- } else
- hdmi_info.bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE;
+ hdmi_info.bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE;
///VIC
format = stream->timing.timing_3d_format;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_sink.c b/drivers/gpu/drm/amd/display/dc/core/dc_sink.c
index a249a0e5edd0..4b5e4d8e7735 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_sink.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_sink.c
@@ -33,14 +33,6 @@
* Private functions
******************************************************************************/
-static void dc_sink_destruct(struct dc_sink *sink)
-{
- if (sink->dc_container_id) {
- kfree(sink->dc_container_id);
- sink->dc_container_id = NULL;
- }
-}
-
static bool dc_sink_construct(struct dc_sink *sink, const struct dc_sink_init_data *init_params)
{
@@ -75,7 +67,7 @@ void dc_sink_retain(struct dc_sink *sink)
static void dc_sink_free(struct kref *kref)
{
struct dc_sink *sink = container_of(kref, struct dc_sink, refcount);
- dc_sink_destruct(sink);
+ kfree(sink->dc_container_id);
kfree(sink);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 3aac3f4a2852..c14e7db3f69d 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -47,7 +47,7 @@ struct aux_payload;
struct set_config_cmd_payload;
struct dmub_notification;
-#define DC_VER "3.2.160"
+#define DC_VER "3.2.163"
#define MAX_SURFACES 3
#define MAX_PLANES 6
@@ -188,6 +188,7 @@ struct dc_caps {
#if defined(CONFIG_DRM_AMD_DC_DCN)
bool dp_hpo;
#endif
+ bool edp_dsc_support;
bool vbios_lttpr_aware;
bool vbios_lttpr_enable;
};
@@ -573,6 +574,8 @@ struct dc_debug_options {
bool native422_support;
bool disable_dsc;
enum visual_confirm visual_confirm;
+ int visual_confirm_rect_height;
+
bool sanity_checks;
bool max_disp_clk;
bool surface_trace;
@@ -667,6 +670,8 @@ struct dc_debug_options {
bool validate_dml_output;
bool enable_dmcub_surface_flip;
bool usbc_combo_phy_reset_wa;
+ bool disable_dsc_edp;
+ unsigned int force_dsc_edp_policy;
bool enable_dram_clock_change_one_display_vactive;
#if defined(CONFIG_DRM_AMD_DC_DCN)
/* TODO - remove once tested */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
index 360f3199ea6f..541376fabbef 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
@@ -115,13 +115,44 @@ void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv)
}
}
+void dc_dmub_srv_clear_inbox0_ack(struct dc_dmub_srv *dmub_srv)
+{
+ struct dmub_srv *dmub = dmub_srv->dmub;
+ struct dc_context *dc_ctx = dmub_srv->ctx;
+ enum dmub_status status = DMUB_STATUS_OK;
+
+ status = dmub_srv_clear_inbox0_ack(dmub);
+ if (status != DMUB_STATUS_OK) {
+ DC_ERROR("Error clearing INBOX0 ack: status=%d\n", status);
+ dc_dmub_srv_log_diagnostic_data(dmub_srv);
+ }
+}
+
+void dc_dmub_srv_wait_for_inbox0_ack(struct dc_dmub_srv *dmub_srv)
+{
+ struct dmub_srv *dmub = dmub_srv->dmub;
+ struct dc_context *dc_ctx = dmub_srv->ctx;
+ enum dmub_status status = DMUB_STATUS_OK;
+
+ status = dmub_srv_wait_for_inbox0_ack(dmub, 100000);
+ if (status != DMUB_STATUS_OK) {
+ DC_ERROR("Error waiting for INBOX0 HW Lock Ack\n");
+ dc_dmub_srv_log_diagnostic_data(dmub_srv);
+ }
+}
+
void dc_dmub_srv_send_inbox0_cmd(struct dc_dmub_srv *dmub_srv,
union dmub_inbox0_data_register data)
{
struct dmub_srv *dmub = dmub_srv->dmub;
- if (dmub->hw_funcs.send_inbox0_cmd)
- dmub->hw_funcs.send_inbox0_cmd(dmub, data);
- // TODO: Add wait command -- poll register for ACK
+ struct dc_context *dc_ctx = dmub_srv->ctx;
+ enum dmub_status status = DMUB_STATUS_OK;
+
+ status = dmub_srv_send_inbox0_cmd(dmub, data);
+ if (status != DMUB_STATUS_OK) {
+ DC_ERROR("Error sending INBOX0 cmd\n");
+ dc_dmub_srv_log_diagnostic_data(dmub_srv);
+ }
}
bool dc_dmub_srv_cmd_with_reply_data(struct dc_dmub_srv *dc_dmub_srv, union dmub_rb_cmd *cmd)
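
These helpers replace the fire-and-forget INBOX0 write (note the removed TODO above) with a clear/send/wait handshake, consumed by dmub_hw_lock_mgr_inbox0_cmd() later in this patch. The real wait lives in dmub/src/dmub_srv.c (also touched by this patch but not shown here); as a rough sketch of what a polling ack wait could look like, assuming a hypothetical get_inbox0_ack() hook:

/* Sketch only: get_inbox0_ack() is a hypothetical accessor, not the
 * actual hw_funcs member; timing and status handling may differ. */
static enum dmub_status wait_for_inbox0_ack_sketch(struct dmub_srv *dmub,
						   uint32_t timeout_us)
{
	uint32_t i;

	for (i = 0; i < timeout_us; i++) {
		if (dmub->hw_funcs.get_inbox0_ack(dmub)) /* hypothetical */
			return DMUB_STATUS_OK;
		udelay(1);
	}

	return DMUB_STATUS_TIMEOUT;
}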
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
index 3e35eee7188c..7e4e2ec5915d 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
@@ -68,6 +68,8 @@ bool dc_dmub_srv_get_dmub_outbox0_msg(const struct dc *dc, struct dmcub_trace_bu
void dc_dmub_trace_event_control(struct dc *dc, bool enable);
+void dc_dmub_srv_clear_inbox0_ack(struct dc_dmub_srv *dmub_srv);
+void dc_dmub_srv_wait_for_inbox0_ack(struct dc_dmub_srv *dmub_srv);
void dc_dmub_srv_send_inbox0_cmd(struct dc_dmub_srv *dmub_srv, union dmub_inbox0_data_register data);
bool dc_dmub_srv_get_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv, struct dmub_diagnostic_data *dmub_oca);
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
index 180ecd860296..d449e72a4e2a 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
@@ -113,6 +113,7 @@ struct dc_link {
* DIG encoder. */
bool is_dig_mapping_flexible;
bool hpd_status; /* HPD status of link without physical HPD pin. */
+ bool is_hpd_pending; /* Indicates a newly received HPD, not yet handled */
bool edp_sink_present;
@@ -287,6 +288,10 @@ bool dc_link_setup_psr(struct dc_link *dc_link,
void dc_link_get_psr_residency(const struct dc_link *link, uint32_t *residency);
+void dc_link_blank_all_dp_displays(struct dc *dc);
+
+void dc_link_blank_dp_stream(struct dc_link *link, bool hw_init);
+
/* Request DC to detect if there is a Panel connected.
* boot - If this call is during initial boot.
* Return false for any type of detection failure or MST detection
@@ -298,7 +303,7 @@ enum dc_detect_reason {
DETECT_REASON_HPD,
DETECT_REASON_HPDRX,
DETECT_REASON_FALLBACK,
- DETECT_REASON_RETRAIN
+ DETECT_REASON_RETRAIN,
};
bool dc_link_detect(struct dc_link *dc_link, enum dc_detect_reason reason);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
index 27218ede150a..70eaac017624 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
@@ -67,9 +67,6 @@ static void write_indirect_azalia_reg(struct audio *audio,
/* AZALIA_F0_CODEC_ENDPOINT_DATA endpoint data */
REG_SET(AZALIA_F0_CODEC_ENDPOINT_DATA, 0,
AZALIA_ENDPOINT_REG_DATA, reg_data);
-
- DC_LOG_HW_AUDIO("AUDIO:write_indirect_azalia_reg: index: %u data: %u\n",
- reg_index, reg_data);
}
static uint32_t read_indirect_azalia_reg(struct audio *audio, uint32_t reg_index)
@@ -85,9 +82,6 @@ static uint32_t read_indirect_azalia_reg(struct audio *audio, uint32_t reg_index
/* AZALIA_F0_CODEC_ENDPOINT_DATA endpoint data */
value = REG_READ(AZALIA_F0_CODEC_ENDPOINT_DATA);
- DC_LOG_HW_AUDIO("AUDIO:read_indirect_azalia_reg: index: %u data: %u\n",
- reg_index, value);
-
return value;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.h b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.h
index 5622d5e32d81..dbd2cfed0603 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.h
@@ -113,6 +113,7 @@ struct dce_audio_shift {
uint8_t DCCG_AUDIO_DTO2_USE_512FBR_DTO;
uint32_t DCCG_AUDIO_DTO0_USE_512FBR_DTO;
uint32_t DCCG_AUDIO_DTO1_USE_512FBR_DTO;
+ uint32_t CLOCK_GATING_DISABLE;
};
struct dce_audio_mask {
@@ -132,6 +133,7 @@ struct dce_audio_mask {
uint32_t DCCG_AUDIO_DTO2_USE_512FBR_DTO;
uint32_t DCCG_AUDIO_DTO0_USE_512FBR_DTO;
uint32_t DCCG_AUDIO_DTO1_USE_512FBR_DTO;
+ uint32_t CLOCK_GATING_DISABLE;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
index 9baf8ca0a920..b1b2e3c6f379 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
@@ -56,8 +56,11 @@ void dmub_hw_lock_mgr_inbox0_cmd(struct dc_dmub_srv *dmub_srv,
union dmub_inbox0_cmd_lock_hw hw_lock_cmd)
{
union dmub_inbox0_data_register data = { 0 };
+
data.inbox0_cmd_lock_hw = hw_lock_cmd;
+ dc_dmub_srv_clear_inbox0_ack(dmub_srv);
dc_dmub_srv_send_inbox0_cmd(dmub_srv, data);
+ dc_dmub_srv_wait_for_inbox0_ack(dmub_srv);
}
bool should_use_dmub_lock(struct dc_link *link)
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
index 90eb8eedacf2..87ed48d5530d 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
@@ -230,7 +230,7 @@ static void dmub_psr_set_level(struct dmub_psr *dmub, uint16_t psr_level, uint8_
/**
* Set PSR power optimization flags.
*/
-static void dmub_psr_set_power_opt(struct dmub_psr *dmub, unsigned int power_opt)
+static void dmub_psr_set_power_opt(struct dmub_psr *dmub, unsigned int power_opt, uint8_t panel_inst)
{
union dmub_rb_cmd cmd;
struct dc_context *dc = dmub->ctx;
@@ -239,7 +239,9 @@ static void dmub_psr_set_power_opt(struct dmub_psr *dmub, unsigned int power_opt
cmd.psr_set_power_opt.header.type = DMUB_CMD__PSR;
cmd.psr_set_power_opt.header.sub_type = DMUB_CMD__SET_PSR_POWER_OPT;
cmd.psr_set_power_opt.header.payload_bytes = sizeof(struct dmub_cmd_psr_set_power_opt_data);
+ cmd.psr_set_power_opt.psr_set_power_opt_data.cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1;
cmd.psr_set_power_opt.psr_set_power_opt_data.power_opt = power_opt;
+ cmd.psr_set_power_opt.psr_set_power_opt_data.panel_inst = panel_inst;
dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
dc_dmub_srv_cmd_execute(dc->dmub_srv);
@@ -327,6 +329,16 @@ static bool dmub_psr_copy_settings(struct dmub_psr *dmub,
copy_settings_data->fec_enable_delay_in100us = link->dc->debug.fec_enable_delay_in100us;
copy_settings_data->cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1;
copy_settings_data->panel_inst = panel_inst;
+ copy_settings_data->dsc_enable_status = (pipe_ctx->stream->timing.flags.DSC == 1);
+
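+ /* Force TPS3 wakeup only when FEC is enabled and the sink's device ID
+ * string matches one of two known panels needing this workaround.
+ */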
+ if (link->fec_state == dc_link_fec_enabled &&
+ (!memcmp(link->dpcd_caps.sink_dev_id_str, DP_SINK_DEVICE_STR_ID_1,
+ sizeof(link->dpcd_caps.sink_dev_id_str)) ||
+ !memcmp(link->dpcd_caps.sink_dev_id_str, DP_SINK_DEVICE_STR_ID_2,
+ sizeof(link->dpcd_caps.sink_dev_id_str))))
+ copy_settings_data->debug.bitfields.force_wakeup_by_tps3 = 1;
+ else
+ copy_settings_data->debug.bitfields.force_wakeup_by_tps3 = 0;
dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
dc_dmub_srv_cmd_execute(dc->dmub_srv);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h
index 5dbd479660f1..01acc01cc191 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h
@@ -46,7 +46,7 @@ struct dmub_psr_funcs {
void (*psr_force_static)(struct dmub_psr *dmub, uint8_t panel_inst);
void (*psr_get_residency)(struct dmub_psr *dmub, uint32_t *residency,
uint8_t panel_inst);
- void (*psr_set_power_opt)(struct dmub_psr *dmub, unsigned int power_opt);
+ void (*psr_set_power_opt)(struct dmub_psr *dmub, unsigned int power_opt, uint8_t panel_inst);
};
struct dmub_psr *dmub_psr_create(struct dc_context *ctx);
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 24e47df526f6..3d421583e9ca 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -1602,6 +1602,11 @@ static enum dc_status apply_single_controller_ctx_to_hw(
pipe_ctx->stream_res.stream_enc,
pipe_ctx->stream_res.tg->inst);
+ if (dc_is_dp_signal(pipe_ctx->stream->signal) &&
+ pipe_ctx->stream_res.stream_enc->funcs->reset_fifo)
+ pipe_ctx->stream_res.stream_enc->funcs->reset_fifo(
+ pipe_ctx->stream_res.stream_enc);
+
if (dc_is_dp_signal(pipe_ctx->stream->signal))
dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_CONNECT_DIG_FE_OTG);
@@ -1655,30 +1660,12 @@ static enum dc_status apply_single_controller_ctx_to_hw(
static void power_down_encoders(struct dc *dc)
{
- int i, j;
+ int i;
for (i = 0; i < dc->link_count; i++) {
enum signal_type signal = dc->links[i]->connector_signal;
- if ((signal == SIGNAL_TYPE_EDP) ||
- (signal == SIGNAL_TYPE_DISPLAY_PORT)) {
- if (dc->links[i]->link_enc->funcs->get_dig_frontend &&
- dc->links[i]->link_enc->funcs->is_dig_enabled(dc->links[i]->link_enc)) {
- unsigned int fe = dc->links[i]->link_enc->funcs->get_dig_frontend(
- dc->links[i]->link_enc);
-
- for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
- if (fe == dc->res_pool->stream_enc[j]->id) {
- dc->res_pool->stream_enc[j]->funcs->dp_blank(dc->links[i],
- dc->res_pool->stream_enc[j]);
- break;
- }
- }
- }
-
- if (!dc->links[i]->wa_flags.dp_keep_receiver_powered)
- dp_receiver_power_ctrl(dc->links[i], false);
- }
+ dc_link_blank_dp_stream(dc->links[i], false);
if (signal != SIGNAL_TYPE_EDP)
signal = SIGNAL_TYPE_NONE;
@@ -1846,7 +1833,7 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
}
}
// We are trying to enable eDP, don't power down VDD
- if (edp_stream_num)
+ if (edp_stream_num && can_apply_edp_fast_boot)
keep_edp_vdd_on = true;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
index 44293d66b46b..e31a6f1516bb 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
@@ -39,6 +39,10 @@
#define BLACK_OFFSET_RGB_Y 0x0
#define BLACK_OFFSET_CBCR 0x8000
+#define VISUAL_CONFIRM_RECT_HEIGHT_DEFAULT 3
+#define VISUAL_CONFIRM_RECT_HEIGHT_MIN 1
+#define VISUAL_CONFIRM_RECT_HEIGHT_MAX 10
+
#define REG(reg)\
dpp->tf_regs->reg
@@ -685,9 +689,17 @@ static void dpp1_dscl_set_recout(struct dcn10_dpp *dpp,
const struct rect *recout)
{
int visual_confirm_on = 0;
+ unsigned short visual_confirm_rect_height = VISUAL_CONFIRM_RECT_HEIGHT_DEFAULT;
+
if (dpp->base.ctx->dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE)
visual_confirm_on = 1;
+ /* Check bounds to ensure the visual confirm bar height is set to a sane value */
+ if ((dpp->base.ctx->dc->debug.visual_confirm_rect_height >= VISUAL_CONFIRM_RECT_HEIGHT_MIN) &&
+ (dpp->base.ctx->dc->debug.visual_confirm_rect_height <= VISUAL_CONFIRM_RECT_HEIGHT_MAX)) {
+ visual_confirm_rect_height = dpp->base.ctx->dc->debug.visual_confirm_rect_height;
+ }
+
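+ /* Illustrative example: with visual confirm enabled on pipe inst 0 and
+ * the default bar height of 3, RECOUT_HEIGHT below shrinks by
+ * 2 * (0 + 3) = 6 lines, leaving a 6-line band for the confirm color.
+ */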
REG_SET_2(RECOUT_START, 0,
/* First pixel of RECOUT in the active OTG area */
RECOUT_START_X, recout->x,
@@ -699,7 +711,7 @@ static void dpp1_dscl_set_recout(struct dcn10_dpp *dpp,
RECOUT_WIDTH, recout->width,
/* Number of RECOUT vertical lines */
RECOUT_HEIGHT, recout->height
- - visual_confirm_on * 2 * (dpp->base.inst + 1));
+ - visual_confirm_on * 2 * (dpp->base.inst + visual_confirm_rect_height));
}
/**
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 0b788d794fb3..a2b925cc4132 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -1362,11 +1362,48 @@ void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
tg->funcs->tg_init(tg);
}
+
+ /* Power gate DSCs */
+ if (hws->funcs.dsc_pg_control != NULL) {
+ uint32_t num_opps = 0;
+ uint32_t opp_id_src0 = OPP_ID_INVALID;
+ uint32_t opp_id_src1 = OPP_ID_INVALID;
+
+ // Step 1: Find out which OPTC is running and has DSC enabled
+ for (i = 0; i < dc->res_pool->res_cap->num_timing_generator; i++) {
+ uint32_t optc_dsc_state = 0;
+ struct timing_generator *tg = dc->res_pool->timing_generators[i];
+
+ if (tg->funcs->is_tg_enabled(tg)) {
+ if (tg->funcs->get_dsc_status)
+ tg->funcs->get_dsc_status(tg, &optc_dsc_state);
+ // Only one OPTC can have DSC enabled at a time, so stop at the first match.
+ // A non-zero state means DSC is enabled.
+ if (optc_dsc_state != 0) {
+ tg->funcs->get_optc_source(tg, &num_opps, &opp_id_src0, &opp_id_src1);
+ break;
+ }
+ }
+ }
+
+ // Step 2: Power down all DSCs except the one used by the running OPTC
+ for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++) {
+ struct dcn_dsc_state s = {0};
+
+ dc->res_pool->dscs[i]->funcs->dsc_read_state(dc->res_pool->dscs[i], &s);
+
+ if ((s.dsc_opp_source == opp_id_src0 || s.dsc_opp_source == opp_id_src1) &&
+ s.dsc_clock_en && s.dsc_fw_en)
+ continue;
+
+ hws->funcs.dsc_pg_control(hws, dc->res_pool->dscs[i]->inst, false);
+ }
+ }
}
void dcn10_init_hw(struct dc *dc)
{
- int i, j;
+ int i;
struct abm *abm = dc->res_pool->abm;
struct dmcu *dmcu = dc->res_pool->dmcu;
struct dce_hwseq *hws = dc->hwseq;
@@ -1468,43 +1505,8 @@ void dcn10_init_hw(struct dc *dc)
dmub_enable_outbox_notification(dc);
/* we want to turn off all dp displays before doing detection */
- if (dc->config.power_down_display_on_boot) {
- uint8_t dpcd_power_state = '\0';
- enum dc_status status = DC_ERROR_UNEXPECTED;
-
- for (i = 0; i < dc->link_count; i++) {
- if (dc->links[i]->connector_signal != SIGNAL_TYPE_DISPLAY_PORT)
- continue;
-
- /* DP 2.0 requires that LTTPR Caps be read first */
- dp_retrieve_lttpr_cap(dc->links[i]);
-
- /*
- * If any of the displays are lit up turn them off.
- * The reason is that some MST hubs cannot be turned off
- * completely until we tell them to do so.
- * If not turned off, then displays connected to MST hub
- * won't light up.
- */
- status = core_link_read_dpcd(dc->links[i], DP_SET_POWER,
- &dpcd_power_state, sizeof(dpcd_power_state));
- if (status == DC_OK && dpcd_power_state == DP_POWER_STATE_D0) {
- /* blank dp stream before power off receiver*/
- if (dc->links[i]->link_enc->funcs->get_dig_frontend) {
- unsigned int fe = dc->links[i]->link_enc->funcs->get_dig_frontend(dc->links[i]->link_enc);
-
- for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
- if (fe == dc->res_pool->stream_enc[j]->id) {
- dc->res_pool->stream_enc[j]->funcs->dp_blank(dc->links[i],
- dc->res_pool->stream_enc[j]);
- break;
- }
- }
- }
- dp_receiver_power_ctrl(dc->links[i], false);
- }
- }
- }
+ if (dc->config.power_down_display_on_boot)
+ dc_link_blank_all_dp_displays(dc);
/* If taking control over from VBIOS, we may want to optimize our first
* mode set, so we need to skip powering down pipes until we know which
@@ -1637,7 +1639,7 @@ void dcn10_reset_hw_ctx_wrap(
dcn10_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
if (hws->funcs.enable_stream_gating)
- hws->funcs.enable_stream_gating(dc, pipe_ctx);
+ hws->funcs.enable_stream_gating(dc, pipe_ctx_old);
if (old_clk)
old_clk->funcs->cs_power_down(old_clk);
}
@@ -2624,7 +2626,7 @@ static void dcn10_update_dchubp_dpp(
/* new calculated dispclk, dppclk are stored in
* context->bw_ctx.bw.dcn.clk.dispclk_khz / dppclk_khz. current
* dispclk, dppclk are from dc->clk_mgr->clks.dispclk_khz.
- * dcn_validate_bandwidth compute new dispclk, dppclk.
+ * dcn10_validate_bandwidth computes new dispclk, dppclk.
* dispclk will put in use after optimize_bandwidth when
* ramp_up_dispclk_with_dpp is called.
* there are two places for dppclk be put in use. One location
@@ -2638,7 +2640,7 @@ static void dcn10_update_dchubp_dpp(
* for example, eDP + external dp, change resolution of DP from
* 1920x1080x144hz to 1280x960x60hz.
* before change: dispclk = 337889 dppclk = 337889
- * change mode, dcn_validate_bandwidth calculate
+ * change mode, dcn10_validate_bandwidth calculates
* dispclk = 143122 dppclk = 143122
* update_dchubp_dpp be executed before dispclk be updated,
* dispclk = 337889, but dppclk use new value dispclk /2 =
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index f37551e00023..19a2dd619ec7 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -978,10 +978,8 @@ static void dcn10_resource_destruct(struct dcn10_resource_pool *pool)
pool->base.mpc = NULL;
}
- if (pool->base.hubbub != NULL) {
- kfree(pool->base.hubbub);
- pool->base.hubbub = NULL;
- }
+ kfree(pool->base.hubbub);
+ pool->base.hubbub = NULL;
for (i = 0; i < pool->base.pipe_count; i++) {
if (pool->base.opps[i] != NULL)
@@ -1011,14 +1009,10 @@ static void dcn10_resource_destruct(struct dcn10_resource_pool *pool)
for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
if (pool->base.engines[i] != NULL)
dce110_engine_destroy(&pool->base.engines[i]);
- if (pool->base.hw_i2cs[i] != NULL) {
- kfree(pool->base.hw_i2cs[i]);
- pool->base.hw_i2cs[i] = NULL;
- }
- if (pool->base.sw_i2cs[i] != NULL) {
- kfree(pool->base.sw_i2cs[i]);
- pool->base.sw_i2cs[i] = NULL;
- }
+ kfree(pool->base.hw_i2cs[i]);
+ pool->base.hw_i2cs[i] = NULL;
+ kfree(pool->base.sw_i2cs[i]);
+ pool->base.sw_i2cs[i] = NULL;
}
for (i = 0; i < pool->base.audio_count; i++) {
@@ -1320,7 +1314,7 @@ static const struct resource_funcs dcn10_res_pool_funcs = {
.destroy = dcn10_destroy_resource_pool,
.link_enc_create = dcn10_link_encoder_create,
.panel_cntl_create = dcn10_panel_cntl_create,
- .validate_bandwidth = dcn_validate_bandwidth,
+ .validate_bandwidth = dcn10_validate_bandwidth,
.acquire_idle_pipe_for_layer = dcn10_acquire_idle_pipe_for_layer,
.validate_plane = dcn10_validate_plane,
.validate_global = dcn10_validate_global,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
index b0c08ee6bc2c..bf4436d7aaab 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
@@ -902,6 +902,19 @@ void enc1_stream_encoder_stop_dp_info_packets(
}
+void enc1_stream_encoder_reset_fifo(
+ struct stream_encoder *enc)
+{
+ struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+
+ /* set DIG_START to 0x1 to reset FIFO */
+ REG_UPDATE(DIG_FE_CNTL, DIG_START, 1);
+ udelay(100);
+
+ /* write 0 to take the FIFO out of reset */
+ REG_UPDATE(DIG_FE_CNTL, DIG_START, 0);
+}
+
void enc1_stream_encoder_dp_blank(
struct dc_link *link,
struct stream_encoder *enc)
@@ -1587,6 +1600,8 @@ static const struct stream_encoder_funcs dcn10_str_enc_funcs = {
enc1_stream_encoder_send_immediate_sdp_message,
.stop_dp_info_packets =
enc1_stream_encoder_stop_dp_info_packets,
+ .reset_fifo =
+ enc1_stream_encoder_reset_fifo,
.dp_blank =
enc1_stream_encoder_dp_blank,
.dp_unblank =
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
index 687d7e4bf7ca..a146a41f68e9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
@@ -626,6 +626,9 @@ void enc1_stream_encoder_send_immediate_sdp_message(
void enc1_stream_encoder_stop_dp_info_packets(
struct stream_encoder *enc);
+void enc1_stream_encoder_reset_fifo(
+ struct stream_encoder *enc);
+
void enc1_stream_encoder_dp_blank(
struct dc_link *link,
struct stream_encoder *enc);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
index 79b640e202eb..ef5c4c0f4d6c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
@@ -162,6 +162,8 @@ static void dsc2_read_state(struct display_stream_compressor *dsc, struct dcn_ds
REG_GET(DSCC_PPS_CONFIG2, PIC_WIDTH, &s->dsc_pic_width);
REG_GET(DSCC_PPS_CONFIG2, PIC_HEIGHT, &s->dsc_pic_height);
REG_GET(DSCC_PPS_CONFIG7, SLICE_BPG_OFFSET, &s->dsc_slice_bpg_offset);
+ REG_GET_2(DSCRM_DSC_FORWARD_CONFIG, DSCRM_DSC_FORWARD_EN, &s->dsc_fw_en,
+ DSCRM_DSC_OPP_PIPE_SOURCE, &s->dsc_opp_source);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index 4f88376a118f..e6af99ae3d9f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -2270,7 +2270,7 @@ void dcn20_reset_hw_ctx_wrap(
dcn20_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
if (hws->funcs.enable_stream_gating)
- hws->funcs.enable_stream_gating(dc, pipe_ctx);
+ hws->funcs.enable_stream_gating(dc, pipe_ctx_old);
if (old_clk)
old_clk->funcs->cs_power_down(old_clk);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
index c90b8516dcc1..8c34751b0e58 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
@@ -190,6 +190,19 @@ void optc2_set_dsc_config(struct timing_generator *optc,
OPTC_DSC_SLICE_WIDTH, dsc_slice_width);
}
+/* Get DSC-related configuration.
+ * dsc_mode: 0 means DSC is disabled; any other value is the enabled DSC format
+ */
+void optc2_get_dsc_status(struct timing_generator *optc,
+ uint32_t *dsc_mode)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+ REG_GET(OPTC_DATA_FORMAT_CONTROL,
+ OPTC_DSC_MODE, dsc_mode);
+}
+
/*TEMP: Need to figure out inheritance model here.*/
bool optc2_is_two_pixels_per_containter(const struct dc_crtc_timing *timing)
{
@@ -579,6 +592,7 @@ static struct timing_generator_funcs dcn20_tg_funcs = {
.get_crc = optc1_get_crc,
.configure_crc = optc2_configure_crc,
.set_dsc_config = optc2_set_dsc_config,
+ .get_dsc_status = optc2_get_dsc_status,
.set_dwb_source = optc2_set_dwb_source,
.set_odm_bypass = optc2_set_odm_bypass,
.set_odm_combine = optc2_set_odm_combine,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h
index be19a6885fbf..f7968b9ca16e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h
@@ -98,6 +98,9 @@ void optc2_set_dsc_config(struct timing_generator *optc,
uint32_t dsc_bytes_per_pixel,
uint32_t dsc_slice_width);
+void optc2_get_dsc_status(struct timing_generator *optc,
+ uint32_t *dsc_mode);
+
void optc2_set_odm_bypass(struct timing_generator *optc,
const struct dc_crtc_timing *dc_crtc_timing);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
index aab25ca8343a..8a70f92795c2 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
@@ -593,6 +593,8 @@ static const struct stream_encoder_funcs dcn20_str_enc_funcs = {
enc1_stream_encoder_send_immediate_sdp_message,
.stop_dp_info_packets =
enc1_stream_encoder_stop_dp_info_packets,
+ .reset_fifo =
+ enc1_stream_encoder_reset_fifo,
.dp_blank =
enc1_stream_encoder_dp_blank,
.dp_unblank =
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c
index ebd9c35c914f..7aa9aaf5db4c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c
@@ -805,6 +805,8 @@ static const struct stream_encoder_funcs dcn30_str_enc_funcs = {
enc3_stream_encoder_update_dp_info_packets,
.stop_dp_info_packets =
enc1_stream_encoder_stop_dp_info_packets,
+ .reset_fifo =
+ enc1_stream_encoder_reset_fifo,
.dp_blank =
enc1_stream_encoder_dp_blank,
.dp_unblank =
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
index df2717116604..3e99bb9c70ab 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
@@ -437,7 +437,7 @@ void dcn30_init_hw(struct dc *dc)
struct dce_hwseq *hws = dc->hwseq;
struct dc_bios *dcb = dc->ctx->dc_bios;
struct resource_pool *res_pool = dc->res_pool;
- int i, j;
+ int i;
int edp_num;
uint32_t backlight = MAX_BACKLIGHT_LEVEL;
@@ -534,41 +534,8 @@ void dcn30_init_hw(struct dc *dc)
hws->funcs.dsc_pg_control(hws, res_pool->dscs[i]->inst, false);
/* we want to turn off all dp displays before doing detection */
- if (dc->config.power_down_display_on_boot) {
- uint8_t dpcd_power_state = '\0';
- enum dc_status status = DC_ERROR_UNEXPECTED;
-
- for (i = 0; i < dc->link_count; i++) {
- if (dc->links[i]->connector_signal != SIGNAL_TYPE_DISPLAY_PORT)
- continue;
- /* DP 2.0 states that LTTPR regs must be read first */
- dp_retrieve_lttpr_cap(dc->links[i]);
-
- /* if any of the displays are lit up turn them off */
- status = core_link_read_dpcd(dc->links[i], DP_SET_POWER,
- &dpcd_power_state, sizeof(dpcd_power_state));
- if (status == DC_OK && dpcd_power_state == DP_POWER_STATE_D0) {
- /* blank dp stream before power off receiver*/
- if (dc->links[i]->link_enc->funcs->get_dig_frontend) {
- unsigned int fe;
-
- fe = dc->links[i]->link_enc->funcs->get_dig_frontend(
- dc->links[i]->link_enc);
- if (fe == ENGINE_ID_UNKNOWN)
- continue;
-
- for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
- if (fe == dc->res_pool->stream_enc[j]->id) {
- dc->res_pool->stream_enc[j]->funcs->dp_blank(dc->links[i],
- dc->res_pool->stream_enc[j]);
- break;
- }
- }
- }
- dp_receiver_power_ctrl(dc->links[i], false);
- }
- }
- }
+ if (dc->config.power_down_display_on_boot)
+ dc_link_blank_all_dp_displays(dc);
/* If taking control over from VBIOS, we may want to optimize our first
* mode set, so we need to skip powering down pipes until we know which
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
index 5d9e6413d67a..f5e8916601d3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
@@ -332,6 +332,7 @@ static struct timing_generator_funcs dcn30_tg_funcs = {
.get_crc = optc1_get_crc,
.configure_crc = optc2_configure_crc,
.set_dsc_config = optc3_set_dsc_config,
+ .get_dsc_status = optc2_get_dsc_status,
.set_dwb_source = NULL,
.set_odm_bypass = optc3_set_odm_bypass,
.set_odm_combine = optc3_set_odm_combine,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
index fbaa03f26d8b..7abc36a4ff76 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
@@ -686,7 +686,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.disable_clock_gate = true,
.disable_pplib_clock_request = true,
.disable_pplib_wm_range = true,
- .pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
+ .pipe_split_policy = MPC_SPLIT_AVOID,
.force_single_disp_pipe_split = false,
.disable_dcc = DCC_ENABLE,
.vsr_support = true,
@@ -1449,9 +1449,7 @@ static bool dcn301_resource_construct(
dc->caps.post_blend_color_processing = true;
dc->caps.force_dp_tps4_for_cp2520 = true;
dc->caps.extended_aux_timeout_support = true;
-#ifdef CONFIG_DRM_AMD_DC_DMUB
dc->caps.dmcub_support = true;
-#endif
/* Color pipeline capabilities */
dc->caps.color.dpp.dcn_arch = 1;
@@ -1487,6 +1485,23 @@ static bool dcn301_resource_construct(
dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
dc->caps.color.mpc.ocsc = 1;
+ /* read VBIOS LTTPR caps */
+ if (ctx->dc_bios->funcs->get_lttpr_caps) {
+ enum bp_result bp_query_result;
+ uint8_t is_vbios_lttpr_enable = 0;
+
+ bp_query_result = ctx->dc_bios->funcs->get_lttpr_caps(ctx->dc_bios, &is_vbios_lttpr_enable);
+ dc->caps.vbios_lttpr_enable = (bp_query_result == BP_RESULT_OK) && !!is_vbios_lttpr_enable;
+ }
+
+ if (ctx->dc_bios->funcs->get_lttpr_interop) {
+ enum bp_result bp_query_result;
+ uint8_t is_vbios_interop_enabled = 0;
+
+ bp_query_result = ctx->dc_bios->funcs->get_lttpr_interop(ctx->dc_bios, &is_vbios_interop_enabled);
+ dc->caps.vbios_lttpr_aware = (bp_query_result == BP_RESULT_OK) && !!is_vbios_interop_enabled;
+ }
+
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
else if (dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS) {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
index fcf96cf08c76..058f5d71e037 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
@@ -1557,6 +1557,24 @@ static bool dcn302_resource_construct(
dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
dc->caps.color.mpc.ocsc = 1;
+ /* read VBIOS LTTPR caps */
+ if (ctx->dc_bios->funcs->get_lttpr_caps) {
+ enum bp_result bp_query_result;
+ uint8_t is_vbios_lttpr_enable = 0;
+
+ bp_query_result = ctx->dc_bios->funcs->get_lttpr_caps(ctx->dc_bios, &is_vbios_lttpr_enable);
+ dc->caps.vbios_lttpr_enable = (bp_query_result == BP_RESULT_OK) && !!is_vbios_lttpr_enable;
+ }
+
+ if (ctx->dc_bios->funcs->get_lttpr_interop) {
+ enum bp_result bp_query_result;
+ uint8_t is_vbios_interop_enabled = 0;
+
+ bp_query_result = ctx->dc_bios->funcs->get_lttpr_interop(ctx->dc_bios,
+ &is_vbios_interop_enabled);
+ dc->caps.vbios_lttpr_aware = (bp_query_result == BP_RESULT_OK) && !!is_vbios_interop_enabled;
+ }
+
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
else
diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
index 4a9b64023675..7024aeb0884c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
@@ -1500,6 +1500,23 @@ static bool dcn303_resource_construct(
dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
dc->caps.color.mpc.ocsc = 1;
+ /* read VBIOS LTTPR caps */
+ if (ctx->dc_bios->funcs->get_lttpr_caps) {
+ enum bp_result bp_query_result;
+ uint8_t is_vbios_lttpr_enable = 0;
+
+ bp_query_result = ctx->dc_bios->funcs->get_lttpr_caps(ctx->dc_bios, &is_vbios_lttpr_enable);
+ dc->caps.vbios_lttpr_enable = (bp_query_result == BP_RESULT_OK) && !!is_vbios_lttpr_enable;
+ }
+
+ if (ctx->dc_bios->funcs->get_lttpr_interop) {
+ enum bp_result bp_query_result;
+ uint8_t is_vbios_interop_enabled = 0;
+
+ bp_query_result = ctx->dc_bios->funcs->get_lttpr_interop(ctx->dc_bios, &is_vbios_interop_enabled);
+ dc->caps.vbios_lttpr_aware = (bp_query_result == BP_RESULT_OK) && !!is_vbios_interop_enabled;
+ }
+
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
else
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c
index 565f12dd179a..5065904c7833 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c
@@ -358,8 +358,8 @@ static void dcn31_hpo_dp_stream_enc_set_stream_attribute(
h_width = hw_crtc_timing.h_border_left + hw_crtc_timing.h_addressable + hw_crtc_timing.h_border_right;
v_height = hw_crtc_timing.v_border_top + hw_crtc_timing.v_addressable + hw_crtc_timing.v_border_bottom;
- hsp = hw_crtc_timing.flags.HSYNC_POSITIVE_POLARITY ? 0x80 : 0;
- vsp = hw_crtc_timing.flags.VSYNC_POSITIVE_POLARITY ? 0x80 : 0;
+ hsp = hw_crtc_timing.flags.HSYNC_POSITIVE_POLARITY ? 0 : 0x80;
+ vsp = hw_crtc_timing.flags.VSYNC_POSITIVE_POLARITY ? 0 : 0x80;
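+ /* In the DP MSA, HSP/VSP = 0 denotes positive (active-high) sync
+ * polarity and bit 7 set denotes negative; the previous mapping was
+ * inverted.
+ */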
v_freq = hw_crtc_timing.pix_clk_100hz * 100;
/* MSA Packet Mapping to 32-bit Link Symbols - DP2 spec, section 2.7.4.1
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
index 5dd1ce9ddb53..4206ce5bf9a9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
@@ -112,7 +112,7 @@ void dcn31_init_hw(struct dc *dc)
struct dc_bios *dcb = dc->ctx->dc_bios;
struct resource_pool *res_pool = dc->res_pool;
uint32_t backlight = MAX_BACKLIGHT_LEVEL;
- int i, j;
+ int i;
if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);
@@ -192,50 +192,13 @@ void dcn31_init_hw(struct dc *dc)
link->link_status.link_active = true;
}
- /* Power gate DSCs */
- for (i = 0; i < res_pool->res_cap->num_dsc; i++)
- if (hws->funcs.dsc_pg_control != NULL)
- hws->funcs.dsc_pg_control(hws, res_pool->dscs[i]->inst, false);
-
/* Enables outbox notifications for usb4 dpia */
if (dc->res_pool->usb4_dpia_count)
dmub_enable_outbox_notification(dc);
/* we want to turn off all dp displays before doing detection */
- if (dc->config.power_down_display_on_boot) {
- uint8_t dpcd_power_state = '\0';
- enum dc_status status = DC_ERROR_UNEXPECTED;
-
- for (i = 0; i < dc->link_count; i++) {
- if (dc->links[i]->connector_signal != SIGNAL_TYPE_DISPLAY_PORT)
- continue;
-
- /* if any of the displays are lit up turn them off */
- status = core_link_read_dpcd(dc->links[i], DP_SET_POWER,
- &dpcd_power_state, sizeof(dpcd_power_state));
- if (status == DC_OK && dpcd_power_state == DP_POWER_STATE_D0) {
- /* blank dp stream before power off receiver*/
- if (dc->links[i]->ep_type == DISPLAY_ENDPOINT_PHY &&
- dc->links[i]->link_enc->funcs->get_dig_frontend) {
- unsigned int fe;
-
- fe = dc->links[i]->link_enc->funcs->get_dig_frontend(
- dc->links[i]->link_enc);
- if (fe == ENGINE_ID_UNKNOWN)
- continue;
-
- for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
- if (fe == dc->res_pool->stream_enc[j]->id) {
- dc->res_pool->stream_enc[j]->funcs->dp_blank(dc->links[i],
- dc->res_pool->stream_enc[j]);
- break;
- }
- }
- }
- dp_receiver_power_ctrl(dc->links[i], false);
- }
- }
- }
+ if (dc->config.power_down_display_on_boot)
+ dc_link_blank_all_dp_displays(dc);
/* If taking control over from VBIOS, we may want to optimize our first
* mode set, so we need to skip powering down pipes until we know which
@@ -602,7 +565,7 @@ void dcn31_reset_hw_ctx_wrap(
dcn31_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
if (hws->funcs.enable_stream_gating)
- hws->funcs.enable_stream_gating(dc, pipe_ctx);
+ hws->funcs.enable_stream_gating(dc, pipe_ctx_old);
if (old_clk)
old_clk->funcs->cs_power_down(old_clk);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
index 05335a8c3c2d..e175b6cc0125 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
@@ -149,4 +149,9 @@ void dcn31_hw_sequencer_construct(struct dc *dc)
dc->hwss.init_hw = dcn20_fpga_init_hw;
dc->hwseq->funcs.init_pipes = NULL;
}
+ if (dc->debug.disable_z10) {
+ /* HW does not support Z10, or SW has disabled it */
+ dc->hwss.z10_restore = NULL;
+ dc->hwss.z10_save_init = NULL;
+ }
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c
index a4b1d98f0007..e8562fa11366 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c
@@ -256,6 +256,7 @@ static struct timing_generator_funcs dcn31_tg_funcs = {
.get_crc = optc1_get_crc,
.configure_crc = optc2_configure_crc,
.set_dsc_config = optc3_set_dsc_config,
+ .get_dsc_status = optc2_get_dsc_status,
.set_dwb_source = NULL,
.set_odm_bypass = optc3_set_odm_bypass,
.set_odm_combine = optc31_set_odm_combine,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
index 18896294ae12..88e040687940 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
@@ -2199,6 +2199,7 @@ static bool dcn31_resource_construct(
dc->caps.post_blend_color_processing = true;
dc->caps.force_dp_tps4_for_cp2520 = true;
dc->caps.dp_hpo = true;
+ dc->caps.edp_dsc_support = true;
dc->caps.extended_aux_timeout_support = true;
dc->caps.dmcub_support = true;
dc->caps.is_apu = true;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
index 6905ef1e75a6..d76251fd1566 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
@@ -73,6 +73,7 @@ struct display_mode_lib {
struct vba_vars_st vba;
struct dal_logger *logger;
struct dml_funcs funcs;
+ struct _vcs_dpi_display_e2e_pipe_params_st dml_pipe_state[6];
};
void dml_init_instance(struct display_mode_lib *lib,
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml/dml_wrapper.c
new file mode 100644
index 000000000000..ece34b0b8a46
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/dml_wrapper.c
@@ -0,0 +1,1889 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dml_wrapper.h"
+#include "resource.h"
+#include "core_types.h"
+#include "dsc.h"
+#include "clk_mgr.h"
+
+#ifndef DC_LOGGER_INIT
+#define DC_LOGGER_INIT
+#undef DC_LOG_WARNING
+#define DC_LOG_WARNING
+#endif
+
+#define DML_WRAPPER_TRANSLATION_
+#include "dml_wrapper_translation.c"
+#undef DML_WRAPPER_TRANSLATION_
+
+static bool is_dual_plane(enum surface_pixel_format format)
+{
+ return format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN || format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA;
+}
+
+static void build_clamping_params(struct dc_stream_state *stream)
+{
+ stream->clamping.clamping_level = CLAMPING_FULL_RANGE;
+ stream->clamping.c_depth = stream->timing.display_color_depth;
+ stream->clamping.pixel_encoding = stream->timing.pixel_encoding;
+}
+
+static void get_pixel_clock_parameters(
+ const struct pipe_ctx *pipe_ctx,
+ struct pixel_clk_params *pixel_clk_params)
+{
+ const struct dc_stream_state *stream = pipe_ctx->stream;
+
+ /*TODO: is this halved for YCbCr 420? in that case we might want to move
+ * the pixel clock normalization for hdmi up to here instead of doing it
+ * in pll_adjust_pix_clk
+ */
+ pixel_clk_params->requested_pix_clk_100hz = stream->timing.pix_clk_100hz;
+ pixel_clk_params->encoder_object_id = stream->link->link_enc->id;
+ pixel_clk_params->signal_type = pipe_ctx->stream->signal;
+ pixel_clk_params->controller_id = pipe_ctx->stream_res.tg->inst + 1;
+ /* TODO: un-hardcode*/
+ pixel_clk_params->requested_sym_clk = LINK_RATE_LOW *
+ LINK_RATE_REF_FREQ_IN_KHZ;
+ pixel_clk_params->flags.ENABLE_SS = 0;
+ pixel_clk_params->color_depth =
+ stream->timing.display_color_depth;
+ pixel_clk_params->flags.DISPLAY_BLANKED = 1;
+ pixel_clk_params->flags.SUPPORT_YCBCR420 = (stream->timing.pixel_encoding ==
+ PIXEL_ENCODING_YCBCR420);
+ pixel_clk_params->pixel_encoding = stream->timing.pixel_encoding;
+ if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR422) {
+ pixel_clk_params->color_depth = COLOR_DEPTH_888;
+ }
+ if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420) {
+ pixel_clk_params->requested_pix_clk_100hz = pixel_clk_params->requested_pix_clk_100hz / 2;
+ }
+ if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
+ pixel_clk_params->requested_pix_clk_100hz *= 2;
+}
+
+static void dml_build_bit_depth_reduction_params(struct dc_stream_state *stream,
+ struct bit_depth_reduction_params *fmt_bit_depth);
+
+static enum dc_status build_pipe_hw_param(struct pipe_ctx *pipe_ctx)
+{
+ get_pixel_clock_parameters(pipe_ctx, &pipe_ctx->stream_res.pix_clk_params);
+
+ if (pipe_ctx->clock_source)
+ pipe_ctx->clock_source->funcs->get_pix_clk_dividers(
+ pipe_ctx->clock_source,
+ &pipe_ctx->stream_res.pix_clk_params,
+ &pipe_ctx->pll_settings);
+
+ pipe_ctx->stream->clamping.pixel_encoding = pipe_ctx->stream->timing.pixel_encoding;
+
+ dml_build_bit_depth_reduction_params(pipe_ctx->stream,
+ &pipe_ctx->stream->bit_depth_params);
+ build_clamping_params(pipe_ctx->stream);
+
+ return DC_OK;
+}
+
+static void dml_build_bit_depth_reduction_params(struct dc_stream_state *stream,
+ struct bit_depth_reduction_params *fmt_bit_depth)
+{
+ enum dc_dither_option option = stream->dither_option;
+ enum dc_pixel_encoding pixel_encoding =
+ stream->timing.pixel_encoding;
+
+ memset(fmt_bit_depth, 0, sizeof(*fmt_bit_depth));
+
+ if (option == DITHER_OPTION_DEFAULT) {
+ switch (stream->timing.display_color_depth) {
+ case COLOR_DEPTH_666:
+ option = DITHER_OPTION_SPATIAL6;
+ break;
+ case COLOR_DEPTH_888:
+ option = DITHER_OPTION_SPATIAL8;
+ break;
+ case COLOR_DEPTH_101010:
+ option = DITHER_OPTION_SPATIAL10;
+ break;
+ default:
+ option = DITHER_OPTION_DISABLE;
+ }
+ }
+
+ if (option == DITHER_OPTION_DISABLE)
+ return;
+
+ if (option == DITHER_OPTION_TRUN6) {
+ fmt_bit_depth->flags.TRUNCATE_ENABLED = 1;
+ fmt_bit_depth->flags.TRUNCATE_DEPTH = 0;
+ } else if (option == DITHER_OPTION_TRUN8 ||
+ option == DITHER_OPTION_TRUN8_SPATIAL6 ||
+ option == DITHER_OPTION_TRUN8_FM6) {
+ fmt_bit_depth->flags.TRUNCATE_ENABLED = 1;
+ fmt_bit_depth->flags.TRUNCATE_DEPTH = 1;
+ } else if (option == DITHER_OPTION_TRUN10 ||
+ option == DITHER_OPTION_TRUN10_SPATIAL6 ||
+ option == DITHER_OPTION_TRUN10_SPATIAL8 ||
+ option == DITHER_OPTION_TRUN10_FM8 ||
+ option == DITHER_OPTION_TRUN10_FM6 ||
+ option == DITHER_OPTION_TRUN10_SPATIAL8_FM6) {
+ fmt_bit_depth->flags.TRUNCATE_ENABLED = 1;
+ fmt_bit_depth->flags.TRUNCATE_DEPTH = 2;
+ }
+
+ /* special case - Formatter can only reduce by 4 bits at most.
+ * When reducing from 12 to 6 bits,
+ * HW recommends we use trunc with round mode
+ * (if we did nothing, trunc to 10 bits would be used)
+ * note that any 12->10 bit reduction is ignored prior to DCE8,
+ * as the input was 10 bits.
+ */
+ if (option == DITHER_OPTION_SPATIAL6_FRAME_RANDOM ||
+ option == DITHER_OPTION_SPATIAL6 ||
+ option == DITHER_OPTION_FM6) {
+ fmt_bit_depth->flags.TRUNCATE_ENABLED = 1;
+ fmt_bit_depth->flags.TRUNCATE_DEPTH = 2;
+ fmt_bit_depth->flags.TRUNCATE_MODE = 1;
+ }
+
+ /* spatial dither
+ * note that spatial modes 1-3 are never used
+ */
+ if (option == DITHER_OPTION_SPATIAL6_FRAME_RANDOM ||
+ option == DITHER_OPTION_SPATIAL6 ||
+ option == DITHER_OPTION_TRUN10_SPATIAL6 ||
+ option == DITHER_OPTION_TRUN8_SPATIAL6) {
+ fmt_bit_depth->flags.SPATIAL_DITHER_ENABLED = 1;
+ fmt_bit_depth->flags.SPATIAL_DITHER_DEPTH = 0;
+ fmt_bit_depth->flags.HIGHPASS_RANDOM = 1;
+ fmt_bit_depth->flags.RGB_RANDOM =
+ (pixel_encoding == PIXEL_ENCODING_RGB) ? 1 : 0;
+ } else if (option == DITHER_OPTION_SPATIAL8_FRAME_RANDOM ||
+ option == DITHER_OPTION_SPATIAL8 ||
+ option == DITHER_OPTION_SPATIAL8_FM6 ||
+ option == DITHER_OPTION_TRUN10_SPATIAL8 ||
+ option == DITHER_OPTION_TRUN10_SPATIAL8_FM6) {
+ fmt_bit_depth->flags.SPATIAL_DITHER_ENABLED = 1;
+ fmt_bit_depth->flags.SPATIAL_DITHER_DEPTH = 1;
+ fmt_bit_depth->flags.HIGHPASS_RANDOM = 1;
+ fmt_bit_depth->flags.RGB_RANDOM =
+ (pixel_encoding == PIXEL_ENCODING_RGB) ? 1 : 0;
+ } else if (option == DITHER_OPTION_SPATIAL10_FRAME_RANDOM ||
+ option == DITHER_OPTION_SPATIAL10 ||
+ option == DITHER_OPTION_SPATIAL10_FM8 ||
+ option == DITHER_OPTION_SPATIAL10_FM6) {
+ fmt_bit_depth->flags.SPATIAL_DITHER_ENABLED = 1;
+ fmt_bit_depth->flags.SPATIAL_DITHER_DEPTH = 2;
+ fmt_bit_depth->flags.HIGHPASS_RANDOM = 1;
+ fmt_bit_depth->flags.RGB_RANDOM =
+ (pixel_encoding == PIXEL_ENCODING_RGB) ? 1 : 0;
+ }
+
+ if (option == DITHER_OPTION_SPATIAL6 ||
+ option == DITHER_OPTION_SPATIAL8 ||
+ option == DITHER_OPTION_SPATIAL10) {
+ fmt_bit_depth->flags.FRAME_RANDOM = 0;
+ } else {
+ fmt_bit_depth->flags.FRAME_RANDOM = 1;
+ }
+
+ //////////////////////
+ //// temporal dither
+ //////////////////////
+ if (option == DITHER_OPTION_FM6 ||
+ option == DITHER_OPTION_SPATIAL8_FM6 ||
+ option == DITHER_OPTION_SPATIAL10_FM6 ||
+ option == DITHER_OPTION_TRUN10_FM6 ||
+ option == DITHER_OPTION_TRUN8_FM6 ||
+ option == DITHER_OPTION_TRUN10_SPATIAL8_FM6) {
+ fmt_bit_depth->flags.FRAME_MODULATION_ENABLED = 1;
+ fmt_bit_depth->flags.FRAME_MODULATION_DEPTH = 0;
+ } else if (option == DITHER_OPTION_FM8 ||
+ option == DITHER_OPTION_SPATIAL10_FM8 ||
+ option == DITHER_OPTION_TRUN10_FM8) {
+ fmt_bit_depth->flags.FRAME_MODULATION_ENABLED = 1;
+ fmt_bit_depth->flags.FRAME_MODULATION_DEPTH = 1;
+ } else if (option == DITHER_OPTION_FM10) {
+ fmt_bit_depth->flags.FRAME_MODULATION_ENABLED = 1;
+ fmt_bit_depth->flags.FRAME_MODULATION_DEPTH = 2;
+ }
+
+ fmt_bit_depth->pixel_encoding = pixel_encoding;
+}
+
+bool dml_validate_dsc(struct dc *dc, struct dc_state *new_ctx)
+{
+ int i;
+
+ /* Validate DSC config, dsc count validation is already done */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &new_ctx->res_ctx.pipe_ctx[i];
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct dsc_config dsc_cfg;
+ struct pipe_ctx *odm_pipe;
+ int opp_cnt = 1;
+
+ for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
+ opp_cnt++;
+
+ /* Only need to validate top pipe */
+ if (pipe_ctx->top_pipe || pipe_ctx->prev_odm_pipe || !stream || !stream->timing.flags.DSC)
+ continue;
+
+ dsc_cfg.pic_width = (stream->timing.h_addressable + stream->timing.h_border_left
+ + stream->timing.h_border_right) / opp_cnt;
+ dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top
+ + stream->timing.v_border_bottom;
+ dsc_cfg.pixel_encoding = stream->timing.pixel_encoding;
+ dsc_cfg.color_depth = stream->timing.display_color_depth;
+ dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? true : false;
+ dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
+ dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt;
+
+ if (pipe_ctx->stream_res.dsc && !pipe_ctx->stream_res.dsc->funcs->dsc_validate_stream(pipe_ctx->stream_res.dsc, &dsc_cfg))
+ return false;
+ }
+ return true;
+}
+
+enum dc_status dml_build_mapped_resource(const struct dc *dc, struct dc_state *context, struct dc_stream_state *stream)
+{
+ enum dc_status status = DC_OK;
+ struct pipe_ctx *pipe_ctx = resource_get_head_pipe_for_stream(&context->res_ctx, stream);
+
+ if (!pipe_ctx)
+ return DC_ERROR_UNEXPECTED;
+
+
+ status = build_pipe_hw_param(pipe_ctx);
+
+ return status;
+}
+
+void dml_acquire_dsc(const struct dc *dc,
+ struct resource_context *res_ctx,
+ struct display_stream_compressor **dsc,
+ int pipe_idx)
+{
+ int i;
+ const struct resource_pool *pool = dc->res_pool;
+ struct display_stream_compressor *dsc_old = dc->current_state->res_ctx.pipe_ctx[pipe_idx].stream_res.dsc;
+
+ ASSERT(*dsc == NULL); /* If this ASSERT fails, dsc was not released properly */
+ *dsc = NULL;
+
+ /* Always do 1-to-1 mapping when number of DSCs is same as number of pipes */
+ if (pool->res_cap->num_dsc == pool->res_cap->num_opp) {
+ *dsc = pool->dscs[pipe_idx];
+ res_ctx->is_dsc_acquired[pipe_idx] = true;
+ return;
+ }
+
+ /* Return the old DSC to avoid having to redo the allocation */
+ if (dsc_old && !res_ctx->is_dsc_acquired[dsc_old->inst]) {
+ *dsc = dsc_old;
+ res_ctx->is_dsc_acquired[dsc_old->inst] = true;
+ return;
+ }
+
+ /* Find first free DSC */
+ for (i = 0; i < pool->res_cap->num_dsc; i++)
+ if (!res_ctx->is_dsc_acquired[i]) {
+ *dsc = pool->dscs[i];
+ res_ctx->is_dsc_acquired[i] = true;
+ break;
+ }
+}
+
+static bool dml_split_stream_for_mpc_or_odm(
+ const struct dc *dc,
+ struct resource_context *res_ctx,
+ struct pipe_ctx *pri_pipe,
+ struct pipe_ctx *sec_pipe,
+ bool odm)
+{
+ int pipe_idx = sec_pipe->pipe_idx;
+ const struct resource_pool *pool = dc->res_pool;
+
+ *sec_pipe = *pri_pipe;
+
+ sec_pipe->pipe_idx = pipe_idx;
+ sec_pipe->plane_res.mi = pool->mis[pipe_idx];
+ sec_pipe->plane_res.hubp = pool->hubps[pipe_idx];
+ sec_pipe->plane_res.ipp = pool->ipps[pipe_idx];
+ sec_pipe->plane_res.xfm = pool->transforms[pipe_idx];
+ sec_pipe->plane_res.dpp = pool->dpps[pipe_idx];
+ sec_pipe->plane_res.mpcc_inst = pool->dpps[pipe_idx]->inst;
+ sec_pipe->stream_res.dsc = NULL;
+ if (odm) {
+ if (pri_pipe->next_odm_pipe) {
+ ASSERT(pri_pipe->next_odm_pipe != sec_pipe);
+ sec_pipe->next_odm_pipe = pri_pipe->next_odm_pipe;
+ sec_pipe->next_odm_pipe->prev_odm_pipe = sec_pipe;
+ }
+ if (pri_pipe->top_pipe && pri_pipe->top_pipe->next_odm_pipe) {
+ pri_pipe->top_pipe->next_odm_pipe->bottom_pipe = sec_pipe;
+ sec_pipe->top_pipe = pri_pipe->top_pipe->next_odm_pipe;
+ }
+ if (pri_pipe->bottom_pipe && pri_pipe->bottom_pipe->next_odm_pipe) {
+ pri_pipe->bottom_pipe->next_odm_pipe->top_pipe = sec_pipe;
+ sec_pipe->bottom_pipe = pri_pipe->bottom_pipe->next_odm_pipe;
+ }
+ pri_pipe->next_odm_pipe = sec_pipe;
+ sec_pipe->prev_odm_pipe = pri_pipe;
+ ASSERT(sec_pipe->top_pipe == NULL);
+
+ if (!sec_pipe->top_pipe)
+ sec_pipe->stream_res.opp = pool->opps[pipe_idx];
+ else
+ sec_pipe->stream_res.opp = sec_pipe->top_pipe->stream_res.opp;
+ if (sec_pipe->stream->timing.flags.DSC == 1) {
+ dml_acquire_dsc(dc, res_ctx, &sec_pipe->stream_res.dsc, pipe_idx);
+ ASSERT(sec_pipe->stream_res.dsc);
+ if (sec_pipe->stream_res.dsc == NULL)
+ return false;
+ }
+ } else {
+ if (pri_pipe->bottom_pipe) {
+ ASSERT(pri_pipe->bottom_pipe != sec_pipe);
+ sec_pipe->bottom_pipe = pri_pipe->bottom_pipe;
+ sec_pipe->bottom_pipe->top_pipe = sec_pipe;
+ }
+ pri_pipe->bottom_pipe = sec_pipe;
+ sec_pipe->top_pipe = pri_pipe;
+
+ ASSERT(pri_pipe->plane_state);
+ }
+
+ return true;
+}
+
+static struct pipe_ctx *dml_find_split_pipe(
+ struct dc *dc,
+ struct dc_state *context,
+ int old_index)
+{
+ struct pipe_ctx *pipe = NULL;
+ int i;
+
+ if (old_index >= 0 && context->res_ctx.pipe_ctx[old_index].stream == NULL) {
+ pipe = &context->res_ctx.pipe_ctx[old_index];
+ pipe->pipe_idx = old_index;
+ }
+
+ if (!pipe)
+ for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) {
+ if (dc->current_state->res_ctx.pipe_ctx[i].top_pipe == NULL
+ && dc->current_state->res_ctx.pipe_ctx[i].prev_odm_pipe == NULL) {
+ if (context->res_ctx.pipe_ctx[i].stream == NULL) {
+ pipe = &context->res_ctx.pipe_ctx[i];
+ pipe->pipe_idx = i;
+ break;
+ }
+ }
+ }
+
+ /*
+ * May need to fix pipes getting tossed from one OPP to another on flip.
+ * When debugging transient underflow during topology updates, add:
+ * ASSERT(pipe);
+ */
+ if (!pipe)
+ for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) {
+ if (context->res_ctx.pipe_ctx[i].stream == NULL) {
+ pipe = &context->res_ctx.pipe_ctx[i];
+ pipe->pipe_idx = i;
+ break;
+ }
+ }
+
+ return pipe;
+}
+
+static void dml_release_dsc(struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ struct display_stream_compressor **dsc)
+{
+ int i;
+
+ for (i = 0; i < pool->res_cap->num_dsc; i++)
+ if (pool->dscs[i] == *dsc) {
+ res_ctx->is_dsc_acquired[i] = false;
+ *dsc = NULL;
+ break;
+ }
+}
+
+static int dml_get_num_mpc_splits(struct pipe_ctx *pipe)
+{
+ int mpc_split_count = 0;
+ struct pipe_ctx *other_pipe = pipe->bottom_pipe;
+
+ while (other_pipe && other_pipe->plane_state == pipe->plane_state) {
+ mpc_split_count++;
+ other_pipe = other_pipe->bottom_pipe;
+ }
+ other_pipe = pipe->top_pipe;
+ while (other_pipe && other_pipe->plane_state == pipe->plane_state) {
+ mpc_split_count++;
+ other_pipe = other_pipe->top_pipe;
+ }
+
+ return mpc_split_count;
+}
+
+static bool dml_enough_pipes_for_subvp(struct dc *dc,
+ struct dc_state *context)
+{
+ int i = 0;
+ int num_pipes = 0;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe->stream && pipe->plane_state)
+ num_pipes++;
+ }
+
+ // Sub-VP only possible if the number of "real" pipes is
+ // less than or equal to half the number of available pipes
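+ // Illustrative: on an 8-pipe pool, 4 enabled pipes still allow Sub-VP
+ // (each main pipe needs a phantom counterpart), while 5 do not.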
+ if (num_pipes * 2 > dc->res_pool->pipe_count)
+ return false;
+
+ return true;
+}
+
+static int dml_validate_apply_pipe_split_flags(
+ struct dc *dc,
+ struct dc_state *context,
+ int vlevel,
+ int *split,
+ bool *merge)
+{
+ int i, pipe_idx, vlevel_split;
+ int plane_count = 0;
+ bool force_split = false;
+ bool avoid_split = dc->debug.pipe_split_policy == MPC_SPLIT_AVOID;
+ struct vba_vars_st *v = &context->bw_ctx.dml.vba;
+ int max_mpc_comb = v->maxMpcComb;
+
+ if (context->stream_count > 1) {
+ if (dc->debug.pipe_split_policy == MPC_SPLIT_AVOID_MULT_DISP)
+ avoid_split = true;
+ } else if (dc->debug.force_single_disp_pipe_split)
+ force_split = true;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ /**
+ * Workaround for avoiding pipe-split in cases where we'd split
+ * planes that are too small, resulting in splits that aren't
+ * valid for the scaler.
+ */
+ if (pipe->plane_state &&
+ (pipe->plane_state->dst_rect.width <= 16 ||
+ pipe->plane_state->dst_rect.height <= 16 ||
+ pipe->plane_state->src_rect.width <= 16 ||
+ pipe->plane_state->src_rect.height <= 16))
+ avoid_split = true;
+
+ /* TODO: fix dc bugs and remove this split threshold thing */
+ if (pipe->stream && !pipe->prev_odm_pipe &&
+ (!pipe->top_pipe || pipe->top_pipe->plane_state != pipe->plane_state))
+ ++plane_count;
+ }
+ if (plane_count > dc->res_pool->pipe_count / 2)
+ avoid_split = true;
+
+ /* W/A: Mode timing with borders may not work well with pipe split, avoid for this corner case */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+ struct dc_crtc_timing timing;
+
+ if (!pipe->stream)
+ continue;
+
+ timing = pipe->stream->timing;
+ if (timing.h_border_left + timing.h_border_right
+ + timing.v_border_top + timing.v_border_bottom > 0) {
+ avoid_split = true;
+ break;
+ }
+ }
+
+ /* The avoid-split loop looks for the lowest voltage level that leaves the most pipes unsplit */
+ if (avoid_split) {
+ for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
+ if (!context->res_ctx.pipe_ctx[i].stream)
+ continue;
+
+ for (vlevel_split = vlevel; vlevel <= context->bw_ctx.dml.soc.num_states; vlevel++)
+ if (v->NoOfDPP[vlevel][0][pipe_idx] == 1 &&
+ v->ModeSupport[vlevel][0])
+ break;
+ /* Impossible to not split this pipe */
+ if (vlevel > context->bw_ctx.dml.soc.num_states)
+ vlevel = vlevel_split;
+ else
+ max_mpc_comb = 0;
+ pipe_idx++;
+ }
+ v->maxMpcComb = max_mpc_comb;
+ }
+
+ /* Split loop sets which pipe should be split based on dml outputs and dc flags */
+ for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+ int pipe_plane = v->pipe_plane[pipe_idx];
+ bool split4mpc = context->stream_count == 1 && plane_count == 1
+ && dc->config.enable_4to1MPC && dc->res_pool->pipe_count >= 4;
+
+ if (!context->res_ctx.pipe_ctx[i].stream)
+ continue;
+
+ if (split4mpc || v->NoOfDPP[vlevel][max_mpc_comb][pipe_plane] == 4)
+ split[i] = 4;
+ else if (force_split || v->NoOfDPP[vlevel][max_mpc_comb][pipe_plane] == 2)
+ split[i] = 2;
+
+ if ((pipe->stream->view_format ==
+ VIEW_3D_FORMAT_SIDE_BY_SIDE ||
+ pipe->stream->view_format ==
+ VIEW_3D_FORMAT_TOP_AND_BOTTOM) &&
+ (pipe->stream->timing.timing_3d_format ==
+ TIMING_3D_FORMAT_TOP_AND_BOTTOM ||
+ pipe->stream->timing.timing_3d_format ==
+ TIMING_3D_FORMAT_SIDE_BY_SIDE))
+ split[i] = 2;
+ if (dc->debug.force_odm_combine & (1 << pipe->stream_res.tg->inst)) {
+ split[i] = 2;
+ v->ODMCombineEnablePerState[vlevel][pipe_plane] = dm_odm_combine_mode_2to1;
+ }
+ if (dc->debug.force_odm_combine_4to1 & (1 << pipe->stream_res.tg->inst)) {
+ split[i] = 4;
+ v->ODMCombineEnablePerState[vlevel][pipe_plane] = dm_odm_combine_mode_4to1;
+ }
+ /*420 format workaround*/
+ if (pipe->stream->timing.h_addressable > 7680 &&
+ pipe->stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420) {
+ split[i] = 4;
+ }
+
+ v->ODMCombineEnabled[pipe_plane] =
+ v->ODMCombineEnablePerState[vlevel][pipe_plane];
+
+ if (v->ODMCombineEnabled[pipe_plane] == dm_odm_combine_mode_disabled) {
+ if (dml_get_num_mpc_splits(pipe) == 1) {
+ /*If need split for mpc but 2 way split already*/
+ if (split[i] == 4)
+ split[i] = 2; /* 2 -> 4 MPC */
+ else if (split[i] == 2)
+ split[i] = 0; /* 2 -> 2 MPC */
+ else if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state)
+ merge[i] = true; /* 2 -> 1 MPC */
+ } else if (dml_get_num_mpc_splits(pipe) == 3) {
+ /*If need split for mpc but 4 way split already*/
+ if (split[i] == 2 && ((pipe->top_pipe && !pipe->top_pipe->top_pipe)
+ || !pipe->bottom_pipe)) {
+ merge[i] = true; /* 4 -> 2 MPC */
+ } else if (split[i] == 0 && pipe->top_pipe &&
+ pipe->top_pipe->plane_state == pipe->plane_state)
+ merge[i] = true; /* 4 -> 1 MPC */
+ split[i] = 0;
+ } else if (dml_get_num_mpc_splits(pipe)) {
+ /* ODM -> MPC transition */
+ if (pipe->prev_odm_pipe) {
+ split[i] = 0;
+ merge[i] = true;
+ }
+ }
+ } else {
+ if (dml_get_num_mpc_splits(pipe) == 1) {
+ /*If need split for odm but 2 way split already*/
+ if (split[i] == 4)
+ split[i] = 2; /* 2 -> 4 ODM */
+ else if (split[i] == 2)
+ split[i] = 0; /* 2 -> 2 ODM */
+ else if (pipe->prev_odm_pipe) {
+ ASSERT(0); /* NOT expected yet */
+ merge[i] = true; /* exit ODM */
+ }
+ } else if (dml_get_num_mpc_splits(pipe) == 3) {
+ /*If need split for odm but 4 way split already*/
+ if (split[i] == 2 && ((pipe->prev_odm_pipe && !pipe->prev_odm_pipe->prev_odm_pipe)
+ || !pipe->next_odm_pipe)) {
+ ASSERT(0); /* NOT expected yet */
+ merge[i] = true; /* 4 -> 2 ODM */
+ } else if (split[i] == 0 && pipe->prev_odm_pipe) {
+ ASSERT(0); /* NOT expected yet */
+ merge[i] = true; /* exit ODM */
+ }
+ split[i] = 0;
+ } else if (dml_get_num_mpc_splits(pipe)) {
+ /* MPC -> ODM transition */
+ ASSERT(0); /* NOT expected yet */
+ if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state) {
+ split[i] = 0;
+ merge[i] = true;
+ }
+ }
+ }
+
+ /* Adjust dppclk when split is forced, do not bother with dispclk */
+ if (split[i] != 0 && v->NoOfDPP[vlevel][max_mpc_comb][pipe_idx] == 1)
+ v->RequiredDPPCLK[vlevel][max_mpc_comb][pipe_idx] /= 2;
+ pipe_idx++;
+ }
+
+ return vlevel;
+}
+
+static void dml_set_phantom_stream_timing(struct dc *dc,
+ struct dc_state *context,
+ struct pipe_ctx *ref_pipe,
+ struct dc_stream_state *phantom_stream)
+{
+ // phantom_vactive = blackout (latency + margin) + fw_processing_delays + pstate allow width
+ uint32_t phantom_vactive_us = context->bw_ctx.dml.soc.dram_clock_change_latency_us + 60 +
+ dc->caps.subvp_fw_processing_delay_us +
+ dc->caps.subvp_pstate_allow_width_us;
+ uint32_t phantom_vactive = ((double)phantom_vactive_us/1000000) *
+ (ref_pipe->stream->timing.pix_clk_100hz * 100) /
+ (double)ref_pipe->stream->timing.h_total;
+ uint32_t phantom_bp = ref_pipe->pipe_dlg_param.vstartup_start;
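+ /* Worked example (illustrative numbers): a 300 us blackout budget at a
+ * 594 MHz pixel clock with h_total = 4400 gives
+ * (300 / 1e6) * 594e6 / 4400 ~= 40 phantom active lines.
+ */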
+
+ phantom_stream->dst.y = 0;
+ phantom_stream->dst.height = phantom_vactive;
+ phantom_stream->src.y = 0;
+ phantom_stream->src.height = phantom_vactive;
+
+ phantom_stream->timing.v_addressable = phantom_vactive;
+ phantom_stream->timing.v_front_porch = 1;
+ phantom_stream->timing.v_total = phantom_stream->timing.v_addressable +
+ phantom_stream->timing.v_front_porch +
+ phantom_stream->timing.v_sync_width +
+ phantom_bp;
+}
+
+static struct dc_stream_state *dml_enable_phantom_stream(struct dc *dc,
+ struct dc_state *context,
+ struct pipe_ctx *ref_pipe)
+{
+ struct dc_stream_state *phantom_stream = NULL;
+
+ phantom_stream = dc_create_stream_for_sink(ref_pipe->stream->sink);
+ phantom_stream->signal = SIGNAL_TYPE_VIRTUAL;
+ phantom_stream->dpms_off = true;
+ phantom_stream->mall_stream_config.type = SUBVP_PHANTOM;
+ phantom_stream->mall_stream_config.paired_stream = ref_pipe->stream;
+ ref_pipe->stream->mall_stream_config.type = SUBVP_MAIN;
+ ref_pipe->stream->mall_stream_config.paired_stream = phantom_stream;
+
+ /* stream has limited viewport and small timing */
+ memcpy(&phantom_stream->timing, &ref_pipe->stream->timing, sizeof(phantom_stream->timing));
+ memcpy(&phantom_stream->src, &ref_pipe->stream->src, sizeof(phantom_stream->src));
+ memcpy(&phantom_stream->dst, &ref_pipe->stream->dst, sizeof(phantom_stream->dst));
+ dml_set_phantom_stream_timing(dc, context, ref_pipe, phantom_stream);
+
+ dc_add_stream_to_ctx(dc, context, phantom_stream);
+ dc->hwss.apply_ctx_to_hw(dc, context);
+ return phantom_stream;
+}
+
+static void dml_enable_phantom_plane(struct dc *dc,
+ struct dc_state *context,
+ struct dc_stream_state *phantom_stream,
+ struct pipe_ctx *main_pipe)
+{
+ struct dc_plane_state *phantom_plane = NULL;
+ struct dc_plane_state *prev_phantom_plane = NULL;
+ struct pipe_ctx *curr_pipe = main_pipe;
+
+ while (curr_pipe) {
+ if (curr_pipe->top_pipe && curr_pipe->top_pipe->plane_state == curr_pipe->plane_state)
+ phantom_plane = prev_phantom_plane;
+ else
+ phantom_plane = dc_create_plane_state(dc);
+
+ memcpy(&phantom_plane->address, &curr_pipe->plane_state->address, sizeof(phantom_plane->address));
+ memcpy(&phantom_plane->scaling_quality, &curr_pipe->plane_state->scaling_quality,
+ sizeof(phantom_plane->scaling_quality));
+ memcpy(&phantom_plane->src_rect, &curr_pipe->plane_state->src_rect, sizeof(phantom_plane->src_rect));
+ memcpy(&phantom_plane->dst_rect, &curr_pipe->plane_state->dst_rect, sizeof(phantom_plane->dst_rect));
+ memcpy(&phantom_plane->clip_rect, &curr_pipe->plane_state->clip_rect, sizeof(phantom_plane->clip_rect));
+ memcpy(&phantom_plane->plane_size, &curr_pipe->plane_state->plane_size,
+ sizeof(phantom_plane->plane_size));
+ memcpy(&phantom_plane->tiling_info, &curr_pipe->plane_state->tiling_info,
+ sizeof(phantom_plane->tiling_info));
+ memcpy(&phantom_plane->dcc, &curr_pipe->plane_state->dcc, sizeof(phantom_plane->dcc));
+ /* compat_level is not currently a member of dc_plane_state; re-enable once it is:
+ * phantom_plane->compat_level = curr_pipe->plane_state->compat_level;
+ */
+ phantom_plane->format = curr_pipe->plane_state->format;
+ phantom_plane->rotation = curr_pipe->plane_state->rotation;
+ phantom_plane->visible = curr_pipe->plane_state->visible;
+
+ /* The phantom (shadow) pipe has a reduced viewport. */
+ phantom_plane->clip_rect.y = 0;
+ phantom_plane->clip_rect.height = phantom_stream->timing.v_addressable;
+
+ dc_add_plane_to_context(dc, phantom_stream, phantom_plane, context);
+
+ curr_pipe = curr_pipe->bottom_pipe;
+ prev_phantom_plane = phantom_plane;
+ }
+}
+
+static void dml_add_phantom_pipes(struct dc *dc, struct dc_state *context)
+{
+ int i = 0;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+ struct dc_stream_state *ref_stream = pipe->stream;
+ // Only construct a phantom stream for top pipes that have a plane enabled
+ if (!pipe->top_pipe && pipe->plane_state && pipe->stream &&
+ pipe->stream->mall_stream_config.type == SUBVP_NONE) {
+ struct dc_stream_state *phantom_stream = NULL;
+
+ phantom_stream = dml_enable_phantom_stream(dc, context, pipe);
+ dml_enable_phantom_plane(dc, context, phantom_stream, pipe);
+ }
+ }
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe->plane_state && pipe->stream &&
+ pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
+ pipe->stream->use_dynamic_meta = false;
+ pipe->plane_state->flip_immediate = false;
+ if (!resource_build_scaling_params(pipe)) {
+ // TODO: log an error and remove the phantom pipes, since building scaling params failed
+ }
+ }
+ }
+}
+
+static void dml_remove_phantom_pipes(struct dc *dc, struct dc_state *context)
+{
+ int i;
+ bool removed_pipe = false;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+ // Remove the phantom planes and stream for any phantom pipe
+ if (pipe->plane_state && pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
+ dc_rem_all_planes_for_stream(dc, pipe->stream, context);
+ dc_remove_stream_from_ctx(dc, context, pipe->stream);
+ removed_pipe = true;
+ }
+
+ // Clear all phantom stream info
+ if (pipe->stream) {
+ pipe->stream->mall_stream_config.type = SUBVP_NONE;
+ pipe->stream->mall_stream_config.paired_stream = NULL;
+ }
+ }
+ if (removed_pipe)
+ dc->hwss.apply_ctx_to_hw(dc, context);
+}
+
+/*
+ * If the input state contains no upstream planes for a particular pipe (i.e. only timing),
+ * we need to populate some "conservative" plane information, as DML cannot handle "no planes"
+ */
+static void populate_default_plane_from_timing(const struct dc_crtc_timing *timing, struct _vcs_dpi_display_pipe_params_st *pipe)
+{
+ pipe->src.is_hsplit = pipe->dest.odm_combine != dm_odm_combine_mode_disabled;
+ pipe->src.source_scan = dm_horz;
+ pipe->src.sw_mode = dm_sw_4kb_s;
+ pipe->src.macro_tile_size = dm_64k_tile;
+ pipe->src.viewport_width = timing->h_addressable;
+ if (pipe->src.viewport_width > 1920)
+ pipe->src.viewport_width = 1920;
+ pipe->src.viewport_height = timing->v_addressable;
+ if (pipe->src.viewport_height > 1080)
+ pipe->src.viewport_height = 1080;
+ pipe->src.surface_height_y = pipe->src.viewport_height;
+ pipe->src.surface_width_y = pipe->src.viewport_width;
+ pipe->src.surface_height_c = pipe->src.viewport_height;
+ pipe->src.surface_width_c = pipe->src.viewport_width;
+ pipe->src.data_pitch = ((pipe->src.viewport_width + 255) / 256) * 256;
+ pipe->src.source_format = dm_444_32;
+ pipe->dest.recout_width = pipe->src.viewport_width;
+ pipe->dest.recout_height = pipe->src.viewport_height;
+ pipe->dest.full_recout_width = pipe->dest.recout_width;
+ pipe->dest.full_recout_height = pipe->dest.recout_height;
+ pipe->scale_ratio_depth.lb_depth = dm_lb_16;
+ pipe->scale_ratio_depth.hscl_ratio = 1.0;
+ pipe->scale_ratio_depth.vscl_ratio = 1.0;
+ pipe->scale_ratio_depth.scl_enable = 0;
+ pipe->scale_taps.htaps = 1;
+ pipe->scale_taps.vtaps = 1;
+ pipe->dest.vtotal_min = timing->v_total;
+ pipe->dest.vtotal_max = timing->v_total;
+
+ if (pipe->dest.odm_combine == dm_odm_combine_mode_2to1) {
+ pipe->src.viewport_width /= 2;
+ pipe->dest.recout_width /= 2;
+ } else if (pipe->dest.odm_combine == dm_odm_combine_mode_4to1) {
+ pipe->src.viewport_width /= 4;
+ pipe->dest.recout_width /= 4;
+ }
+
+ pipe->src.dcc = false;
+ pipe->src.dcc_rate = 1;
+}
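+
+/* Example of the conservative defaults above: a clamped 1920-wide viewport
+ * gets data_pitch = ((1920 + 255) / 256) * 256 = 2048, i.e. the width rounded
+ * up to a conservative 256-pixel alignment.
+ */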
+
+/*
+ * If the pipe is not blending (i.e. pipe_ctx->top_pipe == NULL), its
+ * hsplit group is equal to its own pipe index.
+ * Otherwise, all pipes that are part of the same blending tree share the
+ * hsplit group index of the topmost pipe.
+ *
+ * If the pipe ctx is ODM combined, the same logic applies along the ODM chain.
+ */
+static void populate_hsplit_group_from_dc_pipe_ctx (const struct pipe_ctx *dc_pipe_ctx, struct _vcs_dpi_display_e2e_pipe_params_st *e2e_pipe)
+{
+ e2e_pipe->pipe.src.hsplit_grp = dc_pipe_ctx->pipe_idx;
+
+ if (dc_pipe_ctx->top_pipe && dc_pipe_ctx->top_pipe->plane_state
+ == dc_pipe_ctx->plane_state) {
+ struct pipe_ctx *first_pipe = dc_pipe_ctx->top_pipe;
+ int split_idx = 0;
+
+ while (first_pipe->top_pipe && first_pipe->top_pipe->plane_state
+ == dc_pipe_ctx->plane_state) {
+ first_pipe = first_pipe->top_pipe;
+ split_idx++;
+ }
+
+ /* Treat a 4-to-1 MPC combine as an MPO of two 2-to-1 combines */
+ if (split_idx == 0)
+ e2e_pipe->pipe.src.hsplit_grp = first_pipe->pipe_idx;
+ else if (split_idx == 1)
+ e2e_pipe->pipe.src.hsplit_grp = dc_pipe_ctx->pipe_idx;
+ else if (split_idx == 2)
+ e2e_pipe->pipe.src.hsplit_grp = dc_pipe_ctx->top_pipe->pipe_idx;
+
+ } else if (dc_pipe_ctx->prev_odm_pipe) {
+ struct pipe_ctx *first_pipe = dc_pipe_ctx->prev_odm_pipe;
+
+ while (first_pipe->prev_odm_pipe)
+ first_pipe = first_pipe->prev_odm_pipe;
+ e2e_pipe->pipe.src.hsplit_grp = first_pipe->pipe_idx;
+ }
+}
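+
+/* Example: for a 4-to-1 MPC combine with pipes P0 (top), P1, P2, P3 sharing
+ * one plane, the walk above gives split_idx 0, 1, 2 for P1, P2, P3, so P1
+ * takes P0's index, P2 keeps its own and P3 takes P2's index. The result is
+ * two 2-to-1 hsplit groups, {P0, P1} and {P2, P3}, matching the comment above.
+ */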
+
+static void populate_dml_from_dc_pipe_ctx (const struct pipe_ctx *dc_pipe_ctx, struct _vcs_dpi_display_e2e_pipe_params_st *e2e_pipe, int always_scale)
+{
+ const struct dc_plane_state *pln = dc_pipe_ctx->plane_state;
+ const struct scaler_data *scl = &dc_pipe_ctx->plane_res.scl_data;
+
+ e2e_pipe->pipe.src.immediate_flip = pln->flip_immediate;
+ e2e_pipe->pipe.src.is_hsplit = (dc_pipe_ctx->bottom_pipe && dc_pipe_ctx->bottom_pipe->plane_state == pln)
+ || (dc_pipe_ctx->top_pipe && dc_pipe_ctx->top_pipe->plane_state == pln)
+ || e2e_pipe->pipe.dest.odm_combine != dm_odm_combine_mode_disabled;
+
+ /* stereo is not split */
+ if (pln->stereo_format == PLANE_STEREO_FORMAT_SIDE_BY_SIDE ||
+ pln->stereo_format == PLANE_STEREO_FORMAT_TOP_AND_BOTTOM) {
+ e2e_pipe->pipe.src.is_hsplit = false;
+ e2e_pipe->pipe.src.hsplit_grp = dc_pipe_ctx->pipe_idx;
+ }
+
+ e2e_pipe->pipe.src.source_scan = pln->rotation == ROTATION_ANGLE_90
+ || pln->rotation == ROTATION_ANGLE_270 ? dm_vert : dm_horz;
+ e2e_pipe->pipe.src.viewport_y_y = scl->viewport.y;
+ e2e_pipe->pipe.src.viewport_y_c = scl->viewport_c.y;
+ e2e_pipe->pipe.src.viewport_width = scl->viewport.width;
+ e2e_pipe->pipe.src.viewport_width_c = scl->viewport_c.width;
+ e2e_pipe->pipe.src.viewport_height = scl->viewport.height;
+ e2e_pipe->pipe.src.viewport_height_c = scl->viewport_c.height;
+ e2e_pipe->pipe.src.viewport_width_max = pln->src_rect.width;
+ e2e_pipe->pipe.src.viewport_height_max = pln->src_rect.height;
+ e2e_pipe->pipe.src.surface_width_y = pln->plane_size.surface_size.width;
+ e2e_pipe->pipe.src.surface_height_y = pln->plane_size.surface_size.height;
+ e2e_pipe->pipe.src.surface_width_c = pln->plane_size.chroma_size.width;
+ e2e_pipe->pipe.src.surface_height_c = pln->plane_size.chroma_size.height;
+
+ if (pln->format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA
+ || pln->format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
+ e2e_pipe->pipe.src.data_pitch = pln->plane_size.surface_pitch;
+ e2e_pipe->pipe.src.data_pitch_c = pln->plane_size.chroma_pitch;
+ e2e_pipe->pipe.src.meta_pitch = pln->dcc.meta_pitch;
+ e2e_pipe->pipe.src.meta_pitch_c = pln->dcc.meta_pitch_c;
+ } else {
+ e2e_pipe->pipe.src.data_pitch = pln->plane_size.surface_pitch;
+ e2e_pipe->pipe.src.meta_pitch = pln->dcc.meta_pitch;
+ }
+ e2e_pipe->pipe.src.dcc = pln->dcc.enable;
+ e2e_pipe->pipe.src.dcc_rate = 1;
+ e2e_pipe->pipe.dest.recout_width = scl->recout.width;
+ e2e_pipe->pipe.dest.recout_height = scl->recout.height;
+ e2e_pipe->pipe.dest.full_recout_height = scl->recout.height;
+ e2e_pipe->pipe.dest.full_recout_width = scl->recout.width;
+ if (e2e_pipe->pipe.dest.odm_combine == dm_odm_combine_mode_2to1)
+ e2e_pipe->pipe.dest.full_recout_width *= 2;
+ else if (e2e_pipe->pipe.dest.odm_combine == dm_odm_combine_mode_4to1)
+ e2e_pipe->pipe.dest.full_recout_width *= 4;
+ else {
+ struct pipe_ctx *split_pipe = dc_pipe_ctx->bottom_pipe;
+
+ while (split_pipe && split_pipe->plane_state == pln) {
+ e2e_pipe->pipe.dest.full_recout_width += split_pipe->plane_res.scl_data.recout.width;
+ split_pipe = split_pipe->bottom_pipe;
+ }
+ split_pipe = dc_pipe_ctx->top_pipe;
+ while (split_pipe && split_pipe->plane_state == pln) {
+ e2e_pipe->pipe.dest.full_recout_width += split_pipe->plane_res.scl_data.recout.width;
+ split_pipe = split_pipe->top_pipe;
+ }
+ }
+
+ e2e_pipe->pipe.scale_ratio_depth.lb_depth = dm_lb_16;
+ e2e_pipe->pipe.scale_ratio_depth.hscl_ratio = (double) scl->ratios.horz.value / (1ULL<<32);
+ e2e_pipe->pipe.scale_ratio_depth.hscl_ratio_c = (double) scl->ratios.horz_c.value / (1ULL<<32);
+ e2e_pipe->pipe.scale_ratio_depth.vscl_ratio = (double) scl->ratios.vert.value / (1ULL<<32);
+ e2e_pipe->pipe.scale_ratio_depth.vscl_ratio_c = (double) scl->ratios.vert_c.value / (1ULL<<32);
+ e2e_pipe->pipe.scale_ratio_depth.scl_enable =
+ scl->ratios.vert.value != dc_fixpt_one.value
+ || scl->ratios.horz.value != dc_fixpt_one.value
+ || scl->ratios.vert_c.value != dc_fixpt_one.value
+ || scl->ratios.horz_c.value != dc_fixpt_one.value /* LB only or full scaling */
+ || always_scale; /* support always-scale */
+ e2e_pipe->pipe.scale_taps.htaps = scl->taps.h_taps;
+ e2e_pipe->pipe.scale_taps.htaps_c = scl->taps.h_taps_c;
+ e2e_pipe->pipe.scale_taps.vtaps = scl->taps.v_taps;
+ e2e_pipe->pipe.scale_taps.vtaps_c = scl->taps.v_taps_c;
+
+ /* compat_level is not currently defined in dc_plane_state; keep this commented out until it is resolved:
+ * if (pln->compat_level == DC_LEGACY_TILING_ADDR_GEN_TWO) {
+ swizzle_to_dml_params(pln->tiling_info.gfx9.swizzle,
+ &e2e_pipe->pipe.src.sw_mode);
+ e2e_pipe->pipe.src.macro_tile_size =
+ swizzle_mode_to_macro_tile_size(pln->tiling_info.gfx9.swizzle);
+ } else {
+ gfx10array_mode_to_dml_params(pln->tiling_info.gfx10compatible.array_mode,
+ pln->compat_level,
+ &e2e_pipe->pipe.src.sw_mode);
+ e2e_pipe->pipe.src.macro_tile_size = dm_4k_tile;
+ }*/
+
+ e2e_pipe->pipe.src.source_format = dc_source_format_to_dml_source_format(pln->format);
+}
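+
+/* The scaler ratios above are dc fixed31_32 values, so dividing the raw
+ * .value by 2^32 recovers the real ratio; e.g. a 1.5x ratio is stored as
+ * 0x180000000 and 0x180000000 / (1ULL << 32) = 1.5.
+ */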
+
+static void populate_dml_cursor_parameters_from_dc_pipe_ctx (const struct pipe_ctx *dc_pipe_ctx, struct _vcs_dpi_display_e2e_pipe_params_st *e2e_pipe)
+{
+ /*
+ * Cursor count is 1 for a graphics plane and 0 for NV12 (video) and
+ * SubVP phantom pipes; this feeds the bandwidth calculations for
+ * cursor on/off.
+ */
+ if (dc_pipe_ctx->plane_state &&
+ (dc_pipe_ctx->plane_state->address.type == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE ||
+ dc_pipe_ctx->stream->mall_stream_config.type == SUBVP_PHANTOM))
+ e2e_pipe->pipe.src.num_cursors = 0;
+ else
+ e2e_pipe->pipe.src.num_cursors = 1;
+
+ e2e_pipe->pipe.src.cur0_src_width = 256;
+ e2e_pipe->pipe.src.cur0_bpp = dm_cur_32bit;
+}
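+
+/* Example: an ARGB graphics plane reports num_cursors = 1, while an NV12
+ * video plane (PLN_ADDR_TYPE_VIDEO_PROGRESSIVE) or a SubVP phantom pipe
+ * reports 0, since no cursor bandwidth is accounted for those pipes.
+ */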
+
+static int populate_dml_pipes_from_context_base(
+ struct dc *dc,
+ struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ bool fast_validate)
+{
+ int pipe_cnt, i;
+ bool synchronized_vblank = true;
+ struct resource_context *res_ctx = &context->res_ctx;
+
+ for (i = 0, pipe_cnt = -1; i < dc->res_pool->pipe_count; i++) {
+ if (!res_ctx->pipe_ctx[i].stream)
+ continue;
+
+ if (pipe_cnt < 0) {
+ pipe_cnt = i;
+ continue;
+ }
+
+ if (res_ctx->pipe_ctx[pipe_cnt].stream == res_ctx->pipe_ctx[i].stream)
+ continue;
+
+ if (dc->debug.disable_timing_sync ||
+ (!resource_are_streams_timing_synchronizable(
+ res_ctx->pipe_ctx[pipe_cnt].stream,
+ res_ctx->pipe_ctx[i].stream) &&
+ !resource_are_vblanks_synchronizable(
+ res_ctx->pipe_ctx[pipe_cnt].stream,
+ res_ctx->pipe_ctx[i].stream))) {
+ synchronized_vblank = false;
+ break;
+ }
+ }
+
+ for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
+ struct dc_crtc_timing *timing;
+ struct audio_check aud_check = {0};
+
+ if (!res_ctx->pipe_ctx[i].stream)
+ continue;
+
+ timing = &res_ctx->pipe_ctx[i].stream->timing;
+
+ /* todo:
+ pipes[pipe_cnt].pipe.src.dynamic_metadata_enable = 0;
+ pipes[pipe_cnt].pipe.src.dcc = 0;
+ pipes[pipe_cnt].pipe.src.vm = 0;*/
+
+ pipes[pipe_cnt].clks_cfg.refclk_mhz = dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000.0;
+
+ pipes[pipe_cnt].dout.dsc_enable = res_ctx->pipe_ctx[i].stream->timing.flags.DSC;
+ /* TODO: rotation? */
+ pipes[pipe_cnt].dout.dsc_slices = res_ctx->pipe_ctx[i].stream->timing.dsc_cfg.num_slices_h;
+ if (res_ctx->pipe_ctx[i].stream->use_dynamic_meta) {
+ pipes[pipe_cnt].pipe.src.dynamic_metadata_enable = true;
+ /* 1/2 vblank */
+ pipes[pipe_cnt].pipe.src.dynamic_metadata_lines_before_active =
+ (timing->v_total - timing->v_addressable
+ - timing->v_border_top - timing->v_border_bottom) / 2;
+ /* 36 bytes dp, 32 hdmi */
+ pipes[pipe_cnt].pipe.src.dynamic_metadata_xmit_bytes =
+ dc_is_dp_signal(res_ctx->pipe_ctx[i].stream->signal) ? 36 : 32;
+ }
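+ /* Example: with v_total = 2250, v_addressable = 2160 and no borders
+ * (illustrative 4k values), the vblank is 90 lines, so dynamic metadata
+ * is transmitted 45 lines before active.
+ */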
+ pipes[pipe_cnt].pipe.dest.synchronized_vblank_all_planes = synchronized_vblank;
+
+ dc_timing_to_dml_timing(timing, &pipes[pipe_cnt].pipe.dest);
+ pipes[pipe_cnt].pipe.dest.vtotal_min = res_ctx->pipe_ctx[i].stream->adjust.v_total_min;
+ pipes[pipe_cnt].pipe.dest.vtotal_max = res_ctx->pipe_ctx[i].stream->adjust.v_total_max;
+
+ pipes[pipe_cnt].pipe.dest.otg_inst = res_ctx->pipe_ctx[i].stream_res.tg->inst;
+
+ pipes[pipe_cnt].pipe.dest.odm_combine = get_dml_odm_combine(&res_ctx->pipe_ctx[i]);
+
+ populate_hsplit_group_from_dc_pipe_ctx(&res_ctx->pipe_ctx[i], &pipes[pipe_cnt]);
+
+ pipes[pipe_cnt].dout.dp_lanes = 4;
+ pipes[pipe_cnt].dout.is_virtual = 0;
+ pipes[pipe_cnt].dout.output_type = get_dml_output_type(res_ctx->pipe_ctx[i].stream->signal);
+ if (pipes[pipe_cnt].dout.output_type < 0) {
+ pipes[pipe_cnt].dout.output_type = dm_dp;
+ pipes[pipe_cnt].dout.is_virtual = 1;
+ }
+
+ populate_color_depth_and_encoding_from_timing(&res_ctx->pipe_ctx[i].stream->timing, &pipes[pipe_cnt].dout);
+
+ if (res_ctx->pipe_ctx[i].stream->timing.flags.DSC)
+ pipes[pipe_cnt].dout.output_bpp = res_ctx->pipe_ctx[i].stream->timing.dsc_cfg.bits_per_pixel / 16.0;
+
+ /* TODO: default max for now, until there is logic reflecting this in dc */
+ pipes[pipe_cnt].dout.dsc_input_bpc = 12;
+ /* fill in the audio sample rate (in kHz) */
+ get_audio_check(&res_ctx->pipe_ctx[i].stream->audio_info, &aud_check);
+ pipes[pipe_cnt].dout.max_audio_sample_rate = aud_check.max_audiosample_rate / 1000;
+
+ populate_dml_cursor_parameters_from_dc_pipe_ctx(&res_ctx->pipe_ctx[i], &pipes[pipe_cnt]);
+
+ if (!res_ctx->pipe_ctx[i].plane_state) {
+ populate_default_plane_from_timing(timing, &pipes[pipe_cnt].pipe);
+ } else {
+ populate_dml_from_dc_pipe_ctx(&res_ctx->pipe_ctx[i], &pipes[pipe_cnt], dc->debug.always_scale);
+ }
+
+ pipe_cnt++;
+ }
+
+ /* populate writeback information */
+ if (dc->res_pool)
+ dc->res_pool->funcs->populate_dml_writeback_from_context(dc, res_ctx, pipes);
+
+ return pipe_cnt;
+}
+
+static int dml_populate_dml_pipes_from_context(
+ struct dc *dc, struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ bool fast_validate)
+{
+ int i, pipe_cnt;
+ struct resource_context *res_ctx = &context->res_ctx;
+ struct pipe_ctx *pipe;
+
+ populate_dml_pipes_from_context_base(dc, context, pipes, fast_validate);
+
+ for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
+ struct dc_crtc_timing *timing;
+
+ if (!res_ctx->pipe_ctx[i].stream)
+ continue;
+ pipe = &res_ctx->pipe_ctx[i];
+ timing = &pipe->stream->timing;
+
+ pipes[pipe_cnt].pipe.src.gpuvm = true;
+ pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_luma = 0;
+ pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_chroma = 0;
+ pipes[pipe_cnt].pipe.dest.vfront_porch = timing->v_front_porch;
+
+ pipes[pipe_cnt].dout.dsc_input_bpc = 0;
+ if (pipes[pipe_cnt].dout.dsc_enable) {
+ switch (timing->display_color_depth) {
+ case COLOR_DEPTH_888:
+ pipes[pipe_cnt].dout.dsc_input_bpc = 8;
+ break;
+ case COLOR_DEPTH_101010:
+ pipes[pipe_cnt].dout.dsc_input_bpc = 10;
+ break;
+ case COLOR_DEPTH_121212:
+ pipes[pipe_cnt].dout.dsc_input_bpc = 12;
+ break;
+ default:
+ ASSERT(0);
+ break;
+ }
+ }
+ pipe_cnt++;
+ }
+ dc->config.enable_4to1MPC = false;
+ if (pipe_cnt == 1 && pipe->plane_state && !dc->debug.disable_z9_mpc) {
+ if (is_dual_plane(pipe->plane_state->format)
+ && pipe->plane_state->src_rect.width <= 1920 && pipe->plane_state->src_rect.height <= 1080) {
+ dc->config.enable_4to1MPC = true;
+ } else if (!is_dual_plane(pipe->plane_state->format)) {
+ context->bw_ctx.dml.ip.det_buffer_size_kbytes = 192;
+ pipes[0].pipe.src.unbounded_req_mode = true;
+ }
+ }
+
+ return pipe_cnt;
+}
+
+static void dml_full_validate_bw_helper(struct dc *dc,
+ struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ int *vlevel,
+ int *split,
+ bool *merge,
+ int *pipe_cnt)
+{
+ struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
+
+ /*
+ * DML favors voltage over p-state, but we're more interested in
+ * supporting p-state over voltage. We can't support p-state in
+ * prefetch mode > 0 so try capping the prefetch mode to start.
+ */
+ context->bw_ctx.dml.soc.allow_dram_self_refresh_or_dram_clock_change_in_vblank =
+ dm_allow_self_refresh_and_mclk_switch;
+ *vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, *pipe_cnt);
+ /* This may adjust vlevel and maxMpcComb */
+ if (*vlevel < context->bw_ctx.dml.soc.num_states)
+ *vlevel = dml_validate_apply_pipe_split_flags(dc, context, *vlevel, split, merge);
+
+ /* Conditions for setting up phantom pipes for SubVP:
+ * 1. SubVP is not force-disabled
+ * 2. Full update (i.e. !fast_validate)
+ * 3. Enough pipes are available to support SubVP (TODO: Which pipes will use VACTIVE / VBLANK / SUBVP?)
+ * 4. Display configuration passes validation
+ * 5. (Config doesn't support MCLK in VACTIVE/VBLANK || dc->debug.force_subvp_mclk_switch)
+ */
+ if (!dc->debug.force_disable_subvp &&
+ dml_enough_pipes_for_subvp(dc, context) &&
+ *vlevel < context->bw_ctx.dml.soc.num_states &&
+ (vba->DRAMClockChangeSupport[*vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported ||
+ dc->debug.force_subvp_mclk_switch)) {
+
+ dml_add_phantom_pipes(dc, context);
+
+ /* Create input to DML based on new context which includes phantom pipes
+ * TODO: Input to DML should mark which pipes are phantom
+ */
+ *pipe_cnt = dml_populate_dml_pipes_from_context(dc, context, pipes, false);
+ *vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, *pipe_cnt);
+ if (*vlevel < context->bw_ctx.dml.soc.num_states) {
+ /* split and merge are pointer parameters here; clear the full arrays */
+ memset(split, 0, MAX_PIPES * sizeof(*split));
+ memset(merge, 0, MAX_PIPES * sizeof(*merge));
+ *vlevel = dml_validate_apply_pipe_split_flags(dc, context, *vlevel, split, merge);
+ }
+
+ // If SubVP pipe config is unsupported (or cannot be used for UCLK switching)
+ // remove phantom pipes and repopulate dml pipes
+ if (*vlevel == context->bw_ctx.dml.soc.num_states ||
+ vba->DRAMClockChangeSupport[*vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported) {
+ dml_remove_phantom_pipes(dc, context);
+ *pipe_cnt = dml_populate_dml_pipes_from_context(dc, context, pipes, false);
+ }
+ }
+}
+
+static void dcn20_adjust_adaptive_sync_v_startup(
+ const struct dc_crtc_timing *dc_crtc_timing, int *vstartup_start)
+{
+ struct dc_crtc_timing patched_crtc_timing;
+ uint32_t asic_blank_end = 0;
+ uint32_t asic_blank_start = 0;
+ uint32_t newVstartup = 0;
+
+ patched_crtc_timing = *dc_crtc_timing;
+
+ if (patched_crtc_timing.flags.INTERLACE == 1) {
+ if (patched_crtc_timing.v_front_porch < 2)
+ patched_crtc_timing.v_front_porch = 2;
+ } else {
+ if (patched_crtc_timing.v_front_porch < 1)
+ patched_crtc_timing.v_front_porch = 1;
+ }
+
+ /* blank_start = frame end - front porch */
+ asic_blank_start = patched_crtc_timing.v_total -
+ patched_crtc_timing.v_front_porch;
+
+ /* blank_end = blank_start - active */
+ asic_blank_end = asic_blank_start -
+ patched_crtc_timing.v_border_bottom -
+ patched_crtc_timing.v_addressable -
+ patched_crtc_timing.v_border_top;
+
+ newVstartup = asic_blank_end + (patched_crtc_timing.v_total - asic_blank_start);
+
+ *vstartup_start = ((newVstartup > *vstartup_start) ? newVstartup : *vstartup_start);
+}
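+
+/* Worked example with a hypothetical 1080p timing: v_total = 1125,
+ * v_front_porch = 4, v_addressable = 1080 and no borders give
+ * asic_blank_start = 1121, asic_blank_end = 41 and newVstartup = 41 +
+ * (1125 - 1121) = 45; *vstartup_start is only raised, never lowered.
+ */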
+
+static bool is_dp_128b_132b_signal(struct pipe_ctx *pipe_ctx)
+{
+ return (pipe_ctx->stream_res.hpo_dp_stream_enc &&
+ pipe_ctx->stream->link->hpo_dp_link_enc &&
+ dc_is_dp_signal(pipe_ctx->stream->signal));
+}
+
+static bool is_dtbclk_required(struct dc *dc, struct dc_state *context)
+{
+ int i;
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ if (!context->res_ctx.pipe_ctx[i].stream)
+ continue;
+#if defined (CONFIG_DRM_AMD_DC_DP2_0)
+ if (is_dp_128b_132b_signal(&context->res_ctx.pipe_ctx[i]))
+ return true;
+#endif
+ }
+ return false;
+}
+
+static void dml_update_soc_for_wm_a(struct dc *dc, struct dc_state *context)
+{
+ if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].valid) {
+ context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
+ context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_enter_plus_exit_time_us;
+ context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_exit_time_us;
+ }
+}
+
+static bool dml_internal_validate(
+ struct dc *dc,
+ struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ int *pipe_cnt_out,
+ int *vlevel_out,
+ bool fast_validate)
+{
+ bool out = false;
+ bool repopulate_pipes = false;
+ int split[MAX_PIPES] = { 0 };
+ bool merge[MAX_PIPES] = { false };
+ bool newly_split[MAX_PIPES] = { false };
+ int pipe_cnt, i, pipe_idx, vlevel;
+ struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
+
+ ASSERT(pipes);
+ if (!pipes)
+ return false;
+
+ // For each full update, remove all existing phantom pipes first
+ dml_remove_phantom_pipes(dc, context);
+
+ dml_update_soc_for_wm_a(dc, context);
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe->plane_state) {
+ // On the initial pass through DML, we intend to use MALL for SS on all
+ // (non-PSR) surfaces, with none using MALL for P-State.
+ // 'mall_plane_config' is not yet a member of 'dc_plane_state'; keep this commented out until it is supported there:
+ //if (pipe->stream && pipe->stream->link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED)
+ // pipe->plane_state->mall_plane_config.use_mall_for_ss = true;
+ }
+ }
+ pipe_cnt = dml_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
+
+ if (!pipe_cnt) {
+ out = true;
+ goto validate_out;
+ }
+
+ dml_log_pipe_params(&context->bw_ctx.dml, pipes, pipe_cnt);
+
+ if (!fast_validate) {
+ dml_full_validate_bw_helper(dc, context, pipes, &vlevel, split, merge, &pipe_cnt);
+ }
+
+ if (fast_validate || vlevel == context->bw_ctx.dml.soc.num_states ||
+ vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported) {
+ /*
+ * If mode is unsupported or there's still no p-state support then
+ * fall back to favoring voltage.
+ *
+ * We don't actually support prefetch mode 2, so require that we
+ * at least support prefetch mode 1.
+ */
+ context->bw_ctx.dml.soc.allow_dram_self_refresh_or_dram_clock_change_in_vblank =
+ dm_allow_self_refresh;
+
+ vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
+ if (vlevel < context->bw_ctx.dml.soc.num_states) {
+ memset(split, 0, sizeof(split));
+ memset(merge, 0, sizeof(merge));
+ vlevel = dml_validate_apply_pipe_split_flags(dc, context, vlevel, split, merge);
+ }
+ }
+
+ dml_log_mode_support_params(&context->bw_ctx.dml);
+
+ if (vlevel == context->bw_ctx.dml.soc.num_states)
+ goto validate_fail;
+
+ for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *mpo_pipe = pipe->bottom_pipe;
+
+ if (!pipe->stream)
+ continue;
+
+ /* We only support full-screen MPO with ODM */
+ if (vba->ODMCombineEnabled[vba->pipe_plane[pipe_idx]] != dm_odm_combine_mode_disabled
+ && pipe->plane_state && mpo_pipe
+ && memcmp(&mpo_pipe->plane_res.scl_data.recout,
+ &pipe->plane_res.scl_data.recout,
+ sizeof(struct rect)) != 0) {
+ ASSERT(mpo_pipe->plane_state != pipe->plane_state);
+ goto validate_fail;
+ }
+ pipe_idx++;
+ }
+
+ /* merge pipes if necessary */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ /* skip pipes that don't need merging */
+ if (!merge[i])
+ continue;
+
+ /* if ODM merge we ignore mpc tree, mpo pipes will have their own flags */
+ if (pipe->prev_odm_pipe) {
+ /* split off the ODM pipe */
+ pipe->prev_odm_pipe->next_odm_pipe = pipe->next_odm_pipe;
+ if (pipe->next_odm_pipe)
+ pipe->next_odm_pipe->prev_odm_pipe = pipe->prev_odm_pipe;
+
+ pipe->bottom_pipe = NULL;
+ pipe->next_odm_pipe = NULL;
+ pipe->plane_state = NULL;
+ pipe->stream = NULL;
+ pipe->top_pipe = NULL;
+ pipe->prev_odm_pipe = NULL;
+ if (pipe->stream_res.dsc)
+ dml_release_dsc(&context->res_ctx, dc->res_pool, &pipe->stream_res.dsc);
+ memset(&pipe->plane_res, 0, sizeof(pipe->plane_res));
+ memset(&pipe->stream_res, 0, sizeof(pipe->stream_res));
+ repopulate_pipes = true;
+ } else if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state) {
+ struct pipe_ctx *top_pipe = pipe->top_pipe;
+ struct pipe_ctx *bottom_pipe = pipe->bottom_pipe;
+
+ top_pipe->bottom_pipe = bottom_pipe;
+ if (bottom_pipe)
+ bottom_pipe->top_pipe = top_pipe;
+
+ pipe->top_pipe = NULL;
+ pipe->bottom_pipe = NULL;
+ pipe->plane_state = NULL;
+ pipe->stream = NULL;
+ memset(&pipe->plane_res, 0, sizeof(pipe->plane_res));
+ memset(&pipe->stream_res, 0, sizeof(pipe->stream_res));
+ repopulate_pipes = true;
+ } else
+ ASSERT(0); /* Should never try to merge master pipe */
+
+ }
+
+ for (i = 0, pipe_idx = -1; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *hsplit_pipe = NULL;
+ bool odm;
+ int old_index = -1;
+
+ if (!pipe->stream || newly_split[i])
+ continue;
+
+ pipe_idx++;
+ odm = vba->ODMCombineEnabled[vba->pipe_plane[pipe_idx]] != dm_odm_combine_mode_disabled;
+
+ if (!pipe->plane_state && !odm)
+ continue;
+
+ if (split[i]) {
+ if (odm) {
+ if (split[i] == 4 && old_pipe->next_odm_pipe && old_pipe->next_odm_pipe->next_odm_pipe)
+ old_index = old_pipe->next_odm_pipe->next_odm_pipe->pipe_idx;
+ else if (old_pipe->next_odm_pipe)
+ old_index = old_pipe->next_odm_pipe->pipe_idx;
+ } else {
+ if (split[i] == 4 && old_pipe->bottom_pipe && old_pipe->bottom_pipe->bottom_pipe &&
+ old_pipe->bottom_pipe->bottom_pipe->plane_state == old_pipe->plane_state)
+ old_index = old_pipe->bottom_pipe->bottom_pipe->pipe_idx;
+ else if (old_pipe->bottom_pipe &&
+ old_pipe->bottom_pipe->plane_state == old_pipe->plane_state)
+ old_index = old_pipe->bottom_pipe->pipe_idx;
+ }
+ hsplit_pipe = dml_find_split_pipe(dc, context, old_index);
+ ASSERT(hsplit_pipe);
+ if (!hsplit_pipe)
+ goto validate_fail;
+
+ if (!dml_split_stream_for_mpc_or_odm(
+ dc, &context->res_ctx,
+ pipe, hsplit_pipe, odm))
+ goto validate_fail;
+
+ newly_split[hsplit_pipe->pipe_idx] = true;
+ repopulate_pipes = true;
+ }
+ if (split[i] == 4) {
+ struct pipe_ctx *pipe_4to1;
+
+ if (odm && old_pipe->next_odm_pipe)
+ old_index = old_pipe->next_odm_pipe->pipe_idx;
+ else if (!odm && old_pipe->bottom_pipe &&
+ old_pipe->bottom_pipe->plane_state == old_pipe->plane_state)
+ old_index = old_pipe->bottom_pipe->pipe_idx;
+ else
+ old_index = -1;
+ pipe_4to1 = dml_find_split_pipe(dc, context, old_index);
+ ASSERT(pipe_4to1);
+ if (!pipe_4to1)
+ goto validate_fail;
+ if (!dml_split_stream_for_mpc_or_odm(
+ dc, &context->res_ctx,
+ pipe, pipe_4to1, odm))
+ goto validate_fail;
+ newly_split[pipe_4to1->pipe_idx] = true;
+
+ if (odm && old_pipe->next_odm_pipe && old_pipe->next_odm_pipe->next_odm_pipe
+ && old_pipe->next_odm_pipe->next_odm_pipe->next_odm_pipe)
+ old_index = old_pipe->next_odm_pipe->next_odm_pipe->next_odm_pipe->pipe_idx;
+ else if (!odm && old_pipe->bottom_pipe && old_pipe->bottom_pipe->bottom_pipe &&
+ old_pipe->bottom_pipe->bottom_pipe->bottom_pipe &&
+ old_pipe->bottom_pipe->bottom_pipe->bottom_pipe->plane_state == old_pipe->plane_state)
+ old_index = old_pipe->bottom_pipe->bottom_pipe->bottom_pipe->pipe_idx;
+ else
+ old_index = -1;
+ pipe_4to1 = dml_find_split_pipe(dc, context, old_index);
+ ASSERT(pipe_4to1);
+ if (!pipe_4to1)
+ goto validate_fail;
+ if (!dml_split_stream_for_mpc_or_odm(
+ dc, &context->res_ctx,
+ hsplit_pipe, pipe_4to1, odm))
+ goto validate_fail;
+ newly_split[pipe_4to1->pipe_idx] = true;
+ }
+ if (odm)
+ dml_build_mapped_resource(dc, context, pipe->stream);
+ }
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe->plane_state) {
+ if (!resource_build_scaling_params(pipe))
+ goto validate_fail;
+ }
+ }
+
+ /* Validate the actual DSC count per stream */
+ if (!dml_validate_dsc(dc, context)) {
+ vba->ValidationStatus[vba->soc.num_states] = DML_FAIL_DSC_VALIDATION_FAILURE;
+ goto validate_fail;
+ }
+
+ if (repopulate_pipes)
+ pipe_cnt = dml_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
+ *vlevel_out = vlevel;
+ *pipe_cnt_out = pipe_cnt;
+
+ out = true;
+ goto validate_out;
+
+validate_fail:
+ out = false;
+
+validate_out:
+ return out;
+}
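+
+/* Rough semantics of the split/merge arrays used above: split[i] == 2 asks
+ * for pipe i to be split in two (MPC or ODM), split[i] == 4 for a 4-way
+ * split (one extra pipe plus two more in the split[i] == 4 branch), and
+ * merge[i] asks for pipe i to be folded back into its top/previous pipe.
+ */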
+
+static void dml_calculate_dlg_params(
+ struct dc *dc, struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ int pipe_cnt,
+ int vlevel)
+{
+ int i, pipe_idx;
+ int plane_count;
+
+ /* Writeback MCIF_WB arbitration parameters */
+ if (dc->res_pool)
+ dc->res_pool->funcs->set_mcif_arb_params(dc, context, pipes, pipe_cnt);
+
+ context->bw_ctx.bw.dcn.clk.dispclk_khz = context->bw_ctx.dml.vba.DISPCLK * 1000;
+ context->bw_ctx.bw.dcn.clk.dcfclk_khz = context->bw_ctx.dml.vba.DCFCLK * 1000;
+ context->bw_ctx.bw.dcn.clk.socclk_khz = context->bw_ctx.dml.vba.SOCCLK * 1000;
+ context->bw_ctx.bw.dcn.clk.dramclk_khz = context->bw_ctx.dml.vba.DRAMSpeed * 1000 / 16;
+ context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz = context->bw_ctx.dml.vba.DCFCLKDeepSleep * 1000;
+ context->bw_ctx.bw.dcn.clk.fclk_khz = context->bw_ctx.dml.vba.FabricClock * 1000;
+ context->bw_ctx.bw.dcn.clk.p_state_change_support =
+ context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb]
+ != dm_dram_clock_change_unsupported;
+
+ context->bw_ctx.bw.dcn.clk.dppclk_khz = 0;
+ /* 'z9_support' is not yet a member of 'dc_clocks'; keep this commented out until it is supported there:
+ * context->bw_ctx.bw.dcn.clk.z9_support = (context->bw_ctx.dml.vba.StutterPeriod > 5000.0) ?
+ * DCN_Z9_SUPPORT_ALLOW : DCN_Z9_SUPPORT_DISALLOW;
+ */
+ plane_count = 0;
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ if (context->res_ctx.pipe_ctx[i].plane_state)
+ plane_count++;
+ }
+
+ /* Commented out for the same reason as above:
+ if (plane_count == 0)
+ context->bw_ctx.bw.dcn.clk.z9_support = DCN_Z9_SUPPORT_ALLOW;
+ */
+ context->bw_ctx.bw.dcn.clk.dtbclk_en = is_dtbclk_required(dc, context);
+ /* TODO : Uncomment the below line and make changes
+ * as per DML nomenclature once it is available.
+ * context->bw_ctx.bw.dcn.clk.fclk_p_state_change_support = context->bw_ctx.dml.vba.fclk_pstate_support;
+ */
+
+ if (context->bw_ctx.bw.dcn.clk.dispclk_khz < dc->debug.min_disp_clk_khz)
+ context->bw_ctx.bw.dcn.clk.dispclk_khz = dc->debug.min_disp_clk_khz;
+
+ for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
+ if (!context->res_ctx.pipe_ctx[i].stream)
+ continue;
+ pipes[pipe_idx].pipe.dest.vstartup_start = get_vstartup(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
+ pipes[pipe_idx].pipe.dest.vupdate_offset = get_vupdate_offset(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
+ pipes[pipe_idx].pipe.dest.vupdate_width = get_vupdate_width(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
+ pipes[pipe_idx].pipe.dest.vready_offset = get_vready_offset(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
+ if (context->res_ctx.pipe_ctx[i].stream->mall_stream_config.type == SUBVP_PHANTOM) {
+ // Phantom pipe requires that DET_SIZE = 0 and no unbounded requests
+ context->res_ctx.pipe_ctx[i].det_buffer_size_kb = 0;
+ context->res_ctx.pipe_ctx[i].unbounded_req = false;
+ } else {
+ context->res_ctx.pipe_ctx[i].det_buffer_size_kb = context->bw_ctx.dml.ip.det_buffer_size_kbytes;
+ context->res_ctx.pipe_ctx[i].unbounded_req = pipes[pipe_idx].pipe.src.unbounded_req_mode;
+ }
+
+ if (context->bw_ctx.bw.dcn.clk.dppclk_khz < pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000)
+ context->bw_ctx.bw.dcn.clk.dppclk_khz = pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000;
+ context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz =
+ pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000;
+ context->res_ctx.pipe_ctx[i].pipe_dlg_param = pipes[pipe_idx].pipe.dest;
+ pipe_idx++;
+ }
+ /* save an original dppclk copy */
+ context->bw_ctx.bw.dcn.clk.bw_dppclk_khz = context->bw_ctx.bw.dcn.clk.dppclk_khz;
+ context->bw_ctx.bw.dcn.clk.bw_dispclk_khz = context->bw_ctx.bw.dcn.clk.dispclk_khz;
+ context->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz = context->bw_ctx.dml.soc.clock_limits[vlevel].dppclk_mhz * 1000;
+ context->bw_ctx.bw.dcn.clk.max_supported_dispclk_khz = context->bw_ctx.dml.soc.clock_limits[vlevel].dispclk_mhz * 1000;
+ context->bw_ctx.bw.dcn.compbuf_size_kb = context->bw_ctx.dml.ip.config_return_buffer_size_in_kbytes
+ - context->bw_ctx.dml.ip.det_buffer_size_kbytes * pipe_idx;
+
+ for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
+ bool cstate_en = context->bw_ctx.dml.vba.PrefetchMode[vlevel][context->bw_ctx.dml.vba.maxMpcComb] != 2;
+
+ if (!context->res_ctx.pipe_ctx[i].stream)
+ continue;
+
+ context->bw_ctx.dml.funcs.rq_dlg_get_dlg_reg(&context->bw_ctx.dml,
+ &context->res_ctx.pipe_ctx[i].dlg_regs,
+ &context->res_ctx.pipe_ctx[i].ttu_regs,
+ pipes,
+ pipe_cnt,
+ pipe_idx,
+ cstate_en,
+ context->bw_ctx.bw.dcn.clk.p_state_change_support,
+ false, false, true);
+
+ context->bw_ctx.dml.funcs.rq_dlg_get_rq_reg(&context->bw_ctx.dml,
+ &context->res_ctx.pipe_ctx[i].rq_regs,
+ &pipes[pipe_idx].pipe);
+ pipe_idx++;
+ }
+}
+
+static void dml_calculate_wm_and_dlg(
+ struct dc *dc, struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ int pipe_cnt,
+ int vlevel)
+{
+ int i, pipe_idx, vlevel_temp = 0;
+
+ double dcfclk = context->bw_ctx.dml.soc.clock_limits[0].dcfclk_mhz;
+ double dcfclk_from_validation = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
+ unsigned int min_dram_speed_mts = context->bw_ctx.dml.vba.DRAMSpeed;
+ bool pstate_en = context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] !=
+ dm_dram_clock_change_unsupported;
+
+ /* Set B:
+ * For Set B calculations, use clocks from clock_limits[2] when available (i.e. when SMU is
+ * present); otherwise use an arbitrary low value from the spreadsheet for DCFCLK, as lower
+ * is safer for watermark calculations and covers bootup clocks.
+ * DCFCLK: soc.clock_limits[2] when available
+ * UCLK: soc.clock_limits[2] when available
+ */
+ if (context->bw_ctx.dml.soc.num_states > 2) {
+ vlevel_temp = 2;
+ dcfclk = context->bw_ctx.dml.soc.clock_limits[2].dcfclk_mhz;
+ } else
+ dcfclk = 615; //DCFCLK Vmin_lv
+
+ pipes[0].clks_cfg.voltage = vlevel_temp;
+ pipes[0].clks_cfg.dcfclk_mhz = dcfclk;
+ pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel_temp].socclk_mhz;
+
+ if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].valid) {
+ context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.pstate_latency_us;
+ context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_enter_plus_exit_time_us;
+ context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_exit_time_us;
+ }
+ context->bw_ctx.bw.dcn.watermarks.b.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.b.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.b.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ //context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.fclk_pstate_change_ns = get_wm_fclk_pstate(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ //context->bw_ctx.bw.dcn.watermarks.b.usr_retraining_ns = get_wm_usr_retraining(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+
+ /* Temporary, to have some fclk_pstate_change_ns and usr_retraining_ns wm values until DML is implemented */
+ context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.fclk_pstate_change_ns = context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns / 4;
+ context->bw_ctx.bw.dcn.watermarks.b.usr_retraining_ns = context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns / 8;
+
+ /* Set D:
+ * All clocks min.
+ * DCFCLK: Min, as reported by PM FW when available
+ * UCLK : Min, as reported by PM FW when available
+ * sr_enter_exit/sr_exit should be lower than used for DRAM (TBD after bringup or later, use as decided in Clk Mgr)
+ */
+
+ if (context->bw_ctx.dml.soc.num_states > 2) {
+ vlevel_temp = 0;
+ dcfclk = dc->clk_mgr->bw_params->clk_table.entries[0].dcfclk_mhz;
+ } else
+ dcfclk = 615; //DCFCLK Vmin_lv
+
+ pipes[0].clks_cfg.voltage = vlevel_temp;
+ pipes[0].clks_cfg.dcfclk_mhz = dcfclk;
+ pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel_temp].socclk_mhz;
+
+ if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].valid) {
+ context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.pstate_latency_us;
+ context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_enter_plus_exit_time_us;
+ context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_exit_time_us;
+ }
+ context->bw_ctx.bw.dcn.watermarks.d.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.d.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.d.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ //context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.fclk_pstate_change_ns = get_wm_fclk_pstate(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ //context->bw_ctx.bw.dcn.watermarks.d.usr_retraining_ns = get_wm_usr_retraining(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+
+ /* Temporary, to have some fclk_pstate_change_ns and usr_retraining_ns wm values until DML is implemented */
+ context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.fclk_pstate_change_ns = context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns / 4;
+ context->bw_ctx.bw.dcn.watermarks.d.usr_retraining_ns = context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns / 8;
+
+ /* Set C, for Dummy P-State:
+ * All clocks min.
+ * DCFCLK: Min, as reported by PM FW, when available
+ * UCLK : Min, as reported by PM FW, when available
+ * pstate latency as per UCLK state dummy pstate latency
+ */
+ if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].valid) {
+ unsigned int min_dram_speed_mts_margin = 160;
+
+ if (!pstate_en)
+ min_dram_speed_mts = dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz * 16;
+
+ /* Find the largest table entry that is below the current DRAM speed; anything below DPM0 still uses the DPM0 entry */
+ for (i = 3; i > 0; i--)
+ if (min_dram_speed_mts + min_dram_speed_mts_margin > dc->clk_mgr->bw_params->dummy_pstate_table[i].dram_speed_mts)
+ break;
+
+ context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->dummy_pstate_table[i].dummy_pstate_latency_us;
+ context->bw_ctx.dml.soc.dummy_pstate_latency_us = dc->clk_mgr->bw_params->dummy_pstate_table[i].dummy_pstate_latency_us;
+ context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_enter_plus_exit_time_us;
+ context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_exit_time_us;
+ }
+ context->bw_ctx.bw.dcn.watermarks.c.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.c.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.c.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ //context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.fclk_pstate_change_ns = get_wm_fclk_pstate(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ //context->bw_ctx.bw.dcn.watermarks.c.usr_retraining_ns = get_wm_usr_retraining(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+
+ /* Temporary, to have some fclk_pstate_change_ns and usr_retraining_ns wm values until DML is implemented */
+ context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.fclk_pstate_change_ns = context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns / 4;
+ context->bw_ctx.bw.dcn.watermarks.c.usr_retraining_ns = context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns / 8;
+
+ if ((!pstate_en) && (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].valid)) {
+ /* The only difference between A and C is p-state latency. If p-state is
+ * not supported with the full p-state latency, we want to calculate DLG
+ * based on the dummy p-state latency. Set A's p-state watermark was
+ * previously set to 0 when p-state is unsupported; keep that behavior for now.
+ */
+ context->bw_ctx.bw.dcn.watermarks.a = context->bw_ctx.bw.dcn.watermarks.c;
+ context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = 0;
+ } else {
+ /* Set A:
+ * All clocks min.
+ * DCFCLK: Min, as reported by PM FW, when available
+ * UCLK: Min, as reported by PM FW, when available
+ */
+ dml_update_soc_for_wm_a(dc, context);
+ context->bw_ctx.bw.dcn.watermarks.a.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ context->bw_ctx.bw.dcn.watermarks.a.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+ }
+
+ pipes[0].clks_cfg.voltage = vlevel;
+ pipes[0].clks_cfg.dcfclk_mhz = dcfclk_from_validation;
+ pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].socclk_mhz;
+
+ for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
+ if (!context->res_ctx.pipe_ctx[i].stream)
+ continue;
+
+ pipes[pipe_idx].clks_cfg.dispclk_mhz = get_dispclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt);
+ pipes[pipe_idx].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
+
+ if (dc->config.forced_clocks) {
+ pipes[pipe_idx].clks_cfg.dispclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dispclk_mhz;
+ pipes[pipe_idx].clks_cfg.dppclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz;
+ }
+ if (dc->debug.min_disp_clk_khz > pipes[pipe_idx].clks_cfg.dispclk_mhz * 1000)
+ pipes[pipe_idx].clks_cfg.dispclk_mhz = dc->debug.min_disp_clk_khz / 1000.0;
+ if (dc->debug.min_dpp_clk_khz > pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000)
+ pipes[pipe_idx].clks_cfg.dppclk_mhz = dc->debug.min_dpp_clk_khz / 1000.0;
+
+ pipe_idx++;
+ }
+
+ context->perf_params.stutter_period_us = context->bw_ctx.dml.vba.StutterPeriod;
+
+ dml_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
+
+ if (!pstate_en)
+ /* Restore full p-state latency */
+ context->bw_ctx.dml.soc.dram_clock_change_latency_us =
+ dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
+}
+
+bool dml_validate(struct dc *dc,
+ struct dc_state *context,
+ bool fast_validate)
+{
+ bool out = false;
+
+ BW_VAL_TRACE_SETUP();
+
+ int vlevel = 0;
+ int pipe_cnt = 0;
+ display_e2e_pipe_params_st *pipes = context->bw_ctx.dml.dml_pipe_state;
+ DC_LOGGER_INIT(dc->ctx->logger);
+
+ BW_VAL_TRACE_COUNT();
+
+ out = dml_internal_validate(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate);
+
+ if (pipe_cnt == 0)
+ goto validate_out;
+
+ if (!out)
+ goto validate_fail;
+
+ BW_VAL_TRACE_END_VOLTAGE_LEVEL();
+
+ if (fast_validate) {
+ BW_VAL_TRACE_SKIP(fast);
+ goto validate_out;
+ }
+
+ dml_calculate_wm_and_dlg(dc, context, pipes, pipe_cnt, vlevel);
+
+ BW_VAL_TRACE_END_WATERMARKS();
+
+ goto validate_out;
+
+validate_fail:
+ DC_LOG_WARNING("Mode Validation Warning: %s failed validation.\n",
+ dml_get_status_message(context->bw_ctx.dml.vba.ValidationStatus[context->bw_ctx.dml.vba.soc.num_states]));
+
+ BW_VAL_TRACE_SKIP(fail);
+ out = false;
+
+validate_out:
+ BW_VAL_TRACE_FINISH();
+
+ return out;
+}
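+
+/* Minimal usage sketch (hypothetical caller, not part of this patch): a
+ * resource function would typically forward its validate_bandwidth hook here,
+ * e.g.:
+ *
+ *	static bool dcnxx_validate_bandwidth(struct dc *dc,
+ *			struct dc_state *context, bool fast_validate)
+ *	{
+ *		return dml_validate(dc, context, fast_validate);
+ *	}
+ */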
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml_wrapper_translation.c b/drivers/gpu/drm/amd/display/dc/dml/dml_wrapper_translation.c
new file mode 100644
index 000000000000..4ec5310a2962
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/dml_wrapper_translation.c
@@ -0,0 +1,284 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifdef DML_WRAPPER_TRANSLATION_
+
+static void gfx10array_mode_to_dml_params(
+ enum array_mode_values array_mode,
+ enum legacy_tiling_compat_level compat_level,
+ unsigned int *sw_mode)
+{
+ switch (array_mode) {
+ case DC_ARRAY_LINEAR_ALLIGNED:
+ case DC_ARRAY_LINEAR_GENERAL:
+ *sw_mode = dm_sw_linear;
+ break;
+ case DC_ARRAY_2D_TILED_THIN1:
+// DC_LEGACY_TILING_ADDR_GEN_ZERO is not currently defined, so this branch is compiled out:
+#if 0
+ if (compat_level == DC_LEGACY_TILING_ADDR_GEN_ZERO)
+ *sw_mode = dm_sw_gfx7_2d_thin_l_vp;
+ else
+ *sw_mode = dm_sw_gfx7_2d_thin_gl;
+#endif
+ break;
+ default:
+ ASSERT(0); /* Not supported */
+ break;
+ }
+}
+
+static void swizzle_to_dml_params(
+ enum swizzle_mode_values swizzle,
+ unsigned int *sw_mode)
+{
+ switch (swizzle) {
+ case DC_SW_LINEAR:
+ *sw_mode = dm_sw_linear;
+ break;
+ case DC_SW_4KB_S:
+ *sw_mode = dm_sw_4kb_s;
+ break;
+ case DC_SW_4KB_S_X:
+ *sw_mode = dm_sw_4kb_s_x;
+ break;
+ case DC_SW_4KB_D:
+ *sw_mode = dm_sw_4kb_d;
+ break;
+ case DC_SW_4KB_D_X:
+ *sw_mode = dm_sw_4kb_d_x;
+ break;
+ case DC_SW_64KB_S:
+ *sw_mode = dm_sw_64kb_s;
+ break;
+ case DC_SW_64KB_S_X:
+ *sw_mode = dm_sw_64kb_s_x;
+ break;
+ case DC_SW_64KB_S_T:
+ *sw_mode = dm_sw_64kb_s_t;
+ break;
+ case DC_SW_64KB_D:
+ *sw_mode = dm_sw_64kb_d;
+ break;
+ case DC_SW_64KB_D_X:
+ *sw_mode = dm_sw_64kb_d_x;
+ break;
+ case DC_SW_64KB_D_T:
+ *sw_mode = dm_sw_64kb_d_t;
+ break;
+ case DC_SW_64KB_R_X:
+ *sw_mode = dm_sw_64kb_r_x;
+ break;
+ case DC_SW_VAR_S:
+ *sw_mode = dm_sw_var_s;
+ break;
+ case DC_SW_VAR_S_X:
+ *sw_mode = dm_sw_var_s_x;
+ break;
+ case DC_SW_VAR_D:
+ *sw_mode = dm_sw_var_d;
+ break;
+ case DC_SW_VAR_D_X:
+ *sw_mode = dm_sw_var_d_x;
+ break;
+
+ default:
+ ASSERT(0); /* Not supported */
+ break;
+ }
+}
+
+static void dc_timing_to_dml_timing(const struct dc_crtc_timing *timing, struct _vcs_dpi_display_pipe_dest_params_st *dest)
+{
+ dest->hblank_start = timing->h_total - timing->h_front_porch;
+ dest->hblank_end = dest->hblank_start
+ - timing->h_addressable
+ - timing->h_border_left
+ - timing->h_border_right;
+ dest->vblank_start = timing->v_total - timing->v_front_porch;
+ dest->vblank_end = dest->vblank_start
+ - timing->v_addressable
+ - timing->v_border_top
+ - timing->v_border_bottom;
+ dest->htotal = timing->h_total;
+ dest->vtotal = timing->v_total;
+ dest->hactive = timing->h_addressable;
+ dest->vactive = timing->v_addressable;
+ dest->interlaced = timing->flags.INTERLACE;
+ dest->pixel_rate_mhz = timing->pix_clk_100hz/10000.0;
+ if (timing->timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
+ dest->pixel_rate_mhz *= 2;
+}
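+
+/* Example with a hypothetical 1080p60 timing: h_total = 2200, h_front_porch
+ * = 88 and h_addressable = 1920 give hblank_start = 2112 and hblank_end =
+ * 192; pix_clk_100hz = 1485000 gives pixel_rate_mhz = 148.5, doubled to
+ * 297.0 for HW frame-packed 3D.
+ */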
+
+static enum odm_combine_mode get_dml_odm_combine(const struct pipe_ctx *pipe)
+{
+ int odm_split_count = 0;
+ enum odm_combine_mode combine_mode = dm_odm_combine_mode_disabled;
+ struct pipe_ctx *next_pipe = pipe->next_odm_pipe;
+
+ // Traverse pipe tree to determine odm split count
+ while (next_pipe) {
+ odm_split_count++;
+ next_pipe = next_pipe->next_odm_pipe;
+ }
+ pipe = pipe->prev_odm_pipe;
+ while (pipe) {
+ odm_split_count++;
+ pipe = pipe->prev_odm_pipe;
+ }
+
+ // Translate split to DML odm combine factor
+ switch (odm_split_count) {
+ case 1:
+ combine_mode = dm_odm_combine_mode_2to1;
+ break;
+ case 3:
+ combine_mode = dm_odm_combine_mode_4to1;
+ break;
+ default:
+ combine_mode = dm_odm_combine_mode_disabled;
+ }
+
+ return combine_mode;
+}
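+
+/* Example: a two-pipe ODM chain has one next/prev link, so odm_split_count
+ * is 1 and maps to dm_odm_combine_mode_2to1; a four-pipe chain gives 3 and
+ * maps to dm_odm_combine_mode_4to1. Any other count (e.g. a three-pipe
+ * chain, which is not a supported combine) falls back to disabled.
+ */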
+
+static int get_dml_output_type(enum signal_type dc_signal)
+{
+ int dml_output_type = -1;
+
+ switch (dc_signal) {
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ dml_output_type = dm_dp;
+ break;
+ case SIGNAL_TYPE_EDP:
+ dml_output_type = dm_edp;
+ break;
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ case SIGNAL_TYPE_DVI_DUAL_LINK:
+ dml_output_type = dm_hdmi;
+ break;
+ default:
+ break;
+ }
+
+ return dml_output_type;
+}
+
+static void populate_color_depth_and_encoding_from_timing(const struct dc_crtc_timing *timing, struct _vcs_dpi_display_output_params_st *dout)
+{
+ int output_bpc = 0;
+
+ switch (timing->display_color_depth) {
+ case COLOR_DEPTH_666:
+ output_bpc = 6;
+ break;
+ case COLOR_DEPTH_888:
+ output_bpc = 8;
+ break;
+ case COLOR_DEPTH_101010:
+ output_bpc = 10;
+ break;
+ case COLOR_DEPTH_121212:
+ output_bpc = 12;
+ break;
+ case COLOR_DEPTH_141414:
+ output_bpc = 14;
+ break;
+ case COLOR_DEPTH_161616:
+ output_bpc = 16;
+ break;
+ case COLOR_DEPTH_999:
+ output_bpc = 9;
+ break;
+ case COLOR_DEPTH_111111:
+ output_bpc = 11;
+ break;
+ default:
+ output_bpc = 8;
+ break;
+ }
+
+ switch (timing->pixel_encoding) {
+ case PIXEL_ENCODING_RGB:
+ case PIXEL_ENCODING_YCBCR444:
+ dout->output_format = dm_444;
+ dout->output_bpp = output_bpc * 3;
+ break;
+ case PIXEL_ENCODING_YCBCR420:
+ dout->output_format = dm_420;
+ dout->output_bpp = (output_bpc * 3.0) / 2;
+ break;
+ case PIXEL_ENCODING_YCBCR422:
+ if (timing->flags.DSC && !timing->dsc_cfg.ycbcr422_simple)
+ dout->output_format = dm_n422;
+ else
+ dout->output_format = dm_s422;
+ dout->output_bpp = output_bpc * 2;
+ break;
+ default:
+ dout->output_format = dm_444;
+ dout->output_bpp = output_bpc * 3;
+ }
+}
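+
+/* Example: 10 bpc YCbCr 4:2:0 yields dm_420 with output_bpp = 10 * 3.0 / 2 =
+ * 15, while 8 bpc RGB yields dm_444 with output_bpp = 8 * 3 = 24. When DSC is
+ * enabled, the caller later overwrites output_bpp with the compressed rate.
+ */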
+
+static enum source_format_class dc_source_format_to_dml_source_format(enum surface_pixel_format dc_format)
+{
+ enum source_format_class dml_format = dm_444_32;
+
+ switch (dc_format) {
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
+ dml_format = dm_420_8;
+ break;
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr:
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb:
+ dml_format = dm_420_10;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
+ dml_format = dm_444_64;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
+ case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
+ dml_format = dm_444_16;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS:
+ dml_format = dm_444_8;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA:
+ dml_format = dm_rgbe_alpha;
+ break;
+ default:
+ dml_format = dm_444_32;
+ break;
+ }
+
+ return dml_format;
+}
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.c
index 3ee858f311d1..122ba291a7ef 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.c
@@ -61,16 +61,6 @@ static double dsc_roundf(double num)
return (int)(num);
}
-static double dsc_ceil(double num)
-{
- double retval = (int)num;
-
- if (retval != num && num > 0)
- retval = num + 1;
-
- return (int)retval;
-}
-
static void get_qp_set(qp_set qps, enum colour_mode cm, enum bits_per_comp bpc,
enum max_min max_min, float bpp)
{
@@ -268,24 +258,3 @@ void _do_calc_rc_params(struct rc_params *rc,
rc->rc_buf_thresh[13] = 8064;
}
-u32 _do_bytes_per_pixel_calc(int slice_width,
- u16 drm_bpp,
- bool is_navite_422_or_420)
-{
- float bpp;
- u32 bytes_per_pixel;
- double d_bytes_per_pixel;
-
- dc_assert_fp_enabled();
-
- bpp = ((float)drm_bpp / 16.0);
- d_bytes_per_pixel = dsc_ceil(bpp * slice_width / 8.0) / slice_width;
- // TODO: Make sure the formula for calculating this is precise (ceiling
- // vs. floor, and at what point they should be applied)
- if (is_navite_422_or_420)
- d_bytes_per_pixel /= 2;
-
- bytes_per_pixel = (u32)dsc_ceil(d_bytes_per_pixel * 0x10000000);
-
- return bytes_per_pixel;
-}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.h
index b93b95409fbe..cad244c023cd 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.h
@@ -78,10 +78,6 @@ struct qp_entry {
typedef struct qp_entry qp_table[];
-u32 _do_bytes_per_pixel_calc(int slice_width,
- u16 drm_bpp,
- bool is_navite_422_or_420);
-
void _do_calc_rc_params(struct rc_params *rc,
enum colour_mode cm,
enum bits_per_comp bpc,
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
index 0321b4446e05..9c74564cbd8d 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
@@ -455,6 +455,7 @@ static bool intersect_dsc_caps(
if (pixel_encoding == PIXEL_ENCODING_YCBCR422 || pixel_encoding == PIXEL_ENCODING_YCBCR420)
dsc_common_caps->bpp_increment_div = min(dsc_common_caps->bpp_increment_div, (uint32_t)8);
+ dsc_common_caps->edp_sink_max_bits_per_pixel = dsc_sink_caps->edp_max_bits_per_pixel;
dsc_common_caps->is_dp = dsc_sink_caps->is_dp;
return true;
}
@@ -513,6 +514,13 @@ static bool decide_dsc_bandwidth_range(
range->min_target_bpp_x16 = preferred_bpp_x16;
}
}
+ /* TODO - make this value generic to all signal types */
+ else if (dsc_caps->edp_sink_max_bits_per_pixel) {
+ /* apply max bpp limitation from edp sink */
+ range->max_target_bpp_x16 = MIN(dsc_caps->edp_sink_max_bits_per_pixel,
+ max_bpp_x16);
+ range->min_target_bpp_x16 = min_bpp_x16;
+ }
else {
range->max_target_bpp_x16 = max_bpp_x16;
range->min_target_bpp_x16 = min_bpp_x16;
@@ -574,7 +582,7 @@ static bool decide_dsc_target_bpp_x16(
return *target_bpp_x16 != 0;
}
-#define MIN_AVAILABLE_SLICES_SIZE 4
+#define MIN_AVAILABLE_SLICES_SIZE 6
static int get_available_dsc_slices(union dsc_enc_slice_caps slice_caps, int *available_slices)
{
@@ -860,6 +868,10 @@ static bool setup_dsc_config(
min_slices_h = 0; // DSC TODO: Maybe try increasing the number of slices first?
is_dsc_possible = (min_slices_h <= max_slices_h);
+
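+ /* 0 <= 0 passes the check above, but zero available slices means DSC cannot be enabled. */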
+ if (min_slices_h == 0 && max_slices_h == 0)
+ is_dsc_possible = false;
+
if (!is_dsc_possible)
goto done;
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c
index b19d3aeb5962..e97cf09be9d5 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c
@@ -60,31 +60,3 @@ void calc_rc_params(struct rc_params *rc, const struct drm_dsc_config *pps)
pps->dsc_version_minor);
DC_FP_END();
}
-
-/**
- * calc_dsc_bytes_per_pixel - calculate bytes per pixel
- * @pps: DRM struct with all required DSC values
- *
- * Based on the information inside drm_dsc_config, this function calculates the
- * total of bytes per pixel.
- *
- * @note This calculation requires float point operation, most of it executes
- * under kernel_fpu_{begin,end}.
- *
- * Return:
- * Return the number of bytes per pixel
- */
-u32 calc_dsc_bytes_per_pixel(const struct drm_dsc_config *pps)
-
-{
- u32 ret;
- u16 drm_bpp = pps->bits_per_pixel;
- int slice_width = pps->slice_width;
- bool is_navite_422_or_420 = pps->native_422 || pps->native_420;
-
- DC_FP_START();
- ret = _do_bytes_per_pixel_calc(slice_width, drm_bpp,
- is_navite_422_or_420);
- DC_FP_END();
- return ret;
-}
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h
index c2340e001b57..80921c1c0d53 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h
+++ b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h
@@ -30,7 +30,6 @@
#include "dml/dsc/rc_calc_fpu.h"
void calc_rc_params(struct rc_params *rc, const struct drm_dsc_config *pps);
-u32 calc_dsc_bytes_per_pixel(const struct drm_dsc_config *pps);
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c
index 1e19dd674e5a..7e306aa3e2b9 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c
@@ -100,8 +100,7 @@ int dscc_compute_dsc_parameters(const struct drm_dsc_config *pps, struct dsc_par
int ret;
struct rc_params rc;
struct drm_dsc_config dsc_cfg;
-
- dsc_params->bytes_per_pixel = calc_dsc_bytes_per_pixel(pps);
+ unsigned long long tmp;
calc_rc_params(&rc, pps);
dsc_params->pps = *pps;
@@ -113,6 +112,9 @@ int dscc_compute_dsc_parameters(const struct drm_dsc_config *pps, struct dsc_par
dsc_cfg.mux_word_size = dsc_params->pps.bits_per_component <= 10 ? 48 : 64;
ret = drm_dsc_compute_rc_parameters(&dsc_cfg);
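+ /*
+  * bytes_per_pixel is stored in fixed point with 28 fractional bits
+  * (0x10000000 == 2^28). Adding (slice_width - 1) before do_div()
+  * rounds the quotient up: e.g. a 960-byte slice chunk over a
+  * 1280-pixel slice width gives 0.75 B/px, i.e. 0x0C000000.
+  */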
+ tmp = (unsigned long long)dsc_cfg.slice_chunk_size * 0x10000000 + (dsc_cfg.slice_width - 1);
+ do_div(tmp, (uint32_t)dsc_cfg.slice_width); /* round up */
+ dsc_params->bytes_per_pixel = (uint32_t)tmp;
copy_pps_fields(&dsc_params->pps, &dsc_cfg);
dsc_params->rc_buffer_model_size = dsc_cfg.rc_bits;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
index 806f3041db14..337c0161e72d 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
@@ -619,7 +619,7 @@ struct dcn_ip_params {
};
extern const struct dcn_ip_params dcn10_ip_defaults;
-bool dcn_validate_bandwidth(
+bool dcn10_validate_bandwidth(
struct dc *dc,
struct dc_state *context,
bool fast_validate);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dml_wrapper.h b/drivers/gpu/drm/amd/display/dc/inc/dml_wrapper.h
new file mode 100644
index 000000000000..5dcfbd8e2697
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/dml_wrapper.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef DML_WRAPPER_H_
+#define DML_WRAPPER_H_
+
+#include "dc.h"
+#include "dml/display_mode_vba.h"
+
+bool dml_validate(struct dc *dc, struct dc_state *context, bool fast_validate);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h
index f94135c6e3c2..346f0ba73e86 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h
@@ -61,6 +61,8 @@ struct dcn_dsc_state {
uint32_t dsc_pic_height;
uint32_t dsc_slice_bpg_offset;
uint32_t dsc_chunk_size;
+ uint32_t dsc_fw_en;
+ uint32_t dsc_opp_source;
};
@@ -88,6 +90,7 @@ struct dsc_enc_caps {
int32_t max_total_throughput_mps; /* Maximum total throughput with all the slices combined */
int32_t max_slice_width;
uint32_t bpp_increment_div; /* bpp increment divisor, e.g. if 16, it's 1/16th of a bit */
+ uint32_t edp_sink_max_bits_per_pixel;
bool is_dp;
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
index c88e113b94d1..073f8b667eff 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
@@ -164,6 +164,10 @@ struct stream_encoder_funcs {
void (*stop_dp_info_packets)(
struct stream_encoder *enc);
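+ /* Reset the stream encoder's output (DIG) FIFO; behavior is hardware-specific. */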
+ void (*reset_fifo)(
+ struct stream_encoder *enc);
+
void (*dp_blank)(
struct dc_link *link,
struct stream_encoder *enc);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
index 7390baf916b5..c29320b3855d 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
@@ -290,6 +290,8 @@ struct timing_generator_funcs {
enum optc_dsc_mode dsc_mode,
uint32_t dsc_bytes_per_pixel,
uint32_t dsc_slice_width);
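+ /* Read back the DSC mode currently programmed in the timing generator. */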
+ void (*get_dsc_status)(struct timing_generator *optc,
+ uint32_t *dsc_mode);
void (*set_odm_bypass)(struct timing_generator *optc, const struct dc_crtc_timing *dc_crtc_timing);
void (*set_odm_combine)(struct timing_generator *optc, int *opp_id, int opp_cnt,
struct dc_crtc_timing *timing);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/link_enc_cfg.h b/drivers/gpu/drm/amd/display/dc/inc/link_enc_cfg.h
index 10dcf6a5e9b1..a4e43b4826e0 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/link_enc_cfg.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/link_enc_cfg.h
@@ -36,7 +36,7 @@
* Initialise link encoder resource tracking.
*/
void link_enc_cfg_init(
- struct dc *dc,
+ const struct dc *dc,
struct dc_state *state);
/*
diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
index cd204eef073b..83855b8a32e9 100644
--- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
@@ -360,6 +360,8 @@ struct dmub_srv_hw_funcs {
uint32_t (*get_gpint_dataout)(struct dmub_srv *dmub);
+ void (*clear_inbox0_ack_register)(struct dmub_srv *dmub);
+ uint32_t (*read_inbox0_ack_register)(struct dmub_srv *dmub);
void (*send_inbox0_cmd)(struct dmub_srv *dmub, union dmub_inbox0_data_register data);
uint32_t (*get_current_time)(struct dmub_srv *dmub);
@@ -409,6 +411,7 @@ struct dmub_srv {
struct dmub_srv_base_funcs funcs;
struct dmub_srv_hw_funcs hw_funcs;
struct dmub_rb inbox1_rb;
+ uint32_t inbox1_last_wptr;
/**
* outbox1_rb is accessed without locks (dal & dc)
* and to be used only in dmub_srv_stat_get_notification()
@@ -735,6 +738,45 @@ bool dmub_srv_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnostic_
bool dmub_srv_should_detect(struct dmub_srv *dmub);
+/**
+ * dmub_srv_send_inbox0_cmd() - Send command to DMUB using INBOX0
+ * @dmub: the dmub service
+ * @data: the data to be sent in the INBOX0 command
+ *
+ * Send command by writing directly to INBOX0 WPTR
+ *
+ * Return:
+ * DMUB_STATUS_OK - success
+ * DMUB_STATUS_INVALID - hw_init false or hw function does not exist
+ */
+enum dmub_status dmub_srv_send_inbox0_cmd(struct dmub_srv *dmub, union dmub_inbox0_data_register data);
+
+/**
+ * dmub_srv_wait_for_inbox0_ack() - wait for DMUB to ACK INBOX0 command
+ * @dmub: the dmub service
+ * @timeout_us: the maximum number of microseconds to wait
+ *
+ * Wait for DMUB to ACK the INBOX0 message
+ *
+ * Return:
+ * DMUB_STATUS_OK - success
+ * DMUB_STATUS_INVALID - hw_init false or hw function does not exist
+ * DMUB_STATUS_TIMEOUT - wait for ack timed out
+ */
+enum dmub_status dmub_srv_wait_for_inbox0_ack(struct dmub_srv *dmub, uint32_t timeout_us);
+
+/**
+ * dmub_srv_clear_inbox0_ack() - clear ACK register for INBOX0
+ * @dmub: the dmub service
+ *
+ * Clear ACK register for INBOX0
+ *
+ * Return:
+ * DMUB_STATUS_OK - success
+ * DMUB_STATUS_INVALID - hw_init false or hw function does not exist
+ */
+enum dmub_status dmub_srv_clear_inbox0_ack(struct dmub_srv *dmub);
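+
+/*
+ * Illustrative INBOX0 handshake (error handling elided; the timeout value
+ * is only an example):
+ *
+ *   dmub_srv_clear_inbox0_ack(dmub);
+ *   dmub_srv_send_inbox0_cmd(dmub, data);
+ *   dmub_srv_wait_for_inbox0_ack(dmub, 100000);
+ */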
+
#if defined(__cplusplus)
}
#endif
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index c29a67ccef17..7eec65090862 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -46,10 +46,10 @@
/* Firmware versioning. */
#ifdef DMUB_EXPOSE_VERSION
-#define DMUB_FW_VERSION_GIT_HASH 0x1d82d23e
+#define DMUB_FW_VERSION_GIT_HASH 0x465e619a
#define DMUB_FW_VERSION_MAJOR 0
#define DMUB_FW_VERSION_MINOR 0
-#define DMUB_FW_VERSION_REVISION 91
+#define DMUB_FW_VERSION_REVISION 94
#define DMUB_FW_VERSION_TEST 0
#define DMUB_FW_VERSION_VBIOS 0
#define DMUB_FW_VERSION_HOTFIX 0
@@ -173,13 +173,6 @@ extern "C" {
#endif
/**
- * Number of nanoseconds per DMUB tick.
- * DMCUB_TIMER_CURRENT increments in DMUB ticks, which are 10ns by default.
- * If DMCUB_TIMER_WINDOW is non-zero this will no longer be true.
- */
-#define NS_PER_DMUB_TICK 10
-
-/**
* union dmub_addr - DMUB physical/virtual 64-bit address.
*/
union dmub_addr {
@@ -208,10 +201,9 @@ union dmub_psr_debug_flags {
uint32_t use_hw_lock_mgr : 1;
/**
- * Unused.
- * TODO: Remove.
+ * Use TPS3 signal when restoring the main link.
*/
- uint32_t log_line_nums : 1;
+ uint32_t force_wakeup_by_tps3 : 1;
} bitfields;
/**
@@ -1550,10 +1542,14 @@ struct dmub_cmd_psr_copy_settings_data {
* Currently the support is only for 0 or 1
*/
uint8_t panel_inst;
+ /**
+ * DSC enable status in driver
+ */
+ uint8_t dsc_enable_status;
/**
- * Explicit padding to 4 byte boundary.
+ * Explicit padding to 4 byte boundary (3 bytes of padding).
*/
- uint8_t pad3[4];
+ uint8_t pad3[3];
};
/**
@@ -2722,7 +2718,7 @@ static inline bool dmub_rb_full(struct dmub_rb *rb)
static inline bool dmub_rb_push_front(struct dmub_rb *rb,
const union dmub_rb_cmd *cmd)
{
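+ /* rb->wrpt is a byte offset into the ring, so index in bytes rather than in u64 slots. */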
- uint64_t volatile *dst = (uint64_t volatile *)(rb->base_address) + rb->wrpt / sizeof(uint64_t);
+ uint64_t volatile *dst = (uint64_t volatile *)((uint8_t *)(rb->base_address) + rb->wrpt);
const uint64_t *src = (const uint64_t *)cmd;
uint8_t i;
@@ -2840,7 +2836,7 @@ static inline bool dmub_rb_peek_offset(struct dmub_rb *rb,
static inline bool dmub_rb_out_front(struct dmub_rb *rb,
union dmub_rb_out_cmd *cmd)
{
- const uint64_t volatile *src = (const uint64_t volatile *)(rb->base_address) + rb->rptr / sizeof(uint64_t);
+ const uint64_t volatile *src = (const uint64_t volatile *)((uint8_t *)(rb->base_address) + rb->rptr);
uint64_t *dst = (uint64_t *)cmd;
uint8_t i;
@@ -2888,7 +2884,7 @@ static inline void dmub_rb_flush_pending(const struct dmub_rb *rb)
uint32_t wptr = rb->wrpt;
while (rptr != wptr) {
- uint64_t volatile *data = (uint64_t volatile *)rb->base_address + rptr / sizeof(uint64_t);
+ uint64_t volatile *data = (uint64_t volatile *)((uint8_t *)(rb->base_address) + rptr);
//uint64_t volatile *p = (uint64_t volatile *)data;
uint64_t temp;
uint8_t i;
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
index 56d400ffa7ac..f673a1c1777a 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
@@ -100,24 +100,9 @@ void dmub_flush_buffer_mem(const struct dmub_fb *fb)
}
static const struct dmub_fw_meta_info *
-dmub_get_fw_meta_info(const struct dmub_srv_region_params *params)
+dmub_get_fw_meta_info_from_blob(const uint8_t *blob, uint32_t blob_size, uint32_t meta_offset)
{
const union dmub_fw_meta *meta;
- const uint8_t *blob = NULL;
- uint32_t blob_size = 0;
- uint32_t meta_offset = 0;
-
- if (params->fw_bss_data && params->bss_data_size) {
- /* Legacy metadata region. */
- blob = params->fw_bss_data;
- blob_size = params->bss_data_size;
- meta_offset = DMUB_FW_META_OFFSET;
- } else if (params->fw_inst_const && params->inst_const_size) {
- /* Combined metadata region. */
- blob = params->fw_inst_const;
- blob_size = params->inst_const_size;
- meta_offset = 0;
- }
if (!blob || !blob_size)
return NULL;
@@ -134,6 +119,32 @@ dmub_get_fw_meta_info(const struct dmub_srv_region_params *params)
return &meta->info;
}
+static const struct dmub_fw_meta_info *
+dmub_get_fw_meta_info(const struct dmub_srv_region_params *params)
+{
+ const struct dmub_fw_meta_info *info = NULL;
+
+ if (params->fw_bss_data && params->bss_data_size) {
+ /* Legacy metadata region. */
+ info = dmub_get_fw_meta_info_from_blob(params->fw_bss_data,
+ params->bss_data_size,
+ DMUB_FW_META_OFFSET);
+ } else if (params->fw_inst_const && params->inst_const_size) {
+ /* Combined metadata region - can be aligned to 16-bytes. */
+ uint32_t i;
+
+ for (i = 0; i < 16; ++i) {
+ info = dmub_get_fw_meta_info_from_blob(
+ params->fw_inst_const, params->inst_const_size, i);
+
+ if (info)
+ break;
+ }
+ }
+
+ return info;
+}
+
static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic)
{
struct dmub_srv_hw_funcs *funcs = &dmub->hw_funcs;
@@ -598,6 +609,8 @@ enum dmub_status dmub_srv_cmd_queue(struct dmub_srv *dmub,
enum dmub_status dmub_srv_cmd_execute(struct dmub_srv *dmub)
{
+ struct dmub_rb flush_rb;
+
if (!dmub->hw_init)
return DMUB_STATUS_INVALID;
@@ -606,9 +619,14 @@ enum dmub_status dmub_srv_cmd_execute(struct dmub_srv *dmub)
* been flushed to framebuffer memory. Otherwise DMCUB might
* read back stale, fully invalid or partially invalid data.
*/
- dmub_rb_flush_pending(&dmub->inbox1_rb);
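+ /* Flush only the entries queued since the previous execute (inbox1_last_wptr .. wrpt). */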
+ flush_rb = dmub->inbox1_rb;
+ flush_rb.rptr = dmub->inbox1_last_wptr;
+ dmub_rb_flush_pending(&flush_rb);
+
+ dmub->hw_funcs.set_inbox1_wptr(dmub, dmub->inbox1_rb.wrpt);
+
+ dmub->inbox1_last_wptr = dmub->inbox1_rb.wrpt;
- dmub->hw_funcs.set_inbox1_wptr(dmub, dmub->inbox1_rb.wrpt);
return DMUB_STATUS_OK;
}
@@ -831,3 +849,38 @@ bool dmub_srv_should_detect(struct dmub_srv *dmub)
return dmub->hw_funcs.should_detect(dmub);
}
+
+enum dmub_status dmub_srv_clear_inbox0_ack(struct dmub_srv *dmub)
+{
+ if (!dmub->hw_init || !dmub->hw_funcs.clear_inbox0_ack_register)
+ return DMUB_STATUS_INVALID;
+
+ dmub->hw_funcs.clear_inbox0_ack_register(dmub);
+ return DMUB_STATUS_OK;
+}
+
+enum dmub_status dmub_srv_wait_for_inbox0_ack(struct dmub_srv *dmub, uint32_t timeout_us)
+{
+ uint32_t i = 0;
+ uint32_t ack = 0;
+
+ if (!dmub->hw_init || !dmub->hw_funcs.read_inbox0_ack_register)
+ return DMUB_STATUS_INVALID;
+
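+ /* Busy-poll: timeout_us bounds the number of polls, not wall-clock time. */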
+ for (i = 0; i <= timeout_us; i++) {
+ ack = dmub->hw_funcs.read_inbox0_ack_register(dmub);
+ if (ack)
+ return DMUB_STATUS_OK;
+ }
+ return DMUB_STATUS_TIMEOUT;
+}
+
+enum dmub_status dmub_srv_send_inbox0_cmd(struct dmub_srv *dmub,
+ union dmub_inbox0_data_register data)
+{
+ if (!dmub->hw_init || !dmub->hw_funcs.send_inbox0_cmd)
+ return DMUB_STATUS_INVALID;
+
+ dmub->hw_funcs.send_inbox0_cmd(dmub, data);
+ return DMUB_STATUS_OK;
+}
diff --git a/drivers/gpu/drm/amd/display/include/ddc_service_types.h b/drivers/gpu/drm/amd/display/include/ddc_service_types.h
index 4de59b66bb1a..a2b80514d83e 100644
--- a/drivers/gpu/drm/amd/display/include/ddc_service_types.h
+++ b/drivers/gpu/drm/amd/display/include/ddc_service_types.h
@@ -35,6 +35,7 @@
#define DP_BRANCH_DEVICE_ID_00E04C 0x00E04C
#define DP_BRANCH_DEVICE_ID_006037 0x006037
+#define DP_DEVICE_ID_38EC11 0x38EC11
enum ddc_result {
DDC_RESULT_UNKNOWN = 0,
DDC_RESULT_SUCESSFULL,
@@ -117,4 +118,7 @@ struct av_sync_data {
uint8_t aud_del_ins3;/* DPCD 0002Dh */
};
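+
+/* 6-byte device ID strings from the DPCD sink identification area, used to key sink-specific workarounds. */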
+static const uint8_t DP_SINK_DEVICE_STR_ID_1[] = {7, 1, 8, 7, 3, 0};
+static const uint8_t DP_SINK_DEVICE_STR_ID_2[] = {7, 1, 8, 7, 5, 0};
+
#endif /* __DAL_DDC_SERVICE_TYPES_H__ */
diff --git a/drivers/gpu/drm/amd/display/include/logger_types.h b/drivers/gpu/drm/amd/display/include/logger_types.h
index 370fad883e33..f093b49c5e6e 100644
--- a/drivers/gpu/drm/amd/display/include/logger_types.h
+++ b/drivers/gpu/drm/amd/display/include/logger_types.h
@@ -72,9 +72,7 @@
#define DC_LOG_DSC(...) DRM_DEBUG_KMS(__VA_ARGS__)
#define DC_LOG_SMU(...) pr_debug("[SMU_MSG]:"__VA_ARGS__)
#define DC_LOG_DWB(...) DRM_DEBUG_KMS(__VA_ARGS__)
-#if defined(CONFIG_DRM_AMD_DC_DCN)
#define DC_LOG_DP2(...) DRM_DEBUG_KMS(__VA_ARGS__)
-#endif
struct dal_logger;
@@ -126,9 +124,7 @@ enum dc_log_type {
LOG_MAX_HW_POINTS,
LOG_ALL_TF_CHANNELS,
LOG_SAMPLE_1DLUT,
-#if defined(CONFIG_DRM_AMD_DC_DCN)
LOG_DP2,
-#endif
LOG_SECTION_TOTAL_COUNT
};