42 files changed, 300 insertions, 165 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 0047da06041f..414548064648 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -179,6 +179,7 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags) case CHIP_VEGA20: case CHIP_ARCTURUS: case CHIP_SIENNA_CICHLID: + case CHIP_NAVY_FLOUNDER: /* enable runpm if runpm=1 */ if (amdgpu_runtime_pm > 0) adev->runpm = true; @@ -678,8 +679,12 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file * in the bitfields */ if (se_num == AMDGPU_INFO_MMR_SE_INDEX_MASK) se_num = 0xffffffff; + else if (se_num >= AMDGPU_GFX_MAX_SE) + return -EINVAL; if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK) sh_num = 0xffffffff; + else if (sh_num >= AMDGPU_GFX_MAX_SH_PER_SE) + return -EINVAL; if (info->read_mmr_reg.count > 128) return -EINVAL; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index 7fe564275457..d8c6520ff74a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -522,8 +522,7 @@ static int psp_asd_load(struct psp_context *psp) * add workaround to bypass it for sriov now. * TODO: add version check to make it common */ - if (amdgpu_sriov_vf(psp->adev) || - (psp->adev->asic_type == CHIP_NAVY_FLOUNDER)) + if (amdgpu_sriov_vf(psp->adev) || !psp->asd_fw) return 0; cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index 65997ffaed45..037a187aa42f 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -7263,10 +7263,8 @@ static void gfx_v10_0_update_medium_grain_clock_gating(struct amdgpu_device *ade def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE); data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK | RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK | - RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK); - - /* only for Vega10 & Raven1 */ - data |= RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK; + RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK | + RLC_CGTT_MGCG_OVERRIDE__ENABLE_CGTS_LEGACY_MASK); if (def != data) WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data); diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c index da8024c2826e..ca11253e787c 100644 --- a/drivers/gpu/drm/amd/amdgpu/nv.c +++ b/drivers/gpu/drm/amd/amdgpu/nv.c @@ -364,6 +364,7 @@ nv_asic_reset_method(struct amdgpu_device *adev) switch (adev->asic_type) { case CHIP_SIENNA_CICHLID: + case CHIP_NAVY_FLOUNDER: return AMD_RESET_METHOD_MODE1; default: if (smu_baco_is_support(smu)) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index df9338257ae0..b51c527a3f0d 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -2834,12 +2834,18 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev) &dm_atomic_state_funcs); r = amdgpu_display_modeset_create_props(adev); - if (r) + if (r) { + dc_release_state(state->context); + kfree(state); return r; + } r = amdgpu_dm_audio_init(adev); - if (r) + if (r) { + dc_release_state(state->context); + kfree(state); return r; + } return 0; } @@ -2856,6 +2862,8 @@ static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm) #if defined(CONFIG_ACPI) struct 
amdgpu_dm_backlight_caps caps; + memset(&caps, 0, sizeof(caps)); + if (dm->backlight_caps.caps_valid) return; @@ -2894,51 +2902,50 @@ static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness) return rc ? 0 : 1; } -static u32 convert_brightness(const struct amdgpu_dm_backlight_caps *caps, - const uint32_t user_brightness) +static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps, + unsigned *min, unsigned *max) { - u32 min, max, conversion_pace; - u32 brightness = user_brightness; - if (!caps) - goto out; + return 0; - if (!caps->aux_support) { - max = caps->max_input_signal; - min = caps->min_input_signal; - /* - * The brightness input is in the range 0-255 - * It needs to be rescaled to be between the - * requested min and max input signal - * It also needs to be scaled up by 0x101 to - * match the DC interface which has a range of - * 0 to 0xffff - */ - conversion_pace = 0x101; - brightness = - user_brightness - * conversion_pace - * (max - min) - / AMDGPU_MAX_BL_LEVEL - + min * conversion_pace; + if (caps->aux_support) { + // Firmware limits are in nits, DC API wants millinits. + *max = 1000 * caps->aux_max_input_signal; + *min = 1000 * caps->aux_min_input_signal; } else { - /* TODO - * We are doing a linear interpolation here, which is OK but - * does not provide the optimal result. We probably want - * something close to the Perceptual Quantizer (PQ) curve. - */ - max = caps->aux_max_input_signal; - min = caps->aux_min_input_signal; - - brightness = (AMDGPU_MAX_BL_LEVEL - user_brightness) * min - + user_brightness * max; - // Multiple the value by 1000 since we use millinits - brightness *= 1000; - brightness = DIV_ROUND_CLOSEST(brightness, AMDGPU_MAX_BL_LEVEL); + // Firmware limits are 8-bit, PWM control is 16-bit. 
+ *max = 0x101 * caps->max_input_signal; + *min = 0x101 * caps->min_input_signal; } + return 1; +} -out: - return brightness; +static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps, + uint32_t brightness) +{ + unsigned min, max; + + if (!get_brightness_range(caps, &min, &max)) + return brightness; + + // Rescale 0..255 to min..max + return min + DIV_ROUND_CLOSEST((max - min) * brightness, + AMDGPU_MAX_BL_LEVEL); +} + +static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps, + uint32_t brightness) +{ + unsigned min, max; + + if (!get_brightness_range(caps, &min, &max)) + return brightness; + + if (brightness < min) + return 0; + // Rescale min..max to 0..255 + return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min), + max - min); } static int amdgpu_dm_backlight_update_status(struct backlight_device *bd) @@ -2954,7 +2961,7 @@ static int amdgpu_dm_backlight_update_status(struct backlight_device *bd) link = (struct dc_link *)dm->backlight_link; - brightness = convert_brightness(&caps, bd->props.brightness); + brightness = convert_brightness_from_user(&caps, bd->props.brightness); // Change brightness based on AUX property if (caps.aux_support) return set_backlight_via_aux(link, brightness); @@ -2971,7 +2978,7 @@ static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd) if (ret == DC_ERROR_UNEXPECTED) return bd->props.brightness; - return ret; + return convert_brightness_to_user(&dm->backlight_caps, ret); } static const struct backlight_ops amdgpu_dm_backlight_ops = { diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index e85b58f0f416..336aaa09be46 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -67,7 +67,7 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, result = dc_link_aux_transfer_raw(TO_DM_AUX(aux)->ddc_service, &payload, &operation_result); - if (payload.write) + if (payload.write && result >= 0) result = msg->size; if (result < 0) diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c index c664404a75d4..543afa34d87a 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c @@ -94,6 +94,15 @@ int rn_get_active_display_cnt_wa( return display_count; } +void rn_set_low_power_state(struct clk_mgr *clk_mgr_base) +{ + struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); + + rn_vbios_smu_set_dcn_low_power_state(clk_mgr, DCN_PWR_STATE_LOW_POWER); + /* update power state */ + clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_LOW_POWER; +} + void rn_update_clocks(struct clk_mgr *clk_mgr_base, struct dc_state *context, bool safe_to_lower) @@ -516,6 +525,7 @@ static struct clk_mgr_funcs dcn21_funcs = { .init_clocks = rn_init_clocks, .enable_pme_wa = rn_enable_pme_wa, .are_clock_states_equal = rn_are_clock_states_equal, + .set_low_power_state = rn_set_low_power_state, .notify_wm_ranges = rn_notify_wm_ranges, .notify_link_rate_change = rn_notify_link_rate_change, }; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index 117d8aaf2a9b..437d1a7a16fe 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -763,6 +763,7 @@ static bool detect_dp(struct dc_link 
*link, sink_caps->signal = dp_passive_dongle_detection(link->ddc, sink_caps, audio_support); + link->dpcd_caps.dongle_type = sink_caps->dongle_type; } return true; @@ -3286,10 +3287,10 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx) core_link_set_avmute(pipe_ctx, true); } - dc->hwss.blank_stream(pipe_ctx); #if defined(CONFIG_DRM_AMD_DC_HDCP) update_psp_stream_config(pipe_ctx, true); #endif + dc->hwss.blank_stream(pipe_ctx); if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) deallocate_mst_payload(pipe_ctx); diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index 9bc03f26efda..b2be6ad5101d 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -4409,9 +4409,9 @@ bool dc_link_get_backlight_level_nits(struct dc_link *link, link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT)) return false; - if (!core_link_read_dpcd(link, DP_SOURCE_BACKLIGHT_CURRENT_PEAK, + if (core_link_read_dpcd(link, DP_SOURCE_BACKLIGHT_CURRENT_PEAK, dpcd_backlight_get.raw, - sizeof(union dpcd_source_backlight_get))) + sizeof(union dpcd_source_backlight_get)) != DC_OK) return false; *backlight_millinits_avg = @@ -4450,9 +4450,9 @@ bool dc_link_read_default_bl_aux(struct dc_link *link, uint32_t *backlight_milli link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT)) return false; - if (!core_link_read_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL, + if (core_link_read_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL, (uint8_t *) backlight_millinits, - sizeof(uint32_t))) + sizeof(uint32_t)) != DC_OK) return false; return true; diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h index 633442bc7ef2..d9888f316da6 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_stream.h +++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h @@ -233,7 +233,7 @@ struct dc_stream_state { union stream_update_flags update_flags; }; -#define ABM_LEVEL_IMMEDIATE_DISABLE 0xFFFFFFFF +#define ABM_LEVEL_IMMEDIATE_DISABLE 255 struct dc_stream_update { struct dc_stream_state *stream; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index a643927e272b..fa643ec5a876 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -1450,33 +1450,42 @@ void dcn10_init_hw(struct dc *dc) void dcn10_power_down_on_boot(struct dc *dc) { int i = 0; + struct dc_link *edp_link; - if (dc->config.power_down_display_on_boot) { - struct dc_link *edp_link = get_edp_link(dc); - - if (edp_link && - edp_link->link_enc->funcs->is_dig_enabled && - edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) && - dc->hwseq->funcs.edp_backlight_control && - dc->hwss.power_down && - dc->hwss.edp_power_control) { - dc->hwseq->funcs.edp_backlight_control(edp_link, false); - dc->hwss.power_down(dc); - dc->hwss.edp_power_control(edp_link, false); - } else { - for (i = 0; i < dc->link_count; i++) { - struct dc_link *link = dc->links[i]; - - if (link->link_enc->funcs->is_dig_enabled && - link->link_enc->funcs->is_dig_enabled(link->link_enc) && - dc->hwss.power_down) { - dc->hwss.power_down(dc); - break; - } + if (!dc->config.power_down_display_on_boot) + return; + + edp_link = get_edp_link(dc); + if (edp_link && + edp_link->link_enc->funcs->is_dig_enabled && + edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) && + dc->hwseq->funcs.edp_backlight_control && + 
dc->hwss.power_down && + dc->hwss.edp_power_control) { + dc->hwseq->funcs.edp_backlight_control(edp_link, false); + dc->hwss.power_down(dc); + dc->hwss.edp_power_control(edp_link, false); + } else { + for (i = 0; i < dc->link_count; i++) { + struct dc_link *link = dc->links[i]; + if (link->link_enc->funcs->is_dig_enabled && + link->link_enc->funcs->is_dig_enabled(link->link_enc) && + dc->hwss.power_down) { + dc->hwss.power_down(dc); + break; } + } } + + /* + * Call update_clocks with empty context + * to send DISPLAY_OFF + * Otherwise DISPLAY_OFF may not be asserted + */ + if (dc->clk_mgr->funcs->set_low_power_state) + dc->clk_mgr->funcs->set_low_power_state(dc->clk_mgr); } void dcn10_reset_hw_ctx_wrap( diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c index 17d5cb422025..8939541ad7af 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c @@ -1213,6 +1213,7 @@ static enum dc_status dcn10_validate_global(struct dc *dc, struct dc_state *cont bool video_large = false; bool desktop_large = false; bool dcc_disabled = false; + bool mpo_enabled = false; for (i = 0; i < context->stream_count; i++) { if (context->stream_status[i].plane_count == 0) @@ -1221,6 +1222,9 @@ static enum dc_status dcn10_validate_global(struct dc *dc, struct dc_state *cont if (context->stream_status[i].plane_count > 2) return DC_FAIL_UNSUPPORTED_1; + if (context->stream_status[i].plane_count > 1) + mpo_enabled = true; + for (j = 0; j < context->stream_status[i].plane_count; j++) { struct dc_plane_state *plane = context->stream_status[i].plane_states[j]; @@ -1244,6 +1248,10 @@ static enum dc_status dcn10_validate_global(struct dc *dc, struct dc_state *cont } } + /* Disable MPO in multi-display configurations. 
*/ + if (context->stream_count > 1 && mpo_enabled) + return DC_FAIL_UNSUPPORTED_1; + /* * Workaround: On DCN10 there is UMC issue that causes underflow when * playing 4k video on 4k desktop with video downscaled and single channel diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h index 5994d2a33c40..947d6106f341 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h @@ -230,6 +230,8 @@ struct clk_mgr_funcs { int (*get_dp_ref_clk_frequency)(struct clk_mgr *clk_mgr); + void (*set_low_power_state)(struct clk_mgr *clk_mgr); + void (*init_clocks)(struct clk_mgr *clk_mgr); void (*enable_pme_wa) (struct clk_mgr *clk_mgr); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c index c9cfe90a2947..9ee8cf8267c8 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c @@ -204,8 +204,7 @@ static int smu10_set_min_deep_sleep_dcefclk(struct pp_hwmgr *hwmgr, uint32_t clo { struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend); - if (smu10_data->need_min_deep_sleep_dcefclk && - smu10_data->deep_sleep_dcefclk != clock) { + if (clock && smu10_data->deep_sleep_dcefclk != clock) { smu10_data->deep_sleep_dcefclk = clock; smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetMinDeepSleepDcefclk, @@ -219,8 +218,7 @@ static int smu10_set_hard_min_dcefclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t c { struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend); - if (smu10_data->dcf_actual_hard_min_freq && - smu10_data->dcf_actual_hard_min_freq != clock) { + if (clock && smu10_data->dcf_actual_hard_min_freq != clock) { smu10_data->dcf_actual_hard_min_freq = clock; smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetHardMinDcefclkByFreq, @@ -234,8 +232,7 @@ static int smu10_set_hard_min_fclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t cloc { struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend); - if (smu10_data->f_actual_hard_min_freq && - smu10_data->f_actual_hard_min_freq != clock) { + if (clock && smu10_data->f_actual_hard_min_freq != clock) { smu10_data->f_actual_hard_min_freq = clock; smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetHardMinFclkByFreq, diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c index 468bdd6f6697..d572ba4ec9b1 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c @@ -363,17 +363,19 @@ int vega10_thermal_get_temperature(struct pp_hwmgr *hwmgr) static int vega10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, struct PP_TemperatureRange *range) { + struct phm_ppt_v2_information *pp_table_info = + (struct phm_ppt_v2_information *)(hwmgr->pptable); + struct phm_tdp_table *tdp_table = pp_table_info->tdp_table; struct amdgpu_device *adev = hwmgr->adev; - int low = VEGA10_THERMAL_MINIMUM_ALERT_TEMP * - PP_TEMPERATURE_UNITS_PER_CENTIGRADES; - int high = VEGA10_THERMAL_MAXIMUM_ALERT_TEMP * - PP_TEMPERATURE_UNITS_PER_CENTIGRADES; + int low = VEGA10_THERMAL_MINIMUM_ALERT_TEMP; + int high = VEGA10_THERMAL_MAXIMUM_ALERT_TEMP; uint32_t val; - if (low < range->min) - low = range->min; - if (high > range->max) - high = range->max; + /* compare them in unit celsius degree */ + if (low < range->min / PP_TEMPERATURE_UNITS_PER_CENTIGRADES) + low = range->min / 
PP_TEMPERATURE_UNITS_PER_CENTIGRADES; + if (high > tdp_table->usSoftwareShutdownTemp) + high = tdp_table->usSoftwareShutdownTemp; if (low > high) return -EINVAL; @@ -382,8 +384,8 @@ static int vega10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5); val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1); - val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)); - val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)); + val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, high); + val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, low); val &= (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK) & (~THM_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK) & (~THM_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c index c15b9756025d..7ace439dcde7 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c @@ -170,17 +170,18 @@ int vega12_thermal_get_temperature(struct pp_hwmgr *hwmgr) static int vega12_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, struct PP_TemperatureRange *range) { + struct phm_ppt_v3_information *pptable_information = + (struct phm_ppt_v3_information *)hwmgr->pptable; struct amdgpu_device *adev = hwmgr->adev; - int low = VEGA12_THERMAL_MINIMUM_ALERT_TEMP * - PP_TEMPERATURE_UNITS_PER_CENTIGRADES; - int high = VEGA12_THERMAL_MAXIMUM_ALERT_TEMP * - PP_TEMPERATURE_UNITS_PER_CENTIGRADES; + int low = VEGA12_THERMAL_MINIMUM_ALERT_TEMP; + int high = VEGA12_THERMAL_MAXIMUM_ALERT_TEMP; uint32_t val; - if (low < range->min) - low = range->min; - if (high > range->max) - high = range->max; + /* compare them in unit celsius degree */ + if (low < range->min / PP_TEMPERATURE_UNITS_PER_CENTIGRADES) + low = range->min / PP_TEMPERATURE_UNITS_PER_CENTIGRADES; + if (high > pptable_information->us_software_shutdown_temp) + high = pptable_information->us_software_shutdown_temp; if (low > high) return -EINVAL; @@ -189,8 +190,8 @@ static int vega12_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5); val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1); - val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)); - val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)); + val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, high); + val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, low); val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK); WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.c index 7add2f60f49c..364162ddaa9c 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.c @@ -240,17 +240,18 @@ int vega20_thermal_get_temperature(struct pp_hwmgr *hwmgr) static int vega20_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, struct PP_TemperatureRange *range) { + struct phm_ppt_v3_information *pptable_information = + (struct phm_ppt_v3_information *)hwmgr->pptable; struct amdgpu_device *adev = hwmgr->adev; - int low = 
VEGA20_THERMAL_MINIMUM_ALERT_TEMP * - PP_TEMPERATURE_UNITS_PER_CENTIGRADES; - int high = VEGA20_THERMAL_MAXIMUM_ALERT_TEMP * - PP_TEMPERATURE_UNITS_PER_CENTIGRADES; + int low = VEGA20_THERMAL_MINIMUM_ALERT_TEMP; + int high = VEGA20_THERMAL_MAXIMUM_ALERT_TEMP; uint32_t val; - if (low < range->min) - low = range->min; - if (high > range->max) - high = range->max; + /* compare them in unit celsius degree */ + if (low < range->min / PP_TEMPERATURE_UNITS_PER_CENTIGRADES) + low = range->min / PP_TEMPERATURE_UNITS_PER_CENTIGRADES; + if (high > pptable_information->us_software_shutdown_temp) + high = pptable_information->us_software_shutdown_temp; if (low > high) return -EINVAL; @@ -259,8 +260,8 @@ static int vega20_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5); val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1); - val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)); - val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)); + val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, high); + val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, low); val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK); WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val); diff --git a/drivers/gpu/drm/amd/powerplay/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/powerplay/sienna_cichlid_ppt.c index 3865dbed5f93..638760839c15 100644 --- a/drivers/gpu/drm/amd/powerplay/sienna_cichlid_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/sienna_cichlid_ppt.c @@ -95,6 +95,7 @@ static struct cmn2asic_msg_mapping sienna_cichlid_message_map[SMU_MSG_MAX_COUNT] MSG_MAP(TransferTableSmu2Dram, PPSMC_MSG_TransferTableSmu2Dram, 0), MSG_MAP(TransferTableDram2Smu, PPSMC_MSG_TransferTableDram2Smu, 0), MSG_MAP(UseDefaultPPTable, PPSMC_MSG_UseDefaultPPTable, 0), + MSG_MAP(RunDcBtc, PPSMC_MSG_RunDcBtc, 0), MSG_MAP(EnterBaco, PPSMC_MSG_EnterBaco, 0), MSG_MAP(SetSoftMinByFreq, PPSMC_MSG_SetSoftMinByFreq, 0), MSG_MAP(SetSoftMaxByFreq, PPSMC_MSG_SetSoftMaxByFreq, 0), @@ -775,7 +776,7 @@ static int sienna_cichlid_dpm_set_vcn_enable(struct smu_context *smu, bool enabl ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 0, NULL); if (ret) return ret; - if (adev->asic_type == CHIP_SIENNA_CICHLID) { + if (adev->vcn.num_vcn_inst > 1) { ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 0x10000, NULL); if (ret) @@ -787,7 +788,7 @@ static int sienna_cichlid_dpm_set_vcn_enable(struct smu_context *smu, bool enabl ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownVcn, 0, NULL); if (ret) return ret; - if (adev->asic_type == CHIP_SIENNA_CICHLID) { + if (adev->vcn.num_vcn_inst > 1) { ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownVcn, 0x10000, NULL); if (ret) @@ -1732,6 +1733,11 @@ static int sienna_cichlid_get_dpm_ultimate_freq(struct smu_context *smu, return ret; } +static int sienna_cichlid_run_btc(struct smu_context *smu) +{ + return smu_cmn_send_smc_msg(smu, SMU_MSG_RunDcBtc, NULL); +} + static bool sienna_cichlid_is_baco_supported(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; @@ -2719,6 +2725,7 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = { .mode1_reset = smu_v11_0_mode1_reset, .get_dpm_ultimate_freq = sienna_cichlid_get_dpm_ultimate_freq, .set_soft_freq_limited_range = smu_v11_0_set_soft_freq_limited_range, + .run_btc = 
sienna_cichlid_run_btc, .get_pp_feature_mask = smu_cmn_get_pp_feature_mask, .set_pp_feature_mask = smu_cmn_set_pp_feature_mask, }; diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index f68c69a45752..9e1ad493e689 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -34,6 +34,7 @@ #include <drm/drm_bridge.h> #include <drm/drm_damage_helper.h> #include <drm/drm_device.h> +#include <drm/drm_drv.h> #include <drm/drm_plane_helper.h> #include <drm/drm_print.h> #include <drm/drm_self_refresh_helper.h> @@ -3106,7 +3107,7 @@ void drm_atomic_helper_shutdown(struct drm_device *dev) if (ret) DRM_ERROR("Disabling all crtc's during unload failed with %i\n", ret); - DRM_MODESET_LOCK_ALL_END(ctx, ret); + DRM_MODESET_LOCK_ALL_END(dev, ctx, ret); } EXPORT_SYMBOL(drm_atomic_helper_shutdown); @@ -3246,7 +3247,7 @@ struct drm_atomic_state *drm_atomic_helper_suspend(struct drm_device *dev) } unlock: - DRM_MODESET_LOCK_ALL_END(ctx, err); + DRM_MODESET_LOCK_ALL_END(dev, ctx, err); if (err) return ERR_PTR(err); @@ -3327,7 +3328,7 @@ int drm_atomic_helper_resume(struct drm_device *dev, err = drm_atomic_helper_commit_duplicated_state(state, &ctx); - DRM_MODESET_LOCK_ALL_END(ctx, err); + DRM_MODESET_LOCK_ALL_END(dev, ctx, err); drm_atomic_state_put(state); return err; diff --git a/drivers/gpu/drm/drm_color_mgmt.c b/drivers/gpu/drm/drm_color_mgmt.c index c93123ff7c21..138ff34b31db 100644 --- a/drivers/gpu/drm/drm_color_mgmt.c +++ b/drivers/gpu/drm/drm_color_mgmt.c @@ -294,7 +294,7 @@ int drm_mode_gamma_set_ioctl(struct drm_device *dev, crtc->gamma_size, &ctx); out: - DRM_MODESET_LOCK_ALL_END(ctx, ret); + DRM_MODESET_LOCK_ALL_END(dev, ctx, ret); return ret; } diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index 283bcc4362ca..aecdd7ea26dc 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -588,7 +588,6 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data, if (crtc_req->mode_valid && !drm_lease_held(file_priv, plane->base.id)) return -EACCES; - mutex_lock(&crtc->dev->mode_config.mutex); DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE, ret); @@ -756,8 +755,7 @@ out: fb = NULL; mode = NULL; - DRM_MODESET_LOCK_ALL_END(ctx, ret); - mutex_unlock(&crtc->dev->mode_config.mutex); + DRM_MODESET_LOCK_ALL_END(dev, ctx, ret); return ret; } diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index b23cb2fec3f3..67dd72ea200e 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -5040,8 +5040,8 @@ int drm_dp_mst_add_affected_dsc_crtcs(struct drm_atomic_state *state, struct drm crtc = conn_state->crtc; - if (WARN_ON(!crtc)) - return -EINVAL; + if (!crtc) + continue; if (!drm_dp_mst_dsc_aux_for_port(pos->port)) continue; diff --git a/drivers/gpu/drm/drm_mode_object.c b/drivers/gpu/drm/drm_mode_object.c index 901b078abf40..db05f386a709 100644 --- a/drivers/gpu/drm/drm_mode_object.c +++ b/drivers/gpu/drm/drm_mode_object.c @@ -428,7 +428,7 @@ int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data, out_unref: drm_mode_object_put(obj); out: - DRM_MODESET_LOCK_ALL_END(ctx, ret); + DRM_MODESET_LOCK_ALL_END(dev, ctx, ret); return ret; } @@ -470,7 +470,7 @@ static int set_property_legacy(struct drm_mode_object *obj, break; } drm_property_change_valid_put(prop, ref); - DRM_MODESET_LOCK_ALL_END(ctx, ret); + DRM_MODESET_LOCK_ALL_END(dev, ctx, ret); return ret; } diff --git 
a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c index b7b90b3a2e38..affe1cfed009 100644 --- a/drivers/gpu/drm/drm_plane.c +++ b/drivers/gpu/drm/drm_plane.c @@ -792,7 +792,7 @@ static int setplane_internal(struct drm_plane *plane, crtc_x, crtc_y, crtc_w, crtc_h, src_x, src_y, src_w, src_h, &ctx); - DRM_MODESET_LOCK_ALL_END(ctx, ret); + DRM_MODESET_LOCK_ALL_END(plane->dev, ctx, ret); return ret; } diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c index d5a4cd85a0f6..c6404b8d067f 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c @@ -337,9 +337,16 @@ static void etnaviv_hw_identify(struct etnaviv_gpu *gpu) gpu->identity.model = gpu_read(gpu, VIVS_HI_CHIP_MODEL); gpu->identity.revision = gpu_read(gpu, VIVS_HI_CHIP_REV); - gpu->identity.product_id = gpu_read(gpu, VIVS_HI_CHIP_PRODUCT_ID); gpu->identity.customer_id = gpu_read(gpu, VIVS_HI_CHIP_CUSTOMER_ID); - gpu->identity.eco_id = gpu_read(gpu, VIVS_HI_CHIP_ECO_ID); + + /* + * Reading these two registers on GC600 rev 0x19 result in a + * unhandled fault: external abort on non-linefetch + */ + if (!etnaviv_is_model_rev(gpu, GC600, 0x19)) { + gpu->identity.product_id = gpu_read(gpu, VIVS_HI_CHIP_PRODUCT_ID); + gpu->identity.eco_id = gpu_read(gpu, VIVS_HI_CHIP_ECO_ID); + } /* * !!!! HACK ALERT !!!! diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c index 4e3e95dce6d8..cd46c882269c 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c @@ -89,12 +89,15 @@ static void etnaviv_sched_timedout_job(struct drm_sched_job *sched_job) u32 dma_addr; int change; + /* block scheduler */ + drm_sched_stop(&gpu->sched, sched_job); + /* * If the GPU managed to complete this jobs fence, the timout is * spurious. Bail out. 
*/ if (dma_fence_is_signaled(submit->out_fence)) - return; + goto out_no_timeout; /* * If the GPU is still making forward progress on the front-end (which @@ -105,12 +108,9 @@ static void etnaviv_sched_timedout_job(struct drm_sched_job *sched_job) change = dma_addr - gpu->hangcheck_dma_addr; if (change < 0 || change > 16) { gpu->hangcheck_dma_addr = dma_addr; - return; + goto out_no_timeout; } - /* block scheduler */ - drm_sched_stop(&gpu->sched, sched_job); - if(sched_job) drm_sched_increase_karma(sched_job); @@ -120,6 +120,7 @@ static void etnaviv_sched_timedout_job(struct drm_sched_job *sched_job) drm_sched_resubmit_jobs(&gpu->sched); +out_no_timeout: /* restart scheduler after GPU is usable again */ drm_sched_start(&gpu->sched, true); } diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c index 56a2b47e1af7..5147f5929be7 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c @@ -92,7 +92,7 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper, offset = fbi->var.xoffset * fb->format->cpp[0]; offset += fbi->var.yoffset * fb->pitches[0]; - fbi->screen_base = exynos_gem->kvaddr + offset; + fbi->screen_buffer = exynos_gem->kvaddr + offset; fbi->screen_size = size; fbi->fix.smem_len = size; diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h index 7445748288da..74e926abeff0 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.h +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h @@ -40,7 +40,7 @@ struct exynos_drm_gem { unsigned int flags; unsigned long size; void *cookie; - void __iomem *kvaddr; + void *kvaddr; dma_addr_t dma_addr; unsigned long dma_attrs; struct sg_table *sgt; diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c index b67b38c8fadf..46a29e383bfd 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c @@ -133,7 +133,7 @@ void a6xx_gmu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp) if (!gmu->legacy) { a6xx_hfi_set_freq(gmu, perf_index); - icc_set_bw(gpu->icc_path, 0, MBps_to_icc(7216)); + dev_pm_opp_set_bw(&gpu->pdev->dev, opp); pm_runtime_put(gmu->dev); return; } @@ -157,11 +157,7 @@ void a6xx_gmu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp) if (ret) dev_err(gmu->dev, "GMU set GPU frequency error: %d\n", ret); - /* - * Eventually we will want to scale the path vote with the frequency but - * for now leave it at max so that the performance is nominal. 
- */ - icc_set_bw(gpu->icc_path, 0, MBps_to_icc(7216)); + dev_pm_opp_set_bw(&gpu->pdev->dev, opp); pm_runtime_put(gmu->dev); } @@ -204,6 +200,16 @@ static int a6xx_gmu_start(struct a6xx_gmu *gmu) { int ret; u32 val; + u32 mask, reset_val; + + val = gmu_read(gmu, REG_A6XX_GMU_CM3_DTCM_START + 0xff8); + if (val <= 0x20010004) { + mask = 0xffffffff; + reset_val = 0xbabeface; + } else { + mask = 0x1ff; + reset_val = 0x100; + } gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 1); @@ -215,7 +221,7 @@ static int a6xx_gmu_start(struct a6xx_gmu *gmu) gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 0); ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, val, - val == 0xbabeface, 100, 10000); + (val & mask) == reset_val, 100, 10000); if (ret) DRM_DEV_ERROR(gmu->dev, "GMU firmware initialization timed out\n"); @@ -845,10 +851,24 @@ static void a6xx_gmu_set_initial_freq(struct msm_gpu *gpu, struct a6xx_gmu *gmu) if (IS_ERR_OR_NULL(gpu_opp)) return; + gmu->freq = 0; /* so a6xx_gmu_set_freq() doesn't exit early */ a6xx_gmu_set_freq(gpu, gpu_opp); dev_pm_opp_put(gpu_opp); } +static void a6xx_gmu_set_initial_bw(struct msm_gpu *gpu, struct a6xx_gmu *gmu) +{ + struct dev_pm_opp *gpu_opp; + unsigned long gpu_freq = gmu->gpu_freqs[gmu->current_perf_index]; + + gpu_opp = dev_pm_opp_find_freq_exact(&gpu->pdev->dev, gpu_freq, true); + if (IS_ERR_OR_NULL(gpu_opp)) + return; + + dev_pm_opp_set_bw(&gpu->pdev->dev, gpu_opp); + dev_pm_opp_put(gpu_opp); +} + int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu) { struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; @@ -882,7 +902,7 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu) } /* Set the bus quota to a reasonable value for boot */ - icc_set_bw(gpu->icc_path, 0, MBps_to_icc(3072)); + a6xx_gmu_set_initial_bw(gpu, gmu); /* Enable the GMU interrupt */ gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, ~0); @@ -1051,7 +1071,7 @@ int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu) a6xx_gmu_shutdown(gmu); /* Remove the bus vote */ - icc_set_bw(gpu->icc_path, 0, 0); + dev_pm_opp_set_bw(&gpu->pdev->dev, NULL); /* * Make sure the GX domain is off before turning off the GMU (CX) diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c index 959656ad6987..b12f5b4a1bea 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c @@ -938,7 +938,8 @@ struct msm_gpu_state *a6xx_gpu_state_get(struct msm_gpu *gpu) msm_gem_kernel_put(dumper.bo, gpu->aspace, true); } - a6xx_get_debugbus(gpu, a6xx_state); + if (snapshot_debugbus) + a6xx_get_debugbus(gpu, a6xx_state); return &a6xx_state->base; } diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h index 846fd5b54c23..2fb58b7098e4 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h @@ -372,7 +372,7 @@ static const struct a6xx_indexed_registers { u32 data; u32 count; } a6xx_indexed_reglist[] = { - { "CP_SEQ_STAT", REG_A6XX_CP_SQE_STAT_ADDR, + { "CP_SQE_STAT", REG_A6XX_CP_SQE_STAT_ADDR, REG_A6XX_CP_SQE_STAT_DATA, 0x33 }, { "CP_DRAW_STATE", REG_A6XX_CP_DRAW_STATE_ADDR, REG_A6XX_CP_DRAW_STATE_DATA, 0x100 }, diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c index 4e84f3c76f4f..9eeb46bf2a5d 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_device.c +++ b/drivers/gpu/drm/msm/adreno/adreno_device.c @@ -14,6 +14,10 @@ bool hang_debug = false; MODULE_PARM_DESC(hang_debug, "Dump registers when hang is detected (can be slow!)"); 
module_param_named(hang_debug, hang_debug, bool, 0600); +bool snapshot_debugbus = false; +MODULE_PARM_DESC(snapshot_debugbus, "Include debugbus sections in GPU devcoredump (if not fused off)"); +module_param_named(snapshot_debugbus, snapshot_debugbus, bool, 0600); + static const struct adreno_info gpulist[] = { { .rev = ADRENO_REV(2, 0, 0, 0), diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index e23641a5ec84..d2dbb6968cba 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c @@ -396,7 +396,7 @@ int adreno_hw_init(struct msm_gpu *gpu) ring->next = ring->start; /* reset completed fence seqno: */ - ring->memptrs->fence = ring->seqno; + ring->memptrs->fence = ring->fctx->completed_fence; ring->memptrs->rptr = 0; } diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h index 99bb468f5f24..e55abae365b5 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h @@ -21,6 +21,8 @@ #define REG_SKIP ~0 #define REG_ADRENO_SKIP(_offset) [_offset] = REG_SKIP +extern bool snapshot_debugbus; + /** * adreno_regs: List of registers that are used in across all * 3D devices. Each device type has different offset value for the same diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c index f272a8d0f95b..c2729f71e2fa 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c @@ -827,7 +827,7 @@ static void dpu_crtc_enable(struct drm_crtc *crtc, { struct dpu_crtc *dpu_crtc; struct drm_encoder *encoder; - bool request_bandwidth; + bool request_bandwidth = false; if (!crtc) { DPU_ERROR("invalid crtc\n"); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c index a97f6d2e5a08..bd6def436c65 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c @@ -599,7 +599,10 @@ static int dpu_encoder_virt_atomic_check( dpu_kms = to_dpu_kms(priv->kms); mode = &crtc_state->mode; adj_mode = &crtc_state->adjusted_mode; - global_state = dpu_kms_get_existing_global_state(dpu_kms); + global_state = dpu_kms_get_global_state(crtc_state->state); + if (IS_ERR(global_state)) + return PTR_ERR(global_state); + trace_dpu_enc_atomic_check(DRMID(drm_enc)); /* perform atomic check on the first physical encoder (master) */ @@ -625,12 +628,15 @@ static int dpu_encoder_virt_atomic_check( /* Reserve dynamic resources now. */ if (!ret) { /* - * Avoid reserving resources when mode set is pending. Topology - * info may not be available to complete reservation. + * Release and Allocate resources on every modeset + * Dont allocate when active is false. 
*/ if (drm_atomic_crtc_needs_modeset(crtc_state)) { - ret = dpu_rm_reserve(&dpu_kms->rm, global_state, - drm_enc, crtc_state, topology); + dpu_rm_release(global_state, drm_enc); + + if (!crtc_state->active_changed || crtc_state->active) + ret = dpu_rm_reserve(&dpu_kms->rm, global_state, + drm_enc, crtc_state, topology); } } @@ -1181,7 +1187,6 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc) struct dpu_encoder_virt *dpu_enc = NULL; struct msm_drm_private *priv; struct dpu_kms *dpu_kms; - struct dpu_global_state *global_state; int i = 0; if (!drm_enc) { @@ -1200,7 +1205,6 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc) priv = drm_enc->dev->dev_private; dpu_kms = to_dpu_kms(priv->kms); - global_state = dpu_kms_get_existing_global_state(dpu_kms); trace_dpu_enc_disable(DRMID(drm_enc)); @@ -1230,8 +1234,6 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc) DPU_DEBUG_ENC(dpu_enc, "encoder disabled\n"); - dpu_rm_release(global_state, drm_enc); - mutex_unlock(&dpu_enc->enc_lock); } diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c index 33f6c56f01ed..29e373d2e7b5 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c @@ -866,9 +866,9 @@ static int dpu_plane_atomic_check(struct drm_plane *plane, crtc_state = drm_atomic_get_new_crtc_state(state->state, state->crtc); - min_scale = FRAC_16_16(1, pdpu->pipe_sblk->maxdwnscale); + min_scale = FRAC_16_16(1, pdpu->pipe_sblk->maxupscale); ret = drm_atomic_helper_check_plane_state(state, crtc_state, min_scale, - pdpu->pipe_sblk->maxupscale << 16, + pdpu->pipe_sblk->maxdwnscale << 16, true, true); if (ret) { DPU_DEBUG_PLANE(pdpu, "Check plane state failed (%d)\n", ret); diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c index 5ccfad794c6a..561bfa48841c 100644 --- a/drivers/gpu/drm/msm/msm_atomic.c +++ b/drivers/gpu/drm/msm/msm_atomic.c @@ -27,6 +27,34 @@ int msm_atomic_prepare_fb(struct drm_plane *plane, return msm_framebuffer_prepare(new_state->fb, kms->aspace); } +/* + * Helpers to control vblanks while we flush.. 
basically just to ensure + * that vblank accounting is switched on, so we get valid seqn/timestamp + * on pageflip events (if requested) + */ + +static void vblank_get(struct msm_kms *kms, unsigned crtc_mask) +{ + struct drm_crtc *crtc; + + for_each_crtc_mask(kms->dev, crtc, crtc_mask) { + if (!crtc->state->active) + continue; + drm_crtc_vblank_get(crtc); + } +} + +static void vblank_put(struct msm_kms *kms, unsigned crtc_mask) +{ + struct drm_crtc *crtc; + + for_each_crtc_mask(kms->dev, crtc, crtc_mask) { + if (!crtc->state->active) + continue; + drm_crtc_vblank_put(crtc); + } +} + static void msm_atomic_async_commit(struct msm_kms *kms, int crtc_idx) { unsigned crtc_mask = BIT(crtc_idx); @@ -44,6 +72,8 @@ static void msm_atomic_async_commit(struct msm_kms *kms, int crtc_idx) kms->funcs->enable_commit(kms); + vblank_get(kms, crtc_mask); + /* * Flush hardware updates: */ @@ -58,6 +88,8 @@ static void msm_atomic_async_commit(struct msm_kms *kms, int crtc_idx) kms->funcs->wait_flush(kms, crtc_mask); trace_msm_atomic_wait_flush_finish(crtc_mask); + vblank_put(kms, crtc_mask); + mutex_lock(&kms->commit_lock); kms->funcs->complete_commit(kms, crtc_mask); mutex_unlock(&kms->commit_lock); @@ -221,6 +253,8 @@ void msm_atomic_commit_tail(struct drm_atomic_state *state) */ kms->pending_crtc_mask &= ~crtc_mask; + vblank_get(kms, crtc_mask); + /* * Flush hardware updates: */ @@ -235,6 +269,8 @@ void msm_atomic_commit_tail(struct drm_atomic_state *state) kms->funcs->wait_flush(kms, crtc_mask); trace_msm_atomic_wait_flush_finish(crtc_mask); + vblank_put(kms, crtc_mask); + mutex_lock(&kms->commit_lock); kms->funcs->complete_commit(kms, crtc_mask); mutex_unlock(&kms->commit_lock); diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index 7d641c7e3514..79333842f70a 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -1320,6 +1320,13 @@ static int msm_pdev_remove(struct platform_device *pdev) return 0; } +static void msm_pdev_shutdown(struct platform_device *pdev) +{ + struct drm_device *drm = platform_get_drvdata(pdev); + + drm_atomic_helper_shutdown(drm); +} + static const struct of_device_id dt_match[] = { { .compatible = "qcom,mdp4", .data = (void *)KMS_MDP4 }, { .compatible = "qcom,mdss", .data = (void *)KMS_MDP5 }, @@ -1332,6 +1339,7 @@ MODULE_DEVICE_TABLE(of, dt_match); static struct platform_driver msm_platform_driver = { .probe = msm_pdev_probe, .remove = msm_pdev_remove, + .shutdown = msm_pdev_shutdown, .driver = { .name = "msm", .of_match_table = dt_match, diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c index e397c44cc011..39ecb5a18431 100644 --- a/drivers/gpu/drm/msm/msm_ringbuffer.c +++ b/drivers/gpu/drm/msm/msm_ringbuffer.c @@ -27,7 +27,8 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id, ring->id = id; ring->start = msm_gem_kernel_new(gpu->dev, MSM_GPU_RINGBUFFER_SZ, - MSM_BO_WC, gpu->aspace, &ring->bo, &ring->iova); + MSM_BO_WC | MSM_BO_GPU_READONLY, gpu->aspace, &ring->bo, + &ring->iova); if (IS_ERR(ring->start)) { ret = PTR_ERR(ring->start); diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c index 6d40914675da..328a4a74f534 100644 --- a/drivers/gpu/drm/omapdrm/omap_crtc.c +++ b/drivers/gpu/drm/omapdrm/omap_crtc.c @@ -451,11 +451,12 @@ static void omap_crtc_atomic_enable(struct drm_crtc *crtc, if (omap_state->manually_updated) return; - spin_lock_irq(&crtc->dev->event_lock); drm_crtc_vblank_on(crtc); + ret = drm_crtc_vblank_get(crtc); 
WARN_ON(ret != 0); + spin_lock_irq(&crtc->dev->event_lock); omap_crtc_arm_event(crtc); spin_unlock_irq(&crtc->dev->event_lock); } diff --git a/include/drm/drm_modeset_lock.h b/include/drm/drm_modeset_lock.h index 4fc9a43ac45a..aafd07388eb7 100644 --- a/include/drm/drm_modeset_lock.h +++ b/include/drm/drm_modeset_lock.h @@ -164,6 +164,8 @@ int drm_modeset_lock_all_ctx(struct drm_device *dev, * is 0, so no error checking is necessary */ #define DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, flags, ret) \ + if (!drm_drv_uses_atomic_modeset(dev)) \ + mutex_lock(&dev->mode_config.mutex); \ drm_modeset_acquire_init(&ctx, flags); \ modeset_lock_retry: \ ret = drm_modeset_lock_all_ctx(dev, &ctx); \ @@ -172,6 +174,7 @@ modeset_lock_retry: \ /** * DRM_MODESET_LOCK_ALL_END - Helper to release and cleanup modeset locks + * @dev: drm device * @ctx: local modeset acquire context, will be dereferenced * @ret: local ret/err/etc variable to track error status * @@ -188,7 +191,7 @@ modeset_lock_retry: \ * to that failure. In both of these cases the code between BEGIN/END will not * be run, so the failure will reflect the inability to grab the locks. */ -#define DRM_MODESET_LOCK_ALL_END(ctx, ret) \ modeset_lock_fail: \ if (ret == -EDEADLK) { \ ret = drm_modeset_backoff(&ctx); \ if (!ret) \ goto modeset_lock_retry; \ } \ drm_modeset_drop_locks(&ctx); \ - drm_modeset_acquire_fini(&ctx); + drm_modeset_acquire_fini(&ctx); \ + if (!drm_drv_uses_atomic_modeset(dev)) \ + mutex_unlock(&dev->mode_config.mutex); #endif /* DRM_MODESET_LOCK_H_ */
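
Side note on the amdgpu_dm backlight change above: the new convert_brightness_from_user()/convert_brightness_to_user() helpers are a plain linear rescale between the 0..255 user range and the firmware min..max range. The sketch below is a standalone reimplementation of that arithmetic for the non-AUX (PWM) path, not driver code; the firmware limits of 12 and 255 and the helper names are illustrative assumptions only.

/*
 * Standalone sketch of the rescaling done by the new helpers in the
 * amdgpu_dm hunk above (non-AUX path). Build with any C compiler.
 */
#include <stdio.h>

#define AMDGPU_MAX_BL_LEVEL 0xFF
/* Kernel-style rounding division for unsigned operands. */
#define DIV_ROUND_CLOSEST(n, d) (((n) + (d) / 2) / (d))

/* Rescale 0..255 user value to the firmware min..max range. */
static unsigned user_to_pwm(unsigned user, unsigned min, unsigned max)
{
	return min + DIV_ROUND_CLOSEST((max - min) * user, AMDGPU_MAX_BL_LEVEL);
}

/* Rescale a firmware min..max value back to 0..255. */
static unsigned pwm_to_user(unsigned pwm, unsigned min, unsigned max)
{
	if (pwm < min)
		return 0;
	return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (pwm - min), max - min);
}

int main(void)
{
	/* 8-bit firmware limits scaled by 0x101 to the 16-bit PWM range. */
	unsigned min = 0x101 * 12;	/* example min_input_signal = 12  */
	unsigned max = 0x101 * 255;	/* example max_input_signal = 255 */
	unsigned user;

	for (user = 0; user <= AMDGPU_MAX_BL_LEVEL; user += 51)
		printf("user %3u -> pwm 0x%04x -> user %3u\n", user,
		       user_to_pwm(user, min, max),
		       pwm_to_user(user_to_pwm(user, min, max), min, max));
	return 0;
}

Because the firmware range here spans more than 255 steps and DIV_ROUND_CLOSEST() rounds rather than truncates, the round trip in this sketch returns the original user value exactly.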