Diffstat (limited to 'drivers/gpu/drm')
286 files changed, 16321 insertions, 9992 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index ead851413c0a..16fcb56c232b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -700,6 +700,8 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 struct amdgpu_vm_bo_base *bo_base, *tmp;
 int r = 0;
+ vm->bulk_moveable &= list_empty(&vm->evicted);
+
 list_for_each_entry_safe(bo_base, tmp, &vm->evicted, vm_status) {
 struct amdgpu_bo *bo = bo_base->bo;
@@ -947,10 +949,6 @@ int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
 if (r)
 return r;
- r = amdgpu_vm_clear_bo(adev, vm, pt, cursor.level, ats);
- if (r)
- goto error_free_pt;
-
 if (vm->use_cpu_for_update) {
 r = amdgpu_bo_kmap(pt, NULL);
 if (r)
@@ -963,6 +961,10 @@ int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
 pt->parent = amdgpu_bo_ref(cursor.parent->base.bo);
 amdgpu_vm_bo_base_init(&entry->base, vm, pt);
+
+ r = amdgpu_vm_clear_bo(adev, vm, pt, cursor.level, ats);
+ if (r)
+ goto error_free_pt;
 }
 return 0;
@@ -3033,13 +3035,14 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 if (r)
 goto error_unreserve;
+ amdgpu_vm_bo_base_init(&vm->root.base, vm, root);
+
 r = amdgpu_vm_clear_bo(adev, vm, root, adev->vm_manager.root_level, vm->pte_support_ats);
 if (r)
 goto error_unreserve;
- amdgpu_vm_bo_base_init(&vm->root.base, vm, root);
 amdgpu_bo_unreserve(vm->root.base.bo);
 if (pasid) {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 5533f6e4f4a4..d0309e8c9d12 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -220,6 +220,7 @@ static const struct soc15_reg_golden golden_settings_gc_9_1_rv2[] =
 static const struct soc15_reg_golden golden_settings_gc_9_x_common[] =
 {
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_SD_CNTL, 0xffffffff, 0x000001ff),
 SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_INDEX, 0xffffffff, 0x00000000),
 SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_DATA, 0xffffffff, 0x2544c382)
 };
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 840f3bd0fcbe..53327498efbf 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -742,7 +742,7 @@ static int gmc_v9_0_allocate_vm_inv_eng(struct amdgpu_device *adev)
 }
 ring->vm_inv_eng = inv_eng - 1;
- change_bit(inv_eng - 1, (unsigned long *)(&vm_inv_engs[vmhub]));
+ vm_inv_engs[vmhub] &= ~(1 << ring->vm_inv_eng);
 dev_info(adev->dev, "ring %s uses VM inv eng %u on hub %u\n",
 ring->name, ring->vm_inv_eng, ring->funcs->vmhub);
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
index c63de945c021..0487e3a4e9e7 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
@@ -500,9 +500,7 @@ static bool psp_v3_1_smu_reload_quirk(struct psp_context *psp)
 struct amdgpu_device *adev = psp->adev;
 uint32_t reg;
- reg = smnMP1_FIRMWARE_FLAGS | 0x03b00000;
- WREG32_SOC15(NBIO, 0, mmPCIE_INDEX2, reg);
- reg = RREG32_SOC15(NBIO, 0, mmPCIE_DATA2);
+ reg = RREG32_PCIE(smnMP1_FIRMWARE_FLAGS | 0x03b00000);
 return (reg & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) ? true : false;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 99ebcf29dcb0..ed89a101f73f 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -461,7 +461,6 @@ static int soc15_asic_reset(struct amdgpu_device *adev)
 switch (adev->asic_type) {
 case CHIP_VEGA10:
- case CHIP_VEGA20:
 soc15_asic_get_baco_capability(adev, &baco_reset);
 break;
 default:
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
index 47243165a082..ae90a99909ef 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
@@ -323,57 +323,7 @@ static int init_mqd_hiq(struct mqd_manager *mm, void **mqd,
 struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr,
 struct queue_properties *q)
 {
- uint64_t addr;
- struct cik_mqd *m;
- int retval;
-
- retval = kfd_gtt_sa_allocate(mm->dev, sizeof(struct cik_mqd),
- mqd_mem_obj);
-
- if (retval != 0)
- return -ENOMEM;
-
- m = (struct cik_mqd *) (*mqd_mem_obj)->cpu_ptr;
- addr = (*mqd_mem_obj)->gpu_addr;
-
- memset(m, 0, ALIGN(sizeof(struct cik_mqd), 256));
-
- m->header = 0xC0310800;
- m->compute_pipelinestat_enable = 1;
- m->compute_static_thread_mgmt_se0 = 0xFFFFFFFF;
- m->compute_static_thread_mgmt_se1 = 0xFFFFFFFF;
- m->compute_static_thread_mgmt_se2 = 0xFFFFFFFF;
- m->compute_static_thread_mgmt_se3 = 0xFFFFFFFF;
-
- m->cp_hqd_persistent_state = DEFAULT_CP_HQD_PERSISTENT_STATE |
- PRELOAD_REQ;
- m->cp_hqd_quantum = QUANTUM_EN | QUANTUM_SCALE_1MS |
- QUANTUM_DURATION(10);
-
- m->cp_mqd_control = MQD_CONTROL_PRIV_STATE_EN;
- m->cp_mqd_base_addr_lo = lower_32_bits(addr);
- m->cp_mqd_base_addr_hi = upper_32_bits(addr);
-
- m->cp_hqd_ib_control = DEFAULT_MIN_IB_AVAIL_SIZE;
-
- /*
- * Pipe Priority
- * Identifies the pipe relative priority when this queue is connected
- * to the pipeline. The pipe priority is against the GFX pipe and HP3D.
- * In KFD we are using a fixed pipe priority set to CS_MEDIUM.
- * 0 = CS_LOW (typically below GFX)
- * 1 = CS_MEDIUM (typically between HP3D and GFX
- * 2 = CS_HIGH (typically above HP3D)
- */
- m->cp_hqd_pipe_priority = 1;
- m->cp_hqd_queue_priority = 15;
-
- *mqd = m;
- if (gart_addr)
- *gart_addr = addr;
- retval = mm->update_mqd(mm, m, q);
-
- return retval;
+ return init_mqd(mm, mqd, mqd_mem_obj, gart_addr, q);
 }
 static int update_mqd_hiq(struct mqd_manager *mm, void *mqd,
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 2f26581b93ff..fb27783d7a54 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -886,6 +886,7 @@ static void emulated_link_detect(struct dc_link *link)
 return;
 }
+ /* dc_sink_create returns a new reference */
 link->local_sink = sink;
 edid_status = dm_helpers_read_local_edid(
@@ -952,6 +953,8 @@ static int dm_resume(void *handle)
 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
 aconnector->fake_enable = false;
+ if (aconnector->dc_sink)
+ dc_sink_release(aconnector->dc_sink);
 aconnector->dc_sink = NULL;
 amdgpu_dm_update_connector_after_detect(aconnector);
 mutex_unlock(&aconnector->hpd_lock);
@@ -1061,6 +1064,8 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
 sink = aconnector->dc_link->local_sink;
+ if (sink)
+ dc_sink_retain(sink);
 /*
 * Edid mgmt connector gets first update only in mode_valid hook and then
@@ -1085,21 +1090,24 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
 * to it anymore after disconnect, so on next crtc to connector
 * reshuffle by UMD we will get into unwanted dc_sink release
 */
- if (aconnector->dc_sink != aconnector->dc_em_sink)
- dc_sink_release(aconnector->dc_sink);
+ dc_sink_release(aconnector->dc_sink);
 }
 aconnector->dc_sink = sink;
+ dc_sink_retain(aconnector->dc_sink);
 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
 } else {
 amdgpu_dm_update_freesync_caps(connector, NULL);
- if (!aconnector->dc_sink)
+ if (!aconnector->dc_sink) {
 aconnector->dc_sink = aconnector->dc_em_sink;
- else if (aconnector->dc_sink != aconnector->dc_em_sink)
 dc_sink_retain(aconnector->dc_sink);
+ }
 }
 mutex_unlock(&dev->mode_config.mutex);
+
+ if (sink)
+ dc_sink_release(sink);
 return;
 }
@@ -1107,8 +1115,10 @@
 * TODO: temporary guard to look for proper fix
 * if this sink is MST sink, we should not do anything
 */
- if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
+ if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+ dc_sink_release(sink);
 return;
+ }
 if (aconnector->dc_sink == sink) {
 /*
@@ -1117,6 +1127,8 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
 */
 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
 aconnector->connector_id);
+ if (sink)
+ dc_sink_release(sink);
 return;
 }
@@ -1138,6 +1150,7 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
 amdgpu_dm_update_freesync_caps(connector, NULL);
 aconnector->dc_sink = sink;
+ dc_sink_retain(aconnector->dc_sink);
 if (sink->dc_edid.length == 0) {
 aconnector->edid = NULL;
 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
@@ -1158,11 +1171,15 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
 amdgpu_dm_update_freesync_caps(connector, NULL);
 drm_connector_update_edid_property(connector, NULL);
 aconnector->num_modes = 0;
+ dc_sink_release(aconnector->dc_sink);
 aconnector->dc_sink = NULL;
 aconnector->edid = NULL;
 }
 mutex_unlock(&dev->mode_config.mutex);
+
+ if (sink)
+ dc_sink_release(sink);
 }
 static void handle_hpd_irq(void *param)
@@ -2977,6 +2994,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
 return stream;
 } else {
 sink = aconnector->dc_sink;
+ dc_sink_retain(sink);
 }
 stream = dc_create_stream_for_sink(sink);
@@ -3042,8 +3060,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
 update_stream_signal(stream, sink);
 finish:
- if (sink && sink->sink_signal == SIGNAL_TYPE_VIRTUAL && aconnector->base.force != DRM_FORCE_ON)
- dc_sink_release(sink);
+ dc_sink_release(sink);
 return stream;
 }
@@ -3301,6 +3318,14 @@ static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
 dm->backlight_dev = NULL;
 }
 #endif
+
+ if (aconnector->dc_em_sink)
+ dc_sink_release(aconnector->dc_em_sink);
+ aconnector->dc_em_sink = NULL;
+ if (aconnector->dc_sink)
+ dc_sink_release(aconnector->dc_sink);
+ aconnector->dc_sink = NULL;
+
 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
 drm_connector_unregister(connector);
 drm_connector_cleanup(connector);
@@ -3398,10 +3423,12 @@ static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
 (edid->extensions + 1) * EDID_LENGTH,
 &init_params);
- if (aconnector->base.force == DRM_FORCE_ON)
+ if (aconnector->base.force == DRM_FORCE_ON) {
 aconnector->dc_sink = aconnector->dc_link->local_sink ?
 aconnector->dc_link->local_sink : aconnector->dc_em_sink;
+ dc_sink_retain(aconnector->dc_sink);
+ }
 }
 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index f51d52eb52e6..c4ea3a91f17a 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -191,6 +191,7 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
 &init_params);
 dc_sink->priv = aconnector;
+ /* dc_link_add_remote_sink returns a new reference */
 aconnector->dc_sink = dc_sink;
 if (aconnector->dc_sink)
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
index 12d1842079ae..eb62d10bb65c 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
@@ -1348,12 +1348,12 @@ void dcn_bw_update_from_pplib(struct dc *dc)
 struct dm_pp_clock_levels_with_voltage fclks = {0}, dcfclks = {0};
 bool res;
- kernel_fpu_begin();
-
 /* TODO: This is not the proper way to obtain fabric_and_dram_bandwidth, should be min(fclk, memclk) */
 res = dm_pp_get_clock_levels_by_type_with_voltage(
 ctx, DM_PP_CLOCK_TYPE_FCLK, &fclks);
+ kernel_fpu_begin();
+
 if (res)
 res = verify_clock_values(&fclks);
@@ -1372,9 +1372,13 @@ void dcn_bw_update_from_pplib(struct dc *dc)
 } else
 BREAK_TO_DEBUGGER();
+ kernel_fpu_end();
+
 res = dm_pp_get_clock_levels_by_type_with_voltage(
 ctx, DM_PP_CLOCK_TYPE_DCFCLK, &dcfclks);
+ kernel_fpu_begin();
+
 if (res)
 res = verify_clock_values(&dcfclks);
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 7f5a947ad31d..4eba3c4800b6 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -794,6 +794,7 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
 sink->link->dongle_max_pix_clk = sink_caps.max_hdmi_pixel_clock;
 sink->converter_disable_audio = converter_disable_audio;
+ /* dc_sink_create returns a new reference */
 link->local_sink = sink;
 edid_status = dm_helpers_read_local_edid(
@@ -2037,6 +2038,9 @@ static enum dc_status enable_link(
 break;
 }
+ if (status == DC_OK)
+ pipe_ctx->stream->link->link_status.link_active = true;
+
 return status;
 }
@@ -2060,6 +2064,14 @@ static void disable_link(struct dc_link *link, enum signal_type signal)
 dp_disable_link_phy_mst(link, signal);
 } else
 link->link_enc->funcs->disable_output(link->link_enc, signal);
+
+ if (signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+ /* MST disable link only when no stream use the link */
+ if (link->mst_stream_alloc_table.stream_count <= 0)
+ link->link_status.link_active = false;
+ } else {
+ link->link_status.link_active = false;
+ }
 }
 static bool dp_active_dongle_validate_timing(
@@ -2623,8 +2635,6 @@ void core_link_enable_stream(
 }
 }
- stream->link->link_status.link_active = true;
-
 core_dc->hwss.enable_audio_stream(pipe_ctx);
 /* turn off otg test pattern if enable */
@@ -2659,8 +2669,6 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx, int option)
 core_dc->hwss.disable_stream(pipe_ctx, option);
 disable_link(pipe_ctx->stream->link, pipe_ctx->stream->signal);
-
- pipe_ctx->stream->link->link_status.link_active = false;
 }
 void core_link_set_avmute(struct pipe_ctx *pipe_ctx, bool enable)
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
index 94a84bc57c7a..bfd27f10879e 100644
--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
@@ -724,7 +724,7 @@ static void build_vrr_infopacket_v1(enum signal_type signal,
 static void build_vrr_infopacket_v2(enum signal_type signal,
 const struct mod_vrr_params *vrr,
- const enum color_transfer_func *app_tf,
+ enum color_transfer_func app_tf,
 struct dc_info_packet *infopacket)
 {
 unsigned int payload_size = 0;
@@ -732,8 +732,7 @@ static void build_vrr_infopacket_v2(enum signal_type signal,
 build_vrr_infopacket_header_v2(signal, infopacket, &payload_size);
 build_vrr_infopacket_data(vrr, infopacket);
- if (app_tf != NULL)
- build_vrr_infopacket_fs2_data(*app_tf, infopacket);
+ build_vrr_infopacket_fs2_data(app_tf, infopacket);
 build_vrr_infopacket_checksum(&payload_size, infopacket);
@@ -757,7 +756,7 @@ void mod_freesync_build_vrr_infopacket(struct mod_freesync *mod_freesync,
 const struct dc_stream_state *stream,
 const struct mod_vrr_params *vrr,
 enum vrr_packet_type packet_type,
- const enum color_transfer_func *app_tf,
+ enum color_transfer_func app_tf,
 struct dc_info_packet *infopacket)
 {
 /* SPD info packet for FreeSync
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
index 4222e403b151..dcef85994c45 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
@@ -145,7 +145,7 @@ void mod_freesync_build_vrr_infopacket(struct mod_freesync *mod_freesync,
 const struct dc_stream_state *stream,
 const struct mod_vrr_params *vrr,
 enum vrr_packet_type packet_type,
- const enum color_transfer_func *app_tf,
+ enum color_transfer_func app_tf,
 struct dc_info_packet *infopacket);
 void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
index ce177d7f04cb..6bf48934fdc4 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
@@ -277,8 +277,7 @@ int psm_adjust_power_state_dynamic(struct pp_hwmgr *hwmgr, bool skip_display_set
 if (!skip_display_settings)
 phm_notify_smc_display_config_after_ps_adjustment(hwmgr);
- if ((hwmgr->request_dpm_level != hwmgr->dpm_level) &&
- !phm_force_dpm_levels(hwmgr, hwmgr->request_dpm_level))
+ if (!phm_force_dpm_levels(hwmgr, hwmgr->request_dpm_level))
 hwmgr->dpm_level = hwmgr->request_dpm_level;
 if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c
index 4588bddf8b33..615cf2c09e54 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c
@@ -489,15 +489,16 @@ int pp_atomfwctrl_get_gpio_information(struct pp_hwmgr *hwmgr,
 }
 int pp_atomfwctrl_get_clk_information_by_clkid(struct pp_hwmgr *hwmgr,
- uint8_t id, uint32_t *frequency)
+ uint8_t clk_id, uint8_t syspll_id,
+ uint32_t *frequency)
 {
 struct amdgpu_device *adev = hwmgr->adev;
 struct atom_get_smu_clock_info_parameters_v3_1 parameters;
 struct atom_get_smu_clock_info_output_parameters_v3_1 *output;
 uint32_t ix;
- parameters.clk_id = id;
- parameters.syspll_id = 0;
+ parameters.clk_id = clk_id;
+ parameters.syspll_id = syspll_id;
 parameters.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
 parameters.dfsdid = 0;
@@ -530,20 +531,23 @@ static void pp_atomfwctrl_copy_vbios_bootup_values_3_2(struct pp_hwmgr *hwmgr,
 boot_values->ulSocClk = 0;
 boot_values->ulDCEFClk = 0;
- if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_SOCCLK_ID, &frequency))
+ if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_SOCCLK_ID, SMU11_SYSPLL0_ID, &frequency))
 boot_values->ulSocClk = frequency;
- if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_DCEFCLK_ID, &frequency))
+ if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_DCEFCLK_ID, SMU11_SYSPLL0_ID, &frequency))
 boot_values->ulDCEFClk = frequency;
- if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_ECLK_ID, &frequency))
+ if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_ECLK_ID, SMU11_SYSPLL0_ID, &frequency))
 boot_values->ulEClk = frequency;
- if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_VCLK_ID, &frequency))
+ if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_VCLK_ID, SMU11_SYSPLL0_ID, &frequency))
 boot_values->ulVClk = frequency;
- if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_DCLK_ID, &frequency))
+ if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_DCLK_ID, SMU11_SYSPLL0_ID, &frequency))
 boot_values->ulDClk = frequency;
+
+ if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL1_0_FCLK_ID, SMU11_SYSPLL1_2_ID, &frequency))
+ boot_values->ulFClk = frequency;
 }
 static void pp_atomfwctrl_copy_vbios_bootup_values_3_1(struct pp_hwmgr *hwmgr,
@@ -563,19 +567,19 @@ static void pp_atomfwctrl_copy_vbios_bootup_values_3_1(struct pp_hwmgr *hwmgr,
 boot_values->ulSocClk = 0;
 boot_values->ulDCEFClk = 0;
- if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_SOCCLK_ID, &frequency))
+ if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_SOCCLK_ID, 0, &frequency))
 boot_values->ulSocClk = frequency;
- if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_DCEFCLK_ID, &frequency))
+ if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_DCEFCLK_ID, 0, &frequency))
 boot_values->ulDCEFClk = frequency;
- if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_ECLK_ID, &frequency))
+ if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_ECLK_ID, 0, &frequency))
 boot_values->ulEClk = frequency;
- if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_VCLK_ID, &frequency))
+ if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_VCLK_ID, 0, &frequency))
 boot_values->ulVClk = frequency;
- if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_DCLK_ID, &frequency))
+ if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_DCLK_ID, 0, &frequency))
 boot_values->ulDClk = frequency;
 }
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h
index fe9e8ceef50e..b7e2651b570b 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h
@@ -139,6 +139,7 @@ struct pp_atomfwctrl_bios_boot_up_values {
 uint32_t ulEClk;
 uint32_t ulVClk;
 uint32_t ulDClk;
+ uint32_t ulFClk;
 uint16_t usVddc;
 uint16_t usVddci;
 uint16_t usMvddc;
@@ -236,7 +237,8 @@ int pp_atomfwctrl_get_vbios_bootup_values(struct pp_hwmgr *hwmgr,
 int pp_atomfwctrl_get_smc_dpm_information(struct pp_hwmgr *hwmgr,
 struct pp_atomfwctrl_smc_dpm_parameters *param);
 int pp_atomfwctrl_get_clk_information_by_clkid(struct pp_hwmgr *hwmgr,
- uint8_t id, uint32_t *frequency);
+ uint8_t clk_id, uint8_t syspll_id,
+ uint32_t *frequency);
 #endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index 48187acac59e..83d3d935f3ac 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -3491,14 +3491,14 @@ static int smu7_get_gpu_power(struct pp_hwmgr *hwmgr, u32 *query)
 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogStart);
 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixSMU_PM_STATUS_94, 0);
+ ixSMU_PM_STATUS_95, 0);
 for (i = 0; i < 10; i++) {
- mdelay(1);
+ mdelay(500);
 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogSample);
 tmp = cgs_read_ind_register(hwmgr->device,
 CGS_IND_REG__SMC,
- ixSMU_PM_STATUS_94);
+ ixSMU_PM_STATUS_95);
 if (tmp != 0)
 break;
 }
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index 5479125ff4f6..5c4f701939ea 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -2575,10 +2575,10 @@ static int vega10_init_smc_table(struct pp_hwmgr *hwmgr)
 data->vbios_boot_state.gfx_clock = boot_up_values.ulGfxClk;
 data->vbios_boot_state.mem_clock = boot_up_values.ulUClk;
 pp_atomfwctrl_get_clk_information_by_clkid(hwmgr,
- SMU9_SYSPLL0_SOCCLK_ID, &boot_up_values.ulSocClk);
+ SMU9_SYSPLL0_SOCCLK_ID, 0, &boot_up_values.ulSocClk);
 pp_atomfwctrl_get_clk_information_by_clkid(hwmgr,
- SMU9_SYSPLL0_DCEFCLK_ID, &boot_up_values.ulDCEFClk);
+ SMU9_SYSPLL0_DCEFCLK_ID, 0, &boot_up_values.ulDCEFClk);
 data->vbios_boot_state.soc_clock = boot_up_values.ulSocClk;
 data->vbios_boot_state.dcef_clock = boot_up_values.ulDCEFClk;
@@ -4407,9 +4407,9 @@ static int vega10_set_ppfeature_status(struct pp_hwmgr *hwmgr, uint64_t new_ppfe
 return ret;
 features_to_disable =
- (features_enabled ^ new_ppfeature_masks) & features_enabled;
+ features_enabled & ~new_ppfeature_masks;
 features_to_enable =
- (features_enabled ^ new_ppfeature_masks) ^ features_to_disable;
+ ~features_enabled & new_ppfeature_masks;
 pr_debug("features_to_disable 0x%llx\n", features_to_disable);
 pr_debug("features_to_enable 0x%llx\n", features_to_enable);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
index 6c8e78611c03..bdb48e94eff6 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
@@ -2009,9 +2009,9 @@ static int vega12_set_ppfeature_status(struct pp_hwmgr *hwmgr, uint64_t new_ppfe
 return ret;
 features_to_disable =
- (features_enabled ^ new_ppfeature_masks) & features_enabled;
+ features_enabled & ~new_ppfeature_masks;
 features_to_enable =
- (features_enabled ^ new_ppfeature_masks) ^ features_to_disable;
+ ~features_enabled & new_ppfeature_masks;
 pr_debug("features_to_disable 0x%llx\n", features_to_disable);
 pr_debug("features_to_enable 0x%llx\n", features_to_enable);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
index aad79affb081..9aa7bec1b5fe 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
@@ -463,9 +463,9 @@ static int vega20_setup_asic_task(struct pp_hwmgr *hwmgr)
 static void vega20_init_dpm_state(struct vega20_dpm_state *dpm_state)
 {
 dpm_state->soft_min_level = 0x0;
- dpm_state->soft_max_level = 0xffff;
+ dpm_state->soft_max_level = VG20_CLOCK_MAX_DEFAULT;
 dpm_state->hard_min_level = 0x0;
- dpm_state->hard_max_level = 0xffff;
+ dpm_state->hard_max_level = VG20_CLOCK_MAX_DEFAULT;
 }
 static int vega20_get_number_of_dpm_level(struct pp_hwmgr *hwmgr,
@@ -711,8 +711,10 @@ static int vega20_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
 PP_ASSERT_WITH_CODE(!ret,
 "[SetupDefaultDpmTable] failed to get fclk dpm levels!",
 return ret);
- } else
- dpm_table->count = 0;
+ } else {
+ dpm_table->count = 1;
+ dpm_table->dpm_levels[0].value = data->vbios_boot_state.fclock / 100;
+ }
 vega20_init_dpm_state(&(dpm_table->dpm_state));
 /* save a copy of the default DPM table */
@@ -754,6 +756,7 @@ static int vega20_init_smc_table(struct pp_hwmgr *hwmgr)
 data->vbios_boot_state.eclock = boot_up_values.ulEClk;
 data->vbios_boot_state.vclock = boot_up_values.ulVClk;
 data->vbios_boot_state.dclock = boot_up_values.ulDClk;
+ data->vbios_boot_state.fclock = boot_up_values.ulFClk;
 data->vbios_boot_state.uc_cooling_id = boot_up_values.ucCoolingID;
 smum_send_msg_to_smc_with_parameter(hwmgr,
@@ -780,6 +783,8 @@ static int vega20_init_smc_table(struct pp_hwmgr *hwmgr)
 static int vega20_override_pcie_parameters(struct pp_hwmgr *hwmgr)
 {
 struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
+ struct vega20_hwmgr *data =
+ (struct vega20_hwmgr *)(hwmgr->backend);
 uint32_t pcie_gen = 0, pcie_width = 0, smu_pcie_arg;
 int ret;
@@ -816,6 +821,10 @@ static int vega20_override_pcie_parameters(struct pp_hwmgr *hwmgr)
 "[OverridePcieParameters] Attempt to override pcie params failed!",
 return ret);
+ data->pcie_parameters_override = 1;
+ data->pcie_gen_level1 = pcie_gen;
+ data->pcie_width_level1 = pcie_width;
+
 return 0;
 }
@@ -979,6 +988,8 @@ static int vega20_od8_set_feature_capabilities(
 }
 if (data->smu_features[GNLD_DPM_UCLK].enabled) {
+ pptable_information->od_settings_min[OD8_SETTING_UCLK_FMAX] =
+ data->dpm_table.mem_table.dpm_levels[data->dpm_table.mem_table.count - 2].value;
 if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_UCLK_MAX] &&
 pptable_information->od_settings_min[OD8_SETTING_UCLK_FMAX] > 0 &&
 pptable_information->od_settings_max[OD8_SETTING_UCLK_FMAX] > 0 &&
@@ -2314,32 +2325,8 @@ static int vega20_force_dpm_lowest(struct pp_hwmgr *hwmgr)
 static int vega20_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
 {
- struct vega20_hwmgr *data =
- (struct vega20_hwmgr *)(hwmgr->backend);
- uint32_t soft_min_level, soft_max_level;
 int ret = 0;
- soft_min_level = vega20_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
- soft_max_level = vega20_find_highest_dpm_level(&(data->dpm_table.gfx_table));
- data->dpm_table.gfx_table.dpm_state.soft_min_level =
- data->dpm_table.gfx_table.dpm_levels[soft_min_level].value;
- data->dpm_table.gfx_table.dpm_state.soft_max_level =
- data->dpm_table.gfx_table.dpm_levels[soft_max_level].value;
-
- soft_min_level = vega20_find_lowest_dpm_level(&(data->dpm_table.mem_table));
- soft_max_level = vega20_find_highest_dpm_level(&(data->dpm_table.mem_table));
- data->dpm_table.mem_table.dpm_state.soft_min_level =
- data->dpm_table.mem_table.dpm_levels[soft_min_level].value;
- data->dpm_table.mem_table.dpm_state.soft_max_level =
- data->dpm_table.mem_table.dpm_levels[soft_max_level].value;
-
- soft_min_level = vega20_find_lowest_dpm_level(&(data->dpm_table.soc_table));
- soft_max_level = vega20_find_highest_dpm_level(&(data->dpm_table.soc_table));
- data->dpm_table.soc_table.dpm_state.soft_min_level =
- data->dpm_table.soc_table.dpm_levels[soft_min_level].value;
- data->dpm_table.soc_table.dpm_state.soft_max_level =
- data->dpm_table.soc_table.dpm_levels[soft_max_level].value;
-
 ret = vega20_upload_dpm_min_level(hwmgr, 0xFFFFFFFF);
 PP_ASSERT_WITH_CODE(!ret,
 "Failed to upload DPM Bootup Levels!",
@@ -2641,9 +2628,8 @@ static int vega20_get_sclks(struct pp_hwmgr *hwmgr,
 struct vega20_single_dpm_table *dpm_table = &(data->dpm_table.gfx_table);
 int i, count;
- PP_ASSERT_WITH_CODE(data->smu_features[GNLD_DPM_GFXCLK].enabled,
- "[GetSclks]: gfxclk dpm not enabled!\n",
- return -EPERM);
+ if (!data->smu_features[GNLD_DPM_GFXCLK].enabled)
+ return -1;
 count = (dpm_table->count > MAX_NUM_CLOCKS) ? MAX_NUM_CLOCKS : dpm_table->count;
 clocks->num_levels = count;
@@ -2670,9 +2656,8 @@ static int vega20_get_memclocks(struct pp_hwmgr *hwmgr,
 struct vega20_single_dpm_table *dpm_table = &(data->dpm_table.mem_table);
 int i, count;
- PP_ASSERT_WITH_CODE(data->smu_features[GNLD_DPM_UCLK].enabled,
- "[GetMclks]: uclk dpm not enabled!\n",
- return -EPERM);
+ if (!data->smu_features[GNLD_DPM_UCLK].enabled)
+ return -1;
 count = (dpm_table->count > MAX_NUM_CLOCKS) ? MAX_NUM_CLOCKS : dpm_table->count;
 clocks->num_levels = data->mclk_latency_table.count = count;
@@ -2696,9 +2681,8 @@ static int vega20_get_dcefclocks(struct pp_hwmgr *hwmgr,
 struct vega20_single_dpm_table *dpm_table = &(data->dpm_table.dcef_table);
 int i, count;
- PP_ASSERT_WITH_CODE(data->smu_features[GNLD_DPM_DCEFCLK].enabled,
- "[GetDcfclocks]: dcefclk dpm not enabled!\n",
- return -EPERM);
+ if (!data->smu_features[GNLD_DPM_DCEFCLK].enabled)
+ return -1;
 count = (dpm_table->count > MAX_NUM_CLOCKS) ? MAX_NUM_CLOCKS : dpm_table->count;
 clocks->num_levels = count;
@@ -2719,9 +2703,8 @@ static int vega20_get_socclocks(struct pp_hwmgr *hwmgr,
 struct vega20_single_dpm_table *dpm_table = &(data->dpm_table.soc_table);
 int i, count;
- PP_ASSERT_WITH_CODE(data->smu_features[GNLD_DPM_SOCCLK].enabled,
- "[GetSocclks]: socclk dpm not enabled!\n",
- return -EPERM);
+ if (!data->smu_features[GNLD_DPM_SOCCLK].enabled)
+ return -1;
 count = (dpm_table->count > MAX_NUM_CLOCKS) ? MAX_NUM_CLOCKS : dpm_table->count;
 clocks->num_levels = count;
@@ -2799,7 +2782,6 @@ static int vega20_odn_edit_dpm_table(struct pp_hwmgr *hwmgr,
 data->od8_settings.od8_settings_array;
 OverDriveTable_t *od_table =
 &(data->smc_state_table.overdrive_table);
- struct pp_clock_levels_with_latency clocks;
 int32_t input_index, input_clk, input_vol, i;
 int od8_id;
 int ret;
@@ -2858,11 +2840,6 @@ static int vega20_odn_edit_dpm_table(struct pp_hwmgr *hwmgr,
 return -EOPNOTSUPP;
 }
- ret = vega20_get_memclocks(hwmgr, &clocks);
- PP_ASSERT_WITH_CODE(!ret,
- "Attempt to get memory clk levels failed!",
- return ret);
-
 for (i = 0; i < size; i += 2) {
 if (i + 2 > size) {
 pr_info("invalid number of input parameters %d\n",
@@ -2879,11 +2856,11 @@ static int vega20_odn_edit_dpm_table(struct pp_hwmgr *hwmgr,
 return -EINVAL;
 }
- if (input_clk < clocks.data[0].clocks_in_khz / 1000 ||
+ if (input_clk < od8_settings[OD8_SETTING_UCLK_FMAX].min_value ||
 input_clk > od8_settings[OD8_SETTING_UCLK_FMAX].max_value) {
 pr_info("clock freq %d is not within allowed range [%d - %d]\n",
 input_clk,
- clocks.data[0].clocks_in_khz / 1000,
+ od8_settings[OD8_SETTING_UCLK_FMAX].min_value,
 od8_settings[OD8_SETTING_UCLK_FMAX].max_value);
 return -EINVAL;
 }
@@ -3088,9 +3065,9 @@ static int vega20_set_ppfeature_status(struct pp_hwmgr *hwmgr, uint64_t new_ppfe
 return ret;
 features_to_disable =
- (features_enabled ^ new_ppfeature_masks) & features_enabled;
+ features_enabled & ~new_ppfeature_masks;
 features_to_enable =
- (features_enabled ^ new_ppfeature_masks) ^ features_to_disable;
+ ~features_enabled & new_ppfeature_masks;
 pr_debug("features_to_disable 0x%llx\n", features_to_disable);
 pr_debug("features_to_enable 0x%llx\n", features_to_enable);
@@ -3128,7 +3105,7 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
 &(data->dpm_table.fclk_table);
 int i, now, size = 0;
 int ret = 0;
- uint32_t gen_speed, lane_width;
+ uint32_t gen_speed, lane_width, current_gen_speed, current_lane_width;
 switch (type) {
 case PP_SCLK:
@@ -3137,10 +3114,11 @@
 "Attempt to get current gfx clk Failed!",
 return ret);
- ret = vega20_get_sclks(hwmgr, &clocks);
- PP_ASSERT_WITH_CODE(!ret,
- "Attempt to get gfx clk levels Failed!",
- return ret);
+ if (vega20_get_sclks(hwmgr, &clocks)) {
+ size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n",
+ now / 100);
+ break;
+ }
 for (i = 0; i < clocks.num_levels; i++)
 size += sprintf(buf + size, "%d: %uMhz %s\n",
@@ -3154,10 +3132,11 @@
 "Attempt to get current mclk freq Failed!",
 return ret);
- ret = vega20_get_memclocks(hwmgr, &clocks);
- PP_ASSERT_WITH_CODE(!ret,
- "Attempt to get memory clk levels Failed!",
- return ret);
+ if (vega20_get_memclocks(hwmgr, &clocks)) {
+ size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n",
+ now / 100);
+ break;
+ }
 for (i = 0; i < clocks.num_levels; i++)
 size += sprintf(buf + size, "%d: %uMhz %s\n",
@@ -3171,10 +3150,11 @@
"Attempt to get current socclk freq Failed!", return ret); - ret = vega20_get_socclocks(hwmgr, &clocks); - PP_ASSERT_WITH_CODE(!ret, - "Attempt to get soc clk levels Failed!", - return ret); + if (vega20_get_socclocks(hwmgr, &clocks)) { + size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n", + now / 100); + break; + } for (i = 0; i < clocks.num_levels; i++) size += sprintf(buf + size, "%d: %uMhz %s\n", @@ -3200,10 +3180,11 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, "Attempt to get current dcefclk freq Failed!", return ret); - ret = vega20_get_dcefclocks(hwmgr, &clocks); - PP_ASSERT_WITH_CODE(!ret, - "Attempt to get dcefclk levels Failed!", - return ret); + if (vega20_get_dcefclocks(hwmgr, &clocks)) { + size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n", + now / 100); + break; + } for (i = 0; i < clocks.num_levels; i++) size += sprintf(buf + size, "%d: %uMhz %s\n", @@ -3212,28 +3193,36 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, break; case PP_PCIE: - gen_speed = (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) & + current_gen_speed = (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) & PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK) >> PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT; - lane_width = (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL) & + current_lane_width = (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL) & PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK) >> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT; - for (i = 0; i < NUM_LINK_LEVELS; i++) + for (i = 0; i < NUM_LINK_LEVELS; i++) { + if (i == 1 && data->pcie_parameters_override) { + gen_speed = data->pcie_gen_level1; + lane_width = data->pcie_width_level1; + } else { + gen_speed = pptable->PcieGenSpeed[i]; + lane_width = pptable->PcieLaneCount[i]; + } size += sprintf(buf + size, "%d: %s %s %dMhz %s\n", i, - (pptable->PcieGenSpeed[i] == 0) ? "2.5GT/s," : - (pptable->PcieGenSpeed[i] == 1) ? "5.0GT/s," : - (pptable->PcieGenSpeed[i] == 2) ? "8.0GT/s," : - (pptable->PcieGenSpeed[i] == 3) ? "16.0GT/s," : "", - (pptable->PcieLaneCount[i] == 1) ? "x1" : - (pptable->PcieLaneCount[i] == 2) ? "x2" : - (pptable->PcieLaneCount[i] == 3) ? "x4" : - (pptable->PcieLaneCount[i] == 4) ? "x8" : - (pptable->PcieLaneCount[i] == 5) ? "x12" : - (pptable->PcieLaneCount[i] == 6) ? "x16" : "", + (gen_speed == 0) ? "2.5GT/s," : + (gen_speed == 1) ? "5.0GT/s," : + (gen_speed == 2) ? "8.0GT/s," : + (gen_speed == 3) ? "16.0GT/s," : "", + (lane_width == 1) ? "x1" : + (lane_width == 2) ? "x2" : + (lane_width == 3) ? "x4" : + (lane_width == 4) ? "x8" : + (lane_width == 5) ? "x12" : + (lane_width == 6) ? "x16" : "", pptable->LclkFreq[i], - (gen_speed == pptable->PcieGenSpeed[i]) && - (lane_width == pptable->PcieLaneCount[i]) ? + (current_gen_speed == gen_speed) && + (current_lane_width == lane_width) ? 
"*" : ""); + } break; case OD_SCLK: @@ -3288,13 +3277,8 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, } if (od8_settings[OD8_SETTING_UCLK_FMAX].feature_id) { - ret = vega20_get_memclocks(hwmgr, &clocks); - PP_ASSERT_WITH_CODE(!ret, - "Fail to get memory clk levels!", - return ret); - size += sprintf(buf + size, "MCLK: %7uMhz %10uMhz\n", - clocks.data[0].clocks_in_khz / 1000, + od8_settings[OD8_SETTING_UCLK_FMAX].min_value, od8_settings[OD8_SETTING_UCLK_FMAX].max_value); } @@ -3356,6 +3340,31 @@ static int vega20_set_uclk_to_highest_dpm_level(struct pp_hwmgr *hwmgr, return ret; } +static int vega20_set_fclk_to_highest_dpm_level(struct pp_hwmgr *hwmgr) +{ + struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); + struct vega20_single_dpm_table *dpm_table = &(data->dpm_table.fclk_table); + int ret = 0; + + if (data->smu_features[GNLD_DPM_FCLK].enabled) { + PP_ASSERT_WITH_CODE(dpm_table->count > 0, + "[SetFclkToHightestDpmLevel] Dpm table has no entry!", + return -EINVAL); + PP_ASSERT_WITH_CODE(dpm_table->count <= NUM_FCLK_DPM_LEVELS, + "[SetFclkToHightestDpmLevel] Dpm table has too many entries!", + return -EINVAL); + + dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_SetSoftMinByFreq, + (PPCLK_FCLK << 16 ) | dpm_table->dpm_state.soft_min_level)), + "[SetFclkToHightestDpmLevel] Set soft min fclk failed!", + return ret); + } + + return ret; +} + static int vega20_pre_display_configuration_changed_task(struct pp_hwmgr *hwmgr) { struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); @@ -3366,8 +3375,10 @@ static int vega20_pre_display_configuration_changed_task(struct pp_hwmgr *hwmgr) ret = vega20_set_uclk_to_highest_dpm_level(hwmgr, &data->dpm_table.mem_table); + if (ret) + return ret; - return ret; + return vega20_set_fclk_to_highest_dpm_level(hwmgr); } static int vega20_display_configuration_changed_task(struct pp_hwmgr *hwmgr) @@ -3461,9 +3472,9 @@ static int vega20_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr) /* gfxclk */ dpm_table = &(data->dpm_table.gfx_table); dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value; - dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT; dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value; - dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT; if (PP_CAP(PHM_PlatformCaps_UMDPState)) { if (VEGA20_UMD_PSTATE_GFXCLK_LEVEL < dpm_table->count) { @@ -3485,9 +3496,9 @@ static int vega20_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr) /* memclk */ dpm_table = &(data->dpm_table.mem_table); dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value; - dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT; dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value; - dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT; if (PP_CAP(PHM_PlatformCaps_UMDPState)) { if (VEGA20_UMD_PSTATE_MCLK_LEVEL < dpm_table->count) { @@ -3526,12 +3537,21 @@ static int vega20_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr) if 
(hwmgr->display_config->nb_pstate_switch_disable) dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + /* fclk */ + dpm_table = &(data->dpm_table.fclk_table); + dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value; + dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT; + dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value; + dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT; + if (hwmgr->display_config->nb_pstate_switch_disable) + dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + /* vclk */ dpm_table = &(data->dpm_table.vclk_table); dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value; - dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT; dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value; - dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT; if (PP_CAP(PHM_PlatformCaps_UMDPState)) { if (VEGA20_UMD_PSTATE_UVDCLK_LEVEL < dpm_table->count) { @@ -3548,9 +3568,9 @@ static int vega20_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr) /* dclk */ dpm_table = &(data->dpm_table.dclk_table); dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value; - dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT; dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value; - dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT; if (PP_CAP(PHM_PlatformCaps_UMDPState)) { if (VEGA20_UMD_PSTATE_UVDCLK_LEVEL < dpm_table->count) { @@ -3567,9 +3587,9 @@ static int vega20_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr) /* socclk */ dpm_table = &(data->dpm_table.soc_table); dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value; - dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT; dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value; - dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT; if (PP_CAP(PHM_PlatformCaps_UMDPState)) { if (VEGA20_UMD_PSTATE_SOCCLK_LEVEL < dpm_table->count) { @@ -3586,9 +3606,9 @@ static int vega20_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr) /* eclk */ dpm_table = &(data->dpm_table.eclk_table); dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value; - dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT; dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value; - dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; + dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT; if (PP_CAP(PHM_PlatformCaps_UMDPState)) { if (VEGA20_UMD_PSTATE_VCEMCLK_LEVEL < dpm_table->count) { diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h index 37f5f5e657da..a5bc758ae097 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h +++ 
b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h @@ -42,6 +42,8 @@ #define AVFS_CURVE 0 #define OD8_HOTCURVE_TEMPERATURE 85 +#define VG20_CLOCK_MAX_DEFAULT 0xFFFF + typedef uint32_t PP_Clock; enum { @@ -219,6 +221,7 @@ struct vega20_vbios_boot_state { uint32_t eclock; uint32_t dclock; uint32_t vclock; + uint32_t fclock; }; #define DPMTABLE_OD_UPDATE_SCLK 0x00000001 @@ -523,6 +526,10 @@ struct vega20_hwmgr { unsigned long metrics_time; SmuMetrics_t metrics_table; + + bool pcie_parameters_override; + uint32_t pcie_gen_level1; + uint32_t pcie_width_level1; }; #define VEGA20_DPM2_NEAR_TDP_DEC 10 diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c index 97f8a1a970c3..7a7f15d0c53a 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c @@ -32,6 +32,8 @@ #include "cgs_common.h" #include "vega20_pptable.h" +#define VEGA20_FAN_TARGET_TEMPERATURE_OVERRIDE 105 + static void set_hw_cap(struct pp_hwmgr *hwmgr, bool enable, enum phm_platform_caps cap) { @@ -798,6 +800,17 @@ static int append_vbios_pptable(struct pp_hwmgr *hwmgr, PPTable_t *ppsmc_pptable return 0; } +static int override_powerplay_table_fantargettemperature(struct pp_hwmgr *hwmgr) +{ + struct phm_ppt_v3_information *pptable_information = + (struct phm_ppt_v3_information *)hwmgr->pptable; + PPTable_t *ppsmc_pptable = (PPTable_t *)(pptable_information->smc_pptable); + + ppsmc_pptable->FanTargetTemperature = VEGA20_FAN_TARGET_TEMPERATURE_OVERRIDE; + + return 0; +} + #define VEGA20_ENGINECLOCK_HARDMAX 198000 static int init_powerplay_table_information( struct pp_hwmgr *hwmgr, @@ -887,6 +900,10 @@ static int init_powerplay_table_information( result = append_vbios_pptable(hwmgr, (pptable_information->smc_pptable)); + if (result) + return result; + + result = override_powerplay_table_fantargettemperature(hwmgr); return result; } diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c index 52abca065764..2d4cfe14f72e 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c @@ -2330,6 +2330,7 @@ static uint32_t polaris10_get_offsetof(uint32_t type, uint32_t member) case DRAM_LOG_BUFF_SIZE: return offsetof(SMU74_SoftRegisters, DRAM_LOG_BUFF_SIZE); } + break; case SMU_Discrete_DpmTable: switch (member) { case UvdBootLevel: @@ -2339,6 +2340,7 @@ static uint32_t polaris10_get_offsetof(uint32_t type, uint32_t member) case LowSclkInterruptThreshold: return offsetof(SMU74_Discrete_DpmTable, LowSclkInterruptThreshold); } + break; } pr_warn("can't get the offset of type %x member %x\n", type, member); return 0; diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.c index 079fc8e8f709..742b3dc1f6cb 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.c @@ -40,10 +40,8 @@ bool smu9_is_smc_ram_running(struct pp_hwmgr *hwmgr) struct amdgpu_device *adev = hwmgr->adev; uint32_t mp1_fw_flags; - WREG32_SOC15(NBIF, 0, mmPCIE_INDEX2, - (MP1_Public | (smnMP1_FIRMWARE_FLAGS & 0xffffffff))); - - mp1_fw_flags = RREG32_SOC15(NBIF, 0, mmPCIE_DATA2); + mp1_fw_flags = RREG32_PCIE(MP1_Public | + (smnMP1_FIRMWARE_FLAGS & 0xffffffff)); if (mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) return true; diff --git 
a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c index b7ff7d4d6f44..ba00744c3413 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c @@ -49,10 +49,8 @@ static bool vega20_is_smc_ram_running(struct pp_hwmgr *hwmgr) struct amdgpu_device *adev = hwmgr->adev; uint32_t mp1_fw_flags; - WREG32_SOC15(NBIF, 0, mmPCIE_INDEX2, - (MP1_Public | (smnMP1_FIRMWARE_FLAGS & 0xffffffff))); - - mp1_fw_flags = RREG32_SOC15(NBIF, 0, mmPCIE_DATA2); + mp1_fw_flags = RREG32_PCIE(MP1_Public | + (smnMP1_FIRMWARE_FLAGS & 0xffffffff)); if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >> MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT) diff --git a/drivers/gpu/drm/arm/malidp_mw.c b/drivers/gpu/drm/arm/malidp_mw.c index 2865f7a8f0b7..5f102bdaf841 100644 --- a/drivers/gpu/drm/arm/malidp_mw.c +++ b/drivers/gpu/drm/arm/malidp_mw.c @@ -257,8 +257,7 @@ void malidp_mw_atomic_commit(struct drm_device *drm, &mw_state->addrs[0], mw_state->format); - drm_writeback_queue_job(mw_conn, conn_state->writeback_job); - conn_state->writeback_job = NULL; + drm_writeback_queue_job(mw_conn, conn_state); hwdev->hw->enable_memwrite(hwdev, mw_state->addrs, mw_state->pitches, mw_state->n_planes, fb->width, fb->height, mw_state->format, diff --git a/drivers/gpu/drm/bridge/dumb-vga-dac.c b/drivers/gpu/drm/bridge/dumb-vga-dac.c index 0805801f4e94..e64736c39a9f 100644 --- a/drivers/gpu/drm/bridge/dumb-vga-dac.c +++ b/drivers/gpu/drm/bridge/dumb-vga-dac.c @@ -234,7 +234,7 @@ static int dumb_vga_remove(struct platform_device *pdev) */ static const struct drm_bridge_timings default_dac_timings = { /* Timing specifications, datasheet page 7 */ - .sampling_edge = DRM_BUS_FLAG_PIXDATA_POSEDGE, + .input_bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE, .setup_time_ps = 500, .hold_time_ps = 1500, }; @@ -245,7 +245,7 @@ static const struct drm_bridge_timings default_dac_timings = { */ static const struct drm_bridge_timings ti_ths8134_dac_timings = { /* From timing diagram, datasheet page 9 */ - .sampling_edge = DRM_BUS_FLAG_PIXDATA_POSEDGE, + .input_bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE, /* From datasheet, page 12 */ .setup_time_ps = 3000, /* I guess this means latched input */ @@ -258,7 +258,7 @@ static const struct drm_bridge_timings ti_ths8134_dac_timings = { */ static const struct drm_bridge_timings ti_ths8135_dac_timings = { /* From timing diagram, datasheet page 14 */ - .sampling_edge = DRM_BUS_FLAG_PIXDATA_POSEDGE, + .input_bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE, /* From datasheet, page 16 */ .setup_time_ps = 2000, .hold_time_ps = 500, diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c index cf3f0caf9c63..ed7af7518b52 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c @@ -614,7 +614,6 @@ static int snd_dw_hdmi_suspend(struct device *dev) struct snd_dw_hdmi *dw = dev_get_drvdata(dev); snd_power_change_state(dw->card, SNDRV_CTL_POWER_D3cold); - snd_pcm_suspend_all(dw->pcm); return 0; } diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c index 888980d4bc74..e570c9dee180 100644 --- a/drivers/gpu/drm/bridge/tc358767.c +++ b/drivers/gpu/drm/bridge/tc358767.c @@ -1222,8 +1222,8 @@ static int tc_bridge_attach(struct drm_bridge *bridge) &bus_format, 1); tc->connector.display_info.bus_flags = DRM_BUS_FLAG_DE_HIGH | - 
DRM_BUS_FLAG_PIXDATA_NEGEDGE | - DRM_BUS_FLAG_SYNC_NEGEDGE; + DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE | + DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE; drm_connector_attach_encoder(&tc->connector, tc->bridge.encoder); return 0; diff --git a/drivers/gpu/drm/bridge/ti-tfp410.c b/drivers/gpu/drm/bridge/ti-tfp410.c index 7bfb4f338813..285be4a0f4bd 100644 --- a/drivers/gpu/drm/bridge/ti-tfp410.c +++ b/drivers/gpu/drm/bridge/ti-tfp410.c @@ -27,10 +27,14 @@ struct tfp410 { struct drm_bridge bridge; struct drm_connector connector; + unsigned int connector_type; struct i2c_adapter *ddc; struct gpio_desc *hpd; struct delayed_work hpd_work; + struct gpio_desc *powerdown; + + struct drm_bridge_timings timings; struct device *dev; }; @@ -126,7 +130,7 @@ static int tfp410_attach(struct drm_bridge *bridge) drm_connector_helper_add(&dvi->connector, &tfp410_con_helper_funcs); ret = drm_connector_init(bridge->dev, &dvi->connector, - &tfp410_con_funcs, DRM_MODE_CONNECTOR_HDMIA); + &tfp410_con_funcs, dvi->connector_type); if (ret) { dev_err(dvi->dev, "drm_connector_init() failed: %d\n", ret); return ret; @@ -138,8 +142,24 @@ static int tfp410_attach(struct drm_bridge *bridge) return 0; } +static void tfp410_enable(struct drm_bridge *bridge) +{ + struct tfp410 *dvi = drm_bridge_to_tfp410(bridge); + + gpiod_set_value_cansleep(dvi->powerdown, 0); +} + +static void tfp410_disable(struct drm_bridge *bridge) +{ + struct tfp410 *dvi = drm_bridge_to_tfp410(bridge); + + gpiod_set_value_cansleep(dvi->powerdown, 1); +} + static const struct drm_bridge_funcs tfp410_bridge_funcs = { .attach = tfp410_attach, + .enable = tfp410_enable, + .disable = tfp410_disable, }; static void tfp410_hpd_work_func(struct work_struct *work) @@ -162,6 +182,70 @@ static irqreturn_t tfp410_hpd_irq_thread(int irq, void *arg) return IRQ_HANDLED; } +static const struct drm_bridge_timings tfp410_default_timings = { + .input_bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE + | DRM_BUS_FLAG_DE_HIGH, + .setup_time_ps = 1200, + .hold_time_ps = 1300, +}; + +static int tfp410_parse_timings(struct tfp410 *dvi, bool i2c) +{ + struct drm_bridge_timings *timings = &dvi->timings; + struct device_node *ep; + u32 pclk_sample = 0; + s32 deskew = 0; + + /* Start with defaults. */ + *timings = tfp410_default_timings; + + if (i2c) + /* + * In I2C mode timings are configured through the I2C interface. + * As the driver doesn't support I2C configuration yet, we just + * go with the defaults (BSEL=1, DSEL=1, DKEN=0, EDGE=1). + */ + return 0; + + /* + * In non-I2C mode, timings are configured through the BSEL, DSEL, DKEN + * and EDGE pins. They are specified in DT through endpoint properties + * and vendor-specific properties. + */ + ep = of_graph_get_endpoint_by_regs(dvi->dev->of_node, 0, 0); + if (!ep) + return -EINVAL; + + /* Get the sampling edge from the endpoint. */ + of_property_read_u32(ep, "pclk-sample", &pclk_sample); + of_node_put(ep); + + timings->input_bus_flags = DRM_BUS_FLAG_DE_HIGH; + + switch (pclk_sample) { + case 0: + timings->input_bus_flags |= DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE + | DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE; + break; + case 1: + timings->input_bus_flags |= DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE + | DRM_BUS_FLAG_SYNC_SAMPLE_POSEDGE; + break; + default: + return -EINVAL; + } + + /* Get the setup and hold time from vendor-specific properties. 
*/ + of_property_read_u32(dvi->dev->of_node, "ti,deskew", (u32 *)&deskew); + if (deskew < -4 || deskew > 3) + return -EINVAL; + + timings->setup_time_ps = min(0, 1200 - 350 * deskew); + timings->hold_time_ps = min(0, 1300 + 350 * deskew); + + return 0; +} + static int tfp410_get_connector_properties(struct tfp410 *dvi) { struct device_node *connector_node, *ddc_phandle; @@ -172,6 +256,11 @@ static int tfp410_get_connector_properties(struct tfp410 *dvi) if (!connector_node) return -ENODEV; + if (of_device_is_compatible(connector_node, "hdmi-connector")) + dvi->connector_type = DRM_MODE_CONNECTOR_HDMIA; + else + dvi->connector_type = DRM_MODE_CONNECTOR_DVID; + dvi->hpd = fwnode_get_named_gpiod(&connector_node->fwnode, "hpd-gpios", 0, GPIOD_IN, "hpd"); if (IS_ERR(dvi->hpd)) { @@ -200,7 +289,7 @@ fail: return ret; } -static int tfp410_init(struct device *dev) +static int tfp410_init(struct device *dev, bool i2c) { struct tfp410 *dvi; int ret; @@ -217,12 +306,24 @@ static int tfp410_init(struct device *dev) dvi->bridge.funcs = &tfp410_bridge_funcs; dvi->bridge.of_node = dev->of_node; + dvi->bridge.timings = &dvi->timings; dvi->dev = dev; + ret = tfp410_parse_timings(dvi, i2c); + if (ret) + goto fail; + ret = tfp410_get_connector_properties(dvi); if (ret) goto fail; + dvi->powerdown = devm_gpiod_get_optional(dev, "powerdown", + GPIOD_OUT_HIGH); + if (IS_ERR(dvi->powerdown)) { + dev_err(dev, "failed to parse powerdown gpio\n"); + return PTR_ERR(dvi->powerdown); + } + if (dvi->hpd) { INIT_DELAYED_WORK(&dvi->hpd_work, tfp410_hpd_work_func); @@ -264,7 +365,7 @@ static int tfp410_fini(struct device *dev) static int tfp410_probe(struct platform_device *pdev) { - return tfp410_init(&pdev->dev); + return tfp410_init(&pdev->dev, false); } static int tfp410_remove(struct platform_device *pdev) @@ -301,7 +402,7 @@ static int tfp410_i2c_probe(struct i2c_client *client, return -ENXIO; } - return tfp410_init(&client->dev); + return tfp410_init(&client->dev, true); } static int tfp410_i2c_remove(struct i2c_client *client) diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 2453678d1186..86efd2da37f9 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -495,7 +495,7 @@ mode_fixup(struct drm_atomic_state *state) static enum drm_mode_status mode_valid_path(struct drm_connector *connector, struct drm_encoder *encoder, struct drm_crtc *crtc, - struct drm_display_mode *mode) + const struct drm_display_mode *mode) { enum drm_mode_status ret; @@ -534,7 +534,7 @@ mode_valid(struct drm_atomic_state *state) struct drm_crtc *crtc = conn_state->crtc; struct drm_crtc_state *crtc_state; enum drm_mode_status mode_status; - struct drm_display_mode *mode; + const struct drm_display_mode *mode; if (!crtc || !encoder) continue; @@ -2261,10 +2261,21 @@ EXPORT_SYMBOL(drm_atomic_helper_commit_cleanup_done); int drm_atomic_helper_prepare_planes(struct drm_device *dev, struct drm_atomic_state *state) { + struct drm_connector *connector; + struct drm_connector_state *new_conn_state; struct drm_plane *plane; struct drm_plane_state *new_plane_state; int ret, i, j; + for_each_new_connector_in_state(state, connector, new_conn_state, i) { + if (!new_conn_state->writeback_job) + continue; + + ret = drm_writeback_prepare_job(new_conn_state->writeback_job); + if (ret < 0) + return ret; + } + for_each_new_plane_in_state(state, plane, new_plane_state, i) { const struct drm_plane_helper_funcs *funcs; @@ -3039,9 +3050,31 @@ commit: return 0; } -static int 
__drm_atomic_helper_disable_all(struct drm_device *dev, - struct drm_modeset_acquire_ctx *ctx, - bool clean_old_fbs) +/** + * drm_atomic_helper_disable_all - disable all currently active outputs + * @dev: DRM device + * @ctx: lock acquisition context + * + * Loops through all connectors, finding those that aren't turned off and then + * turns them off by setting their DPMS mode to OFF and deactivating the CRTC + * that they are connected to. + * + * This is used for example in suspend/resume to disable all currently active + * functions when suspending. If you just want to shut down everything at e.g. + * driver unload, look at drm_atomic_helper_shutdown(). + * + * Note that if callers haven't already acquired all modeset locks this might + * return -EDEADLK, which must be handled by calling drm_modeset_backoff(). + * + * Returns: + * 0 on success or a negative error code on failure. + * + * See also: + * drm_atomic_helper_suspend(), drm_atomic_helper_resume() and + * drm_atomic_helper_shutdown(). + */ +int drm_atomic_helper_disable_all(struct drm_device *dev, + struct drm_modeset_acquire_ctx *ctx) { struct drm_atomic_state *state; struct drm_connector_state *conn_state; @@ -3099,35 +3132,6 @@ free: drm_atomic_state_put(state); return ret; } - -/** - * drm_atomic_helper_disable_all - disable all currently active outputs - * @dev: DRM device - * @ctx: lock acquisition context - * - * Loops through all connectors, finding those that aren't turned off and then - * turns them off by setting their DPMS mode to OFF and deactivating the CRTC - * that they are connected to. - * - * This is used for example in suspend/resume to disable all currently active - * functions when suspending. If you just want to shut down everything at e.g. - * driver unload, look at drm_atomic_helper_shutdown(). - * - * Note that if callers haven't already acquired all modeset locks this might - * return -EDEADLK, which must be handled by calling drm_modeset_backoff(). - * - * Returns: - * 0 on success or a negative error code on failure. - * - * See also: - * drm_atomic_helper_suspend(), drm_atomic_helper_resume() and - * drm_atomic_helper_shutdown(). 
- */ -int drm_atomic_helper_disable_all(struct drm_device *dev, - struct drm_modeset_acquire_ctx *ctx) -{ - return __drm_atomic_helper_disable_all(dev, ctx, false); -} EXPORT_SYMBOL(drm_atomic_helper_disable_all); /** @@ -3148,7 +3152,7 @@ void drm_atomic_helper_shutdown(struct drm_device *dev) DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret); - ret = __drm_atomic_helper_disable_all(dev, &ctx, true); + ret = drm_atomic_helper_disable_all(dev, &ctx); if (ret) DRM_ERROR("Disabling all crtc's during unload failed with %i\n", ret); diff --git a/drivers/gpu/drm/drm_atomic_state_helper.c b/drivers/gpu/drm/drm_atomic_state_helper.c index 4985384e51f6..59ffb6b9c745 100644 --- a/drivers/gpu/drm/drm_atomic_state_helper.c +++ b/drivers/gpu/drm/drm_atomic_state_helper.c @@ -30,6 +30,7 @@ #include <drm/drm_connector.h> #include <drm/drm_atomic.h> #include <drm/drm_device.h> +#include <drm/drm_writeback.h> #include <linux/slab.h> #include <linux/dma-fence.h> @@ -412,6 +413,9 @@ __drm_atomic_helper_connector_destroy_state(struct drm_connector_state *state) if (state->commit) drm_crtc_commit_put(state->commit); + + if (state->writeback_job) + drm_writeback_cleanup_job(state->writeback_job); } EXPORT_SYMBOL(__drm_atomic_helper_connector_destroy_state); diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c index 4eb81f10bc54..ea797d4c82ee 100644 --- a/drivers/gpu/drm/drm_atomic_uapi.c +++ b/drivers/gpu/drm/drm_atomic_uapi.c @@ -647,28 +647,15 @@ drm_atomic_plane_get_property(struct drm_plane *plane, return 0; } -static struct drm_writeback_job * -drm_atomic_get_writeback_job(struct drm_connector_state *conn_state) -{ - WARN_ON(conn_state->connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK); - - if (!conn_state->writeback_job) - conn_state->writeback_job = - kzalloc(sizeof(*conn_state->writeback_job), GFP_KERNEL); - - return conn_state->writeback_job; -} - static int drm_atomic_set_writeback_fb_for_connector( struct drm_connector_state *conn_state, struct drm_framebuffer *fb) { - struct drm_writeback_job *job = - drm_atomic_get_writeback_job(conn_state); - if (!job) - return -ENOMEM; + int ret; - drm_framebuffer_assign(&job->fb, fb); + ret = drm_writeback_set_fb(conn_state, fb); + if (ret < 0) + return ret; if (fb) DRM_DEBUG_ATOMIC("Set [FB:%d] for connector state %p\n", @@ -1162,19 +1149,17 @@ static int prepare_signaling(struct drm_device *dev, for_each_new_connector_in_state(state, conn, conn_state, i) { struct drm_writeback_connector *wb_conn; - struct drm_writeback_job *job; struct drm_out_fence_state *f; struct dma_fence *fence; s32 __user *fence_ptr; + if (!conn_state->writeback_job) + continue; + fence_ptr = get_out_fence_for_connector(state, conn); if (!fence_ptr) continue; - job = drm_atomic_get_writeback_job(conn_state); - if (!job) - return -ENOMEM; - f = krealloc(*fence_state, sizeof(**fence_state) * (*num_fences + 1), GFP_KERNEL); if (!f) @@ -1196,7 +1181,7 @@ static int prepare_signaling(struct drm_device *dev, return ret; } - job->out_fence = fence; + conn_state->writeback_job->out_fence = fence; } /* diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c index 67b1fca39aa6..0e3043e08c69 100644 --- a/drivers/gpu/drm/drm_ioc32.c +++ b/drivers/gpu/drm/drm_ioc32.c @@ -185,7 +185,7 @@ static int compat_drm_getmap(struct file *file, unsigned int cmd, m32.size = map.size; m32.type = map.type; m32.flags = map.flags; - m32.handle = ptr_to_compat(map.handle); + m32.handle = ptr_to_compat((void __user *)map.handle); m32.mtrr = map.mtrr; if 
(copy_to_user(argp, &m32, sizeof(m32))) return -EFAULT; @@ -216,7 +216,7 @@ static int compat_drm_addmap(struct file *file, unsigned int cmd, m32.offset = map.offset; m32.mtrr = map.mtrr; - m32.handle = ptr_to_compat(map.handle); + m32.handle = ptr_to_compat((void __user *)map.handle); if (map.handle != compat_ptr(m32.handle)) pr_err_ratelimited("compat_drm_addmap truncated handle %p for type %d offset %x\n", map.handle, m32.type, m32.offset); @@ -526,7 +526,7 @@ static int compat_drm_getsareactx(struct file *file, unsigned int cmd, if (err) return err; - req32.handle = ptr_to_compat(req.handle); + req32.handle = ptr_to_compat((void __user *)req.handle); if (copy_to_user(argp, &req32, sizeof(req32))) return -EFAULT; diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c index 869ac6f4671e..56f92a0bba62 100644 --- a/drivers/gpu/drm/drm_modes.c +++ b/drivers/gpu/drm/drm_modes.c @@ -655,22 +655,22 @@ EXPORT_SYMBOL_GPL(drm_display_mode_to_videomode); * @bus_flags: information about pixelclk, sync and DE polarity will be stored * here * - * Sets DRM_BUS_FLAG_DE_(LOW|HIGH), DRM_BUS_FLAG_PIXDATA_(POS|NEG)EDGE and - * DISPLAY_FLAGS_SYNC_(POS|NEG)EDGE in @bus_flags according to DISPLAY_FLAGS + * Sets DRM_BUS_FLAG_DE_(LOW|HIGH), DRM_BUS_FLAG_PIXDATA_DRIVE_(POS|NEG)EDGE + * and DISPLAY_FLAGS_SYNC_(POS|NEG)EDGE in @bus_flags according to DISPLAY_FLAGS * found in @vm */ void drm_bus_flags_from_videomode(const struct videomode *vm, u32 *bus_flags) { *bus_flags = 0; if (vm->flags & DISPLAY_FLAGS_PIXDATA_POSEDGE) - *bus_flags |= DRM_BUS_FLAG_PIXDATA_POSEDGE; + *bus_flags |= DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE; if (vm->flags & DISPLAY_FLAGS_PIXDATA_NEGEDGE) - *bus_flags |= DRM_BUS_FLAG_PIXDATA_NEGEDGE; + *bus_flags |= DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE; if (vm->flags & DISPLAY_FLAGS_SYNC_POSEDGE) - *bus_flags |= DRM_BUS_FLAG_SYNC_POSEDGE; + *bus_flags |= DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE; if (vm->flags & DISPLAY_FLAGS_SYNC_NEGEDGE) - *bus_flags |= DRM_BUS_FLAG_SYNC_NEGEDGE; + *bus_flags |= DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE; if (vm->flags & DISPLAY_FLAGS_DE_LOW) *bus_flags |= DRM_BUS_FLAG_DE_LOW; diff --git a/drivers/gpu/drm/drm_writeback.c b/drivers/gpu/drm/drm_writeback.c index c20e6fe00cb3..79ac014701c8 100644 --- a/drivers/gpu/drm/drm_writeback.c +++ b/drivers/gpu/drm/drm_writeback.c @@ -239,14 +239,52 @@ fail: } EXPORT_SYMBOL(drm_writeback_connector_init); +int drm_writeback_set_fb(struct drm_connector_state *conn_state, + struct drm_framebuffer *fb) +{ + WARN_ON(conn_state->connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK); + + if (!conn_state->writeback_job) { + conn_state->writeback_job = + kzalloc(sizeof(*conn_state->writeback_job), GFP_KERNEL); + if (!conn_state->writeback_job) + return -ENOMEM; + + conn_state->writeback_job->connector = + drm_connector_to_writeback(conn_state->connector); + } + + drm_framebuffer_assign(&conn_state->writeback_job->fb, fb); + return 0; +} + +int drm_writeback_prepare_job(struct drm_writeback_job *job) +{ + struct drm_writeback_connector *connector = job->connector; + const struct drm_connector_helper_funcs *funcs = + connector->base.helper_private; + int ret; + + if (funcs->prepare_writeback_job) { + ret = funcs->prepare_writeback_job(connector, job); + if (ret < 0) + return ret; + } + + job->prepared = true; + return 0; +} +EXPORT_SYMBOL(drm_writeback_prepare_job); + /** * drm_writeback_queue_job - Queue a writeback job for later signalling * @wb_connector: The writeback connector to queue a job on - * @job: The job to queue + * @conn_state: 
The connector state containing the job to queue * - * This function adds a job to the job_queue for a writeback connector. It - * should be considered to take ownership of the writeback job, and so any other - * references to the job must be cleared after calling this function. + * This function adds the job contained in @conn_state to the job_queue for a + * writeback connector. It takes ownership of the writeback job and sets the + * @conn_state->writeback_job to NULL, and so no access to the job may be + * performed by the caller after this function returns. * * Drivers must ensure that for a given writeback connector, jobs are queued in * exactly the same order as they will be completed by the hardware (and @@ -258,16 +296,36 @@ EXPORT_SYMBOL(drm_writeback_connector_init); * See also: drm_writeback_signal_completion() */ void drm_writeback_queue_job(struct drm_writeback_connector *wb_connector, - struct drm_writeback_job *job) + struct drm_connector_state *conn_state) { + struct drm_writeback_job *job; unsigned long flags; + job = conn_state->writeback_job; + conn_state->writeback_job = NULL; + spin_lock_irqsave(&wb_connector->job_lock, flags); list_add_tail(&job->list_entry, &wb_connector->job_queue); spin_unlock_irqrestore(&wb_connector->job_lock, flags); } EXPORT_SYMBOL(drm_writeback_queue_job); +void drm_writeback_cleanup_job(struct drm_writeback_job *job) +{ + struct drm_writeback_connector *connector = job->connector; + const struct drm_connector_helper_funcs *funcs = + connector->base.helper_private; + + if (job->prepared && funcs->cleanup_writeback_job) + funcs->cleanup_writeback_job(connector, job); + + if (job->fb) + drm_framebuffer_put(job->fb); + + kfree(job); +} +EXPORT_SYMBOL(drm_writeback_cleanup_job); + /* * @cleanup_work: deferred cleanup of a writeback job * @@ -280,10 +338,9 @@ static void cleanup_work(struct work_struct *work) struct drm_writeback_job *job = container_of(work, struct drm_writeback_job, cleanup_work); - drm_framebuffer_put(job->fb); - kfree(job); -} + drm_writeback_cleanup_job(job); +} /** * drm_writeback_signal_completion - Signal the completion of a writeback job diff --git a/drivers/gpu/drm/etnaviv/Kconfig b/drivers/gpu/drm/etnaviv/Kconfig index 041a77e400d4..21df44b78df3 100644 --- a/drivers/gpu/drm/etnaviv/Kconfig +++ b/drivers/gpu/drm/etnaviv/Kconfig @@ -2,7 +2,6 @@ config DRM_ETNAVIV tristate "ETNAVIV (DRM support for Vivante GPU IP cores)" depends on DRM - depends on ARCH_MXC || ARCH_DOVE || (ARM && COMPILE_TEST) depends on MMU select SHMEM select SYNC_FILE diff --git a/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h b/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h index acb68c698363..4d5d1a77eb2a 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h +++ b/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h @@ -15,8 +15,6 @@ struct etnaviv_perfmon_request; struct etnaviv_cmdbuf { /* suballocator this cmdbuf is allocated from */ struct etnaviv_cmdbuf_suballoc *suballoc; - /* user context key, must be unique between all active users */ - struct etnaviv_file_private *ctx; /* cmdbuf properties */ int suballoc_offset; void *vaddr; diff --git a/drivers/gpu/drm/etnaviv/etnaviv_dump.c b/drivers/gpu/drm/etnaviv/etnaviv_dump.c index 3fbb4855396c..33854c94cb85 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_dump.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_dump.c @@ -215,7 +215,7 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu) mutex_lock(&obj->lock); pages = etnaviv_gem_get_pages(obj); mutex_unlock(&obj->lock); - if (pages) { + if (!IS_ERR(pages)) { int j; iter.hdr->data[0] = bomap 
- bomap_start; diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.h b/drivers/gpu/drm/etnaviv/etnaviv_gem.h index 7015837ccc1c..753c458497d0 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.h +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.h @@ -91,6 +91,7 @@ struct etnaviv_gem_submit_bo { struct etnaviv_gem_submit { struct drm_sched_job sched_job; struct kref refcount; + struct etnaviv_file_private *ctx; struct etnaviv_gpu *gpu; struct dma_fence *out_fence, *in_fence; int out_fence_id; diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c index 01e7ad96339c..00e8b6a817e3 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c @@ -15,7 +15,7 @@ struct sg_table *etnaviv_gem_prime_get_sg_table(struct drm_gem_object *obj) int npages = obj->size >> PAGE_SHIFT; if (WARN_ON(!etnaviv_obj->pages)) /* should have already pinned! */ - return NULL; + return ERR_PTR(-EINVAL); return drm_prime_pages_to_sg(etnaviv_obj->pages, npages); } diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c index a10281e915e5..e054f09ac828 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c @@ -506,7 +506,7 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data, if (ret) goto err_submit_objects; - submit->cmdbuf.ctx = file->driver_priv; + submit->ctx = file->driver_priv; submit->exec_state = args->exec_state; submit->flags = args->flags; diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c index f1c88d8ad5ba..f794e04be9e6 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c @@ -320,8 +320,8 @@ etnaviv_iommuv2_domain_alloc(struct etnaviv_gpu *gpu) domain = &etnaviv_domain->base; domain->dev = gpu->dev; - domain->base = 0; - domain->size = (u64)SZ_1G * 4; + domain->base = SZ_4K; + domain->size = (u64)SZ_1G * 4 - SZ_4K; domain->ops = &etnaviv_iommuv2_ops; ret = etnaviv_iommuv2_init(etnaviv_domain); diff --git a/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c b/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c index 9980d81a26e3..4227a4006c34 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c @@ -113,7 +113,7 @@ static const struct etnaviv_pm_domain doms_3d[] = { .name = "PE", .profile_read = VIVS_MC_PROFILE_PE_READ, .profile_config = VIVS_MC_PROFILE_CONFIG0, - .nr_signals = 5, + .nr_signals = 4, .signal = (const struct etnaviv_pm_signal[]) { { "PIXEL_COUNT_KILLED_BY_COLOR_PIPE", @@ -435,7 +435,7 @@ int etnaviv_pm_query_sig(struct etnaviv_gpu *gpu, dom = meta->domains + signal->domain; - if (signal->iter > dom->nr_signals) + if (signal->iter >= dom->nr_signals) return -EINVAL; sig = &dom->signal[signal->iter]; @@ -461,7 +461,7 @@ int etnaviv_pm_req_validate(const struct drm_etnaviv_gem_submit_pmr *r, dom = meta->domains + r->domain; - if (r->signal > dom->nr_signals) + if (r->signal >= dom->nr_signals) return -EINVAL; return 0; diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c index 67ae26602024..6d24fea1766b 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c @@ -153,7 +153,7 @@ int etnaviv_sched_push_job(struct drm_sched_entity *sched_entity, mutex_lock(&submit->gpu->fence_lock); ret = drm_sched_job_init(&submit->sched_job, sched_entity, - submit->cmdbuf.ctx); + submit->ctx); if (ret) goto 
out_unlock; diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c index 0573eab0e190..f35e4ab55b27 100644 --- a/drivers/gpu/drm/exynos/exynos_mixer.c +++ b/drivers/gpu/drm/exynos/exynos_mixer.c @@ -20,6 +20,7 @@ #include "regs-vp.h" #include <linux/kernel.h> +#include <linux/ktime.h> #include <linux/spinlock.h> #include <linux/wait.h> #include <linux/i2c.h> @@ -352,15 +353,62 @@ static void mixer_cfg_vp_blend(struct mixer_context *ctx, unsigned int alpha) mixer_reg_write(ctx, MXR_VIDEO_CFG, val); } -static void mixer_vsync_set_update(struct mixer_context *ctx, bool enable) +static bool mixer_is_synced(struct mixer_context *ctx) { - /* block update on vsync */ - mixer_reg_writemask(ctx, MXR_STATUS, enable ? - MXR_STATUS_SYNC_ENABLE : 0, MXR_STATUS_SYNC_ENABLE); + u32 base, shadow; + if (ctx->mxr_ver == MXR_VER_16_0_33_0 || + ctx->mxr_ver == MXR_VER_128_0_0_184) + return !(mixer_reg_read(ctx, MXR_CFG) & + MXR_CFG_LAYER_UPDATE_COUNT_MASK); + + if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags) && + vp_reg_read(ctx, VP_SHADOW_UPDATE)) + return false; + + base = mixer_reg_read(ctx, MXR_CFG); + shadow = mixer_reg_read(ctx, MXR_CFG_S); + if (base != shadow) + return false; + + base = mixer_reg_read(ctx, MXR_GRAPHIC_BASE(0)); + shadow = mixer_reg_read(ctx, MXR_GRAPHIC_BASE_S(0)); + if (base != shadow) + return false; + + base = mixer_reg_read(ctx, MXR_GRAPHIC_BASE(1)); + shadow = mixer_reg_read(ctx, MXR_GRAPHIC_BASE_S(1)); + if (base != shadow) + return false; + + return true; +} + +static int mixer_wait_for_sync(struct mixer_context *ctx) +{ + ktime_t timeout = ktime_add_us(ktime_get(), 100000); + + while (!mixer_is_synced(ctx)) { + usleep_range(1000, 2000); + if (ktime_compare(ktime_get(), timeout) > 0) + return -ETIMEDOUT; + } + return 0; +} + +static void mixer_disable_sync(struct mixer_context *ctx) +{ + mixer_reg_writemask(ctx, MXR_STATUS, 0, MXR_STATUS_SYNC_ENABLE); +} + +static void mixer_enable_sync(struct mixer_context *ctx) +{ + if (ctx->mxr_ver == MXR_VER_16_0_33_0 || + ctx->mxr_ver == MXR_VER_128_0_0_184) + mixer_reg_writemask(ctx, MXR_CFG, ~0, MXR_CFG_LAYER_UPDATE); + mixer_reg_writemask(ctx, MXR_STATUS, ~0, MXR_STATUS_SYNC_ENABLE); if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags)) - vp_reg_write(ctx, VP_SHADOW_UPDATE, enable ? - VP_SHADOW_UPDATE_ENABLE : 0); + vp_reg_write(ctx, VP_SHADOW_UPDATE, VP_SHADOW_UPDATE_ENABLE); } static void mixer_cfg_scan(struct mixer_context *ctx, int width, int height) @@ -498,7 +546,6 @@ static void vp_video_buffer(struct mixer_context *ctx, spin_lock_irqsave(&ctx->reg_slock, flags); - vp_reg_write(ctx, VP_SHADOW_UPDATE, 1); /* interlace or progressive scan mode */ val = (test_bit(MXR_BIT_INTERLACE, &ctx->flags) ? 
~0 : 0); vp_reg_writemask(ctx, VP_MODE, val, VP_MODE_LINE_SKIP); @@ -553,11 +600,6 @@ static void vp_video_buffer(struct mixer_context *ctx, vp_regs_dump(ctx); } -static void mixer_layer_update(struct mixer_context *ctx) -{ - mixer_reg_writemask(ctx, MXR_CFG, ~0, MXR_CFG_LAYER_UPDATE); -} - static void mixer_graph_buffer(struct mixer_context *ctx, struct exynos_drm_plane *plane) { @@ -640,11 +682,6 @@ static void mixer_graph_buffer(struct mixer_context *ctx, mixer_cfg_layer(ctx, win, priority, true); mixer_cfg_gfx_blend(ctx, win, pixel_alpha, state->base.alpha); - /* layer update mandatory for mixer 16.0.33.0 */ - if (ctx->mxr_ver == MXR_VER_16_0_33_0 || - ctx->mxr_ver == MXR_VER_128_0_0_184) - mixer_layer_update(ctx); - spin_unlock_irqrestore(&ctx->reg_slock, flags); mixer_regs_dump(ctx); @@ -709,7 +746,7 @@ static void mixer_win_reset(struct mixer_context *ctx) static irqreturn_t mixer_irq_handler(int irq, void *arg) { struct mixer_context *ctx = arg; - u32 val, base, shadow; + u32 val; spin_lock(&ctx->reg_slock); @@ -723,26 +760,9 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg) val &= ~MXR_INT_STATUS_VSYNC; /* interlace scan need to check shadow register */ - if (test_bit(MXR_BIT_INTERLACE, &ctx->flags)) { - if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags) && - vp_reg_read(ctx, VP_SHADOW_UPDATE)) - goto out; - - base = mixer_reg_read(ctx, MXR_CFG); - shadow = mixer_reg_read(ctx, MXR_CFG_S); - if (base != shadow) - goto out; - - base = mixer_reg_read(ctx, MXR_GRAPHIC_BASE(0)); - shadow = mixer_reg_read(ctx, MXR_GRAPHIC_BASE_S(0)); - if (base != shadow) - goto out; - - base = mixer_reg_read(ctx, MXR_GRAPHIC_BASE(1)); - shadow = mixer_reg_read(ctx, MXR_GRAPHIC_BASE_S(1)); - if (base != shadow) - goto out; - } + if (test_bit(MXR_BIT_INTERLACE, &ctx->flags) + && !mixer_is_synced(ctx)) + goto out; drm_crtc_handle_vblank(&ctx->crtc->base); } @@ -917,12 +937,14 @@ static void mixer_disable_vblank(struct exynos_drm_crtc *crtc) static void mixer_atomic_begin(struct exynos_drm_crtc *crtc) { - struct mixer_context *mixer_ctx = crtc->ctx; + struct mixer_context *ctx = crtc->ctx; - if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags)) + if (!test_bit(MXR_BIT_POWERED, &ctx->flags)) return; - mixer_vsync_set_update(mixer_ctx, false); + if (mixer_wait_for_sync(ctx)) + dev_err(ctx->dev, "timeout waiting for VSYNC\n"); + mixer_disable_sync(ctx); } static void mixer_update_plane(struct exynos_drm_crtc *crtc, @@ -964,7 +986,7 @@ static void mixer_atomic_flush(struct exynos_drm_crtc *crtc) if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags)) return; - mixer_vsync_set_update(mixer_ctx, true); + mixer_enable_sync(mixer_ctx); exynos_crtc_handle_event(crtc); } @@ -979,7 +1001,7 @@ static void mixer_enable(struct exynos_drm_crtc *crtc) exynos_drm_pipe_clk_enable(crtc, true); - mixer_vsync_set_update(ctx, false); + mixer_disable_sync(ctx); mixer_reg_writemask(ctx, MXR_STATUS, ~0, MXR_STATUS_SOFT_RESET); @@ -992,7 +1014,7 @@ static void mixer_enable(struct exynos_drm_crtc *crtc) mixer_commit(ctx); - mixer_vsync_set_update(ctx, true); + mixer_enable_sync(ctx); set_bit(MXR_BIT_POWERED, &ctx->flags); } diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c index bf256971063d..83c841b50272 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c @@ -94,7 +94,7 @@ static void fsl_dcu_drm_crtc_mode_set_nofb(struct drm_crtc *crtc) drm_display_mode_to_videomode(mode, &vm); /* INV_PXCK as default (most display sample data on 
rising edge) */ - if (!(con->display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_POSEDGE)) + if (!(con->display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE)) pol |= DCU_SYN_POL_INV_PXCK; if (vm.flags & DISPLAY_FLAGS_HSYNC_LOW) diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 1787e1299b1b..60de05f3fa60 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -46,6 +46,7 @@ i915-y := i915_drv.o \ i915_sw_fence.o \ i915_syncmap.o \ i915_sysfs.o \ + i915_user_extensions.o \ intel_csr.o \ intel_device_info.o \ intel_pm.o \ @@ -56,6 +57,15 @@ i915-$(CONFIG_COMPAT) += i915_ioc32.o i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o intel_pipe_crc.o i915-$(CONFIG_PERF_EVENTS) += i915_pmu.o +# Test the headers are compilable as standalone units +i915-$(CONFIG_DRM_I915_WERROR) += \ + test_i915_active_types_standalone.o \ + test_i915_gem_context_types_standalone.o \ + test_i915_timeline_types_standalone.o \ + test_intel_context_types_standalone.o \ + test_intel_engine_types_standalone.o \ + test_intel_workarounds_types_standalone.o + # GEM code i915-y += \ i915_active.o \ @@ -77,6 +87,7 @@ i915-y += \ i915_gem_tiling.o \ i915_gem_userptr.o \ i915_gemfs.o \ + i915_globals.o \ i915_query.o \ i915_request.o \ i915_scheduler.o \ @@ -84,6 +95,7 @@ i915-y += \ i915_trace_points.o \ i915_vma.o \ intel_breadcrumbs.o \ + intel_context.o \ intel_engine_cs.o \ intel_hangcheck.o \ intel_lrc.o \ diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index 35b4ec3f7618..cf4a1ecf6853 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c @@ -391,12 +391,12 @@ struct cmd_info { #define F_POST_HANDLE (1<<2) u32 flag; -#define R_RCS (1 << RCS) -#define R_VCS1 (1 << VCS) -#define R_VCS2 (1 << VCS2) +#define R_RCS BIT(RCS0) +#define R_VCS1 BIT(VCS0) +#define R_VCS2 BIT(VCS1) #define R_VCS (R_VCS1 | R_VCS2) -#define R_BCS (1 << BCS) -#define R_VECS (1 << VECS) +#define R_BCS BIT(BCS0) +#define R_VECS BIT(VECS0) #define R_ALL (R_RCS | R_VCS | R_BCS | R_VECS) /* rings that support this cmd: BLT/RCS/VCS/VECS */ u16 rings; @@ -558,7 +558,7 @@ static const struct decode_info decode_info_vebox = { }; static const struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = { - [RCS] = { + [RCS0] = { &decode_info_mi, NULL, NULL, @@ -569,7 +569,7 @@ static const struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = { NULL, }, - [VCS] = { + [VCS0] = { &decode_info_mi, NULL, NULL, @@ -580,7 +580,7 @@ static const struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = { NULL, }, - [BCS] = { + [BCS0] = { &decode_info_mi, NULL, &decode_info_2d, @@ -591,7 +591,7 @@ static const struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = { NULL, }, - [VECS] = { + [VECS0] = { &decode_info_mi, NULL, NULL, @@ -602,7 +602,7 @@ static const struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = { NULL, }, - [VCS2] = { + [VCS1] = { &decode_info_mi, NULL, NULL, @@ -631,8 +631,7 @@ static inline const struct cmd_info *find_cmd_entry(struct intel_gvt *gvt, struct cmd_entry *e; hash_for_each_possible(gvt->cmd_table, e, hlist, opcode) { - if ((opcode == e->info->opcode) && - (e->info->rings & (1 << ring_id))) + if (opcode == e->info->opcode && e->info->rings & BIT(ring_id)) return e->info; } return NULL; @@ -943,15 +942,12 @@ static int cmd_handler_lri(struct parser_exec_state *s) struct intel_gvt *gvt = s->vgpu->gvt; for (i = 1; i < cmd_len; i += 2) { - if (IS_BROADWELL(gvt->dev_priv) && - (s->ring_id != 
RCS)) { - if (s->ring_id == BCS && - cmd_reg(s, i) == - i915_mmio_reg_offset(DERRMR)) + if (IS_BROADWELL(gvt->dev_priv) && s->ring_id != RCS0) { + if (s->ring_id == BCS0 && + cmd_reg(s, i) == i915_mmio_reg_offset(DERRMR)) ret |= 0; else - ret |= (cmd_reg_inhibit(s, i)) ? - -EBADRQC : 0; + ret |= cmd_reg_inhibit(s, i) ? -EBADRQC : 0; } if (ret) break; @@ -1047,27 +1043,27 @@ struct cmd_interrupt_event { }; static struct cmd_interrupt_event cmd_interrupt_events[] = { - [RCS] = { + [RCS0] = { .pipe_control_notify = RCS_PIPE_CONTROL, .mi_flush_dw = INTEL_GVT_EVENT_RESERVED, .mi_user_interrupt = RCS_MI_USER_INTERRUPT, }, - [BCS] = { + [BCS0] = { .pipe_control_notify = INTEL_GVT_EVENT_RESERVED, .mi_flush_dw = BCS_MI_FLUSH_DW, .mi_user_interrupt = BCS_MI_USER_INTERRUPT, }, - [VCS] = { + [VCS0] = { .pipe_control_notify = INTEL_GVT_EVENT_RESERVED, .mi_flush_dw = VCS_MI_FLUSH_DW, .mi_user_interrupt = VCS_MI_USER_INTERRUPT, }, - [VCS2] = { + [VCS1] = { .pipe_control_notify = INTEL_GVT_EVENT_RESERVED, .mi_flush_dw = VCS2_MI_FLUSH_DW, .mi_user_interrupt = VCS2_MI_USER_INTERRUPT, }, - [VECS] = { + [VECS0] = { .pipe_control_notify = INTEL_GVT_EVENT_RESERVED, .mi_flush_dw = VECS_MI_FLUSH_DW, .mi_user_interrupt = VECS_MI_USER_INTERRUPT, diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c index 3e7e2b80c857..f27edf17b4ab 100644 --- a/drivers/gpu/drm/i915/gvt/dmabuf.c +++ b/drivers/gpu/drm/i915/gvt/dmabuf.c @@ -153,7 +153,7 @@ static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev, struct drm_i915_private *dev_priv = to_i915(dev); struct drm_i915_gem_object *obj; - obj = i915_gem_object_alloc(dev_priv); + obj = i915_gem_object_alloc(); if (obj == NULL) return NULL; diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c index 70494e394d2c..1a93472cb34e 100644 --- a/drivers/gpu/drm/i915/gvt/execlist.c +++ b/drivers/gpu/drm/i915/gvt/execlist.c @@ -47,17 +47,16 @@ ((a)->lrca == (b)->lrca)) static int context_switch_events[] = { - [RCS] = RCS_AS_CONTEXT_SWITCH, - [BCS] = BCS_AS_CONTEXT_SWITCH, - [VCS] = VCS_AS_CONTEXT_SWITCH, - [VCS2] = VCS2_AS_CONTEXT_SWITCH, - [VECS] = VECS_AS_CONTEXT_SWITCH, + [RCS0] = RCS_AS_CONTEXT_SWITCH, + [BCS0] = BCS_AS_CONTEXT_SWITCH, + [VCS0] = VCS_AS_CONTEXT_SWITCH, + [VCS1] = VCS2_AS_CONTEXT_SWITCH, + [VECS0] = VECS_AS_CONTEXT_SWITCH, }; -static int ring_id_to_context_switch_event(int ring_id) +static int ring_id_to_context_switch_event(unsigned int ring_id) { - if (WARN_ON(ring_id < RCS || - ring_id >= ARRAY_SIZE(context_switch_events))) + if (WARN_ON(ring_id >= ARRAY_SIZE(context_switch_events))) return -EINVAL; return context_switch_events[ring_id]; @@ -411,7 +410,7 @@ static int complete_execlist_workload(struct intel_vgpu_workload *workload) gvt_dbg_el("complete workload %p status %d\n", workload, workload->status); - if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) + if (workload->status || (vgpu->resetting_eng & BIT(ring_id))) goto out; if (!list_empty(workload_q_head(vgpu, ring_id))) { diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index bc64b810e0d5..dbc749617922 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -323,25 +323,25 @@ static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, } else { if (data & GEN6_GRDOM_RENDER) { gvt_dbg_mmio("vgpu%d: request RCS reset\n", vgpu->id); - engine_mask |= (1 << RCS); + engine_mask |= BIT(RCS0); } if (data & GEN6_GRDOM_MEDIA) { 
gvt_dbg_mmio("vgpu%d: request VCS reset\n", vgpu->id); - engine_mask |= (1 << VCS); + engine_mask |= BIT(VCS0); } if (data & GEN6_GRDOM_BLT) { gvt_dbg_mmio("vgpu%d: request BCS Reset\n", vgpu->id); - engine_mask |= (1 << BCS); + engine_mask |= BIT(BCS0); } if (data & GEN6_GRDOM_VECS) { gvt_dbg_mmio("vgpu%d: request VECS Reset\n", vgpu->id); - engine_mask |= (1 << VECS); + engine_mask |= BIT(VECS0); } if (data & GEN8_GRDOM_MEDIA2) { gvt_dbg_mmio("vgpu%d: request VCS2 Reset\n", vgpu->id); - if (HAS_BSD2(vgpu->gvt->dev_priv)) - engine_mask |= (1 << VCS2); + engine_mask |= BIT(VCS1); } + engine_mask &= INTEL_INFO(vgpu->gvt->dev_priv)->engine_mask; } /* vgpu_lock already hold by emulate mmio r/w */ @@ -1704,7 +1704,7 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, return 0; ret = intel_vgpu_select_submission_ops(vgpu, - ENGINE_MASK(ring_id), + BIT(ring_id), INTEL_VGPU_EXECLIST_SUBMISSION); if (ret) return ret; @@ -1724,19 +1724,19 @@ static int gvt_reg_tlb_control_handler(struct intel_vgpu *vgpu, switch (offset) { case 0x4260: - id = RCS; + id = RCS0; break; case 0x4264: - id = VCS; + id = VCS0; break; case 0x4268: - id = VCS2; + id = VCS1; break; case 0x426c: - id = BCS; + id = BCS0; break; case 0x4270: - id = VECS; + id = VECS0; break; default: return -EINVAL; @@ -1793,7 +1793,7 @@ static int ring_reset_ctl_write(struct intel_vgpu *vgpu, MMIO_F(prefix(BLT_RING_BASE), s, f, am, rm, d, r, w); \ MMIO_F(prefix(GEN6_BSD_RING_BASE), s, f, am, rm, d, r, w); \ MMIO_F(prefix(VEBOX_RING_BASE), s, f, am, rm, d, r, w); \ - if (HAS_BSD2(dev_priv)) \ + if (HAS_ENGINE(dev_priv, VCS1)) \ MMIO_F(prefix(GEN8_BSD2_RING_BASE), s, f, am, rm, d, r, w); \ } while (0) @@ -1848,7 +1848,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) MMIO_DH(GEN7_SC_INSTDONE, D_BDW_PLUS, mmio_read_from_hw, NULL); MMIO_GM_RDR(_MMIO(0x2148), D_ALL, NULL, NULL); - MMIO_GM_RDR(CCID, D_ALL, NULL, NULL); + MMIO_GM_RDR(CCID(RENDER_RING_BASE), D_ALL, NULL, NULL); MMIO_GM_RDR(_MMIO(0x12198), D_ALL, NULL, NULL); MMIO_D(GEN7_CXT_SIZE, D_ALL); diff --git a/drivers/gpu/drm/i915/gvt/interrupt.c b/drivers/gpu/drm/i915/gvt/interrupt.c index 67125c5eec6e..951681813230 100644 --- a/drivers/gpu/drm/i915/gvt/interrupt.c +++ b/drivers/gpu/drm/i915/gvt/interrupt.c @@ -536,7 +536,7 @@ static void gen8_init_irq( SET_BIT_INFO(irq, 4, VCS_MI_FLUSH_DW, INTEL_GVT_IRQ_INFO_GT1); SET_BIT_INFO(irq, 8, VCS_AS_CONTEXT_SWITCH, INTEL_GVT_IRQ_INFO_GT1); - if (HAS_BSD2(gvt->dev_priv)) { + if (HAS_ENGINE(gvt->dev_priv, VCS1)) { SET_BIT_INFO(irq, 16, VCS2_MI_USER_INTERRUPT, INTEL_GVT_IRQ_INFO_GT1); SET_BIT_INFO(irq, 20, VCS2_MI_FLUSH_DW, diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c index 7d84cfb9051a..76630fbe51b6 100644 --- a/drivers/gpu/drm/i915/gvt/mmio_context.c +++ b/drivers/gpu/drm/i915/gvt/mmio_context.c @@ -41,102 +41,102 @@ /* Raw offset is appened to each line for convenience. 
*/ static struct engine_mmio gen8_engine_mmio_list[] __cacheline_aligned = { - {RCS, GFX_MODE_GEN7, 0xffff, false}, /* 0x229c */ - {RCS, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */ - {RCS, HWSTAM, 0x0, false}, /* 0x2098 */ - {RCS, INSTPM, 0xffff, true}, /* 0x20c0 */ - {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 0), 0, false}, /* 0x24d0 */ - {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 1), 0, false}, /* 0x24d4 */ - {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 2), 0, false}, /* 0x24d8 */ - {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 3), 0, false}, /* 0x24dc */ - {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 4), 0, false}, /* 0x24e0 */ - {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 5), 0, false}, /* 0x24e4 */ - {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 6), 0, false}, /* 0x24e8 */ - {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 7), 0, false}, /* 0x24ec */ - {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 8), 0, false}, /* 0x24f0 */ - {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 9), 0, false}, /* 0x24f4 */ - {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 10), 0, false}, /* 0x24f8 */ - {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 11), 0, false}, /* 0x24fc */ - {RCS, CACHE_MODE_1, 0xffff, true}, /* 0x7004 */ - {RCS, GEN7_GT_MODE, 0xffff, true}, /* 0x7008 */ - {RCS, CACHE_MODE_0_GEN7, 0xffff, true}, /* 0x7000 */ - {RCS, GEN7_COMMON_SLICE_CHICKEN1, 0xffff, true}, /* 0x7010 */ - {RCS, HDC_CHICKEN0, 0xffff, true}, /* 0x7300 */ - {RCS, VF_GUARDBAND, 0xffff, true}, /* 0x83a4 */ - - {BCS, RING_GFX_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2229c */ - {BCS, RING_MI_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2209c */ - {BCS, RING_INSTPM(BLT_RING_BASE), 0xffff, false}, /* 0x220c0 */ - {BCS, RING_HWSTAM(BLT_RING_BASE), 0x0, false}, /* 0x22098 */ - {BCS, RING_EXCC(BLT_RING_BASE), 0x0, false}, /* 0x22028 */ - {RCS, INVALID_MMIO_REG, 0, false } /* Terminated */ + {RCS0, GFX_MODE_GEN7, 0xffff, false}, /* 0x229c */ + {RCS0, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */ + {RCS0, HWSTAM, 0x0, false}, /* 0x2098 */ + {RCS0, INSTPM, 0xffff, true}, /* 0x20c0 */ + {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 0), 0, false}, /* 0x24d0 */ + {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 1), 0, false}, /* 0x24d4 */ + {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 2), 0, false}, /* 0x24d8 */ + {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 3), 0, false}, /* 0x24dc */ + {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 4), 0, false}, /* 0x24e0 */ + {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 5), 0, false}, /* 0x24e4 */ + {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 6), 0, false}, /* 0x24e8 */ + {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 7), 0, false}, /* 0x24ec */ + {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 8), 0, false}, /* 0x24f0 */ + {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 9), 0, false}, /* 0x24f4 */ + {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 10), 0, false}, /* 0x24f8 */ + {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 11), 0, false}, /* 0x24fc */ + {RCS0, CACHE_MODE_1, 0xffff, true}, /* 0x7004 */ + {RCS0, GEN7_GT_MODE, 0xffff, true}, /* 0x7008 */ + {RCS0, CACHE_MODE_0_GEN7, 0xffff, true}, /* 0x7000 */ + {RCS0, GEN7_COMMON_SLICE_CHICKEN1, 0xffff, true}, /* 0x7010 */ + {RCS0, HDC_CHICKEN0, 0xffff, true}, /* 0x7300 */ + {RCS0, VF_GUARDBAND, 0xffff, true}, /* 0x83a4 */ + + {BCS0, RING_GFX_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2229c */ + {BCS0, RING_MI_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2209c */ + {BCS0, RING_INSTPM(BLT_RING_BASE), 0xffff, false}, /* 0x220c0 */ + {BCS0, 
RING_HWSTAM(BLT_RING_BASE), 0x0, false}, /* 0x22098 */ + {BCS0, RING_EXCC(BLT_RING_BASE), 0x0, false}, /* 0x22028 */ + {RCS0, INVALID_MMIO_REG, 0, false } /* Terminated */ }; static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = { - {RCS, GFX_MODE_GEN7, 0xffff, false}, /* 0x229c */ - {RCS, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */ - {RCS, HWSTAM, 0x0, false}, /* 0x2098 */ - {RCS, INSTPM, 0xffff, true}, /* 0x20c0 */ - {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 0), 0, false}, /* 0x24d0 */ - {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 1), 0, false}, /* 0x24d4 */ - {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 2), 0, false}, /* 0x24d8 */ - {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 3), 0, false}, /* 0x24dc */ - {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 4), 0, false}, /* 0x24e0 */ - {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 5), 0, false}, /* 0x24e4 */ - {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 6), 0, false}, /* 0x24e8 */ - {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 7), 0, false}, /* 0x24ec */ - {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 8), 0, false}, /* 0x24f0 */ - {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 9), 0, false}, /* 0x24f4 */ - {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 10), 0, false}, /* 0x24f8 */ - {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 11), 0, false}, /* 0x24fc */ - {RCS, CACHE_MODE_1, 0xffff, true}, /* 0x7004 */ - {RCS, GEN7_GT_MODE, 0xffff, true}, /* 0x7008 */ - {RCS, CACHE_MODE_0_GEN7, 0xffff, true}, /* 0x7000 */ - {RCS, GEN7_COMMON_SLICE_CHICKEN1, 0xffff, true}, /* 0x7010 */ - {RCS, HDC_CHICKEN0, 0xffff, true}, /* 0x7300 */ - {RCS, VF_GUARDBAND, 0xffff, true}, /* 0x83a4 */ - - {RCS, GEN8_PRIVATE_PAT_LO, 0, false}, /* 0x40e0 */ - {RCS, GEN8_PRIVATE_PAT_HI, 0, false}, /* 0x40e4 */ - {RCS, GEN8_CS_CHICKEN1, 0xffff, true}, /* 0x2580 */ - {RCS, COMMON_SLICE_CHICKEN2, 0xffff, true}, /* 0x7014 */ - {RCS, GEN9_CS_DEBUG_MODE1, 0xffff, false}, /* 0x20ec */ - {RCS, GEN8_L3SQCREG4, 0, false}, /* 0xb118 */ - {RCS, GEN7_HALF_SLICE_CHICKEN1, 0xffff, true}, /* 0xe100 */ - {RCS, HALF_SLICE_CHICKEN2, 0xffff, true}, /* 0xe180 */ - {RCS, HALF_SLICE_CHICKEN3, 0xffff, true}, /* 0xe184 */ - {RCS, GEN9_HALF_SLICE_CHICKEN5, 0xffff, true}, /* 0xe188 */ - {RCS, GEN9_HALF_SLICE_CHICKEN7, 0xffff, true}, /* 0xe194 */ - {RCS, GEN8_ROW_CHICKEN, 0xffff, true}, /* 0xe4f0 */ - {RCS, TRVATTL3PTRDW(0), 0, false}, /* 0x4de0 */ - {RCS, TRVATTL3PTRDW(1), 0, false}, /* 0x4de4 */ - {RCS, TRNULLDETCT, 0, false}, /* 0x4de8 */ - {RCS, TRINVTILEDETCT, 0, false}, /* 0x4dec */ - {RCS, TRVADR, 0, false}, /* 0x4df0 */ - {RCS, TRTTE, 0, false}, /* 0x4df4 */ - - {BCS, RING_GFX_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2229c */ - {BCS, RING_MI_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2209c */ - {BCS, RING_INSTPM(BLT_RING_BASE), 0xffff, false}, /* 0x220c0 */ - {BCS, RING_HWSTAM(BLT_RING_BASE), 0x0, false}, /* 0x22098 */ - {BCS, RING_EXCC(BLT_RING_BASE), 0x0, false}, /* 0x22028 */ - - {VCS2, RING_EXCC(GEN8_BSD2_RING_BASE), 0xffff, false}, /* 0x1c028 */ - - {VECS, RING_EXCC(VEBOX_RING_BASE), 0xffff, false}, /* 0x1a028 */ - - {RCS, GEN8_HDC_CHICKEN1, 0xffff, true}, /* 0x7304 */ - {RCS, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */ - {RCS, GEN7_UCGCTL4, 0x0, false}, /* 0x940c */ - {RCS, GAMT_CHKN_BIT_REG, 0x0, false}, /* 0x4ab8 */ - - {RCS, GEN9_GAMT_ECO_REG_RW_IA, 0x0, false}, /* 0x4ab0 */ - {RCS, GEN9_CSFE_CHICKEN1_RCS, 0xffff, false}, /* 0x20d4 */ - - {RCS, GEN8_GARBCNTL, 0x0, false}, /* 0xb004 */ - {RCS, GEN7_FF_THREAD_MODE, 0x0, false}, /* 0x20a0 */ - {RCS, 
FF_SLICE_CS_CHICKEN2, 0xffff, false}, /* 0x20e4 */ - {RCS, INVALID_MMIO_REG, 0, false } /* Terminated */ + {RCS0, GFX_MODE_GEN7, 0xffff, false}, /* 0x229c */ + {RCS0, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */ + {RCS0, HWSTAM, 0x0, false}, /* 0x2098 */ + {RCS0, INSTPM, 0xffff, true}, /* 0x20c0 */ + {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 0), 0, false}, /* 0x24d0 */ + {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 1), 0, false}, /* 0x24d4 */ + {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 2), 0, false}, /* 0x24d8 */ + {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 3), 0, false}, /* 0x24dc */ + {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 4), 0, false}, /* 0x24e0 */ + {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 5), 0, false}, /* 0x24e4 */ + {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 6), 0, false}, /* 0x24e8 */ + {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 7), 0, false}, /* 0x24ec */ + {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 8), 0, false}, /* 0x24f0 */ + {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 9), 0, false}, /* 0x24f4 */ + {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 10), 0, false}, /* 0x24f8 */ + {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 11), 0, false}, /* 0x24fc */ + {RCS0, CACHE_MODE_1, 0xffff, true}, /* 0x7004 */ + {RCS0, GEN7_GT_MODE, 0xffff, true}, /* 0x7008 */ + {RCS0, CACHE_MODE_0_GEN7, 0xffff, true}, /* 0x7000 */ + {RCS0, GEN7_COMMON_SLICE_CHICKEN1, 0xffff, true}, /* 0x7010 */ + {RCS0, HDC_CHICKEN0, 0xffff, true}, /* 0x7300 */ + {RCS0, VF_GUARDBAND, 0xffff, true}, /* 0x83a4 */ + + {RCS0, GEN8_PRIVATE_PAT_LO, 0, false}, /* 0x40e0 */ + {RCS0, GEN8_PRIVATE_PAT_HI, 0, false}, /* 0x40e4 */ + {RCS0, GEN8_CS_CHICKEN1, 0xffff, true}, /* 0x2580 */ + {RCS0, COMMON_SLICE_CHICKEN2, 0xffff, true}, /* 0x7014 */ + {RCS0, GEN9_CS_DEBUG_MODE1, 0xffff, false}, /* 0x20ec */ + {RCS0, GEN8_L3SQCREG4, 0, false}, /* 0xb118 */ + {RCS0, GEN7_HALF_SLICE_CHICKEN1, 0xffff, true}, /* 0xe100 */ + {RCS0, HALF_SLICE_CHICKEN2, 0xffff, true}, /* 0xe180 */ + {RCS0, HALF_SLICE_CHICKEN3, 0xffff, true}, /* 0xe184 */ + {RCS0, GEN9_HALF_SLICE_CHICKEN5, 0xffff, true}, /* 0xe188 */ + {RCS0, GEN9_HALF_SLICE_CHICKEN7, 0xffff, true}, /* 0xe194 */ + {RCS0, GEN8_ROW_CHICKEN, 0xffff, true}, /* 0xe4f0 */ + {RCS0, TRVATTL3PTRDW(0), 0, false}, /* 0x4de0 */ + {RCS0, TRVATTL3PTRDW(1), 0, false}, /* 0x4de4 */ + {RCS0, TRNULLDETCT, 0, false}, /* 0x4de8 */ + {RCS0, TRINVTILEDETCT, 0, false}, /* 0x4dec */ + {RCS0, TRVADR, 0, false}, /* 0x4df0 */ + {RCS0, TRTTE, 0, false}, /* 0x4df4 */ + + {BCS0, RING_GFX_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2229c */ + {BCS0, RING_MI_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2209c */ + {BCS0, RING_INSTPM(BLT_RING_BASE), 0xffff, false}, /* 0x220c0 */ + {BCS0, RING_HWSTAM(BLT_RING_BASE), 0x0, false}, /* 0x22098 */ + {BCS0, RING_EXCC(BLT_RING_BASE), 0x0, false}, /* 0x22028 */ + + {VCS1, RING_EXCC(GEN8_BSD2_RING_BASE), 0xffff, false}, /* 0x1c028 */ + + {VECS0, RING_EXCC(VEBOX_RING_BASE), 0xffff, false}, /* 0x1a028 */ + + {RCS0, GEN8_HDC_CHICKEN1, 0xffff, true}, /* 0x7304 */ + {RCS0, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */ + {RCS0, GEN7_UCGCTL4, 0x0, false}, /* 0x940c */ + {RCS0, GAMT_CHKN_BIT_REG, 0x0, false}, /* 0x4ab8 */ + + {RCS0, GEN9_GAMT_ECO_REG_RW_IA, 0x0, false}, /* 0x4ab0 */ + {RCS0, GEN9_CSFE_CHICKEN1_RCS, 0xffff, false}, /* 0x20d4 */ + + {RCS0, GEN8_GARBCNTL, 0x0, false}, /* 0xb004 */ + {RCS0, GEN7_FF_THREAD_MODE, 0x0, false}, /* 0x20a0 */ + {RCS0, FF_SLICE_CS_CHICKEN2, 0xffff, false}, /* 0x20e4 */ + {RCS0, INVALID_MMIO_REG, 0, false } /* 
Terminated */ }; static struct { @@ -149,11 +149,11 @@ static void load_render_mocs(struct drm_i915_private *dev_priv) { i915_reg_t offset; u32 regs[] = { - [RCS] = 0xc800, - [VCS] = 0xc900, - [VCS2] = 0xca00, - [BCS] = 0xcc00, - [VECS] = 0xcb00, + [RCS0] = 0xc800, + [VCS0] = 0xc900, + [VCS1] = 0xca00, + [BCS0] = 0xcc00, + [VECS0] = 0xcb00, }; int ring_id, i; @@ -301,7 +301,7 @@ int intel_vgpu_restore_inhibit_context(struct intel_vgpu *vgpu, goto out; /* no MOCS register in context except render engine */ - if (req->engine->id != RCS) + if (req->engine->id != RCS0) goto out; ret = restore_render_mocs_control_for_inhibit(vgpu, req); @@ -327,15 +327,16 @@ out: static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id) { struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; + struct intel_uncore *uncore = &dev_priv->uncore; struct intel_vgpu_submission *s = &vgpu->submission; enum forcewake_domains fw; i915_reg_t reg; u32 regs[] = { - [RCS] = 0x4260, - [VCS] = 0x4264, - [VCS2] = 0x4268, - [BCS] = 0x426c, - [VECS] = 0x4270, + [RCS0] = 0x4260, + [VCS0] = 0x4264, + [VCS1] = 0x4268, + [BCS0] = 0x426c, + [VECS0] = 0x4270, }; if (WARN_ON(ring_id >= ARRAY_SIZE(regs))) @@ -351,21 +352,21 @@ static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id) * otherwise device can go to RC6 state and interrupt invalidation * process */ - fw = intel_uncore_forcewake_for_reg(dev_priv, reg, + fw = intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ | FW_REG_WRITE); - if (ring_id == RCS && (INTEL_GEN(dev_priv) >= 9)) + if (ring_id == RCS0 && INTEL_GEN(dev_priv) >= 9) fw |= FORCEWAKE_RENDER; - intel_uncore_forcewake_get(dev_priv, fw); + intel_uncore_forcewake_get(uncore, fw); - I915_WRITE_FW(reg, 0x1); + intel_uncore_write_fw(uncore, reg, 0x1); - if (wait_for_atomic((I915_READ_FW(reg) == 0), 50)) + if (wait_for_atomic((intel_uncore_read_fw(uncore, reg) == 0), 50)) gvt_vgpu_err("timeout in invalidate ring (%d) tlb\n", ring_id); else vgpu_vreg_t(vgpu, reg) = 0; - intel_uncore_forcewake_put(dev_priv, fw); + intel_uncore_forcewake_put(uncore, fw); gvt_dbg_core("invalidate TLB for ring %d\n", ring_id); } @@ -378,11 +379,11 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next, u32 old_v, new_v; u32 regs[] = { - [RCS] = 0xc800, - [VCS] = 0xc900, - [VCS2] = 0xca00, - [BCS] = 0xcc00, - [VECS] = 0xcb00, + [RCS0] = 0xc800, + [VCS0] = 0xc900, + [VCS1] = 0xca00, + [BCS0] = 0xcc00, + [VECS0] = 0xcb00, }; int i; @@ -390,8 +391,10 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next, if (WARN_ON(ring_id >= ARRAY_SIZE(regs))) return; - if ((IS_KABYLAKE(dev_priv) || IS_BROXTON(dev_priv) - || IS_COFFEELAKE(dev_priv)) && ring_id == RCS) + if (ring_id == RCS0 && + (IS_KABYLAKE(dev_priv) || + IS_BROXTON(dev_priv) || + IS_COFFEELAKE(dev_priv))) return; if (!pre && !gen9_render_mocs.initialized) @@ -414,7 +417,7 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next, offset.reg += 4; } - if (ring_id == RCS) { + if (ring_id == RCS0) { l3_offset.reg = 0xb020; for (i = 0; i < GEN9_MOCS_SIZE / 2; i++) { if (pre) @@ -492,7 +495,8 @@ static void switch_mmio(struct intel_vgpu *pre, * itself. */ if (mmio->in_context && - !is_inhibit_context(&s->shadow_ctx->__engine[ring_id])) + !is_inhibit_context(intel_context_lookup(s->shadow_ctx, + dev_priv->engine[ring_id]))) continue; if (mmio->mask) @@ -549,9 +553,9 @@ void intel_gvt_switch_mmio(struct intel_vgpu *pre, * performace for batch mmio read/write, so we need * handle forcewake mannually. 
*/ - intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); switch_mmio(pre, next, ring_id); - intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); } /** diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index 1bb8f936fdaa..3faf2438b9bc 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -93,7 +93,7 @@ static void sr_oa_regs(struct intel_vgpu_workload *workload, i915_mmio_reg_offset(EU_PERF_CNTL6), }; - if (workload->ring_id != RCS) + if (workload->ring_id != RCS0) return; if (save) { @@ -149,7 +149,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload) COPY_REG_MASKED(ctx_ctrl); COPY_REG(ctx_timestamp); - if (ring_id == RCS) { + if (ring_id == RCS0) { COPY_REG(bb_per_ctx_ptr); COPY_REG(rcs_indirect_ctx); COPY_REG(rcs_indirect_ctx_offset); @@ -177,7 +177,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload) context_page_num = context_page_num >> PAGE_SHIFT; - if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS) + if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS0) context_page_num = 19; i = 2; @@ -440,8 +440,7 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload) if (ret) goto err_unpin; - if ((workload->ring_id == RCS) && - (workload->wa_ctx.indirect_ctx.size != 0)) { + if (workload->ring_id == RCS0 && workload->wa_ctx.indirect_ctx.size) { ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx); if (ret) goto err_shadow; @@ -791,7 +790,7 @@ static void update_guest_context(struct intel_vgpu_workload *workload) context_page_num = rq->engine->context_size; context_page_num = context_page_num >> PAGE_SHIFT; - if (IS_BROADWELL(gvt->dev_priv) && rq->engine->id == RCS) + if (IS_BROADWELL(gvt->dev_priv) && rq->engine->id == RCS0) context_page_num = 19; i = 2; @@ -891,8 +890,8 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id) workload->status = 0; } - if (!workload->status && !(vgpu->resetting_eng & - ENGINE_MASK(ring_id))) { + if (!workload->status && + !(vgpu->resetting_eng & BIT(ring_id))) { update_guest_context(workload); for_each_set_bit(event, workload->pending_events, @@ -915,7 +914,7 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id) list_del_init(&workload->list); - if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) { + if (workload->status || vgpu->resetting_eng & BIT(ring_id)) { /* if workload->status is not successful means HW GPU * has occurred GPU hang or something wrong with i915/GVT, * and GVT won't inject context switch interrupt to guest. @@ -929,7 +928,7 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id) * cleaned up during the resetting process later, so doing * the workload clean up here doesn't have any impact. 
**/ - intel_vgpu_clean_workloads(vgpu, ENGINE_MASK(ring_id)); + intel_vgpu_clean_workloads(vgpu, BIT(ring_id)); } workload->complete(workload); @@ -989,7 +988,7 @@ static int workload_thread(void *priv) workload->ring_id, workload); if (need_force_wake) - intel_uncore_forcewake_get(gvt->dev_priv, + intel_uncore_forcewake_get(&gvt->dev_priv->uncore, FORCEWAKE_ALL); ret = dispatch_workload(workload); @@ -1011,7 +1010,7 @@ complete: complete_current_workload(gvt, ring_id); if (need_force_wake) - intel_uncore_forcewake_put(gvt->dev_priv, + intel_uncore_forcewake_put(&gvt->dev_priv->uncore, FORCEWAKE_ALL); intel_runtime_pm_put_unchecked(gvt->dev_priv); @@ -1102,9 +1101,9 @@ i915_context_ppgtt_root_restore(struct intel_vgpu_submission *s) struct i915_hw_ppgtt *i915_ppgtt = s->shadow_ctx->ppgtt; int i; - if (i915_vm_is_48bit(&i915_ppgtt->vm)) + if (i915_vm_is_4lvl(&i915_ppgtt->vm)) { px_dma(&i915_ppgtt->pml4) = s->i915_context_pml4; - else { + } else { for (i = 0; i < GEN8_3LVL_PDPES; i++) px_dma(i915_ppgtt->pdp.page_directory[i]) = s->i915_context_pdps[i]; @@ -1155,7 +1154,7 @@ i915_context_ppgtt_root_save(struct intel_vgpu_submission *s) struct i915_hw_ppgtt *i915_ppgtt = s->shadow_ctx->ppgtt; int i; - if (i915_vm_is_48bit(&i915_ppgtt->vm)) + if (i915_vm_is_4lvl(&i915_ppgtt->vm)) s->i915_context_pml4 = px_dma(&i915_ppgtt->pml4); else { for (i = 0; i < GEN8_3LVL_PDPES; i++) @@ -1438,7 +1437,7 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id, workload->rb_start = start; workload->rb_ctl = ctl; - if (ring_id == RCS) { + if (ring_id == RCS0) { intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa + RING_CTX_OFF(bb_per_ctx_ptr.val), &per_ctx, 4); intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa + diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c index 720e2b10adaa..314e40121e47 100644 --- a/drivers/gpu/drm/i915/gvt/vgpu.c +++ b/drivers/gpu/drm/i915/gvt/vgpu.c @@ -44,7 +44,7 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu) vgpu_vreg_t(vgpu, vgtif_reg(display_ready)) = 0; vgpu_vreg_t(vgpu, vgtif_reg(vgt_id)) = vgpu->id; - vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) = VGT_CAPS_FULL_48BIT_PPGTT; + vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) = VGT_CAPS_FULL_PPGTT; vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) |= VGT_CAPS_HWSP_EMULATION; vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) |= VGT_CAPS_HUGE_GTT; diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c index 215b6ff8aa73..863ae12707ba 100644 --- a/drivers/gpu/drm/i915/i915_active.c +++ b/drivers/gpu/drm/i915/i915_active.c @@ -6,6 +6,7 @@ #include "i915_drv.h" #include "i915_active.h" +#include "i915_globals.h" #define BKL(ref) (&(ref)->i915->drm.struct_mutex) @@ -17,6 +18,7 @@ * nodes from a local slab cache to hopefully reduce the fragmentation. 
*/ static struct i915_global_active { + struct i915_global base; struct kmem_cache *slab_cache; } global; @@ -163,17 +165,25 @@ int i915_active_ref(struct i915_active *ref, struct i915_request *rq) { struct i915_active_request *active; + int err = 0; + + /* Prevent reaping in case we malloc/wait while building the tree */ + i915_active_acquire(ref); active = active_instance(ref, timeline); - if (IS_ERR(active)) - return PTR_ERR(active); + if (IS_ERR(active)) { + err = PTR_ERR(active); + goto out; + } if (!i915_active_request_isset(active)) ref->count++; __i915_active_request_set(active, rq); GEM_BUG_ON(!ref->count); - return 0; +out: + i915_active_release(ref); + return err; } bool i915_active_acquire(struct i915_active *ref) @@ -223,19 +233,25 @@ int i915_request_await_active_request(struct i915_request *rq, int i915_request_await_active(struct i915_request *rq, struct i915_active *ref) { struct active_node *it, *n; - int ret; + int err = 0; - ret = i915_request_await_active_request(rq, &ref->last); - if (ret) - return ret; + /* await allocates and so we need to avoid hitting the shrinker */ + if (i915_active_acquire(ref)) + goto out; /* was idle */ + + err = i915_request_await_active_request(rq, &ref->last); + if (err) + goto out; rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) { - ret = i915_request_await_active_request(rq, &it->base); - if (ret) - return ret; + err = i915_request_await_active_request(rq, &it->base); + if (err) + goto out; } - return 0; +out: + i915_active_release(ref); + return err; } #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) @@ -271,16 +287,27 @@ void i915_active_retire_noop(struct i915_active_request *active, #include "selftests/i915_active.c" #endif +static void i915_global_active_shrink(void) +{ + kmem_cache_shrink(global.slab_cache); +} + +static void i915_global_active_exit(void) +{ + kmem_cache_destroy(global.slab_cache); +} + +static struct i915_global_active global = { { + .shrink = i915_global_active_shrink, + .exit = i915_global_active_exit, +} }; + int __init i915_global_active_init(void) { global.slab_cache = KMEM_CACHE(active_node, SLAB_HWCACHE_ALIGN); if (!global.slab_cache) return -ENOMEM; + i915_global_register(&global.base); return 0; } - -void __exit i915_global_active_exit(void) -{ - kmem_cache_destroy(global.slab_cache); -} diff --git a/drivers/gpu/drm/i915/i915_active.h b/drivers/gpu/drm/i915/i915_active.h index 12b5c1d287d1..7d758719ce39 100644 --- a/drivers/gpu/drm/i915/i915_active.h +++ b/drivers/gpu/drm/i915/i915_active.h @@ -108,19 +108,6 @@ i915_active_request_set_retire_fn(struct i915_active_request *active, active->retire = fn ?: i915_active_retire_noop; } -static inline struct i915_request * -__i915_active_request_peek(const struct i915_active_request *active) -{ - /* - * Inside the error capture (running with the driver in an unknown - * state), we want to bend the rules slightly (a lot). - * - * Work is in progress to make it safer, in the meantime this keeps - * the known issue from spamming the logs. 
- */ - return rcu_dereference_protected(active->request, 1); -} - /** * i915_active_request_raw - return the active request * @active - the active tracker @@ -419,7 +406,4 @@ void i915_active_fini(struct i915_active *ref); static inline void i915_active_fini(struct i915_active *ref) { } #endif -int i915_global_active_init(void); -void i915_global_active_exit(void); - #endif /* _I915_ACTIVE_H_ */ diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c index 33e8eed64423..503d548a55f7 100644 --- a/drivers/gpu/drm/i915/i915_cmd_parser.c +++ b/drivers/gpu/drm/i915/i915_cmd_parser.c @@ -868,8 +868,8 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine) if (!IS_GEN(engine->i915, 7)) return; - switch (engine->id) { - case RCS: + switch (engine->class) { + case RENDER_CLASS: if (IS_HASWELL(engine->i915)) { cmd_tables = hsw_render_ring_cmds; cmd_table_count = @@ -889,12 +889,12 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine) engine->get_cmd_length_mask = gen7_render_get_cmd_length_mask; break; - case VCS: + case VIDEO_DECODE_CLASS: cmd_tables = gen7_video_cmds; cmd_table_count = ARRAY_SIZE(gen7_video_cmds); engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask; break; - case BCS: + case COPY_ENGINE_CLASS: if (IS_HASWELL(engine->i915)) { cmd_tables = hsw_blt_ring_cmds; cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmds); @@ -913,14 +913,14 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine) engine->get_cmd_length_mask = gen7_blt_get_cmd_length_mask; break; - case VECS: + case VIDEO_ENHANCEMENT_CLASS: cmd_tables = hsw_vebox_cmds; cmd_table_count = ARRAY_SIZE(hsw_vebox_cmds); /* VECS can use the same length_mask function as VCS */ engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask; break; default: - MISSING_CASE(engine->id); + MISSING_CASE(engine->class); return; } diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 852ff74ef76d..652f65d2e131 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -388,12 +388,9 @@ static void print_context_stats(struct seq_file *m, struct i915_gem_context *ctx; list_for_each_entry(ctx, &i915->contexts.list, link) { - struct intel_engine_cs *engine; - enum intel_engine_id id; - - for_each_engine(engine, i915, id) { - struct intel_context *ce = to_intel_context(ctx, engine); + struct intel_context *ce; + list_for_each_entry(ce, &ctx->active_engines, active_link) { if (ce->state) per_file_stats(0, ce->state->obj, &kstats); if (ce->ring) @@ -412,9 +409,8 @@ static void print_context_stats(struct seq_file *m, rcu_read_lock(); task = pid_task(ctx->pid ?: file->pid, PIDTYPE_PID); - snprintf(name, sizeof(name), "%s/%d", - task ? task->comm : "<unknown>", - ctx->user_handle); + snprintf(name, sizeof(name), "%s", + task ? 
task->comm : "<unknown>"); rcu_read_unlock(); print_file_stats(m, name, stats); @@ -884,7 +880,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data) for_each_engine(engine, dev_priv, id) { seq_printf(m, "Graphics Interrupt mask (%s): %08x\n", - engine->name, I915_READ_IMR(engine)); + engine->name, ENGINE_READ(engine, RING_IMR)); } } @@ -1097,7 +1093,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused) } /* RPSTAT1 is in the GT power well */ - intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); reqf = I915_READ(GEN6_RPNSWREQ); if (INTEL_GEN(dev_priv) >= 9) @@ -1125,7 +1121,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused) cagf = intel_gpu_freq(dev_priv, intel_get_cagf(dev_priv, rpstat)); - intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); if (INTEL_GEN(dev_priv) >= 11) { pm_ier = I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE); @@ -1281,14 +1277,11 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused) intel_wakeref_t wakeref; enum intel_engine_id id; + seq_printf(m, "Reset flags: %lx\n", dev_priv->gpu_error.flags); if (test_bit(I915_WEDGED, &dev_priv->gpu_error.flags)) - seq_puts(m, "Wedged\n"); + seq_puts(m, "\tWedged\n"); if (test_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags)) - seq_puts(m, "Reset in progress: struct_mutex backoff\n"); - if (waitqueue_active(&dev_priv->gpu_error.wait_queue)) - seq_puts(m, "Waiter holding struct mutex\n"); - if (waitqueue_active(&dev_priv->gpu_error.reset_queue)) - seq_puts(m, "struct_mutex blocked for reset\n"); + seq_puts(m, "\tDevice (global) reset in progress\n"); if (!i915_modparams.enable_hangcheck) { seq_puts(m, "Hangcheck disabled\n"); @@ -1298,10 +1291,10 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused) with_intel_runtime_pm(dev_priv, wakeref) { for_each_engine(engine, dev_priv, id) { acthd[id] = intel_engine_get_active_head(engine); - seqno[id] = intel_engine_get_seqno(engine); + seqno[id] = intel_engine_get_hangcheck_seqno(engine); } - intel_engine_get_instdone(dev_priv->engine[RCS], &instdone); + intel_engine_get_instdone(dev_priv->engine[RCS0], &instdone); } if (timer_pending(&dev_priv->gpu_error.hangcheck_work.timer)) @@ -1318,8 +1311,9 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused) for_each_engine(engine, dev_priv, id) { seq_printf(m, "%s:\n", engine->name); seq_printf(m, "\tseqno = %x [current %x, last %x], %dms ago\n", - engine->hangcheck.seqno, seqno[id], - intel_engine_last_submit(engine), + engine->hangcheck.last_seqno, + seqno[id], + engine->hangcheck.next_seqno, jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp)); @@ -1327,7 +1321,7 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused) (long long)engine->hangcheck.acthd, (long long)acthd[id]); - if (engine->id == RCS) { + if (engine->id == RCS0) { seq_puts(m, "\tinstdone read =\n"); i915_instdone_info(dev_priv, m, &instdone); @@ -1419,13 +1413,14 @@ static int ironlake_drpc_info(struct seq_file *m) static int i915_forcewake_domains(struct seq_file *m, void *data) { struct drm_i915_private *i915 = node_to_i915(m->private); + struct intel_uncore *uncore = &i915->uncore; struct intel_uncore_forcewake_domain *fw_domain; unsigned int tmp; seq_printf(m, "user.bypass_count = %u\n", - i915->uncore.user_forcewake.count); + uncore->user_forcewake.count); - for_each_fw_domain(fw_domain, i915, tmp) + for_each_fw_domain(fw_domain, uncore, 
tmp) seq_printf(m, "%s.wake_count = %u\n", intel_uncore_forcewake_domain_to_str(fw_domain->id), READ_ONCE(fw_domain->wake_count)); @@ -1882,9 +1877,7 @@ static int i915_context_status(struct seq_file *m, void *unused) { struct drm_i915_private *dev_priv = node_to_i915(m->private); struct drm_device *dev = &dev_priv->drm; - struct intel_engine_cs *engine; struct i915_gem_context *ctx; - enum intel_engine_id id; int ret; ret = mutex_lock_interruptible(&dev->struct_mutex); @@ -1892,6 +1885,8 @@ static int i915_context_status(struct seq_file *m, void *unused) return ret; list_for_each_entry(ctx, &dev_priv->contexts.list, link) { + struct intel_context *ce; + seq_puts(m, "HW context "); if (!list_empty(&ctx->hw_id_link)) seq_printf(m, "%x [pin %u]", ctx->hw_id, @@ -1914,11 +1909,8 @@ static int i915_context_status(struct seq_file *m, void *unused) seq_putc(m, ctx->remap_slice ? 'R' : 'r'); seq_putc(m, '\n'); - for_each_engine(engine, dev_priv, id) { - struct intel_context *ce = - to_intel_context(ctx, engine); - - seq_printf(m, "%s: ", engine->name); + list_for_each_entry(ce, &ctx->active_engines, active_link) { + seq_printf(m, "%s: ", ce->engine->name); if (ce->state) describe_obj(m, ce->state->obj); if (ce->ring) @@ -2023,11 +2015,9 @@ static const char *rps_power_to_str(unsigned int power) static int i915_rps_boost_info(struct seq_file *m, void *data) { struct drm_i915_private *dev_priv = node_to_i915(m->private); - struct drm_device *dev = &dev_priv->drm; struct intel_rps *rps = &dev_priv->gt_pm.rps; u32 act_freq = rps->cur_freq; intel_wakeref_t wakeref; - struct drm_file *file; with_intel_runtime_pm_if_in_use(dev_priv, wakeref) { if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { @@ -2061,22 +2051,7 @@ static int i915_rps_boost_info(struct seq_file *m, void *data) intel_gpu_freq(dev_priv, rps->efficient_freq), intel_gpu_freq(dev_priv, rps->boost_freq)); - mutex_lock(&dev->filelist_mutex); - list_for_each_entry_reverse(file, &dev->filelist, lhead) { - struct drm_i915_file_private *file_priv = file->driver_priv; - struct task_struct *task; - - rcu_read_lock(); - task = pid_task(file->pid, PIDTYPE_PID); - seq_printf(m, "%s [%d]: %d boosts\n", - task ? task->comm : "<unknown>", - task ? 
task->pid : -1, - atomic_read(&file_priv->rps_client.boosts)); - rcu_read_unlock(); - } - seq_printf(m, "Kernel (anonymous) boosts: %d\n", - atomic_read(&rps->boosts)); - mutex_unlock(&dev->filelist_mutex); + seq_printf(m, "Wait boosts: %d\n", atomic_read(&rps->boosts)); if (INTEL_GEN(dev_priv) >= 6 && rps->enabled && @@ -2084,12 +2059,12 @@ static int i915_rps_boost_info(struct seq_file *m, void *data) u32 rpup, rpupei; u32 rpdown, rpdownei; - intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK; rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK; rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK; rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK; - intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n", rps_power_to_str(rps->power.mode)); @@ -2607,7 +2582,6 @@ static int i915_edp_psr_debug_set(void *data, u64 val) { struct drm_i915_private *dev_priv = data; - struct drm_modeset_acquire_ctx ctx; intel_wakeref_t wakeref; int ret; @@ -2618,18 +2592,7 @@ i915_edp_psr_debug_set(void *data, u64 val) wakeref = intel_runtime_pm_get(dev_priv); - drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE); - -retry: - ret = intel_psr_set_debugfs_mode(dev_priv, &ctx, val); - if (ret == -EDEADLK) { - ret = drm_modeset_backoff(&ctx); - if (!ret) - goto retry; - } - - drm_modeset_drop_locks(&ctx); - drm_modeset_acquire_fini(&ctx); + ret = intel_psr_debug_set(dev_priv, val); intel_runtime_pm_put(dev_priv, wakeref); @@ -2686,8 +2649,7 @@ static int i915_runtime_pm_status(struct seq_file *m, void *unused) seq_printf(m, "Runtime power status: %s\n", enableddisabled(!dev_priv->power_domains.wakeref)); - seq_printf(m, "GPU idle: %s (epoch %u)\n", - yesno(!dev_priv->gt.awake), dev_priv->gt.epoch); + seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->gt.awake)); seq_printf(m, "IRQs disabled: %s\n", yesno(!intel_irqs_enabled(dev_priv))); #ifdef CONFIG_PM @@ -3122,8 +3084,7 @@ static int i915_engine_info(struct seq_file *m, void *unused) wakeref = intel_runtime_pm_get(dev_priv); - seq_printf(m, "GT awake? %s (epoch %u)\n", - yesno(dev_priv->gt.awake), dev_priv->gt.epoch); + seq_printf(m, "GT awake? 
%s\n", yesno(dev_priv->gt.awake)); seq_printf(m, "Global active requests: %d\n", dev_priv->gt.active_requests); seq_printf(m, "CS timestamp frequency: %u kHz\n", @@ -3210,7 +3171,7 @@ static int i915_shared_dplls_info(struct seq_file *m, void *unused) static int i915_wa_registers(struct seq_file *m, void *unused) { struct drm_i915_private *i915 = node_to_i915(m->private); - const struct i915_wa_list *wal = &i915->engine[RCS]->ctx_wa_list; + const struct i915_wa_list *wal = &i915->engine[RCS0]->ctx_wa_list; struct i915_wa *wa; unsigned int i; @@ -3864,11 +3825,18 @@ static const struct file_operations i915_cur_wm_latency_fops = { static int i915_wedged_get(void *data, u64 *val) { - struct drm_i915_private *dev_priv = data; + int ret = i915_terminally_wedged(data); - *val = i915_terminally_wedged(&dev_priv->gpu_error); - - return 0; + switch (ret) { + case -EIO: + *val = 1; + return 0; + case 0: + *val = 0; + return 0; + default: + return ret; + } } static int @@ -3876,16 +3844,9 @@ i915_wedged_set(void *data, u64 val) { struct drm_i915_private *i915 = data; - /* - * There is no safeguard against this debugfs entry colliding - * with the hangcheck calling same i915_handle_error() in - * parallel, causing an explosion. For now we assume that the - * test harness is responsible enough not to inject gpu hangs - * while it is writing to 'i915_wedged' - */ - - if (i915_reset_backoff(&i915->gpu_error)) - return -EAGAIN; + /* Flush any previous reset before applying for a new one */ + wait_event(i915->gpu_error.reset_queue, + !test_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags)); i915_handle_error(i915, val, I915_ERROR_CAPTURE, "Manually set wedged engine mask = %llx", val); @@ -3926,12 +3887,9 @@ static int i915_drop_caches_set(void *data, u64 val) { struct drm_i915_private *i915 = data; - intel_wakeref_t wakeref; - int ret = 0; DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n", val, val & DROP_ALL); - wakeref = intel_runtime_pm_get(i915); if (val & DROP_RESET_ACTIVE && wait_for(intel_engines_are_idle(i915), I915_IDLE_ENGINES_TIMEOUT)) @@ -3940,9 +3898,11 @@ i915_drop_caches_set(void *data, u64 val) /* No need to check and wait for gpu resets, only libdrm auto-restarts * on ioctls on -EAGAIN. 
*/ if (val & (DROP_ACTIVE | DROP_RETIRE | DROP_RESET_SEQNO)) { + int ret; + ret = mutex_lock_interruptible(&i915->drm.struct_mutex); if (ret) - goto out; + return ret; if (val & DROP_ACTIVE) ret = i915_gem_wait_for_idle(i915, @@ -3956,7 +3916,7 @@ i915_drop_caches_set(void *data, u64 val) mutex_unlock(&i915->drm.struct_mutex); } - if (val & DROP_RESET_ACTIVE && i915_terminally_wedged(&i915->gpu_error)) + if (val & DROP_RESET_ACTIVE && i915_terminally_wedged(i915)) i915_handle_error(i915, ALL_ENGINES, 0, NULL); fs_reclaim_acquire(GFP_KERNEL); @@ -3981,10 +3941,7 @@ i915_drop_caches_set(void *data, u64 val) if (val & DROP_FREED) i915_gem_drain_freed_objects(i915); -out: - intel_runtime_pm_put(i915, wakeref); - - return ret; + return 0; } DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops, @@ -4292,7 +4249,7 @@ static int i915_forcewake_open(struct inode *inode, struct file *file) return 0; file->private_data = (void *)(uintptr_t)intel_runtime_pm_get(i915); - intel_uncore_forcewake_user_get(i915); + intel_uncore_forcewake_user_get(&i915->uncore); return 0; } @@ -4304,7 +4261,7 @@ static int i915_forcewake_release(struct inode *inode, struct file *file) if (INTEL_GEN(i915) < 6) return 0; - intel_uncore_forcewake_user_put(i915); + intel_uncore_forcewake_user_put(&i915->uncore); intel_runtime_pm_put(i915, (intel_wakeref_t)(uintptr_t)file->private_data); diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 6630212f2faf..bbe1a5d56480 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -188,6 +188,11 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id) DRM_DEBUG_KMS("Found Cannon Lake LP PCH (CNP-LP)\n"); WARN_ON(!IS_CANNONLAKE(dev_priv) && !IS_COFFEELAKE(dev_priv)); return PCH_CNP; + case INTEL_PCH_CMP_DEVICE_ID_TYPE: + DRM_DEBUG_KMS("Found Comet Lake PCH (CMP)\n"); + WARN_ON(!IS_COFFEELAKE(dev_priv)); + /* CometPoint is CNP Compatible */ + return PCH_CNP; case INTEL_PCH_ICP_DEVICE_ID_TYPE: DRM_DEBUG_KMS("Found Ice Lake PCH\n"); WARN_ON(!IS_ICELAKE(dev_priv)); @@ -219,20 +224,20 @@ intel_virt_detect_pch(const struct drm_i915_private *dev_priv) * make an educated guess as to which PCH is really there. */ - if (IS_GEN(dev_priv, 5)) - id = INTEL_PCH_IBX_DEVICE_ID_TYPE; - else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv)) - id = INTEL_PCH_CPT_DEVICE_ID_TYPE; + if (IS_ICELAKE(dev_priv)) + id = INTEL_PCH_ICP_DEVICE_ID_TYPE; + else if (IS_CANNONLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) + id = INTEL_PCH_CNP_DEVICE_ID_TYPE; + else if (IS_KABYLAKE(dev_priv) || IS_SKYLAKE(dev_priv)) + id = INTEL_PCH_SPT_DEVICE_ID_TYPE; else if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv)) id = INTEL_PCH_LPT_LP_DEVICE_ID_TYPE; else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) id = INTEL_PCH_LPT_DEVICE_ID_TYPE; - else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) - id = INTEL_PCH_SPT_DEVICE_ID_TYPE; - else if (IS_COFFEELAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) - id = INTEL_PCH_CNP_DEVICE_ID_TYPE; - else if (IS_ICELAKE(dev_priv)) - id = INTEL_PCH_ICP_DEVICE_ID_TYPE; + else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv)) + id = INTEL_PCH_CPT_DEVICE_ID_TYPE; + else if (IS_GEN(dev_priv, 5)) + id = INTEL_PCH_IBX_DEVICE_ID_TYPE; if (id) DRM_DEBUG_KMS("Assuming PCH ID %04x\n", id); @@ -330,16 +335,16 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data, value = dev_priv->overlay ? 
1 : 0; break; case I915_PARAM_HAS_BSD: - value = !!dev_priv->engine[VCS]; + value = !!dev_priv->engine[VCS0]; break; case I915_PARAM_HAS_BLT: - value = !!dev_priv->engine[BCS]; + value = !!dev_priv->engine[BCS0]; break; case I915_PARAM_HAS_VEBOX: - value = !!dev_priv->engine[VECS]; + value = !!dev_priv->engine[VECS0]; break; case I915_PARAM_HAS_BSD2: - value = !!dev_priv->engine[VCS2]; + value = !!dev_priv->engine[VCS1]; break; case I915_PARAM_HAS_LLC: value = HAS_LLC(dev_priv); @@ -348,10 +353,10 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data, value = HAS_WT(dev_priv); break; case I915_PARAM_HAS_ALIASING_PPGTT: - value = min_t(int, INTEL_PPGTT(dev_priv), I915_GEM_PPGTT_FULL); + value = INTEL_PPGTT(dev_priv); break; case I915_PARAM_HAS_SEMAPHORES: - value = 0; + value = !!(dev_priv->caps.scheduler & I915_SCHEDULER_CAP_SEMAPHORES); break; case I915_PARAM_HAS_SECURE_BATCHES: value = capable(CAP_SYS_ADMIN); @@ -714,8 +719,7 @@ static int i915_load_modeset_init(struct drm_device *dev) return 0; cleanup_gem: - if (i915_gem_suspend(dev_priv)) - DRM_ERROR("failed to idle hardware; continuing to unload!\n"); + i915_gem_suspend(dev_priv); i915_gem_fini(dev_priv); cleanup_modeset: intel_modeset_cleanup(dev); @@ -757,39 +761,6 @@ static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv) return ret; } -#if !defined(CONFIG_VGA_CONSOLE) -static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv) -{ - return 0; -} -#elif !defined(CONFIG_DUMMY_CONSOLE) -static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv) -{ - return -ENODEV; -} -#else -static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv) -{ - int ret = 0; - - DRM_INFO("Replacing VGA console driver\n"); - - console_lock(); - if (con_is_bound(&vga_con)) - ret = do_take_over_console(&dummy_con, 0, MAX_NR_CONSOLES - 1, 1); - if (ret == 0) { - ret = do_unregister_con_driver(&vga_con); - - /* Ignore "already unregistered". */ - if (ret == -ENODEV) - ret = 0; - } - console_unlock(); - - return ret; -} -#endif - static void intel_init_dpio(struct drm_i915_private *dev_priv) { /* @@ -906,6 +877,7 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv) mutex_init(&dev_priv->av_mutex); mutex_init(&dev_priv->wm.wm_mutex); mutex_init(&dev_priv->pps_mutex); + mutex_init(&dev_priv->hdcp_comp_mutex); i915_memcpy_init_early(dev_priv); intel_runtime_pm_init_early(dev_priv); @@ -963,46 +935,6 @@ static void i915_driver_cleanup_early(struct drm_i915_private *dev_priv) i915_engines_cleanup(dev_priv); } -static int i915_mmio_setup(struct drm_i915_private *dev_priv) -{ - struct pci_dev *pdev = dev_priv->drm.pdev; - int mmio_bar; - int mmio_size; - - mmio_bar = IS_GEN(dev_priv, 2) ? 1 : 0; - /* - * Before gen4, the registers and the GTT are behind different BARs. - * However, from gen4 onwards, the registers and the GTT are shared - * in the same BAR, so we want to restrict this ioremap from - * clobbering the GTT which we want ioremap_wc instead. Fortunately, - * the register BAR remains the same size for all the earlier - * generations up to Ironlake. 
- */ - if (INTEL_GEN(dev_priv) < 5) - mmio_size = 512 * 1024; - else - mmio_size = 2 * 1024 * 1024; - dev_priv->regs = pci_iomap(pdev, mmio_bar, mmio_size); - if (dev_priv->regs == NULL) { - DRM_ERROR("failed to map registers\n"); - - return -EIO; - } - - /* Try to make sure MCHBAR is enabled before poking at it */ - intel_setup_mchbar(dev_priv); - - return 0; -} - -static void i915_mmio_cleanup(struct drm_i915_private *dev_priv) -{ - struct pci_dev *pdev = dev_priv->drm.pdev; - - intel_teardown_mchbar(dev_priv); - pci_iounmap(pdev, dev_priv->regs); -} - /** * i915_driver_init_mmio - setup device MMIO * @dev_priv: device private @@ -1022,15 +954,16 @@ static int i915_driver_init_mmio(struct drm_i915_private *dev_priv) if (i915_get_bridge_dev(dev_priv)) return -EIO; - ret = i915_mmio_setup(dev_priv); + ret = intel_uncore_init(&dev_priv->uncore); if (ret < 0) goto err_bridge; - intel_uncore_init(dev_priv); + /* Try to make sure MCHBAR is enabled before poking at it */ + intel_setup_mchbar(dev_priv); intel_device_info_init_mmio(dev_priv); - intel_uncore_prune(dev_priv); + intel_uncore_prune(&dev_priv->uncore); intel_uc_init_mmio(dev_priv); @@ -1043,8 +976,8 @@ static int i915_driver_init_mmio(struct drm_i915_private *dev_priv) return 0; err_uncore: - intel_uncore_fini(dev_priv); - i915_mmio_cleanup(dev_priv); + intel_teardown_mchbar(dev_priv); + intel_uncore_fini(&dev_priv->uncore); err_bridge: pci_dev_put(dev_priv->bridge_dev); @@ -1057,8 +990,8 @@ err_bridge: */ static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv) { - intel_uncore_fini(dev_priv); - i915_mmio_cleanup(dev_priv); + intel_teardown_mchbar(dev_priv); + intel_uncore_fini(&dev_priv->uncore); pci_dev_put(dev_priv->bridge_dev); } @@ -1067,110 +1000,180 @@ static void intel_sanitize_options(struct drm_i915_private *dev_priv) intel_gvt_sanitize_options(dev_priv); } -static enum dram_rank skl_get_dimm_rank(u8 size, u32 rank) +#define DRAM_TYPE_STR(type) [INTEL_DRAM_ ## type] = #type + +static const char *intel_dram_type_str(enum intel_dram_type type) { - if (size == 0) - return I915_DRAM_RANK_INVALID; - if (rank == SKL_DRAM_RANK_SINGLE) - return I915_DRAM_RANK_SINGLE; - else if (rank == SKL_DRAM_RANK_DUAL) - return I915_DRAM_RANK_DUAL; + static const char * const str[] = { + DRAM_TYPE_STR(UNKNOWN), + DRAM_TYPE_STR(DDR3), + DRAM_TYPE_STR(DDR4), + DRAM_TYPE_STR(LPDDR3), + DRAM_TYPE_STR(LPDDR4), + }; + + if (type >= ARRAY_SIZE(str)) + type = INTEL_DRAM_UNKNOWN; - return I915_DRAM_RANK_INVALID; + return str[type]; } -static bool -skl_is_16gb_dimm(enum dram_rank rank, u8 size, u8 width) +#undef DRAM_TYPE_STR + +static int intel_dimm_num_devices(const struct dram_dimm_info *dimm) { - if (rank == I915_DRAM_RANK_SINGLE && width == 8 && size == 16) - return true; - else if (rank == I915_DRAM_RANK_DUAL && width == 8 && size == 32) - return true; - else if (rank == SKL_DRAM_RANK_SINGLE && width == 16 && size == 8) - return true; - else if (rank == SKL_DRAM_RANK_DUAL && width == 16 && size == 16) - return true; + return dimm->ranks * 64 / (dimm->width ?: 1); +} - return false; +/* Returns total GB for the whole DIMM */ +static int skl_get_dimm_size(u16 val) +{ + return val & SKL_DRAM_SIZE_MASK; } -static int -skl_dram_get_channel_info(struct dram_channel_info *ch, u32 val) +static int skl_get_dimm_width(u16 val) { - u32 tmp_l, tmp_s; - u32 s_val = val >> SKL_DRAM_S_SHIFT; + if (skl_get_dimm_size(val) == 0) + return 0; - if (!val) - return -EINVAL; + switch (val & SKL_DRAM_WIDTH_MASK) { + case SKL_DRAM_WIDTH_X8: + case 
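The new intel_dram_type_str() leans on a compact C idiom: a helper macro that token-pastes the enum index and stringifies the name into designated array initializers, with out-of-range values clamped to the UNKNOWN slot. A standalone sketch of the same idiom follows; the COLOR_* enum is made up for the example.

#include <stdio.h>

enum color { COLOR_UNKNOWN, COLOR_RED, COLOR_GREEN, COLOR_BLUE };

/* [COLOR_RED] = "RED": ## picks the slot, # stringifies the name. */
#define COLOR_STR(c) [COLOR_ ## c] = #c

static const char *color_str(enum color c)
{
	static const char * const str[] = {
		COLOR_STR(UNKNOWN),
		COLOR_STR(RED),
		COLOR_STR(GREEN),
		COLOR_STR(BLUE),
	};

	/* Open-coded ARRAY_SIZE(); unknown values fall back to "UNKNOWN". */
	if ((unsigned int)c >= sizeof(str) / sizeof(str[0]))
		c = COLOR_UNKNOWN;

	return str[c];
}

int main(void)
{
	printf("%s %s\n", color_str(COLOR_GREEN), color_str((enum color)42));
	return 0;	/* prints "GREEN UNKNOWN" */
}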
SKL_DRAM_WIDTH_X16: + case SKL_DRAM_WIDTH_X32: + val = (val & SKL_DRAM_WIDTH_MASK) >> SKL_DRAM_WIDTH_SHIFT; + return 8 << val; + default: + MISSING_CASE(val); + return 0; + } +} + +static int skl_get_dimm_ranks(u16 val) +{ + if (skl_get_dimm_size(val) == 0) + return 0; - tmp_l = val & SKL_DRAM_SIZE_MASK; - tmp_s = s_val & SKL_DRAM_SIZE_MASK; + val = (val & SKL_DRAM_RANK_MASK) >> SKL_DRAM_RANK_SHIFT; + + return val + 1; +} + +/* Returns total GB for the whole DIMM */ +static int cnl_get_dimm_size(u16 val) +{ + return (val & CNL_DRAM_SIZE_MASK) / 2; +} - if (tmp_l == 0 && tmp_s == 0) +static int cnl_get_dimm_width(u16 val) +{ + if (cnl_get_dimm_size(val) == 0) + return 0; + + switch (val & CNL_DRAM_WIDTH_MASK) { + case CNL_DRAM_WIDTH_X8: + case CNL_DRAM_WIDTH_X16: + case CNL_DRAM_WIDTH_X32: + val = (val & CNL_DRAM_WIDTH_MASK) >> CNL_DRAM_WIDTH_SHIFT; + return 8 << val; + default: + MISSING_CASE(val); + return 0; + } +} + +static int cnl_get_dimm_ranks(u16 val) +{ + if (cnl_get_dimm_size(val) == 0) + return 0; + + val = (val & CNL_DRAM_RANK_MASK) >> CNL_DRAM_RANK_SHIFT; + + return val + 1; +} + +static bool +skl_is_16gb_dimm(const struct dram_dimm_info *dimm) +{ + /* Convert total GB to Gb per DRAM device */ + return 8 * dimm->size / (intel_dimm_num_devices(dimm) ?: 1) == 16; +} + +static void +skl_dram_get_dimm_info(struct drm_i915_private *dev_priv, + struct dram_dimm_info *dimm, + int channel, char dimm_name, u16 val) +{ + if (INTEL_GEN(dev_priv) >= 10) { + dimm->size = cnl_get_dimm_size(val); + dimm->width = cnl_get_dimm_width(val); + dimm->ranks = cnl_get_dimm_ranks(val); + } else { + dimm->size = skl_get_dimm_size(val); + dimm->width = skl_get_dimm_width(val); + dimm->ranks = skl_get_dimm_ranks(val); + } + + DRM_DEBUG_KMS("CH%u DIMM %c size: %u GB, width: X%u, ranks: %u, 16Gb DIMMs: %s\n", + channel, dimm_name, dimm->size, dimm->width, dimm->ranks, + yesno(skl_is_16gb_dimm(dimm))); +} + +static int +skl_dram_get_channel_info(struct drm_i915_private *dev_priv, + struct dram_channel_info *ch, + int channel, u32 val) +{ + skl_dram_get_dimm_info(dev_priv, &ch->dimm_l, + channel, 'L', val & 0xffff); + skl_dram_get_dimm_info(dev_priv, &ch->dimm_s, + channel, 'S', val >> 16); + + if (ch->dimm_l.size == 0 && ch->dimm_s.size == 0) { + DRM_DEBUG_KMS("CH%u not populated\n", channel); return -EINVAL; + } - ch->l_info.size = tmp_l; - ch->s_info.size = tmp_s; - - tmp_l = (val & SKL_DRAM_WIDTH_MASK) >> SKL_DRAM_WIDTH_SHIFT; - tmp_s = (s_val & SKL_DRAM_WIDTH_MASK) >> SKL_DRAM_WIDTH_SHIFT; - ch->l_info.width = (1 << tmp_l) * 8; - ch->s_info.width = (1 << tmp_s) * 8; - - tmp_l = val & SKL_DRAM_RANK_MASK; - tmp_s = s_val & SKL_DRAM_RANK_MASK; - ch->l_info.rank = skl_get_dimm_rank(ch->l_info.size, tmp_l); - ch->s_info.rank = skl_get_dimm_rank(ch->s_info.size, tmp_s); - - if (ch->l_info.rank == I915_DRAM_RANK_DUAL || - ch->s_info.rank == I915_DRAM_RANK_DUAL) - ch->rank = I915_DRAM_RANK_DUAL; - else if (ch->l_info.rank == I915_DRAM_RANK_SINGLE && - ch->s_info.rank == I915_DRAM_RANK_SINGLE) - ch->rank = I915_DRAM_RANK_DUAL; + if (ch->dimm_l.ranks == 2 || ch->dimm_s.ranks == 2) + ch->ranks = 2; + else if (ch->dimm_l.ranks == 1 && ch->dimm_s.ranks == 1) + ch->ranks = 2; else - ch->rank = I915_DRAM_RANK_SINGLE; + ch->ranks = 1; - ch->is_16gb_dimm = skl_is_16gb_dimm(ch->l_info.rank, ch->l_info.size, - ch->l_info.width) || - skl_is_16gb_dimm(ch->s_info.rank, ch->s_info.size, - ch->s_info.width); + ch->is_16gb_dimm = + skl_is_16gb_dimm(&ch->dimm_l) || + skl_is_16gb_dimm(&ch->dimm_s); - 
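skl_is_16gb_dimm() replaces the old hard-coded (rank, width, size) table with arithmetic: a 64-bit channel divided by the device width gives DRAM devices per rank, and 8 * total_GB / device_count gives the per-device density in Gb. A small self-contained sketch of the same math; the struct and helper names are local to the example, and the sample DIMMs are chosen to match the table the patch removes.

#include <stdio.h>
#include <stdbool.h>

/* Local copy of the dram_dimm_info shape: total size in GB, device width
 * in bits, rank count. */
struct dram_dimm_info { unsigned int size, width, ranks; };

/* A 64-bit channel divided by the device width gives devices per rank. */
static unsigned int num_devices(const struct dram_dimm_info *d)
{
	return d->ranks * 64 / (d->width ? d->width : 1);
}

/* 8 * total GB / device count = density per device in Gb. */
static bool is_16gb_device_dimm(const struct dram_dimm_info *d)
{
	unsigned int n = num_devices(d);

	return 8 * d->size / (n ? n : 1) == 16;
}

int main(void)
{
	/* Dual-rank x8: 16 devices. 32 GB total -> 16 Gb devices; 16 GB -> 8 Gb. */
	struct dram_dimm_info big = { .size = 32, .width = 8, .ranks = 2 };
	struct dram_dimm_info small = { .size = 16, .width = 8, .ranks = 2 };

	printf("%d %d\n", is_16gb_device_dimm(&big), is_16gb_device_dimm(&small));
	return 0;	/* prints "1 0", matching the old explicit table */
}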
DRM_DEBUG_KMS("(size:width:rank) L(%dGB:X%d:%s) S(%dGB:X%d:%s)\n", - ch->l_info.size, ch->l_info.width, - ch->l_info.rank ? "dual" : "single", - ch->s_info.size, ch->s_info.width, - ch->s_info.rank ? "dual" : "single"); + DRM_DEBUG_KMS("CH%u ranks: %u, 16Gb DIMMs: %s\n", + channel, ch->ranks, yesno(ch->is_16gb_dimm)); return 0; } static bool -intel_is_dram_symmetric(u32 val_ch0, u32 val_ch1, - struct dram_channel_info *ch0) +intel_is_dram_symmetric(const struct dram_channel_info *ch0, + const struct dram_channel_info *ch1) { - return (val_ch0 == val_ch1 && - (ch0->s_info.size == 0 || - (ch0->l_info.size == ch0->s_info.size && - ch0->l_info.width == ch0->s_info.width && - ch0->l_info.rank == ch0->s_info.rank))); + return !memcmp(ch0, ch1, sizeof(*ch0)) && + (ch0->dimm_s.size == 0 || + !memcmp(&ch0->dimm_l, &ch0->dimm_s, sizeof(ch0->dimm_l))); } static int skl_dram_get_channels_info(struct drm_i915_private *dev_priv) { struct dram_info *dram_info = &dev_priv->dram_info; - struct dram_channel_info ch0, ch1; - u32 val_ch0, val_ch1; + struct dram_channel_info ch0 = {}, ch1 = {}; + u32 val; int ret; - val_ch0 = I915_READ(SKL_MAD_DIMM_CH0_0_0_0_MCHBAR_MCMAIN); - ret = skl_dram_get_channel_info(&ch0, val_ch0); + val = I915_READ(SKL_MAD_DIMM_CH0_0_0_0_MCHBAR_MCMAIN); + ret = skl_dram_get_channel_info(dev_priv, &ch0, 0, val); if (ret == 0) dram_info->num_channels++; - val_ch1 = I915_READ(SKL_MAD_DIMM_CH1_0_0_0_MCHBAR_MCMAIN); - ret = skl_dram_get_channel_info(&ch1, val_ch1); + val = I915_READ(SKL_MAD_DIMM_CH1_0_0_0_MCHBAR_MCMAIN); + ret = skl_dram_get_channel_info(dev_priv, &ch1, 1, val); if (ret == 0) dram_info->num_channels++; @@ -1184,28 +1187,47 @@ skl_dram_get_channels_info(struct drm_i915_private *dev_priv) * will be same as if single rank memory, so consider single rank * memory. */ - if (ch0.rank == I915_DRAM_RANK_SINGLE || - ch1.rank == I915_DRAM_RANK_SINGLE) - dram_info->rank = I915_DRAM_RANK_SINGLE; + if (ch0.ranks == 1 || ch1.ranks == 1) + dram_info->ranks = 1; else - dram_info->rank = max(ch0.rank, ch1.rank); + dram_info->ranks = max(ch0.ranks, ch1.ranks); - if (dram_info->rank == I915_DRAM_RANK_INVALID) { + if (dram_info->ranks == 0) { DRM_INFO("couldn't get memory rank information\n"); return -EINVAL; } dram_info->is_16gb_dimm = ch0.is_16gb_dimm || ch1.is_16gb_dimm; - dev_priv->dram_info.symmetric_memory = intel_is_dram_symmetric(val_ch0, - val_ch1, - &ch0); + dram_info->symmetric_memory = intel_is_dram_symmetric(&ch0, &ch1); - DRM_DEBUG_KMS("memory configuration is %sSymmetric memory\n", - dev_priv->dram_info.symmetric_memory ? "" : "not "); + DRM_DEBUG_KMS("Memory configuration is symmetric? 
%s\n", + yesno(dram_info->symmetric_memory)); return 0; } +static enum intel_dram_type +skl_get_dram_type(struct drm_i915_private *dev_priv) +{ + u32 val; + + val = I915_READ(SKL_MAD_INTER_CHANNEL_0_0_0_MCHBAR_MCMAIN); + + switch (val & SKL_DRAM_DDR_TYPE_MASK) { + case SKL_DRAM_DDR_TYPE_DDR3: + return INTEL_DRAM_DDR3; + case SKL_DRAM_DDR_TYPE_DDR4: + return INTEL_DRAM_DDR4; + case SKL_DRAM_DDR_TYPE_LPDDR3: + return INTEL_DRAM_LPDDR3; + case SKL_DRAM_DDR_TYPE_LPDDR4: + return INTEL_DRAM_LPDDR4; + default: + MISSING_CASE(val); + return INTEL_DRAM_UNKNOWN; + } +} + static int skl_get_dram_info(struct drm_i915_private *dev_priv) { @@ -1213,6 +1235,9 @@ skl_get_dram_info(struct drm_i915_private *dev_priv) u32 mem_freq_khz, val; int ret; + dram_info->type = skl_get_dram_type(dev_priv); + DRM_DEBUG_KMS("DRAM type: %s\n", intel_dram_type_str(dram_info->type)); + ret = skl_dram_get_channels_info(dev_priv); if (ret) return ret; @@ -1233,6 +1258,85 @@ skl_get_dram_info(struct drm_i915_private *dev_priv) return 0; } +/* Returns Gb per DRAM device */ +static int bxt_get_dimm_size(u32 val) +{ + switch (val & BXT_DRAM_SIZE_MASK) { + case BXT_DRAM_SIZE_4GBIT: + return 4; + case BXT_DRAM_SIZE_6GBIT: + return 6; + case BXT_DRAM_SIZE_8GBIT: + return 8; + case BXT_DRAM_SIZE_12GBIT: + return 12; + case BXT_DRAM_SIZE_16GBIT: + return 16; + default: + MISSING_CASE(val); + return 0; + } +} + +static int bxt_get_dimm_width(u32 val) +{ + if (!bxt_get_dimm_size(val)) + return 0; + + val = (val & BXT_DRAM_WIDTH_MASK) >> BXT_DRAM_WIDTH_SHIFT; + + return 8 << val; +} + +static int bxt_get_dimm_ranks(u32 val) +{ + if (!bxt_get_dimm_size(val)) + return 0; + + switch (val & BXT_DRAM_RANK_MASK) { + case BXT_DRAM_RANK_SINGLE: + return 1; + case BXT_DRAM_RANK_DUAL: + return 2; + default: + MISSING_CASE(val); + return 0; + } +} + +static enum intel_dram_type bxt_get_dimm_type(u32 val) +{ + if (!bxt_get_dimm_size(val)) + return INTEL_DRAM_UNKNOWN; + + switch (val & BXT_DRAM_TYPE_MASK) { + case BXT_DRAM_TYPE_DDR3: + return INTEL_DRAM_DDR3; + case BXT_DRAM_TYPE_LPDDR3: + return INTEL_DRAM_LPDDR3; + case BXT_DRAM_TYPE_DDR4: + return INTEL_DRAM_DDR4; + case BXT_DRAM_TYPE_LPDDR4: + return INTEL_DRAM_LPDDR4; + default: + MISSING_CASE(val); + return INTEL_DRAM_UNKNOWN; + } +} + +static void bxt_get_dimm_info(struct dram_dimm_info *dimm, + u32 val) +{ + dimm->width = bxt_get_dimm_width(val); + dimm->ranks = bxt_get_dimm_ranks(val); + + /* + * Size in register is Gb per DRAM device. Convert to total + * GB to match the way we report this for non-LP platforms. + */ + dimm->size = bxt_get_dimm_size(val) * intel_dimm_num_devices(dimm) / 8; +} + static int bxt_get_dram_info(struct drm_i915_private *dev_priv) { @@ -1261,57 +1365,44 @@ bxt_get_dram_info(struct drm_i915_private *dev_priv) * Now read each DUNIT8/9/10/11 to check the rank of each dimms. 
*/ for (i = BXT_D_CR_DRP0_DUNIT_START; i <= BXT_D_CR_DRP0_DUNIT_END; i++) { - u8 size, width; - enum dram_rank rank; - u32 tmp; + struct dram_dimm_info dimm; + enum intel_dram_type type; val = I915_READ(BXT_D_CR_DRP0_DUNIT(i)); if (val == 0xFFFFFFFF) continue; dram_info->num_channels++; - tmp = val & BXT_DRAM_RANK_MASK; - - if (tmp == BXT_DRAM_RANK_SINGLE) - rank = I915_DRAM_RANK_SINGLE; - else if (tmp == BXT_DRAM_RANK_DUAL) - rank = I915_DRAM_RANK_DUAL; - else - rank = I915_DRAM_RANK_INVALID; - - tmp = val & BXT_DRAM_SIZE_MASK; - if (tmp == BXT_DRAM_SIZE_4GB) - size = 4; - else if (tmp == BXT_DRAM_SIZE_6GB) - size = 6; - else if (tmp == BXT_DRAM_SIZE_8GB) - size = 8; - else if (tmp == BXT_DRAM_SIZE_12GB) - size = 12; - else if (tmp == BXT_DRAM_SIZE_16GB) - size = 16; - else - size = 0; - - tmp = (val & BXT_DRAM_WIDTH_MASK) >> BXT_DRAM_WIDTH_SHIFT; - width = (1 << tmp) * 8; - DRM_DEBUG_KMS("dram size:%dGB width:X%d rank:%s\n", size, - width, rank == I915_DRAM_RANK_SINGLE ? "single" : - rank == I915_DRAM_RANK_DUAL ? "dual" : "unknown"); + + bxt_get_dimm_info(&dimm, val); + type = bxt_get_dimm_type(val); + + WARN_ON(type != INTEL_DRAM_UNKNOWN && + dram_info->type != INTEL_DRAM_UNKNOWN && + dram_info->type != type); + + DRM_DEBUG_KMS("CH%u DIMM size: %u GB, width: X%u, ranks: %u, type: %s\n", + i - BXT_D_CR_DRP0_DUNIT_START, + dimm.size, dimm.width, dimm.ranks, + intel_dram_type_str(type)); /* * If any of the channel is single rank channel, * worst case output will be same as if single rank * memory, so consider single rank memory. */ - if (dram_info->rank == I915_DRAM_RANK_INVALID) - dram_info->rank = rank; - else if (rank == I915_DRAM_RANK_SINGLE) - dram_info->rank = I915_DRAM_RANK_SINGLE; + if (dram_info->ranks == 0) + dram_info->ranks = dimm.ranks; + else if (dimm.ranks == 1) + dram_info->ranks = 1; + + if (type != INTEL_DRAM_UNKNOWN) + dram_info->type = type; } - if (dram_info->rank == I915_DRAM_RANK_INVALID) { - DRM_INFO("couldn't get memory rank information\n"); + if (dram_info->type == INTEL_DRAM_UNKNOWN || + dram_info->ranks == 0) { + DRM_INFO("couldn't get memory information\n"); return -EINVAL; } @@ -1323,14 +1414,8 @@ static void intel_get_dram_info(struct drm_i915_private *dev_priv) { struct dram_info *dram_info = &dev_priv->dram_info; - char bandwidth_str[32]; int ret; - dram_info->valid = false; - dram_info->rank = I915_DRAM_RANK_INVALID; - dram_info->bandwidth_kbps = 0; - dram_info->num_channels = 0; - /* * Assume 16Gb DIMMs are present until proven otherwise. * This is only used for the level 0 watermark latency @@ -1338,28 +1423,22 @@ intel_get_dram_info(struct drm_i915_private *dev_priv) */ dram_info->is_16gb_dimm = !IS_GEN9_LP(dev_priv); - if (INTEL_GEN(dev_priv) < 9 || IS_GEMINILAKE(dev_priv)) + if (INTEL_GEN(dev_priv) < 9) return; - /* Need to calculate bandwidth only for Gen9 */ - if (IS_BROXTON(dev_priv)) + if (IS_GEN9_LP(dev_priv)) ret = bxt_get_dram_info(dev_priv); - else if (IS_GEN(dev_priv, 9)) - ret = skl_get_dram_info(dev_priv); else - ret = skl_dram_get_channels_info(dev_priv); + ret = skl_get_dram_info(dev_priv); if (ret) return; - if (dram_info->bandwidth_kbps) - sprintf(bandwidth_str, "%d KBps", dram_info->bandwidth_kbps); - else - sprintf(bandwidth_str, "unknown"); - DRM_DEBUG_KMS("DRAM bandwidth:%s, total-channels: %u\n", - bandwidth_str, dram_info->num_channels); - DRM_DEBUG_KMS("DRAM rank: %s rank 16GB-dimm:%s\n", - (dram_info->rank == I915_DRAM_RANK_DUAL) ? 
- "dual" : "single", yesno(dram_info->is_16gb_dimm)); + DRM_DEBUG_KMS("DRAM bandwidth: %u kBps, channels: %u\n", + dram_info->bandwidth_kbps, + dram_info->num_channels); + + DRM_DEBUG_KMS("DRAM ranks: %u, 16Gb DIMMs: %s\n", + dram_info->ranks, yesno(dram_info->is_16gb_dimm)); } /** @@ -1381,7 +1460,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv) if (HAS_PPGTT(dev_priv)) { if (intel_vgpu_active(dev_priv) && - !intel_vgpu_has_full_48bit_ppgtt(dev_priv)) { + !intel_vgpu_has_full_ppgtt(dev_priv)) { i915_report_error(dev_priv, "incompatible vGPU found, support for isolated ppGTT required\n"); return -ENXIO; @@ -1420,7 +1499,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv) goto err_ggtt; } - ret = i915_kick_out_vgacon(dev_priv); + ret = vga_remove_vgacon(pdev); if (ret) { DRM_ERROR("failed to remove conflicting VGA console\n"); goto err_ggtt; @@ -1786,8 +1865,7 @@ void i915_driver_unload(struct drm_device *dev) /* Flush any external code that still may be under the RCU lock */ synchronize_rcu(); - if (i915_gem_suspend(dev_priv)) - DRM_ERROR("failed to idle hardware; continuing to unload!\n"); + i915_gem_suspend(dev_priv); drm_atomic_helper_shutdown(dev); @@ -1895,7 +1973,6 @@ static bool suspend_to_idle(struct drm_i915_private *dev_priv) static int i915_drm_prepare(struct drm_device *dev) { struct drm_i915_private *i915 = to_i915(dev); - int err; /* * NB intel_display_suspend() may issue new requests after we've @@ -1903,12 +1980,9 @@ static int i915_drm_prepare(struct drm_device *dev) * split out that work and pull it forward so that after point, * the GPU is not woken again. */ - err = i915_gem_suspend(i915); - if (err) - dev_err(&i915->drm.pdev->dev, - "GEM idle failed, suspend/resume might fail\n"); + i915_gem_suspend(i915); - return err; + return 0; } static int i915_drm_suspend(struct drm_device *dev) @@ -1978,7 +2052,7 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation) i915_gem_suspend_late(dev_priv); - intel_uncore_suspend(dev_priv); + intel_uncore_suspend(&dev_priv->uncore); intel_power_domains_suspend(dev_priv, get_suspend_mode(dev_priv, hibernation)); @@ -2174,7 +2248,9 @@ static int i915_drm_resume_early(struct drm_device *dev) DRM_ERROR("Resume prepare failed: %d, continuing anyway\n", ret); - intel_uncore_resume_early(dev_priv); + intel_uncore_resume_early(&dev_priv->uncore); + + i915_check_and_clear_faults(dev_priv); if (INTEL_GEN(dev_priv) >= 11 || IS_GEN9_LP(dev_priv)) { gen9_sanitize_dc_state(dev_priv); @@ -2578,7 +2654,7 @@ int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on) if (!force_on) return 0; - err = intel_wait_for_register(dev_priv, + err = intel_wait_for_register(&dev_priv->uncore, VLV_GTLC_SURVIVABILITY_REG, VLV_GFX_CLK_STATUS_BIT, VLV_GFX_CLK_STATUS_BIT, @@ -2744,7 +2820,7 @@ static int intel_runtime_suspend(struct device *kdev) intel_runtime_pm_disable_interrupts(dev_priv); - intel_uncore_suspend(dev_priv); + intel_uncore_suspend(&dev_priv->uncore); ret = 0; if (INTEL_GEN(dev_priv) >= 11) { @@ -2761,7 +2837,7 @@ static int intel_runtime_suspend(struct device *kdev) if (ret) { DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret); - intel_uncore_runtime_resume(dev_priv); + intel_uncore_runtime_resume(&dev_priv->uncore); intel_runtime_pm_enable_interrupts(dev_priv); @@ -2778,7 +2854,7 @@ static int intel_runtime_suspend(struct device *kdev) enable_rpm_wakeref_asserts(dev_priv); intel_runtime_pm_cleanup(dev_priv); - if 
(intel_uncore_arm_unclaimed_mmio_detection(dev_priv)) + if (intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore)) DRM_ERROR("Unclaimed access detected prior to suspending\n"); dev_priv->runtime_pm.suspended = true; @@ -2806,7 +2882,7 @@ static int intel_runtime_suspend(struct device *kdev) intel_opregion_notify_adapter(dev_priv, PCI_D1); } - assert_forcewakes_inactive(dev_priv); + assert_forcewakes_inactive(&dev_priv->uncore); if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) intel_hpd_poll_init(dev_priv); @@ -2832,7 +2908,7 @@ static int intel_runtime_resume(struct device *kdev) intel_opregion_notify_adapter(dev_priv, PCI_D0); dev_priv->runtime_pm.suspended = false; - if (intel_uncore_unclaimed_mmio(dev_priv)) + if (intel_uncore_unclaimed_mmio(&dev_priv->uncore)) DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n"); if (INTEL_GEN(dev_priv) >= 11) { @@ -2858,7 +2934,7 @@ static int intel_runtime_resume(struct device *kdev) ret = vlv_resume_prepare(dev_priv, true); } - intel_uncore_runtime_resume(dev_priv); + intel_uncore_runtime_resume(&dev_priv->uncore); intel_runtime_pm_enable_interrupts(dev_priv); @@ -3002,7 +3078,7 @@ static const struct drm_ioctl_desc i915_ioctls[] = { DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey_ioctl, DRM_MASTER), DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER), DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE_EXT, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_gem_context_reset_stats_ioctl, DRM_RENDER_ALLOW), diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 9adc7bb9e69c..25c264e55d3c 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -55,6 +55,7 @@ #include <drm/drm_util.h> #include <drm/drm_dsc.h> #include <drm/drm_connector.h> +#include <drm/i915_mei_hdcp_interface.h> #include "i915_fixed.h" #include "i915_params.h" @@ -91,8 +92,8 @@ #define DRIVER_NAME "i915" #define DRIVER_DESC "Intel Graphics" -#define DRIVER_DATE "20190207" -#define DRIVER_TIMESTAMP 1549572331 +#define DRIVER_DATE "20190328" +#define DRIVER_TIMESTAMP 1553776914 /* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and * WARN_ON()) for hw state sanity checks to check for unexpected conditions @@ -215,11 +216,12 @@ struct drm_i915_file_private { */ #define DRM_I915_THROTTLE_JIFFIES msecs_to_jiffies(20) } mm; + struct idr context_idr; + struct mutex context_idr_lock; /* guards context_idr */ - struct intel_rps_client { - atomic_t boosts; - } rps_client; + struct idr vm_idr; + struct mutex vm_idr_lock; /* guards vm_idr */ unsigned int bsd_engine; @@ -508,7 +510,7 @@ struct i915_psr { u32 debug; bool sink_support; - bool prepared, enabled; + bool enabled; struct intel_dp *dp; enum pipe pipe; bool active; @@ -526,16 +528,22 @@ struct i915_psr { u16 su_x_granularity; }; +/* + * Sorted by south display engine compatibility. + * If the new PCH comes with a south display engine that is not + * inherited from the latest item, please do not add it to the + * end. Instead, add it right after its "parent" PCH. 
+ */ enum intel_pch { + PCH_NOP = -1, /* PCH without south display */ PCH_NONE = 0, /* No PCH present */ PCH_IBX, /* Ibexpeak PCH */ PCH_CPT, /* Cougarpoint/Pantherpoint PCH */ PCH_LPT, /* Lynxpoint/Wildcatpoint PCH */ PCH_SPT, /* Sunrisepoint PCH */ PCH_KBP, /* Kaby Lake PCH */ - PCH_CNP, /* Cannon Lake PCH */ + PCH_CNP, /* Cannon/Comet Lake PCH */ PCH_ICP, /* Ice Lake PCH */ - PCH_NOP, /* PCH without south display */ }; enum intel_sbi_destination { @@ -949,6 +957,7 @@ struct ddi_vbt_port_info { #define HDMI_LEVEL_SHIFT_UNKNOWN 0xff u8 hdmi_level_shift; + u8 present:1; u8 supports_dvi:1; u8 supports_hdmi:1; u8 supports_dp:1; @@ -1009,6 +1018,7 @@ struct intel_vbt_data { enum psr_lines_to_wait lines_to_wait; int tp1_wakeup_time_us; int tp2_tp3_wakeup_time_us; + int psr2_tp2_tp3_wakeup_time_us; } psr; struct { @@ -1130,6 +1140,7 @@ struct skl_wm_level { u16 plane_res_b; u8 plane_res_l; bool plane_en; + bool ignore_lines; }; /* Stores plane specific WM parameters */ @@ -1200,7 +1211,11 @@ enum intel_pipe_crc_source { INTEL_PIPE_CRC_SOURCE_NONE, INTEL_PIPE_CRC_SOURCE_PLANE1, INTEL_PIPE_CRC_SOURCE_PLANE2, - INTEL_PIPE_CRC_SOURCE_PF, + INTEL_PIPE_CRC_SOURCE_PLANE3, + INTEL_PIPE_CRC_SOURCE_PLANE4, + INTEL_PIPE_CRC_SOURCE_PLANE5, + INTEL_PIPE_CRC_SOURCE_PLANE6, + INTEL_PIPE_CRC_SOURCE_PLANE7, INTEL_PIPE_CRC_SOURCE_PIPE, /* TV/DP on pre-gen5/vlv can't use the pipe source. */ INTEL_PIPE_CRC_SOURCE_TV, @@ -1468,13 +1483,6 @@ struct intel_cdclk_state { struct drm_i915_private { struct drm_device drm; - struct kmem_cache *objects; - struct kmem_cache *vmas; - struct kmem_cache *luts; - struct kmem_cache *requests; - struct kmem_cache *dependencies; - struct kmem_cache *priorities; - const struct intel_device_info __info; /* Use INTEL_INFO() to access. */ struct intel_runtime_info __runtime; /* Use RUNTIME_INFO() to access. */ struct intel_driver_caps caps; @@ -1503,8 +1511,6 @@ struct drm_i915_private { */ resource_size_t stolen_usable_size; /* Total size minus reserved ranges */ - void __iomem *regs; - struct intel_uncore uncore; struct i915_virtual_gpu vgpu; @@ -1831,13 +1837,16 @@ struct drm_i915_private { bool valid; bool is_16gb_dimm; u8 num_channels; - enum dram_rank { - I915_DRAM_RANK_INVALID = 0, - I915_DRAM_RANK_SINGLE, - I915_DRAM_RANK_DUAL - } rank; + u8 ranks; u32 bandwidth_kbps; bool symmetric_memory; + enum intel_dram_type { + INTEL_DRAM_UNKNOWN, + INTEL_DRAM_DDR3, + INTEL_DRAM_DDR4, + INTEL_DRAM_LPDDR3, + INTEL_DRAM_LPDDR4 + } type; } dram_info; struct i915_runtime_pm runtime_pm; @@ -1997,6 +2006,7 @@ struct drm_i915_private { struct list_head hwsp_free_list; } timelines; + intel_engine_mask_t active_engines; struct list_head active_rings; struct list_head closed_vma; u32 active_requests; @@ -2011,12 +2021,6 @@ struct drm_i915_private { intel_wakeref_t awake; /** - * The number of times we have woken up. - */ - unsigned int epoch; -#define I915_EPOCH_INVALID 0 - - /** * We leave the user IRQ off as much as possible, * but this means that requests will finish and never * be retired once the system goes idle. Set a timer to @@ -2039,6 +2043,14 @@ struct drm_i915_private { struct i915_vma *scratch; } gt; + /* For i945gm vblank irq vs. C3 workaround */ + struct { + struct work_struct work; + struct pm_qos_request pm_qos; + u8 c3_disable_latency; + u8 enabled; + } i945gm_vblank; + /* perform PHY state sanity checks? 
*/ bool chv_phy_assert[2]; @@ -2055,18 +2067,25 @@ struct drm_i915_private { struct i915_pmu pmu; + struct i915_hdcp_comp_master *hdcp_master; + bool hdcp_comp_added; + + /* Mutex to protect the above hdcp component related values. */ + struct mutex hdcp_comp_mutex; + /* * NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch * will be rejected. Instead look for a better place. */ }; +struct dram_dimm_info { + u8 size, width, ranks; +}; + struct dram_channel_info { - struct info { - u8 size, width; - enum dram_rank rank; - } l_info, s_info; - enum dram_rank rank; + struct dram_dimm_info dimm_l, dimm_s; + u8 ranks; bool is_16gb_dimm; }; @@ -2095,6 +2114,11 @@ static inline struct drm_i915_private *huc_to_i915(struct intel_huc *huc) return container_of(huc, struct drm_i915_private, huc); } +static inline struct drm_i915_private *uncore_to_i915(struct intel_uncore *uncore) +{ + return container_of(uncore, struct drm_i915_private, uncore); +} + /* Simple iterator over all initialised engines */ #define for_each_engine(engine__, dev_priv__, id__) \ for ((id__) = 0; \ @@ -2104,7 +2128,7 @@ static inline struct drm_i915_private *huc_to_i915(struct intel_huc *huc) /* Iterator over subset of engines selected by mask */ #define for_each_engine_masked(engine__, dev_priv__, mask__, tmp__) \ - for ((tmp__) = (mask__) & INTEL_INFO(dev_priv__)->ring_mask; \ + for ((tmp__) = (mask__) & INTEL_INFO(dev_priv__)->engine_mask; \ (tmp__) ? \ ((engine__) = (dev_priv__)->engine[__mask_next_bit(tmp__)]), 1 : \ 0;) @@ -2308,6 +2332,7 @@ static inline unsigned int i915_sg_segment_size(void) #define IS_COFFEELAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_COFFEELAKE) #define IS_CANNONLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_CANNONLAKE) #define IS_ICELAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_ICELAKE) +#define IS_ELKHARTLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_ELKHARTLAKE) #define IS_MOBILE(dev_priv) (INTEL_INFO(dev_priv)->is_mobile) #define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \ (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00) @@ -2346,7 +2371,8 @@ static inline unsigned int i915_sg_segment_size(void) INTEL_DEVID(dev_priv) == 0x5915 || \ INTEL_DEVID(dev_priv) == 0x591E) #define IS_AML_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x591C || \ - INTEL_DEVID(dev_priv) == 0x87C0) + INTEL_DEVID(dev_priv) == 0x87C0 || \ + INTEL_DEVID(dev_priv) == 0x87CA) #define IS_SKL_GT2(dev_priv) (IS_SKYLAKE(dev_priv) && \ INTEL_INFO(dev_priv)->gt == 2) #define IS_SKL_GT3(dev_priv) (IS_SKYLAKE(dev_priv) && \ @@ -2425,24 +2451,19 @@ static inline unsigned int i915_sg_segment_size(void) #define IS_GEN9_LP(dev_priv) (IS_GEN(dev_priv, 9) && IS_LP(dev_priv)) #define IS_GEN9_BC(dev_priv) (IS_GEN(dev_priv, 9) && !IS_LP(dev_priv)) -#define ENGINE_MASK(id) BIT(id) -#define RENDER_RING ENGINE_MASK(RCS) -#define BSD_RING ENGINE_MASK(VCS) -#define BLT_RING ENGINE_MASK(BCS) -#define VEBOX_RING ENGINE_MASK(VECS) -#define BSD2_RING ENGINE_MASK(VCS2) -#define BSD3_RING ENGINE_MASK(VCS3) -#define BSD4_RING ENGINE_MASK(VCS4) -#define VEBOX2_RING ENGINE_MASK(VECS2) -#define ALL_ENGINES (~0) - -#define HAS_ENGINE(dev_priv, id) \ - (!!(INTEL_INFO(dev_priv)->ring_mask & ENGINE_MASK(id))) - -#define HAS_BSD(dev_priv) HAS_ENGINE(dev_priv, VCS) -#define HAS_BSD2(dev_priv) HAS_ENGINE(dev_priv, VCS2) -#define HAS_BLT(dev_priv) HAS_ENGINE(dev_priv, BCS) -#define HAS_VEBOX(dev_priv) HAS_ENGINE(dev_priv, VECS) +#define ALL_ENGINES (~0u) +#define HAS_ENGINE(dev_priv, id) (INTEL_INFO(dev_priv)->engine_mask & BIT(id)) + +#define 
ENGINE_INSTANCES_MASK(dev_priv, first, count) ({ \ + unsigned int first__ = (first); \ + unsigned int count__ = (count); \ + (INTEL_INFO(dev_priv)->engine_mask & \ + GENMASK(first__ + count__ - 1, first__)) >> first__; \ +}) +#define VDBOX_MASK(dev_priv) \ + ENGINE_INSTANCES_MASK(dev_priv, VCS0, I915_MAX_VCS) +#define VEBOX_MASK(dev_priv) \ + ENGINE_INSTANCES_MASK(dev_priv, VECS0, I915_MAX_VECS) #define HAS_LLC(dev_priv) (INTEL_INFO(dev_priv)->has_llc) #define HAS_SNOOP(dev_priv) (INTEL_INFO(dev_priv)->has_snoop) @@ -2461,13 +2482,11 @@ static inline unsigned int i915_sg_segment_size(void) #define HAS_EXECLISTS(dev_priv) HAS_LOGICAL_RING_CONTEXTS(dev_priv) -#define INTEL_PPGTT(dev_priv) (INTEL_INFO(dev_priv)->ppgtt) +#define INTEL_PPGTT(dev_priv) (INTEL_INFO(dev_priv)->ppgtt_type) #define HAS_PPGTT(dev_priv) \ (INTEL_PPGTT(dev_priv) != INTEL_PPGTT_NONE) #define HAS_FULL_PPGTT(dev_priv) \ (INTEL_PPGTT(dev_priv) >= INTEL_PPGTT_FULL) -#define HAS_FULL_48BIT_PPGTT(dev_priv) \ - (INTEL_PPGTT(dev_priv) >= INTEL_PPGTT_FULL_4LVL) #define HAS_PAGE_SIZES(dev_priv, sizes) ({ \ GEM_BUG_ON((sizes) == 0); \ @@ -2511,6 +2530,7 @@ static inline unsigned int i915_sg_segment_size(void) #define HAS_DDI(dev_priv) (INTEL_INFO(dev_priv)->display.has_ddi) #define HAS_FPGA_DBG_UNCLAIMED(dev_priv) (INTEL_INFO(dev_priv)->has_fpga_dbg) #define HAS_PSR(dev_priv) (INTEL_INFO(dev_priv)->display.has_psr) +#define HAS_TRANSCODER_EDP(dev_priv) (INTEL_INFO(dev_priv)->trans_offsets[TRANSCODER_EDP] != 0) #define HAS_RC6(dev_priv) (INTEL_INFO(dev_priv)->has_rc6) #define HAS_RC6p(dev_priv) (INTEL_INFO(dev_priv)->has_rc6p) @@ -2557,6 +2577,7 @@ static inline unsigned int i915_sg_segment_size(void) #define INTEL_PCH_KBP_DEVICE_ID_TYPE 0xA280 #define INTEL_PCH_CNP_DEVICE_ID_TYPE 0xA300 #define INTEL_PCH_CNP_LP_DEVICE_ID_TYPE 0x9D80 +#define INTEL_PCH_CMP_DEVICE_ID_TYPE 0x0280 #define INTEL_PCH_ICP_DEVICE_ID_TYPE 0x3480 #define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100 #define INTEL_PCH_P3X_DEVICE_ID_TYPE 0x7000 @@ -2566,8 +2587,6 @@ static inline unsigned int i915_sg_segment_size(void) #define INTEL_PCH_ID(dev_priv) ((dev_priv)->pch_id) #define HAS_PCH_ICP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_ICP) #define HAS_PCH_CNP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_CNP) -#define HAS_PCH_CNP_LP(dev_priv) \ - (INTEL_PCH_ID(dev_priv) == INTEL_PCH_CNP_LP_DEVICE_ID_TYPE) #define HAS_PCH_KBP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_KBP) #define HAS_PCH_SPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_SPT) #define HAS_PCH_LPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_LPT) @@ -2799,8 +2818,6 @@ void i915_gem_load_init_fences(struct drm_i915_private *dev_priv); int i915_gem_freeze(struct drm_i915_private *dev_priv); int i915_gem_freeze_late(struct drm_i915_private *dev_priv); -void *i915_gem_object_alloc(struct drm_i915_private *dev_priv); -void i915_gem_object_free(struct drm_i915_gem_object *obj); void i915_gem_object_init(struct drm_i915_gem_object *obj, const struct drm_i915_gem_object_ops *ops); struct drm_i915_gem_object * @@ -2973,6 +2990,14 @@ i915_coherent_map_type(struct drm_i915_private *i915) void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj, enum i915_map_type type); +void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj, + unsigned long offset, + unsigned long size); +static inline void i915_gem_object_flush_map(struct drm_i915_gem_object *obj) +{ + __i915_gem_object_flush_map(obj, 0, obj->base.size); +} + /** * i915_gem_object_unpin_map - releases an earlier mapping * @obj: the object to unmap 
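ENGINE_INSTANCES_MASK() reduces the flat engine_mask to a per-class instance bitmap: mask the class's contiguous bit range with GENMASK() and shift it down so instance 0 lands in bit 0, which is how VDBOX_MASK()/VEBOX_MASK() report which VCS/VECS instances exist. A self-contained sketch of the same bit arithmetic; genmask() is a local stand-in for the kernel macro and the engine bit positions are illustrative.

#include <stdio.h>

/* Local stand-in for the kernel's GENMASK(h, l): bits l..h set, inclusive. */
static unsigned int genmask(unsigned int h, unsigned int l)
{
	return (~0u >> (31 - h)) & (~0u << l);
}

/*
 * Mask out the class's bit range from the flat engine bitmask, then shift
 * it down so the first instance occupies bit 0.
 */
static unsigned int instances_mask(unsigned int engine_mask,
				   unsigned int first, unsigned int count)
{
	return (engine_mask & genmask(first + count - 1, first)) >> first;
}

int main(void)
{
	enum { RCS0, BCS0, VCS0, VCS1, VCS2, VCS3, VECS0, VECS1 };
	unsigned int engines = 1u << RCS0 | 1u << BCS0 | 1u << VCS0 | 1u << VCS2;

	/* VDBOX-style query: which of the 4 possible VCS instances exist? */
	printf("0x%x\n", instances_mask(engines, VCS0, 4));	/* prints 0x5 */
	return 0;
}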
@@ -3001,7 +3026,12 @@ i915_gem_obj_finish_shmem_access(struct drm_i915_gem_object *obj) i915_gem_object_unpin_pages(obj); } -int __must_check i915_mutex_lock_interruptible(struct drm_device *dev); +static inline int __must_check +i915_mutex_lock_interruptible(struct drm_device *dev) +{ + return mutex_lock_interruptible(&dev->struct_mutex); +} + int i915_gem_dumb_create(struct drm_file *file_priv, struct drm_device *dev, struct drm_mode_create_dumb *args); @@ -3015,22 +3045,14 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old, int __must_check i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno); -struct i915_request * -i915_gem_find_active_request(struct intel_engine_cs *engine); - -static inline bool i915_reset_backoff(struct i915_gpu_error *error) -{ - return unlikely(test_bit(I915_RESET_BACKOFF, &error->flags)); -} - -static inline bool i915_terminally_wedged(struct i915_gpu_error *error) +static inline bool __i915_wedged(struct i915_gpu_error *error) { return unlikely(test_bit(I915_WEDGED, &error->flags)); } -static inline bool i915_reset_backoff_or_wedged(struct i915_gpu_error *error) +static inline bool i915_reset_failed(struct drm_i915_private *i915) { - return i915_reset_backoff(error) | i915_terminally_wedged(error); + return __i915_wedged(&i915->gpu_error); } static inline u32 i915_reset_count(struct i915_gpu_error *error) @@ -3055,14 +3077,13 @@ void i915_gem_fini(struct drm_i915_private *dev_priv); void i915_gem_cleanup_engines(struct drm_i915_private *dev_priv); int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv, unsigned int flags, long timeout); -int __must_check i915_gem_suspend(struct drm_i915_private *dev_priv); +void i915_gem_suspend(struct drm_i915_private *dev_priv); void i915_gem_suspend_late(struct drm_i915_private *dev_priv); void i915_gem_resume(struct drm_i915_private *dev_priv); vm_fault_t i915_gem_fault(struct vm_fault *vmf); int i915_gem_object_wait(struct drm_i915_gem_object *obj, unsigned int flags, - long timeout, - struct intel_rps_client *rps); + long timeout); int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj, unsigned int flags, const struct i915_sched_attr *attr); @@ -3105,7 +3126,6 @@ struct drm_i915_fence_reg * i915_reserve_fence(struct drm_i915_private *dev_priv); void i915_unreserve_fence(struct drm_i915_fence_reg *fence); -void i915_gem_revoke_fences(struct drm_i915_private *dev_priv); void i915_gem_restore_fences(struct drm_i915_private *dev_priv); void i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv); @@ -3141,7 +3161,7 @@ int i915_perf_add_config_ioctl(struct drm_device *dev, void *data, int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data, struct drm_file *file); void i915_oa_init_reg_state(struct intel_engine_cs *engine, - struct i915_gem_context *ctx, + struct intel_context *ce, u32 *reg_state); /* i915_gem_evict.c */ @@ -3456,18 +3476,21 @@ static inline u64 intel_rc6_residency_us(struct drm_i915_private *dev_priv, return DIV_ROUND_UP_ULL(intel_rc6_residency_ns(dev_priv, reg), 1000); } -#define I915_READ8(reg) dev_priv->uncore.funcs.mmio_readb(dev_priv, (reg), true) -#define I915_WRITE8(reg, val) dev_priv->uncore.funcs.mmio_writeb(dev_priv, (reg), (val), true) +#define __I915_REG_OP(op__, dev_priv__, ...) 
\ + intel_uncore_##op__(&(dev_priv__)->uncore, __VA_ARGS__) + +#define I915_READ8(reg__) __I915_REG_OP(read8, dev_priv, (reg__)) +#define I915_WRITE8(reg__, val__) __I915_REG_OP(write8, dev_priv, (reg__), (val__)) -#define I915_READ16(reg) dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), true) -#define I915_WRITE16(reg, val) dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), true) -#define I915_READ16_NOTRACE(reg) dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), false) -#define I915_WRITE16_NOTRACE(reg, val) dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), false) +#define I915_READ16(reg__) __I915_REG_OP(read16, dev_priv, (reg__)) +#define I915_WRITE16(reg__, val__) __I915_REG_OP(write16, dev_priv, (reg__), (val__)) +#define I915_READ16_NOTRACE(reg__) __I915_REG_OP(read16_notrace, dev_priv, (reg__)) +#define I915_WRITE16_NOTRACE(reg__, val__) __I915_REG_OP(write16_notrace, dev_priv, (reg__), (val__)) -#define I915_READ(reg) dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), true) -#define I915_WRITE(reg, val) dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), true) -#define I915_READ_NOTRACE(reg) dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), false) -#define I915_WRITE_NOTRACE(reg, val) dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), false) +#define I915_READ(reg__) __I915_REG_OP(read, dev_priv, (reg__)) +#define I915_WRITE(reg__, val__) __I915_REG_OP(write, dev_priv, (reg__), (val__)) +#define I915_READ_NOTRACE(reg__) __I915_REG_OP(read_notrace, dev_priv, (reg__)) +#define I915_WRITE_NOTRACE(reg__, val__) __I915_REG_OP(write_notrace, dev_priv, (reg__), (val__)) /* Be very careful with read/write 64-bit values. On 32-bit machines, they * will be implemented using 2 32-bit writes in an arbitrary order with @@ -3483,46 +3506,12 @@ static inline u64 intel_rc6_residency_us(struct drm_i915_private *dev_priv, * * You have been warned. 
*/ -#define I915_READ64(reg) dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true) - -#define I915_READ64_2x32(lower_reg, upper_reg) ({ \ - u32 upper, lower, old_upper, loop = 0; \ - upper = I915_READ(upper_reg); \ - do { \ - old_upper = upper; \ - lower = I915_READ(lower_reg); \ - upper = I915_READ(upper_reg); \ - } while (upper != old_upper && loop++ < 2); \ - (u64)upper << 32 | lower; }) - -#define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg) -#define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg) - -#define __raw_read(x, s) \ -static inline uint##x##_t __raw_i915_read##x(const struct drm_i915_private *dev_priv, \ - i915_reg_t reg) \ -{ \ - return read##s(dev_priv->regs + i915_mmio_reg_offset(reg)); \ -} - -#define __raw_write(x, s) \ -static inline void __raw_i915_write##x(const struct drm_i915_private *dev_priv, \ - i915_reg_t reg, uint##x##_t val) \ -{ \ - write##s(val, dev_priv->regs + i915_mmio_reg_offset(reg)); \ -} -__raw_read(8, b) -__raw_read(16, w) -__raw_read(32, l) -__raw_read(64, q) - -__raw_write(8, b) -__raw_write(16, w) -__raw_write(32, l) -__raw_write(64, q) +#define I915_READ64(reg__) __I915_REG_OP(read64, dev_priv, (reg__)) +#define I915_READ64_2x32(lower_reg__, upper_reg__) \ + __I915_REG_OP(read64_2x32, dev_priv, (lower_reg__), (upper_reg__)) -#undef __raw_read -#undef __raw_write +#define POSTING_READ(reg__) __I915_REG_OP(posting_read, dev_priv, (reg__)) +#define POSTING_READ16(reg__) __I915_REG_OP(posting_read16, dev_priv, (reg__)) /* These are untraced mmio-accessors that are only valid to be used inside * critical sections, such as inside IRQ handlers, where forcewake is explicitly @@ -3550,10 +3539,10 @@ __raw_write(64, q) * therefore generally be serialised, by either the dev_priv->uncore.lock or * a more localised lock guarding all access to that bank of registers. */ -#define I915_READ_FW(reg__) __raw_i915_read32(dev_priv, (reg__)) -#define I915_WRITE_FW(reg__, val__) __raw_i915_write32(dev_priv, (reg__), (val__)) -#define I915_WRITE64_FW(reg__, val__) __raw_i915_write64(dev_priv, (reg__), (val__)) -#define POSTING_READ_FW(reg__) (void)I915_READ_FW(reg__) +#define I915_READ_FW(reg__) __I915_REG_OP(read_fw, dev_priv, (reg__)) +#define I915_WRITE_FW(reg__, val__) __I915_REG_OP(write_fw, dev_priv, (reg__), (val__)) +#define I915_WRITE64_FW(reg__, val__) __I915_REG_OP(write64_fw, dev_priv, (reg__), (val__)) +#define POSTING_READ_FW(reg__) __I915_REG_OP(posting_read_fw, dev_priv, (reg__)) /* "Broadcast RGB" property */ #define INTEL_BROADCAST_RGB_AUTO 0 diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 6728ea5c71d4..e506e43cfade 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -42,6 +42,7 @@ #include "i915_drv.h" #include "i915_gem_clflush.h" #include "i915_gemfs.h" +#include "i915_globals.h" #include "i915_reset.h" #include "i915_trace.h" #include "i915_vgpu.h" @@ -100,48 +101,7 @@ static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv, spin_unlock(&dev_priv->mm.object_stat_lock); } -static int -i915_gem_wait_for_error(struct i915_gpu_error *error) -{ - int ret; - - might_sleep(); - - /* - * Only wait 10 seconds for the gpu reset to complete to avoid hanging - * userspace. If it takes that long something really bad is going on and - * we should simply try to bail out and fail as gracefully as possible. 
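The I915_READ/I915_WRITE family is now a thin token-pasting dispatcher: __I915_REG_OP() glues the operation name onto an intel_uncore_ prefix and forwards the embedded uncore plus the remaining arguments, so the macros still expect a local variable literally named dev_priv. A minimal sketch of that dispatch shape with hypothetical demo_uncore_* backends (not real driver functions):

#include <stdio.h>
#include <stdint.h>

struct uncore { uint32_t regs[16]; };
struct device_priv { struct uncore uncore; };

static uint32_t demo_uncore_read(struct uncore *u, unsigned int reg)
{
	return u->regs[reg];
}

static void demo_uncore_write(struct uncore *u, unsigned int reg, uint32_t val)
{
	u->regs[reg] = val;
}

/*
 * Same shape as the patch: paste the operation name onto a fixed prefix and
 * forward the embedded uncore plus the remaining arguments.
 */
#define DEMO_REG_OP(op__, dev_priv__, ...) \
	demo_uncore_##op__(&(dev_priv__)->uncore, __VA_ARGS__)

#define DEMO_READ(reg__)		DEMO_REG_OP(read, dev_priv, (reg__))
#define DEMO_WRITE(reg__, val__)	DEMO_REG_OP(write, dev_priv, (reg__), (val__))

int main(void)
{
	struct device_priv priv = { 0 };
	struct device_priv *dev_priv = &priv;	/* the macros expect this name */

	DEMO_WRITE(3, 0xdeadbeef);
	printf("0x%x\n", (unsigned int)DEMO_READ(3));	/* prints 0xdeadbeef */
	return 0;
}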
- */ - ret = wait_event_interruptible_timeout(error->reset_queue, - !i915_reset_backoff(error), - I915_RESET_TIMEOUT); - if (ret == 0) { - DRM_ERROR("Timed out waiting for the gpu reset to complete\n"); - return -EIO; - } else if (ret < 0) { - return ret; - } else { - return 0; - } -} - -int i915_mutex_lock_interruptible(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = to_i915(dev); - int ret; - - ret = i915_gem_wait_for_error(&dev_priv->gpu_error); - if (ret) - return ret; - - ret = mutex_lock_interruptible(&dev->struct_mutex); - if (ret) - return ret; - - return 0; -} - -static u32 __i915_gem_park(struct drm_i915_private *i915) +static void __i915_gem_park(struct drm_i915_private *i915) { intel_wakeref_t wakeref; @@ -152,9 +112,7 @@ static u32 __i915_gem_park(struct drm_i915_private *i915) GEM_BUG_ON(!list_empty(&i915->gt.active_rings)); if (!i915->gt.awake) - return I915_EPOCH_INVALID; - - GEM_BUG_ON(i915->gt.epoch == I915_EPOCH_INVALID); + return; /* * Be paranoid and flush a concurrent interrupt to make sure @@ -183,7 +141,7 @@ static u32 __i915_gem_park(struct drm_i915_private *i915) intel_display_power_put(i915, POWER_DOMAIN_GT_IRQ, wakeref); - return i915->gt.epoch; + i915_globals_park(); } void i915_gem_park(struct drm_i915_private *i915) @@ -225,8 +183,7 @@ void i915_gem_unpark(struct drm_i915_private *i915) i915->gt.awake = intel_display_power_get(i915, POWER_DOMAIN_GT_IRQ); GEM_BUG_ON(!i915->gt.awake); - if (unlikely(++i915->gt.epoch == 0)) /* keep 0 as invalid */ - i915->gt.epoch = 1; + i915_globals_unpark(); intel_enable_gt_powersave(i915); i915_update_gfx_val(i915); @@ -459,8 +416,7 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj) static long i915_gem_object_wait_fence(struct dma_fence *fence, unsigned int flags, - long timeout, - struct intel_rps_client *rps_client) + long timeout) { struct i915_request *rq; @@ -478,27 +434,6 @@ i915_gem_object_wait_fence(struct dma_fence *fence, if (i915_request_completed(rq)) goto out; - /* - * This client is about to stall waiting for the GPU. In many cases - * this is undesirable and limits the throughput of the system, as - * many clients cannot continue processing user input/output whilst - * blocked. RPS autotuning may take tens of milliseconds to respond - * to the GPU load and thus incurs additional latency for the client. - * We can circumvent that by promoting the GPU frequency to maximum - * before we wait. This makes the GPU throttle up much more quickly - * (good for benchmarks and user experience, e.g. window animations), - * but at a cost of spending more power processing the workload - * (bad for battery). Not all clients even want their results - * immediately and for them we should just let the GPU select its own - * frequency to maximise efficiency. To prevent a single client from - * forcing the clocks too high for the whole system, we only allow - * each client to waitboost once in a busy period. 
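The deleted i915_gem_wait_for_error() is a textbook use of wait_event_interruptible_timeout(), whose return value is three-way: 0 means the timeout expired, a negative value means a signal interrupted the sleep, and a positive value means the condition became true with jiffies to spare. A kernel-style sketch of that convention (not a standalone program; the struct and field names here are illustrative only):

#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/wait.h>

struct reset_state {
        wait_queue_head_t queue;        /* woken when the reset completes */
        bool in_progress;
};

static int wait_for_reset(struct reset_state *rs)
{
        long ret;

        ret = wait_event_interruptible_timeout(rs->queue,
                                                !READ_ONCE(rs->in_progress),
                                                10 * HZ);
        if (ret == 0)           /* timed out: bail rather than hang forever */
                return -EIO;
        if (ret < 0)            /* interrupted, typically -ERESTARTSYS */
                return ret;
        return 0;               /* condition satisfied with time to spare */
}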
- */ - if (rps_client && !i915_request_started(rq)) { - if (INTEL_GEN(rq->i915) >= 6) - gen6_rps_boost(rq, rps_client); - } - timeout = i915_request_wait(rq, flags, timeout); out: @@ -511,8 +446,7 @@ out: static long i915_gem_object_wait_reservation(struct reservation_object *resv, unsigned int flags, - long timeout, - struct intel_rps_client *rps_client) + long timeout) { unsigned int seq = __read_seqcount_begin(&resv->seq); struct dma_fence *excl; @@ -530,8 +464,7 @@ i915_gem_object_wait_reservation(struct reservation_object *resv, for (i = 0; i < count; i++) { timeout = i915_gem_object_wait_fence(shared[i], - flags, timeout, - rps_client); + flags, timeout); if (timeout < 0) break; @@ -557,8 +490,7 @@ i915_gem_object_wait_reservation(struct reservation_object *resv, } if (excl && timeout >= 0) - timeout = i915_gem_object_wait_fence(excl, flags, timeout, - rps_client); + timeout = i915_gem_object_wait_fence(excl, flags, timeout); dma_fence_put(excl); @@ -652,30 +584,19 @@ i915_gem_object_wait_priority(struct drm_i915_gem_object *obj, * @obj: i915 gem object * @flags: how to wait (under a lock, for all rendering or just for writes etc) * @timeout: how long to wait - * @rps_client: client (user process) to charge for any waitboosting */ int i915_gem_object_wait(struct drm_i915_gem_object *obj, unsigned int flags, - long timeout, - struct intel_rps_client *rps_client) + long timeout) { might_sleep(); GEM_BUG_ON(timeout < 0); - timeout = i915_gem_object_wait_reservation(obj->resv, - flags, timeout, - rps_client); + timeout = i915_gem_object_wait_reservation(obj->resv, flags, timeout); return timeout < 0 ? timeout : 0; } -static struct intel_rps_client *to_rps_client(struct drm_file *file) -{ - struct drm_i915_file_private *fpriv = file->driver_priv; - - return &fpriv->rps_client; -} - static int i915_gem_phys_pwrite(struct drm_i915_gem_object *obj, struct drm_i915_gem_pwrite *args, @@ -698,28 +619,18 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj, return 0; } -void *i915_gem_object_alloc(struct drm_i915_private *dev_priv) -{ - return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL); -} - -void i915_gem_object_free(struct drm_i915_gem_object *obj) -{ - struct drm_i915_private *dev_priv = to_i915(obj->base.dev); - kmem_cache_free(dev_priv->objects, obj); -} - static int i915_gem_create(struct drm_file *file, struct drm_i915_private *dev_priv, - u64 size, + u64 *size_p, u32 *handle_p) { struct drm_i915_gem_object *obj; - int ret; u32 handle; + u64 size; + int ret; - size = roundup(size, PAGE_SIZE); + size = round_up(*size_p, PAGE_SIZE); if (size == 0) return -EINVAL; @@ -735,6 +646,7 @@ i915_gem_create(struct drm_file *file, return ret; *handle_p = handle; + *size_p = obj->base.size; return 0; } @@ -747,7 +659,7 @@ i915_gem_dumb_create(struct drm_file *file, args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64); args->size = args->pitch * args->height; return i915_gem_create(file, to_i915(dev), - args->size, &args->handle); + &args->size, &args->handle); } static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj) @@ -772,7 +684,7 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data, i915_gem_flush_free_objects(dev_priv); return i915_gem_create(file, dev_priv, - args->size, &args->handle); + &args->size, &args->handle); } static inline enum fb_op_origin @@ -881,8 +793,7 @@ int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj, ret = i915_gem_object_wait(obj, I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED, - MAX_SCHEDULE_TIMEOUT, - NULL); + 
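i915_gem_create() now rounds the requested size up to a whole number of pages and reports the actual object size back through *size_p, so userspace learns what was really allocated. A small standalone sketch of that round-up-and-report pattern, using a local page-size constant and helper instead of the kernel's PAGE_SIZE/round_up():

#include <stdint.h>
#include <stdio.h>

#define SKETCH_PAGE_SIZE 4096ull        /* a power of two, like PAGE_SIZE */

/* Round up to the next multiple of a power-of-two alignment. */
static uint64_t round_up_pow2(uint64_t size, uint64_t align)
{
        return (size + align - 1) & ~(align - 1);
}

static int create_object(uint64_t *size_inout)
{
        uint64_t size = round_up_pow2(*size_inout, SKETCH_PAGE_SIZE);

        if (size == 0 || size < *size_inout)    /* zero, or wrapped around */
                return -1;

        /* ... allocate 'size' bytes of backing storage here ... */

        *size_inout = size;     /* report the real, page-aligned size back */
        return 0;
}

int main(void)
{
        uint64_t sz = 5000;

        if (create_object(&sz) == 0)
                printf("allocated %llu bytes\n", (unsigned long long)sz);
        return 0;
}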
MAX_SCHEDULE_TIMEOUT); if (ret) return ret; @@ -934,8 +845,7 @@ int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj, I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED | I915_WAIT_ALL, - MAX_SCHEDULE_TIMEOUT, - NULL); + MAX_SCHEDULE_TIMEOUT); if (ret) return ret; @@ -1197,8 +1107,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data, ret = i915_gem_object_wait(obj, I915_WAIT_INTERRUPTIBLE, - MAX_SCHEDULE_TIMEOUT, - to_rps_client(file)); + MAX_SCHEDULE_TIMEOUT); if (ret) goto out; @@ -1497,8 +1406,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, ret = i915_gem_object_wait(obj, I915_WAIT_INTERRUPTIBLE | I915_WAIT_ALL, - MAX_SCHEDULE_TIMEOUT, - to_rps_client(file)); + MAX_SCHEDULE_TIMEOUT); if (ret) goto err; @@ -1578,17 +1486,37 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, if ((write_domain | read_domains) & I915_GEM_GPU_DOMAINS) return -EINVAL; - /* Having something in the write domain implies it's in the read + /* + * Having something in the write domain implies it's in the read * domain, and only that read domain. Enforce that in the request. */ - if (write_domain != 0 && read_domains != write_domain) + if (write_domain && read_domains != write_domain) return -EINVAL; + if (!read_domains) + return 0; + obj = i915_gem_object_lookup(file, args->handle); if (!obj) return -ENOENT; - /* Try to flush the object off the GPU without holding the lock. + /* + * Already in the desired write domain? Nothing for us to do! + * + * We apply a little bit of cunning here to catch a broader set of + * no-ops. If obj->write_domain is set, we must be in the same + * obj->read_domains, and only that domain. Therefore, if that + * obj->write_domain matches the request read_domains, we are + * already in the same read/write domain and can skip the operation, + * without having to further check the requested write_domain. + */ + if (READ_ONCE(obj->write_domain) == read_domains) { + err = 0; + goto out; + } + + /* + * Try to flush the object off the GPU without holding the lock. * We will repeat the flush holding the lock in the normal manner * to catch cases where we are gazumped. */ @@ -1596,8 +1524,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, I915_WAIT_INTERRUPTIBLE | I915_WAIT_PRIORITY | (write_domain ? I915_WAIT_ALL : 0), - MAX_SCHEDULE_TIMEOUT, - to_rps_client(file)); + MAX_SCHEDULE_TIMEOUT); if (err) goto out; @@ -1688,7 +1615,8 @@ __vma_matches(struct vm_area_struct *vma, struct file *filp, if (vma->vm_file != filp) return false; - return vma->vm_start == addr && (vma->vm_end - vma->vm_start) == size; + return vma->vm_start == addr && + (vma->vm_end - vma->vm_start) == PAGE_ALIGN(size); } /** @@ -1733,8 +1661,13 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data, * pages from. 
*/ if (!obj->base.filp) { - i915_gem_object_put(obj); - return -ENXIO; + addr = -ENXIO; + goto err; + } + + if (range_overflows(args->offset, args->size, (u64)obj->base.size)) { + addr = -EINVAL; + goto err; } addr = vm_mmap(obj->base.filp, 0, args->size, @@ -1748,8 +1681,8 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data, struct vm_area_struct *vma; if (down_write_killable(&mm->mmap_sem)) { - i915_gem_object_put(obj); - return -EINTR; + addr = -EINTR; + goto err; } vma = find_vma(mm, addr); if (vma && __vma_matches(vma, obj->base.filp, addr, args->size)) @@ -1767,12 +1700,10 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data, i915_gem_object_put(obj); args->addr_ptr = (u64)addr; - return 0; err: i915_gem_object_put(obj); - return addr; } @@ -1804,6 +1735,9 @@ static unsigned int tile_row_pages(const struct drm_i915_gem_object *obj) * 2 - Recognise WC as a separate cache domain so that we can flush the * delayed writes via GTT before performing direct access via WC. * + * 3 - Remove implicit set-domain(GTT) and synchronisation on initial + * pagefault; swapin remains transparent. + * * Restrictions: * * * snoopable objects cannot be accessed via the GTT. It can cause machine @@ -1831,7 +1765,7 @@ static unsigned int tile_row_pages(const struct drm_i915_gem_object *obj) */ int i915_gem_mmap_gtt_version(void) { - return 2; + return 3; } static inline struct i915_ggtt_view @@ -1887,6 +1821,7 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf) intel_wakeref_t wakeref; struct i915_vma *vma; pgoff_t page_offset; + int srcu; int ret; /* Sanity check that we allow writing into this object */ @@ -1898,27 +1833,21 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf) trace_i915_gem_object_fault(obj, page_offset, true, write); - /* Try to flush the object off the GPU first without holding the lock. - * Upon acquiring the lock, we will perform our sanity checks and then - * repeat the flush holding the lock in the normal manner to catch cases - * where we are gazumped. - */ - ret = i915_gem_object_wait(obj, - I915_WAIT_INTERRUPTIBLE, - MAX_SCHEDULE_TIMEOUT, - NULL); - if (ret) - goto err; - ret = i915_gem_object_pin_pages(obj); if (ret) goto err; wakeref = intel_runtime_pm_get(dev_priv); + srcu = i915_reset_trylock(dev_priv); + if (srcu < 0) { + ret = srcu; + goto err_rpm; + } + ret = i915_mutex_lock_interruptible(dev); if (ret) - goto err_rpm; + goto err_reset; /* Access to snoopable pages through the GTT is incoherent. */ if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev_priv)) { @@ -1926,7 +1855,6 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf) goto err_unlock; } - /* Now pin it into the GTT as needed */ vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE | @@ -1960,10 +1888,6 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf) goto err_unlock; } - ret = i915_gem_object_set_to_gtt_domain(obj, write); - if (ret) - goto err_unpin; - ret = i915_vma_pin_fence(vma); if (ret) goto err_unpin; @@ -1991,6 +1915,8 @@ err_unpin: __i915_vma_unpin(vma); err_unlock: mutex_unlock(&dev->struct_mutex); +err_reset: + i915_reset_unlock(dev_priv, srcu); err_rpm: intel_runtime_pm_put(dev_priv, wakeref); i915_gem_object_unpin_pages(obj); @@ -2003,7 +1929,7 @@ err: * fail). But any other -EIO isn't ours (e.g. swap in failure) * and so needs to be reported. 
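The new range_overflows() check in the mmap ioctl rejects an (offset, size) window that does not fit inside the object, without being fooled by unsigned wraparound of offset + size. A standalone sketch of an equivalent overflow-safe bounds test (the helper below is illustrative, not the driver's macro):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * True if [offset, offset + size) does not fit inside 'total' bytes.
 * Written so that offset + size is never computed, avoiding wraparound.
 */
static bool range_overflows_u64(uint64_t offset, uint64_t size, uint64_t total)
{
        return size > total || offset > total - size;
}

int main(void)
{
        /* A naive "offset + size > total" wraps and wrongly accepts this. */
        printf("%d\n", range_overflows_u64(UINT64_MAX - 4, 16, 4096));  /* 1 */
        printf("%d\n", range_overflows_u64(0, 4096, 4096));             /* 0 */
        return 0;
}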
*/ - if (!i915_terminally_wedged(&dev_priv->gpu_error)) + if (!i915_terminally_wedged(dev_priv)) return VM_FAULT_SIGBUS; /* else: fall through */ case -EAGAIN: @@ -2484,7 +2410,7 @@ rebuild_st: do { cond_resched(); page = shmem_read_mapping_page_gfp(mapping, i, gfp); - if (likely(!IS_ERR(page))) + if (!IS_ERR(page)) break; if (!*s) { @@ -2618,6 +2544,14 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj, lockdep_assert_held(&obj->mm.lock); + /* Make the pages coherent with the GPU (flushing any swapin). */ + if (obj->cache_dirty) { + obj->write_domain = 0; + if (i915_gem_object_has_struct_page(obj)) + drm_clflush_sg(pages); + obj->cache_dirty = false; + } + obj->mm.get_page.sg_pos = pages->sgl; obj->mm.get_page.sg_idx = 0; @@ -2819,6 +2753,33 @@ err_unlock: goto out_unlock; } +void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj, + unsigned long offset, + unsigned long size) +{ + enum i915_map_type has_type; + void *ptr; + + GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj)); + GEM_BUG_ON(range_overflows_t(typeof(obj->base.size), + offset, size, obj->base.size)); + + obj->mm.dirty = true; + + if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE) + return; + + ptr = page_unpack_bits(obj->mm.mapping, &has_type); + if (has_type == I915_MAP_WC) + return; + + drm_clflush_virt_range(ptr + offset, size); + if (size == obj->base.size) { + obj->write_domain &= ~I915_GEM_DOMAIN_CPU; + obj->cache_dirty = false; + } +} + static int i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj, const struct drm_i915_gem_pwrite *arg) @@ -2891,51 +2852,6 @@ i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj, return 0; } -static bool match_ring(struct i915_request *rq) -{ - struct drm_i915_private *dev_priv = rq->i915; - u32 ring = I915_READ(RING_START(rq->engine->mmio_base)); - - return ring == i915_ggtt_offset(rq->ring->vma); -} - -struct i915_request * -i915_gem_find_active_request(struct intel_engine_cs *engine) -{ - struct i915_request *request, *active = NULL; - unsigned long flags; - - /* - * We are called by the error capture, reset and to dump engine - * state at random points in time. In particular, note that neither is - * crucially ordered with an interrupt. After a hang, the GPU is dead - * and we assume that no more writes can happen (we waited long enough - * for all writes that were in transaction to be flushed) - adding an - * extra delay for a recent interrupt is pointless. Hence, we do - * not need an engine->irq_seqno_barrier() before the seqno reads. - * At all other times, we must assume the GPU is still running, but - * we only care about the snapshot of this moment. - */ - spin_lock_irqsave(&engine->timeline.lock, flags); - list_for_each_entry(request, &engine->timeline.requests, link) { - if (i915_request_completed(request)) - continue; - - if (!i915_request_started(request)) - break; - - /* More than one preemptible request may match! 
*/ - if (!match_ring(request)) - break; - - active = request; - break; - } - spin_unlock_irqrestore(&engine->timeline.lock, flags); - - return active; -} - static void i915_gem_retire_work_handler(struct work_struct *work) { @@ -2960,180 +2876,105 @@ i915_gem_retire_work_handler(struct work_struct *work) round_jiffies_up_relative(HZ)); } -static void shrink_caches(struct drm_i915_private *i915) +static bool switch_to_kernel_context_sync(struct drm_i915_private *i915, + unsigned long mask) { - /* - * kmem_cache_shrink() discards empty slabs and reorders partially - * filled slabs to prioritise allocating from the mostly full slabs, - * with the aim of reducing fragmentation. - */ - kmem_cache_shrink(i915->priorities); - kmem_cache_shrink(i915->dependencies); - kmem_cache_shrink(i915->requests); - kmem_cache_shrink(i915->luts); - kmem_cache_shrink(i915->vmas); - kmem_cache_shrink(i915->objects); -} - -struct sleep_rcu_work { - union { - struct rcu_head rcu; - struct work_struct work; - }; - struct drm_i915_private *i915; - unsigned int epoch; -}; + bool result = true; -static inline bool -same_epoch(struct drm_i915_private *i915, unsigned int epoch) -{ /* - * There is a small chance that the epoch wrapped since we started - * sleeping. If we assume that epoch is at least a u32, then it will - * take at least 2^32 * 100ms for it to wrap, or about 326 years. + * Even if we fail to switch, give whatever is running a small chance + * to save itself before we report the failure. Yes, this may be a + * false positive due to e.g. ENOMEM, caveat emptor! */ - return epoch == READ_ONCE(i915->gt.epoch); -} - -static void __sleep_work(struct work_struct *work) -{ - struct sleep_rcu_work *s = container_of(work, typeof(*s), work); - struct drm_i915_private *i915 = s->i915; - unsigned int epoch = s->epoch; - - kfree(s); - if (same_epoch(i915, epoch)) - shrink_caches(i915); -} - -static void __sleep_rcu(struct rcu_head *rcu) -{ - struct sleep_rcu_work *s = container_of(rcu, typeof(*s), rcu); - struct drm_i915_private *i915 = s->i915; + if (i915_gem_switch_to_kernel_context(i915, mask)) + result = false; - destroy_rcu_head(&s->rcu); + if (i915_gem_wait_for_idle(i915, + I915_WAIT_LOCKED | + I915_WAIT_FOR_IDLE_BOOST, + I915_GEM_IDLE_TIMEOUT)) + result = false; + + if (!result) { + if (i915_modparams.reset) { /* XXX hide warning from gem_eio */ + dev_err(i915->drm.dev, + "Failed to idle engines, declaring wedged!\n"); + GEM_TRACE_DUMP(); + } - if (same_epoch(i915, s->epoch)) { - INIT_WORK(&s->work, __sleep_work); - queue_work(i915->wq, &s->work); - } else { - kfree(s); + /* Forcibly cancel outstanding work and leave the gpu quiet. */ + i915_gem_set_wedged(i915); } -} -static inline bool -new_requests_since_last_retire(const struct drm_i915_private *i915) -{ - return (READ_ONCE(i915->gt.active_requests) || - work_pending(&i915->gt.idle_work.work)); + i915_retire_requests(i915); /* ensure we flush after wedging */ + return result; } -static void assert_kernel_context_is_current(struct drm_i915_private *i915) +static bool load_power_context(struct drm_i915_private *i915) { - struct intel_engine_cs *engine; - enum intel_engine_id id; + /* Force loading the kernel context on all engines */ + if (!switch_to_kernel_context_sync(i915, ALL_ENGINES)) + return false; - if (i915_terminally_wedged(&i915->gpu_error)) - return; + /* + * Immediately park the GPU so that we enable powersaving and + * treat it as idle. 
The next time we issue a request, we will + * unpark and start using the engine->pinned_default_state, otherwise + * it is in limbo and an early reset may fail. + */ + __i915_gem_park(i915); - GEM_BUG_ON(i915->gt.active_requests); - for_each_engine(engine, i915, id) { - GEM_BUG_ON(__i915_active_request_peek(&engine->timeline.last_request)); - GEM_BUG_ON(engine->last_retired_context != - to_intel_context(i915->kernel_context, engine)); - } + return true; } static void i915_gem_idle_work_handler(struct work_struct *work) { - struct drm_i915_private *dev_priv = - container_of(work, typeof(*dev_priv), gt.idle_work.work); - unsigned int epoch = I915_EPOCH_INVALID; + struct drm_i915_private *i915 = + container_of(work, typeof(*i915), gt.idle_work.work); bool rearm_hangcheck; - if (!READ_ONCE(dev_priv->gt.awake)) + if (!READ_ONCE(i915->gt.awake)) return; - if (READ_ONCE(dev_priv->gt.active_requests)) + if (READ_ONCE(i915->gt.active_requests)) return; - /* - * Flush out the last user context, leaving only the pinned - * kernel context resident. When we are idling on the kernel_context, - * no more new requests (with a context switch) are emitted and we - * can finally rest. A consequence is that the idle work handler is - * always called at least twice before idling (and if the system is - * idle that implies a round trip through the retire worker). - */ - mutex_lock(&dev_priv->drm.struct_mutex); - i915_gem_switch_to_kernel_context(dev_priv); - mutex_unlock(&dev_priv->drm.struct_mutex); - - GEM_TRACE("active_requests=%d (after switch-to-kernel-context)\n", - READ_ONCE(dev_priv->gt.active_requests)); - - /* - * Wait for last execlists context complete, but bail out in case a - * new request is submitted. As we don't trust the hardware, we - * continue on if the wait times out. This is necessary to allow - * the machine to suspend even if the hardware dies, and we will - * try to recover in resume (after depriving the hardware of power, - * it may be in a better mmod). - */ - __wait_for(if (new_requests_since_last_retire(dev_priv)) return, - intel_engines_are_idle(dev_priv), - I915_IDLE_ENGINES_TIMEOUT * 1000, - 10, 500); - rearm_hangcheck = - cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work); + cancel_delayed_work_sync(&i915->gpu_error.hangcheck_work); - if (!mutex_trylock(&dev_priv->drm.struct_mutex)) { + if (!mutex_trylock(&i915->drm.struct_mutex)) { /* Currently busy, come back later */ - mod_delayed_work(dev_priv->wq, - &dev_priv->gt.idle_work, + mod_delayed_work(i915->wq, + &i915->gt.idle_work, msecs_to_jiffies(50)); goto out_rearm; } /* - * New request retired after this work handler started, extend active - * period until next instance of the work. + * Flush out the last user context, leaving only the pinned + * kernel context resident. Should anything unfortunate happen + * while we are idle (such as the GPU being power cycled), no users + * will be harmed. 
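The reworked idle worker keeps the "currently busy, come back later" idiom: it never blocks on struct_mutex from inside the workqueue, it simply reschedules itself a little later with mod_delayed_work(). A kernel-style sketch of that idiom with hypothetical names (not a standalone program):

#include <linux/jiffies.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>

struct my_dev {
        struct mutex lock;
        struct workqueue_struct *wq;
        struct delayed_work idle_work;
};

static void idle_work_fn(struct work_struct *work)
{
        struct my_dev *dev = container_of(work, struct my_dev, idle_work.work);

        if (!mutex_trylock(&dev->lock)) {
                /*
                 * Someone else holds the lock: don't stall the workqueue,
                 * retry a little later instead.
                 */
                mod_delayed_work(dev->wq, &dev->idle_work,
                                 msecs_to_jiffies(50));
                return;
        }

        /* ... do the idle/park work under dev->lock ... */

        mutex_unlock(&dev->lock);
}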
*/ - if (new_requests_since_last_retire(dev_priv)) - goto out_unlock; + if (!work_pending(&i915->gt.idle_work.work) && + !i915->gt.active_requests) { + ++i915->gt.active_requests; /* don't requeue idle */ - epoch = __i915_gem_park(dev_priv); + switch_to_kernel_context_sync(i915, i915->gt.active_engines); - assert_kernel_context_is_current(dev_priv); + if (!--i915->gt.active_requests) { + __i915_gem_park(i915); + rearm_hangcheck = false; + } + } - rearm_hangcheck = false; -out_unlock: - mutex_unlock(&dev_priv->drm.struct_mutex); + mutex_unlock(&i915->drm.struct_mutex); out_rearm: if (rearm_hangcheck) { - GEM_BUG_ON(!dev_priv->gt.awake); - i915_queue_hangcheck(dev_priv); - } - - /* - * When we are idle, it is an opportune time to reap our caches. - * However, we have many objects that utilise RCU and the ordered - * i915->wq that this work is executing on. To try and flush any - * pending frees now we are idle, we first wait for an RCU grace - * period, and then queue a task (that will run last on the wq) to - * shrink and re-optimize the caches. - */ - if (same_epoch(dev_priv, epoch)) { - struct sleep_rcu_work *s = kmalloc(sizeof(*s), GFP_KERNEL); - if (s) { - init_rcu_head(&s->rcu); - s->i915 = dev_priv; - s->epoch = epoch; - call_rcu(&s->rcu, __sleep_rcu); - } + GEM_BUG_ON(!i915->gt.awake); + i915_queue_hangcheck(i915); } } @@ -3167,7 +3008,7 @@ void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file) list_del(&lut->obj_link); list_del(&lut->ctx_link); - kmem_cache_free(i915->luts, lut); + i915_lut_handle_free(lut); __i915_gem_object_release_unless_active(obj); } @@ -3230,8 +3071,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) I915_WAIT_INTERRUPTIBLE | I915_WAIT_PRIORITY | I915_WAIT_ALL, - to_wait_timeout(args->timeout_ns), - to_rps_client(file)); + to_wait_timeout(args->timeout_ns)); if (args->timeout_ns > 0) { args->timeout_ns -= ktime_to_ns(ktime_sub(ktime_get(), start)); @@ -3300,7 +3140,7 @@ wait_for_timelines(struct drm_i915_private *i915, * stalls, so allow the gpu to boost to maximum clocks. */ if (flags & I915_WAIT_FOR_IDLE_BOOST) - gen6_rps_boost(rq, NULL); + gen6_rps_boost(rq); timeout = i915_request_wait(rq, flags, timeout); i915_request_put(rq); @@ -3336,19 +3176,11 @@ int i915_gem_wait_for_idle(struct drm_i915_private *i915, lockdep_assert_held(&i915->drm.struct_mutex); - if (GEM_SHOW_DEBUG() && !timeout) { - /* Presume that timeout was non-zero to begin with! */ - dev_warn(&i915->drm.pdev->dev, - "Missed idle-completion interrupt!\n"); - GEM_TRACE_DUMP(); - } - err = wait_for_engines(i915); if (err) return err; i915_retire_requests(i915); - GEM_BUG_ON(i915->gt.active_requests); } return 0; @@ -3395,8 +3227,7 @@ i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write) I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED | (write ? I915_WAIT_ALL : 0), - MAX_SCHEDULE_TIMEOUT, - NULL); + MAX_SCHEDULE_TIMEOUT); if (ret) return ret; @@ -3458,8 +3289,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write) I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED | (write ? 
I915_WAIT_ALL : 0), - MAX_SCHEDULE_TIMEOUT, - NULL); + MAX_SCHEDULE_TIMEOUT); if (ret) return ret; @@ -3574,8 +3404,7 @@ restart: I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED | I915_WAIT_ALL, - MAX_SCHEDULE_TIMEOUT, - NULL); + MAX_SCHEDULE_TIMEOUT); if (ret) return ret; @@ -3713,8 +3542,7 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data, ret = i915_gem_object_wait(obj, I915_WAIT_INTERRUPTIBLE, - MAX_SCHEDULE_TIMEOUT, - to_rps_client(file)); + MAX_SCHEDULE_TIMEOUT); if (ret) goto out; @@ -3840,8 +3668,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write) I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED | (write ? I915_WAIT_ALL : 0), - MAX_SCHEDULE_TIMEOUT, - NULL); + MAX_SCHEDULE_TIMEOUT); if (ret) return ret; @@ -3887,8 +3714,9 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file) long ret; /* ABI: return -EIO if already wedged */ - if (i915_terminally_wedged(&dev_priv->gpu_error)) - return -EIO; + ret = i915_terminally_wedged(dev_priv); + if (ret) + return ret; spin_lock(&file_priv->mm.lock); list_for_each_entry(request, &file_priv->mm.request_list, client_link) { @@ -3964,7 +3792,7 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj, } vma = i915_vma_instance(obj, vm, view); - if (unlikely(IS_ERR(vma))) + if (IS_ERR(vma)) return vma; if (i915_vma_misplaced(vma, size, alignment, flags)) { @@ -3998,20 +3826,17 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj, static __always_inline unsigned int __busy_read_flag(unsigned int id) { - /* Note that we could alias engines in the execbuf API, but - * that would be very unwise as it prevents userspace from - * fine control over engine selection. Ahem. - * - * This should be something like EXEC_MAX_ENGINE instead of - * I915_NUM_ENGINES. - */ - BUILD_BUG_ON(I915_NUM_ENGINES > 16); + if (id == I915_ENGINE_CLASS_INVALID) + return 0xffff0000; + + GEM_BUG_ON(id >= 16); return 0x10000 << id; } static __always_inline unsigned int __busy_write_id(unsigned int id) { - /* The uABI guarantees an active writer is also amongst the read + /* + * The uABI guarantees an active writer is also amongst the read * engines. This would be true if we accessed the activity tracking * under the lock, but as we perform the lookup of the object and * its activity locklessly we can not guarantee that the last_write @@ -4019,16 +3844,20 @@ static __always_inline unsigned int __busy_write_id(unsigned int id) * last_read - hence we always set both read and write busy for * last_write. */ - return id | __busy_read_flag(id); + if (id == I915_ENGINE_CLASS_INVALID) + return 0xffffffff; + + return (id + 1) | __busy_read_flag(id); } static __always_inline unsigned int __busy_set_if_active(const struct dma_fence *fence, unsigned int (*flag)(unsigned int id)) { - struct i915_request *rq; + const struct i915_request *rq; - /* We have to check the current hw status of the fence as the uABI + /* + * We have to check the current hw status of the fence as the uABI * guarantees forward progress. We could rely on the idle worker * to eventually flush us, but to minimise latency just ask the * hardware. 
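With this change the busy ioctl reports engine classes rather than driver-internal engine ids: the upper 16 bits form a bitmask of classes with outstanding readers, and the lower 16 bits carry "writer class + 1" (0 meaning no writer), a writer always also counting as a reader. A standalone sketch of encoding and decoding that layout, mirroring __busy_read_flag()/__busy_write_id() above (the helper names are mine):

#include <stdint.h>
#include <stdio.h>

/* Upper 16 bits: one bit per engine class with an active reader. */
static uint32_t busy_read_flag(unsigned int class)
{
        return 0x10000u << class;       /* class < 16 assumed */
}

/* Lower 16 bits: the writer's class + 1; a writer is also a reader. */
static uint32_t busy_write_id(unsigned int class)
{
        return (class + 1) | busy_read_flag(class);
}

int main(void)
{
        /* One class writing, a different class reading. */
        uint32_t busy = busy_write_id(0) | busy_read_flag(2);

        printf("reader classes: 0x%04x, writer class+1: %u\n",
               busy >> 16, busy & 0xffff);
        return 0;
}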
@@ -4039,11 +3868,11 @@ __busy_set_if_active(const struct dma_fence *fence, return 0; /* opencode to_request() in order to avoid const warnings */ - rq = container_of(fence, struct i915_request, fence); + rq = container_of(fence, const struct i915_request, fence); if (i915_request_completed(rq)) return 0; - return flag(rq->engine->uabi_id); + return flag(rq->engine->uabi_class); } static __always_inline unsigned int @@ -4077,7 +3906,8 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, if (!obj) goto out; - /* A discrepancy here is that we do not report the status of + /* + * A discrepancy here is that we do not report the status of * non-i915 fences, i.e. even though we may report the object as idle, * a call to set-domain may still stall waiting for foreign rendering. * This also means that wait-ioctl may report an object as busy, @@ -4277,7 +4107,7 @@ i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size) if (overflows_type(size, obj->base.size)) return ERR_PTR(-E2BIG); - obj = i915_gem_object_alloc(dev_priv); + obj = i915_gem_object_alloc(); if (obj == NULL) return ERR_PTR(-ENOMEM); @@ -4410,7 +4240,7 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915, drm_gem_object_release(&obj->base); i915_gem_info_remove_obj(i915, obj->base.size); - kfree(obj->bit_17); + bitmap_free(obj->bit_17); i915_gem_object_free(obj); GEM_BUG_ON(!atomic_read(&i915->mm.free_count)); @@ -4533,7 +4363,7 @@ void i915_gem_sanitize(struct drm_i915_private *i915) GEM_TRACE("\n"); wakeref = intel_runtime_pm_get(i915); - intel_uncore_forcewake_get(i915, FORCEWAKE_ALL); + intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL); /* * As we have just resumed the machine and woken the device up from @@ -4541,7 +4371,7 @@ void i915_gem_sanitize(struct drm_i915_private *i915) * back to defaults, recovering from whatever wedged state we left it * in and so worth trying to use the device once more. */ - if (i915_terminally_wedged(&i915->gpu_error)) + if (i915_terminally_wedged(i915)) i915_gem_unset_wedged(i915); /* @@ -4554,7 +4384,7 @@ void i915_gem_sanitize(struct drm_i915_private *i915) */ intel_engines_sanitize(i915, false); - intel_uncore_forcewake_put(i915, FORCEWAKE_ALL); + intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL); intel_runtime_pm_put(i915, wakeref); mutex_lock(&i915->drm.struct_mutex); @@ -4562,15 +4392,13 @@ void i915_gem_sanitize(struct drm_i915_private *i915) mutex_unlock(&i915->drm.struct_mutex); } -int i915_gem_suspend(struct drm_i915_private *i915) +void i915_gem_suspend(struct drm_i915_private *i915) { intel_wakeref_t wakeref; - int ret; GEM_TRACE("\n"); wakeref = intel_runtime_pm_get(i915); - intel_suspend_gt_powersave(i915); flush_workqueue(i915->wq); @@ -4585,22 +4413,7 @@ int i915_gem_suspend(struct drm_i915_private *i915) * state. Fortunately, the kernel_context is disposable and we do * not rely on its state. 
*/ - if (!i915_terminally_wedged(&i915->gpu_error)) { - ret = i915_gem_switch_to_kernel_context(i915); - if (ret) - goto err_unlock; - - ret = i915_gem_wait_for_idle(i915, - I915_WAIT_INTERRUPTIBLE | - I915_WAIT_LOCKED | - I915_WAIT_FOR_IDLE_BOOST, - MAX_SCHEDULE_TIMEOUT); - if (ret && ret != -EIO) - goto err_unlock; - - assert_kernel_context_is_current(i915); - } - i915_retire_requests(i915); /* ensure we flush after wedging */ + switch_to_kernel_context_sync(i915, i915->gt.active_engines); mutex_unlock(&i915->drm.struct_mutex); i915_reset_flush(i915); @@ -4613,23 +4426,15 @@ int i915_gem_suspend(struct drm_i915_private *i915) */ drain_delayed_work(&i915->gt.idle_work); - intel_uc_suspend(i915); - /* * Assert that we successfully flushed all the work and * reset the GPU back to its idle, low power state. */ - WARN_ON(i915->gt.awake); - if (WARN_ON(!intel_engines_are_idle(i915))) - i915_gem_set_wedged(i915); /* no hope, discard everything */ + GEM_BUG_ON(i915->gt.awake); - intel_runtime_pm_put(i915, wakeref); - return 0; + intel_uc_suspend(i915); -err_unlock: - mutex_unlock(&i915->drm.struct_mutex); intel_runtime_pm_put(i915, wakeref); - return ret; } void i915_gem_suspend_late(struct drm_i915_private *i915) @@ -4679,7 +4484,7 @@ void i915_gem_resume(struct drm_i915_private *i915) WARN_ON(i915->gt.awake); mutex_lock(&i915->drm.struct_mutex); - intel_uncore_forcewake_get(i915, FORCEWAKE_ALL); + intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL); i915_gem_restore_gtt_mappings(i915); i915_gem_restore_fences(i915); @@ -4697,17 +4502,18 @@ void i915_gem_resume(struct drm_i915_private *i915) intel_uc_resume(i915); /* Always reload a context for powersaving. */ - if (i915_gem_switch_to_kernel_context(i915)) + if (!load_power_context(i915)) goto err_wedged; out_unlock: - intel_uncore_forcewake_put(i915, FORCEWAKE_ALL); + intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL); mutex_unlock(&i915->drm.struct_mutex); return; err_wedged: - if (!i915_terminally_wedged(&i915->gpu_error)) { - DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n"); + if (!i915_reset_failed(i915)) { + dev_err(i915->drm.dev, + "Failed to re-initialize GPU, declaring it wedged!\n"); i915_gem_set_wedged(i915); } goto out_unlock; @@ -4777,6 +4583,8 @@ static int __i915_gem_restart_engines(void *data) } } + intel_engines_set_scheduler_caps(i915); + return 0; } @@ -4787,7 +4595,7 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv) dev_priv->gt.last_init_time = ktime_get(); /* Double layer security blanket, see i915_gem_init() */ - intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); if (HAS_EDRAM(dev_priv) && INTEL_GEN(dev_priv) < 9) I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf)); @@ -4812,10 +4620,9 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv) init_unused_rings(dev_priv); BUG_ON(!dev_priv->kernel_context); - if (i915_terminally_wedged(&dev_priv->gpu_error)) { - ret = -EIO; + ret = i915_terminally_wedged(dev_priv); + if (ret) goto out; - } ret = i915_ppgtt_init_hw(dev_priv); if (ret) { @@ -4843,14 +4650,14 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv) if (ret) goto cleanup_uc; - intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); return 0; cleanup_uc: intel_uc_fini_hw(dev_priv); out: - intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); return ret; } @@ -4860,7 +4667,7 @@ 
static int __intel_engines_record_defaults(struct drm_i915_private *i915) struct i915_gem_context *ctx; struct intel_engine_cs *engine; enum intel_engine_id id; - int err; + int err = 0; /* * As we reset the gpu during very early sanitisation, the current @@ -4893,36 +4700,27 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915) goto err_active; } - err = i915_gem_switch_to_kernel_context(i915); - if (err) - goto err_active; - - if (i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED, HZ / 5)) { - i915_gem_set_wedged(i915); - err = -EIO; /* Caller will declare us wedged */ + /* Flush the default context image to memory, and enable powersaving. */ + if (!load_power_context(i915)) { + err = -EIO; goto err_active; } - assert_kernel_context_is_current(i915); - - /* - * Immediately park the GPU so that we enable powersaving and - * treat it as idle. The next time we issue a request, we will - * unpark and start using the engine->pinned_default_state, otherwise - * it is in limbo and an early reset may fail. - */ - __i915_gem_park(i915); - for_each_engine(engine, i915, id) { + struct intel_context *ce; struct i915_vma *state; void *vaddr; - GEM_BUG_ON(to_intel_context(ctx, engine)->pin_count); + ce = intel_context_lookup(ctx, engine); + if (!ce) + continue; - state = to_intel_context(ctx, engine)->state; + state = ce->state; if (!state) continue; + GEM_BUG_ON(intel_context_is_pinned(ce)); + /* * As we will hold a reference to the logical state, it will * not be torn down with the context, and importantly the @@ -4940,6 +4738,8 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915) goto err_active; engine->default_state = i915_gem_object_get(state->obj); + i915_gem_object_set_cache_coherency(engine->default_state, + I915_CACHE_LLC); /* Check we can acquire the image of the context state */ vaddr = i915_gem_object_pin_map(engine->default_state, @@ -4978,19 +4778,10 @@ out_ctx: err_active: /* * If we have to abandon now, we expect the engines to be idle - * and ready to be torn-down. First try to flush any remaining - * request, ensure we are pointing at the kernel context and - * then remove it. + * and ready to be torn-down. The quickest way we can accomplish + * this is by declaring ourselves wedged. */ - if (WARN_ON(i915_gem_switch_to_kernel_context(i915))) - goto out_ctx; - - if (WARN_ON(i915_gem_wait_for_idle(i915, - I915_WAIT_LOCKED, - MAX_SCHEDULE_TIMEOUT))) - goto out_ctx; - - i915_gem_contexts_lost(i915); + i915_gem_set_wedged(i915); goto out_ctx; } @@ -5072,7 +4863,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv) * just magically go away. 
*/ mutex_lock(&dev_priv->drm.struct_mutex); - intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); ret = i915_gem_init_ggtt(dev_priv); if (ret) { @@ -5134,7 +4925,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv) goto err_init_hw; } - intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); mutex_unlock(&dev_priv->drm.struct_mutex); return 0; @@ -5148,7 +4939,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv) err_init_hw: mutex_unlock(&dev_priv->drm.struct_mutex); - WARN_ON(i915_gem_suspend(dev_priv)); + i915_gem_suspend(dev_priv); i915_gem_suspend_late(dev_priv); i915_gem_drain_workqueue(dev_priv); @@ -5169,7 +4960,7 @@ err_scratch: i915_gem_fini_scratch(dev_priv); err_ggtt: err_unlock: - intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); mutex_unlock(&dev_priv->drm.struct_mutex); err_uc_misc: @@ -5188,7 +4979,7 @@ err_uc_misc: * wedged. But we only want to do this where the GPU is angry, * for all other failure, such as an allocation failure, bail. */ - if (!i915_terminally_wedged(&dev_priv->gpu_error)) { + if (!i915_reset_failed(dev_priv)) { i915_load_error(dev_priv, "Failed to initialize GPU, declaring it wedged!\n"); i915_gem_set_wedged(dev_priv); @@ -5301,36 +5092,7 @@ static void i915_gem_init__mm(struct drm_i915_private *i915) int i915_gem_init_early(struct drm_i915_private *dev_priv) { - int err = -ENOMEM; - - dev_priv->objects = KMEM_CACHE(drm_i915_gem_object, SLAB_HWCACHE_ALIGN); - if (!dev_priv->objects) - goto err_out; - - dev_priv->vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN); - if (!dev_priv->vmas) - goto err_objects; - - dev_priv->luts = KMEM_CACHE(i915_lut_handle, 0); - if (!dev_priv->luts) - goto err_vmas; - - dev_priv->requests = KMEM_CACHE(i915_request, - SLAB_HWCACHE_ALIGN | - SLAB_RECLAIM_ACCOUNT | - SLAB_TYPESAFE_BY_RCU); - if (!dev_priv->requests) - goto err_luts; - - dev_priv->dependencies = KMEM_CACHE(i915_dependency, - SLAB_HWCACHE_ALIGN | - SLAB_RECLAIM_ACCOUNT); - if (!dev_priv->dependencies) - goto err_requests; - - dev_priv->priorities = KMEM_CACHE(i915_priolist, SLAB_HWCACHE_ALIGN); - if (!dev_priv->priorities) - goto err_dependencies; + int err; INIT_LIST_HEAD(&dev_priv->gt.active_rings); INIT_LIST_HEAD(&dev_priv->gt.closed_vma); @@ -5344,6 +5106,7 @@ int i915_gem_init_early(struct drm_i915_private *dev_priv) init_waitqueue_head(&dev_priv->gpu_error.wait_queue); init_waitqueue_head(&dev_priv->gpu_error.reset_queue); mutex_init(&dev_priv->gpu_error.wedge_mutex); + init_srcu_struct(&dev_priv->gpu_error.reset_backoff_srcu); atomic_set(&dev_priv->mm.bsd_engine_dispatch_index, 0); @@ -5354,19 +5117,6 @@ int i915_gem_init_early(struct drm_i915_private *dev_priv) DRM_NOTE("Unable to create a private tmpfs mount, hugepage support will be disabled(%d).\n", err); return 0; - -err_dependencies: - kmem_cache_destroy(dev_priv->dependencies); -err_requests: - kmem_cache_destroy(dev_priv->requests); -err_luts: - kmem_cache_destroy(dev_priv->luts); -err_vmas: - kmem_cache_destroy(dev_priv->vmas); -err_objects: - kmem_cache_destroy(dev_priv->objects); -err_out: - return err; } void i915_gem_cleanup_early(struct drm_i915_private *dev_priv) @@ -5376,15 +5126,7 @@ void i915_gem_cleanup_early(struct drm_i915_private *dev_priv) GEM_BUG_ON(atomic_read(&dev_priv->mm.free_count)); WARN_ON(dev_priv->mm.object_count); - kmem_cache_destroy(dev_priv->priorities); - 
kmem_cache_destroy(dev_priv->dependencies); - kmem_cache_destroy(dev_priv->requests); - kmem_cache_destroy(dev_priv->luts); - kmem_cache_destroy(dev_priv->vmas); - kmem_cache_destroy(dev_priv->objects); - - /* And ensure that our DESTROY_BY_RCU slabs are truly destroyed */ - rcu_barrier(); + cleanup_srcu_struct(&dev_priv->gpu_error.reset_backoff_srcu); i915_gemfs_fini(dev_priv); } diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h index b0e4b976880c..5c073fe73664 100644 --- a/drivers/gpu/drm/i915/i915_gem.h +++ b/drivers/gpu/drm/i915/i915_gem.h @@ -75,12 +75,14 @@ struct drm_i915_private; #define I915_NUM_ENGINES 8 +#define I915_GEM_IDLE_TIMEOUT (HZ / 5) + void i915_gem_park(struct drm_i915_private *i915); void i915_gem_unpark(struct drm_i915_private *i915); static inline void __tasklet_disable_sync_once(struct tasklet_struct *t) { - if (atomic_inc_return(&t->count) == 1) + if (!atomic_fetch_inc(&t->count)) tasklet_unlock_wait(t); } @@ -89,4 +91,9 @@ static inline bool __tasklet_is_enabled(const struct tasklet_struct *t) return !atomic_read(&t->count); } +static inline bool __tasklet_enable(struct tasklet_struct *t) +{ + return atomic_dec_and_test(&t->count); +} + #endif /* __I915_GEM_H__ */ diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index 280813a4bf82..662da485e15f 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -88,12 +88,32 @@ #include <linux/log2.h> #include <drm/i915_drm.h> #include "i915_drv.h" +#include "i915_globals.h" #include "i915_trace.h" +#include "i915_user_extensions.h" #include "intel_lrc_reg.h" #include "intel_workarounds.h" +#define I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE (1 << 1) +#define I915_CONTEXT_PARAM_VM 0x9 + #define ALL_L3_SLICES(dev) (1 << NUM_L3_SLICES(dev)) - 1 +static struct i915_global_gem_context { + struct i915_global base; + struct kmem_cache *slab_luts; +} global; + +struct i915_lut_handle *i915_lut_handle_alloc(void) +{ + return kmem_cache_alloc(global.slab_luts, GFP_KERNEL); +} + +void i915_lut_handle_free(struct i915_lut_handle *lut) +{ + return kmem_cache_free(global.slab_luts, lut); +} + static void lut_close(struct i915_gem_context *ctx) { struct i915_lut_handle *lut, *ln; @@ -102,14 +122,17 @@ static void lut_close(struct i915_gem_context *ctx) list_for_each_entry_safe(lut, ln, &ctx->handles_list, ctx_link) { list_del(&lut->obj_link); - kmem_cache_free(ctx->i915->luts, lut); + i915_lut_handle_free(lut); } + INIT_LIST_HEAD(&ctx->handles_list); rcu_read_lock(); radix_tree_for_each_slot(slot, &ctx->handles_vma, &iter, 0) { struct i915_vma *vma = rcu_dereference_raw(*slot); radix_tree_iter_delete(&ctx->handles_vma, &iter, slot); + + vma->open_count--; __i915_gem_object_release_unless_active(vma->obj); } rcu_read_unlock(); @@ -206,25 +229,26 @@ static void release_hw_id(struct i915_gem_context *ctx) static void i915_gem_context_free(struct i915_gem_context *ctx) { - unsigned int n; + struct intel_context *it, *n; lockdep_assert_held(&ctx->i915->drm.struct_mutex); GEM_BUG_ON(!i915_gem_context_is_closed(ctx)); + GEM_BUG_ON(!list_empty(&ctx->active_engines)); release_hw_id(ctx); i915_ppgtt_put(ctx->ppgtt); - for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) { - struct intel_context *ce = &ctx->__engine[n]; + rbtree_postorder_for_each_entry_safe(it, n, &ctx->hw_contexts, node) + intel_context_put(it); - if (ce->ops) - ce->ops->destroy(ce); - } + if (ctx->timeline) + i915_timeline_put(ctx->timeline); kfree(ctx->name); 
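The new __tasklet_enable() pairs with __tasklet_disable_sync_once(): disables nest by bumping a count, only the 0 -> 1 transition has to wait for an in-flight run, and only the enable that drops the count back to zero really re-enables the task. A standalone sketch of that counting scheme using C11 atomics (the names are mine, not the kernel helpers):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_uint disable_count;

static void task_disable_sync_once(void)
{
        if (atomic_fetch_add(&disable_count, 1) == 0) {
                /*
                 * First disabler: wait for any in-flight run to finish,
                 * e.g. tasklet_unlock_wait() in the kernel helper.
                 */
        }
}

static bool task_enable(void)
{
        /* True only when the last nested disable is dropped. */
        return atomic_fetch_sub(&disable_count, 1) == 1;
}

int main(void)
{
        task_disable_sync_once();
        task_disable_sync_once();
        printf("%d\n", task_enable());  /* 0: still disabled by outer caller */
        printf("%d\n", task_enable());  /* 1: count hit zero, may run again */
        return 0;
}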
put_pid(ctx->pid); list_del(&ctx->link); + mutex_destroy(&ctx->mutex); kfree_rcu(ctx, rcu); } @@ -291,8 +315,6 @@ static void context_close(struct i915_gem_context *ctx) * the ppgtt). */ lut_close(ctx); - if (ctx->ppgtt) - i915_ppgtt_close(&ctx->ppgtt->vm); ctx->file_priv = ERR_PTR(-EBADF); i915_gem_context_put(ctx); @@ -307,7 +329,7 @@ static u32 default_desc_template(const struct drm_i915_private *i915, desc = GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE; address_mode = INTEL_LEGACY_32B_CONTEXT; - if (ppgtt && i915_vm_is_48bit(&ppgtt->vm)) + if (ppgtt && i915_vm_is_4lvl(&ppgtt->vm)) address_mode = INTEL_LEGACY_64B_CONTEXT; desc |= address_mode << GEN8_CTX_ADDRESSING_MODE_SHIFT; @@ -322,134 +344,115 @@ static u32 default_desc_template(const struct drm_i915_private *i915, return desc; } -static void intel_context_retire(struct i915_active_request *active, - struct i915_request *rq) -{ - struct intel_context *ce = - container_of(active, typeof(*ce), active_tracker); - - intel_context_unpin(ce); -} - -void -intel_context_init(struct intel_context *ce, - struct i915_gem_context *ctx, - struct intel_engine_cs *engine) -{ - ce->gem_context = ctx; - - INIT_LIST_HEAD(&ce->signal_link); - INIT_LIST_HEAD(&ce->signals); - - /* Use the whole device by default */ - ce->sseu = intel_device_default_sseu(ctx->i915); - - i915_active_request_init(&ce->active_tracker, - NULL, intel_context_retire); -} - static struct i915_gem_context * -__create_hw_context(struct drm_i915_private *dev_priv, - struct drm_i915_file_private *file_priv) +__create_context(struct drm_i915_private *dev_priv) { struct i915_gem_context *ctx; - unsigned int n; - int ret; + int i; ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); - if (ctx == NULL) + if (!ctx) return ERR_PTR(-ENOMEM); kref_init(&ctx->ref); list_add_tail(&ctx->link, &dev_priv->contexts.list); ctx->i915 = dev_priv; ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_NORMAL); + INIT_LIST_HEAD(&ctx->active_engines); + mutex_init(&ctx->mutex); - for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) - intel_context_init(&ctx->__engine[n], ctx, dev_priv->engine[n]); + ctx->hw_contexts = RB_ROOT; + spin_lock_init(&ctx->hw_contexts_lock); INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL); INIT_LIST_HEAD(&ctx->handles_list); INIT_LIST_HEAD(&ctx->hw_id_link); - /* Default context will never have a file_priv */ - ret = DEFAULT_CONTEXT_HANDLE; - if (file_priv) { - ret = idr_alloc(&file_priv->context_idr, ctx, - DEFAULT_CONTEXT_HANDLE, 0, GFP_KERNEL); - if (ret < 0) - goto err_lut; - } - ctx->user_handle = ret; - - ctx->file_priv = file_priv; - if (file_priv) { - ctx->pid = get_task_pid(current, PIDTYPE_PID); - ctx->name = kasprintf(GFP_KERNEL, "%s[%d]/%x", - current->comm, - pid_nr(ctx->pid), - ctx->user_handle); - if (!ctx->name) { - ret = -ENOMEM; - goto err_pid; - } - } - /* NB: Mark all slices as needing a remap so that when the context first * loads it will restore whatever remap state already exists. If there * is no remap info, it will be a NOP. 
*/ ctx->remap_slice = ALL_L3_SLICES(dev_priv); i915_gem_context_set_bannable(ctx); + i915_gem_context_set_recoverable(ctx); + ctx->ring_size = 4 * PAGE_SIZE; ctx->desc_template = default_desc_template(dev_priv, dev_priv->mm.aliasing_ppgtt); + for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++) + ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES; + return ctx; +} -err_pid: - put_pid(ctx->pid); - idr_remove(&file_priv->context_idr, ctx->user_handle); -err_lut: - context_close(ctx); - return ERR_PTR(ret); +static struct i915_hw_ppgtt * +__set_ppgtt(struct i915_gem_context *ctx, struct i915_hw_ppgtt *ppgtt) +{ + struct i915_hw_ppgtt *old = ctx->ppgtt; + + ctx->ppgtt = i915_ppgtt_get(ppgtt); + ctx->desc_template = default_desc_template(ctx->i915, ppgtt); + + return old; } -static void __destroy_hw_context(struct i915_gem_context *ctx, - struct drm_i915_file_private *file_priv) +static void __assign_ppgtt(struct i915_gem_context *ctx, + struct i915_hw_ppgtt *ppgtt) { - idr_remove(&file_priv->context_idr, ctx->user_handle); - context_close(ctx); + if (ppgtt == ctx->ppgtt) + return; + + ppgtt = __set_ppgtt(ctx, ppgtt); + if (ppgtt) + i915_ppgtt_put(ppgtt); } static struct i915_gem_context * -i915_gem_create_context(struct drm_i915_private *dev_priv, - struct drm_i915_file_private *file_priv) +i915_gem_create_context(struct drm_i915_private *dev_priv, unsigned int flags) { struct i915_gem_context *ctx; lockdep_assert_held(&dev_priv->drm.struct_mutex); + BUILD_BUG_ON(I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE & + ~I915_CONTEXT_CREATE_FLAGS_UNKNOWN); + if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE && + !HAS_EXECLISTS(dev_priv)) + return ERR_PTR(-EINVAL); + /* Reap the most stale context */ contexts_free_first(dev_priv); - ctx = __create_hw_context(dev_priv, file_priv); + ctx = __create_context(dev_priv); if (IS_ERR(ctx)) return ctx; if (HAS_FULL_PPGTT(dev_priv)) { struct i915_hw_ppgtt *ppgtt; - ppgtt = i915_ppgtt_create(dev_priv, file_priv); + ppgtt = i915_ppgtt_create(dev_priv); if (IS_ERR(ppgtt)) { DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n", PTR_ERR(ppgtt)); - __destroy_hw_context(ctx, file_priv); + context_close(ctx); return ERR_CAST(ppgtt); } - ctx->ppgtt = ppgtt; - ctx->desc_template = default_desc_template(dev_priv, ppgtt); + __assign_ppgtt(ctx, ppgtt); + i915_ppgtt_put(ppgtt); + } + + if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE) { + struct i915_timeline *timeline; + + timeline = i915_timeline_create(dev_priv, NULL); + if (IS_ERR(timeline)) { + context_close(ctx); + return ERR_CAST(timeline); + } + + ctx->timeline = timeline; } trace_i915_context_create(ctx); @@ -480,10 +483,17 @@ i915_gem_context_create_gvt(struct drm_device *dev) if (ret) return ERR_PTR(ret); - ctx = i915_gem_create_context(to_i915(dev), NULL); + ctx = i915_gem_create_context(to_i915(dev), 0); if (IS_ERR(ctx)) goto out; + ret = i915_gem_context_pin_hw_id(ctx); + if (ret) { + context_close(ctx); + ctx = ERR_PTR(ret); + goto out; + } + ctx->file_priv = ERR_PTR(-EBADF); i915_gem_context_set_closed(ctx); /* not user accessible */ i915_gem_context_clear_bannable(ctx); @@ -516,7 +526,7 @@ i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio) struct i915_gem_context *ctx; int err; - ctx = i915_gem_create_context(i915, NULL); + ctx = i915_gem_create_context(i915, 0); if (IS_ERR(ctx)) return ctx; @@ -563,7 +573,7 @@ int i915_gem_contexts_init(struct drm_i915_private *dev_priv) GEM_BUG_ON(dev_priv->kernel_context); GEM_BUG_ON(dev_priv->preempt_context); - 
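__set_ppgtt()/__assign_ppgtt() above follow a common refcount-swap shape: take a reference on the incoming object, install it, and hand the displaced pointer back so exactly one put is issued for it, with self-assignment short-circuited. A standalone sketch of that shape with a toy refcounted object (all names illustrative):

#include <stdio.h>

struct obj {
        int refcount;
        const char *name;
};

static struct obj *obj_get(struct obj *o) { o->refcount++; return o; }
static void obj_put(struct obj *o)        { o->refcount--; }

struct ctx {
        struct obj *vm;
};

/* Install 'vm', returning the previously installed object (may be NULL). */
static struct obj *ctx_set_vm(struct ctx *c, struct obj *vm)
{
        struct obj *old = c->vm;

        c->vm = obj_get(vm);
        return old;
}

static void ctx_assign_vm(struct ctx *c, struct obj *vm)
{
        if (vm == c->vm)        /* already installed: avoid get/put churn */
                return;

        vm = ctx_set_vm(c, vm);
        if (vm)
                obj_put(vm);    /* drop the context's ref on the old vm */
}

int main(void)
{
        struct obj a = { .refcount = 1, .name = "a" };
        struct obj b = { .refcount = 1, .name = "b" };
        struct ctx c = { 0 };

        ctx_assign_vm(&c, &a);
        ctx_assign_vm(&c, &b);
        printf("%s held, a.refcount=%d b.refcount=%d\n",
               c.vm->name, a.refcount, b.refcount);     /* b held, 1, 2 */
        return 0;
}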
intel_engine_init_ctx_wa(dev_priv->engine[RCS]); + intel_engine_init_ctx_wa(dev_priv->engine[RCS0]); init_contexts(dev_priv); /* lowest priority; idle task */ @@ -624,31 +634,87 @@ void i915_gem_contexts_fini(struct drm_i915_private *i915) static int context_idr_cleanup(int id, void *p, void *data) { - struct i915_gem_context *ctx = p; + context_close(p); + return 0; +} - context_close(ctx); +static int vm_idr_cleanup(int id, void *p, void *data) +{ + i915_ppgtt_put(p); return 0; } +static int gem_context_register(struct i915_gem_context *ctx, + struct drm_i915_file_private *fpriv) +{ + int ret; + + ctx->file_priv = fpriv; + if (ctx->ppgtt) + ctx->ppgtt->vm.file = fpriv; + + ctx->pid = get_task_pid(current, PIDTYPE_PID); + ctx->name = kasprintf(GFP_KERNEL, "%s[%d]", + current->comm, pid_nr(ctx->pid)); + if (!ctx->name) { + ret = -ENOMEM; + goto err_pid; + } + + /* And finally expose ourselves to userspace via the idr */ + mutex_lock(&fpriv->context_idr_lock); + ret = idr_alloc(&fpriv->context_idr, ctx, 0, 0, GFP_KERNEL); + mutex_unlock(&fpriv->context_idr_lock); + if (ret >= 0) + goto out; + + kfree(fetch_and_zero(&ctx->name)); +err_pid: + put_pid(fetch_and_zero(&ctx->pid)); +out: + return ret; +} + int i915_gem_context_open(struct drm_i915_private *i915, struct drm_file *file) { struct drm_i915_file_private *file_priv = file->driver_priv; struct i915_gem_context *ctx; + int err; + + mutex_init(&file_priv->context_idr_lock); + mutex_init(&file_priv->vm_idr_lock); idr_init(&file_priv->context_idr); + idr_init_base(&file_priv->vm_idr, 1); mutex_lock(&i915->drm.struct_mutex); - ctx = i915_gem_create_context(i915, file_priv); + ctx = i915_gem_create_context(i915, 0); mutex_unlock(&i915->drm.struct_mutex); if (IS_ERR(ctx)) { - idr_destroy(&file_priv->context_idr); - return PTR_ERR(ctx); + err = PTR_ERR(ctx); + goto err; } + err = gem_context_register(ctx, file_priv); + if (err < 0) + goto err_ctx; + GEM_BUG_ON(i915_gem_context_is_kernel(ctx)); + GEM_BUG_ON(err > 0); return 0; + +err_ctx: + mutex_lock(&i915->drm.struct_mutex); + context_close(ctx); + mutex_unlock(&i915->drm.struct_mutex); +err: + idr_destroy(&file_priv->vm_idr); + idr_destroy(&file_priv->context_idr); + mutex_destroy(&file_priv->vm_idr_lock); + mutex_destroy(&file_priv->context_idr_lock); + return err; } void i915_gem_context_close(struct drm_file *file) @@ -659,6 +725,100 @@ void i915_gem_context_close(struct drm_file *file) idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL); idr_destroy(&file_priv->context_idr); + mutex_destroy(&file_priv->context_idr_lock); + + idr_for_each(&file_priv->vm_idr, vm_idr_cleanup, NULL); + idr_destroy(&file_priv->vm_idr); + mutex_destroy(&file_priv->vm_idr_lock); +} + +int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct drm_i915_private *i915 = to_i915(dev); + struct drm_i915_gem_vm_control *args = data; + struct drm_i915_file_private *file_priv = file->driver_priv; + struct i915_hw_ppgtt *ppgtt; + int err; + + if (!HAS_FULL_PPGTT(i915)) + return -ENODEV; + + if (args->flags) + return -EINVAL; + + ppgtt = i915_ppgtt_create(i915); + if (IS_ERR(ppgtt)) + return PTR_ERR(ppgtt); + + ppgtt->vm.file = file_priv; + + if (args->extensions) { + err = i915_user_extensions(u64_to_user_ptr(args->extensions), + NULL, 0, + ppgtt); + if (err) + goto err_put; + } + + err = mutex_lock_interruptible(&file_priv->vm_idr_lock); + if (err) + goto err_put; + + err = idr_alloc(&file_priv->vm_idr, ppgtt, 0, 0, GFP_KERNEL); + if (err < 0) + goto err_unlock; + 
+ GEM_BUG_ON(err == 0); /* reserved for default/unassigned ppgtt */ + ppgtt->user_handle = err; + + mutex_unlock(&file_priv->vm_idr_lock); + + args->vm_id = err; + return 0; + +err_unlock: + mutex_unlock(&file_priv->vm_idr_lock); +err_put: + i915_ppgtt_put(ppgtt); + return err; +} + +int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct drm_i915_file_private *file_priv = file->driver_priv; + struct drm_i915_gem_vm_control *args = data; + struct i915_hw_ppgtt *ppgtt; + int err; + u32 id; + + if (args->flags) + return -EINVAL; + + if (args->extensions) + return -EINVAL; + + id = args->vm_id; + if (!id) + return -ENOENT; + + err = mutex_lock_interruptible(&file_priv->vm_idr_lock); + if (err) + return err; + + ppgtt = idr_remove(&file_priv->vm_idr, id); + if (ppgtt) { + GEM_BUG_ON(ppgtt->user_handle != id); + ppgtt->user_handle = 0; + } + + mutex_unlock(&file_priv->vm_idr_lock); + if (!ppgtt) + return -ENOENT; + + i915_ppgtt_put(ppgtt); + return 0; } static struct i915_request * @@ -671,10 +831,9 @@ last_request_on_engine(struct i915_timeline *timeline, rq = i915_active_request_raw(&timeline->last_request, &engine->i915->drm.struct_mutex); - if (rq && rq->engine == engine) { - GEM_TRACE("last request for %s on engine %s: %llx:%llu\n", - timeline->name, engine->name, - rq->fence.context, rq->fence.seqno); + if (rq && rq->engine->mask & engine->mask) { + GEM_TRACE("last request on engine %s: %llx:%llu\n", + engine->name, rq->fence.context, rq->fence.seqno); GEM_BUG_ON(rq->timeline != timeline); return rq; } @@ -682,81 +841,104 @@ last_request_on_engine(struct i915_timeline *timeline, return NULL; } -static bool engine_has_kernel_context_barrier(struct intel_engine_cs *engine) +struct context_barrier_task { + struct i915_active base; + void (*task)(void *data); + void *data; +}; + +static void cb_retire(struct i915_active *base) +{ + struct context_barrier_task *cb = container_of(base, typeof(*cb), base); + + if (cb->task) + cb->task(cb->data); + + i915_active_fini(&cb->base); + kfree(cb); +} + +I915_SELFTEST_DECLARE(static unsigned long context_barrier_inject_fault); +static int context_barrier_task(struct i915_gem_context *ctx, + unsigned long engines, + int (*emit)(struct i915_request *rq, void *data), + void (*task)(void *data), + void *data) { - struct drm_i915_private *i915 = engine->i915; - const struct intel_context * const ce = - to_intel_context(i915->kernel_context, engine); - struct i915_timeline *barrier = ce->ring->timeline; - struct intel_ring *ring; - bool any_active = false; + struct drm_i915_private *i915 = ctx->i915; + struct context_barrier_task *cb; + struct intel_context *ce, *next; + intel_wakeref_t wakeref; + int err = 0; lockdep_assert_held(&i915->drm.struct_mutex); - list_for_each_entry(ring, &i915->gt.active_rings, active_link) { - struct i915_request *rq; + GEM_BUG_ON(!task); - rq = last_request_on_engine(ring->timeline, engine); - if (!rq) - continue; + cb = kmalloc(sizeof(*cb), GFP_KERNEL); + if (!cb) + return -ENOMEM; - any_active = true; + i915_active_init(i915, &cb->base, cb_retire); + i915_active_acquire(&cb->base); - if (rq->hw_context == ce) + wakeref = intel_runtime_pm_get(i915); + rbtree_postorder_for_each_entry_safe(ce, next, &ctx->hw_contexts, node) { + struct intel_engine_cs *engine = ce->engine; + struct i915_request *rq; + + if (!(engine->mask & engines)) continue; - /* - * Was this request submitted after the previous - * switch-to-kernel-context? 
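Because the per-file VM idr is initialised with idr_init_base(..., 1), idr_alloc() never hands out id 0, which is what lets the GEM_BUG_ON(err == 0) above treat handle 0 as "default/unassigned ppgtt". A kernel-style sketch of that reservation trick with hypothetical names (not a standalone program):

#include <linux/idr.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct handle_table {
        struct mutex lock;
        struct idr handles;
};

static void handle_table_init(struct handle_table *t)
{
        mutex_init(&t->lock);
        /* Lowest ID handed out is 1; 0 stays free to mean "none/default". */
        idr_init_base(&t->handles, 1);
}

static int handle_table_add(struct handle_table *t, void *obj)
{
        int id;

        mutex_lock(&t->lock);
        id = idr_alloc(&t->handles, obj, 0, 0, GFP_KERNEL);
        mutex_unlock(&t->lock);

        return id;      /* > 0 handle on success, negative errno on failure */
}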
- */ - if (!i915_timeline_sync_is_later(barrier, &rq->fence)) { - GEM_TRACE("%s needs barrier for %llx:%lld\n", - ring->timeline->name, - rq->fence.context, - rq->fence.seqno); - return false; + if (I915_SELFTEST_ONLY(context_barrier_inject_fault & + engine->mask)) { + err = -ENXIO; + break; + } + + rq = i915_request_alloc(engine, ctx); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + break; } - GEM_TRACE("%s has barrier after %llx:%lld\n", - ring->timeline->name, - rq->fence.context, - rq->fence.seqno); + err = 0; + if (emit) + err = emit(rq, data); + if (err == 0) + err = i915_active_ref(&cb->base, rq->fence.context, rq); + + i915_request_add(rq); + if (err) + break; } + intel_runtime_pm_put(i915, wakeref); - /* - * If any other timeline was still active and behind the last barrier, - * then our last switch-to-kernel-context must still be queued and - * will run last (leaving the engine in the kernel context when it - * eventually idles). - */ - if (any_active) - return true; + cb->task = err ? NULL : task; /* caller needs to unwind instead */ + cb->data = data; - /* The engine is idle; check that it is idling in the kernel context. */ - return engine->last_retired_context == ce; + i915_active_release(&cb->base); + + return err; } -int i915_gem_switch_to_kernel_context(struct drm_i915_private *i915) +int i915_gem_switch_to_kernel_context(struct drm_i915_private *i915, + unsigned long mask) { struct intel_engine_cs *engine; - enum intel_engine_id id; GEM_TRACE("awake?=%s\n", yesno(i915->gt.awake)); lockdep_assert_held(&i915->drm.struct_mutex); GEM_BUG_ON(!i915->kernel_context); - i915_retire_requests(i915); + /* Inoperable, so presume the GPU is safely pointing into the void! */ + if (i915_terminally_wedged(i915)) + return 0; - for_each_engine(engine, i915, id) { + for_each_engine_masked(engine, i915, mask, mask) { struct intel_ring *ring; struct i915_request *rq; - GEM_BUG_ON(!to_intel_context(i915->kernel_context, engine)); - if (engine_has_kernel_context_barrier(engine)) - continue; - - GEM_TRACE("emit barrier on %s\n", engine->name); - rq = i915_request_alloc(engine, i915->kernel_context); if (IS_ERR(rq)) return PTR_ERR(rq); @@ -779,7 +961,6 @@ int i915_gem_switch_to_kernel_context(struct drm_i915_private *i915) i915_sw_fence_await_sw_fence_gfp(&rq->submit, &prev->submit, I915_FENCE_GFP); - i915_timeline_sync_set(rq->timeline, &prev->fence); } i915_request_add(rq); @@ -788,183 +969,172 @@ int i915_gem_switch_to_kernel_context(struct drm_i915_private *i915) return 0; } -static bool client_is_banned(struct drm_i915_file_private *file_priv) +static int get_ppgtt(struct i915_gem_context *ctx, + struct drm_i915_gem_context_param *args) { - return atomic_read(&file_priv->ban_score) >= I915_CLIENT_SCORE_BANNED; -} - -int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, - struct drm_file *file) -{ - struct drm_i915_private *dev_priv = to_i915(dev); - struct drm_i915_gem_context_create *args = data; - struct drm_i915_file_private *file_priv = file->driver_priv; - struct i915_gem_context *ctx; + struct drm_i915_file_private *file_priv = ctx->file_priv; + struct i915_hw_ppgtt *ppgtt; int ret; - if (!DRIVER_CAPS(dev_priv)->has_logical_contexts) + return -EINVAL; /* nothing to see here; please move along */ + + if (!ctx->ppgtt) return -ENODEV; - if (args->pad != 0) - return -EINVAL; + /* XXX rcu acquire? 
*/ + ret = mutex_lock_interruptible(&ctx->i915->drm.struct_mutex); + if (ret) + return ret; - if (client_is_banned(file_priv)) { - DRM_DEBUG("client %s[%d] banned from creating ctx\n", - current->comm, - pid_nr(get_task_pid(current, PIDTYPE_PID))); + ppgtt = i915_ppgtt_get(ctx->ppgtt); + mutex_unlock(&ctx->i915->drm.struct_mutex); - return -EIO; + ret = mutex_lock_interruptible(&file_priv->vm_idr_lock); + if (ret) + goto err_put; + + if (!ppgtt->user_handle) { + ret = idr_alloc(&file_priv->vm_idr, ppgtt, 0, 0, GFP_KERNEL); + GEM_BUG_ON(!ret); + if (ret < 0) + goto err_unlock; + + ppgtt->user_handle = ret; + i915_ppgtt_get(ppgtt); } - ret = i915_mutex_lock_interruptible(dev); - if (ret) - return ret; + args->size = 0; + args->value = ppgtt->user_handle; - ctx = i915_gem_create_context(dev_priv, file_priv); - mutex_unlock(&dev->struct_mutex); - if (IS_ERR(ctx)) - return PTR_ERR(ctx); + ret = 0; +err_unlock: + mutex_unlock(&file_priv->vm_idr_lock); +err_put: + i915_ppgtt_put(ppgtt); + return ret; +} - GEM_BUG_ON(i915_gem_context_is_kernel(ctx)); +static void set_ppgtt_barrier(void *data) +{ + struct i915_hw_ppgtt *old = data; - args->ctx_id = ctx->user_handle; - DRM_DEBUG("HW context %d created\n", args->ctx_id); + if (INTEL_GEN(old->vm.i915) < 8) + gen6_ppgtt_unpin_all(old); - return 0; + i915_ppgtt_put(old); } -int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data, - struct drm_file *file) +static int emit_ppgtt_update(struct i915_request *rq, void *data) { - struct drm_i915_gem_context_destroy *args = data; - struct drm_i915_file_private *file_priv = file->driver_priv; - struct i915_gem_context *ctx; - int ret; + struct i915_hw_ppgtt *ppgtt = rq->gem_context->ppgtt; + struct intel_engine_cs *engine = rq->engine; + u32 *cs; + int i; - if (args->pad != 0) - return -EINVAL; + if (i915_vm_is_4lvl(&ppgtt->vm)) { + const dma_addr_t pd_daddr = px_dma(&ppgtt->pml4); - if (args->ctx_id == DEFAULT_CONTEXT_HANDLE) - return -ENOENT; + cs = intel_ring_begin(rq, 6); + if (IS_ERR(cs)) + return PTR_ERR(cs); - ctx = i915_gem_context_lookup(file_priv, args->ctx_id); - if (!ctx) - return -ENOENT; + *cs++ = MI_LOAD_REGISTER_IMM(2); - ret = mutex_lock_interruptible(&dev->struct_mutex); - if (ret) - goto out; + *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(engine, 0)); + *cs++ = upper_32_bits(pd_daddr); + *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(engine, 0)); + *cs++ = lower_32_bits(pd_daddr); - __destroy_hw_context(ctx, file_priv); - mutex_unlock(&dev->struct_mutex); + *cs++ = MI_NOOP; + intel_ring_advance(rq, cs); + } else if (HAS_LOGICAL_RING_CONTEXTS(engine->i915)) { + cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2); + if (IS_ERR(cs)) + return PTR_ERR(cs); + + *cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES); + for (i = GEN8_3LVL_PDPES; i--; ) { + const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i); + + *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(engine, i)); + *cs++ = upper_32_bits(pd_daddr); + *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(engine, i)); + *cs++ = lower_32_bits(pd_daddr); + } + *cs++ = MI_NOOP; + intel_ring_advance(rq, cs); + } else { + /* ppGTT is not part of the legacy context image */ + gen6_ppgtt_pin(ppgtt); + } -out: - i915_gem_context_put(ctx); return 0; } -static int get_sseu(struct i915_gem_context *ctx, - struct drm_i915_gem_context_param *args) +static int set_ppgtt(struct i915_gem_context *ctx, + struct drm_i915_gem_context_param *args) { - struct drm_i915_gem_context_param_sseu user_sseu; - struct intel_engine_cs *engine; - struct 
intel_context *ce; - int ret; - - if (args->size == 0) - goto out; - else if (args->size < sizeof(user_sseu)) - return -EINVAL; - - if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value), - sizeof(user_sseu))) - return -EFAULT; + struct drm_i915_file_private *file_priv = ctx->file_priv; + struct i915_hw_ppgtt *ppgtt, *old; + int err; - if (user_sseu.flags || user_sseu.rsvd) - return -EINVAL; + return -EINVAL; /* nothing to see here; please move along */ - engine = intel_engine_lookup_user(ctx->i915, - user_sseu.engine_class, - user_sseu.engine_instance); - if (!engine) + if (args->size) return -EINVAL; - /* Only use for mutex here is to serialize get_param and set_param. */ - ret = mutex_lock_interruptible(&ctx->i915->drm.struct_mutex); - if (ret) - return ret; - - ce = to_intel_context(ctx, engine); - - user_sseu.slice_mask = ce->sseu.slice_mask; - user_sseu.subslice_mask = ce->sseu.subslice_mask; - user_sseu.min_eus_per_subslice = ce->sseu.min_eus_per_subslice; - user_sseu.max_eus_per_subslice = ce->sseu.max_eus_per_subslice; + if (!ctx->ppgtt) + return -ENODEV; - mutex_unlock(&ctx->i915->drm.struct_mutex); + if (upper_32_bits(args->value)) + return -ENOENT; - if (copy_to_user(u64_to_user_ptr(args->value), &user_sseu, - sizeof(user_sseu))) - return -EFAULT; + err = mutex_lock_interruptible(&file_priv->vm_idr_lock); + if (err) + return err; -out: - args->size = sizeof(user_sseu); + ppgtt = idr_find(&file_priv->vm_idr, args->value); + if (ppgtt) { + GEM_BUG_ON(ppgtt->user_handle != args->value); + i915_ppgtt_get(ppgtt); + } + mutex_unlock(&file_priv->vm_idr_lock); + if (!ppgtt) + return -ENOENT; - return 0; -} + err = mutex_lock_interruptible(&ctx->i915->drm.struct_mutex); + if (err) + goto out; -int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data, - struct drm_file *file) -{ - struct drm_i915_file_private *file_priv = file->driver_priv; - struct drm_i915_gem_context_param *args = data; - struct i915_gem_context *ctx; - int ret = 0; + if (ppgtt == ctx->ppgtt) + goto unlock; - ctx = i915_gem_context_lookup(file_priv, args->ctx_id); - if (!ctx) - return -ENOENT; + /* Teardown the existing obj:vma cache, it will have to be rebuilt. */ + lut_close(ctx); - switch (args->param) { - case I915_CONTEXT_PARAM_BAN_PERIOD: - ret = -EINVAL; - break; - case I915_CONTEXT_PARAM_NO_ZEROMAP: - args->size = 0; - args->value = test_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags); - break; - case I915_CONTEXT_PARAM_GTT_SIZE: - args->size = 0; + old = __set_ppgtt(ctx, ppgtt); - if (ctx->ppgtt) - args->value = ctx->ppgtt->vm.total; - else if (to_i915(dev)->mm.aliasing_ppgtt) - args->value = to_i915(dev)->mm.aliasing_ppgtt->vm.total; - else - args->value = to_i915(dev)->ggtt.vm.total; - break; - case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE: - args->size = 0; - args->value = i915_gem_context_no_error_capture(ctx); - break; - case I915_CONTEXT_PARAM_BANNABLE: - args->size = 0; - args->value = i915_gem_context_is_bannable(ctx); - break; - case I915_CONTEXT_PARAM_PRIORITY: - args->size = 0; - args->value = ctx->sched.priority >> I915_USER_PRIORITY_SHIFT; - break; - case I915_CONTEXT_PARAM_SSEU: - ret = get_sseu(ctx, args); - break; - default: - ret = -EINVAL; - break; + /* + * We need to flush any requests using the current ppgtt before + * we release it as the requests do not hold a reference themselves, + * only indirectly through the context. 
+ */ + err = context_barrier_task(ctx, ALL_ENGINES, + emit_ppgtt_update, + set_ppgtt_barrier, + old); + if (err) { + ctx->ppgtt = old; + ctx->desc_template = default_desc_template(ctx->i915, old); + i915_ppgtt_put(ppgtt); } - i915_gem_context_put(ctx); - return ret; +unlock: + mutex_unlock(&ctx->i915->drm.struct_mutex); + +out: + i915_ppgtt_put(ppgtt); + return err; } static int gen8_emit_rpcs_config(struct i915_request *rq, @@ -993,23 +1163,28 @@ static int gen8_emit_rpcs_config(struct i915_request *rq, } static int -gen8_modify_rpcs_gpu(struct intel_context *ce, - struct intel_engine_cs *engine, - struct intel_sseu sseu) +gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu) { - struct drm_i915_private *i915 = engine->i915; + struct drm_i915_private *i915 = ce->engine->i915; struct i915_request *rq, *prev; intel_wakeref_t wakeref; int ret; - GEM_BUG_ON(!ce->pin_count); + lockdep_assert_held(&ce->pin_mutex); - lockdep_assert_held(&i915->drm.struct_mutex); + /* + * If the context is not idle, we have to submit an ordered request to + * modify its context image via the kernel context (writing to our own + * image, or into the registers directory, does not stick). Pristine + * and idle contexts will be configured on pinning. + */ + if (!intel_context_is_pinned(ce)) + return 0; /* Submitting requests etc needs the hw awake. */ wakeref = intel_runtime_pm_get(i915); - rq = i915_request_alloc(engine, i915->kernel_context); + rq = i915_request_alloc(ce->engine, i915->kernel_context); if (IS_ERR(rq)) { ret = PTR_ERR(rq); goto out_put; @@ -1057,27 +1232,26 @@ __i915_gem_context_reconfigure_sseu(struct i915_gem_context *ctx, struct intel_engine_cs *engine, struct intel_sseu sseu) { - struct intel_context *ce = to_intel_context(ctx, engine); + struct intel_context *ce; int ret = 0; GEM_BUG_ON(INTEL_GEN(ctx->i915) < 8); - GEM_BUG_ON(engine->id != RCS); + GEM_BUG_ON(engine->id != RCS0); + + ce = intel_context_pin_lock(ctx, engine); + if (IS_ERR(ce)) + return PTR_ERR(ce); /* Nothing to do if unmodified. */ if (!memcmp(&ce->sseu, &sseu, sizeof(sseu))) - return 0; - - /* - * If context is not idle we have to submit an ordered request to modify - * its context image via the kernel context. Pristine and idle contexts - * will be configured on pinning. 
- */ - if (ce->pin_count) - ret = gen8_modify_rpcs_gpu(ce, engine, sseu); + goto unlock; + ret = gen8_modify_rpcs(ce, sseu); if (!ret) ce->sseu = sseu; +unlock: + intel_context_pin_unlock(ce); return ret; } @@ -1242,22 +1416,12 @@ static int set_sseu(struct i915_gem_context *ctx, return 0; } -int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data, - struct drm_file *file) +static int ctx_setparam(struct i915_gem_context *ctx, + struct drm_i915_gem_context_param *args) { - struct drm_i915_file_private *file_priv = file->driver_priv; - struct drm_i915_gem_context_param *args = data; - struct i915_gem_context *ctx; int ret = 0; - ctx = i915_gem_context_lookup(file_priv, args->ctx_id); - if (!ctx) - return -ENOENT; - switch (args->param) { - case I915_CONTEXT_PARAM_BAN_PERIOD: - ret = -EINVAL; - break; case I915_CONTEXT_PARAM_NO_ZEROMAP: if (args->size) ret = -EINVAL; @@ -1266,6 +1430,7 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data, else clear_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags); break; + case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE: if (args->size) ret = -EINVAL; @@ -1274,6 +1439,7 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data, else i915_gem_context_clear_no_error_capture(ctx); break; + case I915_CONTEXT_PARAM_BANNABLE: if (args->size) ret = -EINVAL; @@ -1285,13 +1451,22 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data, i915_gem_context_clear_bannable(ctx); break; + case I915_CONTEXT_PARAM_RECOVERABLE: + if (args->size) + ret = -EINVAL; + else if (args->value) + i915_gem_context_set_recoverable(ctx); + else + i915_gem_context_clear_recoverable(ctx); + break; + case I915_CONTEXT_PARAM_PRIORITY: { s64 priority = args->value; if (args->size) ret = -EINVAL; - else if (!(to_i915(dev)->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY)) + else if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY)) ret = -ENODEV; else if (priority > I915_CONTEXT_MAX_USER_PRIORITY || priority < I915_CONTEXT_MIN_USER_PRIORITY) @@ -1304,14 +1479,266 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data, I915_USER_PRIORITY(priority); } break; + case I915_CONTEXT_PARAM_SSEU: ret = set_sseu(ctx, args); break; + + case I915_CONTEXT_PARAM_VM: + ret = set_ppgtt(ctx, args); + break; + + case I915_CONTEXT_PARAM_BAN_PERIOD: default: ret = -EINVAL; break; } + return ret; +} + +struct create_ext { + struct i915_gem_context *ctx; + struct drm_i915_file_private *fpriv; +}; + +static int create_setparam(struct i915_user_extension __user *ext, void *data) +{ + struct drm_i915_gem_context_create_ext_setparam local; + const struct create_ext *arg = data; + + if (copy_from_user(&local, ext, sizeof(local))) + return -EFAULT; + + if (local.param.ctx_id) + return -EINVAL; + + return ctx_setparam(arg->ctx, &local.param); +} + +static const i915_user_extension_fn create_extensions[] = { + [I915_CONTEXT_CREATE_EXT_SETPARAM] = create_setparam, +}; + +static bool client_is_banned(struct drm_i915_file_private *file_priv) +{ + return atomic_read(&file_priv->ban_score) >= I915_CLIENT_SCORE_BANNED; +} + +int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct drm_i915_private *i915 = to_i915(dev); + struct drm_i915_gem_context_create_ext *args = data; + struct create_ext ext_data; + int ret; + + if (!DRIVER_CAPS(i915)->has_logical_contexts) + return -ENODEV; + + if (args->flags & I915_CONTEXT_CREATE_FLAGS_UNKNOWN) + return -EINVAL; + + ret = i915_terminally_wedged(i915); + if 
(ret) + return ret; + + ext_data.fpriv = file->driver_priv; + if (client_is_banned(ext_data.fpriv)) { + DRM_DEBUG("client %s[%d] banned from creating ctx\n", + current->comm, + pid_nr(get_task_pid(current, PIDTYPE_PID))); + return -EIO; + } + + ret = i915_mutex_lock_interruptible(dev); + if (ret) + return ret; + + ext_data.ctx = i915_gem_create_context(i915, args->flags); + mutex_unlock(&dev->struct_mutex); + if (IS_ERR(ext_data.ctx)) + return PTR_ERR(ext_data.ctx); + + if (args->flags & I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS) { + ret = i915_user_extensions(u64_to_user_ptr(args->extensions), + create_extensions, + ARRAY_SIZE(create_extensions), + &ext_data); + if (ret) + goto err_ctx; + } + + ret = gem_context_register(ext_data.ctx, ext_data.fpriv); + if (ret < 0) + goto err_ctx; + + args->ctx_id = ret; + DRM_DEBUG("HW context %d created\n", args->ctx_id); + + return 0; + +err_ctx: + mutex_lock(&dev->struct_mutex); + context_close(ext_data.ctx); + mutex_unlock(&dev->struct_mutex); + return ret; +} + +int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct drm_i915_gem_context_destroy *args = data; + struct drm_i915_file_private *file_priv = file->driver_priv; + struct i915_gem_context *ctx; + + if (args->pad != 0) + return -EINVAL; + + if (!args->ctx_id) + return -ENOENT; + + if (mutex_lock_interruptible(&file_priv->context_idr_lock)) + return -EINTR; + + ctx = idr_remove(&file_priv->context_idr, args->ctx_id); + mutex_unlock(&file_priv->context_idr_lock); + if (!ctx) + return -ENOENT; + + mutex_lock(&dev->struct_mutex); + context_close(ctx); + mutex_unlock(&dev->struct_mutex); + + return 0; +} + +static int get_sseu(struct i915_gem_context *ctx, + struct drm_i915_gem_context_param *args) +{ + struct drm_i915_gem_context_param_sseu user_sseu; + struct intel_engine_cs *engine; + struct intel_context *ce; + + if (args->size == 0) + goto out; + else if (args->size < sizeof(user_sseu)) + return -EINVAL; + + if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value), + sizeof(user_sseu))) + return -EFAULT; + + if (user_sseu.flags || user_sseu.rsvd) + return -EINVAL; + + engine = intel_engine_lookup_user(ctx->i915, + user_sseu.engine_class, + user_sseu.engine_instance); + if (!engine) + return -EINVAL; + + ce = intel_context_pin_lock(ctx, engine); /* serialises with set_sseu */ + if (IS_ERR(ce)) + return PTR_ERR(ce); + + user_sseu.slice_mask = ce->sseu.slice_mask; + user_sseu.subslice_mask = ce->sseu.subslice_mask; + user_sseu.min_eus_per_subslice = ce->sseu.min_eus_per_subslice; + user_sseu.max_eus_per_subslice = ce->sseu.max_eus_per_subslice; + + intel_context_pin_unlock(ce); + + if (copy_to_user(u64_to_user_ptr(args->value), &user_sseu, + sizeof(user_sseu))) + return -EFAULT; + +out: + args->size = sizeof(user_sseu); + + return 0; +} + +int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct drm_i915_file_private *file_priv = file->driver_priv; + struct drm_i915_gem_context_param *args = data; + struct i915_gem_context *ctx; + int ret = 0; + + ctx = i915_gem_context_lookup(file_priv, args->ctx_id); + if (!ctx) + return -ENOENT; + + switch (args->param) { + case I915_CONTEXT_PARAM_NO_ZEROMAP: + args->size = 0; + args->value = test_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags); + break; + + case I915_CONTEXT_PARAM_GTT_SIZE: + args->size = 0; + if (ctx->ppgtt) + args->value = ctx->ppgtt->vm.total; + else if (to_i915(dev)->mm.aliasing_ppgtt) + args->value = 
to_i915(dev)->mm.aliasing_ppgtt->vm.total; + else + args->value = to_i915(dev)->ggtt.vm.total; + break; + + case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE: + args->size = 0; + args->value = i915_gem_context_no_error_capture(ctx); + break; + + case I915_CONTEXT_PARAM_BANNABLE: + args->size = 0; + args->value = i915_gem_context_is_bannable(ctx); + break; + + case I915_CONTEXT_PARAM_RECOVERABLE: + args->size = 0; + args->value = i915_gem_context_is_recoverable(ctx); + break; + + case I915_CONTEXT_PARAM_PRIORITY: + args->size = 0; + args->value = ctx->sched.priority >> I915_USER_PRIORITY_SHIFT; + break; + + case I915_CONTEXT_PARAM_SSEU: + ret = get_sseu(ctx, args); + break; + + case I915_CONTEXT_PARAM_VM: + ret = get_ppgtt(ctx, args); + break; + + case I915_CONTEXT_PARAM_BAN_PERIOD: + default: + ret = -EINVAL; + break; + } + + i915_gem_context_put(ctx); + return ret; +} + +int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct drm_i915_file_private *file_priv = file->driver_priv; + struct drm_i915_gem_context_param *args = data; + struct i915_gem_context *ctx; + int ret; + + ctx = i915_gem_context_lookup(file_priv, args->ctx_id); + if (!ctx) + return -ENOENT; + + ret = ctx_setparam(ctx, args); + i915_gem_context_put(ctx); return ret; } @@ -1385,3 +1812,28 @@ out_unlock: #include "selftests/mock_context.c" #include "selftests/i915_gem_context.c" #endif + +static void i915_global_gem_context_shrink(void) +{ + kmem_cache_shrink(global.slab_luts); +} + +static void i915_global_gem_context_exit(void) +{ + kmem_cache_destroy(global.slab_luts); +} + +static struct i915_global_gem_context global = { { + .shrink = i915_global_gem_context_shrink, + .exit = i915_global_gem_context_exit, +} }; + +int __init i915_global_gem_context_init(void) +{ + global.slab_luts = KMEM_CACHE(i915_lut_handle, 0); + if (!global.slab_luts) + return -ENOMEM; + + i915_global_register(&global.base); + return 0; +} diff --git a/drivers/gpu/drm/i915/i915_gem_context.h b/drivers/gpu/drm/i915/i915_gem_context.h index ca150a764c24..edc6ba3f0288 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.h +++ b/drivers/gpu/drm/i915/i915_gem_context.h @@ -25,210 +25,16 @@ #ifndef __I915_GEM_CONTEXT_H__ #define __I915_GEM_CONTEXT_H__ -#include <linux/bitops.h> -#include <linux/list.h> -#include <linux/radix-tree.h> +#include "i915_gem_context_types.h" #include "i915_gem.h" #include "i915_scheduler.h" +#include "intel_context.h" #include "intel_device_info.h" -struct pid; - struct drm_device; struct drm_file; -struct drm_i915_private; -struct drm_i915_file_private; -struct i915_hw_ppgtt; -struct i915_request; -struct i915_vma; -struct intel_ring; - -#define DEFAULT_CONTEXT_HANDLE 0 - -struct intel_context; - -struct intel_context_ops { - void (*unpin)(struct intel_context *ce); - void (*destroy)(struct intel_context *ce); -}; - -/* - * Powergating configuration for a particular (context,engine). - */ -struct intel_sseu { - u8 slice_mask; - u8 subslice_mask; - u8 min_eus_per_subslice; - u8 max_eus_per_subslice; -}; - -/** - * struct i915_gem_context - client state - * - * The struct i915_gem_context represents the combined view of the driver and - * logical hardware state for a particular client. 
- */ -struct i915_gem_context { - /** i915: i915 device backpointer */ - struct drm_i915_private *i915; - - /** file_priv: owning file descriptor */ - struct drm_i915_file_private *file_priv; - - /** - * @ppgtt: unique address space (GTT) - * - * In full-ppgtt mode, each context has its own address space ensuring - * complete seperation of one client from all others. - * - * In other modes, this is a NULL pointer with the expectation that - * the caller uses the shared global GTT. - */ - struct i915_hw_ppgtt *ppgtt; - - /** - * @pid: process id of creator - * - * Note that who created the context may not be the principle user, - * as the context may be shared across a local socket. However, - * that should only affect the default context, all contexts created - * explicitly by the client are expected to be isolated. - */ - struct pid *pid; - - /** - * @name: arbitrary name - * - * A name is constructed for the context from the creator's process - * name, pid and user handle in order to uniquely identify the - * context in messages. - */ - const char *name; - - /** link: place with &drm_i915_private.context_list */ - struct list_head link; - struct llist_node free_link; - - /** - * @ref: reference count - * - * A reference to a context is held by both the client who created it - * and on each request submitted to the hardware using the request - * (to ensure the hardware has access to the state until it has - * finished all pending writes). See i915_gem_context_get() and - * i915_gem_context_put() for access. - */ - struct kref ref; - - /** - * @rcu: rcu_head for deferred freeing. - */ - struct rcu_head rcu; - - /** - * @user_flags: small set of booleans controlled by the user - */ - unsigned long user_flags; -#define UCONTEXT_NO_ZEROMAP 0 -#define UCONTEXT_NO_ERROR_CAPTURE 1 -#define UCONTEXT_BANNABLE 2 - - /** - * @flags: small set of booleans - */ - unsigned long flags; -#define CONTEXT_BANNED 0 -#define CONTEXT_CLOSED 1 -#define CONTEXT_FORCE_SINGLE_SUBMISSION 2 - - /** - * @hw_id: - unique identifier for the context - * - * The hardware needs to uniquely identify the context for a few - * functions like fault reporting, PASID, scheduling. The - * &drm_i915_private.context_hw_ida is used to assign a unqiue - * id for the lifetime of the context. - * - * @hw_id_pin_count: - number of times this context had been pinned - * for use (should be, at most, once per engine). - * - * @hw_id_link: - all contexts with an assigned id are tracked - * for possible repossession. - */ - unsigned int hw_id; - atomic_t hw_id_pin_count; - struct list_head hw_id_link; - - /** - * @user_handle: userspace identifier - * - * A unique per-file identifier is generated from - * &drm_i915_file_private.contexts. - */ - u32 user_handle; - - struct i915_sched_attr sched; - - /** engine: per-engine logical HW state */ - struct intel_context { - struct i915_gem_context *gem_context; - struct intel_engine_cs *active; - struct list_head signal_link; - struct list_head signals; - struct i915_vma *state; - struct intel_ring *ring; - u32 *lrc_reg_state; - u64 lrc_desc; - int pin_count; - - /** - * active_tracker: Active tracker for the external rq activity - * on this intel_context object. 
- */ - struct i915_active_request active_tracker; - - const struct intel_context_ops *ops; - - /** sseu: Control eu/slice partitioning */ - struct intel_sseu sseu; - } __engine[I915_NUM_ENGINES]; - - /** ring_size: size for allocating the per-engine ring buffer */ - u32 ring_size; - /** desc_template: invariant fields for the HW context descriptor */ - u32 desc_template; - - /** guilty_count: How many times this context has caused a GPU hang. */ - atomic_t guilty_count; - /** - * @active_count: How many times this context was active during a GPU - * hang, but did not cause it. - */ - atomic_t active_count; - -#define CONTEXT_SCORE_GUILTY 10 -#define CONTEXT_SCORE_BAN_THRESHOLD 40 - /** ban_score: Accumulated score of all hangs caused by this context. */ - atomic_t ban_score; - - /** remap_slice: Bitmask of cache lines that need remapping */ - u8 remap_slice; - - /** handles_vma: rbtree to look up our context specific obj/vma for - * the user handle. (user handles are per fd, but the binding is - * per vm, which may be one per context or shared with the global GTT) - */ - struct radix_tree_root handles_vma; - - /** handles_list: reverse list of all the rbtree entries in use for - * this context, which allows us to free all the allocations on - * context close. - */ - struct list_head handles_list; -}; - static inline bool i915_gem_context_is_closed(const struct i915_gem_context *ctx) { return test_bit(CONTEXT_CLOSED, &ctx->flags); @@ -270,6 +76,21 @@ static inline void i915_gem_context_clear_bannable(struct i915_gem_context *ctx) clear_bit(UCONTEXT_BANNABLE, &ctx->user_flags); } +static inline bool i915_gem_context_is_recoverable(const struct i915_gem_context *ctx) +{ + return test_bit(UCONTEXT_RECOVERABLE, &ctx->user_flags); +} + +static inline void i915_gem_context_set_recoverable(struct i915_gem_context *ctx) +{ + set_bit(UCONTEXT_RECOVERABLE, &ctx->user_flags); +} + +static inline void i915_gem_context_clear_recoverable(struct i915_gem_context *ctx) +{ + clear_bit(UCONTEXT_RECOVERABLE, &ctx->user_flags); +} + static inline bool i915_gem_context_is_banned(const struct i915_gem_context *ctx) { return test_bit(CONTEXT_BANNED, &ctx->flags); @@ -305,45 +126,11 @@ static inline void i915_gem_context_unpin_hw_id(struct i915_gem_context *ctx) atomic_dec(&ctx->hw_id_pin_count); } -static inline bool i915_gem_context_is_default(const struct i915_gem_context *c) -{ - return c->user_handle == DEFAULT_CONTEXT_HANDLE; -} - static inline bool i915_gem_context_is_kernel(struct i915_gem_context *ctx) { return !ctx->file_priv; } -static inline struct intel_context * -to_intel_context(struct i915_gem_context *ctx, - const struct intel_engine_cs *engine) -{ - return &ctx->__engine[engine->id]; -} - -static inline struct intel_context * -intel_context_pin(struct i915_gem_context *ctx, struct intel_engine_cs *engine) -{ - return engine->context_pin(engine, ctx); -} - -static inline void __intel_context_pin(struct intel_context *ce) -{ - GEM_BUG_ON(!ce->pin_count); - ce->pin_count++; -} - -static inline void intel_context_unpin(struct intel_context *ce) -{ - GEM_BUG_ON(!ce->pin_count); - if (--ce->pin_count) - return; - - GEM_BUG_ON(!ce->ops); - ce->ops->unpin(ce); -} - /* i915_gem_context.c */ int __must_check i915_gem_contexts_init(struct drm_i915_private *dev_priv); void i915_gem_contexts_lost(struct drm_i915_private *dev_priv); @@ -354,12 +141,18 @@ int i915_gem_context_open(struct drm_i915_private *i915, void i915_gem_context_close(struct drm_file *file); int i915_switch_context(struct i915_request *rq); 
-int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv); +int i915_gem_switch_to_kernel_context(struct drm_i915_private *i915, + unsigned long engine_mask); void i915_gem_context_release(struct kref *ctx_ref); struct i915_gem_context * i915_gem_context_create_gvt(struct drm_device *dev); +int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data, + struct drm_file *file); +int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data, + struct drm_file *file); + int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file); int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data, @@ -386,8 +179,7 @@ static inline void i915_gem_context_put(struct i915_gem_context *ctx) kref_put(&ctx->ref, i915_gem_context_release); } -void intel_context_init(struct intel_context *ce, - struct i915_gem_context *ctx, - struct intel_engine_cs *engine); +struct i915_lut_handle *i915_lut_handle_alloc(void); +void i915_lut_handle_free(struct i915_lut_handle *lut); #endif /* !__I915_GEM_CONTEXT_H__ */ diff --git a/drivers/gpu/drm/i915/i915_gem_context_types.h b/drivers/gpu/drm/i915/i915_gem_context_types.h new file mode 100644 index 000000000000..e2ec58b10fb2 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_gem_context_types.h @@ -0,0 +1,175 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#ifndef __I915_GEM_CONTEXT_TYPES_H__ +#define __I915_GEM_CONTEXT_TYPES_H__ + +#include <linux/atomic.h> +#include <linux/list.h> +#include <linux/llist.h> +#include <linux/kref.h> +#include <linux/mutex.h> +#include <linux/radix-tree.h> +#include <linux/rbtree.h> +#include <linux/rcupdate.h> +#include <linux/types.h> + +#include "i915_scheduler.h" +#include "intel_context_types.h" + +struct pid; + +struct drm_i915_private; +struct drm_i915_file_private; +struct i915_hw_ppgtt; +struct i915_timeline; +struct intel_ring; + +/** + * struct i915_gem_context - client state + * + * The struct i915_gem_context represents the combined view of the driver and + * logical hardware state for a particular client. + */ +struct i915_gem_context { + /** i915: i915 device backpointer */ + struct drm_i915_private *i915; + + /** file_priv: owning file descriptor */ + struct drm_i915_file_private *file_priv; + + struct i915_timeline *timeline; + + /** + * @ppgtt: unique address space (GTT) + * + * In full-ppgtt mode, each context has its own address space ensuring + * complete seperation of one client from all others. + * + * In other modes, this is a NULL pointer with the expectation that + * the caller uses the shared global GTT. + */ + struct i915_hw_ppgtt *ppgtt; + + /** + * @pid: process id of creator + * + * Note that who created the context may not be the principle user, + * as the context may be shared across a local socket. However, + * that should only affect the default context, all contexts created + * explicitly by the client are expected to be isolated. + */ + struct pid *pid; + + /** + * @name: arbitrary name + * + * A name is constructed for the context from the creator's process + * name, pid and user handle in order to uniquely identify the + * context in messages. 
+ */ + const char *name; + + /** link: place with &drm_i915_private.context_list */ + struct list_head link; + struct llist_node free_link; + + /** + * @ref: reference count + * + * A reference to a context is held by both the client who created it + * and on each request submitted to the hardware using the request + * (to ensure the hardware has access to the state until it has + * finished all pending writes). See i915_gem_context_get() and + * i915_gem_context_put() for access. + */ + struct kref ref; + + /** + * @rcu: rcu_head for deferred freeing. + */ + struct rcu_head rcu; + + /** + * @user_flags: small set of booleans controlled by the user + */ + unsigned long user_flags; +#define UCONTEXT_NO_ZEROMAP 0 +#define UCONTEXT_NO_ERROR_CAPTURE 1 +#define UCONTEXT_BANNABLE 2 +#define UCONTEXT_RECOVERABLE 3 + + /** + * @flags: small set of booleans + */ + unsigned long flags; +#define CONTEXT_BANNED 0 +#define CONTEXT_CLOSED 1 +#define CONTEXT_FORCE_SINGLE_SUBMISSION 2 + + /** + * @hw_id: - unique identifier for the context + * + * The hardware needs to uniquely identify the context for a few + * functions like fault reporting, PASID, scheduling. The + * &drm_i915_private.context_hw_ida is used to assign a unqiue + * id for the lifetime of the context. + * + * @hw_id_pin_count: - number of times this context had been pinned + * for use (should be, at most, once per engine). + * + * @hw_id_link: - all contexts with an assigned id are tracked + * for possible repossession. + */ + unsigned int hw_id; + atomic_t hw_id_pin_count; + struct list_head hw_id_link; + + struct list_head active_engines; + struct mutex mutex; + + struct i915_sched_attr sched; + + /** hw_contexts: per-engine logical HW state */ + struct rb_root hw_contexts; + spinlock_t hw_contexts_lock; + + /** ring_size: size for allocating the per-engine ring buffer */ + u32 ring_size; + /** desc_template: invariant fields for the HW context descriptor */ + u32 desc_template; + + /** guilty_count: How many times this context has caused a GPU hang. */ + atomic_t guilty_count; + /** + * @active_count: How many times this context was active during a GPU + * hang, but did not cause it. + */ + atomic_t active_count; + + /** + * @hang_timestamp: The last time(s) this context caused a GPU hang + */ + unsigned long hang_timestamp[2]; +#define CONTEXT_FAST_HANG_JIFFIES (120 * HZ) /* 3 hangs within 120s? Banned! */ + + /** remap_slice: Bitmask of cache lines that need remapping */ + u8 remap_slice; + + /** handles_vma: rbtree to look up our context specific obj/vma for + * the user handle. (user handles are per fd, but the binding is + * per vm, which may be one per context or shared with the global GTT) + */ + struct radix_tree_root handles_vma; + + /** handles_list: reverse list of all the rbtree entries in use for + * this context, which allows us to free all the allocations on + * context close. 
+ */ + struct list_head handles_list; +}; + +#endif /* __I915_GEM_CONTEXT_TYPES_H__ */ diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c index 02f7298bfe57..5a101a9462d8 100644 --- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c +++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c @@ -107,6 +107,7 @@ static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr) { struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf); + i915_gem_object_flush_map(obj); i915_gem_object_unpin_map(obj); } @@ -300,7 +301,7 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev, get_dma_buf(dma_buf); - obj = i915_gem_object_alloc(to_i915(dev)); + obj = i915_gem_object_alloc(); if (obj == NULL) { ret = -ENOMEM; goto fail_detach; diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c index 68d74c50ac39..060f5903544a 100644 --- a/drivers/gpu/drm/i915/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/i915_gem_evict.c @@ -38,31 +38,21 @@ I915_SELFTEST_DECLARE(static struct igt_evict_ctl { static bool ggtt_is_idle(struct drm_i915_private *i915) { - struct intel_engine_cs *engine; - enum intel_engine_id id; - - if (i915->gt.active_requests) - return false; - - for_each_engine(engine, i915, id) { - if (!intel_engine_has_kernel_context(engine)) - return false; - } - - return true; + return !i915->gt.active_requests; } static int ggtt_flush(struct drm_i915_private *i915) { int err; - /* Not everything in the GGTT is tracked via vma (otherwise we + /* + * Not everything in the GGTT is tracked via vma (otherwise we * could evict as required with minimal stalling) so we are forced * to idle the GPU and explicitly retire outstanding requests in * the hopes that we can then remove contexts and the like only * bound by their active reference. */ - err = i915_gem_switch_to_kernel_context(i915); + err = i915_gem_switch_to_kernel_context(i915, i915->gt.active_engines); if (err) return err; diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 02adcaf6ebea..3d672c9edb94 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -794,8 +794,8 @@ static int eb_wait_for_ring(const struct i915_execbuffer *eb) * keeping all of their resources pinned. */ - ce = to_intel_context(eb->ctx, eb->engine); - if (!ce->ring) /* first use, assume empty! */ + ce = intel_context_lookup(eb->ctx, eb->engine); + if (!ce || !ce->ring) /* first use, assume empty! 
*/ return 0; rq = __eb_wait_for_ring(ce->ring); @@ -849,12 +849,12 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb) } vma = i915_vma_instance(obj, eb->vm, NULL); - if (unlikely(IS_ERR(vma))) { + if (IS_ERR(vma)) { err = PTR_ERR(vma); goto err_obj; } - lut = kmem_cache_alloc(eb->i915->luts, GFP_KERNEL); + lut = i915_lut_handle_alloc(); if (unlikely(!lut)) { err = -ENOMEM; goto err_obj; @@ -862,7 +862,7 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb) err = radix_tree_insert(handles_vma, handle, vma); if (unlikely(err)) { - kmem_cache_free(eb->i915->luts, lut); + i915_lut_handle_free(lut); goto err_obj; } @@ -1001,7 +1001,10 @@ static void reloc_gpu_flush(struct reloc_cache *cache) { GEM_BUG_ON(cache->rq_size >= cache->rq->batch->obj->base.size / sizeof(u32)); cache->rq_cmd[cache->rq_size] = MI_BATCH_BUFFER_END; + + __i915_gem_object_flush_map(cache->rq->batch->obj, 0, cache->rq_size); i915_gem_object_unpin_map(cache->rq->batch->obj); + i915_gem_chipset_flush(cache->rq->i915); i915_request_add(cache->rq); @@ -1214,10 +1217,6 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb, if (IS_ERR(cmd)) return PTR_ERR(cmd); - err = i915_gem_object_set_to_wc_domain(obj, false); - if (err) - goto err_unmap; - batch = i915_vma_instance(obj, vma->vm, NULL); if (IS_ERR(batch)) { err = PTR_ERR(batch); @@ -1957,7 +1956,7 @@ static int i915_reset_gen7_sol_offsets(struct i915_request *rq) u32 *cs; int i; - if (!IS_GEN(rq->i915, 7) || rq->engine->id != RCS) { + if (!IS_GEN(rq->i915, 7) || rq->engine->id != RCS0) { DRM_DEBUG("sol reset is gen7/rcs only\n"); return -EINVAL; } @@ -2082,11 +2081,11 @@ gen8_dispatch_bsd_engine(struct drm_i915_private *dev_priv, #define I915_USER_RINGS (4) static const enum intel_engine_id user_ring_map[I915_USER_RINGS + 1] = { - [I915_EXEC_DEFAULT] = RCS, - [I915_EXEC_RENDER] = RCS, - [I915_EXEC_BLT] = BCS, - [I915_EXEC_BSD] = VCS, - [I915_EXEC_VEBOX] = VECS + [I915_EXEC_DEFAULT] = RCS0, + [I915_EXEC_RENDER] = RCS0, + [I915_EXEC_BLT] = BCS0, + [I915_EXEC_BSD] = VCS0, + [I915_EXEC_VEBOX] = VECS0 }; static struct intel_engine_cs * @@ -2109,7 +2108,7 @@ eb_select_engine(struct drm_i915_private *dev_priv, return NULL; } - if (user_ring_id == I915_EXEC_BSD && HAS_BSD2(dev_priv)) { + if (user_ring_id == I915_EXEC_BSD && HAS_ENGINE(dev_priv, VCS1)) { unsigned int bsd_idx = args->flags & I915_EXEC_BSD_MASK; if (bsd_idx == I915_EXEC_BSD_DEFAULT) { @@ -2312,10 +2311,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, if (args->flags & I915_EXEC_IS_PINNED) eb.batch_flags |= I915_DISPATCH_PINNED; - eb.engine = eb_select_engine(eb.i915, file, args); - if (!eb.engine) - return -EINVAL; - if (args->flags & I915_EXEC_FENCE_IN) { in_fence = sync_file_get_fence(lower_32_bits(args->rsvd2)); if (!in_fence) @@ -2340,6 +2335,12 @@ i915_gem_do_execbuffer(struct drm_device *dev, if (unlikely(err)) goto err_destroy; + eb.engine = eb_select_engine(eb.i915, file, args); + if (!eb.engine) { + err = -EINVAL; + goto err_engine; + } + /* * Take a local wakeref for preparing to dispatch the execbuf as * we expect to access the hardware fairly frequently in the @@ -2505,6 +2506,7 @@ err_unlock: mutex_unlock(&dev->struct_mutex); err_rpm: intel_runtime_pm_put(eb.i915, wakeref); +err_engine: i915_gem_context_put(eb.ctx); err_destroy: eb_destroy(&eb); diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.c b/drivers/gpu/drm/i915/i915_gem_fence_reg.c index e037e94792f3..3084f52e3372 100644 --- a/drivers/gpu/drm/i915/i915_gem_fence_reg.c +++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.c @@ -210,6 
+210,7 @@ static int fence_update(struct drm_i915_fence_reg *fence, struct i915_vma *vma) { intel_wakeref_t wakeref; + struct i915_vma *old; int ret; if (vma) { @@ -229,49 +230,55 @@ static int fence_update(struct drm_i915_fence_reg *fence, return ret; } - if (fence->vma) { - struct i915_vma *old = fence->vma; - + old = xchg(&fence->vma, NULL); + if (old) { ret = i915_active_request_retire(&old->last_fence, &old->obj->base.dev->struct_mutex); - if (ret) + if (ret) { + fence->vma = old; return ret; + } i915_vma_flush_writes(old); - } - if (fence->vma && fence->vma != vma) { - /* Ensure that all userspace CPU access is completed before + /* + * Ensure that all userspace CPU access is completed before * stealing the fence. */ - GEM_BUG_ON(fence->vma->fence != fence); - i915_vma_revoke_mmap(fence->vma); - - fence->vma->fence = NULL; - fence->vma = NULL; + if (old != vma) { + GEM_BUG_ON(old->fence != fence); + i915_vma_revoke_mmap(old); + old->fence = NULL; + } list_move(&fence->link, &fence->i915->mm.fence_list); } - /* We only need to update the register itself if the device is awake. + /* + * We only need to update the register itself if the device is awake. * If the device is currently powered down, we will defer the write * to the runtime resume, see i915_gem_restore_fences(). + * + * This only works for removing the fence register, on acquisition + * the caller must hold the rpm wakeref. The fence register must + * be cleared before we can use any other fences to ensure that + * the new fences do not overlap the elided clears, confusing HW. */ wakeref = intel_runtime_pm_get_if_in_use(fence->i915); - if (wakeref) { - fence_write(fence, vma); - intel_runtime_pm_put(fence->i915, wakeref); + if (!wakeref) { + GEM_BUG_ON(vma); + return 0; } - if (vma) { - if (fence->vma != vma) { - vma->fence = fence; - fence->vma = vma; - } + WRITE_ONCE(fence->vma, vma); + fence_write(fence, vma); + if (vma) { + vma->fence = fence; list_move_tail(&fence->link, &fence->i915->mm.fence_list); } + intel_runtime_pm_put(fence->i915, wakeref); return 0; } @@ -436,32 +443,6 @@ void i915_unreserve_fence(struct drm_i915_fence_reg *fence) } /** - * i915_gem_revoke_fences - revoke fence state - * @dev_priv: i915 device private - * - * Removes all GTT mmappings via the fence registers. This forces any user - * of the fence to reacquire that fence before continuing with their access. - * One use is during GPU reset where the fence register is lost and we need to - * revoke concurrent userspace access via GTT mmaps until the hardware has been - * reset and the fence registers have been restored. 
- */ -void i915_gem_revoke_fences(struct drm_i915_private *dev_priv) -{ - int i; - - lockdep_assert_held(&dev_priv->drm.struct_mutex); - - for (i = 0; i < dev_priv->num_fence_regs; i++) { - struct drm_i915_fence_reg *fence = &dev_priv->fence_regs[i]; - - GEM_BUG_ON(fence->vma && fence->vma->fence != fence); - - if (fence->vma) - i915_vma_revoke_mmap(fence->vma); - } -} - -/** * i915_gem_restore_fences - restore fence state * @dev_priv: i915 device private * @@ -473,9 +454,10 @@ void i915_gem_restore_fences(struct drm_i915_private *dev_priv) { int i; + rcu_read_lock(); /* keep obj alive as we dereference */ for (i = 0; i < dev_priv->num_fence_regs; i++) { struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i]; - struct i915_vma *vma = reg->vma; + struct i915_vma *vma = READ_ONCE(reg->vma); GEM_BUG_ON(vma && vma->fence != reg); @@ -483,18 +465,12 @@ void i915_gem_restore_fences(struct drm_i915_private *dev_priv) * Commit delayed tiling changes if we have an object still * attached to the fence, otherwise just clear the fence. */ - if (vma && !i915_gem_object_is_tiled(vma->obj)) { - GEM_BUG_ON(!reg->dirty); - GEM_BUG_ON(i915_vma_has_userfault(vma)); - - list_move(®->link, &dev_priv->mm.fence_list); - vma->fence = NULL; + if (vma && !i915_gem_object_is_tiled(vma->obj)) vma = NULL; - } fence_write(reg, vma); - reg->vma = vma; } + rcu_read_unlock(); } /** @@ -609,8 +585,38 @@ i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv) */ swizzle_x = I915_BIT_6_SWIZZLE_NONE; swizzle_y = I915_BIT_6_SWIZZLE_NONE; - } else if (IS_MOBILE(dev_priv) || - IS_I915G(dev_priv) || IS_I945G(dev_priv)) { + } else if (IS_G45(dev_priv) || IS_I965G(dev_priv) || IS_G33(dev_priv)) { + /* The 965, G33, and newer, have a very flexible memory + * configuration. It will enable dual-channel mode + * (interleaving) on as much memory as it can, and the GPU + * will additionally sometimes enable different bit 6 + * swizzling for tiled objects from the CPU. + * + * Here's what I found on the G965: + * slot fill memory size swizzling + * 0A 0B 1A 1B 1-ch 2-ch + * 512 0 0 0 512 0 O + * 512 0 512 0 16 1008 X + * 512 0 0 512 16 1008 X + * 0 512 0 512 16 1008 X + * 1024 1024 1024 0 2048 1024 O + * + * We could probably detect this based on either the DRB + * matching, which was the case for the swizzling required in + * the table above, or from the 1-ch value being less than + * the minimum size of a rank. + * + * Reports indicate that the swizzling actually + * varies depending upon page placement inside the + * channels, i.e. we see swizzled pages where the + * banks of memory are paired and unswizzled on the + * uneven portion, so leave that as unknown. + */ + if (I915_READ16(C0DRB3) == I915_READ16(C1DRB3)) { + swizzle_x = I915_BIT_6_SWIZZLE_9_10; + swizzle_y = I915_BIT_6_SWIZZLE_9; + } + } else { u32 dcc; /* On 9xx chipsets, channel interleave by the CPU is @@ -660,37 +666,6 @@ i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv) swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; } - } else { - /* The 965, G33, and newer, have a very flexible memory - * configuration. It will enable dual-channel mode - * (interleaving) on as much memory as it can, and the GPU - * will additionally sometimes enable different bit 6 - * swizzling for tiled objects from the CPU. 
- * - * Here's what I found on the G965: - * slot fill memory size swizzling - * 0A 0B 1A 1B 1-ch 2-ch - * 512 0 0 0 512 0 O - * 512 0 512 0 16 1008 X - * 512 0 0 512 16 1008 X - * 0 512 0 512 16 1008 X - * 1024 1024 1024 0 2048 1024 O - * - * We could probably detect this based on either the DRB - * matching, which was the case for the swizzling required in - * the table above, or from the 1-ch value being less than - * the minimum size of a rank. - * - * Reports indicate that the swizzling actually - * varies depending upon page placement inside the - * channels, i.e. we see swizzled pages where the - * banks of memory are paired and unswizzled on the - * uneven portion, so leave that as unknown. - */ - if (I915_READ16(C0DRB3) == I915_READ16(C1DRB3)) { - swizzle_x = I915_BIT_6_SWIZZLE_9_10; - swizzle_y = I915_BIT_6_SWIZZLE_9; - } } if (swizzle_x == I915_BIT_6_SWIZZLE_UNKNOWN || @@ -790,8 +765,7 @@ i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj, int i; if (obj->bit_17 == NULL) { - obj->bit_17 = kcalloc(BITS_TO_LONGS(page_count), - sizeof(long), GFP_KERNEL); + obj->bit_17 = bitmap_zalloc(page_count, GFP_KERNEL); if (obj->bit_17 == NULL) { DRM_ERROR("Failed to allocate memory for bit 17 " "record\n"); diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index d646d37eec2f..736c845eb77f 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -584,7 +584,7 @@ setup_scratch_page(struct i915_address_space *vm, gfp_t gfp) * for all. */ size = I915_GTT_PAGE_SIZE_4K; - if (i915_vm_is_48bit(vm) && + if (i915_vm_is_4lvl(vm) && HAS_PAGE_SIZES(vm->i915, I915_GTT_PAGE_SIZE_64K)) { size = I915_GTT_PAGE_SIZE_64K; gfp |= __GFP_NOWARN; @@ -613,7 +613,7 @@ setup_scratch_page(struct i915_address_space *vm, gfp_t gfp) vm->scratch_page.page = page; vm->scratch_page.daddr = addr; - vm->scratch_page.order = order; + vm->scratch_order = order; return 0; unmap_page: @@ -632,10 +632,11 @@ skip: static void cleanup_scratch_page(struct i915_address_space *vm) { struct i915_page_dma *p = &vm->scratch_page; + int order = vm->scratch_order; - dma_unmap_page(vm->dma, p->daddr, BIT(p->order) << PAGE_SHIFT, + dma_unmap_page(vm->dma, p->daddr, BIT(order) << PAGE_SHIFT, PCI_DMA_BIDIRECTIONAL); - __free_pages(p->page, p->order); + __free_pages(p->page, order); } static struct i915_page_table *alloc_pt(struct i915_address_space *vm) @@ -726,18 +727,13 @@ static void __pdp_fini(struct i915_page_directory_pointer *pdp) pdp->page_directory = NULL; } -static inline bool use_4lvl(const struct i915_address_space *vm) -{ - return i915_vm_is_48bit(vm); -} - static struct i915_page_directory_pointer * alloc_pdp(struct i915_address_space *vm) { struct i915_page_directory_pointer *pdp; int ret = -ENOMEM; - GEM_BUG_ON(!use_4lvl(vm)); + GEM_BUG_ON(!i915_vm_is_4lvl(vm)); pdp = kzalloc(sizeof(*pdp), GFP_KERNEL); if (!pdp) @@ -766,7 +762,7 @@ static void free_pdp(struct i915_address_space *vm, { __pdp_fini(pdp); - if (!use_4lvl(vm)) + if (!i915_vm_is_4lvl(vm)) return; cleanup_px(vm, pdp); @@ -791,14 +787,15 @@ static void gen8_initialize_pml4(struct i915_address_space *vm, memset_p((void **)pml4->pdps, vm->scratch_pdp, GEN8_PML4ES_PER_PML4); } -/* PDE TLBs are a pain to invalidate on GEN8+. When we modify +/* + * PDE TLBs are a pain to invalidate on GEN8+. When we modify * the page table structures, we mark them dirty so that * context switching/execlist queuing code takes extra steps * to ensure that tlbs are flushed. 
*/ static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt) { - ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->vm.i915)->ring_mask; + ppgtt->pd_dirty_engines = ALL_ENGINES; } /* Removes entries from a single page table, releasing it if it's empty. @@ -809,8 +806,6 @@ static bool gen8_ppgtt_clear_pt(const struct i915_address_space *vm, u64 start, u64 length) { unsigned int num_entries = gen8_pte_count(start, length); - unsigned int pte = gen8_pte_index(start); - unsigned int pte_end = pte + num_entries; gen8_pte_t *vaddr; GEM_BUG_ON(num_entries > pt->used_ptes); @@ -820,8 +815,7 @@ static bool gen8_ppgtt_clear_pt(const struct i915_address_space *vm, return true; vaddr = kmap_atomic_px(pt); - while (pte < pte_end) - vaddr[pte++] = vm->scratch_pte; + memset64(vaddr + gen8_pte_index(start), vm->scratch_pte, num_entries); kunmap_atomic(vaddr); return false; @@ -872,7 +866,7 @@ static void gen8_ppgtt_set_pdpe(struct i915_address_space *vm, gen8_ppgtt_pdpe_t *vaddr; pdp->page_directory[pdpe] = pd; - if (!use_4lvl(vm)) + if (!i915_vm_is_4lvl(vm)) return; vaddr = kmap_atomic_px(pdp); @@ -937,7 +931,7 @@ static void gen8_ppgtt_clear_4lvl(struct i915_address_space *vm, struct i915_page_directory_pointer *pdp; unsigned int pml4e; - GEM_BUG_ON(!use_4lvl(vm)); + GEM_BUG_ON(!i915_vm_is_4lvl(vm)); gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) { GEM_BUG_ON(pdp == vm->scratch_pdp); @@ -1219,7 +1213,7 @@ static int gen8_init_scratch(struct i915_address_space *vm) GEM_BUG_ON(!clone->has_read_only); - vm->scratch_page.order = clone->scratch_page.order; + vm->scratch_order = clone->scratch_order; vm->scratch_pte = clone->scratch_pte; vm->scratch_pt = clone->scratch_pt; vm->scratch_pd = clone->scratch_pd; @@ -1248,7 +1242,7 @@ static int gen8_init_scratch(struct i915_address_space *vm) goto free_pt; } - if (use_4lvl(vm)) { + if (i915_vm_is_4lvl(vm)) { vm->scratch_pdp = alloc_pdp(vm); if (IS_ERR(vm->scratch_pdp)) { ret = PTR_ERR(vm->scratch_pdp); @@ -1258,7 +1252,7 @@ static int gen8_init_scratch(struct i915_address_space *vm) gen8_initialize_pt(vm, vm->scratch_pt); gen8_initialize_pd(vm, vm->scratch_pd); - if (use_4lvl(vm)) + if (i915_vm_is_4lvl(vm)) gen8_initialize_pdp(vm, vm->scratch_pdp); return 0; @@ -1280,7 +1274,7 @@ static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create) enum vgt_g2v_type msg; int i; - if (use_4lvl(vm)) { + if (i915_vm_is_4lvl(vm)) { const u64 daddr = px_dma(&ppgtt->pml4); I915_WRITE(vgtif_reg(pdp[0].lo), lower_32_bits(daddr)); @@ -1310,7 +1304,7 @@ static void gen8_free_scratch(struct i915_address_space *vm) if (!vm->scratch_page.daddr) return; - if (use_4lvl(vm)) + if (i915_vm_is_4lvl(vm)) free_pdp(vm, vm->scratch_pdp); free_pd(vm, vm->scratch_pd); free_pt(vm, vm->scratch_pt); @@ -1356,7 +1350,7 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm) if (intel_vgpu_active(dev_priv)) gen8_ppgtt_notify_vgt(ppgtt, false); - if (use_4lvl(vm)) + if (i915_vm_is_4lvl(vm)) gen8_ppgtt_cleanup_4lvl(ppgtt); else gen8_ppgtt_cleanup_3lvl(&ppgtt->vm, &ppgtt->pdp); @@ -1519,6 +1513,23 @@ unwind: return -ENOMEM; } +static void ppgtt_init(struct drm_i915_private *i915, + struct i915_hw_ppgtt *ppgtt) +{ + kref_init(&ppgtt->ref); + + ppgtt->vm.i915 = i915; + ppgtt->vm.dma = &i915->drm.pdev->dev; + ppgtt->vm.total = BIT_ULL(INTEL_INFO(i915)->ppgtt_size); + + i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT); + + ppgtt->vm.vma_ops.bind_vma = ppgtt_bind_vma; + ppgtt->vm.vma_ops.unbind_vma = ppgtt_unbind_vma; + ppgtt->vm.vma_ops.set_pages = ppgtt_set_pages; + 
ppgtt->vm.vma_ops.clear_pages = clear_pages; +} + /* * GEN8 legacy ppgtt programming is accomplished through a max 4 PDP registers * with a net effect resembling a 2-level page table in normal x86 terms. Each @@ -1535,20 +1546,11 @@ static struct i915_hw_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915) if (!ppgtt) return ERR_PTR(-ENOMEM); - kref_init(&ppgtt->ref); - - ppgtt->vm.i915 = i915; - ppgtt->vm.dma = &i915->drm.pdev->dev; - - ppgtt->vm.total = HAS_FULL_48BIT_PPGTT(i915) ? - 1ULL << 48 : - 1ULL << 32; + ppgtt_init(i915, ppgtt); /* From bdw, there is support for read-only pages in the PPGTT. */ ppgtt->vm.has_read_only = true; - i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT); - /* There are only few exceptions for gen >=6. chv and bxt. * And we are not sure about the latter so play safe for now. */ @@ -1559,7 +1561,7 @@ static struct i915_hw_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915) if (err) goto err_free; - if (use_4lvl(&ppgtt->vm)) { + if (i915_vm_is_4lvl(&ppgtt->vm)) { err = setup_px(&ppgtt->vm, &ppgtt->pml4); if (err) goto err_scratch; @@ -1592,11 +1594,6 @@ static struct i915_hw_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915) ppgtt->vm.cleanup = gen8_ppgtt_cleanup; - ppgtt->vm.vma_ops.bind_vma = ppgtt_bind_vma; - ppgtt->vm.vma_ops.unbind_vma = ppgtt_unbind_vma; - ppgtt->vm.vma_ops.set_pages = ppgtt_set_pages; - ppgtt->vm.vma_ops.clear_pages = clear_pages; - return ppgtt; err_scratch: @@ -1672,8 +1669,7 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm, while (num_entries) { struct i915_page_table *pt = ppgtt->base.pd.page_table[pde++]; - const unsigned int end = min(pte + num_entries, GEN6_PTES); - const unsigned int count = end - pte; + const unsigned int count = min(num_entries, GEN6_PTES - pte); gen6_pte_t *vaddr; GEM_BUG_ON(pt == vm->scratch_pt); @@ -1693,9 +1689,7 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm, */ vaddr = kmap_atomic_px(pt); - do { - vaddr[pte++] = scratch_pte; - } while (pte < end); + memset32(vaddr + pte, scratch_pte, count); kunmap_atomic(vaddr); pte = 0; @@ -1913,7 +1907,7 @@ static struct i915_vma *pd_vma_create(struct gen6_hw_ppgtt *ppgtt, int size) GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE)); GEM_BUG_ON(size > ggtt->vm.total); - vma = kmem_cache_zalloc(i915->vmas, GFP_KERNEL); + vma = i915_vma_alloc(); if (!vma) return ERR_PTR(-ENOMEM); @@ -1943,6 +1937,8 @@ int gen6_ppgtt_pin(struct i915_hw_ppgtt *base) struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base); int err; + GEM_BUG_ON(ppgtt->base.vm.closed); + /* * Workaround the limited maximum vma->pin_count and the aliasing_ppgtt * which will be pinned into every active context. 
@@ -1981,6 +1977,17 @@ void gen6_ppgtt_unpin(struct i915_hw_ppgtt *base) i915_vma_unpin(ppgtt->vma); } +void gen6_ppgtt_unpin_all(struct i915_hw_ppgtt *base) +{ + struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base); + + if (!ppgtt->pin_count) + return; + + ppgtt->pin_count = 0; + i915_vma_unpin(ppgtt->vma); +} + static struct i915_hw_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915) { struct i915_ggtt * const ggtt = &i915->ggtt; @@ -1991,25 +1998,13 @@ static struct i915_hw_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915) if (!ppgtt) return ERR_PTR(-ENOMEM); - kref_init(&ppgtt->base.ref); - - ppgtt->base.vm.i915 = i915; - ppgtt->base.vm.dma = &i915->drm.pdev->dev; - - ppgtt->base.vm.total = I915_PDES * GEN6_PTES * I915_GTT_PAGE_SIZE; - - i915_address_space_init(&ppgtt->base.vm, VM_CLASS_PPGTT); + ppgtt_init(i915, &ppgtt->base); ppgtt->base.vm.allocate_va_range = gen6_alloc_va_range; ppgtt->base.vm.clear_range = gen6_ppgtt_clear_range; ppgtt->base.vm.insert_entries = gen6_ppgtt_insert_entries; ppgtt->base.vm.cleanup = gen6_ppgtt_cleanup; - ppgtt->base.vm.vma_ops.bind_vma = ppgtt_bind_vma; - ppgtt->base.vm.vma_ops.unbind_vma = ppgtt_unbind_vma; - ppgtt->base.vm.vma_ops.set_pages = ppgtt_set_pages; - ppgtt->base.vm.vma_ops.clear_pages = clear_pages; - ppgtt->base.vm.pte_encode = ggtt->vm.pte_encode; err = gen6_ppgtt_init_scratch(ppgtt); @@ -2087,8 +2082,7 @@ __hw_ppgtt_create(struct drm_i915_private *i915) } struct i915_hw_ppgtt * -i915_ppgtt_create(struct drm_i915_private *i915, - struct drm_i915_file_private *fpriv) +i915_ppgtt_create(struct drm_i915_private *i915) { struct i915_hw_ppgtt *ppgtt; @@ -2096,19 +2090,11 @@ i915_ppgtt_create(struct drm_i915_private *i915, if (IS_ERR(ppgtt)) return ppgtt; - ppgtt->vm.file = fpriv; - trace_i915_ppgtt_create(&ppgtt->vm); return ppgtt; } -void i915_ppgtt_close(struct i915_address_space *vm) -{ - GEM_BUG_ON(vm->closed); - vm->closed = true; -} - static void ppgtt_destroy_vma(struct i915_address_space *vm) { struct list_head *phases[] = { @@ -2675,7 +2661,7 @@ int i915_gem_init_aliasing_ppgtt(struct drm_i915_private *i915) struct i915_hw_ppgtt *ppgtt; int err; - ppgtt = i915_ppgtt_create(i915, ERR_PTR(-EPERM)); + ppgtt = i915_ppgtt_create(i915); if (IS_ERR(ppgtt)) return PTR_ERR(ppgtt); @@ -3701,7 +3687,7 @@ i915_get_ggtt_vma_pages(struct i915_vma *vma) } ret = 0; - if (unlikely(IS_ERR(vma->pages))) { + if (IS_ERR(vma->pages)) { ret = PTR_ERR(vma->pages); vma->pages = NULL; DRM_ERROR("Failed to get pages for VMA view type %u (%d)!\n", diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h index 03ade71b8d9a..83ded9fc761a 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.h +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h @@ -213,7 +213,6 @@ struct i915_vma; struct i915_page_dma { struct page *page; - int order; union { dma_addr_t daddr; @@ -293,6 +292,7 @@ struct i915_address_space { #define VM_CLASS_PPGTT 1 u64 scratch_pte; + int scratch_order; struct i915_page_dma scratch_page; struct i915_page_table *scratch_pt; struct i915_page_directory *scratch_pd; @@ -348,7 +348,7 @@ struct i915_address_space { #define i915_is_ggtt(vm) ((vm)->is_ggtt) static inline bool -i915_vm_is_48bit(const struct i915_address_space *vm) +i915_vm_is_4lvl(const struct i915_address_space *vm) { return (vm->total - 1) >> 32; } @@ -356,7 +356,7 @@ i915_vm_is_48bit(const struct i915_address_space *vm) static inline bool i915_vm_has_scratch_64K(struct i915_address_space *vm) { - return vm->scratch_page.order == get_order(I915_GTT_PAGE_SIZE_64K); + 
return vm->scratch_order == get_order(I915_GTT_PAGE_SIZE_64K); } /* The Graphics Translation Table is the way in which GEN hardware translates a @@ -390,12 +390,14 @@ struct i915_hw_ppgtt { struct i915_address_space vm; struct kref ref; - unsigned long pd_dirty_rings; + unsigned long pd_dirty_engines; union { struct i915_pml4 pml4; /* GEN8+ & 48b PPGTT */ struct i915_page_directory_pointer pdp; /* GEN8+ */ struct i915_page_directory pd; /* GEN6-7 */ }; + + u32 user_handle; }; struct gen6_hw_ppgtt { @@ -488,7 +490,7 @@ static inline u32 gen6_pde_index(u32 addr) static inline unsigned int i915_pdpes_per_pdp(const struct i915_address_space *vm) { - if (i915_vm_is_48bit(vm)) + if (i915_vm_is_4lvl(vm)) return GEN8_PML4ES_PER_PML4; return GEN8_3LVL_PDPES; @@ -603,15 +605,16 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv); void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv); int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv); + +struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_i915_private *dev_priv); void i915_ppgtt_release(struct kref *kref); -struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_i915_private *dev_priv, - struct drm_i915_file_private *fpriv); -void i915_ppgtt_close(struct i915_address_space *vm); -static inline void i915_ppgtt_get(struct i915_hw_ppgtt *ppgtt) + +static inline struct i915_hw_ppgtt *i915_ppgtt_get(struct i915_hw_ppgtt *ppgtt) { - if (ppgtt) - kref_get(&ppgtt->ref); + kref_get(&ppgtt->ref); + return ppgtt; } + static inline void i915_ppgtt_put(struct i915_hw_ppgtt *ppgtt) { if (ppgtt) @@ -620,6 +623,7 @@ static inline void i915_ppgtt_put(struct i915_hw_ppgtt *ppgtt) int gen6_ppgtt_pin(struct i915_hw_ppgtt *base); void gen6_ppgtt_unpin(struct i915_hw_ppgtt *base); +void gen6_ppgtt_unpin_all(struct i915_hw_ppgtt *base); void i915_check_and_clear_faults(struct drm_i915_private *dev_priv); void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv); diff --git a/drivers/gpu/drm/i915/i915_gem_internal.c b/drivers/gpu/drm/i915/i915_gem_internal.c index fddde1033e74..ab627ed1269c 100644 --- a/drivers/gpu/drm/i915/i915_gem_internal.c +++ b/drivers/gpu/drm/i915/i915_gem_internal.c @@ -193,7 +193,7 @@ i915_gem_object_create_internal(struct drm_i915_private *i915, if (overflows_type(size, obj->base.size)) return ERR_PTR(-E2BIG); - obj = i915_gem_object_alloc(i915); + obj = i915_gem_object_alloc(); if (!obj) return ERR_PTR(-ENOMEM); diff --git a/drivers/gpu/drm/i915/i915_gem_object.c b/drivers/gpu/drm/i915/i915_gem_object.c index aab8cdd80e6d..ac6a5ab84586 100644 --- a/drivers/gpu/drm/i915/i915_gem_object.c +++ b/drivers/gpu/drm/i915/i915_gem_object.c @@ -24,6 +24,22 @@ #include "i915_drv.h" #include "i915_gem_object.h" +#include "i915_globals.h" + +static struct i915_global_object { + struct i915_global base; + struct kmem_cache *slab_objects; +} global; + +struct drm_i915_gem_object *i915_gem_object_alloc(void) +{ + return kmem_cache_zalloc(global.slab_objects, GFP_KERNEL); +} + +void i915_gem_object_free(struct drm_i915_gem_object *obj) +{ + return kmem_cache_free(global.slab_objects, obj); +} /** * Mark up the object's coherency levels for a given cache_level @@ -46,3 +62,29 @@ void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj, obj->cache_dirty = !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE); } + +static void i915_global_objects_shrink(void) +{ + kmem_cache_shrink(global.slab_objects); +} + +static void i915_global_objects_exit(void) +{ + kmem_cache_destroy(global.slab_objects); +} + +static struct 
i915_global_object global = { { + .shrink = i915_global_objects_shrink, + .exit = i915_global_objects_exit, +} }; + +int __init i915_global_objects_init(void) +{ + global.slab_objects = + KMEM_CACHE(drm_i915_gem_object, SLAB_HWCACHE_ALIGN); + if (!global.slab_objects) + return -ENOMEM; + + i915_global_register(&global.base); + return 0; +} diff --git a/drivers/gpu/drm/i915/i915_gem_object.h b/drivers/gpu/drm/i915/i915_gem_object.h index fab040331cdb..1a24dc97e4fd 100644 --- a/drivers/gpu/drm/i915/i915_gem_object.h +++ b/drivers/gpu/drm/i915/i915_gem_object.h @@ -304,6 +304,9 @@ to_intel_bo(struct drm_gem_object *gem) return container_of(gem, struct drm_i915_gem_object, base); } +struct drm_i915_gem_object *i915_gem_object_alloc(void); +void i915_gem_object_free(struct drm_i915_gem_object *obj); + /** * i915_gem_object_lookup_rcu - look up a temporary GEM object from its handle * @filp: DRM file private date @@ -500,4 +503,3 @@ void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj, void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj); #endif - diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c index 90baf9086d0a..9440024c763f 100644 --- a/drivers/gpu/drm/i915/i915_gem_render_state.c +++ b/drivers/gpu/drm/i915/i915_gem_render_state.c @@ -42,7 +42,7 @@ struct intel_render_state { static const struct intel_renderstate_rodata * render_state_get_rodata(const struct intel_engine_cs *engine) { - if (engine->id != RCS) + if (engine->id != RCS0) return NULL; switch (INTEL_GEN(engine->i915)) { @@ -164,7 +164,7 @@ static int render_state_setup(struct intel_render_state *so, drm_clflush_virt_range(d, i * sizeof(u32)); kunmap_atomic(d); - ret = i915_gem_object_set_to_gtt_domain(so->obj, false); + ret = 0; out: i915_gem_obj_finish_shmem_access(so->obj); return ret; diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c index 74a9661479ca..0a8082cfc761 100644 --- a/drivers/gpu/drm/i915/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c @@ -565,7 +565,7 @@ _i915_gem_object_create_stolen(struct drm_i915_private *dev_priv, struct drm_i915_gem_object *obj; unsigned int cache_level; - obj = i915_gem_object_alloc(dev_priv); + obj = i915_gem_object_alloc(); if (obj == NULL) return NULL; diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index 16cc9ddbce34..a9b5329dae3b 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c @@ -301,11 +301,11 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj, /* Try to preallocate memory required to save swizzling on put-pages */ if (i915_gem_object_needs_bit17_swizzle(obj)) { if (!obj->bit_17) { - obj->bit_17 = kcalloc(BITS_TO_LONGS(obj->base.size >> PAGE_SHIFT), - sizeof(long), GFP_KERNEL); + obj->bit_17 = bitmap_zalloc(obj->base.size >> PAGE_SHIFT, + GFP_KERNEL); } } else { - kfree(obj->bit_17); + bitmap_free(obj->bit_17); obj->bit_17 = NULL; } diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c index 1d3f9a31ad61..ad0087127144 100644 --- a/drivers/gpu/drm/i915/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/i915_gem_userptr.c @@ -795,7 +795,7 @@ i915_gem_userptr_ioctl(struct drm_device *dev, return -ENODEV; } - obj = i915_gem_object_alloc(dev_priv); + obj = i915_gem_object_alloc(); if (obj == NULL) return -ENOMEM; diff --git a/drivers/gpu/drm/i915/i915_globals.c b/drivers/gpu/drm/i915/i915_globals.c new 
file mode 100644 index 000000000000..2f5c72e2a9d1 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_globals.c @@ -0,0 +1,135 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#include <linux/slab.h> +#include <linux/workqueue.h> + +#include "i915_active.h" +#include "i915_gem_context.h" +#include "i915_gem_object.h" +#include "i915_globals.h" +#include "i915_request.h" +#include "i915_scheduler.h" +#include "i915_vma.h" + +static LIST_HEAD(globals); + +void __init i915_global_register(struct i915_global *global) +{ + GEM_BUG_ON(!global->shrink); + GEM_BUG_ON(!global->exit); + + list_add_tail(&global->link, &globals); +} + +static void __i915_globals_cleanup(void) +{ + struct i915_global *global, *next; + + list_for_each_entry_safe_reverse(global, next, &globals, link) + global->exit(); +} + +static __initconst int (* const initfn[])(void) = { + i915_global_active_init, + i915_global_context_init, + i915_global_gem_context_init, + i915_global_objects_init, + i915_global_request_init, + i915_global_scheduler_init, + i915_global_vma_init, +}; + +int __init i915_globals_init(void) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(initfn); i++) { + int err; + + err = initfn[i](); + if (err) { + __i915_globals_cleanup(); + return err; + } + } + + return 0; +} + +static void i915_globals_shrink(void) +{ + struct i915_global *global; + + /* + * kmem_cache_shrink() discards empty slabs and reorders partially + * filled slabs to prioritise allocating from the mostly full slabs, + * with the aim of reducing fragmentation. + */ + list_for_each_entry(global, &globals, link) + global->shrink(); +} + +static atomic_t active; +static atomic_t epoch; +struct park_work { + struct rcu_work work; + int epoch; +}; + +static void __i915_globals_park(struct work_struct *work) +{ + struct park_work *wrk = container_of(work, typeof(*wrk), work.work); + + /* Confirm nothing woke up in the last grace period */ + if (wrk->epoch == atomic_read(&epoch)) + i915_globals_shrink(); + + kfree(wrk); +} + +void i915_globals_park(void) +{ + struct park_work *wrk; + + /* + * Defer shrinking the global slab caches (and other work) until + * after a RCU grace period has completed with no activity. This + * is to try and reduce the latency impact on the consumers caused + * by us shrinking the caches the same time as they are trying to + * allocate, with the assumption being that if we idle long enough + * for an RCU grace period to elapse since the last use, it is likely + * to be longer until we need the caches again. 
+ */ + if (!atomic_dec_and_test(&active)) + return; + + wrk = kmalloc(sizeof(*wrk), GFP_KERNEL); + if (!wrk) + return; + + wrk->epoch = atomic_inc_return(&epoch); + INIT_RCU_WORK(&wrk->work, __i915_globals_park); + queue_rcu_work(system_wq, &wrk->work); +} + +void i915_globals_unpark(void) +{ + atomic_inc(&epoch); + atomic_inc(&active); +} + +void __exit i915_globals_exit(void) +{ + /* Flush any residual park_work */ + rcu_barrier(); + flush_scheduled_work(); + + __i915_globals_cleanup(); + + /* And ensure that our DESTROY_BY_RCU slabs are truly destroyed */ + rcu_barrier(); +} diff --git a/drivers/gpu/drm/i915/i915_globals.h b/drivers/gpu/drm/i915/i915_globals.h new file mode 100644 index 000000000000..04c1ce107fc0 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_globals.h @@ -0,0 +1,35 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#ifndef _I915_GLOBALS_H_ +#define _I915_GLOBALS_H_ + +typedef void (*i915_global_func_t)(void); + +struct i915_global { + struct list_head link; + + i915_global_func_t shrink; + i915_global_func_t exit; +}; + +void i915_global_register(struct i915_global *global); + +int i915_globals_init(void); +void i915_globals_park(void); +void i915_globals_unpark(void); +void i915_globals_exit(void); + +/* constructors */ +int i915_global_active_init(void); +int i915_global_context_init(void); +int i915_global_gem_context_init(void); +int i915_global_objects_init(void); +int i915_global_request_init(void); +int i915_global_scheduler_init(void); +int i915_global_vma_init(void); + +#endif /* _I915_GLOBALS_H_ */ diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 9a65341fec09..a2a98ccda421 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -380,19 +380,16 @@ static void print_error_buffers(struct drm_i915_error_state_buf *m, err_printf(m, "%s [%d]:\n", name, count); while (count--) { - err_printf(m, " %08x_%08x %8u %02x %02x %02x", + err_printf(m, " %08x_%08x %8u %02x %02x", upper_32_bits(err->gtt_offset), lower_32_bits(err->gtt_offset), err->size, err->read_domains, - err->write_domain, - err->wseqno); + err->write_domain); err_puts(m, tiling_flag(err->tiling)); err_puts(m, dirty_flag(err->dirty)); err_puts(m, purgeable_flag(err->purgeable)); err_puts(m, err->userptr ? " userptr" : ""); - err_puts(m, err->engine != -1 ? " " : ""); - err_puts(m, engine_name(m->i915, err->engine)); err_puts(m, i915_cache_level_str(m->i915, err->cache_level)); if (err->name) @@ -414,7 +411,7 @@ static void error_print_instdone(struct drm_i915_error_state_buf *m, err_printf(m, " INSTDONE: 0x%08x\n", ee->instdone.instdone); - if (ee->engine_id != RCS || INTEL_GEN(m->i915) <= 3) + if (ee->engine_id != RCS0 || INTEL_GEN(m->i915) <= 3) return; err_printf(m, " SC_INSTDONE: 0x%08x\n", @@ -434,11 +431,6 @@ static void error_print_instdone(struct drm_i915_error_state_buf *m, ee->instdone.row[slice][subslice]); } -static const char *bannable(const struct drm_i915_error_context *ctx) -{ - return ctx->bannable ? 
"" : " (unbannable)"; -} - static void error_print_request(struct drm_i915_error_state_buf *m, const char *prefix, const struct drm_i915_error_request *erq, @@ -447,9 +439,8 @@ static void error_print_request(struct drm_i915_error_state_buf *m, if (!erq->seqno) return; - err_printf(m, "%s pid %d, ban score %d, seqno %8x:%08x%s%s, prio %d, emitted %dms, start %08x, head %08x, tail %08x\n", - prefix, erq->pid, erq->ban_score, - erq->context, erq->seqno, + err_printf(m, "%s pid %d, seqno %8x:%08x%s%s, prio %d, emitted %dms, start %08x, head %08x, tail %08x\n", + prefix, erq->pid, erq->context, erq->seqno, test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &erq->flags) ? "!" : "", test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, @@ -463,10 +454,9 @@ static void error_print_context(struct drm_i915_error_state_buf *m, const char *header, const struct drm_i915_error_context *ctx) { - err_printf(m, "%s%s[%d] user_handle %d hw_id %d, prio %d, ban score %d%s guilty %d active %d\n", - header, ctx->comm, ctx->pid, ctx->handle, ctx->hw_id, - ctx->sched_attr.priority, ctx->ban_score, bannable(ctx), - ctx->guilty, ctx->active); + err_printf(m, "%s%s[%d] hw_id %d, prio %d, guilty %d active %d\n", + header, ctx->comm, ctx->pid, ctx->hw_id, + ctx->sched_attr.priority, ctx->guilty, ctx->active); } static void error_print_engine(struct drm_i915_error_state_buf *m, @@ -512,13 +502,6 @@ static void error_print_engine(struct drm_i915_error_state_buf *m, if (INTEL_GEN(m->i915) >= 6) { err_printf(m, " RC PSMI: 0x%08x\n", ee->rc_psmi); err_printf(m, " FAULT_REG: 0x%08x\n", ee->fault_reg); - err_printf(m, " SYNC_0: 0x%08x\n", - ee->semaphore_mboxes[0]); - err_printf(m, " SYNC_1: 0x%08x\n", - ee->semaphore_mboxes[1]); - if (HAS_VEBOX(m->i915)) - err_printf(m, " SYNC_2: 0x%08x\n", - ee->semaphore_mboxes[2]); } if (HAS_PPGTT(m->i915)) { err_printf(m, " GFX_MODE: 0x%08x\n", ee->vm_info.gfx_mode); @@ -533,8 +516,6 @@ static void error_print_engine(struct drm_i915_error_state_buf *m, ee->vm_info.pp_dir_base); } } - err_printf(m, " seqno: 0x%08x\n", ee->seqno); - err_printf(m, " last_seqno: 0x%08x\n", ee->last_seqno); err_printf(m, " ring->head: 0x%08x\n", ee->cpu_ring_head); err_printf(m, " ring->tail: 0x%08x\n", ee->cpu_ring_tail); err_printf(m, " hangcheck timestamp: %dms (%lu%s)\n", @@ -688,12 +669,10 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m, if (!error->engine[i].context.pid) continue; - err_printf(m, "Active process (on ring %s): %s [%d], score %d%s\n", + err_printf(m, "Active process (on ring %s): %s [%d]\n", engine_name(m->i915, i), error->engine[i].context.comm, - error->engine[i].context.pid, - error->engine[i].context.ban_score, - bannable(&error->engine[i].context)); + error->engine[i].context.pid); } err_printf(m, "Reset count: %u\n", error->reset_count); err_printf(m, "Suspend count: %u\n", error->suspend_count); @@ -779,13 +758,9 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m, if (obj) { err_puts(m, m->i915->engine[i]->name); if (ee->context.pid) - err_printf(m, " (submitted by %s [%d], ctx %d [%d], score %d%s)", + err_printf(m, " (submitted by %s [%d])", ee->context.comm, - ee->context.pid, - ee->context.handle, - ee->context.hw_id, - ee->context.ban_score, - bannable(&ee->context)); + ee->context.pid); err_printf(m, " --- gtt_offset = 0x%08x %08x\n", upper_32_bits(obj->gtt_offset), lower_32_bits(obj->gtt_offset)); @@ -1061,27 +1036,6 @@ i915_error_object_create(struct drm_i915_private *i915, return dst; } -/* The error capture is special as tries to run underneath the normal 
- * locking rules - so we use the raw version of the i915_active_request lookup. - */ -static inline u32 -__active_get_seqno(struct i915_active_request *active) -{ - struct i915_request *request; - - request = __i915_active_request_peek(active); - return request ? request->global_seqno : 0; -} - -static inline int -__active_get_engine_id(struct i915_active_request *active) -{ - struct i915_request *request; - - request = __i915_active_request_peek(active); - return request ? request->engine->id : -1; -} - static void capture_bo(struct drm_i915_error_buffer *err, struct i915_vma *vma) { @@ -1090,9 +1044,6 @@ static void capture_bo(struct drm_i915_error_buffer *err, err->size = obj->base.size; err->name = obj->base.name; - err->wseqno = __active_get_seqno(&obj->frontbuffer_write); - err->engine = __active_get_engine_id(&obj->frontbuffer_write); - err->gtt_offset = vma->node.start; err->read_domains = obj->read_domains; err->write_domain = obj->write_domain; @@ -1178,18 +1129,6 @@ static void gem_record_fences(struct i915_gpu_state *error) error->nfence = i; } -static void gen6_record_semaphore_state(struct intel_engine_cs *engine, - struct drm_i915_error_engine *ee) -{ - struct drm_i915_private *dev_priv = engine->i915; - - ee->semaphore_mboxes[0] = I915_READ(RING_SYNC_0(engine->mmio_base)); - ee->semaphore_mboxes[1] = I915_READ(RING_SYNC_1(engine->mmio_base)); - if (HAS_VEBOX(dev_priv)) - ee->semaphore_mboxes[2] = - I915_READ(RING_SYNC_2(engine->mmio_base)); -} - static void error_record_engine_registers(struct i915_gpu_state *error, struct intel_engine_cs *engine, struct drm_i915_error_engine *ee) @@ -1197,44 +1136,40 @@ static void error_record_engine_registers(struct i915_gpu_state *error, struct drm_i915_private *dev_priv = engine->i915; if (INTEL_GEN(dev_priv) >= 6) { - ee->rc_psmi = I915_READ(RING_PSMI_CTL(engine->mmio_base)); - if (INTEL_GEN(dev_priv) >= 8) { + ee->rc_psmi = ENGINE_READ(engine, RING_PSMI_CTL); + if (INTEL_GEN(dev_priv) >= 8) ee->fault_reg = I915_READ(GEN8_RING_FAULT_REG); - } else { - gen6_record_semaphore_state(engine, ee); + else ee->fault_reg = I915_READ(RING_FAULT_REG(engine)); - } } if (INTEL_GEN(dev_priv) >= 4) { - ee->faddr = I915_READ(RING_DMA_FADD(engine->mmio_base)); - ee->ipeir = I915_READ(RING_IPEIR(engine->mmio_base)); - ee->ipehr = I915_READ(RING_IPEHR(engine->mmio_base)); - ee->instps = I915_READ(RING_INSTPS(engine->mmio_base)); - ee->bbaddr = I915_READ(RING_BBADDR(engine->mmio_base)); + ee->faddr = ENGINE_READ(engine, RING_DMA_FADD); + ee->ipeir = ENGINE_READ(engine, RING_IPEIR); + ee->ipehr = ENGINE_READ(engine, RING_IPEHR); + ee->instps = ENGINE_READ(engine, RING_INSTPS); + ee->bbaddr = ENGINE_READ(engine, RING_BBADDR); if (INTEL_GEN(dev_priv) >= 8) { - ee->faddr |= (u64) I915_READ(RING_DMA_FADD_UDW(engine->mmio_base)) << 32; - ee->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(engine->mmio_base)) << 32; + ee->faddr |= (u64)ENGINE_READ(engine, RING_DMA_FADD_UDW) << 32; + ee->bbaddr |= (u64)ENGINE_READ(engine, RING_BBADDR_UDW) << 32; } - ee->bbstate = I915_READ(RING_BBSTATE(engine->mmio_base)); + ee->bbstate = ENGINE_READ(engine, RING_BBSTATE); } else { - ee->faddr = I915_READ(DMA_FADD_I8XX); - ee->ipeir = I915_READ(IPEIR); - ee->ipehr = I915_READ(IPEHR); + ee->faddr = ENGINE_READ(engine, DMA_FADD_I8XX); + ee->ipeir = ENGINE_READ(engine, IPEIR); + ee->ipehr = ENGINE_READ(engine, IPEHR); } intel_engine_get_instdone(engine, &ee->instdone); - ee->instpm = I915_READ(RING_INSTPM(engine->mmio_base)); + ee->instpm = ENGINE_READ(engine, RING_INSTPM); ee->acthd = 
intel_engine_get_active_head(engine); - ee->seqno = intel_engine_get_seqno(engine); - ee->last_seqno = intel_engine_last_submit(engine); - ee->start = I915_READ_START(engine); - ee->head = I915_READ_HEAD(engine); - ee->tail = I915_READ_TAIL(engine); - ee->ctl = I915_READ_CTL(engine); + ee->start = ENGINE_READ(engine, RING_START); + ee->head = ENGINE_READ(engine, RING_HEAD); + ee->tail = ENGINE_READ(engine, RING_TAIL); + ee->ctl = ENGINE_READ(engine, RING_CTL); if (INTEL_GEN(dev_priv) > 2) - ee->mode = I915_READ_MODE(engine); + ee->mode = ENGINE_READ(engine, RING_MI_MODE); if (!HWS_NEEDS_PHYSICAL(dev_priv)) { i915_reg_t mmio; @@ -1242,16 +1177,17 @@ static void error_record_engine_registers(struct i915_gpu_state *error, if (IS_GEN(dev_priv, 7)) { switch (engine->id) { default: - case RCS: + MISSING_CASE(engine->id); + case RCS0: mmio = RENDER_HWS_PGA_GEN7; break; - case BCS: + case BCS0: mmio = BLT_HWS_PGA_GEN7; break; - case VCS: + case VCS0: mmio = BSD_HWS_PGA_GEN7; break; - case VECS: + case VECS0: mmio = VEBOX_HWS_PGA_GEN7; break; } @@ -1278,10 +1214,10 @@ static void error_record_engine_registers(struct i915_gpu_state *error, if (IS_GEN(dev_priv, 6)) ee->vm_info.pp_dir_base = - I915_READ(RING_PP_DIR_BASE_READ(engine)); + ENGINE_READ(engine, RING_PP_DIR_BASE_READ); else if (IS_GEN(dev_priv, 7)) ee->vm_info.pp_dir_base = - I915_READ(RING_PP_DIR_BASE(engine)); + ENGINE_READ(engine, RING_PP_DIR_BASE); else if (INTEL_GEN(dev_priv) >= 8) for (i = 0; i < 4; i++) { ee->vm_info.pdp[i] = @@ -1299,10 +1235,9 @@ static void record_request(struct i915_request *request, struct i915_gem_context *ctx = request->gem_context; erq->flags = request->fence.flags; - erq->context = ctx->hw_id; + erq->context = request->fence.context; + erq->seqno = request->fence.seqno; erq->sched_attr = request->sched.attr; - erq->ban_score = atomic_read(&ctx->ban_score); - erq->seqno = request->global_seqno; erq->jiffies = request->emitted_jiffies; erq->start = i915_ggtt_offset(request->ring->vma); erq->head = request->head; @@ -1393,11 +1328,8 @@ static void record_context(struct drm_i915_error_context *e, rcu_read_unlock(); } - e->handle = ctx->user_handle; e->hw_id = ctx->hw_id; e->sched_attr = ctx->sched; - e->ban_score = atomic_read(&ctx->ban_score); - e->bannable = i915_gem_context_is_bannable(ctx); e->guilty = atomic_read(&ctx->guilty_count); e->active = atomic_read(&ctx->active_count); } @@ -1476,7 +1408,7 @@ static void gem_record_rings(struct i915_gpu_state *error) error_record_engine_registers(error, engine, ee); error_record_engine_execlists(engine, ee); - request = i915_gem_find_active_request(engine); + request = intel_engine_find_active_request(engine); if (request) { struct i915_gem_context *ctx = request->gem_context; struct intel_ring *ring; @@ -1669,7 +1601,7 @@ static void capture_reg_state(struct i915_gpu_state *error) } if (INTEL_GEN(dev_priv) >= 5) - error->ccid = I915_READ(CCID); + error->ccid = I915_READ(CCID(RENDER_RING_BASE)); /* 3: Feature specific registers */ if (IS_GEN_RANGE(dev_priv, 6, 7)) { @@ -1721,7 +1653,7 @@ error_msg(struct i915_gpu_state *error, unsigned long engines, const char *msg) i915_error_generate_code(error, engines)); if (engines) { /* Just show the first executing process, more is confusing */ - i = ffs(engines); + i = __ffs(engines); len += scnprintf(error->error_msg + len, sizeof(error->error_msg) - len, ", in %s [%d]", diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h index 53b1f22dd365..302a14240b45 100644 --- 
a/drivers/gpu/drm/i915/i915_gpu_error.h +++ b/drivers/gpu/drm/i915/i915_gpu_error.h @@ -94,8 +94,6 @@ struct i915_gpu_state { u32 cpu_ring_head; u32 cpu_ring_tail; - u32 last_seqno; - /* Register state */ u32 start; u32 tail; @@ -108,24 +106,19 @@ struct i915_gpu_state { u32 bbstate; u32 instpm; u32 instps; - u32 seqno; u64 bbaddr; u64 acthd; u32 fault_reg; u64 faddr; u32 rc_psmi; /* sleep state */ - u32 semaphore_mboxes[I915_NUM_ENGINES - 1]; struct intel_instdone instdone; struct drm_i915_error_context { char comm[TASK_COMM_LEN]; pid_t pid; - u32 handle; u32 hw_id; - int ban_score; int active; int guilty; - bool bannable; struct i915_sched_attr sched_attr; } context; @@ -149,7 +142,6 @@ struct i915_gpu_state { long jiffies; pid_t pid; u32 context; - int ban_score; u32 seqno; u32 start; u32 head; @@ -170,7 +162,6 @@ struct i915_gpu_state { struct drm_i915_error_buffer { u32 size; u32 name; - u32 wseqno; u64 gtt_offset; u32 read_domains; u32 write_domain; @@ -179,7 +170,6 @@ struct i915_gpu_state { u32 dirty:1; u32 purgeable:1; u32 userptr:1; - s32 engine:4; u32 cache_level:3; } *active_bo[I915_NUM_ENGINES], *pinned_bo; u32 active_bo_count[I915_NUM_ENGINES], pinned_bo_count; @@ -205,38 +195,12 @@ struct i915_gpu_error { atomic_t pending_fb_pin; /** - * State variable controlling the reset flow and count - * - * This is a counter which gets incremented when reset is triggered, - * - * Before the reset commences, the I915_RESET_BACKOFF bit is set - * meaning that any waiters holding onto the struct_mutex should - * relinquish the lock immediately in order for the reset to start. - * - * If reset is not completed successfully, the I915_WEDGE bit is - * set meaning that hardware is terminally sour and there is no - * recovery. All waiters on the reset_queue will be woken when - * that happens. - * - * This counter is used by the wait_seqno code to notice that reset - * event happened and it needs to restart the entire ioctl (since most - * likely the seqno it waited for won't ever signal anytime soon). - * - * This is important for lock-free wait paths, where no contended lock - * naturally enforces the correct ordering between the bail-out of the - * waiter and the gpu reset work code. - */ - unsigned long reset_count; - - /** * flags: Control various stages of the GPU reset * - * #I915_RESET_BACKOFF - When we start a reset, we want to stop any - * other users acquiring the struct_mutex. To do this we set the - * #I915_RESET_BACKOFF bit in the error flags when we detect a reset - * and then check for that bit before acquiring the struct_mutex (in - * i915_mutex_lock_interruptible()?). I915_RESET_BACKOFF serves a - * secondary role in preventing two concurrent global reset attempts. + * #I915_RESET_BACKOFF - When we start a global reset, we need to + * serialise with any other users attempting to do the same, and + * any global resources that may be clobber by the reset (such as + * FENCE registers). 
* * #I915_RESET_ENGINE[num_engines] - Since the driver doesn't need to * acquire the struct_mutex to reset an engine, we need an explicit @@ -255,6 +219,9 @@ struct i915_gpu_error { #define I915_RESET_ENGINE 2 #define I915_WEDGED (BITS_PER_LONG - 1) + /** Number of times the device has been reset (global) */ + u32 reset_count; + /** Number of times an engine has been reset */ u32 reset_engine_count[I915_NUM_ENGINES]; @@ -272,6 +239,8 @@ struct i915_gpu_error { */ wait_queue_head_t reset_queue; + struct srcu_struct reset_backoff_srcu; + struct i915_gpu_restart *restart; }; diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 441d2674b272..455b2bf691b5 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -30,6 +30,7 @@ #include <linux/sysrq.h> #include <linux/slab.h> +#include <linux/cpuidle.h> #include <linux/circ_buf.h> #include <drm/drm_irq.h> #include <drm/drm_drv.h> @@ -268,7 +269,7 @@ static bool gen11_reset_one_iir(struct drm_i915_private * const i915, const unsigned int bank, const unsigned int bit) { - void __iomem * const regs = i915->regs; + void __iomem * const regs = i915->uncore.regs; u32 dw; lockdep_assert_held(&i915->irq_lock); @@ -748,13 +749,21 @@ void i915_disable_pipestat(struct drm_i915_private *dev_priv, POSTING_READ(reg); } +static bool i915_has_asle(struct drm_i915_private *dev_priv) +{ + if (!dev_priv->opregion.asle) + return false; + + return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv); +} + /** * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion * @dev_priv: i915 device private */ static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv) { - if (!dev_priv->opregion.asle || !IS_MOBILE(dev_priv)) + if (!i915_has_asle(dev_priv)) return; spin_lock_irq(&dev_priv->irq_lock); @@ -1288,6 +1297,18 @@ static void gen6_pm_rps_work(struct work_struct *work) rps->last_adj = adj; + /* + * Limit deboosting and boosting to keep ourselves at the extremes + * when in the respective power modes (i.e. slowly decrease frequencies + * while in the HIGH_POWER zone and slowly increase frequencies while + * in the LOW_POWER zone). On idle, we will hit the timeout and drop + * to the next level quickly, and conversely if busy we expect to + * hit a waitboost and rapidly switch into max power. 
+ */ + if ((adj < 0 && rps->power.mode == HIGH_POWER) || + (adj > 0 && rps->power.mode == LOW_POWER)) + rps->last_adj = 0; + /* sysfs frequency interfaces may have snuck in while servicing the * interrupt */ @@ -1415,20 +1436,20 @@ static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir) { if (gt_iir & GT_RENDER_USER_INTERRUPT) - intel_engine_breadcrumbs_irq(dev_priv->engine[RCS]); + intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]); if (gt_iir & ILK_BSD_USER_INTERRUPT) - intel_engine_breadcrumbs_irq(dev_priv->engine[VCS]); + intel_engine_breadcrumbs_irq(dev_priv->engine[VCS0]); } static void snb_gt_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir) { if (gt_iir & GT_RENDER_USER_INTERRUPT) - intel_engine_breadcrumbs_irq(dev_priv->engine[RCS]); + intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]); if (gt_iir & GT_BSD_USER_INTERRUPT) - intel_engine_breadcrumbs_irq(dev_priv->engine[VCS]); + intel_engine_breadcrumbs_irq(dev_priv->engine[VCS0]); if (gt_iir & GT_BLT_USER_INTERRUPT) - intel_engine_breadcrumbs_irq(dev_priv->engine[BCS]); + intel_engine_breadcrumbs_irq(dev_priv->engine[BCS0]); if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT | GT_BSD_CS_ERROR_INTERRUPT | @@ -1459,12 +1480,12 @@ gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir) static void gen8_gt_irq_ack(struct drm_i915_private *i915, u32 master_ctl, u32 gt_iir[4]) { - void __iomem * const regs = i915->regs; + void __iomem * const regs = i915->uncore.regs; #define GEN8_GT_IRQS (GEN8_GT_RCS_IRQ | \ GEN8_GT_BCS_IRQ | \ + GEN8_GT_VCS0_IRQ | \ GEN8_GT_VCS1_IRQ | \ - GEN8_GT_VCS2_IRQ | \ GEN8_GT_VECS_IRQ | \ GEN8_GT_PM_IRQ | \ GEN8_GT_GUC_IRQ) @@ -1475,7 +1496,7 @@ static void gen8_gt_irq_ack(struct drm_i915_private *i915, raw_reg_write(regs, GEN8_GT_IIR(0), gt_iir[0]); } - if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) { + if (master_ctl & (GEN8_GT_VCS0_IRQ | GEN8_GT_VCS1_IRQ)) { gt_iir[1] = raw_reg_read(regs, GEN8_GT_IIR(1)); if (likely(gt_iir[1])) raw_reg_write(regs, GEN8_GT_IIR(1), gt_iir[1]); @@ -1498,21 +1519,21 @@ static void gen8_gt_irq_handler(struct drm_i915_private *i915, u32 master_ctl, u32 gt_iir[4]) { if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) { - gen8_cs_irq_handler(i915->engine[RCS], + gen8_cs_irq_handler(i915->engine[RCS0], gt_iir[0] >> GEN8_RCS_IRQ_SHIFT); - gen8_cs_irq_handler(i915->engine[BCS], + gen8_cs_irq_handler(i915->engine[BCS0], gt_iir[0] >> GEN8_BCS_IRQ_SHIFT); } - if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) { - gen8_cs_irq_handler(i915->engine[VCS], + if (master_ctl & (GEN8_GT_VCS0_IRQ | GEN8_GT_VCS1_IRQ)) { + gen8_cs_irq_handler(i915->engine[VCS0], + gt_iir[1] >> GEN8_VCS0_IRQ_SHIFT); + gen8_cs_irq_handler(i915->engine[VCS1], gt_iir[1] >> GEN8_VCS1_IRQ_SHIFT); - gen8_cs_irq_handler(i915->engine[VCS2], - gt_iir[1] >> GEN8_VCS2_IRQ_SHIFT); } if (master_ctl & GEN8_GT_VECS_IRQ) { - gen8_cs_irq_handler(i915->engine[VECS], + gen8_cs_irq_handler(i915->engine[VECS0], gt_iir[3] >> GEN8_VECS_IRQ_SHIFT); } @@ -1693,7 +1714,9 @@ static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, { struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); - u32 crcs[5]; + u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 }; + + trace_intel_pipe_crc(crtc, crcs); spin_lock(&pipe_crc->lock); /* @@ -1712,11 +1735,6 @@ static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, } spin_unlock(&pipe_crc->lock); - crcs[0] = crc0; - crcs[1] = crc1; - crcs[2] = crc2; - 
crcs[3] = crc3; - crcs[4] = crc4; drm_crtc_add_crc_entry(&crtc->base, true, drm_crtc_accurate_vblank_count(&crtc->base), crcs); @@ -1792,13 +1810,11 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir) if (INTEL_GEN(dev_priv) >= 8) return; - if (HAS_VEBOX(dev_priv)) { - if (pm_iir & PM_VEBOX_USER_INTERRUPT) - intel_engine_breadcrumbs_irq(dev_priv->engine[VECS]); + if (pm_iir & PM_VEBOX_USER_INTERRUPT) + intel_engine_breadcrumbs_irq(dev_priv->engine[VECS0]); - if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) - DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir); - } + if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) + DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir); } static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir) @@ -2667,6 +2683,25 @@ static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir) DRM_ERROR("Unexpected DE HPD interrupt 0x%08x\n", iir); } +static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv) +{ + u32 mask = GEN8_AUX_CHANNEL_A; + + if (INTEL_GEN(dev_priv) >= 9) + mask |= GEN9_AUX_CHANNEL_B | + GEN9_AUX_CHANNEL_C | + GEN9_AUX_CHANNEL_D; + + if (IS_CNL_WITH_PORT_F(dev_priv)) + mask |= CNL_AUX_CHANNEL_F; + + if (INTEL_GEN(dev_priv) >= 11) + mask |= ICL_AUX_CHANNEL_E | + CNL_AUX_CHANNEL_F; + + return mask; +} + static irqreturn_t gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) { @@ -2722,20 +2757,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) I915_WRITE(GEN8_DE_PORT_IIR, iir); ret = IRQ_HANDLED; - tmp_mask = GEN8_AUX_CHANNEL_A; - if (INTEL_GEN(dev_priv) >= 9) - tmp_mask |= GEN9_AUX_CHANNEL_B | - GEN9_AUX_CHANNEL_C | - GEN9_AUX_CHANNEL_D; - - if (INTEL_GEN(dev_priv) >= 11) - tmp_mask |= ICL_AUX_CHANNEL_E; - - if (IS_CNL_WITH_PORT_F(dev_priv) || - INTEL_GEN(dev_priv) >= 11) - tmp_mask |= CNL_AUX_CHANNEL_F; - - if (iir & tmp_mask) { + if (iir & gen8_de_port_aux_mask(dev_priv)) { dp_aux_irq_handler(dev_priv); found = true; } @@ -2816,11 +2838,9 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) I915_WRITE(SDEIIR, iir); ret = IRQ_HANDLED; - if (HAS_PCH_ICP(dev_priv)) + if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) icp_irq_handler(dev_priv, iir); - else if (HAS_PCH_SPT(dev_priv) || - HAS_PCH_KBP(dev_priv) || - HAS_PCH_CNP(dev_priv)) + else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT) spt_irq_handler(dev_priv, iir); else cpt_irq_handler(dev_priv, iir); @@ -2857,7 +2877,7 @@ static inline void gen8_master_intr_enable(void __iomem * const regs) static irqreturn_t gen8_irq_handler(int irq, void *arg) { struct drm_i915_private *dev_priv = to_i915(arg); - void __iomem * const regs = dev_priv->regs; + void __iomem * const regs = dev_priv->uncore.regs; u32 master_ctl; u32 gt_iir[4]; @@ -2891,7 +2911,7 @@ static u32 gen11_gt_engine_identity(struct drm_i915_private * const i915, const unsigned int bank, const unsigned int bit) { - void __iomem * const regs = i915->regs; + void __iomem * const regs = i915->uncore.regs; u32 timeout_ts; u32 ident; @@ -2975,7 +2995,7 @@ static void gen11_gt_bank_handler(struct drm_i915_private * const i915, const unsigned int bank) { - void __iomem * const regs = i915->regs; + void __iomem * const regs = i915->uncore.regs; unsigned long intr_dw; unsigned int bit; @@ -3018,7 +3038,7 @@ gen11_gt_irq_handler(struct drm_i915_private * const i915, static u32 gen11_gu_misc_irq_ack(struct drm_i915_private *dev_priv, const u32 master_ctl) { - void __iomem * const regs = dev_priv->regs; + void __iomem * const regs 
= dev_priv->uncore.regs; u32 iir; if (!(master_ctl & GEN11_GU_MISC_IRQ)) @@ -3059,7 +3079,7 @@ static inline void gen11_master_intr_enable(void __iomem * const regs) static irqreturn_t gen11_irq_handler(int irq, void *arg) { struct drm_i915_private * const i915 = to_i915(arg); - void __iomem * const regs = i915->regs; + void __iomem * const regs = i915->uncore.regs; u32 master_ctl; u32 gu_misc_iir; @@ -3112,6 +3132,16 @@ static int i8xx_enable_vblank(struct drm_device *dev, unsigned int pipe) return 0; } +static int i945gm_enable_vblank(struct drm_device *dev, unsigned int pipe) +{ + struct drm_i915_private *dev_priv = to_i915(dev); + + if (dev_priv->i945gm_vblank.enabled++ == 0) + schedule_work(&dev_priv->i945gm_vblank.work); + + return i8xx_enable_vblank(dev, pipe); +} + static int i965_enable_vblank(struct drm_device *dev, unsigned int pipe) { struct drm_i915_private *dev_priv = to_i915(dev); @@ -3176,6 +3206,16 @@ static void i8xx_disable_vblank(struct drm_device *dev, unsigned int pipe) spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); } +static void i945gm_disable_vblank(struct drm_device *dev, unsigned int pipe) +{ + struct drm_i915_private *dev_priv = to_i915(dev); + + i8xx_disable_vblank(dev, pipe); + + if (--dev_priv->i945gm_vblank.enabled == 0) + schedule_work(&dev_priv->i945gm_vblank.work); +} + static void i965_disable_vblank(struct drm_device *dev, unsigned int pipe) { struct drm_i915_private *dev_priv = to_i915(dev); @@ -3209,6 +3249,60 @@ static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe) spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); } +static void i945gm_vblank_work_func(struct work_struct *work) +{ + struct drm_i915_private *dev_priv = + container_of(work, struct drm_i915_private, i945gm_vblank.work); + + /* + * Vblank interrupts fail to wake up the device from C3, + * hence we want to prevent C3 usage while vblank interrupts + * are enabled. + */ + pm_qos_update_request(&dev_priv->i945gm_vblank.pm_qos, + READ_ONCE(dev_priv->i945gm_vblank.enabled) ? + dev_priv->i945gm_vblank.c3_disable_latency : + PM_QOS_DEFAULT_VALUE); +} + +static int cstate_disable_latency(const char *name) +{ + const struct cpuidle_driver *drv; + int i; + + drv = cpuidle_get_driver(); + if (!drv) + return 0; + + for (i = 0; i < drv->state_count; i++) { + const struct cpuidle_state *state = &drv->states[i]; + + if (!strcmp(state->name, name)) + return state->exit_latency ? 
+ state->exit_latency - 1 : 0; + } + + return 0; +} + +static void i945gm_vblank_work_init(struct drm_i915_private *dev_priv) +{ + INIT_WORK(&dev_priv->i945gm_vblank.work, + i945gm_vblank_work_func); + + dev_priv->i945gm_vblank.c3_disable_latency = + cstate_disable_latency("C3"); + pm_qos_add_request(&dev_priv->i945gm_vblank.pm_qos, + PM_QOS_CPU_DMA_LATENCY, + PM_QOS_DEFAULT_VALUE); +} + +static void i945gm_vblank_work_fini(struct drm_i915_private *dev_priv) +{ + cancel_work_sync(&dev_priv->i945gm_vblank.work); + pm_qos_remove_request(&dev_priv->i945gm_vblank.pm_qos); +} + static void ibx_irq_reset(struct drm_i915_private *dev_priv) { if (HAS_PCH_NOP(dev_priv)) @@ -3340,7 +3434,7 @@ static void gen8_irq_reset(struct drm_device *dev) struct drm_i915_private *dev_priv = to_i915(dev); int pipe; - gen8_master_intr_disable(dev_priv->regs); + gen8_master_intr_disable(dev_priv->uncore.regs); gen8_gt_irq_reset(dev_priv); @@ -3382,7 +3476,7 @@ static void gen11_irq_reset(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; int pipe; - gen11_master_intr_disable(dev_priv->regs); + gen11_master_intr_disable(dev_priv->uncore.regs); gen11_gt_irq_reset(dev_priv); @@ -3402,7 +3496,7 @@ static void gen11_irq_reset(struct drm_device *dev) GEN3_IRQ_RESET(GEN11_GU_MISC_); GEN3_IRQ_RESET(GEN8_PCU_); - if (HAS_PCH_ICP(dev_priv)) + if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) GEN3_IRQ_RESET(SDE); } @@ -3583,7 +3677,7 @@ static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv) gen11_hpd_detection_setup(dev_priv); - if (HAS_PCH_ICP(dev_priv)) + if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) icp_hpd_irq_setup(dev_priv); } @@ -3767,7 +3861,7 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev) * RPS interrupts will get enabled/disabled on demand when RPS * itself is enabled/disabled. 
*/ - if (HAS_VEBOX(dev_priv)) { + if (HAS_ENGINE(dev_priv, VECS0)) { pm_irqs |= PM_VEBOX_USER_INTERRUPT; dev_priv->pm_ier |= PM_VEBOX_USER_INTERRUPT; } @@ -3879,18 +3973,21 @@ static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv) { /* These are interrupts we'll toggle with the ring mask register */ u32 gt_interrupts[] = { - GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT | - GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT | - GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT | - GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT, - GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT | - GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT | - GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT | - GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT, + (GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT | + GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT | + GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT | + GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT), + + (GT_RENDER_USER_INTERRUPT << GEN8_VCS0_IRQ_SHIFT | + GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS0_IRQ_SHIFT | + GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT | + GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT), + 0, - GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT | - GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT - }; + + (GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT | + GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT) + }; dev_priv->pm_ier = 0x0; dev_priv->pm_imr = ~dev_priv->pm_ier; @@ -3984,7 +4081,7 @@ static int gen8_irq_postinstall(struct drm_device *dev) if (HAS_PCH_SPLIT(dev_priv)) ibx_irq_postinstall(dev); - gen8_master_intr_enable(dev_priv->regs); + gen8_master_intr_enable(dev_priv->uncore.regs); return 0; } @@ -4036,7 +4133,7 @@ static int gen11_irq_postinstall(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; u32 gu_misc_masked = GEN11_GU_MISC_GSE; - if (HAS_PCH_ICP(dev_priv)) + if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) icp_irq_postinstall(dev); gen11_gt_irq_postinstall(dev_priv); @@ -4046,7 +4143,7 @@ static int gen11_irq_postinstall(struct drm_device *dev) I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE); - gen11_master_intr_enable(dev_priv->regs); + gen11_master_intr_enable(dev_priv->uncore.regs); POSTING_READ(GEN11_GFX_MSTR_IRQ); return 0; @@ -4218,7 +4315,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg) I915_WRITE16(IIR, iir); if (iir & I915_USER_INTERRUPT) - intel_engine_breadcrumbs_irq(dev_priv->engine[RCS]); + intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]); if (iir & I915_MASTER_ERROR_INTERRUPT) i8xx_error_irq_handler(dev_priv, eir, eir_stuck); @@ -4326,7 +4423,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg) I915_WRITE(IIR, iir); if (iir & I915_USER_INTERRUPT) - intel_engine_breadcrumbs_irq(dev_priv->engine[RCS]); + intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]); if (iir & I915_MASTER_ERROR_INTERRUPT) i9xx_error_irq_handler(dev_priv, eir, eir_stuck); @@ -4471,10 +4568,10 @@ static irqreturn_t i965_irq_handler(int irq, void *arg) I915_WRITE(IIR, iir); if (iir & I915_USER_INTERRUPT) - intel_engine_breadcrumbs_irq(dev_priv->engine[RCS]); + intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]); if (iir & I915_BSD_USER_INTERRUPT) - intel_engine_breadcrumbs_irq(dev_priv->engine[VCS]); + intel_engine_breadcrumbs_irq(dev_priv->engine[VCS0]); if (iir & I915_MASTER_ERROR_INTERRUPT) i9xx_error_irq_handler(dev_priv, eir, eir_stuck); @@ -4503,6 +4600,9 @@ void intel_irq_init(struct drm_i915_private *dev_priv) struct intel_rps *rps = 
&dev_priv->gt_pm.rps; int i; + if (IS_I945GM(dev_priv)) + i945gm_vblank_work_init(dev_priv); + intel_hpd_init_work(dev_priv); INIT_WORK(&rps->work, gen6_pm_rps_work); @@ -4542,13 +4642,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv) else if (INTEL_GEN(dev_priv) >= 3) dev->driver->get_vblank_counter = i915_get_vblank_counter; - /* - * Opt out of the vblank disable timer on everything except gen2. - * Gen2 doesn't have a hardware frame counter and so depends on - * vblank interrupts to produce sane vblank seuquence numbers. - */ - if (!IS_GEN(dev_priv, 2)) - dev->vblank_disable_immediate = true; + dev->vblank_disable_immediate = true; /* Most platforms treat the display irq block as an always-on * power domain. vlv/chv can disable it at runtime and need @@ -4605,8 +4699,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv) dev->driver->disable_vblank = gen8_disable_vblank; if (IS_GEN9_LP(dev_priv)) dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup; - else if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv) || - HAS_PCH_CNP(dev_priv)) + else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT) dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup; else dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup; @@ -4626,6 +4719,13 @@ void intel_irq_init(struct drm_i915_private *dev_priv) dev->driver->irq_uninstall = i8xx_irq_reset; dev->driver->enable_vblank = i8xx_enable_vblank; dev->driver->disable_vblank = i8xx_disable_vblank; + } else if (IS_I945GM(dev_priv)) { + dev->driver->irq_preinstall = i915_irq_reset; + dev->driver->irq_postinstall = i915_irq_postinstall; + dev->driver->irq_uninstall = i915_irq_reset; + dev->driver->irq_handler = i915_irq_handler; + dev->driver->enable_vblank = i945gm_enable_vblank; + dev->driver->disable_vblank = i945gm_disable_vblank; } else if (IS_GEN(dev_priv, 3)) { dev->driver->irq_preinstall = i915_irq_reset; dev->driver->irq_postinstall = i915_irq_postinstall; @@ -4656,6 +4756,9 @@ void intel_irq_fini(struct drm_i915_private *i915) { int i; + if (IS_I945GM(i915)) + i945gm_vblank_work_fini(i915); + for (i = 0; i < MAX_L3_SLICES; ++i) kfree(i915->l3_parity.remap_info[i]); } diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index 66f82f3f050f..a7e1611af26d 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -28,14 +28,44 @@ #include <drm/drm_drv.h> -#include "i915_active.h" #include "i915_drv.h" +#include "i915_globals.h" #include "i915_selftest.h" #define PLATFORM(x) .platform = (x), .platform_mask = BIT(x) #define GEN(x) .gen = (x), .gen_mask = BIT((x) - 1) -#define GEN_DEFAULT_PIPEOFFSETS \ +#define I845_PIPE_OFFSETS \ + .pipe_offsets = { \ + [TRANSCODER_A] = PIPE_A_OFFSET, \ + }, \ + .trans_offsets = { \ + [TRANSCODER_A] = TRANSCODER_A_OFFSET, \ + } + +#define I9XX_PIPE_OFFSETS \ + .pipe_offsets = { \ + [TRANSCODER_A] = PIPE_A_OFFSET, \ + [TRANSCODER_B] = PIPE_B_OFFSET, \ + }, \ + .trans_offsets = { \ + [TRANSCODER_A] = TRANSCODER_A_OFFSET, \ + [TRANSCODER_B] = TRANSCODER_B_OFFSET, \ + } + +#define IVB_PIPE_OFFSETS \ + .pipe_offsets = { \ + [TRANSCODER_A] = PIPE_A_OFFSET, \ + [TRANSCODER_B] = PIPE_B_OFFSET, \ + [TRANSCODER_C] = PIPE_C_OFFSET, \ + }, \ + .trans_offsets = { \ + [TRANSCODER_A] = TRANSCODER_A_OFFSET, \ + [TRANSCODER_B] = TRANSCODER_B_OFFSET, \ + [TRANSCODER_C] = TRANSCODER_C_OFFSET, \ + } + +#define HSW_PIPE_OFFSETS \ .pipe_offsets = { \ [TRANSCODER_A] = PIPE_A_OFFSET, \ [TRANSCODER_B] = PIPE_B_OFFSET, \ @@ -49,7 +79,7 @@ [TRANSCODER_EDP] = TRANSCODER_EDP_OFFSET, \ } -#define 
GEN_CHV_PIPEOFFSETS \ +#define CHV_PIPE_OFFSETS \ .pipe_offsets = { \ [TRANSCODER_A] = PIPE_A_OFFSET, \ [TRANSCODER_B] = PIPE_B_OFFSET, \ @@ -61,11 +91,30 @@ [TRANSCODER_C] = CHV_TRANSCODER_C_OFFSET, \ } -#define CURSOR_OFFSETS \ - .cursor_offsets = { CURSOR_A_OFFSET, CURSOR_B_OFFSET, CHV_CURSOR_C_OFFSET } +#define I845_CURSOR_OFFSETS \ + .cursor_offsets = { \ + [PIPE_A] = CURSOR_A_OFFSET, \ + } + +#define I9XX_CURSOR_OFFSETS \ + .cursor_offsets = { \ + [PIPE_A] = CURSOR_A_OFFSET, \ + [PIPE_B] = CURSOR_B_OFFSET, \ + } + +#define CHV_CURSOR_OFFSETS \ + .cursor_offsets = { \ + [PIPE_A] = CURSOR_A_OFFSET, \ + [PIPE_B] = CURSOR_B_OFFSET, \ + [PIPE_C] = CHV_CURSOR_C_OFFSET, \ + } #define IVB_CURSOR_OFFSETS \ - .cursor_offsets = { CURSOR_A_OFFSET, IVB_CURSOR_B_OFFSET, IVB_CURSOR_C_OFFSET } + .cursor_offsets = { \ + [PIPE_A] = CURSOR_A_OFFSET, \ + [PIPE_B] = IVB_CURSOR_B_OFFSET, \ + [PIPE_C] = IVB_CURSOR_C_OFFSET, \ + } #define BDW_COLORS \ .color = { .degamma_lut_size = 512, .gamma_lut_size = 512 } @@ -75,7 +124,7 @@ .gamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING, \ } #define GLK_COLORS \ - .color = { .degamma_lut_size = 0, .gamma_lut_size = 1024, \ + .color = { .degamma_lut_size = 33, .gamma_lut_size = 1024, \ .degamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING | \ DRM_COLOR_LUT_EQUAL_CHANNELS, \ } @@ -85,7 +134,25 @@ #define GEN_DEFAULT_PAGE_SIZES \ .page_sizes = I915_GTT_PAGE_SIZE_4K -#define GEN2_FEATURES \ +#define I830_FEATURES \ + GEN(2), \ + .is_mobile = 1, \ + .num_pipes = 2, \ + .display.has_overlay = 1, \ + .display.cursor_needs_physical = 1, \ + .display.overlay_needs_physical = 1, \ + .display.has_gmch = 1, \ + .gpu_reset_clobbers_display = true, \ + .hws_needs_physical = 1, \ + .unfenced_needs_alignment = 1, \ + .engine_mask = BIT(RCS0), \ + .has_snoop = true, \ + .has_coherent_ggtt = false, \ + I9XX_PIPE_OFFSETS, \ + I9XX_CURSOR_OFFSETS, \ + GEN_DEFAULT_PAGE_SIZES + +#define I845_FEATURES \ GEN(2), \ .num_pipes = 1, \ .display.has_overlay = 1, \ @@ -94,37 +161,31 @@ .gpu_reset_clobbers_display = true, \ .hws_needs_physical = 1, \ .unfenced_needs_alignment = 1, \ - .ring_mask = RENDER_RING, \ + .engine_mask = BIT(RCS0), \ .has_snoop = true, \ .has_coherent_ggtt = false, \ - GEN_DEFAULT_PIPEOFFSETS, \ - GEN_DEFAULT_PAGE_SIZES, \ - CURSOR_OFFSETS + I845_PIPE_OFFSETS, \ + I845_CURSOR_OFFSETS, \ + GEN_DEFAULT_PAGE_SIZES static const struct intel_device_info intel_i830_info = { - GEN2_FEATURES, + I830_FEATURES, PLATFORM(INTEL_I830), - .is_mobile = 1, - .display.cursor_needs_physical = 1, - .num_pipes = 2, /* legal, last one wins */ }; static const struct intel_device_info intel_i845g_info = { - GEN2_FEATURES, + I845_FEATURES, PLATFORM(INTEL_I845G), }; static const struct intel_device_info intel_i85x_info = { - GEN2_FEATURES, + I830_FEATURES, PLATFORM(INTEL_I85X), - .is_mobile = 1, - .num_pipes = 2, /* legal, last one wins */ - .display.cursor_needs_physical = 1, .display.has_fbc = 1, }; static const struct intel_device_info intel_i865g_info = { - GEN2_FEATURES, + I845_FEATURES, PLATFORM(INTEL_I865G), }; @@ -133,12 +194,12 @@ static const struct intel_device_info intel_i865g_info = { .num_pipes = 2, \ .display.has_gmch = 1, \ .gpu_reset_clobbers_display = true, \ - .ring_mask = RENDER_RING, \ + .engine_mask = BIT(RCS0), \ .has_snoop = true, \ .has_coherent_ggtt = true, \ - GEN_DEFAULT_PIPEOFFSETS, \ - GEN_DEFAULT_PAGE_SIZES, \ - CURSOR_OFFSETS + I9XX_PIPE_OFFSETS, \ + I9XX_CURSOR_OFFSETS, \ + GEN_DEFAULT_PAGE_SIZES static const struct intel_device_info intel_i915g_info = { GEN3_FEATURES, 
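
The i915_pci.c hunks above drop the shared GEN_DEFAULT_PIPEOFFSETS/CURSOR_OFFSETS macros in favour of per-layout tables (I845_/I9XX_/IVB_/HSW_/CHV_ variants) built with designated array initializers, so each platform lists only the pipes and transcoders it actually has and unlisted slots stay zero. A minimal sketch of that initializer pattern, using a made-up two-pipe layout and a local struct rather than the driver's intel_device_info; the offset values are illustrative only.

#include <stdio.h>
#include <stdint.h>

enum pipe { PIPE_A, PIPE_B, PIPE_C, I915_MAX_PIPES };

struct device_info {
        int num_pipes;
        uint32_t cursor_offsets[I915_MAX_PIPES];
};

/* Designated initializers: only the pipes this variant has are listed,
 * every other slot of the array is implicitly zero. */
#define TWO_PIPE_CURSOR_OFFSETS \
        .cursor_offsets = { \
                [PIPE_A] = 0x70080, \
                [PIPE_B] = 0x700c0, \
        }

static const struct device_info two_pipe_info = {
        .num_pipes = 2,
        TWO_PIPE_CURSOR_OFFSETS,
};

int main(void)
{
        for (int p = 0; p < two_pipe_info.num_pipes; p++)
                printf("pipe %c cursor regs at 0x%05x\n",
                       'A' + p, (unsigned int)two_pipe_info.cursor_offsets[p]);
        return 0;
}
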
@@ -210,12 +271,12 @@ static const struct intel_device_info intel_pineview_info = { .display.has_hotplug = 1, \ .display.has_gmch = 1, \ .gpu_reset_clobbers_display = true, \ - .ring_mask = RENDER_RING, \ + .engine_mask = BIT(RCS0), \ .has_snoop = true, \ .has_coherent_ggtt = true, \ - GEN_DEFAULT_PIPEOFFSETS, \ - GEN_DEFAULT_PAGE_SIZES, \ - CURSOR_OFFSETS + I9XX_PIPE_OFFSETS, \ + I9XX_CURSOR_OFFSETS, \ + GEN_DEFAULT_PAGE_SIZES static const struct intel_device_info intel_i965g_info = { GEN4_FEATURES, @@ -239,7 +300,7 @@ static const struct intel_device_info intel_i965gm_info = { static const struct intel_device_info intel_g45_info = { GEN4_FEATURES, PLATFORM(INTEL_G45), - .ring_mask = RENDER_RING | BSD_RING, + .engine_mask = BIT(RCS0) | BIT(VCS0), .gpu_reset_clobbers_display = false, }; @@ -249,7 +310,7 @@ static const struct intel_device_info intel_gm45_info = { .is_mobile = 1, .display.has_fbc = 1, .display.supports_tv = 1, - .ring_mask = RENDER_RING | BSD_RING, + .engine_mask = BIT(RCS0) | BIT(VCS0), .gpu_reset_clobbers_display = false, }; @@ -257,14 +318,14 @@ static const struct intel_device_info intel_gm45_info = { GEN(5), \ .num_pipes = 2, \ .display.has_hotplug = 1, \ - .ring_mask = RENDER_RING | BSD_RING, \ + .engine_mask = BIT(RCS0) | BIT(VCS0), \ .has_snoop = true, \ .has_coherent_ggtt = true, \ /* ilk does support rc6, but we do not implement [power] contexts */ \ .has_rc6 = 0, \ - GEN_DEFAULT_PIPEOFFSETS, \ - GEN_DEFAULT_PAGE_SIZES, \ - CURSOR_OFFSETS + I9XX_PIPE_OFFSETS, \ + I9XX_CURSOR_OFFSETS, \ + GEN_DEFAULT_PAGE_SIZES static const struct intel_device_info intel_ironlake_d_info = { GEN5_FEATURES, @@ -283,15 +344,16 @@ static const struct intel_device_info intel_ironlake_m_info = { .num_pipes = 2, \ .display.has_hotplug = 1, \ .display.has_fbc = 1, \ - .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \ + .engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \ .has_coherent_ggtt = true, \ .has_llc = 1, \ .has_rc6 = 1, \ .has_rc6p = 1, \ - .ppgtt = INTEL_PPGTT_ALIASING, \ - GEN_DEFAULT_PIPEOFFSETS, \ - GEN_DEFAULT_PAGE_SIZES, \ - CURSOR_OFFSETS + .ppgtt_type = INTEL_PPGTT_ALIASING, \ + .ppgtt_size = 31, \ + I9XX_PIPE_OFFSETS, \ + I9XX_CURSOR_OFFSETS, \ + GEN_DEFAULT_PAGE_SIZES #define SNB_D_PLATFORM \ GEN6_FEATURES, \ @@ -328,15 +390,16 @@ static const struct intel_device_info intel_sandybridge_m_gt2_info = { .num_pipes = 3, \ .display.has_hotplug = 1, \ .display.has_fbc = 1, \ - .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \ + .engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \ .has_coherent_ggtt = true, \ .has_llc = 1, \ .has_rc6 = 1, \ .has_rc6p = 1, \ - .ppgtt = INTEL_PPGTT_FULL, \ - GEN_DEFAULT_PIPEOFFSETS, \ - GEN_DEFAULT_PAGE_SIZES, \ - IVB_CURSOR_OFFSETS + .ppgtt_type = INTEL_PPGTT_FULL, \ + .ppgtt_size = 31, \ + IVB_PIPE_OFFSETS, \ + IVB_CURSOR_OFFSETS, \ + GEN_DEFAULT_PAGE_SIZES #define IVB_D_PLATFORM \ GEN7_FEATURES, \ @@ -386,24 +449,26 @@ static const struct intel_device_info intel_valleyview_info = { .has_rc6 = 1, .display.has_gmch = 1, .display.has_hotplug = 1, - .ppgtt = INTEL_PPGTT_FULL, + .ppgtt_type = INTEL_PPGTT_FULL, + .ppgtt_size = 31, .has_snoop = true, .has_coherent_ggtt = false, - .ring_mask = RENDER_RING | BSD_RING | BLT_RING, + .engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), .display_mmio_offset = VLV_DISPLAY_BASE, + I9XX_PIPE_OFFSETS, + I9XX_CURSOR_OFFSETS, GEN_DEFAULT_PAGE_SIZES, - GEN_DEFAULT_PIPEOFFSETS, - CURSOR_OFFSETS }; #define G75_FEATURES \ GEN7_FEATURES, \ - .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, \ + .engine_mask = 
BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \ .display.has_ddi = 1, \ .has_fpga_dbg = 1, \ .display.has_psr = 1, \ .display.has_dp_mst = 1, \ .has_rc6p = 0 /* RC6p removed-by HSW */, \ + HSW_PIPE_OFFSETS, \ .has_runtime_pm = 1 #define HSW_PLATFORM \ @@ -433,7 +498,8 @@ static const struct intel_device_info intel_haswell_gt3_info = { .page_sizes = I915_GTT_PAGE_SIZE_4K | \ I915_GTT_PAGE_SIZE_2M, \ .has_logical_ring_contexts = 1, \ - .ppgtt = INTEL_PPGTT_FULL_4LVL, \ + .ppgtt_type = INTEL_PPGTT_FULL, \ + .ppgtt_size = 48, \ .has_64bit_reloc = 1, \ .has_reset_engine = 1 @@ -462,7 +528,8 @@ static const struct intel_device_info intel_broadwell_rsvd_info = { static const struct intel_device_info intel_broadwell_gt3_info = { BDW_PLATFORM, .gt = 3, - .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING, + .engine_mask = + BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1), }; static const struct intel_device_info intel_cherryview_info = { @@ -471,21 +538,22 @@ static const struct intel_device_info intel_cherryview_info = { .num_pipes = 3, .display.has_hotplug = 1, .is_lp = 1, - .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, + .engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), .has_64bit_reloc = 1, .has_runtime_pm = 1, .has_rc6 = 1, .has_logical_ring_contexts = 1, .display.has_gmch = 1, - .ppgtt = INTEL_PPGTT_FULL, + .ppgtt_type = INTEL_PPGTT_FULL, + .ppgtt_size = 32, .has_reset_engine = 1, .has_snoop = true, .has_coherent_ggtt = false, .display_mmio_offset = VLV_DISPLAY_BASE, - GEN_DEFAULT_PAGE_SIZES, - GEN_CHV_PIPEOFFSETS, - CURSOR_OFFSETS, + CHV_PIPE_OFFSETS, + CHV_CURSOR_OFFSETS, CHV_COLORS, + GEN_DEFAULT_PAGE_SIZES, }; #define GEN9_DEFAULT_PAGE_SIZES \ @@ -521,7 +589,8 @@ static const struct intel_device_info intel_skylake_gt2_info = { #define SKL_GT3_PLUS_PLATFORM \ SKL_PLATFORM, \ - .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING + .engine_mask = \ + BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1) static const struct intel_device_info intel_skylake_gt3_info = { @@ -538,7 +607,7 @@ static const struct intel_device_info intel_skylake_gt4_info = { GEN(9), \ .is_lp = 1, \ .display.has_hotplug = 1, \ - .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, \ + .engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \ .num_pipes = 3, \ .has_64bit_reloc = 1, \ .display.has_ddi = 1, \ @@ -552,15 +621,16 @@ static const struct intel_device_info intel_skylake_gt4_info = { .has_logical_ring_contexts = 1, \ .has_logical_ring_preemption = 1, \ .has_guc = 1, \ - .ppgtt = INTEL_PPGTT_FULL_4LVL, \ + .ppgtt_type = INTEL_PPGTT_FULL, \ + .ppgtt_size = 48, \ .has_reset_engine = 1, \ .has_snoop = true, \ .has_coherent_ggtt = false, \ .display.has_ipc = 1, \ - GEN9_DEFAULT_PAGE_SIZES, \ - GEN_DEFAULT_PIPEOFFSETS, \ + HSW_PIPE_OFFSETS, \ IVB_CURSOR_OFFSETS, \ - BDW_COLORS + BDW_COLORS, \ + GEN9_DEFAULT_PAGE_SIZES static const struct intel_device_info intel_broxton_info = { GEN9_LP_FEATURES, @@ -592,7 +662,8 @@ static const struct intel_device_info intel_kabylake_gt2_info = { static const struct intel_device_info intel_kabylake_gt3_info = { KBL_PLATFORM, .gt = 3, - .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING, + .engine_mask = + BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1), }; #define CFL_PLATFORM \ @@ -612,7 +683,8 @@ static const struct intel_device_info intel_coffeelake_gt2_info = { static const struct intel_device_info intel_coffeelake_gt3_info = { CFL_PLATFORM, .gt 
= 3, - .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING, + .engine_mask = + BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1), }; #define GEN10_FEATURES \ @@ -648,13 +720,22 @@ static const struct intel_device_info intel_cannonlake_info = { }, \ GEN(11), \ .ddb_size = 2048, \ - .has_logical_ring_elsq = 1 + .has_logical_ring_elsq = 1, \ + .color = { .degamma_lut_size = 33, .gamma_lut_size = 1024 } static const struct intel_device_info intel_icelake_11_info = { GEN11_FEATURES, PLATFORM(INTEL_ICELAKE), + .engine_mask = + BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2), +}; + +static const struct intel_device_info intel_elkhartlake_info = { + GEN11_FEATURES, + PLATFORM(INTEL_ELKHARTLAKE), .is_alpha_support = 1, - .ring_mask = RENDER_RING | BLT_RING | VEBOX_RING | BSD_RING | BSD3_RING, + .engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VCS0), + .ppgtt_size = 36, }; #undef GEN @@ -722,8 +803,11 @@ static const struct pci_device_id pciidlist[] = { INTEL_WHL_U_GT2_IDS(&intel_coffeelake_gt2_info), INTEL_AML_CFL_GT2_IDS(&intel_coffeelake_gt2_info), INTEL_WHL_U_GT3_IDS(&intel_coffeelake_gt3_info), + INTEL_CML_GT1_IDS(&intel_coffeelake_gt1_info), + INTEL_CML_GT2_IDS(&intel_coffeelake_gt2_info), INTEL_CNL_IDS(&intel_cannonlake_info), INTEL_ICL_11_IDS(&intel_icelake_11_info), + INTEL_EHL_IDS(&intel_elkhartlake_info), {0, 0, 0} }; MODULE_DEVICE_TABLE(pci, pciidlist); @@ -801,7 +885,9 @@ static int __init i915_init(void) bool use_kms = true; int err; - i915_global_active_init(); + err = i915_globals_init(); + if (err) + return err; err = i915_mock_selftests(); if (err) @@ -834,7 +920,7 @@ static void __exit i915_exit(void) return; pci_unregister_driver(&i915_pci_driver); - i915_global_active_exit(); + i915_globals_exit(); } module_init(i915_init); diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index 9ebf99f3d8d3..39a4804091d7 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -1202,7 +1202,7 @@ static int i915_oa_read(struct i915_perf_stream *stream, static struct intel_context *oa_pin_context(struct drm_i915_private *i915, struct i915_gem_context *ctx) { - struct intel_engine_cs *engine = i915->engine[RCS]; + struct intel_engine_cs *engine = i915->engine[RCS0]; struct intel_context *ce; int ret; @@ -1364,7 +1364,7 @@ static void i915_oa_stream_destroy(struct i915_perf_stream *stream) free_oa_buffer(dev_priv); - intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); intel_runtime_pm_put(dev_priv, stream->wakeref); if (stream->ctx) @@ -1509,9 +1509,7 @@ static int alloc_oa_buffer(struct drm_i915_private *dev_priv) goto unlock; } - ret = i915_gem_object_set_cache_level(bo, I915_CACHE_LLC); - if (ret) - goto err_unref; + i915_gem_object_set_cache_coherency(bo, I915_CACHE_LLC); /* PreHSW required 512K alignment, HSW requires 16M */ vma = i915_gem_object_ggtt_pin(bo, NULL, 0, SZ_16M, 0); @@ -1629,13 +1627,14 @@ static void hsw_disable_metric_set(struct drm_i915_private *dev_priv) * It's fine to put out-of-date values into these per-context registers * in the case that the OA unit has been disabled. 
*/ -static void gen8_update_reg_state_unlocked(struct i915_gem_context *ctx, - u32 *reg_state, - const struct i915_oa_config *oa_config) +static void +gen8_update_reg_state_unlocked(struct intel_context *ce, + u32 *reg_state, + const struct i915_oa_config *oa_config) { - struct drm_i915_private *dev_priv = ctx->i915; - u32 ctx_oactxctrl = dev_priv->perf.oa.ctx_oactxctrl_offset; - u32 ctx_flexeu0 = dev_priv->perf.oa.ctx_flexeu0_offset; + struct drm_i915_private *i915 = ce->gem_context->i915; + u32 ctx_oactxctrl = i915->perf.oa.ctx_oactxctrl_offset; + u32 ctx_flexeu0 = i915->perf.oa.ctx_flexeu0_offset; /* The MMIO offsets for Flex EU registers aren't contiguous */ i915_reg_t flex_regs[] = { EU_PERF_CNTL0, @@ -1649,8 +1648,8 @@ static void gen8_update_reg_state_unlocked(struct i915_gem_context *ctx, int i; CTX_REG(reg_state, ctx_oactxctrl, GEN8_OACTXCONTROL, - (dev_priv->perf.oa.period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) | - (dev_priv->perf.oa.periodic ? GEN8_OA_TIMER_ENABLE : 0) | + (i915->perf.oa.period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) | + (i915->perf.oa.periodic ? GEN8_OA_TIMER_ENABLE : 0) | GEN8_OA_COUNTER_RESUME); for (i = 0; i < ARRAY_SIZE(flex_regs); i++) { @@ -1678,10 +1677,9 @@ static void gen8_update_reg_state_unlocked(struct i915_gem_context *ctx, CTX_REG(reg_state, state_offset, flex_regs[i], value); } - CTX_REG(reg_state, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE, - gen8_make_rpcs(dev_priv, - &to_intel_context(ctx, - dev_priv->engine[RCS])->sseu)); + CTX_REG(reg_state, + CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE, + gen8_make_rpcs(i915, &ce->sseu)); } /* @@ -1711,7 +1709,7 @@ static void gen8_update_reg_state_unlocked(struct i915_gem_context *ctx, static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv, const struct i915_oa_config *oa_config) { - struct intel_engine_cs *engine = dev_priv->engine[RCS]; + struct intel_engine_cs *engine = dev_priv->engine[RCS0]; unsigned int map_type = i915_coherent_map_type(dev_priv); struct i915_gem_context *ctx; struct i915_request *rq; @@ -1740,11 +1738,11 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv, /* Update all contexts now that we've stalled the submission. 
*/ list_for_each_entry(ctx, &dev_priv->contexts.list, link) { - struct intel_context *ce = to_intel_context(ctx, engine); + struct intel_context *ce = intel_context_lookup(ctx, engine); u32 *regs; /* OA settings will be set upon first use */ - if (!ce->state) + if (!ce || !ce->state) continue; regs = i915_gem_object_pin_map(ce->state->obj, map_type); @@ -1754,7 +1752,7 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv, ce->state->obj->mm.dirty = true; regs += LRC_STATE_PN * PAGE_SIZE / sizeof(*regs); - gen8_update_reg_state_unlocked(ctx, regs, oa_config); + gen8_update_reg_state_unlocked(ce, regs, oa_config); i915_gem_object_unpin_map(ce->state->obj); } @@ -1922,10 +1920,10 @@ static void i915_oa_stream_enable(struct i915_perf_stream *stream) static void gen7_oa_disable(struct i915_perf_stream *stream) { - struct drm_i915_private *dev_priv = stream->dev_priv; + struct intel_uncore *uncore = &stream->dev_priv->uncore; - I915_WRITE(GEN7_OACONTROL, 0); - if (intel_wait_for_register(dev_priv, + intel_uncore_write(uncore, GEN7_OACONTROL, 0); + if (intel_wait_for_register(uncore, GEN7_OACONTROL, GEN7_OACONTROL_ENABLE, 0, 50)) DRM_ERROR("wait for OA to be disabled timed out\n"); @@ -1933,10 +1931,10 @@ static void gen7_oa_disable(struct i915_perf_stream *stream) static void gen8_oa_disable(struct i915_perf_stream *stream) { - struct drm_i915_private *dev_priv = stream->dev_priv; + struct intel_uncore *uncore = &stream->dev_priv->uncore; - I915_WRITE(GEN8_OACONTROL, 0); - if (intel_wait_for_register(dev_priv, + intel_uncore_write(uncore, GEN8_OACONTROL, 0); + if (intel_wait_for_register(uncore, GEN8_OACONTROL, GEN8_OA_COUNTER_ENABLE, 0, 50)) DRM_ERROR("wait for OA to be disabled timed out\n"); @@ -2093,7 +2091,7 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream, * references will effectively disable RC6. 
*/ stream->wakeref = intel_runtime_pm_get(dev_priv); - intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); ret = alloc_oa_buffer(dev_priv); if (ret) @@ -2127,7 +2125,7 @@ err_lock: err_oa_buf_alloc: put_oa_config(dev_priv, stream->oa_config); - intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); intel_runtime_pm_put(dev_priv, stream->wakeref); err_config: @@ -2138,17 +2136,17 @@ err_config: } void i915_oa_init_reg_state(struct intel_engine_cs *engine, - struct i915_gem_context *ctx, - u32 *reg_state) + struct intel_context *ce, + u32 *regs) { struct i915_perf_stream *stream; - if (engine->id != RCS) + if (engine->class != RENDER_CLASS) return; stream = engine->i915->perf.oa.exclusive_stream; if (stream) - gen8_update_reg_state_unlocked(ctx, reg_state, stream->oa_config); + gen8_update_reg_state_unlocked(ce, regs, stream->oa_config); } /** @@ -2881,12 +2879,24 @@ void i915_perf_register(struct drm_i915_private *dev_priv) sysfs_attr_init(&dev_priv->perf.oa.test_config.sysfs_metric_id.attr); - if (IS_HASWELL(dev_priv)) { - i915_perf_load_test_config_hsw(dev_priv); - } else if (IS_BROADWELL(dev_priv)) { - i915_perf_load_test_config_bdw(dev_priv); - } else if (IS_CHERRYVIEW(dev_priv)) { - i915_perf_load_test_config_chv(dev_priv); + if (INTEL_GEN(dev_priv) >= 11) { + i915_perf_load_test_config_icl(dev_priv); + } else if (IS_CANNONLAKE(dev_priv)) { + i915_perf_load_test_config_cnl(dev_priv); + } else if (IS_COFFEELAKE(dev_priv)) { + if (IS_CFL_GT2(dev_priv)) + i915_perf_load_test_config_cflgt2(dev_priv); + if (IS_CFL_GT3(dev_priv)) + i915_perf_load_test_config_cflgt3(dev_priv); + } else if (IS_GEMINILAKE(dev_priv)) { + i915_perf_load_test_config_glk(dev_priv); + } else if (IS_KABYLAKE(dev_priv)) { + if (IS_KBL_GT2(dev_priv)) + i915_perf_load_test_config_kblgt2(dev_priv); + else if (IS_KBL_GT3(dev_priv)) + i915_perf_load_test_config_kblgt3(dev_priv); + } else if (IS_BROXTON(dev_priv)) { + i915_perf_load_test_config_bxt(dev_priv); } else if (IS_SKYLAKE(dev_priv)) { if (IS_SKL_GT2(dev_priv)) i915_perf_load_test_config_sklgt2(dev_priv); @@ -2894,25 +2904,13 @@ void i915_perf_register(struct drm_i915_private *dev_priv) i915_perf_load_test_config_sklgt3(dev_priv); else if (IS_SKL_GT4(dev_priv)) i915_perf_load_test_config_sklgt4(dev_priv); - } else if (IS_BROXTON(dev_priv)) { - i915_perf_load_test_config_bxt(dev_priv); - } else if (IS_KABYLAKE(dev_priv)) { - if (IS_KBL_GT2(dev_priv)) - i915_perf_load_test_config_kblgt2(dev_priv); - else if (IS_KBL_GT3(dev_priv)) - i915_perf_load_test_config_kblgt3(dev_priv); - } else if (IS_GEMINILAKE(dev_priv)) { - i915_perf_load_test_config_glk(dev_priv); - } else if (IS_COFFEELAKE(dev_priv)) { - if (IS_CFL_GT2(dev_priv)) - i915_perf_load_test_config_cflgt2(dev_priv); - if (IS_CFL_GT3(dev_priv)) - i915_perf_load_test_config_cflgt3(dev_priv); - } else if (IS_CANNONLAKE(dev_priv)) { - i915_perf_load_test_config_cnl(dev_priv); - } else if (IS_ICELAKE(dev_priv)) { - i915_perf_load_test_config_icl(dev_priv); - } + } else if (IS_CHERRYVIEW(dev_priv)) { + i915_perf_load_test_config_chv(dev_priv); + } else if (IS_BROADWELL(dev_priv)) { + i915_perf_load_test_config_bdw(dev_priv); + } else if (IS_HASWELL(dev_priv)) { + i915_perf_load_test_config_hsw(dev_priv); +} if (dev_priv->perf.oa.test_config.id == 0) goto sysfs_error; diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c index 13d70b90dd0f..46a52da3db29 100644 --- 
a/drivers/gpu/drm/i915/i915_pmu.c +++ b/drivers/gpu/drm/i915/i915_pmu.c @@ -5,6 +5,7 @@ */ #include <linux/irq.h> +#include <linux/pm_runtime.h> #include "i915_pmu.h" #include "intel_ringbuffer.h" #include "i915_drv.h" @@ -101,7 +102,7 @@ static bool pmu_needs_timer(struct drm_i915_private *i915, bool gpu_active) * * Use RCS as proxy for all engines. */ - else if (intel_engine_supports_stats(i915->engine[RCS])) + else if (intel_engine_supports_stats(i915->engine[RCS0])) enable &= ~BIT(I915_SAMPLE_BUSY); /* @@ -148,14 +149,6 @@ void i915_pmu_gt_unparked(struct drm_i915_private *i915) spin_unlock_irq(&i915->pmu.lock); } -static bool grab_forcewake(struct drm_i915_private *i915, bool fw) -{ - if (!fw) - intel_uncore_forcewake_get(i915, FORCEWAKE_ALL); - - return true; -} - static void add_sample(struct i915_pmu_sample *sample, u32 val) { @@ -168,49 +161,48 @@ engines_sample(struct drm_i915_private *dev_priv, unsigned int period_ns) struct intel_engine_cs *engine; enum intel_engine_id id; intel_wakeref_t wakeref; - bool fw = false; + unsigned long flags; if ((dev_priv->pmu.enable & ENGINE_SAMPLE_MASK) == 0) return; - if (!dev_priv->gt.awake) - return; - - wakeref = intel_runtime_pm_get_if_in_use(dev_priv); + wakeref = 0; + if (READ_ONCE(dev_priv->gt.awake)) + wakeref = intel_runtime_pm_get_if_in_use(dev_priv); if (!wakeref) return; + spin_lock_irqsave(&dev_priv->uncore.lock, flags); for_each_engine(engine, dev_priv, id) { - u32 current_seqno = intel_engine_get_seqno(engine); - u32 last_seqno = intel_engine_last_submit(engine); + struct intel_engine_pmu *pmu = &engine->pmu; + bool busy; u32 val; - val = !i915_seqno_passed(current_seqno, last_seqno); - - if (val) - add_sample(&engine->pmu.sample[I915_SAMPLE_BUSY], - period_ns); - - if (val && (engine->pmu.enable & - (BIT(I915_SAMPLE_WAIT) | BIT(I915_SAMPLE_SEMA)))) { - fw = grab_forcewake(dev_priv, fw); - - val = I915_READ_FW(RING_CTL(engine->mmio_base)); - } else { - val = 0; - } + val = I915_READ_FW(RING_CTL(engine->mmio_base)); + if (val == 0) /* powerwell off => engine idle */ + continue; if (val & RING_WAIT) - add_sample(&engine->pmu.sample[I915_SAMPLE_WAIT], - period_ns); - + add_sample(&pmu->sample[I915_SAMPLE_WAIT], period_ns); if (val & RING_WAIT_SEMAPHORE) - add_sample(&engine->pmu.sample[I915_SAMPLE_SEMA], - period_ns); - } + add_sample(&pmu->sample[I915_SAMPLE_SEMA], period_ns); - if (fw) - intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); + /* + * While waiting on a semaphore or event, MI_MODE reports the + * ring as idle. However, previously using the seqno, and with + * execlists sampling, we account for the ring waiting as the + * engine being busy. Therefore, we record the sample as being + * busy if either waiting or !idle. + */ + busy = val & (RING_WAIT_SEMAPHORE | RING_WAIT); + if (!busy) { + val = I915_READ_FW(RING_MI_MODE(engine->mmio_base)); + busy = !(val & MODE_IDLE); + } + if (busy) + add_sample(&pmu->sample[I915_SAMPLE_BUSY], period_ns); + } + spin_unlock_irqrestore(&dev_priv->uncore.lock, flags); intel_runtime_pm_put(dev_priv, wakeref); } @@ -483,7 +475,6 @@ static u64 get_rc6(struct drm_i915_private *i915) * counter value. */ spin_lock_irqsave(&i915->pmu.lock, flags); - spin_lock(&kdev->power.lock); /* * After the above branch intel_runtime_pm_get_if_in_use failed @@ -496,16 +487,13 @@ static u64 get_rc6(struct drm_i915_private *i915) * suspended and if not we cannot do better than report the last * known RC6 value. 
*/ - if (kdev->power.runtime_status == RPM_SUSPENDED) { - if (!i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur) - i915->pmu.suspended_jiffies_last = - kdev->power.suspended_jiffies; + if (pm_runtime_status_suspended(kdev)) { + val = pm_runtime_suspended_time(kdev); - val = kdev->power.suspended_jiffies - - i915->pmu.suspended_jiffies_last; - val += jiffies - kdev->power.accounting_timestamp; + if (!i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur) + i915->pmu.suspended_time_last = val; - val = jiffies_to_nsecs(val); + val -= i915->pmu.suspended_time_last; val += i915->pmu.sample[__I915_SAMPLE_RC6].cur; i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val; @@ -515,7 +503,6 @@ static u64 get_rc6(struct drm_i915_private *i915) val = i915->pmu.sample[__I915_SAMPLE_RC6].cur; } - spin_unlock(&kdev->power.lock); spin_unlock_irqrestore(&i915->pmu.lock, flags); } diff --git a/drivers/gpu/drm/i915/i915_pmu.h b/drivers/gpu/drm/i915/i915_pmu.h index b3728c5f13e7..4fc4f2478301 100644 --- a/drivers/gpu/drm/i915/i915_pmu.h +++ b/drivers/gpu/drm/i915/i915_pmu.h @@ -97,9 +97,9 @@ struct i915_pmu { */ struct i915_pmu_sample sample[__I915_NUM_PMU_SAMPLERS]; /** - * @suspended_jiffies_last: Cached suspend time from PM core. + * @suspended_time_last: Cached suspend time from PM core. */ - unsigned long suspended_jiffies_last; + u64 suspended_time_last; /** * @i915_attr: Memory block holding device attributes. */ diff --git a/drivers/gpu/drm/i915/i915_pvinfo.h b/drivers/gpu/drm/i915/i915_pvinfo.h index eeaa3d506d95..969e514916ab 100644 --- a/drivers/gpu/drm/i915/i915_pvinfo.h +++ b/drivers/gpu/drm/i915/i915_pvinfo.h @@ -52,7 +52,7 @@ enum vgt_g2v_type { /* * VGT capabilities type */ -#define VGT_CAPS_FULL_48BIT_PPGTT BIT(2) +#define VGT_CAPS_FULL_PPGTT BIT(2) #define VGT_CAPS_HWSP_EMULATION BIT(3) #define VGT_CAPS_HUGE_GTT BIT(4) diff --git a/drivers/gpu/drm/i915/i915_query.c b/drivers/gpu/drm/i915/i915_query.c index cbcb957b7141..782183b78f49 100644 --- a/drivers/gpu/drm/i915/i915_query.c +++ b/drivers/gpu/drm/i915/i915_query.c @@ -10,12 +10,34 @@ #include "i915_query.h" #include <uapi/drm/i915_drm.h> +static int copy_query_item(void *query_hdr, size_t query_sz, + u32 total_length, + struct drm_i915_query_item *query_item) +{ + if (query_item->length == 0) + return total_length; + + if (query_item->length < total_length) + return -EINVAL; + + if (copy_from_user(query_hdr, u64_to_user_ptr(query_item->data_ptr), + query_sz)) + return -EFAULT; + + if (!access_ok(u64_to_user_ptr(query_item->data_ptr), + total_length)) + return -EFAULT; + + return 0; +} + static int query_topology_info(struct drm_i915_private *dev_priv, struct drm_i915_query_item *query_item) { const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu; struct drm_i915_query_topology_info topo; u32 slice_length, subslice_length, eu_length, total_length; + int ret; if (query_item->flags != 0) return -EINVAL; @@ -33,23 +55,14 @@ static int query_topology_info(struct drm_i915_private *dev_priv, total_length = sizeof(topo) + slice_length + subslice_length + eu_length; - if (query_item->length == 0) - return total_length; - - if (query_item->length < total_length) - return -EINVAL; - - if (copy_from_user(&topo, u64_to_user_ptr(query_item->data_ptr), - sizeof(topo))) - return -EFAULT; + ret = copy_query_item(&topo, sizeof(topo), total_length, + query_item); + if (ret != 0) + return ret; if (topo.flags != 0) return -EINVAL; - if (!access_ok(u64_to_user_ptr(query_item->data_ptr), - total_length)) - return -EFAULT; - memset(&topo, 0, 
sizeof(topo)); topo.max_slices = sseu->max_slices; topo.max_subslices = sseu->max_subslices; diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index cf80c5d8af34..c866379a521b 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -25,6 +25,9 @@ #ifndef _I915_REG_H_ #define _I915_REG_H_ +#include <linux/bitfield.h> +#include <linux/bits.h> + /** * DOC: The i915 register macro definition style guide * @@ -59,15 +62,13 @@ * significant to least significant bit. Indent the register content macros * using two extra spaces between ``#define`` and the macro name. * - * For bit fields, define a ``_MASK`` and a ``_SHIFT`` macro. Define bit field - * contents so that they are already shifted in place, and can be directly - * OR'd. For convenience, function-like macros may be used to define bit fields, - * but do note that the macros may be needed to read as well as write the - * register contents. + * Define bit fields using ``REG_GENMASK(h, l)``. Define bit field contents + * using ``REG_FIELD_PREP(mask, value)``. This will define the values already + * shifted in place, so they can be directly OR'd together. For convenience, + * function-like macros may be used to define bit fields, but do note that the + * macros may be needed to read as well as write the register contents. * - * Define bits using ``(1 << N)`` instead of ``BIT(N)``. We may change this in - * the future, but this is the prevailing style. Do **not** add ``_BIT`` suffix - * to the name. + * Define bits using ``REG_BIT(N)``. Do **not** add ``_BIT`` suffix to the name. * * Group the register and its contents together without blank lines, separate * from other registers and their contents with one blank line. @@ -105,17 +106,78 @@ * #define _FOO_A 0xf000 * #define _FOO_B 0xf001 * #define FOO(pipe) _MMIO_PIPE(pipe, _FOO_A, _FOO_B) - * #define FOO_ENABLE (1 << 31) - * #define FOO_MODE_MASK (0xf << 16) - * #define FOO_MODE_SHIFT 16 - * #define FOO_MODE_BAR (0 << 16) - * #define FOO_MODE_BAZ (1 << 16) - * #define FOO_MODE_QUX_SNB (2 << 16) + * #define FOO_ENABLE REG_BIT(31) + * #define FOO_MODE_MASK REG_GENMASK(19, 16) + * #define FOO_MODE_BAR REG_FIELD_PREP(FOO_MODE_MASK, 0) + * #define FOO_MODE_BAZ REG_FIELD_PREP(FOO_MODE_MASK, 1) + * #define FOO_MODE_QUX_SNB REG_FIELD_PREP(FOO_MODE_MASK, 2) * * #define BAR _MMIO(0xb000) * #define GEN8_BAR _MMIO(0xb888) */ +/** + * REG_BIT() - Prepare a u32 bit value + * @__n: 0-based bit number + * + * Local wrapper for BIT() to force u32, with compile time checks. + * + * @return: Value with bit @__n set. + */ +#define REG_BIT(__n) \ + ((u32)(BIT(__n) + \ + BUILD_BUG_ON_ZERO(__builtin_constant_p(__n) && \ + ((__n) < 0 || (__n) > 31)))) + +/** + * REG_GENMASK() - Prepare a continuous u32 bitmask + * @__high: 0-based high bit + * @__low: 0-based low bit + * + * Local wrapper for GENMASK() to force u32, with compile time checks. + * + * @return: Continuous bitmask from @__high to @__low, inclusive. + */ +#define REG_GENMASK(__high, __low) \ + ((u32)(GENMASK(__high, __low) + \ + BUILD_BUG_ON_ZERO(__builtin_constant_p(__high) && \ + __builtin_constant_p(__low) && \ + ((__low) < 0 || (__high) > 31 || (__low) > (__high))))) + +/* + * Local integer constant expression version of is_power_of_2(). 
+ */ +#define IS_POWER_OF_2(__x) ((__x) && (((__x) & ((__x) - 1)) == 0)) + +/** + * REG_FIELD_PREP() - Prepare a u32 bitfield value + * @__mask: shifted mask defining the field's length and position + * @__val: value to put in the field + + * Local copy of FIELD_PREP() to generate an integer constant expression, force + * u32 and for consistency with REG_FIELD_GET(), REG_BIT() and REG_GENMASK(). + * + * @return: @__val masked and shifted into the field defined by @__mask. + */ +#define REG_FIELD_PREP(__mask, __val) \ + ((u32)((((typeof(__mask))(__val) << __bf_shf(__mask)) & (__mask)) + \ + BUILD_BUG_ON_ZERO(!__is_constexpr(__mask)) + \ + BUILD_BUG_ON_ZERO((__mask) == 0 || (__mask) > U32_MAX) + \ + BUILD_BUG_ON_ZERO(!IS_POWER_OF_2((__mask) + (1ULL << __bf_shf(__mask)))) + \ + BUILD_BUG_ON_ZERO(__builtin_choose_expr(__is_constexpr(__val), (~((__mask) >> __bf_shf(__mask)) & (__val)), 0)))) + +/** + * REG_FIELD_GET() - Extract a u32 bitfield value + * @__mask: shifted mask defining the field's length and position + * @__val: value to extract the bitfield value from + * + * Local wrapper for FIELD_GET() to force u32 and for consistency with + * REG_FIELD_PREP(), REG_BIT() and REG_GENMASK(). + * + * @return: Masked and shifted value of the field defined by @__mask in @__val. + */ +#define REG_FIELD_GET(__mask, __val) ((u32)FIELD_GET(__mask, __val)) + typedef struct { u32 reg; } i915_reg_t; @@ -210,14 +272,14 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) /* Engine ID */ -#define RCS_HW 0 -#define VCS_HW 1 -#define BCS_HW 2 -#define VECS_HW 3 -#define VCS2_HW 4 -#define VCS3_HW 6 -#define VCS4_HW 7 -#define VECS2_HW 12 +#define RCS0_HW 0 +#define VCS0_HW 1 +#define BCS0_HW 2 +#define VECS0_HW 3 +#define VCS1_HW 4 +#define VCS2_HW 6 +#define VCS3_HW 7 +#define VECS1_HW 12 /* Engine class */ @@ -372,9 +434,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define GEN11_VECS_SFC_USAGE(engine) _MMIO((engine)->mmio_base + 0x2014) #define GEN11_VECS_SFC_USAGE_BIT (1 << 0) -#define RING_PP_DIR_BASE(engine) _MMIO((engine)->mmio_base + 0x228) -#define RING_PP_DIR_BASE_READ(engine) _MMIO((engine)->mmio_base + 0x518) -#define RING_PP_DIR_DCLV(engine) _MMIO((engine)->mmio_base + 0x220) +#define RING_PP_DIR_BASE(base) _MMIO((base) + 0x228) +#define RING_PP_DIR_BASE_READ(base) _MMIO((base) + 0x518) +#define RING_PP_DIR_DCLV(base) _MMIO((base) + 0x220) #define PP_DIR_DCLV_2G 0xffffffff #define GEN8_RING_PDP_UDW(engine, n) _MMIO((engine)->mmio_base + 0x270 + (n) * 8 + 4) @@ -1044,7 +1106,32 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) /* See configdb bunit SB addr map */ #define BUNIT_REG_BISOC 0x11 -#define PUNIT_REG_DSPFREQ 0x36 +/* PUNIT_REG_*SSPM0 */ +#define _SSPM0_SSC(val) ((val) << 0) +#define SSPM0_SSC_MASK _SSPM0_SSC(0x3) +#define SSPM0_SSC_PWR_ON _SSPM0_SSC(0x0) +#define SSPM0_SSC_CLK_GATE _SSPM0_SSC(0x1) +#define SSPM0_SSC_RESET _SSPM0_SSC(0x2) +#define SSPM0_SSC_PWR_GATE _SSPM0_SSC(0x3) +#define _SSPM0_SSS(val) ((val) << 24) +#define SSPM0_SSS_MASK _SSPM0_SSS(0x3) +#define SSPM0_SSS_PWR_ON _SSPM0_SSS(0x0) +#define SSPM0_SSS_CLK_GATE _SSPM0_SSS(0x1) +#define SSPM0_SSS_RESET _SSPM0_SSS(0x2) +#define SSPM0_SSS_PWR_GATE _SSPM0_SSS(0x3) + +/* PUNIT_REG_*SSPM1 */ +#define SSPM1_FREQSTAT_SHIFT 24 +#define SSPM1_FREQSTAT_MASK (0x1f << SSPM1_FREQSTAT_SHIFT) +#define SSPM1_FREQGUAR_SHIFT 8 +#define SSPM1_FREQGUAR_MASK (0x1f << SSPM1_FREQGUAR_SHIFT) +#define SSPM1_FREQ_SHIFT 0 +#define SSPM1_FREQ_MASK (0x1f << SSPM1_FREQ_SHIFT) + +#define PUNIT_REG_VEDSSPM0 0x32 +#define 
PUNIT_REG_VEDSSPM1 0x33 + +#define PUNIT_REG_DSPSSPM 0x36 #define DSPFREQSTAT_SHIFT_CHV 24 #define DSPFREQSTAT_MASK_CHV (0x1f << DSPFREQSTAT_SHIFT_CHV) #define DSPFREQGUAR_SHIFT_CHV 8 @@ -1069,6 +1156,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define DP_SSS_RESET(pipe) _DP_SSS(0x2, (pipe)) #define DP_SSS_PWR_GATE(pipe) _DP_SSS(0x3, (pipe)) +#define PUNIT_REG_ISPSSPM0 0x39 +#define PUNIT_REG_ISPSSPM1 0x3a + /* * i915_power_well_id: * @@ -1860,13 +1950,13 @@ enum i915_power_well_id { #define _CNL_PORT_TX_DW4_LN1_AE 0x1624D0 #define CNL_PORT_TX_DW4_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP(4, (port))) #define CNL_PORT_TX_DW4_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0(4, (port))) -#define CNL_PORT_TX_DW4_LN(port, ln) _MMIO(_CNL_PORT_TX_DW_LN0(4, (port)) + \ +#define CNL_PORT_TX_DW4_LN(ln, port) _MMIO(_CNL_PORT_TX_DW_LN0(4, (port)) + \ ((ln) * (_CNL_PORT_TX_DW4_LN1_AE - \ _CNL_PORT_TX_DW4_LN0_AE))) #define ICL_PORT_TX_DW4_AUX(port) _MMIO(_ICL_PORT_TX_DW_AUX(4, port)) #define ICL_PORT_TX_DW4_GRP(port) _MMIO(_ICL_PORT_TX_DW_GRP(4, port)) #define ICL_PORT_TX_DW4_LN0(port) _MMIO(_ICL_PORT_TX_DW_LN(4, 0, port)) -#define ICL_PORT_TX_DW4_LN(port, ln) _MMIO(_ICL_PORT_TX_DW_LN(4, ln, port)) +#define ICL_PORT_TX_DW4_LN(ln, port) _MMIO(_ICL_PORT_TX_DW_LN(4, ln, port)) #define LOADGEN_SELECT (1 << 31) #define POST_CURSOR_1(x) ((x) << 12) #define POST_CURSOR_1_MASK (0x3F << 12) @@ -1893,11 +1983,11 @@ enum i915_power_well_id { #define ICL_PORT_TX_DW7_AUX(port) _MMIO(_ICL_PORT_TX_DW_AUX(7, port)) #define ICL_PORT_TX_DW7_GRP(port) _MMIO(_ICL_PORT_TX_DW_GRP(7, port)) #define ICL_PORT_TX_DW7_LN0(port) _MMIO(_ICL_PORT_TX_DW_LN(7, 0, port)) -#define ICL_PORT_TX_DW7_LN(port, ln) _MMIO(_ICL_PORT_TX_DW_LN(7, ln, port)) +#define ICL_PORT_TX_DW7_LN(ln, port) _MMIO(_ICL_PORT_TX_DW_LN(7, ln, port)) #define N_SCALAR(x) ((x) << 24) #define N_SCALAR_MASK (0x7F << 24) -#define MG_PHY_PORT_LN(port, ln, ln0p1, ln0p2, ln1p1) \ +#define MG_PHY_PORT_LN(ln, port, ln0p1, ln0p2, ln1p1) \ _MMIO(_PORT((port) - PORT_C, ln0p1, ln0p2) + (ln) * ((ln1p1) - (ln0p1))) #define MG_TX_LINK_PARAMS_TX1LN0_PORT1 0x16812C @@ -1908,8 +1998,8 @@ enum i915_power_well_id { #define MG_TX_LINK_PARAMS_TX1LN1_PORT3 0x16A52C #define MG_TX_LINK_PARAMS_TX1LN0_PORT4 0x16B12C #define MG_TX_LINK_PARAMS_TX1LN1_PORT4 0x16B52C -#define MG_TX1_LINK_PARAMS(port, ln) \ - MG_PHY_PORT_LN(port, ln, MG_TX_LINK_PARAMS_TX1LN0_PORT1, \ +#define MG_TX1_LINK_PARAMS(ln, port) \ + MG_PHY_PORT_LN(ln, port, MG_TX_LINK_PARAMS_TX1LN0_PORT1, \ MG_TX_LINK_PARAMS_TX1LN0_PORT2, \ MG_TX_LINK_PARAMS_TX1LN1_PORT1) @@ -1921,8 +2011,8 @@ enum i915_power_well_id { #define MG_TX_LINK_PARAMS_TX2LN1_PORT3 0x16A4AC #define MG_TX_LINK_PARAMS_TX2LN0_PORT4 0x16B0AC #define MG_TX_LINK_PARAMS_TX2LN1_PORT4 0x16B4AC -#define MG_TX2_LINK_PARAMS(port, ln) \ - MG_PHY_PORT_LN(port, ln, MG_TX_LINK_PARAMS_TX2LN0_PORT1, \ +#define MG_TX2_LINK_PARAMS(ln, port) \ + MG_PHY_PORT_LN(ln, port, MG_TX_LINK_PARAMS_TX2LN0_PORT1, \ MG_TX_LINK_PARAMS_TX2LN0_PORT2, \ MG_TX_LINK_PARAMS_TX2LN1_PORT1) #define CRI_USE_FS32 (1 << 5) @@ -1935,8 +2025,8 @@ enum i915_power_well_id { #define MG_TX_PISO_READLOAD_TX1LN1_PORT3 0x16A54C #define MG_TX_PISO_READLOAD_TX1LN0_PORT4 0x16B14C #define MG_TX_PISO_READLOAD_TX1LN1_PORT4 0x16B54C -#define MG_TX1_PISO_READLOAD(port, ln) \ - MG_PHY_PORT_LN(port, ln, MG_TX_PISO_READLOAD_TX1LN0_PORT1, \ +#define MG_TX1_PISO_READLOAD(ln, port) \ + MG_PHY_PORT_LN(ln, port, MG_TX_PISO_READLOAD_TX1LN0_PORT1, \ MG_TX_PISO_READLOAD_TX1LN0_PORT2, \ MG_TX_PISO_READLOAD_TX1LN1_PORT1) @@ -1948,8 +2038,8 
@@ enum i915_power_well_id { #define MG_TX_PISO_READLOAD_TX2LN1_PORT3 0x16A4CC #define MG_TX_PISO_READLOAD_TX2LN0_PORT4 0x16B0CC #define MG_TX_PISO_READLOAD_TX2LN1_PORT4 0x16B4CC -#define MG_TX2_PISO_READLOAD(port, ln) \ - MG_PHY_PORT_LN(port, ln, MG_TX_PISO_READLOAD_TX2LN0_PORT1, \ +#define MG_TX2_PISO_READLOAD(ln, port) \ + MG_PHY_PORT_LN(ln, port, MG_TX_PISO_READLOAD_TX2LN0_PORT1, \ MG_TX_PISO_READLOAD_TX2LN0_PORT2, \ MG_TX_PISO_READLOAD_TX2LN1_PORT1) #define CRI_CALCINIT (1 << 1) @@ -1962,8 +2052,8 @@ enum i915_power_well_id { #define MG_TX_SWINGCTRL_TX1LN1_PORT3 0x16A548 #define MG_TX_SWINGCTRL_TX1LN0_PORT4 0x16B148 #define MG_TX_SWINGCTRL_TX1LN1_PORT4 0x16B548 -#define MG_TX1_SWINGCTRL(port, ln) \ - MG_PHY_PORT_LN(port, ln, MG_TX_SWINGCTRL_TX1LN0_PORT1, \ +#define MG_TX1_SWINGCTRL(ln, port) \ + MG_PHY_PORT_LN(ln, port, MG_TX_SWINGCTRL_TX1LN0_PORT1, \ MG_TX_SWINGCTRL_TX1LN0_PORT2, \ MG_TX_SWINGCTRL_TX1LN1_PORT1) @@ -1975,8 +2065,8 @@ enum i915_power_well_id { #define MG_TX_SWINGCTRL_TX2LN1_PORT3 0x16A4C8 #define MG_TX_SWINGCTRL_TX2LN0_PORT4 0x16B0C8 #define MG_TX_SWINGCTRL_TX2LN1_PORT4 0x16B4C8 -#define MG_TX2_SWINGCTRL(port, ln) \ - MG_PHY_PORT_LN(port, ln, MG_TX_SWINGCTRL_TX2LN0_PORT1, \ +#define MG_TX2_SWINGCTRL(ln, port) \ + MG_PHY_PORT_LN(ln, port, MG_TX_SWINGCTRL_TX2LN0_PORT1, \ MG_TX_SWINGCTRL_TX2LN0_PORT2, \ MG_TX_SWINGCTRL_TX2LN1_PORT1) #define CRI_TXDEEMPH_OVERRIDE_17_12(x) ((x) << 0) @@ -1990,8 +2080,8 @@ enum i915_power_well_id { #define MG_TX_DRVCTRL_TX1LN1_TXPORT3 0x16A544 #define MG_TX_DRVCTRL_TX1LN0_TXPORT4 0x16B144 #define MG_TX_DRVCTRL_TX1LN1_TXPORT4 0x16B544 -#define MG_TX1_DRVCTRL(port, ln) \ - MG_PHY_PORT_LN(port, ln, MG_TX_DRVCTRL_TX1LN0_TXPORT1, \ +#define MG_TX1_DRVCTRL(ln, port) \ + MG_PHY_PORT_LN(ln, port, MG_TX_DRVCTRL_TX1LN0_TXPORT1, \ MG_TX_DRVCTRL_TX1LN0_TXPORT2, \ MG_TX_DRVCTRL_TX1LN1_TXPORT1) @@ -2003,8 +2093,8 @@ enum i915_power_well_id { #define MG_TX_DRVCTRL_TX2LN1_PORT3 0x16A4C4 #define MG_TX_DRVCTRL_TX2LN0_PORT4 0x16B0C4 #define MG_TX_DRVCTRL_TX2LN1_PORT4 0x16B4C4 -#define MG_TX2_DRVCTRL(port, ln) \ - MG_PHY_PORT_LN(port, ln, MG_TX_DRVCTRL_TX2LN0_PORT1, \ +#define MG_TX2_DRVCTRL(ln, port) \ + MG_PHY_PORT_LN(ln, port, MG_TX_DRVCTRL_TX2LN0_PORT1, \ MG_TX_DRVCTRL_TX2LN0_PORT2, \ MG_TX_DRVCTRL_TX2LN1_PORT1) #define CRI_TXDEEMPH_OVERRIDE_11_6(x) ((x) << 24) @@ -2023,8 +2113,8 @@ enum i915_power_well_id { #define MG_CLKHUB_LN1_PORT3 0x16A79C #define MG_CLKHUB_LN0_PORT4 0x16B39C #define MG_CLKHUB_LN1_PORT4 0x16B79C -#define MG_CLKHUB(port, ln) \ - MG_PHY_PORT_LN(port, ln, MG_CLKHUB_LN0_PORT1, \ +#define MG_CLKHUB(ln, port) \ + MG_PHY_PORT_LN(ln, port, MG_CLKHUB_LN0_PORT1, \ MG_CLKHUB_LN0_PORT2, \ MG_CLKHUB_LN1_PORT1) #define CFG_LOW_RATE_LKREN_EN (1 << 11) @@ -2037,8 +2127,8 @@ enum i915_power_well_id { #define MG_TX_DCC_TX1LN1_PORT3 0x16A510 #define MG_TX_DCC_TX1LN0_PORT4 0x16B110 #define MG_TX_DCC_TX1LN1_PORT4 0x16B510 -#define MG_TX1_DCC(port, ln) \ - MG_PHY_PORT_LN(port, ln, MG_TX_DCC_TX1LN0_PORT1, \ +#define MG_TX1_DCC(ln, port) \ + MG_PHY_PORT_LN(ln, port, MG_TX_DCC_TX1LN0_PORT1, \ MG_TX_DCC_TX1LN0_PORT2, \ MG_TX_DCC_TX1LN1_PORT1) #define MG_TX_DCC_TX2LN0_PORT1 0x168090 @@ -2049,8 +2139,8 @@ enum i915_power_well_id { #define MG_TX_DCC_TX2LN1_PORT3 0x16A490 #define MG_TX_DCC_TX2LN0_PORT4 0x16B090 #define MG_TX_DCC_TX2LN1_PORT4 0x16B490 -#define MG_TX2_DCC(port, ln) \ - MG_PHY_PORT_LN(port, ln, MG_TX_DCC_TX2LN0_PORT1, \ +#define MG_TX2_DCC(ln, port) \ + MG_PHY_PORT_LN(ln, port, MG_TX_DCC_TX2LN0_PORT1, \ MG_TX_DCC_TX2LN0_PORT2, \ 
MG_TX_DCC_TX2LN1_PORT1) #define CFG_AMI_CK_DIV_OVERRIDE_VAL(x) ((x) << 25) @@ -2065,8 +2155,8 @@ enum i915_power_well_id { #define MG_DP_MODE_LN1_ACU_PORT3 0x16A7A0 #define MG_DP_MODE_LN0_ACU_PORT4 0x16B3A0 #define MG_DP_MODE_LN1_ACU_PORT4 0x16B7A0 -#define MG_DP_MODE(port, ln) \ - MG_PHY_PORT_LN(port, ln, MG_DP_MODE_LN0_ACU_PORT1, \ +#define MG_DP_MODE(ln, port) \ + MG_PHY_PORT_LN(ln, port, MG_DP_MODE_LN0_ACU_PORT1, \ MG_DP_MODE_LN0_ACU_PORT2, \ MG_DP_MODE_LN1_ACU_PORT1) #define MG_DP_MODE_CFG_DP_X2_MODE (1 << 7) @@ -2478,12 +2568,12 @@ enum i915_power_well_id { #define HWS_START_ADDRESS_SHIFT 4 #define PWRCTXA _MMIO(0x2088) /* 965GM+ only */ #define PWRCTX_EN (1 << 0) -#define IPEIR _MMIO(0x2088) -#define IPEHR _MMIO(0x208c) +#define IPEIR(base) _MMIO((base) + 0x88) +#define IPEHR(base) _MMIO((base) + 0x8c) #define GEN2_INSTDONE _MMIO(0x2090) #define NOPID _MMIO(0x2094) #define HWSTAM _MMIO(0x2098) -#define DMA_FADD_I8XX _MMIO(0x20d0) +#define DMA_FADD_I8XX(base) _MMIO((base) + 0xd0) #define RING_BBSTATE(base) _MMIO((base) + 0x110) #define RING_BB_PPGTT (1 << 5) #define RING_SBBADDR(base) _MMIO((base) + 0x114) /* hsw+ */ @@ -2657,7 +2747,7 @@ enum i915_power_well_id { #define INSTPM_FORCE_ORDERING (1 << 7) /* GEN6+ */ #define INSTPM_TLB_INVALIDATE (1 << 9) #define INSTPM_SYNC_FLUSH (1 << 5) -#define ACTHD _MMIO(0x20c8) +#define ACTHD(base) _MMIO((base) + 0xc8) #define MEM_MODE _MMIO(0x20cc) #define MEM_DISPLAY_B_TRICKLE_FEED_DISABLE (1 << 3) /* 830 only */ #define MEM_DISPLAY_A_TRICKLE_FEED_DISABLE (1 << 2) /* 830/845 only */ @@ -2863,7 +2953,7 @@ enum i915_power_well_id { #define GEN11_GT_VEBOX_VDBOX_DISABLE _MMIO(0x9140) #define GEN11_GT_VDBOX_DISABLE_MASK 0xff #define GEN11_GT_VEBOX_DISABLE_SHIFT 16 -#define GEN11_GT_VEBOX_DISABLE_MASK (0xff << GEN11_GT_VEBOX_DISABLE_SHIFT) +#define GEN11_GT_VEBOX_DISABLE_MASK (0x0f << GEN11_GT_VEBOX_DISABLE_SHIFT) #define GEN11_EU_DISABLE _MMIO(0x9134) #define GEN11_EU_DIS_MASK 0xFF @@ -3857,7 +3947,7 @@ enum i915_power_well_id { /* * Logical Context regs */ -#define CCID _MMIO(0x2180) +#define CCID(base) _MMIO((base) + 0x180) #define CCID_EN BIT(0) #define CCID_EXTENDED_STATE_RESTORE BIT(2) #define CCID_EXTENDED_STATE_SAVE BIT(3) @@ -3989,6 +4079,15 @@ enum { /* Pipe A CRC regs */ #define _PIPE_CRC_CTL_A 0x60050 #define PIPE_CRC_ENABLE (1 << 31) +/* skl+ source selection */ +#define PIPE_CRC_SOURCE_PLANE_1_SKL (0 << 28) +#define PIPE_CRC_SOURCE_PLANE_2_SKL (2 << 28) +#define PIPE_CRC_SOURCE_DMUX_SKL (4 << 28) +#define PIPE_CRC_SOURCE_PLANE_3_SKL (6 << 28) +#define PIPE_CRC_SOURCE_PLANE_4_SKL (7 << 28) +#define PIPE_CRC_SOURCE_PLANE_5_SKL (5 << 28) +#define PIPE_CRC_SOURCE_PLANE_6_SKL (3 << 28) +#define PIPE_CRC_SOURCE_PLANE_7_SKL (1 << 28) /* ivb+ source selection */ #define PIPE_CRC_SOURCE_PRIMARY_IVB (0 << 29) #define PIPE_CRC_SOURCE_SPRITE_IVB (1 << 29) @@ -4168,6 +4267,7 @@ enum { #define EDP_PSR_TP2_TP3_TIME_100us (1 << 8) #define EDP_PSR_TP2_TP3_TIME_2500us (2 << 8) #define EDP_PSR_TP2_TP3_TIME_0us (3 << 8) +#define EDP_PSR_TP4_TIME_0US (3 << 6) /* ICL+ */ #define EDP_PSR_TP1_TIME_500us (0 << 4) #define EDP_PSR_TP1_TIME_100us (1 << 4) #define EDP_PSR_TP1_TIME_2500us (2 << 4) @@ -4612,13 +4712,14 @@ enum { #define VIDEO_DIP_ENABLE (1 << 31) #define VIDEO_DIP_PORT(port) ((port) << 29) #define VIDEO_DIP_PORT_MASK (3 << 29) -#define VIDEO_DIP_ENABLE_GCP (1 << 25) +#define VIDEO_DIP_ENABLE_GCP (1 << 25) /* ilk+ */ #define VIDEO_DIP_ENABLE_AVI (1 << 21) #define VIDEO_DIP_ENABLE_VENDOR (2 << 21) -#define VIDEO_DIP_ENABLE_GAMUT (4 << 21) +#define 
VIDEO_DIP_ENABLE_GAMUT (4 << 21) /* ilk+ */ #define VIDEO_DIP_ENABLE_SPD (8 << 21) #define VIDEO_DIP_SELECT_AVI (0 << 19) #define VIDEO_DIP_SELECT_VENDOR (1 << 19) +#define VIDEO_DIP_SELECT_GAMUT (2 << 19) #define VIDEO_DIP_SELECT_SPD (3 << 19) #define VIDEO_DIP_SELECT_MASK (3 << 19) #define VIDEO_DIP_FREQ_ONCE (0 << 16) @@ -4653,18 +4754,17 @@ enum { #define _PP_STATUS 0x61200 #define PP_STATUS(pps_idx) _MMIO_PPS(pps_idx, _PP_STATUS) -#define PP_ON (1 << 31) +#define PP_ON REG_BIT(31) #define _PP_CONTROL_1 0xc7204 #define _PP_CONTROL_2 0xc7304 #define ICP_PP_CONTROL(x) _MMIO(((x) == 1) ? _PP_CONTROL_1 : \ _PP_CONTROL_2) -#define POWER_CYCLE_DELAY_MASK (0x1f << 4) -#define POWER_CYCLE_DELAY_SHIFT 4 -#define VDD_OVERRIDE_FORCE (1 << 3) -#define BACKLIGHT_ENABLE (1 << 2) -#define PWR_DOWN_ON_RESET (1 << 1) -#define PWR_STATE_TARGET (1 << 0) +#define POWER_CYCLE_DELAY_MASK REG_GENMASK(8, 4) +#define VDD_OVERRIDE_FORCE REG_BIT(3) +#define BACKLIGHT_ENABLE REG_BIT(2) +#define PWR_DOWN_ON_RESET REG_BIT(1) +#define PWR_STATE_TARGET REG_BIT(0) /* * Indicates that all dependencies of the panel are on: * @@ -4672,62 +4772,53 @@ enum { * - pipe enabled * - LVDS/DVOB/DVOC on */ -#define PP_READY (1 << 30) -#define PP_SEQUENCE_NONE (0 << 28) -#define PP_SEQUENCE_POWER_UP (1 << 28) -#define PP_SEQUENCE_POWER_DOWN (2 << 28) -#define PP_SEQUENCE_MASK (3 << 28) -#define PP_SEQUENCE_SHIFT 28 -#define PP_CYCLE_DELAY_ACTIVE (1 << 27) -#define PP_SEQUENCE_STATE_MASK 0x0000000f -#define PP_SEQUENCE_STATE_OFF_IDLE (0x0 << 0) -#define PP_SEQUENCE_STATE_OFF_S0_1 (0x1 << 0) -#define PP_SEQUENCE_STATE_OFF_S0_2 (0x2 << 0) -#define PP_SEQUENCE_STATE_OFF_S0_3 (0x3 << 0) -#define PP_SEQUENCE_STATE_ON_IDLE (0x8 << 0) -#define PP_SEQUENCE_STATE_ON_S1_0 (0x9 << 0) -#define PP_SEQUENCE_STATE_ON_S1_2 (0xa << 0) -#define PP_SEQUENCE_STATE_ON_S1_3 (0xb << 0) -#define PP_SEQUENCE_STATE_RESET (0xf << 0) +#define PP_READY REG_BIT(30) +#define PP_SEQUENCE_MASK REG_GENMASK(29, 28) +#define PP_SEQUENCE_NONE REG_FIELD_PREP(PP_SEQUENCE_MASK, 0) +#define PP_SEQUENCE_POWER_UP REG_FIELD_PREP(PP_SEQUENCE_MASK, 1) +#define PP_SEQUENCE_POWER_DOWN REG_FIELD_PREP(PP_SEQUENCE_MASK, 2) +#define PP_CYCLE_DELAY_ACTIVE REG_BIT(27) +#define PP_SEQUENCE_STATE_MASK REG_GENMASK(3, 0) +#define PP_SEQUENCE_STATE_OFF_IDLE REG_FIELD_PREP(PP_SEQUENCE_STATE_MASK, 0x0) +#define PP_SEQUENCE_STATE_OFF_S0_1 REG_FIELD_PREP(PP_SEQUENCE_STATE_MASK, 0x1) +#define PP_SEQUENCE_STATE_OFF_S0_2 REG_FIELD_PREP(PP_SEQUENCE_STATE_MASK, 0x2) +#define PP_SEQUENCE_STATE_OFF_S0_3 REG_FIELD_PREP(PP_SEQUENCE_STATE_MASK, 0x3) +#define PP_SEQUENCE_STATE_ON_IDLE REG_FIELD_PREP(PP_SEQUENCE_STATE_MASK, 0x8) +#define PP_SEQUENCE_STATE_ON_S1_1 REG_FIELD_PREP(PP_SEQUENCE_STATE_MASK, 0x9) +#define PP_SEQUENCE_STATE_ON_S1_2 REG_FIELD_PREP(PP_SEQUENCE_STATE_MASK, 0xa) +#define PP_SEQUENCE_STATE_ON_S1_3 REG_FIELD_PREP(PP_SEQUENCE_STATE_MASK, 0xb) +#define PP_SEQUENCE_STATE_RESET REG_FIELD_PREP(PP_SEQUENCE_STATE_MASK, 0xf) #define _PP_CONTROL 0x61204 #define PP_CONTROL(pps_idx) _MMIO_PPS(pps_idx, _PP_CONTROL) -#define PANEL_UNLOCK_REGS (0xabcd << 16) -#define PANEL_UNLOCK_MASK (0xffff << 16) -#define BXT_POWER_CYCLE_DELAY_MASK 0x1f0 -#define BXT_POWER_CYCLE_DELAY_SHIFT 4 -#define EDP_FORCE_VDD (1 << 3) -#define EDP_BLC_ENABLE (1 << 2) -#define PANEL_POWER_RESET (1 << 1) -#define PANEL_POWER_ON (1 << 0) +#define PANEL_UNLOCK_MASK REG_GENMASK(31, 16) +#define PANEL_UNLOCK_REGS REG_FIELD_PREP(PANEL_UNLOCK_MASK, 0xabcd) +#define BXT_POWER_CYCLE_DELAY_MASK REG_GENMASK(8, 4) +#define EDP_FORCE_VDD 
REG_BIT(3) +#define EDP_BLC_ENABLE REG_BIT(2) +#define PANEL_POWER_RESET REG_BIT(1) +#define PANEL_POWER_ON REG_BIT(0) #define _PP_ON_DELAYS 0x61208 #define PP_ON_DELAYS(pps_idx) _MMIO_PPS(pps_idx, _PP_ON_DELAYS) -#define PANEL_PORT_SELECT_SHIFT 30 -#define PANEL_PORT_SELECT_MASK (3 << 30) -#define PANEL_PORT_SELECT_LVDS (0 << 30) -#define PANEL_PORT_SELECT_DPA (1 << 30) -#define PANEL_PORT_SELECT_DPC (2 << 30) -#define PANEL_PORT_SELECT_DPD (3 << 30) -#define PANEL_PORT_SELECT_VLV(port) ((port) << 30) -#define PANEL_POWER_UP_DELAY_MASK 0x1fff0000 -#define PANEL_POWER_UP_DELAY_SHIFT 16 -#define PANEL_LIGHT_ON_DELAY_MASK 0x1fff -#define PANEL_LIGHT_ON_DELAY_SHIFT 0 +#define PANEL_PORT_SELECT_MASK REG_GENMASK(31, 30) +#define PANEL_PORT_SELECT_LVDS REG_FIELD_PREP(PANEL_PORT_SELECT_MASK, 0) +#define PANEL_PORT_SELECT_DPA REG_FIELD_PREP(PANEL_PORT_SELECT_MASK, 1) +#define PANEL_PORT_SELECT_DPC REG_FIELD_PREP(PANEL_PORT_SELECT_MASK, 2) +#define PANEL_PORT_SELECT_DPD REG_FIELD_PREP(PANEL_PORT_SELECT_MASK, 3) +#define PANEL_PORT_SELECT_VLV(port) REG_FIELD_PREP(PANEL_PORT_SELECT_MASK, port) +#define PANEL_POWER_UP_DELAY_MASK REG_GENMASK(28, 16) +#define PANEL_LIGHT_ON_DELAY_MASK REG_GENMASK(12, 0) #define _PP_OFF_DELAYS 0x6120C #define PP_OFF_DELAYS(pps_idx) _MMIO_PPS(pps_idx, _PP_OFF_DELAYS) -#define PANEL_POWER_DOWN_DELAY_MASK 0x1fff0000 -#define PANEL_POWER_DOWN_DELAY_SHIFT 16 -#define PANEL_LIGHT_OFF_DELAY_MASK 0x1fff -#define PANEL_LIGHT_OFF_DELAY_SHIFT 0 +#define PANEL_POWER_DOWN_DELAY_MASK REG_GENMASK(28, 16) +#define PANEL_LIGHT_OFF_DELAY_MASK REG_GENMASK(12, 0) #define _PP_DIVISOR 0x61210 #define PP_DIVISOR(pps_idx) _MMIO_PPS(pps_idx, _PP_DIVISOR) -#define PP_REFERENCE_DIVIDER_MASK 0xffffff00 -#define PP_REFERENCE_DIVIDER_SHIFT 8 -#define PANEL_POWER_CYCLE_DELAY_MASK 0x1f -#define PANEL_POWER_CYCLE_DELAY_SHIFT 0 +#define PP_REFERENCE_DIVIDER_MASK REG_GENMASK(31, 8) +#define PANEL_POWER_CYCLE_DELAY_MASK REG_GENMASK(4, 0) /* Panel fitting */ #define PFIT_CONTROL _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61230) @@ -5590,9 +5681,15 @@ enum { #define PIPECONF_SINGLE_WIDE 0 #define PIPECONF_PIPE_UNLOCKED 0 #define PIPECONF_PIPE_LOCKED (1 << 25) -#define PIPECONF_PALETTE 0 -#define PIPECONF_GAMMA (1 << 24) #define PIPECONF_FORCE_BORDER (1 << 25) +#define PIPECONF_GAMMA_MODE_MASK_I9XX (1 << 24) /* gmch */ +#define PIPECONF_GAMMA_MODE_MASK_ILK (3 << 24) /* ilk-ivb */ +#define PIPECONF_GAMMA_MODE_8BIT (0 << 24) /* gmch,ilk-ivb */ +#define PIPECONF_GAMMA_MODE_10BIT (1 << 24) /* gmch,ilk-ivb */ +#define PIPECONF_GAMMA_MODE_12BIT (2 << 24) /* ilk-ivb */ +#define PIPECONF_GAMMA_MODE_SPLIT (3 << 24) /* ivb */ +#define PIPECONF_GAMMA_MODE(x) ((x) << 24) /* pass in GAMMA_MODE_MODE_* */ +#define PIPECONF_GAMMA_MODE_SHIFT 24 #define PIPECONF_INTERLACE_MASK (7 << 21) #define PIPECONF_INTERLACE_MASK_HSW (3 << 21) /* Note that pre-gen3 does not support interlaced display directly. 
Panel @@ -5998,6 +6095,7 @@ enum { #define _CUR_WM_TRANS_A_0 0x70168 #define _CUR_WM_TRANS_B_0 0x71168 #define PLANE_WM_EN (1 << 31) +#define PLANE_WM_IGNORE_LINES (1 << 30) #define PLANE_WM_LINES_SHIFT 14 #define PLANE_WM_LINES_MASK 0x1f #define PLANE_WM_BLOCKS_MASK 0x7ff /* skl+: 10 bits, icl+ 11 bits */ @@ -6124,7 +6222,7 @@ enum { #define MCURSOR_PIPE_SELECT_SHIFT 28 #define MCURSOR_PIPE_SELECT(pipe) ((pipe) << 28) #define MCURSOR_GAMMA_ENABLE (1 << 26) -#define MCURSOR_PIPE_CSC_ENABLE (1 << 24) +#define MCURSOR_PIPE_CSC_ENABLE (1 << 24) /* ilk+ */ #define MCURSOR_ROTATE_180 (1 << 15) #define MCURSOR_TRICKLE_FEED_DISABLE (1 << 14) #define _CURABASE 0x70084 @@ -6179,7 +6277,7 @@ enum { #define DISPPLANE_RGBA888 (0xf << 26) #define DISPPLANE_STEREO_ENABLE (1 << 25) #define DISPPLANE_STEREO_DISABLE 0 -#define DISPPLANE_PIPE_CSC_ENABLE (1 << 24) +#define DISPPLANE_PIPE_CSC_ENABLE (1 << 24) /* ilk+ */ #define DISPPLANE_SEL_PIPE_SHIFT 24 #define DISPPLANE_SEL_PIPE_MASK (3 << DISPPLANE_SEL_PIPE_SHIFT) #define DISPPLANE_SEL_PIPE(pipe) ((pipe) << DISPPLANE_SEL_PIPE_SHIFT) @@ -7114,11 +7212,12 @@ enum { #define _GAMMA_MODE_A 0x4a480 #define _GAMMA_MODE_B 0x4ac80 #define GAMMA_MODE(pipe) _MMIO_PIPE(pipe, _GAMMA_MODE_A, _GAMMA_MODE_B) -#define GAMMA_MODE_MODE_MASK (3 << 0) -#define GAMMA_MODE_MODE_8BIT (0 << 0) -#define GAMMA_MODE_MODE_10BIT (1 << 0) -#define GAMMA_MODE_MODE_12BIT (2 << 0) -#define GAMMA_MODE_MODE_SPLIT (3 << 0) +#define PRE_CSC_GAMMA_ENABLE (1 << 31) +#define POST_CSC_GAMMA_ENABLE (1 << 30) +#define GAMMA_MODE_MODE_8BIT (0 << 0) +#define GAMMA_MODE_MODE_10BIT (1 << 0) +#define GAMMA_MODE_MODE_12BIT (2 << 0) +#define GAMMA_MODE_MODE_SPLIT (3 << 0) /* DMC/CSR */ #define CSR_PROGRAM(i) _MMIO(0x80000 + (i) * 4) @@ -7213,8 +7312,8 @@ enum { #define GEN8_GT_VECS_IRQ (1 << 6) #define GEN8_GT_GUC_IRQ (1 << 5) #define GEN8_GT_PM_IRQ (1 << 4) -#define GEN8_GT_VCS2_IRQ (1 << 3) -#define GEN8_GT_VCS1_IRQ (1 << 2) +#define GEN8_GT_VCS1_IRQ (1 << 3) /* NB: VCS2 in bspec! */ +#define GEN8_GT_VCS0_IRQ (1 << 2) /* NB: VCS1 in bpsec! */ #define GEN8_GT_BCS_IRQ (1 << 1) #define GEN8_GT_RCS_IRQ (1 << 0) @@ -7235,8 +7334,8 @@ enum { #define GEN8_RCS_IRQ_SHIFT 0 #define GEN8_BCS_IRQ_SHIFT 16 -#define GEN8_VCS1_IRQ_SHIFT 0 -#define GEN8_VCS2_IRQ_SHIFT 16 +#define GEN8_VCS0_IRQ_SHIFT 0 /* NB: VCS1 in bspec! */ +#define GEN8_VCS1_IRQ_SHIFT 16 /* NB: VCS2 in bpsec! 
*/ #define GEN8_VECS_IRQ_SHIFT 0 #define GEN8_WD_IRQ_SHIFT 16 @@ -7622,13 +7721,13 @@ enum { #define GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE (1 << 2) /*GEN11 chicken */ -#define _PIPEA_CHICKEN 0x70038 -#define _PIPEB_CHICKEN 0x71038 -#define _PIPEC_CHICKEN 0x72038 -#define PER_PIXEL_ALPHA_BYPASS_EN (1 << 7) -#define PM_FILL_MAINTAIN_DBUF_FULLNESS (1 << 0) -#define PIPE_CHICKEN(pipe) _MMIO_PIPE(pipe, _PIPEA_CHICKEN,\ - _PIPEB_CHICKEN) +#define _PIPEA_CHICKEN 0x70038 +#define _PIPEB_CHICKEN 0x71038 +#define _PIPEC_CHICKEN 0x72038 +#define PIPE_CHICKEN(pipe) _MMIO_PIPE(pipe, _PIPEA_CHICKEN,\ + _PIPEB_CHICKEN) +#define PIXEL_ROUNDING_TRUNC_FB_PASSTHRU (1 << 15) +#define PER_PIXEL_ALPHA_BYPASS_EN (1 << 7) /* PCH */ @@ -8098,10 +8197,11 @@ enum { #define _ICL_VIDEO_DIP_PPS_ECC_B 0x613D4 #define HSW_TVIDEO_DIP_CTL(trans) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_CTL_A) +#define HSW_TVIDEO_DIP_GCP(trans) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_GCP_A) #define HSW_TVIDEO_DIP_AVI_DATA(trans, i) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_AVI_DATA_A + (i) * 4) #define HSW_TVIDEO_DIP_VS_DATA(trans, i) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_VS_DATA_A + (i) * 4) #define HSW_TVIDEO_DIP_SPD_DATA(trans, i) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_SPD_DATA_A + (i) * 4) -#define HSW_TVIDEO_DIP_GCP(trans) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_GCP_A) +#define HSW_TVIDEO_DIP_GMP_DATA(trans, i) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_GMP_DATA_A + (i) * 4) #define HSW_TVIDEO_DIP_VSC_DATA(trans, i) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_VSC_DATA_A + (i) * 4) #define ICL_VIDEO_DIP_PPS_DATA(trans, i) _MMIO_TRANS2(trans, _ICL_VIDEO_DIP_PPS_DATA_A + (i) * 4) #define ICL_VIDEO_DIP_PPS_ECC(trans, i) _MMIO_TRANS2(trans, _ICL_VIDEO_DIP_PPS_ECC_A + (i) * 4) @@ -9252,7 +9352,7 @@ enum skl_power_gate { #define TRANS_DDI_FUNC_CTL2(tran) _MMIO_TRANS2(tran, \ _TRANS_DDI_FUNC_CTL2_A) #define PORT_SYNC_MODE_ENABLE (1 << 4) -#define PORT_SYNC_MODE_MASTER_SELECT(x) ((x) < 0) +#define PORT_SYNC_MODE_MASTER_SELECT(x) ((x) << 0) #define PORT_SYNC_MODE_MASTER_SELECT_MASK (0x7 << 0) #define PORT_SYNC_MODE_MASTER_SELECT_SHIFT 0 @@ -9750,7 +9850,7 @@ enum skl_power_gate { #define DPLL_CFGCR1_KDIV(x) ((x) << 6) #define DPLL_CFGCR1_KDIV_1 (1 << 6) #define DPLL_CFGCR1_KDIV_2 (2 << 6) -#define DPLL_CFGCR1_KDIV_4 (4 << 6) +#define DPLL_CFGCR1_KDIV_3 (4 << 6) #define DPLL_CFGCR1_PDIV_MASK (0xf << 2) #define DPLL_CFGCR1_PDIV_SHIFT (2) #define DPLL_CFGCR1_PDIV(x) ((x) << 2) @@ -9819,16 +9919,29 @@ enum skl_power_gate { #define BXT_DRAM_WIDTH_X64 (0x3 << 4) #define BXT_DRAM_SIZE_MASK (0x7 << 6) #define BXT_DRAM_SIZE_SHIFT 6 -#define BXT_DRAM_SIZE_4GB (0x0 << 6) -#define BXT_DRAM_SIZE_6GB (0x1 << 6) -#define BXT_DRAM_SIZE_8GB (0x2 << 6) -#define BXT_DRAM_SIZE_12GB (0x3 << 6) -#define BXT_DRAM_SIZE_16GB (0x4 << 6) +#define BXT_DRAM_SIZE_4GBIT (0x0 << 6) +#define BXT_DRAM_SIZE_6GBIT (0x1 << 6) +#define BXT_DRAM_SIZE_8GBIT (0x2 << 6) +#define BXT_DRAM_SIZE_12GBIT (0x3 << 6) +#define BXT_DRAM_SIZE_16GBIT (0x4 << 6) +#define BXT_DRAM_TYPE_MASK (0x7 << 22) +#define BXT_DRAM_TYPE_SHIFT 22 +#define BXT_DRAM_TYPE_DDR3 (0x0 << 22) +#define BXT_DRAM_TYPE_LPDDR3 (0x1 << 22) +#define BXT_DRAM_TYPE_LPDDR4 (0x2 << 22) +#define BXT_DRAM_TYPE_DDR4 (0x4 << 22) #define SKL_MEMORY_FREQ_MULTIPLIER_HZ 266666666 #define SKL_MC_BIOS_DATA_0_0_0_MCHBAR_PCU _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5E04) #define SKL_REQ_DATA_MASK (0xF << 0) +#define SKL_MAD_INTER_CHANNEL_0_0_0_MCHBAR_MCMAIN _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5000) +#define SKL_DRAM_DDR_TYPE_MASK (0x3 << 0) +#define SKL_DRAM_DDR_TYPE_DDR4 (0 << 0) +#define 
SKL_DRAM_DDR_TYPE_DDR3 (1 << 0) +#define SKL_DRAM_DDR_TYPE_LPDDR3 (2 << 0) +#define SKL_DRAM_DDR_TYPE_LPDDR4 (3 << 0) + #define SKL_MAD_DIMM_CH0_0_0_0_MCHBAR_MCMAIN _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x500C) #define SKL_MAD_DIMM_CH1_0_0_0_MCHBAR_MCMAIN _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5010) #define SKL_DRAM_S_SHIFT 16 @@ -9840,8 +9953,21 @@ enum skl_power_gate { #define SKL_DRAM_WIDTH_X32 (0x2 << 8) #define SKL_DRAM_RANK_MASK (0x1 << 10) #define SKL_DRAM_RANK_SHIFT 10 -#define SKL_DRAM_RANK_SINGLE (0x0 << 10) -#define SKL_DRAM_RANK_DUAL (0x1 << 10) +#define SKL_DRAM_RANK_1 (0x0 << 10) +#define SKL_DRAM_RANK_2 (0x1 << 10) +#define SKL_DRAM_RANK_MASK (0x1 << 10) +#define CNL_DRAM_SIZE_MASK 0x7F +#define CNL_DRAM_WIDTH_MASK (0x3 << 7) +#define CNL_DRAM_WIDTH_SHIFT 7 +#define CNL_DRAM_WIDTH_X8 (0x0 << 7) +#define CNL_DRAM_WIDTH_X16 (0x1 << 7) +#define CNL_DRAM_WIDTH_X32 (0x2 << 7) +#define CNL_DRAM_RANK_MASK (0x3 << 9) +#define CNL_DRAM_RANK_SHIFT 9 +#define CNL_DRAM_RANK_1 (0x0 << 9) +#define CNL_DRAM_RANK_2 (0x1 << 9) +#define CNL_DRAM_RANK_3 (0x2 << 9) +#define CNL_DRAM_RANK_4 (0x3 << 9) /* Please see hsw_read_dcomp() and hsw_write_dcomp() before using this register, * since on HSW we can't write to it using I915_WRITE. */ @@ -9886,10 +10012,14 @@ enum skl_power_gate { #define _PIPE_A_CSC_COEFF_BU 0x4901c #define _PIPE_A_CSC_COEFF_RV_GV 0x49020 #define _PIPE_A_CSC_COEFF_BV 0x49024 + #define _PIPE_A_CSC_MODE 0x49028 -#define CSC_BLACK_SCREEN_OFFSET (1 << 2) -#define CSC_POSITION_BEFORE_GAMMA (1 << 1) -#define CSC_MODE_YUV_TO_RGB (1 << 0) +#define ICL_CSC_ENABLE (1 << 31) +#define ICL_OUTPUT_CSC_ENABLE (1 << 30) +#define CSC_BLACK_SCREEN_OFFSET (1 << 2) +#define CSC_POSITION_BEFORE_GAMMA (1 << 1) +#define CSC_MODE_YUV_TO_RGB (1 << 0) + #define _PIPE_A_CSC_PREOFF_HI 0x49030 #define _PIPE_A_CSC_PREOFF_ME 0x49034 #define _PIPE_A_CSC_PREOFF_LO 0x49038 @@ -9925,6 +10055,70 @@ enum skl_power_gate { #define PIPE_CSC_POSTOFF_ME(pipe) _MMIO_PIPE(pipe, _PIPE_A_CSC_POSTOFF_ME, _PIPE_B_CSC_POSTOFF_ME) #define PIPE_CSC_POSTOFF_LO(pipe) _MMIO_PIPE(pipe, _PIPE_A_CSC_POSTOFF_LO, _PIPE_B_CSC_POSTOFF_LO) +/* Pipe Output CSC */ +#define _PIPE_A_OUTPUT_CSC_COEFF_RY_GY 0x49050 +#define _PIPE_A_OUTPUT_CSC_COEFF_BY 0x49054 +#define _PIPE_A_OUTPUT_CSC_COEFF_RU_GU 0x49058 +#define _PIPE_A_OUTPUT_CSC_COEFF_BU 0x4905c +#define _PIPE_A_OUTPUT_CSC_COEFF_RV_GV 0x49060 +#define _PIPE_A_OUTPUT_CSC_COEFF_BV 0x49064 +#define _PIPE_A_OUTPUT_CSC_PREOFF_HI 0x49068 +#define _PIPE_A_OUTPUT_CSC_PREOFF_ME 0x4906c +#define _PIPE_A_OUTPUT_CSC_PREOFF_LO 0x49070 +#define _PIPE_A_OUTPUT_CSC_POSTOFF_HI 0x49074 +#define _PIPE_A_OUTPUT_CSC_POSTOFF_ME 0x49078 +#define _PIPE_A_OUTPUT_CSC_POSTOFF_LO 0x4907c + +#define _PIPE_B_OUTPUT_CSC_COEFF_RY_GY 0x49150 +#define _PIPE_B_OUTPUT_CSC_COEFF_BY 0x49154 +#define _PIPE_B_OUTPUT_CSC_COEFF_RU_GU 0x49158 +#define _PIPE_B_OUTPUT_CSC_COEFF_BU 0x4915c +#define _PIPE_B_OUTPUT_CSC_COEFF_RV_GV 0x49160 +#define _PIPE_B_OUTPUT_CSC_COEFF_BV 0x49164 +#define _PIPE_B_OUTPUT_CSC_PREOFF_HI 0x49168 +#define _PIPE_B_OUTPUT_CSC_PREOFF_ME 0x4916c +#define _PIPE_B_OUTPUT_CSC_PREOFF_LO 0x49170 +#define _PIPE_B_OUTPUT_CSC_POSTOFF_HI 0x49174 +#define _PIPE_B_OUTPUT_CSC_POSTOFF_ME 0x49178 +#define _PIPE_B_OUTPUT_CSC_POSTOFF_LO 0x4917c + +#define PIPE_CSC_OUTPUT_COEFF_RY_GY(pipe) _MMIO_PIPE(pipe,\ + _PIPE_A_OUTPUT_CSC_COEFF_RY_GY,\ + _PIPE_B_OUTPUT_CSC_COEFF_RY_GY) +#define PIPE_CSC_OUTPUT_COEFF_BY(pipe) _MMIO_PIPE(pipe, \ + _PIPE_A_OUTPUT_CSC_COEFF_BY, \ + _PIPE_B_OUTPUT_CSC_COEFF_BY) +#define 
PIPE_CSC_OUTPUT_COEFF_RU_GU(pipe) _MMIO_PIPE(pipe, \ + _PIPE_A_OUTPUT_CSC_COEFF_RU_GU, \ + _PIPE_B_OUTPUT_CSC_COEFF_RU_GU) +#define PIPE_CSC_OUTPUT_COEFF_BU(pipe) _MMIO_PIPE(pipe, \ + _PIPE_A_OUTPUT_CSC_COEFF_BU, \ + _PIPE_B_OUTPUT_CSC_COEFF_BU) +#define PIPE_CSC_OUTPUT_COEFF_RV_GV(pipe) _MMIO_PIPE(pipe, \ + _PIPE_A_OUTPUT_CSC_COEFF_RV_GV, \ + _PIPE_B_OUTPUT_CSC_COEFF_RV_GV) +#define PIPE_CSC_OUTPUT_COEFF_BV(pipe) _MMIO_PIPE(pipe, \ + _PIPE_A_OUTPUT_CSC_COEFF_BV, \ + _PIPE_B_OUTPUT_CSC_COEFF_BV) +#define PIPE_CSC_OUTPUT_PREOFF_HI(pipe) _MMIO_PIPE(pipe, \ + _PIPE_A_OUTPUT_CSC_PREOFF_HI, \ + _PIPE_B_OUTPUT_CSC_PREOFF_HI) +#define PIPE_CSC_OUTPUT_PREOFF_ME(pipe) _MMIO_PIPE(pipe, \ + _PIPE_A_OUTPUT_CSC_PREOFF_ME, \ + _PIPE_B_OUTPUT_CSC_PREOFF_ME) +#define PIPE_CSC_OUTPUT_PREOFF_LO(pipe) _MMIO_PIPE(pipe, \ + _PIPE_A_OUTPUT_CSC_PREOFF_LO, \ + _PIPE_B_OUTPUT_CSC_PREOFF_LO) +#define PIPE_CSC_OUTPUT_POSTOFF_HI(pipe) _MMIO_PIPE(pipe, \ + _PIPE_A_OUTPUT_CSC_POSTOFF_HI, \ + _PIPE_B_OUTPUT_CSC_POSTOFF_HI) +#define PIPE_CSC_OUTPUT_POSTOFF_ME(pipe) _MMIO_PIPE(pipe, \ + _PIPE_A_OUTPUT_CSC_POSTOFF_ME, \ + _PIPE_B_OUTPUT_CSC_POSTOFF_ME) +#define PIPE_CSC_OUTPUT_POSTOFF_LO(pipe) _MMIO_PIPE(pipe, \ + _PIPE_A_OUTPUT_CSC_POSTOFF_LO, \ + _PIPE_B_OUTPUT_CSC_POSTOFF_LO) + /* pipe degamma/gamma LUTs on IVB+ */ #define _PAL_PREC_INDEX_A 0x4A400 #define _PAL_PREC_INDEX_B 0x4AC00 diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index c2a5c48c7541..e9c2094ab8ea 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -22,16 +22,31 @@ * */ -#include <linux/prefetch.h> #include <linux/dma-fence-array.h> +#include <linux/irq_work.h> +#include <linux/prefetch.h> #include <linux/sched.h> #include <linux/sched/clock.h> #include <linux/sched/signal.h> #include "i915_drv.h" #include "i915_active.h" +#include "i915_globals.h" #include "i915_reset.h" +struct execute_cb { + struct list_head link; + struct irq_work work; + struct i915_sw_fence *fence; +}; + +static struct i915_global_request { + struct i915_global base; + struct kmem_cache *slab_requests; + struct kmem_cache *slab_dependencies; + struct kmem_cache *slab_execute_cbs; +} global; + static const char *i915_fence_get_driver_name(struct dma_fence *fence) { return "i915"; @@ -51,7 +66,7 @@ static const char *i915_fence_get_timeline_name(struct dma_fence *fence) if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) return "signaled"; - return to_request(fence)->timeline->name; + return to_request(fence)->gem_context->name ?: "[i915]"; } static bool i915_fence_signaled(struct dma_fence *fence) @@ -68,7 +83,9 @@ static signed long i915_fence_wait(struct dma_fence *fence, bool interruptible, signed long timeout) { - return i915_request_wait(to_request(fence), interruptible, timeout); + return i915_request_wait(to_request(fence), + interruptible | I915_WAIT_PRIORITY, + timeout); } static void i915_fence_release(struct dma_fence *fence) @@ -84,7 +101,7 @@ static void i915_fence_release(struct dma_fence *fence) */ i915_sw_fence_fini(&rq->submit); - kmem_cache_free(rq->i915->requests, rq); + kmem_cache_free(global.slab_requests, rq); } const struct dma_fence_ops i915_fence_ops = { @@ -150,7 +167,6 @@ static void advance_ring(struct i915_request *request) * is just about to be. Either works, if we miss the last two * noops - they are safe to be replayed on a reset. 
*/ - GEM_TRACE("marking %s as inactive\n", ring->timeline->name); tail = READ_ONCE(request->tail); list_del(&ring->active_link); } else { @@ -177,12 +193,10 @@ static void free_capture_list(struct i915_request *request) static void __retire_engine_request(struct intel_engine_cs *engine, struct i915_request *rq) { - GEM_TRACE("%s(%s) fence %llx:%lld, global=%d, current %d:%d\n", + GEM_TRACE("%s(%s) fence %llx:%lld, current %d\n", __func__, engine->name, rq->fence.context, rq->fence.seqno, - rq->global_seqno, - hwsp_seqno(rq), - intel_engine_get_seqno(engine)); + hwsp_seqno(rq)); GEM_BUG_ON(!i915_request_completed(rq)); @@ -241,12 +255,10 @@ static void i915_request_retire(struct i915_request *request) { struct i915_active_request *active, *next; - GEM_TRACE("%s fence %llx:%lld, global=%d, current %d:%d\n", + GEM_TRACE("%s fence %llx:%lld, current %d\n", request->engine->name, request->fence.context, request->fence.seqno, - request->global_seqno, - hwsp_seqno(request), - intel_engine_get_seqno(request->engine)); + hwsp_seqno(request)); lockdep_assert_held(&request->i915->drm.struct_mutex); GEM_BUG_ON(!i915_sw_fence_signaled(&request->submit)); @@ -288,15 +300,13 @@ static void i915_request_retire(struct i915_request *request) i915_request_remove_from_client(request); - /* Retirement decays the ban score as it is a sign of ctx progress */ - atomic_dec_if_positive(&request->gem_context->ban_score); intel_context_unpin(request->hw_context); __retire_engine_upto(request->engine, request); unreserve_gt(request->i915); - i915_sched_node_fini(request->i915, &request->sched); + i915_sched_node_fini(&request->sched); i915_request_put(request); } @@ -305,12 +315,10 @@ void i915_request_retire_upto(struct i915_request *rq) struct intel_ring *ring = rq->ring; struct i915_request *tmp; - GEM_TRACE("%s fence %llx:%lld, global=%d, current %d:%d\n", + GEM_TRACE("%s fence %llx:%lld, current %d\n", rq->engine->name, rq->fence.context, rq->fence.seqno, - rq->global_seqno, - hwsp_seqno(rq), - intel_engine_get_seqno(rq->engine)); + hwsp_seqno(rq)); lockdep_assert_held(&rq->i915->drm.struct_mutex); GEM_BUG_ON(!i915_request_completed(rq)); @@ -326,9 +334,67 @@ void i915_request_retire_upto(struct i915_request *rq) } while (tmp != rq); } -static u32 timeline_get_seqno(struct i915_timeline *tl) +static void irq_execute_cb(struct irq_work *wrk) { - return tl->seqno += 1 + tl->has_initial_breadcrumb; + struct execute_cb *cb = container_of(wrk, typeof(*cb), work); + + i915_sw_fence_complete(cb->fence); + kmem_cache_free(global.slab_execute_cbs, cb); +} + +static void __notify_execute_cb(struct i915_request *rq) +{ + struct execute_cb *cb; + + lockdep_assert_held(&rq->lock); + + if (list_empty(&rq->execute_cb)) + return; + + list_for_each_entry(cb, &rq->execute_cb, link) + irq_work_queue(&cb->work); + + /* + * XXX Rollback on __i915_request_unsubmit() + * + * In the future, perhaps when we have an active time-slicing scheduler, + * it will be interesting to unsubmit parallel execution and remove + * busywaits from the GPU until their master is restarted. This is + * quite hairy, we have to carefully rollback the fence and do a + * preempt-to-idle cycle on the target engine, all the while the + * master execute_cb may refire. 
+ */ + INIT_LIST_HEAD(&rq->execute_cb); +} + +static int +i915_request_await_execution(struct i915_request *rq, + struct i915_request *signal, + gfp_t gfp) +{ + struct execute_cb *cb; + + if (i915_request_is_active(signal)) + return 0; + + cb = kmem_cache_alloc(global.slab_execute_cbs, gfp); + if (!cb) + return -ENOMEM; + + cb->fence = &rq->submit; + i915_sw_fence_await(cb->fence); + init_irq_work(&cb->work, irq_execute_cb); + + spin_lock_irq(&signal->lock); + if (i915_request_is_active(signal)) { + i915_sw_fence_complete(cb->fence); + kmem_cache_free(global.slab_execute_cbs, cb); + } else { + list_add_tail(&cb->link, &signal->execute_cb); + } + spin_unlock_irq(&signal->lock); + + return 0; } static void move_to_timeline(struct i915_request *request, @@ -342,42 +408,33 @@ static void move_to_timeline(struct i915_request *request, spin_unlock(&request->timeline->lock); } -static u32 next_global_seqno(struct i915_timeline *tl) -{ - if (!++tl->seqno) - ++tl->seqno; - return tl->seqno; -} - void __i915_request_submit(struct i915_request *request) { struct intel_engine_cs *engine = request->engine; - u32 seqno; - GEM_TRACE("%s fence %llx:%lld -> global=%d, current %d:%d\n", + GEM_TRACE("%s fence %llx:%lld -> current %d\n", engine->name, request->fence.context, request->fence.seqno, - engine->timeline.seqno + 1, - hwsp_seqno(request), - intel_engine_get_seqno(engine)); + hwsp_seqno(request)); GEM_BUG_ON(!irqs_disabled()); lockdep_assert_held(&engine->timeline.lock); - GEM_BUG_ON(request->global_seqno); - - seqno = next_global_seqno(&engine->timeline); - GEM_BUG_ON(!seqno); - GEM_BUG_ON(intel_engine_signaled(engine, seqno)); + if (i915_gem_context_is_banned(request->gem_context)) + i915_request_skip(request, -EIO); /* We may be recursing from the signal callback of another i915 fence */ spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING); + GEM_BUG_ON(test_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags)); set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags); - request->global_seqno = seqno; + if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags) && !i915_request_enable_breadcrumb(request)) intel_engine_queue_breadcrumbs(engine); + + __notify_execute_cb(request); + spin_unlock(&request->lock); engine->emit_fini_breadcrumb(request, @@ -406,12 +463,10 @@ void __i915_request_unsubmit(struct i915_request *request) { struct intel_engine_cs *engine = request->engine; - GEM_TRACE("%s fence %llx:%lld <- global=%d, current %d:%d\n", + GEM_TRACE("%s fence %llx:%lld, current %d\n", engine->name, request->fence.context, request->fence.seqno, - request->global_seqno, - hwsp_seqno(request), - intel_engine_get_seqno(engine)); + hwsp_seqno(request)); GEM_BUG_ON(!irqs_disabled()); lockdep_assert_held(&engine->timeline.lock); @@ -420,18 +475,25 @@ void __i915_request_unsubmit(struct i915_request *request) * Only unwind in reverse order, required so that the per-context list * is kept in seqno/ring order. */ - GEM_BUG_ON(!request->global_seqno); - GEM_BUG_ON(request->global_seqno != engine->timeline.seqno); - GEM_BUG_ON(intel_engine_has_completed(engine, request->global_seqno)); - engine->timeline.seqno--; /* We may be recursing from the signal callback of another i915 fence */ spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING); - request->global_seqno = 0; + + /* + * As we do not allow WAIT to preempt inflight requests, + * once we have executed a request, along with triggering + * any execution callbacks, we must preserve its ordering + * within the non-preemptible FIFO. 
+ */ + BUILD_BUG_ON(__NO_PREEMPTION & ~I915_PRIORITY_MASK); /* only internal */ + request->sched.attr.priority |= __NO_PREEMPTION; + if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags)) i915_request_cancel_breadcrumb(request); + GEM_BUG_ON(!test_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags)); clear_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags); + spin_unlock(&request->lock); /* Transfer back from the global per-engine timeline to per-context */ @@ -518,7 +580,7 @@ i915_request_alloc_slow(struct intel_context *ce) ring_retire_requests(ring); out: - return kmem_cache_alloc(ce->gem_context->i915->requests, GFP_KERNEL); + return kmem_cache_alloc(global.slab_requests, GFP_KERNEL); } static int add_timeline_barrier(struct i915_request *rq) @@ -539,8 +601,10 @@ struct i915_request * i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx) { struct drm_i915_private *i915 = engine->i915; - struct i915_request *rq; struct intel_context *ce; + struct i915_timeline *tl; + struct i915_request *rq; + u32 seqno; int ret; lockdep_assert_held(&i915->drm.struct_mutex); @@ -556,8 +620,9 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx) * ABI: Before userspace accesses the GPU (e.g. execbuffer), report * EIO if the GPU is already wedged. */ - if (i915_terminally_wedged(&i915->gpu_error)) - return ERR_PTR(-EIO); + ret = i915_terminally_wedged(i915); + if (ret) + return ERR_PTR(ret); /* * Pinning the contexts may generate requests in order to acquire @@ -569,6 +634,7 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx) return ERR_CAST(ce); reserve_gt(i915); + mutex_lock(&ce->ring->timeline->mutex); /* Move our oldest request to the slab-cache (if not in use!) */ rq = list_first_entry(&ce->ring->request_list, typeof(*rq), ring_link); @@ -605,7 +671,7 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx) * * Do not use kmem_cache_zalloc() here! 
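Editor's aside on the priority layout used by the __NO_PREEMPTION boost in __i915_request_unsubmit() above: the low I915_USER_PRIORITY_SHIFT bits carry internal hints (WAIT, NEWCLIENT, NOSEMAPHORE) beneath the user priority, which is exactly what the BUILD_BUG_ON asserts. A standalone sketch using the constants from the i915_scheduler.h hunk later in this diff (plain C, illustrative only):

#include <stdio.h>

#define I915_USER_PRIORITY_SHIFT 3
#define I915_USER_PRIORITY(x)    ((x) << I915_USER_PRIORITY_SHIFT)
#define I915_PRIORITY_MASK       ((1 << I915_USER_PRIORITY_SHIFT) - 1)

#define I915_PRIORITY_WAIT        (1 << 0)
#define I915_PRIORITY_NEWCLIENT   (1 << 1)
#define I915_PRIORITY_NOSEMAPHORE (1 << 2)
#define __NO_PREEMPTION           (I915_PRIORITY_WAIT)

int main(void)
{
	int prio = I915_USER_PRIORITY(2);	/* user/context priority 2 */

	prio |= __NO_PREEMPTION;		/* internal boost, low bits only */

	printf("user part: %d, internal bits: 0x%x\n",
	       prio >> I915_USER_PRIORITY_SHIFT,
	       (unsigned int)(prio & I915_PRIORITY_MASK));
	/* the BUILD_BUG_ON(__NO_PREEMPTION & ~I915_PRIORITY_MASK) condition: */
	printf("fits in mask: %d\n", !(__NO_PREEMPTION & ~I915_PRIORITY_MASK));
	return 0;
}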
*/ - rq = kmem_cache_alloc(i915->requests, + rq = kmem_cache_alloc(global.slab_requests, GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN); if (unlikely(!rq)) { rq = i915_request_alloc_slow(ce); @@ -615,24 +681,28 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx) } } - rq->rcustate = get_state_synchronize_rcu(); - INIT_LIST_HEAD(&rq->active_list); + INIT_LIST_HEAD(&rq->execute_cb); + + tl = ce->ring->timeline; + ret = i915_timeline_get_seqno(tl, rq, &seqno); + if (ret) + goto err_free; + rq->i915 = i915; rq->engine = engine; rq->gem_context = ctx; rq->hw_context = ce; rq->ring = ce->ring; - rq->timeline = ce->ring->timeline; + rq->timeline = tl; GEM_BUG_ON(rq->timeline == &engine->timeline); - rq->hwsp_seqno = rq->timeline->hwsp_seqno; + rq->hwsp_seqno = tl->hwsp_seqno; + rq->hwsp_cacheline = tl->hwsp_cacheline; + rq->rcustate = get_state_synchronize_rcu(); /* acts as smp_mb() */ spin_lock_init(&rq->lock); - dma_fence_init(&rq->fence, - &i915_fence_ops, - &rq->lock, - rq->timeline->fence_context, - timeline_get_seqno(rq->timeline)); + dma_fence_init(&rq->fence, &i915_fence_ops, &rq->lock, + tl->fence_context, seqno); /* We bump the ref for the fence chain */ i915_sw_fence_init(&i915_request_get(rq)->submit, submit_notify); @@ -640,7 +710,6 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx) i915_sched_node_init(&rq->sched); /* No zalloc, must clear what we need by hand */ - rq->global_seqno = 0; rq->file_priv = NULL; rq->batch = NULL; rq->capture_list = NULL; @@ -693,14 +762,63 @@ err_unwind: GEM_BUG_ON(!list_empty(&rq->sched.signalers_list)); GEM_BUG_ON(!list_empty(&rq->sched.waiters_list)); - kmem_cache_free(i915->requests, rq); +err_free: + kmem_cache_free(global.slab_requests, rq); err_unreserve: + mutex_unlock(&ce->ring->timeline->mutex); unreserve_gt(i915); intel_context_unpin(ce); return ERR_PTR(ret); } static int +emit_semaphore_wait(struct i915_request *to, + struct i915_request *from, + gfp_t gfp) +{ + u32 hwsp_offset; + u32 *cs; + int err; + + GEM_BUG_ON(!from->timeline->has_initial_breadcrumb); + GEM_BUG_ON(INTEL_GEN(to->i915) < 8); + + /* We need to pin the signaler's HWSP until we are finished reading. */ + err = i915_timeline_read_hwsp(from, to, &hwsp_offset); + if (err) + return err; + + /* Only submit our spinner after the signaler is running! */ + err = i915_request_await_execution(to, from, gfp); + if (err) + return err; + + cs = intel_ring_begin(to, 4); + if (IS_ERR(cs)) + return PTR_ERR(cs); + + /* + * Using greater-than-or-equal here means we have to worry + * about seqno wraparound. To side step that issue, we swap + * the timeline HWSP upon wrapping, so that everyone listening + * for the old (pre-wrap) values do not see the much smaller + * (post-wrap) values than they were expecting (and so wait + * forever). 
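To make the wraparound hazard described in the comment above concrete: with unsigned 32-bit seqnos, a greater-than-or-equal poll armed with a large pre-wrap target can never be satisfied by the small post-wrap values, which is why the timeline swaps its HWSP on wrap rather than letting old waiters observe the wrapped seqno. A tiny standalone demonstration (plain C, purely illustrative):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t target = 0xfffffff0;	/* seqno awaited before the wrap */
	uint32_t hwsp = 0x00000010;	/* value written after wrapping */

	/* The GTE poll never completes against the post-wrap value... */
	printf("hwsp >= target: %s\n", hwsp >= target ? "yes" : "no (stuck)");

	/*
	 * ...hence the patch hands out a fresh HWSP cacheline on wrap so
	 * that pre-wrap waiters keep watching the old, monotonic values.
	 */
	return 0;
}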
+ */ + *cs++ = MI_SEMAPHORE_WAIT | + MI_SEMAPHORE_GLOBAL_GTT | + MI_SEMAPHORE_POLL | + MI_SEMAPHORE_SAD_GTE_SDD; + *cs++ = from->fence.seqno; + *cs++ = hwsp_offset; + *cs++ = 0; + + intel_ring_advance(to, cs); + to->sched.flags |= I915_SCHED_HAS_SEMAPHORE; + return 0; +} + +static int i915_request_await_request(struct i915_request *to, struct i915_request *from) { int ret; @@ -712,9 +830,7 @@ i915_request_await_request(struct i915_request *to, struct i915_request *from) return 0; if (to->engine->schedule) { - ret = i915_sched_node_add_dependency(to->i915, - &to->sched, - &from->sched); + ret = i915_sched_node_add_dependency(&to->sched, &from->sched); if (ret < 0) return ret; } @@ -723,6 +839,9 @@ i915_request_await_request(struct i915_request *to, struct i915_request *from) ret = i915_sw_fence_await_sw_fence_gfp(&to->submit, &from->submit, I915_FENCE_GFP); + } else if (intel_engine_has_semaphores(to->engine) && + to->gem_context->sched.priority >= I915_PRIORITY_NORMAL) { + ret = emit_semaphore_wait(to, from, I915_FENCE_GFP); } else { ret = i915_sw_fence_await_dma_fence(&to->submit, &from->fence, 0, @@ -873,6 +992,60 @@ void i915_request_skip(struct i915_request *rq, int error) memset(vaddr + head, 0, rq->postfix - head); } +static struct i915_request * +__i915_request_add_to_timeline(struct i915_request *rq) +{ + struct i915_timeline *timeline = rq->timeline; + struct i915_request *prev; + + /* + * Dependency tracking and request ordering along the timeline + * is special cased so that we can eliminate redundant ordering + * operations while building the request (we know that the timeline + * itself is ordered, and here we guarantee it). + * + * As we know we will need to emit tracking along the timeline, + * we embed the hooks into our request struct -- at the cost of + * having to have specialised no-allocation interfaces (which will + * be beneficial elsewhere). + * + * A second benefit to open-coding i915_request_await_request is + * that we can apply a slight variant of the rules specialised + * for timelines that jump between engines (such as virtual engines). + * If we consider the case of virtual engine, we must emit a dma-fence + * to prevent scheduling of the second request until the first is + * complete (to maximise our greedy late load balancing) and this + * precludes optimising to use semaphores serialisation of a single + * timeline across engines. + */ + prev = i915_active_request_raw(&timeline->last_request, + &rq->i915->drm.struct_mutex); + if (prev && !i915_request_completed(prev)) { + if (is_power_of_2(prev->engine->mask | rq->engine->mask)) + i915_sw_fence_await_sw_fence(&rq->submit, + &prev->submit, + &rq->submitq); + else + __i915_sw_fence_await_dma_fence(&rq->submit, + &prev->fence, + &rq->dmaq); + if (rq->engine->schedule) + __i915_sched_node_add_dependency(&rq->sched, + &prev->sched, + &rq->dep, + 0); + } + + spin_lock_irq(&timeline->lock); + list_add_tail(&rq->link, &timeline->requests); + spin_unlock_irq(&timeline->lock); + + GEM_BUG_ON(timeline->seqno != rq->fence.seqno); + __i915_active_request_set(&timeline->last_request, rq); + + return prev; +} + /* * NB: This function is not allowed to fail. 
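In __i915_request_add_to_timeline() above, is_power_of_2(prev->engine->mask | rq->engine->mask) is a compact test for "both requests target the same single engine": each engine mask has exactly one bit set, so the union stays a power of two only when the two bits coincide; otherwise the dma-fence await path is taken. A standalone illustration (plain C; is_power_of_2() reimplemented here for the example):

#include <stdbool.h>
#include <stdio.h>

static bool is_power_of_2(unsigned int x)
{
	return x && !(x & (x - 1));
}

int main(void)
{
	unsigned int rcs0 = 1u << 0;	/* one bit per engine */
	unsigned int vcs0 = 1u << 2;

	/* same engine: the union is still a single bit */
	printf("rcs0|rcs0 -> %d\n", is_power_of_2(rcs0 | rcs0));	/* 1 */
	/* different engines: two bits set, so order via the submit fence */
	printf("rcs0|vcs0 -> %d\n", is_power_of_2(rcs0 | vcs0));	/* 0 */
	return 0;
}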
Doing so would mean the the * request is not being tracked for completion but the work itself is @@ -889,7 +1062,7 @@ void i915_request_add(struct i915_request *request) GEM_TRACE("%s fence %llx:%lld\n", engine->name, request->fence.context, request->fence.seqno); - lockdep_assert_held(&request->i915->drm.struct_mutex); + lockdep_assert_held(&request->timeline->mutex); trace_i915_request_add(request); /* @@ -917,37 +1090,12 @@ void i915_request_add(struct i915_request *request) GEM_BUG_ON(IS_ERR(cs)); request->postfix = intel_ring_offset(request, cs); - /* - * Seal the request and mark it as pending execution. Note that - * we may inspect this state, without holding any locks, during - * hangcheck. Hence we apply the barrier to ensure that we do not - * see a more recent value in the hws than we are tracking. - */ - - prev = i915_active_request_raw(&timeline->last_request, - &request->i915->drm.struct_mutex); - if (prev && !i915_request_completed(prev)) { - i915_sw_fence_await_sw_fence(&request->submit, &prev->submit, - &request->submitq); - if (engine->schedule) - __i915_sched_node_add_dependency(&request->sched, - &prev->sched, - &request->dep, - 0); - } - - spin_lock_irq(&timeline->lock); - list_add_tail(&request->link, &timeline->requests); - spin_unlock_irq(&timeline->lock); - - GEM_BUG_ON(timeline->seqno != request->fence.seqno); - __i915_active_request_set(&timeline->last_request, request); + prev = __i915_request_add_to_timeline(request); list_add_tail(&request->ring_link, &ring->request_list); - if (list_is_first(&request->ring_link, &ring->request_list)) { - GEM_TRACE("marking %s as active\n", ring->timeline->name); + if (list_is_first(&request->ring_link, &ring->request_list)) list_add(&ring->active_link, &request->i915->gt.active_rings); - } + request->i915->gt.active_engines |= request->engine->mask; request->emitted_jiffies = jiffies; /* @@ -967,6 +1115,21 @@ void i915_request_add(struct i915_request *request) struct i915_sched_attr attr = request->gem_context->sched; /* + * Boost actual workloads past semaphores! + * + * With semaphores we spin on one engine waiting for another, + * simply to reduce the latency of starting our work when + * the signaler completes. However, if there is any other + * work that we could be doing on this engine instead, that + * is better utilisation and will reduce the overall duration + * of the current work. To avoid PI boosting a semaphore + * far in the distance past over useful work, we keep a history + * of any semaphore use along our dependency chain. + */ + if (!(request->sched.flags & I915_SCHED_HAS_SEMAPHORE)) + attr.priority |= I915_PRIORITY_NOSEMAPHORE; + + /* * Boost priorities to new clients (new request flows). * * Allow interactive/synchronous clients to jump ahead of @@ -1000,6 +1163,8 @@ void i915_request_add(struct i915_request *request) */ if (prev && i915_request_completed(prev)) i915_request_retire_upto(prev); + + mutex_unlock(&request->timeline->mutex); } static unsigned long local_clock_us(unsigned int *cpu) @@ -1136,8 +1301,23 @@ long i915_request_wait(struct i915_request *rq, if (__i915_spin_request(rq, state, 5)) goto out; - if (flags & I915_WAIT_PRIORITY) + /* + * This client is about to stall waiting for the GPU. In many cases + * this is undesirable and limits the throughput of the system, as + * many clients cannot continue processing user input/output whilst + * blocked. RPS autotuning may take tens of milliseconds to respond + * to the GPU load and thus incurs additional latency for the client. 
+ * We can circumvent that by promoting the GPU frequency to maximum + * before we sleep. This makes the GPU throttle up much more quickly + * (good for benchmarks and user experience, e.g. window animations), + * but at a cost of spending more power processing the workload + * (bad for battery). + */ + if (flags & I915_WAIT_PRIORITY) { + if (!i915_request_started(rq) && INTEL_GEN(rq->i915) >= 6) + gen6_rps_boost(rq); i915_schedule_bump_priority(rq, I915_PRIORITY_WAIT); + } wait.tsk = current; if (dma_fence_add_callback(&rq->fence, &wait.cb, request_wait_wake)) @@ -1179,11 +1359,66 @@ void i915_retire_requests(struct drm_i915_private *i915) if (!i915->gt.active_requests) return; - list_for_each_entry_safe(ring, tmp, &i915->gt.active_rings, active_link) + list_for_each_entry_safe(ring, tmp, + &i915->gt.active_rings, active_link) { + intel_ring_get(ring); /* last rq holds reference! */ ring_retire_requests(ring); + intel_ring_put(ring); + } } #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) #include "selftests/mock_request.c" #include "selftests/i915_request.c" #endif + +static void i915_global_request_shrink(void) +{ + kmem_cache_shrink(global.slab_dependencies); + kmem_cache_shrink(global.slab_execute_cbs); + kmem_cache_shrink(global.slab_requests); +} + +static void i915_global_request_exit(void) +{ + kmem_cache_destroy(global.slab_dependencies); + kmem_cache_destroy(global.slab_execute_cbs); + kmem_cache_destroy(global.slab_requests); +} + +static struct i915_global_request global = { { + .shrink = i915_global_request_shrink, + .exit = i915_global_request_exit, +} }; + +int __init i915_global_request_init(void) +{ + global.slab_requests = KMEM_CACHE(i915_request, + SLAB_HWCACHE_ALIGN | + SLAB_RECLAIM_ACCOUNT | + SLAB_TYPESAFE_BY_RCU); + if (!global.slab_requests) + return -ENOMEM; + + global.slab_execute_cbs = KMEM_CACHE(execute_cb, + SLAB_HWCACHE_ALIGN | + SLAB_RECLAIM_ACCOUNT | + SLAB_TYPESAFE_BY_RCU); + if (!global.slab_execute_cbs) + goto err_requests; + + global.slab_dependencies = KMEM_CACHE(i915_dependency, + SLAB_HWCACHE_ALIGN | + SLAB_RECLAIM_ACCOUNT); + if (!global.slab_dependencies) + goto err_execute_cbs; + + i915_global_register(&global.base); + return 0; + +err_execute_cbs: + kmem_cache_destroy(global.slab_execute_cbs); +err_requests: + kmem_cache_destroy(global.slab_requests); + return -ENOMEM; +} diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h index 40f3e8dcbdd5..cd6c130964cd 100644 --- a/drivers/gpu/drm/i915/i915_request.h +++ b/drivers/gpu/drm/i915/i915_request.h @@ -29,6 +29,7 @@ #include "i915_gem.h" #include "i915_scheduler.h" +#include "i915_selftest.h" #include "i915_sw_fence.h" #include <uapi/drm/i915_drm.h> @@ -37,6 +38,7 @@ struct drm_file; struct drm_i915_gem_object; struct i915_request; struct i915_timeline; +struct i915_timeline_cacheline; struct i915_capture_list { struct i915_capture_list *next; @@ -126,7 +128,11 @@ struct i915_request { * It is used by the driver to then queue the request for execution. */ struct i915_sw_fence submit; - wait_queue_entry_t submitq; + union { + wait_queue_entry_t submitq; + struct i915_sw_dma_fence_cb dmaq; + }; + struct list_head execute_cb; /* * A list of everyone we wait upon, and everyone who waits upon us. @@ -147,13 +153,15 @@ struct i915_request { */ const u32 *hwsp_seqno; - /** - * GEM sequence number associated with this request on the - * global execution timeline. It is zero when the request is not - * on the HW queue (i.e. not on the engine timeline list). 
- * Its value is guarded by the timeline spinlock. + /* + * If we need to access the timeline's seqno for this request in + * another request, we need to keep a read reference to this associated + * cacheline, so that we do not free and recycle it before the foreign + * observers have completed. Hence, we keep a pointer to the cacheline + * inside the timeline's HWSP vma, but it is only valid while this + * request has not completed and guarded by the timeline mutex. */ - u32 global_seqno; + struct i915_timeline_cacheline *hwsp_cacheline; /** Position in the ring of the start of the request */ u32 head; @@ -204,6 +212,11 @@ struct i915_request { struct drm_i915_file_private *file_priv; /** file_priv list entry for this request */ struct list_head client_link; + + I915_SELFTEST_DECLARE(struct { + struct list_head link; + unsigned long delay; + } mock;) }; #define I915_FENCE_GFP (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN) @@ -247,30 +260,6 @@ i915_request_put(struct i915_request *rq) dma_fence_put(&rq->fence); } -/** - * i915_request_global_seqno - report the current global seqno - * @request - the request - * - * A request is assigned a global seqno only when it is on the hardware - * execution queue. The global seqno can be used to maintain a list of - * requests on the same engine in retirement order, for example for - * constructing a priority queue for waiting. Prior to its execution, or - * if it is subsequently removed in the event of preemption, its global - * seqno is zero. As both insertion and removal from the execution queue - * may operate in IRQ context, it is not guarded by the usual struct_mutex - * BKL. Instead those relying on the global seqno must be prepared for its - * value to change between reads. Only when the request is complete can - * the global seqno be stable (due to the memory barriers on submitting - * the commands to the hardware to write the breadcrumb, if the HWS shows - * that it has passed the global seqno and the global seqno is unchanged - * after the read, it is indeed complete). - */ -static inline u32 -i915_request_global_seqno(const struct i915_request *request) -{ - return READ_ONCE(request->global_seqno); -} - int i915_request_await_object(struct i915_request *to, struct drm_i915_gem_object *obj, bool write); @@ -358,10 +347,27 @@ static inline bool __i915_request_has_started(const struct i915_request *rq) * i915_request_started - check if the request has begun being executed * @rq: the request * - * Returns true if the request has been submitted to hardware, and the hardware - * has advanced passed the end of the previous request and so should be either - * currently processing the request (though it may be preempted and so - * not necessarily the next request to complete) or have completed the request. + * If the timeline is not using initial breadcrumbs, a request is + * considered started if the previous request on its timeline (i.e. + * context) has been signaled. + * + * If the timeline is using semaphores, it will also be emitting an + * "initial breadcrumb" after the semaphores are complete and just before + * it began executing the user payload. A request can therefore be active + * on the HW and not yet started as it is still busywaiting on its + * dependencies (via HW semaphores). + * + * If the request has started, its dependencies will have been signaled + * (either by fences or by semaphores) and it will have begun processing + * the user payload. 
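The rewritten kerneldoc around i915_request_started() distinguishes three questions about a request: whether it is active (in the engine's execution queue), started (the breadcrumb emitted just before the user payload has landed in the HWSP), and completed. A toy model of those predicates as the comment describes them — ignoring wraparound and the no-initial-breadcrumb case (plain C, invented names, editor's illustration only):

#include <stdbool.h>
#include <stdio.h>

struct toy_request {
	unsigned int init_seqno;	/* breadcrumb before the payload */
	unsigned int fini_seqno;	/* breadcrumb after the payload */
	unsigned int hwsp;		/* last seqno the GPU wrote */
	bool on_hw;			/* still in the execution queue? */
};

static bool started(const struct toy_request *rq)
{
	/* the pre-payload breadcrumb has been written: payload reached */
	return rq->hwsp >= rq->init_seqno;
}

static bool completed(const struct toy_request *rq)
{
	return rq->hwsp >= rq->fini_seqno;
}

int main(void)
{
	/* busywaiting on a semaphore: active on HW but not yet started */
	struct toy_request rq = { .init_seqno = 2, .fini_seqno = 3,
				  .hwsp = 1, .on_hw = true };

	printf("active=%d started=%d completed=%d\n",
	       rq.on_hw, started(&rq), completed(&rq));

	rq.hwsp = 2;	/* initial breadcrumb lands: payload executing */
	printf("active=%d started=%d completed=%d\n",
	       rq.on_hw, started(&rq), completed(&rq));

	rq.hwsp = 3;	/* final breadcrumb: completed, off the HW queue */
	rq.on_hw = false;
	printf("active=%d started=%d completed=%d\n",
	       rq.on_hw, started(&rq), completed(&rq));
	return 0;
}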
+ * + * However, even if a request has started, it may have been preempted and + * so no longer active, or it may have already completed. + * + * See also i915_request_is_active(). + * + * Returns true if the request has begun executing the user payload, or + * has completed: */ static inline bool i915_request_started(const struct i915_request *rq) { diff --git a/drivers/gpu/drm/i915/i915_reset.c b/drivers/gpu/drm/i915/i915_reset.c index 0e0ddf2e6815..2f25ed702ba0 100644 --- a/drivers/gpu/drm/i915/i915_reset.c +++ b/drivers/gpu/drm/i915/i915_reset.c @@ -22,24 +22,15 @@ static void engine_skip_context(struct i915_request *rq) { struct intel_engine_cs *engine = rq->engine; struct i915_gem_context *hung_ctx = rq->gem_context; - struct i915_timeline *timeline = rq->timeline; lockdep_assert_held(&engine->timeline.lock); - GEM_BUG_ON(timeline == &engine->timeline); - spin_lock(&timeline->lock); - - if (i915_request_is_active(rq)) { - list_for_each_entry_continue(rq, - &engine->timeline.requests, link) - if (rq->gem_context == hung_ctx) - i915_request_skip(rq, -EIO); - } - - list_for_each_entry(rq, &timeline->requests, link) - i915_request_skip(rq, -EIO); + if (!i915_request_is_active(rq)) + return; - spin_unlock(&timeline->lock); + list_for_each_entry_continue(rq, &engine->timeline.requests, link) + if (rq->gem_context == hung_ctx) + i915_request_skip(rq, -EIO); } static void client_mark_guilty(struct drm_i915_file_private *file_priv, @@ -68,23 +59,29 @@ static void client_mark_guilty(struct drm_i915_file_private *file_priv, static bool context_mark_guilty(struct i915_gem_context *ctx) { - unsigned int score; - bool banned, bannable; + unsigned long prev_hang; + bool banned; + int i; atomic_inc(&ctx->guilty_count); - bannable = i915_gem_context_is_bannable(ctx); - score = atomic_add_return(CONTEXT_SCORE_GUILTY, &ctx->ban_score); - banned = score >= CONTEXT_SCORE_BAN_THRESHOLD; - - /* Cool contexts don't accumulate client ban score */ - if (!bannable) + /* Cool contexts are too cool to be banned! (Used for reset testing.) */ + if (!i915_gem_context_is_bannable(ctx)) return false; + /* Record the timestamp for the last N hangs */ + prev_hang = ctx->hang_timestamp[0]; + for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp) - 1; i++) + ctx->hang_timestamp[i] = ctx->hang_timestamp[i + 1]; + ctx->hang_timestamp[i] = jiffies; + + /* If we have hung N+1 times in rapid succession, we ban the context! */ + banned = !i915_gem_context_is_recoverable(ctx); + if (time_before(jiffies, prev_hang + CONTEXT_FAST_HANG_JIFFIES)) + banned = true; if (banned) { - DRM_DEBUG_DRIVER("context %s: guilty %d, score %u, banned\n", - ctx->name, atomic_read(&ctx->guilty_count), - score); + DRM_DEBUG_DRIVER("context %s: guilty %d, banned\n", + ctx->name, atomic_read(&ctx->guilty_count)); i915_gem_context_set_banned(ctx); } @@ -101,6 +98,12 @@ static void context_mark_innocent(struct i915_gem_context *ctx) void i915_reset_request(struct i915_request *rq, bool guilty) { + GEM_TRACE("%s rq=%llx:%lld, guilty? 
%s\n", + rq->engine->name, + rq->fence.context, + rq->fence.seqno, + yesno(guilty)); + lockdep_assert_held(&rq->engine->timeline.lock); GEM_BUG_ON(i915_request_completed(rq)); @@ -119,8 +122,10 @@ static void gen3_stop_engine(struct intel_engine_cs *engine) struct drm_i915_private *dev_priv = engine->i915; const u32 base = engine->mmio_base; + GEM_TRACE("%s\n", engine->name); + if (intel_engine_stop_cs(engine)) - DRM_DEBUG_DRIVER("%s: timed out on STOP_RING\n", engine->name); + GEM_TRACE("%s: timed out on STOP_RING\n", engine->name); I915_WRITE_FW(RING_HEAD(base), I915_READ_FW(RING_TAIL(base))); POSTING_READ_FW(RING_HEAD(base)); /* paranoia */ @@ -133,9 +138,9 @@ static void gen3_stop_engine(struct intel_engine_cs *engine) I915_WRITE_FW(RING_CTL(base), 0); /* Check acts as a post */ - if (I915_READ_FW(RING_HEAD(base)) != 0) - DRM_DEBUG_DRIVER("%s: ring head not parked\n", - engine->name); + if (I915_READ_FW(RING_HEAD(base))) + GEM_TRACE("%s: ring head [%x] not parked\n", + engine->name, I915_READ_FW(RING_HEAD(base))); } static void i915_stop_engines(struct drm_i915_private *i915, @@ -240,10 +245,12 @@ static int ironlake_do_reset(struct drm_i915_private *dev_priv, unsigned int engine_mask, unsigned int retry) { + struct intel_uncore *uncore = &dev_priv->uncore; int ret; - I915_WRITE_FW(ILK_GDSR, ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE); - ret = __intel_wait_for_register_fw(dev_priv, ILK_GDSR, + intel_uncore_write_fw(uncore, ILK_GDSR, + ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE); + ret = __intel_wait_for_register_fw(uncore, ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0, 5000, 0, NULL); @@ -252,8 +259,9 @@ static int ironlake_do_reset(struct drm_i915_private *dev_priv, goto out; } - I915_WRITE_FW(ILK_GDSR, ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE); - ret = __intel_wait_for_register_fw(dev_priv, ILK_GDSR, + intel_uncore_write_fw(uncore, ILK_GDSR, + ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE); + ret = __intel_wait_for_register_fw(uncore, ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0, 5000, 0, NULL); @@ -263,8 +271,8 @@ static int ironlake_do_reset(struct drm_i915_private *dev_priv, } out: - I915_WRITE_FW(ILK_GDSR, 0); - POSTING_READ_FW(ILK_GDSR); + intel_uncore_write_fw(uncore, ILK_GDSR, 0); + intel_uncore_posting_read_fw(uncore, ILK_GDSR); return ret; } @@ -272,6 +280,7 @@ out: static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv, u32 hw_domain_mask) { + struct intel_uncore *uncore = &dev_priv->uncore; int err; /* @@ -279,10 +288,10 @@ static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv, * for fifo space for the write or forcewake the chip for * the read */ - I915_WRITE_FW(GEN6_GDRST, hw_domain_mask); + intel_uncore_write_fw(uncore, GEN6_GDRST, hw_domain_mask); /* Wait for the device to ack the reset requests */ - err = __intel_wait_for_register_fw(dev_priv, + err = __intel_wait_for_register_fw(uncore, GEN6_GDRST, hw_domain_mask, 0, 500, 0, NULL); @@ -298,12 +307,12 @@ static int gen6_reset_engines(struct drm_i915_private *i915, unsigned int retry) { struct intel_engine_cs *engine; - const u32 hw_engine_mask[I915_NUM_ENGINES] = { - [RCS] = GEN6_GRDOM_RENDER, - [BCS] = GEN6_GRDOM_BLT, - [VCS] = GEN6_GRDOM_MEDIA, - [VCS2] = GEN8_GRDOM_MEDIA2, - [VECS] = GEN6_GRDOM_VECS, + const u32 hw_engine_mask[] = { + [RCS0] = GEN6_GRDOM_RENDER, + [BCS0] = GEN6_GRDOM_BLT, + [VCS0] = GEN6_GRDOM_MEDIA, + [VCS1] = GEN8_GRDOM_MEDIA2, + [VECS0] = GEN6_GRDOM_VECS, }; u32 hw_mask; @@ -313,8 +322,10 @@ static int gen6_reset_engines(struct drm_i915_private *i915, unsigned int tmp; hw_mask = 0; - 
for_each_engine_masked(engine, i915, engine_mask, tmp) + for_each_engine_masked(engine, i915, engine_mask, tmp) { + GEM_BUG_ON(engine->id >= ARRAY_SIZE(hw_engine_mask)); hw_mask |= hw_engine_mask[engine->id]; + } } return gen6_hw_domain_reset(i915, hw_mask); @@ -323,6 +334,7 @@ static int gen6_reset_engines(struct drm_i915_private *i915, static u32 gen11_lock_sfc(struct drm_i915_private *dev_priv, struct intel_engine_cs *engine) { + struct intel_uncore *uncore = &dev_priv->uncore; u8 vdbox_sfc_access = RUNTIME_INFO(dev_priv)->vdbox_sfc_access; i915_reg_t sfc_forced_lock, sfc_forced_lock_ack; u32 sfc_forced_lock_bit, sfc_forced_lock_ack_bit; @@ -370,10 +382,9 @@ static u32 gen11_lock_sfc(struct drm_i915_private *dev_priv, * ends up being locked to the engine we want to reset, we have to reset * it as well (we will unlock it once the reset sequence is completed). */ - I915_WRITE_FW(sfc_forced_lock, - I915_READ_FW(sfc_forced_lock) | sfc_forced_lock_bit); + intel_uncore_rmw_or_fw(uncore, sfc_forced_lock, sfc_forced_lock_bit); - if (__intel_wait_for_register_fw(dev_priv, + if (__intel_wait_for_register_fw(uncore, sfc_forced_lock_ack, sfc_forced_lock_ack_bit, sfc_forced_lock_ack_bit, @@ -382,7 +393,7 @@ static u32 gen11_lock_sfc(struct drm_i915_private *dev_priv, return 0; } - if (I915_READ_FW(sfc_usage) & sfc_usage_bit) + if (intel_uncore_read_fw(uncore, sfc_usage) & sfc_usage_bit) return sfc_reset_bit; return 0; @@ -421,28 +432,27 @@ static int gen11_reset_engines(struct drm_i915_private *i915, unsigned int engine_mask, unsigned int retry) { - const u32 hw_engine_mask[I915_NUM_ENGINES] = { - [RCS] = GEN11_GRDOM_RENDER, - [BCS] = GEN11_GRDOM_BLT, - [VCS] = GEN11_GRDOM_MEDIA, - [VCS2] = GEN11_GRDOM_MEDIA2, - [VCS3] = GEN11_GRDOM_MEDIA3, - [VCS4] = GEN11_GRDOM_MEDIA4, - [VECS] = GEN11_GRDOM_VECS, - [VECS2] = GEN11_GRDOM_VECS2, + const u32 hw_engine_mask[] = { + [RCS0] = GEN11_GRDOM_RENDER, + [BCS0] = GEN11_GRDOM_BLT, + [VCS0] = GEN11_GRDOM_MEDIA, + [VCS1] = GEN11_GRDOM_MEDIA2, + [VCS2] = GEN11_GRDOM_MEDIA3, + [VCS3] = GEN11_GRDOM_MEDIA4, + [VECS0] = GEN11_GRDOM_VECS, + [VECS1] = GEN11_GRDOM_VECS2, }; struct intel_engine_cs *engine; unsigned int tmp; u32 hw_mask; int ret; - BUILD_BUG_ON(VECS2 + 1 != I915_NUM_ENGINES); - if (engine_mask == ALL_ENGINES) { hw_mask = GEN11_GRDOM_FULL; } else { hw_mask = 0; for_each_engine_masked(engine, i915, engine_mask, tmp) { + GEM_BUG_ON(engine->id >= ARRAY_SIZE(hw_engine_mask)); hw_mask |= hw_engine_mask[engine->id]; hw_mask |= gen11_lock_sfc(i915, engine); } @@ -459,13 +469,13 @@ static int gen11_reset_engines(struct drm_i915_private *i915, static int gen8_engine_reset_prepare(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = engine->i915; + struct intel_uncore *uncore = &engine->i915->uncore; int ret; - I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base), - _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET)); + intel_uncore_write_fw(uncore, RING_RESET_CTL(engine->mmio_base), + _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET)); - ret = __intel_wait_for_register_fw(dev_priv, + ret = __intel_wait_for_register_fw(uncore, RING_RESET_CTL(engine->mmio_base), RESET_CTL_READY_TO_RESET, RESET_CTL_READY_TO_RESET, @@ -532,9 +542,6 @@ typedef int (*reset_func)(struct drm_i915_private *, static reset_func intel_get_gpu_reset(struct drm_i915_private *i915) { - if (!i915_modparams.reset) - return NULL; - if (INTEL_GEN(i915) >= 8) return gen8_reset_engines; else if (INTEL_GEN(i915) >= 6) @@ -566,7 +573,7 @@ int intel_gpu_reset(struct drm_i915_private *i915, unsigned int 
engine_mask) * If the power well sleeps during the reset, the reset * request may be dropped and never completes (causing -EIO). */ - intel_uncore_forcewake_get(i915, FORCEWAKE_ALL); + intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL); for (retry = 0; ret == -ETIMEDOUT && retry < retries; retry++) { /* * We stop engines, otherwise we might get failed reset and a @@ -582,14 +589,15 @@ int intel_gpu_reset(struct drm_i915_private *i915, unsigned int engine_mask) * * FIXME: Wa for more modern gens needs to be validated */ - i915_stop_engines(i915, engine_mask); + if (retry) + i915_stop_engines(i915, engine_mask); GEM_TRACE("engine_mask=%x\n", engine_mask); preempt_disable(); ret = reset(i915, engine_mask, retry); preempt_enable(); } - intel_uncore_forcewake_put(i915, FORCEWAKE_ALL); + intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL); return ret; } @@ -599,6 +607,9 @@ bool intel_has_gpu_reset(struct drm_i915_private *i915) if (USES_GUC(i915)) return false; + if (!i915_modparams.reset) + return NULL; + return intel_get_gpu_reset(i915); } @@ -615,9 +626,9 @@ int intel_reset_guc(struct drm_i915_private *i915) GEM_BUG_ON(!HAS_GUC(i915)); - intel_uncore_forcewake_get(i915, FORCEWAKE_ALL); + intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL); ret = gen6_hw_domain_reset(i915, guc_domain); - intel_uncore_forcewake_put(i915, FORCEWAKE_ALL); + intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL); return ret; } @@ -635,10 +646,36 @@ static void reset_prepare_engine(struct intel_engine_cs *engine) * written to the powercontext is undefined and so we may lose * GPU state upon resume, i.e. fail to restart after a reset. */ - intel_uncore_forcewake_get(engine->i915, FORCEWAKE_ALL); + intel_uncore_forcewake_get(&engine->i915->uncore, FORCEWAKE_ALL); engine->reset.prepare(engine); } +static void revoke_mmaps(struct drm_i915_private *i915) +{ + int i; + + for (i = 0; i < i915->num_fence_regs; i++) { + struct drm_vma_offset_node *node; + struct i915_vma *vma; + u64 vma_offset; + + vma = READ_ONCE(i915->fence_regs[i].vma); + if (!vma) + continue; + + if (!i915_vma_has_userfault(vma)) + continue; + + GEM_BUG_ON(vma->fence != &i915->fence_regs[i]); + node = &vma->obj->base.vma_node; + vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT; + unmap_mapping_range(i915->drm.anon_inode->i_mapping, + drm_vma_node_offset_addr(node) + vma_offset, + vma->size, + 1); + } +} + static void reset_prepare(struct drm_i915_private *i915) { struct intel_engine_cs *engine; @@ -647,7 +684,12 @@ static void reset_prepare(struct drm_i915_private *i915) for_each_engine(engine, i915, id) reset_prepare_engine(engine); - intel_uc_sanitize(i915); + intel_uc_reset_prepare(i915); +} + +static void gt_revoke(struct drm_i915_private *i915) +{ + revoke_mmaps(i915); } static int gt_reset(struct drm_i915_private *i915, unsigned int stalled_mask) @@ -665,7 +707,7 @@ static int gt_reset(struct drm_i915_private *i915, unsigned int stalled_mask) return err; for_each_engine(engine, i915, id) - intel_engine_reset(engine, stalled_mask & ENGINE_MASK(id)); + intel_engine_reset(engine, stalled_mask & engine->mask); i915_gem_restore_fences(i915); @@ -675,7 +717,7 @@ static int gt_reset(struct drm_i915_private *i915, unsigned int stalled_mask) static void reset_finish_engine(struct intel_engine_cs *engine) { engine->reset.finish(engine); - intel_uncore_forcewake_put(engine->i915, FORCEWAKE_ALL); + intel_uncore_forcewake_put(&engine->i915->uncore, FORCEWAKE_ALL); } struct i915_gpu_restart { @@ -722,8 +764,10 @@ static void 
reset_finish(struct drm_i915_private *i915) struct intel_engine_cs *engine; enum intel_engine_id id; - for_each_engine(engine, i915, id) + for_each_engine(engine, i915, id) { reset_finish_engine(engine); + intel_engine_signal_breadcrumbs(engine); + } } static void reset_restart(struct drm_i915_private *i915) @@ -761,23 +805,19 @@ static void nop_submit_request(struct i915_request *request) spin_lock_irqsave(&engine->timeline.lock, flags); __i915_request_submit(request); i915_request_mark_complete(request); - intel_engine_write_global_seqno(engine, request->global_seqno); spin_unlock_irqrestore(&engine->timeline.lock, flags); intel_engine_queue_breadcrumbs(engine); } -void i915_gem_set_wedged(struct drm_i915_private *i915) +static void __i915_gem_set_wedged(struct drm_i915_private *i915) { struct i915_gpu_error *error = &i915->gpu_error; struct intel_engine_cs *engine; enum intel_engine_id id; - mutex_lock(&error->wedge_mutex); - if (test_bit(I915_WEDGED, &error->flags)) { - mutex_unlock(&error->wedge_mutex); + if (test_bit(I915_WEDGED, &error->flags)) return; - } if (GEM_SHOW_DEBUG() && !intel_engines_are_idle(i915)) { struct drm_printer p = drm_debug_printer(__func__); @@ -793,11 +833,10 @@ void i915_gem_set_wedged(struct drm_i915_private *i915) * rolling the global seqno forward (since this would complete requests * for which we haven't set the fence error to EIO yet). */ - for_each_engine(engine, i915, id) - reset_prepare_engine(engine); + reset_prepare(i915); /* Even if the GPU reset fails, it should still stop the engines */ - if (INTEL_GEN(i915) >= 5) + if (!INTEL_INFO(i915)->gpu_reset_clobbers_display) intel_gpu_reset(i915, ALL_ENGINES); for_each_engine(engine, i915, id) { @@ -811,31 +850,35 @@ void i915_gem_set_wedged(struct drm_i915_private *i915) * either this call here to intel_engine_write_global_seqno, or the one * in nop_submit_request. */ - synchronize_rcu(); + synchronize_rcu_expedited(); /* Mark all executing requests as skipped */ for_each_engine(engine, i915, id) engine->cancel_requests(engine); - for_each_engine(engine, i915, id) { - reset_finish_engine(engine); - intel_engine_signal_breadcrumbs(engine); - } + reset_finish(i915); smp_mb__before_atomic(); set_bit(I915_WEDGED, &error->flags); GEM_TRACE("end\n"); - mutex_unlock(&error->wedge_mutex); +} - wake_up_all(&error->reset_queue); +void i915_gem_set_wedged(struct drm_i915_private *i915) +{ + struct i915_gpu_error *error = &i915->gpu_error; + intel_wakeref_t wakeref; + + mutex_lock(&error->wedge_mutex); + with_intel_runtime_pm(i915, wakeref) + __i915_gem_set_wedged(i915); + mutex_unlock(&error->wedge_mutex); } -bool i915_gem_unset_wedged(struct drm_i915_private *i915) +static bool __i915_gem_unset_wedged(struct drm_i915_private *i915) { struct i915_gpu_error *error = &i915->gpu_error; struct i915_timeline *tl; - bool ret = false; if (!test_bit(I915_WEDGED, &error->flags)) return true; @@ -843,8 +886,6 @@ bool i915_gem_unset_wedged(struct drm_i915_private *i915) if (!i915->gt.scratch) /* Never full initialised, recovery impossible */ return false; - mutex_lock(&error->wedge_mutex); - GEM_TRACE("start\n"); /* @@ -860,30 +901,20 @@ bool i915_gem_unset_wedged(struct drm_i915_private *i915) mutex_lock(&i915->gt.timelines.mutex); list_for_each_entry(tl, &i915->gt.timelines.active_list, link) { struct i915_request *rq; - long timeout; rq = i915_active_request_get_unlocked(&tl->last_request); if (!rq) continue; /* - * We can't use our normal waiter as we want to - * avoid recursively trying to handle the current - * reset. 
The basic dma_fence_default_wait() installs - * a callback for dma_fence_signal(), which is - * triggered by our nop handler (indirectly, the - * callback enables the signaler thread which is - * woken by the nop_submit_request() advancing the seqno - * and when the seqno passes the fence, the signaler - * then signals the fence waking us up). + * All internal dependencies (i915_requests) will have + * been flushed by the set-wedge, but we may be stuck waiting + * for external fences. These should all be capped to 10s + * (I915_FENCE_TIMEOUT) so this wait should not be unbounded + * in the worst case. */ - timeout = dma_fence_default_wait(&rq->fence, true, - MAX_SCHEDULE_TIMEOUT); + dma_fence_default_wait(&rq->fence, false, MAX_SCHEDULE_TIMEOUT); i915_request_put(rq); - if (timeout < 0) { - mutex_unlock(&i915->gt.timelines.mutex); - goto unlock; - } } mutex_unlock(&i915->gt.timelines.mutex); @@ -904,57 +935,37 @@ bool i915_gem_unset_wedged(struct drm_i915_private *i915) smp_mb__before_atomic(); /* complete takeover before enabling execbuf */ clear_bit(I915_WEDGED, &i915->gpu_error.flags); - ret = true; -unlock: - mutex_unlock(&i915->gpu_error.wedge_mutex); - return ret; + return true; } -struct __i915_reset { - struct drm_i915_private *i915; - unsigned int stalled_mask; -}; - -static int __i915_reset__BKL(void *data) +bool i915_gem_unset_wedged(struct drm_i915_private *i915) { - struct __i915_reset *arg = data; - int err; + struct i915_gpu_error *error = &i915->gpu_error; + bool result; - err = intel_gpu_reset(arg->i915, ALL_ENGINES); - if (err) - return err; + mutex_lock(&error->wedge_mutex); + result = __i915_gem_unset_wedged(i915); + mutex_unlock(&error->wedge_mutex); - return gt_reset(arg->i915, arg->stalled_mask); + return result; } -#if RESET_UNDER_STOP_MACHINE -/* - * XXX An alternative to using stop_machine would be to park only the - * processes that have a GGTT mmap. By remote parking the threads (SIGSTOP) - * we should be able to prevent their memmory accesses via the lost fence - * registers over the course of the reset without the potential recursive - * of mutexes between the pagefault handler and reset. - * - * See igt/gem_mmap_gtt/hang - */ -#define __do_reset(fn, arg) stop_machine(fn, arg, NULL) -#else -#define __do_reset(fn, arg) fn(arg) -#endif - static int do_reset(struct drm_i915_private *i915, unsigned int stalled_mask) { - struct __i915_reset arg = { i915, stalled_mask }; int err, i; - err = __do_reset(__i915_reset__BKL, &arg); + gt_revoke(i915); + + err = intel_gpu_reset(i915, ALL_ENGINES); for (i = 0; err && i < RESET_MAX_RETRIES; i++) { - msleep(100); - err = __do_reset(__i915_reset__BKL, &arg); + msleep(10 * (i + 1)); + err = intel_gpu_reset(i915, ALL_ENGINES); } + if (err) + return err; - return err; + return gt_reset(i915, stalled_mask); } /** @@ -966,8 +977,6 @@ static int do_reset(struct drm_i915_private *i915, unsigned int stalled_mask) * Reset the chip. Useful if a hang is detected. Marks the device as wedged * on failure. * - * Caller must hold the struct_mutex. - * * Procedure is fairly simple: * - reset the chip using the reset reg * - re-init context state @@ -990,7 +999,7 @@ void i915_reset(struct drm_i915_private *i915, GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &error->flags)); /* Clear any previous failed attempts at recovery. Time to try again. 
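do_reset() above now revokes GGTT mmaps first and then retries the whole-GPU reset with a linearly growing delay (10ms, 20ms, ...) up to RESET_MAX_RETRIES before giving up, rather than wrapping the reset in stop_machine(). The retry shape, reduced to a standalone sketch (plain C; try_reset() and sleep_ms() are stand-ins, not driver functions):

#include <stdio.h>

#define RESET_MAX_RETRIES 3

/* Stand-in for the hardware reset; fails twice, then succeeds. */
static int try_reset(void)
{
	static int calls;

	return ++calls < 3 ? -5 /* -EIO */ : 0;
}

static void sleep_ms(int ms)
{
	printf("backing off %dms\n", ms);	/* msleep() in the kernel */
}

int main(void)
{
	int err, i;

	err = try_reset();
	for (i = 0; err && i < RESET_MAX_RETRIES; i++) {
		sleep_ms(10 * (i + 1));		/* 10ms, 20ms, 30ms, ... */
		err = try_reset();
	}
	printf("reset %s\n", err ? "failed" : "succeeded");
	return 0;
}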
*/ - if (!i915_gem_unset_wedged(i915)) + if (!__i915_gem_unset_wedged(i915)) return; if (reason) @@ -1007,11 +1016,17 @@ void i915_reset(struct drm_i915_private *i915, goto error; } + if (INTEL_INFO(i915)->gpu_reset_clobbers_display) + intel_runtime_pm_disable_interrupts(i915); + if (do_reset(i915, stalled_mask)) { dev_err(i915->drm.dev, "Failed to reset chip\n"); goto taint; } + if (INTEL_INFO(i915)->gpu_reset_clobbers_display) + intel_runtime_pm_enable_interrupts(i915); + intel_overlay_reset(i915); /* @@ -1033,7 +1048,7 @@ void i915_reset(struct drm_i915_private *i915, finish: reset_finish(i915); - if (!i915_terminally_wedged(error)) + if (!__i915_wedged(error)) reset_restart(i915); return; @@ -1052,14 +1067,14 @@ taint: */ add_taint(TAINT_WARN, LOCKDEP_STILL_OK); error: - i915_gem_set_wedged(i915); + __i915_gem_set_wedged(i915); goto finish; } static inline int intel_gt_reset_engine(struct drm_i915_private *i915, struct intel_engine_cs *engine) { - return intel_gpu_reset(i915, intel_engine_flag(engine)); + return intel_gpu_reset(i915, engine->mask); } /** @@ -1144,7 +1159,12 @@ static void i915_reset_device(struct drm_i915_private *i915, i915_wedge_on_timeout(&w, i915, 5 * HZ) { intel_prepare_reset(i915); + /* Flush everyone using a resource about to be clobbered */ + synchronize_srcu_expedited(&error->reset_backoff_srcu); + + mutex_lock(&error->wedge_mutex); i915_reset(i915, engine_mask, reason); + mutex_unlock(&error->wedge_mutex); intel_finish_reset(i915); } @@ -1153,19 +1173,24 @@ static void i915_reset_device(struct drm_i915_private *i915, kobject_uevent_env(kobj, KOBJ_CHANGE, reset_done_event); } +static void clear_register(struct drm_i915_private *dev_priv, i915_reg_t reg) +{ + I915_WRITE(reg, I915_READ(reg)); +} + void i915_clear_error_registers(struct drm_i915_private *dev_priv) { u32 eir; if (!IS_GEN(dev_priv, 2)) - I915_WRITE(PGTBL_ER, I915_READ(PGTBL_ER)); + clear_register(dev_priv, PGTBL_ER); if (INTEL_GEN(dev_priv) < 4) - I915_WRITE(IPEIR, I915_READ(IPEIR)); + clear_register(dev_priv, IPEIR(RENDER_RING_BASE)); else - I915_WRITE(IPEIR_I965, I915_READ(IPEIR_I965)); + clear_register(dev_priv, IPEIR_I965); - I915_WRITE(EIR, I915_READ(EIR)); + clear_register(dev_priv, EIR); eir = I915_READ(EIR); if (eir) { /* @@ -1190,7 +1215,7 @@ void i915_clear_error_registers(struct drm_i915_private *dev_priv) I915_READ(RING_FAULT_REG(engine)) & ~RING_FAULT_VALID); } - POSTING_READ(RING_FAULT_REG(dev_priv->engine[RCS])); + POSTING_READ(RING_FAULT_REG(dev_priv->engine[RCS0])); } } @@ -1212,6 +1237,7 @@ void i915_handle_error(struct drm_i915_private *i915, unsigned long flags, const char *fmt, ...) { + struct i915_gpu_error *error = &i915->gpu_error; struct intel_engine_cs *engine; intel_wakeref_t wakeref; unsigned int tmp; @@ -1237,7 +1263,7 @@ void i915_handle_error(struct drm_i915_private *i915, */ wakeref = intel_runtime_pm_get(i915); - engine_mask &= INTEL_INFO(i915)->ring_mask; + engine_mask &= INTEL_INFO(i915)->engine_mask; if (flags & I915_ERROR_CAPTURE) { i915_capture_error_state(i915, engine_mask, msg); @@ -1248,20 +1274,19 @@ void i915_handle_error(struct drm_i915_private *i915, * Try engine reset when available. We fall back to full reset if * single reset fails. 
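The i915_handle_error() hunk that follows tries a per-engine reset first, clearing each recovered engine's bit from engine_mask, and escalates to a full GPU reset only if some bits remain set. Reduced to a standalone sketch (plain C; reset_engine() here is a stand-in that pretends one engine fails to recover):

#include <stdio.h>

/* Pretend engine 1 recovers with an engine-only reset, engine 3 does not. */
static int reset_engine(int id)
{
	return id == 3 ? -5 /* -EIO */ : 0;
}

int main(void)
{
	unsigned int engine_mask = (1u << 1) | (1u << 3);	/* hung engines */
	int id;

	for (id = 0; id < 8; id++) {
		if (!(engine_mask & (1u << id)))
			continue;
		if (reset_engine(id) == 0)
			engine_mask &= ~(1u << id);	/* recovered */
	}

	if (engine_mask)
		printf("engines 0x%x still hung, falling back to full reset\n",
		       engine_mask);
	else
		printf("all engines recovered without a full reset\n");
	return 0;
}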
*/ - if (intel_has_reset_engine(i915) && - !i915_terminally_wedged(&i915->gpu_error)) { + if (intel_has_reset_engine(i915) && !__i915_wedged(error)) { for_each_engine_masked(engine, i915, engine_mask, tmp) { BUILD_BUG_ON(I915_RESET_MODESET >= I915_RESET_ENGINE); if (test_and_set_bit(I915_RESET_ENGINE + engine->id, - &i915->gpu_error.flags)) + &error->flags)) continue; if (i915_reset_engine(engine, msg) == 0) - engine_mask &= ~intel_engine_flag(engine); + engine_mask &= ~engine->mask; clear_bit(I915_RESET_ENGINE + engine->id, - &i915->gpu_error.flags); - wake_up_bit(&i915->gpu_error.flags, + &error->flags); + wake_up_bit(&error->flags, I915_RESET_ENGINE + engine->id); } } @@ -1270,18 +1295,20 @@ void i915_handle_error(struct drm_i915_private *i915, goto out; /* Full reset needs the mutex, stop any other user trying to do so. */ - if (test_and_set_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags)) { - wait_event(i915->gpu_error.reset_queue, - !test_bit(I915_RESET_BACKOFF, - &i915->gpu_error.flags)); - goto out; + if (test_and_set_bit(I915_RESET_BACKOFF, &error->flags)) { + wait_event(error->reset_queue, + !test_bit(I915_RESET_BACKOFF, &error->flags)); + goto out; /* piggy-back on the other reset */ } + /* Make sure i915_reset_trylock() sees the I915_RESET_BACKOFF */ + synchronize_rcu_expedited(); + /* Prevent any other reset-engine attempt. */ for_each_engine(engine, i915, tmp) { while (test_and_set_bit(I915_RESET_ENGINE + engine->id, - &i915->gpu_error.flags)) - wait_on_bit(&i915->gpu_error.flags, + &error->flags)) + wait_on_bit(&error->flags, I915_RESET_ENGINE + engine->id, TASK_UNINTERRUPTIBLE); } @@ -1290,16 +1317,74 @@ void i915_handle_error(struct drm_i915_private *i915, for_each_engine(engine, i915, tmp) { clear_bit(I915_RESET_ENGINE + engine->id, - &i915->gpu_error.flags); + &error->flags); } - clear_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags); - wake_up_all(&i915->gpu_error.reset_queue); + clear_bit(I915_RESET_BACKOFF, &error->flags); + wake_up_all(&error->reset_queue); out: intel_runtime_pm_put(i915, wakeref); } +int i915_reset_trylock(struct drm_i915_private *i915) +{ + struct i915_gpu_error *error = &i915->gpu_error; + int srcu; + + might_lock(&error->reset_backoff_srcu); + might_sleep(); + + rcu_read_lock(); + while (test_bit(I915_RESET_BACKOFF, &error->flags)) { + rcu_read_unlock(); + + if (wait_event_interruptible(error->reset_queue, + !test_bit(I915_RESET_BACKOFF, + &error->flags))) + return -EINTR; + + rcu_read_lock(); + } + srcu = srcu_read_lock(&error->reset_backoff_srcu); + rcu_read_unlock(); + + return srcu; +} + +void i915_reset_unlock(struct drm_i915_private *i915, int tag) +__releases(&i915->gpu_error.reset_backoff_srcu) +{ + struct i915_gpu_error *error = &i915->gpu_error; + + srcu_read_unlock(&error->reset_backoff_srcu, tag); +} + +int i915_terminally_wedged(struct drm_i915_private *i915) +{ + struct i915_gpu_error *error = &i915->gpu_error; + + might_sleep(); + + if (!__i915_wedged(error)) + return 0; + + /* Reset still in progress? Maybe we will recover? */ + if (!test_bit(I915_RESET_BACKOFF, &error->flags)) + return -EIO; + + /* XXX intel_reset_finish() still takes struct_mutex!!! */ + if (mutex_is_locked(&i915->drm.struct_mutex)) + return -EAGAIN; + + if (wait_event_interruptible(error->reset_queue, + !test_bit(I915_RESET_BACKOFF, + &error->flags))) + return -EINTR; + + return __i915_wedged(error) ? 
-EIO : 0; +} + bool i915_reset_flush(struct drm_i915_private *i915) { int err; diff --git a/drivers/gpu/drm/i915/i915_reset.h b/drivers/gpu/drm/i915/i915_reset.h index f2d347f319df..16f2389f656f 100644 --- a/drivers/gpu/drm/i915/i915_reset.h +++ b/drivers/gpu/drm/i915/i915_reset.h @@ -9,6 +9,7 @@ #include <linux/compiler.h> #include <linux/types.h> +#include <linux/srcu.h> struct drm_i915_private; struct intel_engine_cs; @@ -32,6 +33,11 @@ int i915_reset_engine(struct intel_engine_cs *engine, void i915_reset_request(struct i915_request *rq, bool guilty); bool i915_reset_flush(struct drm_i915_private *i915); +int __must_check i915_reset_trylock(struct drm_i915_private *i915); +void i915_reset_unlock(struct drm_i915_private *i915, int tag); + +int i915_terminally_wedged(struct drm_i915_private *i915); + bool intel_has_gpu_reset(struct drm_i915_private *i915); bool intel_has_reset_engine(struct drm_i915_private *i915); diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c index d01683167c77..e0f609d01564 100644 --- a/drivers/gpu/drm/i915/i915_scheduler.c +++ b/drivers/gpu/drm/i915/i915_scheduler.c @@ -7,9 +7,16 @@ #include <linux/mutex.h> #include "i915_drv.h" +#include "i915_globals.h" #include "i915_request.h" #include "i915_scheduler.h" +static struct i915_global_scheduler { + struct i915_global base; + struct kmem_cache *slab_dependencies; + struct kmem_cache *slab_priorities; +} global; + static DEFINE_SPINLOCK(schedule_lock); static const struct i915_request * @@ -18,6 +25,11 @@ node_to_request(const struct i915_sched_node *node) return container_of(node, const struct i915_request, sched); } +static inline bool node_started(const struct i915_sched_node *node) +{ + return i915_request_started(node_to_request(node)); +} + static inline bool node_signaled(const struct i915_sched_node *node) { return i915_request_completed(node_to_request(node)); @@ -29,19 +41,19 @@ void i915_sched_node_init(struct i915_sched_node *node) INIT_LIST_HEAD(&node->waiters_list); INIT_LIST_HEAD(&node->link); node->attr.priority = I915_PRIORITY_INVALID; + node->flags = 0; } static struct i915_dependency * -i915_dependency_alloc(struct drm_i915_private *i915) +i915_dependency_alloc(void) { - return kmem_cache_alloc(i915->dependencies, GFP_KERNEL); + return kmem_cache_alloc(global.slab_dependencies, GFP_KERNEL); } static void -i915_dependency_free(struct drm_i915_private *i915, - struct i915_dependency *dep) +i915_dependency_free(struct i915_dependency *dep) { - kmem_cache_free(i915->dependencies, dep); + kmem_cache_free(global.slab_dependencies, dep); } bool __i915_sched_node_add_dependency(struct i915_sched_node *node, @@ -60,6 +72,11 @@ bool __i915_sched_node_add_dependency(struct i915_sched_node *node, dep->signaler = signal; dep->flags = flags; + /* Keep track of whether anyone on this chain has a semaphore */ + if (signal->flags & I915_SCHED_HAS_SEMAPHORE && + !node_started(signal)) + node->flags |= I915_SCHED_HAS_SEMAPHORE; + ret = true; } @@ -68,25 +85,23 @@ bool __i915_sched_node_add_dependency(struct i915_sched_node *node, return ret; } -int i915_sched_node_add_dependency(struct drm_i915_private *i915, - struct i915_sched_node *node, +int i915_sched_node_add_dependency(struct i915_sched_node *node, struct i915_sched_node *signal) { struct i915_dependency *dep; - dep = i915_dependency_alloc(i915); + dep = i915_dependency_alloc(); if (!dep) return -ENOMEM; if (!__i915_sched_node_add_dependency(node, signal, dep, I915_DEPENDENCY_ALLOC)) - i915_dependency_free(i915, dep); + 
i915_dependency_free(dep); return 0; } -void i915_sched_node_fini(struct drm_i915_private *i915, - struct i915_sched_node *node) +void i915_sched_node_fini(struct i915_sched_node *node) { struct i915_dependency *dep, *tmp; @@ -106,7 +121,7 @@ void i915_sched_node_fini(struct drm_i915_private *i915, list_del(&dep->wait_link); if (dep->flags & I915_DEPENDENCY_ALLOC) - i915_dependency_free(i915, dep); + i915_dependency_free(dep); } /* Remove ourselves from everyone who depends upon us */ @@ -116,7 +131,7 @@ void i915_sched_node_fini(struct drm_i915_private *i915, list_del(&dep->signal_link); if (dep->flags & I915_DEPENDENCY_ALLOC) - i915_dependency_free(i915, dep); + i915_dependency_free(dep); } spin_unlock(&schedule_lock); @@ -193,7 +208,7 @@ find_priolist: if (prio == I915_PRIORITY_NORMAL) { p = &execlists->default_priolist; } else { - p = kmem_cache_alloc(engine->i915->priorities, GFP_ATOMIC); + p = kmem_cache_alloc(global.slab_priorities, GFP_ATOMIC); /* Convert an allocation failure to a priority bump */ if (unlikely(!p)) { prio = I915_PRIORITY_NORMAL; /* recurses just once */ @@ -223,8 +238,14 @@ out: return &p->requests[idx]; } +struct sched_cache { + struct list_head *priolist; +}; + static struct intel_engine_cs * -sched_lock_engine(struct i915_sched_node *node, struct intel_engine_cs *locked) +sched_lock_engine(const struct i915_sched_node *node, + struct intel_engine_cs *locked, + struct sched_cache *cache) { struct intel_engine_cs *engine = node_to_request(node)->engine; @@ -232,6 +253,7 @@ sched_lock_engine(struct i915_sched_node *node, struct intel_engine_cs *locked) if (engine != locked) { spin_unlock(&locked->timeline.lock); + memset(cache, 0, sizeof(*cache)); spin_lock(&engine->timeline.lock); } @@ -253,11 +275,11 @@ static bool inflight(const struct i915_request *rq, static void __i915_schedule(struct i915_request *rq, const struct i915_sched_attr *attr) { - struct list_head *uninitialized_var(pl); - struct intel_engine_cs *engine, *last; + struct intel_engine_cs *engine; struct i915_dependency *dep, *p; struct i915_dependency stack; const int prio = attr->priority; + struct sched_cache cache; LIST_HEAD(dfs); /* Needed in order to use the temporary link inside i915_dependency */ @@ -294,6 +316,10 @@ static void __i915_schedule(struct i915_request *rq, list_for_each_entry(dep, &dfs, dfs_link) { struct i915_sched_node *node = dep->signaler; + /* If we are already flying, we know we have no signalers */ + if (node_started(node)) + continue; + /* * Within an engine, there can be no cycle, but we may * refer to the same dependency chain multiple times @@ -306,7 +332,6 @@ static void __i915_schedule(struct i915_request *rq, if (node_signaled(p->signaler)) continue; - GEM_BUG_ON(p->signaler->attr.priority < node->attr.priority); if (prio > READ_ONCE(p->signaler->attr.priority)) list_move_tail(&p->dfs_link, &dfs); } @@ -328,7 +353,7 @@ static void __i915_schedule(struct i915_request *rq, __list_del_entry(&stack.dfs_link); } - last = NULL; + memset(&cache, 0, sizeof(cache)); engine = rq->engine; spin_lock_irq(&engine->timeline.lock); @@ -338,7 +363,7 @@ static void __i915_schedule(struct i915_request *rq, INIT_LIST_HEAD(&dep->dfs_link); - engine = sched_lock_engine(node, engine); + engine = sched_lock_engine(node, engine, &cache); lockdep_assert_held(&engine->timeline.lock); /* Recheck after acquiring the engine->timeline.lock */ @@ -347,11 +372,11 @@ static void __i915_schedule(struct i915_request *rq, node->attr.priority = prio; if (!list_empty(&node->link)) { - if (last != engine) 
{ - pl = i915_sched_lookup_priolist(engine, prio); - last = engine; - } - list_move_tail(&node->link, pl); + if (!cache.priolist) + cache.priolist = + i915_sched_lookup_priolist(engine, + prio); + list_move_tail(&node->link, cache.priolist); } else { /* * If the request is not in the priolist queue because @@ -408,3 +433,45 @@ void i915_schedule_bump_priority(struct i915_request *rq, unsigned int bump) spin_unlock_bh(&schedule_lock); } + +void __i915_priolist_free(struct i915_priolist *p) +{ + kmem_cache_free(global.slab_priorities, p); +} + +static void i915_global_scheduler_shrink(void) +{ + kmem_cache_shrink(global.slab_dependencies); + kmem_cache_shrink(global.slab_priorities); +} + +static void i915_global_scheduler_exit(void) +{ + kmem_cache_destroy(global.slab_dependencies); + kmem_cache_destroy(global.slab_priorities); +} + +static struct i915_global_scheduler global = { { + .shrink = i915_global_scheduler_shrink, + .exit = i915_global_scheduler_exit, +} }; + +int __init i915_global_scheduler_init(void) +{ + global.slab_dependencies = KMEM_CACHE(i915_dependency, + SLAB_HWCACHE_ALIGN); + if (!global.slab_dependencies) + return -ENOMEM; + + global.slab_priorities = KMEM_CACHE(i915_priolist, + SLAB_HWCACHE_ALIGN); + if (!global.slab_priorities) + goto err_priorities; + + i915_global_register(&global.base); + return 0; + +err_priorities: + kmem_cache_destroy(global.slab_priorities); + return -ENOMEM; +} diff --git a/drivers/gpu/drm/i915/i915_scheduler.h b/drivers/gpu/drm/i915/i915_scheduler.h index dbe9cb7ecd82..9a1d257f3d6e 100644 --- a/drivers/gpu/drm/i915/i915_scheduler.h +++ b/drivers/gpu/drm/i915/i915_scheduler.h @@ -24,14 +24,17 @@ enum { I915_PRIORITY_INVALID = INT_MIN }; -#define I915_USER_PRIORITY_SHIFT 2 +#define I915_USER_PRIORITY_SHIFT 3 #define I915_USER_PRIORITY(x) ((x) << I915_USER_PRIORITY_SHIFT) #define I915_PRIORITY_COUNT BIT(I915_USER_PRIORITY_SHIFT) #define I915_PRIORITY_MASK (I915_PRIORITY_COUNT - 1) -#define I915_PRIORITY_WAIT ((u8)BIT(0)) -#define I915_PRIORITY_NEWCLIENT ((u8)BIT(1)) +#define I915_PRIORITY_WAIT ((u8)BIT(0)) +#define I915_PRIORITY_NEWCLIENT ((u8)BIT(1)) +#define I915_PRIORITY_NOSEMAPHORE ((u8)BIT(2)) + +#define __NO_PREEMPTION (I915_PRIORITY_WAIT) struct i915_sched_attr { /** @@ -72,6 +75,8 @@ struct i915_sched_node { struct list_head waiters_list; /* those after us, they depend upon us */ struct list_head link; struct i915_sched_attr attr; + unsigned int flags; +#define I915_SCHED_HAS_SEMAPHORE BIT(0) }; struct i915_dependency { @@ -83,6 +88,25 @@ struct i915_dependency { #define I915_DEPENDENCY_ALLOC BIT(0) }; +struct i915_priolist { + struct list_head requests[I915_PRIORITY_COUNT]; + struct rb_node node; + unsigned long used; + int priority; +}; + +#define priolist_for_each_request(it, plist, idx) \ + for (idx = 0; idx < ARRAY_SIZE((plist)->requests); idx++) \ + list_for_each_entry(it, &(plist)->requests[idx], sched.link) + +#define priolist_for_each_request_consume(it, n, plist, idx) \ + for (; \ + (plist)->used ? 
(idx = __ffs((plist)->used)), 1 : 0; \ + (plist)->used &= ~BIT(idx)) \ + list_for_each_entry_safe(it, n, \ + &(plist)->requests[idx], \ + sched.link) + void i915_sched_node_init(struct i915_sched_node *node); bool __i915_sched_node_add_dependency(struct i915_sched_node *node, @@ -90,12 +114,10 @@ bool __i915_sched_node_add_dependency(struct i915_sched_node *node, struct i915_dependency *dep, unsigned long flags); -int i915_sched_node_add_dependency(struct drm_i915_private *i915, - struct i915_sched_node *node, +int i915_sched_node_add_dependency(struct i915_sched_node *node, struct i915_sched_node *signal); -void i915_sched_node_fini(struct drm_i915_private *i915, - struct i915_sched_node *node); +void i915_sched_node_fini(struct i915_sched_node *node); void i915_schedule(struct i915_request *request, const struct i915_sched_attr *attr); @@ -105,4 +127,11 @@ void i915_schedule_bump_priority(struct i915_request *rq, unsigned int bump); struct list_head * i915_sched_lookup_priolist(struct intel_engine_cs *engine, int prio); +void __i915_priolist_free(struct i915_priolist *p); +static inline void i915_priolist_free(struct i915_priolist *p) +{ + if (p->priority != I915_PRIORITY_NORMAL) + __i915_priolist_free(p); +} + #endif /* _I915_SCHEDULER_H_ */ diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c index 7c58b049ecb5..5387aafd3424 100644 --- a/drivers/gpu/drm/i915/i915_sw_fence.c +++ b/drivers/gpu/drm/i915/i915_sw_fence.c @@ -192,7 +192,7 @@ static void __i915_sw_fence_complete(struct i915_sw_fence *fence, __i915_sw_fence_notify(fence, FENCE_FREE); } -static void i915_sw_fence_complete(struct i915_sw_fence *fence) +void i915_sw_fence_complete(struct i915_sw_fence *fence) { debug_fence_assert(fence); @@ -202,7 +202,7 @@ static void i915_sw_fence_complete(struct i915_sw_fence *fence) __i915_sw_fence_complete(fence, NULL); } -static void i915_sw_fence_await(struct i915_sw_fence *fence) +void i915_sw_fence_await(struct i915_sw_fence *fence) { debug_fence_assert(fence); WARN_ON(atomic_inc_return(&fence->pending) <= 1); @@ -359,11 +359,6 @@ int i915_sw_fence_await_sw_fence_gfp(struct i915_sw_fence *fence, return __i915_sw_fence_await_sw_fence(fence, signaler, NULL, gfp); } -struct i915_sw_dma_fence_cb { - struct dma_fence_cb base; - struct i915_sw_fence *fence; -}; - struct i915_sw_dma_fence_cb_timer { struct i915_sw_dma_fence_cb base; struct dma_fence *dma; @@ -480,6 +475,40 @@ int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence, return ret; } +static void __dma_i915_sw_fence_wake(struct dma_fence *dma, + struct dma_fence_cb *data) +{ + struct i915_sw_dma_fence_cb *cb = container_of(data, typeof(*cb), base); + + i915_sw_fence_complete(cb->fence); +} + +int __i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence, + struct dma_fence *dma, + struct i915_sw_dma_fence_cb *cb) +{ + int ret; + + debug_fence_assert(fence); + + if (dma_fence_is_signaled(dma)) + return 0; + + cb->fence = fence; + i915_sw_fence_await(fence); + + ret = dma_fence_add_callback(dma, &cb->base, __dma_i915_sw_fence_wake); + if (ret == 0) { + ret = 1; + } else { + i915_sw_fence_complete(fence); + if (ret == -ENOENT) /* fence already signaled */ + ret = 0; + } + + return ret; +} + int i915_sw_fence_await_reservation(struct i915_sw_fence *fence, struct reservation_object *resv, const struct dma_fence_ops *exclude, diff --git a/drivers/gpu/drm/i915/i915_sw_fence.h b/drivers/gpu/drm/i915/i915_sw_fence.h index 0e055ea0179f..9cb5c3b307a6 100644 --- a/drivers/gpu/drm/i915/i915_sw_fence.h 
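A rough sketch of how the consume iterator above is intended to be used (the drain_priolist() wrapper and its submit() callback are invented for illustration, not part of this patch): each pass __ffs() picks the lowest set bit of p->used, the _safe list walk lets entries be unlinked as they go, and the bit is cleared once that sublist is empty, leaving the priolist ready for i915_priolist_free().

static void drain_priolist(struct i915_priolist *p,
			   void (*submit)(struct i915_request *rq))
{
	struct i915_request *rq, *rn;
	int i;

	priolist_for_each_request_consume(rq, rn, p, i) {
		/* unlink from the priolist before handing the request on */
		list_del_init(&rq->sched.link);
		submit(rq);
	}

	/* every sublist (and p->used) is now empty */
	i915_priolist_free(p);
}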
+++ b/drivers/gpu/drm/i915/i915_sw_fence.h @@ -9,14 +9,13 @@ #ifndef _I915_SW_FENCE_H_ #define _I915_SW_FENCE_H_ +#include <linux/dma-fence.h> #include <linux/gfp.h> #include <linux/kref.h> #include <linux/notifier.h> /* for NOTIFY_DONE */ #include <linux/wait.h> struct completion; -struct dma_fence; -struct dma_fence_ops; struct reservation_object; struct i915_sw_fence { @@ -68,10 +67,20 @@ int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence, int i915_sw_fence_await_sw_fence_gfp(struct i915_sw_fence *fence, struct i915_sw_fence *after, gfp_t gfp); + +struct i915_sw_dma_fence_cb { + struct dma_fence_cb base; + struct i915_sw_fence *fence; +}; + +int __i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence, + struct dma_fence *dma, + struct i915_sw_dma_fence_cb *cb); int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence, struct dma_fence *dma, unsigned long timeout, gfp_t gfp); + int i915_sw_fence_await_reservation(struct i915_sw_fence *fence, struct reservation_object *resv, const struct dma_fence_ops *exclude, @@ -79,6 +88,9 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence, unsigned long timeout, gfp_t gfp); +void i915_sw_fence_await(struct i915_sw_fence *fence); +void i915_sw_fence_complete(struct i915_sw_fence *fence); + static inline bool i915_sw_fence_signaled(const struct i915_sw_fence *fence) { return atomic_read(&fence->pending) <= 0; diff --git a/drivers/gpu/drm/i915/i915_timeline.c b/drivers/gpu/drm/i915/i915_timeline.c index b2202d2e58a2..2f4907364920 100644 --- a/drivers/gpu/drm/i915/i915_timeline.c +++ b/drivers/gpu/drm/i915/i915_timeline.c @@ -6,19 +6,32 @@ #include "i915_drv.h" -#include "i915_timeline.h" +#include "i915_active.h" #include "i915_syncmap.h" +#include "i915_timeline.h" + +#define ptr_set_bit(ptr, bit) ((typeof(ptr))((unsigned long)(ptr) | BIT(bit))) +#define ptr_test_bit(ptr, bit) ((unsigned long)(ptr) & BIT(bit)) struct i915_timeline_hwsp { - struct i915_vma *vma; + struct i915_gt_timelines *gt; struct list_head free_link; + struct i915_vma *vma; u64 free_bitmap; }; -static inline struct i915_timeline_hwsp * -i915_timeline_hwsp(const struct i915_timeline *tl) +struct i915_timeline_cacheline { + struct i915_active active; + struct i915_timeline_hwsp *hwsp; + void *vaddr; +#define CACHELINE_BITS 6 +#define CACHELINE_FREE CACHELINE_BITS +}; + +static inline struct drm_i915_private * +hwsp_to_i915(struct i915_timeline_hwsp *hwsp) { - return tl->hwsp_ggtt->private; + return container_of(hwsp->gt, struct drm_i915_private, gt.timelines); } static struct i915_vma *__hwsp_alloc(struct drm_i915_private *i915) @@ -71,6 +84,7 @@ hwsp_alloc(struct i915_timeline *timeline, unsigned int *cacheline) vma->private = hwsp; hwsp->vma = vma; hwsp->free_bitmap = ~0ull; + hwsp->gt = gt; spin_lock(>->hwsp_lock); list_add(&hwsp->free_link, >->hwsp_free_list); @@ -88,14 +102,9 @@ hwsp_alloc(struct i915_timeline *timeline, unsigned int *cacheline) return hwsp->vma; } -static void hwsp_free(struct i915_timeline *timeline) +static void __idle_hwsp_free(struct i915_timeline_hwsp *hwsp, int cacheline) { - struct i915_gt_timelines *gt = &timeline->i915->gt.timelines; - struct i915_timeline_hwsp *hwsp; - - hwsp = i915_timeline_hwsp(timeline); - if (!hwsp) /* leave global HWSP alone! 
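The point of exporting __i915_sw_fence_await_dma_fence() with a caller-supplied i915_sw_dma_fence_cb is that the await can be armed without any allocation (unlike i915_sw_fence_await_dma_fence(), which allocates its callback). A minimal sketch, with the embedding struct and helper invented for illustration:

struct my_fence_wait {
	struct i915_sw_fence fence;
	struct i915_sw_dma_fence_cb cb;	/* caller-owned storage for the dma-fence callback */
};

static int my_fence_wait_on(struct my_fence_wait *w, struct dma_fence *dma)
{
	/*
	 * Returns 1 if the await was queued, 0 if @dma had already
	 * signaled, or a negative error code, matching the helper
	 * added in i915_sw_fence.c above.
	 */
	return __i915_sw_fence_await_dma_fence(&w->fence, dma, &w->cb);
}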
*/ - return; + struct i915_gt_timelines *gt = hwsp->gt; spin_lock(>->hwsp_lock); @@ -103,7 +112,8 @@ static void hwsp_free(struct i915_timeline *timeline) if (!hwsp->free_bitmap) list_add_tail(&hwsp->free_link, >->hwsp_free_list); - hwsp->free_bitmap |= BIT_ULL(timeline->hwsp_offset / CACHELINE_BYTES); + GEM_BUG_ON(cacheline >= BITS_PER_TYPE(hwsp->free_bitmap)); + hwsp->free_bitmap |= BIT_ULL(cacheline); /* And if no one is left using it, give the page back to the system */ if (hwsp->free_bitmap == ~0ull) { @@ -115,9 +125,78 @@ static void hwsp_free(struct i915_timeline *timeline) spin_unlock(>->hwsp_lock); } +static void __idle_cacheline_free(struct i915_timeline_cacheline *cl) +{ + GEM_BUG_ON(!i915_active_is_idle(&cl->active)); + + i915_gem_object_unpin_map(cl->hwsp->vma->obj); + i915_vma_put(cl->hwsp->vma); + __idle_hwsp_free(cl->hwsp, ptr_unmask_bits(cl->vaddr, CACHELINE_BITS)); + + i915_active_fini(&cl->active); + kfree(cl); +} + +static void __cacheline_retire(struct i915_active *active) +{ + struct i915_timeline_cacheline *cl = + container_of(active, typeof(*cl), active); + + i915_vma_unpin(cl->hwsp->vma); + if (ptr_test_bit(cl->vaddr, CACHELINE_FREE)) + __idle_cacheline_free(cl); +} + +static struct i915_timeline_cacheline * +cacheline_alloc(struct i915_timeline_hwsp *hwsp, unsigned int cacheline) +{ + struct i915_timeline_cacheline *cl; + void *vaddr; + + GEM_BUG_ON(cacheline >= BIT(CACHELINE_BITS)); + + cl = kmalloc(sizeof(*cl), GFP_KERNEL); + if (!cl) + return ERR_PTR(-ENOMEM); + + vaddr = i915_gem_object_pin_map(hwsp->vma->obj, I915_MAP_WB); + if (IS_ERR(vaddr)) { + kfree(cl); + return ERR_CAST(vaddr); + } + + i915_vma_get(hwsp->vma); + cl->hwsp = hwsp; + cl->vaddr = page_pack_bits(vaddr, cacheline); + + i915_active_init(hwsp_to_i915(hwsp), &cl->active, __cacheline_retire); + + return cl; +} + +static void cacheline_acquire(struct i915_timeline_cacheline *cl) +{ + if (cl && i915_active_acquire(&cl->active)) + __i915_vma_pin(cl->hwsp->vma); +} + +static void cacheline_release(struct i915_timeline_cacheline *cl) +{ + if (cl) + i915_active_release(&cl->active); +} + +static void cacheline_free(struct i915_timeline_cacheline *cl) +{ + GEM_BUG_ON(ptr_test_bit(cl->vaddr, CACHELINE_FREE)); + cl->vaddr = ptr_set_bit(cl->vaddr, CACHELINE_FREE); + + if (i915_active_is_idle(&cl->active)) + __idle_cacheline_free(cl); +} + int i915_timeline_init(struct drm_i915_private *i915, struct i915_timeline *timeline, - const char *name, struct i915_vma *hwsp) { void *vaddr; @@ -133,35 +212,46 @@ int i915_timeline_init(struct drm_i915_private *i915, BUILD_BUG_ON(KSYNCMAP < I915_NUM_ENGINES); timeline->i915 = i915; - timeline->name = name; timeline->pin_count = 0; timeline->has_initial_breadcrumb = !hwsp; + timeline->hwsp_cacheline = NULL; - timeline->hwsp_offset = I915_GEM_HWS_SEQNO_ADDR; if (!hwsp) { + struct i915_timeline_cacheline *cl; unsigned int cacheline; hwsp = hwsp_alloc(timeline, &cacheline); if (IS_ERR(hwsp)) return PTR_ERR(hwsp); + cl = cacheline_alloc(hwsp->private, cacheline); + if (IS_ERR(cl)) { + __idle_hwsp_free(hwsp->private, cacheline); + return PTR_ERR(cl); + } + + timeline->hwsp_cacheline = cl; timeline->hwsp_offset = cacheline * CACHELINE_BYTES; - } - timeline->hwsp_ggtt = i915_vma_get(hwsp); - vaddr = i915_gem_object_pin_map(hwsp->obj, I915_MAP_WB); - if (IS_ERR(vaddr)) { - hwsp_free(timeline); - i915_vma_put(hwsp); - return PTR_ERR(vaddr); + vaddr = page_mask_bits(cl->vaddr); + } else { + timeline->hwsp_offset = I915_GEM_HWS_SEQNO_ADDR; + + vaddr = 
i915_gem_object_pin_map(hwsp->obj, I915_MAP_WB); + if (IS_ERR(vaddr)) + return PTR_ERR(vaddr); } timeline->hwsp_seqno = memset(vaddr + timeline->hwsp_offset, 0, CACHELINE_BYTES); + timeline->hwsp_ggtt = i915_vma_get(hwsp); + GEM_BUG_ON(timeline->hwsp_offset >= hwsp->size); + timeline->fence_context = dma_fence_context_alloc(1); spin_lock_init(&timeline->lock); + mutex_init(&timeline->mutex); INIT_ACTIVE_REQUEST(&timeline->barrier); INIT_ACTIVE_REQUEST(&timeline->last_request); @@ -239,15 +329,17 @@ void i915_timeline_fini(struct i915_timeline *timeline) GEM_BUG_ON(i915_active_request_isset(&timeline->barrier)); i915_syncmap_free(&timeline->sync); - hwsp_free(timeline); - i915_gem_object_unpin_map(timeline->hwsp_ggtt->obj); + if (timeline->hwsp_cacheline) + cacheline_free(timeline->hwsp_cacheline); + else + i915_gem_object_unpin_map(timeline->hwsp_ggtt->obj); + i915_vma_put(timeline->hwsp_ggtt); } struct i915_timeline * i915_timeline_create(struct drm_i915_private *i915, - const char *name, struct i915_vma *global_hwsp) { struct i915_timeline *timeline; @@ -257,7 +349,7 @@ i915_timeline_create(struct drm_i915_private *i915, if (!timeline) return ERR_PTR(-ENOMEM); - err = i915_timeline_init(i915, timeline, name, global_hwsp); + err = i915_timeline_init(i915, timeline, global_hwsp); if (err) { kfree(timeline); return ERR_PTR(err); @@ -284,6 +376,7 @@ int i915_timeline_pin(struct i915_timeline *tl) i915_ggtt_offset(tl->hwsp_ggtt) + offset_in_page(tl->hwsp_offset); + cacheline_acquire(tl->hwsp_cacheline); timeline_add_to_active(tl); return 0; @@ -293,6 +386,157 @@ unpin: return err; } +static u32 timeline_advance(struct i915_timeline *tl) +{ + GEM_BUG_ON(!tl->pin_count); + GEM_BUG_ON(tl->seqno & tl->has_initial_breadcrumb); + + return tl->seqno += 1 + tl->has_initial_breadcrumb; +} + +static void timeline_rollback(struct i915_timeline *tl) +{ + tl->seqno -= 1 + tl->has_initial_breadcrumb; +} + +static noinline int +__i915_timeline_get_seqno(struct i915_timeline *tl, + struct i915_request *rq, + u32 *seqno) +{ + struct i915_timeline_cacheline *cl; + unsigned int cacheline; + struct i915_vma *vma; + void *vaddr; + int err; + + /* + * If there is an outstanding GPU reference to this cacheline, + * such as it being sampled by a HW semaphore on another timeline, + * we cannot wraparound our seqno value (the HW semaphore does + * a strict greater-than-or-equals compare, not i915_seqno_passed). + * So if the cacheline is still busy, we must detach ourselves + * from it and leave it inflight alongside its users. + * + * However, if nobody is watching and we can guarantee that nobody + * will, we could simply reuse the same cacheline. + * + * if (i915_active_request_is_signaled(&tl->last_request) && + * i915_active_is_signaled(&tl->hwsp_cacheline->active)) + * return 0; + * + * That seems unlikely for a busy timeline that needed to wrap in + * the first place, so just replace the cacheline. 
+ */ + + vma = hwsp_alloc(tl, &cacheline); + if (IS_ERR(vma)) { + err = PTR_ERR(vma); + goto err_rollback; + } + + err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH); + if (err) { + __idle_hwsp_free(vma->private, cacheline); + goto err_rollback; + } + + cl = cacheline_alloc(vma->private, cacheline); + if (IS_ERR(cl)) { + err = PTR_ERR(cl); + __idle_hwsp_free(vma->private, cacheline); + goto err_unpin; + } + GEM_BUG_ON(cl->hwsp->vma != vma); + + /* + * Attach the old cacheline to the current request, so that we only + * free it after the current request is retired, which ensures that + * all writes into the cacheline from previous requests are complete. + */ + err = i915_active_ref(&tl->hwsp_cacheline->active, + tl->fence_context, rq); + if (err) + goto err_cacheline; + + cacheline_release(tl->hwsp_cacheline); /* ownership now xfered to rq */ + cacheline_free(tl->hwsp_cacheline); + + i915_vma_unpin(tl->hwsp_ggtt); /* binding kept alive by old cacheline */ + i915_vma_put(tl->hwsp_ggtt); + + tl->hwsp_ggtt = i915_vma_get(vma); + + vaddr = page_mask_bits(cl->vaddr); + tl->hwsp_offset = cacheline * CACHELINE_BYTES; + tl->hwsp_seqno = + memset(vaddr + tl->hwsp_offset, 0, CACHELINE_BYTES); + + tl->hwsp_offset += i915_ggtt_offset(vma); + + cacheline_acquire(cl); + tl->hwsp_cacheline = cl; + + *seqno = timeline_advance(tl); + GEM_BUG_ON(i915_seqno_passed(*tl->hwsp_seqno, *seqno)); + return 0; + +err_cacheline: + cacheline_free(cl); +err_unpin: + i915_vma_unpin(vma); +err_rollback: + timeline_rollback(tl); + return err; +} + +int i915_timeline_get_seqno(struct i915_timeline *tl, + struct i915_request *rq, + u32 *seqno) +{ + *seqno = timeline_advance(tl); + + /* Replace the HWSP on wraparound for HW semaphores */ + if (unlikely(!*seqno && tl->hwsp_cacheline)) + return __i915_timeline_get_seqno(tl, rq, seqno); + + return 0; +} + +static int cacheline_ref(struct i915_timeline_cacheline *cl, + struct i915_request *rq) +{ + return i915_active_ref(&cl->active, rq->fence.context, rq); +} + +int i915_timeline_read_hwsp(struct i915_request *from, + struct i915_request *to, + u32 *hwsp) +{ + struct i915_timeline_cacheline *cl = from->hwsp_cacheline; + struct i915_timeline *tl = from->timeline; + int err; + + GEM_BUG_ON(to->timeline == tl); + + mutex_lock_nested(&tl->mutex, SINGLE_DEPTH_NESTING); + err = i915_request_completed(from); + if (!err) + err = cacheline_ref(cl, to); + if (!err) { + if (likely(cl == tl->hwsp_cacheline)) { + *hwsp = tl->hwsp_offset; + } else { /* across a seqno wrap, recover the original offset */ + *hwsp = i915_ggtt_offset(cl->hwsp->vma) + + ptr_unmask_bits(cl->vaddr, CACHELINE_BITS) * + CACHELINE_BYTES; + } + } + mutex_unlock(&tl->mutex); + + return err; +} + void i915_timeline_unpin(struct i915_timeline *tl) { GEM_BUG_ON(!tl->pin_count); @@ -300,6 +544,7 @@ void i915_timeline_unpin(struct i915_timeline *tl) return; timeline_remove_from_active(tl); + cacheline_release(tl->hwsp_cacheline); /* * Since this timeline is idle, all bariers upon which we were waiting diff --git a/drivers/gpu/drm/i915/i915_timeline.h b/drivers/gpu/drm/i915/i915_timeline.h index 7bec7d2e45bf..c1e47a423d85 100644 --- a/drivers/gpu/drm/i915/i915_timeline.h +++ b/drivers/gpu/drm/i915/i915_timeline.h @@ -25,76 +25,13 @@ #ifndef I915_TIMELINE_H #define I915_TIMELINE_H -#include <linux/list.h> -#include <linux/kref.h> +#include <linux/lockdep.h> -#include "i915_active.h" -#include "i915_request.h" #include "i915_syncmap.h" -#include "i915_utils.h" - -struct i915_vma; -struct i915_timeline_hwsp; - -struct 
i915_timeline { - u64 fence_context; - u32 seqno; - - spinlock_t lock; -#define TIMELINE_CLIENT 0 /* default subclass */ -#define TIMELINE_ENGINE 1 - - unsigned int pin_count; - const u32 *hwsp_seqno; - struct i915_vma *hwsp_ggtt; - u32 hwsp_offset; - - bool has_initial_breadcrumb; - - /** - * List of breadcrumbs associated with GPU requests currently - * outstanding. - */ - struct list_head requests; - - /* Contains an RCU guarded pointer to the last request. No reference is - * held to the request, users must carefully acquire a reference to - * the request using i915_active_request_get_request_rcu(), or hold the - * struct_mutex. - */ - struct i915_active_request last_request; - - /** - * We track the most recent seqno that we wait on in every context so - * that we only have to emit a new await and dependency on a more - * recent sync point. As the contexts may be executed out-of-order, we - * have to track each individually and can not rely on an absolute - * global_seqno. When we know that all tracked fences are completed - * (i.e. when the driver is idle), we know that the syncmap is - * redundant and we can discard it without loss of generality. - */ - struct i915_syncmap *sync; - - /** - * Barrier provides the ability to serialize ordering between different - * timelines. - * - * Users can call i915_timeline_set_barrier which will make all - * subsequent submissions to this timeline be executed only after the - * barrier has been completed. - */ - struct i915_active_request barrier; - - struct list_head link; - const char *name; - struct drm_i915_private *i915; - - struct kref kref; -}; +#include "i915_timeline_types.h" int i915_timeline_init(struct drm_i915_private *i915, struct i915_timeline *tl, - const char *name, struct i915_vma *hwsp); void i915_timeline_fini(struct i915_timeline *tl); @@ -119,7 +56,6 @@ i915_timeline_set_subclass(struct i915_timeline *timeline, struct i915_timeline * i915_timeline_create(struct drm_i915_private *i915, - const char *name, struct i915_vma *global_hwsp); static inline struct i915_timeline * @@ -160,8 +96,15 @@ static inline bool i915_timeline_sync_is_later(struct i915_timeline *tl, } int i915_timeline_pin(struct i915_timeline *tl); +int i915_timeline_get_seqno(struct i915_timeline *tl, + struct i915_request *rq, + u32 *seqno); void i915_timeline_unpin(struct i915_timeline *tl); +int i915_timeline_read_hwsp(struct i915_request *from, + struct i915_request *until, + u32 *hwsp_offset); + void i915_timelines_init(struct drm_i915_private *i915); void i915_timelines_park(struct drm_i915_private *i915); void i915_timelines_fini(struct drm_i915_private *i915); diff --git a/drivers/gpu/drm/i915/i915_timeline_types.h b/drivers/gpu/drm/i915/i915_timeline_types.h new file mode 100644 index 000000000000..12ba3c573aa0 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_timeline_types.h @@ -0,0 +1,79 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2016 Intel Corporation + */ + +#ifndef __I915_TIMELINE_TYPES_H__ +#define __I915_TIMELINE_TYPES_H__ + +#include <linux/list.h> +#include <linux/kref.h> +#include <linux/types.h> + +#include "i915_active.h" + +struct drm_i915_private; +struct i915_vma; +struct i915_timeline_cacheline; +struct i915_syncmap; + +struct i915_timeline { + u64 fence_context; + u32 seqno; + + spinlock_t lock; +#define TIMELINE_CLIENT 0 /* default subclass */ +#define TIMELINE_ENGINE 1 + struct mutex mutex; /* protects the flow of requests */ + + unsigned int pin_count; + const u32 *hwsp_seqno; + struct i915_vma *hwsp_ggtt; + u32 
hwsp_offset; + + struct i915_timeline_cacheline *hwsp_cacheline; + + bool has_initial_breadcrumb; + + /** + * List of breadcrumbs associated with GPU requests currently + * outstanding. + */ + struct list_head requests; + + /* Contains an RCU guarded pointer to the last request. No reference is + * held to the request, users must carefully acquire a reference to + * the request using i915_active_request_get_request_rcu(), or hold the + * struct_mutex. + */ + struct i915_active_request last_request; + + /** + * We track the most recent seqno that we wait on in every context so + * that we only have to emit a new await and dependency on a more + * recent sync point. As the contexts may be executed out-of-order, we + * have to track each individually and can not rely on an absolute + * global_seqno. When we know that all tracked fences are completed + * (i.e. when the driver is idle), we know that the syncmap is + * redundant and we can discard it without loss of generality. + */ + struct i915_syncmap *sync; + + /** + * Barrier provides the ability to serialize ordering between different + * timelines. + * + * Users can call i915_timeline_set_barrier which will make all + * subsequent submissions to this timeline be executed only after the + * barrier has been completed. + */ + struct i915_active_request barrier; + + struct list_head link; + struct drm_i915_private *i915; + + struct kref kref; +}; + +#endif /* __I915_TIMELINE_TYPES_H__ */ diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h index eab313c3163c..12893304c8f8 100644 --- a/drivers/gpu/drm/i915/i915_trace.h +++ b/drivers/gpu/drm/i915/i915_trace.h @@ -18,6 +18,87 @@ /* watermark/fifo updates */ +TRACE_EVENT(intel_pipe_enable, + TP_PROTO(struct drm_i915_private *dev_priv, enum pipe pipe), + TP_ARGS(dev_priv, pipe), + + TP_STRUCT__entry( + __array(u32, frame, 3) + __array(u32, scanline, 3) + __field(enum pipe, pipe) + ), + + TP_fast_assign( + enum pipe _pipe; + for_each_pipe(dev_priv, _pipe) { + __entry->frame[_pipe] = + dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm, _pipe); + __entry->scanline[_pipe] = + intel_get_crtc_scanline(intel_get_crtc_for_pipe(dev_priv, _pipe)); + } + __entry->pipe = pipe; + ), + + TP_printk("pipe %c enable, pipe A: frame=%u, scanline=%u, pipe B: frame=%u, scanline=%u, pipe C: frame=%u, scanline=%u", + pipe_name(__entry->pipe), + __entry->frame[PIPE_A], __entry->scanline[PIPE_A], + __entry->frame[PIPE_B], __entry->scanline[PIPE_B], + __entry->frame[PIPE_C], __entry->scanline[PIPE_C]) +); + +TRACE_EVENT(intel_pipe_disable, + TP_PROTO(struct drm_i915_private *dev_priv, enum pipe pipe), + TP_ARGS(dev_priv, pipe), + + TP_STRUCT__entry( + __array(u32, frame, 3) + __array(u32, scanline, 3) + __field(enum pipe, pipe) + ), + + TP_fast_assign( + enum pipe _pipe; + for_each_pipe(dev_priv, _pipe) { + __entry->frame[_pipe] = + dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm, _pipe); + __entry->scanline[_pipe] = + intel_get_crtc_scanline(intel_get_crtc_for_pipe(dev_priv, _pipe)); + } + __entry->pipe = pipe; + ), + + TP_printk("pipe %c disable, pipe A: frame=%u, scanline=%u, pipe B: frame=%u, scanline=%u, pipe C: frame=%u, scanline=%u", + pipe_name(__entry->pipe), + __entry->frame[PIPE_A], __entry->scanline[PIPE_A], + __entry->frame[PIPE_B], __entry->scanline[PIPE_B], + __entry->frame[PIPE_C], __entry->scanline[PIPE_C]) +); + +TRACE_EVENT(intel_pipe_crc, + TP_PROTO(struct intel_crtc *crtc, const u32 *crcs), + TP_ARGS(crtc, crcs), + + TP_STRUCT__entry( + __field(enum pipe, 
pipe) + __field(u32, frame) + __field(u32, scanline) + __array(u32, crcs, 5) + ), + + TP_fast_assign( + __entry->pipe = crtc->pipe; + __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev, + crtc->pipe); + __entry->scanline = intel_get_crtc_scanline(crtc); + memcpy(__entry->crcs, crcs, sizeof(__entry->crcs)); + ), + + TP_printk("pipe %c, frame=%u, scanline=%u crc=%08x %08x %08x %08x %08x", + pipe_name(__entry->pipe), __entry->frame, __entry->scanline, + __entry->crcs[0], __entry->crcs[1], __entry->crcs[2], + __entry->crcs[3], __entry->crcs[4]) +); + TRACE_EVENT(intel_cpu_fifo_underrun, TP_PROTO(struct drm_i915_private *dev_priv, enum pipe pipe), TP_ARGS(dev_priv, pipe), @@ -627,7 +708,6 @@ DECLARE_EVENT_CLASS(i915_request, __field(u16, class) __field(u16, instance) __field(u32, seqno) - __field(u32, global) ), TP_fast_assign( @@ -637,13 +717,11 @@ DECLARE_EVENT_CLASS(i915_request, __entry->instance = rq->engine->instance; __entry->ctx = rq->fence.context; __entry->seqno = rq->fence.seqno; - __entry->global = rq->global_seqno; ), - TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, global=%u", + TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u", __entry->dev, __entry->class, __entry->instance, - __entry->hw_id, __entry->ctx, __entry->seqno, - __entry->global) + __entry->hw_id, __entry->ctx, __entry->seqno) ); DEFINE_EVENT(i915_request, i915_request_add, @@ -673,7 +751,6 @@ TRACE_EVENT(i915_request_in, __field(u16, class) __field(u16, instance) __field(u32, seqno) - __field(u32, global_seqno) __field(u32, port) __field(u32, prio) ), @@ -685,15 +762,14 @@ TRACE_EVENT(i915_request_in, __entry->instance = rq->engine->instance; __entry->ctx = rq->fence.context; __entry->seqno = rq->fence.seqno; - __entry->global_seqno = rq->global_seqno; __entry->prio = rq->sched.attr.priority; __entry->port = port; ), - TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, prio=%u, global=%u, port=%u", + TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, prio=%u, port=%u", __entry->dev, __entry->class, __entry->instance, __entry->hw_id, __entry->ctx, __entry->seqno, - __entry->prio, __entry->global_seqno, __entry->port) + __entry->prio, __entry->port) ); TRACE_EVENT(i915_request_out, @@ -707,7 +783,6 @@ TRACE_EVENT(i915_request_out, __field(u16, class) __field(u16, instance) __field(u32, seqno) - __field(u32, global_seqno) __field(u32, completed) ), @@ -718,14 +793,13 @@ TRACE_EVENT(i915_request_out, __entry->instance = rq->engine->instance; __entry->ctx = rq->fence.context; __entry->seqno = rq->fence.seqno; - __entry->global_seqno = rq->global_seqno; __entry->completed = i915_request_completed(rq); ), - TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, global=%u, completed?=%u", + TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, completed?=%u", __entry->dev, __entry->class, __entry->instance, __entry->hw_id, __entry->ctx, __entry->seqno, - __entry->global_seqno, __entry->completed) + __entry->completed) ); #else @@ -768,7 +842,6 @@ TRACE_EVENT(i915_request_wait_begin, __field(u16, class) __field(u16, instance) __field(u32, seqno) - __field(u32, global) __field(unsigned int, flags) ), @@ -785,14 +858,13 @@ TRACE_EVENT(i915_request_wait_begin, __entry->instance = rq->engine->instance; __entry->ctx = rq->fence.context; __entry->seqno = rq->fence.seqno; - __entry->global = rq->global_seqno; __entry->flags = flags; ), - TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, global=%u, blocking=%u, flags=0x%x", + 
TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, blocking=%u, flags=0x%x", __entry->dev, __entry->class, __entry->instance, __entry->hw_id, __entry->ctx, __entry->seqno, - __entry->global, !!(__entry->flags & I915_WAIT_LOCKED), + !!(__entry->flags & I915_WAIT_LOCKED), __entry->flags) ); diff --git a/drivers/gpu/drm/i915/i915_user_extensions.c b/drivers/gpu/drm/i915/i915_user_extensions.c new file mode 100644 index 000000000000..c822d0aafd2d --- /dev/null +++ b/drivers/gpu/drm/i915/i915_user_extensions.c @@ -0,0 +1,61 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2018 Intel Corporation + */ + +#include <linux/nospec.h> +#include <linux/sched/signal.h> +#include <linux/uaccess.h> + +#include <uapi/drm/i915_drm.h> + +#include "i915_user_extensions.h" +#include "i915_utils.h" + +int i915_user_extensions(struct i915_user_extension __user *ext, + const i915_user_extension_fn *tbl, + unsigned int count, + void *data) +{ + unsigned int stackdepth = 512; + + while (ext) { + int i, err; + u32 name; + u64 next; + + if (!stackdepth--) /* recursion vs useful flexibility */ + return -E2BIG; + + err = check_user_mbz(&ext->flags); + if (err) + return err; + + for (i = 0; i < ARRAY_SIZE(ext->rsvd); i++) { + err = check_user_mbz(&ext->rsvd[i]); + if (err) + return err; + } + + if (get_user(name, &ext->name)) + return -EFAULT; + + err = -EINVAL; + if (name < count) { + name = array_index_nospec(name, count); + if (tbl[name]) + err = tbl[name](ext, data); + } + if (err) + return err; + + if (get_user(next, &ext->next_extension) || + overflows_type(next, ext)) + return -EFAULT; + + ext = u64_to_user_ptr(next); + } + + return 0; +} diff --git a/drivers/gpu/drm/i915/i915_user_extensions.h b/drivers/gpu/drm/i915/i915_user_extensions.h new file mode 100644 index 000000000000..a14bf6bba9a1 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_user_extensions.h @@ -0,0 +1,20 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2018 Intel Corporation + */ + +#ifndef I915_USER_EXTENSIONS_H +#define I915_USER_EXTENSIONS_H + +struct i915_user_extension; + +typedef int (*i915_user_extension_fn)(struct i915_user_extension __user *ext, + void *data); + +int i915_user_extensions(struct i915_user_extension __user *ext, + const i915_user_extension_fn *tbl, + unsigned int count, + void *data); + +#endif /* I915_USER_EXTENSIONS_H */ diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h index 9726df37c4c4..2dbe8933b50a 100644 --- a/drivers/gpu/drm/i915/i915_utils.h +++ b/drivers/gpu/drm/i915/i915_utils.h @@ -105,6 +105,37 @@ __T; \ }) +/* + * container_of_user: Extract the superclass from a pointer to a member. + * + * Exactly like container_of() with the exception that it plays nicely + * with sparse for __user @ptr. + */ +#define container_of_user(ptr, type, member) ({ \ + void __user *__mptr = (void __user *)(ptr); \ + BUILD_BUG_ON_MSG(!__same_type(*(ptr), ((type *)0)->member) && \ + !__same_type(*(ptr), void), \ + "pointer type mismatch in container_of()"); \ + ((type __user *)(__mptr - offsetof(type, member))); }) + +/* + * check_user_mbz: Check that a user value exists and is zero + * + * Frequently in our uABI we reserve space for future extensions, and + * two ensure that userspace is prepared we enforce that space must + * be zero. (Then any future extension can safely assume a default value + * of 0.) + * + * check_user_mbz() combines checking that the user pointer is accessible + * and that the contained value is zero. 
+ * + * Returns: -EFAULT if not accessible, -EINVAL if !zero, or 0 on success. + */ +#define check_user_mbz(U) ({ \ + typeof(*(U)) mbz__; \ + get_user(mbz__, (U)) ? -EFAULT : mbz__ ? -EINVAL : 0; \ +}) + static inline u64 ptr_to_u64(const void *ptr) { return (uintptr_t)ptr; @@ -123,12 +154,6 @@ static inline u64 ptr_to_u64(const void *ptr) #include <linux/list.h> -static inline int list_is_first(const struct list_head *list, - const struct list_head *head) -{ - return head->next == list; -} - static inline void __list_del_many(struct list_head *head, struct list_head *first) { diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c index 869cf4a3b6de..94d3992b599d 100644 --- a/drivers/gpu/drm/i915/i915_vgpu.c +++ b/drivers/gpu/drm/i915/i915_vgpu.c @@ -60,30 +60,31 @@ */ void i915_check_vgpu(struct drm_i915_private *dev_priv) { + struct intel_uncore *uncore = &dev_priv->uncore; u64 magic; u16 version_major; BUILD_BUG_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE); - magic = __raw_i915_read64(dev_priv, vgtif_reg(magic)); + magic = __raw_uncore_read64(uncore, vgtif_reg(magic)); if (magic != VGT_MAGIC) return; - version_major = __raw_i915_read16(dev_priv, vgtif_reg(version_major)); + version_major = __raw_uncore_read16(uncore, vgtif_reg(version_major)); if (version_major < VGT_VERSION_MAJOR) { DRM_INFO("VGT interface version mismatch!\n"); return; } - dev_priv->vgpu.caps = __raw_i915_read32(dev_priv, vgtif_reg(vgt_caps)); + dev_priv->vgpu.caps = __raw_uncore_read32(uncore, vgtif_reg(vgt_caps)); dev_priv->vgpu.active = true; DRM_INFO("Virtual GPU for Intel GVT-g detected.\n"); } -bool intel_vgpu_has_full_48bit_ppgtt(struct drm_i915_private *dev_priv) +bool intel_vgpu_has_full_ppgtt(struct drm_i915_private *dev_priv) { - return dev_priv->vgpu.caps & VGT_CAPS_FULL_48BIT_PPGTT; + return dev_priv->vgpu.caps & VGT_CAPS_FULL_PPGTT; } struct _balloon_info_ { diff --git a/drivers/gpu/drm/i915/i915_vgpu.h b/drivers/gpu/drm/i915/i915_vgpu.h index 551acc390046..ebe1b7bced98 100644 --- a/drivers/gpu/drm/i915/i915_vgpu.h +++ b/drivers/gpu/drm/i915/i915_vgpu.h @@ -28,7 +28,7 @@ void i915_check_vgpu(struct drm_i915_private *dev_priv); -bool intel_vgpu_has_full_48bit_ppgtt(struct drm_i915_private *dev_priv); +bool intel_vgpu_has_full_ppgtt(struct drm_i915_private *dev_priv); static inline bool intel_vgpu_has_hwsp_emulation(struct drm_i915_private *dev_priv) diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index b713bed20c38..36726392e737 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -25,11 +25,27 @@ #include "i915_vma.h" #include "i915_drv.h" +#include "i915_globals.h" #include "intel_ringbuffer.h" #include "intel_frontbuffer.h" #include <drm/drm_gem.h> +static struct i915_global_vma { + struct i915_global base; + struct kmem_cache *slab_vmas; +} global; + +struct i915_vma *i915_vma_alloc(void) +{ + return kmem_cache_zalloc(global.slab_vmas, GFP_KERNEL); +} + +void i915_vma_free(struct i915_vma *vma) +{ + return kmem_cache_free(global.slab_vmas, vma); +} + #if IS_ENABLED(CONFIG_DRM_I915_ERRLOG_GEM) && IS_ENABLED(CONFIG_DRM_DEBUG_MM) #include <linux/stackdepot.h> @@ -115,7 +131,7 @@ vma_create(struct drm_i915_gem_object *obj, /* The aliasing_ppgtt should never be used directly! 
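For context, a sketch of how the extension walker and check_user_mbz() fit together in a hypothetical ioctl; the hypothetical_ext struct, its handler and the table are made up for illustration and are not part of the patch:

struct hypothetical_ext {
	struct i915_user_extension base;
	__u32 value;
	__u32 mbz;		/* reserved, must be zero */
};

static int handle_hypothetical_ext(struct i915_user_extension __user *base,
				   void *data)
{
	struct hypothetical_ext __user *ext =
		container_of_user(base, struct hypothetical_ext, base);
	u32 value;
	int err;

	err = check_user_mbz(&ext->mbz);
	if (err)
		return err;

	if (get_user(value, &ext->value))
		return -EFAULT;

	*(u32 *)data = value;
	return 0;
}

static const i915_user_extension_fn hypothetical_extension_tbl[] = {
	[0] = handle_hypothetical_ext,
};

/*
 * i915_user_extensions() then walks the user-supplied chain, verifies
 * the flags and rsvd fields are zero, and dispatches on ->name:
 *
 *	err = i915_user_extensions(u64_to_user_ptr(args->extensions),
 *				   hypothetical_extension_tbl,
 *				   ARRAY_SIZE(hypothetical_extension_tbl),
 *				   &value);
 */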
*/ GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->vm); - vma = kmem_cache_zalloc(vm->i915->vmas, GFP_KERNEL); + vma = i915_vma_alloc(); if (vma == NULL) return ERR_PTR(-ENOMEM); @@ -190,7 +206,7 @@ vma_create(struct drm_i915_gem_object *obj, cmp = i915_vma_compare(pos, vm, view); if (cmp == 0) { spin_unlock(&obj->vma.lock); - kmem_cache_free(vm->i915->vmas, vma); + i915_vma_free(vma); return pos; } @@ -222,7 +238,7 @@ vma_create(struct drm_i915_gem_object *obj, return vma; err_vma: - kmem_cache_free(vm->i915->vmas, vma); + i915_vma_free(vma); return ERR_PTR(-E2BIG); } @@ -803,8 +819,6 @@ void i915_vma_reopen(struct i915_vma *vma) static void __i915_vma_destroy(struct i915_vma *vma) { - struct drm_i915_private *i915 = vma->vm->i915; - GEM_BUG_ON(vma->node.allocated); GEM_BUG_ON(vma->fence); @@ -825,7 +839,7 @@ static void __i915_vma_destroy(struct i915_vma *vma) i915_active_fini(&vma->active); - kmem_cache_free(i915->vmas, vma); + i915_vma_free(vma); } void i915_vma_destroy(struct i915_vma *vma) @@ -1041,3 +1055,28 @@ unpin: #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) #include "selftests/i915_vma.c" #endif + +static void i915_global_vma_shrink(void) +{ + kmem_cache_shrink(global.slab_vmas); +} + +static void i915_global_vma_exit(void) +{ + kmem_cache_destroy(global.slab_vmas); +} + +static struct i915_global_vma global = { { + .shrink = i915_global_vma_shrink, + .exit = i915_global_vma_exit, +} }; + +int __init i915_global_vma_init(void) +{ + global.slab_vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN); + if (!global.slab_vmas) + return -ENOMEM; + + i915_global_register(&global.base); + return 0; +} diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h index 7c742027f866..6eab70953a57 100644 --- a/drivers/gpu/drm/i915/i915_vma.h +++ b/drivers/gpu/drm/i915/i915_vma.h @@ -440,4 +440,7 @@ void i915_vma_parked(struct drm_i915_private *i915); list_for_each_entry(V, &(OBJ)->vma.list, obj_link) \ for_each_until(!i915_vma_is_ggtt(V)) +struct i915_vma *i915_vma_alloc(void); +void i915_vma_free(struct i915_vma *vma); + #endif diff --git a/drivers/gpu/drm/i915/icl_dsi.c b/drivers/gpu/drm/i915/icl_dsi.c index 73a7bee24a66..b67ffaa283dc 100644 --- a/drivers/gpu/drm/i915/icl_dsi.c +++ b/drivers/gpu/drm/i915/icl_dsi.c @@ -246,13 +246,13 @@ static void dsi_program_swing_and_deemphasis(struct intel_encoder *encoder) for (lane = 0; lane <= 3; lane++) { /* Bspec: must not use GRP register for write */ - tmp = I915_READ(ICL_PORT_TX_DW4_LN(port, lane)); + tmp = I915_READ(ICL_PORT_TX_DW4_LN(lane, port)); tmp &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK | CURSOR_COEFF_MASK); tmp |= POST_CURSOR_1(0x0); tmp |= POST_CURSOR_2(0x0); tmp |= CURSOR_COEFF(0x3f); - I915_WRITE(ICL_PORT_TX_DW4_LN(port, lane), tmp); + I915_WRITE(ICL_PORT_TX_DW4_LN(lane, port), tmp); } } } @@ -390,11 +390,11 @@ static void gen11_dsi_config_phy_lanes_sequence(struct intel_encoder *encoder) tmp &= ~LOADGEN_SELECT; I915_WRITE(ICL_PORT_TX_DW4_AUX(port), tmp); for (lane = 0; lane <= 3; lane++) { - tmp = I915_READ(ICL_PORT_TX_DW4_LN(port, lane)); + tmp = I915_READ(ICL_PORT_TX_DW4_LN(lane, port)); tmp &= ~LOADGEN_SELECT; if (lane != 2) tmp |= LOADGEN_SELECT; - I915_WRITE(ICL_PORT_TX_DW4_LN(port, lane), tmp); + I915_WRITE(ICL_PORT_TX_DW4_LN(lane, port), tmp); } } @@ -861,7 +861,8 @@ static void gen11_dsi_enable_transcoder(struct intel_encoder *encoder) I915_WRITE(PIPECONF(dsi_trans), tmp); /* wait for transcoder to be enabled */ - if (intel_wait_for_register(dev_priv, PIPECONF(dsi_trans), + if 
(intel_wait_for_register(&dev_priv->uncore, + PIPECONF(dsi_trans), I965_PIPECONF_ACTIVE, I965_PIPECONF_ACTIVE, 10)) DRM_ERROR("DSI transcoder not enabled\n"); @@ -1039,7 +1040,8 @@ static void gen11_dsi_disable_transcoder(struct intel_encoder *encoder) I915_WRITE(PIPECONF(dsi_trans), tmp); /* wait for transcoder to be disabled */ - if (intel_wait_for_register(dev_priv, PIPECONF(dsi_trans), + if (intel_wait_for_register(&dev_priv->uncore, + PIPECONF(dsi_trans), I965_PIPECONF_ACTIVE, 0, 50)) DRM_ERROR("DSI trancoder not disabled\n"); } @@ -1179,11 +1181,10 @@ static void gen11_dsi_get_config(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); - u32 pll_id; /* FIXME: adapt icl_ddi_clock_get() for DSI and use that? */ - pll_id = intel_get_shared_dpll_id(dev_priv, pipe_config->shared_dpll); - pipe_config->port_clock = cnl_calc_wrpll_link(dev_priv, pll_id); + pipe_config->port_clock = + cnl_calc_wrpll_link(dev_priv, &pipe_config->dpll_hw_state); pipe_config->base.adjusted_mode.crtc_clock = intel_dsi->pclk; pipe_config->output_types |= BIT(INTEL_OUTPUT_DSI); } @@ -1361,7 +1362,7 @@ void icl_dsi_init(struct drm_i915_private *dev_priv) struct intel_encoder *encoder; struct intel_connector *intel_connector; struct drm_connector *connector; - struct drm_display_mode *scan, *fixed_mode = NULL; + struct drm_display_mode *fixed_mode; enum port port; if (!intel_bios_is_dsi_present(dev_priv, &port)) @@ -1411,15 +1412,8 @@ void icl_dsi_init(struct drm_i915_private *dev_priv) /* attach connector to encoder */ intel_connector_attach_encoder(intel_connector, encoder); - /* fill mode info from VBT */ mutex_lock(&dev->mode_config.mutex); - intel_dsi_vbt_get_modes(intel_dsi); - list_for_each_entry(scan, &connector->probed_modes, head) { - if (scan->type & DRM_MODE_TYPE_PREFERRED) { - fixed_mode = drm_mode_duplicate(dev, scan); - break; - } - } + fixed_mode = intel_panel_vbt_fixed_mode(intel_connector); mutex_unlock(&dev->mode_config.mutex); if (!fixed_mode) { @@ -1427,12 +1421,9 @@ void icl_dsi_init(struct drm_i915_private *dev_priv) goto err; } - connector->display_info.width_mm = fixed_mode->width_mm; - connector->display_info.height_mm = fixed_mode->height_mm; intel_panel_init(&intel_connector->panel, fixed_mode, NULL); intel_panel_setup_backlight(connector, INVALID_PIPE); - if (dev_priv->vbt.dsi.config->dual_link) intel_dsi->ports = BIT(PORT_A) | BIT(PORT_B); else diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c index dd6c09699237..9d32a6fcf840 100644 --- a/drivers/gpu/drm/i915/intel_atomic_plane.c +++ b/drivers/gpu/drm/i915/intel_atomic_plane.c @@ -121,6 +121,7 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_ new_crtc_state->active_planes &= ~BIT(plane->id); new_crtc_state->nv12_planes &= ~BIT(plane->id); + new_crtc_state->c8_planes &= ~BIT(plane->id); new_plane_state->base.visible = false; if (!new_plane_state->base.crtc && !old_plane_state->base.crtc) @@ -138,6 +139,10 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_ is_planar_yuv_format(new_plane_state->base.fb->format->format)) new_crtc_state->nv12_planes |= BIT(plane->id); + if (new_plane_state->base.visible && + new_plane_state->base.fb->format->format == DRM_FORMAT_C8) + new_crtc_state->c8_planes |= BIT(plane->id); + if (new_plane_state->base.visible || old_plane_state->base.visible) new_crtc_state->update_planes |= 
BIT(plane->id); @@ -214,6 +219,35 @@ skl_next_plane_to_commit(struct intel_atomic_state *state, return NULL; } +void intel_update_plane(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + + trace_intel_update_plane(&plane->base, crtc); + plane->update_plane(plane, crtc_state, plane_state); +} + +void intel_update_slave(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + + trace_intel_update_plane(&plane->base, crtc); + plane->update_slave(plane, crtc_state, plane_state); +} + +void intel_disable_plane(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + + trace_intel_disable_plane(&plane->base, crtc); + plane->disable_plane(plane, crtc_state); +} + void skl_update_planes_on_crtc(struct intel_atomic_state *state, struct intel_crtc *crtc) { @@ -238,8 +272,7 @@ void skl_update_planes_on_crtc(struct intel_atomic_state *state, intel_atomic_get_new_plane_state(state, plane); if (new_plane_state->base.visible) { - trace_intel_update_plane(&plane->base, crtc); - plane->update_plane(plane, new_crtc_state, new_plane_state); + intel_update_plane(plane, new_crtc_state, new_plane_state); } else if (new_plane_state->slave) { struct intel_plane *master = new_plane_state->linked_plane; @@ -256,11 +289,9 @@ void skl_update_planes_on_crtc(struct intel_atomic_state *state, new_plane_state = intel_atomic_get_new_plane_state(state, master); - trace_intel_update_plane(&plane->base, crtc); - plane->update_slave(plane, new_crtc_state, new_plane_state); + intel_update_slave(plane, new_crtc_state, new_plane_state); } else { - trace_intel_disable_plane(&plane->base, crtc); - plane->disable_plane(plane, new_crtc_state); + intel_disable_plane(plane, new_crtc_state); } } } @@ -280,13 +311,10 @@ void i9xx_update_planes_on_crtc(struct intel_atomic_state *state, !(update_mask & BIT(plane->id))) continue; - if (new_plane_state->base.visible) { - trace_intel_update_plane(&plane->base, crtc); - plane->update_plane(plane, new_crtc_state, new_plane_state); - } else { - trace_intel_disable_plane(&plane->base, crtc); - plane->disable_plane(plane, new_crtc_state); - } + if (new_plane_state->base.visible) + intel_update_plane(plane, new_crtc_state, new_plane_state); + else + intel_disable_plane(plane, new_crtc_state); } } diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c index 5104c6bbd66f..502b57ce72ab 100644 --- a/drivers/gpu/drm/i915/intel_audio.c +++ b/drivers/gpu/drm/i915/intel_audio.c @@ -741,27 +741,31 @@ void intel_init_audio_hooks(struct drm_i915_private *dev_priv) } } -static void i915_audio_component_get_power(struct device *kdev) +static unsigned long i915_audio_component_get_power(struct device *kdev) { - intel_display_power_get(kdev_to_i915(kdev), POWER_DOMAIN_AUDIO); + /* Catch potential impedance mismatches before they occur! 
*/ + BUILD_BUG_ON(sizeof(intel_wakeref_t) > sizeof(unsigned long)); + + return intel_display_power_get(kdev_to_i915(kdev), POWER_DOMAIN_AUDIO); } -static void i915_audio_component_put_power(struct device *kdev) +static void i915_audio_component_put_power(struct device *kdev, + unsigned long cookie) { - intel_display_power_put_unchecked(kdev_to_i915(kdev), - POWER_DOMAIN_AUDIO); + intel_display_power_put(kdev_to_i915(kdev), POWER_DOMAIN_AUDIO, cookie); } static void i915_audio_component_codec_wake_override(struct device *kdev, bool enable) { struct drm_i915_private *dev_priv = kdev_to_i915(kdev); + unsigned long cookie; u32 tmp; if (!IS_GEN(dev_priv, 9)) return; - i915_audio_component_get_power(kdev); + cookie = i915_audio_component_get_power(kdev); /* * Enable/disable generating the codec wake signal, overriding the @@ -779,7 +783,7 @@ static void i915_audio_component_codec_wake_override(struct device *kdev, usleep_range(1000, 1500); } - i915_audio_component_put_power(kdev); + i915_audio_component_put_power(kdev, cookie); } /* Get CDCLK in kHz */ @@ -850,12 +854,13 @@ static int i915_audio_component_sync_audio_rate(struct device *kdev, int port, struct i915_audio_component *acomp = dev_priv->audio_component; struct intel_encoder *encoder; struct intel_crtc *crtc; + unsigned long cookie; int err = 0; if (!HAS_DDI(dev_priv)) return 0; - i915_audio_component_get_power(kdev); + cookie = i915_audio_component_get_power(kdev); mutex_lock(&dev_priv->av_mutex); /* 1. get the pipe */ @@ -875,7 +880,7 @@ static int i915_audio_component_sync_audio_rate(struct device *kdev, int port, unlock: mutex_unlock(&dev_priv->av_mutex); - i915_audio_component_put_power(kdev); + i915_audio_component_put_power(kdev, cookie); return err; } diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index b508d8a735e0..1dc8d03ff127 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c @@ -760,6 +760,31 @@ parse_psr(struct drm_i915_private *dev_priv, const struct bdb_header *bdb) dev_priv->vbt.psr.tp1_wakeup_time_us = psr_table->tp1_wakeup_time * 100; dev_priv->vbt.psr.tp2_tp3_wakeup_time_us = psr_table->tp2_tp3_wakeup_time * 100; } + + if (bdb->version >= 226) { + u32 wakeup_time = psr_table->psr2_tp2_tp3_wakeup_time; + + wakeup_time = (wakeup_time >> (2 * panel_type)) & 0x3; + switch (wakeup_time) { + case 0: + wakeup_time = 500; + break; + case 1: + wakeup_time = 100; + break; + case 3: + wakeup_time = 50; + break; + default: + case 2: + wakeup_time = 2500; + break; + } + dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us = wakeup_time; + } else { + /* Reusing PSR1 wakeup time for PSR2 in older VBTs */ + dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us = dev_priv->vbt.psr.tp2_tp3_wakeup_time_us; + } } static void parse_dsi_backlight_ports(struct drm_i915_private *dev_priv, @@ -1222,10 +1247,11 @@ static void sanitize_ddc_pin(struct drm_i915_private *dev_priv, if (!info->alternate_ddc_pin) return; - for_each_port_masked(p, (1 << port) - 1) { + for (p = PORT_A; p < I915_MAX_PORTS; p++) { struct ddi_vbt_port_info *i = &dev_priv->vbt.ddi_port_info[p]; - if (info->alternate_ddc_pin != i->alternate_ddc_pin) + if (p == port || !i->present || + info->alternate_ddc_pin != i->alternate_ddc_pin) continue; DRM_DEBUG_KMS("port %c trying to use the same DDC pin (0x%x) as port %c, " @@ -1239,8 +1265,8 @@ static void sanitize_ddc_pin(struct drm_i915_private *dev_priv, * port. Otherwise they share the same ddc bin and * system couldn't communicate with them separately. 
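As a worked example of the bdb->version >= 226 decode above (raw value invented): for panel_type 2 the field's bits 5:4 are selected, so a raw psr2_tp2_tp3_wakeup_time of 0x34 gives (0x34 >> 4) & 0x3 == 3, i.e. a 50 us TP2/TP3 wakeup time. The same lookup, pulled out as a standalone helper purely for clarity:

static u32 psr2_tp2_tp3_wakeup_time_us(u32 raw, int panel_type)
{
	switch ((raw >> (2 * panel_type)) & 0x3) {
	case 0:
		return 500;
	case 1:
		return 100;
	case 3:
		return 50;
	case 2:
	default:
		return 2500;
	}
}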
* - * Due to parsing the ports in alphabetical order, - * a higher port will always clobber a lower one. + * Due to parsing the ports in child device order, + * a later device will always clobber an earlier one. */ i->supports_dvi = false; i->supports_hdmi = false; @@ -1258,10 +1284,11 @@ static void sanitize_aux_ch(struct drm_i915_private *dev_priv, if (!info->alternate_aux_channel) return; - for_each_port_masked(p, (1 << port) - 1) { + for (p = PORT_A; p < I915_MAX_PORTS; p++) { struct ddi_vbt_port_info *i = &dev_priv->vbt.ddi_port_info[p]; - if (info->alternate_aux_channel != i->alternate_aux_channel) + if (p == port || !i->present || + info->alternate_aux_channel != i->alternate_aux_channel) continue; DRM_DEBUG_KMS("port %c trying to use the same AUX CH (0x%x) as port %c, " @@ -1275,8 +1302,8 @@ static void sanitize_aux_ch(struct drm_i915_private *dev_priv, * port. Otherwise they share the same aux channel * and system couldn't communicate with them separately. * - * Due to parsing the ports in alphabetical order, - * a higher port will always clobber a lower one. + * Due to parsing the ports in child device order, + * a later device will always clobber an earlier one. */ i->supports_dp = false; i->alternate_aux_channel = 0; @@ -1324,48 +1351,57 @@ static u8 map_ddc_pin(struct drm_i915_private *dev_priv, u8 vbt_pin) return 0; } -static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port, - u8 bdb_version) +static enum port dvo_port_to_port(u8 dvo_port) { - struct child_device_config *it, *child = NULL; - struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port]; - int i, j; - bool is_dvi, is_hdmi, is_dp, is_edp, is_crt; - /* Each DDI port can have more than one value on the "DVO Port" field, + /* + * Each DDI port can have more than one value on the "DVO Port" field, * so look for all the possible values for each port. */ - int dvo_ports[][3] = { - {DVO_PORT_HDMIA, DVO_PORT_DPA, -1}, - {DVO_PORT_HDMIB, DVO_PORT_DPB, -1}, - {DVO_PORT_HDMIC, DVO_PORT_DPC, -1}, - {DVO_PORT_HDMID, DVO_PORT_DPD, -1}, - {DVO_PORT_CRT, DVO_PORT_HDMIE, DVO_PORT_DPE}, - {DVO_PORT_HDMIF, DVO_PORT_DPF, -1}, + static const int dvo_ports[][3] = { + [PORT_A] = { DVO_PORT_HDMIA, DVO_PORT_DPA, -1}, + [PORT_B] = { DVO_PORT_HDMIB, DVO_PORT_DPB, -1}, + [PORT_C] = { DVO_PORT_HDMIC, DVO_PORT_DPC, -1}, + [PORT_D] = { DVO_PORT_HDMID, DVO_PORT_DPD, -1}, + [PORT_E] = { DVO_PORT_CRT, DVO_PORT_HDMIE, DVO_PORT_DPE}, + [PORT_F] = { DVO_PORT_HDMIF, DVO_PORT_DPF, -1}, }; + enum port port; + int i; - /* - * Find the first child device to reference the port, report if more - * than one found. 
- */ - for (i = 0; i < dev_priv->vbt.child_dev_num; i++) { - it = dev_priv->vbt.child_dev + i; - - for (j = 0; j < 3; j++) { - if (dvo_ports[port][j] == -1) + for (port = PORT_A; port < ARRAY_SIZE(dvo_ports); port++) { + for (i = 0; i < ARRAY_SIZE(dvo_ports[port]); i++) { + if (dvo_ports[port][i] == -1) break; - if (it->dvo_port == dvo_ports[port][j]) { - if (child) { - DRM_DEBUG_KMS("More than one child device for port %c in VBT, using the first.\n", - port_name(port)); - } else { - child = it; - } - } + if (dvo_port == dvo_ports[port][i]) + return port; } } - if (!child) + + return PORT_NONE; +} + +static void parse_ddi_port(struct drm_i915_private *dev_priv, + const struct child_device_config *child, + u8 bdb_version) +{ + struct ddi_vbt_port_info *info; + bool is_dvi, is_hdmi, is_dp, is_edp, is_crt; + enum port port; + + port = dvo_port_to_port(child->dvo_port); + if (port == PORT_NONE) + return; + + info = &dev_priv->vbt.ddi_port_info[port]; + + if (info->present) { + DRM_DEBUG_KMS("More than one child device for port %c in VBT, using the first.\n", + port_name(port)); return; + } + + info->present = true; is_dvi = child->device_type & DEVICE_TYPE_TMDS_DVI_SIGNALING; is_dp = child->device_type & DEVICE_TYPE_DISPLAYPORT_OUTPUT; @@ -1498,19 +1534,20 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port, static void parse_ddi_ports(struct drm_i915_private *dev_priv, u8 bdb_version) { - enum port port; + const struct child_device_config *child; + int i; if (!HAS_DDI(dev_priv) && !IS_CHERRYVIEW(dev_priv)) return; - if (!dev_priv->vbt.child_dev_num) - return; - if (bdb_version < 155) return; - for (port = PORT_A; port < I915_MAX_PORTS; port++) - parse_ddi_port(dev_priv, port, bdb_version); + for (i = 0; i < dev_priv->vbt.child_dev_num; i++) { + child = dev_priv->vbt.child_dev + i; + + parse_ddi_port(dev_priv, child, bdb_version); + } } static void @@ -1673,6 +1710,7 @@ init_vbt_missing_defaults(struct drm_i915_private *dev_priv) info->supports_dvi = (port != PORT_A && port != PORT_E); info->supports_hdmi = info->supports_dvi; info->supports_dp = (port != PORT_E); + info->supports_edp = (port == PORT_A); } } @@ -2093,8 +2131,8 @@ bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv, dvo_port = child->dvo_port; if (dvo_port == DVO_PORT_MIPIA || - (dvo_port == DVO_PORT_MIPIB && IS_ICELAKE(dev_priv)) || - (dvo_port == DVO_PORT_MIPIC && !IS_ICELAKE(dev_priv))) { + (dvo_port == DVO_PORT_MIPIB && INTEL_GEN(dev_priv) >= 11) || + (dvo_port == DVO_PORT_MIPIC && INTEL_GEN(dev_priv) < 11)) { if (port) *port = dvo_port - DVO_PORT_MIPIA; return true; diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c index cacaa1d04d17..09ed90c0ba00 100644 --- a/drivers/gpu/drm/i915/intel_breadcrumbs.c +++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c @@ -106,16 +106,6 @@ bool intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine) GEM_BUG_ON(!test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags)); - clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags); - - /* - * We may race with direct invocation of - * dma_fence_signal(), e.g. i915_request_retire(), - * in which case we can skip processing it ourselves. - */ - if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, - &rq->fence.flags)) - continue; /* * Queue for execution after dropping the signaling @@ -123,6 +113,14 @@ bool intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine) * more signalers to the same context or engine. 
*/ i915_request_get(rq); + + /* + * We may race with direct invocation of + * dma_fence_signal(), e.g. i915_request_retire(), + * so we need to acquire our reference to the request + * before we cancel the breadcrumb. + */ + clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags); list_add_tail(&rq->signal_link, &signal); } diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c index 15ba950dee00..d40f8793718c 100644 --- a/drivers/gpu/drm/i915/intel_cdclk.c +++ b/drivers/gpu/drm/i915/intel_cdclk.c @@ -234,7 +234,8 @@ static unsigned int intel_hpll_vco(struct drm_i915_private *dev_priv) else return 0; - tmp = I915_READ(IS_MOBILE(dev_priv) ? HPLLVCO_MOBILE : HPLLVCO); + tmp = I915_READ(IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv) ? + HPLLVCO_MOBILE : HPLLVCO); vco = vco_table[tmp & 0x7]; if (vco == 0) @@ -468,7 +469,7 @@ static void vlv_get_cdclk(struct drm_i915_private *dev_priv, cdclk_state->vco); mutex_lock(&dev_priv->pcu_lock); - val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ); + val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM); mutex_unlock(&dev_priv->pcu_lock); if (IS_VALLEYVIEW(dev_priv)) @@ -543,11 +544,11 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv, wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A); mutex_lock(&dev_priv->pcu_lock); - val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ); + val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM); val &= ~DSPFREQGUAR_MASK; val |= (cmd << DSPFREQGUAR_SHIFT); - vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val); - if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & + vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, val); + if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT), 50)) { DRM_ERROR("timed out waiting for CDclk change\n"); @@ -624,11 +625,11 @@ static void chv_set_cdclk(struct drm_i915_private *dev_priv, wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A); mutex_lock(&dev_priv->pcu_lock); - val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ); + val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM); val &= ~DSPFREQGUAR_MASK_CHV; val |= (cmd << DSPFREQGUAR_SHIFT_CHV); - vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val); - if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & + vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, val); + if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DSPFREQSTAT_MASK_CHV) == (cmd << DSPFREQSTAT_SHIFT_CHV), 50)) { DRM_ERROR("timed out waiting for CDclk change\n"); @@ -964,7 +965,7 @@ static void skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco) I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) | LCPLL_PLL_ENABLE); - if (intel_wait_for_register(dev_priv, + if (intel_wait_for_register(&dev_priv->uncore, LCPLL1_CTL, LCPLL_PLL_LOCK, LCPLL_PLL_LOCK, 5)) DRM_ERROR("DPLL0 not locked\n"); @@ -978,9 +979,9 @@ static void skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco) static void skl_dpll0_disable(struct drm_i915_private *dev_priv) { I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) & ~LCPLL_PLL_ENABLE); - if (intel_wait_for_register(dev_priv, - LCPLL1_CTL, LCPLL_PLL_LOCK, 0, - 1)) + if (intel_wait_for_register(&dev_priv->uncore, + LCPLL1_CTL, LCPLL_PLL_LOCK, 0, + 1)) DRM_ERROR("Couldn't disable DPLL0\n"); dev_priv->cdclk.hw.vco = 0; @@ -1323,7 +1324,7 @@ static void bxt_de_pll_disable(struct drm_i915_private *dev_priv) I915_WRITE(BXT_DE_PLL_ENABLE, 0); /* Timeout 200us */ - if (intel_wait_for_register(dev_priv, + if (intel_wait_for_register(&dev_priv->uncore, 
BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 0, 1)) DRM_ERROR("timeout waiting for DE PLL unlock\n"); @@ -1344,7 +1345,7 @@ static void bxt_de_pll_enable(struct drm_i915_private *dev_priv, int vco) I915_WRITE(BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE); /* Timeout 200us */ - if (intel_wait_for_register(dev_priv, + if (intel_wait_for_register(&dev_priv->uncore, BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, BXT_DE_PLL_LOCK, @@ -2560,7 +2561,7 @@ static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv) */ void intel_update_max_cdclk(struct drm_i915_private *dev_priv) { - if (IS_ICELAKE(dev_priv)) { + if (INTEL_GEN(dev_priv) >= 11) { if (dev_priv->cdclk.hw.ref == 24000) dev_priv->max_cdclk_freq = 648000; else @@ -2668,7 +2669,7 @@ static int cnp_rawclk(struct drm_i915_private *dev_priv) rawclk |= CNP_RAWCLK_DEN(DIV_ROUND_CLOSEST(numerator * 1000, fraction) - 1); - if (HAS_PCH_ICP(dev_priv)) + if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) rawclk |= ICP_RAWCLK_NUM(numerator); } @@ -2723,7 +2724,7 @@ static int g4x_hrawclk(struct drm_i915_private *dev_priv) */ void intel_update_rawclk(struct drm_i915_private *dev_priv) { - if (HAS_PCH_CNP(dev_priv) || HAS_PCH_ICP(dev_priv)) + if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP) dev_priv->rawclk_freq = cnp_rawclk(dev_priv); else if (HAS_PCH_SPLIT(dev_priv)) dev_priv->rawclk_freq = pch_rawclk(dev_priv); @@ -2744,18 +2745,13 @@ void intel_update_rawclk(struct drm_i915_private *dev_priv) */ void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv) { - if (IS_CHERRYVIEW(dev_priv)) { - dev_priv->display.set_cdclk = chv_set_cdclk; - dev_priv->display.modeset_calc_cdclk = - vlv_modeset_calc_cdclk; - } else if (IS_VALLEYVIEW(dev_priv)) { - dev_priv->display.set_cdclk = vlv_set_cdclk; - dev_priv->display.modeset_calc_cdclk = - vlv_modeset_calc_cdclk; - } else if (IS_BROADWELL(dev_priv)) { - dev_priv->display.set_cdclk = bdw_set_cdclk; + if (INTEL_GEN(dev_priv) >= 11) { + dev_priv->display.set_cdclk = icl_set_cdclk; + dev_priv->display.modeset_calc_cdclk = icl_modeset_calc_cdclk; + } else if (IS_CANNONLAKE(dev_priv)) { + dev_priv->display.set_cdclk = cnl_set_cdclk; dev_priv->display.modeset_calc_cdclk = - bdw_modeset_calc_cdclk; + cnl_modeset_calc_cdclk; } else if (IS_GEN9_LP(dev_priv)) { dev_priv->display.set_cdclk = bxt_set_cdclk; dev_priv->display.modeset_calc_cdclk = @@ -2764,23 +2760,28 @@ void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv) dev_priv->display.set_cdclk = skl_set_cdclk; dev_priv->display.modeset_calc_cdclk = skl_modeset_calc_cdclk; - } else if (IS_CANNONLAKE(dev_priv)) { - dev_priv->display.set_cdclk = cnl_set_cdclk; + } else if (IS_BROADWELL(dev_priv)) { + dev_priv->display.set_cdclk = bdw_set_cdclk; dev_priv->display.modeset_calc_cdclk = - cnl_modeset_calc_cdclk; - } else if (IS_ICELAKE(dev_priv)) { - dev_priv->display.set_cdclk = icl_set_cdclk; - dev_priv->display.modeset_calc_cdclk = icl_modeset_calc_cdclk; + bdw_modeset_calc_cdclk; + } else if (IS_CHERRYVIEW(dev_priv)) { + dev_priv->display.set_cdclk = chv_set_cdclk; + dev_priv->display.modeset_calc_cdclk = + vlv_modeset_calc_cdclk; + } else if (IS_VALLEYVIEW(dev_priv)) { + dev_priv->display.set_cdclk = vlv_set_cdclk; + dev_priv->display.modeset_calc_cdclk = + vlv_modeset_calc_cdclk; } - if (IS_ICELAKE(dev_priv)) + if (INTEL_GEN(dev_priv) >= 11) dev_priv->display.get_cdclk = icl_get_cdclk; else if (IS_CANNONLAKE(dev_priv)) dev_priv->display.get_cdclk = cnl_get_cdclk; - else if (IS_GEN9_BC(dev_priv)) - dev_priv->display.get_cdclk = skl_get_cdclk; else if (IS_GEN9_LP(dev_priv)) 
dev_priv->display.get_cdclk = bxt_get_cdclk; + else if (IS_GEN9_BC(dev_priv)) + dev_priv->display.get_cdclk = skl_get_cdclk; else if (IS_BROADWELL(dev_priv)) dev_priv->display.get_cdclk = bdw_get_cdclk; else if (IS_HASWELL(dev_priv)) diff --git a/drivers/gpu/drm/i915/intel_color.c b/drivers/gpu/drm/i915/intel_color.c index 71a1f12c6b2a..467fd1a1630c 100644 --- a/drivers/gpu/drm/i915/intel_color.c +++ b/drivers/gpu/drm/i915/intel_color.c @@ -40,23 +40,6 @@ #define CTM_COEFF_ABS(coeff) ((coeff) & (CTM_COEFF_SIGN - 1)) #define LEGACY_LUT_LENGTH 256 - -/* Post offset values for RGB->YCBCR conversion */ -#define POSTOFF_RGB_TO_YUV_HI 0x800 -#define POSTOFF_RGB_TO_YUV_ME 0x100 -#define POSTOFF_RGB_TO_YUV_LO 0x800 - -/* - * These values are direct register values specified in the Bspec, - * for RGB->YUV conversion matrix (colorspace BT709) - */ -#define CSC_RGB_TO_YUV_RU_GU 0x2ba809d8 -#define CSC_RGB_TO_YUV_BU 0x37e80000 -#define CSC_RGB_TO_YUV_RY_GY 0x1e089cc0 -#define CSC_RGB_TO_YUV_BY 0xb5280000 -#define CSC_RGB_TO_YUV_RV_GV 0xbce89ad8 -#define CSC_RGB_TO_YUV_BV 0x1e080000 - /* * Extract the CSC coefficient from a CTM coefficient (in U32.32 fixed point * format). This macro takes the coefficient we want transformed and the @@ -69,10 +52,45 @@ #define ILK_CSC_COEFF_FP(coeff, fbits) \ (clamp_val(((coeff) >> (32 - (fbits) - 3)) + 4, 0, 0xfff) & 0xff8) -#define ILK_CSC_COEFF_LIMITED_RANGE \ - ILK_CSC_COEFF_FP(CTM_COEFF_LIMITED_RANGE, 9) -#define ILK_CSC_COEFF_1_0 \ - ((7 << 12) | ILK_CSC_COEFF_FP(CTM_COEFF_1_0, 8)) +#define ILK_CSC_COEFF_LIMITED_RANGE 0x0dc0 +#define ILK_CSC_COEFF_1_0 0x7800 + +#define ILK_CSC_POSTOFF_LIMITED_RANGE (16 * (1 << 12) / 255) + +static const u16 ilk_csc_off_zero[3] = {}; + +static const u16 ilk_csc_coeff_identity[9] = { + ILK_CSC_COEFF_1_0, 0, 0, + 0, ILK_CSC_COEFF_1_0, 0, + 0, 0, ILK_CSC_COEFF_1_0, +}; + +static const u16 ilk_csc_postoff_limited_range[3] = { + ILK_CSC_POSTOFF_LIMITED_RANGE, + ILK_CSC_POSTOFF_LIMITED_RANGE, + ILK_CSC_POSTOFF_LIMITED_RANGE, +}; + +static const u16 ilk_csc_coeff_limited_range[9] = { + ILK_CSC_COEFF_LIMITED_RANGE, 0, 0, + 0, ILK_CSC_COEFF_LIMITED_RANGE, 0, + 0, 0, ILK_CSC_COEFF_LIMITED_RANGE, +}; + +/* + * These values are direct register values specified in the Bspec, + * for RGB->YUV conversion matrix (colorspace BT709) + */ +static const u16 ilk_csc_coeff_rgb_to_ycbcr[9] = { + 0x1e08, 0x9cc0, 0xb528, + 0x2ba8, 0x09d8, 0x37e8, + 0xbce8, 0x9ad8, 0x1e08, +}; + +/* Post offset values for RGB->YCBCR conversion */ +static const u16 ilk_csc_postoff_rgb_to_ycbcr[3] = { + 0x0800, 0x0100, 0x0800, +}; static bool lut_is_legacy(const struct drm_property_blob *lut) { @@ -113,145 +131,180 @@ static u64 *ctm_mult_by_limited(u64 *result, const u64 *input) return result; } -static void ilk_load_ycbcr_conversion_matrix(struct intel_crtc *crtc) +static void ilk_update_pipe_csc(struct intel_crtc *crtc, + const u16 preoff[3], + const u16 coeff[9], + const u16 postoff[3]) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; - I915_WRITE(PIPE_CSC_PREOFF_HI(pipe), 0); - I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), 0); - I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), 0); + I915_WRITE(PIPE_CSC_PREOFF_HI(pipe), preoff[0]); + I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), preoff[1]); + I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), preoff[2]); - I915_WRITE(PIPE_CSC_COEFF_RU_GU(pipe), CSC_RGB_TO_YUV_RU_GU); - I915_WRITE(PIPE_CSC_COEFF_BU(pipe), CSC_RGB_TO_YUV_BU); + I915_WRITE(PIPE_CSC_COEFF_RY_GY(pipe), coeff[0] << 16 | coeff[1]); + 
I915_WRITE(PIPE_CSC_COEFF_BY(pipe), coeff[2] << 16); - I915_WRITE(PIPE_CSC_COEFF_RY_GY(pipe), CSC_RGB_TO_YUV_RY_GY); - I915_WRITE(PIPE_CSC_COEFF_BY(pipe), CSC_RGB_TO_YUV_BY); + I915_WRITE(PIPE_CSC_COEFF_RU_GU(pipe), coeff[3] << 16 | coeff[4]); + I915_WRITE(PIPE_CSC_COEFF_BU(pipe), coeff[5] << 16); - I915_WRITE(PIPE_CSC_COEFF_RV_GV(pipe), CSC_RGB_TO_YUV_RV_GV); - I915_WRITE(PIPE_CSC_COEFF_BV(pipe), CSC_RGB_TO_YUV_BV); + I915_WRITE(PIPE_CSC_COEFF_RV_GV(pipe), coeff[6] << 16 | coeff[7]); + I915_WRITE(PIPE_CSC_COEFF_BV(pipe), coeff[8] << 16); - I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), POSTOFF_RGB_TO_YUV_HI); - I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), POSTOFF_RGB_TO_YUV_ME); - I915_WRITE(PIPE_CSC_POSTOFF_LO(pipe), POSTOFF_RGB_TO_YUV_LO); - I915_WRITE(PIPE_CSC_MODE(pipe), 0); + if (INTEL_GEN(dev_priv) >= 7) { + I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff[0]); + I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff[1]); + I915_WRITE(PIPE_CSC_POSTOFF_LO(pipe), postoff[2]); + } } -static void ilk_load_csc_matrix(const struct intel_crtc_state *crtc_state) +static void icl_update_output_csc(struct intel_crtc *crtc, + const u16 preoff[3], + const u16 coeff[9], + const u16 postoff[3]) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - bool limited_color_range = false; enum pipe pipe = crtc->pipe; - u16 coeffs[9] = {}; - int i; + + I915_WRITE(PIPE_CSC_OUTPUT_PREOFF_HI(pipe), preoff[0]); + I915_WRITE(PIPE_CSC_OUTPUT_PREOFF_ME(pipe), preoff[1]); + I915_WRITE(PIPE_CSC_OUTPUT_PREOFF_LO(pipe), preoff[2]); + + I915_WRITE(PIPE_CSC_OUTPUT_COEFF_RY_GY(pipe), coeff[0] << 16 | coeff[1]); + I915_WRITE(PIPE_CSC_OUTPUT_COEFF_BY(pipe), coeff[2]); + + I915_WRITE(PIPE_CSC_OUTPUT_COEFF_RU_GU(pipe), coeff[3] << 16 | coeff[4]); + I915_WRITE(PIPE_CSC_OUTPUT_COEFF_BU(pipe), coeff[5]); + + I915_WRITE(PIPE_CSC_OUTPUT_COEFF_RV_GV(pipe), coeff[6] << 16 | coeff[7]); + I915_WRITE(PIPE_CSC_OUTPUT_COEFF_BV(pipe), coeff[8]); + + I915_WRITE(PIPE_CSC_OUTPUT_POSTOFF_HI(pipe), postoff[0]); + I915_WRITE(PIPE_CSC_OUTPUT_POSTOFF_ME(pipe), postoff[1]); + I915_WRITE(PIPE_CSC_OUTPUT_POSTOFF_LO(pipe), postoff[2]); +} + +static bool ilk_csc_limited_range(const struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); /* * FIXME if there's a gamma LUT after the CSC, we should * do the range compression using the gamma LUT instead. */ - if (INTEL_GEN(dev_priv) >= 8 || IS_HASWELL(dev_priv)) - limited_color_range = crtc_state->limited_color_range; + return crtc_state->limited_color_range && + (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv) || + IS_GEN_RANGE(dev_priv, 9, 10)); +} - if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 || - crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) { - ilk_load_ycbcr_conversion_matrix(crtc); - return; - } else if (crtc_state->base.ctm) { - struct drm_color_ctm *ctm = crtc_state->base.ctm->data; - const u64 *input; - u64 temp[9]; +static void ilk_csc_convert_ctm(const struct intel_crtc_state *crtc_state, + u16 coeffs[9]) +{ + const struct drm_color_ctm *ctm = crtc_state->base.ctm->data; + const u64 *input; + u64 temp[9]; + int i; - if (limited_color_range) - input = ctm_mult_by_limited(temp, ctm->matrix); - else - input = ctm->matrix; + if (ilk_csc_limited_range(crtc_state)) + input = ctm_mult_by_limited(temp, ctm->matrix); + else + input = ctm->matrix; - /* - * Convert fixed point S31.32 input to format supported by the - * hardware. 
- */ - for (i = 0; i < ARRAY_SIZE(coeffs); i++) { - u64 abs_coeff = ((1ULL << 63) - 1) & input[i]; + /* + * Convert fixed point S31.32 input to format supported by the + * hardware. + */ + for (i = 0; i < 9; i++) { + u64 abs_coeff = ((1ULL << 63) - 1) & input[i]; - /* - * Clamp input value to min/max supported by - * hardware. - */ - abs_coeff = clamp_val(abs_coeff, 0, CTM_COEFF_4_0 - 1); - - /* sign bit */ - if (CTM_COEFF_NEGATIVE(input[i])) - coeffs[i] |= 1 << 15; - - if (abs_coeff < CTM_COEFF_0_125) - coeffs[i] |= (3 << 12) | - ILK_CSC_COEFF_FP(abs_coeff, 12); - else if (abs_coeff < CTM_COEFF_0_25) - coeffs[i] |= (2 << 12) | - ILK_CSC_COEFF_FP(abs_coeff, 11); - else if (abs_coeff < CTM_COEFF_0_5) - coeffs[i] |= (1 << 12) | - ILK_CSC_COEFF_FP(abs_coeff, 10); - else if (abs_coeff < CTM_COEFF_1_0) - coeffs[i] |= ILK_CSC_COEFF_FP(abs_coeff, 9); - else if (abs_coeff < CTM_COEFF_2_0) - coeffs[i] |= (7 << 12) | - ILK_CSC_COEFF_FP(abs_coeff, 8); - else - coeffs[i] |= (6 << 12) | - ILK_CSC_COEFF_FP(abs_coeff, 7); - } - } else { /* - * Load an identity matrix if no coefficients are provided. - * - * TODO: Check what kind of values actually come out of the - * pipe with these coeff/postoff values and adjust to get the - * best accuracy. Perhaps we even need to take the bpc value - * into consideration. + * Clamp input value to min/max supported by + * hardware. */ - for (i = 0; i < 3; i++) { - if (limited_color_range) - coeffs[i * 3 + i] = - ILK_CSC_COEFF_LIMITED_RANGE; - else - coeffs[i * 3 + i] = ILK_CSC_COEFF_1_0; - } + abs_coeff = clamp_val(abs_coeff, 0, CTM_COEFF_4_0 - 1); + + coeffs[i] = 0; + + /* sign bit */ + if (CTM_COEFF_NEGATIVE(input[i])) + coeffs[i] |= 1 << 15; + + if (abs_coeff < CTM_COEFF_0_125) + coeffs[i] |= (3 << 12) | + ILK_CSC_COEFF_FP(abs_coeff, 12); + else if (abs_coeff < CTM_COEFF_0_25) + coeffs[i] |= (2 << 12) | + ILK_CSC_COEFF_FP(abs_coeff, 11); + else if (abs_coeff < CTM_COEFF_0_5) + coeffs[i] |= (1 << 12) | + ILK_CSC_COEFF_FP(abs_coeff, 10); + else if (abs_coeff < CTM_COEFF_1_0) + coeffs[i] |= ILK_CSC_COEFF_FP(abs_coeff, 9); + else if (abs_coeff < CTM_COEFF_2_0) + coeffs[i] |= (7 << 12) | + ILK_CSC_COEFF_FP(abs_coeff, 8); + else + coeffs[i] |= (6 << 12) | + ILK_CSC_COEFF_FP(abs_coeff, 7); } +} - I915_WRITE(PIPE_CSC_COEFF_RY_GY(pipe), coeffs[0] << 16 | coeffs[1]); - I915_WRITE(PIPE_CSC_COEFF_BY(pipe), coeffs[2] << 16); - - I915_WRITE(PIPE_CSC_COEFF_RU_GU(pipe), coeffs[3] << 16 | coeffs[4]); - I915_WRITE(PIPE_CSC_COEFF_BU(pipe), coeffs[5] << 16); - - I915_WRITE(PIPE_CSC_COEFF_RV_GV(pipe), coeffs[6] << 16 | coeffs[7]); - I915_WRITE(PIPE_CSC_COEFF_BV(pipe), coeffs[8] << 16); - - I915_WRITE(PIPE_CSC_PREOFF_HI(pipe), 0); - I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), 0); - I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), 0); +static void ilk_load_csc_matrix(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + bool limited_color_range = ilk_csc_limited_range(crtc_state); - if (INTEL_GEN(dev_priv) > 6) { - u16 postoff = 0; + if (crtc_state->base.ctm) { + u16 coeff[9]; + + ilk_csc_convert_ctm(crtc_state, coeff); + ilk_update_pipe_csc(crtc, ilk_csc_off_zero, coeff, + limited_color_range ? 
+ ilk_csc_postoff_limited_range : + ilk_csc_off_zero); + } else if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) { + ilk_update_pipe_csc(crtc, ilk_csc_off_zero, + ilk_csc_coeff_rgb_to_ycbcr, + ilk_csc_postoff_rgb_to_ycbcr); + } else if (limited_color_range) { + ilk_update_pipe_csc(crtc, ilk_csc_off_zero, + ilk_csc_coeff_limited_range, + ilk_csc_postoff_limited_range); + } else if (crtc_state->csc_enable) { + ilk_update_pipe_csc(crtc, ilk_csc_off_zero, + ilk_csc_coeff_identity, + ilk_csc_off_zero); + } - if (limited_color_range) - postoff = (16 * (1 << 12) / 255) & 0x1fff; + I915_WRITE(PIPE_CSC_MODE(crtc->pipe), crtc_state->csc_mode); +} - I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff); - I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff); - I915_WRITE(PIPE_CSC_POSTOFF_LO(pipe), postoff); +static void icl_load_csc_matrix(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - I915_WRITE(PIPE_CSC_MODE(pipe), 0); - } else { - u32 mode = CSC_MODE_YUV_TO_RGB; + if (crtc_state->base.ctm) { + u16 coeff[9]; - if (limited_color_range) - mode |= CSC_BLACK_SCREEN_OFFSET; + ilk_csc_convert_ctm(crtc_state, coeff); + ilk_update_pipe_csc(crtc, ilk_csc_off_zero, + coeff, ilk_csc_off_zero); + } - I915_WRITE(PIPE_CSC_MODE(pipe), mode); + if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) { + icl_update_output_csc(crtc, ilk_csc_off_zero, + ilk_csc_coeff_rgb_to_ycbcr, + ilk_csc_postoff_rgb_to_ycbcr); + } else if (crtc_state->limited_color_range) { + icl_update_output_csc(crtc, ilk_csc_off_zero, + ilk_csc_coeff_limited_range, + ilk_csc_postoff_limited_range); } + + I915_WRITE(PIPE_CSC_MODE(crtc->pipe), crtc_state->csc_mode); } /* @@ -262,7 +315,6 @@ static void cherryview_load_csc_matrix(const struct intel_crtc_state *crtc_state struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; - u32 mode; if (crtc_state->base.ctm) { const struct drm_color_ctm *ctm = crtc_state->base.ctm->data; @@ -296,12 +348,7 @@ static void cherryview_load_csc_matrix(const struct intel_crtc_state *crtc_state I915_WRITE(CGM_PIPE_CSC_COEFF8(pipe), coeffs[8]); } - mode = (crtc_state->base.ctm ? CGM_PIPE_MODE_CSC : 0); - if (!crtc_state_is_legacy_gamma(crtc_state)) { - mode |= (crtc_state->base.degamma_lut ? CGM_PIPE_MODE_DEGAMMA : 0) | - (crtc_state->base.gamma_lut ? CGM_PIPE_MODE_GAMMA : 0); - } - I915_WRITE(CGM_PIPE_MODE(pipe), mode); + I915_WRITE(CGM_PIPE_MODE(pipe), crtc_state->cgm_mode); } /* Loads the legacy palette/gamma unit for the CRTC. 
*/ @@ -351,6 +398,32 @@ static void i9xx_load_luts(const struct intel_crtc_state *crtc_state) i9xx_load_luts_internal(crtc_state, crtc_state->base.gamma_lut); } +static void i9xx_color_commit(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum pipe pipe = crtc->pipe; + u32 val; + + val = I915_READ(PIPECONF(pipe)); + val &= ~PIPECONF_GAMMA_MODE_MASK_I9XX; + val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode); + I915_WRITE(PIPECONF(pipe), val); +} + +static void ilk_color_commit(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum pipe pipe = crtc->pipe; + u32 val; + + val = I915_READ(PIPECONF(pipe)); + val &= ~PIPECONF_GAMMA_MODE_MASK_ILK; + val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode); + I915_WRITE(PIPECONF(pipe), val); +} + static void hsw_color_commit(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); @@ -361,6 +434,32 @@ static void hsw_color_commit(const struct intel_crtc_state *crtc_state) ilk_load_csc_matrix(crtc_state); } +static void skl_color_commit(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum pipe pipe = crtc->pipe; + u32 val = 0; + + /* + * We don't (yet) allow userspace to control the pipe background color, + * so force it to black, but apply pipe gamma and CSC appropriately + * so that its handling will match how we program our planes. + */ + if (crtc_state->gamma_enable) + val |= SKL_BOTTOM_COLOR_GAMMA_ENABLE; + if (crtc_state->csc_enable) + val |= SKL_BOTTOM_COLOR_CSC_ENABLE; + I915_WRITE(SKL_BOTTOM_COLOR(pipe), val); + + I915_WRITE(GAMMA_MODE(crtc->pipe), crtc_state->gamma_mode); + + if (INTEL_GEN(dev_priv) >= 11) + icl_load_csc_matrix(crtc_state); + else + ilk_load_csc_matrix(crtc_state); +} + static void bdw_load_degamma_lut(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); @@ -440,6 +539,12 @@ static void bdw_load_gamma_lut(const struct intel_crtc_state *crtc_state, u32 of I915_WRITE(PREC_PAL_GC_MAX(pipe, 1), (1 << 16) - 1); I915_WRITE(PREC_PAL_GC_MAX(pipe, 2), (1 << 16) - 1); } + + /* + * Reset the index, otherwise it prevents the legacy palette to be + * written properly. + */ + I915_WRITE(PREC_PAL_INDEX(pipe), 0); } /* Loads the palette/gamma unit for the CRTC on Broadwell+. */ @@ -447,7 +552,6 @@ static void broadwell_load_luts(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - enum pipe pipe = crtc->pipe; if (crtc_state_is_legacy_gamma(crtc_state)) { i9xx_load_luts(crtc_state); @@ -455,12 +559,6 @@ static void broadwell_load_luts(const struct intel_crtc_state *crtc_state) bdw_load_degamma_lut(crtc_state); bdw_load_gamma_lut(crtc_state, INTEL_INFO(dev_priv)->color.degamma_lut_size); - - /* - * Reset the index, otherwise it prevents the legacy palette to be - * written properly. 
- */ - I915_WRITE(PREC_PAL_INDEX(pipe), 0); } } @@ -469,7 +567,7 @@ static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state) struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; - const u32 lut_size = 33; + const u32 lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size; u32 i; /* @@ -480,14 +578,32 @@ static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state) I915_WRITE(PRE_CSC_GAMC_INDEX(pipe), 0); I915_WRITE(PRE_CSC_GAMC_INDEX(pipe), PRE_CSC_GAMC_AUTO_INCREMENT); - /* - * FIXME: The pipe degamma table in geminilake doesn't support - * different values per channel, so this just loads a linear table. - */ - for (i = 0; i < lut_size; i++) { - u32 v = (i * (1 << 16)) / (lut_size - 1); + if (crtc_state->base.degamma_lut) { + struct drm_color_lut *lut = crtc_state->base.degamma_lut->data; + + for (i = 0; i < lut_size; i++) { + /* + * First 33 entries represent range from 0 to 1.0 + * 34th and 35th entry will represent extended range + * inputs 3.0 and 7.0 respectively, currently clamped + * at 1.0. Since the precision is 16bit, the user + * value can be directly filled to register. + * The pipe degamma table in GLK+ onwards doesn't + * support different values per channel, so this just + * programs green value which will be equal to Red and + * Blue into the lut registers. + * ToDo: Extend to max 7.0. Enable 32 bit input value + * as compared to just 16 to achieve this. + */ + I915_WRITE(PRE_CSC_GAMC_DATA(pipe), lut[i].green); + } + } else { + /* load a linear table. */ + for (i = 0; i < lut_size; i++) { + u32 v = (i * (1 << 16)) / (lut_size - 1); - I915_WRITE(PRE_CSC_GAMC_DATA(pipe), v); + I915_WRITE(PRE_CSC_GAMC_DATA(pipe), v); + } } /* Clamp values > 1.0. */ @@ -497,23 +613,23 @@ static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state) static void glk_load_luts(const struct intel_crtc_state *crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - enum pipe pipe = crtc->pipe; - glk_load_degamma_lut(crtc_state); - if (crtc_state_is_legacy_gamma(crtc_state)) { + if (crtc_state_is_legacy_gamma(crtc_state)) i9xx_load_luts(crtc_state); - } else { + else bdw_load_gamma_lut(crtc_state, 0); +} - /* - * Reset the index, otherwise it prevents the legacy palette to be - * written properly. - */ - I915_WRITE(PREC_PAL_INDEX(pipe), 0); - } +static void icl_load_luts(const struct intel_crtc_state *crtc_state) +{ + glk_load_degamma_lut(crtc_state); + + if (crtc_state_is_legacy_gamma(crtc_state)) + i9xx_load_luts(crtc_state); + else + /* ToDo: Add support for multi segment gamma LUT */ + bdw_load_gamma_lut(crtc_state, 0); } static void cherryview_load_luts(const struct intel_crtc_state *crtc_state) @@ -585,8 +701,57 @@ void intel_color_commit(const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); - if (dev_priv->display.color_commit) - dev_priv->display.color_commit(crtc_state); + dev_priv->display.color_commit(crtc_state); +} + +static bool need_plane_update(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + + /* + * On pre-SKL the pipe gamma enable and pipe csc enable for + * the pipe bottom color are configured via the primary plane. + * We have to reconfigure that even if the plane is inactive. 
+ */ + return crtc_state->active_planes & BIT(plane->id) || + (INTEL_GEN(dev_priv) < 9 && + plane->id == PLANE_PRIMARY); +} + +static int +intel_color_add_affected_planes(struct intel_crtc_state *new_crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_atomic_state *state = + to_intel_atomic_state(new_crtc_state->base.state); + const struct intel_crtc_state *old_crtc_state = + intel_atomic_get_old_crtc_state(state, crtc); + struct intel_plane *plane; + + if (!new_crtc_state->base.active || + drm_atomic_crtc_needs_modeset(&new_crtc_state->base)) + return 0; + + if (new_crtc_state->gamma_enable == old_crtc_state->gamma_enable && + new_crtc_state->csc_enable == old_crtc_state->csc_enable) + return 0; + + for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) { + struct intel_plane_state *plane_state; + + if (!need_plane_update(plane, new_crtc_state)) + continue; + + plane_state = intel_atomic_get_plane_state(state, plane); + if (IS_ERR(plane_state)) + return PTR_ERR(plane_state); + + new_crtc_state->update_planes |= BIT(plane->id); + } + + return 0; } static int check_lut_size(const struct drm_property_blob *lut, int expected) @@ -606,22 +771,70 @@ static int check_lut_size(const struct drm_property_blob *lut, int expected) return 0; } +static u32 chv_cgm_mode(const struct intel_crtc_state *crtc_state) +{ + u32 cgm_mode = 0; + + if (crtc_state_is_legacy_gamma(crtc_state)) + return 0; + + if (crtc_state->base.degamma_lut) + cgm_mode |= CGM_PIPE_MODE_DEGAMMA; + if (crtc_state->base.ctm) + cgm_mode |= CGM_PIPE_MODE_CSC; + if (crtc_state->base.gamma_lut) + cgm_mode |= CGM_PIPE_MODE_GAMMA; + + return cgm_mode; +} + int intel_color_check(struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut; const struct drm_property_blob *degamma_lut = crtc_state->base.degamma_lut; + bool limited_color_range = false; int gamma_length, degamma_length; u32 gamma_tests, degamma_tests; + int ret; degamma_length = INTEL_INFO(dev_priv)->color.degamma_lut_size; gamma_length = INTEL_INFO(dev_priv)->color.gamma_lut_size; degamma_tests = INTEL_INFO(dev_priv)->color.degamma_lut_tests; gamma_tests = INTEL_INFO(dev_priv)->color.gamma_lut_tests; + /* C8 needs the legacy LUT all to itself */ + if (crtc_state->c8_planes && + !crtc_state_is_legacy_gamma(crtc_state)) + return -EINVAL; + + crtc_state->gamma_enable = (gamma_lut || degamma_lut) && + !crtc_state->c8_planes; + + if (INTEL_GEN(dev_priv) >= 9 || + IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) + limited_color_range = crtc_state->limited_color_range; + + crtc_state->csc_enable = + crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB || + crtc_state->base.ctm || limited_color_range; + + ret = intel_color_add_affected_planes(crtc_state); + if (ret) + return ret; + + crtc_state->csc_mode = 0; + + if (IS_CHERRYVIEW(dev_priv)) + crtc_state->cgm_mode = chv_cgm_mode(crtc_state); + /* Always allow legacy gamma LUT with no further checking. 
*/ - if (crtc_state_is_legacy_gamma(crtc_state)) { + if (!crtc_state->gamma_enable || + crtc_state_is_legacy_gamma(crtc_state)) { crtc_state->gamma_mode = GAMMA_MODE_MODE_8BIT; + if (INTEL_GEN(dev_priv) >= 11 && + crtc_state->gamma_enable) + crtc_state->gamma_mode |= POST_CSC_GAMMA_ENABLE; return 0; } @@ -633,13 +846,26 @@ int intel_color_check(struct intel_crtc_state *crtc_state) drm_color_lut_check(gamma_lut, gamma_tests)) return -EINVAL; - if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) + if (INTEL_GEN(dev_priv) >= 11) + crtc_state->gamma_mode = GAMMA_MODE_MODE_10BIT | + PRE_CSC_GAMMA_ENABLE | + POST_CSC_GAMMA_ENABLE; + else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) crtc_state->gamma_mode = GAMMA_MODE_MODE_10BIT; else if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv)) crtc_state->gamma_mode = GAMMA_MODE_MODE_SPLIT; else crtc_state->gamma_mode = GAMMA_MODE_MODE_8BIT; + if (INTEL_GEN(dev_priv) >= 11) { + if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB || + crtc_state->limited_color_range) + crtc_state->csc_mode |= ICL_OUTPUT_CSC_ENABLE; + + if (crtc_state->base.ctm) + crtc_state->csc_mode |= ICL_CSC_ENABLE; + } + return 0; } @@ -649,20 +875,29 @@ void intel_color_init(struct intel_crtc *crtc) drm_mode_crtc_set_gamma_size(&crtc->base, 256); - if (IS_CHERRYVIEW(dev_priv)) { - dev_priv->display.load_luts = cherryview_load_luts; - } else if (IS_HASWELL(dev_priv)) { - dev_priv->display.load_luts = i9xx_load_luts; - dev_priv->display.color_commit = hsw_color_commit; - } else if (IS_BROADWELL(dev_priv) || IS_GEN9_BC(dev_priv) || - IS_BROXTON(dev_priv)) { - dev_priv->display.load_luts = broadwell_load_luts; - dev_priv->display.color_commit = hsw_color_commit; - } else if (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) { - dev_priv->display.load_luts = glk_load_luts; - dev_priv->display.color_commit = hsw_color_commit; + if (HAS_GMCH(dev_priv)) { + if (IS_CHERRYVIEW(dev_priv)) + dev_priv->display.load_luts = cherryview_load_luts; + else + dev_priv->display.load_luts = i9xx_load_luts; + + dev_priv->display.color_commit = i9xx_color_commit; } else { - dev_priv->display.load_luts = i9xx_load_luts; + if (INTEL_GEN(dev_priv) >= 11) + dev_priv->display.load_luts = icl_load_luts; + else if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv)) + dev_priv->display.load_luts = glk_load_luts; + else if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv)) + dev_priv->display.load_luts = broadwell_load_luts; + else + dev_priv->display.load_luts = i9xx_load_luts; + + if (INTEL_GEN(dev_priv) >= 9) + dev_priv->display.color_commit = skl_color_commit; + else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) + dev_priv->display.color_commit = hsw_color_commit; + else + dev_priv->display.color_commit = ilk_color_commit; } /* Enable color management support when we have degamma & gamma LUTs. 
*/ diff --git a/drivers/gpu/drm/i915/intel_connector.c b/drivers/gpu/drm/i915/intel_connector.c index 8352d0bd8813..848dd9e728d8 100644 --- a/drivers/gpu/drm/i915/intel_connector.c +++ b/drivers/gpu/drm/i915/intel_connector.c @@ -88,6 +88,8 @@ void intel_connector_destroy(struct drm_connector *connector) kfree(intel_connector->detect_edid); + intel_hdcp_cleanup(intel_connector); + if (!IS_ERR_OR_NULL(intel_connector->edid)) kfree(intel_connector->edid); diff --git a/drivers/gpu/drm/i915/intel_context.c b/drivers/gpu/drm/i915/intel_context.c new file mode 100644 index 000000000000..8931e0fee873 --- /dev/null +++ b/drivers/gpu/drm/i915/intel_context.c @@ -0,0 +1,269 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#include "i915_drv.h" +#include "i915_gem_context.h" +#include "i915_globals.h" +#include "intel_context.h" +#include "intel_ringbuffer.h" + +static struct i915_global_context { + struct i915_global base; + struct kmem_cache *slab_ce; +} global; + +struct intel_context *intel_context_alloc(void) +{ + return kmem_cache_zalloc(global.slab_ce, GFP_KERNEL); +} + +void intel_context_free(struct intel_context *ce) +{ + kmem_cache_free(global.slab_ce, ce); +} + +struct intel_context * +intel_context_lookup(struct i915_gem_context *ctx, + struct intel_engine_cs *engine) +{ + struct intel_context *ce = NULL; + struct rb_node *p; + + spin_lock(&ctx->hw_contexts_lock); + p = ctx->hw_contexts.rb_node; + while (p) { + struct intel_context *this = + rb_entry(p, struct intel_context, node); + + if (this->engine == engine) { + GEM_BUG_ON(this->gem_context != ctx); + ce = this; + break; + } + + if (this->engine < engine) + p = p->rb_right; + else + p = p->rb_left; + } + spin_unlock(&ctx->hw_contexts_lock); + + return ce; +} + +struct intel_context * +__intel_context_insert(struct i915_gem_context *ctx, + struct intel_engine_cs *engine, + struct intel_context *ce) +{ + struct rb_node **p, *parent; + int err = 0; + + spin_lock(&ctx->hw_contexts_lock); + + parent = NULL; + p = &ctx->hw_contexts.rb_node; + while (*p) { + struct intel_context *this; + + parent = *p; + this = rb_entry(parent, struct intel_context, node); + + if (this->engine == engine) { + err = -EEXIST; + ce = this; + break; + } + + if (this->engine < engine) + p = &parent->rb_right; + else + p = &parent->rb_left; + } + if (!err) { + rb_link_node(&ce->node, parent, p); + rb_insert_color(&ce->node, &ctx->hw_contexts); + } + + spin_unlock(&ctx->hw_contexts_lock); + + return ce; +} + +void __intel_context_remove(struct intel_context *ce) +{ + struct i915_gem_context *ctx = ce->gem_context; + + spin_lock(&ctx->hw_contexts_lock); + rb_erase(&ce->node, &ctx->hw_contexts); + spin_unlock(&ctx->hw_contexts_lock); +} + +static struct intel_context * +intel_context_instance(struct i915_gem_context *ctx, + struct intel_engine_cs *engine) +{ + struct intel_context *ce, *pos; + + ce = intel_context_lookup(ctx, engine); + if (likely(ce)) + return ce; + + ce = intel_context_alloc(); + if (!ce) + return ERR_PTR(-ENOMEM); + + intel_context_init(ce, ctx, engine); + + pos = __intel_context_insert(ctx, engine, ce); + if (unlikely(pos != ce)) /* Beaten! 
Use their HW context instead */ + intel_context_free(ce); + + GEM_BUG_ON(intel_context_lookup(ctx, engine) != pos); + return pos; +} + +struct intel_context * +intel_context_pin_lock(struct i915_gem_context *ctx, + struct intel_engine_cs *engine) + __acquires(ce->pin_mutex) +{ + struct intel_context *ce; + + ce = intel_context_instance(ctx, engine); + if (IS_ERR(ce)) + return ce; + + if (mutex_lock_interruptible(&ce->pin_mutex)) + return ERR_PTR(-EINTR); + + return ce; +} + +struct intel_context * +intel_context_pin(struct i915_gem_context *ctx, + struct intel_engine_cs *engine) +{ + struct intel_context *ce; + int err; + + ce = intel_context_instance(ctx, engine); + if (IS_ERR(ce)) + return ce; + + if (likely(atomic_inc_not_zero(&ce->pin_count))) + return ce; + + if (mutex_lock_interruptible(&ce->pin_mutex)) + return ERR_PTR(-EINTR); + + if (likely(!atomic_read(&ce->pin_count))) { + err = ce->ops->pin(ce); + if (err) + goto err; + + i915_gem_context_get(ctx); + GEM_BUG_ON(ce->gem_context != ctx); + + mutex_lock(&ctx->mutex); + list_add(&ce->active_link, &ctx->active_engines); + mutex_unlock(&ctx->mutex); + + intel_context_get(ce); + smp_mb__before_atomic(); /* flush pin before it is visible */ + } + + atomic_inc(&ce->pin_count); + GEM_BUG_ON(!intel_context_is_pinned(ce)); /* no overflow! */ + + mutex_unlock(&ce->pin_mutex); + return ce; + +err: + mutex_unlock(&ce->pin_mutex); + return ERR_PTR(err); +} + +void intel_context_unpin(struct intel_context *ce) +{ + if (likely(atomic_add_unless(&ce->pin_count, -1, 1))) + return; + + /* We may be called from inside intel_context_pin() to evict another */ + intel_context_get(ce); + mutex_lock_nested(&ce->pin_mutex, SINGLE_DEPTH_NESTING); + + if (likely(atomic_dec_and_test(&ce->pin_count))) { + ce->ops->unpin(ce); + + mutex_lock(&ce->gem_context->mutex); + list_del(&ce->active_link); + mutex_unlock(&ce->gem_context->mutex); + + i915_gem_context_put(ce->gem_context); + intel_context_put(ce); + } + + mutex_unlock(&ce->pin_mutex); + intel_context_put(ce); +} + +static void intel_context_retire(struct i915_active_request *active, + struct i915_request *rq) +{ + struct intel_context *ce = + container_of(active, typeof(*ce), active_tracker); + + intel_context_unpin(ce); +} + +void +intel_context_init(struct intel_context *ce, + struct i915_gem_context *ctx, + struct intel_engine_cs *engine) +{ + kref_init(&ce->ref); + + ce->gem_context = ctx; + ce->engine = engine; + ce->ops = engine->cops; + + INIT_LIST_HEAD(&ce->signal_link); + INIT_LIST_HEAD(&ce->signals); + + mutex_init(&ce->pin_mutex); + + /* Use the whole device by default */ + ce->sseu = intel_device_default_sseu(ctx->i915); + + i915_active_request_init(&ce->active_tracker, + NULL, intel_context_retire); +} + +static void i915_global_context_shrink(void) +{ + kmem_cache_shrink(global.slab_ce); +} + +static void i915_global_context_exit(void) +{ + kmem_cache_destroy(global.slab_ce); +} + +static struct i915_global_context global = { { + .shrink = i915_global_context_shrink, + .exit = i915_global_context_exit, +} }; + +int __init i915_global_context_init(void) +{ + global.slab_ce = KMEM_CACHE(intel_context, SLAB_HWCACHE_ALIGN); + if (!global.slab_ce) + return -ENOMEM; + + i915_global_register(&global.base); + return 0; +} diff --git a/drivers/gpu/drm/i915/intel_context.h b/drivers/gpu/drm/i915/intel_context.h new file mode 100644 index 000000000000..ebc861b1a49e --- /dev/null +++ b/drivers/gpu/drm/i915/intel_context.h @@ -0,0 +1,87 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 
Intel Corporation + */ + +#ifndef __INTEL_CONTEXT_H__ +#define __INTEL_CONTEXT_H__ + +#include <linux/lockdep.h> + +#include "intel_context_types.h" +#include "intel_engine_types.h" + +struct intel_context *intel_context_alloc(void); +void intel_context_free(struct intel_context *ce); + +void intel_context_init(struct intel_context *ce, + struct i915_gem_context *ctx, + struct intel_engine_cs *engine); + +/** + * intel_context_lookup - Find the matching HW context for this (ctx, engine) + * @ctx - the parent GEM context + * @engine - the target HW engine + * + * May return NULL if the HW context hasn't been instantiated (i.e. unused). + */ +struct intel_context * +intel_context_lookup(struct i915_gem_context *ctx, + struct intel_engine_cs *engine); + +/** + * intel_context_pin_lock - Stablises the 'pinned' status of the HW context + * @ctx - the parent GEM context + * @engine - the target HW engine + * + * Acquire a lock on the pinned status of the HW context, such that the context + * can neither be bound to the GPU or unbound whilst the lock is held, i.e. + * intel_context_is_pinned() remains stable. + */ +struct intel_context * +intel_context_pin_lock(struct i915_gem_context *ctx, + struct intel_engine_cs *engine); + +static inline bool +intel_context_is_pinned(struct intel_context *ce) +{ + return atomic_read(&ce->pin_count); +} + +static inline void intel_context_pin_unlock(struct intel_context *ce) +__releases(ce->pin_mutex) +{ + mutex_unlock(&ce->pin_mutex); +} + +struct intel_context * +__intel_context_insert(struct i915_gem_context *ctx, + struct intel_engine_cs *engine, + struct intel_context *ce); +void +__intel_context_remove(struct intel_context *ce); + +struct intel_context * +intel_context_pin(struct i915_gem_context *ctx, struct intel_engine_cs *engine); + +static inline void __intel_context_pin(struct intel_context *ce) +{ + GEM_BUG_ON(!intel_context_is_pinned(ce)); + atomic_inc(&ce->pin_count); +} + +void intel_context_unpin(struct intel_context *ce); + +static inline struct intel_context *intel_context_get(struct intel_context *ce) +{ + kref_get(&ce->ref); + return ce; +} + +static inline void intel_context_put(struct intel_context *ce) +{ + kref_put(&ce->ref, ce->ops->destroy); +} + +#endif /* __INTEL_CONTEXT_H__ */ diff --git a/drivers/gpu/drm/i915/intel_context_types.h b/drivers/gpu/drm/i915/intel_context_types.h new file mode 100644 index 000000000000..624729a35875 --- /dev/null +++ b/drivers/gpu/drm/i915/intel_context_types.h @@ -0,0 +1,73 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#ifndef __INTEL_CONTEXT_TYPES__ +#define __INTEL_CONTEXT_TYPES__ + +#include <linux/kref.h> +#include <linux/list.h> +#include <linux/mutex.h> +#include <linux/rbtree.h> +#include <linux/types.h> + +#include "i915_active_types.h" + +struct i915_gem_context; +struct i915_vma; +struct intel_context; +struct intel_ring; + +struct intel_context_ops { + int (*pin)(struct intel_context *ce); + void (*unpin)(struct intel_context *ce); + + void (*destroy)(struct kref *kref); +}; + +/* + * Powergating configuration for a particular (context,engine). 
+ */ +struct intel_sseu { + u8 slice_mask; + u8 subslice_mask; + u8 min_eus_per_subslice; + u8 max_eus_per_subslice; +}; + +struct intel_context { + struct kref ref; + + struct i915_gem_context *gem_context; + struct intel_engine_cs *engine; + struct intel_engine_cs *active; + + struct list_head active_link; + struct list_head signal_link; + struct list_head signals; + + struct i915_vma *state; + struct intel_ring *ring; + + u32 *lrc_reg_state; + u64 lrc_desc; + + atomic_t pin_count; + struct mutex pin_mutex; /* guards pinning and associated on-gpuing */ + + /** + * active_tracker: Active tracker for the external rq activity + * on this intel_context object. + */ + struct i915_active_request active_tracker; + + const struct intel_context_ops *ops; + struct rb_node node; + + /** sseu: Control eu/slice partitioning */ + struct intel_sseu sseu; +}; + +#endif /* __INTEL_CONTEXT_TYPES__ */ diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 3716b2ee362f..50530e49982c 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -435,7 +435,7 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector) I915_WRITE(crt->adpa_reg, adpa); - if (intel_wait_for_register(dev_priv, + if (intel_wait_for_register(&dev_priv->uncore, crt->adpa_reg, ADPA_CRT_HOTPLUG_FORCE_TRIGGER, 0, 1000)) @@ -489,7 +489,7 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector) I915_WRITE(crt->adpa_reg, adpa); - if (intel_wait_for_register(dev_priv, + if (intel_wait_for_register(&dev_priv->uncore, crt->adpa_reg, ADPA_CRT_HOTPLUG_FORCE_TRIGGER, 0, 1000)) { @@ -542,7 +542,7 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector) CRT_HOTPLUG_FORCE_DETECT, CRT_HOTPLUG_FORCE_DETECT); /* wait for FORCE_DETECT to go off */ - if (intel_wait_for_register(dev_priv, PORT_HOTPLUG_EN, + if (intel_wait_for_register(&dev_priv->uncore, PORT_HOTPLUG_EN, CRT_HOTPLUG_FORCE_DETECT, 0, 1000)) DRM_DEBUG_KMS("timed out waiting for FORCE_DETECT to go off"); diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c index e8ac04c33e29..862a8f686ef5 100644 --- a/drivers/gpu/drm/i915/intel_csr.c +++ b/drivers/gpu/drm/i915/intel_csr.c @@ -486,7 +486,7 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv) if (INTEL_GEN(dev_priv) >= 12) { /* Allow to load fw via parameter using the last known size */ csr->max_fw_size = GEN12_CSR_MAX_FW_SIZE; - } else if (IS_ICELAKE(dev_priv)) { + } else if (IS_GEN(dev_priv, 11)) { csr->fw_path = ICL_CSR_PATH; csr->required_version = ICL_CSR_VERSION_REQUIRED; csr->max_fw_size = ICL_CSR_MAX_FW_SIZE; diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index ca705546a0ab..3f1e491bd0c0 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -851,7 +851,7 @@ static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port por level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift; - if (IS_ICELAKE(dev_priv)) { + if (INTEL_GEN(dev_priv) >= 11) { if (intel_port_is_combophy(dev_priv, port)) icl_get_combo_buf_trans(dev_priv, port, INTEL_OUTPUT_HDMI, 0, &n_entries); @@ -1240,24 +1240,15 @@ static int hsw_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv, return (refclk * n * 100) / (p * r); } -static int skl_calc_wrpll_link(struct drm_i915_private *dev_priv, - enum intel_dpll_id pll_id) +static int skl_calc_wrpll_link(const struct intel_dpll_hw_state *pll_state) { - i915_reg_t cfgcr1_reg, cfgcr2_reg; 
- u32 cfgcr1_val, cfgcr2_val; u32 p0, p1, p2, dco_freq; - cfgcr1_reg = DPLL_CFGCR1(pll_id); - cfgcr2_reg = DPLL_CFGCR2(pll_id); - - cfgcr1_val = I915_READ(cfgcr1_reg); - cfgcr2_val = I915_READ(cfgcr2_reg); + p0 = pll_state->cfgcr2 & DPLL_CFGCR2_PDIV_MASK; + p2 = pll_state->cfgcr2 & DPLL_CFGCR2_KDIV_MASK; - p0 = cfgcr2_val & DPLL_CFGCR2_PDIV_MASK; - p2 = cfgcr2_val & DPLL_CFGCR2_KDIV_MASK; - - if (cfgcr2_val & DPLL_CFGCR2_QDIV_MODE(1)) - p1 = (cfgcr2_val & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8; + if (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_MODE(1)) + p1 = (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8; else p1 = 1; @@ -1292,10 +1283,11 @@ static int skl_calc_wrpll_link(struct drm_i915_private *dev_priv, break; } - dco_freq = (cfgcr1_val & DPLL_CFGCR1_DCO_INTEGER_MASK) * 24 * 1000; + dco_freq = (pll_state->cfgcr1 & DPLL_CFGCR1_DCO_INTEGER_MASK) + * 24 * 1000; - dco_freq += (((cfgcr1_val & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) * 24 * - 1000) / 0x8000; + dco_freq += (((pll_state->cfgcr1 & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) + * 24 * 1000) / 0x8000; if (WARN_ON(p0 == 0 || p1 == 0 || p2 == 0)) return 0; @@ -1304,24 +1296,15 @@ static int skl_calc_wrpll_link(struct drm_i915_private *dev_priv, } int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv, - enum intel_dpll_id pll_id) + struct intel_dpll_hw_state *pll_state) { - u32 cfgcr0, cfgcr1; u32 p0, p1, p2, dco_freq, ref_clock; - if (INTEL_GEN(dev_priv) >= 11) { - cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(pll_id)); - cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(pll_id)); - } else { - cfgcr0 = I915_READ(CNL_DPLL_CFGCR0(pll_id)); - cfgcr1 = I915_READ(CNL_DPLL_CFGCR1(pll_id)); - } - - p0 = cfgcr1 & DPLL_CFGCR1_PDIV_MASK; - p2 = cfgcr1 & DPLL_CFGCR1_KDIV_MASK; + p0 = pll_state->cfgcr1 & DPLL_CFGCR1_PDIV_MASK; + p2 = pll_state->cfgcr1 & DPLL_CFGCR1_KDIV_MASK; - if (cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1)) - p1 = (cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >> + if (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1)) + p1 = (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >> DPLL_CFGCR1_QDIV_RATIO_SHIFT; else p1 = 1; @@ -1349,16 +1332,17 @@ int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv, case DPLL_CFGCR1_KDIV_2: p2 = 2; break; - case DPLL_CFGCR1_KDIV_4: - p2 = 4; + case DPLL_CFGCR1_KDIV_3: + p2 = 3; break; } ref_clock = cnl_hdmi_pll_ref_clock(dev_priv); - dco_freq = (cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) * ref_clock; + dco_freq = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) + * ref_clock; - dco_freq += (((cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >> + dco_freq += (((pll_state->cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >> DPLL_CFGCR0_DCO_FRACTION_SHIFT) * ref_clock) / 0x8000; if (WARN_ON(p0 == 0 || p1 == 0 || p2 == 0)) @@ -1390,25 +1374,21 @@ static int icl_calc_tbt_pll_link(struct drm_i915_private *dev_priv, } static int icl_calc_mg_pll_link(struct drm_i915_private *dev_priv, - enum port port) + const struct intel_dpll_hw_state *pll_state) { - enum tc_port tc_port = intel_port_to_tc(dev_priv, port); - u32 mg_pll_div0, mg_clktop_hsclkctl; - u32 m1, m2_int, m2_frac, div1, div2, refclk; + u32 m1, m2_int, m2_frac, div1, div2, ref_clock; u64 tmp; - refclk = dev_priv->cdclk.hw.ref; - - mg_pll_div0 = I915_READ(MG_PLL_DIV0(tc_port)); - mg_clktop_hsclkctl = I915_READ(MG_CLKTOP2_HSCLKCTL(tc_port)); + ref_clock = dev_priv->cdclk.hw.ref; - m1 = I915_READ(MG_PLL_DIV1(tc_port)) & MG_PLL_DIV1_FBPREDIV_MASK; - m2_int = mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK; - m2_frac = (mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) ? 
- (mg_pll_div0 & MG_PLL_DIV0_FBDIV_FRAC_MASK) >> - MG_PLL_DIV0_FBDIV_FRAC_SHIFT : 0; + m1 = pll_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK; + m2_int = pll_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK; + m2_frac = (pll_state->mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) ? + (pll_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_FRAC_MASK) >> + MG_PLL_DIV0_FBDIV_FRAC_SHIFT : 0; - switch (mg_clktop_hsclkctl & MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK) { + switch (pll_state->mg_clktop2_hsclkctl & + MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK) { case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2: div1 = 2; break; @@ -1422,12 +1402,14 @@ static int icl_calc_mg_pll_link(struct drm_i915_private *dev_priv, div1 = 7; break; default: - MISSING_CASE(mg_clktop_hsclkctl); + MISSING_CASE(pll_state->mg_clktop2_hsclkctl); return 0; } - div2 = (mg_clktop_hsclkctl & MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK) >> + div2 = (pll_state->mg_clktop2_hsclkctl & + MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK) >> MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_SHIFT; + /* div2 value of 0 is same as 1 means no div */ if (div2 == 0) div2 = 1; @@ -1436,8 +1418,8 @@ static int icl_calc_mg_pll_link(struct drm_i915_private *dev_priv, * Adjust the original formula to delay the division by 2^22 in order to * minimize possible rounding errors. */ - tmp = (u64)m1 * m2_int * refclk + - (((u64)m1 * m2_frac * refclk) >> 22); + tmp = (u64)m1 * m2_int * ref_clock + + (((u64)m1 * m2_frac * ref_clock) >> 22); tmp = div_u64(tmp, 5 * div1 * div2); return tmp; @@ -1471,25 +1453,24 @@ static void icl_ddi_clock_get(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_dpll_hw_state *pll_state = &pipe_config->dpll_hw_state; enum port port = encoder->port; - int link_clock = 0; - u32 pll_id; + int link_clock; - pll_id = intel_get_shared_dpll_id(dev_priv, pipe_config->shared_dpll); if (intel_port_is_combophy(dev_priv, port)) { - if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI)) - link_clock = cnl_calc_wrpll_link(dev_priv, pll_id); - else - link_clock = icl_calc_dp_combo_pll_link(dev_priv, - pll_id); + link_clock = cnl_calc_wrpll_link(dev_priv, pll_state); } else { + enum intel_dpll_id pll_id = intel_get_shared_dpll_id(dev_priv, + pipe_config->shared_dpll); + if (pll_id == DPLL_ID_ICL_TBTPLL) link_clock = icl_calc_tbt_pll_link(dev_priv, port); else - link_clock = icl_calc_mg_pll_link(dev_priv, port); + link_clock = icl_calc_mg_pll_link(dev_priv, pll_state); } pipe_config->port_clock = link_clock; + ddi_dotclock_get(pipe_config); } @@ -1497,18 +1478,13 @@ static void cnl_ddi_clock_get(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - int link_clock = 0; - u32 cfgcr0; - enum intel_dpll_id pll_id; - - pll_id = intel_get_shared_dpll_id(dev_priv, pipe_config->shared_dpll); + struct intel_dpll_hw_state *pll_state = &pipe_config->dpll_hw_state; + int link_clock; - cfgcr0 = I915_READ(CNL_DPLL_CFGCR0(pll_id)); - - if (cfgcr0 & DPLL_CFGCR0_HDMI_MODE) { - link_clock = cnl_calc_wrpll_link(dev_priv, pll_id); + if (pll_state->cfgcr0 & DPLL_CFGCR0_HDMI_MODE) { + link_clock = cnl_calc_wrpll_link(dev_priv, pll_state); } else { - link_clock = cfgcr0 & DPLL_CFGCR0_LINK_RATE_MASK; + link_clock = pll_state->cfgcr0 & DPLL_CFGCR0_LINK_RATE_MASK; switch (link_clock) { case DPLL_CFGCR0_LINK_RATE_810: @@ -1548,22 +1524,20 @@ static void cnl_ddi_clock_get(struct intel_encoder *encoder, } static void skl_ddi_clock_get(struct intel_encoder *encoder, 
- struct intel_crtc_state *pipe_config) + struct intel_crtc_state *pipe_config) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - int link_clock = 0; - u32 dpll_ctl1; - enum intel_dpll_id pll_id; + struct intel_dpll_hw_state *pll_state = &pipe_config->dpll_hw_state; + int link_clock; - pll_id = intel_get_shared_dpll_id(dev_priv, pipe_config->shared_dpll); - - dpll_ctl1 = I915_READ(DPLL_CTRL1); - - if (dpll_ctl1 & DPLL_CTRL1_HDMI_MODE(pll_id)) { - link_clock = skl_calc_wrpll_link(dev_priv, pll_id); + /* + * ctrl1 register is already shifted for each pll, just use 0 to get + * the internal shift for each field + */ + if (pll_state->ctrl1 & DPLL_CTRL1_HDMI_MODE(0)) { + link_clock = skl_calc_wrpll_link(pll_state); } else { - link_clock = dpll_ctl1 & DPLL_CTRL1_LINK_RATE_MASK(pll_id); - link_clock >>= DPLL_CTRL1_LINK_RATE_SHIFT(pll_id); + link_clock = pll_state->ctrl1 & DPLL_CTRL1_LINK_RATE_MASK(0); + link_clock >>= DPLL_CTRL1_LINK_RATE_SHIFT(0); switch (link_clock) { case DPLL_CTRL1_LINK_RATE_810: @@ -1643,24 +1617,17 @@ static void hsw_ddi_clock_get(struct intel_encoder *encoder, ddi_dotclock_get(pipe_config); } -static int bxt_calc_pll_link(struct intel_crtc_state *crtc_state) +static int bxt_calc_pll_link(const struct intel_dpll_hw_state *pll_state) { - struct intel_dpll_hw_state *state; struct dpll clock; - /* For DDI ports we always use a shared PLL. */ - if (WARN_ON(!crtc_state->shared_dpll)) - return 0; - - state = &crtc_state->dpll_hw_state; - clock.m1 = 2; - clock.m2 = (state->pll0 & PORT_PLL_M2_MASK) << 22; - if (state->pll3 & PORT_PLL_M2_FRAC_ENABLE) - clock.m2 |= state->pll2 & PORT_PLL_M2_FRAC_MASK; - clock.n = (state->pll1 & PORT_PLL_N_MASK) >> PORT_PLL_N_SHIFT; - clock.p1 = (state->ebb0 & PORT_PLL_P1_MASK) >> PORT_PLL_P1_SHIFT; - clock.p2 = (state->ebb0 & PORT_PLL_P2_MASK) >> PORT_PLL_P2_SHIFT; + clock.m2 = (pll_state->pll0 & PORT_PLL_M2_MASK) << 22; + if (pll_state->pll3 & PORT_PLL_M2_FRAC_ENABLE) + clock.m2 |= pll_state->pll2 & PORT_PLL_M2_FRAC_MASK; + clock.n = (pll_state->pll1 & PORT_PLL_N_MASK) >> PORT_PLL_N_SHIFT; + clock.p1 = (pll_state->ebb0 & PORT_PLL_P1_MASK) >> PORT_PLL_P1_SHIFT; + clock.p2 = (pll_state->ebb0 & PORT_PLL_P2_MASK) >> PORT_PLL_P2_SHIFT; return chv_calc_dpll_params(100000, &clock); } @@ -1668,7 +1635,8 @@ static int bxt_calc_pll_link(struct intel_crtc_state *crtc_state) static void bxt_ddi_clock_get(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { - pipe_config->port_clock = bxt_calc_pll_link(pipe_config); + pipe_config->port_clock = + bxt_calc_pll_link(&pipe_config->dpll_hw_state); ddi_dotclock_get(pipe_config); } @@ -1678,7 +1646,7 @@ static void intel_ddi_clock_get(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - if (IS_ICELAKE(dev_priv)) + if (INTEL_GEN(dev_priv) >= 11) icl_ddi_clock_get(encoder, pipe_config); else if (IS_CANNONLAKE(dev_priv)) cnl_ddi_clock_get(encoder, pipe_config); @@ -1911,7 +1879,7 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector) goto out; } - if (port == PORT_A) + if (HAS_TRANSCODER_EDP(dev_priv) && port == PORT_A) cpu_transcoder = TRANSCODER_EDP; else cpu_transcoder = (enum transcoder) pipe; @@ -1973,7 +1941,7 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder, if (!(tmp & DDI_BUF_CTL_ENABLE)) goto out; - if (port == PORT_A) { + if (HAS_TRANSCODER_EDP(dev_priv) && port == PORT_A) { tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP)); switch (tmp & TRANS_DDI_EDP_INPUT_MASK) { @@ -2225,7 
+2193,7 @@ u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder) enum port port = encoder->port; int n_entries; - if (IS_ICELAKE(dev_priv)) { + if (INTEL_GEN(dev_priv) >= 11) { if (intel_port_is_combophy(dev_priv, port)) icl_get_combo_buf_trans(dev_priv, port, encoder->type, intel_dp->link_rate, &n_entries); @@ -2317,13 +2285,13 @@ static void cnl_ddi_vswing_program(struct intel_encoder *encoder, /* Program PORT_TX_DW4 */ /* We cannot write to GRP. It would overrite individual loadgen */ for (ln = 0; ln < 4; ln++) { - val = I915_READ(CNL_PORT_TX_DW4_LN(port, ln)); + val = I915_READ(CNL_PORT_TX_DW4_LN(ln, port)); val &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK | CURSOR_COEFF_MASK); val |= POST_CURSOR_1(ddi_translations[level].dw4_post_cursor_1); val |= POST_CURSOR_2(ddi_translations[level].dw4_post_cursor_2); val |= CURSOR_COEFF(ddi_translations[level].dw4_cursor_coeff); - I915_WRITE(CNL_PORT_TX_DW4_LN(port, ln), val); + I915_WRITE(CNL_PORT_TX_DW4_LN(ln, port), val); } /* Program PORT_TX_DW5 */ @@ -2379,14 +2347,14 @@ static void cnl_ddi_vswing_sequence(struct intel_encoder *encoder, * > 6 GHz (LN0=0, LN1=0, LN2=0, LN3=0) */ for (ln = 0; ln <= 3; ln++) { - val = I915_READ(CNL_PORT_TX_DW4_LN(port, ln)); + val = I915_READ(CNL_PORT_TX_DW4_LN(ln, port)); val &= ~LOADGEN_SELECT; if ((rate <= 600000 && width == 4 && ln >= 1) || (rate <= 600000 && width < 4 && (ln == 1 || ln == 2))) { val |= LOADGEN_SELECT; } - I915_WRITE(CNL_PORT_TX_DW4_LN(port, ln), val); + I915_WRITE(CNL_PORT_TX_DW4_LN(ln, port), val); } /* 3. Set PORT_CL_DW5 SUS Clock Config to 11b */ @@ -2448,13 +2416,13 @@ static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv, /* Program PORT_TX_DW4 */ /* We cannot write to GRP. It would overwrite individual loadgen. */ for (ln = 0; ln <= 3; ln++) { - val = I915_READ(ICL_PORT_TX_DW4_LN(port, ln)); + val = I915_READ(ICL_PORT_TX_DW4_LN(ln, port)); val &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK | CURSOR_COEFF_MASK); val |= POST_CURSOR_1(ddi_translations[level].dw4_post_cursor_1); val |= POST_CURSOR_2(ddi_translations[level].dw4_post_cursor_2); val |= CURSOR_COEFF(ddi_translations[level].dw4_cursor_coeff); - I915_WRITE(ICL_PORT_TX_DW4_LN(port, ln), val); + I915_WRITE(ICL_PORT_TX_DW4_LN(ln, port), val); } /* Program PORT_TX_DW7 */ @@ -2505,14 +2473,14 @@ static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder, * > 6 GHz (LN0=0, LN1=0, LN2=0, LN3=0) */ for (ln = 0; ln <= 3; ln++) { - val = I915_READ(ICL_PORT_TX_DW4_LN(port, ln)); + val = I915_READ(ICL_PORT_TX_DW4_LN(ln, port)); val &= ~LOADGEN_SELECT; if ((rate <= 600000 && width == 4 && ln >= 1) || (rate <= 600000 && width < 4 && (ln == 1 || ln == 2))) { val |= LOADGEN_SELECT; } - I915_WRITE(ICL_PORT_TX_DW4_LN(port, ln), val); + I915_WRITE(ICL_PORT_TX_DW4_LN(ln, port), val); } /* 3. Set PORT_CL_DW5 SUS Clock Config to 11b */ @@ -2555,33 +2523,33 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder, /* Set MG_TX_LINK_PARAMS cri_use_fs32 to 0. 
*/ for (ln = 0; ln < 2; ln++) { - val = I915_READ(MG_TX1_LINK_PARAMS(port, ln)); + val = I915_READ(MG_TX1_LINK_PARAMS(ln, port)); val &= ~CRI_USE_FS32; - I915_WRITE(MG_TX1_LINK_PARAMS(port, ln), val); + I915_WRITE(MG_TX1_LINK_PARAMS(ln, port), val); - val = I915_READ(MG_TX2_LINK_PARAMS(port, ln)); + val = I915_READ(MG_TX2_LINK_PARAMS(ln, port)); val &= ~CRI_USE_FS32; - I915_WRITE(MG_TX2_LINK_PARAMS(port, ln), val); + I915_WRITE(MG_TX2_LINK_PARAMS(ln, port), val); } /* Program MG_TX_SWINGCTRL with values from vswing table */ for (ln = 0; ln < 2; ln++) { - val = I915_READ(MG_TX1_SWINGCTRL(port, ln)); + val = I915_READ(MG_TX1_SWINGCTRL(ln, port)); val &= ~CRI_TXDEEMPH_OVERRIDE_17_12_MASK; val |= CRI_TXDEEMPH_OVERRIDE_17_12( ddi_translations[level].cri_txdeemph_override_17_12); - I915_WRITE(MG_TX1_SWINGCTRL(port, ln), val); + I915_WRITE(MG_TX1_SWINGCTRL(ln, port), val); - val = I915_READ(MG_TX2_SWINGCTRL(port, ln)); + val = I915_READ(MG_TX2_SWINGCTRL(ln, port)); val &= ~CRI_TXDEEMPH_OVERRIDE_17_12_MASK; val |= CRI_TXDEEMPH_OVERRIDE_17_12( ddi_translations[level].cri_txdeemph_override_17_12); - I915_WRITE(MG_TX2_SWINGCTRL(port, ln), val); + I915_WRITE(MG_TX2_SWINGCTRL(ln, port), val); } /* Program MG_TX_DRVCTRL with values from vswing table */ for (ln = 0; ln < 2; ln++) { - val = I915_READ(MG_TX1_DRVCTRL(port, ln)); + val = I915_READ(MG_TX1_DRVCTRL(ln, port)); val &= ~(CRI_TXDEEMPH_OVERRIDE_11_6_MASK | CRI_TXDEEMPH_OVERRIDE_5_0_MASK); val |= CRI_TXDEEMPH_OVERRIDE_5_0( @@ -2589,9 +2557,9 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder, CRI_TXDEEMPH_OVERRIDE_11_6( ddi_translations[level].cri_txdeemph_override_11_6) | CRI_TXDEEMPH_OVERRIDE_EN; - I915_WRITE(MG_TX1_DRVCTRL(port, ln), val); + I915_WRITE(MG_TX1_DRVCTRL(ln, port), val); - val = I915_READ(MG_TX2_DRVCTRL(port, ln)); + val = I915_READ(MG_TX2_DRVCTRL(ln, port)); val &= ~(CRI_TXDEEMPH_OVERRIDE_11_6_MASK | CRI_TXDEEMPH_OVERRIDE_5_0_MASK); val |= CRI_TXDEEMPH_OVERRIDE_5_0( @@ -2599,7 +2567,7 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder, CRI_TXDEEMPH_OVERRIDE_11_6( ddi_translations[level].cri_txdeemph_override_11_6) | CRI_TXDEEMPH_OVERRIDE_EN; - I915_WRITE(MG_TX2_DRVCTRL(port, ln), val); + I915_WRITE(MG_TX2_DRVCTRL(ln, port), val); /* FIXME: Program CRI_LOADGEN_SEL after the spec is updated */ } @@ -2610,17 +2578,17 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder, * values from table for which TX1 and TX2 enabled. 
*/ for (ln = 0; ln < 2; ln++) { - val = I915_READ(MG_CLKHUB(port, ln)); + val = I915_READ(MG_CLKHUB(ln, port)); if (link_clock < 300000) val |= CFG_LOW_RATE_LKREN_EN; else val &= ~CFG_LOW_RATE_LKREN_EN; - I915_WRITE(MG_CLKHUB(port, ln), val); + I915_WRITE(MG_CLKHUB(ln, port), val); } /* Program the MG_TX_DCC<LN, port being used> based on the link frequency */ for (ln = 0; ln < 2; ln++) { - val = I915_READ(MG_TX1_DCC(port, ln)); + val = I915_READ(MG_TX1_DCC(ln, port)); val &= ~CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK; if (link_clock <= 500000) { val &= ~CFG_AMI_CK_DIV_OVERRIDE_EN; @@ -2628,9 +2596,9 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder, val |= CFG_AMI_CK_DIV_OVERRIDE_EN | CFG_AMI_CK_DIV_OVERRIDE_VAL(1); } - I915_WRITE(MG_TX1_DCC(port, ln), val); + I915_WRITE(MG_TX1_DCC(ln, port), val); - val = I915_READ(MG_TX2_DCC(port, ln)); + val = I915_READ(MG_TX2_DCC(ln, port)); val &= ~CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK; if (link_clock <= 500000) { val &= ~CFG_AMI_CK_DIV_OVERRIDE_EN; @@ -2638,18 +2606,18 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder, val |= CFG_AMI_CK_DIV_OVERRIDE_EN | CFG_AMI_CK_DIV_OVERRIDE_VAL(1); } - I915_WRITE(MG_TX2_DCC(port, ln), val); + I915_WRITE(MG_TX2_DCC(ln, port), val); } /* Program MG_TX_PISO_READLOAD with values from vswing table */ for (ln = 0; ln < 2; ln++) { - val = I915_READ(MG_TX1_PISO_READLOAD(port, ln)); + val = I915_READ(MG_TX1_PISO_READLOAD(ln, port)); val |= CRI_CALCINIT; - I915_WRITE(MG_TX1_PISO_READLOAD(port, ln), val); + I915_WRITE(MG_TX1_PISO_READLOAD(ln, port), val); - val = I915_READ(MG_TX2_PISO_READLOAD(port, ln)); + val = I915_READ(MG_TX2_PISO_READLOAD(ln, port)); val |= CRI_CALCINIT; - I915_WRITE(MG_TX2_PISO_READLOAD(port, ln), val); + I915_WRITE(MG_TX2_PISO_READLOAD(ln, port), val); } } @@ -2698,7 +2666,7 @@ u32 bxt_signal_levels(struct intel_dp *intel_dp) struct intel_encoder *encoder = &dport->base; int level = intel_ddi_dp_level(intel_dp); - if (IS_ICELAKE(dev_priv)) + if (INTEL_GEN(dev_priv) >= 11) icl_ddi_vswing_sequence(encoder, intel_dp->link_rate, level, encoder->type); else if (IS_CANNONLAKE(dev_priv)) @@ -2867,7 +2835,7 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder, mutex_lock(&dev_priv->dpll_lock); - if (IS_ICELAKE(dev_priv)) { + if (INTEL_GEN(dev_priv) >= 11) { if (!intel_port_is_combophy(dev_priv, port)) I915_WRITE(DDI_CLK_SEL(port), icl_pll_to_ddi_clk_sel(encoder, crtc_state)); @@ -2909,7 +2877,7 @@ static void intel_ddi_clk_disable(struct intel_encoder *encoder) struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum port port = encoder->port; - if (IS_ICELAKE(dev_priv)) { + if (INTEL_GEN(dev_priv) >= 11) { if (!intel_port_is_combophy(dev_priv, port)) I915_WRITE(DDI_CLK_SEL(port), DDI_CLK_SEL_NONE); } else if (IS_CANNONLAKE(dev_priv)) { @@ -2928,7 +2896,7 @@ static void icl_enable_phy_clock_gating(struct intel_digital_port *dig_port) struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); enum port port = dig_port->base.port; enum tc_port tc_port = intel_port_to_tc(dev_priv, port); - i915_reg_t mg_regs[2] = { MG_DP_MODE(port, 0), MG_DP_MODE(port, 1) }; + i915_reg_t mg_regs[2] = { MG_DP_MODE(0, port), MG_DP_MODE(1, port) }; u32 val; int i; @@ -2999,8 +2967,8 @@ static void icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port) if (tc_port == PORT_TC_NONE || intel_dig_port->tc_type == TC_PORT_TBT) return; - ln0 = I915_READ(MG_DP_MODE(port, 0)); - ln1 = I915_READ(MG_DP_MODE(port, 1)); + ln0 = I915_READ(MG_DP_MODE(0, port)); 
+ ln1 = I915_READ(MG_DP_MODE(1, port)); switch (intel_dig_port->tc_type) { case TC_PORT_TYPEC: @@ -3050,8 +3018,8 @@ static void icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port) return; } - I915_WRITE(MG_DP_MODE(port, 0), ln0); - I915_WRITE(MG_DP_MODE(port, 1), ln1); + I915_WRITE(MG_DP_MODE(0, port), ln0); + I915_WRITE(MG_DP_MODE(1, port), ln1); } static void intel_dp_sink_set_fec_ready(struct intel_dp *intel_dp, @@ -3078,7 +3046,7 @@ static void intel_ddi_enable_fec(struct intel_encoder *encoder, val |= DP_TP_CTL_FEC_ENABLE; I915_WRITE(DP_TP_CTL(port), val); - if (intel_wait_for_register(dev_priv, DP_TP_STATUS(port), + if (intel_wait_for_register(&dev_priv->uncore, DP_TP_STATUS(port), DP_TP_STATUS_FEC_ENABLE_LIVE, DP_TP_STATUS_FEC_ENABLE_LIVE, 1)) @@ -3126,7 +3094,7 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder, icl_program_mg_dp_mode(dig_port); icl_disable_phy_clock_gating(dig_port); - if (IS_ICELAKE(dev_priv)) + if (INTEL_GEN(dev_priv) >= 11) icl_ddi_vswing_sequence(encoder, crtc_state->port_clock, level, encoder->type); else if (IS_CANNONLAKE(dev_priv)) @@ -3175,7 +3143,7 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder, icl_program_mg_dp_mode(dig_port); icl_disable_phy_clock_gating(dig_port); - if (IS_ICELAKE(dev_priv)) + if (INTEL_GEN(dev_priv) >= 11) icl_ddi_vswing_sequence(encoder, crtc_state->port_clock, level, INTEL_OUTPUT_HDMI); else if (IS_CANNONLAKE(dev_priv)) @@ -3556,7 +3524,9 @@ static void intel_ddi_update_pipe_dp(struct intel_encoder *encoder, { struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); - intel_psr_enable(intel_dp, crtc_state); + intel_ddi_set_pipe_settings(crtc_state); + + intel_psr_update(intel_dp, crtc_state); intel_edp_drrs_enable(intel_dp, crtc_state); intel_panel_update_backlight(encoder, crtc_state, conn_state); @@ -3568,6 +3538,13 @@ static void intel_ddi_update_pipe(struct intel_encoder *encoder, { if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) intel_ddi_update_pipe_dp(encoder, crtc_state, conn_state); + + if (conn_state->content_protection == + DRM_MODE_CONTENT_PROTECTION_DESIRED) + intel_hdcp_enable(to_intel_connector(conn_state->connector)); + else if (conn_state->content_protection == + DRM_MODE_CONTENT_PROTECTION_UNDESIRED) + intel_hdcp_disable(to_intel_connector(conn_state->connector)); } static void intel_ddi_set_fia_lane_count(struct intel_encoder *encoder, @@ -3704,7 +3681,7 @@ static bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv, void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv, struct intel_crtc_state *crtc_state) { - if (IS_ICELAKE(dev_priv) && crtc_state->port_clock > 594000) + if (INTEL_GEN(dev_priv) >= 11 && crtc_state->port_clock > 594000) crtc_state->min_voltage_level = 1; else if (IS_CANNONLAKE(dev_priv) && crtc_state->port_clock > 594000) crtc_state->min_voltage_level = 2; @@ -3757,7 +3734,10 @@ void intel_ddi_get_config(struct intel_encoder *encoder, pipe_config->has_hdmi_sink = true; intel_dig_port = enc_to_dig_port(&encoder->base); - if (intel_dig_port->infoframe_enabled(encoder, pipe_config)) + pipe_config->infoframes.enable |= + intel_hdmi_infoframes_enabled(encoder, pipe_config); + + if (pipe_config->infoframes.enable) pipe_config->has_infoframe = true; if (temp & TRANS_DDI_HDMI_SCRAMBLING) @@ -3821,6 +3801,18 @@ void intel_ddi_get_config(struct intel_encoder *encoder, bxt_ddi_phy_get_lane_lat_optim_mask(encoder); intel_ddi_compute_min_voltage_level(dev_priv, pipe_config); + + 
intel_hdmi_read_gcp_infoframe(encoder, pipe_config); + + intel_read_infoframe(encoder, pipe_config, + HDMI_INFOFRAME_TYPE_AVI, + &pipe_config->infoframes.avi); + intel_read_infoframe(encoder, pipe_config, + HDMI_INFOFRAME_TYPE_SPD, + &pipe_config->infoframes.spd); + intel_read_infoframe(encoder, pipe_config, + HDMI_INFOFRAME_TYPE_VENDOR, + &pipe_config->infoframes.hdmi); } static enum intel_output_type @@ -3849,7 +3841,7 @@ static int intel_ddi_compute_config(struct intel_encoder *encoder, enum port port = encoder->port; int ret; - if (port == PORT_A) + if (HAS_TRANSCODER_EDP(dev_priv) && port == PORT_A) pipe_config->cpu_transcoder = TRANSCODER_EDP; if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI)) @@ -3951,23 +3943,10 @@ static int modeset_pipe(struct drm_crtc *crtc, goto out; } - crtc_state->mode_changed = true; - - ret = drm_atomic_add_affected_connectors(state, crtc); - if (ret) - goto out; - - ret = drm_atomic_add_affected_planes(state, crtc); - if (ret) - goto out; + crtc_state->connectors_changed = true; ret = drm_atomic_commit(state); - if (ret) - goto out; - - return 0; - - out: +out: drm_atomic_state_put(state); return ret; diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c index 855a5074ad77..e0f5e0231d04 100644 --- a/drivers/gpu/drm/i915/intel_device_info.c +++ b/drivers/gpu/drm/i915/intel_device_info.c @@ -57,6 +57,7 @@ static const char * const platform_names[] = { PLATFORM_NAME(COFFEELAKE), PLATFORM_NAME(CANNONLAKE), PLATFORM_NAME(ICELAKE), + PLATFORM_NAME(ELKHARTLAKE), }; #undef PLATFORM_NAME @@ -155,9 +156,15 @@ static void gen11_sseu_info_init(struct drm_i915_private *dev_priv) u8 eu_en; int s; - sseu->max_slices = 1; - sseu->max_subslices = 8; - sseu->max_eus_per_subslice = 8; + if (IS_ELKHARTLAKE(dev_priv)) { + sseu->max_slices = 1; + sseu->max_subslices = 4; + sseu->max_eus_per_subslice = 8; + } else { + sseu->max_slices = 1; + sseu->max_subslices = 8; + sseu->max_eus_per_subslice = 8; + } s_en = I915_READ(GEN11_GT_SLICE_ENABLE) & GEN11_GT_S_ENA_MASK; ss_en = ~I915_READ(GEN11_GT_SUBSLICE_DISABLE); @@ -738,9 +745,9 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv) runtime->num_scalers[PIPE_C] = 1; } - BUILD_BUG_ON(I915_NUM_ENGINES > BITS_PER_TYPE(intel_ring_mask_t)); + BUILD_BUG_ON(BITS_PER_TYPE(intel_engine_mask_t) < I915_NUM_ENGINES); - if (IS_GEN(dev_priv, 11)) + if (INTEL_GEN(dev_priv) >= 11) for_each_pipe(dev_priv, pipe) runtime->num_sprites[pipe] = 6; else if (IS_GEN(dev_priv, 10) || IS_GEMINILAKE(dev_priv)) @@ -844,7 +851,7 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv) if (IS_GEN(dev_priv, 6) && intel_vtd_active()) { DRM_INFO("Disabling ppGTT for VT-d support\n"); - info->ppgtt = INTEL_PPGTT_NONE; + info->ppgtt_type = INTEL_PPGTT_NONE; } /* Initialize command stream timestamp frequency */ @@ -871,23 +878,24 @@ void intel_device_info_init_mmio(struct drm_i915_private *dev_priv) unsigned int logical_vdbox = 0; unsigned int i; u32 media_fuse; + u16 vdbox_mask; + u16 vebox_mask; if (INTEL_GEN(dev_priv) < 11) return; media_fuse = ~I915_READ(GEN11_GT_VEBOX_VDBOX_DISABLE); - RUNTIME_INFO(dev_priv)->vdbox_enable = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK; - RUNTIME_INFO(dev_priv)->vebox_enable = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >> - GEN11_GT_VEBOX_DISABLE_SHIFT; + vdbox_mask = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK; + vebox_mask = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >> + GEN11_GT_VEBOX_DISABLE_SHIFT; - DRM_DEBUG_DRIVER("vdbox enable: %04x\n", 
RUNTIME_INFO(dev_priv)->vdbox_enable); for (i = 0; i < I915_MAX_VCS; i++) { if (!HAS_ENGINE(dev_priv, _VCS(i))) continue; - if (!(BIT(i) & RUNTIME_INFO(dev_priv)->vdbox_enable)) { - info->ring_mask &= ~ENGINE_MASK(_VCS(i)); + if (!(BIT(i) & vdbox_mask)) { + info->engine_mask &= ~BIT(_VCS(i)); DRM_DEBUG_DRIVER("vcs%u fused off\n", i); continue; } @@ -899,15 +907,20 @@ void intel_device_info_init_mmio(struct drm_i915_private *dev_priv) if (logical_vdbox++ % 2 == 0) RUNTIME_INFO(dev_priv)->vdbox_sfc_access |= BIT(i); } + DRM_DEBUG_DRIVER("vdbox enable: %04x, instances: %04lx\n", + vdbox_mask, VDBOX_MASK(dev_priv)); + GEM_BUG_ON(vdbox_mask != VDBOX_MASK(dev_priv)); - DRM_DEBUG_DRIVER("vebox enable: %04x\n", RUNTIME_INFO(dev_priv)->vebox_enable); for (i = 0; i < I915_MAX_VECS; i++) { if (!HAS_ENGINE(dev_priv, _VECS(i))) continue; - if (!(BIT(i) & RUNTIME_INFO(dev_priv)->vebox_enable)) { - info->ring_mask &= ~ENGINE_MASK(_VECS(i)); + if (!(BIT(i) & vebox_mask)) { + info->engine_mask &= ~BIT(_VECS(i)); DRM_DEBUG_DRIVER("vecs%u fused off\n", i); } } + DRM_DEBUG_DRIVER("vebox enable: %04x, instances: %04lx\n", + vebox_mask, VEBOX_MASK(dev_priv)); + GEM_BUG_ON(vebox_mask != VEBOX_MASK(dev_priv)); } diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h index e8b8661df746..7e04b4829aba 100644 --- a/drivers/gpu/drm/i915/intel_device_info.h +++ b/drivers/gpu/drm/i915/intel_device_info.h @@ -73,14 +73,14 @@ enum intel_platform { INTEL_CANNONLAKE, /* gen11 */ INTEL_ICELAKE, + INTEL_ELKHARTLAKE, INTEL_MAX_PLATFORMS }; -enum intel_ppgtt { +enum intel_ppgtt_type { INTEL_PPGTT_NONE = I915_GEM_PPGTT_NONE, INTEL_PPGTT_ALIASING = I915_GEM_PPGTT_ALIASING, INTEL_PPGTT_FULL = I915_GEM_PPGTT_FULL, - INTEL_PPGTT_FULL_4LVL, }; #define DEV_INFO_FOR_EACH_FLAG(func) \ @@ -150,19 +150,21 @@ struct sseu_dev_info { u8 eu_mask[GEN_MAX_SLICES * GEN_MAX_SUBSLICES]; }; -typedef u8 intel_ring_mask_t; +typedef u8 intel_engine_mask_t; struct intel_device_info { u16 gen_mask; u8 gen; u8 gt; /* GT number, 0 if undefined */ - intel_ring_mask_t ring_mask; /* Rings supported by the HW */ + intel_engine_mask_t engine_mask; /* Engines supported by the HW */ enum intel_platform platform; u32 platform_mask; - enum intel_ppgtt ppgtt; + enum intel_ppgtt_type ppgtt_type; + unsigned int ppgtt_size; /* log2, e.g. 31/32/48 bits */ + unsigned int page_sizes; /* page sizes supported by the HW */ u32 display_mmio_offset; @@ -200,17 +202,13 @@ struct intel_runtime_info { u8 num_sprites[I915_MAX_PIPES]; u8 num_scalers[I915_MAX_PIPES]; - u8 num_rings; + u8 num_engines; /* Slice/subslice/EU info */ struct sseu_dev_info sseu; u32 cs_timestamp_frequency_khz; - /* Enabled (not fused off) media engine bitmasks. 
*/ - u8 vdbox_enable; - u8 vebox_enable; - /* Media engine access to SFC per instance */ u8 vdbox_sfc_access; }; diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 94496488641c..8576a7f799f2 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -595,7 +595,7 @@ i9xx_select_p2_div(const struct intel_limit *limit, const struct intel_crtc_state *crtc_state, int target) { - struct drm_device *dev = crtc_state->base.crtc->dev; + struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { /* @@ -603,7 +603,7 @@ i9xx_select_p2_div(const struct intel_limit *limit, * We haven't figured out how to reliably set up different * single/dual channel state, if we even can. */ - if (intel_is_dual_link_lvds(dev)) + if (intel_is_dual_link_lvds(dev_priv)) return limit->p2.p2_fast; else return limit->p2.p2_slow; @@ -951,14 +951,15 @@ chv_find_best_dpll(const struct intel_limit *limit, return found; } -bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock, +bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, struct dpll *best_clock) { int refclk = 100000; const struct intel_limit *limit = &intel_limits_bxt; return chv_find_best_dpll(limit, crtc_state, - target_clock, refclk, NULL, best_clock); + crtc_state->port_clock, refclk, + NULL, best_clock); } bool intel_crtc_active(struct intel_crtc *crtc) @@ -1039,7 +1040,7 @@ intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state) i915_reg_t reg = PIPECONF(cpu_transcoder); /* Wait for the Pipe State to go off */ - if (intel_wait_for_register(dev_priv, + if (intel_wait_for_register(&dev_priv->uncore, reg, I965_PIPECONF_ACTIVE, 0, 100)) WARN(1, "pipe_off wait timed out\n"); @@ -1345,7 +1346,7 @@ static void _vlv_enable_pll(struct intel_crtc *crtc, POSTING_READ(DPLL(pipe)); udelay(150); - if (intel_wait_for_register(dev_priv, + if (intel_wait_for_register(&dev_priv->uncore, DPLL(pipe), DPLL_LOCK_VLV, DPLL_LOCK_VLV, @@ -1398,7 +1399,7 @@ static void _chv_enable_pll(struct intel_crtc *crtc, I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll); /* Check PLL is locked */ - if (intel_wait_for_register(dev_priv, + if (intel_wait_for_register(&dev_priv->uncore, DPLL(pipe), DPLL_LOCK_VLV, DPLL_LOCK_VLV, 1)) DRM_ERROR("PLL %d failed to lock\n", pipe); @@ -1441,17 +1442,12 @@ static void chv_enable_pll(struct intel_crtc *crtc, } } -static int intel_num_dvo_pipes(struct drm_i915_private *dev_priv) +static bool i9xx_has_pps(struct drm_i915_private *dev_priv) { - struct intel_crtc *crtc; - int count = 0; - - for_each_intel_crtc(&dev_priv->drm, crtc) { - count += crtc->base.state->active && - intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DVO); - } + if (IS_I830(dev_priv)) + return false; - return count; + return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv); } static void i9xx_enable_pll(struct intel_crtc *crtc, @@ -1465,29 +1461,15 @@ static void i9xx_enable_pll(struct intel_crtc *crtc, assert_pipe_disabled(dev_priv, crtc->pipe); /* PLL is protected by panel, make sure we can write it */ - if (IS_MOBILE(dev_priv) && !IS_I830(dev_priv)) + if (i9xx_has_pps(dev_priv)) assert_panel_unlocked(dev_priv, crtc->pipe); - /* Enable DVO 2x clock on both PLLs if necessary */ - if (IS_I830(dev_priv) && intel_num_dvo_pipes(dev_priv) > 0) { - /* - * It appears to be important that we don't enable this - * for the current pipe before otherwise configuring the - * PLL. 
No idea how this should be handled if multiple - * DVO outputs are enabled simultaneosly. - */ - dpll |= DPLL_DVO_2X_MODE; - I915_WRITE(DPLL(!crtc->pipe), - I915_READ(DPLL(!crtc->pipe)) | DPLL_DVO_2X_MODE); - } - /* * Apparently we need to have VGA mode enabled prior to changing * the P1/P2 dividers. Otherwise the DPLL will keep using the old * dividers, even though the register value does change. */ - I915_WRITE(reg, 0); - + I915_WRITE(reg, dpll & ~DPLL_VGA_MODE_DIS); I915_WRITE(reg, dpll); /* Wait for the clocks to stabilize. */ @@ -1520,16 +1502,6 @@ static void i9xx_disable_pll(const struct intel_crtc_state *crtc_state) struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; - /* Disable DVO 2x clock on both PLLs if necessary */ - if (IS_I830(dev_priv) && - intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO) && - !intel_num_dvo_pipes(dev_priv)) { - I915_WRITE(DPLL(PIPE_B), - I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE); - I915_WRITE(DPLL(PIPE_A), - I915_READ(DPLL(PIPE_A)) & ~DPLL_DVO_2X_MODE); - } - /* Don't disable pipe or pipe PLLs if needed */ if (IS_I830(dev_priv)) return; @@ -1608,7 +1580,7 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv, BUG(); } - if (intel_wait_for_register(dev_priv, + if (intel_wait_for_register(&dev_priv->uncore, dpll_reg, port_mask, expected_mask, 1000)) WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n", @@ -1658,17 +1630,18 @@ static void ironlake_enable_pch_transcoder(const struct intel_crtc_state *crtc_s } val &= ~TRANS_INTERLACE_MASK; - if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) + if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) { if (HAS_PCH_IBX(dev_priv) && intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) val |= TRANS_LEGACY_INTERLACED_ILK; else val |= TRANS_INTERLACED; - else + } else { val |= TRANS_PROGRESSIVE; + } I915_WRITE(reg, val | TRANS_ENABLE); - if (intel_wait_for_register(dev_priv, + if (intel_wait_for_register(&dev_priv->uncore, reg, TRANS_STATE_ENABLE, TRANS_STATE_ENABLE, 100)) DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe)); @@ -1698,7 +1671,7 @@ static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv, val |= TRANS_PROGRESSIVE; I915_WRITE(LPT_TRANSCONF, val); - if (intel_wait_for_register(dev_priv, + if (intel_wait_for_register(&dev_priv->uncore, LPT_TRANSCONF, TRANS_STATE_ENABLE, TRANS_STATE_ENABLE, @@ -1724,7 +1697,7 @@ static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv, val &= ~TRANS_ENABLE; I915_WRITE(reg, val); /* wait for PCH transcoder off, transcoder state */ - if (intel_wait_for_register(dev_priv, + if (intel_wait_for_register(&dev_priv->uncore, reg, TRANS_STATE_ENABLE, 0, 50)) DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe)); @@ -1746,7 +1719,7 @@ void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv) val &= ~TRANS_ENABLE; I915_WRITE(LPT_TRANSCONF, val); /* wait for PCH transcoder off, transcoder state */ - if (intel_wait_for_register(dev_priv, + if (intel_wait_for_register(&dev_priv->uncore, LPT_TRANSCONF, TRANS_STATE_ENABLE, 0, 50)) DRM_ERROR("Failed to disable PCH transcoder\n"); @@ -1830,6 +1803,8 @@ static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state) /* FIXME: assert CPU port conditions for SNB+ */ } + trace_intel_pipe_enable(dev_priv, pipe); + reg = PIPECONF(cpu_transcoder); val = I915_READ(reg); if (val & PIPECONF_ENABLE) { @@ -1869,6 +1844,8 @@ static void intel_disable_pipe(const struct 
intel_crtc_state *old_crtc_state) */ assert_planes_disabled(crtc); + trace_intel_pipe_disable(dev_priv, pipe); + reg = PIPECONF(cpu_transcoder); val = I915_READ(reg); if ((val & PIPECONF_ENABLE) == 0) @@ -2855,8 +2832,7 @@ static void intel_plane_disable_noatomic(struct intel_crtc *crtc, if (plane->id == PLANE_PRIMARY) intel_pre_disable_primary_noatomic(&crtc->base); - trace_intel_disable_plane(&plane->base, crtc); - plane->disable_plane(plane, crtc_state); + intel_disable_plane(plane, crtc_state); } static void @@ -3260,9 +3236,10 @@ static u32 i9xx_plane_ctl_crtc(const struct intel_crtc_state *crtc_state) struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); u32 dspcntr = 0; - dspcntr |= DISPPLANE_GAMMA_ENABLE; + if (crtc_state->gamma_enable) + dspcntr |= DISPPLANE_GAMMA_ENABLE; - if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) + if (crtc_state->csc_enable) dspcntr |= DISPPLANE_PIPE_CSC_ENABLE; if (INTEL_GEN(dev_priv) < 5) @@ -3489,7 +3466,7 @@ static void i9xx_disable_plane(struct intel_plane *plane, * * On pre-g4x there is no way to gamma correct the * pipe bottom color but we'll keep on doing this - * anyway. + * anyway so that the crtc state readout works correctly. */ dspcntr = i9xx_plane_ctl_crtc(crtc_state); @@ -3764,8 +3741,11 @@ u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state) if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) return plane_ctl; - plane_ctl |= PLANE_CTL_PIPE_GAMMA_ENABLE; - plane_ctl |= PLANE_CTL_PIPE_CSC_ENABLE; + if (crtc_state->gamma_enable) + plane_ctl |= PLANE_CTL_PIPE_GAMMA_ENABLE; + + if (crtc_state->csc_enable) + plane_ctl |= PLANE_CTL_PIPE_CSC_ENABLE; return plane_ctl; } @@ -3817,8 +3797,11 @@ u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state) if (INTEL_GEN(dev_priv) >= 11) return plane_color_ctl; - plane_color_ctl |= PLANE_COLOR_PIPE_GAMMA_ENABLE; - plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE; + if (crtc_state->gamma_enable) + plane_color_ctl |= PLANE_COLOR_PIPE_GAMMA_ENABLE; + + if (crtc_state->csc_enable) + plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE; return plane_color_ctl; } @@ -3977,9 +3960,6 @@ void intel_finish_reset(struct drm_i915_private *dev_priv) * The display has been reset as well, * so need a full re-initialization. */ - intel_runtime_pm_disable_interrupts(dev_priv); - intel_runtime_pm_enable_interrupts(dev_priv); - intel_pps_unlock_regs_wa(dev_priv); intel_modeset_init_hw(dev); intel_init_clock_gating(dev_priv); @@ -4019,13 +3999,13 @@ static void icl_set_pipe_chicken(struct intel_crtc *crtc) * and rounding for per-pixel values 00 and 0xff */ tmp |= PER_PIXEL_ALPHA_BYPASS_EN; - /* - * W/A for underruns with linear/X-tiled with - * WM1+ disabled. + * Display WA # 1605353570: icl + * Set the pixel rounding bit to 1 for allowing + * passthrough of Frame buffer pixels unmodified + * across pipe */ - tmp |= PM_FILL_MAINTAIN_DBUF_FULLNESS; - + tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU; I915_WRITE(PIPE_CHICKEN(pipe), tmp); } @@ -4064,16 +4044,6 @@ static void intel_update_pipe_config(const struct intel_crtc_state *old_crtc_sta ironlake_pfit_disable(old_crtc_state); } - /* - * We don't (yet) allow userspace to control the pipe background color, - * so force it to black, but apply pipe gamma and CSC so that its - * handling will match how we program our planes. 
- */ - if (INTEL_GEN(dev_priv) >= 9) - I915_WRITE(SKL_BOTTOM_COLOR(crtc->pipe), - SKL_BOTTOM_COLOR_GAMMA_ENABLE | - SKL_BOTTOM_COLOR_CSC_ENABLE); - if (INTEL_GEN(dev_priv) >= 11) icl_set_pipe_chicken(crtc); } @@ -5101,10 +5071,10 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach, /* range checks */ if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H || dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H || - (IS_GEN(dev_priv, 11) && + (INTEL_GEN(dev_priv) >= 11 && (src_w > ICL_MAX_SRC_W || src_h > ICL_MAX_SRC_H || dst_w > ICL_MAX_DST_W || dst_h > ICL_MAX_DST_H)) || - (!IS_GEN(dev_priv, 11) && + (INTEL_GEN(dev_priv) < 11 && (src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H || dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H))) { DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u " @@ -5329,7 +5299,7 @@ void hsw_enable_ips(const struct intel_crtc_state *crtc_state) * and don't wait for vblanks until the end of crtc_enable, then * the HW state readout code will complain that the expected * IPS_CTL value is not the one we read. */ - if (intel_wait_for_register(dev_priv, + if (intel_wait_for_register(&dev_priv->uncore, IPS_CTL, IPS_ENABLE, IPS_ENABLE, 50)) DRM_ERROR("Timed out waiting for IPS enable\n"); @@ -5354,7 +5324,7 @@ void hsw_disable_ips(const struct intel_crtc_state *crtc_state) * 42ms timeout value leads to occasional timeouts so use 100ms * instead. */ - if (intel_wait_for_register(dev_priv, + if (intel_wait_for_register(&dev_priv->uncore, IPS_CTL, IPS_ENABLE, 0, 100)) DRM_ERROR("Timed out waiting for IPS disable\n"); @@ -5673,7 +5643,7 @@ static void intel_crtc_disable_planes(struct intel_atomic_state *state, !(update_mask & BIT(plane->id))) continue; - plane->disable_plane(plane, new_crtc_state); + intel_disable_plane(plane, new_crtc_state); if (old_plane_state->base.visible) fb_bits |= plane->frontbuffer_bit; @@ -5824,6 +5794,14 @@ static void intel_encoders_update_pipe(struct drm_crtc *crtc, } } +static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_plane *plane = to_intel_plane(crtc->base.primary); + + plane->disable_plane(plane, crtc_state); +} + static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config, struct drm_atomic_state *old_state) { @@ -5889,6 +5867,8 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config, */ intel_color_load_luts(pipe_config); intel_color_commit(pipe_config); + /* update DSPCNTR to configure gamma for pipe bottom color */ + intel_disable_primary_plane(pipe_config); if (dev_priv->display.initial_watermarks != NULL) dev_priv->display.initial_watermarks(old_intel_state, pipe_config); @@ -6017,6 +5997,9 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config, */ intel_color_load_luts(pipe_config); intel_color_commit(pipe_config); + /* update DSPCNTR to configure gamma/csc for pipe bottom color */ + if (INTEL_GEN(dev_priv) < 9) + intel_disable_primary_plane(pipe_config); if (INTEL_GEN(dev_priv) >= 11) icl_set_pipe_chicken(intel_crtc); @@ -6197,7 +6180,7 @@ bool intel_port_is_combophy(struct drm_i915_private *dev_priv, enum port port) if (port == PORT_NONE) return false; - if (IS_ICELAKE(dev_priv)) + if (INTEL_GEN(dev_priv) >= 11) return port <= PORT_B; return false; @@ -6205,7 +6188,7 @@ bool intel_port_is_combophy(struct drm_i915_private *dev_priv, enum port port) bool intel_port_is_tc(struct drm_i915_private *dev_priv, enum port port) { - if (IS_ICELAKE(dev_priv)) + if 
(INTEL_GEN(dev_priv) >= 11) return port >= PORT_C && port <= PORT_F; return false; @@ -6374,6 +6357,8 @@ static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config, intel_color_load_luts(pipe_config); intel_color_commit(pipe_config); + /* update DSPCNTR to configure gamma for pipe bottom color */ + intel_disable_primary_plane(pipe_config); dev_priv->display.initial_watermarks(old_intel_state, pipe_config); @@ -6431,6 +6416,8 @@ static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config, intel_color_load_luts(pipe_config); intel_color_commit(pipe_config); + /* update DSPCNTR to configure gamma for pipe bottom color */ + intel_disable_primary_plane(pipe_config); if (dev_priv->display.initial_watermarks != NULL) dev_priv->display.initial_watermarks(old_intel_state, @@ -6813,7 +6800,13 @@ static bool hsw_compute_ips_config(struct intel_crtc_state *crtc_state) if (!hsw_crtc_state_ips_capable(crtc_state)) return false; - if (crtc_state->ips_force_disable) + /* + * When IPS gets enabled, the pipe CRC changes. Since IPS gets + * enabled and disabled dynamically based on package C states, + * user space can't make reliable use of the CRCs, so let's just + * completely disable it. + */ + if (crtc_state->crc_enabled) return false; /* IPS should be fine as long as at least one plane is enabled. */ @@ -6888,8 +6881,7 @@ static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state) static int intel_crtc_compute_config(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config) { - struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; int clock_limit = dev_priv->max_dotclk_freq; @@ -6939,7 +6931,7 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc, } if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) && - intel_is_dual_link_lvds(dev)) { + intel_is_dual_link_lvds(dev_priv)) { DRM_DEBUG_KMS("Odd pipe source width not supported with dual link LVDS\n"); return -EINVAL; } @@ -7556,7 +7548,19 @@ static void i8xx_compute_dpll(struct intel_crtc *crtc, dpll |= PLL_P2_DIVIDE_BY_4; } - if (!IS_I830(dev_priv) && + /* + * Bspec: + * "[Almador Errata]: For the correct operation of the muxed DVO pins + * (GDEVSELB/I2Cdata, GIRDBY/I2CClk) and (GFRAMEB/DVI_Data, + * GTRDYB/DVI_Clk): Bit 31 (DPLL VCO Enable) and Bit 30 (2X Clock + * Enable) must be set to “1” in both the DPLL A Control Register + * (06014h-06017h) and DPLL B Control Register (06018h-0601Bh)." + * + * For simplicity we simply keep both bits always enabled in + * both DPLLs. The spec says we should disable the DVO 2X clock + * when not needed, but this seems to work fine in practice.
+ */ + if (IS_I830(dev_priv) || intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) dpll |= DPLL_DVO_2X_MODE; @@ -7764,13 +7768,16 @@ static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state) pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION; else pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT; - } else + } else { pipeconf |= PIPECONF_PROGRESSIVE; + } if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && crtc_state->limited_color_range) pipeconf |= PIPECONF_COLOR_RANGE_SELECT; + pipeconf |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode); + I915_WRITE(PIPECONF(crtc->pipe), pipeconf); POSTING_READ(PIPECONF(crtc->pipe)); } @@ -7814,8 +7821,7 @@ static int i8xx_crtc_compute_clock(struct intel_crtc *crtc, static int g4x_crtc_compute_clock(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state) { - struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); const struct intel_limit *limit; int refclk = 96000; @@ -7828,7 +7834,7 @@ static int g4x_crtc_compute_clock(struct intel_crtc *crtc, DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk); } - if (intel_is_dual_link_lvds(dev)) + if (intel_is_dual_link_lvds(dev_priv)) limit = &intel_limits_g4x_dual_channel_lvds; else limit = &intel_limits_g4x_single_channel_lvds; @@ -7964,14 +7970,22 @@ static int vlv_crtc_compute_clock(struct intel_crtc *crtc, return 0; } +static bool i9xx_has_pfit(struct drm_i915_private *dev_priv) +{ + if (IS_I830(dev_priv)) + return false; + + return INTEL_GEN(dev_priv) >= 4 || + IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv); +} + static void i9xx_get_pfit_config(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); u32 tmp; - if (INTEL_GEN(dev_priv) <= 3 && - (IS_I830(dev_priv) || !IS_MOBILE(dev_priv))) + if (!i9xx_has_pfit(dev_priv)) return; tmp = I915_READ(PFIT_CONTROL); @@ -8178,6 +8192,24 @@ static void intel_get_crtc_ycbcr_config(struct intel_crtc *crtc, pipe_config->output_format = output; } +static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_plane *plane = to_intel_plane(crtc->base.primary); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; + u32 tmp; + + tmp = I915_READ(DSPCNTR(i9xx_plane)); + + if (tmp & DISPPLANE_GAMMA_ENABLE) + crtc_state->gamma_enable = true; + + if (!HAS_GMCH(dev_priv) && + tmp & DISPPLANE_PIPE_CSC_ENABLE) + crtc_state->csc_enable = true; +} + static bool i9xx_get_pipe_config(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config) { @@ -8223,6 +8255,14 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc, (tmp & PIPECONF_COLOR_RANGE_SELECT)) pipe_config->limited_color_range = true; + pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_I9XX) >> + PIPECONF_GAMMA_MODE_SHIFT; + + if (IS_CHERRYVIEW(dev_priv)) + pipe_config->cgm_mode = I915_READ(CGM_PIPE_MODE(crtc->pipe)); + + i9xx_get_pipe_color_config(pipe_config); + if (INTEL_GEN(dev_priv) < 4) pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE; @@ -8255,14 +8295,6 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc, } pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe)); if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) { - /* - * DPLL_DVO_2X_MODE must be enabled for both DPLLs - * on 830. 
Filter it out here so that we don't - * report errors due to that. - */ - if (IS_I830(dev_priv)) - pipe_config->dpll_hw_state.dpll &= ~DPLL_DVO_2X_MODE; - pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe)); pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe)); } else { @@ -8762,6 +8794,8 @@ static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state) if (crtc_state->limited_color_range) val |= PIPECONF_COLOR_RANGE_SELECT; + val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode); + I915_WRITE(PIPECONF(pipe), val); POSTING_READ(PIPECONF(pipe)); } @@ -8842,13 +8876,11 @@ static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor) return i9xx_dpll_compute_m(dpll) < factor * dpll->n; } -static void ironlake_compute_dpll(struct intel_crtc *intel_crtc, +static void ironlake_compute_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, struct dpll *reduced_clock) { - struct drm_crtc *crtc = &intel_crtc->base; - struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); u32 dpll, fp, fp2; int factor; @@ -8857,10 +8889,12 @@ static void ironlake_compute_dpll(struct intel_crtc *intel_crtc, if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { if ((intel_panel_use_ssc(dev_priv) && dev_priv->vbt.lvds_ssc_freq == 100000) || - (HAS_PCH_IBX(dev_priv) && intel_is_dual_link_lvds(dev))) + (HAS_PCH_IBX(dev_priv) && + intel_is_dual_link_lvds(dev_priv))) factor = 25; - } else if (crtc_state->sdvo_tv_clock) + } else if (crtc_state->sdvo_tv_clock) { factor = 20; + } fp = i9xx_dpll_compute_fp(&crtc_state->dpll); @@ -8947,8 +8981,7 @@ static void ironlake_compute_dpll(struct intel_crtc *intel_crtc, static int ironlake_crtc_compute_clock(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state) { - struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); const struct intel_limit *limit; int refclk = 120000; @@ -8966,7 +8999,7 @@ static int ironlake_crtc_compute_clock(struct intel_crtc *crtc, refclk = dev_priv->vbt.lvds_ssc_freq; } - if (intel_is_dual_link_lvds(dev)) { + if (intel_is_dual_link_lvds(dev_priv)) { if (refclk == 100000) limit = &intel_limits_ironlake_dual_lvds_100m; else @@ -8990,7 +9023,7 @@ static int ironlake_crtc_compute_clock(struct intel_crtc *crtc, ironlake_compute_dpll(crtc, crtc_state, NULL); - if (!intel_get_shared_dpll(crtc, crtc_state, NULL)) { + if (!intel_get_shared_dpll(crtc_state, NULL)) { DRM_DEBUG_KMS("failed to find PLL for pipe %c\n", pipe_name(crtc->pipe)); return -EINVAL; @@ -9296,6 +9329,13 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc, if (tmp & PIPECONF_COLOR_RANGE_SELECT) pipe_config->limited_color_range = true; + pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_ILK) >> + PIPECONF_GAMMA_MODE_SHIFT; + + pipe_config->csc_mode = I915_READ(PIPE_CSC_MODE(crtc->pipe)); + + i9xx_get_pipe_color_config(pipe_config); + if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) { struct intel_shared_dpll *pll; enum intel_dpll_id pll_id; @@ -9441,7 +9481,8 @@ static void hsw_disable_lcpll(struct drm_i915_private *dev_priv, I915_WRITE(LCPLL_CTL, val); POSTING_READ(LCPLL_CTL); - if (intel_wait_for_register(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 0, 1)) + if (intel_wait_for_register(&dev_priv->uncore, + LCPLL_CTL, LCPLL_PLL_LOCK, 0, 1)) DRM_ERROR("LCPLL still locked\n"); val = hsw_read_dcomp(dev_priv); @@ -9479,7 +9520,7 @@ 
static void hsw_restore_lcpll(struct drm_i915_private *dev_priv) * Make sure we're not on PC8 state before disabling PC8, otherwise * we'll hang the machine. To prevent PC8 state, just enable force_wake. */ - intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); if (val & LCPLL_POWER_DOWN_ALLOW) { val &= ~LCPLL_POWER_DOWN_ALLOW; @@ -9496,7 +9537,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv) val &= ~LCPLL_PLL_DISABLE; I915_WRITE(LCPLL_CTL, val); - if (intel_wait_for_register(dev_priv, + if (intel_wait_for_register(&dev_priv->uncore, LCPLL_CTL, LCPLL_PLL_LOCK, LCPLL_PLL_LOCK, 5)) DRM_ERROR("LCPLL not locked yet\n"); @@ -9511,7 +9552,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv) DRM_ERROR("Switching back to LCPLL failed\n"); } - intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); intel_update_cdclk(dev_priv); intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK"); @@ -9580,11 +9621,11 @@ static int haswell_crtc_compute_clock(struct intel_crtc *crtc, to_intel_atomic_state(crtc_state->base.state); if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) || - IS_ICELAKE(dev_priv)) { + INTEL_GEN(dev_priv) >= 11) { struct intel_encoder *encoder = intel_get_crtc_new_encoder(state, crtc_state); - if (!intel_get_shared_dpll(crtc, crtc_state, encoder)) { + if (!intel_get_shared_dpll(crtc_state, encoder)) { DRM_DEBUG_KMS("failed to find PLL for pipe %c\n", pipe_name(crtc->pipe)); return -EINVAL; @@ -9622,9 +9663,6 @@ static void icelake_get_ddi_pll(struct drm_i915_private *dev_priv, temp = I915_READ(DPCLKA_CFGCR0_ICL) & DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port); id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port); - - if (WARN_ON(!intel_dpll_is_combophy(id))) - return; } else if (intel_port_is_tc(dev_priv, port)) { id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv, port)); } else { @@ -9718,15 +9756,18 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc, struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); enum intel_display_power_domain power_domain; - unsigned long panel_transcoder_mask = BIT(TRANSCODER_EDP); + unsigned long panel_transcoder_mask = 0; unsigned long enabled_panel_transcoders = 0; enum transcoder panel_transcoder; u32 tmp; - if (IS_ICELAKE(dev_priv)) + if (INTEL_GEN(dev_priv) >= 11) panel_transcoder_mask |= BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1); + if (HAS_TRANSCODER_EDP(dev_priv)) + panel_transcoder_mask |= BIT(TRANSCODER_EDP); + /* * The pipe->transcoder mapping is fixed with the exception of the eDP * and DSI transcoders handled below. 
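Editor's note: an illustrative sketch, not part of the patch. The hsw_get_transcoder_state() hunk above stops hardcoding an eDP transcoder and instead builds the panel transcoder mask only from transcoders the platform actually has; assuming only the conditions visible in that hunk, the resulting logic is roughly:

	unsigned long panel_transcoder_mask = 0;

	/* gen11+ parts have the two DSI transcoders */
	if (INTEL_GEN(dev_priv) >= 11)
		panel_transcoder_mask |= BIT(TRANSCODER_DSI_0) |
					 BIT(TRANSCODER_DSI_1);

	/* the eDP transcoder is no longer assumed to exist everywhere */
	if (HAS_TRANSCODER_EDP(dev_priv))
		panel_transcoder_mask |= BIT(TRANSCODER_EDP);

This matches the HAS_TRANSCODER_EDP(dev_priv) && port == PORT_A checks added earlier in this diff for platforms that lack an eDP transcoder.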
@@ -9856,7 +9897,7 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc, port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT; - if (IS_ICELAKE(dev_priv)) + if (INTEL_GEN(dev_priv) >= 11) icelake_get_ddi_pll(dev_priv, port, pipe_config); else if (IS_CANNONLAKE(dev_priv)) cannonlake_get_ddi_pll(dev_priv, port, pipe_config); @@ -9919,7 +9960,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc, goto out; if (!transcoder_is_dsi(pipe_config->cpu_transcoder) || - IS_ICELAKE(dev_priv)) { + INTEL_GEN(dev_priv) >= 11) { haswell_get_ddi_port_state(crtc, pipe_config); intel_get_pipe_timings(crtc, pipe_config); } @@ -9927,8 +9968,21 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc, intel_get_pipe_src_size(crtc, pipe_config); intel_get_crtc_ycbcr_config(crtc, pipe_config); - pipe_config->gamma_mode = - I915_READ(GAMMA_MODE(crtc->pipe)) & GAMMA_MODE_MODE_MASK; + pipe_config->gamma_mode = I915_READ(GAMMA_MODE(crtc->pipe)); + + pipe_config->csc_mode = I915_READ(PIPE_CSC_MODE(crtc->pipe)); + + if (INTEL_GEN(dev_priv) >= 9) { + u32 tmp = I915_READ(SKL_BOTTOM_COLOR(crtc->pipe)); + + if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE) + pipe_config->gamma_enable = true; + + if (tmp & SKL_BOTTOM_COLOR_CSC_ENABLE) + pipe_config->csc_enable = true; + } else { + i9xx_get_pipe_color_config(pipe_config); + } power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe); if (intel_display_power_get_if_enabled(dev_priv, power_domain)) { @@ -10100,7 +10154,12 @@ i845_cursor_max_stride(struct intel_plane *plane, static u32 i845_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state) { - return CURSOR_GAMMA_ENABLE; + u32 cntl = 0; + + if (crtc_state->gamma_enable) + cntl |= CURSOR_GAMMA_ENABLE; + + return cntl; } static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state, @@ -10254,9 +10313,10 @@ static u32 i9xx_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state) if (INTEL_GEN(dev_priv) >= 11) return cntl; - cntl |= MCURSOR_GAMMA_ENABLE; + if (crtc_state->gamma_enable) + cntl = MCURSOR_GAMMA_ENABLE; - if (HAS_DDI(dev_priv)) + if (crtc_state->csc_enable) cntl |= MCURSOR_PIPE_CSC_ENABLE; if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) @@ -11245,16 +11305,11 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc, return ret; } - if (mode_changed || crtc_state->color_mgmt_changed) { + if (mode_changed || pipe_config->update_pipe || + crtc_state->color_mgmt_changed) { ret = intel_color_check(pipe_config); if (ret) return ret; - - /* - * Changing color management on Intel hardware is - * handled as part of planes update. 
- */ - crtc_state->planes_changed = true; } ret = 0; @@ -11425,6 +11480,16 @@ intel_dump_m_n_config(struct intel_crtc_state *pipe_config, char *id, m_n->link_m, m_n->link_n, m_n->tu); } +static void +intel_dump_infoframe(struct drm_i915_private *dev_priv, + const union hdmi_infoframe *frame) +{ + if ((drm_debug & DRM_UT_KMS) == 0) + return; + + hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, frame); +} + #define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x static const char * const output_type_str[] = { @@ -11528,6 +11593,22 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc, DRM_DEBUG_KMS("audio: %i, infoframes: %i\n", pipe_config->has_audio, pipe_config->has_infoframe); + DRM_DEBUG_KMS("infoframes enabled: 0x%x\n", + pipe_config->infoframes.enable); + + if (pipe_config->infoframes.enable & + intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL)) + DRM_DEBUG_KMS("GCP: 0x%x\n", pipe_config->infoframes.gcp); + if (pipe_config->infoframes.enable & + intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI)) + intel_dump_infoframe(dev_priv, &pipe_config->infoframes.avi); + if (pipe_config->infoframes.enable & + intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD)) + intel_dump_infoframe(dev_priv, &pipe_config->infoframes.spd); + if (pipe_config->infoframes.enable & + intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR)) + intel_dump_infoframe(dev_priv, &pipe_config->infoframes.hdmi); + DRM_DEBUG_KMS("requested mode:\n"); drm_mode_debug_printmodeline(&pipe_config->base.mode); DRM_DEBUG_KMS("adjusted mode:\n"); @@ -11676,7 +11757,7 @@ clear_intel_crtc_state(struct intel_crtc_state *crtc_state) saved_state->shared_dpll = crtc_state->shared_dpll; saved_state->dpll_hw_state = crtc_state->dpll_hw_state; saved_state->pch_pfit.force_thru = crtc_state->pch_pfit.force_thru; - saved_state->ips_force_disable = crtc_state->ips_force_disable; + saved_state->crc_enabled = crtc_state->crc_enabled; if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) saved_state->wm = crtc_state->wm; @@ -11895,6 +11976,37 @@ intel_compare_link_m_n(const struct intel_link_m_n *m_n, return false; } +static bool +intel_compare_infoframe(const union hdmi_infoframe *a, + const union hdmi_infoframe *b) +{ + return memcmp(a, b, sizeof(*a)) == 0; +} + +static void +pipe_config_infoframe_err(struct drm_i915_private *dev_priv, + bool adjust, const char *name, + const union hdmi_infoframe *a, + const union hdmi_infoframe *b) +{ + if (adjust) { + if ((drm_debug & DRM_UT_KMS) == 0) + return; + + drm_dbg(DRM_UT_KMS, "mismatch in %s infoframe", name); + drm_dbg(DRM_UT_KMS, "expected:"); + hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a); + drm_dbg(DRM_UT_KMS, "found"); + hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b); + } else { + drm_err("mismatch in %s infoframe", name); + drm_err("expected:"); + hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a); + drm_err("found"); + hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b); + } +} + static void __printf(3, 4) pipe_config_err(bool adjust, const char *name, const char *format, ...) 
{ @@ -12078,7 +12190,17 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv, } \ } while (0) -#define PIPE_CONF_QUIRK(quirk) \ +#define PIPE_CONF_CHECK_INFOFRAME(name) do { \ + if (!intel_compare_infoframe(&current_config->infoframes.name, \ + &pipe_config->infoframes.name)) { \ + pipe_config_infoframe_err(dev_priv, adjust, __stringify(name), \ + &current_config->infoframes.name, \ + &pipe_config->infoframes.name); \ + ret = false; \ + } \ +} while (0) + +#define PIPE_CONF_QUIRK(quirk) \ ((current_config->quirks | pipe_config->quirks) & (quirk)) PIPE_CONF_CHECK_I(cpu_transcoder); @@ -12159,6 +12281,14 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv, PIPE_CONF_CHECK_I(scaler_state.scaler_id); PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate); + + PIPE_CONF_CHECK_X(gamma_mode); + if (IS_CHERRYVIEW(dev_priv)) + PIPE_CONF_CHECK_X(cgm_mode); + else + PIPE_CONF_CHECK_X(csc_mode); + PIPE_CONF_CHECK_BOOL(gamma_enable); + PIPE_CONF_CHECK_BOOL(csc_enable); } PIPE_CONF_CHECK_BOOL(double_wide); @@ -12207,6 +12337,12 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv, PIPE_CONF_CHECK_I(min_voltage_level); + PIPE_CONF_CHECK_X(infoframes.enable); + PIPE_CONF_CHECK_X(infoframes.gcp); + PIPE_CONF_CHECK_INFOFRAME(avi); + PIPE_CONF_CHECK_INFOFRAME(spd); + PIPE_CONF_CHECK_INFOFRAME(hdmi); + #undef PIPE_CONF_CHECK_X #undef PIPE_CONF_CHECK_I #undef PIPE_CONF_CHECK_BOOL @@ -12241,12 +12377,15 @@ static void verify_wm_state(struct drm_crtc *crtc, struct drm_crtc_state *new_state) { struct drm_i915_private *dev_priv = to_i915(crtc->dev); - struct skl_ddb_allocation hw_ddb, *sw_ddb; - struct skl_pipe_wm hw_wm, *sw_wm; - struct skl_plane_wm *hw_plane_wm, *sw_plane_wm; + struct skl_hw_state { + struct skl_ddb_entry ddb_y[I915_MAX_PLANES]; + struct skl_ddb_entry ddb_uv[I915_MAX_PLANES]; + struct skl_ddb_allocation ddb; + struct skl_pipe_wm wm; + } *hw; + struct skl_ddb_allocation *sw_ddb; + struct skl_pipe_wm *sw_wm; struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry; - struct skl_ddb_entry hw_ddb_y[I915_MAX_PLANES]; - struct skl_ddb_entry hw_ddb_uv[I915_MAX_PLANES]; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); const enum pipe pipe = intel_crtc->pipe; int plane, level, max_level = ilk_wm_max_level(dev_priv); @@ -12254,22 +12393,29 @@ static void verify_wm_state(struct drm_crtc *crtc, if (INTEL_GEN(dev_priv) < 9 || !new_state->active) return; - skl_pipe_wm_get_hw_state(intel_crtc, &hw_wm); + hw = kzalloc(sizeof(*hw), GFP_KERNEL); + if (!hw) + return; + + skl_pipe_wm_get_hw_state(intel_crtc, &hw->wm); sw_wm = &to_intel_crtc_state(new_state)->wm.skl.optimal; - skl_pipe_ddb_get_hw_state(intel_crtc, hw_ddb_y, hw_ddb_uv); + skl_pipe_ddb_get_hw_state(intel_crtc, hw->ddb_y, hw->ddb_uv); - skl_ddb_get_hw_state(dev_priv, &hw_ddb); + skl_ddb_get_hw_state(dev_priv, &hw->ddb); sw_ddb = &dev_priv->wm.skl_hw.ddb; - if (INTEL_GEN(dev_priv) >= 11) - if (hw_ddb.enabled_slices != sw_ddb->enabled_slices) - DRM_ERROR("mismatch in DBUF Slices (expected %u, got %u)\n", - sw_ddb->enabled_slices, - hw_ddb.enabled_slices); + if (INTEL_GEN(dev_priv) >= 11 && + hw->ddb.enabled_slices != sw_ddb->enabled_slices) + DRM_ERROR("mismatch in DBUF Slices (expected %u, got %u)\n", + sw_ddb->enabled_slices, + hw->ddb.enabled_slices); + /* planes */ for_each_universal_plane(dev_priv, pipe, plane) { - hw_plane_wm = &hw_wm.planes[plane]; + struct skl_plane_wm *hw_plane_wm, *sw_plane_wm; + + hw_plane_wm = &hw->wm.planes[plane]; sw_plane_wm = &sw_wm->planes[plane]; /* Watermarks */ @@ -12301,7 +12447,7 @@ static void
verify_wm_state(struct drm_crtc *crtc, } /* DDB */ - hw_ddb_entry = &hw_ddb_y[plane]; + hw_ddb_entry = &hw->ddb_y[plane]; sw_ddb_entry = &to_intel_crtc_state(new_state)->wm.skl.plane_ddb_y[plane]; if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) { @@ -12319,7 +12465,9 @@ static void verify_wm_state(struct drm_crtc *crtc, * once the plane becomes visible, we can skip this check */ if (1) { - hw_plane_wm = &hw_wm.planes[PLANE_CURSOR]; + struct skl_plane_wm *hw_plane_wm, *sw_plane_wm; + + hw_plane_wm = &hw->wm.planes[PLANE_CURSOR]; sw_plane_wm = &sw_wm->planes[PLANE_CURSOR]; /* Watermarks */ @@ -12351,7 +12499,7 @@ static void verify_wm_state(struct drm_crtc *crtc, } /* DDB */ - hw_ddb_entry = &hw_ddb_y[PLANE_CURSOR]; + hw_ddb_entry = &hw->ddb_y[PLANE_CURSOR]; sw_ddb_entry = &to_intel_crtc_state(new_state)->wm.skl.plane_ddb_y[PLANE_CURSOR]; if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) { @@ -12361,6 +12509,8 @@ static void verify_wm_state(struct drm_crtc *crtc, hw_ddb_entry->start, hw_ddb_entry->end); } } + + kfree(hw); } static void @@ -12517,7 +12667,8 @@ intel_verify_planes(struct intel_atomic_state *state) for_each_new_intel_plane_in_state(state, plane, plane_state, i) - assert_plane(plane, plane_state->base.visible); + assert_plane(plane, plane_state->slave || + plane_state->base.visible); } static void @@ -13383,7 +13534,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state) * so enable debugging for the next modeset - and hope we catch * the culprit. */ - intel_uncore_arm_unclaimed_mmio_detection(dev_priv); + intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore); intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref); } @@ -13576,7 +13727,7 @@ static int do_rps_boost(struct wait_queue_entry *_wait, * vblank without our intervention, so leave RPS alone. 
*/ if (!i915_request_started(rq)) - gen6_rps_boost(rq, NULL); + gen6_rps_boost(rq); i915_request_put(rq); drm_crtc_vblank_put(wait->crtc); @@ -14109,14 +14260,11 @@ intel_legacy_cursor_update(struct drm_plane *plane, */ crtc_state->active_planes = new_crtc_state->active_planes; - if (plane->state->visible) { - trace_intel_update_plane(plane, to_intel_crtc(crtc)); - intel_plane->update_plane(intel_plane, crtc_state, - to_intel_plane_state(plane->state)); - } else { - trace_intel_disable_plane(plane, to_intel_crtc(crtc)); - intel_plane->disable_plane(intel_plane, crtc_state); - } + if (plane->state->visible) + intel_update_plane(intel_plane, crtc_state, + to_intel_plane_state(plane->state)); + else + intel_disable_plane(intel_plane, crtc_state); intel_plane_unpin_fb(to_intel_plane_state(old_plane_state)); @@ -14566,7 +14714,12 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv) if (!HAS_DISPLAY(dev_priv)) return; - if (IS_ICELAKE(dev_priv)) { + if (IS_ELKHARTLAKE(dev_priv)) { + intel_ddi_init(dev_priv, PORT_A); + intel_ddi_init(dev_priv, PORT_B); + intel_ddi_init(dev_priv, PORT_C); + icl_dsi_init(dev_priv); + } else if (INTEL_GEN(dev_priv) >= 11) { intel_ddi_init(dev_priv, PORT_A); intel_ddi_init(dev_priv, PORT_B); intel_ddi_init(dev_priv, PORT_C); @@ -15467,6 +15620,8 @@ int intel_modeset_init(struct drm_device *dev) intel_update_czclk(dev_priv); intel_modeset_init_hw(dev); + intel_hdcp_component_init(dev_priv); + if (dev_priv->max_cdclk_freq == 0) intel_update_max_cdclk(dev_priv); @@ -15542,7 +15697,7 @@ void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe) pipe_name(pipe), clock.vco, clock.dot); fp = i9xx_dpll_compute_fp(&clock); - dpll = (I915_READ(DPLL(pipe)) & DPLL_DVO_2X_MODE) | + dpll = DPLL_DVO_2X_MODE | DPLL_VGA_MODE_DIS | ((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) | PLL_P2_DIVIDE_BY_4 | @@ -16328,6 +16483,8 @@ void intel_modeset_cleanup(struct drm_device *dev) /* flush any delayed tasks or pending work */ flush_scheduled_work(); + intel_hdcp_component_fini(dev_priv); + drm_mode_config_cleanup(dev); intel_overlay_cleanup(dev_priv); @@ -16374,8 +16531,6 @@ struct intel_display_error_state { u32 power_well_driver; - int num_transcoders; - struct intel_cursor_error_state { u32 control; u32 position; @@ -16400,6 +16555,7 @@ struct intel_display_error_state { } plane[I915_MAX_PIPES]; struct intel_transcoder_error_state { + bool available; bool power_domain_on; enum transcoder cpu_transcoder; @@ -16426,6 +16582,8 @@ intel_display_capture_error_state(struct drm_i915_private *dev_priv) }; int i; + BUILD_BUG_ON(ARRAY_SIZE(transcoders) != ARRAY_SIZE(error->transcoder)); + if (!HAS_DISPLAY(dev_priv)) return NULL; @@ -16466,14 +16624,13 @@ intel_display_capture_error_state(struct drm_i915_private *dev_priv) error->pipe[i].stat = I915_READ(PIPESTAT(i)); } - /* Note: this does not include DSI transcoders. */ - error->num_transcoders = INTEL_INFO(dev_priv)->num_pipes; - if (HAS_DDI(dev_priv)) - error->num_transcoders++; /* Account for eDP. 
*/ - - for (i = 0; i < error->num_transcoders; i++) { + for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) { enum transcoder cpu_transcoder = transcoders[i]; + if (!INTEL_INFO(dev_priv)->trans_offsets[cpu_transcoder]) + continue; + + error->transcoder[i].available = true; error->transcoder[i].power_domain_on = __intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_TRANSCODER(cpu_transcoder)); @@ -16537,7 +16694,10 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m, err_printf(m, " BASE: %08x\n", error->cursor[i].base); } - for (i = 0; i < error->num_transcoders; i++) { + for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) { + if (!error->transcoder[i].available) + continue; + err_printf(m, "CPU transcoder: %s\n", transcoder_name(error->transcoder[i].cpu_transcoder)); err_printf(m, " Power: %s\n", diff --git a/drivers/gpu/drm/i915/intel_display.h b/drivers/gpu/drm/i915/intel_display.h index c7c068662288..2220588e86ac 100644 --- a/drivers/gpu/drm/i915/intel_display.h +++ b/drivers/gpu/drm/i915/intel_display.h @@ -26,6 +26,7 @@ #define _INTEL_DISPLAY_H_ #include <drm/drm_util.h> +#include <drm/i915_drm.h> enum i915_gpio { GPIOA, @@ -150,21 +151,6 @@ enum plane_id { for ((__p) = PLANE_PRIMARY; (__p) < I915_MAX_PLANES; (__p)++) \ for_each_if((__crtc)->plane_ids_mask & BIT(__p)) -enum port { - PORT_NONE = -1, - - PORT_A = 0, - PORT_B, - PORT_C, - PORT_D, - PORT_E, - PORT_F, - - I915_MAX_PORTS -}; - -#define port_name(p) ((p) + 'A') - /* * Ports identifier referenced from other drivers. * Expected to remain stable over time diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index cf709835fb9a..72c49070ed14 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -949,8 +949,11 @@ static void intel_pps_get_registers(struct intel_dp *intel_dp, regs->pp_stat = PP_STATUS(pps_idx); regs->pp_on = PP_ON_DELAYS(pps_idx); regs->pp_off = PP_OFF_DELAYS(pps_idx); - if (!IS_GEN9_LP(dev_priv) && !HAS_PCH_CNP(dev_priv) && - !HAS_PCH_ICP(dev_priv)) + + /* Cycle delay moved from PP_DIVISOR to PP_CONTROL */ + if (IS_GEN9_LP(dev_priv) || INTEL_PCH_TYPE(dev_priv) >= PCH_CNP) + regs->pp_div = INVALID_MMIO_REG; + else regs->pp_div = PP_DIVISOR(pps_idx); } @@ -1720,12 +1723,6 @@ void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock, } } -struct link_config_limits { - int min_clock, max_clock; - int min_lane_count, max_lane_count; - int min_bpp, max_bpp; -}; - static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp, const struct intel_crtc_state *pipe_config) { @@ -1788,7 +1785,7 @@ static int intel_dp_compute_bpp(struct intel_dp *intel_dp, } /* Adjust link config limits based on compliance test requests. 
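[Editor's note] The intel_pps_get_registers() change above stores INVALID_MMIO_REG into regs->pp_div on platforms that moved the cycle delay into PP_CONTROL, so later code can branch on register validity instead of repeating the platform checks. A stand-alone analogue of that sentinel idea (reg_t, the zero sentinel, offsets and read_reg() are illustrative, not the i915 definitions):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef struct { uint32_t offset; } reg_t;		/* analogue of i915_reg_t */
#define INVALID_REG ((reg_t){ .offset = 0 })		/* analogue of INVALID_MMIO_REG */

static bool reg_valid(reg_t reg) { return reg.offset != 0; }

static uint32_t read_reg(reg_t reg) { return reg.offset * 2; }	/* fake MMIO read */

struct pps_registers { reg_t pp_on, pp_off, pp_div; };

static void pps_get_registers(struct pps_registers *regs, bool has_pp_div)
{
	/* Placeholder offsets, chosen only for the example. */
	regs->pp_on = (reg_t){ 0x1008 };
	regs->pp_off = (reg_t){ 0x100c };
	/* Newer platforms keep the cycle delay elsewhere; record that once here. */
	regs->pp_div = has_pp_div ? (reg_t){ 0x1010 } : INVALID_REG;
}

int main(void)
{
	struct pps_registers regs;

	pps_get_registers(&regs, false);
	/* Consumers test validity instead of re-checking the platform. */
	if (reg_valid(regs.pp_div))
		printf("pp_div = %#x\n", read_reg(regs.pp_div));
	else
		printf("cycle delay lives in the control register on this platform\n");
	return 0;
}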
*/ -static void +void intel_dp_adjust_compliance_config(struct intel_dp *intel_dp, struct intel_crtc_state *pipe_config, struct link_config_limits *limits) @@ -2104,6 +2101,29 @@ intel_dp_compute_link_config(struct intel_encoder *encoder, return 0; } +bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state) +{ + const struct intel_digital_connector_state *intel_conn_state = + to_intel_digital_connector_state(conn_state); + const struct drm_display_mode *adjusted_mode = + &crtc_state->base.adjusted_mode; + + if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) { + /* + * See: + * CEA-861-E - 5.1 Default Encoding Parameters + * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry + */ + return crtc_state->pipe_bpp != 18 && + drm_default_rgb_quant_range(adjusted_mode) == + HDMI_QUANTIZATION_RANGE_LIMITED; + } else { + return intel_conn_state->broadcast_rgb == + INTEL_BROADCAST_RGB_LIMITED; + } +} + int intel_dp_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config, @@ -2172,20 +2192,8 @@ intel_dp_compute_config(struct intel_encoder *encoder, if (ret < 0) return ret; - if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) { - /* - * See: - * CEA-861-E - 5.1 Default Encoding Parameters - * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry - */ - pipe_config->limited_color_range = - pipe_config->pipe_bpp != 18 && - drm_default_rgb_quant_range(adjusted_mode) == - HDMI_QUANTIZATION_RANGE_LIMITED; - } else { - pipe_config->limited_color_range = - intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_LIMITED; - } + pipe_config->limited_color_range = + intel_dp_limited_color_range(pipe_config, conn_state); if (!pipe_config->dsc_params.compression_enable) intel_link_compute_m_n(pipe_config->pipe_bpp, @@ -2345,7 +2353,7 @@ static void wait_panel_status(struct intel_dp *intel_dp, I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg)); - if (intel_wait_for_register(dev_priv, + if (intel_wait_for_register(&dev_priv->uncore, pp_stat_reg, mask, value, 5000)) DRM_ERROR("Panel status timeout: status %08x control %08x\n", @@ -3934,7 +3942,7 @@ void intel_dp_set_idle_link_train(struct intel_dp *intel_dp) if (port == PORT_A) return; - if (intel_wait_for_register(dev_priv,DP_TP_STATUS(port), + if (intel_wait_for_register(&dev_priv->uncore, DP_TP_STATUS(port), DP_TP_STATUS_IDLE_DONE, DP_TP_STATUS_IDLE_DONE, 1)) @@ -4780,7 +4788,7 @@ static void intel_dp_check_service_irq(struct intel_dp *intel_dp) intel_dp_handle_test_request(intel_dp); if (val & DP_CP_IRQ) - intel_hdcp_check_link(intel_dp->attached_connector); + intel_hdcp_handle_cp_irq(intel_dp->attached_connector); if (val & DP_SINK_SPECIFIC_IRQ) DRM_DEBUG_DRIVER("Sink specific irq unhandled\n"); @@ -5623,6 +5631,18 @@ void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder) edp_panel_vdd_off_sync(intel_dp); } +static void intel_dp_hdcp_wait_for_cp_irq(struct intel_hdcp *hdcp, int timeout) +{ + long ret; + +#define C (hdcp->cp_irq_count_cached != atomic_read(&hdcp->cp_irq_count)) + ret = wait_event_interruptible_timeout(hdcp->cp_irq_queue, C, + msecs_to_jiffies(timeout)); + + if (!ret) + DRM_DEBUG_KMS("Timedout at waiting for CP_IRQ\n"); +} + static int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port, u8 *an) @@ -5847,6 +5867,336 @@ int intel_dp_hdcp_capable(struct intel_digital_port *intel_dig_port, return 0; } +struct hdcp2_dp_errata_stream_type { + u8 msg_id; + u8 stream_type; +} __packed; + +static struct 
hdcp2_dp_msg_data { + u8 msg_id; + u32 offset; + bool msg_detectable; + u32 timeout; + u32 timeout2; /* Added for non_paired situation */ + } hdcp2_msg_data[] = { + {HDCP_2_2_AKE_INIT, DP_HDCP_2_2_AKE_INIT_OFFSET, false, 0, 0}, + {HDCP_2_2_AKE_SEND_CERT, DP_HDCP_2_2_AKE_SEND_CERT_OFFSET, + false, HDCP_2_2_CERT_TIMEOUT_MS, 0}, + {HDCP_2_2_AKE_NO_STORED_KM, DP_HDCP_2_2_AKE_NO_STORED_KM_OFFSET, + false, 0, 0}, + {HDCP_2_2_AKE_STORED_KM, DP_HDCP_2_2_AKE_STORED_KM_OFFSET, + false, 0, 0}, + {HDCP_2_2_AKE_SEND_HPRIME, DP_HDCP_2_2_AKE_SEND_HPRIME_OFFSET, + true, HDCP_2_2_HPRIME_PAIRED_TIMEOUT_MS, + HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS}, + {HDCP_2_2_AKE_SEND_PAIRING_INFO, + DP_HDCP_2_2_AKE_SEND_PAIRING_INFO_OFFSET, true, + HDCP_2_2_PAIRING_TIMEOUT_MS, 0}, + {HDCP_2_2_LC_INIT, DP_HDCP_2_2_LC_INIT_OFFSET, false, 0, 0}, + {HDCP_2_2_LC_SEND_LPRIME, DP_HDCP_2_2_LC_SEND_LPRIME_OFFSET, + false, HDCP_2_2_DP_LPRIME_TIMEOUT_MS, 0}, + {HDCP_2_2_SKE_SEND_EKS, DP_HDCP_2_2_SKE_SEND_EKS_OFFSET, false, + 0, 0}, + {HDCP_2_2_REP_SEND_RECVID_LIST, + DP_HDCP_2_2_REP_SEND_RECVID_LIST_OFFSET, true, + HDCP_2_2_RECVID_LIST_TIMEOUT_MS, 0}, + {HDCP_2_2_REP_SEND_ACK, DP_HDCP_2_2_REP_SEND_ACK_OFFSET, false, + 0, 0}, + {HDCP_2_2_REP_STREAM_MANAGE, + DP_HDCP_2_2_REP_STREAM_MANAGE_OFFSET, false, + 0, 0}, + {HDCP_2_2_REP_STREAM_READY, DP_HDCP_2_2_REP_STREAM_READY_OFFSET, + false, HDCP_2_2_STREAM_READY_TIMEOUT_MS, 0}, +/* local define to shovel this through the write_2_2 interface */ +#define HDCP_2_2_ERRATA_DP_STREAM_TYPE 50 + {HDCP_2_2_ERRATA_DP_STREAM_TYPE, + DP_HDCP_2_2_REG_STREAM_TYPE_OFFSET, false, + 0, 0}, + }; + +static inline +int intel_dp_hdcp2_read_rx_status(struct intel_digital_port *intel_dig_port, + u8 *rx_status) +{ + ssize_t ret; + + ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, + DP_HDCP_2_2_REG_RXSTATUS_OFFSET, rx_status, + HDCP_2_2_DP_RXSTATUS_LEN); + if (ret != HDCP_2_2_DP_RXSTATUS_LEN) { + DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret); + return ret >= 0 ? -EIO : ret; + } + + return 0; +} + +static +int hdcp2_detect_msg_availability(struct intel_digital_port *intel_dig_port, + u8 msg_id, bool *msg_ready) +{ + u8 rx_status; + int ret; + + *msg_ready = false; + ret = intel_dp_hdcp2_read_rx_status(intel_dig_port, &rx_status); + if (ret < 0) + return ret; + + switch (msg_id) { + case HDCP_2_2_AKE_SEND_HPRIME: + if (HDCP_2_2_DP_RXSTATUS_H_PRIME(rx_status)) + *msg_ready = true; + break; + case HDCP_2_2_AKE_SEND_PAIRING_INFO: + if (HDCP_2_2_DP_RXSTATUS_PAIRING(rx_status)) + *msg_ready = true; + break; + case HDCP_2_2_REP_SEND_RECVID_LIST: + if (HDCP_2_2_DP_RXSTATUS_READY(rx_status)) + *msg_ready = true; + break; + default: + DRM_ERROR("Unidentified msg_id: %d\n", msg_id); + return -EINVAL; + } + + return 0; +} + +static ssize_t +intel_dp_hdcp2_wait_for_msg(struct intel_digital_port *intel_dig_port, + struct hdcp2_dp_msg_data *hdcp2_msg_data) +{ + struct intel_dp *dp = &intel_dig_port->dp; + struct intel_hdcp *hdcp = &dp->attached_connector->hdcp; + u8 msg_id = hdcp2_msg_data->msg_id; + int ret, timeout; + bool msg_ready = false; + + if (msg_id == HDCP_2_2_AKE_SEND_HPRIME && !hdcp->is_paired) + timeout = hdcp2_msg_data->timeout2; + else + timeout = hdcp2_msg_data->timeout; + + /* + * There is no way to detect the CERT, LPRIME and STREAM_READY + * availability. So Wait for timeout and read the msg. + */ + if (!hdcp2_msg_data->msg_detectable) { + mdelay(timeout); + ret = 0; + } else { + /* + * As we want to check the msg availability at timeout, Ignoring + * the timeout at wait for CP_IRQ. 
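[Editor's note] hdcp2_detect_msg_availability() above maps a message id to the RxStatus bit that announces it. A compact, runnable sketch of that decode step; the mask values are invented for the example and are not the DP HDCP 2.2 RxStatus layout:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative bit positions only. */
#define RXSTATUS_H_PRIME_AVAILABLE	(1u << 0)
#define RXSTATUS_PAIRING_AVAILABLE	(1u << 1)
#define RXSTATUS_READY			(1u << 2)

enum hdcp2_msg { AKE_SEND_HPRIME, AKE_SEND_PAIRING_INFO, REP_SEND_RECVID_LIST };

/* Returns 0 and sets *msg_ready, or -1 for a message that cannot be detected. */
static int detect_msg_availability(uint8_t rx_status, enum hdcp2_msg id, bool *msg_ready)
{
	*msg_ready = false;

	switch (id) {
	case AKE_SEND_HPRIME:
		*msg_ready = rx_status & RXSTATUS_H_PRIME_AVAILABLE;
		break;
	case AKE_SEND_PAIRING_INFO:
		*msg_ready = rx_status & RXSTATUS_PAIRING_AVAILABLE;
		break;
	case REP_SEND_RECVID_LIST:
		*msg_ready = rx_status & RXSTATUS_READY;
		break;
	default:
		return -1;
	}
	return 0;
}

int main(void)
{
	bool ready;

	if (!detect_msg_availability(RXSTATUS_PAIRING_AVAILABLE,
				     AKE_SEND_PAIRING_INFO, &ready))
		printf("pairing info ready: %s\n", ready ? "yes" : "no");
	return 0;
}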
+ */ + intel_dp_hdcp_wait_for_cp_irq(hdcp, timeout); + ret = hdcp2_detect_msg_availability(intel_dig_port, + msg_id, &msg_ready); + if (!msg_ready) + ret = -ETIMEDOUT; + } + + if (ret) + DRM_DEBUG_KMS("msg_id %d, ret %d, timeout(mSec): %d\n", + hdcp2_msg_data->msg_id, ret, timeout); + + return ret; +} + +static struct hdcp2_dp_msg_data *get_hdcp2_dp_msg_data(u8 msg_id) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(hdcp2_msg_data); i++) + if (hdcp2_msg_data[i].msg_id == msg_id) + return &hdcp2_msg_data[i]; + + return NULL; +} + +static +int intel_dp_hdcp2_write_msg(struct intel_digital_port *intel_dig_port, + void *buf, size_t size) +{ + struct intel_dp *dp = &intel_dig_port->dp; + struct intel_hdcp *hdcp = &dp->attached_connector->hdcp; + unsigned int offset; + u8 *byte = buf; + ssize_t ret, bytes_to_write, len; + struct hdcp2_dp_msg_data *hdcp2_msg_data; + + hdcp2_msg_data = get_hdcp2_dp_msg_data(*byte); + if (!hdcp2_msg_data) + return -EINVAL; + + offset = hdcp2_msg_data->offset; + + /* No msg_id in DP HDCP2.2 msgs */ + bytes_to_write = size - 1; + byte++; + + hdcp->cp_irq_count_cached = atomic_read(&hdcp->cp_irq_count); + + while (bytes_to_write) { + len = bytes_to_write > DP_AUX_MAX_PAYLOAD_BYTES ? + DP_AUX_MAX_PAYLOAD_BYTES : bytes_to_write; + + ret = drm_dp_dpcd_write(&intel_dig_port->dp.aux, + offset, (void *)byte, len); + if (ret < 0) + return ret; + + bytes_to_write -= ret; + byte += ret; + offset += ret; + } + + return size; +} + +static +ssize_t get_receiver_id_list_size(struct intel_digital_port *intel_dig_port) +{ + u8 rx_info[HDCP_2_2_RXINFO_LEN]; + u32 dev_cnt; + ssize_t ret; + + ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, + DP_HDCP_2_2_REG_RXINFO_OFFSET, + (void *)rx_info, HDCP_2_2_RXINFO_LEN); + if (ret != HDCP_2_2_RXINFO_LEN) + return ret >= 0 ? -EIO : ret; + + dev_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 | + HDCP_2_2_DEV_COUNT_LO(rx_info[1])); + + if (dev_cnt > HDCP_2_2_MAX_DEVICE_COUNT) + dev_cnt = HDCP_2_2_MAX_DEVICE_COUNT; + + ret = sizeof(struct hdcp2_rep_send_receiverid_list) - + HDCP_2_2_RECEIVER_IDS_MAX_LEN + + (dev_cnt * HDCP_2_2_RECEIVER_ID_LEN); + + return ret; +} + +static +int intel_dp_hdcp2_read_msg(struct intel_digital_port *intel_dig_port, + u8 msg_id, void *buf, size_t size) +{ + unsigned int offset; + u8 *byte = buf; + ssize_t ret, bytes_to_recv, len; + struct hdcp2_dp_msg_data *hdcp2_msg_data; + + hdcp2_msg_data = get_hdcp2_dp_msg_data(msg_id); + if (!hdcp2_msg_data) + return -EINVAL; + offset = hdcp2_msg_data->offset; + + ret = intel_dp_hdcp2_wait_for_msg(intel_dig_port, hdcp2_msg_data); + if (ret < 0) + return ret; + + if (msg_id == HDCP_2_2_REP_SEND_RECVID_LIST) { + ret = get_receiver_id_list_size(intel_dig_port); + if (ret < 0) + return ret; + + size = ret; + } + bytes_to_recv = size - 1; + + /* DP adaptation msgs has no msg_id */ + byte++; + + while (bytes_to_recv) { + len = bytes_to_recv > DP_AUX_MAX_PAYLOAD_BYTES ? 
+ DP_AUX_MAX_PAYLOAD_BYTES : bytes_to_recv; + + ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, offset, + (void *)byte, len); + if (ret < 0) { + DRM_DEBUG_KMS("msg_id %d, ret %zd\n", msg_id, ret); + return ret; + } + + bytes_to_recv -= ret; + byte += ret; + offset += ret; + } + byte = buf; + *byte = msg_id; + + return size; +} + +static +int intel_dp_hdcp2_config_stream_type(struct intel_digital_port *intel_dig_port, + bool is_repeater, u8 content_type) +{ + struct hdcp2_dp_errata_stream_type stream_type_msg; + + if (is_repeater) + return 0; + + /* + * Errata for DP: As Stream type is used for encryption, Receiver + * should be communicated with stream type for the decryption of the + * content. + * Repeater will be communicated with stream type as a part of it's + * auth later in time. + */ + stream_type_msg.msg_id = HDCP_2_2_ERRATA_DP_STREAM_TYPE; + stream_type_msg.stream_type = content_type; + + return intel_dp_hdcp2_write_msg(intel_dig_port, &stream_type_msg, + sizeof(stream_type_msg)); +} + +static +int intel_dp_hdcp2_check_link(struct intel_digital_port *intel_dig_port) +{ + u8 rx_status; + int ret; + + ret = intel_dp_hdcp2_read_rx_status(intel_dig_port, &rx_status); + if (ret) + return ret; + + if (HDCP_2_2_DP_RXSTATUS_REAUTH_REQ(rx_status)) + ret = HDCP_REAUTH_REQUEST; + else if (HDCP_2_2_DP_RXSTATUS_LINK_FAILED(rx_status)) + ret = HDCP_LINK_INTEGRITY_FAILURE; + else if (HDCP_2_2_DP_RXSTATUS_READY(rx_status)) + ret = HDCP_TOPOLOGY_CHANGE; + + return ret; +} + +static +int intel_dp_hdcp2_capable(struct intel_digital_port *intel_dig_port, + bool *capable) +{ + u8 rx_caps[3]; + int ret; + + *capable = false; + ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, + DP_HDCP_2_2_REG_RX_CAPS_OFFSET, + rx_caps, HDCP_2_2_RXCAPS_LEN); + if (ret != HDCP_2_2_RXCAPS_LEN) + return ret >= 0 ? -EIO : ret; + + if (rx_caps[0] == HDCP_2_2_RX_CAPS_VERSION_VAL && + HDCP_2_2_DP_HDCP_CAPABLE(rx_caps[2])) + *capable = true; + + return 0; +} + static const struct intel_hdcp_shim intel_dp_hdcp_shim = { .write_an_aksv = intel_dp_hdcp_write_an_aksv, .read_bksv = intel_dp_hdcp_read_bksv, @@ -5859,6 +6209,12 @@ static const struct intel_hdcp_shim intel_dp_hdcp_shim = { .toggle_signalling = intel_dp_hdcp_toggle_signalling, .check_link = intel_dp_hdcp_check_link, .hdcp_capable = intel_dp_hdcp_capable, + .write_2_2_msg = intel_dp_hdcp2_write_msg, + .read_2_2_msg = intel_dp_hdcp2_read_msg, + .config_stream_type = intel_dp_hdcp2_config_stream_type, + .check_2_2_link = intel_dp_hdcp2_check_link, + .hdcp_2_2_capable = intel_dp_hdcp2_capable, + .protocol = HDCP_PROTOCOL_DP, }; static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp) @@ -6072,43 +6428,34 @@ static void intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0; + u32 pp_on, pp_off, pp_ctl; struct pps_registers regs; intel_pps_get_registers(intel_dp, ®s); - /* Workaround: Need to write PP_CONTROL with the unlock key as - * the very first thing. 
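[Editor's note] Both intel_dp_hdcp2_write_msg() and intel_dp_hdcp2_read_msg() above move their payload in DP_AUX_MAX_PAYLOAD_BYTES slices and skip the msg_id byte that DP adaptation messages do not carry on the wire. A self-contained sketch of the chunking loop, with a fake transfer callback in place of drm_dp_dpcd_write(); the 16-byte cap matches the AUX payload limit, everything else is illustrative:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define AUX_MAX_PAYLOAD 16	/* per-transaction AUX data limit */

/* Fake transfer: pretend the sink accepted exactly 'len' bytes. */
static long aux_write(unsigned int offset, const uint8_t *buf, size_t len)
{
	(void)buf;
	printf("aux write %zu bytes at %#x\n", len, offset);
	return (long)len;	/* a short return is absorbed by the caller's loop */
}

static long write_msg(unsigned int offset, const uint8_t *buf, size_t size)
{
	const uint8_t *byte = buf + 1;		/* msg_id byte is not sent over AUX */
	size_t bytes_to_write = size - 1;

	while (bytes_to_write) {
		size_t len = bytes_to_write > AUX_MAX_PAYLOAD ?
			     AUX_MAX_PAYLOAD : bytes_to_write;
		long ret = aux_write(offset, byte, len);

		if (ret < 0)
			return ret;

		bytes_to_write -= (size_t)ret;
		byte += ret;
		offset += (unsigned int)ret;
	}
	return (long)size;
}

int main(void)
{
	uint8_t msg[40];

	memset(msg, 0xab, sizeof(msg));
	msg[0] = 0x02;			/* msg_id, consumed locally (illustrative id) */
	return write_msg(0x1000, msg, sizeof(msg)) < 0;
}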
*/ pp_ctl = ironlake_get_pp_control(intel_dp); + /* Ensure PPS is unlocked */ + if (!HAS_DDI(dev_priv)) + I915_WRITE(regs.pp_ctrl, pp_ctl); + pp_on = I915_READ(regs.pp_on); pp_off = I915_READ(regs.pp_off); - if (!IS_GEN9_LP(dev_priv) && !HAS_PCH_CNP(dev_priv) && - !HAS_PCH_ICP(dev_priv)) { - I915_WRITE(regs.pp_ctrl, pp_ctl); - pp_div = I915_READ(regs.pp_div); - } /* Pull timing values out of registers */ - seq->t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >> - PANEL_POWER_UP_DELAY_SHIFT; - - seq->t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >> - PANEL_LIGHT_ON_DELAY_SHIFT; + seq->t1_t3 = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, pp_on); + seq->t8 = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, pp_on); + seq->t9 = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, pp_off); + seq->t10 = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, pp_off); - seq->t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >> - PANEL_LIGHT_OFF_DELAY_SHIFT; + if (i915_mmio_reg_valid(regs.pp_div)) { + u32 pp_div; - seq->t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >> - PANEL_POWER_DOWN_DELAY_SHIFT; + pp_div = I915_READ(regs.pp_div); - if (IS_GEN9_LP(dev_priv) || HAS_PCH_CNP(dev_priv) || - HAS_PCH_ICP(dev_priv)) { - seq->t11_t12 = ((pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >> - BXT_POWER_CYCLE_DELAY_SHIFT) * 1000; + seq->t11_t12 = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, pp_div) * 1000; } else { - seq->t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >> - PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000; + seq->t11_t12 = REG_FIELD_GET(BXT_POWER_CYCLE_DELAY_MASK, pp_ctl) * 1000; } } @@ -6233,7 +6580,7 @@ intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp, bool force_disable_vdd) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - u32 pp_on, pp_off, pp_div, port_sel = 0; + u32 pp_on, pp_off, port_sel = 0; int div = dev_priv->rawclk_freq / 1000; struct pps_registers regs; enum port port = dp_to_dig_port(intel_dp)->base.port; @@ -6268,23 +6615,10 @@ intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp, I915_WRITE(regs.pp_ctrl, pp); } - pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) | - (seq->t8 << PANEL_LIGHT_ON_DELAY_SHIFT); - pp_off = (seq->t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) | - (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT); - /* Compute the divisor for the pp clock, simply match the Bspec - * formula. */ - if (IS_GEN9_LP(dev_priv) || HAS_PCH_CNP(dev_priv) || - HAS_PCH_ICP(dev_priv)) { - pp_div = I915_READ(regs.pp_ctrl); - pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK; - pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000) - << BXT_POWER_CYCLE_DELAY_SHIFT); - } else { - pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT; - pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000) - << PANEL_POWER_CYCLE_DELAY_SHIFT); - } + pp_on = REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, seq->t1_t3) | + REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, seq->t8); + pp_off = REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, seq->t9) | + REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, seq->t10); /* Haswell doesn't have any port selection bits for the panel * power sequencer any more. */ @@ -6311,19 +6645,29 @@ intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp, I915_WRITE(regs.pp_on, pp_on); I915_WRITE(regs.pp_off, pp_off); - if (IS_GEN9_LP(dev_priv) || HAS_PCH_CNP(dev_priv) || - HAS_PCH_ICP(dev_priv)) - I915_WRITE(regs.pp_ctrl, pp_div); - else - I915_WRITE(regs.pp_div, pp_div); + + /* + * Compute the divisor for the pp clock, simply match the Bspec formula. 
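[Editor's note] The PPS readout above replaces open-coded mask-and-shift pairs with REG_FIELD_GET()/REG_FIELD_PREP(). For contiguous masks those helpers reduce to the classic pattern below; this is a sketch that assumes GCC/Clang (__builtin_ctz), and the masks are invented, not the real PP_ON/PP_OFF layout:

#include <stdint.h>
#include <stdio.h>

/* Shift amount is derived from the mask, so callers only name the field. */
#define FIELD_SHIFT(mask)	((unsigned int)__builtin_ctz(mask))
#define FIELD_GET(mask, reg)	(((reg) & (mask)) >> FIELD_SHIFT(mask))
#define FIELD_PREP(mask, val)	(((uint32_t)(val) << FIELD_SHIFT(mask)) & (mask))

/* Invented layout: power-up delay in bits 16..28, light-on delay in bits 0..12. */
#define POWER_UP_DELAY_MASK	0x1fff0000u
#define LIGHT_ON_DELAY_MASK	0x00001fffu

int main(void)
{
	uint32_t pp_on;

	/* Pack two delay fields into one register value... */
	pp_on = FIELD_PREP(POWER_UP_DELAY_MASK, 2100) |
		FIELD_PREP(LIGHT_ON_DELAY_MASK, 50);

	/* ...and pull them back out without repeating any shift constants. */
	printf("t1_t3=%u t8=%u\n",
	       (unsigned int)FIELD_GET(POWER_UP_DELAY_MASK, pp_on),
	       (unsigned int)FIELD_GET(LIGHT_ON_DELAY_MASK, pp_on));
	return 0;
}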
+ */ + if (i915_mmio_reg_valid(regs.pp_div)) { + I915_WRITE(regs.pp_div, + REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, (100 * div) / 2 - 1) | + REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000))); + } else { + u32 pp_ctl; + + pp_ctl = I915_READ(regs.pp_ctrl); + pp_ctl &= ~BXT_POWER_CYCLE_DELAY_MASK; + pp_ctl |= REG_FIELD_PREP(BXT_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000)); + I915_WRITE(regs.pp_ctrl, pp_ctl); + } DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n", I915_READ(regs.pp_on), I915_READ(regs.pp_off), - (IS_GEN9_LP(dev_priv) || HAS_PCH_CNP(dev_priv) || - HAS_PCH_ICP(dev_priv)) ? - (I915_READ(regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK) : - I915_READ(regs.pp_div)); + i915_mmio_reg_valid(regs.pp_div) ? + I915_READ(regs.pp_div) : + (I915_READ(regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK)); } static void intel_dp_pps_init(struct intel_dp *intel_dp) @@ -6694,9 +7038,7 @@ intel_dp_drrs_init(struct intel_connector *connector, return NULL; } - downclock_mode = intel_find_panel_downclock(dev_priv, fixed_mode, - &connector->base); - + downclock_mode = intel_panel_edid_downclock_mode(connector, fixed_mode); if (!downclock_mode) { DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n"); return NULL; @@ -6718,7 +7060,6 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp, struct drm_display_mode *fixed_mode = NULL; struct drm_display_mode *downclock_mode = NULL; bool has_dpcd; - struct drm_display_mode *scan; enum pipe pipe = INVALID_PIPE; intel_wakeref_t wakeref; struct edid *edid; @@ -6734,7 +7075,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp, * eDP and LVDS bail out early in this case to prevent interfering * with an already powered-on LVDS power sequencer. 
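[Editor's note] The DRRS hunk above and the eDP connector hunk that follows replace the open-coded probed-mode walk and VBT copy with intel_panel_edid_downclock_mode(), intel_panel_edid_fixed_mode() and intel_panel_vbt_fixed_mode(), leaving the caller with a plain fallback chain. A rough, hypothetical sketch of that shape (the mode struct and both helpers are stand-ins, not the i915 API):

#include <stdio.h>
#include <stdlib.h>

struct display_mode { int clock; };

/* Stand-in for intel_panel_edid_fixed_mode(): preferred probed mode, if any. */
static struct display_mode *edid_fixed_mode(int have_edid)
{
	struct display_mode *m;

	if (!have_edid)
		return NULL;
	m = malloc(sizeof(*m));
	if (m)
		m->clock = 138500;
	return m;
}

/* Stand-in for intel_panel_vbt_fixed_mode(): firmware-provided fallback. */
static struct display_mode *vbt_fixed_mode(void)
{
	struct display_mode *m = malloc(sizeof(*m));

	if (m)
		m->clock = 141000;
	return m;
}

int main(void)
{
	/* Prefer the EDID mode; fall back to VBT only when EDID gave nothing. */
	struct display_mode *fixed = edid_fixed_mode(0);

	if (!fixed)
		fixed = vbt_fixed_mode();
	if (fixed)
		printf("fixed mode clock: %d kHz\n", fixed->clock);
	free(fixed);
	return 0;
}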
*/ - if (intel_get_lvds_encoder(&dev_priv->drm)) { + if (intel_get_lvds_encoder(dev_priv)) { WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))); DRM_INFO("LVDS was detected, not registering eDP\n"); @@ -6771,26 +7112,13 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp, } intel_connector->edid = edid; - /* prefer fixed mode from EDID if available */ - list_for_each_entry(scan, &connector->probed_modes, head) { - if ((scan->type & DRM_MODE_TYPE_PREFERRED)) { - fixed_mode = drm_mode_duplicate(dev, scan); - downclock_mode = intel_dp_drrs_init( - intel_connector, fixed_mode); - break; - } - } + fixed_mode = intel_panel_edid_fixed_mode(intel_connector); + if (fixed_mode) + downclock_mode = intel_dp_drrs_init(intel_connector, fixed_mode); /* fallback to VBT if available for eDP */ - if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) { - fixed_mode = drm_mode_duplicate(dev, - dev_priv->vbt.lfp_lvds_vbt_mode); - if (fixed_mode) { - fixed_mode->type |= DRM_MODE_TYPE_PREFERRED; - connector->display_info.width_mm = fixed_mode->width_mm; - connector->display_info.height_mm = fixed_mode->height_mm; - } - } + if (!fixed_mode) + fixed_mode = intel_panel_vbt_fixed_mode(intel_connector); mutex_unlock(&dev->mode_config.mutex); if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c index fb67cd931117..19d81cef2ab6 100644 --- a/drivers/gpu/drm/i915/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/intel_dp_mst.c @@ -29,72 +29,108 @@ #include <drm/drm_edid.h> #include <drm/drm_probe_helper.h> +static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder, + struct intel_crtc_state *crtc_state, + struct drm_connector_state *conn_state, + struct link_config_limits *limits) +{ + struct drm_atomic_state *state = crtc_state->base.state; + struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base); + struct intel_dp *intel_dp = &intel_mst->primary->dp; + struct intel_connector *connector = + to_intel_connector(conn_state->connector); + const struct drm_display_mode *adjusted_mode = + &crtc_state->base.adjusted_mode; + void *port = connector->port; + bool constant_n = drm_dp_has_quirk(&intel_dp->desc, + DP_DPCD_QUIRK_CONSTANT_N); + int bpp, slots = -EINVAL; + + crtc_state->lane_count = limits->max_lane_count; + crtc_state->port_clock = limits->max_clock; + + for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) { + crtc_state->pipe_bpp = bpp; + + crtc_state->pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock, + crtc_state->pipe_bpp); + + slots = drm_dp_atomic_find_vcpi_slots(state, &intel_dp->mst_mgr, + port, crtc_state->pbn); + if (slots == -EDEADLK) + return slots; + if (slots >= 0) + break; + } + + if (slots < 0) { + DRM_DEBUG_KMS("failed finding vcpi slots:%d\n", slots); + return slots; + } + + intel_link_compute_m_n(crtc_state->pipe_bpp, + crtc_state->lane_count, + adjusted_mode->crtc_clock, + crtc_state->port_clock, + &crtc_state->dp_m_n, + constant_n); + crtc_state->dp_m_n.tu = slots; + + return 0; +} + static int intel_dp_mst_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config, struct drm_connector_state *conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base); - struct intel_digital_port *intel_dig_port = intel_mst->primary; - struct intel_dp *intel_dp = &intel_dig_port->dp; - struct drm_connector *connector = conn_state->connector; - void *port = 
to_intel_connector(connector)->port; - struct drm_atomic_state *state = pipe_config->base.state; - struct drm_crtc *crtc = pipe_config->base.crtc; - struct drm_crtc_state *old_crtc_state = - drm_atomic_get_old_crtc_state(state, crtc); - int bpp; - int lane_count, slots = - to_intel_crtc_state(old_crtc_state)->dp_m_n.tu; - const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; - int mst_pbn; - bool constant_n = drm_dp_has_quirk(&intel_dp->desc, - DP_DPCD_QUIRK_CONSTANT_N); + struct intel_dp *intel_dp = &intel_mst->primary->dp; + struct intel_connector *connector = + to_intel_connector(conn_state->connector); + struct intel_digital_connector_state *intel_conn_state = + to_intel_digital_connector_state(conn_state); + const struct drm_display_mode *adjusted_mode = + &pipe_config->base.adjusted_mode; + void *port = connector->port; + struct link_config_limits limits; + int ret; if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) return -EINVAL; pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; pipe_config->has_pch_encoder = false; - bpp = 24; - if (intel_dp->compliance.test_data.bpc) { - bpp = intel_dp->compliance.test_data.bpc * 3; - DRM_DEBUG_KMS("Setting pipe bpp to %d\n", - bpp); - } + + if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO) + pipe_config->has_audio = + drm_dp_mst_port_has_audio(&intel_dp->mst_mgr, port); + else + pipe_config->has_audio = + intel_conn_state->force_audio == HDMI_AUDIO_ON; + /* * for MST we always configure max link bw - the spec doesn't * seem to suggest we should do otherwise. */ - lane_count = intel_dp_max_lane_count(intel_dp); - - pipe_config->lane_count = lane_count; + limits.min_clock = + limits.max_clock = intel_dp_max_link_rate(intel_dp); - pipe_config->pipe_bpp = bpp; + limits.min_lane_count = + limits.max_lane_count = intel_dp_max_lane_count(intel_dp); - pipe_config->port_clock = intel_dp_max_link_rate(intel_dp); + limits.min_bpp = 6 * 3; + limits.max_bpp = pipe_config->pipe_bpp; - if (drm_dp_mst_port_has_audio(&intel_dp->mst_mgr, port)) - pipe_config->has_audio = true; + intel_dp_adjust_compliance_config(intel_dp, pipe_config, &limits); - mst_pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock, bpp); - pipe_config->pbn = mst_pbn; - - slots = drm_dp_atomic_find_vcpi_slots(state, &intel_dp->mst_mgr, port, - mst_pbn); - if (slots < 0) { - DRM_DEBUG_KMS("failed finding vcpi slots:%d\n", - slots); - return slots; - } - - intel_link_compute_m_n(bpp, lane_count, - adjusted_mode->crtc_clock, - pipe_config->port_clock, - &pipe_config->dp_m_n, - constant_n); + ret = intel_dp_mst_compute_link_config(encoder, pipe_config, + conn_state, &limits); + if (ret) + return ret; - pipe_config->dp_m_n.tu = slots; + pipe_config->limited_color_range = + intel_dp_limited_color_range(pipe_config, conn_state); if (IS_GEN9_LP(dev_priv)) pipe_config->lane_lat_optim_mask = @@ -117,7 +153,11 @@ intel_dp_mst_atomic_check(struct drm_connector *connector, struct drm_crtc *new_crtc = new_conn_state->crtc; struct drm_crtc_state *crtc_state; struct drm_dp_mst_topology_mgr *mgr; - int ret = 0; + int ret; + + ret = intel_digital_connector_atomic_check(connector, new_conn_state); + if (ret) + return ret; if (!old_conn_state->crtc) return 0; @@ -289,7 +329,7 @@ static void intel_mst_enable_dp(struct intel_encoder *encoder, DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links); - if (intel_wait_for_register(dev_priv, + if (intel_wait_for_register(&dev_priv->uncore, DP_TP_STATUS(port), DP_TP_STATUS_ACT_SENT, DP_TP_STATUS_ACT_SENT, @@ -354,11 +394,13 @@ 
intel_dp_mst_detect(struct drm_connector *connector, bool force) static const struct drm_connector_funcs intel_dp_mst_connector_funcs = { .detect = intel_dp_mst_detect, .fill_modes = drm_helper_probe_single_connector_modes, + .atomic_get_property = intel_digital_connector_atomic_get_property, + .atomic_set_property = intel_digital_connector_atomic_set_property, .late_register = intel_connector_register, .early_unregister = intel_connector_unregister, .destroy = intel_connector_destroy, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, - .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_duplicate_state = intel_digital_connector_duplicate_state, }; static int intel_dp_mst_get_modes(struct drm_connector *connector) @@ -373,7 +415,6 @@ intel_dp_mst_mode_valid(struct drm_connector *connector, struct intel_connector *intel_connector = to_intel_connector(connector); struct intel_dp *intel_dp = intel_connector->mst_port; int max_dotclk = to_i915(connector->dev)->max_dotclk_freq; - int bpp = 24; /* MST uses fixed bpp */ int max_rate, mode_rate, max_lanes, max_link_clock; if (drm_connector_is_unregistered(connector)) @@ -386,7 +427,7 @@ intel_dp_mst_mode_valid(struct drm_connector *connector, max_lanes = intel_dp_max_lane_count(intel_dp); max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes); - mode_rate = intel_dp_link_required(mode->clock, bpp); + mode_rate = intel_dp_link_required(mode->clock, 18); /* TODO - validate mode against available PBN for link */ if (mode->clock < 10000) @@ -487,6 +528,10 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo if (ret) goto err; + intel_attach_force_audio_property(connector); + intel_attach_broadcast_rgb_property(connector); + drm_connector_attach_max_bpc_property(connector, 6, 12); + return connector; err: diff --git a/drivers/gpu/drm/i915/intel_dpio_phy.c b/drivers/gpu/drm/i915/intel_dpio_phy.c index 95cb8b154f87..db295c77ff0d 100644 --- a/drivers/gpu/drm/i915/intel_dpio_phy.c +++ b/drivers/gpu/drm/i915/intel_dpio_phy.c @@ -341,7 +341,7 @@ static u32 bxt_get_grc(struct drm_i915_private *dev_priv, enum dpio_phy phy) static void bxt_phy_wait_grc_done(struct drm_i915_private *dev_priv, enum dpio_phy phy) { - if (intel_wait_for_register(dev_priv, + if (intel_wait_for_register(&dev_priv->uncore, BXT_PORT_REF_DW3(phy), GRC_DONE, GRC_DONE, 10)) @@ -383,7 +383,8 @@ static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv, * The flag should get set in 100us according to the HW team, but * use 1ms due to occasional timeouts observed with that. 
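[Editor's note] intel_dp_mst_compute_link_config(), added in the intel_dp_mst.c hunks above, pins the link at its maximum rate and lane count and then walks pipe_bpp downwards until the stream's PBN fits in the available VCPI slots. A simplified, self-contained version of that search; the PBN formula and the 64-slot budget are placeholders, not the DP MST spec values:

#include <stdio.h>

#define SLOT_BUDGET 64	/* placeholder for the topology's free VCPI slots */

/* Placeholder bandwidth model: "PBN" grows with pixel clock and bpp. */
static int calc_pbn(int crtc_clock_khz, int bpp)
{
	return (crtc_clock_khz / 1000) * bpp / 54;
}

/* Pretend slot allocator: fail with -1 when the request exceeds the budget. */
static int find_vcpi_slots(int pbn)
{
	int slots = (pbn + 7) / 8;

	return slots <= SLOT_BUDGET ? slots : -1;
}

int main(void)
{
	const int crtc_clock = 533250;	/* kHz */
	const int min_bpp = 6 * 3, max_bpp = 10 * 3;
	int bpp, slots = -1;

	/* Walk bpp down in full-pixel (2 * 3 bit) steps until the stream fits. */
	for (bpp = max_bpp; bpp >= min_bpp; bpp -= 2 * 3) {
		int pbn = calc_pbn(crtc_clock, bpp);

		slots = find_vcpi_slots(pbn);
		if (slots >= 0) {
			printf("using bpp %d: pbn %d -> %d slots\n", bpp, pbn, slots);
			break;
		}
	}

	if (slots < 0)
		fprintf(stderr, "no bpp fits the available slots\n");
	return slots < 0;
}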
*/ - if (intel_wait_for_register_fw(dev_priv, BXT_PORT_CL1CM_DW0(phy), + if (intel_wait_for_register_fw(&dev_priv->uncore, + BXT_PORT_CL1CM_DW0(phy), PHY_RESERVED | PHY_POWER_GOOD, PHY_POWER_GOOD, 1)) diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c index 0a42d11c4c33..e01c057ce50b 100644 --- a/drivers/gpu/drm/i915/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c @@ -241,11 +241,11 @@ out: } static struct intel_shared_dpll * -intel_find_shared_dpll(struct intel_crtc *crtc, - struct intel_crtc_state *crtc_state, +intel_find_shared_dpll(struct intel_crtc_state *crtc_state, enum intel_dpll_id range_min, enum intel_dpll_id range_max) { + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_shared_dpll *pll, *unused_pll = NULL; struct intel_shared_dpll_state *shared_dpll; @@ -420,9 +420,10 @@ static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv, } static struct intel_shared_dpll * -ibx_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, +ibx_get_dpll(struct intel_crtc_state *crtc_state, struct intel_encoder *encoder) { + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_shared_dpll *pll; enum intel_dpll_id i; @@ -436,7 +437,7 @@ ibx_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, crtc->base.base.id, crtc->base.name, pll->info->name); } else { - pll = intel_find_shared_dpll(crtc, crtc_state, + pll = intel_find_shared_dpll(crtc_state, DPLL_ID_PCH_PLL_A, DPLL_ID_PCH_PLL_B); } @@ -764,15 +765,13 @@ hsw_ddi_calculate_wrpll(int clock /* in Hz */, *r2_out = best.r2; } -static struct intel_shared_dpll *hsw_ddi_hdmi_get_dpll(int clock, - struct intel_crtc *crtc, - struct intel_crtc_state *crtc_state) +static struct intel_shared_dpll *hsw_ddi_hdmi_get_dpll(struct intel_crtc_state *crtc_state) { struct intel_shared_dpll *pll; u32 val; unsigned int p, n2, r2; - hsw_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p); + hsw_ddi_calculate_wrpll(crtc_state->port_clock * 1000, &r2, &n2, &p); val = WRPLL_PLL_ENABLE | WRPLL_PLL_LCPLL | WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) | @@ -780,7 +779,7 @@ static struct intel_shared_dpll *hsw_ddi_hdmi_get_dpll(int clock, crtc_state->dpll_hw_state.wrpll = val; - pll = intel_find_shared_dpll(crtc, crtc_state, + pll = intel_find_shared_dpll(crtc_state, DPLL_ID_WRPLL1, DPLL_ID_WRPLL2); if (!pll) @@ -790,11 +789,12 @@ static struct intel_shared_dpll *hsw_ddi_hdmi_get_dpll(int clock, } static struct intel_shared_dpll * -hsw_ddi_dp_get_dpll(struct intel_encoder *encoder, int clock) +hsw_ddi_dp_get_dpll(struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); struct intel_shared_dpll *pll; enum intel_dpll_id pll_id; + int clock = crtc_state->port_clock; switch (clock / 2) { case 81000: @@ -820,19 +820,18 @@ hsw_ddi_dp_get_dpll(struct intel_encoder *encoder, int clock) } static struct intel_shared_dpll * -hsw_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, +hsw_get_dpll(struct intel_crtc_state *crtc_state, struct intel_encoder *encoder) { struct intel_shared_dpll *pll; - int clock = crtc_state->port_clock; memset(&crtc_state->dpll_hw_state, 0, sizeof(crtc_state->dpll_hw_state)); if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) { - pll = 
hsw_ddi_hdmi_get_dpll(clock, crtc, crtc_state); + pll = hsw_ddi_hdmi_get_dpll(crtc_state); } else if (intel_crtc_has_dp_encoder(crtc_state)) { - pll = hsw_ddi_dp_get_dpll(encoder, clock); + pll = hsw_ddi_dp_get_dpll(crtc_state); } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) { if (WARN_ON(crtc_state->port_clock / 2 != 135000)) return NULL; @@ -840,7 +839,7 @@ hsw_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, crtc_state->dpll_hw_state.spll = SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz | SPLL_PLL_SSC; - pll = intel_find_shared_dpll(crtc, crtc_state, + pll = intel_find_shared_dpll(crtc_state, DPLL_ID_SPLL, DPLL_ID_SPLL); } else { return NULL; @@ -961,7 +960,7 @@ static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv, I915_WRITE(regs[id].ctl, I915_READ(regs[id].ctl) | LCPLL_PLL_ENABLE); - if (intel_wait_for_register(dev_priv, + if (intel_wait_for_register(&dev_priv->uncore, DPLL_STATUS, DPLL_LOCK(id), DPLL_LOCK(id), @@ -1308,9 +1307,7 @@ skip_remaining_dividers: return true; } -static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc *crtc, - struct intel_crtc_state *crtc_state, - int clock) +static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state) { u32 ctrl1, cfgcr1, cfgcr2; struct skl_wrpll_params wrpll_params = { 0, }; @@ -1323,7 +1320,8 @@ static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc *crtc, ctrl1 |= DPLL_CTRL1_HDMI_MODE(0); - if (!skl_ddi_calculate_wrpll(clock * 1000, &wrpll_params)) + if (!skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000, + &wrpll_params)) return false; cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE | @@ -1346,8 +1344,7 @@ static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc *crtc, } static bool -skl_ddi_dp_set_dpll_hw_state(int clock, - struct intel_dpll_hw_state *dpll_hw_state) +skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state) { u32 ctrl1; @@ -1356,7 +1353,7 @@ skl_ddi_dp_set_dpll_hw_state(int clock, * as the DPLL id in this function. 
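[Editor's note] A recurring change through the intel_dpll_mgr.c hunks in this patch is dropping the separate crtc and clock parameters: each ->get_dpll() hook now receives only the crtc_state and recovers the crtc, device and port clock from it. A small illustration of that API style, with invented types rather than the i915 structs:

#include <stdio.h>

struct crtc { int id; };

/* The state carries a back-pointer, so callees need nothing else. */
struct crtc_state {
	struct crtc *crtc;
	int port_clock;	/* kHz */
};

static int pick_pll(const struct crtc_state *state)
{
	/* Everything is derived from the one argument, as in the reworked hooks. */
	const struct crtc *crtc = state->crtc;

	switch (state->port_clock / 2) {
	case 81000:  return 0;	/* e.g. an 810 MHz fixed PLL */
	case 135000: return 1;	/* e.g. a 1350 MHz fixed PLL */
	case 270000: return 2;	/* e.g. a 2700 MHz fixed PLL */
	default:
		fprintf(stderr, "crtc %d: unsupported clock %d\n",
			crtc->id, state->port_clock);
		return -1;
	}
}

int main(void)
{
	struct crtc crtc = { .id = 1 };
	struct crtc_state state = { .crtc = &crtc, .port_clock = 270000 };

	printf("pll id: %d\n", pick_pll(&state));
	return 0;
}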
*/ ctrl1 = DPLL_CTRL1_OVERRIDE(0); - switch (clock / 2) { + switch (crtc_state->port_clock / 2) { case 81000: ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0); break; @@ -1378,44 +1375,43 @@ skl_ddi_dp_set_dpll_hw_state(int clock, break; } - dpll_hw_state->ctrl1 = ctrl1; + memset(&crtc_state->dpll_hw_state, 0, + sizeof(crtc_state->dpll_hw_state)); + + crtc_state->dpll_hw_state.ctrl1 = ctrl1; + return true; } static struct intel_shared_dpll * -skl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, +skl_get_dpll(struct intel_crtc_state *crtc_state, struct intel_encoder *encoder) { struct intel_shared_dpll *pll; - int clock = crtc_state->port_clock; bool bret; - struct intel_dpll_hw_state dpll_hw_state; - - memset(&dpll_hw_state, 0, sizeof(dpll_hw_state)); if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) { - bret = skl_ddi_hdmi_pll_dividers(crtc, crtc_state, clock); + bret = skl_ddi_hdmi_pll_dividers(crtc_state); if (!bret) { DRM_DEBUG_KMS("Could not get HDMI pll dividers.\n"); return NULL; } } else if (intel_crtc_has_dp_encoder(crtc_state)) { - bret = skl_ddi_dp_set_dpll_hw_state(clock, &dpll_hw_state); + bret = skl_ddi_dp_set_dpll_hw_state(crtc_state); if (!bret) { DRM_DEBUG_KMS("Could not set DP dpll HW state.\n"); return NULL; } - crtc_state->dpll_hw_state = dpll_hw_state; } else { return NULL; } if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) - pll = intel_find_shared_dpll(crtc, crtc_state, + pll = intel_find_shared_dpll(crtc_state, DPLL_ID_SKL_DPLL0, DPLL_ID_SKL_DPLL0); else - pll = intel_find_shared_dpll(crtc, crtc_state, + pll = intel_find_shared_dpll(crtc_state, DPLL_ID_SKL_DPLL1, DPLL_ID_SKL_DPLL3); if (!pll) @@ -1692,10 +1688,10 @@ static const struct bxt_clk_div bxt_dp_clk_val[] = { }; static bool -bxt_ddi_hdmi_pll_dividers(struct intel_crtc *intel_crtc, - struct intel_crtc_state *crtc_state, int clock, +bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state, struct bxt_clk_div *clk_div) { + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); struct dpll best_clock; /* Calculate HDMI div */ @@ -1703,9 +1699,10 @@ bxt_ddi_hdmi_pll_dividers(struct intel_crtc *intel_crtc, * FIXME: tie the following calculation into * i9xx_crtc_compute_clock */ - if (!bxt_find_best_dpll(crtc_state, clock, &best_clock)) { + if (!bxt_find_best_dpll(crtc_state, &best_clock)) { DRM_DEBUG_DRIVER("no PLL dividers found for clock %d pipe %c\n", - clock, pipe_name(intel_crtc->pipe)); + crtc_state->port_clock, + pipe_name(crtc->pipe)); return false; } @@ -1722,8 +1719,10 @@ bxt_ddi_hdmi_pll_dividers(struct intel_crtc *intel_crtc, return true; } -static void bxt_ddi_dp_pll_dividers(int clock, struct bxt_clk_div *clk_div) +static void bxt_ddi_dp_pll_dividers(struct intel_crtc_state *crtc_state, + struct bxt_clk_div *clk_div) { + int clock = crtc_state->port_clock; int i; *clk_div = bxt_dp_clk_val[0]; @@ -1737,14 +1736,17 @@ static void bxt_ddi_dp_pll_dividers(int clock, struct bxt_clk_div *clk_div) clk_div->vco = clock * 10 / 2 * clk_div->p1 * clk_div->p2; } -static bool bxt_ddi_set_dpll_hw_state(int clock, - struct bxt_clk_div *clk_div, - struct intel_dpll_hw_state *dpll_hw_state) +static bool bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state, + const struct bxt_clk_div *clk_div) { + struct intel_dpll_hw_state *dpll_hw_state = &crtc_state->dpll_hw_state; + int clock = crtc_state->port_clock; int vco = clk_div->vco; u32 prop_coef, int_coef, gain_ctl, targ_cnt; u32 lanestagger; + memset(dpll_hw_state, 0, sizeof(*dpll_hw_state)); + if (vco 
>= 6200000 && vco <= 6700000) { prop_coef = 4; int_coef = 9; @@ -1804,55 +1806,45 @@ static bool bxt_ddi_set_dpll_hw_state(int clock, } static bool -bxt_ddi_dp_set_dpll_hw_state(int clock, - struct intel_dpll_hw_state *dpll_hw_state) +bxt_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state) { - struct bxt_clk_div clk_div = {0}; + struct bxt_clk_div clk_div = {}; - bxt_ddi_dp_pll_dividers(clock, &clk_div); + bxt_ddi_dp_pll_dividers(crtc_state, &clk_div); - return bxt_ddi_set_dpll_hw_state(clock, &clk_div, dpll_hw_state); + return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div); } static bool -bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc *intel_crtc, - struct intel_crtc_state *crtc_state, int clock, - struct intel_dpll_hw_state *dpll_hw_state) +bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state) { - struct bxt_clk_div clk_div = { }; + struct bxt_clk_div clk_div = {}; - bxt_ddi_hdmi_pll_dividers(intel_crtc, crtc_state, clock, &clk_div); + bxt_ddi_hdmi_pll_dividers(crtc_state, &clk_div); - return bxt_ddi_set_dpll_hw_state(clock, &clk_div, dpll_hw_state); + return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div); } static struct intel_shared_dpll * -bxt_get_dpll(struct intel_crtc *crtc, - struct intel_crtc_state *crtc_state, - struct intel_encoder *encoder) +bxt_get_dpll(struct intel_crtc_state *crtc_state, + struct intel_encoder *encoder) { - struct intel_dpll_hw_state dpll_hw_state = { }; + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_shared_dpll *pll; - int i, clock = crtc_state->port_clock; + enum intel_dpll_id id; if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) && - !bxt_ddi_hdmi_set_dpll_hw_state(crtc, crtc_state, clock, - &dpll_hw_state)) + !bxt_ddi_hdmi_set_dpll_hw_state(crtc_state)) return NULL; if (intel_crtc_has_dp_encoder(crtc_state) && - !bxt_ddi_dp_set_dpll_hw_state(clock, &dpll_hw_state)) + !bxt_ddi_dp_set_dpll_hw_state(crtc_state)) return NULL; - memset(&crtc_state->dpll_hw_state, 0, - sizeof(crtc_state->dpll_hw_state)); - - crtc_state->dpll_hw_state = dpll_hw_state; - /* 1:1 mapping between ports and PLLs */ - i = (enum intel_dpll_id) encoder->port; - pll = intel_get_shared_dpll_by_id(dev_priv, i); + id = (enum intel_dpll_id) encoder->port; + pll = intel_get_shared_dpll_by_id(dev_priv, id); DRM_DEBUG_KMS("[CRTC:%d:%s] using pre-allocated %s\n", crtc->base.base.id, crtc->base.name, pll->info->name); @@ -1911,8 +1903,7 @@ static void intel_ddi_pll_init(struct drm_device *dev) struct intel_dpll_mgr { const struct dpll_info *dpll_info; - struct intel_shared_dpll *(*get_dpll)(struct intel_crtc *crtc, - struct intel_crtc_state *crtc_state, + struct intel_shared_dpll *(*get_dpll)(struct intel_crtc_state *crtc_state, struct intel_encoder *encoder); void (*dump_hw_state)(struct drm_i915_private *dev_priv, @@ -1986,7 +1977,7 @@ static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv, I915_WRITE(CNL_DPLL_ENABLE(id), val); /* 2. Wait for DPLL power state enabled in DPLL_ENABLE. */ - if (intel_wait_for_register(dev_priv, + if (intel_wait_for_register(&dev_priv->uncore, CNL_DPLL_ENABLE(id), PLL_POWER_STATE, PLL_POWER_STATE, @@ -2027,7 +2018,7 @@ static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv, I915_WRITE(CNL_DPLL_ENABLE(id), val); /* 7. Wait for PLL lock status in DPLL_ENABLE. 
*/ - if (intel_wait_for_register(dev_priv, + if (intel_wait_for_register(&dev_priv->uncore, CNL_DPLL_ENABLE(id), PLL_LOCK, PLL_LOCK, @@ -2075,7 +2066,7 @@ static void cnl_ddi_pll_disable(struct drm_i915_private *dev_priv, I915_WRITE(CNL_DPLL_ENABLE(id), val); /* 4. Wait for PLL not locked status in DPLL_ENABLE. */ - if (intel_wait_for_register(dev_priv, + if (intel_wait_for_register(&dev_priv->uncore, CNL_DPLL_ENABLE(id), PLL_LOCK, 0, @@ -2097,7 +2088,7 @@ static void cnl_ddi_pll_disable(struct drm_i915_private *dev_priv, I915_WRITE(CNL_DPLL_ENABLE(id), val); /* 7. Wait for DPLL power state disabled in DPLL_ENABLE. */ - if (intel_wait_for_register(dev_priv, + if (intel_wait_for_register(&dev_priv->uncore, CNL_DPLL_ENABLE(id), PLL_POWER_STATE, 0, @@ -2242,11 +2233,11 @@ int cnl_hdmi_pll_ref_clock(struct drm_i915_private *dev_priv) } static bool -cnl_ddi_calculate_wrpll(int clock, - struct drm_i915_private *dev_priv, +cnl_ddi_calculate_wrpll(struct intel_crtc_state *crtc_state, struct skl_wrpll_params *wrpll_params) { - u32 afe_clock = clock * 5; + struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); + u32 afe_clock = crtc_state->port_clock * 5; u32 ref_clock; u32 dco_min = 7998000; u32 dco_max = 10000000; @@ -2282,23 +2273,20 @@ cnl_ddi_calculate_wrpll(int clock, ref_clock = cnl_hdmi_pll_ref_clock(dev_priv); - cnl_wrpll_params_populate(wrpll_params, best_dco, ref_clock, pdiv, qdiv, - kdiv); + cnl_wrpll_params_populate(wrpll_params, best_dco, ref_clock, + pdiv, qdiv, kdiv); return true; } -static bool cnl_ddi_hdmi_pll_dividers(struct intel_crtc *crtc, - struct intel_crtc_state *crtc_state, - int clock) +static bool cnl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); u32 cfgcr0, cfgcr1; struct skl_wrpll_params wrpll_params = { 0, }; cfgcr0 = DPLL_CFGCR0_HDMI_MODE; - if (!cnl_ddi_calculate_wrpll(clock, dev_priv, &wrpll_params)) + if (!cnl_ddi_calculate_wrpll(crtc_state, &wrpll_params)) return false; cfgcr0 |= DPLL_CFGCR0_DCO_FRACTION(wrpll_params.dco_fraction) | @@ -2319,14 +2307,13 @@ static bool cnl_ddi_hdmi_pll_dividers(struct intel_crtc *crtc, } static bool -cnl_ddi_dp_set_dpll_hw_state(int clock, - struct intel_dpll_hw_state *dpll_hw_state) +cnl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state) { u32 cfgcr0; cfgcr0 = DPLL_CFGCR0_SSC_ENABLE; - switch (clock / 2) { + switch (crtc_state->port_clock / 2) { case 81000: cfgcr0 |= DPLL_CFGCR0_LINK_RATE_810; break; @@ -2356,41 +2343,40 @@ cnl_ddi_dp_set_dpll_hw_state(int clock, break; } - dpll_hw_state->cfgcr0 = cfgcr0; + memset(&crtc_state->dpll_hw_state, 0, + sizeof(crtc_state->dpll_hw_state)); + + crtc_state->dpll_hw_state.cfgcr0 = cfgcr0; + return true; } static struct intel_shared_dpll * -cnl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, +cnl_get_dpll(struct intel_crtc_state *crtc_state, struct intel_encoder *encoder) { struct intel_shared_dpll *pll; - int clock = crtc_state->port_clock; bool bret; - struct intel_dpll_hw_state dpll_hw_state; - - memset(&dpll_hw_state, 0, sizeof(dpll_hw_state)); if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) { - bret = cnl_ddi_hdmi_pll_dividers(crtc, crtc_state, clock); + bret = cnl_ddi_hdmi_pll_dividers(crtc_state); if (!bret) { DRM_DEBUG_KMS("Could not get HDMI pll dividers.\n"); return NULL; } } else if (intel_crtc_has_dp_encoder(crtc_state)) { - bret = cnl_ddi_dp_set_dpll_hw_state(clock, &dpll_hw_state); + bret = cnl_ddi_dp_set_dpll_hw_state(crtc_state); if (!bret) 
{ DRM_DEBUG_KMS("Could not set DP dpll HW state.\n"); return NULL; } - crtc_state->dpll_hw_state = dpll_hw_state; } else { DRM_DEBUG_KMS("Skip DPLL setup for output_types 0x%x\n", crtc_state->output_types); return NULL; } - pll = intel_find_shared_dpll(crtc, crtc_state, + pll = intel_find_shared_dpll(crtc_state, DPLL_ID_SKL_DPLL0, DPLL_ID_SKL_DPLL2); if (!pll) { @@ -2431,47 +2417,69 @@ static const struct intel_dpll_mgr cnl_pll_mgr = { .dump_hw_state = cnl_dump_hw_state, }; +struct icl_combo_pll_params { + int clock; + struct skl_wrpll_params wrpll; +}; + /* * These values alrea already adjusted: they're the bits we write to the * registers, not the logical values. */ -static const struct skl_wrpll_params icl_dp_combo_pll_24MHz_values[] = { - { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [0]: 5.4 */ - .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0}, - { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [1]: 2.7 */ - .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0}, - { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [2]: 1.62 */ - .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0}, - { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [3]: 3.24 */ - .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0}, - { .dco_integer = 0x168, .dco_fraction = 0x0000, /* [4]: 2.16 */ - .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2}, - { .dco_integer = 0x168, .dco_fraction = 0x0000, /* [5]: 4.32 */ - .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0}, - { .dco_integer = 0x195, .dco_fraction = 0x0000, /* [6]: 6.48 */ - .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0}, - { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [7]: 8.1 */ - .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0}, +static const struct icl_combo_pll_params icl_dp_combo_pll_24MHz_values[] = { + { 540000, + { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [0]: 5.4 */ + .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, }, + { 270000, + { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [1]: 2.7 */ + .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, }, + { 162000, + { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [2]: 1.62 */ + .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, }, + { 324000, + { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [3]: 3.24 */ + .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, }, + { 216000, + { .dco_integer = 0x168, .dco_fraction = 0x0000, /* [4]: 2.16 */ + .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, }, + { 432000, + { .dco_integer = 0x168, .dco_fraction = 0x0000, /* [5]: 4.32 */ + .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, }, + { 648000, + { .dco_integer = 0x195, .dco_fraction = 0x0000, /* [6]: 6.48 */ + .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, }, + { 810000, + { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [7]: 8.1 */ + .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, }, }; + /* Also used for 38.4 MHz values. 
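[Editor's note] Keying the ICL combo-PLL tables by link clock, as done above, lets the lookup added later in this patch collapse the old eight-way switch into a single loop. A stand-alone version of that lookup; the struct is abridged and the entries are illustrative rather than a copy of the driver table:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct wrpll { unsigned int dco_integer, pdiv, kdiv; };

struct combo_pll_params {
	int clock;		/* link clock in kHz, the lookup key */
	struct wrpll wrpll;
};

static const struct combo_pll_params pll_24MHz[] = {
	{ 540000, { .dco_integer = 0x151, .pdiv = 2, .kdiv = 1 } },
	{ 270000, { .dco_integer = 0x151, .pdiv = 2, .kdiv = 2 } },
	{ 162000, { .dco_integer = 0x151, .pdiv = 4, .kdiv = 2 } },
};

static bool calc_combo_pll(int clock, struct wrpll *out)
{
	for (size_t i = 0; i < ARRAY_SIZE(pll_24MHz); i++) {
		if (pll_24MHz[i].clock == clock) {
			*out = pll_24MHz[i].wrpll;
			return true;
		}
	}
	return false;	/* the driver logs MISSING_CASE() here */
}

int main(void)
{
	struct wrpll params;

	if (calc_combo_pll(270000, &params))
		printf("dco_integer=%#x pdiv=%u kdiv=%u\n",
		       params.dco_integer, params.pdiv, params.kdiv);
	return 0;
}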
*/ -static const struct skl_wrpll_params icl_dp_combo_pll_19_2MHz_values[] = { - { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [0]: 5.4 */ - .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0}, - { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [1]: 2.7 */ - .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0}, - { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [2]: 1.62 */ - .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0}, - { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [3]: 3.24 */ - .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0}, - { .dco_integer = 0x1C2, .dco_fraction = 0x0000, /* [4]: 2.16 */ - .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2}, - { .dco_integer = 0x1C2, .dco_fraction = 0x0000, /* [5]: 4.32 */ - .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0}, - { .dco_integer = 0x1FA, .dco_fraction = 0x2000, /* [6]: 6.48 */ - .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0}, - { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [7]: 8.1 */ - .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0}, +static const struct icl_combo_pll_params icl_dp_combo_pll_19_2MHz_values[] = { + { 540000, + { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [0]: 5.4 */ + .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, }, + { 270000, + { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [1]: 2.7 */ + .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, }, + { 162000, + { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [2]: 1.62 */ + .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, }, + { 324000, + { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [3]: 3.24 */ + .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, }, + { 216000, + { .dco_integer = 0x1C2, .dco_fraction = 0x0000, /* [4]: 2.16 */ + .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, }, + { 432000, + { .dco_integer = 0x1C2, .dco_fraction = 0x0000, /* [5]: 4.32 */ + .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, }, + { 648000, + { .dco_integer = 0x1FA, .dco_fraction = 0x2000, /* [6]: 6.48 */ + .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, }, + { 810000, + { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [7]: 8.1 */ + .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, }, }; static const struct skl_wrpll_params icl_tbt_pll_24MHz_values = { @@ -2484,72 +2492,53 @@ static const struct skl_wrpll_params icl_tbt_pll_19_2MHz_values = { .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }; -static bool icl_calc_dp_combo_pll(struct drm_i915_private *dev_priv, int clock, +static bool icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state, struct skl_wrpll_params *pll_params) { - const struct skl_wrpll_params *params; - - params = dev_priv->cdclk.hw.ref == 24000 ? - icl_dp_combo_pll_24MHz_values : - icl_dp_combo_pll_19_2MHz_values; + struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); + const struct icl_combo_pll_params *params = + dev_priv->cdclk.hw.ref == 24000 ? 
+ icl_dp_combo_pll_24MHz_values : + icl_dp_combo_pll_19_2MHz_values; + int clock = crtc_state->port_clock; + int i; - switch (clock) { - case 540000: - *pll_params = params[0]; - break; - case 270000: - *pll_params = params[1]; - break; - case 162000: - *pll_params = params[2]; - break; - case 324000: - *pll_params = params[3]; - break; - case 216000: - *pll_params = params[4]; - break; - case 432000: - *pll_params = params[5]; - break; - case 648000: - *pll_params = params[6]; - break; - case 810000: - *pll_params = params[7]; - break; - default: - MISSING_CASE(clock); - return false; + for (i = 0; i < ARRAY_SIZE(icl_dp_combo_pll_24MHz_values); i++) { + if (clock == params[i].clock) { + *pll_params = params[i].wrpll; + return true; + } } - return true; + MISSING_CASE(clock); + return false; } -static bool icl_calc_tbt_pll(struct drm_i915_private *dev_priv, int clock, +static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state, struct skl_wrpll_params *pll_params) { + struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); + *pll_params = dev_priv->cdclk.hw.ref == 24000 ? icl_tbt_pll_24MHz_values : icl_tbt_pll_19_2MHz_values; return true; } static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state, - struct intel_encoder *encoder, int clock, - struct intel_dpll_hw_state *pll_state) + struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); u32 cfgcr0, cfgcr1; struct skl_wrpll_params pll_params = { 0 }; bool ret; if (intel_port_is_tc(dev_priv, encoder->port)) - ret = icl_calc_tbt_pll(dev_priv, clock, &pll_params); + ret = icl_calc_tbt_pll(crtc_state, &pll_params); else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) || intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI)) - ret = cnl_ddi_calculate_wrpll(clock, dev_priv, &pll_params); + ret = cnl_ddi_calculate_wrpll(crtc_state, &pll_params); else - ret = icl_calc_dp_combo_pll(dev_priv, clock, &pll_params); + ret = icl_calc_dp_combo_pll(crtc_state, &pll_params); if (!ret) return false; @@ -2563,82 +2552,16 @@ static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state, DPLL_CFGCR1_PDIV(pll_params.pdiv) | DPLL_CFGCR1_CENTRAL_FREQ_8400; - pll_state->cfgcr0 = cfgcr0; - pll_state->cfgcr1 = cfgcr1; - return true; -} - -int icl_calc_dp_combo_pll_link(struct drm_i915_private *dev_priv, - u32 pll_id) -{ - u32 cfgcr0, cfgcr1; - u32 pdiv, kdiv, qdiv_mode, qdiv_ratio, dco_integer, dco_fraction; - const struct skl_wrpll_params *params; - int index, n_entries, link_clock; - - /* Read back values from DPLL CFGCR registers */ - cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(pll_id)); - cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(pll_id)); - - dco_integer = cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK; - dco_fraction = (cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >> - DPLL_CFGCR0_DCO_FRACTION_SHIFT; - pdiv = (cfgcr1 & DPLL_CFGCR1_PDIV_MASK) >> DPLL_CFGCR1_PDIV_SHIFT; - kdiv = (cfgcr1 & DPLL_CFGCR1_KDIV_MASK) >> DPLL_CFGCR1_KDIV_SHIFT; - qdiv_mode = (cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1)) >> - DPLL_CFGCR1_QDIV_MODE_SHIFT; - qdiv_ratio = (cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >> - DPLL_CFGCR1_QDIV_RATIO_SHIFT; - - params = dev_priv->cdclk.hw.ref == 24000 ? 
- icl_dp_combo_pll_24MHz_values : - icl_dp_combo_pll_19_2MHz_values; - n_entries = ARRAY_SIZE(icl_dp_combo_pll_24MHz_values); - - for (index = 0; index < n_entries; index++) { - if (dco_integer == params[index].dco_integer && - dco_fraction == params[index].dco_fraction && - pdiv == params[index].pdiv && - kdiv == params[index].kdiv && - qdiv_mode == params[index].qdiv_mode && - qdiv_ratio == params[index].qdiv_ratio) - break; - } + memset(&crtc_state->dpll_hw_state, 0, + sizeof(crtc_state->dpll_hw_state)); - /* Map PLL Index to Link Clock */ - switch (index) { - default: - MISSING_CASE(index); - /* fall through */ - case 0: - link_clock = 540000; - break; - case 1: - link_clock = 270000; - break; - case 2: - link_clock = 162000; - break; - case 3: - link_clock = 324000; - break; - case 4: - link_clock = 216000; - break; - case 5: - link_clock = 432000; - break; - case 6: - link_clock = 648000; - break; - case 7: - link_clock = 810000; - break; - } + crtc_state->dpll_hw_state.cfgcr0 = cfgcr0; + crtc_state->dpll_hw_state.cfgcr1 = cfgcr1; - return link_clock; + return true; } + static enum tc_port icl_pll_id_to_tc_port(enum intel_dpll_id id) { return id - DPLL_ID_ICL_MGPLL1; @@ -2649,11 +2572,6 @@ enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port) return tc_port + DPLL_ID_ICL_MGPLL1; } -bool intel_dpll_is_combophy(enum intel_dpll_id id) -{ - return id == DPLL_ID_ICL_DPLL0 || id == DPLL_ID_ICL_DPLL1; -} - static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc, u32 *target_dco_khz, struct intel_dpll_hw_state *state) @@ -2728,12 +2646,12 @@ static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc, * The specification for this function uses real numbers, so the math had to be * adapted to integer-only calculation, that's why it looks so different. 
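[Editor's note] icl_calc_mg_pll_state(), reworked in the next hunk, keeps the spec's real-number formulas but in integer arithmetic. The usual trick is splitting a divider into an integer part plus a fixed-point fraction computed through a 64-bit intermediate, roughly as sketched here; the 22-bit fraction width mirrors the MG PLL fields, the sample numbers are arbitrary:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Split dco / (refclk * m1div) into an integer part and a 22-bit fraction. */
static void calc_m2_divider(uint32_t dco_khz, uint32_t refclk_khz, uint32_t m1div,
			    uint32_t *m2_int, uint32_t *m2_frac)
{
	uint32_t denom = refclk_khz * m1div;
	uint32_t rem;

	*m2_int = dco_khz / denom;
	rem = dco_khz % denom;

	/* 64-bit intermediate so the << 22 cannot overflow before the divide. */
	*m2_frac = (uint32_t)(((uint64_t)rem << 22) / denom);
}

int main(void)
{
	uint32_t m2_int, m2_frac;

	/* Arbitrary example: 8.1 GHz DCO, 38.4 MHz refclk, m1div = 2. */
	calc_m2_divider(8100000, 38400, 2, &m2_int, &m2_frac);
	printf("m2 = %" PRIu32 " + %" PRIu32 "/2^22\n", m2_int, m2_frac);
	return 0;
}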
*/ -static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state, - struct intel_encoder *encoder, int clock, - struct intel_dpll_hw_state *pll_state) +static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); + struct intel_dpll_hw_state *pll_state = &crtc_state->dpll_hw_state; int refclk_khz = dev_priv->cdclk.hw.ref; + int clock = crtc_state->port_clock; u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac; u32 iref_ndiv, iref_trim, iref_pulse_w; u32 prop_coeff, int_coeff; @@ -2743,6 +2661,8 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state, bool use_ssc = false; bool is_dp = !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI); + memset(pll_state, 0, sizeof(*pll_state)); + if (!icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz, pll_state)) { DRM_DEBUG_KMS("Failed to find divisors for clock %d\n", clock); @@ -2892,23 +2812,20 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state, } static struct intel_shared_dpll * -icl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, +icl_get_dpll(struct intel_crtc_state *crtc_state, struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); struct intel_digital_port *intel_dig_port; struct intel_shared_dpll *pll; - struct intel_dpll_hw_state pll_state = {}; enum port port = encoder->port; enum intel_dpll_id min, max; - int clock = crtc_state->port_clock; bool ret; if (intel_port_is_combophy(dev_priv, port)) { min = DPLL_ID_ICL_DPLL0; max = DPLL_ID_ICL_DPLL1; - ret = icl_calc_dpll_state(crtc_state, encoder, clock, - &pll_state); + ret = icl_calc_dpll_state(crtc_state, encoder); } else if (intel_port_is_tc(dev_priv, port)) { if (encoder->type == INTEL_OUTPUT_DP_MST) { struct intel_dp_mst_encoder *mst_encoder; @@ -2922,16 +2839,14 @@ icl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, if (intel_dig_port->tc_type == TC_PORT_TBT) { min = DPLL_ID_ICL_TBTPLL; max = min; - ret = icl_calc_dpll_state(crtc_state, encoder, clock, - &pll_state); + ret = icl_calc_dpll_state(crtc_state, encoder); } else { enum tc_port tc_port; tc_port = intel_port_to_tc(dev_priv, port); min = icl_tc_port_to_pll_id(tc_port); max = min; - ret = icl_calc_mg_pll_state(crtc_state, encoder, clock, - &pll_state); + ret = icl_calc_mg_pll_state(crtc_state); } } else { MISSING_CASE(port); @@ -2943,9 +2858,8 @@ icl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, return NULL; } - crtc_state->dpll_hw_state = pll_state; - pll = intel_find_shared_dpll(crtc, crtc_state, min, max); + pll = intel_find_shared_dpll(crtc_state, min, max); if (!pll) { DRM_DEBUG_KMS("No PLL selected\n"); return NULL; @@ -2956,19 +2870,72 @@ icl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, return pll; } -static i915_reg_t icl_pll_id_to_enable_reg(enum intel_dpll_id id) +static bool mg_pll_get_hw_state(struct drm_i915_private *dev_priv, + struct intel_shared_dpll *pll, + struct intel_dpll_hw_state *hw_state) { - if (intel_dpll_is_combophy(id)) - return CNL_DPLL_ENABLE(id); - else if (id == DPLL_ID_ICL_TBTPLL) - return TBT_PLL_ENABLE; + const enum intel_dpll_id id = pll->info->id; + enum tc_port tc_port = icl_pll_id_to_tc_port(id); + intel_wakeref_t wakeref; + bool ret = false; + u32 val; + + wakeref = 
intel_display_power_get_if_enabled(dev_priv, + POWER_DOMAIN_PLLS); + if (!wakeref) + return false; - return MG_PLL_ENABLE(icl_pll_id_to_tc_port(id)); + val = I915_READ(MG_PLL_ENABLE(tc_port)); + if (!(val & PLL_ENABLE)) + goto out; + + hw_state->mg_refclkin_ctl = I915_READ(MG_REFCLKIN_CTL(tc_port)); + hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK; + + hw_state->mg_clktop2_coreclkctl1 = + I915_READ(MG_CLKTOP2_CORECLKCTL1(tc_port)); + hw_state->mg_clktop2_coreclkctl1 &= + MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK; + + hw_state->mg_clktop2_hsclkctl = + I915_READ(MG_CLKTOP2_HSCLKCTL(tc_port)); + hw_state->mg_clktop2_hsclkctl &= + MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK | + MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK | + MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK | + MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK; + + hw_state->mg_pll_div0 = I915_READ(MG_PLL_DIV0(tc_port)); + hw_state->mg_pll_div1 = I915_READ(MG_PLL_DIV1(tc_port)); + hw_state->mg_pll_lf = I915_READ(MG_PLL_LF(tc_port)); + hw_state->mg_pll_frac_lock = I915_READ(MG_PLL_FRAC_LOCK(tc_port)); + hw_state->mg_pll_ssc = I915_READ(MG_PLL_SSC(tc_port)); + + hw_state->mg_pll_bias = I915_READ(MG_PLL_BIAS(tc_port)); + hw_state->mg_pll_tdc_coldst_bias = + I915_READ(MG_PLL_TDC_COLDST_BIAS(tc_port)); + + if (dev_priv->cdclk.hw.ref == 38400) { + hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART; + hw_state->mg_pll_bias_mask = 0; + } else { + hw_state->mg_pll_tdc_coldst_bias_mask = -1U; + hw_state->mg_pll_bias_mask = -1U; + } + + hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask; + hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask; + + ret = true; +out: + intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS, wakeref); + return ret; } static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll, - struct intel_dpll_hw_state *hw_state) + struct intel_dpll_hw_state *hw_state, + i915_reg_t enable_reg) { const enum intel_dpll_id id = pll->info->id; intel_wakeref_t wakeref; @@ -2980,54 +2947,12 @@ static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv, if (!wakeref) return false; - val = I915_READ(icl_pll_id_to_enable_reg(id)); + val = I915_READ(enable_reg); if (!(val & PLL_ENABLE)) goto out; - if (intel_dpll_is_combophy(id) || - id == DPLL_ID_ICL_TBTPLL) { - hw_state->cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(id)); - hw_state->cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(id)); - } else { - enum tc_port tc_port = icl_pll_id_to_tc_port(id); - - hw_state->mg_refclkin_ctl = I915_READ(MG_REFCLKIN_CTL(tc_port)); - hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK; - - hw_state->mg_clktop2_coreclkctl1 = - I915_READ(MG_CLKTOP2_CORECLKCTL1(tc_port)); - hw_state->mg_clktop2_coreclkctl1 &= - MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK; - - hw_state->mg_clktop2_hsclkctl = - I915_READ(MG_CLKTOP2_HSCLKCTL(tc_port)); - hw_state->mg_clktop2_hsclkctl &= - MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK | - MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK | - MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK | - MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK; - - hw_state->mg_pll_div0 = I915_READ(MG_PLL_DIV0(tc_port)); - hw_state->mg_pll_div1 = I915_READ(MG_PLL_DIV1(tc_port)); - hw_state->mg_pll_lf = I915_READ(MG_PLL_LF(tc_port)); - hw_state->mg_pll_frac_lock = I915_READ(MG_PLL_FRAC_LOCK(tc_port)); - hw_state->mg_pll_ssc = I915_READ(MG_PLL_SSC(tc_port)); - - hw_state->mg_pll_bias = I915_READ(MG_PLL_BIAS(tc_port)); - hw_state->mg_pll_tdc_coldst_bias = - I915_READ(MG_PLL_TDC_COLDST_BIAS(tc_port)); - - if (dev_priv->cdclk.hw.ref == 38400) { - 
hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART; - hw_state->mg_pll_bias_mask = 0; - } else { - hw_state->mg_pll_tdc_coldst_bias_mask = -1U; - hw_state->mg_pll_bias_mask = -1U; - } - - hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask; - hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask; - } + hw_state->cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(id)); + hw_state->cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(id)); ret = true; out: @@ -3035,6 +2960,21 @@ out: return ret; } +static bool combo_pll_get_hw_state(struct drm_i915_private *dev_priv, + struct intel_shared_dpll *pll, + struct intel_dpll_hw_state *hw_state) +{ + return icl_pll_get_hw_state(dev_priv, pll, hw_state, + CNL_DPLL_ENABLE(pll->info->id)); +} + +static bool tbt_pll_get_hw_state(struct drm_i915_private *dev_priv, + struct intel_shared_dpll *pll, + struct intel_dpll_hw_state *hw_state) +{ + return icl_pll_get_hw_state(dev_priv, pll, hw_state, TBT_PLL_ENABLE); +} + static void icl_dpll_write(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll) { @@ -3096,11 +3036,10 @@ static void icl_mg_pll_write(struct drm_i915_private *dev_priv, POSTING_READ(MG_PLL_TDC_COLDST_BIAS(tc_port)); } -static void icl_pll_enable(struct drm_i915_private *dev_priv, - struct intel_shared_dpll *pll) +static void icl_pll_power_enable(struct drm_i915_private *dev_priv, + struct intel_shared_dpll *pll, + i915_reg_t enable_reg) { - const enum intel_dpll_id id = pll->info->id; - i915_reg_t enable_reg = icl_pll_id_to_enable_reg(id); u32 val; val = I915_READ(enable_reg); @@ -3111,37 +3050,90 @@ static void icl_pll_enable(struct drm_i915_private *dev_priv, * The spec says we need to "wait" but it also says it should be * immediate. */ - if (intel_wait_for_register(dev_priv, enable_reg, PLL_POWER_STATE, - PLL_POWER_STATE, 1)) - DRM_ERROR("PLL %d Power not enabled\n", id); + if (intel_wait_for_register(&dev_priv->uncore, enable_reg, + PLL_POWER_STATE, PLL_POWER_STATE, 1)) + DRM_ERROR("PLL %d Power not enabled\n", pll->info->id); +} - if (intel_dpll_is_combophy(id) || id == DPLL_ID_ICL_TBTPLL) - icl_dpll_write(dev_priv, pll); - else - icl_mg_pll_write(dev_priv, pll); +static void icl_pll_enable(struct drm_i915_private *dev_priv, + struct intel_shared_dpll *pll, + i915_reg_t enable_reg) +{ + u32 val; + + val = I915_READ(enable_reg); + val |= PLL_ENABLE; + I915_WRITE(enable_reg, val); + + /* Timeout is actually 600us. */ + if (intel_wait_for_register(&dev_priv->uncore, enable_reg, + PLL_LOCK, PLL_LOCK, 1)) + DRM_ERROR("PLL %d not locked\n", pll->info->id); +} + +static void combo_pll_enable(struct drm_i915_private *dev_priv, + struct intel_shared_dpll *pll) +{ + i915_reg_t enable_reg = CNL_DPLL_ENABLE(pll->info->id); + + icl_pll_power_enable(dev_priv, pll, enable_reg); + + icl_dpll_write(dev_priv, pll); /* * DVFS pre sequence would be here, but in our driver the cdclk code * paths should already be setting the appropriate voltage, hence we do - * nothign here. + * nothing here. */ - val = I915_READ(enable_reg); - val |= PLL_ENABLE; - I915_WRITE(enable_reg, val); + icl_pll_enable(dev_priv, pll, enable_reg); - if (intel_wait_for_register(dev_priv, enable_reg, PLL_LOCK, PLL_LOCK, - 1)) /* 600us actually. */ - DRM_ERROR("PLL %d not locked\n", id); + /* DVFS post sequence would be here. See the comment above. 
*/ +} + +static void tbt_pll_enable(struct drm_i915_private *dev_priv, + struct intel_shared_dpll *pll) +{ + icl_pll_power_enable(dev_priv, pll, TBT_PLL_ENABLE); + + icl_dpll_write(dev_priv, pll); + + /* + * DVFS pre sequence would be here, but in our driver the cdclk code + * paths should already be setting the appropriate voltage, hence we do + * nothing here. + */ + + icl_pll_enable(dev_priv, pll, TBT_PLL_ENABLE); + + /* DVFS post sequence would be here. See the comment above. */ +} + +static void mg_pll_enable(struct drm_i915_private *dev_priv, + struct intel_shared_dpll *pll) +{ + i915_reg_t enable_reg = + MG_PLL_ENABLE(icl_pll_id_to_tc_port(pll->info->id)); + + icl_pll_power_enable(dev_priv, pll, enable_reg); + + icl_mg_pll_write(dev_priv, pll); + + /* + * DVFS pre sequence would be here, but in our driver the cdclk code + * paths should already be setting the appropriate voltage, hence we do + * nothing here. + */ + + icl_pll_enable(dev_priv, pll, enable_reg); /* DVFS post sequence would be here. See the comment above. */ } static void icl_pll_disable(struct drm_i915_private *dev_priv, - struct intel_shared_dpll *pll) + struct intel_shared_dpll *pll, + i915_reg_t enable_reg) { - const enum intel_dpll_id id = pll->info->id; - i915_reg_t enable_reg = icl_pll_id_to_enable_reg(id); u32 val; /* The first steps are done by intel_ddi_post_disable(). */ @@ -3157,8 +3149,9 @@ static void icl_pll_disable(struct drm_i915_private *dev_priv, I915_WRITE(enable_reg, val); /* Timeout is actually 1us. */ - if (intel_wait_for_register(dev_priv, enable_reg, PLL_LOCK, 0, 1)) - DRM_ERROR("PLL %d locked\n", id); + if (intel_wait_for_register(&dev_priv->uncore, + enable_reg, PLL_LOCK, 0, 1)) + DRM_ERROR("PLL %d locked\n", pll->info->id); /* DVFS post sequence would be here. See the comment above. */ @@ -3170,9 +3163,30 @@ static void icl_pll_disable(struct drm_i915_private *dev_priv, * The spec says we need to "wait" but it also says it should be * immediate. 
*/ - if (intel_wait_for_register(dev_priv, enable_reg, PLL_POWER_STATE, 0, - 1)) - DRM_ERROR("PLL %d Power not disabled\n", id); + if (intel_wait_for_register(&dev_priv->uncore, + enable_reg, PLL_POWER_STATE, 0, 1)) + DRM_ERROR("PLL %d Power not disabled\n", pll->info->id); +} + +static void combo_pll_disable(struct drm_i915_private *dev_priv, + struct intel_shared_dpll *pll) +{ + icl_pll_disable(dev_priv, pll, CNL_DPLL_ENABLE(pll->info->id)); +} + +static void tbt_pll_disable(struct drm_i915_private *dev_priv, + struct intel_shared_dpll *pll) +{ + icl_pll_disable(dev_priv, pll, TBT_PLL_ENABLE); +} + +static void mg_pll_disable(struct drm_i915_private *dev_priv, + struct intel_shared_dpll *pll) +{ + i915_reg_t enable_reg = + MG_PLL_ENABLE(icl_pll_id_to_tc_port(pll->info->id)); + + icl_pll_disable(dev_priv, pll, enable_reg); } static void icl_dump_hw_state(struct drm_i915_private *dev_priv, @@ -3197,20 +3211,32 @@ static void icl_dump_hw_state(struct drm_i915_private *dev_priv, hw_state->mg_pll_tdc_coldst_bias); } -static const struct intel_shared_dpll_funcs icl_pll_funcs = { - .enable = icl_pll_enable, - .disable = icl_pll_disable, - .get_hw_state = icl_pll_get_hw_state, +static const struct intel_shared_dpll_funcs combo_pll_funcs = { + .enable = combo_pll_enable, + .disable = combo_pll_disable, + .get_hw_state = combo_pll_get_hw_state, +}; + +static const struct intel_shared_dpll_funcs tbt_pll_funcs = { + .enable = tbt_pll_enable, + .disable = tbt_pll_disable, + .get_hw_state = tbt_pll_get_hw_state, +}; + +static const struct intel_shared_dpll_funcs mg_pll_funcs = { + .enable = mg_pll_enable, + .disable = mg_pll_disable, + .get_hw_state = mg_pll_get_hw_state, }; static const struct dpll_info icl_plls[] = { - { "DPLL 0", &icl_pll_funcs, DPLL_ID_ICL_DPLL0, 0 }, - { "DPLL 1", &icl_pll_funcs, DPLL_ID_ICL_DPLL1, 0 }, - { "TBT PLL", &icl_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 }, - { "MG PLL 1", &icl_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 }, - { "MG PLL 2", &icl_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 }, - { "MG PLL 3", &icl_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 }, - { "MG PLL 4", &icl_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 }, + { "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 }, + { "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 }, + { "TBT PLL", &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 }, + { "MG PLL 1", &mg_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 }, + { "MG PLL 2", &mg_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 }, + { "MG PLL 3", &mg_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 }, + { "MG PLL 4", &mg_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 }, { }, }; @@ -3220,6 +3246,18 @@ static const struct intel_dpll_mgr icl_pll_mgr = { .dump_hw_state = icl_dump_hw_state, }; +static const struct dpll_info ehl_plls[] = { + { "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 }, + { "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 }, + { }, +}; + +static const struct intel_dpll_mgr ehl_pll_mgr = { + .dpll_info = ehl_plls, + .get_dpll = icl_get_dpll, + .dump_hw_state = icl_dump_hw_state, +}; + /** * intel_shared_dpll_init - Initialize shared DPLLs * @dev: drm device @@ -3233,7 +3271,9 @@ void intel_shared_dpll_init(struct drm_device *dev) const struct dpll_info *dpll_info; int i; - if (IS_ICELAKE(dev_priv)) + if (IS_ELKHARTLAKE(dev_priv)) + dpll_mgr = &ehl_pll_mgr; + else if (INTEL_GEN(dev_priv) >= 11) dpll_mgr = &icl_pll_mgr; else if (IS_CANNONLAKE(dev_priv)) dpll_mgr = &cnl_pll_mgr; @@ -3271,31 +3311,29 @@ void intel_shared_dpll_init(struct drm_device *dev) /** * intel_get_shared_dpll - get a shared DPLL for CRTC and encoder combination - * @crtc: CRTC - * @crtc_state: 
atomic state for @crtc + * @crtc_state: atomic state for the crtc * @encoder: encoder * * Find an appropriate DPLL for the given CRTC and encoder combination. A - * reference from the @crtc to the returned pll is registered in the atomic - * state. That configuration is made effective by calling + * reference from the @crtc_state to the returned pll is registered in the + * atomic state. That configuration is made effective by calling * intel_shared_dpll_swap_state(). The reference should be released by calling * intel_release_shared_dpll(). * * Returns: - * A shared DPLL to be used by @crtc and @encoder with the given @crtc_state. + * A shared DPLL to be used by @crtc_state and @encoder. */ struct intel_shared_dpll * -intel_get_shared_dpll(struct intel_crtc *crtc, - struct intel_crtc_state *crtc_state, +intel_get_shared_dpll(struct intel_crtc_state *crtc_state, struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll_mgr; if (WARN_ON(!dpll_mgr)) return NULL; - return dpll_mgr->get_dpll(crtc, crtc_state, encoder); + return dpll_mgr->get_dpll(crtc_state, encoder); } /** diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.h b/drivers/gpu/drm/i915/intel_dpll_mgr.h index 40e8391a92f2..bd8124cc81ed 100644 --- a/drivers/gpu/drm/i915/intel_dpll_mgr.h +++ b/drivers/gpu/drm/i915/intel_dpll_mgr.h @@ -327,8 +327,7 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv, bool state); #define assert_shared_dpll_enabled(d, p) assert_shared_dpll(d, p, true) #define assert_shared_dpll_disabled(d, p) assert_shared_dpll(d, p, false) -struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc, - struct intel_crtc_state *state, +struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc_state *state, struct intel_encoder *encoder); void intel_release_shared_dpll(struct intel_shared_dpll *dpll, struct intel_crtc *crtc, @@ -341,8 +340,6 @@ void intel_shared_dpll_init(struct drm_device *dev); void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv, struct intel_dpll_hw_state *hw_state); -int icl_calc_dp_combo_pll_link(struct drm_i915_private *dev_priv, - u32 pll_id); int cnl_hdmi_pll_ref_clock(struct drm_i915_private *dev_priv); enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port); bool intel_dpll_is_combophy(enum intel_dpll_id id); diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 375f51d14dda..f8c7b291fdc3 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -41,6 +41,7 @@ #include <drm/drm_rect.h> #include <drm/drm_vblank.h> #include <drm/drm_atomic.h> +#include <drm/i915_mei_hdcp_interface.h> #include <media/cec-notifier.h> struct drm_printer; @@ -323,6 +324,13 @@ struct intel_panel { struct intel_digital_port; +enum check_link_response { + HDCP_LINK_PROTECTED = 0, + HDCP_TOPOLOGY_CHANGE, + HDCP_LINK_INTEGRITY_FAILURE, + HDCP_REAUTH_REQUEST +}; + /* * This structure serves as a translation layer between the generic HDCP code * and the bus-specific code. What that means is that HDCP over HDMI differs @@ -395,6 +403,32 @@ struct intel_hdcp_shim { /* Detects panel's hdcp capability. This is optional for HDMI. 
*/ int (*hdcp_capable)(struct intel_digital_port *intel_dig_port, bool *hdcp_capable); + + /* HDCP adaptation(DP/HDMI) required on the port */ + enum hdcp_wired_protocol protocol; + + /* Detects whether sink is HDCP2.2 capable */ + int (*hdcp_2_2_capable)(struct intel_digital_port *intel_dig_port, + bool *capable); + + /* Write HDCP2.2 messages */ + int (*write_2_2_msg)(struct intel_digital_port *intel_dig_port, + void *buf, size_t size); + + /* Read HDCP2.2 messages */ + int (*read_2_2_msg)(struct intel_digital_port *intel_dig_port, + u8 msg_id, void *buf, size_t size); + + /* + * Implementation of DP HDCP2.2 Errata for the communication of stream + * type to Receivers. In DP HDCP2.2 Stream type is one of the input to + * the HDCP2.2 Cipher for En/De-Cryption. Not applicable for HDMI. + */ + int (*config_stream_type)(struct intel_digital_port *intel_dig_port, + bool is_repeater, u8 type); + + /* HDCP2.2 Link Integrity Check */ + int (*check_2_2_link)(struct intel_digital_port *intel_dig_port); }; struct intel_hdcp { @@ -404,6 +438,50 @@ struct intel_hdcp { u64 value; struct delayed_work check_work; struct work_struct prop_work; + + /* HDCP1.4 Encryption status */ + bool hdcp_encrypted; + + /* HDCP2.2 related definitions */ + /* Flag indicates whether this connector supports HDCP2.2 or not. */ + bool hdcp2_supported; + + /* HDCP2.2 Encryption status */ + bool hdcp2_encrypted; + + /* + * Content Stream Type defined by content owner. TYPE0(0x0) content can + * flow in the link protected by HDCP2.2 or HDCP1.4, where as TYPE1(0x1) + * content can flow only through a link protected by HDCP2.2. + */ + u8 content_type; + struct hdcp_port_data port_data; + + bool is_paired; + bool is_repeater; + + /* + * Count of ReceiverID_List received. Initialized to 0 at AKE_INIT. + * Incremented after processing the RepeaterAuth_Send_ReceiverID_List. + * When it rolls over re-auth has to be triggered. + */ + u32 seq_num_v; + + /* + * Count of RepeaterAuth_Stream_Manage msg propagated. + * Initialized to 0 on AKE_INIT. Incremented after every successful + * transmission of RepeaterAuth_Stream_Manage message. When it rolls + * over re-Auth has to be triggered. + */ + u32 seq_num_m; + + /* + * Work queue to signal the CP_IRQ. Used for the waiters to read the + * available information from HDCP DP sink. + */ + wait_queue_head_t cp_irq_queue; + atomic_t cp_irq_count; + int cp_irq_count_cached; }; struct intel_connector { @@ -921,7 +999,8 @@ struct intel_crtc_state { struct intel_link_m_n fdi_m_n; bool ips_enabled; - bool ips_force_disable; + + bool crc_enabled; bool enable_fbc; @@ -942,13 +1021,30 @@ struct intel_crtc_state { /* Gamma mode programmed on the pipe */ u32 gamma_mode; + union { + /* CSC mode programmed on the pipe */ + u32 csc_mode; + + /* CHV CGM mode */ + u32 cgm_mode; + }; + /* bitmask of visible planes (enum plane_id) */ u8 active_planes; u8 nv12_planes; + u8 c8_planes; /* bitmask of planes that will be updated during the commit */ u8 update_planes; + struct { + u32 enable; + u32 gcp; + union hdmi_infoframe avi; + union hdmi_infoframe spd; + union hdmi_infoframe hdmi; + } infoframes; + /* HDMI scrambling status */ bool hdmi_scrambling; @@ -961,6 +1057,12 @@ struct intel_crtc_state { /* Output down scaling is done in LSPCON device */ bool lspcon_downsampling; + /* enable pipe gamma? */ + bool gamma_enable; + + /* enable pipe csc? 
*/ + bool csc_enable; + /* Display Stream compression state */ struct { bool compression_enable; @@ -989,9 +1091,6 @@ struct intel_crtc { struct intel_crtc_state *config; - /* global reset count when the last flip was submitted */ - unsigned int reset_count; - /* Access to these should be protected by dev_priv->irq_lock. */ bool cpu_fifo_underrun_disabled; bool pch_fifo_underrun_disabled; @@ -1260,11 +1359,15 @@ struct intel_digital_port { const struct intel_crtc_state *crtc_state, unsigned int type, const void *frame, ssize_t len); + void (*read_infoframe)(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + unsigned int type, + void *frame, ssize_t len); void (*set_infoframes)(struct intel_encoder *encoder, bool enable, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state); - bool (*infoframe_enabled)(struct intel_encoder *encoder, + u32 (*infoframes_enabled)(struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config); }; @@ -1556,7 +1659,7 @@ int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder, bool enable); void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder); int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv, - enum intel_dpll_id pll_id); + struct intel_dpll_hw_state *state); unsigned int intel_fb_align_height(const struct drm_framebuffer *fb, int color_plane, unsigned int height); @@ -1738,7 +1841,7 @@ void intel_dp_get_m_n(struct intel_crtc *crtc, void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_set m_n); int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n); -bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock, +bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, struct dpll *best_clock); int chv_calc_dpll_params(int refclk, struct dpll *pll_clock); @@ -1806,6 +1909,16 @@ void intel_csr_ucode_suspend(struct drm_i915_private *); void intel_csr_ucode_resume(struct drm_i915_private *); /* intel_dp.c */ +struct link_config_limits { + int min_clock, max_clock; + int min_lane_count, max_lane_count; + int min_bpp, max_bpp; +}; +void intel_dp_adjust_compliance_config(struct intel_dp *intel_dp, + struct intel_crtc_state *pipe_config, + struct link_config_limits *limits); +bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state); bool intel_dp_port_enabled(struct drm_i915_private *dev_priv, i915_reg_t dp_reg, enum port port, enum pipe *pipe); @@ -2000,13 +2113,22 @@ bool intel_hdmi_handle_sink_scrambling(struct intel_encoder *encoder, bool scrambling); void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable); void intel_infoframe_init(struct intel_digital_port *intel_dig_port); +u32 intel_hdmi_infoframes_enabled(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state); +u32 intel_hdmi_infoframe_enable(unsigned int type); +void intel_hdmi_read_gcp_infoframe(struct intel_encoder *encoder, + struct intel_crtc_state *crtc_state); +void intel_read_infoframe(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + enum hdmi_infoframe_type type, + union hdmi_infoframe *frame); /* intel_lvds.c */ bool intel_lvds_port_enabled(struct drm_i915_private *dev_priv, i915_reg_t lvds_reg, enum pipe *pipe); void intel_lvds_init(struct drm_i915_private *dev_priv); -struct intel_encoder *intel_get_lvds_encoder(struct drm_device *dev); -bool intel_is_dual_link_lvds(struct 
drm_device *dev); +struct intel_encoder *intel_get_lvds_encoder(struct drm_i915_private *dev_priv); +bool intel_is_dual_link_lvds(struct drm_i915_private *dev_priv); /* intel_overlay.c */ void intel_overlay_setup(struct drm_i915_private *dev_priv); @@ -2042,10 +2164,13 @@ void intel_panel_update_backlight(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state); void intel_panel_disable_backlight(const struct drm_connector_state *old_conn_state); -extern struct drm_display_mode *intel_find_panel_downclock( - struct drm_i915_private *dev_priv, - struct drm_display_mode *fixed_mode, - struct drm_connector *connector); +struct drm_display_mode * +intel_panel_edid_downclock_mode(struct intel_connector *connector, + const struct drm_display_mode *fixed_mode); +struct drm_display_mode * +intel_panel_edid_fixed_mode(struct intel_connector *connector); +struct drm_display_mode * +intel_panel_vbt_fixed_mode(struct intel_connector *connector); #if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE) int intel_backlight_device_register(struct intel_connector *connector); @@ -2068,9 +2193,12 @@ int intel_hdcp_init(struct intel_connector *connector, const struct intel_hdcp_shim *hdcp_shim); int intel_hdcp_enable(struct intel_connector *connector); int intel_hdcp_disable(struct intel_connector *connector); -int intel_hdcp_check_link(struct intel_connector *connector); bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port); bool intel_hdcp_capable(struct intel_connector *connector); +void intel_hdcp_component_init(struct drm_i915_private *dev_priv); +void intel_hdcp_component_fini(struct drm_i915_private *dev_priv); +void intel_hdcp_cleanup(struct intel_connector *connector); +void intel_hdcp_handle_cp_irq(struct intel_connector *connector); /* intel_psr.c */ #define CAN_PSR(dev_priv) (HAS_PSR(dev_priv) && dev_priv->psr.sink_support) @@ -2079,9 +2207,9 @@ void intel_psr_enable(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state); void intel_psr_disable(struct intel_dp *intel_dp, const struct intel_crtc_state *old_crtc_state); -int intel_psr_set_debugfs_mode(struct drm_i915_private *dev_priv, - struct drm_modeset_acquire_ctx *ctx, - u64 value); +void intel_psr_update(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state); +int intel_psr_debug_set(struct drm_i915_private *dev_priv, u64 value); void intel_psr_invalidate(struct drm_i915_private *dev_priv, unsigned frontbuffer_bits, enum fb_op_origin origin); @@ -2152,20 +2280,26 @@ void icl_dbuf_slices_update(struct drm_i915_private *dev_priv, u8 req_slices); static inline void -assert_rpm_device_not_suspended(struct drm_i915_private *i915) +assert_rpm_device_not_suspended(struct i915_runtime_pm *rpm) { - WARN_ONCE(i915->runtime_pm.suspended, + WARN_ONCE(rpm->suspended, "Device suspended during HW access\n"); } static inline void -assert_rpm_wakelock_held(struct drm_i915_private *i915) +__assert_rpm_wakelock_held(struct i915_runtime_pm *rpm) { - assert_rpm_device_not_suspended(i915); - WARN_ONCE(!atomic_read(&i915->runtime_pm.wakeref_count), + assert_rpm_device_not_suspended(rpm); + WARN_ONCE(!atomic_read(&rpm->wakeref_count), "RPM wakelock ref not held during HW access"); } +static inline void +assert_rpm_wakelock_held(struct drm_i915_private *i915) +{ + __assert_rpm_wakelock_held(&i915->runtime_pm); +} + /** * disable_rpm_wakeref_asserts - disable the RPM assert checks * @i915: i915 device instance @@ -2257,11 +2391,10 @@ void 
intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv); void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv); void intel_enable_gt_powersave(struct drm_i915_private *dev_priv); void intel_disable_gt_powersave(struct drm_i915_private *dev_priv); -void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv); void gen6_rps_busy(struct drm_i915_private *dev_priv); void gen6_rps_reset_ei(struct drm_i915_private *dev_priv); void gen6_rps_idle(struct drm_i915_private *dev_priv); -void gen6_rps_boost(struct i915_request *rq, struct intel_rps_client *rps); +void gen6_rps_boost(struct i915_request *rq); void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv); void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv); void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv); @@ -2375,6 +2508,14 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv, struct intel_crtc_state *crtc_state); /* intel_atomic_plane.c */ +void intel_update_plane(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state); +void intel_update_slave(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state); +void intel_disable_plane(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state); struct intel_plane *intel_plane_alloc(void); void intel_plane_free(struct intel_plane *plane); struct drm_plane_state *intel_plane_duplicate_state(struct drm_plane *plane); @@ -2404,11 +2545,15 @@ void lspcon_write_infoframe(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, unsigned int type, const void *buf, ssize_t len); +void lspcon_read_infoframe(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + unsigned int type, + void *frame, ssize_t len); void lspcon_set_infoframes(struct intel_encoder *encoder, bool enable, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state); -bool lspcon_infoframe_enabled(struct intel_encoder *encoder, +u32 lspcon_infoframes_enabled(struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config); void lspcon_ycbcr420_config(struct drm_connector *connector, struct intel_crtc_state *crtc_state); diff --git a/drivers/gpu/drm/i915/intel_dsi.h b/drivers/gpu/drm/i915/intel_dsi.h index a9a19778dc7f..705a609050c0 100644 --- a/drivers/gpu/drm/i915/intel_dsi.h +++ b/drivers/gpu/drm/i915/intel_dsi.h @@ -189,7 +189,6 @@ void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port); /* intel_dsi_vbt.c */ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id); -int intel_dsi_vbt_get_modes(struct intel_dsi *intel_dsi); void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi, enum mipi_seq seq_id); void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec); diff --git a/drivers/gpu/drm/i915/intel_dsi_vbt.c b/drivers/gpu/drm/i915/intel_dsi_vbt.c index 06a11c35a784..3074448446bc 100644 --- a/drivers/gpu/drm/i915/intel_dsi_vbt.c +++ b/drivers/gpu/drm/i915/intel_dsi_vbt.c @@ -194,7 +194,7 @@ static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi, break; } - if (!IS_ICELAKE(dev_priv)) + if (INTEL_GEN(dev_priv) < 11) vlv_dsi_wait_for_fifo_empty(intel_dsi, port); out: @@ -365,7 +365,7 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data) /* pull up/down */ value = *data++ & 1; - if (IS_ICELAKE(dev_priv)) + if (INTEL_GEN(dev_priv) >= 11) icl_exec_gpio(dev_priv, gpio_source, gpio_index, value); 
else if (IS_VALLEYVIEW(dev_priv)) vlv_exec_gpio(dev_priv, gpio_source, gpio_number, value); @@ -532,24 +532,6 @@ void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec) msleep(msec); } -int intel_dsi_vbt_get_modes(struct intel_dsi *intel_dsi) -{ - struct intel_connector *connector = intel_dsi->attached_connector; - struct drm_device *dev = intel_dsi->base.base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); - struct drm_display_mode *mode; - - mode = drm_mode_duplicate(dev, dev_priv->vbt.lfp_lvds_vbt_mode); - if (!mode) - return 0; - - mode->type |= DRM_MODE_TYPE_PREFERRED; - - drm_mode_probed_add(&connector->base, mode); - - return 1; -} - #define ICL_PREPARE_CNT_MAX 0x7 #define ICL_CLK_ZERO_CNT_MAX 0xf #define ICL_TRAIL_CNT_MAX 0x7 @@ -890,7 +872,7 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id) intel_dsi->burst_mode_ratio = burst_mode_ratio; - if (IS_ICELAKE(dev_priv)) + if (INTEL_GEN(dev_priv) >= 11) icl_dphy_param_init(intel_dsi); else vlv_dphy_param_init(intel_dsi); diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c index 49fa43ff02ba..d0427c2e3997 100644 --- a/drivers/gpu/drm/i915/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/intel_engine_cs.c @@ -84,7 +84,6 @@ static const struct engine_class_info intel_engine_classes[] = { #define MAX_MMIO_BASES 3 struct engine_info { unsigned int hw_id; - unsigned int uabi_id; u8 class; u8 instance; /* mmio bases table *must* be sorted in reverse gen order */ @@ -95,27 +94,24 @@ struct engine_info { }; static const struct engine_info intel_engines[] = { - [RCS] = { - .hw_id = RCS_HW, - .uabi_id = I915_EXEC_RENDER, + [RCS0] = { + .hw_id = RCS0_HW, .class = RENDER_CLASS, .instance = 0, .mmio_bases = { { .gen = 1, .base = RENDER_RING_BASE } }, }, - [BCS] = { - .hw_id = BCS_HW, - .uabi_id = I915_EXEC_BLT, + [BCS0] = { + .hw_id = BCS0_HW, .class = COPY_ENGINE_CLASS, .instance = 0, .mmio_bases = { { .gen = 6, .base = BLT_RING_BASE } }, }, - [VCS] = { - .hw_id = VCS_HW, - .uabi_id = I915_EXEC_BSD, + [VCS0] = { + .hw_id = VCS0_HW, .class = VIDEO_DECODE_CLASS, .instance = 0, .mmio_bases = { @@ -124,9 +120,8 @@ static const struct engine_info intel_engines[] = { { .gen = 4, .base = BSD_RING_BASE } }, }, - [VCS2] = { - .hw_id = VCS2_HW, - .uabi_id = I915_EXEC_BSD, + [VCS1] = { + .hw_id = VCS1_HW, .class = VIDEO_DECODE_CLASS, .instance = 1, .mmio_bases = { @@ -134,27 +129,24 @@ static const struct engine_info intel_engines[] = { { .gen = 8, .base = GEN8_BSD2_RING_BASE } }, }, - [VCS3] = { - .hw_id = VCS3_HW, - .uabi_id = I915_EXEC_BSD, + [VCS2] = { + .hw_id = VCS2_HW, .class = VIDEO_DECODE_CLASS, .instance = 2, .mmio_bases = { { .gen = 11, .base = GEN11_BSD3_RING_BASE } }, }, - [VCS4] = { - .hw_id = VCS4_HW, - .uabi_id = I915_EXEC_BSD, + [VCS3] = { + .hw_id = VCS3_HW, .class = VIDEO_DECODE_CLASS, .instance = 3, .mmio_bases = { { .gen = 11, .base = GEN11_BSD4_RING_BASE } }, }, - [VECS] = { - .hw_id = VECS_HW, - .uabi_id = I915_EXEC_VEBOX, + [VECS0] = { + .hw_id = VECS0_HW, .class = VIDEO_ENHANCEMENT_CLASS, .instance = 0, .mmio_bases = { @@ -162,9 +154,8 @@ static const struct engine_info intel_engines[] = { { .gen = 7, .base = VEBOX_RING_BASE } }, }, - [VECS2] = { - .hw_id = VECS2_HW, - .uabi_id = I915_EXEC_VEBOX, + [VECS1] = { + .hw_id = VECS1_HW, .class = VIDEO_ENHANCEMENT_CLASS, .instance = 1, .mmio_bases = { @@ -264,21 +255,17 @@ static void __sprint_engine_name(char *name, const struct engine_info *info) void intel_engine_set_hwsp_writemask(struct intel_engine_cs *engine, u32 
mask) { - struct drm_i915_private *dev_priv = engine->i915; - i915_reg_t hwstam; - /* * Though they added more rings on g4x/ilk, they did not add * per-engine HWSTAM until gen6. */ - if (INTEL_GEN(dev_priv) < 6 && engine->class != RENDER_CLASS) + if (INTEL_GEN(engine->i915) < 6 && engine->class != RENDER_CLASS) return; - hwstam = RING_HWSTAM(engine->mmio_base); - if (INTEL_GEN(dev_priv) >= 3) - I915_WRITE(hwstam, mask); + if (INTEL_GEN(engine->i915) >= 3) + ENGINE_WRITE(engine, RING_HWSTAM, mask); else - I915_WRITE16(hwstam, mask); + ENGINE_WRITE16(engine, RING_HWSTAM, mask); } static void intel_engine_sanitize_mmio(struct intel_engine_cs *engine) @@ -313,15 +300,18 @@ intel_engine_setup(struct drm_i915_private *dev_priv, if (!engine) return -ENOMEM; + BUILD_BUG_ON(BITS_PER_TYPE(engine->mask) < I915_NUM_ENGINES); + engine->id = id; + engine->mask = BIT(id); engine->i915 = dev_priv; + engine->uncore = &dev_priv->uncore; __sprint_engine_name(engine->name, info); engine->hw_id = engine->guc_id = info->hw_id; engine->mmio_base = __engine_mmio_base(dev_priv, info->mmio_bases); engine->class = info->class; engine->instance = info->instance; - engine->uabi_id = info->uabi_id; engine->uabi_class = intel_engine_classes[info->class].uabi_class; engine->context_size = __intel_engine_context_size(dev_priv, @@ -355,15 +345,15 @@ intel_engine_setup(struct drm_i915_private *dev_priv, int intel_engines_init_mmio(struct drm_i915_private *dev_priv) { struct intel_device_info *device_info = mkwrite_device_info(dev_priv); - const unsigned int ring_mask = INTEL_INFO(dev_priv)->ring_mask; + const unsigned int engine_mask = INTEL_INFO(dev_priv)->engine_mask; struct intel_engine_cs *engine; enum intel_engine_id id; unsigned int mask = 0; unsigned int i; int err; - WARN_ON(ring_mask == 0); - WARN_ON(ring_mask & + WARN_ON(engine_mask == 0); + WARN_ON(engine_mask & GENMASK(BITS_PER_TYPE(mask) - 1, I915_NUM_ENGINES)); if (i915_inject_load_failure()) @@ -377,7 +367,7 @@ int intel_engines_init_mmio(struct drm_i915_private *dev_priv) if (err) goto cleanup; - mask |= ENGINE_MASK(i); + mask |= BIT(i); } /* @@ -385,16 +375,16 @@ int intel_engines_init_mmio(struct drm_i915_private *dev_priv) * are added to the driver by a warning and disabling the forgotten * engines. 
*/ - if (WARN_ON(mask != ring_mask)) - device_info->ring_mask = mask; + if (WARN_ON(mask != engine_mask)) + device_info->engine_mask = mask; /* We always presume we have at least RCS available for later probing */ - if (WARN_ON(!HAS_ENGINE(dev_priv, RCS))) { + if (WARN_ON(!HAS_ENGINE(dev_priv, RCS0))) { err = -ENODEV; goto cleanup; } - RUNTIME_INFO(dev_priv)->num_rings = hweight32(mask); + RUNTIME_INFO(dev_priv)->num_engines = hweight32(mask); i915_check_and_clear_faults(dev_priv); @@ -455,12 +445,6 @@ cleanup: return err; } -void intel_engine_write_global_seqno(struct intel_engine_cs *engine, u32 seqno) -{ - intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno); - GEM_BUG_ON(intel_engine_get_seqno(engine) != seqno); -} - static void intel_engine_init_batch_pool(struct intel_engine_cs *engine) { i915_gem_batch_pool_init(&engine->batch_pool, engine); @@ -541,9 +525,7 @@ static int init_status_page(struct intel_engine_cs *engine) return PTR_ERR(obj); } - ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); - if (ret) - goto err; + i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC); vma = i915_vma_instance(obj, &engine->i915->ggtt.vm, NULL); if (IS_ERR(vma)) { @@ -594,7 +576,6 @@ int intel_engine_setup_common(struct intel_engine_cs *engine) err = i915_timeline_init(engine->i915, &engine->timeline, - engine->name, engine->status_page.vma); if (err) goto err_hwsp; @@ -614,10 +595,44 @@ err_hwsp: return err; } -static void __intel_context_unpin(struct i915_gem_context *ctx, - struct intel_engine_cs *engine) +void intel_engines_set_scheduler_caps(struct drm_i915_private *i915) { - intel_context_unpin(to_intel_context(ctx, engine)); + static const struct { + u8 engine; + u8 sched; + } map[] = { +#define MAP(x, y) { ilog2(I915_ENGINE_HAS_##x), ilog2(I915_SCHEDULER_CAP_##y) } + MAP(PREEMPTION, PREEMPTION), + MAP(SEMAPHORES, SEMAPHORES), +#undef MAP + }; + struct intel_engine_cs *engine; + enum intel_engine_id id; + u32 enabled, disabled; + + enabled = 0; + disabled = 0; + for_each_engine(engine, i915, id) { /* all engines must agree! */ + int i; + + if (engine->schedule) + enabled |= (I915_SCHEDULER_CAP_ENABLED | + I915_SCHEDULER_CAP_PRIORITY); + else + disabled |= (I915_SCHEDULER_CAP_ENABLED | + I915_SCHEDULER_CAP_PRIORITY); + + for (i = 0; i < ARRAY_SIZE(map); i++) { + if (engine->flags & BIT(map[i].engine)) + enabled |= BIT(map[i].sched); + else + disabled |= BIT(map[i].sched); + } + } + + i915->caps.scheduler = enabled & ~disabled; + if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_ENABLED)) + i915->caps.scheduler = 0; } struct measure_breadcrumb { @@ -639,7 +654,7 @@ static int measure_breadcrumb_dw(struct intel_engine_cs *engine) return -ENOMEM; if (i915_timeline_init(engine->i915, - &frame->timeline, "measure", + &frame->timeline, engine->status_page.vma)) goto out_frame; @@ -670,6 +685,20 @@ out_frame: return dw; } +static int pin_context(struct i915_gem_context *ctx, + struct intel_engine_cs *engine, + struct intel_context **out) +{ + struct intel_context *ce; + + ce = intel_context_pin(ctx, engine); + if (IS_ERR(ce)) + return PTR_ERR(ce); + + *out = ce; + return 0; +} + /** * intel_engines_init_common - initialize cengine state which might require hw access * @engine: Engine to initialize. 
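[Editorial note, not part of the diff] The intel_engines_set_scheduler_caps() hunk above derives the device-wide scheduler capabilities as the intersection of what every engine reports: capability bits any engine lacks are accumulated into a "disabled" mask and cleared from the result, and everything is dropped if basic scheduling is not uniformly available. Below is a minimal standalone sketch of that folding pattern, assuming invented names (fake_engine, CAP_*, report_caps); it also collapses the driver's map[] translation between engine flags and uapi scheduler bits into a single bitmask, so it illustrates the idea rather than the driver's actual code.

/* Toy illustration of the "all engines must agree" capability fold. */
#include <stdio.h>

#define CAP_ENABLED    (1u << 0)
#define CAP_PRIORITY   (1u << 1)
#define CAP_PREEMPTION (1u << 2)

struct fake_engine {
	const char *name;
	unsigned int caps;	/* capabilities this engine supports */
};

/* A capability is advertised only if every engine agrees it is supported. */
static unsigned int report_caps(const struct fake_engine *engines, int count)
{
	unsigned int enabled = 0, disabled = 0;
	int i;

	for (i = 0; i < count; i++) {
		enabled |= engines[i].caps;
		disabled |= ~engines[i].caps;
	}

	/* Keep only the bits no engine objected to. */
	enabled &= ~disabled;

	/* If scheduling itself is not uniformly available, report nothing. */
	if (!(enabled & CAP_ENABLED))
		enabled = 0;

	return enabled;
}

int main(void)
{
	const struct fake_engine engines[] = {
		{ "rcs0", CAP_ENABLED | CAP_PRIORITY | CAP_PREEMPTION },
		{ "vcs0", CAP_ENABLED | CAP_PRIORITY },
	};

	/* Prints caps = 0x3: preemption is dropped because vcs0 lacks it. */
	printf("caps = 0x%x\n", report_caps(engines, 2));
	return 0;
}

As in the hunk above, the net effect is that userspace only ever sees a scheduler feature when it can be honoured on every engine, which is why the real function iterates all engines before writing i915->caps.scheduler.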
@@ -684,11 +713,8 @@ out_frame: int intel_engine_init_common(struct intel_engine_cs *engine) { struct drm_i915_private *i915 = engine->i915; - struct intel_context *ce; int ret; - engine->set_default_submission(engine); - /* We may need to do things with the shrinker which * require us to immediately switch back to the default * context. This can cause a problem as pinning the @@ -696,36 +722,34 @@ int intel_engine_init_common(struct intel_engine_cs *engine) * be available. To avoid this we always pin the default * context. */ - ce = intel_context_pin(i915->kernel_context, engine); - if (IS_ERR(ce)) - return PTR_ERR(ce); + ret = pin_context(i915->kernel_context, engine, + &engine->kernel_context); + if (ret) + return ret; /* * Similarly the preempt context must always be available so that - * we can interrupt the engine at any time. + * we can interrupt the engine at any time. However, as preemption + * is optional, we allow it to fail. */ - if (i915->preempt_context) { - ce = intel_context_pin(i915->preempt_context, engine); - if (IS_ERR(ce)) { - ret = PTR_ERR(ce); - goto err_unpin_kernel; - } - } + if (i915->preempt_context) + pin_context(i915->preempt_context, engine, + &engine->preempt_context); ret = measure_breadcrumb_dw(engine); if (ret < 0) - goto err_unpin_preempt; + goto err_unpin; engine->emit_fini_breadcrumb_dw = ret; - return 0; + engine->set_default_submission(engine); -err_unpin_preempt: - if (i915->preempt_context) - __intel_context_unpin(i915->preempt_context, engine); + return 0; -err_unpin_kernel: - __intel_context_unpin(i915->kernel_context, engine); +err_unpin: + if (engine->preempt_context) + intel_context_unpin(engine->preempt_context); + intel_context_unpin(engine->kernel_context); return ret; } @@ -738,8 +762,6 @@ err_unpin_kernel: */ void intel_engine_cleanup_common(struct intel_engine_cs *engine) { - struct drm_i915_private *i915 = engine->i915; - cleanup_status_page(engine); intel_engine_fini_breadcrumbs(engine); @@ -749,9 +771,9 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine) if (engine->default_state) i915_gem_object_put(engine->default_state); - if (i915->preempt_context) - __intel_context_unpin(i915->preempt_context, engine); - __intel_context_unpin(i915->kernel_context, engine); + if (engine->preempt_context) + intel_context_unpin(engine->preempt_context); + intel_context_unpin(engine->kernel_context); i915_timeline_fini(&engine->timeline); @@ -762,50 +784,48 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine) u64 intel_engine_get_active_head(const struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = engine->i915; + struct drm_i915_private *i915 = engine->i915; + u64 acthd; - if (INTEL_GEN(dev_priv) >= 8) - acthd = I915_READ64_2x32(RING_ACTHD(engine->mmio_base), - RING_ACTHD_UDW(engine->mmio_base)); - else if (INTEL_GEN(dev_priv) >= 4) - acthd = I915_READ(RING_ACTHD(engine->mmio_base)); + if (INTEL_GEN(i915) >= 8) + acthd = ENGINE_READ64(engine, RING_ACTHD, RING_ACTHD_UDW); + else if (INTEL_GEN(i915) >= 4) + acthd = ENGINE_READ(engine, RING_ACTHD); else - acthd = I915_READ(ACTHD); + acthd = ENGINE_READ(engine, ACTHD); return acthd; } u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = engine->i915; u64 bbaddr; - if (INTEL_GEN(dev_priv) >= 8) - bbaddr = I915_READ64_2x32(RING_BBADDR(engine->mmio_base), - RING_BBADDR_UDW(engine->mmio_base)); + if (INTEL_GEN(engine->i915) >= 8) + bbaddr = ENGINE_READ64(engine, RING_BBADDR, RING_BBADDR_UDW); else 
- bbaddr = I915_READ(RING_BBADDR(engine->mmio_base)); + bbaddr = ENGINE_READ(engine, RING_BBADDR); return bbaddr; } int intel_engine_stop_cs(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = engine->i915; + struct intel_uncore *uncore = engine->uncore; const u32 base = engine->mmio_base; const i915_reg_t mode = RING_MI_MODE(base); int err; - if (INTEL_GEN(dev_priv) < 3) + if (INTEL_GEN(engine->i915) < 3) return -ENODEV; GEM_TRACE("%s\n", engine->name); - I915_WRITE_FW(mode, _MASKED_BIT_ENABLE(STOP_RING)); + intel_uncore_write_fw(uncore, mode, _MASKED_BIT_ENABLE(STOP_RING)); err = 0; - if (__intel_wait_for_register_fw(dev_priv, + if (__intel_wait_for_register_fw(uncore, mode, MODE_IDLE, MODE_IDLE, 1000, 0, NULL)) { @@ -814,19 +834,16 @@ int intel_engine_stop_cs(struct intel_engine_cs *engine) } /* A final mmio read to let GPU writes be hopefully flushed to memory */ - POSTING_READ_FW(mode); + intel_uncore_posting_read_fw(uncore, mode); return err; } void intel_engine_cancel_stop_cs(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = engine->i915; - GEM_TRACE("%s\n", engine->name); - I915_WRITE_FW(RING_MI_MODE(engine->mmio_base), - _MASKED_BIT_DISABLE(STOP_RING)); + ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING)); } const char *i915_cache_level_str(struct drm_i915_private *i915, int type) @@ -863,6 +880,7 @@ static inline u32 read_subslice_reg(struct drm_i915_private *dev_priv, int slice, int subslice, i915_reg_t reg) { + struct intel_uncore *uncore = &dev_priv->uncore; u32 mcr_slice_subslice_mask; u32 mcr_slice_subslice_select; u32 default_mcr_s_ss_select; @@ -884,33 +902,33 @@ read_subslice_reg(struct drm_i915_private *dev_priv, int slice, default_mcr_s_ss_select = intel_calculate_mcr_s_ss_select(dev_priv); - fw_domains = intel_uncore_forcewake_for_reg(dev_priv, reg, + fw_domains = intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ); - fw_domains |= intel_uncore_forcewake_for_reg(dev_priv, + fw_domains |= intel_uncore_forcewake_for_reg(uncore, GEN8_MCR_SELECTOR, FW_REG_READ | FW_REG_WRITE); - spin_lock_irq(&dev_priv->uncore.lock); - intel_uncore_forcewake_get__locked(dev_priv, fw_domains); + spin_lock_irq(&uncore->lock); + intel_uncore_forcewake_get__locked(uncore, fw_domains); - mcr = I915_READ_FW(GEN8_MCR_SELECTOR); + mcr = intel_uncore_read_fw(uncore, GEN8_MCR_SELECTOR); WARN_ON_ONCE((mcr & mcr_slice_subslice_mask) != default_mcr_s_ss_select); mcr &= ~mcr_slice_subslice_mask; mcr |= mcr_slice_subslice_select; - I915_WRITE_FW(GEN8_MCR_SELECTOR, mcr); + intel_uncore_write_fw(uncore, GEN8_MCR_SELECTOR, mcr); - ret = I915_READ_FW(reg); + ret = intel_uncore_read_fw(uncore, reg); mcr &= ~mcr_slice_subslice_mask; mcr |= default_mcr_s_ss_select; - I915_WRITE_FW(GEN8_MCR_SELECTOR, mcr); + intel_uncore_write_fw(uncore, GEN8_MCR_SELECTOR, mcr); - intel_uncore_forcewake_put__locked(dev_priv, fw_domains); - spin_unlock_irq(&dev_priv->uncore.lock); + intel_uncore_forcewake_put__locked(uncore, fw_domains); + spin_unlock_irq(&uncore->lock); return ret; } @@ -920,6 +938,7 @@ void intel_engine_get_instdone(struct intel_engine_cs *engine, struct intel_instdone *instdone) { struct drm_i915_private *dev_priv = engine->i915; + struct intel_uncore *uncore = engine->uncore; u32 mmio_base = engine->mmio_base; int slice; int subslice; @@ -928,12 +947,14 @@ void intel_engine_get_instdone(struct intel_engine_cs *engine, switch (INTEL_GEN(dev_priv)) { default: - instdone->instdone = I915_READ(RING_INSTDONE(mmio_base)); + instdone->instdone = + 
intel_uncore_read(uncore, RING_INSTDONE(mmio_base)); - if (engine->id != RCS) + if (engine->id != RCS0) break; - instdone->slice_common = I915_READ(GEN7_SC_INSTDONE); + instdone->slice_common = + intel_uncore_read(uncore, GEN7_SC_INSTDONE); for_each_instdone_slice_subslice(dev_priv, slice, subslice) { instdone->sampler[slice][subslice] = read_subslice_reg(dev_priv, slice, subslice, @@ -944,28 +965,33 @@ void intel_engine_get_instdone(struct intel_engine_cs *engine, } break; case 7: - instdone->instdone = I915_READ(RING_INSTDONE(mmio_base)); + instdone->instdone = + intel_uncore_read(uncore, RING_INSTDONE(mmio_base)); - if (engine->id != RCS) + if (engine->id != RCS0) break; - instdone->slice_common = I915_READ(GEN7_SC_INSTDONE); - instdone->sampler[0][0] = I915_READ(GEN7_SAMPLER_INSTDONE); - instdone->row[0][0] = I915_READ(GEN7_ROW_INSTDONE); + instdone->slice_common = + intel_uncore_read(uncore, GEN7_SC_INSTDONE); + instdone->sampler[0][0] = + intel_uncore_read(uncore, GEN7_SAMPLER_INSTDONE); + instdone->row[0][0] = + intel_uncore_read(uncore, GEN7_ROW_INSTDONE); break; case 6: case 5: case 4: - instdone->instdone = I915_READ(RING_INSTDONE(mmio_base)); - - if (engine->id == RCS) + instdone->instdone = + intel_uncore_read(uncore, RING_INSTDONE(mmio_base)); + if (engine->id == RCS0) /* HACK: Using the wrong struct member */ - instdone->slice_common = I915_READ(GEN4_INSTDONE1); + instdone->slice_common = + intel_uncore_read(uncore, GEN4_INSTDONE1); break; case 3: case 2: - instdone->instdone = I915_READ(GEN2_INSTDONE); + instdone->instdone = intel_uncore_read(uncore, GEN2_INSTDONE); break; } } @@ -985,12 +1011,13 @@ static bool ring_is_idle(struct intel_engine_cs *engine) return true; /* First check that no commands are left in the ring */ - if ((I915_READ_HEAD(engine) & HEAD_ADDR) != - (I915_READ_TAIL(engine) & TAIL_ADDR)) + if ((ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR) != + (ENGINE_READ(engine, RING_TAIL) & TAIL_ADDR)) idle = false; /* No bit for gen2, so assume the CS parser is idle */ - if (INTEL_GEN(dev_priv) > 2 && !(I915_READ_MODE(engine) & MODE_IDLE)) + if (INTEL_GEN(dev_priv) > 2 && + !(ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE)) idle = false; intel_runtime_pm_put(dev_priv, wakeref); @@ -1007,16 +1034,10 @@ static bool ring_is_idle(struct intel_engine_cs *engine) */ bool intel_engine_is_idle(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = engine->i915; - /* More white lies, if wedged, hw state is inconsistent */ - if (i915_terminally_wedged(&dev_priv->gpu_error)) + if (i915_reset_failed(engine->i915)) return true; - /* Any inflight/incomplete requests? */ - if (!intel_engine_signaled(engine, intel_engine_last_submit(engine))) - return false; - /* Waiting to drain ELSP? */ if (READ_ONCE(engine->execlists.active)) { struct tasklet_struct *t = &engine->execlists.tasklet; @@ -1045,7 +1066,7 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine) return ring_is_idle(engine); } -bool intel_engines_are_idle(struct drm_i915_private *dev_priv) +bool intel_engines_are_idle(struct drm_i915_private *i915) { struct intel_engine_cs *engine; enum intel_engine_id id; @@ -1054,10 +1075,14 @@ bool intel_engines_are_idle(struct drm_i915_private *dev_priv) * If the driver is wedged, HW state may be very inconsistent and * report that it is still busy, even though we have stopped using it. 
*/ - if (i915_terminally_wedged(&dev_priv->gpu_error)) + if (i915_reset_failed(i915)) return true; - for_each_engine(engine, dev_priv, id) { + /* Already parked (and passed an idleness test); must still be idle */ + if (!READ_ONCE(i915->gt.awake)) + return true; + + for_each_engine(engine, i915, id) { if (!intel_engine_is_idle(engine)) return false; } @@ -1065,34 +1090,6 @@ bool intel_engines_are_idle(struct drm_i915_private *dev_priv) return true; } -/** - * intel_engine_has_kernel_context: - * @engine: the engine - * - * Returns true if the last context to be executed on this engine, or has been - * executed if the engine is already idle, is the kernel context - * (#i915.kernel_context). - */ -bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine) -{ - const struct intel_context *kernel_context = - to_intel_context(engine->i915->kernel_context, engine); - struct i915_request *rq; - - lockdep_assert_held(&engine->i915->drm.struct_mutex); - - /* - * Check the last context seen by the engine. If active, it will be - * the last request that remains in the timeline. When idle, it is - * the last executed context as tracked by retirement. - */ - rq = __i915_active_request_peek(&engine->timeline.last_request); - if (rq) - return rq->hw_context == kernel_context; - else - return engine->last_retired_context == kernel_context; -} - void intel_engines_reset_default_submission(struct drm_i915_private *i915) { struct intel_engine_cs *engine; @@ -1180,6 +1177,8 @@ void intel_engines_park(struct drm_i915_private *i915) i915_gem_batch_pool_fini(&engine->batch_pool); engine->execlists.no_priolist = false; } + + i915->gt.active_engines = 0; } /** @@ -1283,15 +1282,14 @@ static void print_request(struct drm_printer *m, x = print_sched_attr(rq->i915, &rq->sched.attr, buf, x, sizeof(buf)); - drm_printf(m, "%s%x%s%s [%llx:%llx]%s @ %dms: %s\n", + drm_printf(m, "%s %llx:%llx%s%s %s @ %dms: %s\n", prefix, - rq->global_seqno, + rq->fence.context, rq->fence.seqno, i915_request_completed(rq) ? "!" : i915_request_started(rq) ? "*" : "", test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags) ? "+" : "", - rq->fence.context, rq->fence.seqno, buf, jiffies_to_msecs(jiffies - rq->emitted_jiffies), name); @@ -1334,25 +1332,26 @@ static void intel_engine_print_registers(const struct intel_engine_cs *engine, &engine->execlists; u64 addr; - if (engine->id == RCS && IS_GEN_RANGE(dev_priv, 4, 7)) - drm_printf(m, "\tCCID: 0x%08x\n", I915_READ(CCID)); + if (engine->id == RCS0 && IS_GEN_RANGE(dev_priv, 4, 7)) + drm_printf(m, "\tCCID: 0x%08x\n", ENGINE_READ(engine, CCID)); drm_printf(m, "\tRING_START: 0x%08x\n", - I915_READ(RING_START(engine->mmio_base))); + ENGINE_READ(engine, RING_START)); drm_printf(m, "\tRING_HEAD: 0x%08x\n", - I915_READ(RING_HEAD(engine->mmio_base)) & HEAD_ADDR); + ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR); drm_printf(m, "\tRING_TAIL: 0x%08x\n", - I915_READ(RING_TAIL(engine->mmio_base)) & TAIL_ADDR); + ENGINE_READ(engine, RING_TAIL) & TAIL_ADDR); drm_printf(m, "\tRING_CTL: 0x%08x%s\n", - I915_READ(RING_CTL(engine->mmio_base)), - I915_READ(RING_CTL(engine->mmio_base)) & (RING_WAIT | RING_WAIT_SEMAPHORE) ? " [waiting]" : ""); + ENGINE_READ(engine, RING_CTL), + ENGINE_READ(engine, RING_CTL) & (RING_WAIT | RING_WAIT_SEMAPHORE) ? " [waiting]" : ""); if (INTEL_GEN(engine->i915) > 2) { drm_printf(m, "\tRING_MODE: 0x%08x%s\n", - I915_READ(RING_MI_MODE(engine->mmio_base)), - I915_READ(RING_MI_MODE(engine->mmio_base)) & (MODE_IDLE) ? 
" [idle]" : ""); + ENGINE_READ(engine, RING_MI_MODE), + ENGINE_READ(engine, RING_MI_MODE) & (MODE_IDLE) ? " [idle]" : ""); } if (INTEL_GEN(dev_priv) >= 6) { - drm_printf(m, "\tRING_IMR: %08x\n", I915_READ_IMR(engine)); + drm_printf(m, "\tRING_IMR: %08x\n", + ENGINE_READ(engine, RING_IMR)); } addr = intel_engine_get_active_head(engine); @@ -1362,22 +1361,21 @@ static void intel_engine_print_registers(const struct intel_engine_cs *engine, drm_printf(m, "\tBBADDR: 0x%08x_%08x\n", upper_32_bits(addr), lower_32_bits(addr)); if (INTEL_GEN(dev_priv) >= 8) - addr = I915_READ64_2x32(RING_DMA_FADD(engine->mmio_base), - RING_DMA_FADD_UDW(engine->mmio_base)); + addr = ENGINE_READ64(engine, RING_DMA_FADD, RING_DMA_FADD_UDW); else if (INTEL_GEN(dev_priv) >= 4) - addr = I915_READ(RING_DMA_FADD(engine->mmio_base)); + addr = ENGINE_READ(engine, RING_DMA_FADD); else - addr = I915_READ(DMA_FADD_I8XX); + addr = ENGINE_READ(engine, DMA_FADD_I8XX); drm_printf(m, "\tDMA_FADDR: 0x%08x_%08x\n", upper_32_bits(addr), lower_32_bits(addr)); if (INTEL_GEN(dev_priv) >= 4) { drm_printf(m, "\tIPEIR: 0x%08x\n", - I915_READ(RING_IPEIR(engine->mmio_base))); + ENGINE_READ(engine, RING_IPEIR)); drm_printf(m, "\tIPEHR: 0x%08x\n", - I915_READ(RING_IPEHR(engine->mmio_base))); + ENGINE_READ(engine, RING_IPEHR)); } else { - drm_printf(m, "\tIPEIR: 0x%08x\n", I915_READ(IPEIR)); - drm_printf(m, "\tIPEHR: 0x%08x\n", I915_READ(IPEHR)); + drm_printf(m, "\tIPEIR: 0x%08x\n", ENGINE_READ(engine, IPEIR)); + drm_printf(m, "\tIPEHR: 0x%08x\n", ENGINE_READ(engine, IPEHR)); } if (HAS_EXECLISTS(dev_priv)) { @@ -1387,15 +1385,15 @@ static void intel_engine_print_registers(const struct intel_engine_cs *engine, u8 read, write; drm_printf(m, "\tExeclist status: 0x%08x %08x\n", - I915_READ(RING_EXECLIST_STATUS_LO(engine)), - I915_READ(RING_EXECLIST_STATUS_HI(engine))); + ENGINE_READ(engine, RING_EXECLIST_STATUS_LO), + ENGINE_READ(engine, RING_EXECLIST_STATUS_HI)); read = execlists->csb_head; write = READ_ONCE(*execlists->csb_write); drm_printf(m, "\tExeclist CSB read %d, write %d [mmio:%d], tasklet queued? 
%s (%s)\n", read, write, - GEN8_CSB_WRITE_PTR(I915_READ(RING_CONTEXT_STATUS_PTR(engine))), + GEN8_CSB_WRITE_PTR(ENGINE_READ(engine, RING_CONTEXT_STATUS_PTR)), yesno(test_bit(TASKLET_STATE_SCHED, &engine->execlists.tasklet.state)), enableddisabled(!atomic_read(&engine->execlists.tasklet.count))); @@ -1410,9 +1408,13 @@ static void intel_engine_print_registers(const struct intel_engine_cs *engine, drm_printf(m, "\tExeclist CSB[%d]: 0x%08x [mmio:0x%08x], context: %d [mmio:%d]\n", idx, hws[idx * 2], - I915_READ(RING_CONTEXT_STATUS_BUF_LO(engine, idx)), + ENGINE_READ_IDX(engine, + RING_CONTEXT_STATUS_BUF_LO, + idx), hws[idx * 2 + 1], - I915_READ(RING_CONTEXT_STATUS_BUF_HI(engine, idx))); + ENGINE_READ_IDX(engine, + RING_CONTEXT_STATUS_BUF_HI, + idx)); } rcu_read_lock(); @@ -1425,10 +1427,11 @@ static void intel_engine_print_registers(const struct intel_engine_cs *engine, char hdr[80]; snprintf(hdr, sizeof(hdr), - "\t\tELSP[%d] count=%d, ring:{start:%08x, hwsp:%08x}, rq: ", + "\t\tELSP[%d] count=%d, ring:{start:%08x, hwsp:%08x, seqno:%08x}, rq: ", idx, count, i915_ggtt_offset(rq->ring->vma), - rq->timeline->hwsp_offset); + rq->timeline->hwsp_offset, + hwsp_seqno(rq)); print_request(m, rq, hdr); } else { drm_printf(m, "\t\tELSP[%d] idle\n", idx); @@ -1438,11 +1441,11 @@ static void intel_engine_print_registers(const struct intel_engine_cs *engine, rcu_read_unlock(); } else if (INTEL_GEN(dev_priv) > 6) { drm_printf(m, "\tPP_DIR_BASE: 0x%08x\n", - I915_READ(RING_PP_DIR_BASE(engine))); + ENGINE_READ(engine, RING_PP_DIR_BASE)); drm_printf(m, "\tPP_DIR_BASE_READ: 0x%08x\n", - I915_READ(RING_PP_DIR_BASE_READ(engine))); + ENGINE_READ(engine, RING_PP_DIR_BASE_READ)); drm_printf(m, "\tPP_DIR_DCLV: 0x%08x\n", - I915_READ(RING_PP_DIR_DCLV(engine))); + ENGINE_READ(engine, RING_PP_DIR_DCLV)); } } @@ -1495,13 +1498,12 @@ void intel_engine_dump(struct intel_engine_cs *engine, va_end(ap); } - if (i915_terminally_wedged(&engine->i915->gpu_error)) + if (i915_reset_failed(engine->i915)) drm_printf(m, "*** WEDGED ***\n"); - drm_printf(m, "\tcurrent seqno %x, last %x, hangcheck %x [%d ms]\n", - intel_engine_get_seqno(engine), - intel_engine_last_submit(engine), - engine->hangcheck.seqno, + drm_printf(m, "\tHangcheck %x:%x [%d ms]\n", + engine->hangcheck.last_seqno, + engine->hangcheck.next_seqno, jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp)); drm_printf(m, "\tReset count: %d (global %d)\n", i915_reset_engine_count(error, engine), @@ -1521,7 +1523,7 @@ void intel_engine_dump(struct intel_engine_cs *engine, if (&rq->link != &engine->timeline.requests) print_request(m, rq, "\t\tlast "); - rq = i915_gem_find_active_request(engine); + rq = intel_engine_find_active_request(engine); if (rq) { print_request(m, rq, "\t\tactive "); @@ -1688,6 +1690,50 @@ void intel_disable_engine_stats(struct intel_engine_cs *engine) write_sequnlock_irqrestore(&engine->stats.lock, flags); } +static bool match_ring(struct i915_request *rq) +{ + u32 ring = ENGINE_READ(rq->engine, RING_START); + + return ring == i915_ggtt_offset(rq->ring->vma); +} + +struct i915_request * +intel_engine_find_active_request(struct intel_engine_cs *engine) +{ + struct i915_request *request, *active = NULL; + unsigned long flags; + + /* + * We are called by the error capture, reset and to dump engine + * state at random points in time. In particular, note that neither is + * crucially ordered with an interrupt. 
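The register-dump hunks above replace open-coded I915_READ(REG(engine->mmio_base)) accesses with ENGINE_READ(engine, REG). A minimal stand-alone sketch of the idea, using hypothetical names (the real macro resolves typed i915_reg_t values through intel_uncore; nothing below is the driver's actual definition):

#include <stdint.h>
#include <stdio.h>

/* Toy model: resolve a register offset against the engine's mmio base once,
 * instead of repeating REG(engine->mmio_base) at every call site. */
struct toy_engine {
	const char *name;
	uint32_t mmio_base;
};

#define TOY_RING_TAIL 0x30 /* offset relative to the engine's mmio base */

static uint32_t toy_engine_reg(const struct toy_engine *engine, uint32_t offset)
{
	return engine->mmio_base + offset; /* absolute mmio offset to access */
}

int main(void)
{
	struct toy_engine rcs0 = { .name = "rcs0", .mmio_base = 0x2000 };

	printf("%s RING_TAIL register sits at 0x%04x\n",
	       rcs0.name, toy_engine_reg(&rcs0, TOY_RING_TAIL));
	return 0;
}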
After a hang, the GPU is dead + * and we assume that no more writes can happen (we waited long enough + * for all writes that were in transaction to be flushed) - adding an + * extra delay for a recent interrupt is pointless. Hence, we do + * not need an engine->irq_seqno_barrier() before the seqno reads. + * At all other times, we must assume the GPU is still running, but + * we only care about the snapshot of this moment. + */ + spin_lock_irqsave(&engine->timeline.lock, flags); + list_for_each_entry(request, &engine->timeline.requests, link) { + if (i915_request_completed(request)) + continue; + + if (!i915_request_started(request)) + break; + + /* More than one preemptible request may match! */ + if (!match_ring(request)) + break; + + active = request; + break; + } + spin_unlock_irqrestore(&engine->timeline.lock, flags); + + return active; +} + #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) #include "selftests/mock_engine.c" #include "selftests/intel_engine_cs.c" diff --git a/drivers/gpu/drm/i915/intel_engine_types.h b/drivers/gpu/drm/i915/intel_engine_types.h new file mode 100644 index 000000000000..b3249bf6a65f --- /dev/null +++ b/drivers/gpu/drm/i915/intel_engine_types.h @@ -0,0 +1,527 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#ifndef __INTEL_ENGINE_TYPES__ +#define __INTEL_ENGINE_TYPES__ + +#include <linux/hashtable.h> +#include <linux/irq_work.h> +#include <linux/kref.h> +#include <linux/list.h> +#include <linux/types.h> + +#include "i915_timeline_types.h" +#include "intel_device_info.h" +#include "intel_workarounds_types.h" + +#include "i915_gem_batch_pool.h" +#include "i915_pmu.h" + +#define I915_MAX_SLICES 3 +#define I915_MAX_SUBSLICES 8 + +#define I915_CMD_HASH_ORDER 9 + +struct drm_i915_reg_table; +struct i915_gem_context; +struct i915_request; +struct i915_sched_attr; +struct intel_uncore; + +struct intel_hw_status_page { + struct i915_vma *vma; + u32 *addr; +}; + +struct intel_instdone { + u32 instdone; + /* The following exist only in the RCS engine */ + u32 slice_common; + u32 sampler[I915_MAX_SLICES][I915_MAX_SUBSLICES]; + u32 row[I915_MAX_SLICES][I915_MAX_SUBSLICES]; +}; + +struct intel_engine_hangcheck { + u64 acthd; + u32 last_seqno; + u32 next_seqno; + unsigned long action_timestamp; + struct intel_instdone instdone; +}; + +struct intel_ring { + struct kref ref; + struct i915_vma *vma; + void *vaddr; + + struct i915_timeline *timeline; + struct list_head request_list; + struct list_head active_link; + + u32 head; + u32 tail; + u32 emit; + + u32 space; + u32 size; + u32 effective_size; +}; + +/* + * we use a single page to load ctx workarounds so all of these + * values are referred in terms of dwords + * + * struct i915_wa_ctx_bb: + * offset: specifies batch starting position, also helpful in case + * if we want to have multiple batches at different offsets based on + * some criteria. It is not a requirement at the moment but provides + * an option for future use. + * size: size of the batch in DWORDS + */ +struct i915_ctx_workarounds { + struct i915_wa_ctx_bb { + u32 offset; + u32 size; + } indirect_ctx, per_ctx; + struct i915_vma *vma; +}; + +#define I915_MAX_VCS 4 +#define I915_MAX_VECS 2 + +/* + * Engine IDs definitions. + * Keep instances of the same type engine together. 
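A generic sketch of the circular-buffer arithmetic behind the head/tail/space fields of struct intel_ring above. This is the textbook power-of-two formula, not necessarily the driver's exact accounting (which also reserves room for the closing breadcrumb); ring_space() is a made-up helper for illustration:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Bytes free for new commands in a power-of-two sized ring buffer. */
static uint32_t ring_space(uint32_t head, uint32_t tail, uint32_t size)
{
	assert((size & (size - 1)) == 0);	/* ring sizes are powers of two */
	return (head - tail - 1) & (size - 1);
}

int main(void)
{
	printf("%u\n", ring_space(0, 0, 4096));		/* empty ring: 4095 free */
	printf("%u\n", ring_space(256, 4032, 4096));	/* wrapped: 319 free */
	return 0;
}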
+ */ +enum intel_engine_id { + RCS0 = 0, + BCS0, + VCS0, + VCS1, + VCS2, + VCS3, +#define _VCS(n) (VCS0 + (n)) + VECS0, + VECS1 +#define _VECS(n) (VECS0 + (n)) +}; + +struct st_preempt_hang { + struct completion completion; + unsigned int count; + bool inject_hang; +}; + +/** + * struct intel_engine_execlists - execlist submission queue and port state + * + * The struct intel_engine_execlists represents the combined logical state of + * driver and the hardware state for execlist mode of submission. + */ +struct intel_engine_execlists { + /** + * @tasklet: softirq tasklet for bottom handler + */ + struct tasklet_struct tasklet; + + /** + * @default_priolist: priority list for I915_PRIORITY_NORMAL + */ + struct i915_priolist default_priolist; + + /** + * @no_priolist: priority lists disabled + */ + bool no_priolist; + + /** + * @submit_reg: gen-specific execlist submission register + * set to the ExecList Submission Port (elsp) register pre-Gen11 and to + * the ExecList Submission Queue Contents register array for Gen11+ + */ + u32 __iomem *submit_reg; + + /** + * @ctrl_reg: the enhanced execlists control register, used to load the + * submit queue on the HW and to request preemptions to idle + */ + u32 __iomem *ctrl_reg; + + /** + * @port: execlist port states + * + * For each hardware ELSP (ExecList Submission Port) we keep + * track of the last request and the number of times we submitted + * that port to hw. We then count the number of times the hw reports + * a context completion or preemption. As only one context can + * be active on hw, we limit resubmission of context to port[0]. This + * is called Lite Restore, of the context. + */ + struct execlist_port { + /** + * @request_count: combined request and submission count + */ + struct i915_request *request_count; +#define EXECLIST_COUNT_BITS 2 +#define port_request(p) ptr_mask_bits((p)->request_count, EXECLIST_COUNT_BITS) +#define port_count(p) ptr_unmask_bits((p)->request_count, EXECLIST_COUNT_BITS) +#define port_pack(rq, count) ptr_pack_bits(rq, count, EXECLIST_COUNT_BITS) +#define port_unpack(p, count) ptr_unpack_bits((p)->request_count, count, EXECLIST_COUNT_BITS) +#define port_set(p, packed) ((p)->request_count = (packed)) +#define port_isset(p) ((p)->request_count) +#define port_index(p, execlists) ((p) - (execlists)->port) + + /** + * @context_id: context ID for port + */ + GEM_DEBUG_DECL(u32 context_id); + +#define EXECLIST_MAX_PORTS 2 + } port[EXECLIST_MAX_PORTS]; + + /** + * @active: is the HW active? We consider the HW as active after + * submitting any context for execution and until we have seen the + * last context completion event. After that, we do not expect any + * more events until we submit, and so can park the HW. + * + * As we have a small number of different sources from which we feed + * the HW, we track the state of each inside a single bitfield. + */ + unsigned int active; +#define EXECLISTS_ACTIVE_USER 0 +#define EXECLISTS_ACTIVE_PREEMPT 1 +#define EXECLISTS_ACTIVE_HWACK 2 + + /** + * @port_mask: number of execlist ports - 1 + */ + unsigned int port_mask; + + /** + * @queue_priority_hint: Highest pending priority. + * + * When we add requests into the queue, or adjust the priority of + * executing requests, we compute the maximum priority of those + * pending requests. We can then use this value to determine if + * we need to preempt the executing requests to service the queue. 
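The port_pack()/port_unpack() macros above fold a small submission count into the otherwise-unused low bits of the request pointer. A stand-alone sketch of that pointer-packing trick; the driver's real helpers are ptr_pack_bits()/ptr_unpack_bits() in i915_utils.h, and everything below (pack, unpack, the toy request) is illustrative only:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define COUNT_BITS 2	/* matches EXECLIST_COUNT_BITS above */

/* Pack a small count into the low bits of a sufficiently aligned pointer. */
static void *pack(void *ptr, unsigned int count)
{
	uintptr_t v = (uintptr_t)ptr;

	assert((v & ((1u << COUNT_BITS) - 1)) == 0);	/* alignment frees the bits */
	assert(count < (1u << COUNT_BITS));
	return (void *)(v | count);
}

static void *unpack(void *packed, unsigned int *count)
{
	uintptr_t v = (uintptr_t)packed;

	*count = v & ((1u << COUNT_BITS) - 1);
	return (void *)(v & ~(uintptr_t)((1u << COUNT_BITS) - 1));
}

int main(void)
{
	static int request;	/* stand-in for a struct i915_request */
	unsigned int count;
	void *p = pack(&request, 1);

	assert(unpack(p, &count) == &request && count == 1);
	printf("request %p submitted %u time(s)\n", (void *)&request, count);
	return 0;
}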
+ * However, since the we may have recorded the priority of an inflight + * request we wanted to preempt but since completed, at the time of + * dequeuing the priority hint may no longer may match the highest + * available request priority. + */ + int queue_priority_hint; + + /** + * @queue: queue of requests, in priority lists + */ + struct rb_root_cached queue; + + /** + * @csb_write: control register for Context Switch buffer + * + * Note this register may be either mmio or HWSP shadow. + */ + u32 *csb_write; + + /** + * @csb_status: status array for Context Switch buffer + * + * Note these register may be either mmio or HWSP shadow. + */ + u32 *csb_status; + + /** + * @preempt_complete_status: expected CSB upon completing preemption + */ + u32 preempt_complete_status; + + /** + * @csb_head: context status buffer head + */ + u8 csb_head; + + I915_SELFTEST_DECLARE(struct st_preempt_hang preempt_hang;) +}; + +#define INTEL_ENGINE_CS_MAX_NAME 8 + +struct intel_engine_cs { + struct drm_i915_private *i915; + struct intel_uncore *uncore; + char name[INTEL_ENGINE_CS_MAX_NAME]; + + enum intel_engine_id id; + unsigned int hw_id; + unsigned int guc_id; + intel_engine_mask_t mask; + + u8 uabi_class; + + u8 class; + u8 instance; + u32 context_size; + u32 mmio_base; + + struct intel_ring *buffer; + + struct i915_timeline timeline; + + struct intel_context *kernel_context; /* pinned */ + struct intel_context *preempt_context; /* pinned; optional */ + + struct drm_i915_gem_object *default_state; + void *pinned_default_state; + + /* Rather than have every client wait upon all user interrupts, + * with the herd waking after every interrupt and each doing the + * heavyweight seqno dance, we delegate the task (of being the + * bottom-half of the user interrupt) to the first client. After + * every interrupt, we wake up one client, who does the heavyweight + * coherent seqno read and either goes back to sleep (if incomplete), + * or wakes up all the completed clients in parallel, before then + * transferring the bottom-half status to the next client in the queue. + * + * Compared to walking the entire list of waiters in a single dedicated + * bottom-half, we reduce the latency of the first waiter by avoiding + * a context switch, but incur additional coherent seqno reads when + * following the chain of request breadcrumbs. Since it is most likely + * that we have a single client waiting on each seqno, then reducing + * the overhead of waking that client is much preferred. + */ + struct intel_breadcrumbs { + spinlock_t irq_lock; + struct list_head signalers; + + struct irq_work irq_work; /* for use from inside irq_lock */ + + unsigned int irq_enabled; + + bool irq_armed; + } breadcrumbs; + + struct intel_engine_pmu { + /** + * @enable: Bitmask of enable sample events on this engine. + * + * Bits correspond to sample event types, for instance + * I915_SAMPLE_QUEUED is bit 0 etc. + */ + u32 enable; + /** + * @enable_count: Reference count for the enabled samplers. + * + * Index number corresponds to @enum drm_i915_pmu_engine_sample. + */ + unsigned int enable_count[I915_ENGINE_SAMPLE_COUNT]; + /** + * @sample: Counter values for sampling events. + * + * Our internal timer stores the current counters in this field. + * + * Index number corresponds to @enum drm_i915_pmu_engine_sample. + */ + struct i915_pmu_sample sample[I915_ENGINE_SAMPLE_COUNT]; + } pmu; + + /* + * A pool of objects to use as shadow copies of client batch buffers + * when the command parser is enabled. 
Prevents the client from + * modifying the batch contents after software parsing. + */ + struct i915_gem_batch_pool batch_pool; + + struct intel_hw_status_page status_page; + struct i915_ctx_workarounds wa_ctx; + struct i915_wa_list ctx_wa_list; + struct i915_wa_list wa_list; + struct i915_wa_list whitelist; + + u32 irq_keep_mask; /* always keep these interrupts */ + u32 irq_enable_mask; /* bitmask to enable ring interrupt */ + void (*irq_enable)(struct intel_engine_cs *engine); + void (*irq_disable)(struct intel_engine_cs *engine); + + int (*init_hw)(struct intel_engine_cs *engine); + + struct { + void (*prepare)(struct intel_engine_cs *engine); + void (*reset)(struct intel_engine_cs *engine, bool stalled); + void (*finish)(struct intel_engine_cs *engine); + } reset; + + void (*park)(struct intel_engine_cs *engine); + void (*unpark)(struct intel_engine_cs *engine); + + void (*set_default_submission)(struct intel_engine_cs *engine); + + const struct intel_context_ops *cops; + + int (*request_alloc)(struct i915_request *rq); + int (*init_context)(struct i915_request *rq); + + int (*emit_flush)(struct i915_request *request, u32 mode); +#define EMIT_INVALIDATE BIT(0) +#define EMIT_FLUSH BIT(1) +#define EMIT_BARRIER (EMIT_INVALIDATE | EMIT_FLUSH) + int (*emit_bb_start)(struct i915_request *rq, + u64 offset, u32 length, + unsigned int dispatch_flags); +#define I915_DISPATCH_SECURE BIT(0) +#define I915_DISPATCH_PINNED BIT(1) + int (*emit_init_breadcrumb)(struct i915_request *rq); + u32 *(*emit_fini_breadcrumb)(struct i915_request *rq, + u32 *cs); + unsigned int emit_fini_breadcrumb_dw; + + /* Pass the request to the hardware queue (e.g. directly into + * the legacy ringbuffer or to the end of an execlist). + * + * This is called from an atomic context with irqs disabled; must + * be irq safe. + */ + void (*submit_request)(struct i915_request *rq); + + /* + * Call when the priority on a request has changed and it and its + * dependencies may need rescheduling. Note the request itself may + * not be ready to run! + */ + void (*schedule)(struct i915_request *request, + const struct i915_sched_attr *attr); + + /* + * Cancel all requests on the hardware, or queued for execution. + * This should only cancel the ready requests that have been + * submitted to the engine (via the engine->submit_request callback). + * This is called when marking the device as wedged. + */ + void (*cancel_requests)(struct intel_engine_cs *engine); + + void (*cleanup)(struct intel_engine_cs *engine); + + struct intel_engine_execlists execlists; + + /* Contexts are pinned whilst they are active on the GPU. The last + * context executed remains active whilst the GPU is idle - the + * switch away and write to the context object only occurs on the + * next execution. Contexts are only unpinned on retirement of the + * following request ensuring that we can always write to the object + * on the context switch even after idling. Across suspend, we switch + * to the kernel context and trash it as the save may not happen + * before the hardware is powered down. 
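A small sketch of how the EMIT_* mode bits defined above are combined and tested by an emit_flush() implementation. The flag values come from the header; the cache semantics in the comments are the usual informal reading (invalidate ahead of the next batch, flush writes from the previous one), and toy_emit_flush() is a made-up stand-in:

#include <stdint.h>
#include <stdio.h>

#define EMIT_INVALIDATE	(1u << 0)
#define EMIT_FLUSH	(1u << 1)
#define EMIT_BARRIER	(EMIT_INVALIDATE | EMIT_FLUSH)

static void toy_emit_flush(uint32_t mode)
{
	if (mode & EMIT_INVALIDATE)
		printf("invalidate caches before the next batch\n");
	if (mode & EMIT_FLUSH)
		printf("flush writes left by the previous batch\n");
}

int main(void)
{
	toy_emit_flush(EMIT_BARRIER);	/* both bits: a full barrier between requests */
	return 0;
}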
+ */ + struct intel_context *last_retired_context; + + /* status_notifier: list of callbacks for context-switch changes */ + struct atomic_notifier_head context_status_notifier; + + struct intel_engine_hangcheck hangcheck; + +#define I915_ENGINE_NEEDS_CMD_PARSER BIT(0) +#define I915_ENGINE_SUPPORTS_STATS BIT(1) +#define I915_ENGINE_HAS_PREEMPTION BIT(2) +#define I915_ENGINE_HAS_SEMAPHORES BIT(3) + unsigned int flags; + + /* + * Table of commands the command parser needs to know about + * for this engine. + */ + DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER); + + /* + * Table of registers allowed in commands that read/write registers. + */ + const struct drm_i915_reg_table *reg_tables; + int reg_table_count; + + /* + * Returns the bitmask for the length field of the specified command. + * Return 0 for an unrecognized/invalid command. + * + * If the command parser finds an entry for a command in the engine's + * cmd_tables, it gets the command's length based on the table entry. + * If not, it calls this function to determine the per-engine length + * field encoding for the command (i.e. different opcode ranges use + * certain bits to encode the command length in the header). + */ + u32 (*get_cmd_length_mask)(u32 cmd_header); + + struct { + /** + * @lock: Lock protecting the below fields. + */ + seqlock_t lock; + /** + * @enabled: Reference count indicating number of listeners. + */ + unsigned int enabled; + /** + * @active: Number of contexts currently scheduled in. + */ + unsigned int active; + /** + * @enabled_at: Timestamp when busy stats were enabled. + */ + ktime_t enabled_at; + /** + * @start: Timestamp of the last idle to active transition. + * + * Idle is defined as active == 0, active is active > 0. + */ + ktime_t start; + /** + * @total: Total time this engine was busy. + * + * Accumulated time not counting the most recent block in cases + * where engine is currently busy (active > 0). + */ + ktime_t total; + } stats; +}; + +static inline bool +intel_engine_needs_cmd_parser(const struct intel_engine_cs *engine) +{ + return engine->flags & I915_ENGINE_NEEDS_CMD_PARSER; +} + +static inline bool +intel_engine_supports_stats(const struct intel_engine_cs *engine) +{ + return engine->flags & I915_ENGINE_SUPPORTS_STATS; +} + +static inline bool +intel_engine_has_preemption(const struct intel_engine_cs *engine) +{ + return engine->flags & I915_ENGINE_HAS_PREEMPTION; +} + +static inline bool +intel_engine_has_semaphores(const struct intel_engine_cs *engine) +{ + return engine->flags & I915_ENGINE_HAS_SEMAPHORES; +} + +#define instdone_slice_mask(dev_priv__) \ + (IS_GEN(dev_priv__, 7) ? \ + 1 : RUNTIME_INFO(dev_priv__)->sseu.slice_mask) + +#define instdone_subslice_mask(dev_priv__) \ + (IS_GEN(dev_priv__, 7) ? \ + 1 : RUNTIME_INFO(dev_priv__)->sseu.subslice_mask[0]) + +#define for_each_instdone_slice_subslice(dev_priv__, slice__, subslice__) \ + for ((slice__) = 0, (subslice__) = 0; \ + (slice__) < I915_MAX_SLICES; \ + (subslice__) = ((subslice__) + 1) < I915_MAX_SUBSLICES ? 
(subslice__) + 1 : 0, \ + (slice__) += ((subslice__) == 0)) \ + for_each_if((BIT(slice__) & instdone_slice_mask(dev_priv__)) && \ + (BIT(subslice__) & instdone_subslice_mask(dev_priv__))) + +#endif /* __INTEL_ENGINE_TYPES_H__ */ diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c index 656e684e7c9a..43fe08be3b7d 100644 --- a/drivers/gpu/drm/i915/intel_fbc.c +++ b/drivers/gpu/drm/i915/intel_fbc.c @@ -108,7 +108,7 @@ static void i8xx_fbc_deactivate(struct drm_i915_private *dev_priv) I915_WRITE(FBC_CONTROL, fbc_ctl); /* Wait for compressing bit to clear */ - if (intel_wait_for_register(dev_priv, + if (intel_wait_for_register(&dev_priv->uncore, FBC_STATUS, FBC_STAT_COMPRESSING, 0, 10)) { DRM_DEBUG_KMS("FBC idle timed out\n"); diff --git a/drivers/gpu/drm/i915/intel_gpu_commands.h b/drivers/gpu/drm/i915/intel_gpu_commands.h index b96a31bc1080..a34ece53a771 100644 --- a/drivers/gpu/drm/i915/intel_gpu_commands.h +++ b/drivers/gpu/drm/i915/intel_gpu_commands.h @@ -105,8 +105,13 @@ #define MI_SEMAPHORE_SIGNAL MI_INSTR(0x1b, 0) /* GEN8+ */ #define MI_SEMAPHORE_TARGET(engine) ((engine)<<15) #define MI_SEMAPHORE_WAIT MI_INSTR(0x1c, 2) /* GEN8+ */ -#define MI_SEMAPHORE_POLL (1<<15) -#define MI_SEMAPHORE_SAD_GTE_SDD (1<<12) +#define MI_SEMAPHORE_POLL (1 << 15) +#define MI_SEMAPHORE_SAD_GT_SDD (0 << 12) +#define MI_SEMAPHORE_SAD_GTE_SDD (1 << 12) +#define MI_SEMAPHORE_SAD_LT_SDD (2 << 12) +#define MI_SEMAPHORE_SAD_LTE_SDD (3 << 12) +#define MI_SEMAPHORE_SAD_EQ_SDD (4 << 12) +#define MI_SEMAPHORE_SAD_NEQ_SDD (5 << 12) #define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1) #define MI_STORE_DWORD_IMM_GEN4 MI_INSTR(0x20, 2) #define MI_MEM_VIRTUAL (1 << 22) /* 945,g33,965 */ diff --git a/drivers/gpu/drm/i915/intel_guc.c b/drivers/gpu/drm/i915/intel_guc.c index 8660af3fd755..3aabfa2d9198 100644 --- a/drivers/gpu/drm/i915/intel_guc.c +++ b/drivers/gpu/drm/i915/intel_guc.c @@ -54,7 +54,7 @@ void intel_guc_init_send_regs(struct intel_guc *guc) BUILD_BUG_ON(GUC_MAX_MMIO_MSG_LEN > SOFT_SCRATCH_COUNT); for (i = 0; i < guc->send_regs.count; i++) { - fw_domains |= intel_uncore_forcewake_for_reg(dev_priv, + fw_domains |= intel_uncore_forcewake_for_reg(&dev_priv->uncore, guc_send_reg(guc, i), FW_REG_READ | FW_REG_WRITE); } @@ -203,11 +203,19 @@ int intel_guc_init(struct intel_guc *guc) goto err_log; GEM_BUG_ON(!guc->ads_vma); + if (HAS_GUC_CT(dev_priv)) { + ret = intel_guc_ct_init(&guc->ct); + if (ret) + goto err_ads; + } + /* We need to notify the guc whenever we change the GGTT */ i915_ggtt_enable_guc(dev_priv); return 0; +err_ads: + intel_guc_ads_destroy(guc); err_log: intel_guc_log_destroy(&guc->log); err_shared: @@ -222,6 +230,10 @@ void intel_guc_fini(struct intel_guc *guc) struct drm_i915_private *dev_priv = guc_to_i915(guc); i915_ggtt_disable_guc(dev_priv); + + if (HAS_GUC_CT(dev_priv)) + intel_guc_ct_fini(&guc->ct); + intel_guc_ads_destroy(guc); intel_guc_log_destroy(&guc->log); guc_shared_data_destroy(guc); @@ -357,14 +369,14 @@ void intel_guc_init_params(struct intel_guc *guc) * they are power context saved so it's ok to release forcewake * when we are done here and take it again at xfer time. 
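The new MI_SEMAPHORE_SAD_* values above select which comparison the command streamer applies between the dword fetched from the semaphore address and the inline semaphore data dword (bits 14:12 of the MI_SEMAPHORE_WAIT header). A stand-alone sketch of assembling such a header dword; MI_INSTR is reproduced here only so the example is self-contained:

#include <stdint.h>
#include <stdio.h>

#define MI_INSTR(opcode, flags)		(((opcode) << 23) | (flags))
#define MI_SEMAPHORE_WAIT		MI_INSTR(0x1c, 2)	/* GEN8+ */
#define MI_SEMAPHORE_POLL		(1 << 15)
#define MI_SEMAPHORE_SAD_GTE_SDD	(1 << 12)

int main(void)
{
	/* Poll until the value at the semaphore address is >= the inline dword. */
	uint32_t header = MI_SEMAPHORE_WAIT |
			  MI_SEMAPHORE_POLL |
			  MI_SEMAPHORE_SAD_GTE_SDD;

	printf("MI_SEMAPHORE_WAIT header: 0x%08x\n", header);
	return 0;
}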
*/ - intel_uncore_forcewake_get(dev_priv, FORCEWAKE_BLITTER); + intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_BLITTER); I915_WRITE(SOFT_SCRATCH(0), 0); for (i = 0; i < GUC_CTL_MAX_DWORDS; i++) I915_WRITE(SOFT_SCRATCH(1 + i), params[i]); - intel_uncore_forcewake_put(dev_priv, FORCEWAKE_BLITTER); + intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_BLITTER); } int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len, @@ -386,6 +398,7 @@ int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len, u32 *response_buf, u32 response_buf_size) { struct drm_i915_private *dev_priv = guc_to_i915(guc); + struct intel_uncore *uncore = &dev_priv->uncore; u32 status; int i; int ret; @@ -402,12 +415,12 @@ int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len, *action != INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER); mutex_lock(&guc->send_mutex); - intel_uncore_forcewake_get(dev_priv, guc->send_regs.fw_domains); + intel_uncore_forcewake_get(uncore, guc->send_regs.fw_domains); for (i = 0; i < len; i++) - I915_WRITE(guc_send_reg(guc, i), action[i]); + intel_uncore_write(uncore, guc_send_reg(guc, i), action[i]); - POSTING_READ(guc_send_reg(guc, i - 1)); + intel_uncore_posting_read(uncore, guc_send_reg(guc, i - 1)); intel_guc_notify(guc); @@ -415,7 +428,7 @@ int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len, * No GuC command should ever take longer than 10ms. * Fast commands should still complete in 10us. */ - ret = __intel_wait_for_register_fw(dev_priv, + ret = __intel_wait_for_register_fw(uncore, guc_send_reg(guc, 0), INTEL_GUC_MSG_TYPE_MASK, INTEL_GUC_MSG_TYPE_RESPONSE << @@ -442,7 +455,7 @@ int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len, ret = INTEL_GUC_MSG_TO_DATA(status); out: - intel_uncore_forcewake_put(dev_priv, guc->send_regs.fw_domains); + intel_uncore_forcewake_put(uncore, guc->send_regs.fw_domains); mutex_unlock(&guc->send_mutex); return ret; @@ -472,17 +485,25 @@ void intel_guc_to_host_event_handler_mmio(struct intel_guc *guc) spin_unlock(&guc->irq_lock); enable_rpm_wakeref_asserts(dev_priv); - intel_guc_to_host_process_recv_msg(guc, msg); + intel_guc_to_host_process_recv_msg(guc, &msg, 1); } -void intel_guc_to_host_process_recv_msg(struct intel_guc *guc, u32 msg) +int intel_guc_to_host_process_recv_msg(struct intel_guc *guc, + const u32 *payload, u32 len) { + u32 msg; + + if (unlikely(!len)) + return -EPROTO; + /* Make sure to handle only enabled messages */ - msg &= guc->msg_enabled_mask; + msg = payload[0] & guc->msg_enabled_mask; if (msg & (INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER | INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED)) intel_guc_log_handle_flush_event(&guc->log); + + return 0; } int intel_guc_sample_forcewake(struct intel_guc *guc) @@ -544,7 +565,7 @@ static int guc_sleep_state_action(struct intel_guc *guc, if (ret) return ret; - ret = __intel_wait_for_register(dev_priv, SOFT_SCRATCH(14), + ret = __intel_wait_for_register(&dev_priv->uncore, SOFT_SCRATCH(14), INTEL_GUC_SLEEP_STATE_INVALID_MASK, 0, 0, 10, &status); if (ret) diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h index 744220296653..2c59ff8d9f39 100644 --- a/drivers/gpu/drm/i915/intel_guc.h +++ b/drivers/gpu/drm/i915/intel_guc.h @@ -32,6 +32,7 @@ #include "intel_guc_log.h" #include "intel_guc_reg.h" #include "intel_uc_fw.h" +#include "i915_utils.h" #include "i915_vma.h" struct guc_preempt_work { @@ -164,7 +165,8 @@ int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 
len, void intel_guc_to_host_event_handler(struct intel_guc *guc); void intel_guc_to_host_event_handler_nop(struct intel_guc *guc); void intel_guc_to_host_event_handler_mmio(struct intel_guc *guc); -void intel_guc_to_host_process_recv_msg(struct intel_guc *guc, u32 msg); +int intel_guc_to_host_process_recv_msg(struct intel_guc *guc, + const u32 *payload, u32 len); int intel_guc_sample_forcewake(struct intel_guc *guc); int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset); int intel_guc_suspend(struct intel_guc *guc); diff --git a/drivers/gpu/drm/i915/intel_guc_ads.c b/drivers/gpu/drm/i915/intel_guc_ads.c index f0db62887f50..bec62f34b15a 100644 --- a/drivers/gpu/drm/i915/intel_guc_ads.c +++ b/drivers/gpu/drm/i915/intel_guc_ads.c @@ -121,8 +121,7 @@ int intel_guc_ads_create(struct intel_guc *guc) * to find it. Note that we have to skip our header (1 page), * because our GuC shared data is there. */ - kernel_ctx_vma = to_intel_context(dev_priv->kernel_context, - dev_priv->engine[RCS])->state; + kernel_ctx_vma = dev_priv->engine[RCS0]->kernel_context->state; blob->ads.golden_context_lrca = intel_guc_ggtt_offset(guc, kernel_ctx_vma) + skipped_offset; diff --git a/drivers/gpu/drm/i915/intel_guc_ct.c b/drivers/gpu/drm/i915/intel_guc_ct.c index a52883e9146f..dde1dc0d6e69 100644 --- a/drivers/gpu/drm/i915/intel_guc_ct.c +++ b/drivers/gpu/drm/i915/intel_guc_ct.c @@ -140,11 +140,6 @@ static int guc_action_deregister_ct_buffer(struct intel_guc *guc, return err; } -static bool ctch_is_open(struct intel_guc_ct_channel *ctch) -{ - return ctch->vma != NULL; -} - static int ctch_init(struct intel_guc *guc, struct intel_guc_ct_channel *ctch) { @@ -214,25 +209,21 @@ err_out: static void ctch_fini(struct intel_guc *guc, struct intel_guc_ct_channel *ctch) { + GEM_BUG_ON(ctch->enabled); + i915_vma_unpin_and_release(&ctch->vma, I915_VMA_RELEASE_MAP); } -static int ctch_open(struct intel_guc *guc, - struct intel_guc_ct_channel *ctch) +static int ctch_enable(struct intel_guc *guc, + struct intel_guc_ct_channel *ctch) { u32 base; int err; int i; - CT_DEBUG_DRIVER("CT: channel %d reopen=%s\n", - ctch->owner, yesno(ctch_is_open(ctch))); + GEM_BUG_ON(!ctch->vma); - if (!ctch->vma) { - err = ctch_init(guc, ctch); - if (unlikely(err)) - goto err_out; - GEM_BUG_ON(!ctch->vma); - } + GEM_BUG_ON(ctch->enabled); /* vma should be already allocated and map'ed */ base = intel_guc_ggtt_offset(guc, ctch->vma); @@ -255,7 +246,7 @@ static int ctch_open(struct intel_guc *guc, base + PAGE_SIZE/4 * CTB_RECV, INTEL_GUC_CT_BUFFER_TYPE_RECV); if (unlikely(err)) - goto err_fini; + goto err_out; err = guc_action_register_ct_buffer(guc, base + PAGE_SIZE/4 * CTB_SEND, @@ -263,23 +254,25 @@ static int ctch_open(struct intel_guc *guc, if (unlikely(err)) goto err_deregister; + ctch->enabled = true; + return 0; err_deregister: guc_action_deregister_ct_buffer(guc, ctch->owner, INTEL_GUC_CT_BUFFER_TYPE_RECV); -err_fini: - ctch_fini(guc, ctch); err_out: DRM_ERROR("CT: can't open channel %d; err=%d\n", ctch->owner, err); return err; } -static void ctch_close(struct intel_guc *guc, - struct intel_guc_ct_channel *ctch) +static void ctch_disable(struct intel_guc *guc, + struct intel_guc_ct_channel *ctch) { - GEM_BUG_ON(!ctch_is_open(ctch)); + GEM_BUG_ON(!ctch->enabled); + + ctch->enabled = false; guc_action_deregister_ct_buffer(guc, ctch->owner, @@ -287,7 +280,6 @@ static void ctch_close(struct intel_guc *guc, guc_action_deregister_ct_buffer(guc, ctch->owner, INTEL_GUC_CT_BUFFER_TYPE_RECV); - ctch_fini(guc, ctch); } static u32 
ctch_get_next_fence(struct intel_guc_ct_channel *ctch) @@ -481,7 +473,7 @@ static int ctch_send(struct intel_guc_ct *ct, u32 fence; int err; - GEM_BUG_ON(!ctch_is_open(ctch)); + GEM_BUG_ON(!ctch->enabled); GEM_BUG_ON(!len); GEM_BUG_ON(len & ~GUC_CT_MSG_LEN_MASK); GEM_BUG_ON(!response_buf && response_buf_size); @@ -709,14 +701,15 @@ static void ct_process_request(struct intel_guc_ct *ct, u32 action, u32 len, const u32 *payload) { struct intel_guc *guc = ct_to_guc(ct); + int ret; CT_DEBUG_DRIVER("CT: request %x %*ph\n", action, 4 * len, payload); switch (action) { case INTEL_GUC_ACTION_DEFAULT: - if (unlikely(len < 1)) + ret = intel_guc_to_host_process_recv_msg(guc, payload, len); + if (unlikely(ret)) goto fail_unexpected; - intel_guc_to_host_process_recv_msg(guc, *payload); break; default: @@ -817,7 +810,7 @@ static void ct_process_host_channel(struct intel_guc_ct *ct) u32 msg[GUC_CT_MSG_LEN_MASK + 1]; /* one extra dw for the header */ int err = 0; - if (!ctch_is_open(ctch)) + if (!ctch->enabled) return; do { @@ -849,6 +842,51 @@ static void intel_guc_to_host_event_handler_ct(struct intel_guc *guc) } /** + * intel_guc_ct_init - Init CT communication + * @ct: pointer to CT struct + * + * Allocate memory required for communication via + * the CT channel. + * + * Shall only be called for platforms with HAS_GUC_CT. + * + * Return: 0 on success, a negative errno code on failure. + */ +int intel_guc_ct_init(struct intel_guc_ct *ct) +{ + struct intel_guc *guc = ct_to_guc(ct); + struct intel_guc_ct_channel *ctch = &ct->host_channel; + int err; + + err = ctch_init(guc, ctch); + if (unlikely(err)) { + DRM_ERROR("CT: can't open channel %d; err=%d\n", + ctch->owner, err); + return err; + } + + GEM_BUG_ON(!ctch->vma); + return 0; +} + +/** + * intel_guc_ct_fini - Fini CT communication + * @ct: pointer to CT struct + * + * Deallocate memory required for communication via + * the CT channel. + * + * Shall only be called for platforms with HAS_GUC_CT. + */ +void intel_guc_ct_fini(struct intel_guc_ct *ct) +{ + struct intel_guc *guc = ct_to_guc(ct); + struct intel_guc_ct_channel *ctch = &ct->host_channel; + + ctch_fini(guc, ctch); +} + +/** * intel_guc_ct_enable - Enable buffer based command transport. * @ct: pointer to CT struct * @@ -865,7 +903,10 @@ int intel_guc_ct_enable(struct intel_guc_ct *ct) GEM_BUG_ON(!HAS_GUC_CT(i915)); - err = ctch_open(guc, ctch); + if (ctch->enabled) + return 0; + + err = ctch_enable(guc, ctch); if (unlikely(err)) return err; @@ -890,10 +931,10 @@ void intel_guc_ct_disable(struct intel_guc_ct *ct) GEM_BUG_ON(!HAS_GUC_CT(i915)); - if (!ctch_is_open(ctch)) + if (!ctch->enabled) return; - ctch_close(guc, ctch); + ctch_disable(guc, ctch); /* Disable send */ guc->send = intel_guc_send_nop; diff --git a/drivers/gpu/drm/i915/intel_guc_ct.h b/drivers/gpu/drm/i915/intel_guc_ct.h index d774895ab143..f5e7f0663304 100644 --- a/drivers/gpu/drm/i915/intel_guc_ct.h +++ b/drivers/gpu/drm/i915/intel_guc_ct.h @@ -66,6 +66,7 @@ struct intel_guc_ct_channel { struct intel_guc_ct_buffer ctbs[2]; u32 owner; u32 next_fence; + bool enabled; }; /** Holds all command transport channels. 
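The CT refactor above splits channel memory management (intel_guc_ct_init/intel_guc_ct_fini) from GuC registration (intel_guc_ct_enable/intel_guc_ct_disable), tracked by the new enabled flag. A toy model of that lifecycle with hypothetical names, shown only to make the intended call order explicit:

#include <stdbool.h>
#include <stdio.h>

struct toy_ctch {
	bool allocated;	/* played by ->vma in the real code */
	bool enabled;	/* the new explicit flag */
};

static void toy_ct_init(struct toy_ctch *ch)	{ ch->allocated = true; }
static void toy_ct_fini(struct toy_ctch *ch)	{ ch->allocated = false; }

static int toy_ct_enable(struct toy_ctch *ch)
{
	if (!ch->allocated)
		return -1;	/* must init first */
	ch->enabled = true;	/* register send/recv buffers with the GuC */
	return 0;
}

static void toy_ct_disable(struct toy_ctch *ch)
{
	ch->enabled = false;	/* deregister, but keep the memory around */
}

int main(void)
{
	struct toy_ctch ch = { 0 };

	toy_ct_init(&ch);	/* ~ intel_guc_ct_init() */
	toy_ct_enable(&ch);	/* ~ intel_guc_ct_enable() */
	printf("enabled: %d\n", ch.enabled);
	toy_ct_disable(&ch);	/* ~ intel_guc_ct_disable() */
	toy_ct_fini(&ch);	/* ~ intel_guc_ct_fini() */
	return 0;
}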
@@ -90,6 +91,8 @@ struct intel_guc_ct { }; void intel_guc_ct_init_early(struct intel_guc_ct *ct); +int intel_guc_ct_init(struct intel_guc_ct *ct); +void intel_guc_ct_fini(struct intel_guc_ct *ct); int intel_guc_ct_enable(struct intel_guc_ct *ct); void intel_guc_ct_disable(struct intel_guc_ct *ct); diff --git a/drivers/gpu/drm/i915/intel_guc_fw.c b/drivers/gpu/drm/i915/intel_guc_fw.c index 13ff7003c6be..792a551450c7 100644 --- a/drivers/gpu/drm/i915/intel_guc_fw.c +++ b/drivers/gpu/drm/i915/intel_guc_fw.c @@ -241,7 +241,7 @@ static int guc_fw_xfer(struct intel_uc_fw *guc_fw, struct i915_vma *vma) GEM_BUG_ON(guc_fw->type != INTEL_UC_FW_TYPE_GUC); - intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); guc_prepare_xfer(guc); @@ -254,7 +254,7 @@ static int guc_fw_xfer(struct intel_uc_fw *guc_fw, struct i915_vma *vma) ret = guc_xfer_ucode(guc, vma); - intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); return ret; } diff --git a/drivers/gpu/drm/i915/intel_guc_log.c b/drivers/gpu/drm/i915/intel_guc_log.c index 806fdfd7c78a..7146524264dd 100644 --- a/drivers/gpu/drm/i915/intel_guc_log.c +++ b/drivers/gpu/drm/i915/intel_guc_log.c @@ -620,7 +620,12 @@ void intel_guc_log_relay_flush(struct intel_guc_log *log) void intel_guc_log_relay_close(struct intel_guc_log *log) { + struct intel_guc *guc = log_to_guc(log); + struct drm_i915_private *i915 = guc_to_i915(guc); + guc_log_disable_flush_events(log); + synchronize_irq(i915->drm.irq); + flush_work(&log->relay.flush_work); mutex_lock(&log->relay.lock); diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c index 8bc8aa54aa35..c4ad73980988 100644 --- a/drivers/gpu/drm/i915/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/intel_guc_submission.c @@ -382,7 +382,7 @@ static void guc_stage_desc_init(struct intel_guc_client *client) desc->db_id = client->doorbell_id; for_each_engine_masked(engine, dev_priv, client->engines, tmp) { - struct intel_context *ce = to_intel_context(ctx, engine); + struct intel_context *ce = intel_context_lookup(ctx, engine); u32 guc_engine_id = engine->guc_id; struct guc_execlist_context *lrc = &desc->lrc[guc_engine_id]; @@ -393,7 +393,7 @@ static void guc_stage_desc_init(struct intel_guc_client *client) * for now who owns a GuC client. But for future owner of GuC * client, need to make sure lrc is pinned prior to enter here. */ - if (!ce->state) + if (!ce || !ce->state) break; /* XXX: continue? 
*/ /* @@ -535,7 +535,7 @@ static void guc_add_request(struct intel_guc *guc, struct i915_request *rq) spin_lock(&client->wq_lock); guc_wq_item_append(client, engine->guc_id, ctx_desc, - ring_tail, rq->global_seqno); + ring_tail, rq->fence.seqno); guc_ring_doorbell(client); client->submissions[engine->id] += 1; @@ -567,7 +567,7 @@ static void inject_preempt_context(struct work_struct *work) preempt_work[engine->id]); struct intel_guc_client *client = guc->preempt_client; struct guc_stage_desc *stage_desc = __get_stage_desc(client); - struct intel_context *ce = to_intel_context(client->owner, engine); + struct intel_context *ce = intel_context_lookup(client->owner, engine); u32 data[7]; if (!ce->ring->emit) { /* recreate upon load/resume */ @@ -575,7 +575,7 @@ static void inject_preempt_context(struct work_struct *work) u32 *cs; cs = ce->ring->vaddr; - if (engine->id == RCS) { + if (engine->class == RENDER_CLASS) { cs = gen8_emit_ggtt_write_rcs(cs, GUC_PREEMPT_FINISHED, addr, @@ -583,7 +583,8 @@ static void inject_preempt_context(struct work_struct *work) } else { cs = gen8_emit_ggtt_write(cs, GUC_PREEMPT_FINISHED, - addr); + addr, + 0); *cs++ = MI_NOOP; *cs++ = MI_NOOP; } @@ -720,7 +721,7 @@ static inline int rq_prio(const struct i915_request *rq) static inline int port_prio(const struct execlist_port *port) { - return rq_prio(port_request(port)); + return rq_prio(port_request(port)) | __NO_PREEMPTION; } static bool __guc_dequeue(struct intel_engine_cs *engine) @@ -781,8 +782,7 @@ static bool __guc_dequeue(struct intel_engine_cs *engine) } rb_erase_cached(&p->node, &execlists->queue); - if (p->priority != I915_PRIORITY_NORMAL) - kmem_cache_free(engine->i915->priorities, p); + i915_priolist_free(p); } done: execlists->queue_priority_hint = @@ -1031,7 +1031,7 @@ static int guc_clients_create(struct intel_guc *guc) GEM_BUG_ON(guc->preempt_client); client = guc_client_alloc(dev_priv, - INTEL_INFO(dev_priv)->ring_mask, + INTEL_INFO(dev_priv)->engine_mask, GUC_CLIENT_PRIORITY_KMD_NORMAL, dev_priv->kernel_context); if (IS_ERR(client)) { @@ -1042,7 +1042,7 @@ static int guc_clients_create(struct intel_guc *guc) if (dev_priv->preempt_context) { client = guc_client_alloc(dev_priv, - INTEL_INFO(dev_priv)->ring_mask, + INTEL_INFO(dev_priv)->engine_mask, GUC_CLIENT_PRIORITY_KMD_HIGH, dev_priv->preempt_context); if (IS_ERR(client)) { diff --git a/drivers/gpu/drm/i915/intel_hangcheck.c b/drivers/gpu/drm/i915/intel_hangcheck.c index a219c796e56d..59232df11ada 100644 --- a/drivers/gpu/drm/i915/intel_hangcheck.c +++ b/drivers/gpu/drm/i915/intel_hangcheck.c @@ -56,7 +56,7 @@ static bool subunits_stuck(struct intel_engine_cs *engine) int slice; int subslice; - if (engine->id != RCS) + if (engine->id != RCS0) return true; intel_engine_get_instdone(engine, &instdone); @@ -118,11 +118,11 @@ engine_stuck(struct intel_engine_cs *engine, u64 acthd) * and break the hang. This should work on * all but the second generation chipsets. 
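Hunks in this region move from ad-hoc BIT(engine->id) / ring_mask values to pre-computed engine->mask and engine_mask bitfields. A small sketch of walking the engines named by such a mask; the engine names follow the enum in intel_engine_types.h, while the mask value itself is made up:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	static const char *names[] = {
		"rcs0", "bcs0", "vcs0", "vcs1", "vcs2", "vcs3", "vecs0", "vecs1",
	};
	uint32_t engine_mask = 0x01 | 0x02 | 0x40;	/* rcs0, bcs0, vecs0 */

	for (unsigned int id = 0; engine_mask; engine_mask >>= 1, id++)
		if (engine_mask & 1)
			printf("engine %s present\n", names[id]);
	return 0;
}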
*/ - tmp = I915_READ_CTL(engine); + tmp = ENGINE_READ(engine, RING_CTL); if (tmp & RING_WAIT) { - i915_handle_error(dev_priv, BIT(engine->id), 0, + i915_handle_error(dev_priv, engine->mask, 0, "stuck wait on %s", engine->name); - I915_WRITE_CTL(engine, tmp); + ENGINE_WRITE(engine, RING_CTL, tmp); return ENGINE_WAIT_KICK; } @@ -133,21 +133,21 @@ static void hangcheck_load_sample(struct intel_engine_cs *engine, struct hangcheck *hc) { hc->acthd = intel_engine_get_active_head(engine); - hc->seqno = intel_engine_get_seqno(engine); + hc->seqno = intel_engine_get_hangcheck_seqno(engine); } static void hangcheck_store_sample(struct intel_engine_cs *engine, const struct hangcheck *hc) { engine->hangcheck.acthd = hc->acthd; - engine->hangcheck.seqno = hc->seqno; + engine->hangcheck.last_seqno = hc->seqno; } static enum intel_engine_hangcheck_action hangcheck_get_action(struct intel_engine_cs *engine, const struct hangcheck *hc) { - if (engine->hangcheck.seqno != hc->seqno) + if (engine->hangcheck.last_seqno != hc->seqno) return ENGINE_ACTIVE_SEQNO; if (intel_engine_is_idle(engine)) @@ -263,14 +263,14 @@ static void i915_hangcheck_elapsed(struct work_struct *work) if (!READ_ONCE(dev_priv->gt.awake)) return; - if (i915_terminally_wedged(&dev_priv->gpu_error)) + if (i915_terminally_wedged(dev_priv)) return; /* As enabling the GPU requires fairly extensive mmio access, * periodically arm the mmio checker to see if we are triggering * any invalid access. */ - intel_uncore_arm_unclaimed_mmio_detection(dev_priv); + intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore); for_each_engine(engine, dev_priv, id) { struct hangcheck hc; @@ -282,13 +282,13 @@ static void i915_hangcheck_elapsed(struct work_struct *work) hangcheck_store_sample(engine, &hc); if (hc.stalled) { - hung |= intel_engine_flag(engine); + hung |= engine->mask; if (hc.action != ENGINE_DEAD) - stuck |= intel_engine_flag(engine); + stuck |= engine->mask; } if (hc.wedged) - wedged |= intel_engine_flag(engine); + wedged |= engine->mask; } if (GEM_SHOW_DEBUG() && (hung | stuck)) { diff --git a/drivers/gpu/drm/i915/intel_hdcp.c b/drivers/gpu/drm/i915/intel_hdcp.c index ce7ba3a9c000..86965fa37739 100644 --- a/drivers/gpu/drm/i915/intel_hdcp.c +++ b/drivers/gpu/drm/i915/intel_hdcp.c @@ -7,14 +7,17 @@ */ #include <drm/drm_hdcp.h> +#include <drm/i915_component.h> #include <linux/i2c.h> #include <linux/random.h> +#include <linux/component.h> #include "intel_drv.h" #include "i915_reg.h" #define KEY_LOAD_TRIES 5 #define ENCRYPT_STATUS_CHANGE_TIMEOUT_MS 50 +#define HDCP2_LC_RETRY_CNT 3 static bool intel_hdcp_is_ksv_valid(u8 *ksv) @@ -72,6 +75,52 @@ bool intel_hdcp_capable(struct intel_connector *connector) return capable; } +/* Is HDCP2.2 capable on Platform and Sink */ +static bool intel_hdcp2_capable(struct intel_connector *connector) +{ + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector); + struct intel_hdcp *hdcp = &connector->hdcp; + bool capable = false; + + /* I915 support for HDCP2.2 */ + if (!hdcp->hdcp2_supported) + return false; + + /* MEI interface is solid */ + mutex_lock(&dev_priv->hdcp_comp_mutex); + if (!dev_priv->hdcp_comp_added || !dev_priv->hdcp_master) { + mutex_unlock(&dev_priv->hdcp_comp_mutex); + return false; + } + mutex_unlock(&dev_priv->hdcp_comp_mutex); + + /* Sink's capability for HDCP2.2 */ + hdcp->shim->hdcp_2_2_capable(intel_dig_port, &capable); + + return capable; +} + +static inline bool intel_hdcp_in_use(struct intel_connector 
*connector) +{ + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + enum port port = connector->encoder->port; + u32 reg; + + reg = I915_READ(PORT_HDCP_STATUS(port)); + return reg & HDCP_STATUS_ENC; +} + +static inline bool intel_hdcp2_in_use(struct intel_connector *connector) +{ + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + enum port port = connector->encoder->port; + u32 reg; + + reg = I915_READ(HDCP2_STATUS_DDI(port)); + return reg & LINK_ENCRYPTION_STATUS; +} + static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *intel_dig_port, const struct intel_hdcp_shim *shim) { @@ -176,7 +225,7 @@ static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv) } /* Wait for the keys to load (500us) */ - ret = __intel_wait_for_register(dev_priv, HDCP_KEY_STATUS, + ret = __intel_wait_for_register(&dev_priv->uncore, HDCP_KEY_STATUS, HDCP_KEY_LOAD_DONE, HDCP_KEY_LOAD_DONE, 10, 1, &val); if (ret) @@ -194,7 +243,7 @@ static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv) static int intel_write_sha_text(struct drm_i915_private *dev_priv, u32 sha_text) { I915_WRITE(HDCP_SHA_TEXT, sha_text); - if (intel_wait_for_register(dev_priv, HDCP_REP_CTL, + if (intel_wait_for_register(&dev_priv->uncore, HDCP_REP_CTL, HDCP_SHA1_READY, HDCP_SHA1_READY, 1)) { DRM_ERROR("Timed out waiting for SHA1 ready\n"); return -ETIMEDOUT; @@ -425,7 +474,7 @@ int intel_hdcp_validate_v_prime(struct intel_digital_port *intel_dig_port, /* Tell the HW we're done with the hash and wait for it to ACK */ I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_COMPLETE_HASH); - if (intel_wait_for_register(dev_priv, HDCP_REP_CTL, + if (intel_wait_for_register(&dev_priv->uncore, HDCP_REP_CTL, HDCP_SHA1_COMPLETE, HDCP_SHA1_COMPLETE, 1)) { DRM_ERROR("Timed out waiting for SHA1 complete\n"); @@ -555,7 +604,7 @@ static int intel_hdcp_auth(struct intel_digital_port *intel_dig_port, I915_WRITE(PORT_HDCP_CONF(port), HDCP_CONF_CAPTURE_AN); /* Wait for An to be acquired */ - if (intel_wait_for_register(dev_priv, PORT_HDCP_STATUS(port), + if (intel_wait_for_register(&dev_priv->uncore, PORT_HDCP_STATUS(port), HDCP_STATUS_AN_READY, HDCP_STATUS_AN_READY, 1)) { DRM_ERROR("Timed out waiting for An\n"); @@ -636,7 +685,7 @@ static int intel_hdcp_auth(struct intel_digital_port *intel_dig_port, } /* Wait for encryption confirmation */ - if (intel_wait_for_register(dev_priv, PORT_HDCP_STATUS(port), + if (intel_wait_for_register(&dev_priv->uncore, PORT_HDCP_STATUS(port), HDCP_STATUS_ENC, HDCP_STATUS_ENC, ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) { DRM_ERROR("Timed out waiting for encryption\n"); @@ -666,8 +715,10 @@ static int _intel_hdcp_disable(struct intel_connector *connector) DRM_DEBUG_KMS("[%s:%d] HDCP is being disabled...\n", connector->base.name, connector->base.base.id); + hdcp->hdcp_encrypted = false; I915_WRITE(PORT_HDCP_CONF(port), 0); - if (intel_wait_for_register(dev_priv, PORT_HDCP_STATUS(port), ~0, 0, + if (intel_wait_for_register(&dev_priv->uncore, + PORT_HDCP_STATUS(port), ~0, 0, ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) { DRM_ERROR("Failed to disable HDCP, timeout clearing status\n"); return -ETIMEDOUT; @@ -711,8 +762,10 @@ static int _intel_hdcp_enable(struct intel_connector *connector) /* Incase of authentication failures, HDCP spec expects reauth. 
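Many hunks above convert intel_wait_for_register() callers to pass the new struct intel_uncore pointer; the underlying pattern is simply poll-until-masked-value-or-timeout. A generic sketch of that pattern, where read_reg() and wait_for_bits() are stand-ins rather than real i915 interfaces, and the real helper sleeps or busy-waits with a proper timeout between polls:

#include <stdint.h>
#include <stdio.h>

static uint32_t read_reg(void)
{
	static uint32_t v;	/* fake hardware: the bit turns up eventually */
	return v += 0x10;
}

static int wait_for_bits(uint32_t mask, uint32_t value, unsigned int retries)
{
	do {
		if ((read_reg() & mask) == value)
			return 0;
		/* the driver sleeps/udelays here between polls */
	} while (retries--);
	return -1;	/* -ETIMEDOUT in the driver */
}

int main(void)
{
	printf("%s\n", wait_for_bits(0x80, 0x80, 50) ? "timed out" : "bit set");
	return 0;
}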
*/ for (i = 0; i < tries; i++) { ret = intel_hdcp_auth(conn_to_dig_port(connector), hdcp->shim); - if (!ret) + if (!ret) { + hdcp->hdcp_encrypted = true; return 0; + } DRM_DEBUG_KMS("HDCP Auth failure (%d)\n", ret); @@ -730,16 +783,64 @@ struct intel_connector *intel_hdcp_to_connector(struct intel_hdcp *hdcp) return container_of(hdcp, struct intel_connector, hdcp); } -static void intel_hdcp_check_work(struct work_struct *work) +/* Implements Part 3 of the HDCP authorization procedure */ +static int intel_hdcp_check_link(struct intel_connector *connector) { - struct intel_hdcp *hdcp = container_of(to_delayed_work(work), - struct intel_hdcp, - check_work); - struct intel_connector *connector = intel_hdcp_to_connector(hdcp); + struct intel_hdcp *hdcp = &connector->hdcp; + struct drm_i915_private *dev_priv = connector->base.dev->dev_private; + struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector); + enum port port = intel_dig_port->base.port; + int ret = 0; - if (!intel_hdcp_check_link(connector)) - schedule_delayed_work(&hdcp->check_work, - DRM_HDCP_CHECK_PERIOD_MS); + mutex_lock(&hdcp->mutex); + + /* Check_link valid only when HDCP1.4 is enabled */ + if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED || + !hdcp->hdcp_encrypted) { + ret = -EINVAL; + goto out; + } + + if (WARN_ON(!intel_hdcp_in_use(connector))) { + DRM_ERROR("%s:%d HDCP link stopped encryption,%x\n", + connector->base.name, connector->base.base.id, + I915_READ(PORT_HDCP_STATUS(port))); + ret = -ENXIO; + hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED; + schedule_work(&hdcp->prop_work); + goto out; + } + + if (hdcp->shim->check_link(intel_dig_port)) { + if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) { + hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED; + schedule_work(&hdcp->prop_work); + } + goto out; + } + + DRM_DEBUG_KMS("[%s:%d] HDCP link failed, retrying authentication\n", + connector->base.name, connector->base.base.id); + + ret = _intel_hdcp_disable(connector); + if (ret) { + DRM_ERROR("Failed to disable hdcp (%d)\n", ret); + hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED; + schedule_work(&hdcp->prop_work); + goto out; + } + + ret = _intel_hdcp_enable(connector); + if (ret) { + DRM_ERROR("Failed to enable hdcp (%d)\n", ret); + hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED; + schedule_work(&hdcp->prop_work); + goto out; + } + +out: + mutex_unlock(&hdcp->mutex); + return ret; } static void intel_hdcp_prop_work(struct work_struct *work) @@ -773,14 +874,929 @@ bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port) return INTEL_GEN(dev_priv) >= 9 && port < PORT_E; } +static int +hdcp2_prepare_ake_init(struct intel_connector *connector, + struct hdcp2_ake_init *ake_data) +{ + struct hdcp_port_data *data = &connector->hdcp.port_data; + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct i915_hdcp_comp_master *comp; + int ret; + + mutex_lock(&dev_priv->hdcp_comp_mutex); + comp = dev_priv->hdcp_master; + + if (!comp || !comp->ops) { + mutex_unlock(&dev_priv->hdcp_comp_mutex); + return -EINVAL; + } + + ret = comp->ops->initiate_hdcp2_session(comp->mei_dev, data, ake_data); + if (ret) + DRM_DEBUG_KMS("Prepare_ake_init failed. 
%d\n", ret); + mutex_unlock(&dev_priv->hdcp_comp_mutex); + + return ret; +} + +static int +hdcp2_verify_rx_cert_prepare_km(struct intel_connector *connector, + struct hdcp2_ake_send_cert *rx_cert, + bool *paired, + struct hdcp2_ake_no_stored_km *ek_pub_km, + size_t *msg_sz) +{ + struct hdcp_port_data *data = &connector->hdcp.port_data; + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct i915_hdcp_comp_master *comp; + int ret; + + mutex_lock(&dev_priv->hdcp_comp_mutex); + comp = dev_priv->hdcp_master; + + if (!comp || !comp->ops) { + mutex_unlock(&dev_priv->hdcp_comp_mutex); + return -EINVAL; + } + + ret = comp->ops->verify_receiver_cert_prepare_km(comp->mei_dev, data, + rx_cert, paired, + ek_pub_km, msg_sz); + if (ret < 0) + DRM_DEBUG_KMS("Verify rx_cert failed. %d\n", ret); + mutex_unlock(&dev_priv->hdcp_comp_mutex); + + return ret; +} + +static int hdcp2_verify_hprime(struct intel_connector *connector, + struct hdcp2_ake_send_hprime *rx_hprime) +{ + struct hdcp_port_data *data = &connector->hdcp.port_data; + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct i915_hdcp_comp_master *comp; + int ret; + + mutex_lock(&dev_priv->hdcp_comp_mutex); + comp = dev_priv->hdcp_master; + + if (!comp || !comp->ops) { + mutex_unlock(&dev_priv->hdcp_comp_mutex); + return -EINVAL; + } + + ret = comp->ops->verify_hprime(comp->mei_dev, data, rx_hprime); + if (ret < 0) + DRM_DEBUG_KMS("Verify hprime failed. %d\n", ret); + mutex_unlock(&dev_priv->hdcp_comp_mutex); + + return ret; +} + +static int +hdcp2_store_pairing_info(struct intel_connector *connector, + struct hdcp2_ake_send_pairing_info *pairing_info) +{ + struct hdcp_port_data *data = &connector->hdcp.port_data; + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct i915_hdcp_comp_master *comp; + int ret; + + mutex_lock(&dev_priv->hdcp_comp_mutex); + comp = dev_priv->hdcp_master; + + if (!comp || !comp->ops) { + mutex_unlock(&dev_priv->hdcp_comp_mutex); + return -EINVAL; + } + + ret = comp->ops->store_pairing_info(comp->mei_dev, data, pairing_info); + if (ret < 0) + DRM_DEBUG_KMS("Store pairing info failed. %d\n", ret); + mutex_unlock(&dev_priv->hdcp_comp_mutex); + + return ret; +} + +static int +hdcp2_prepare_lc_init(struct intel_connector *connector, + struct hdcp2_lc_init *lc_init) +{ + struct hdcp_port_data *data = &connector->hdcp.port_data; + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct i915_hdcp_comp_master *comp; + int ret; + + mutex_lock(&dev_priv->hdcp_comp_mutex); + comp = dev_priv->hdcp_master; + + if (!comp || !comp->ops) { + mutex_unlock(&dev_priv->hdcp_comp_mutex); + return -EINVAL; + } + + ret = comp->ops->initiate_locality_check(comp->mei_dev, data, lc_init); + if (ret < 0) + DRM_DEBUG_KMS("Prepare lc_init failed. %d\n", ret); + mutex_unlock(&dev_priv->hdcp_comp_mutex); + + return ret; +} + +static int +hdcp2_verify_lprime(struct intel_connector *connector, + struct hdcp2_lc_send_lprime *rx_lprime) +{ + struct hdcp_port_data *data = &connector->hdcp.port_data; + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct i915_hdcp_comp_master *comp; + int ret; + + mutex_lock(&dev_priv->hdcp_comp_mutex); + comp = dev_priv->hdcp_master; + + if (!comp || !comp->ops) { + mutex_unlock(&dev_priv->hdcp_comp_mutex); + return -EINVAL; + } + + ret = comp->ops->verify_lprime(comp->mei_dev, data, rx_lprime); + if (ret < 0) + DRM_DEBUG_KMS("Verify L_Prime failed. 
%d\n", ret); + mutex_unlock(&dev_priv->hdcp_comp_mutex); + + return ret; +} + +static int hdcp2_prepare_skey(struct intel_connector *connector, + struct hdcp2_ske_send_eks *ske_data) +{ + struct hdcp_port_data *data = &connector->hdcp.port_data; + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct i915_hdcp_comp_master *comp; + int ret; + + mutex_lock(&dev_priv->hdcp_comp_mutex); + comp = dev_priv->hdcp_master; + + if (!comp || !comp->ops) { + mutex_unlock(&dev_priv->hdcp_comp_mutex); + return -EINVAL; + } + + ret = comp->ops->get_session_key(comp->mei_dev, data, ske_data); + if (ret < 0) + DRM_DEBUG_KMS("Get session key failed. %d\n", ret); + mutex_unlock(&dev_priv->hdcp_comp_mutex); + + return ret; +} + +static int +hdcp2_verify_rep_topology_prepare_ack(struct intel_connector *connector, + struct hdcp2_rep_send_receiverid_list + *rep_topology, + struct hdcp2_rep_send_ack *rep_send_ack) +{ + struct hdcp_port_data *data = &connector->hdcp.port_data; + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct i915_hdcp_comp_master *comp; + int ret; + + mutex_lock(&dev_priv->hdcp_comp_mutex); + comp = dev_priv->hdcp_master; + + if (!comp || !comp->ops) { + mutex_unlock(&dev_priv->hdcp_comp_mutex); + return -EINVAL; + } + + ret = comp->ops->repeater_check_flow_prepare_ack(comp->mei_dev, data, + rep_topology, + rep_send_ack); + if (ret < 0) + DRM_DEBUG_KMS("Verify rep topology failed. %d\n", ret); + mutex_unlock(&dev_priv->hdcp_comp_mutex); + + return ret; +} + +static int +hdcp2_verify_mprime(struct intel_connector *connector, + struct hdcp2_rep_stream_ready *stream_ready) +{ + struct hdcp_port_data *data = &connector->hdcp.port_data; + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct i915_hdcp_comp_master *comp; + int ret; + + mutex_lock(&dev_priv->hdcp_comp_mutex); + comp = dev_priv->hdcp_master; + + if (!comp || !comp->ops) { + mutex_unlock(&dev_priv->hdcp_comp_mutex); + return -EINVAL; + } + + ret = comp->ops->verify_mprime(comp->mei_dev, data, stream_ready); + if (ret < 0) + DRM_DEBUG_KMS("Verify mprime failed. %d\n", ret); + mutex_unlock(&dev_priv->hdcp_comp_mutex); + + return ret; +} + +static int hdcp2_authenticate_port(struct intel_connector *connector) +{ + struct hdcp_port_data *data = &connector->hdcp.port_data; + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct i915_hdcp_comp_master *comp; + int ret; + + mutex_lock(&dev_priv->hdcp_comp_mutex); + comp = dev_priv->hdcp_master; + + if (!comp || !comp->ops) { + mutex_unlock(&dev_priv->hdcp_comp_mutex); + return -EINVAL; + } + + ret = comp->ops->enable_hdcp_authentication(comp->mei_dev, data); + if (ret < 0) + DRM_DEBUG_KMS("Enable hdcp auth failed. 
%d\n", ret); + mutex_unlock(&dev_priv->hdcp_comp_mutex); + + return ret; +} + +static int hdcp2_close_mei_session(struct intel_connector *connector) +{ + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct i915_hdcp_comp_master *comp; + int ret; + + mutex_lock(&dev_priv->hdcp_comp_mutex); + comp = dev_priv->hdcp_master; + + if (!comp || !comp->ops) { + mutex_unlock(&dev_priv->hdcp_comp_mutex); + return -EINVAL; + } + + ret = comp->ops->close_hdcp_session(comp->mei_dev, + &connector->hdcp.port_data); + mutex_unlock(&dev_priv->hdcp_comp_mutex); + + return ret; +} + +static int hdcp2_deauthenticate_port(struct intel_connector *connector) +{ + return hdcp2_close_mei_session(connector); +} + +/* Authentication flow starts from here */ +static int hdcp2_authentication_key_exchange(struct intel_connector *connector) +{ + struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector); + struct intel_hdcp *hdcp = &connector->hdcp; + union { + struct hdcp2_ake_init ake_init; + struct hdcp2_ake_send_cert send_cert; + struct hdcp2_ake_no_stored_km no_stored_km; + struct hdcp2_ake_send_hprime send_hprime; + struct hdcp2_ake_send_pairing_info pairing_info; + } msgs; + const struct intel_hdcp_shim *shim = hdcp->shim; + size_t size; + int ret; + + /* Init for seq_num */ + hdcp->seq_num_v = 0; + hdcp->seq_num_m = 0; + + ret = hdcp2_prepare_ake_init(connector, &msgs.ake_init); + if (ret < 0) + return ret; + + ret = shim->write_2_2_msg(intel_dig_port, &msgs.ake_init, + sizeof(msgs.ake_init)); + if (ret < 0) + return ret; + + ret = shim->read_2_2_msg(intel_dig_port, HDCP_2_2_AKE_SEND_CERT, + &msgs.send_cert, sizeof(msgs.send_cert)); + if (ret < 0) + return ret; + + if (msgs.send_cert.rx_caps[0] != HDCP_2_2_RX_CAPS_VERSION_VAL) + return -EINVAL; + + hdcp->is_repeater = HDCP_2_2_RX_REPEATER(msgs.send_cert.rx_caps[2]); + + /* + * Here msgs.no_stored_km will hold msgs corresponding to the km + * stored also. 
+ */ + ret = hdcp2_verify_rx_cert_prepare_km(connector, &msgs.send_cert, + &hdcp->is_paired, + &msgs.no_stored_km, &size); + if (ret < 0) + return ret; + + ret = shim->write_2_2_msg(intel_dig_port, &msgs.no_stored_km, size); + if (ret < 0) + return ret; + + ret = shim->read_2_2_msg(intel_dig_port, HDCP_2_2_AKE_SEND_HPRIME, + &msgs.send_hprime, sizeof(msgs.send_hprime)); + if (ret < 0) + return ret; + + ret = hdcp2_verify_hprime(connector, &msgs.send_hprime); + if (ret < 0) + return ret; + + if (!hdcp->is_paired) { + /* Pairing is required */ + ret = shim->read_2_2_msg(intel_dig_port, + HDCP_2_2_AKE_SEND_PAIRING_INFO, + &msgs.pairing_info, + sizeof(msgs.pairing_info)); + if (ret < 0) + return ret; + + ret = hdcp2_store_pairing_info(connector, &msgs.pairing_info); + if (ret < 0) + return ret; + hdcp->is_paired = true; + } + + return 0; +} + +static int hdcp2_locality_check(struct intel_connector *connector) +{ + struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector); + struct intel_hdcp *hdcp = &connector->hdcp; + union { + struct hdcp2_lc_init lc_init; + struct hdcp2_lc_send_lprime send_lprime; + } msgs; + const struct intel_hdcp_shim *shim = hdcp->shim; + int tries = HDCP2_LC_RETRY_CNT, ret, i; + + for (i = 0; i < tries; i++) { + ret = hdcp2_prepare_lc_init(connector, &msgs.lc_init); + if (ret < 0) + continue; + + ret = shim->write_2_2_msg(intel_dig_port, &msgs.lc_init, + sizeof(msgs.lc_init)); + if (ret < 0) + continue; + + ret = shim->read_2_2_msg(intel_dig_port, + HDCP_2_2_LC_SEND_LPRIME, + &msgs.send_lprime, + sizeof(msgs.send_lprime)); + if (ret < 0) + continue; + + ret = hdcp2_verify_lprime(connector, &msgs.send_lprime); + if (!ret) + break; + } + + return ret; +} + +static int hdcp2_session_key_exchange(struct intel_connector *connector) +{ + struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector); + struct intel_hdcp *hdcp = &connector->hdcp; + struct hdcp2_ske_send_eks send_eks; + int ret; + + ret = hdcp2_prepare_skey(connector, &send_eks); + if (ret < 0) + return ret; + + ret = hdcp->shim->write_2_2_msg(intel_dig_port, &send_eks, + sizeof(send_eks)); + if (ret < 0) + return ret; + + return 0; +} + +static +int hdcp2_propagate_stream_management_info(struct intel_connector *connector) +{ + struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector); + struct intel_hdcp *hdcp = &connector->hdcp; + union { + struct hdcp2_rep_stream_manage stream_manage; + struct hdcp2_rep_stream_ready stream_ready; + } msgs; + const struct intel_hdcp_shim *shim = hdcp->shim; + int ret; + + /* Prepare RepeaterAuth_Stream_Manage msg */ + msgs.stream_manage.msg_id = HDCP_2_2_REP_STREAM_MANAGE; + drm_hdcp2_u32_to_seq_num(msgs.stream_manage.seq_num_m, hdcp->seq_num_m); + + /* K no of streams is fixed as 1. Stored as big-endian. */ + msgs.stream_manage.k = cpu_to_be16(1); + + /* For HDMI this is forced to be 0x0. For DP SST also this is 0x0. 
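Only a single SST/HDMI stream (k = 1) is carried here, so a fixed stream_id of 0 is enough.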
*/ + msgs.stream_manage.streams[0].stream_id = 0; + msgs.stream_manage.streams[0].stream_type = hdcp->content_type; + + /* Send it to Repeater */ + ret = shim->write_2_2_msg(intel_dig_port, &msgs.stream_manage, + sizeof(msgs.stream_manage)); + if (ret < 0) + return ret; + + ret = shim->read_2_2_msg(intel_dig_port, HDCP_2_2_REP_STREAM_READY, + &msgs.stream_ready, sizeof(msgs.stream_ready)); + if (ret < 0) + return ret; + + hdcp->port_data.seq_num_m = hdcp->seq_num_m; + hdcp->port_data.streams[0].stream_type = hdcp->content_type; + + ret = hdcp2_verify_mprime(connector, &msgs.stream_ready); + if (ret < 0) + return ret; + + hdcp->seq_num_m++; + + if (hdcp->seq_num_m > HDCP_2_2_SEQ_NUM_MAX) { + DRM_DEBUG_KMS("seq_num_m roll over.\n"); + return -1; + } + + return 0; +} + +static +int hdcp2_authenticate_repeater_topology(struct intel_connector *connector) +{ + struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector); + struct intel_hdcp *hdcp = &connector->hdcp; + union { + struct hdcp2_rep_send_receiverid_list recvid_list; + struct hdcp2_rep_send_ack rep_ack; + } msgs; + const struct intel_hdcp_shim *shim = hdcp->shim; + u8 *rx_info; + u32 seq_num_v; + int ret; + + ret = shim->read_2_2_msg(intel_dig_port, HDCP_2_2_REP_SEND_RECVID_LIST, + &msgs.recvid_list, sizeof(msgs.recvid_list)); + if (ret < 0) + return ret; + + rx_info = msgs.recvid_list.rx_info; + + if (HDCP_2_2_MAX_CASCADE_EXCEEDED(rx_info[1]) || + HDCP_2_2_MAX_DEVS_EXCEEDED(rx_info[1])) { + DRM_DEBUG_KMS("Topology Max Size Exceeded\n"); + return -EINVAL; + } + + /* Converting and Storing the seq_num_v to local variable as DWORD */ + seq_num_v = drm_hdcp2_seq_num_to_u32(msgs.recvid_list.seq_num_v); + + if (seq_num_v < hdcp->seq_num_v) { + /* Roll over of the seq_num_v from repeater. Reauthenticate. */ + DRM_DEBUG_KMS("Seq_num_v roll over.\n"); + return -EINVAL; + } + + ret = hdcp2_verify_rep_topology_prepare_ack(connector, + &msgs.recvid_list, + &msgs.rep_ack); + if (ret < 0) + return ret; + + hdcp->seq_num_v = seq_num_v; + ret = shim->write_2_2_msg(intel_dig_port, &msgs.rep_ack, + sizeof(msgs.rep_ack)); + if (ret < 0) + return ret; + + return 0; +} + +static int hdcp2_authenticate_repeater(struct intel_connector *connector) +{ + int ret; + + ret = hdcp2_authenticate_repeater_topology(connector); + if (ret < 0) + return ret; + + return hdcp2_propagate_stream_management_info(connector); +} + +static int hdcp2_authenticate_sink(struct intel_connector *connector) +{ + struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector); + struct intel_hdcp *hdcp = &connector->hdcp; + const struct intel_hdcp_shim *shim = hdcp->shim; + int ret; + + ret = hdcp2_authentication_key_exchange(connector); + if (ret < 0) { + DRM_DEBUG_KMS("AKE Failed. Err : %d\n", ret); + return ret; + } + + ret = hdcp2_locality_check(connector); + if (ret < 0) { + DRM_DEBUG_KMS("Locality Check failed. Err : %d\n", ret); + return ret; + } + + ret = hdcp2_session_key_exchange(connector); + if (ret < 0) { + DRM_DEBUG_KMS("SKE Failed. Err : %d\n", ret); + return ret; + } + + if (shim->config_stream_type) { + ret = shim->config_stream_type(intel_dig_port, + hdcp->is_repeater, + hdcp->content_type); + if (ret < 0) + return ret; + } + + if (hdcp->is_repeater) { + ret = hdcp2_authenticate_repeater(connector); + if (ret < 0) { + DRM_DEBUG_KMS("Repeater Auth Failed. 
Err: %d\n", ret); + return ret; + } + } + + hdcp->port_data.streams[0].stream_type = hdcp->content_type; + ret = hdcp2_authenticate_port(connector); + if (ret < 0) + return ret; + + return ret; +} + +static int hdcp2_enable_encryption(struct intel_connector *connector) +{ + struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector); + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct intel_hdcp *hdcp = &connector->hdcp; + enum port port = connector->encoder->port; + int ret; + + WARN_ON(I915_READ(HDCP2_STATUS_DDI(port)) & LINK_ENCRYPTION_STATUS); + + if (hdcp->shim->toggle_signalling) { + ret = hdcp->shim->toggle_signalling(intel_dig_port, true); + if (ret) { + DRM_ERROR("Failed to enable HDCP signalling. %d\n", + ret); + return ret; + } + } + + if (I915_READ(HDCP2_STATUS_DDI(port)) & LINK_AUTH_STATUS) { + /* Link is Authenticated. Now set for Encryption */ + I915_WRITE(HDCP2_CTL_DDI(port), + I915_READ(HDCP2_CTL_DDI(port)) | + CTL_LINK_ENCRYPTION_REQ); + } + + ret = intel_wait_for_register(&dev_priv->uncore, HDCP2_STATUS_DDI(port), + LINK_ENCRYPTION_STATUS, + LINK_ENCRYPTION_STATUS, + ENCRYPT_STATUS_CHANGE_TIMEOUT_MS); + + return ret; +} + +static int hdcp2_disable_encryption(struct intel_connector *connector) +{ + struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector); + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct intel_hdcp *hdcp = &connector->hdcp; + enum port port = connector->encoder->port; + int ret; + + WARN_ON(!(I915_READ(HDCP2_STATUS_DDI(port)) & LINK_ENCRYPTION_STATUS)); + + I915_WRITE(HDCP2_CTL_DDI(port), + I915_READ(HDCP2_CTL_DDI(port)) & ~CTL_LINK_ENCRYPTION_REQ); + + ret = intel_wait_for_register(&dev_priv->uncore, HDCP2_STATUS_DDI(port), + LINK_ENCRYPTION_STATUS, 0x0, + ENCRYPT_STATUS_CHANGE_TIMEOUT_MS); + if (ret == -ETIMEDOUT) + DRM_DEBUG_KMS("Disable Encryption Timedout"); + + if (hdcp->shim->toggle_signalling) { + ret = hdcp->shim->toggle_signalling(intel_dig_port, false); + if (ret) { + DRM_ERROR("Failed to disable HDCP signalling. %d\n", + ret); + return ret; + } + } + + return ret; +} + +static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector) +{ + int ret, i, tries = 3; + + for (i = 0; i < tries; i++) { + ret = hdcp2_authenticate_sink(connector); + if (!ret) + break; + + /* Clearing the mei hdcp session */ + DRM_DEBUG_KMS("HDCP2.2 Auth %d of %d Failed.(%d)\n", + i + 1, tries, ret); + if (hdcp2_deauthenticate_port(connector) < 0) + DRM_DEBUG_KMS("Port deauth failed.\n"); + } + + if (i != tries) { + /* + * Ensuring the required 200mSec min time interval between + * Session Key Exchange and encryption. + */ + msleep(HDCP_2_2_DELAY_BEFORE_ENCRYPTION_EN); + ret = hdcp2_enable_encryption(connector); + if (ret < 0) { + DRM_DEBUG_KMS("Encryption Enable Failed.(%d)\n", ret); + if (hdcp2_deauthenticate_port(connector) < 0) + DRM_DEBUG_KMS("Port deauth failed.\n"); + } + } + + return ret; +} + +static int _intel_hdcp2_enable(struct intel_connector *connector) +{ + struct intel_hdcp *hdcp = &connector->hdcp; + int ret; + + DRM_DEBUG_KMS("[%s:%d] HDCP2.2 is being enabled. Type: %d\n", + connector->base.name, connector->base.base.id, + hdcp->content_type); + + ret = hdcp2_authenticate_and_encrypt(connector); + if (ret) { + DRM_DEBUG_KMS("HDCP2 Type%d Enabling Failed. (%d)\n", + hdcp->content_type, ret); + return ret; + } + + DRM_DEBUG_KMS("[%s:%d] HDCP2.2 is enabled. 
Type %d\n", + connector->base.name, connector->base.base.id, + hdcp->content_type); + + hdcp->hdcp2_encrypted = true; + return 0; +} + +static int _intel_hdcp2_disable(struct intel_connector *connector) +{ + int ret; + + DRM_DEBUG_KMS("[%s:%d] HDCP2.2 is being Disabled\n", + connector->base.name, connector->base.base.id); + + ret = hdcp2_disable_encryption(connector); + + if (hdcp2_deauthenticate_port(connector) < 0) + DRM_DEBUG_KMS("Port deauth failed.\n"); + + connector->hdcp.hdcp2_encrypted = false; + + return ret; +} + +/* Implements the Link Integrity Check for HDCP2.2 */ +static int intel_hdcp2_check_link(struct intel_connector *connector) +{ + struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector); + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct intel_hdcp *hdcp = &connector->hdcp; + enum port port = connector->encoder->port; + int ret = 0; + + mutex_lock(&hdcp->mutex); + + /* hdcp2_check_link is expected only when HDCP2.2 is Enabled */ + if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED || + !hdcp->hdcp2_encrypted) { + ret = -EINVAL; + goto out; + } + + if (WARN_ON(!intel_hdcp2_in_use(connector))) { + DRM_ERROR("HDCP2.2 link stopped the encryption, %x\n", + I915_READ(HDCP2_STATUS_DDI(port))); + ret = -ENXIO; + hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED; + schedule_work(&hdcp->prop_work); + goto out; + } + + ret = hdcp->shim->check_2_2_link(intel_dig_port); + if (ret == HDCP_LINK_PROTECTED) { + if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) { + hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED; + schedule_work(&hdcp->prop_work); + } + goto out; + } + + if (ret == HDCP_TOPOLOGY_CHANGE) { + if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED) + goto out; + + DRM_DEBUG_KMS("HDCP2.2 Downstream topology change\n"); + ret = hdcp2_authenticate_repeater_topology(connector); + if (!ret) { + hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED; + schedule_work(&hdcp->prop_work); + goto out; + } + DRM_DEBUG_KMS("[%s:%d] Repeater topology auth failed.(%d)\n", + connector->base.name, connector->base.base.id, + ret); + } else { + DRM_DEBUG_KMS("[%s:%d] HDCP2.2 link failed, retrying auth\n", + connector->base.name, connector->base.base.id); + } + + ret = _intel_hdcp2_disable(connector); + if (ret) { + DRM_ERROR("[%s:%d] Failed to disable hdcp2.2 (%d)\n", + connector->base.name, connector->base.base.id, ret); + hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED; + schedule_work(&hdcp->prop_work); + goto out; + } + + ret = _intel_hdcp2_enable(connector); + if (ret) { + DRM_DEBUG_KMS("[%s:%d] Failed to enable hdcp2.2 (%d)\n", + connector->base.name, connector->base.base.id, + ret); + hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED; + schedule_work(&hdcp->prop_work); + goto out; + } + +out: + mutex_unlock(&hdcp->mutex); + return ret; +} + +static void intel_hdcp_check_work(struct work_struct *work) +{ + struct intel_hdcp *hdcp = container_of(to_delayed_work(work), + struct intel_hdcp, + check_work); + struct intel_connector *connector = intel_hdcp_to_connector(hdcp); + + if (!intel_hdcp2_check_link(connector)) + schedule_delayed_work(&hdcp->check_work, + DRM_HDCP2_CHECK_PERIOD_MS); + else if (!intel_hdcp_check_link(connector)) + schedule_delayed_work(&hdcp->check_work, + DRM_HDCP_CHECK_PERIOD_MS); +} + +static int i915_hdcp_component_bind(struct device *i915_kdev, + struct device *mei_kdev, void *data) +{ + struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev); + + DRM_DEBUG("I915 HDCP comp bind\n"); + 
mutex_lock(&dev_priv->hdcp_comp_mutex); + dev_priv->hdcp_master = (struct i915_hdcp_comp_master *)data; + dev_priv->hdcp_master->mei_dev = mei_kdev; + mutex_unlock(&dev_priv->hdcp_comp_mutex); + + return 0; +} + +static void i915_hdcp_component_unbind(struct device *i915_kdev, + struct device *mei_kdev, void *data) +{ + struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev); + + DRM_DEBUG("I915 HDCP comp unbind\n"); + mutex_lock(&dev_priv->hdcp_comp_mutex); + dev_priv->hdcp_master = NULL; + mutex_unlock(&dev_priv->hdcp_comp_mutex); +} + +static const struct component_ops i915_hdcp_component_ops = { + .bind = i915_hdcp_component_bind, + .unbind = i915_hdcp_component_unbind, +}; + +static inline int initialize_hdcp_port_data(struct intel_connector *connector) +{ + struct intel_hdcp *hdcp = &connector->hdcp; + struct hdcp_port_data *data = &hdcp->port_data; + + data->port = connector->encoder->port; + data->port_type = (u8)HDCP_PORT_TYPE_INTEGRATED; + data->protocol = (u8)hdcp->shim->protocol; + + data->k = 1; + if (!data->streams) + data->streams = kcalloc(data->k, + sizeof(struct hdcp2_streamid_type), + GFP_KERNEL); + if (!data->streams) { + DRM_ERROR("Out of Memory\n"); + return -ENOMEM; + } + + data->streams[0].stream_id = 0; + data->streams[0].stream_type = hdcp->content_type; + + return 0; +} + +static bool is_hdcp2_supported(struct drm_i915_private *dev_priv) +{ + if (!IS_ENABLED(CONFIG_INTEL_MEI_HDCP)) + return false; + + return (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv) || + IS_KABYLAKE(dev_priv)); +} + +void intel_hdcp_component_init(struct drm_i915_private *dev_priv) +{ + int ret; + + if (!is_hdcp2_supported(dev_priv)) + return; + + mutex_lock(&dev_priv->hdcp_comp_mutex); + WARN_ON(dev_priv->hdcp_comp_added); + + dev_priv->hdcp_comp_added = true; + mutex_unlock(&dev_priv->hdcp_comp_mutex); + ret = component_add_typed(dev_priv->drm.dev, &i915_hdcp_component_ops, + I915_COMPONENT_HDCP); + if (ret < 0) { + DRM_DEBUG_KMS("Failed at component add(%d)\n", ret); + mutex_lock(&dev_priv->hdcp_comp_mutex); + dev_priv->hdcp_comp_added = false; + mutex_unlock(&dev_priv->hdcp_comp_mutex); + return; + } +} + +static void intel_hdcp2_init(struct intel_connector *connector) +{ + struct intel_hdcp *hdcp = &connector->hdcp; + int ret; + + ret = initialize_hdcp_port_data(connector); + if (ret) { + DRM_DEBUG_KMS("Mei hdcp data init failed\n"); + return; + } + + hdcp->hdcp2_supported = true; +} + int intel_hdcp_init(struct intel_connector *connector, const struct intel_hdcp_shim *shim) { + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); struct intel_hdcp *hdcp = &connector->hdcp; int ret; - ret = drm_connector_attach_content_protection_property( - &connector->base); + if (!shim) + return -EINVAL; + + ret = drm_connector_attach_content_protection_property(&connector->base); if (ret) return ret; @@ -788,28 +1804,47 @@ int intel_hdcp_init(struct intel_connector *connector, mutex_init(&hdcp->mutex); INIT_DELAYED_WORK(&hdcp->check_work, intel_hdcp_check_work); INIT_WORK(&hdcp->prop_work, intel_hdcp_prop_work); + + if (is_hdcp2_supported(dev_priv)) + intel_hdcp2_init(connector); + init_waitqueue_head(&hdcp->cp_irq_queue); + return 0; } int intel_hdcp_enable(struct intel_connector *connector) { struct intel_hdcp *hdcp = &connector->hdcp; - int ret; + unsigned long check_link_interval = DRM_HDCP_CHECK_PERIOD_MS; + int ret = -EINVAL; if (!hdcp->shim) return -ENOENT; mutex_lock(&hdcp->mutex); + WARN_ON(hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED); - ret = 
_intel_hdcp_enable(connector); - if (ret) - goto out; + /* + * Considering that HDCP2.2 is more secure than HDCP1.4, If the setup + * is capable of HDCP2.2, it is preferred to use HDCP2.2. + */ + if (intel_hdcp2_capable(connector)) { + ret = _intel_hdcp2_enable(connector); + if (!ret) + check_link_interval = DRM_HDCP2_CHECK_PERIOD_MS; + } + + /* When HDCP2.2 fails, HDCP1.4 will be attempted */ + if (ret && intel_hdcp_capable(connector)) { + ret = _intel_hdcp_enable(connector); + } + + if (!ret) { + schedule_delayed_work(&hdcp->check_work, check_link_interval); + hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED; + schedule_work(&hdcp->prop_work); + } - hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED; - schedule_work(&hdcp->prop_work); - schedule_delayed_work(&hdcp->check_work, - DRM_HDCP_CHECK_PERIOD_MS); -out: mutex_unlock(&hdcp->mutex); return ret; } @@ -826,7 +1861,10 @@ int intel_hdcp_disable(struct intel_connector *connector) if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) { hdcp->value = DRM_MODE_CONTENT_PROTECTION_UNDESIRED; - ret = _intel_hdcp_disable(connector); + if (hdcp->hdcp2_encrypted) + ret = _intel_hdcp2_disable(connector); + else if (hdcp->hdcp_encrypted) + ret = _intel_hdcp_disable(connector); } mutex_unlock(&hdcp->mutex); @@ -834,6 +1872,30 @@ int intel_hdcp_disable(struct intel_connector *connector) return ret; } +void intel_hdcp_component_fini(struct drm_i915_private *dev_priv) +{ + mutex_lock(&dev_priv->hdcp_comp_mutex); + if (!dev_priv->hdcp_comp_added) { + mutex_unlock(&dev_priv->hdcp_comp_mutex); + return; + } + + dev_priv->hdcp_comp_added = false; + mutex_unlock(&dev_priv->hdcp_comp_mutex); + + component_del(dev_priv->drm.dev, &i915_hdcp_component_ops); +} + +void intel_hdcp_cleanup(struct intel_connector *connector) +{ + if (!connector->hdcp.shim) + return; + + mutex_lock(&connector->hdcp.mutex); + kfree(connector->hdcp.port_data.streams); + mutex_unlock(&connector->hdcp.mutex); +} + void intel_hdcp_atomic_check(struct drm_connector *connector, struct drm_connector_state *old_state, struct drm_connector_state *new_state) @@ -867,61 +1929,16 @@ void intel_hdcp_atomic_check(struct drm_connector *connector, crtc_state->mode_changed = true; } -/* Implements Part 3 of the HDCP authorization procedure */ -int intel_hdcp_check_link(struct intel_connector *connector) +/* Handles the CP_IRQ raised from the DP HDCP sink */ +void intel_hdcp_handle_cp_irq(struct intel_connector *connector) { struct intel_hdcp *hdcp = &connector->hdcp; - struct drm_i915_private *dev_priv = connector->base.dev->dev_private; - struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector); - enum port port = intel_dig_port->base.port; - int ret = 0; if (!hdcp->shim) - return -ENOENT; - - mutex_lock(&hdcp->mutex); - - if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED) - goto out; - - if (!(I915_READ(PORT_HDCP_STATUS(port)) & HDCP_STATUS_ENC)) { - DRM_ERROR("%s:%d HDCP check failed: link is not encrypted,%x\n", - connector->base.name, connector->base.base.id, - I915_READ(PORT_HDCP_STATUS(port))); - ret = -ENXIO; - hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED; - schedule_work(&hdcp->prop_work); - goto out; - } - - if (hdcp->shim->check_link(intel_dig_port)) { - if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) { - hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED; - schedule_work(&hdcp->prop_work); - } - goto out; - } - - DRM_DEBUG_KMS("[%s:%d] HDCP link failed, retrying authentication\n", - connector->base.name, connector->base.base.id); + return; - ret = 
_intel_hdcp_disable(connector); - if (ret) { - DRM_ERROR("Failed to disable hdcp (%d)\n", ret); - hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED; - schedule_work(&hdcp->prop_work); - goto out; - } + atomic_inc(&connector->hdcp.cp_irq_count); + wake_up_all(&connector->hdcp.cp_irq_queue); - ret = _intel_hdcp_enable(connector); - if (ret) { - DRM_DEBUG_KMS("Failed to enable hdcp (%d)\n", ret); - hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED; - schedule_work(&hdcp->prop_work); - goto out; - } - -out: - mutex_unlock(&hdcp->mutex); - return ret; + schedule_delayed_work(&hdcp->check_work, 0); } diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 765718b606d8..26767785f14a 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -82,6 +82,8 @@ static struct intel_hdmi *intel_attached_hdmi(struct drm_connector *connector) static u32 g4x_infoframe_index(unsigned int type) { switch (type) { + case HDMI_PACKET_TYPE_GAMUT_METADATA: + return VIDEO_DIP_SELECT_GAMUT; case HDMI_INFOFRAME_TYPE_AVI: return VIDEO_DIP_SELECT_AVI; case HDMI_INFOFRAME_TYPE_SPD: @@ -97,6 +99,12 @@ static u32 g4x_infoframe_index(unsigned int type) static u32 g4x_infoframe_enable(unsigned int type) { switch (type) { + case HDMI_PACKET_TYPE_GENERAL_CONTROL: + return VIDEO_DIP_ENABLE_GCP; + case HDMI_PACKET_TYPE_GAMUT_METADATA: + return VIDEO_DIP_ENABLE_GAMUT; + case DP_SDP_VSC: + return 0; case HDMI_INFOFRAME_TYPE_AVI: return VIDEO_DIP_ENABLE_AVI; case HDMI_INFOFRAME_TYPE_SPD: @@ -112,6 +120,10 @@ static u32 g4x_infoframe_enable(unsigned int type) static u32 hsw_infoframe_enable(unsigned int type) { switch (type) { + case HDMI_PACKET_TYPE_GENERAL_CONTROL: + return VIDEO_DIP_ENABLE_GCP_HSW; + case HDMI_PACKET_TYPE_GAMUT_METADATA: + return VIDEO_DIP_ENABLE_GMP_HSW; case DP_SDP_VSC: return VIDEO_DIP_ENABLE_VSC_HSW; case DP_SDP_PPS: @@ -135,6 +147,8 @@ hsw_dip_data_reg(struct drm_i915_private *dev_priv, int i) { switch (type) { + case HDMI_PACKET_TYPE_GAMUT_METADATA: + return HSW_TVIDEO_DIP_GMP_DATA(cpu_transcoder, i); case DP_SDP_VSC: return HSW_TVIDEO_DIP_VSC_DATA(cpu_transcoder, i); case DP_SDP_PPS: @@ -200,17 +214,37 @@ static void g4x_write_infoframe(struct intel_encoder *encoder, POSTING_READ(VIDEO_DIP_CTL); } -static bool g4x_infoframe_enabled(struct intel_encoder *encoder, +static void g4x_read_infoframe(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + unsigned int type, + void *frame, ssize_t len) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + u32 val, *data = frame; + int i; + + val = I915_READ(VIDEO_DIP_CTL); + + val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */ + val |= g4x_infoframe_index(type); + + I915_WRITE(VIDEO_DIP_CTL, val); + + for (i = 0; i < len; i += 4) + *data++ = I915_READ(VIDEO_DIP_DATA); +} + +static u32 g4x_infoframes_enabled(struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); u32 val = I915_READ(VIDEO_DIP_CTL); if ((val & VIDEO_DIP_ENABLE) == 0) - return false; + return 0; if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(encoder->port)) - return false; + return 0; return val & (VIDEO_DIP_ENABLE_AVI | VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_SPD); @@ -255,7 +289,28 @@ static void ibx_write_infoframe(struct intel_encoder *encoder, POSTING_READ(reg); } -static bool ibx_infoframe_enabled(struct intel_encoder *encoder, +static void ibx_read_infoframe(struct intel_encoder 
*encoder, + const struct intel_crtc_state *crtc_state, + unsigned int type, + void *frame, ssize_t len) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + u32 val, *data = frame; + int i; + + val = I915_READ(TVIDEO_DIP_CTL(crtc->pipe)); + + val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */ + val |= g4x_infoframe_index(type); + + I915_WRITE(TVIDEO_DIP_CTL(crtc->pipe), val); + + for (i = 0; i < len; i += 4) + *data++ = I915_READ(TVIDEO_DIP_DATA(crtc->pipe)); +} + +static u32 ibx_infoframes_enabled(struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); @@ -264,10 +319,10 @@ static bool ibx_infoframe_enabled(struct intel_encoder *encoder, u32 val = I915_READ(reg); if ((val & VIDEO_DIP_ENABLE) == 0) - return false; + return 0; if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(encoder->port)) - return false; + return 0; return val & (VIDEO_DIP_ENABLE_AVI | VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT | @@ -316,7 +371,28 @@ static void cpt_write_infoframe(struct intel_encoder *encoder, POSTING_READ(reg); } -static bool cpt_infoframe_enabled(struct intel_encoder *encoder, +static void cpt_read_infoframe(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + unsigned int type, + void *frame, ssize_t len) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + u32 val, *data = frame; + int i; + + val = I915_READ(TVIDEO_DIP_CTL(crtc->pipe)); + + val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */ + val |= g4x_infoframe_index(type); + + I915_WRITE(TVIDEO_DIP_CTL(crtc->pipe), val); + + for (i = 0; i < len; i += 4) + *data++ = I915_READ(TVIDEO_DIP_DATA(crtc->pipe)); +} + +static u32 cpt_infoframes_enabled(struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); @@ -324,7 +400,7 @@ static bool cpt_infoframe_enabled(struct intel_encoder *encoder, u32 val = I915_READ(TVIDEO_DIP_CTL(pipe)); if ((val & VIDEO_DIP_ENABLE) == 0) - return false; + return 0; return val & (VIDEO_DIP_ENABLE_AVI | VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT | @@ -370,7 +446,28 @@ static void vlv_write_infoframe(struct intel_encoder *encoder, POSTING_READ(reg); } -static bool vlv_infoframe_enabled(struct intel_encoder *encoder, +static void vlv_read_infoframe(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + unsigned int type, + void *frame, ssize_t len) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + u32 val, *data = frame; + int i; + + val = I915_READ(VLV_TVIDEO_DIP_CTL(crtc->pipe)); + + val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */ + val |= g4x_infoframe_index(type); + + I915_WRITE(VLV_TVIDEO_DIP_CTL(crtc->pipe), val); + + for (i = 0; i < len; i += 4) + *data++ = I915_READ(VLV_TVIDEO_DIP_DATA(crtc->pipe)); +} + +static u32 vlv_infoframes_enabled(struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); @@ -378,10 +475,10 @@ static bool vlv_infoframe_enabled(struct intel_encoder *encoder, u32 val = I915_READ(VLV_TVIDEO_DIP_CTL(pipe)); if ((val & VIDEO_DIP_ENABLE) == 0) - return false; + return 0; if ((val & 
VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(encoder->port)) - return false; + return 0; return val & (VIDEO_DIP_ENABLE_AVI | VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT | @@ -423,7 +520,24 @@ static void hsw_write_infoframe(struct intel_encoder *encoder, POSTING_READ(ctl_reg); } -static bool hsw_infoframe_enabled(struct intel_encoder *encoder, +static void hsw_read_infoframe(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + unsigned int type, + void *frame, ssize_t len) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; + u32 val, *data = frame; + int i; + + val = I915_READ(HSW_TVIDEO_DIP_CTL(cpu_transcoder)); + + for (i = 0; i < len; i += 4) + *data++ = I915_READ(hsw_dip_data_reg(dev_priv, cpu_transcoder, + type, i >> 2)); +} + +static u32 hsw_infoframes_enabled(struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); @@ -434,6 +548,53 @@ static bool hsw_infoframe_enabled(struct intel_encoder *encoder, VIDEO_DIP_ENABLE_GMP_HSW | VIDEO_DIP_ENABLE_SPD_HSW); } +static const u8 infoframe_type_to_idx[] = { + HDMI_PACKET_TYPE_GENERAL_CONTROL, + HDMI_PACKET_TYPE_GAMUT_METADATA, + DP_SDP_VSC, + HDMI_INFOFRAME_TYPE_AVI, + HDMI_INFOFRAME_TYPE_SPD, + HDMI_INFOFRAME_TYPE_VENDOR, +}; + +u32 intel_hdmi_infoframe_enable(unsigned int type) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(infoframe_type_to_idx); i++) { + if (infoframe_type_to_idx[i] == type) + return BIT(i); + } + + return 0; +} + +u32 intel_hdmi_infoframes_enabled(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base); + u32 val, ret = 0; + int i; + + val = dig_port->infoframes_enabled(encoder, crtc_state); + + /* map from hardware bits to dip idx */ + for (i = 0; i < ARRAY_SIZE(infoframe_type_to_idx); i++) { + unsigned int type = infoframe_type_to_idx[i]; + + if (HAS_DDI(dev_priv)) { + if (val & hsw_infoframe_enable(type)) + ret |= BIT(i); + } else { + if (val & g4x_infoframe_enable(type)) + ret |= BIT(i); + } + } + + return ret; +} + /* * The data we write to the DIP data buffer registers is 1 byte bigger than the * HDMI infoframe size because of an ECC/reserved byte at position 3 (starting @@ -453,15 +614,23 @@ static bool hsw_infoframe_enabled(struct intel_encoder *encoder, */ static void intel_write_infoframe(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, - union hdmi_infoframe *frame) + enum hdmi_infoframe_type type, + const union hdmi_infoframe *frame) { struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base); u8 buffer[VIDEO_DIP_DATA_SIZE]; ssize_t len; + if ((crtc_state->infoframes.enable & + intel_hdmi_infoframe_enable(type)) == 0) + return; + + if (WARN_ON(frame->any.type != type)) + return; + /* see comment above for the reason for this offset */ - len = hdmi_infoframe_pack(frame, buffer + 1, sizeof(buffer) - 1); - if (len < 0) + len = hdmi_infoframe_pack_only(frame, buffer + 1, sizeof(buffer) - 1); + if (WARN_ON(len < 0)) return; /* Insert the 'hole' (see big comment above) at position 3 */ @@ -469,86 +638,143 @@ static void intel_write_infoframe(struct intel_encoder *encoder, buffer[3] = 0; len++; - intel_dig_port->write_infoframe(encoder, - crtc_state, - frame->any.type, buffer, len); + intel_dig_port->write_infoframe(encoder, 
crtc_state, type, buffer, len); } -static void intel_hdmi_set_avi_infoframe(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state, - const struct drm_connector_state *conn_state) +void intel_read_infoframe(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + enum hdmi_infoframe_type type, + union hdmi_infoframe *frame) { + struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base); + u8 buffer[VIDEO_DIP_DATA_SIZE]; + int ret; + + if ((crtc_state->infoframes.enable & + intel_hdmi_infoframe_enable(type)) == 0) + return; + + intel_dig_port->read_infoframe(encoder, crtc_state, + type, buffer, sizeof(buffer)); + + /* Fill the 'hole' (see big comment above) at position 3 */ + memmove(&buffer[1], &buffer[0], 3); + + /* see comment above for the reason for this offset */ + ret = hdmi_infoframe_unpack(frame, buffer + 1, sizeof(buffer) - 1); + if (ret) { + DRM_DEBUG_KMS("Failed to unpack infoframe type 0x%02x\n", type); + return; + } + + if (frame->any.type != type) + DRM_DEBUG_KMS("Found the wrong infoframe type 0x%x (expected 0x%02x)\n", + frame->any.type, type); +} + +static bool +intel_hdmi_compute_avi_infoframe(struct intel_encoder *encoder, + struct intel_crtc_state *crtc_state, + struct drm_connector_state *conn_state) +{ + struct hdmi_avi_infoframe *frame = &crtc_state->infoframes.avi.avi; const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode; - union hdmi_infoframe frame; + struct drm_connector *connector = conn_state->connector; int ret; - ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi, - conn_state->connector, + if (!crtc_state->has_infoframe) + return true; + + crtc_state->infoframes.enable |= + intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI); + + ret = drm_hdmi_avi_infoframe_from_display_mode(frame, connector, adjusted_mode); - if (ret < 0) { - DRM_ERROR("couldn't fill AVI infoframe\n"); - return; - } + if (ret) + return false; if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) - frame.avi.colorspace = HDMI_COLORSPACE_YUV420; + frame->colorspace = HDMI_COLORSPACE_YUV420; else if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) - frame.avi.colorspace = HDMI_COLORSPACE_YUV444; + frame->colorspace = HDMI_COLORSPACE_YUV444; else - frame.avi.colorspace = HDMI_COLORSPACE_RGB; + frame->colorspace = HDMI_COLORSPACE_RGB; - drm_hdmi_avi_infoframe_colorspace(&frame.avi, conn_state); + drm_hdmi_avi_infoframe_colorspace(frame, conn_state); - drm_hdmi_avi_infoframe_quant_range(&frame.avi, - conn_state->connector, + drm_hdmi_avi_infoframe_quant_range(frame, connector, adjusted_mode, crtc_state->limited_color_range ? 
HDMI_QUANTIZATION_RANGE_LIMITED : HDMI_QUANTIZATION_RANGE_FULL); - drm_hdmi_avi_infoframe_content_type(&frame.avi, - conn_state); + drm_hdmi_avi_infoframe_content_type(frame, conn_state); /* TODO: handle pixel repetition for YCBCR420 outputs */ - intel_write_infoframe(encoder, crtc_state, - &frame); + + ret = hdmi_avi_infoframe_check(frame); + if (WARN_ON(ret)) + return false; + + return true; } -static void intel_hdmi_set_spd_infoframe(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state) +static bool +intel_hdmi_compute_spd_infoframe(struct intel_encoder *encoder, + struct intel_crtc_state *crtc_state, + struct drm_connector_state *conn_state) { - union hdmi_infoframe frame; + struct hdmi_spd_infoframe *frame = &crtc_state->infoframes.spd.spd; int ret; - ret = hdmi_spd_infoframe_init(&frame.spd, "Intel", "Integrated gfx"); - if (ret < 0) { - DRM_ERROR("couldn't fill SPD infoframe\n"); - return; - } + if (!crtc_state->has_infoframe) + return true; - frame.spd.sdi = HDMI_SPD_SDI_PC; + crtc_state->infoframes.enable |= + intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD); - intel_write_infoframe(encoder, crtc_state, - &frame); + ret = hdmi_spd_infoframe_init(frame, "Intel", "Integrated gfx"); + if (WARN_ON(ret)) + return false; + + frame->sdi = HDMI_SPD_SDI_PC; + + ret = hdmi_spd_infoframe_check(frame); + if (WARN_ON(ret)) + return false; + + return true; } -static void -intel_hdmi_set_hdmi_infoframe(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state, - const struct drm_connector_state *conn_state) -{ - union hdmi_infoframe frame; +static bool +intel_hdmi_compute_hdmi_infoframe(struct intel_encoder *encoder, + struct intel_crtc_state *crtc_state, + struct drm_connector_state *conn_state) +{ + struct hdmi_vendor_infoframe *frame = + &crtc_state->infoframes.hdmi.vendor.hdmi; + const struct drm_display_info *info = + &conn_state->connector->display_info; int ret; - ret = drm_hdmi_vendor_infoframe_from_display_mode(&frame.vendor.hdmi, + if (!crtc_state->has_infoframe || !info->has_hdmi_infoframe) + return true; + + crtc_state->infoframes.enable |= + intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR); + + ret = drm_hdmi_vendor_infoframe_from_display_mode(frame, conn_state->connector, &crtc_state->base.adjusted_mode); - if (ret < 0) - return; + if (WARN_ON(ret)) + return false; - intel_write_infoframe(encoder, crtc_state, - &frame); + ret = hdmi_vendor_infoframe_check(frame); + if (WARN_ON(ret)) + return false; + + return true; } static void g4x_set_infoframes(struct intel_encoder *encoder, @@ -608,9 +834,15 @@ static void g4x_set_infoframes(struct intel_encoder *encoder, I915_WRITE(reg, val); POSTING_READ(reg); - intel_hdmi_set_avi_infoframe(encoder, crtc_state, conn_state); - intel_hdmi_set_spd_infoframe(encoder, crtc_state); - intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state); + intel_write_infoframe(encoder, crtc_state, + HDMI_INFOFRAME_TYPE_AVI, + &crtc_state->infoframes.avi); + intel_write_infoframe(encoder, crtc_state, + HDMI_INFOFRAME_TYPE_SPD, + &crtc_state->infoframes.spd); + intel_write_infoframe(encoder, crtc_state, + HDMI_INFOFRAME_TYPE_VENDOR, + &crtc_state->infoframes.hdmi); } static bool hdmi_sink_is_deep_color(const struct drm_connector_state *conn_state) @@ -676,7 +908,10 @@ static bool intel_hdmi_set_gcp_infoframe(struct intel_encoder *encoder, struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); i915_reg_t reg; - u32 val = 0; + + if 
((crtc_state->infoframes.enable & + intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL)) == 0) + return false; if (HAS_DDI(dev_priv)) reg = HSW_TVIDEO_DIP_GCP(crtc_state->cpu_transcoder); @@ -687,18 +922,54 @@ static bool intel_hdmi_set_gcp_infoframe(struct intel_encoder *encoder, else return false; + I915_WRITE(reg, crtc_state->infoframes.gcp); + + return true; +} + +void intel_hdmi_read_gcp_infoframe(struct intel_encoder *encoder, + struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + i915_reg_t reg; + + if ((crtc_state->infoframes.enable & + intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL)) == 0) + return; + + if (HAS_DDI(dev_priv)) + reg = HSW_TVIDEO_DIP_GCP(crtc_state->cpu_transcoder); + else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) + reg = VLV_TVIDEO_DIP_GCP(crtc->pipe); + else if (HAS_PCH_SPLIT(dev_priv)) + reg = TVIDEO_DIP_GCP(crtc->pipe); + else + return; + + crtc_state->infoframes.gcp = I915_READ(reg); +} + +static void intel_hdmi_compute_gcp_infoframe(struct intel_encoder *encoder, + struct intel_crtc_state *crtc_state, + struct drm_connector_state *conn_state) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + + if (IS_G4X(dev_priv) || !crtc_state->has_infoframe) + return; + + crtc_state->infoframes.enable |= + intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL); + /* Indicate color depth whenever the sink supports deep color */ if (hdmi_sink_is_deep_color(conn_state)) - val |= GCP_COLOR_INDICATION; + crtc_state->infoframes.gcp |= GCP_COLOR_INDICATION; /* Enable default_phase whenever the display mode is suitably aligned */ if (gcp_default_phase_possible(crtc_state->pipe_bpp, &crtc_state->base.adjusted_mode)) - val |= GCP_DEFAULT_PHASE_ENABLE; - - I915_WRITE(reg, val); - - return val != 0; + crtc_state->infoframes.gcp |= GCP_DEFAULT_PHASE_ENABLE; } static void ibx_set_infoframes(struct intel_encoder *encoder, @@ -749,9 +1020,15 @@ static void ibx_set_infoframes(struct intel_encoder *encoder, I915_WRITE(reg, val); POSTING_READ(reg); - intel_hdmi_set_avi_infoframe(encoder, crtc_state, conn_state); - intel_hdmi_set_spd_infoframe(encoder, crtc_state); - intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state); + intel_write_infoframe(encoder, crtc_state, + HDMI_INFOFRAME_TYPE_AVI, + &crtc_state->infoframes.avi); + intel_write_infoframe(encoder, crtc_state, + HDMI_INFOFRAME_TYPE_SPD, + &crtc_state->infoframes.spd); + intel_write_infoframe(encoder, crtc_state, + HDMI_INFOFRAME_TYPE_VENDOR, + &crtc_state->infoframes.hdmi); } static void cpt_set_infoframes(struct intel_encoder *encoder, @@ -792,9 +1069,15 @@ static void cpt_set_infoframes(struct intel_encoder *encoder, I915_WRITE(reg, val); POSTING_READ(reg); - intel_hdmi_set_avi_infoframe(encoder, crtc_state, conn_state); - intel_hdmi_set_spd_infoframe(encoder, crtc_state); - intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state); + intel_write_infoframe(encoder, crtc_state, + HDMI_INFOFRAME_TYPE_AVI, + &crtc_state->infoframes.avi); + intel_write_infoframe(encoder, crtc_state, + HDMI_INFOFRAME_TYPE_SPD, + &crtc_state->infoframes.spd); + intel_write_infoframe(encoder, crtc_state, + HDMI_INFOFRAME_TYPE_VENDOR, + &crtc_state->infoframes.hdmi); } static void vlv_set_infoframes(struct intel_encoder *encoder, @@ -844,9 +1127,15 @@ static void vlv_set_infoframes(struct intel_encoder *encoder, I915_WRITE(reg, val); POSTING_READ(reg); - 
intel_hdmi_set_avi_infoframe(encoder, crtc_state, conn_state); - intel_hdmi_set_spd_infoframe(encoder, crtc_state); - intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state); + intel_write_infoframe(encoder, crtc_state, + HDMI_INFOFRAME_TYPE_AVI, + &crtc_state->infoframes.avi); + intel_write_infoframe(encoder, crtc_state, + HDMI_INFOFRAME_TYPE_SPD, + &crtc_state->infoframes.spd); + intel_write_infoframe(encoder, crtc_state, + HDMI_INFOFRAME_TYPE_VENDOR, + &crtc_state->infoframes.hdmi); } static void hsw_set_infoframes(struct intel_encoder *encoder, @@ -877,9 +1166,15 @@ static void hsw_set_infoframes(struct intel_encoder *encoder, I915_WRITE(reg, val); POSTING_READ(reg); - intel_hdmi_set_avi_infoframe(encoder, crtc_state, conn_state); - intel_hdmi_set_spd_infoframe(encoder, crtc_state); - intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state); + intel_write_infoframe(encoder, crtc_state, + HDMI_INFOFRAME_TYPE_AVI, + &crtc_state->infoframes.avi); + intel_write_infoframe(encoder, crtc_state, + HDMI_INFOFRAME_TYPE_SPD, + &crtc_state->infoframes.spd); + intel_write_infoframe(encoder, crtc_state, + HDMI_INFOFRAME_TYPE_VENDOR, + &crtc_state->infoframes.hdmi); } void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable) @@ -1085,10 +1380,44 @@ int intel_hdmi_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port, return ret; } +static int kbl_repositioning_enc_en_signal(struct intel_connector *connector) +{ + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector); + struct drm_crtc *crtc = connector->base.state->crtc; + struct intel_crtc *intel_crtc = container_of(crtc, + struct intel_crtc, base); + u32 scanline; + int ret; + + for (;;) { + scanline = I915_READ(PIPEDSL(intel_crtc->pipe)); + if (scanline > 100 && scanline < 200) + break; + usleep_range(25, 50); + } + + ret = intel_ddi_toggle_hdcp_signalling(&intel_dig_port->base, false); + if (ret) { + DRM_ERROR("Disable HDCP signalling failed (%d)\n", ret); + return ret; + } + ret = intel_ddi_toggle_hdcp_signalling(&intel_dig_port->base, true); + if (ret) { + DRM_ERROR("Enable HDCP signalling failed (%d)\n", ret); + return ret; + } + + return 0; +} + static int intel_hdmi_hdcp_toggle_signalling(struct intel_digital_port *intel_dig_port, bool enable) { + struct intel_hdmi *hdmi = &intel_dig_port->hdmi; + struct intel_connector *connector = hdmi->attached_connector; + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); int ret; if (!enable) @@ -1100,6 +1429,14 @@ int intel_hdmi_hdcp_toggle_signalling(struct intel_digital_port *intel_dig_port, enable ? "Enable" : "Disable", ret); return ret; } + + /* + * WA: To fix incorrect positioning of the window of + * opportunity and enc_en signalling in KABYLAKE. 
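+ * (kbl_repositioning_enc_en_signal() above polls PIPEDSL until the scanline sits roughly between lines 100 and 200, then re-toggles HDCP signalling inside that window)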
+ */ + if (IS_KABYLAKE(dev_priv) && enable) + return kbl_repositioning_enc_en_signal(connector); + return 0; } @@ -1131,6 +1468,190 @@ bool intel_hdmi_hdcp_check_link(struct intel_digital_port *intel_dig_port) return true; } +static struct hdcp2_hdmi_msg_data { + u8 msg_id; + u32 timeout; + u32 timeout2; + } hdcp2_msg_data[] = { + {HDCP_2_2_AKE_INIT, 0, 0}, + {HDCP_2_2_AKE_SEND_CERT, HDCP_2_2_CERT_TIMEOUT_MS, 0}, + {HDCP_2_2_AKE_NO_STORED_KM, 0, 0}, + {HDCP_2_2_AKE_STORED_KM, 0, 0}, + {HDCP_2_2_AKE_SEND_HPRIME, HDCP_2_2_HPRIME_PAIRED_TIMEOUT_MS, + HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS}, + {HDCP_2_2_AKE_SEND_PAIRING_INFO, HDCP_2_2_PAIRING_TIMEOUT_MS, + 0}, + {HDCP_2_2_LC_INIT, 0, 0}, + {HDCP_2_2_LC_SEND_LPRIME, HDCP_2_2_HDMI_LPRIME_TIMEOUT_MS, 0}, + {HDCP_2_2_SKE_SEND_EKS, 0, 0}, + {HDCP_2_2_REP_SEND_RECVID_LIST, + HDCP_2_2_RECVID_LIST_TIMEOUT_MS, 0}, + {HDCP_2_2_REP_SEND_ACK, 0, 0}, + {HDCP_2_2_REP_STREAM_MANAGE, 0, 0}, + {HDCP_2_2_REP_STREAM_READY, HDCP_2_2_STREAM_READY_TIMEOUT_MS, + 0}, + }; + +static +int intel_hdmi_hdcp2_read_rx_status(struct intel_digital_port *intel_dig_port, + u8 *rx_status) +{ + return intel_hdmi_hdcp_read(intel_dig_port, + HDCP_2_2_HDMI_REG_RXSTATUS_OFFSET, + rx_status, + HDCP_2_2_HDMI_RXSTATUS_LEN); +} + +static int get_hdcp2_msg_timeout(u8 msg_id, bool is_paired) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(hdcp2_msg_data); i++) + if (hdcp2_msg_data[i].msg_id == msg_id && + (msg_id != HDCP_2_2_AKE_SEND_HPRIME || is_paired)) + return hdcp2_msg_data[i].timeout; + else if (hdcp2_msg_data[i].msg_id == msg_id) + return hdcp2_msg_data[i].timeout2; + + return -EINVAL; +} + +static inline +int hdcp2_detect_msg_availability(struct intel_digital_port *intel_digital_port, + u8 msg_id, bool *msg_ready, + ssize_t *msg_sz) +{ + u8 rx_status[HDCP_2_2_HDMI_RXSTATUS_LEN]; + int ret; + + ret = intel_hdmi_hdcp2_read_rx_status(intel_digital_port, rx_status); + if (ret < 0) { + DRM_DEBUG_KMS("rx_status read failed. Err %d\n", ret); + return ret; + } + + *msg_sz = ((HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(rx_status[1]) << 8) | + rx_status[0]); + + if (msg_id == HDCP_2_2_REP_SEND_RECVID_LIST) + *msg_ready = (HDCP_2_2_HDMI_RXSTATUS_READY(rx_status[1]) && + *msg_sz); + else + *msg_ready = *msg_sz; + + return 0; +} + +static ssize_t +intel_hdmi_hdcp2_wait_for_msg(struct intel_digital_port *intel_dig_port, + u8 msg_id, bool paired) +{ + bool msg_ready = false; + int timeout, ret; + ssize_t msg_sz = 0; + + timeout = get_hdcp2_msg_timeout(msg_id, paired); + if (timeout < 0) + return timeout; + + ret = __wait_for(ret = hdcp2_detect_msg_availability(intel_dig_port, + msg_id, &msg_ready, + &msg_sz), + !ret && msg_ready && msg_sz, timeout * 1000, + 1000, 5 * 1000); + if (ret) + DRM_DEBUG_KMS("msg_id: %d, ret: %d, timeout: %d\n", + msg_id, ret, timeout); + + return ret ? ret : msg_sz; +} + +static +int intel_hdmi_hdcp2_write_msg(struct intel_digital_port *intel_dig_port, + void *buf, size_t size) +{ + unsigned int offset; + + offset = HDCP_2_2_HDMI_REG_WR_MSG_OFFSET; + return intel_hdmi_hdcp_write(intel_dig_port, offset, buf, size); +} + +static +int intel_hdmi_hdcp2_read_msg(struct intel_digital_port *intel_dig_port, + u8 msg_id, void *buf, size_t size) +{ + struct intel_hdmi *hdmi = &intel_dig_port->hdmi; + struct intel_hdcp *hdcp = &hdmi->attached_connector->hdcp; + unsigned int offset; + ssize_t ret; + + ret = intel_hdmi_hdcp2_wait_for_msg(intel_dig_port, msg_id, + hdcp->is_paired); + if (ret < 0) + return ret; + + /* + * Available msg size should be equal to or lesser than the + * available buffer. 
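+ * (ret is the message size the sink reported in RxStatus; copying more than the caller's "size" would overrun its buffer, hence the bail-out below)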
+ */ + if (ret > size) { + DRM_DEBUG_KMS("msg_sz(%zd) is more than exp size(%zu)\n", + ret, size); + return -1; + } + + offset = HDCP_2_2_HDMI_REG_RD_MSG_OFFSET; + ret = intel_hdmi_hdcp_read(intel_dig_port, offset, buf, ret); + if (ret) + DRM_DEBUG_KMS("Failed to read msg_id: %d(%zd)\n", msg_id, ret); + + return ret; +} + +static +int intel_hdmi_hdcp2_check_link(struct intel_digital_port *intel_dig_port) +{ + u8 rx_status[HDCP_2_2_HDMI_RXSTATUS_LEN]; + int ret; + + ret = intel_hdmi_hdcp2_read_rx_status(intel_dig_port, rx_status); + if (ret) + return ret; + + /* + * Re-auth request and Link Integrity Failures are represented by + * same bit. i.e reauth_req. + */ + if (HDCP_2_2_HDMI_RXSTATUS_REAUTH_REQ(rx_status[1])) + ret = HDCP_REAUTH_REQUEST; + else if (HDCP_2_2_HDMI_RXSTATUS_READY(rx_status[1])) + ret = HDCP_TOPOLOGY_CHANGE; + + return ret; +} + +static +int intel_hdmi_hdcp2_capable(struct intel_digital_port *intel_dig_port, + bool *capable) +{ + u8 hdcp2_version; + int ret; + + *capable = false; + ret = intel_hdmi_hdcp_read(intel_dig_port, HDCP_2_2_HDMI_REG_VER_OFFSET, + &hdcp2_version, sizeof(hdcp2_version)); + if (!ret && hdcp2_version & HDCP_2_2_HDMI_SUPPORT_MASK) + *capable = true; + + return ret; +} + +static inline +enum hdcp_wired_protocol intel_hdmi_hdcp2_protocol(void) +{ + return HDCP_PROTOCOL_HDMI; +} + static const struct intel_hdcp_shim intel_hdmi_hdcp_shim = { .write_an_aksv = intel_hdmi_hdcp_write_an_aksv, .read_bksv = intel_hdmi_hdcp_read_bksv, @@ -1142,6 +1663,11 @@ static const struct intel_hdcp_shim intel_hdmi_hdcp_shim = { .read_v_prime_part = intel_hdmi_hdcp_read_v_prime_part, .toggle_signalling = intel_hdmi_hdcp_toggle_signalling, .check_link = intel_hdmi_hdcp_check_link, + .write_2_2_msg = intel_hdmi_hdcp2_write_msg, + .read_2_2_msg = intel_hdmi_hdcp2_read_msg, + .check_2_2_link = intel_hdmi_hdcp2_check_link, + .hdcp_2_2_capable = intel_hdmi_hdcp2_capable, + .protocol = HDCP_PROTOCOL_HDMI, }; static void intel_hdmi_prepare(struct intel_encoder *encoder, @@ -1207,7 +1733,6 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); - struct intel_digital_port *intel_dig_port = hdmi_to_dig_port(intel_hdmi); struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); u32 tmp, flags = 0; @@ -1230,7 +1755,10 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder, if (tmp & HDMI_MODE_SELECT_HDMI) pipe_config->has_hdmi_sink = true; - if (intel_dig_port->infoframe_enabled(encoder, pipe_config)) + pipe_config->infoframes.enable |= + intel_hdmi_infoframes_enabled(encoder, pipe_config); + + if (pipe_config->infoframes.enable) pipe_config->has_infoframe = true; if (tmp & SDVO_AUDIO_ENABLE) @@ -1253,6 +1781,18 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder, pipe_config->base.adjusted_mode.crtc_clock = dotclock; pipe_config->lane_count = 4; + + intel_hdmi_read_gcp_infoframe(encoder, pipe_config); + + intel_read_infoframe(encoder, pipe_config, + HDMI_INFOFRAME_TYPE_AVI, + &pipe_config->infoframes.avi); + intel_read_infoframe(encoder, pipe_config, + HDMI_INFOFRAME_TYPE_SPD, + &pipe_config->infoframes.spd); + intel_read_infoframe(encoder, pipe_config, + HDMI_INFOFRAME_TYPE_VENDOR, + &pipe_config->infoframes.hdmi); } static void intel_enable_hdmi_audio(struct intel_encoder *encoder, @@ -1666,7 +2206,7 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state, /* Display 
Wa_1405510057:icl */ if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 && - bpc == 10 && IS_ICELAKE(dev_priv) && + bpc == 10 && INTEL_GEN(dev_priv) >= 11 && (adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start) % 8 == 2) return false; @@ -1824,6 +2364,23 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder, } } + intel_hdmi_compute_gcp_infoframe(encoder, pipe_config, conn_state); + + if (!intel_hdmi_compute_avi_infoframe(encoder, pipe_config, conn_state)) { + DRM_DEBUG_KMS("bad AVI infoframe\n"); + return -EINVAL; + } + + if (!intel_hdmi_compute_spd_infoframe(encoder, pipe_config, conn_state)) { + DRM_DEBUG_KMS("bad SPD infoframe\n"); + return -EINVAL; + } + + if (!intel_hdmi_compute_hdmi_infoframe(encoder, pipe_config, conn_state)) { + DRM_DEBUG_KMS("bad HDMI infoframe\n"); + return -EINVAL; + } + return 0; } @@ -1943,7 +2500,7 @@ intel_hdmi_detect(struct drm_connector *connector, bool force) wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS); - if (IS_ICELAKE(dev_priv) && + if (INTEL_GEN(dev_priv) >= 11 && !intel_digital_port_connected(encoder)) goto out; @@ -2344,14 +2901,14 @@ static u8 intel_hdmi_ddc_pin(struct drm_i915_private *dev_priv, return info->alternate_ddc_pin; } - if (IS_CHERRYVIEW(dev_priv)) - ddc_pin = chv_port_to_ddc_pin(dev_priv, port); - else if (IS_GEN9_LP(dev_priv)) - ddc_pin = bxt_port_to_ddc_pin(dev_priv, port); + if (HAS_PCH_ICP(dev_priv)) + ddc_pin = icl_port_to_ddc_pin(dev_priv, port); else if (HAS_PCH_CNP(dev_priv)) ddc_pin = cnp_port_to_ddc_pin(dev_priv, port); - else if (HAS_PCH_ICP(dev_priv)) - ddc_pin = icl_port_to_ddc_pin(dev_priv, port); + else if (IS_GEN9_LP(dev_priv)) + ddc_pin = bxt_port_to_ddc_pin(dev_priv, port); + else if (IS_CHERRYVIEW(dev_priv)) + ddc_pin = chv_port_to_ddc_pin(dev_priv, port); else ddc_pin = g4x_port_to_ddc_pin(dev_priv, port); @@ -2368,33 +2925,36 @@ void intel_infoframe_init(struct intel_digital_port *intel_dig_port) if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { intel_dig_port->write_infoframe = vlv_write_infoframe; + intel_dig_port->read_infoframe = vlv_read_infoframe; intel_dig_port->set_infoframes = vlv_set_infoframes; - intel_dig_port->infoframe_enabled = vlv_infoframe_enabled; + intel_dig_port->infoframes_enabled = vlv_infoframes_enabled; } else if (IS_G4X(dev_priv)) { intel_dig_port->write_infoframe = g4x_write_infoframe; + intel_dig_port->read_infoframe = g4x_read_infoframe; intel_dig_port->set_infoframes = g4x_set_infoframes; - intel_dig_port->infoframe_enabled = g4x_infoframe_enabled; + intel_dig_port->infoframes_enabled = g4x_infoframes_enabled; } else if (HAS_DDI(dev_priv)) { if (intel_dig_port->lspcon.active) { - intel_dig_port->write_infoframe = - lspcon_write_infoframe; + intel_dig_port->write_infoframe = lspcon_write_infoframe; + intel_dig_port->read_infoframe = lspcon_read_infoframe; intel_dig_port->set_infoframes = lspcon_set_infoframes; - intel_dig_port->infoframe_enabled = - lspcon_infoframe_enabled; + intel_dig_port->infoframes_enabled = lspcon_infoframes_enabled; } else { - intel_dig_port->set_infoframes = hsw_set_infoframes; - intel_dig_port->infoframe_enabled = - hsw_infoframe_enabled; intel_dig_port->write_infoframe = hsw_write_infoframe; + intel_dig_port->read_infoframe = hsw_read_infoframe; + intel_dig_port->set_infoframes = hsw_set_infoframes; + intel_dig_port->infoframes_enabled = hsw_infoframes_enabled; } } else if (HAS_PCH_IBX(dev_priv)) { intel_dig_port->write_infoframe = ibx_write_infoframe; + intel_dig_port->read_infoframe = 
ibx_read_infoframe; intel_dig_port->set_infoframes = ibx_set_infoframes; - intel_dig_port->infoframe_enabled = ibx_infoframe_enabled; + intel_dig_port->infoframes_enabled = ibx_infoframes_enabled; } else { intel_dig_port->write_infoframe = cpt_write_infoframe; + intel_dig_port->read_infoframe = cpt_read_infoframe; intel_dig_port->set_infoframes = cpt_set_infoframes; - intel_dig_port->infoframe_enabled = cpt_infoframe_enabled; + intel_dig_port->infoframes_enabled = cpt_infoframes_enabled; } } @@ -2440,6 +3000,9 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, intel_hdmi_add_properties(intel_hdmi, connector); + intel_connector_attach_encoder(intel_connector, intel_encoder); + intel_hdmi->attached_connector = intel_connector; + if (is_hdcp_supported(dev_priv, port)) { int ret = intel_hdcp_init(intel_connector, &intel_hdmi_hdcp_shim); @@ -2447,9 +3010,6 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, DRM_DEBUG_KMS("HDCP init failed, skipping.\n"); } - intel_connector_attach_encoder(intel_connector, intel_encoder); - intel_hdmi->attached_connector = intel_connector; - /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written * 0xd. Failure to do so will result in spurious interrupts being * generated on the port when a cable is not attached. diff --git a/drivers/gpu/drm/i915/intel_huc.c b/drivers/gpu/drm/i915/intel_huc.c index 9bd1c9002c2a..94c04f16a2ad 100644 --- a/drivers/gpu/drm/i915/intel_huc.c +++ b/drivers/gpu/drm/i915/intel_huc.c @@ -79,7 +79,7 @@ int intel_huc_auth(struct intel_huc *huc) } /* Check authentication status, it should be done by now */ - ret = __intel_wait_for_register(i915, + ret = __intel_wait_for_register(&i915->uncore, HUC_STATUS2, HUC_FW_VERIFIED, HUC_FW_VERIFIED, diff --git a/drivers/gpu/drm/i915/intel_huc_fw.c b/drivers/gpu/drm/i915/intel_huc_fw.c index 7d7bfc7f7ca7..68d47c105939 100644 --- a/drivers/gpu/drm/i915/intel_huc_fw.c +++ b/drivers/gpu/drm/i915/intel_huc_fw.c @@ -106,41 +106,46 @@ static int huc_fw_xfer(struct intel_uc_fw *huc_fw, struct i915_vma *vma) { struct intel_huc *huc = container_of(huc_fw, struct intel_huc, fw); struct drm_i915_private *dev_priv = huc_to_i915(huc); + struct intel_uncore *uncore = &dev_priv->uncore; unsigned long offset = 0; u32 size; int ret; GEM_BUG_ON(huc_fw->type != INTEL_UC_FW_TYPE_HUC); - intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL); /* Set the source address for the uCode */ offset = intel_guc_ggtt_offset(&dev_priv->guc, vma) + huc_fw->header_offset; - I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset)); - I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF); + intel_uncore_write(uncore, DMA_ADDR_0_LOW, + lower_32_bits(offset)); + intel_uncore_write(uncore, DMA_ADDR_0_HIGH, + upper_32_bits(offset) & 0xFFFF); - /* Hardware doesn't look at destination address for HuC. Set it to 0, + /* + * Hardware doesn't look at destination address for HuC. Set it to 0, * but still program the correct address space. 
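+ * (DMA_ADDR_1_HIGH below still selects the WOPCM as the destination address space for the firmware copy)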
*/ - I915_WRITE(DMA_ADDR_1_LOW, 0); - I915_WRITE(DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM); + intel_uncore_write(uncore, DMA_ADDR_1_LOW, 0); + intel_uncore_write(uncore, DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM); size = huc_fw->header_size + huc_fw->ucode_size; - I915_WRITE(DMA_COPY_SIZE, size); + intel_uncore_write(uncore, DMA_COPY_SIZE, size); /* Start the DMA */ - I915_WRITE(DMA_CTRL, _MASKED_BIT_ENABLE(HUC_UKERNEL | START_DMA)); + intel_uncore_write(uncore, DMA_CTRL, + _MASKED_BIT_ENABLE(HUC_UKERNEL | START_DMA)); /* Wait for DMA to finish */ - ret = intel_wait_for_register_fw(dev_priv, DMA_CTRL, START_DMA, 0, 100); + ret = intel_wait_for_register_fw(uncore, DMA_CTRL, START_DMA, 0, 100); DRM_DEBUG_DRIVER("HuC DMA transfer wait over with ret %d\n", ret); /* Disable the bits once DMA is over */ - I915_WRITE(DMA_CTRL, _MASKED_BIT_DISABLE(HUC_UKERNEL)); + intel_uncore_write(uncore, DMA_CTRL, _MASKED_BIT_DISABLE(HUC_UKERNEL)); - intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL); return ret; } diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index 5a733e711355..422685d120e9 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c @@ -348,7 +348,7 @@ gmbus_wait_idle(struct drm_i915_private *dev_priv) add_wait_queue(&dev_priv->gmbus_wait_queue, &wait); I915_WRITE_FW(GMBUS4, irq_enable); - ret = intel_wait_for_register_fw(dev_priv, + ret = intel_wait_for_register_fw(&dev_priv->uncore, GMBUS2, GMBUS_ACTIVE, 0, 10); diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index 5e98fd79bd9d..bec232acc8d7 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -164,20 +164,15 @@ #define WA_TAIL_DWORDS 2 #define WA_TAIL_BYTES (sizeof(u32) * WA_TAIL_DWORDS) -static int execlists_context_deferred_alloc(struct i915_gem_context *ctx, - struct intel_engine_cs *engine, - struct intel_context *ce); +#define ACTIVE_PRIORITY (I915_PRIORITY_NEWCLIENT | I915_PRIORITY_NOSEMAPHORE) + +static int execlists_context_deferred_alloc(struct intel_context *ce, + struct intel_engine_cs *engine); static void execlists_init_reg_state(u32 *reg_state, - struct i915_gem_context *ctx, + struct intel_context *ce, struct intel_engine_cs *engine, struct intel_ring *ring); -static inline u32 intel_hws_seqno_address(struct intel_engine_cs *engine) -{ - return (i915_ggtt_offset(engine->status_page.vma) + - I915_GEM_HWS_INDEX_ADDR); -} - static inline struct i915_priolist *to_priolist(struct rb_node *rb) { return rb_entry(rb, struct i915_priolist, node); @@ -188,6 +183,34 @@ static inline int rq_prio(const struct i915_request *rq) return rq->sched.attr.priority; } +static int effective_prio(const struct i915_request *rq) +{ + int prio = rq_prio(rq); + + /* + * On unwinding the active request, we give it a priority bump + * equivalent to a freshly submitted request. This protects it from + * being gazumped again, but it would be preferable if we didn't + * let it be gazumped in the first place! + * + * See __unwind_incomplete_requests() + */ + if (~prio & ACTIVE_PRIORITY && __i915_request_has_started(rq)) { + /* + * After preemption, we insert the active request at the + * end of the new priority level. This means that we will be + * _lower_ priority than the preemptee all things equal (and + * so the preemption is valid), so adjust our comparison + * accordingly. 
+ */ + prio |= ACTIVE_PRIORITY; + prio--; + } + + /* Restrict mere WAIT boosts from triggering preemption */ + return prio | __NO_PREEMPTION; +} + static int queue_prio(const struct intel_engine_execlists *execlists) { struct i915_priolist *p; @@ -208,7 +231,7 @@ static int queue_prio(const struct intel_engine_execlists *execlists) static inline bool need_preempt(const struct intel_engine_cs *engine, const struct i915_request *rq) { - const int last_prio = rq_prio(rq); + int last_prio; if (!intel_engine_has_preemption(engine)) return false; @@ -228,6 +251,7 @@ static inline bool need_preempt(const struct intel_engine_cs *engine, * preempt. If that hint is stale or we may be trying to preempt * ourselves, ignore the request. */ + last_prio = effective_prio(rq); if (!__execlists_need_preempt(engine->execlists.queue_priority_hint, last_prio)) return false; @@ -254,12 +278,11 @@ static inline bool need_preempt(const struct intel_engine_cs *engine, } __maybe_unused static inline bool -assert_priority_queue(const struct intel_engine_execlists *execlists, - const struct i915_request *prev, +assert_priority_queue(const struct i915_request *prev, const struct i915_request *next) { - if (!prev) - return true; + const struct intel_engine_execlists *execlists = + &prev->engine->execlists; /* * Without preemption, the prev may refer to the still active element @@ -300,11 +323,10 @@ assert_priority_queue(const struct intel_engine_execlists *execlists, * engine info, SW context ID and SW counter need to form a unique number * (Context ID) per lrc. */ -static void -intel_lr_context_descriptor_update(struct i915_gem_context *ctx, - struct intel_engine_cs *engine, - struct intel_context *ce) +static u64 +lrc_descriptor(struct intel_context *ce, struct intel_engine_cs *engine) { + struct i915_gem_context *ctx = ce->gem_context; u64 desc; BUILD_BUG_ON(MAX_CONTEXT_HW_ID > (BIT(GEN8_CTX_ID_WIDTH))); @@ -322,7 +344,7 @@ intel_lr_context_descriptor_update(struct i915_gem_context *ctx, * Consider updating oa_get_render_ctx_id in i915_perf.c when changing * anything below. */ - if (INTEL_GEN(ctx->i915) >= 11) { + if (INTEL_GEN(engine->i915) >= 11) { GEM_BUG_ON(ctx->hw_id >= BIT(GEN11_SW_CTX_ID_WIDTH)); desc |= (u64)ctx->hw_id << GEN11_SW_CTX_ID_SHIFT; /* bits 37-47 */ @@ -339,7 +361,7 @@ intel_lr_context_descriptor_update(struct i915_gem_context *ctx, desc |= (u64)ctx->hw_id << GEN8_CTX_ID_SHIFT; /* bits 32-52 */ } - ce->lrc_desc = desc; + return desc; } static void unwind_wa_tail(struct i915_request *rq) @@ -353,7 +375,7 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine) { struct i915_request *rq, *rn, *active = NULL; struct list_head *uninitialized_var(pl); - int prio = I915_PRIORITY_INVALID | I915_PRIORITY_NEWCLIENT; + int prio = I915_PRIORITY_INVALID | ACTIVE_PRIORITY; lockdep_assert_held(&engine->timeline.lock); @@ -384,9 +406,21 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine) * The active request is now effectively the start of a new client * stream, so give it the equivalent small priority bump to prevent * it being gazumped a second time by another peer. + * + * Note we have to be careful not to apply a priority boost to a request + * still spinning on its semaphores. If the request hasn't started, that + * means it is still waiting for its dependencies to be signaled, and + * if we apply a priority boost to this request, we will boost it past + * its signalers and so break PI. 
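A rough standalone model (not part of the patch) of the effective-priority arithmetic the comments above describe: a request unwound after preemption takes the ACTIVE_PRIORITY bump and then sorts just behind its preemptor, while a mere WAIT boost is never enough to trigger preemption. The flag values and the has_started parameter are invented stand-ins, not the real i915 priority encoding.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative flag values only -- not the real i915 encoding. */
#define ACTIVE_PRIORITY		0x3	/* stand-in for NEWCLIENT | NOSEMAPHORE */
#define NO_PREEMPTION		0x1	/* stand-in for __NO_PREEMPTION */

static int effective_prio(int prio, bool has_started)
{
	/*
	 * A request unwound after preemption gets the same small bump a
	 * fresh submission would get, then sorts just behind the request
	 * that preempted it (hence the decrement).
	 */
	if ((~prio & ACTIVE_PRIORITY) && has_started) {
		prio |= ACTIVE_PRIORITY;
		prio--;
	}

	/* A mere WAIT boost must never be enough to trigger preemption. */
	return prio | NO_PREEMPTION;
}

int main(void)
{
	printf("started:     %d\n", effective_prio(8 << 2, true));
	printf("not started: %d\n", effective_prio(8 << 2, false));
	return 0;
}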
+ * + * One consequence of this preemption boost is that we may jump + * over lesser priorities (such as I915_PRIORITY_WAIT), effectively + * making those priorities non-preemptible. They will be moved forward + * in the priority queue, but they will not gain immediate access to + * the GPU. */ - if (!(prio & I915_PRIORITY_NEWCLIENT)) { - prio |= I915_PRIORITY_NEWCLIENT; + if (~prio & ACTIVE_PRIORITY && __i915_request_has_started(active)) { + prio |= ACTIVE_PRIORITY; active->sched.attr.priority = prio; list_move_tail(&active->sched.link, i915_sched_lookup_priolist(engine, prio)); @@ -523,13 +557,11 @@ static void execlists_submit_ports(struct intel_engine_cs *engine) desc = execlists_update_context(rq); GEM_DEBUG_EXEC(port[n].context_id = upper_32_bits(desc)); - GEM_TRACE("%s in[%d]: ctx=%d.%d, global=%d (fence %llx:%lld) (current %d:%d), prio=%d\n", + GEM_TRACE("%s in[%d]: ctx=%d.%d, fence %llx:%lld (current %d), prio=%d\n", engine->name, n, port[n].context_id, count, - rq->global_seqno, rq->fence.context, rq->fence.seqno, hwsp_seqno(rq), - intel_engine_get_seqno(engine), rq_prio(rq)); } else { GEM_BUG_ON(!n); @@ -564,6 +596,17 @@ static bool can_merge_ctx(const struct intel_context *prev, return true; } +static bool can_merge_rq(const struct i915_request *prev, + const struct i915_request *next) +{ + GEM_BUG_ON(!assert_priority_queue(prev, next)); + + if (!can_merge_ctx(prev->hw_context, next->hw_context)) + return false; + + return true; +} + static void port_assign(struct execlist_port *port, struct i915_request *rq) { GEM_BUG_ON(rq == port_request(port)); @@ -577,8 +620,7 @@ static void port_assign(struct execlist_port *port, struct i915_request *rq) static void inject_preempt_context(struct intel_engine_cs *engine) { struct intel_engine_execlists *execlists = &engine->execlists; - struct intel_context *ce = - to_intel_context(engine->i915->preempt_context, engine); + struct intel_context *ce = engine->preempt_context; unsigned int n; GEM_BUG_ON(execlists->preempt_complete_status != @@ -716,8 +758,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine) int i; priolist_for_each_request_consume(rq, rn, p, i) { - GEM_BUG_ON(!assert_priority_queue(execlists, last, rq)); - /* * Can we combine this request with the current port? * It has to be the same context/ringbuffer and not @@ -729,8 +769,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine) * second request, and so we never need to tell the * hardware about the first. */ - if (last && - !can_merge_ctx(rq->hw_context, last->hw_context)) { + if (last && !can_merge_rq(last, rq)) { /* * If we are on the second port and cannot * combine this request with the last, then we @@ -740,6 +779,14 @@ static void execlists_dequeue(struct intel_engine_cs *engine) goto done; /* + * We must not populate both ELSP[] with the + * same LRCA, i.e. we must submit 2 different + * contexts if we submit 2 ELSP. + */ + if (last->hw_context == rq->hw_context) + goto done; + + /* * If GVT overrides us we only ever submit * port[0], leaving port[1] empty. 
Note that we * also have to be careful that we don't queue @@ -750,7 +797,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine) ctx_single_port_submission(rq->hw_context)) goto done; - GEM_BUG_ON(last->hw_context == rq->hw_context); if (submit) port_assign(port, last); @@ -769,8 +815,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine) } rb_erase_cached(&p->node, &execlists->queue); - if (p->priority != I915_PRIORITY_NORMAL) - kmem_cache_free(engine->i915->priorities, p); + i915_priolist_free(p); } done: @@ -790,8 +835,7 @@ done: * request triggering preemption on the next dequeue (or subsequent * interrupt for secondary ports). */ - execlists->queue_priority_hint = - port != execlists->port ? rq_prio(last) : INT_MIN; + execlists->queue_priority_hint = queue_prio(execlists); if (submit) { port_assign(port, last); @@ -821,13 +865,11 @@ execlists_cancel_port_requests(struct intel_engine_execlists * const execlists) while (num_ports-- && port_isset(port)) { struct i915_request *rq = port_request(port); - GEM_TRACE("%s:port%u global=%d (fence %llx:%lld), (current %d:%d)\n", + GEM_TRACE("%s:port%u fence %llx:%lld, (current %d)\n", rq->engine->name, (unsigned int)(port - execlists->port), - rq->global_seqno, rq->fence.context, rq->fence.seqno, - hwsp_seqno(rq), - intel_engine_get_seqno(rq->engine)); + hwsp_seqno(rq)); GEM_BUG_ON(!execlists->active); execlists_context_schedule_out(rq, @@ -883,8 +925,7 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine) struct rb_node *rb; unsigned long flags; - GEM_TRACE("%s current %d\n", - engine->name, intel_engine_get_seqno(engine)); + GEM_TRACE("%s\n", engine->name); /* * Before we call engine->cancel_requests(), we should have exclusive @@ -908,8 +949,6 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine) /* Mark all executing requests as skipped. */ list_for_each_entry(rq, &engine->timeline.requests, link) { - GEM_BUG_ON(!rq->global_seqno); - if (!i915_request_signaled(rq)) dma_fence_set_error(&rq->fence, -EIO); @@ -929,14 +968,9 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine) } rb_erase_cached(&p->node, &execlists->queue); - if (p->priority != I915_PRIORITY_NORMAL) - kmem_cache_free(engine->i915->priorities, p); + i915_priolist_free(p); } - intel_write_status_page(engine, - I915_GEM_HWS_INDEX, - intel_engine_last_submit(engine)); - /* Remaining _unready_ requests will be nop'ed when submitted */ execlists->queue_priority_hint = INT_MIN; @@ -1052,14 +1086,12 @@ static void process_csb(struct intel_engine_cs *engine) EXECLISTS_ACTIVE_USER)); rq = port_unpack(port, &count); - GEM_TRACE("%s out[0]: ctx=%d.%d, global=%d (fence %llx:%lld) (current %d:%d), prio=%d\n", + GEM_TRACE("%s out[0]: ctx=%d.%d, fence %llx:%lld (current %d), prio=%d\n", engine->name, port->context_id, count, - rq ? rq->global_seqno : 0, rq ? rq->fence.context : 0, rq ? rq->fence.seqno : 0, rq ? hwsp_seqno(rq) : 0, - intel_engine_get_seqno(engine), rq ? 
rq_prio(rq) : 0); /* Check the context/desc id for this event matches */ @@ -1196,19 +1228,50 @@ static void execlists_submit_request(struct i915_request *request) spin_unlock_irqrestore(&engine->timeline.lock, flags); } -static void execlists_context_destroy(struct intel_context *ce) +static void __execlists_context_fini(struct intel_context *ce) { - GEM_BUG_ON(ce->pin_count); - - if (!ce->state) - return; - - intel_ring_free(ce->ring); + intel_ring_put(ce->ring); GEM_BUG_ON(i915_gem_object_is_active(ce->state->obj)); i915_gem_object_put(ce->state->obj); } +static void execlists_context_destroy(struct kref *kref) +{ + struct intel_context *ce = container_of(kref, typeof(*ce), ref); + + GEM_BUG_ON(intel_context_is_pinned(ce)); + + if (ce->state) + __execlists_context_fini(ce); + + intel_context_free(ce); +} + +static int __context_pin(struct i915_vma *vma) +{ + unsigned int flags; + int err; + + flags = PIN_GLOBAL | PIN_HIGH; + flags |= PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma); + + err = i915_vma_pin(vma, 0, 0, flags); + if (err) + return err; + + vma->obj->pin_global++; + vma->obj->mm.dirty = true; + + return 0; +} + +static void __context_unpin(struct i915_vma *vma) +{ + vma->obj->pin_global--; + __i915_vma_unpin(vma); +} + static void execlists_context_unpin(struct intel_context *ce) { struct intel_engine_cs *engine; @@ -1237,41 +1300,19 @@ static void execlists_context_unpin(struct intel_context *ce) intel_ring_unpin(ce->ring); - ce->state->obj->pin_global--; i915_gem_object_unpin_map(ce->state->obj); - i915_vma_unpin(ce->state); - - i915_gem_context_put(ce->gem_context); -} - -static int __context_pin(struct i915_gem_context *ctx, struct i915_vma *vma) -{ - unsigned int flags; - int err; - - /* - * Clear this page out of any CPU caches for coherent swap-in/out. - * We only want to do this on the first bind so that we do not stall - * on an active context (which by nature is already on the GPU). 
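A minimal userspace model (not part of the patch) of the kref release pattern that execlists_context_destroy() now uses: the final reference drop invokes a release callback, which recovers the containing object with container_of(). All type and helper names below are made up for illustration.

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct ref { int count; };		/* stand-in for struct kref */

struct ctx {
	int id;
	struct ref ref;			/* refcount embedded in the object */
};

static void ctx_release(struct ref *r)
{
	/* Recover the containing object from the embedded refcount. */
	struct ctx *c = container_of(r, struct ctx, ref);

	printf("releasing ctx %d\n", c->id);
	free(c);
}

static void ref_put(struct ref *r, void (*release)(struct ref *))
{
	if (--r->count == 0)
		release(r);
}

int main(void)
{
	struct ctx *c = malloc(sizeof(*c));

	if (!c)
		return 1;
	c->id = 1;
	c->ref.count = 1;
	ref_put(&c->ref, ctx_release);	/* the final put runs the release */
	return 0;
}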
- */ - if (!(vma->flags & I915_VMA_GLOBAL_BIND)) { - err = i915_gem_object_set_to_wc_domain(vma->obj, true); - if (err) - return err; - } - - flags = PIN_GLOBAL | PIN_HIGH; - flags |= PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma); - - return i915_vma_pin(vma, 0, 0, flags); + __context_unpin(ce->state); } static void -__execlists_update_reg_state(struct intel_engine_cs *engine, - struct intel_context *ce) +__execlists_update_reg_state(struct intel_context *ce, + struct intel_engine_cs *engine) { - u32 *regs = ce->lrc_reg_state; struct intel_ring *ring = ce->ring; + u32 *regs = ce->lrc_reg_state; + + GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->head)); + GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->tail)); regs[CTX_RING_BUFFER_START + 1] = i915_ggtt_offset(ring->vma); regs[CTX_RING_HEAD + 1] = ring->head; @@ -1279,29 +1320,30 @@ __execlists_update_reg_state(struct intel_engine_cs *engine, /* RPCS */ if (engine->class == RENDER_CLASS) - regs[CTX_R_PWR_CLK_STATE + 1] = gen8_make_rpcs(engine->i915, - &ce->sseu); + regs[CTX_R_PWR_CLK_STATE + 1] = + gen8_make_rpcs(engine->i915, &ce->sseu); } -static struct intel_context * -__execlists_context_pin(struct intel_engine_cs *engine, - struct i915_gem_context *ctx, - struct intel_context *ce) +static int +__execlists_context_pin(struct intel_context *ce, + struct intel_engine_cs *engine) { void *vaddr; int ret; - ret = execlists_context_deferred_alloc(ctx, engine, ce); + GEM_BUG_ON(!ce->gem_context->ppgtt); + + ret = execlists_context_deferred_alloc(ce, engine); if (ret) goto err; GEM_BUG_ON(!ce->state); - ret = __context_pin(ctx, ce->state); + ret = __context_pin(ce->state); if (ret) goto err; vaddr = i915_gem_object_pin_map(ce->state->obj, - i915_coherent_map_type(ctx->i915) | + i915_coherent_map_type(engine->i915) | I915_MAP_OVERRIDE); if (IS_ERR(vaddr)) { ret = PTR_ERR(vaddr); @@ -1312,56 +1354,37 @@ __execlists_context_pin(struct intel_engine_cs *engine, if (ret) goto unpin_map; - ret = i915_gem_context_pin_hw_id(ctx); + ret = i915_gem_context_pin_hw_id(ce->gem_context); if (ret) goto unpin_ring; - intel_lr_context_descriptor_update(ctx, engine, ce); - - GEM_BUG_ON(!intel_ring_offset_valid(ce->ring, ce->ring->head)); - + ce->lrc_desc = lrc_descriptor(ce, engine); ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE; + __execlists_update_reg_state(ce, engine); - __execlists_update_reg_state(engine, ce); - - ce->state->obj->pin_global++; - i915_gem_context_get(ctx); - return ce; + return 0; unpin_ring: intel_ring_unpin(ce->ring); unpin_map: i915_gem_object_unpin_map(ce->state->obj); unpin_vma: - __i915_vma_unpin(ce->state); + __context_unpin(ce->state); err: - ce->pin_count = 0; - return ERR_PTR(ret); + return ret; +} + +static int execlists_context_pin(struct intel_context *ce) +{ + return __execlists_context_pin(ce, ce->engine); } static const struct intel_context_ops execlists_context_ops = { + .pin = execlists_context_pin, .unpin = execlists_context_unpin, .destroy = execlists_context_destroy, }; -static struct intel_context * -execlists_context_pin(struct intel_engine_cs *engine, - struct i915_gem_context *ctx) -{ - struct intel_context *ce = to_intel_context(ctx, engine); - - lockdep_assert_held(&ctx->i915->drm.struct_mutex); - GEM_BUG_ON(!ctx->ppgtt); - - if (likely(ce->pin_count++)) - return ce; - GEM_BUG_ON(!ce->pin_count); /* no overflow please! 
*/ - - ce->ops = &execlists_context_ops; - - return __execlists_context_pin(engine, ctx, ce); -} - static int gen8_emit_init_breadcrumb(struct i915_request *rq) { u32 *cs; @@ -1387,6 +1410,10 @@ static int gen8_emit_init_breadcrumb(struct i915_request *rq) *cs++ = rq->fence.seqno - 1; intel_ring_advance(rq, cs); + + /* Record the updated position of the request's payload */ + rq->infix = intel_ring_offset(rq, cs); + return 0; } @@ -1447,7 +1474,7 @@ static int execlists_request_alloc(struct i915_request *request) { int ret; - GEM_BUG_ON(!request->hw_context->pin_count); + GEM_BUG_ON(!intel_context_is_pinned(request->hw_context)); /* * Flush enough space to reduce the likelihood of waiting after @@ -1465,7 +1492,7 @@ static int execlists_request_alloc(struct i915_request *request) */ /* Unconditionally invalidate GPU caches and TLBs. */ - if (i915_vm_is_48bit(&request->gem_context->ppgtt->vm)) + if (i915_vm_is_4lvl(&request->gem_context->ppgtt->vm)) ret = request->engine->emit_flush(request, EMIT_INVALIDATE); else ret = emit_pdps(request); @@ -1732,7 +1759,7 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine) unsigned int i; int ret; - if (GEM_DEBUG_WARN_ON(engine->id != RCS)) + if (GEM_DEBUG_WARN_ON(engine->id != RCS0)) return -EINVAL; switch (INTEL_GEN(engine->i915)) { @@ -1872,12 +1899,31 @@ static void execlists_reset_prepare(struct intel_engine_cs *engine) __tasklet_disable_sync_once(&execlists->tasklet); GEM_BUG_ON(!reset_in_progress(execlists)); + intel_engine_stop_cs(engine); + /* And flush any current direct submission. */ spin_lock_irqsave(&engine->timeline.lock, flags); process_csb(engine); /* drain preemption events */ spin_unlock_irqrestore(&engine->timeline.lock, flags); } +static bool lrc_regs_ok(const struct i915_request *rq) +{ + const struct intel_ring *ring = rq->ring; + const u32 *regs = rq->hw_context->lrc_reg_state; + + /* Quick spot check for the common signs of context corruption */ + + if (regs[CTX_RING_BUFFER_CONTROL + 1] != + (RING_CTL_SIZE(ring->size) | RING_VALID)) + return false; + + if (regs[CTX_RING_BUFFER_START + 1] != i915_ggtt_offset(ring->vma)) + return false; + + return true; +} + static void execlists_reset(struct intel_engine_cs *engine, bool stalled) { struct intel_engine_execlists * const execlists = &engine->execlists; @@ -1904,15 +1950,25 @@ static void execlists_reset(struct intel_engine_cs *engine, bool stalled) /* Following the reset, we need to reload the CSB read/write pointers */ reset_csb_pointers(&engine->execlists); - GEM_TRACE("%s seqno=%d, current=%d, stalled? %s\n", - engine->name, - rq ? rq->global_seqno : 0, - intel_engine_get_seqno(engine), - yesno(stalled)); if (!rq) goto out_unlock; /* + * If this request hasn't started yet, e.g. it is waiting on a + * semaphore, we need to avoid skipping the request or else we + * break the signaling chain. However, if the context is corrupt + * the request will not restart and we will be stuck with a wedged + * device. It is quite often the case that if we issue a reset + * while the GPU is loading the context image, that the context + * image becomes corrupt. + * + * Otherwise, if we have not started yet, the request should replay + * perfectly and we do not need to flag the result as being erroneous. + */ + if (!i915_request_started(rq) && lrc_regs_ok(rq)) + goto out_unlock; + + /* * If the request was innocent, we leave the request in the ELSP * and will try to replay it on restarting. 
The context image may * have been corrupted by the reset, in which case we may have @@ -1924,7 +1980,7 @@ static void execlists_reset(struct intel_engine_cs *engine, bool stalled) * image back to the expected values to skip over the guilty request. */ i915_reset_request(rq, stalled); - if (!stalled) + if (!stalled && lrc_regs_ok(rq)) goto out_unlock; /* @@ -1942,12 +1998,12 @@ static void execlists_reset(struct intel_engine_cs *engine, bool stalled) engine->context_size - PAGE_SIZE); } - /* Move the RING_HEAD onto the breadcrumb, past the hanging batch */ - rq->ring->head = intel_ring_wrap(rq->ring, rq->postfix); + /* Rerun the request; its payload has been neutered (if guilty). */ + rq->ring->head = intel_ring_wrap(rq->ring, rq->head); intel_ring_update_space(rq->ring); - execlists_init_reg_state(regs, rq->gem_context, engine, rq->ring); - __execlists_update_reg_state(engine, rq->hw_context); + execlists_init_reg_state(regs, rq->hw_context, engine, rq->ring); + __execlists_update_reg_state(rq->hw_context, engine); out_unlock: spin_unlock_irqrestore(&engine->timeline.lock, flags); @@ -1961,13 +2017,14 @@ static void execlists_reset_finish(struct intel_engine_cs *engine) * After a GPU reset, we may have requests to replay. Do so now while * we still have the forcewake to be sure that the GPU is not allowed * to sleep before we restart and reload a context. - * */ GEM_BUG_ON(!reset_in_progress(execlists)); if (!RB_EMPTY_ROOT(&execlists->queue.rb_root)) execlists->tasklet.func(execlists->tasklet.data); - tasklet_enable(&execlists->tasklet); + if (__tasklet_enable(&execlists->tasklet)) + /* And kick in case we missed a new request submission. */ + tasklet_hi_schedule(&execlists->tasklet); GEM_TRACE("%s: depth->%d\n", engine->name, atomic_read(&execlists->tasklet.count)); } @@ -2017,16 +2074,14 @@ static int gen8_emit_bb_start(struct i915_request *rq, static void gen8_logical_ring_enable_irq(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = engine->i915; - I915_WRITE_IMR(engine, - ~(engine->irq_enable_mask | engine->irq_keep_mask)); - POSTING_READ_FW(RING_IMR(engine->mmio_base)); + ENGINE_WRITE(engine, RING_IMR, + ~(engine->irq_enable_mask | engine->irq_keep_mask)); + ENGINE_POSTING_READ(engine, RING_IMR); } static void gen8_logical_ring_disable_irq(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = engine->i915; - I915_WRITE_IMR(engine, ~engine->irq_keep_mask); + ENGINE_WRITE(engine, RING_IMR, ~engine->irq_keep_mask); } static int gen8_emit_flush(struct i915_request *request, u32 mode) @@ -2148,16 +2203,16 @@ static u32 *gen8_emit_wa_tail(struct i915_request *request, u32 *cs) static u32 *gen8_emit_fini_breadcrumb(struct i915_request *request, u32 *cs) { - /* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. 
*/ - BUILD_BUG_ON(I915_GEM_HWS_INDEX_ADDR & (1 << 5)); - cs = gen8_emit_ggtt_write(cs, request->fence.seqno, - request->timeline->hwsp_offset); + request->timeline->hwsp_offset, + 0); cs = gen8_emit_ggtt_write(cs, - request->global_seqno, - intel_hws_seqno_address(request->engine)); + intel_engine_next_hangcheck_seqno(request->engine), + I915_GEM_HWS_HANGCHECK_ADDR, + MI_FLUSH_DW_STORE_INDEX); + *cs++ = MI_USER_INTERRUPT; *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; @@ -2180,9 +2235,9 @@ static u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs) PIPE_CONTROL_CS_STALL); cs = gen8_emit_ggtt_write_rcs(cs, - request->global_seqno, - intel_hws_seqno_address(request->engine), - PIPE_CONTROL_CS_STALL); + intel_engine_next_hangcheck_seqno(request->engine), + I915_GEM_HWS_HANGCHECK_ADDR, + PIPE_CONTROL_STORE_DATA_INDEX); *cs++ = MI_USER_INTERRUPT; *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; @@ -2231,7 +2286,7 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine) dev_priv = engine->i915; if (engine->buffer) { - WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0); + WARN_ON((ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0); } if (engine->cleanup) @@ -2259,14 +2314,10 @@ void intel_execlists_set_default_submission(struct intel_engine_cs *engine) engine->unpark = NULL; engine->flags |= I915_ENGINE_SUPPORTS_STATS; - if (engine->i915->preempt_context) + if (!intel_vgpu_active(engine->i915)) + engine->flags |= I915_ENGINE_HAS_SEMAPHORES; + if (engine->preempt_context) engine->flags |= I915_ENGINE_HAS_PREEMPTION; - - engine->i915->caps.scheduler = - I915_SCHEDULER_CAP_ENABLED | - I915_SCHEDULER_CAP_PRIORITY; - if (intel_engine_has_preemption(engine)) - engine->i915->caps.scheduler |= I915_SCHEDULER_CAP_PREEMPTION; } static void @@ -2279,7 +2330,7 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine) engine->reset.reset = execlists_reset; engine->reset.finish = execlists_reset_finish; - engine->context_pin = execlists_context_pin; + engine->cops = &execlists_context_ops; engine->request_alloc = execlists_request_alloc; engine->emit_flush = gen8_emit_flush; @@ -2309,11 +2360,11 @@ logical_ring_default_irqs(struct intel_engine_cs *engine) if (INTEL_GEN(engine->i915) < 11) { const u8 irq_shifts[] = { - [RCS] = GEN8_RCS_IRQ_SHIFT, - [BCS] = GEN8_BCS_IRQ_SHIFT, - [VCS] = GEN8_VCS1_IRQ_SHIFT, - [VCS2] = GEN8_VCS2_IRQ_SHIFT, - [VECS] = GEN8_VECS_IRQ_SHIFT, + [RCS0] = GEN8_RCS_IRQ_SHIFT, + [BCS0] = GEN8_BCS_IRQ_SHIFT, + [VCS0] = GEN8_VCS0_IRQ_SHIFT, + [VCS1] = GEN8_VCS1_IRQ_SHIFT, + [VECS0] = GEN8_VECS_IRQ_SHIFT, }; shift = irq_shifts[engine->id]; @@ -2348,6 +2399,7 @@ static int logical_ring_init(struct intel_engine_cs *engine) { struct drm_i915_private *i915 = engine->i915; struct intel_engine_execlists * const execlists = &engine->execlists; + u32 base = engine->mmio_base; int ret; ret = intel_engine_init_common(engine); @@ -2357,23 +2409,19 @@ static int logical_ring_init(struct intel_engine_cs *engine) intel_engine_init_workarounds(engine); if (HAS_LOGICAL_RING_ELSQ(i915)) { - execlists->submit_reg = i915->regs + - i915_mmio_reg_offset(RING_EXECLIST_SQ_CONTENTS(engine)); - execlists->ctrl_reg = i915->regs + - i915_mmio_reg_offset(RING_EXECLIST_CONTROL(engine)); + execlists->submit_reg = i915->uncore.regs + + i915_mmio_reg_offset(RING_EXECLIST_SQ_CONTENTS(base)); + execlists->ctrl_reg = i915->uncore.regs + + i915_mmio_reg_offset(RING_EXECLIST_CONTROL(base)); } else { - execlists->submit_reg = i915->regs + - i915_mmio_reg_offset(RING_ELSP(engine)); + execlists->submit_reg = 
i915->uncore.regs + + i915_mmio_reg_offset(RING_ELSP(base)); } execlists->preempt_complete_status = ~0u; - if (i915->preempt_context) { - struct intel_context *ce = - to_intel_context(i915->preempt_context, engine); - + if (engine->preempt_context) execlists->preempt_complete_status = - upper_32_bits(ce->lrc_desc); - } + upper_32_bits(engine->preempt_context->lrc_desc); execlists->csb_status = &engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX]; @@ -2592,13 +2640,13 @@ static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine) } static void execlists_init_reg_state(u32 *regs, - struct i915_gem_context *ctx, + struct intel_context *ce, struct intel_engine_cs *engine, struct intel_ring *ring) { - struct drm_i915_private *dev_priv = engine->i915; - u32 base = engine->mmio_base; + struct i915_hw_ppgtt *ppgtt = ce->gem_context->ppgtt; bool rcs = engine->class == RENDER_CLASS; + u32 base = engine->mmio_base; /* A context is actually a big batch buffer with several * MI_LOAD_REGISTER_IMM commands followed by (reg, value) pairs. The @@ -2610,10 +2658,10 @@ static void execlists_init_reg_state(u32 *regs, regs[CTX_LRI_HEADER_0] = MI_LOAD_REGISTER_IMM(rcs ? 14 : 11) | MI_LRI_FORCE_POSTED; - CTX_REG(regs, CTX_CONTEXT_CONTROL, RING_CONTEXT_CONTROL(engine), + CTX_REG(regs, CTX_CONTEXT_CONTROL, RING_CONTEXT_CONTROL(base), _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT) | _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH)); - if (INTEL_GEN(dev_priv) < 11) { + if (INTEL_GEN(engine->i915) < 11) { regs[CTX_CONTEXT_CONTROL + 1] |= _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT | CTX_CTRL_RS_CTX_ENABLE); @@ -2668,33 +2716,33 @@ static void execlists_init_reg_state(u32 *regs, CTX_REG(regs, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(engine, 0), 0); CTX_REG(regs, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(engine, 0), 0); - if (i915_vm_is_48bit(&ctx->ppgtt->vm)) { + if (i915_vm_is_4lvl(&ppgtt->vm)) { /* 64b PPGTT (48bit canonical) * PDP0_DESCRIPTOR contains the base address to PML4 and * other PDP Descriptors are ignored. */ - ASSIGN_CTX_PML4(ctx->ppgtt, regs); + ASSIGN_CTX_PML4(ppgtt, regs); } else { - ASSIGN_CTX_PDP(ctx->ppgtt, regs, 3); - ASSIGN_CTX_PDP(ctx->ppgtt, regs, 2); - ASSIGN_CTX_PDP(ctx->ppgtt, regs, 1); - ASSIGN_CTX_PDP(ctx->ppgtt, regs, 0); + ASSIGN_CTX_PDP(ppgtt, regs, 3); + ASSIGN_CTX_PDP(ppgtt, regs, 2); + ASSIGN_CTX_PDP(ppgtt, regs, 1); + ASSIGN_CTX_PDP(ppgtt, regs, 0); } if (rcs) { regs[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1); CTX_REG(regs, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE, 0); - i915_oa_init_reg_state(engine, ctx, regs); + i915_oa_init_reg_state(engine, ce, regs); } regs[CTX_END] = MI_BATCH_BUFFER_END; - if (INTEL_GEN(dev_priv) >= 10) + if (INTEL_GEN(engine->i915) >= 10) regs[CTX_END] |= BIT(0); } static int -populate_lr_context(struct i915_gem_context *ctx, +populate_lr_context(struct intel_context *ce, struct drm_i915_gem_object *ctx_obj, struct intel_engine_cs *engine, struct intel_ring *ring) @@ -2703,19 +2751,12 @@ populate_lr_context(struct i915_gem_context *ctx, u32 *regs; int ret; - ret = i915_gem_object_set_to_cpu_domain(ctx_obj, true); - if (ret) { - DRM_DEBUG_DRIVER("Could not set to CPU domain\n"); - return ret; - } - vaddr = i915_gem_object_pin_map(ctx_obj, I915_MAP_WB); if (IS_ERR(vaddr)) { ret = PTR_ERR(vaddr); DRM_DEBUG_DRIVER("Could not map object pages! 
(%d)\n", ret); return ret; } - ctx_obj->mm.dirty = true; if (engine->default_state) { /* @@ -2740,23 +2781,35 @@ populate_lr_context(struct i915_gem_context *ctx, /* The second page of the context object contains some fields which must * be set up prior to the first execution. */ regs = vaddr + LRC_STATE_PN * PAGE_SIZE; - execlists_init_reg_state(regs, ctx, engine, ring); + execlists_init_reg_state(regs, ce, engine, ring); if (!engine->default_state) regs[CTX_CONTEXT_CONTROL + 1] |= _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT); - if (ctx == ctx->i915->preempt_context && INTEL_GEN(engine->i915) < 11) + if (ce->gem_context == engine->i915->preempt_context && + INTEL_GEN(engine->i915) < 11) regs[CTX_CONTEXT_CONTROL + 1] |= _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT | CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT); + ret = 0; err_unpin_ctx: + __i915_gem_object_flush_map(ctx_obj, + LRC_HEADER_PAGES * PAGE_SIZE, + engine->context_size); i915_gem_object_unpin_map(ctx_obj); return ret; } -static int execlists_context_deferred_alloc(struct i915_gem_context *ctx, - struct intel_engine_cs *engine, - struct intel_context *ce) +static struct i915_timeline *get_timeline(struct i915_gem_context *ctx) +{ + if (ctx->timeline) + return i915_timeline_get(ctx->timeline); + else + return i915_timeline_create(ctx->i915, NULL); +} + +static int execlists_context_deferred_alloc(struct intel_context *ce, + struct intel_engine_cs *engine) { struct drm_i915_gem_object *ctx_obj; struct i915_vma *vma; @@ -2776,30 +2829,32 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx, */ context_size += LRC_HEADER_PAGES * PAGE_SIZE; - ctx_obj = i915_gem_object_create(ctx->i915, context_size); + ctx_obj = i915_gem_object_create(engine->i915, context_size); if (IS_ERR(ctx_obj)) return PTR_ERR(ctx_obj); - vma = i915_vma_instance(ctx_obj, &ctx->i915->ggtt.vm, NULL); + vma = i915_vma_instance(ctx_obj, &engine->i915->ggtt.vm, NULL); if (IS_ERR(vma)) { ret = PTR_ERR(vma); goto error_deref_obj; } - timeline = i915_timeline_create(ctx->i915, ctx->name, NULL); + timeline = get_timeline(ce->gem_context); if (IS_ERR(timeline)) { ret = PTR_ERR(timeline); goto error_deref_obj; } - ring = intel_engine_create_ring(engine, timeline, ctx->ring_size); + ring = intel_engine_create_ring(engine, + timeline, + ce->gem_context->ring_size); i915_timeline_put(timeline); if (IS_ERR(ring)) { ret = PTR_ERR(ring); goto error_deref_obj; } - ret = populate_lr_context(ctx, ctx_obj, engine, ring); + ret = populate_lr_context(ce, ctx_obj, engine, ring); if (ret) { DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret); goto error_ring_free; @@ -2811,7 +2866,7 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx, return 0; error_ring_free: - intel_ring_free(ring); + intel_ring_put(ring); error_deref_obj: i915_gem_object_put(ctx_obj); return ret; @@ -2819,9 +2874,8 @@ error_deref_obj: void intel_lr_context_resume(struct drm_i915_private *i915) { - struct intel_engine_cs *engine; struct i915_gem_context *ctx; - enum intel_engine_id id; + struct intel_context *ce; /* * Because we emit WA_TAIL_DWORDS there may be a disparity @@ -2835,17 +2889,10 @@ void intel_lr_context_resume(struct drm_i915_private *i915) * simplicity, we just zero everything out. 
*/ list_for_each_entry(ctx, &i915->contexts.list, link) { - for_each_engine(engine, i915, id) { - struct intel_context *ce = - to_intel_context(ctx, engine); - - if (!ce->state) - continue; - + list_for_each_entry(ce, &ctx->active_engines, active_link) { + GEM_BUG_ON(!ce->ring); intel_ring_reset(ce->ring, 0); - - if (ce->pin_count) /* otherwise done in context_pin */ - __execlists_update_reg_state(engine, ce); + __execlists_update_reg_state(ce, ce->engine); } } } diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h index f1aec8a6986f..92642ab91472 100644 --- a/drivers/gpu/drm/i915/intel_lrc.h +++ b/drivers/gpu/drm/i915/intel_lrc.h @@ -28,20 +28,20 @@ #include "i915_gem_context.h" /* Execlists regs */ -#define RING_ELSP(engine) _MMIO((engine)->mmio_base + 0x230) -#define RING_EXECLIST_STATUS_LO(engine) _MMIO((engine)->mmio_base + 0x234) -#define RING_EXECLIST_STATUS_HI(engine) _MMIO((engine)->mmio_base + 0x234 + 4) -#define RING_CONTEXT_CONTROL(engine) _MMIO((engine)->mmio_base + 0x244) +#define RING_ELSP(base) _MMIO((base) + 0x230) +#define RING_EXECLIST_STATUS_LO(base) _MMIO((base) + 0x234) +#define RING_EXECLIST_STATUS_HI(base) _MMIO((base) + 0x234 + 4) +#define RING_CONTEXT_CONTROL(base) _MMIO((base) + 0x244) #define CTX_CTRL_INHIBIT_SYN_CTX_SWITCH (1 << 3) #define CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT (1 << 0) -#define CTX_CTRL_RS_CTX_ENABLE (1 << 1) +#define CTX_CTRL_RS_CTX_ENABLE (1 << 1) #define CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT (1 << 2) -#define RING_CONTEXT_STATUS_BUF_BASE(engine) _MMIO((engine)->mmio_base + 0x370) -#define RING_CONTEXT_STATUS_BUF_LO(engine, i) _MMIO((engine)->mmio_base + 0x370 + (i) * 8) -#define RING_CONTEXT_STATUS_BUF_HI(engine, i) _MMIO((engine)->mmio_base + 0x370 + (i) * 8 + 4) -#define RING_CONTEXT_STATUS_PTR(engine) _MMIO((engine)->mmio_base + 0x3a0) -#define RING_EXECLIST_SQ_CONTENTS(engine) _MMIO((engine)->mmio_base + 0x510) -#define RING_EXECLIST_CONTROL(engine) _MMIO((engine)->mmio_base + 0x550) +#define RING_CONTEXT_STATUS_BUF_BASE(base) _MMIO((base) + 0x370) +#define RING_CONTEXT_STATUS_BUF_LO(base, i) _MMIO((base) + 0x370 + (i) * 8) +#define RING_CONTEXT_STATUS_BUF_HI(base, i) _MMIO((base) + 0x370 + (i) * 8 + 4) +#define RING_CONTEXT_STATUS_PTR(base) _MMIO((base) + 0x3a0) +#define RING_EXECLIST_SQ_CONTENTS(base) _MMIO((base) + 0x510) +#define RING_EXECLIST_CONTROL(base) _MMIO((base) + 0x550) #define EL_CTRL_LOAD (1 << 0) /* The docs specify that the write pointer wraps around after 5h, "After status diff --git a/drivers/gpu/drm/i915/intel_lspcon.c b/drivers/gpu/drm/i915/intel_lspcon.c index 322bdddda164..8d202b13e24f 100644 --- a/drivers/gpu/drm/i915/intel_lspcon.c +++ b/drivers/gpu/drm/i915/intel_lspcon.c @@ -452,6 +452,14 @@ void lspcon_write_infoframe(struct intel_encoder *encoder, DRM_DEBUG_DRIVER("AVI infoframes updated successfully\n"); } +void lspcon_read_infoframe(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + unsigned int type, + void *frame, ssize_t len) +{ + /* FIXME implement this */ +} + void lspcon_set_infoframes(struct intel_encoder *encoder, bool enable, const struct intel_crtc_state *crtc_state, @@ -470,6 +478,8 @@ void lspcon_set_infoframes(struct intel_encoder *encoder, return; } + /* FIXME precompute infoframes */ + ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi, conn_state->connector, adjusted_mode); @@ -504,9 +514,10 @@ void lspcon_set_infoframes(struct intel_encoder *encoder, buf, ret); } -bool lspcon_infoframe_enabled(struct intel_encoder *encoder, +u32 
lspcon_infoframes_enabled(struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config) { + /* FIXME actually read this from the hw */ return enc_to_intel_lspcon(&encoder->base)->active; } diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index b4aa49768e90..34dd2d71814b 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -152,24 +152,17 @@ static void intel_lvds_pps_get_hw_state(struct drm_i915_private *dev_priv, pps->powerdown_on_reset = I915_READ(PP_CONTROL(0)) & PANEL_POWER_RESET; val = I915_READ(PP_ON_DELAYS(0)); - pps->port = (val & PANEL_PORT_SELECT_MASK) >> - PANEL_PORT_SELECT_SHIFT; - pps->t1_t2 = (val & PANEL_POWER_UP_DELAY_MASK) >> - PANEL_POWER_UP_DELAY_SHIFT; - pps->t5 = (val & PANEL_LIGHT_ON_DELAY_MASK) >> - PANEL_LIGHT_ON_DELAY_SHIFT; + pps->port = REG_FIELD_GET(PANEL_PORT_SELECT_MASK, val); + pps->t1_t2 = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, val); + pps->t5 = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, val); val = I915_READ(PP_OFF_DELAYS(0)); - pps->t3 = (val & PANEL_POWER_DOWN_DELAY_MASK) >> - PANEL_POWER_DOWN_DELAY_SHIFT; - pps->tx = (val & PANEL_LIGHT_OFF_DELAY_MASK) >> - PANEL_LIGHT_OFF_DELAY_SHIFT; + pps->t3 = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, val); + pps->tx = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, val); val = I915_READ(PP_DIVISOR(0)); - pps->divider = (val & PP_REFERENCE_DIVIDER_MASK) >> - PP_REFERENCE_DIVIDER_SHIFT; - val = (val & PANEL_POWER_CYCLE_DELAY_MASK) >> - PANEL_POWER_CYCLE_DELAY_SHIFT; + pps->divider = REG_FIELD_GET(PP_REFERENCE_DIVIDER_MASK, val); + val = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, val); /* * Remove the BSpec specified +1 (100ms) offset that accounts for a * too short power-cycle delay due to the asynchronous programming of @@ -209,16 +202,19 @@ static void intel_lvds_pps_init_hw(struct drm_i915_private *dev_priv, val |= PANEL_POWER_RESET; I915_WRITE(PP_CONTROL(0), val); - I915_WRITE(PP_ON_DELAYS(0), (pps->port << PANEL_PORT_SELECT_SHIFT) | - (pps->t1_t2 << PANEL_POWER_UP_DELAY_SHIFT) | - (pps->t5 << PANEL_LIGHT_ON_DELAY_SHIFT)); - I915_WRITE(PP_OFF_DELAYS(0), (pps->t3 << PANEL_POWER_DOWN_DELAY_SHIFT) | - (pps->tx << PANEL_LIGHT_OFF_DELAY_SHIFT)); + I915_WRITE(PP_ON_DELAYS(0), + REG_FIELD_PREP(PANEL_PORT_SELECT_MASK, pps->port) | + REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, pps->t1_t2) | + REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, pps->t5)); - val = pps->divider << PP_REFERENCE_DIVIDER_SHIFT; - val |= (DIV_ROUND_UP(pps->t4, 1000) + 1) << - PANEL_POWER_CYCLE_DELAY_SHIFT; - I915_WRITE(PP_DIVISOR(0), val); + I915_WRITE(PP_OFF_DELAYS(0), + REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, pps->t3) | + REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, pps->tx)); + + I915_WRITE(PP_DIVISOR(0), + REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, pps->divider) | + REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, + DIV_ROUND_UP(pps->t4, 1000) + 1)); } static void intel_pre_enable_lvds(struct intel_encoder *encoder, @@ -315,7 +311,8 @@ static void intel_enable_lvds(struct intel_encoder *encoder, I915_WRITE(PP_CONTROL(0), I915_READ(PP_CONTROL(0)) | PANEL_POWER_ON); POSTING_READ(lvds_encoder->reg); - if (intel_wait_for_register(dev_priv, PP_STATUS(0), PP_ON, PP_ON, 5000)) + if (intel_wait_for_register(&dev_priv->uncore, + PP_STATUS(0), PP_ON, PP_ON, 5000)) DRM_ERROR("timed out waiting for panel to power on\n"); intel_panel_enable_backlight(pipe_config, conn_state); @@ -329,7 +326,8 @@ static void intel_disable_lvds(struct intel_encoder *encoder, struct drm_i915_private *dev_priv = 
to_i915(encoder->base.dev); I915_WRITE(PP_CONTROL(0), I915_READ(PP_CONTROL(0)) & ~PANEL_POWER_ON); - if (intel_wait_for_register(dev_priv, PP_STATUS(0), PP_ON, 0, 1000)) + if (intel_wait_for_register(&dev_priv->uncore, + PP_STATUS(0), PP_ON, 0, 1000)) DRM_ERROR("timed out waiting for panel to power off\n"); I915_WRITE(lvds_encoder->reg, I915_READ(lvds_encoder->reg) & ~LVDS_PORT_EN); @@ -746,20 +744,21 @@ static const struct dmi_system_id intel_dual_link_lvds[] = { { } /* terminating entry */ }; -struct intel_encoder *intel_get_lvds_encoder(struct drm_device *dev) +struct intel_encoder *intel_get_lvds_encoder(struct drm_i915_private *dev_priv) { - struct intel_encoder *intel_encoder; + struct intel_encoder *encoder; - for_each_intel_encoder(dev, intel_encoder) - if (intel_encoder->type == INTEL_OUTPUT_LVDS) - return intel_encoder; + for_each_intel_encoder(&dev_priv->drm, encoder) { + if (encoder->type == INTEL_OUTPUT_LVDS) + return encoder; + } return NULL; } -bool intel_is_dual_link_lvds(struct drm_device *dev) +bool intel_is_dual_link_lvds(struct drm_i915_private *dev_priv) { - struct intel_encoder *encoder = intel_get_lvds_encoder(dev); + struct intel_encoder *encoder = intel_get_lvds_encoder(dev_priv); return encoder && to_lvds_encoder(&encoder->base)->is_dual_link; } @@ -813,7 +812,6 @@ void intel_lvds_init(struct drm_i915_private *dev_priv) struct intel_connector *intel_connector; struct drm_connector *connector; struct drm_encoder *encoder; - struct drm_display_mode *scan; /* *modes, *bios_mode; */ struct drm_display_mode *fixed_mode = NULL; struct drm_display_mode *downclock_mode = NULL; struct edid *edid; @@ -952,30 +950,14 @@ void intel_lvds_init(struct drm_i915_private *dev_priv) } intel_connector->edid = edid; - list_for_each_entry(scan, &connector->probed_modes, head) { - if (scan->type & DRM_MODE_TYPE_PREFERRED) { - DRM_DEBUG_KMS("using preferred mode from EDID: "); - drm_mode_debug_printmodeline(scan); - - fixed_mode = drm_mode_duplicate(dev, scan); - if (fixed_mode) - goto out; - } - } + fixed_mode = intel_panel_edid_fixed_mode(intel_connector); + if (fixed_mode) + goto out; /* Failed to get EDID, what about VBT? 
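A small standalone sketch (not part of the patch) of what mask-based field helpers in the style of REG_FIELD_GET()/REG_FIELD_PREP() do: the shift is derived from the mask, so callers only name the mask and the open-coded *_SHIFT constants become unnecessary. The macros and mask layouts below are simplified stand-ins, not the kernel's implementation, and __builtin_ctz() assumes GCC/Clang.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins: derive the shift from the mask itself. */
#define FIELD_SHIFT(mask)	__builtin_ctz(mask)
#define FIELD_GET(mask, val)	(((val) & (mask)) >> FIELD_SHIFT(mask))
#define FIELD_PREP(mask, val)	(((val) << FIELD_SHIFT(mask)) & (mask))

/* Mask layouts shown here are for illustration only. */
#define POWER_UP_DELAY_MASK	0x1fff0000u
#define LIGHT_ON_DELAY_MASK	0x00001fffu

int main(void)
{
	uint32_t reg = FIELD_PREP(POWER_UP_DELAY_MASK, 200u) |
		       FIELD_PREP(LIGHT_ON_DELAY_MASK, 50u);

	printf("t1_t2=%u t5=%u\n",
	       FIELD_GET(POWER_UP_DELAY_MASK, reg),
	       FIELD_GET(LIGHT_ON_DELAY_MASK, reg));
	return 0;
}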
*/ - if (dev_priv->vbt.lfp_lvds_vbt_mode) { - DRM_DEBUG_KMS("using mode from VBT: "); - drm_mode_debug_printmodeline(dev_priv->vbt.lfp_lvds_vbt_mode); - - fixed_mode = drm_mode_duplicate(dev, dev_priv->vbt.lfp_lvds_vbt_mode); - if (fixed_mode) { - fixed_mode->type |= DRM_MODE_TYPE_PREFERRED; - connector->display_info.width_mm = fixed_mode->width_mm; - connector->display_info.height_mm = fixed_mode->height_mm; - goto out; - } - } + fixed_mode = intel_panel_vbt_fixed_mode(intel_connector); + if (fixed_mode) + goto out; /* * If we didn't get EDID, try checking if the panel is already turned diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c index 331e7a678fb7..274ba78500c0 100644 --- a/drivers/gpu/drm/i915/intel_mocs.c +++ b/drivers/gpu/drm/i915/intel_mocs.c @@ -252,7 +252,7 @@ static bool get_mocs_settings(struct drm_i915_private *dev_priv, { bool result = false; - if (IS_ICELAKE(dev_priv)) { + if (INTEL_GEN(dev_priv) >= 11) { table->size = ARRAY_SIZE(icelake_mocs_table); table->table = icelake_mocs_table; table->n_entries = GEN11_NUM_MOCS_ENTRIES; @@ -288,17 +288,17 @@ static bool get_mocs_settings(struct drm_i915_private *dev_priv, static i915_reg_t mocs_register(enum intel_engine_id engine_id, int index) { switch (engine_id) { - case RCS: + case RCS0: return GEN9_GFX_MOCS(index); - case VCS: + case VCS0: return GEN9_MFX0_MOCS(index); - case BCS: + case BCS0: return GEN9_BLT_MOCS(index); - case VECS: + case VECS0: return GEN9_VEBOX_MOCS(index); - case VCS2: + case VCS1: return GEN9_MFX1_MOCS(index); - case VCS3: + case VCS2: return GEN11_MFX2_MOCS(index); default: MISSING_CASE(engine_id); diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index c0df1dbb0069..a882b8d42bd9 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -236,7 +236,7 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay, static struct i915_request *alloc_request(struct intel_overlay *overlay) { struct drm_i915_private *dev_priv = overlay->i915; - struct intel_engine_cs *engine = dev_priv->engine[RCS]; + struct intel_engine_cs *engine = dev_priv->engine[RCS0]; return i915_request_alloc(engine, dev_priv->kernel_context); } diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index beca98d2b035..47cd4a338db6 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c @@ -46,27 +46,26 @@ intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode, drm_mode_set_crtcinfo(adjusted_mode, 0); } -/** - * intel_find_panel_downclock - find the reduced downclock for LVDS in EDID - * @dev_priv: i915 device instance - * @fixed_mode : panel native mode - * @connector: LVDS/eDP connector - * - * Return downclock_avail - * Find the reduced downclock for LVDS/eDP in EDID. 
- */ -struct drm_display_mode * -intel_find_panel_downclock(struct drm_i915_private *dev_priv, - struct drm_display_mode *fixed_mode, - struct drm_connector *connector) +static bool is_downclock_mode(const struct drm_display_mode *downclock_mode, + const struct drm_display_mode *fixed_mode) { - struct drm_display_mode *scan, *tmp_mode; - int temp_downclock; + return drm_mode_match(downclock_mode, fixed_mode, + DRM_MODE_MATCH_TIMINGS | + DRM_MODE_MATCH_FLAGS | + DRM_MODE_MATCH_3D_FLAGS) && + downclock_mode->clock < fixed_mode->clock; +} - temp_downclock = fixed_mode->clock; - tmp_mode = NULL; +struct drm_display_mode * +intel_panel_edid_downclock_mode(struct intel_connector *connector, + const struct drm_display_mode *fixed_mode) +{ + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + const struct drm_display_mode *scan, *best_mode = NULL; + struct drm_display_mode *downclock_mode; + int best_clock = fixed_mode->clock; - list_for_each_entry(scan, &connector->probed_modes, head) { + list_for_each_entry(scan, &connector->base.probed_modes, head) { /* * If one mode has the same resolution with the fixed_panel * mode while they have the different refresh rate, it means @@ -74,29 +73,98 @@ intel_find_panel_downclock(struct drm_i915_private *dev_priv, * case we can set the different FPx0/1 to dynamically select * between low and high frequency. */ - if (scan->hdisplay == fixed_mode->hdisplay && - scan->hsync_start == fixed_mode->hsync_start && - scan->hsync_end == fixed_mode->hsync_end && - scan->htotal == fixed_mode->htotal && - scan->vdisplay == fixed_mode->vdisplay && - scan->vsync_start == fixed_mode->vsync_start && - scan->vsync_end == fixed_mode->vsync_end && - scan->vtotal == fixed_mode->vtotal) { - if (scan->clock < temp_downclock) { - /* - * The downclock is already found. But we - * expect to find the lower downclock. - */ - temp_downclock = scan->clock; - tmp_mode = scan; - } + if (is_downclock_mode(scan, fixed_mode) && + scan->clock < best_clock) { + /* + * The downclock is already found. But we + * expect to find the lower downclock. 
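A standalone sketch (not part of the patch) of the downclock-mode selection above: keep the lowest-clock probed mode whose timings match the fixed panel mode. The mode structure is heavily simplified and the sample clocks are invented.

#include <stdbool.h>
#include <stdio.h>

struct mode { int hdisplay, vdisplay, clock; };	/* heavily simplified */

static bool is_downclock_mode(const struct mode *m, const struct mode *fixed)
{
	/* Same timings (just the resolution here), strictly lower dotclock. */
	return m->hdisplay == fixed->hdisplay &&
	       m->vdisplay == fixed->vdisplay &&
	       m->clock < fixed->clock;
}

int main(void)
{
	const struct mode fixed = { 1920, 1080, 148500 };
	const struct mode probed[] = {
		{ 1920, 1080, 148500 },	/* the fixed mode itself */
		{ 1920, 1080, 120000 },	/* candidate */
		{ 1920, 1080, 100000 },	/* lower still -- this one wins */
		{ 1280,  720,  74250 },	/* different timings, ignored */
	};
	const struct mode *best = NULL;
	int best_clock = fixed.clock;
	unsigned int i;

	for (i = 0; i < sizeof(probed) / sizeof(probed[0]); i++) {
		if (is_downclock_mode(&probed[i], &fixed) &&
		    probed[i].clock < best_clock) {
			best_clock = probed[i].clock;
			best = &probed[i];
		}
	}

	if (best)
		printf("downclock mode: %d kHz\n", best->clock);
	else
		printf("no suitable downclock mode\n");
	return 0;
}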
+ */ + best_clock = scan->clock; + best_mode = scan; } } - if (temp_downclock < fixed_mode->clock) - return drm_mode_duplicate(&dev_priv->drm, tmp_mode); - else + if (!best_mode) + return NULL; + + downclock_mode = drm_mode_duplicate(&dev_priv->drm, best_mode); + if (!downclock_mode) return NULL; + + DRM_DEBUG_KMS("[CONNECTOR:%d:%s] using downclock mode from EDID: ", + connector->base.base.id, connector->base.name); + drm_mode_debug_printmodeline(downclock_mode); + + return downclock_mode; +} + +struct drm_display_mode * +intel_panel_edid_fixed_mode(struct intel_connector *connector) +{ + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + const struct drm_display_mode *scan; + struct drm_display_mode *fixed_mode; + + if (list_empty(&connector->base.probed_modes)) + return NULL; + + /* prefer fixed mode from EDID if available */ + list_for_each_entry(scan, &connector->base.probed_modes, head) { + if ((scan->type & DRM_MODE_TYPE_PREFERRED) == 0) + continue; + + fixed_mode = drm_mode_duplicate(&dev_priv->drm, scan); + if (!fixed_mode) + return NULL; + + DRM_DEBUG_KMS("[CONNECTOR:%d:%s] using preferred mode from EDID: ", + connector->base.base.id, connector->base.name); + drm_mode_debug_printmodeline(fixed_mode); + + return fixed_mode; + } + + scan = list_first_entry(&connector->base.probed_modes, + typeof(*scan), head); + + fixed_mode = drm_mode_duplicate(&dev_priv->drm, scan); + if (!fixed_mode) + return NULL; + + fixed_mode->type |= DRM_MODE_TYPE_PREFERRED; + + DRM_DEBUG_KMS("[CONNECTOR:%d:%s] using first mode from EDID: ", + connector->base.base.id, connector->base.name); + drm_mode_debug_printmodeline(fixed_mode); + + return fixed_mode; +} + +struct drm_display_mode * +intel_panel_vbt_fixed_mode(struct intel_connector *connector) +{ + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct drm_display_info *info = &connector->base.display_info; + struct drm_display_mode *fixed_mode; + + if (!dev_priv->vbt.lfp_lvds_vbt_mode) + return NULL; + + fixed_mode = drm_mode_duplicate(&dev_priv->drm, + dev_priv->vbt.lfp_lvds_vbt_mode); + if (!fixed_mode) + return NULL; + + fixed_mode->type |= DRM_MODE_TYPE_PREFERRED; + + DRM_DEBUG_KMS("[CONNECTOR:%d:%s] using mode from VBT: ", + connector->base.base.id, connector->base.name); + drm_mode_debug_printmodeline(fixed_mode); + + info->width_mm = fixed_mode->width_mm; + info->height_mm = fixed_mode->height_mm; + + return fixed_mode; } /* adjusted_mode has been preset to be the panel's fixed mode */ @@ -1894,15 +1962,14 @@ intel_panel_init_backlight_funcs(struct intel_panel *panel) panel->backlight.set = bxt_set_backlight; panel->backlight.get = bxt_get_backlight; panel->backlight.hz_to_pwm = bxt_hz_to_pwm; - } else if (HAS_PCH_CNP(dev_priv) || HAS_PCH_ICP(dev_priv)) { + } else if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP) { panel->backlight.setup = cnp_setup_backlight; panel->backlight.enable = cnp_enable_backlight; panel->backlight.disable = cnp_disable_backlight; panel->backlight.set = bxt_set_backlight; panel->backlight.get = bxt_get_backlight; panel->backlight.hz_to_pwm = cnp_hz_to_pwm; - } else if (HAS_PCH_LPT(dev_priv) || HAS_PCH_SPT(dev_priv) || - HAS_PCH_KBP(dev_priv)) { + } else if (INTEL_PCH_TYPE(dev_priv) >= PCH_LPT) { panel->backlight.setup = lpt_setup_backlight; panel->backlight.enable = lpt_enable_backlight; panel->backlight.disable = lpt_disable_backlight; diff --git a/drivers/gpu/drm/i915/intel_pipe_crc.c b/drivers/gpu/drm/i915/intel_pipe_crc.c index a8554dc4f196..0b1378f0bff7 100644 --- 
a/drivers/gpu/drm/i915/intel_pipe_crc.c +++ b/drivers/gpu/drm/i915/intel_pipe_crc.c @@ -31,16 +31,20 @@ #include "intel_drv.h" static const char * const pipe_crc_sources[] = { - "none", - "plane1", - "plane2", - "pf", - "pipe", - "TV", - "DP-B", - "DP-C", - "DP-D", - "auto", + [INTEL_PIPE_CRC_SOURCE_NONE] = "none", + [INTEL_PIPE_CRC_SOURCE_PLANE1] = "plane1", + [INTEL_PIPE_CRC_SOURCE_PLANE2] = "plane2", + [INTEL_PIPE_CRC_SOURCE_PLANE3] = "plane3", + [INTEL_PIPE_CRC_SOURCE_PLANE4] = "plane4", + [INTEL_PIPE_CRC_SOURCE_PLANE5] = "plane5", + [INTEL_PIPE_CRC_SOURCE_PLANE6] = "plane6", + [INTEL_PIPE_CRC_SOURCE_PLANE7] = "plane7", + [INTEL_PIPE_CRC_SOURCE_PIPE] = "pipe", + [INTEL_PIPE_CRC_SOURCE_TV] = "TV", + [INTEL_PIPE_CRC_SOURCE_DP_B] = "DP-B", + [INTEL_PIPE_CRC_SOURCE_DP_C] = "DP-C", + [INTEL_PIPE_CRC_SOURCE_DP_D] = "DP-D", + [INTEL_PIPE_CRC_SOURCE_AUTO] = "auto", }; static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source, @@ -192,8 +196,6 @@ static int i9xx_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv, enum intel_pipe_crc_source *source, u32 *val) { - bool need_stable_symbols = false; - if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) { int ret = i9xx_pipe_crc_auto_source(dev_priv, pipe, source); if (ret) @@ -209,56 +211,23 @@ static int i9xx_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv, return -EINVAL; *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_TV_PRE; break; - case INTEL_PIPE_CRC_SOURCE_DP_B: - if (!IS_G4X(dev_priv)) - return -EINVAL; - *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_G4X; - need_stable_symbols = true; - break; - case INTEL_PIPE_CRC_SOURCE_DP_C: - if (!IS_G4X(dev_priv)) - return -EINVAL; - *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_G4X; - need_stable_symbols = true; - break; - case INTEL_PIPE_CRC_SOURCE_DP_D: - if (!IS_G4X(dev_priv)) - return -EINVAL; - *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_G4X; - need_stable_symbols = true; - break; case INTEL_PIPE_CRC_SOURCE_NONE: *val = 0; break; default: + /* + * The DP CRC source doesn't work on g4x. + * It can be made to work to some degree by selecting + * the correct CRC source before the port is enabled, + * and not touching the CRC source bits again until + * the port is disabled. But even then the bits + * eventually get stuck and a reboot is needed to get + * working CRCs on the pipe again. Let's simply + * refuse to use DP CRCs on g4x. + */ return -EINVAL; } - /* - * When the pipe CRC tap point is after the transcoders we need - * to tweak symbol-level features to produce a deterministic series of - * symbols for a given frame. 
We need to reset those features only once - * a frame (instead of every nth symbol): - * - DC-balance: used to ensure a better clock recovery from the data - * link (SDVO) - * - DisplayPort scrambling: used for EMI reduction - */ - if (need_stable_symbols) { - u32 tmp = I915_READ(PORT_DFT2_G4X); - - WARN_ON(!IS_G4X(dev_priv)); - - I915_WRITE(PORT_DFT_I9XX, - I915_READ(PORT_DFT_I9XX) | DC_BALANCE_RESET); - - if (pipe == PIPE_A) - tmp |= PIPE_A_SCRAMBLE_RESET; - else - tmp |= PIPE_B_SCRAMBLE_RESET; - - I915_WRITE(PORT_DFT2_G4X, tmp); - } - return 0; } @@ -283,24 +252,6 @@ static void vlv_undo_pipe_scramble_reset(struct drm_i915_private *dev_priv, if (!(tmp & PIPE_SCRAMBLE_RESET_MASK)) tmp &= ~DC_BALANCE_RESET_VLV; I915_WRITE(PORT_DFT2_G4X, tmp); - -} - -static void g4x_undo_pipe_scramble_reset(struct drm_i915_private *dev_priv, - enum pipe pipe) -{ - u32 tmp = I915_READ(PORT_DFT2_G4X); - - if (pipe == PIPE_A) - tmp &= ~PIPE_A_SCRAMBLE_RESET; - else - tmp &= ~PIPE_B_SCRAMBLE_RESET; - I915_WRITE(PORT_DFT2_G4X, tmp); - - if (!(tmp & PIPE_SCRAMBLE_RESET_MASK)) { - I915_WRITE(PORT_DFT_I9XX, - I915_READ(PORT_DFT_I9XX) & ~DC_BALANCE_RESET); - } } static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source, @@ -329,19 +280,18 @@ static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source, return 0; } -static void hsw_pipe_A_crc_wa(struct drm_i915_private *dev_priv, - bool enable) +static void +intel_crtc_crc_setup_workarounds(struct intel_crtc *crtc, bool enable) { - struct drm_device *dev = &dev_priv->drm; - struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_A); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_crtc_state *pipe_config; struct drm_atomic_state *state; struct drm_modeset_acquire_ctx ctx; - int ret = 0; + int ret; drm_modeset_acquire_init(&ctx, 0); - state = drm_atomic_state_alloc(dev); + state = drm_atomic_state_alloc(&dev_priv->drm); if (!state) { ret = -ENOMEM; goto unlock; @@ -356,17 +306,10 @@ retry: goto put_state; } - if (HAS_IPS(dev_priv)) { - /* - * When IPS gets enabled, the pipe CRC changes. Since IPS gets - * enabled and disabled dynamically based on package C states, - * user space can't make reliable use of the CRCs, so let's just - * completely disable it. 
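A small standalone sketch (not part of the patch) of the table-plus-switch pattern used for the pipe CRC sources: designated initializers keep the source names keyed by the enum, and a switch maps each source to a control-register value. The enum, names and register values below are invented for illustration.

#include <stdio.h>

enum crc_source { SRC_NONE, SRC_PLANE1, SRC_PLANE2, SRC_PIPE, SRC_MAX };

/* Designated initializers keep the names in sync with the enum. */
static const char * const crc_source_name[SRC_MAX] = {
	[SRC_NONE]	= "none",
	[SRC_PLANE1]	= "plane1",
	[SRC_PLANE2]	= "plane2",
	[SRC_PIPE]	= "pipe",
};

/* Control-register values here are invented for illustration. */
static int crc_ctl_reg(enum crc_source source, unsigned int *val)
{
	switch (source) {
	case SRC_PLANE1:
		*val = 0x80000001;
		break;
	case SRC_PLANE2:
		*val = 0x80000002;
		break;
	case SRC_PIPE:
		*val = 0x80000007;
		break;
	case SRC_NONE:
		*val = 0;
		break;
	default:
		return -1;	/* -EINVAL in the kernel */
	}
	return 0;
}

int main(void)
{
	unsigned int val;
	int s;

	for (s = 0; s < SRC_MAX; s++) {
		if (crc_ctl_reg((enum crc_source)s, &val) == 0)
			printf("%-6s -> 0x%08x\n", crc_source_name[s], val);
	}
	return 0;
}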
- */ - pipe_config->ips_force_disable = enable; - } + pipe_config->base.mode_changed = pipe_config->has_psr; + pipe_config->crc_enabled = enable; - if (IS_HASWELL(dev_priv)) { + if (IS_HASWELL(dev_priv) && crtc->pipe == PIPE_A) { pipe_config->pch_pfit.force_thru = enable; if (pipe_config->cpu_transcoder == TRANSCODER_EDP && pipe_config->pch_pfit.enabled != enable) @@ -392,11 +335,10 @@ unlock: static int ivb_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv, enum pipe pipe, enum intel_pipe_crc_source *source, - u32 *val, - bool set_wa) + u32 *val) { if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) - *source = INTEL_PIPE_CRC_SOURCE_PF; + *source = INTEL_PIPE_CRC_SOURCE_PIPE; switch (*source) { case INTEL_PIPE_CRC_SOURCE_PLANE1: @@ -405,11 +347,7 @@ static int ivb_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv, case INTEL_PIPE_CRC_SOURCE_PLANE2: *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_IVB; break; - case INTEL_PIPE_CRC_SOURCE_PF: - if (set_wa && (IS_HASWELL(dev_priv) || - IS_BROADWELL(dev_priv)) && pipe == PIPE_A) - hsw_pipe_A_crc_wa(dev_priv, true); - + case INTEL_PIPE_CRC_SOURCE_PIPE: *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB; break; case INTEL_PIPE_CRC_SOURCE_NONE: @@ -422,10 +360,52 @@ static int ivb_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv, return 0; } +static int skl_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv, + enum pipe pipe, + enum intel_pipe_crc_source *source, + u32 *val) +{ + if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) + *source = INTEL_PIPE_CRC_SOURCE_PIPE; + + switch (*source) { + case INTEL_PIPE_CRC_SOURCE_PLANE1: + *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PLANE_1_SKL; + break; + case INTEL_PIPE_CRC_SOURCE_PLANE2: + *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PLANE_2_SKL; + break; + case INTEL_PIPE_CRC_SOURCE_PLANE3: + *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PLANE_3_SKL; + break; + case INTEL_PIPE_CRC_SOURCE_PLANE4: + *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PLANE_4_SKL; + break; + case INTEL_PIPE_CRC_SOURCE_PLANE5: + *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PLANE_5_SKL; + break; + case INTEL_PIPE_CRC_SOURCE_PLANE6: + *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PLANE_6_SKL; + break; + case INTEL_PIPE_CRC_SOURCE_PLANE7: + *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PLANE_7_SKL; + break; + case INTEL_PIPE_CRC_SOURCE_PIPE: + *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DMUX_SKL; + break; + case INTEL_PIPE_CRC_SOURCE_NONE: + *val = 0; + break; + default: + return -EINVAL; + } + + return 0; +} + static int get_new_crc_ctl_reg(struct drm_i915_private *dev_priv, enum pipe pipe, - enum intel_pipe_crc_source *source, u32 *val, - bool set_wa) + enum intel_pipe_crc_source *source, u32 *val) { if (IS_GEN(dev_priv, 2)) return i8xx_pipe_crc_ctl_reg(source, val); @@ -435,8 +415,10 @@ static int get_new_crc_ctl_reg(struct drm_i915_private *dev_priv, return vlv_pipe_crc_ctl_reg(dev_priv, pipe, source, val); else if (IS_GEN_RANGE(dev_priv, 5, 6)) return ilk_pipe_crc_ctl_reg(source, val); + else if (INTEL_GEN(dev_priv) < 9) + return ivb_pipe_crc_ctl_reg(dev_priv, pipe, source, val); else - return ivb_pipe_crc_ctl_reg(dev_priv, pipe, source, val, set_wa); + return skl_pipe_crc_ctl_reg(dev_priv, pipe, source, val); } static int @@ -486,9 +468,6 @@ static int i9xx_crc_source_valid(struct drm_i915_private *dev_priv, switch (source) { case INTEL_PIPE_CRC_SOURCE_PIPE: case INTEL_PIPE_CRC_SOURCE_TV: - case INTEL_PIPE_CRC_SOURCE_DP_B: - case INTEL_PIPE_CRC_SOURCE_DP_C: - case INTEL_PIPE_CRC_SOURCE_DP_D: case INTEL_PIPE_CRC_SOURCE_NONE: return 0; default: @@ -532,7 +511,25 @@ static int 
ivb_crc_source_valid(struct drm_i915_private *dev_priv, case INTEL_PIPE_CRC_SOURCE_PIPE: case INTEL_PIPE_CRC_SOURCE_PLANE1: case INTEL_PIPE_CRC_SOURCE_PLANE2: - case INTEL_PIPE_CRC_SOURCE_PF: + case INTEL_PIPE_CRC_SOURCE_NONE: + return 0; + default: + return -EINVAL; + } +} + +static int skl_crc_source_valid(struct drm_i915_private *dev_priv, + const enum intel_pipe_crc_source source) +{ + switch (source) { + case INTEL_PIPE_CRC_SOURCE_PIPE: + case INTEL_PIPE_CRC_SOURCE_PLANE1: + case INTEL_PIPE_CRC_SOURCE_PLANE2: + case INTEL_PIPE_CRC_SOURCE_PLANE3: + case INTEL_PIPE_CRC_SOURCE_PLANE4: + case INTEL_PIPE_CRC_SOURCE_PLANE5: + case INTEL_PIPE_CRC_SOURCE_PLANE6: + case INTEL_PIPE_CRC_SOURCE_PLANE7: case INTEL_PIPE_CRC_SOURCE_NONE: return 0; default: @@ -552,8 +549,10 @@ intel_is_valid_crc_source(struct drm_i915_private *dev_priv, return vlv_crc_source_valid(dev_priv, source); else if (IS_GEN_RANGE(dev_priv, 5, 6)) return ilk_crc_source_valid(dev_priv, source); - else + else if (INTEL_GEN(dev_priv) < 9) return ivb_crc_source_valid(dev_priv, source); + else + return skl_crc_source_valid(dev_priv, source); } const char *const *intel_crtc_get_crc_sources(struct drm_crtc *crtc, @@ -592,6 +591,7 @@ int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name) intel_wakeref_t wakeref; u32 val = 0; /* shut up gcc */ int ret = 0; + bool enable; if (display_crc_ctl_parse_source(source_name, &source) < 0) { DRM_DEBUG_DRIVER("unknown source %s\n", source_name); @@ -605,7 +605,11 @@ int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name) return -EIO; } - ret = get_new_crc_ctl_reg(dev_priv, crtc->index, &source, &val, true); + enable = source != INTEL_PIPE_CRC_SOURCE_NONE; + if (enable) + intel_crtc_crc_setup_workarounds(to_intel_crtc(crtc), true); + + ret = get_new_crc_ctl_reg(dev_priv, crtc->index, &source, &val); if (ret != 0) goto out; @@ -614,18 +618,16 @@ int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name) POSTING_READ(PIPE_CRC_CTL(crtc->index)); if (!source) { - if (IS_G4X(dev_priv)) - g4x_undo_pipe_scramble_reset(dev_priv, crtc->index); - else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) + if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) vlv_undo_pipe_scramble_reset(dev_priv, crtc->index); - else if ((IS_HASWELL(dev_priv) || - IS_BROADWELL(dev_priv)) && crtc->index == PIPE_A) - hsw_pipe_A_crc_wa(dev_priv, false); } pipe_crc->skipped = 0; out: + if (!enable) + intel_crtc_crc_setup_workarounds(to_intel_crtc(crtc), false); + intel_display_power_put(dev_priv, power_domain, wakeref); return ret; @@ -641,7 +643,7 @@ void intel_crtc_enable_pipe_crc(struct intel_crtc *intel_crtc) if (!crtc->crc.opened) return; - if (get_new_crc_ctl_reg(dev_priv, crtc->index, &pipe_crc->source, &val, false) < 0) + if (get_new_crc_ctl_reg(dev_priv, crtc->index, &pipe_crc->source, &val) < 0) return; /* Don't need pipe_crc->lock here, IRQs are not generated. 
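The validation helpers in this hunk now fan out by display generation: pre-gen9 parts keep the IVB list, while gen9 and later get a dedicated table with the extra plane taps. A compressed, standalone model of that split; the generation cutoff and source IDs below are illustrative stand-ins, not the driver's INTEL_GEN() or register values:

#include <stdio.h>

enum src { SRC_PIPE, SRC_PLANE1, SRC_PLANE5, SRC_NONE };

static int ivb_valid(enum src s)
{
	return s == SRC_PIPE || s == SRC_PLANE1 || s == SRC_NONE;
}

static int skl_valid(enum src s)
{
	/* gen9+ exposes many more per-plane CRC taps */
	return s == SRC_PIPE || s == SRC_PLANE1 || s == SRC_PLANE5 ||
	       s == SRC_NONE;
}

static int source_valid(int gen, enum src s)
{
	return gen < 9 ? ivb_valid(s) : skl_valid(s);
}

int main(void)
{
	printf("gen8 plane5 valid: %d\n", source_valid(8, SRC_PLANE5)); /* 0 */
	printf("gen9 plane5 valid: %d\n", source_valid(9, SRC_PLANE5)); /* 1 */
	return 0;
}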
*/ diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 14ac31888c67..9a6eb2ef5f48 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -338,12 +338,12 @@ static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable) mutex_lock(&dev_priv->pcu_lock); - val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ); + val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM); if (enable) val |= DSP_MAXFIFO_PM5_ENABLE; else val &= ~DSP_MAXFIFO_PM5_ENABLE; - vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val); + vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, val); mutex_unlock(&dev_priv->pcu_lock); } @@ -3624,7 +3624,12 @@ static u8 intel_enabled_dbuf_slices_num(struct drm_i915_private *dev_priv) if (INTEL_GEN(dev_priv) < 11) return enabled_slices; - if (I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE) + /* + * FIXME: for now we'll only ever use 1 slice; pretend that we have + * only that 1 slice enabled until we have a proper way for on-demand + * toggling of the second slice. + */ + if (0 && I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE) enabled_slices++; return enabled_slices; @@ -3919,12 +3924,43 @@ skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv, alloc->end = ddb_size * (width_before_pipe + pipe_width) / total_width; } -static unsigned int skl_cursor_allocation(int num_active) +static int skl_compute_wm_params(const struct intel_crtc_state *crtc_state, + int width, const struct drm_format_info *format, + u64 modifier, unsigned int rotation, + u32 plane_pixel_rate, struct skl_wm_params *wp, + int color_plane); +static void skl_compute_plane_wm(const struct intel_crtc_state *cstate, + int level, + const struct skl_wm_params *wp, + const struct skl_wm_level *result_prev, + struct skl_wm_level *result /* out */); + +static unsigned int +skl_cursor_allocation(const struct intel_crtc_state *crtc_state, + int num_active) { - if (num_active == 1) - return 32; + struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); + int level, max_level = ilk_wm_max_level(dev_priv); + struct skl_wm_level wm = {}; + int ret, min_ddb_alloc = 0; + struct skl_wm_params wp; + + ret = skl_compute_wm_params(crtc_state, 256, + drm_format_info(DRM_FORMAT_ARGB8888), + DRM_FORMAT_MOD_LINEAR, + DRM_MODE_ROTATE_0, + crtc_state->pixel_rate, &wp, 0); + WARN_ON(ret); + + for (level = 0; level <= max_level; level++) { + skl_compute_plane_wm(crtc_state, level, &wp, &wm, &wm); + if (wm.min_ddb_alloc == U16_MAX) + break; - return 8; + min_ddb_alloc = wm.min_ddb_alloc; + } + + return max(num_active == 1 ? 32 : 8, min_ddb_alloc); } static void skl_ddb_entry_init_from_hw(struct drm_i915_private *dev_priv, @@ -4308,7 +4344,6 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate, struct drm_i915_private *dev_priv = to_i915(crtc->dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct skl_ddb_entry *alloc = &cstate->wm.skl.ddb; - struct skl_plane_wm *wm; u16 alloc_size, start = 0; u16 total[I915_MAX_PLANES] = {}; u16 uv_total[I915_MAX_PLANES] = {}; @@ -4349,7 +4384,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate, return 0; /* Allocate fixed number of blocks for cursor. 
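The new skl_cursor_allocation() in the hunk above replaces the hard-coded 32/8 block split with the larger of the legacy minimum and the worst-case min_ddb_alloc computed for a 256-wide linear ARGB8888 cursor across all watermark levels. A simplified standalone sketch of that walk; the per-level table here is made up for illustration, whereas the driver derives the values from skl_compute_plane_wm():

#include <stdint.h>
#include <stdio.h>

#define LEVELS 8
#define INVALID UINT16_MAX	/* level could not be satisfied at all */

static const uint16_t min_ddb_alloc[LEVELS] = {
	6, 8, 11, 16, 24, 40, INVALID, INVALID
};

static unsigned int cursor_allocation(int num_active)
{
	unsigned int min_alloc = 0;

	/* Walk the levels until one can no longer be satisfied and keep
	 * the largest requirement seen so far. */
	for (int level = 0; level < LEVELS; level++) {
		if (min_ddb_alloc[level] == INVALID)
			break;
		min_alloc = min_ddb_alloc[level];
	}

	/* Never drop below the old fixed allocation: 32 blocks with a
	 * single active pipe, 8 otherwise. */
	unsigned int legacy = num_active == 1 ? 32 : 8;
	return min_alloc > legacy ? min_alloc : legacy;
}

int main(void)
{
	printf("1 pipe:  %u blocks\n", cursor_allocation(1));
	printf("3 pipes: %u blocks\n", cursor_allocation(3));
	return 0;
}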
*/ - total[PLANE_CURSOR] = skl_cursor_allocation(num_active); + total[PLANE_CURSOR] = skl_cursor_allocation(cstate, num_active); alloc_size -= total[PLANE_CURSOR]; cstate->wm.skl.plane_ddb_y[PLANE_CURSOR].start = alloc->end - total[PLANE_CURSOR]; @@ -4365,15 +4400,23 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate, for (level = ilk_wm_max_level(dev_priv); level >= 0; level--) { blocks = 0; for_each_plane_id_on_crtc(intel_crtc, plane_id) { - if (plane_id == PLANE_CURSOR) + const struct skl_plane_wm *wm = + &cstate->wm.skl.optimal.planes[plane_id]; + + if (plane_id == PLANE_CURSOR) { + if (WARN_ON(wm->wm[level].min_ddb_alloc > + total[PLANE_CURSOR])) { + blocks = U32_MAX; + break; + } continue; + } - wm = &cstate->wm.skl.optimal.planes[plane_id]; blocks += wm->wm[level].min_ddb_alloc; blocks += wm->uv_wm[level].min_ddb_alloc; } - if (blocks < alloc_size) { + if (blocks <= alloc_size) { alloc_size -= blocks; break; } @@ -4392,6 +4435,8 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate, * proportional to its relative data rate. */ for_each_plane_id_on_crtc(intel_crtc, plane_id) { + const struct skl_plane_wm *wm = + &cstate->wm.skl.optimal.planes[plane_id]; u64 rate; u16 extra; @@ -4405,8 +4450,6 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate, if (total_data_rate == 0) break; - wm = &cstate->wm.skl.optimal.planes[plane_id]; - rate = plane_data_rate[plane_id]; extra = min_t(u16, alloc_size, DIV64_U64_ROUND_UP(alloc_size * rate, @@ -4431,14 +4474,14 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate, /* Set the actual DDB start/end points for each plane */ start = alloc->start; for_each_plane_id_on_crtc(intel_crtc, plane_id) { - struct skl_ddb_entry *plane_alloc, *uv_plane_alloc; + struct skl_ddb_entry *plane_alloc = + &cstate->wm.skl.plane_ddb_y[plane_id]; + struct skl_ddb_entry *uv_plane_alloc = + &cstate->wm.skl.plane_ddb_uv[plane_id]; if (plane_id == PLANE_CURSOR) continue; - plane_alloc = &cstate->wm.skl.plane_ddb_y[plane_id]; - uv_plane_alloc = &cstate->wm.skl.plane_ddb_uv[plane_id]; - /* Gen11+ uses a separate plane for UV watermarks */ WARN_ON(INTEL_GEN(dev_priv) >= 11 && uv_total[plane_id]); @@ -4464,8 +4507,35 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate, */ for (level++; level <= ilk_wm_max_level(dev_priv); level++) { for_each_plane_id_on_crtc(intel_crtc, plane_id) { - wm = &cstate->wm.skl.optimal.planes[plane_id]; - memset(&wm->wm[level], 0, sizeof(wm->wm[level])); + struct skl_plane_wm *wm = + &cstate->wm.skl.optimal.planes[plane_id]; + + /* + * We only disable the watermarks for each plane if + * they exceed the ddb allocation of said plane. This + * is done so that we don't end up touching cursor + * watermarks needlessly when some other plane reduces + * our max possible watermark level. + * + * Bspec has this to say about the PLANE_WM enable bit: + * "All the watermarks at this level for all enabled + * planes must be enabled before the level will be used." + * So this is actually safe to do. 
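The allocation loop above walks from the highest watermark level downwards and settles on the first level whose summed per-plane minimums still fit in the pipe's DDB share; the comparison is also relaxed from < to <= so an exact fit is accepted. A standalone sketch of that selection; the block requirements below are invented, the driver's come from the computed watermarks:

#include <stdio.h>

#define PLANES 3
#define LEVELS 5

static const unsigned int min_blocks[PLANES][LEVELS] = {
	{ 10, 14, 20, 32, 60 },
	{  8, 10, 14, 22, 40 },
	{  4,  4,  6,  8, 12 },
};

/* Return the highest level whose total requirement fits, or -1. */
static int highest_level_that_fits(unsigned int alloc_size)
{
	for (int level = LEVELS - 1; level >= 0; level--) {
		unsigned int blocks = 0;

		for (int p = 0; p < PLANES; p++)
			blocks += min_blocks[p][level];

		if (blocks <= alloc_size)	/* an exact fit is good enough */
			return level;
	}
	return -1;
}

int main(void)
{
	printf("fits at level %d with 64 blocks\n", highest_level_that_fits(64));
	printf("fits at level %d with 20 blocks\n", highest_level_that_fits(20));
	return 0;
}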
+ */ + if (wm->wm[level].min_ddb_alloc > total[plane_id] || + wm->uv_wm[level].min_ddb_alloc > uv_total[plane_id]) + memset(&wm->wm[level], 0, sizeof(wm->wm[level])); + + /* + * Wa_1408961008:icl + * Underruns with WM1+ disabled + */ + if (IS_ICELAKE(dev_priv) && + level == 1 && wm->wm[0].plane_en) { + wm->wm[level].plane_res_b = wm->wm[0].plane_res_b; + wm->wm[level].plane_res_l = wm->wm[0].plane_res_l; + wm->wm[level].ignore_lines = wm->wm[0].ignore_lines; + } } } @@ -4474,7 +4544,9 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate, * don't have enough DDB blocks for it. */ for_each_plane_id_on_crtc(intel_crtc, plane_id) { - wm = &cstate->wm.skl.optimal.planes[plane_id]; + struct skl_plane_wm *wm = + &cstate->wm.skl.optimal.planes[plane_id]; + if (wm->trans_wm.plane_res_b >= total[plane_id]) memset(&wm->trans_wm, 0, sizeof(wm->trans_wm)); } @@ -4568,57 +4640,45 @@ skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *cstate, } static int -skl_compute_plane_wm_params(const struct intel_crtc_state *cstate, - const struct intel_plane_state *intel_pstate, - struct skl_wm_params *wp, int color_plane) +skl_compute_wm_params(const struct intel_crtc_state *crtc_state, + int width, const struct drm_format_info *format, + u64 modifier, unsigned int rotation, + u32 plane_pixel_rate, struct skl_wm_params *wp, + int color_plane) { - struct intel_plane *plane = to_intel_plane(intel_pstate->base.plane); - struct drm_i915_private *dev_priv = to_i915(plane->base.dev); - const struct drm_plane_state *pstate = &intel_pstate->base; - const struct drm_framebuffer *fb = pstate->fb; + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); u32 interm_pbpl; /* only planar format has two planes */ - if (color_plane == 1 && !is_planar_yuv_format(fb->format->format)) { + if (color_plane == 1 && !is_planar_yuv_format(format->format)) { DRM_DEBUG_KMS("Non planar format have single plane\n"); return -EINVAL; } - wp->y_tiled = fb->modifier == I915_FORMAT_MOD_Y_TILED || - fb->modifier == I915_FORMAT_MOD_Yf_TILED || - fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS || - fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS; - wp->x_tiled = fb->modifier == I915_FORMAT_MOD_X_TILED; - wp->rc_surface = fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS || - fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS; - wp->is_planar = is_planar_yuv_format(fb->format->format); - - if (plane->id == PLANE_CURSOR) { - wp->width = intel_pstate->base.crtc_w; - } else { - /* - * Src coordinates are already rotated by 270 degrees for - * the 90/270 degree plane rotation cases (to match the - * GTT mapping), hence no need to account for rotation here. 
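Two related ideas sit in the hunk above: a level is now zeroed per plane only when that plane's own requirement exceeds its allocation (justified by the quoted Bspec rule), and on Icelake the Wa_1408961008 block keeps WM1 programmed with the WM0 values so the hardware never sees an all-zero level 1. A small standalone sketch of both steps, with invented structures and numbers standing in for skl_plane_wm and the real allocations:

#include <string.h>
#include <stdio.h>

struct wm_level {
	int enabled;
	unsigned int blocks;
	unsigned int lines;
};

#define LEVELS 4

static void trim_levels(struct wm_level wm[LEVELS], unsigned int alloc,
			int apply_wm1_workaround)
{
	/* Zero only the levels this plane genuinely cannot honour. */
	for (int level = 1; level < LEVELS; level++) {
		if (wm[level].blocks > alloc)
			memset(&wm[level], 0, sizeof(wm[level]));
	}

	/* Counterpart of the icl workaround above: leave level 1 programmed
	 * with the level 0 values (the enable bit is left untouched). */
	if (apply_wm1_workaround && wm[0].enabled) {
		wm[1].blocks = wm[0].blocks;
		wm[1].lines = wm[0].lines;
	}
}

int main(void)
{
	struct wm_level wm[LEVELS] = {
		{ 1, 10, 2 }, { 1, 40, 4 }, { 1, 60, 6 }, { 1, 90, 8 },
	};

	trim_levels(wm, 32, 1);
	for (int i = 0; i < LEVELS; i++)
		printf("level %d: en=%d blocks=%u lines=%u\n",
		       i, wm[i].enabled, wm[i].blocks, wm[i].lines);
	return 0;
}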
- */ - wp->width = drm_rect_width(&intel_pstate->base.src) >> 16; - } + wp->y_tiled = modifier == I915_FORMAT_MOD_Y_TILED || + modifier == I915_FORMAT_MOD_Yf_TILED || + modifier == I915_FORMAT_MOD_Y_TILED_CCS || + modifier == I915_FORMAT_MOD_Yf_TILED_CCS; + wp->x_tiled = modifier == I915_FORMAT_MOD_X_TILED; + wp->rc_surface = modifier == I915_FORMAT_MOD_Y_TILED_CCS || + modifier == I915_FORMAT_MOD_Yf_TILED_CCS; + wp->is_planar = is_planar_yuv_format(format->format); + wp->width = width; if (color_plane == 1 && wp->is_planar) wp->width /= 2; - wp->cpp = fb->format->cpp[color_plane]; - wp->plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate, - intel_pstate); + wp->cpp = format->cpp[color_plane]; + wp->plane_pixel_rate = plane_pixel_rate; if (INTEL_GEN(dev_priv) >= 11 && - fb->modifier == I915_FORMAT_MOD_Yf_TILED && wp->cpp == 1) + modifier == I915_FORMAT_MOD_Yf_TILED && wp->cpp == 1) wp->dbuf_block_size = 256; else wp->dbuf_block_size = 512; - if (drm_rotation_90_or_270(pstate->rotation)) { - + if (drm_rotation_90_or_270(rotation)) { switch (wp->cpp) { case 1: wp->y_min_scanlines = 16; @@ -4663,12 +4723,40 @@ skl_compute_plane_wm_params(const struct intel_crtc_state *cstate, wp->y_tile_minimum = mul_u32_fixed16(wp->y_min_scanlines, wp->plane_blocks_per_line); + wp->linetime_us = fixed16_to_u32_round_up( - intel_get_linetime_us(cstate)); + intel_get_linetime_us(crtc_state)); return 0; } +static int +skl_compute_plane_wm_params(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state, + struct skl_wm_params *wp, int color_plane) +{ + struct intel_plane *plane = to_intel_plane(plane_state->base.plane); + const struct drm_framebuffer *fb = plane_state->base.fb; + int width; + + if (plane->id == PLANE_CURSOR) { + width = plane_state->base.crtc_w; + } else { + /* + * Src coordinates are already rotated by 270 degrees for + * the 90/270 degree plane rotation cases (to match the + * GTT mapping), hence no need to account for rotation here. 
+ */ + width = drm_rect_width(&plane_state->base.src) >> 16; + } + + return skl_compute_wm_params(crtc_state, width, + fb->format, fb->modifier, + plane_state->base.rotation, + skl_adjusted_plane_pixel_rate(crtc_state, plane_state), + wp, color_plane); +} + static bool skl_wm_has_lines(struct drm_i915_private *dev_priv, int level) { if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) @@ -4679,14 +4767,12 @@ static bool skl_wm_has_lines(struct drm_i915_private *dev_priv, int level) } static void skl_compute_plane_wm(const struct intel_crtc_state *cstate, - const struct intel_plane_state *intel_pstate, int level, const struct skl_wm_params *wp, const struct skl_wm_level *result_prev, struct skl_wm_level *result /* out */) { - struct drm_i915_private *dev_priv = - to_i915(intel_pstate->base.plane->dev); + struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev); u32 latency = dev_priv->wm.skl_latency[level]; uint_fixed_16_16_t method1, method2; uint_fixed_16_16_t selected_result; @@ -4805,19 +4891,17 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *cstate, static void skl_compute_wm_levels(const struct intel_crtc_state *cstate, - const struct intel_plane_state *intel_pstate, const struct skl_wm_params *wm_params, struct skl_wm_level *levels) { - struct drm_i915_private *dev_priv = - to_i915(intel_pstate->base.plane->dev); + struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev); int level, max_level = ilk_wm_max_level(dev_priv); struct skl_wm_level *result_prev = &levels[0]; for (level = 0; level <= max_level; level++) { struct skl_wm_level *result = &levels[level]; - skl_compute_plane_wm(cstate, intel_pstate, level, wm_params, + skl_compute_plane_wm(cstate, level, wm_params, result_prev, result); result_prev = result; @@ -4914,7 +4998,7 @@ static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state, if (ret) return ret; - skl_compute_wm_levels(crtc_state, plane_state, &wm_params, wm->wm); + skl_compute_wm_levels(crtc_state, &wm_params, wm->wm); skl_compute_transition_wm(crtc_state, &wm_params, wm); return 0; @@ -4936,13 +5020,12 @@ static int skl_build_plane_wm_uv(struct intel_crtc_state *crtc_state, if (ret) return ret; - skl_compute_wm_levels(crtc_state, plane_state, &wm_params, wm->uv_wm); + skl_compute_wm_levels(crtc_state, &wm_params, wm->uv_wm); return 0; } -static int skl_build_plane_wm(struct skl_pipe_wm *pipe_wm, - struct intel_crtc_state *crtc_state, +static int skl_build_plane_wm(struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { struct intel_plane *plane = to_intel_plane(plane_state->base.plane); @@ -4968,8 +5051,7 @@ static int skl_build_plane_wm(struct skl_pipe_wm *pipe_wm, return 0; } -static int icl_build_plane_wm(struct skl_pipe_wm *pipe_wm, - struct intel_crtc_state *crtc_state, +static int icl_build_plane_wm(struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { enum plane_id plane_id = to_intel_plane(plane_state->base.plane)->id; @@ -5006,10 +5088,10 @@ static int icl_build_plane_wm(struct skl_pipe_wm *pipe_wm, return 0; } -static int skl_build_pipe_wm(struct intel_crtc_state *cstate, - struct skl_pipe_wm *pipe_wm) +static int skl_build_pipe_wm(struct intel_crtc_state *cstate) { struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev); + struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal; struct drm_crtc_state *crtc_state = &cstate->base; struct drm_plane *plane; const struct drm_plane_state *pstate; @@ -5026,11 +5108,9 @@ static int 
skl_build_pipe_wm(struct intel_crtc_state *cstate, to_intel_plane_state(pstate); if (INTEL_GEN(dev_priv) >= 11) - ret = icl_build_plane_wm(pipe_wm, - cstate, intel_pstate); + ret = icl_build_plane_wm(cstate, intel_pstate); else - ret = skl_build_plane_wm(pipe_wm, - cstate, intel_pstate); + ret = skl_build_plane_wm(cstate, intel_pstate); if (ret) return ret; } @@ -5056,11 +5136,12 @@ static void skl_write_wm_level(struct drm_i915_private *dev_priv, { u32 val = 0; - if (level->plane_en) { + if (level->plane_en) val |= PLANE_WM_EN; - val |= level->plane_res_b; - val |= level->plane_res_l << PLANE_WM_LINES_SHIFT; - } + if (level->ignore_lines) + val |= PLANE_WM_IGNORE_LINES; + val |= level->plane_res_b; + val |= level->plane_res_l << PLANE_WM_LINES_SHIFT; I915_WRITE_FW(reg, val); } @@ -5126,6 +5207,7 @@ bool skl_wm_level_equals(const struct skl_wm_level *l1, const struct skl_wm_level *l2) { return l1->plane_en == l2->plane_en && + l1->ignore_lines == l2->ignore_lines && l1->plane_res_l == l2->plane_res_l && l1->plane_res_b == l2->plane_res_b; } @@ -5183,23 +5265,6 @@ bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb, return false; } -static int skl_update_pipe_wm(struct intel_crtc_state *cstate, - const struct skl_pipe_wm *old_pipe_wm, - struct skl_pipe_wm *pipe_wm, /* out */ - bool *changed /* out */) -{ - struct intel_crtc *crtc = to_intel_crtc(cstate->base.crtc); - int ret; - - ret = skl_build_pipe_wm(cstate, pipe_wm); - if (ret) - return ret; - - *changed = !skl_pipe_wm_equals(crtc, old_pipe_wm, pipe_wm); - - return 0; -} - static u32 pipes_modified(struct intel_atomic_state *state) { @@ -5269,6 +5334,11 @@ skl_compute_ddb(struct intel_atomic_state *state) return 0; } +static char enast(bool enable) +{ + return enable ? '*' : ' '; +} + static void skl_print_wm_changes(struct intel_atomic_state *state) { @@ -5279,8 +5349,16 @@ skl_print_wm_changes(struct intel_atomic_state *state) struct intel_crtc *crtc; int i; + if ((drm_debug & DRM_UT_KMS) == 0) + return; + for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { + const struct skl_pipe_wm *old_pipe_wm, *new_pipe_wm; + + old_pipe_wm = &old_crtc_state->wm.skl.optimal; + new_pipe_wm = &new_crtc_state->wm.skl.optimal; + for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) { enum plane_id plane_id = plane->id; const struct skl_ddb_entry *old, *new; @@ -5291,10 +5369,86 @@ skl_print_wm_changes(struct intel_atomic_state *state) if (skl_ddb_entry_equal(old, new)) continue; - DRM_DEBUG_KMS("[PLANE:%d:%s] ddb (%d - %d) -> (%d - %d)\n", + DRM_DEBUG_KMS("[PLANE:%d:%s] ddb (%4d - %4d) -> (%4d - %4d), size %4d -> %4d\n", + plane->base.base.id, plane->base.name, + old->start, old->end, new->start, new->end, + skl_ddb_entry_size(old), skl_ddb_entry_size(new)); + } + + for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) { + enum plane_id plane_id = plane->id; + const struct skl_plane_wm *old_wm, *new_wm; + + old_wm = &old_pipe_wm->planes[plane_id]; + new_wm = &new_pipe_wm->planes[plane_id]; + + if (skl_plane_wm_equals(dev_priv, old_wm, new_wm)) + continue; + + DRM_DEBUG_KMS("[PLANE:%d:%s] level %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm" + " -> %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm\n", + plane->base.base.id, plane->base.name, + enast(old_wm->wm[0].plane_en), enast(old_wm->wm[1].plane_en), + enast(old_wm->wm[2].plane_en), enast(old_wm->wm[3].plane_en), + enast(old_wm->wm[4].plane_en), enast(old_wm->wm[5].plane_en), + enast(old_wm->wm[6].plane_en), 
enast(old_wm->wm[7].plane_en), + enast(old_wm->trans_wm.plane_en), + enast(new_wm->wm[0].plane_en), enast(new_wm->wm[1].plane_en), + enast(new_wm->wm[2].plane_en), enast(new_wm->wm[3].plane_en), + enast(new_wm->wm[4].plane_en), enast(new_wm->wm[5].plane_en), + enast(new_wm->wm[6].plane_en), enast(new_wm->wm[7].plane_en), + enast(new_wm->trans_wm.plane_en)); + + DRM_DEBUG_KMS("[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d" + " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d\n", + plane->base.base.id, plane->base.name, + enast(old_wm->wm[0].ignore_lines), old_wm->wm[0].plane_res_l, + enast(old_wm->wm[1].ignore_lines), old_wm->wm[1].plane_res_l, + enast(old_wm->wm[2].ignore_lines), old_wm->wm[2].plane_res_l, + enast(old_wm->wm[3].ignore_lines), old_wm->wm[3].plane_res_l, + enast(old_wm->wm[4].ignore_lines), old_wm->wm[4].plane_res_l, + enast(old_wm->wm[5].ignore_lines), old_wm->wm[5].plane_res_l, + enast(old_wm->wm[6].ignore_lines), old_wm->wm[6].plane_res_l, + enast(old_wm->wm[7].ignore_lines), old_wm->wm[7].plane_res_l, + enast(old_wm->trans_wm.ignore_lines), old_wm->trans_wm.plane_res_l, + + enast(new_wm->wm[0].ignore_lines), new_wm->wm[0].plane_res_l, + enast(new_wm->wm[1].ignore_lines), new_wm->wm[1].plane_res_l, + enast(new_wm->wm[2].ignore_lines), new_wm->wm[2].plane_res_l, + enast(new_wm->wm[3].ignore_lines), new_wm->wm[3].plane_res_l, + enast(new_wm->wm[4].ignore_lines), new_wm->wm[4].plane_res_l, + enast(new_wm->wm[5].ignore_lines), new_wm->wm[5].plane_res_l, + enast(new_wm->wm[6].ignore_lines), new_wm->wm[6].plane_res_l, + enast(new_wm->wm[7].ignore_lines), new_wm->wm[7].plane_res_l, + enast(new_wm->trans_wm.ignore_lines), new_wm->trans_wm.plane_res_l); + + DRM_DEBUG_KMS("[PLANE:%d:%s] blocks %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d" + " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d\n", plane->base.base.id, plane->base.name, - old->start, old->end, - new->start, new->end); + old_wm->wm[0].plane_res_b, old_wm->wm[1].plane_res_b, + old_wm->wm[2].plane_res_b, old_wm->wm[3].plane_res_b, + old_wm->wm[4].plane_res_b, old_wm->wm[5].plane_res_b, + old_wm->wm[6].plane_res_b, old_wm->wm[7].plane_res_b, + old_wm->trans_wm.plane_res_b, + new_wm->wm[0].plane_res_b, new_wm->wm[1].plane_res_b, + new_wm->wm[2].plane_res_b, new_wm->wm[3].plane_res_b, + new_wm->wm[4].plane_res_b, new_wm->wm[5].plane_res_b, + new_wm->wm[6].plane_res_b, new_wm->wm[7].plane_res_b, + new_wm->trans_wm.plane_res_b); + + DRM_DEBUG_KMS("[PLANE:%d:%s] min_ddb %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d" + " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d\n", + plane->base.base.id, plane->base.name, + old_wm->wm[0].min_ddb_alloc, old_wm->wm[1].min_ddb_alloc, + old_wm->wm[2].min_ddb_alloc, old_wm->wm[3].min_ddb_alloc, + old_wm->wm[4].min_ddb_alloc, old_wm->wm[5].min_ddb_alloc, + old_wm->wm[6].min_ddb_alloc, old_wm->wm[7].min_ddb_alloc, + old_wm->trans_wm.min_ddb_alloc, + new_wm->wm[0].min_ddb_alloc, new_wm->wm[1].min_ddb_alloc, + new_wm->wm[2].min_ddb_alloc, new_wm->wm[3].min_ddb_alloc, + new_wm->wm[4].min_ddb_alloc, new_wm->wm[5].min_ddb_alloc, + new_wm->wm[6].min_ddb_alloc, new_wm->wm[7].min_ddb_alloc, + new_wm->trans_wm.min_ddb_alloc); } } } @@ -5449,10 +5603,9 @@ static int skl_compute_wm(struct intel_atomic_state *state) { struct intel_crtc *crtc; - struct intel_crtc_state *cstate; + struct intel_crtc_state *new_crtc_state; struct intel_crtc_state *old_crtc_state; struct skl_ddb_values *results = &state->wm_results; - struct skl_pipe_wm *pipe_wm; bool changed = false; int ret, i; @@ -5470,12 +5623,8 @@ 
skl_compute_wm(struct intel_atomic_state *state) * pipe allocations had to change. */ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, - cstate, i) { - const struct skl_pipe_wm *old_pipe_wm = - &old_crtc_state->wm.skl.optimal; - - pipe_wm = &cstate->wm.skl.optimal; - ret = skl_update_pipe_wm(cstate, old_pipe_wm, pipe_wm, &changed); + new_crtc_state, i) { + ret = skl_build_pipe_wm(new_crtc_state); if (ret) return ret; @@ -5483,7 +5632,9 @@ skl_compute_wm(struct intel_atomic_state *state) if (ret) return ret; - if (changed) + if (!skl_pipe_wm_equals(crtc, + &old_crtc_state->wm.skl.optimal, + &new_crtc_state->wm.skl.optimal)) results->dirty_pipes |= drm_crtc_mask(&crtc->base); } @@ -5609,6 +5760,7 @@ static inline void skl_wm_level_from_reg_val(u32 val, struct skl_wm_level *level) { level->plane_en = val & PLANE_WM_EN; + level->ignore_lines = val & PLANE_WM_IGNORE_LINES; level->plane_res_b = val & PLANE_WM_BLOCKS_MASK; level->plane_res_l = (val >> PLANE_WM_LINES_SHIFT) & PLANE_WM_LINES_MASK; @@ -5986,7 +6138,7 @@ void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv) if (IS_CHERRYVIEW(dev_priv)) { mutex_lock(&dev_priv->pcu_lock); - val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ); + val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM); if (val & DSP_MAXFIFO_PM5_ENABLE) wm->level = VLV_WM_LEVEL_PM5; @@ -6629,9 +6781,9 @@ static void vlv_set_rps_idle(struct drm_i915_private *dev_priv) * punit into committing the voltage change) as that takes a lot less * power than the render powerwell. */ - intel_uncore_forcewake_get(dev_priv, FORCEWAKE_MEDIA); + intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_MEDIA); err = valleyview_set_rps(dev_priv, val); - intel_uncore_forcewake_put(dev_priv, FORCEWAKE_MEDIA); + intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_MEDIA); if (err) DRM_ERROR("Failed to set RPS for idle\n"); @@ -6691,8 +6843,7 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv) mutex_unlock(&dev_priv->pcu_lock); } -void gen6_rps_boost(struct i915_request *rq, - struct intel_rps_client *rps_client) +void gen6_rps_boost(struct i915_request *rq) { struct intel_rps *rps = &rq->i915->gt_pm.rps; unsigned long flags; @@ -6721,7 +6872,7 @@ void gen6_rps_boost(struct i915_request *rq, if (READ_ONCE(rps->cur_freq) < rps->boost_freq) schedule_work(&rps->work); - atomic_inc(rps_client ? 
&rps_client->boosts : &rps->boosts); + atomic_inc(&rps->boosts); } int intel_set_rps(struct drm_i915_private *dev_priv, u8 val) @@ -6782,11 +6933,11 @@ static void valleyview_disable_rc6(struct drm_i915_private *dev_priv) { /* We're doing forcewake before Disabling RC6, * This what the BIOS expects when going into suspend */ - intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); I915_WRITE(GEN6_RC_CONTROL, 0); - intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); } static void valleyview_disable_rps(struct drm_i915_private *dev_priv) @@ -6945,7 +7096,7 @@ static void reset_rps(struct drm_i915_private *dev_priv, /* See the Gen9_GT_PM_Programming_Guide doc for the below */ static void gen9_enable_rps(struct drm_i915_private *dev_priv) { - intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); /* Program defaults and thresholds for RPS */ if (IS_GEN(dev_priv, 9)) @@ -6963,7 +7114,7 @@ static void gen9_enable_rps(struct drm_i915_private *dev_priv) * RP_INTERRUPT_LIMITS & RPNSWREQ registers */ reset_rps(dev_priv, gen6_set_rps); - intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); } static void gen9_enable_rc6(struct drm_i915_private *dev_priv) @@ -6977,7 +7128,7 @@ static void gen9_enable_rc6(struct drm_i915_private *dev_priv) /* 1b: Get forcewake during program sequence. Although the driver * hasn't enabled a state yet where we need forcewake, BIOS may have.*/ - intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); /* 2a: Disable RC states. */ I915_WRITE(GEN6_RC_CONTROL, 0); @@ -7054,7 +7205,7 @@ static void gen9_enable_rc6(struct drm_i915_private *dev_priv) I915_WRITE(GEN9_PG_ENABLE, GEN9_RENDER_PG_ENABLE | GEN9_MEDIA_PG_ENABLE); - intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); } static void gen8_enable_rc6(struct drm_i915_private *dev_priv) @@ -7067,7 +7218,7 @@ static void gen8_enable_rc6(struct drm_i915_private *dev_priv) /* 1b: Get forcewake during program sequence. Although the driver * hasn't enabled a state yet where we need forcewake, BIOS may have.*/ - intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); /* 2a: Disable RC states. 
*/ I915_WRITE(GEN6_RC_CONTROL, 0); @@ -7088,14 +7239,14 @@ static void gen8_enable_rc6(struct drm_i915_private *dev_priv) GEN7_RC_CTL_TO_MODE | GEN6_RC_CTL_RC6_ENABLE); - intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); } static void gen8_enable_rps(struct drm_i915_private *dev_priv) { struct intel_rps *rps = &dev_priv->gt_pm.rps; - intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); /* 1 Program defaults and thresholds for RPS*/ I915_WRITE(GEN6_RPNSWREQ, @@ -7128,7 +7279,7 @@ static void gen8_enable_rps(struct drm_i915_private *dev_priv) reset_rps(dev_priv, gen6_set_rps); - intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); } static void gen6_enable_rc6(struct drm_i915_private *dev_priv) @@ -7148,7 +7299,7 @@ static void gen6_enable_rc6(struct drm_i915_private *dev_priv) I915_WRITE(GTFIFODBG, gtfifodbg); } - intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); /* disable the counters and set deterministic thresholds */ I915_WRITE(GEN6_RC_CONTROL, 0); @@ -7196,7 +7347,7 @@ static void gen6_enable_rc6(struct drm_i915_private *dev_priv) DRM_ERROR("Couldn't fix incorrect rc6 voltage\n"); } - intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); } static void gen6_enable_rps(struct drm_i915_private *dev_priv) @@ -7207,7 +7358,7 @@ static void gen6_enable_rps(struct drm_i915_private *dev_priv) * Perhaps there might be some value in exposing these to * userspace... */ - intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); /* Power down if completely idle for over 50ms */ I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000); @@ -7215,7 +7366,7 @@ static void gen6_enable_rps(struct drm_i915_private *dev_priv) reset_rps(dev_priv, gen6_set_rps); - intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); } static void gen6_update_ring_freq(struct drm_i915_private *dev_priv) @@ -7638,7 +7789,7 @@ static void cherryview_enable_rc6(struct drm_i915_private *dev_priv) /* 1a & 1b: Get forcewake during program sequence. Although the driver * hasn't enabled a state yet where we need forcewake, BIOS may have.*/ - intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); /* Disable RC states. 
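All of the RC6/RPS bring-up paths in these hunks follow the same bracket, take forcewake for all domains, program the registers, then drop it, and the series simply retargets the helpers at the intel_uncore object instead of the whole i915 device. A standalone sketch of the bracket with stub functions; the names below are stand-ins, not the real uncore API:

#include <stdio.h>

struct uncore { int forcewake_count; };

/* Stand-ins for intel_uncore_forcewake_get()/put(); the real helpers
 * manage per-domain reference counts and hardware wake requests. */
static void forcewake_get(struct uncore *uncore)
{
	uncore->forcewake_count++;
	printf("forcewake held (%d)\n", uncore->forcewake_count);
}

static void forcewake_put(struct uncore *uncore)
{
	uncore->forcewake_count--;
	printf("forcewake released (%d)\n", uncore->forcewake_count);
}

static void enable_rc6(struct uncore *uncore)
{
	/* Registers are only touched while forcewake is held, even if the
	 * driver has not yet enabled a power state that needs it. */
	forcewake_get(uncore);
	printf("programming RC6 thresholds and control\n");
	forcewake_put(uncore);
}

int main(void)
{
	struct uncore uncore = { 0 };

	enable_rc6(&uncore);
	return 0;
}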
*/ I915_WRITE(GEN6_RC_CONTROL, 0); @@ -7670,14 +7821,14 @@ static void cherryview_enable_rc6(struct drm_i915_private *dev_priv) rc6_mode = GEN7_RC_CTL_TO_MODE; I915_WRITE(GEN6_RC_CONTROL, rc6_mode); - intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); } static void cherryview_enable_rps(struct drm_i915_private *dev_priv) { u32 val; - intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); /* 1: Program defaults and thresholds for RPS*/ I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000); @@ -7712,7 +7863,7 @@ static void cherryview_enable_rps(struct drm_i915_private *dev_priv) reset_rps(dev_priv, valleyview_set_rps); - intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); } static void valleyview_enable_rc6(struct drm_i915_private *dev_priv) @@ -7730,7 +7881,7 @@ static void valleyview_enable_rc6(struct drm_i915_private *dev_priv) I915_WRITE(GTFIFODBG, gtfifodbg); } - intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); /* Disable RC states. */ I915_WRITE(GEN6_RC_CONTROL, 0); @@ -7755,14 +7906,14 @@ static void valleyview_enable_rc6(struct drm_i915_private *dev_priv) I915_WRITE(GEN6_RC_CONTROL, GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL); - intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); } static void valleyview_enable_rps(struct drm_i915_private *dev_priv) { u32 val; - intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000); I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400); @@ -7796,7 +7947,7 @@ static void valleyview_enable_rps(struct drm_i915_private *dev_priv) reset_rps(dev_priv, valleyview_set_rps); - intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); } static unsigned long intel_pxfreq(u32 vidfreq) @@ -8343,22 +8494,6 @@ void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv) pm_runtime_put(&dev_priv->drm.pdev->dev); } -/** - * intel_suspend_gt_powersave - suspend PM work and helper threads - * @dev_priv: i915 device - * - * We don't want to disable RC6 or other features here, we just want - * to make sure any work we've queued has finished and won't bother - * us while we're suspended. 
- */ -void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv) -{ - if (INTEL_GEN(dev_priv) < 6) - return; - - /* gen6_rps_idle() will be called later to disable interrupts */ -} - void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv) { dev_priv->gt_pm.rps.enabled = true; /* force RPS disabling */ @@ -9552,7 +9687,7 @@ int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val I915_WRITE_FW(GEN6_PCODE_DATA1, 0); I915_WRITE_FW(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox); - if (__intel_wait_for_register_fw(dev_priv, + if (__intel_wait_for_register_fw(&dev_priv->uncore, GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0, 500, 0, NULL)) { DRM_ERROR("timeout waiting for pcode read (from mbox %x) to finish for %ps\n", @@ -9600,7 +9735,7 @@ int sandybridge_pcode_write_timeout(struct drm_i915_private *dev_priv, I915_WRITE_FW(GEN6_PCODE_DATA1, 0); I915_WRITE_FW(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox); - if (__intel_wait_for_register_fw(dev_priv, + if (__intel_wait_for_register_fw(&dev_priv->uncore, GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0, fast_timeout_us, slow_timeout_ms, NULL)) { @@ -9824,6 +9959,7 @@ static u64 vlv_residency_raw(struct drm_i915_private *dev_priv, u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv, const i915_reg_t reg) { + struct intel_uncore *uncore = &dev_priv->uncore; u64 time_hw, prev_hw, overflow_hw; unsigned int fw_domains; unsigned long flags; @@ -9845,10 +9981,10 @@ u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv, if (WARN_ON_ONCE(i >= ARRAY_SIZE(dev_priv->gt_pm.rc6.cur_residency))) return 0; - fw_domains = intel_uncore_forcewake_for_reg(dev_priv, reg, FW_REG_READ); + fw_domains = intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ); - spin_lock_irqsave(&dev_priv->uncore.lock, flags); - intel_uncore_forcewake_get__locked(dev_priv, fw_domains); + spin_lock_irqsave(&uncore->lock, flags); + intel_uncore_forcewake_get__locked(uncore, fw_domains); /* On VLV and CHV, residency time is in CZ units rather than 1.28us */ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { @@ -9867,7 +10003,7 @@ u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv, } overflow_hw = BIT_ULL(32); - time_hw = I915_READ_FW(reg); + time_hw = intel_uncore_read_fw(uncore, reg); } /* @@ -9889,8 +10025,8 @@ u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv, time_hw += dev_priv->gt_pm.rc6.cur_residency[i]; dev_priv->gt_pm.rc6.cur_residency[i] = time_hw; - intel_uncore_forcewake_put__locked(dev_priv, fw_domains); - spin_unlock_irqrestore(&dev_priv->uncore.lock, flags); + intel_uncore_forcewake_put__locked(uncore, fw_domains); + spin_unlock_irqrestore(&uncore->lock, flags); return mul_u64_u32_div(time_hw, mul, div); } diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c index 84a0fb981561..ec874d802d48 100644 --- a/drivers/gpu/drm/i915/intel_psr.c +++ b/drivers/gpu/drm/i915/intel_psr.c @@ -51,6 +51,7 @@ * must be correctly synchronized/cancelled when shutting down the pipe." 
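Earlier in this hunk, intel_rc6_residency_ns() is switched to the uncore accessors; the counter it samples is free running and wraps (a 32-bit span in the MMIO case shown), so each raw read is folded into a monotonically growing software total. A simplified standalone model of that accumulation; the kernel's locking, forcewake and CZ-unit handling are omitted:

#include <stdint.h>
#include <stdio.h>

/* Fold a possibly wrapped 32-bit hardware sample into a 64-bit total. */
static uint64_t accumulate(uint64_t *total, uint32_t *prev, uint32_t cur)
{
	uint64_t overflow = UINT64_C(1) << 32;
	uint64_t delta = cur >= *prev ? (uint64_t)cur - *prev
				      : overflow - *prev + cur;

	*prev = cur;
	*total += delta;
	return *total;
}

int main(void)
{
	uint64_t total = 0;
	uint32_t prev = 0;

	accumulate(&total, &prev, 0xfffffff0u);	/* sample near the wrap point */
	accumulate(&total, &prev, 0x00000010u);	/* sample after wrapping */
	printf("total = %llu ticks\n", (unsigned long long)total);
	return 0;
}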
*/ +#include <drm/drm_atomic_helper.h> #include "intel_drv.h" #include "i915_drv.h" @@ -78,9 +79,6 @@ static bool intel_psr2_enabled(struct drm_i915_private *dev_priv, case I915_PSR_DEBUG_DISABLE: case I915_PSR_DEBUG_FORCE_PSR1: return false; - case I915_PSR_DEBUG_DEFAULT: - if (i915_modparams.enable_psr <= 0) - return false; default: return crtc_state->has_psr2; } @@ -435,32 +433,16 @@ static void intel_psr_enable_sink(struct intel_dp *intel_dp) drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0); } -static void hsw_activate_psr1(struct intel_dp *intel_dp) +static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - u32 max_sleep_time = 0x1f; - u32 val = EDP_PSR_ENABLE; + u32 val = 0; - /* Let's use 6 as the minimum to cover all known cases including the - * off-by-one issue that HW has in some cases. - */ - int idle_frames = max(6, dev_priv->vbt.psr.idle_frames); - - /* sink_sync_latency of 8 means source has to wait for more than 8 - * frames, we'll go with 9 frames for now - */ - idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1); - val |= idle_frames << EDP_PSR_IDLE_FRAME_SHIFT; - - val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT; - if (IS_HASWELL(dev_priv)) - val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES; - - if (dev_priv->psr.link_standby) - val |= EDP_PSR_LINK_STANDBY; + if (INTEL_GEN(dev_priv) >= 11) + val |= EDP_PSR_TP4_TIME_0US; if (dev_priv->vbt.psr.tp1_wakeup_time_us == 0) - val |= EDP_PSR_TP1_TIME_0us; + val |= EDP_PSR_TP1_TIME_0us; else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 100) val |= EDP_PSR_TP1_TIME_100us; else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 500) @@ -469,7 +451,7 @@ static void hsw_activate_psr1(struct intel_dp *intel_dp) val |= EDP_PSR_TP1_TIME_2500us; if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us == 0) - val |= EDP_PSR_TP2_TP3_TIME_0us; + val |= EDP_PSR_TP2_TP3_TIME_0us; else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 100) val |= EDP_PSR_TP2_TP3_TIME_100us; else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 500) @@ -483,6 +465,35 @@ static void hsw_activate_psr1(struct intel_dp *intel_dp) else val |= EDP_PSR_TP1_TP2_SEL; + return val; +} + +static void hsw_activate_psr1(struct intel_dp *intel_dp) +{ + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + u32 max_sleep_time = 0x1f; + u32 val = EDP_PSR_ENABLE; + + /* Let's use 6 as the minimum to cover all known cases including the + * off-by-one issue that HW has in some cases. 
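The new intel_psr1_get_tp_time() collapses the VBT wake-up times onto the few discrete values the TP1/TP2/TP3 register fields can express, always rounding up to the next supported step. A standalone sketch of that ladder, with made-up small integers standing in for the EDP_PSR_TP*_TIME_* bitfield encodings:

#include <stdio.h>

enum tp_time { TP_0US, TP_100US, TP_500US, TP_2500US };

static enum tp_time encode_tp_time(unsigned int wakeup_us)
{
	/* Round up to the nearest step the hardware can represent. */
	if (wakeup_us == 0)
		return TP_0US;
	else if (wakeup_us <= 100)
		return TP_100US;
	else if (wakeup_us <= 500)
		return TP_500US;
	else
		return TP_2500US;
}

int main(void)
{
	static const char * const names[] = { "0us", "100us", "500us", "2500us" };
	unsigned int samples[] = { 0, 40, 100, 130, 900 };

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("%4uus -> %s\n", samples[i], names[encode_tp_time(samples[i])]);
	return 0;
}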
+ */ + int idle_frames = max(6, dev_priv->vbt.psr.idle_frames); + + /* sink_sync_latency of 8 means source has to wait for more than 8 + * frames, we'll go with 9 frames for now + */ + idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1); + val |= idle_frames << EDP_PSR_IDLE_FRAME_SHIFT; + + val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT; + if (IS_HASWELL(dev_priv)) + val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES; + + if (dev_priv->psr.link_standby) + val |= EDP_PSR_LINK_STANDBY; + + val |= intel_psr1_get_tp_time(intel_dp); + if (INTEL_GEN(dev_priv) >= 8) val |= EDP_PSR_CRC_ENABLE; @@ -509,16 +520,24 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp) val |= EDP_PSR2_FRAME_BEFORE_SU(dev_priv->psr.sink_sync_latency + 1); - if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us >= 0 && - dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 50) + if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 && + dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 50) val |= EDP_PSR2_TP2_TIME_50us; - else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 100) + else if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 100) val |= EDP_PSR2_TP2_TIME_100us; - else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 500) + else if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 500) val |= EDP_PSR2_TP2_TIME_500us; else val |= EDP_PSR2_TP2_TIME_2500us; + /* + * FIXME: There is probably a issue in DMC firmwares(icl_dmc_ver1_07.bin + * and kbl_dmc_ver1_04.bin at least) that causes PSR2 SU to fail after + * exiting DC6 if EDP_PSR_TP1_TP3_SEL is kept in PSR_CTL, so for now + * lets workaround the issue by cleaning PSR_CTL before enable PSR2. + */ + I915_WRITE(EDP_PSR_CTL, 0); + I915_WRITE(EDP_PSR2_CTL, val); } @@ -530,11 +549,6 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp, int crtc_vdisplay = crtc_state->base.adjusted_mode.crtc_vdisplay; int psr_max_h = 0, psr_max_v = 0; - /* - * FIXME psr2_support is messed up. It's both computed - * dynamically during PSR enable, and extracted from sink - * caps during eDP detection. - */ if (!dev_priv->psr.sink_psr2_support) return false; @@ -575,6 +589,11 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp, return false; } + if (crtc_state->crc_enabled) { + DRM_DEBUG_KMS("PSR2 not enabled because it would inhibit pipe CRC calculation\n"); + return false; + } + return true; } @@ -718,8 +737,11 @@ static void intel_psr_enable_locked(struct drm_i915_private *dev_priv, { struct intel_dp *intel_dp = dev_priv->psr.dp; - if (dev_priv->psr.enabled) - return; + WARN_ON(dev_priv->psr.enabled); + + dev_priv->psr.psr2_enabled = intel_psr2_enabled(dev_priv, crtc_state); + dev_priv->psr.busy_frontbuffer_bits = 0; + dev_priv->psr.pipe = to_intel_crtc(crtc_state->base.crtc)->pipe; DRM_DEBUG_KMS("Enabling PSR%s\n", dev_priv->psr.psr2_enabled ? 
"2" : "1"); @@ -752,20 +774,13 @@ void intel_psr_enable(struct intel_dp *intel_dp, WARN_ON(dev_priv->drrs.dp); mutex_lock(&dev_priv->psr.lock); - if (dev_priv->psr.prepared) { - DRM_DEBUG_KMS("PSR already in use\n"); + + if (!psr_global_enabled(dev_priv->psr.debug)) { + DRM_DEBUG_KMS("PSR disabled by flag\n"); goto unlock; } - dev_priv->psr.psr2_enabled = intel_psr2_enabled(dev_priv, crtc_state); - dev_priv->psr.busy_frontbuffer_bits = 0; - dev_priv->psr.prepared = true; - dev_priv->psr.pipe = to_intel_crtc(crtc_state->base.crtc)->pipe; - - if (psr_global_enabled(dev_priv->psr.debug)) - intel_psr_enable_locked(dev_priv, crtc_state); - else - DRM_DEBUG_KMS("PSR disabled by flag\n"); + intel_psr_enable_locked(dev_priv, crtc_state); unlock: mutex_unlock(&dev_priv->psr.lock); @@ -819,8 +834,8 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp) } /* Wait till PSR is idle */ - if (intel_wait_for_register(dev_priv, psr_status, psr_status_mask, 0, - 2000)) + if (intel_wait_for_register(&dev_priv->uncore, + psr_status, psr_status_mask, 0, 2000)) DRM_ERROR("Timed out waiting PSR idle state\n"); /* Disable PSR on Sink */ @@ -848,18 +863,69 @@ void intel_psr_disable(struct intel_dp *intel_dp, return; mutex_lock(&dev_priv->psr.lock); - if (!dev_priv->psr.prepared) { - mutex_unlock(&dev_priv->psr.lock); - return; - } intel_psr_disable_locked(intel_dp); - dev_priv->psr.prepared = false; mutex_unlock(&dev_priv->psr.lock); cancel_work_sync(&dev_priv->psr.work); } +static void psr_force_hw_tracking_exit(struct drm_i915_private *dev_priv) +{ + /* + * Display WA #0884: all + * This documented WA for bxt can be safely applied + * broadly so we can force HW tracking to exit PSR + * instead of disabling and re-enabling. + * Workaround tells us to write 0 to CUR_SURFLIVE_A, + * but it makes more sense write to the current active + * pipe. + */ + I915_WRITE(CURSURFLIVE(dev_priv->psr.pipe), 0); +} + +/** + * intel_psr_update - Update PSR state + * @intel_dp: Intel DP + * @crtc_state: new CRTC state + * + * This functions will update PSR states, disabling, enabling or switching PSR + * version when executing fastsets. For full modeset, intel_psr_disable() and + * intel_psr_enable() should be called instead. + */ +void intel_psr_update(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct i915_psr *psr = &dev_priv->psr; + bool enable, psr2_enable; + + if (!CAN_PSR(dev_priv) || READ_ONCE(psr->dp) != intel_dp) + return; + + mutex_lock(&dev_priv->psr.lock); + + enable = crtc_state->has_psr && psr_global_enabled(psr->debug); + psr2_enable = intel_psr2_enabled(dev_priv, crtc_state); + + if (enable == psr->enabled && psr2_enable == psr->psr2_enabled) { + /* Force a PSR exit when enabling CRC to avoid CRC timeouts */ + if (crtc_state->crc_enabled && psr->enabled) + psr_force_hw_tracking_exit(dev_priv); + + goto unlock; + } + + if (psr->enabled) + intel_psr_disable_locked(intel_dp); + + if (enable) + intel_psr_enable_locked(dev_priv, crtc_state); + +unlock: + mutex_unlock(&dev_priv->psr.lock); +} + /** * intel_psr_wait_for_idle - wait for PSR1 to idle * @new_crtc_state: new CRTC state @@ -890,7 +956,7 @@ int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state, * defensive enough to cover everything. 
*/ - return __intel_wait_for_register(dev_priv, EDP_PSR_STATUS, + return __intel_wait_for_register(&dev_priv->uncore, EDP_PSR_STATUS, EDP_PSR_STATUS_STATE_MASK, EDP_PSR_STATUS_STATE_IDLE, 2, 50, out_value); @@ -915,7 +981,7 @@ static bool __psr_wait_for_idle_locked(struct drm_i915_private *dev_priv) mutex_unlock(&dev_priv->psr.lock); - err = intel_wait_for_register(dev_priv, reg, mask, 0, 50); + err = intel_wait_for_register(&dev_priv->uncore, reg, mask, 0, 50); if (err) DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n"); @@ -924,36 +990,63 @@ static bool __psr_wait_for_idle_locked(struct drm_i915_private *dev_priv) return err == 0 && dev_priv->psr.enabled; } -static bool switching_psr(struct drm_i915_private *dev_priv, - struct intel_crtc_state *crtc_state, - u32 mode) +static int intel_psr_fastset_force(struct drm_i915_private *dev_priv) { - /* Can't switch psr state anyway if PSR2 is not supported. */ - if (!crtc_state || !crtc_state->has_psr2) - return false; + struct drm_device *dev = &dev_priv->drm; + struct drm_modeset_acquire_ctx ctx; + struct drm_atomic_state *state; + struct drm_crtc *crtc; + int err; - if (dev_priv->psr.psr2_enabled && mode == I915_PSR_DEBUG_FORCE_PSR1) - return true; + state = drm_atomic_state_alloc(dev); + if (!state) + return -ENOMEM; - if (!dev_priv->psr.psr2_enabled && mode != I915_PSR_DEBUG_FORCE_PSR1) - return true; + drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE); + state->acquire_ctx = &ctx; + +retry: + drm_for_each_crtc(crtc, dev) { + struct drm_crtc_state *crtc_state; + struct intel_crtc_state *intel_crtc_state; + + crtc_state = drm_atomic_get_crtc_state(state, crtc); + if (IS_ERR(crtc_state)) { + err = PTR_ERR(crtc_state); + goto error; + } + + intel_crtc_state = to_intel_crtc_state(crtc_state); - return false; + if (crtc_state->active && intel_crtc_state->has_psr) { + /* Mark mode as changed to trigger a pipe->update() */ + crtc_state->mode_changed = true; + break; + } + } + + err = drm_atomic_commit(state); + +error: + if (err == -EDEADLK) { + drm_atomic_state_clear(state); + err = drm_modeset_backoff(&ctx); + if (!err) + goto retry; + } + + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); + drm_atomic_state_put(state); + + return err; } -int intel_psr_set_debugfs_mode(struct drm_i915_private *dev_priv, - struct drm_modeset_acquire_ctx *ctx, - u64 val) +int intel_psr_debug_set(struct drm_i915_private *dev_priv, u64 val) { - struct drm_device *dev = &dev_priv->drm; - struct drm_connector_state *conn_state; - struct intel_crtc_state *crtc_state = NULL; - struct drm_crtc_commit *commit; - struct drm_crtc *crtc; - struct intel_dp *dp; + const u32 mode = val & I915_PSR_DEBUG_MODE_MASK; + u32 old_mode; int ret; - bool enable; - u32 mode = val & I915_PSR_DEBUG_MODE_MASK; if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_MODE_MASK) || mode > I915_PSR_DEBUG_FORCE_PSR1) { @@ -961,49 +1054,19 @@ int intel_psr_set_debugfs_mode(struct drm_i915_private *dev_priv, return -EINVAL; } - ret = drm_modeset_lock(&dev->mode_config.connection_mutex, ctx); - if (ret) - return ret; - - /* dev_priv->psr.dp should be set once and then never touched again. 
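The new intel_psr_fastset_force() above no longer reasons about the current commit; it builds a throwaway atomic state, marks the PSR CRTC's mode as changed to trigger a fastset, and retries the commit through the usual backoff dance whenever it hits -EDEADLK. A standalone skeleton of that retry shape; commit() and backoff() below are placeholders for the DRM helpers, not real API:

#include <stdio.h>

#define EDEADLK 35

static int attempts;

/* Stand-in for drm_atomic_commit(): fail once with -EDEADLK to exercise
 * the retry path, then succeed. */
static int commit(void)
{
	return ++attempts == 1 ? -EDEADLK : 0;
}

/* Stand-in for drm_atomic_state_clear() + drm_modeset_backoff(). */
static int backoff(void)
{
	puts("dropping locks and backing off");
	return 0;
}

static int fastset_force(void)
{
	int err;

retry:
	err = commit();
	if (err == -EDEADLK && backoff() == 0)
		goto retry;

	printf("commit finished with %d after %d attempt(s)\n", err, attempts);
	return err;
}

int main(void)
{
	return fastset_force();
}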
*/ - dp = READ_ONCE(dev_priv->psr.dp); - conn_state = dp->attached_connector->base.state; - crtc = conn_state->crtc; - if (crtc) { - ret = drm_modeset_lock(&crtc->mutex, ctx); - if (ret) - return ret; - - crtc_state = to_intel_crtc_state(crtc->state); - commit = crtc_state->base.commit; - } else { - commit = conn_state->commit; - } - if (commit) { - ret = wait_for_completion_interruptible(&commit->hw_done); - if (ret) - return ret; - } - ret = mutex_lock_interruptible(&dev_priv->psr.lock); if (ret) return ret; - enable = psr_global_enabled(val); - - if (!enable || switching_psr(dev_priv, crtc_state, mode)) - intel_psr_disable_locked(dev_priv->psr.dp); - + old_mode = dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK; dev_priv->psr.debug = val; - if (crtc) - dev_priv->psr.psr2_enabled = intel_psr2_enabled(dev_priv, crtc_state); - intel_psr_irq_control(dev_priv, dev_priv->psr.debug); - if (dev_priv->psr.prepared && enable) - intel_psr_enable_locked(dev_priv, crtc_state); - mutex_unlock(&dev_priv->psr.lock); + + if (old_mode != mode) + ret = intel_psr_fastset_force(dev_priv); + return ret; } @@ -1121,18 +1184,8 @@ void intel_psr_flush(struct drm_i915_private *dev_priv, dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits; /* By definition flush = invalidate + flush */ - if (frontbuffer_bits) { - /* - * Display WA #0884: all - * This documented WA for bxt can be safely applied - * broadly so we can force HW tracking to exit PSR - * instead of disabling and re-enabling. - * Workaround tells us to write 0 to CUR_SURFLIVE_A, - * but it makes more sense write to the current active - * pipe. - */ - I915_WRITE(CURSURFLIVE(dev_priv->psr.pipe), 0); - } + if (frontbuffer_bits) + psr_force_hw_tracking_exit(dev_priv); if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits) schedule_work(&dev_priv->psr.work); diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 7f841dba87b3..48ba4d61a4ae 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -43,12 +43,6 @@ */ #define LEGACY_REQUEST_SIZE 200 -static inline u32 intel_hws_seqno_address(struct intel_engine_cs *engine) -{ - return (i915_ggtt_offset(engine->status_page.vma) + - I915_GEM_HWS_INDEX_ADDR); -} - unsigned int intel_ring_update_space(struct intel_ring *ring) { unsigned int space; @@ -317,9 +311,9 @@ static u32 *gen6_rcs_emit_breadcrumb(struct i915_request *rq, u32 *cs) *cs++ = rq->fence.seqno; *cs++ = GFX_OP_PIPE_CONTROL(4); - *cs++ = PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL; - *cs++ = intel_hws_seqno_address(rq->engine) | PIPE_CONTROL_GLOBAL_GTT; - *cs++ = rq->global_seqno; + *cs++ = PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_STORE_DATA_INDEX; + *cs++ = I915_GEM_HWS_HANGCHECK_ADDR | PIPE_CONTROL_GLOBAL_GTT; + *cs++ = intel_engine_next_hangcheck_seqno(rq->engine); *cs++ = MI_USER_INTERRUPT; *cs++ = MI_NOOP; @@ -424,10 +418,10 @@ static u32 *gen7_rcs_emit_breadcrumb(struct i915_request *rq, u32 *cs) *cs++ = GFX_OP_PIPE_CONTROL(4); *cs++ = (PIPE_CONTROL_QW_WRITE | - PIPE_CONTROL_GLOBAL_GTT_IVB | - PIPE_CONTROL_CS_STALL); - *cs++ = intel_hws_seqno_address(rq->engine); - *cs++ = rq->global_seqno; + PIPE_CONTROL_STORE_DATA_INDEX | + PIPE_CONTROL_GLOBAL_GTT_IVB); + *cs++ = I915_GEM_HWS_HANGCHECK_ADDR; + *cs++ = intel_engine_next_hangcheck_seqno(rq->engine); *cs++ = MI_USER_INTERRUPT; *cs++ = MI_NOOP; @@ -448,8 +442,8 @@ static u32 *gen6_xcs_emit_breadcrumb(struct i915_request *rq, u32 *cs) *cs++ = rq->fence.seqno; *cs++ = MI_FLUSH_DW | 
MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX; - *cs++ = I915_GEM_HWS_INDEX_ADDR | MI_FLUSH_DW_USE_GTT; - *cs++ = rq->global_seqno; + *cs++ = I915_GEM_HWS_HANGCHECK_ADDR | MI_FLUSH_DW_USE_GTT; + *cs++ = intel_engine_next_hangcheck_seqno(rq->engine); *cs++ = MI_USER_INTERRUPT; *cs++ = MI_NOOP; @@ -473,8 +467,8 @@ static u32 *gen7_xcs_emit_breadcrumb(struct i915_request *rq, u32 *cs) *cs++ = rq->fence.seqno; *cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX; - *cs++ = I915_GEM_HWS_INDEX_ADDR | MI_FLUSH_DW_USE_GTT; - *cs++ = rq->global_seqno; + *cs++ = I915_GEM_HWS_HANGCHECK_ADDR | MI_FLUSH_DW_USE_GTT; + *cs++ = intel_engine_next_hangcheck_seqno(rq->engine); for (i = 0; i < GEN7_XCS_WA; i++) { *cs++ = MI_STORE_DWORD_INDEX; @@ -554,16 +548,17 @@ static void set_hwsp(struct intel_engine_cs *engine, u32 offset) */ default: GEM_BUG_ON(engine->id); - case RCS: + /* fallthrough */ + case RCS0: hwsp = RENDER_HWS_PGA_GEN7; break; - case BCS: + case BCS0: hwsp = BLT_HWS_PGA_GEN7; break; - case VCS: + case VCS0: hwsp = BSD_HWS_PGA_GEN7; break; - case VECS: + case VECS0: hwsp = VEBOX_HWS_PGA_GEN7; break; } @@ -580,19 +575,19 @@ static void set_hwsp(struct intel_engine_cs *engine, u32 offset) static void flush_cs_tlb(struct intel_engine_cs *engine) { struct drm_i915_private *dev_priv = engine->i915; - i915_reg_t instpm = RING_INSTPM(engine->mmio_base); if (!IS_GEN_RANGE(dev_priv, 6, 7)) return; /* ring should be idle before issuing a sync flush*/ - WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0); - - I915_WRITE(instpm, - _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE | - INSTPM_SYNC_FLUSH)); - if (intel_wait_for_register(dev_priv, - instpm, INSTPM_SYNC_FLUSH, 0, + WARN_ON((ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0); + + ENGINE_WRITE(engine, RING_INSTPM, + _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE | + INSTPM_SYNC_FLUSH)); + if (intel_wait_for_register(engine->uncore, + RING_INSTPM(engine->mmio_base), + INSTPM_SYNC_FLUSH, 0, 1000)) DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n", engine->name); @@ -611,32 +606,36 @@ static bool stop_ring(struct intel_engine_cs *engine) struct drm_i915_private *dev_priv = engine->i915; if (INTEL_GEN(dev_priv) > 2) { - I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING)); - if (intel_wait_for_register(dev_priv, + ENGINE_WRITE(engine, + RING_MI_MODE, _MASKED_BIT_ENABLE(STOP_RING)); + if (intel_wait_for_register(engine->uncore, RING_MI_MODE(engine->mmio_base), MODE_IDLE, MODE_IDLE, 1000)) { DRM_ERROR("%s : timed out trying to stop ring\n", engine->name); - /* Sometimes we observe that the idle flag is not + + /* + * Sometimes we observe that the idle flag is not * set even though the ring is empty. So double * check before giving up. 
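The stop_ring()/flush_cs_tlb() hunks above replace the device-wide I915_READ/WRITE macros with per-engine ENGINE_READ/ENGINE_WRITE accessors that add the engine's mmio_base to a fixed ring register layout. A minimal standalone model of that accessor scheme, using an in-memory array as a stand-in for the MMIO aperture and invented offsets rather than the real register map:

#include <stdint.h>
#include <stdio.h>

#define RING_TAIL 0x30
#define RING_HEAD 0x34
#define RING_CTL  0x3c

struct engine {
	uint32_t mmio_base;
	uint32_t *regs;		/* stand-in for the MMIO aperture */
};

static uint32_t engine_read(const struct engine *e, uint32_t reg)
{
	return e->regs[(e->mmio_base + reg) / 4];
}

static void engine_write(struct engine *e, uint32_t reg, uint32_t val)
{
	e->regs[(e->mmio_base + reg) / 4] = val;
}

int main(void)
{
	uint32_t regs[0x1000] = { 0 };
	struct engine rcs0 = { .mmio_base = 0x2000, .regs = regs };

	/* The stop_ring() idea: park HEAD at TAIL, then clear everything. */
	engine_write(&rcs0, RING_TAIL, 0x80);
	engine_write(&rcs0, RING_HEAD, engine_read(&rcs0, RING_TAIL));
	engine_write(&rcs0, RING_HEAD, 0);
	engine_write(&rcs0, RING_TAIL, 0);
	engine_write(&rcs0, RING_CTL, 0);

	printf("HEAD=%u TAIL=%u CTL=%u\n",
	       engine_read(&rcs0, RING_HEAD),
	       engine_read(&rcs0, RING_TAIL),
	       engine_read(&rcs0, RING_CTL));
	return 0;
}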
*/ - if (I915_READ_HEAD(engine) != I915_READ_TAIL(engine)) + if (ENGINE_READ(engine, RING_HEAD) != + ENGINE_READ(engine, RING_TAIL)) return false; } } - I915_WRITE_HEAD(engine, I915_READ_TAIL(engine)); + ENGINE_WRITE(engine, RING_HEAD, ENGINE_READ(engine, RING_TAIL)); - I915_WRITE_HEAD(engine, 0); - I915_WRITE_TAIL(engine, 0); + ENGINE_WRITE(engine, RING_HEAD, 0); + ENGINE_WRITE(engine, RING_TAIL, 0); /* The ring must be empty before it is disabled */ - I915_WRITE_CTL(engine, 0); + ENGINE_WRITE(engine, RING_CTL, 0); - return (I915_READ_HEAD(engine) & HEAD_ADDR) == 0; + return (ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR) == 0; } static int init_ring_common(struct intel_engine_cs *engine) @@ -645,26 +644,26 @@ static int init_ring_common(struct intel_engine_cs *engine) struct intel_ring *ring = engine->buffer; int ret = 0; - intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL); if (!stop_ring(engine)) { /* G45 ring initialization often fails to reset head to zero */ DRM_DEBUG_DRIVER("%s head not reset to zero " "ctl %08x head %08x tail %08x start %08x\n", engine->name, - I915_READ_CTL(engine), - I915_READ_HEAD(engine), - I915_READ_TAIL(engine), - I915_READ_START(engine)); + ENGINE_READ(engine, RING_CTL), + ENGINE_READ(engine, RING_HEAD), + ENGINE_READ(engine, RING_TAIL), + ENGINE_READ(engine, RING_START)); if (!stop_ring(engine)) { DRM_ERROR("failed to set %s head to zero " "ctl %08x head %08x tail %08x start %08x\n", engine->name, - I915_READ_CTL(engine), - I915_READ_HEAD(engine), - I915_READ_TAIL(engine), - I915_READ_START(engine)); + ENGINE_READ(engine, RING_CTL), + ENGINE_READ(engine, RING_HEAD), + ENGINE_READ(engine, RING_TAIL), + ENGINE_READ(engine, RING_START)); ret = -EIO; goto out; } @@ -678,18 +677,18 @@ static int init_ring_common(struct intel_engine_cs *engine) intel_engine_reset_breadcrumbs(engine); /* Enforce ordering by reading HEAD register back */ - I915_READ_HEAD(engine); + ENGINE_READ(engine, RING_HEAD); /* Initialize the ring. This must happen _after_ we've cleared the ring * registers with the above sequence (the readback of the HEAD registers * also enforces ordering), otherwise the hw might lose the new ring * register values. */ - I915_WRITE_START(engine, i915_ggtt_offset(ring->vma)); + ENGINE_WRITE(engine, RING_START, i915_ggtt_offset(ring->vma)); /* WaClearRingBufHeadRegAtInit:ctg,elk */ - if (I915_READ_HEAD(engine)) + if (ENGINE_READ(engine, RING_HEAD)) DRM_DEBUG_DRIVER("%s initialization failed [head=%08x], fudging\n", - engine->name, I915_READ_HEAD(engine)); + engine->name, ENGINE_READ(engine, RING_HEAD)); /* Check that the ring offsets point within the ring! 
*/ GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->head)); @@ -697,42 +696,44 @@ static int init_ring_common(struct intel_engine_cs *engine) intel_ring_update_space(ring); /* First wake the ring up to an empty/idle ring */ - I915_WRITE_HEAD(engine, ring->head); - I915_WRITE_TAIL(engine, ring->head); - (void)I915_READ_TAIL(engine); + ENGINE_WRITE(engine, RING_HEAD, ring->head); + ENGINE_WRITE(engine, RING_TAIL, ring->head); + ENGINE_POSTING_READ(engine, RING_TAIL); - I915_WRITE_CTL(engine, RING_CTL_SIZE(ring->size) | RING_VALID); + ENGINE_WRITE(engine, RING_CTL, RING_CTL_SIZE(ring->size) | RING_VALID); /* If the head is still not zero, the ring is dead */ - if (intel_wait_for_register(dev_priv, RING_CTL(engine->mmio_base), + if (intel_wait_for_register(engine->uncore, + RING_CTL(engine->mmio_base), RING_VALID, RING_VALID, 50)) { DRM_ERROR("%s initialization failed " "ctl %08x (valid? %d) head %08x [%08x] tail %08x [%08x] start %08x [expected %08x]\n", engine->name, - I915_READ_CTL(engine), - I915_READ_CTL(engine) & RING_VALID, - I915_READ_HEAD(engine), ring->head, - I915_READ_TAIL(engine), ring->tail, - I915_READ_START(engine), + ENGINE_READ(engine, RING_CTL), + ENGINE_READ(engine, RING_CTL) & RING_VALID, + ENGINE_READ(engine, RING_HEAD), ring->head, + ENGINE_READ(engine, RING_TAIL), ring->tail, + ENGINE_READ(engine, RING_START), i915_ggtt_offset(ring->vma)); ret = -EIO; goto out; } if (INTEL_GEN(dev_priv) > 2) - I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING)); + ENGINE_WRITE(engine, + RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING)); /* Now awake, let it get started */ if (ring->tail != ring->head) { - I915_WRITE_TAIL(engine, ring->tail); - (void)I915_READ_TAIL(engine); + ENGINE_WRITE(engine, RING_TAIL, ring->tail); + ENGINE_POSTING_READ(engine, RING_TAIL); } /* Papering over lost _interrupts_ immediately following the restart */ intel_engine_queue_breadcrumbs(engine); out: - intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL); return ret; } @@ -758,11 +759,6 @@ static void reset_ring(struct intel_engine_cs *engine, bool stalled) } } - GEM_TRACE("%s seqno=%d, current=%d, stalled? %s\n", - engine->name, - rq ? rq->global_seqno : 0, - intel_engine_get_seqno(engine), - yesno(stalled)); /* * The guilty request will get skipped on a hung engine. * @@ -878,7 +874,7 @@ static int init_render_ring(struct intel_engine_cs *engine) I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING)); if (INTEL_GEN(dev_priv) >= 6) - I915_WRITE_IMR(engine, ~engine->irq_keep_mask); + ENGINE_WRITE(engine, RING_IMR, ~engine->irq_keep_mask); return 0; } @@ -892,18 +888,12 @@ static void cancel_requests(struct intel_engine_cs *engine) /* Mark all submitted requests as skipped. 
*/ list_for_each_entry(request, &engine->timeline.requests, link) { - GEM_BUG_ON(!request->global_seqno); - if (!i915_request_signaled(request)) dma_fence_set_error(&request->fence, -EIO); i915_request_mark_complete(request); } - intel_write_status_page(engine, - I915_GEM_HWS_INDEX, - intel_engine_last_submit(engine)); - /* Remaining _unready_ requests will be nop'ed when submitted */ spin_unlock_irqrestore(&engine->timeline.lock, flags); @@ -911,12 +901,10 @@ static void cancel_requests(struct intel_engine_cs *engine) static void i9xx_submit_request(struct i915_request *request) { - struct drm_i915_private *dev_priv = request->i915; - i915_request_submit(request); - I915_WRITE_TAIL(request->engine, - intel_ring_set_tail(request->ring, request->tail)); + ENGINE_WRITE(request->engine, RING_TAIL, + intel_ring_set_tail(request->ring, request->tail)); } static u32 *i9xx_emit_breadcrumb(struct i915_request *rq, u32 *cs) @@ -931,8 +919,8 @@ static u32 *i9xx_emit_breadcrumb(struct i915_request *rq, u32 *cs) *cs++ = rq->fence.seqno; *cs++ = MI_STORE_DWORD_INDEX; - *cs++ = I915_GEM_HWS_INDEX_ADDR; - *cs++ = rq->global_seqno; + *cs++ = I915_GEM_HWS_HANGCHECK_ADDR; + *cs++ = intel_engine_next_hangcheck_seqno(rq->engine); *cs++ = MI_USER_INTERRUPT; @@ -953,14 +941,14 @@ static u32 *gen5_emit_breadcrumb(struct i915_request *rq, u32 *cs) *cs++ = MI_FLUSH; *cs++ = MI_STORE_DWORD_INDEX; - *cs++ = I915_GEM_HWS_SEQNO_ADDR; - *cs++ = rq->fence.seqno; + *cs++ = I915_GEM_HWS_HANGCHECK_ADDR; + *cs++ = intel_engine_next_hangcheck_seqno(rq->engine); BUILD_BUG_ON(GEN5_WA_STORES < 1); for (i = 0; i < GEN5_WA_STORES; i++) { *cs++ = MI_STORE_DWORD_INDEX; - *cs++ = I915_GEM_HWS_INDEX_ADDR; - *cs++ = rq->global_seqno; + *cs++ = I915_GEM_HWS_SEQNO_ADDR; + *cs++ = rq->fence.seqno; } *cs++ = MI_USER_INTERRUPT; @@ -988,20 +976,20 @@ gen5_irq_disable(struct intel_engine_cs *engine) static void i9xx_irq_enable(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = engine->i915; + GEM_BUG_ON(engine->id != RCS0); - dev_priv->irq_mask &= ~engine->irq_enable_mask; - I915_WRITE(IMR, dev_priv->irq_mask); - POSTING_READ_FW(RING_IMR(engine->mmio_base)); + engine->i915->irq_mask &= ~engine->irq_enable_mask; + ENGINE_WRITE(engine, RING_IMR, engine->i915->irq_mask); + ENGINE_POSTING_READ(engine, RING_IMR); } static void i9xx_irq_disable(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = engine->i915; + GEM_BUG_ON(engine->id != RCS0); - dev_priv->irq_mask |= engine->irq_enable_mask; - I915_WRITE(IMR, dev_priv->irq_mask); + engine->i915->irq_mask |= engine->irq_enable_mask; + ENGINE_WRITE(engine, RING_IMR, engine->i915->irq_mask); } static void @@ -1041,47 +1029,38 @@ bsd_ring_flush(struct i915_request *rq, u32 mode) static void gen6_irq_enable(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = engine->i915; - - I915_WRITE_IMR(engine, - ~(engine->irq_enable_mask | - engine->irq_keep_mask)); + ENGINE_WRITE(engine, RING_IMR, + ~(engine->irq_enable_mask | engine->irq_keep_mask)); /* Flush/delay to ensure the RING_IMR is active before the GT IMR */ - POSTING_READ_FW(RING_IMR(engine->mmio_base)); + ENGINE_POSTING_READ(engine, RING_IMR); - gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask); + gen5_enable_gt_irq(engine->i915, engine->irq_enable_mask); } static void gen6_irq_disable(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = engine->i915; - - I915_WRITE_IMR(engine, ~engine->irq_keep_mask); - gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask); + 
ENGINE_WRITE(engine, RING_IMR, ~engine->irq_keep_mask); + gen5_disable_gt_irq(engine->i915, engine->irq_enable_mask); } static void hsw_vebox_irq_enable(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = engine->i915; - - I915_WRITE_IMR(engine, ~engine->irq_enable_mask); + ENGINE_WRITE(engine, RING_IMR, ~engine->irq_enable_mask); /* Flush/delay to ensure the RING_IMR is active before the GT IMR */ - POSTING_READ_FW(RING_IMR(engine->mmio_base)); + ENGINE_POSTING_READ(engine, RING_IMR); - gen6_unmask_pm_irq(dev_priv, engine->irq_enable_mask); + gen6_unmask_pm_irq(engine->i915, engine->irq_enable_mask); } static void hsw_vebox_irq_disable(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = engine->i915; - - I915_WRITE_IMR(engine, ~0); - gen6_mask_pm_irq(dev_priv, engine->irq_enable_mask); + ENGINE_WRITE(engine, RING_IMR, ~0); + gen6_mask_pm_irq(engine->i915, engine->irq_enable_mask); } static int @@ -1211,15 +1190,6 @@ int intel_ring_pin(struct intel_ring *ring) else flags |= PIN_HIGH; - if (!(vma->flags & I915_VMA_GLOBAL_BIND)) { - if (flags & PIN_MAPPABLE || map == I915_MAP_WC) - ret = i915_gem_object_set_to_gtt_domain(vma->obj, true); - else - ret = i915_gem_object_set_to_cpu_domain(vma->obj, true); - if (unlikely(ret)) - goto unpin_timeline; - } - ret = i915_vma_pin(vma, 0, 0, flags); if (unlikely(ret)) goto unpin_timeline; @@ -1323,6 +1293,7 @@ intel_engine_create_ring(struct intel_engine_cs *engine, if (!ring) return ERR_PTR(-ENOMEM); + kref_init(&ring->ref); INIT_LIST_HEAD(&ring->request_list); ring->timeline = i915_timeline_get(timeline); @@ -1347,9 +1318,9 @@ intel_engine_create_ring(struct intel_engine_cs *engine, return ring; } -void -intel_ring_free(struct intel_ring *ring) +void intel_ring_free(struct kref *ref) { + struct intel_ring *ring = container_of(ref, typeof(*ring), ref); struct drm_i915_gem_object *obj = ring->vma->obj; i915_vma_close(ring->vma); @@ -1359,17 +1330,24 @@ intel_ring_free(struct intel_ring *ring) kfree(ring); } -static void intel_ring_context_destroy(struct intel_context *ce) +static void __ring_context_fini(struct intel_context *ce) { - GEM_BUG_ON(ce->pin_count); - - if (!ce->state) - return; - GEM_BUG_ON(i915_gem_object_is_active(ce->state->obj)); i915_gem_object_put(ce->state->obj); } +static void ring_context_destroy(struct kref *ref) +{ + struct intel_context *ce = container_of(ref, typeof(*ce), ref); + + GEM_BUG_ON(intel_context_is_pinned(ce)); + + if (ce->state) + __ring_context_fini(ce); + + intel_context_free(ce); +} + static int __context_pin_ppgtt(struct i915_gem_context *ctx) { struct i915_hw_ppgtt *ppgtt; @@ -1400,17 +1378,6 @@ static int __context_pin(struct intel_context *ce) if (!vma) return 0; - /* - * Clear this page out of any CPU caches for coherent swap-in/out. - * We only want to do this on the first bind so that we do not stall - * on an active context (which by nature is already on the GPU). - */ - if (!(vma->flags & I915_VMA_GLOBAL_BIND)) { - err = i915_gem_object_set_to_gtt_domain(vma->obj, true); - if (err) - return err; - } - err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH); if (err) return err; @@ -1420,6 +1387,7 @@ static int __context_pin(struct intel_context *ce) * it cannot reclaim the object until we release it. 
*/ vma->obj->pin_global++; + vma->obj->mm.dirty = true; return 0; } @@ -1436,12 +1404,10 @@ static void __context_unpin(struct intel_context *ce) i915_vma_unpin(vma); } -static void intel_ring_context_unpin(struct intel_context *ce) +static void ring_context_unpin(struct intel_context *ce) { __context_unpin_ppgtt(ce->gem_context); __context_unpin(ce); - - i915_gem_context_put(ce->gem_context); } static struct i915_vma * @@ -1456,6 +1422,24 @@ alloc_context_vma(struct intel_engine_cs *engine) if (IS_ERR(obj)) return ERR_CAST(obj); + /* + * Try to make the context utilize L3 as well as LLC. + * + * On VLV we don't have L3 controls in the PTEs so we + * shouldn't touch the cache level, especially as that + * would make the object snooped which might have a + * negative performance impact. + * + * Snooping is required on non-llc platforms in execlist + * mode, but since all GGTT accesses use PAT entry 0 we + * get snooping anyway regardless of cache_level. + * + * This is only applicable for Ivy Bridge devices since + * later platforms don't have L3 control bits in the PTE. + */ + if (IS_IVYBRIDGE(i915)) + i915_gem_object_set_cache_coherency(obj, I915_CACHE_L3_LLC); + if (engine->default_state) { void *defaults, *vaddr; @@ -1473,29 +1457,10 @@ alloc_context_vma(struct intel_engine_cs *engine) } memcpy(vaddr, defaults, engine->context_size); - i915_gem_object_unpin_map(engine->default_state); - i915_gem_object_unpin_map(obj); - } - /* - * Try to make the context utilize L3 as well as LLC. - * - * On VLV we don't have L3 controls in the PTEs so we - * shouldn't touch the cache level, especially as that - * would make the object snooped which might have a - * negative performance impact. - * - * Snooping is required on non-llc platforms in execlist - * mode, but since all GGTT accesses use PAT entry 0 we - * get snooping anyway regardless of cache_level. - * - * This is only applicable for Ivy Bridge devices since - * later platforms don't have L3 control bits in the PTE. 
- */ - if (IS_IVYBRIDGE(i915)) { - /* Ignore any error, regard it as a simple optimisation */ - i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC); + i915_gem_object_flush_map(obj); + i915_gem_object_unpin_map(obj); } vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL); @@ -1513,70 +1478,46 @@ err_obj: return ERR_PTR(err); } -static struct intel_context * -__ring_context_pin(struct intel_engine_cs *engine, - struct i915_gem_context *ctx, - struct intel_context *ce) +static int ring_context_pin(struct intel_context *ce) { + struct intel_engine_cs *engine = ce->engine; int err; + /* One ringbuffer to rule them all */ + GEM_BUG_ON(!engine->buffer); + ce->ring = engine->buffer; + if (!ce->state && engine->context_size) { struct i915_vma *vma; vma = alloc_context_vma(engine); - if (IS_ERR(vma)) { - err = PTR_ERR(vma); - goto err; - } + if (IS_ERR(vma)) + return PTR_ERR(vma); ce->state = vma; } err = __context_pin(ce); if (err) - goto err; + return err; err = __context_pin_ppgtt(ce->gem_context); if (err) goto err_unpin; - i915_gem_context_get(ctx); - - /* One ringbuffer to rule them all */ - GEM_BUG_ON(!engine->buffer); - ce->ring = engine->buffer; - - return ce; + return 0; err_unpin: __context_unpin(ce); -err: - ce->pin_count = 0; - return ERR_PTR(err); + return err; } static const struct intel_context_ops ring_context_ops = { - .unpin = intel_ring_context_unpin, - .destroy = intel_ring_context_destroy, + .pin = ring_context_pin, + .unpin = ring_context_unpin, + .destroy = ring_context_destroy, }; -static struct intel_context * -intel_ring_context_pin(struct intel_engine_cs *engine, - struct i915_gem_context *ctx) -{ - struct intel_context *ce = to_intel_context(ctx, engine); - - lockdep_assert_held(&ctx->i915->drm.struct_mutex); - - if (likely(ce->pin_count++)) - return ce; - GEM_BUG_ON(!ce->pin_count); /* no overflow please! 
*/ - - ce->ops = &ring_context_ops; - - return __ring_context_pin(engine, ctx, ce); -} - static int intel_init_ring_buffer(struct intel_engine_cs *engine) { struct i915_timeline *timeline; @@ -1587,9 +1528,7 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine) if (err) return err; - timeline = i915_timeline_create(engine->i915, - engine->name, - engine->status_page.vma); + timeline = i915_timeline_create(engine->i915, engine->status_page.vma); if (IS_ERR(timeline)) { err = PTR_ERR(timeline); goto err; @@ -1621,7 +1560,7 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine) err_unpin: intel_ring_unpin(ring); err_ring: - intel_ring_free(ring); + intel_ring_put(ring); err: intel_engine_cleanup_common(engine); return err; @@ -1632,10 +1571,10 @@ void intel_engine_cleanup(struct intel_engine_cs *engine) struct drm_i915_private *dev_priv = engine->i915; WARN_ON(INTEL_GEN(dev_priv) > 2 && - (I915_READ_MODE(engine) & MODE_IDLE) == 0); + (ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0); intel_ring_unpin(engine->buffer); - intel_ring_free(engine->buffer); + intel_ring_put(engine->buffer); if (engine->cleanup) engine->cleanup(engine); @@ -1667,11 +1606,11 @@ static int load_pd_dir(struct i915_request *rq, return PTR_ERR(cs); *cs++ = MI_LOAD_REGISTER_IMM(1); - *cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine)); + *cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine->mmio_base)); *cs++ = PP_DIR_DCLV_2G; *cs++ = MI_LOAD_REGISTER_IMM(1); - *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine)); + *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine->mmio_base)); *cs++ = ppgtt->pd.base.ggtt_offset << 10; intel_ring_advance(rq, cs); @@ -1690,7 +1629,7 @@ static int flush_pd_dir(struct i915_request *rq) /* Stall until the page table load is complete */ *cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT; - *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine)); + *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine->mmio_base)); *cs++ = i915_scratch_offset(rq->i915); *cs++ = MI_NOOP; @@ -1703,8 +1642,8 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags) struct drm_i915_private *i915 = rq->i915; struct intel_engine_cs *engine = rq->engine; enum intel_engine_id id; - const int num_rings = - IS_HSW_GT1(i915) ? RUNTIME_INFO(i915)->num_rings - 1 : 0; + const int num_engines = + IS_HSW_GT1(i915) ? RUNTIME_INFO(i915)->num_engines - 1 : 0; bool force_restore = false; int len; u32 *cs; @@ -1718,7 +1657,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags) len = 4; if (IS_GEN(i915, 7)) - len += 2 + (num_rings ? 4*num_rings + 6 : 0); + len += 2 + (num_engines ? 4 * num_engines + 6 : 0); if (flags & MI_FORCE_RESTORE) { GEM_BUG_ON(flags & MI_RESTORE_INHIBIT); flags &= ~MI_FORCE_RESTORE; @@ -1733,10 +1672,10 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags) /* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */ if (IS_GEN(i915, 7)) { *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE; - if (num_rings) { + if (num_engines) { struct intel_engine_cs *signaller; - *cs++ = MI_LOAD_REGISTER_IMM(num_rings); + *cs++ = MI_LOAD_REGISTER_IMM(num_engines); for_each_engine(signaller, i915, id) { if (signaller == engine) continue; @@ -1763,8 +1702,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags) * placeholder we use to flush other contexts. 
*/ *cs++ = MI_SET_CONTEXT; - *cs++ = i915_ggtt_offset(to_intel_context(i915->kernel_context, - engine)->state) | + *cs++ = i915_ggtt_offset(engine->kernel_context->state) | MI_MM_SPACE_GTT | MI_RESTORE_INHIBIT; } @@ -1779,11 +1717,11 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags) *cs++ = MI_NOOP; if (IS_GEN(i915, 7)) { - if (num_rings) { + if (num_engines) { struct intel_engine_cs *signaller; i915_reg_t last_reg = {}; /* keep gcc quiet */ - *cs++ = MI_LOAD_REGISTER_IMM(num_rings); + *cs++ = MI_LOAD_REGISTER_IMM(num_engines); for_each_engine(signaller, i915, id) { if (signaller == engine) continue; @@ -1861,7 +1799,7 @@ static int switch_context(struct i915_request *rq) * explanation. */ loops = 1; - if (engine->id == BCS && IS_VALLEYVIEW(engine->i915)) + if (engine->id == BCS0 && IS_VALLEYVIEW(engine->i915)) loops = 32; do { @@ -1870,15 +1808,15 @@ static int switch_context(struct i915_request *rq) goto err; } while (--loops); - if (intel_engine_flag(engine) & ppgtt->pd_dirty_rings) { - unwind_mm = intel_engine_flag(engine); - ppgtt->pd_dirty_rings &= ~unwind_mm; + if (ppgtt->pd_dirty_engines & engine->mask) { + unwind_mm = engine->mask; + ppgtt->pd_dirty_engines &= ~unwind_mm; hw_flags = MI_FORCE_RESTORE; } } if (rq->hw_context->state) { - GEM_BUG_ON(engine->id != RCS); + GEM_BUG_ON(engine->id != RCS0); /* * The kernel context(s) is treated as pure scratch and is not @@ -1938,7 +1876,7 @@ static int switch_context(struct i915_request *rq) err_mm: if (unwind_mm) - ppgtt->pd_dirty_rings |= unwind_mm; + ppgtt->pd_dirty_engines |= unwind_mm; err: return ret; } @@ -1947,7 +1885,7 @@ static int ring_request_alloc(struct i915_request *request) { int ret; - GEM_BUG_ON(!request->hw_context->pin_count); + GEM_BUG_ON(!intel_context_is_pinned(request->hw_context)); GEM_BUG_ON(request->timeline->has_initial_breadcrumb); /* @@ -2108,23 +2046,23 @@ int intel_ring_cacheline_align(struct i915_request *rq) static void gen6_bsd_submit_request(struct i915_request *request) { - struct drm_i915_private *dev_priv = request->i915; + struct intel_uncore *uncore = request->engine->uncore; - intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL); /* Every tail move must follow the sequence below */ /* Disable notification that the ring is IDLE. The GT * will then assume that it is busy and bring it out of rc6. */ - I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL, - _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE)); + intel_uncore_write_fw(uncore, GEN6_BSD_SLEEP_PSMI_CONTROL, + _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE)); /* Clear the context id. Here be magic! */ - I915_WRITE64_FW(GEN6_BSD_RNCID, 0x0); + intel_uncore_write64_fw(uncore, GEN6_BSD_RNCID, 0x0); /* Wait for the ring not to be idle, i.e. for it to wake up. */ - if (__intel_wait_for_register_fw(dev_priv, + if (__intel_wait_for_register_fw(uncore, GEN6_BSD_SLEEP_PSMI_CONTROL, GEN6_BSD_SLEEP_INDICATOR, 0, @@ -2137,10 +2075,10 @@ static void gen6_bsd_submit_request(struct i915_request *request) /* Let the ring send IDLE messages to the GT again, * and so let it sleep to conserve power when idle. 
*/ - I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL, - _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE)); + intel_uncore_write_fw(uncore, GEN6_BSD_SLEEP_PSMI_CONTROL, + _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE)); - intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL); } static int mi_flush_dw(struct i915_request *rq, u32 flags) @@ -2282,7 +2220,7 @@ static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv, engine->reset.reset = reset_ring; engine->reset.finish = reset_finish; - engine->context_pin = intel_ring_context_pin; + engine->cops = &ring_context_ops; engine->request_alloc = ring_request_alloc; /* diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 710ffb221775..e58d6f04177b 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -6,22 +6,20 @@ #include <linux/hashtable.h> #include <linux/irq_work.h> +#include <linux/random.h> #include <linux/seqlock.h> #include "i915_gem_batch_pool.h" - -#include "i915_reg.h" #include "i915_pmu.h" +#include "i915_reg.h" #include "i915_request.h" #include "i915_selftest.h" #include "i915_timeline.h" +#include "intel_engine_types.h" #include "intel_gpu_commands.h" #include "intel_workarounds.h" struct drm_printer; -struct i915_sched_attr; - -#define I915_CMD_HASH_ORDER 9 /* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill, * but keeps the logic simple. Indeed, the whole purpose of this macro is just @@ -31,28 +29,44 @@ struct i915_sched_attr; #define CACHELINE_BYTES 64 #define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(u32)) -struct intel_hw_status_page { - struct i915_vma *vma; - u32 *addr; -}; +/* + * The register defines to be used with the following macros need to accept a + * base param, e.g: + * + * REG_FOO(base) _MMIO((base) + <relative offset>) + * ENGINE_READ(engine, REG_FOO); + * + * register arrays are to be defined and accessed as follows: + * + * REG_BAR(base, i) _MMIO((base) + <relative offset> + (i) * <shift>) + * ENGINE_READ_IDX(engine, REG_BAR, i) + */ + +#define __ENGINE_REG_OP(op__, engine__, ...) \ + intel_uncore_##op__((engine__)->uncore, __VA_ARGS__) -#define I915_READ_TAIL(engine) I915_READ(RING_TAIL((engine)->mmio_base)) -#define I915_WRITE_TAIL(engine, val) I915_WRITE(RING_TAIL((engine)->mmio_base), val) +#define __ENGINE_READ_OP(op__, engine__, reg__) \ + __ENGINE_REG_OP(op__, (engine__), reg__((engine__)->mmio_base)) -#define I915_READ_START(engine) I915_READ(RING_START((engine)->mmio_base)) -#define I915_WRITE_START(engine, val) I915_WRITE(RING_START((engine)->mmio_base), val) +#define ENGINE_READ16(...) __ENGINE_READ_OP(read16, __VA_ARGS__) +#define ENGINE_READ(...) __ENGINE_READ_OP(read, __VA_ARGS__) +#define ENGINE_READ_FW(...) __ENGINE_READ_OP(read_fw, __VA_ARGS__) +#define ENGINE_POSTING_READ(...) 
__ENGINE_READ_OP(posting_read, __VA_ARGS__) -#define I915_READ_HEAD(engine) I915_READ(RING_HEAD((engine)->mmio_base)) -#define I915_WRITE_HEAD(engine, val) I915_WRITE(RING_HEAD((engine)->mmio_base), val) +#define ENGINE_READ64(engine__, lower_reg__, upper_reg__) \ + __ENGINE_REG_OP(read64_2x32, (engine__), \ + lower_reg__((engine__)->mmio_base), \ + upper_reg__((engine__)->mmio_base)) -#define I915_READ_CTL(engine) I915_READ(RING_CTL((engine)->mmio_base)) -#define I915_WRITE_CTL(engine, val) I915_WRITE(RING_CTL((engine)->mmio_base), val) +#define ENGINE_READ_IDX(engine__, reg__, idx__) \ + __ENGINE_REG_OP(read, (engine__), reg__((engine__)->mmio_base, (idx__))) -#define I915_READ_IMR(engine) I915_READ(RING_IMR((engine)->mmio_base)) -#define I915_WRITE_IMR(engine, val) I915_WRITE(RING_IMR((engine)->mmio_base), val) +#define __ENGINE_WRITE_OP(op__, engine__, reg__, val__) \ + __ENGINE_REG_OP(op__, (engine__), reg__((engine__)->mmio_base), (val__)) -#define I915_READ_MODE(engine) I915_READ(RING_MI_MODE((engine)->mmio_base)) -#define I915_WRITE_MODE(engine, val) I915_WRITE(RING_MI_MODE((engine)->mmio_base), val) +#define ENGINE_WRITE16(...) __ENGINE_WRITE_OP(write16, __VA_ARGS__) +#define ENGINE_WRITE(...) __ENGINE_WRITE_OP(write, __VA_ARGS__) +#define ENGINE_WRITE_FW(...) __ENGINE_WRITE_OP(write_fw, __VA_ARGS__) /* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW to * do the writes, and that must have qw aligned offsets, simply pretend it's 8b. @@ -90,506 +104,7 @@ hangcheck_action_to_str(const enum intel_engine_hangcheck_action a) return "unknown"; } -#define I915_MAX_SLICES 3 -#define I915_MAX_SUBSLICES 8 - -#define instdone_slice_mask(dev_priv__) \ - (IS_GEN(dev_priv__, 7) ? \ - 1 : RUNTIME_INFO(dev_priv__)->sseu.slice_mask) - -#define instdone_subslice_mask(dev_priv__) \ - (IS_GEN(dev_priv__, 7) ? \ - 1 : RUNTIME_INFO(dev_priv__)->sseu.subslice_mask[0]) - -#define for_each_instdone_slice_subslice(dev_priv__, slice__, subslice__) \ - for ((slice__) = 0, (subslice__) = 0; \ - (slice__) < I915_MAX_SLICES; \ - (subslice__) = ((subslice__) + 1) < I915_MAX_SUBSLICES ? (subslice__) + 1 : 0, \ - (slice__) += ((subslice__) == 0)) \ - for_each_if((BIT(slice__) & instdone_slice_mask(dev_priv__)) && \ - (BIT(subslice__) & instdone_subslice_mask(dev_priv__))) - -struct intel_instdone { - u32 instdone; - /* The following exist only in the RCS engine */ - u32 slice_common; - u32 sampler[I915_MAX_SLICES][I915_MAX_SUBSLICES]; - u32 row[I915_MAX_SLICES][I915_MAX_SUBSLICES]; -}; - -struct intel_engine_hangcheck { - u64 acthd; - u32 seqno; - unsigned long action_timestamp; - struct intel_instdone instdone; -}; - -struct intel_ring { - struct i915_vma *vma; - void *vaddr; - - struct i915_timeline *timeline; - struct list_head request_list; - struct list_head active_link; - - u32 head; - u32 tail; - u32 emit; - - u32 space; - u32 size; - u32 effective_size; -}; - -struct i915_gem_context; -struct drm_i915_reg_table; - -/* - * we use a single page to load ctx workarounds so all of these - * values are referred in terms of dwords - * - * struct i915_wa_ctx_bb: - * offset: specifies batch starting position, also helpful in case - * if we want to have multiple batches at different offsets based on - * some criteria. It is not a requirement at the moment but provides - * an option for future use. 
- * size: size of the batch in DWORDS - */ -struct i915_ctx_workarounds { - struct i915_wa_ctx_bb { - u32 offset; - u32 size; - } indirect_ctx, per_ctx; - struct i915_vma *vma; -}; - -struct i915_request; - -#define I915_MAX_VCS 4 -#define I915_MAX_VECS 2 - -/* - * Engine IDs definitions. - * Keep instances of the same type engine together. - */ -enum intel_engine_id { - RCS = 0, - BCS, - VCS, - VCS2, - VCS3, - VCS4, -#define _VCS(n) (VCS + (n)) - VECS, - VECS2 -#define _VECS(n) (VECS + (n)) -}; - -struct i915_priolist { - struct list_head requests[I915_PRIORITY_COUNT]; - struct rb_node node; - unsigned long used; - int priority; -}; - -#define priolist_for_each_request(it, plist, idx) \ - for (idx = 0; idx < ARRAY_SIZE((plist)->requests); idx++) \ - list_for_each_entry(it, &(plist)->requests[idx], sched.link) - -#define priolist_for_each_request_consume(it, n, plist, idx) \ - for (; (idx = ffs((plist)->used)); (plist)->used &= ~BIT(idx - 1)) \ - list_for_each_entry_safe(it, n, \ - &(plist)->requests[idx - 1], \ - sched.link) - -struct st_preempt_hang { - struct completion completion; - unsigned int count; - bool inject_hang; -}; - -/** - * struct intel_engine_execlists - execlist submission queue and port state - * - * The struct intel_engine_execlists represents the combined logical state of - * driver and the hardware state for execlist mode of submission. - */ -struct intel_engine_execlists { - /** - * @tasklet: softirq tasklet for bottom handler - */ - struct tasklet_struct tasklet; - - /** - * @default_priolist: priority list for I915_PRIORITY_NORMAL - */ - struct i915_priolist default_priolist; - - /** - * @no_priolist: priority lists disabled - */ - bool no_priolist; - - /** - * @submit_reg: gen-specific execlist submission register - * set to the ExecList Submission Port (elsp) register pre-Gen11 and to - * the ExecList Submission Queue Contents register array for Gen11+ - */ - u32 __iomem *submit_reg; - - /** - * @ctrl_reg: the enhanced execlists control register, used to load the - * submit queue on the HW and to request preemptions to idle - */ - u32 __iomem *ctrl_reg; - - /** - * @port: execlist port states - * - * For each hardware ELSP (ExecList Submission Port) we keep - * track of the last request and the number of times we submitted - * that port to hw. We then count the number of times the hw reports - * a context completion or preemption. As only one context can - * be active on hw, we limit resubmission of context to port[0]. This - * is called Lite Restore, of the context. - */ - struct execlist_port { - /** - * @request_count: combined request and submission count - */ - struct i915_request *request_count; -#define EXECLIST_COUNT_BITS 2 -#define port_request(p) ptr_mask_bits((p)->request_count, EXECLIST_COUNT_BITS) -#define port_count(p) ptr_unmask_bits((p)->request_count, EXECLIST_COUNT_BITS) -#define port_pack(rq, count) ptr_pack_bits(rq, count, EXECLIST_COUNT_BITS) -#define port_unpack(p, count) ptr_unpack_bits((p)->request_count, count, EXECLIST_COUNT_BITS) -#define port_set(p, packed) ((p)->request_count = (packed)) -#define port_isset(p) ((p)->request_count) -#define port_index(p, execlists) ((p) - (execlists)->port) - - /** - * @context_id: context ID for port - */ - GEM_DEBUG_DECL(u32 context_id); - -#define EXECLIST_MAX_PORTS 2 - } port[EXECLIST_MAX_PORTS]; - - /** - * @active: is the HW active? We consider the HW as active after - * submitting any context for execution and until we have seen the - * last context completion event. 
After that, we do not expect any - * more events until we submit, and so can park the HW. - * - * As we have a small number of different sources from which we feed - * the HW, we track the state of each inside a single bitfield. - */ - unsigned int active; -#define EXECLISTS_ACTIVE_USER 0 -#define EXECLISTS_ACTIVE_PREEMPT 1 -#define EXECLISTS_ACTIVE_HWACK 2 - - /** - * @port_mask: number of execlist ports - 1 - */ - unsigned int port_mask; - - /** - * @queue_priority_hint: Highest pending priority. - * - * When we add requests into the queue, or adjust the priority of - * executing requests, we compute the maximum priority of those - * pending requests. We can then use this value to determine if - * we need to preempt the executing requests to service the queue. - * However, since the we may have recorded the priority of an inflight - * request we wanted to preempt but since completed, at the time of - * dequeuing the priority hint may no longer may match the highest - * available request priority. - */ - int queue_priority_hint; - - /** - * @queue: queue of requests, in priority lists - */ - struct rb_root_cached queue; - - /** - * @csb_write: control register for Context Switch buffer - * - * Note this register may be either mmio or HWSP shadow. - */ - u32 *csb_write; - - /** - * @csb_status: status array for Context Switch buffer - * - * Note these register may be either mmio or HWSP shadow. - */ - u32 *csb_status; - - /** - * @preempt_complete_status: expected CSB upon completing preemption - */ - u32 preempt_complete_status; - - /** - * @csb_head: context status buffer head - */ - u8 csb_head; - - I915_SELFTEST_DECLARE(struct st_preempt_hang preempt_hang;) -}; - -#define INTEL_ENGINE_CS_MAX_NAME 8 - -struct intel_engine_cs { - struct drm_i915_private *i915; - char name[INTEL_ENGINE_CS_MAX_NAME]; - - enum intel_engine_id id; - unsigned int hw_id; - unsigned int guc_id; - - u8 uabi_id; - u8 uabi_class; - - u8 class; - u8 instance; - u32 context_size; - u32 mmio_base; - - struct intel_ring *buffer; - - struct i915_timeline timeline; - - struct drm_i915_gem_object *default_state; - void *pinned_default_state; - - /* Rather than have every client wait upon all user interrupts, - * with the herd waking after every interrupt and each doing the - * heavyweight seqno dance, we delegate the task (of being the - * bottom-half of the user interrupt) to the first client. After - * every interrupt, we wake up one client, who does the heavyweight - * coherent seqno read and either goes back to sleep (if incomplete), - * or wakes up all the completed clients in parallel, before then - * transferring the bottom-half status to the next client in the queue. - * - * Compared to walking the entire list of waiters in a single dedicated - * bottom-half, we reduce the latency of the first waiter by avoiding - * a context switch, but incur additional coherent seqno reads when - * following the chain of request breadcrumbs. Since it is most likely - * that we have a single client waiting on each seqno, then reducing - * the overhead of waking that client is much preferred. - */ - struct intel_breadcrumbs { - spinlock_t irq_lock; - struct list_head signalers; - - struct irq_work irq_work; /* for use from inside irq_lock */ - - unsigned int irq_enabled; - - bool irq_armed; - } breadcrumbs; - - struct { - /** - * @enable: Bitmask of enable sample events on this engine. - * - * Bits correspond to sample event types, for instance - * I915_SAMPLE_QUEUED is bit 0 etc. 
- */ - u32 enable; - /** - * @enable_count: Reference count for the enabled samplers. - * - * Index number corresponds to @enum drm_i915_pmu_engine_sample. - */ - unsigned int enable_count[I915_ENGINE_SAMPLE_COUNT]; - /** - * @sample: Counter values for sampling events. - * - * Our internal timer stores the current counters in this field. - * - * Index number corresponds to @enum drm_i915_pmu_engine_sample. - */ - struct i915_pmu_sample sample[I915_ENGINE_SAMPLE_COUNT]; - } pmu; - - /* - * A pool of objects to use as shadow copies of client batch buffers - * when the command parser is enabled. Prevents the client from - * modifying the batch contents after software parsing. - */ - struct i915_gem_batch_pool batch_pool; - - struct intel_hw_status_page status_page; - struct i915_ctx_workarounds wa_ctx; - struct i915_wa_list ctx_wa_list; - struct i915_wa_list wa_list; - struct i915_wa_list whitelist; - - u32 irq_keep_mask; /* always keep these interrupts */ - u32 irq_enable_mask; /* bitmask to enable ring interrupt */ - void (*irq_enable)(struct intel_engine_cs *engine); - void (*irq_disable)(struct intel_engine_cs *engine); - - int (*init_hw)(struct intel_engine_cs *engine); - - struct { - void (*prepare)(struct intel_engine_cs *engine); - void (*reset)(struct intel_engine_cs *engine, bool stalled); - void (*finish)(struct intel_engine_cs *engine); - } reset; - - void (*park)(struct intel_engine_cs *engine); - void (*unpark)(struct intel_engine_cs *engine); - - void (*set_default_submission)(struct intel_engine_cs *engine); - - struct intel_context *(*context_pin)(struct intel_engine_cs *engine, - struct i915_gem_context *ctx); - - int (*request_alloc)(struct i915_request *rq); - int (*init_context)(struct i915_request *rq); - - int (*emit_flush)(struct i915_request *request, u32 mode); -#define EMIT_INVALIDATE BIT(0) -#define EMIT_FLUSH BIT(1) -#define EMIT_BARRIER (EMIT_INVALIDATE | EMIT_FLUSH) - int (*emit_bb_start)(struct i915_request *rq, - u64 offset, u32 length, - unsigned int dispatch_flags); -#define I915_DISPATCH_SECURE BIT(0) -#define I915_DISPATCH_PINNED BIT(1) - int (*emit_init_breadcrumb)(struct i915_request *rq); - u32 *(*emit_fini_breadcrumb)(struct i915_request *rq, - u32 *cs); - unsigned int emit_fini_breadcrumb_dw; - - /* Pass the request to the hardware queue (e.g. directly into - * the legacy ringbuffer or to the end of an execlist). - * - * This is called from an atomic context with irqs disabled; must - * be irq safe. - */ - void (*submit_request)(struct i915_request *rq); - - /* - * Call when the priority on a request has changed and it and its - * dependencies may need rescheduling. Note the request itself may - * not be ready to run! - */ - void (*schedule)(struct i915_request *request, - const struct i915_sched_attr *attr); - - /* - * Cancel all requests on the hardware, or queued for execution. - * This should only cancel the ready requests that have been - * submitted to the engine (via the engine->submit_request callback). - * This is called when marking the device as wedged. - */ - void (*cancel_requests)(struct intel_engine_cs *engine); - - void (*cleanup)(struct intel_engine_cs *engine); - - struct intel_engine_execlists execlists; - - /* Contexts are pinned whilst they are active on the GPU. The last - * context executed remains active whilst the GPU is idle - the - * switch away and write to the context object only occurs on the - * next execution. 
Contexts are only unpinned on retirement of the - * following request ensuring that we can always write to the object - * on the context switch even after idling. Across suspend, we switch - * to the kernel context and trash it as the save may not happen - * before the hardware is powered down. - */ - struct intel_context *last_retired_context; - - /* status_notifier: list of callbacks for context-switch changes */ - struct atomic_notifier_head context_status_notifier; - - struct intel_engine_hangcheck hangcheck; - -#define I915_ENGINE_NEEDS_CMD_PARSER BIT(0) -#define I915_ENGINE_SUPPORTS_STATS BIT(1) -#define I915_ENGINE_HAS_PREEMPTION BIT(2) - unsigned int flags; - - /* - * Table of commands the command parser needs to know about - * for this engine. - */ - DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER); - - /* - * Table of registers allowed in commands that read/write registers. - */ - const struct drm_i915_reg_table *reg_tables; - int reg_table_count; - - /* - * Returns the bitmask for the length field of the specified command. - * Return 0 for an unrecognized/invalid command. - * - * If the command parser finds an entry for a command in the engine's - * cmd_tables, it gets the command's length based on the table entry. - * If not, it calls this function to determine the per-engine length - * field encoding for the command (i.e. different opcode ranges use - * certain bits to encode the command length in the header). - */ - u32 (*get_cmd_length_mask)(u32 cmd_header); - - struct { - /** - * @lock: Lock protecting the below fields. - */ - seqlock_t lock; - /** - * @enabled: Reference count indicating number of listeners. - */ - unsigned int enabled; - /** - * @active: Number of contexts currently scheduled in. - */ - unsigned int active; - /** - * @enabled_at: Timestamp when busy stats were enabled. - */ - ktime_t enabled_at; - /** - * @start: Timestamp of the last idle to active transition. - * - * Idle is defined as active == 0, active is active > 0. - */ - ktime_t start; - /** - * @total: Total time this engine was busy. - * - * Accumulated time not counting the most recent block in cases - * where engine is currently busy (active > 0). - */ - ktime_t total; - } stats; -}; - -static inline bool -intel_engine_needs_cmd_parser(const struct intel_engine_cs *engine) -{ - return engine->flags & I915_ENGINE_NEEDS_CMD_PARSER; -} - -static inline bool -intel_engine_supports_stats(const struct intel_engine_cs *engine) -{ - return engine->flags & I915_ENGINE_SUPPORTS_STATS; -} - -static inline bool -intel_engine_has_preemption(const struct intel_engine_cs *engine) -{ - return engine->flags & I915_ENGINE_HAS_PREEMPTION; -} +void intel_engines_set_scheduler_caps(struct drm_i915_private *i915); static inline bool __execlists_need_preempt(int prio, int last) { @@ -674,12 +189,6 @@ execlists_port_complete(struct intel_engine_execlists * const execlists, return port; } -static inline unsigned int -intel_engine_flag(const struct intel_engine_cs *engine) -{ - return BIT(engine->id); -} - static inline u32 intel_read_status_page(const struct intel_engine_cs *engine, int reg) { @@ -722,10 +231,10 @@ intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value) * * The area from dword 0x30 to 0x3ff is available for driver usage. 
*/ -#define I915_GEM_HWS_INDEX 0x30 -#define I915_GEM_HWS_INDEX_ADDR (I915_GEM_HWS_INDEX * sizeof(u32)) #define I915_GEM_HWS_PREEMPT 0x32 #define I915_GEM_HWS_PREEMPT_ADDR (I915_GEM_HWS_PREEMPT * sizeof(u32)) +#define I915_GEM_HWS_HANGCHECK 0x34 +#define I915_GEM_HWS_HANGCHECK_ADDR (I915_GEM_HWS_HANGCHECK * sizeof(u32)) #define I915_GEM_HWS_SEQNO 0x40 #define I915_GEM_HWS_SEQNO_ADDR (I915_GEM_HWS_SEQNO * sizeof(u32)) #define I915_GEM_HWS_SCRATCH 0x80 @@ -743,7 +252,18 @@ int intel_ring_pin(struct intel_ring *ring); void intel_ring_reset(struct intel_ring *ring, u32 tail); unsigned int intel_ring_update_space(struct intel_ring *ring); void intel_ring_unpin(struct intel_ring *ring); -void intel_ring_free(struct intel_ring *ring); +void intel_ring_free(struct kref *ref); + +static inline struct intel_ring *intel_ring_get(struct intel_ring *ring) +{ + kref_get(&ring->ref); + return ring; +} + +static inline void intel_ring_put(struct intel_ring *ring) +{ + kref_put(&ring->ref, intel_ring_free); +} void intel_engine_stop(struct intel_engine_cs *engine); void intel_engine_cleanup(struct intel_engine_cs *engine); @@ -844,8 +364,6 @@ __intel_ring_space(unsigned int head, unsigned int tail, unsigned int size) return (head - tail - CACHELINE_BYTES) & (size - 1); } -void intel_engine_write_global_seqno(struct intel_engine_cs *engine, u32 seqno); - int intel_engine_setup_common(struct intel_engine_cs *engine); int intel_engine_init_common(struct intel_engine_cs *engine); void intel_engine_cleanup_common(struct intel_engine_cs *engine); @@ -863,44 +381,6 @@ void intel_engine_set_hwsp_writemask(struct intel_engine_cs *engine, u32 mask); u64 intel_engine_get_active_head(const struct intel_engine_cs *engine); u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine); -static inline u32 intel_engine_last_submit(struct intel_engine_cs *engine) -{ - /* - * We are only peeking at the tail of the submit queue (and not the - * queue itself) in order to gain a hint as to the current active - * state of the engine. Callers are not expected to be taking - * engine->timeline->lock, nor are they expected to be concerned - * wtih serialising this hint with anything, so document it as - * a hint and nothing more. - */ - return READ_ONCE(engine->timeline.seqno); -} - -static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine) -{ - return intel_read_status_page(engine, I915_GEM_HWS_INDEX); -} - -static inline bool intel_engine_signaled(struct intel_engine_cs *engine, - u32 seqno) -{ - return i915_seqno_passed(intel_engine_get_seqno(engine), seqno); -} - -static inline bool intel_engine_has_completed(struct intel_engine_cs *engine, - u32 seqno) -{ - GEM_BUG_ON(!seqno); - return intel_engine_signaled(engine, seqno); -} - -static inline bool intel_engine_has_started(struct intel_engine_cs *engine, - u32 seqno) -{ - GEM_BUG_ON(!seqno); - return intel_engine_signaled(engine, seqno - 1); -} - void intel_engine_get_instdone(struct intel_engine_cs *engine, struct intel_instdone *instdone); @@ -960,14 +440,14 @@ gen8_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset, u32 flags) } static inline u32 * -gen8_emit_ggtt_write(u32 *cs, u32 value, u32 gtt_offset) +gen8_emit_ggtt_write(u32 *cs, u32 value, u32 gtt_offset, u32 flags) { /* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. 
*/ GEM_BUG_ON(gtt_offset & (1 << 5)); /* Offset should be aligned to 8 bytes for both (QW/DW) write types */ GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8)); - *cs++ = (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW; + *cs++ = (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW | flags; *cs++ = gtt_offset | MI_FLUSH_DW_USE_GTT; *cs++ = 0; *cs++ = value; @@ -987,7 +467,6 @@ void intel_engines_sanitize(struct drm_i915_private *i915, bool force); bool intel_engine_is_idle(struct intel_engine_cs *engine); bool intel_engines_are_idle(struct drm_i915_private *dev_priv); -bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine); void intel_engine_lost_context(struct intel_engine_cs *engine); void intel_engines_park(struct drm_i915_private *i915); @@ -1066,6 +545,9 @@ void intel_disable_engine_stats(struct intel_engine_cs *engine); ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine); +struct i915_request * +intel_engine_find_active_request(struct intel_engine_cs *engine); + #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) static inline bool inject_preempt_hang(struct intel_engine_execlists *execlists) @@ -1086,4 +568,17 @@ static inline bool inject_preempt_hang(struct intel_engine_execlists *execlists) #endif +static inline u32 +intel_engine_next_hangcheck_seqno(struct intel_engine_cs *engine) +{ + return engine->hangcheck.next_seqno = + next_pseudo_random32(engine->hangcheck.next_seqno); +} + +static inline u32 +intel_engine_get_hangcheck_seqno(struct intel_engine_cs *engine) +{ + return intel_read_status_page(engine, I915_GEM_HWS_HANGCHECK); +} + #endif /* _INTEL_RINGBUFFER_H_ */ diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index a017a4232c0f..40ddfbb97acb 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -565,7 +565,7 @@ static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv, int pw_idx = power_well->desc->hsw.idx; /* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */ - WARN_ON(intel_wait_for_register(dev_priv, + WARN_ON(intel_wait_for_register(&dev_priv->uncore, regs->driver, HSW_PWR_WELL_CTL_STATE(pw_idx), HSW_PWR_WELL_CTL_STATE(pw_idx), @@ -620,7 +620,7 @@ static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv, enum skl_power_gate pg) { /* Timeout 5us for PG#0, for other PGs 1us */ - WARN_ON(intel_wait_for_register(dev_priv, SKL_FUSE_STATUS, + WARN_ON(intel_wait_for_register(&dev_priv->uncore, SKL_FUSE_STATUS, SKL_FUSE_PG_DIST_STATUS(pg), SKL_FUSE_PG_DIST_STATUS(pg), 1)); } @@ -1521,7 +1521,7 @@ static void assert_chv_phy_status(struct drm_i915_private *dev_priv) * The PHY may be busy with some initial calibration and whatnot, * so the power state can take a while to actually change. 
*/ - if (intel_wait_for_register(dev_priv, + if (intel_wait_for_register(&dev_priv->uncore, DISPLAY_PHY_STATUS, phy_status_mask, phy_status, @@ -1556,7 +1556,7 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv, vlv_set_power_well(dev_priv, power_well, true); /* Poll for phypwrgood signal */ - if (intel_wait_for_register(dev_priv, + if (intel_wait_for_register(&dev_priv->uncore, DISPLAY_PHY_STATUS, PHY_POWERGOOD(phy), PHY_POWERGOOD(phy), @@ -1760,7 +1760,7 @@ static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv, mutex_lock(&dev_priv->pcu_lock); - state = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe); + state = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe); /* * We only ever set the power-on and power-gate states, anything * else is unexpected. @@ -1772,7 +1772,7 @@ static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv, * A transient state at this point would mean some unexpected party * is poking at the power controls too. */ - ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSC_MASK(pipe); + ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSC_MASK(pipe); WARN_ON(ctrl << 16 != state); mutex_unlock(&dev_priv->pcu_lock); @@ -1793,20 +1793,20 @@ static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv, mutex_lock(&dev_priv->pcu_lock); #define COND \ - ((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe)) == state) + ((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe)) == state) if (COND) goto out; - ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ); + ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM); ctrl &= ~DP_SSC_MASK(pipe); ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe); - vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, ctrl); + vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, ctrl); if (wait_for(COND, 100)) DRM_ERROR("timeout setting power well state %08x (%08x)\n", state, - vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ)); + vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM)); #undef COND @@ -3576,7 +3576,11 @@ static void icl_dbuf_enable(struct drm_i915_private *dev_priv) !(I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE)) DRM_ERROR("DBuf power enable timeout\n"); else - dev_priv->wm.skl_hw.ddb.enabled_slices = 2; + /* + * FIXME: for now pretend that we only have 1 slice, see + * intel_enabled_dbuf_slices_num(). + */ + dev_priv->wm.skl_hw.ddb.enabled_slices = 1; } static void icl_dbuf_disable(struct drm_i915_private *dev_priv) @@ -3591,7 +3595,11 @@ static void icl_dbuf_disable(struct drm_i915_private *dev_priv) (I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE)) DRM_ERROR("DBuf power disable timeout!\n"); else - dev_priv->wm.skl_hw.ddb.enabled_slices = 0; + /* + * FIXME: for now pretend that the first slice is always + * enabled, see intel_enabled_dbuf_slices_num(). 
+ */ + dev_priv->wm.skl_hw.ddb.enabled_slices = 1; } static void icl_mbus_init(struct drm_i915_private *dev_priv) @@ -3993,6 +4001,36 @@ static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv) cmn->desc->ops->disable(dev_priv, cmn); } +static bool vlv_punit_is_power_gated(struct drm_i915_private *dev_priv, u32 reg0) +{ + bool ret; + + mutex_lock(&dev_priv->pcu_lock); + ret = (vlv_punit_read(dev_priv, reg0) & SSPM0_SSC_MASK) == SSPM0_SSC_PWR_GATE; + mutex_unlock(&dev_priv->pcu_lock); + + return ret; +} + +static void assert_ved_power_gated(struct drm_i915_private *dev_priv) +{ + WARN(!vlv_punit_is_power_gated(dev_priv, PUNIT_REG_VEDSSPM0), + "VED not power gated\n"); +} + +static void assert_isp_power_gated(struct drm_i915_private *dev_priv) +{ + static const struct pci_device_id isp_ids[] = { + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0f38)}, + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x22b8)}, + {} + }; + + WARN(!pci_dev_present(isp_ids) && + !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_ISPSSPM0), + "ISP not power gated\n"); +} + static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv); /** @@ -4029,10 +4067,13 @@ void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume) mutex_lock(&power_domains->lock); chv_phy_control_init(i915); mutex_unlock(&power_domains->lock); + assert_isp_power_gated(i915); } else if (IS_VALLEYVIEW(i915)) { mutex_lock(&power_domains->lock); vlv_cmnlane_wa(i915); mutex_unlock(&power_domains->lock); + assert_ved_power_gated(i915); + assert_isp_power_gated(i915); } else if (IS_IVYBRIDGE(i915) || INTEL_GEN(i915) >= 7) { intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915)); } diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index e7b0884ba5a5..68f497493d43 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -978,34 +978,109 @@ static bool intel_sdvo_write_infoframe(struct intel_sdvo *intel_sdvo, &tx_rate, 1); } -static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo, - const struct intel_crtc_state *pipe_config, - const struct drm_connector_state *conn_state) +static ssize_t intel_sdvo_read_infoframe(struct intel_sdvo *intel_sdvo, + unsigned int if_index, + u8 *data, unsigned int length) +{ + u8 set_buf_index[2] = { if_index, 0 }; + u8 hbuf_size, tx_rate, av_split; + int i; + + if (!intel_sdvo_get_value(intel_sdvo, + SDVO_CMD_GET_HBUF_AV_SPLIT, + &av_split, 1)) + return -ENXIO; + + if (av_split < if_index) + return 0; + + if (!intel_sdvo_get_value(intel_sdvo, + SDVO_CMD_GET_HBUF_TXRATE, + &tx_rate, 1)) + return -ENXIO; + + if (tx_rate == SDVO_HBUF_TX_DISABLED) + return 0; + + if (!intel_sdvo_set_value(intel_sdvo, + SDVO_CMD_SET_HBUF_INDEX, + set_buf_index, 2)) + return -ENXIO; + + if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HBUF_INFO, + &hbuf_size, 1)) + return -ENXIO; + + /* Buffer size is 0 based, hooray! 
*/ + hbuf_size++; + + DRM_DEBUG_KMS("reading sdvo hbuf: %i, hbuf_size %i, hbuf_size: %i\n", + if_index, length, hbuf_size); + + hbuf_size = min_t(unsigned int, length, hbuf_size); + + for (i = 0; i < hbuf_size; i += 8) { + if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_HBUF_DATA, NULL, 0)) + return -ENXIO; + if (!intel_sdvo_read_response(intel_sdvo, &data[i], + min_t(unsigned int, 8, hbuf_size - i))) + return -ENXIO; + } + + return hbuf_size; +} + +static bool intel_sdvo_compute_avi_infoframe(struct intel_sdvo *intel_sdvo, + struct intel_crtc_state *crtc_state, + struct drm_connector_state *conn_state) { + struct hdmi_avi_infoframe *frame = &crtc_state->infoframes.avi.avi; const struct drm_display_mode *adjusted_mode = - &pipe_config->base.adjusted_mode; - u8 sdvo_data[HDMI_INFOFRAME_SIZE(AVI)]; - union hdmi_infoframe frame; + &crtc_state->base.adjusted_mode; int ret; - ssize_t len; - ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi, + if (!crtc_state->has_hdmi_sink) + return true; + + crtc_state->infoframes.enable |= + intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI); + + ret = drm_hdmi_avi_infoframe_from_display_mode(frame, conn_state->connector, adjusted_mode); - if (ret < 0) { - DRM_ERROR("couldn't fill AVI infoframe\n"); + if (ret) return false; - } - drm_hdmi_avi_infoframe_quant_range(&frame.avi, + drm_hdmi_avi_infoframe_quant_range(frame, conn_state->connector, adjusted_mode, - pipe_config->limited_color_range ? + crtc_state->limited_color_range ? HDMI_QUANTIZATION_RANGE_LIMITED : HDMI_QUANTIZATION_RANGE_FULL); - len = hdmi_infoframe_pack(&frame, sdvo_data, sizeof(sdvo_data)); - if (len < 0) + ret = hdmi_avi_infoframe_check(frame); + if (WARN_ON(ret)) + return false; + + return true; +} + +static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo, + const struct intel_crtc_state *crtc_state) +{ + u8 sdvo_data[HDMI_INFOFRAME_SIZE(AVI)]; + const union hdmi_infoframe *frame = &crtc_state->infoframes.avi; + ssize_t len; + + if ((crtc_state->infoframes.enable & + intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI)) == 0) + return true; + + if (WARN_ON(frame->any.type != HDMI_INFOFRAME_TYPE_AVI)) + return false; + + len = hdmi_infoframe_pack_only(frame, sdvo_data, sizeof(sdvo_data)); + if (WARN_ON(len < 0)) return false; return intel_sdvo_write_infoframe(intel_sdvo, SDVO_HBUF_INDEX_AVI_IF, @@ -1013,6 +1088,40 @@ static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo, sdvo_data, sizeof(sdvo_data)); } +static void intel_sdvo_get_avi_infoframe(struct intel_sdvo *intel_sdvo, + struct intel_crtc_state *crtc_state) +{ + u8 sdvo_data[HDMI_INFOFRAME_SIZE(AVI)]; + union hdmi_infoframe *frame = &crtc_state->infoframes.avi; + ssize_t len; + int ret; + + if (!crtc_state->has_hdmi_sink) + return; + + len = intel_sdvo_read_infoframe(intel_sdvo, SDVO_HBUF_INDEX_AVI_IF, + sdvo_data, sizeof(sdvo_data)); + if (len < 0) { + DRM_DEBUG_KMS("failed to read AVI infoframe\n"); + return; + } else if (len == 0) { + return; + } + + crtc_state->infoframes.enable |= + intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI); + + ret = hdmi_infoframe_unpack(frame, sdvo_data, sizeof(sdvo_data)); + if (ret) { + DRM_DEBUG_KMS("Failed to unpack AVI infoframe\n"); + return; + } + + if (frame->any.type != HDMI_INFOFRAME_TYPE_AVI) + DRM_DEBUG_KMS("Found the wrong infoframe type 0x%x (expected 0x%02x)\n", + frame->any.type, HDMI_INFOFRAME_TYPE_AVI); +} + static bool intel_sdvo_set_tv_format(struct intel_sdvo *intel_sdvo, const struct drm_connector_state *conn_state) { @@ -1193,6 
+1302,12 @@ static int intel_sdvo_compute_config(struct intel_encoder *encoder, if (intel_sdvo_connector->is_hdmi) adjusted_mode->picture_aspect_ratio = conn_state->picture_aspect_ratio; + if (!intel_sdvo_compute_avi_infoframe(intel_sdvo, + pipe_config, conn_state)) { + DRM_DEBUG_KMS("bad AVI infoframe\n"); + return -EINVAL; + } + return 0; } @@ -1315,8 +1430,7 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder, intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_HDMI); intel_sdvo_set_colorimetry(intel_sdvo, SDVO_COLORIMETRY_RGB256); - intel_sdvo_set_avi_infoframe(intel_sdvo, - crtc_state, conn_state); + intel_sdvo_set_avi_infoframe(intel_sdvo, crtc_state); } else intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_DVI); @@ -1507,6 +1621,10 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder, } } + WARN(encoder_pixel_multiplier != pipe_config->pixel_multiplier, + "SDVO pixel multiplier mismatch, port: %i, encoder: %i\n", + pipe_config->pixel_multiplier, encoder_pixel_multiplier); + if (sdvox & HDMI_COLOR_RANGE_16_235) pipe_config->limited_color_range = true; @@ -1519,9 +1637,7 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder, pipe_config->has_hdmi_sink = true; } - WARN(encoder_pixel_multiplier != pipe_config->pixel_multiplier, - "SDVO pixel multiplier mismatch, port: %i, encoder: %i\n", - pipe_config->pixel_multiplier, encoder_pixel_multiplier); + intel_sdvo_get_avi_infoframe(intel_sdvo, pipe_config); } static void intel_disable_sdvo(struct intel_encoder *encoder, diff --git a/drivers/gpu/drm/i915/intel_sideband.c b/drivers/gpu/drm/i915/intel_sideband.c index 75c872bb8cc9..57de41b1f989 100644 --- a/drivers/gpu/drm/i915/intel_sideband.c +++ b/drivers/gpu/drm/i915/intel_sideband.c @@ -51,7 +51,7 @@ static int vlv_sideband_rw(struct drm_i915_private *dev_priv, u32 devfn, WARN_ON(!mutex_is_locked(&dev_priv->sb_lock)); - if (intel_wait_for_register(dev_priv, + if (intel_wait_for_register(&dev_priv->uncore, VLV_IOSF_DOORBELL_REQ, IOSF_SB_BUSY, 0, 5)) { DRM_DEBUG_DRIVER("IOSF sideband idle wait (%s) timed out\n", @@ -63,7 +63,7 @@ static int vlv_sideband_rw(struct drm_i915_private *dev_priv, u32 devfn, I915_WRITE(VLV_IOSF_DATA, is_read ? 
0 : *val); I915_WRITE(VLV_IOSF_DOORBELL_REQ, cmd); - if (intel_wait_for_register(dev_priv, + if (intel_wait_for_register(&dev_priv->uncore, VLV_IOSF_DOORBELL_REQ, IOSF_SB_BUSY, 0, 5)) { DRM_DEBUG_DRIVER("IOSF sideband finish wait (%s) timed out\n", @@ -208,7 +208,7 @@ u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg, u32 value = 0; WARN_ON(!mutex_is_locked(&dev_priv->sb_lock)); - if (intel_wait_for_register(dev_priv, + if (intel_wait_for_register(&dev_priv->uncore, SBI_CTL_STAT, SBI_BUSY, 0, 100)) { DRM_ERROR("timeout waiting for SBI to become ready\n"); @@ -224,7 +224,7 @@ u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg, value = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IORD; I915_WRITE(SBI_CTL_STAT, value | SBI_BUSY); - if (intel_wait_for_register(dev_priv, + if (intel_wait_for_register(&dev_priv->uncore, SBI_CTL_STAT, SBI_BUSY, 0, @@ -248,7 +248,7 @@ void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value, WARN_ON(!mutex_is_locked(&dev_priv->sb_lock)); - if (intel_wait_for_register(dev_priv, + if (intel_wait_for_register(&dev_priv->uncore, SBI_CTL_STAT, SBI_BUSY, 0, 100)) { DRM_ERROR("timeout waiting for SBI to become ready\n"); @@ -264,7 +264,7 @@ void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value, tmp = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IOWR; I915_WRITE(SBI_CTL_STAT, SBI_BUSY | tmp); - if (intel_wait_for_register(dev_priv, + if (intel_wait_for_register(&dev_priv->uncore, SBI_CTL_STAT, SBI_BUSY, 0, diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index 53174d579574..65de7387bf1b 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c @@ -269,7 +269,8 @@ int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state) { const struct drm_framebuffer *fb = plane_state->base.fb; struct drm_rect *src = &plane_state->base.src; - u32 src_x, src_y, src_w, src_h; + u32 src_x, src_y, src_w, src_h, hsub, vsub; + bool rotated = drm_rotation_90_or_270(plane_state->base.rotation); /* * Hardware doesn't handle subpixel coordinates. @@ -287,18 +288,26 @@ int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state) src->y1 = src_y << 16; src->y2 = (src_y + src_h) << 16; - if (fb->format->is_yuv && - (src_x & 1 || src_w & 1)) { - DRM_DEBUG_KMS("src x/w (%u, %u) must be a multiple of 2 for YUV planes\n", - src_x, src_w); + if (!fb->format->is_yuv) + return 0; + + /* YUV specific checks */ + if (!rotated) { + hsub = fb->format->hsub; + vsub = fb->format->vsub; + } else { + hsub = vsub = max(fb->format->hsub, fb->format->vsub); + } + + if (src_x % hsub || src_w % hsub) { + DRM_DEBUG_KMS("src x/w (%u, %u) must be a multiple of %u for %sYUV planes\n", + src_x, src_w, hsub, rotated ? "rotated " : ""); return -EINVAL; } - if (fb->format->is_yuv && - fb->format->num_planes > 1 && - (src_y & 1 || src_h & 1)) { - DRM_DEBUG_KMS("src y/h (%u, %u) must be a multiple of 2 for planar YUV planes\n", - src_y, src_h); + if (src_y % vsub || src_h % vsub) { + DRM_DEBUG_KMS("src y/h (%u, %u) must be a multiple of %u for %sYUV planes\n", + src_y, src_h, vsub, rotated ? 
"rotated " : ""); return -EINVAL; } @@ -622,6 +631,9 @@ skl_disable_plane(struct intel_plane *plane, spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); + if (icl_is_hdr_plane(dev_priv, plane_id)) + I915_WRITE_FW(PLANE_CUS_CTL(pipe, plane_id), 0); + skl_write_plane_wm(plane, crtc_state); I915_WRITE_FW(PLANE_CTL(pipe, plane_id), 0); @@ -754,7 +766,12 @@ vlv_update_clrc(const struct intel_plane_state *plane_state) static u32 vlv_sprite_ctl_crtc(const struct intel_crtc_state *crtc_state) { - return SP_GAMMA_ENABLE; + u32 sprctl = 0; + + if (crtc_state->gamma_enable) + sprctl |= SP_GAMMA_ENABLE; + + return sprctl; } static u32 vlv_sprite_ctl(const struct intel_crtc_state *crtc_state, @@ -929,12 +946,12 @@ vlv_plane_get_hw_state(struct intel_plane *plane, static u32 ivb_sprite_ctl_crtc(const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); u32 sprctl = 0; - sprctl |= SPRITE_GAMMA_ENABLE; + if (crtc_state->gamma_enable) + sprctl |= SPRITE_GAMMA_ENABLE; - if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) + if (crtc_state->csc_enable) sprctl |= SPRITE_PIPE_CSC_ENABLE; return sprctl; @@ -1120,7 +1137,15 @@ g4x_sprite_max_stride(struct intel_plane *plane, static u32 g4x_sprite_ctl_crtc(const struct intel_crtc_state *crtc_state) { - return DVS_GAMMA_ENABLE; + u32 dvscntr = 0; + + if (crtc_state->gamma_enable) + dvscntr |= DVS_GAMMA_ENABLE; + + if (crtc_state->csc_enable) + dvscntr |= DVS_PIPE_CSC_ENABLE; + + return dvscntr; } static u32 g4x_sprite_ctl(const struct intel_crtc_state *crtc_state, @@ -1506,6 +1531,11 @@ static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state, case DRM_FORMAT_XBGR16161616F: case DRM_FORMAT_ARGB16161616F: case DRM_FORMAT_ABGR16161616F: + case DRM_FORMAT_Y210: + case DRM_FORMAT_Y212: + case DRM_FORMAT_Y216: + case DRM_FORMAT_XVYU12_16161616: + case DRM_FORMAT_XVYU16161616: DRM_DEBUG_KMS("Unsupported pixel format %s for 90/270!\n", drm_get_format_name(fb->format->format, &format_name)); @@ -1805,7 +1835,7 @@ static const u32 skl_plane_formats[] = { DRM_FORMAT_VYUY, }; -static const uint32_t icl_plane_formats[] = { +static const u32 icl_plane_formats[] = { DRM_FORMAT_C8, DRM_FORMAT_RGB565, DRM_FORMAT_XRGB8888, @@ -1826,7 +1856,7 @@ static const uint32_t icl_plane_formats[] = { DRM_FORMAT_XVYU16161616, }; -static const uint32_t icl_hdr_plane_formats[] = { +static const u32 icl_hdr_plane_formats[] = { DRM_FORMAT_C8, DRM_FORMAT_RGB565, DRM_FORMAT_XRGB8888, @@ -1867,7 +1897,7 @@ static const u32 skl_planar_formats[] = { DRM_FORMAT_NV12, }; -static const uint32_t glk_planar_formats[] = { +static const u32 glk_planar_formats[] = { DRM_FORMAT_C8, DRM_FORMAT_RGB565, DRM_FORMAT_XRGB8888, @@ -1886,7 +1916,7 @@ static const uint32_t glk_planar_formats[] = { DRM_FORMAT_P016, }; -static const uint32_t icl_planar_formats[] = { +static const u32 icl_planar_formats[] = { DRM_FORMAT_C8, DRM_FORMAT_RGB565, DRM_FORMAT_XRGB8888, @@ -1911,7 +1941,7 @@ static const uint32_t icl_planar_formats[] = { DRM_FORMAT_XVYU16161616, }; -static const uint32_t icl_hdr_planar_formats[] = { +static const u32 icl_hdr_planar_formats[] = { DRM_FORMAT_C8, DRM_FORMAT_RGB565, DRM_FORMAT_XRGB8888, @@ -2082,12 +2112,7 @@ static bool skl_plane_format_mod_supported(struct drm_plane *_plane, case DRM_FORMAT_P010: case DRM_FORMAT_P012: case DRM_FORMAT_P016: - case DRM_FORMAT_Y210: - case DRM_FORMAT_Y212: - case DRM_FORMAT_Y216: case DRM_FORMAT_XVYU2101010: - case DRM_FORMAT_XVYU12_16161616: - case DRM_FORMAT_XVYU16161616: if (modifier == 
I915_FORMAT_MOD_Yf_TILED) return true; /* fall through */ @@ -2096,6 +2121,11 @@ static bool skl_plane_format_mod_supported(struct drm_plane *_plane, case DRM_FORMAT_ABGR16161616F: case DRM_FORMAT_XRGB16161616F: case DRM_FORMAT_ARGB16161616F: + case DRM_FORMAT_Y210: + case DRM_FORMAT_Y212: + case DRM_FORMAT_Y216: + case DRM_FORMAT_XVYU12_16161616: + case DRM_FORMAT_XVYU16161616: if (modifier == DRM_FORMAT_MOD_LINEAR || modifier == I915_FORMAT_MOD_X_TILED || modifier == I915_FORMAT_MOD_Y_TILED) diff --git a/drivers/gpu/drm/i915/intel_uc.c b/drivers/gpu/drm/i915/intel_uc.c index e711eb3268bc..2d360d53757f 100644 --- a/drivers/gpu/drm/i915/intel_uc.c +++ b/drivers/gpu/drm/i915/intel_uc.c @@ -332,8 +332,6 @@ void intel_uc_sanitize(struct drm_i915_private *i915) GEM_BUG_ON(!HAS_GUC(i915)); - guc_disable_communication(guc); - intel_huc_sanitize(huc); intel_guc_sanitize(guc); @@ -451,6 +449,23 @@ void intel_uc_fini_hw(struct drm_i915_private *i915) guc_disable_communication(guc); } +/** + * intel_uc_reset_prepare - Prepare for reset + * @i915: device private + * + * Preparing for full gpu reset. + */ +void intel_uc_reset_prepare(struct drm_i915_private *i915) +{ + struct intel_guc *guc = &i915->guc; + + if (!USES_GUC(i915)) + return; + + guc_disable_communication(guc); + intel_uc_sanitize(i915); +} + int intel_uc_suspend(struct drm_i915_private *i915) { struct intel_guc *guc = &i915->guc; @@ -468,7 +483,7 @@ int intel_uc_suspend(struct drm_i915_private *i915) return err; } - gen9_disable_guc_interrupts(i915); + guc_disable_communication(guc); return 0; } @@ -484,7 +499,7 @@ int intel_uc_resume(struct drm_i915_private *i915) if (guc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS) return 0; - gen9_enable_guc_interrupts(i915); + guc_enable_communication(guc); err = intel_guc_resume(guc); if (err) { diff --git a/drivers/gpu/drm/i915/intel_uc.h b/drivers/gpu/drm/i915/intel_uc.h index 870faf9011b9..c14729786652 100644 --- a/drivers/gpu/drm/i915/intel_uc.h +++ b/drivers/gpu/drm/i915/intel_uc.h @@ -38,6 +38,7 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv); void intel_uc_fini_hw(struct drm_i915_private *dev_priv); int intel_uc_init(struct drm_i915_private *dev_priv); void intel_uc_fini(struct drm_i915_private *dev_priv); +void intel_uc_reset_prepare(struct drm_i915_private *i915); int intel_uc_suspend(struct drm_i915_private *dev_priv); int intel_uc_resume(struct drm_i915_private *dev_priv); diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index 75646a1e0051..5c80704bf283 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -31,7 +31,7 @@ #define FORCEWAKE_ACK_TIMEOUT_MS 50 #define GT_FIFO_TIMEOUT_MS 10 -#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32((dev_priv__), (reg__)) +#define __raw_posting_read(...) 
((void)__raw_uncore_read32(__VA_ARGS__)) static const char * const forcewake_domain_names[] = { "render", @@ -58,16 +58,20 @@ intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id) return "unknown"; } +#define fw_ack(d) readl((d)->reg_ack) +#define fw_set(d, val) writel(_MASKED_BIT_ENABLE((val)), (d)->reg_set) +#define fw_clear(d, val) writel(_MASKED_BIT_DISABLE((val)), (d)->reg_set) + static inline void -fw_domain_reset(struct drm_i915_private *i915, - const struct intel_uncore_forcewake_domain *d) +fw_domain_reset(const struct intel_uncore_forcewake_domain *d) { /* * We don't really know if the powerwell for the forcewake domain we are * trying to reset here does exist at this point (engines could be fused * off in ICL+), so no waiting for acks */ - __raw_i915_write32(i915, d->reg_set, i915->uncore.fw_reset); + /* WaRsClearFWBitsAtReset:bdw,skl */ + fw_clear(d, 0xffff); } static inline void @@ -81,36 +85,32 @@ fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d) } static inline int -__wait_for_ack(const struct drm_i915_private *i915, - const struct intel_uncore_forcewake_domain *d, +__wait_for_ack(const struct intel_uncore_forcewake_domain *d, const u32 ack, const u32 value) { - return wait_for_atomic((__raw_i915_read32(i915, d->reg_ack) & ack) == value, + return wait_for_atomic((fw_ack(d) & ack) == value, FORCEWAKE_ACK_TIMEOUT_MS); } static inline int -wait_ack_clear(const struct drm_i915_private *i915, - const struct intel_uncore_forcewake_domain *d, +wait_ack_clear(const struct intel_uncore_forcewake_domain *d, const u32 ack) { - return __wait_for_ack(i915, d, ack, 0); + return __wait_for_ack(d, ack, 0); } static inline int -wait_ack_set(const struct drm_i915_private *i915, - const struct intel_uncore_forcewake_domain *d, +wait_ack_set(const struct intel_uncore_forcewake_domain *d, const u32 ack) { - return __wait_for_ack(i915, d, ack, ack); + return __wait_for_ack(d, ack, ack); } static inline void -fw_domain_wait_ack_clear(const struct drm_i915_private *i915, - const struct intel_uncore_forcewake_domain *d) +fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d) { - if (wait_ack_clear(i915, d, FORCEWAKE_KERNEL)) + if (wait_ack_clear(d, FORCEWAKE_KERNEL)) DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n", intel_uncore_forcewake_domain_to_str(d->id)); } @@ -121,8 +121,7 @@ enum ack_type { }; static int -fw_domain_wait_ack_with_fallback(const struct drm_i915_private *i915, - const struct intel_uncore_forcewake_domain *d, +fw_domain_wait_ack_with_fallback(const struct intel_uncore_forcewake_domain *d, const enum ack_type type) { const u32 ack_bit = FORCEWAKE_KERNEL; @@ -146,129 +145,122 @@ fw_domain_wait_ack_with_fallback(const struct drm_i915_private *i915, pass = 1; do { - wait_ack_clear(i915, d, FORCEWAKE_KERNEL_FALLBACK); + wait_ack_clear(d, FORCEWAKE_KERNEL_FALLBACK); - __raw_i915_write32(i915, d->reg_set, - _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL_FALLBACK)); + fw_set(d, FORCEWAKE_KERNEL_FALLBACK); /* Give gt some time to relax before the polling frenzy */ udelay(10 * pass); - wait_ack_set(i915, d, FORCEWAKE_KERNEL_FALLBACK); + wait_ack_set(d, FORCEWAKE_KERNEL_FALLBACK); - ack_detected = (__raw_i915_read32(i915, d->reg_ack) & ack_bit) == value; + ack_detected = (fw_ack(d) & ack_bit) == value; - __raw_i915_write32(i915, d->reg_set, - _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL_FALLBACK)); + fw_clear(d, FORCEWAKE_KERNEL_FALLBACK); } while (!ack_detected && pass++ < 10); DRM_DEBUG_DRIVER("%s had to use fallback to %s ack, 0x%x (passes 
%u)\n", intel_uncore_forcewake_domain_to_str(d->id), type == ACK_SET ? "set" : "clear", - __raw_i915_read32(i915, d->reg_ack), + fw_ack(d), pass); return ack_detected ? 0 : -ETIMEDOUT; } static inline void -fw_domain_wait_ack_clear_fallback(const struct drm_i915_private *i915, - const struct intel_uncore_forcewake_domain *d) +fw_domain_wait_ack_clear_fallback(const struct intel_uncore_forcewake_domain *d) { - if (likely(!wait_ack_clear(i915, d, FORCEWAKE_KERNEL))) + if (likely(!wait_ack_clear(d, FORCEWAKE_KERNEL))) return; - if (fw_domain_wait_ack_with_fallback(i915, d, ACK_CLEAR)) - fw_domain_wait_ack_clear(i915, d); + if (fw_domain_wait_ack_with_fallback(d, ACK_CLEAR)) + fw_domain_wait_ack_clear(d); } static inline void -fw_domain_get(struct drm_i915_private *i915, - const struct intel_uncore_forcewake_domain *d) +fw_domain_get(const struct intel_uncore_forcewake_domain *d) { - __raw_i915_write32(i915, d->reg_set, i915->uncore.fw_set); + fw_set(d, FORCEWAKE_KERNEL); } static inline void -fw_domain_wait_ack_set(const struct drm_i915_private *i915, - const struct intel_uncore_forcewake_domain *d) +fw_domain_wait_ack_set(const struct intel_uncore_forcewake_domain *d) { - if (wait_ack_set(i915, d, FORCEWAKE_KERNEL)) + if (wait_ack_set(d, FORCEWAKE_KERNEL)) DRM_ERROR("%s: timed out waiting for forcewake ack request.\n", intel_uncore_forcewake_domain_to_str(d->id)); } static inline void -fw_domain_wait_ack_set_fallback(const struct drm_i915_private *i915, - const struct intel_uncore_forcewake_domain *d) +fw_domain_wait_ack_set_fallback(const struct intel_uncore_forcewake_domain *d) { - if (likely(!wait_ack_set(i915, d, FORCEWAKE_KERNEL))) + if (likely(!wait_ack_set(d, FORCEWAKE_KERNEL))) return; - if (fw_domain_wait_ack_with_fallback(i915, d, ACK_SET)) - fw_domain_wait_ack_set(i915, d); + if (fw_domain_wait_ack_with_fallback(d, ACK_SET)) + fw_domain_wait_ack_set(d); } static inline void -fw_domain_put(const struct drm_i915_private *i915, - const struct intel_uncore_forcewake_domain *d) +fw_domain_put(const struct intel_uncore_forcewake_domain *d) { - __raw_i915_write32(i915, d->reg_set, i915->uncore.fw_clear); + fw_clear(d, FORCEWAKE_KERNEL); } static void -fw_domains_get(struct drm_i915_private *i915, enum forcewake_domains fw_domains) +fw_domains_get(struct intel_uncore *uncore, enum forcewake_domains fw_domains) { struct intel_uncore_forcewake_domain *d; unsigned int tmp; - GEM_BUG_ON(fw_domains & ~i915->uncore.fw_domains); + GEM_BUG_ON(fw_domains & ~uncore->fw_domains); - for_each_fw_domain_masked(d, fw_domains, i915, tmp) { - fw_domain_wait_ack_clear(i915, d); - fw_domain_get(i915, d); + for_each_fw_domain_masked(d, fw_domains, uncore, tmp) { + fw_domain_wait_ack_clear(d); + fw_domain_get(d); } - for_each_fw_domain_masked(d, fw_domains, i915, tmp) - fw_domain_wait_ack_set(i915, d); + for_each_fw_domain_masked(d, fw_domains, uncore, tmp) + fw_domain_wait_ack_set(d); - i915->uncore.fw_domains_active |= fw_domains; + uncore->fw_domains_active |= fw_domains; } static void -fw_domains_get_with_fallback(struct drm_i915_private *i915, +fw_domains_get_with_fallback(struct intel_uncore *uncore, enum forcewake_domains fw_domains) { struct intel_uncore_forcewake_domain *d; unsigned int tmp; - GEM_BUG_ON(fw_domains & ~i915->uncore.fw_domains); + GEM_BUG_ON(fw_domains & ~uncore->fw_domains); - for_each_fw_domain_masked(d, fw_domains, i915, tmp) { - fw_domain_wait_ack_clear_fallback(i915, d); - fw_domain_get(i915, d); + for_each_fw_domain_masked(d, fw_domains, uncore, tmp) { + 
fw_domain_wait_ack_clear_fallback(d); + fw_domain_get(d); } - for_each_fw_domain_masked(d, fw_domains, i915, tmp) - fw_domain_wait_ack_set_fallback(i915, d); + for_each_fw_domain_masked(d, fw_domains, uncore, tmp) + fw_domain_wait_ack_set_fallback(d); - i915->uncore.fw_domains_active |= fw_domains; + uncore->fw_domains_active |= fw_domains; } static void -fw_domains_put(struct drm_i915_private *i915, enum forcewake_domains fw_domains) +fw_domains_put(struct intel_uncore *uncore, enum forcewake_domains fw_domains) { struct intel_uncore_forcewake_domain *d; unsigned int tmp; - GEM_BUG_ON(fw_domains & ~i915->uncore.fw_domains); + GEM_BUG_ON(fw_domains & ~uncore->fw_domains); - for_each_fw_domain_masked(d, fw_domains, i915, tmp) - fw_domain_put(i915, d); + for_each_fw_domain_masked(d, fw_domains, uncore, tmp) + fw_domain_put(d); - i915->uncore.fw_domains_active &= ~fw_domains; + uncore->fw_domains_active &= ~fw_domains; } static void -fw_domains_reset(struct drm_i915_private *i915, +fw_domains_reset(struct intel_uncore *uncore, enum forcewake_domains fw_domains) { struct intel_uncore_forcewake_domain *d; @@ -277,61 +269,61 @@ fw_domains_reset(struct drm_i915_private *i915, if (!fw_domains) return; - GEM_BUG_ON(fw_domains & ~i915->uncore.fw_domains); + GEM_BUG_ON(fw_domains & ~uncore->fw_domains); - for_each_fw_domain_masked(d, fw_domains, i915, tmp) - fw_domain_reset(i915, d); + for_each_fw_domain_masked(d, fw_domains, uncore, tmp) + fw_domain_reset(d); } -static inline u32 gt_thread_status(struct drm_i915_private *dev_priv) +static inline u32 gt_thread_status(struct intel_uncore *uncore) { u32 val; - val = __raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG); + val = __raw_uncore_read32(uncore, GEN6_GT_THREAD_STATUS_REG); val &= GEN6_GT_THREAD_STATUS_CORE_MASK; return val; } -static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv) +static void __gen6_gt_wait_for_thread_c0(struct intel_uncore *uncore) { /* * w/a for a sporadic read returning 0 by waiting for the GT * thread to wake up. */ - WARN_ONCE(wait_for_atomic_us(gt_thread_status(dev_priv) == 0, 5000), + WARN_ONCE(wait_for_atomic_us(gt_thread_status(uncore) == 0, 5000), "GT thread status wait timed out\n"); } -static void fw_domains_get_with_thread_status(struct drm_i915_private *dev_priv, +static void fw_domains_get_with_thread_status(struct intel_uncore *uncore, enum forcewake_domains fw_domains) { - fw_domains_get(dev_priv, fw_domains); + fw_domains_get(uncore, fw_domains); /* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */ - __gen6_gt_wait_for_thread_c0(dev_priv); + __gen6_gt_wait_for_thread_c0(uncore); } -static inline u32 fifo_free_entries(struct drm_i915_private *dev_priv) +static inline u32 fifo_free_entries(struct intel_uncore *uncore) { - u32 count = __raw_i915_read32(dev_priv, GTFIFOCTL); + u32 count = __raw_uncore_read32(uncore, GTFIFOCTL); return count & GT_FIFO_FREE_ENTRIES_MASK; } -static void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv) +static void __gen6_gt_wait_for_fifo(struct intel_uncore *uncore) { u32 n; /* On VLV, FIFO will be shared by both SW and HW. 
* So, we need to read the FREE_ENTRIES everytime */ - if (IS_VALLEYVIEW(dev_priv)) - n = fifo_free_entries(dev_priv); + if (IS_VALLEYVIEW(uncore_to_i915(uncore))) + n = fifo_free_entries(uncore); else - n = dev_priv->uncore.fifo_count; + n = uncore->fifo_count; if (n <= GT_FIFO_NUM_RESERVED_ENTRIES) { - if (wait_for_atomic((n = fifo_free_entries(dev_priv)) > + if (wait_for_atomic((n = fifo_free_entries(uncore)) > GT_FIFO_NUM_RESERVED_ENTRIES, GT_FIFO_TIMEOUT_MS)) { DRM_DEBUG("GT_FIFO timeout, entries: %u\n", n); @@ -339,7 +331,7 @@ static void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv) } } - dev_priv->uncore.fifo_count = n - 1; + uncore->fifo_count = n - 1; } static enum hrtimer_restart @@ -347,30 +339,29 @@ intel_uncore_fw_release_timer(struct hrtimer *timer) { struct intel_uncore_forcewake_domain *domain = container_of(timer, struct intel_uncore_forcewake_domain, timer); - struct drm_i915_private *dev_priv = - container_of(domain, struct drm_i915_private, uncore.fw_domain[domain->id]); + struct intel_uncore *uncore = forcewake_domain_to_uncore(domain); unsigned long irqflags; - assert_rpm_device_not_suspended(dev_priv); + assert_rpm_device_not_suspended(uncore->rpm); if (xchg(&domain->active, false)) return HRTIMER_RESTART; - spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); + spin_lock_irqsave(&uncore->lock, irqflags); if (WARN_ON(domain->wake_count == 0)) domain->wake_count++; if (--domain->wake_count == 0) - dev_priv->uncore.funcs.force_wake_put(dev_priv, domain->mask); + uncore->funcs.force_wake_put(uncore, domain->mask); - spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); + spin_unlock_irqrestore(&uncore->lock, irqflags); return HRTIMER_NORESTART; } /* Note callers must have acquired the PUNIT->PMIC bus, before calling this. 
*/ static unsigned int -intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv) +intel_uncore_forcewake_reset(struct intel_uncore *uncore) { unsigned long irqflags; struct intel_uncore_forcewake_domain *domain; @@ -388,7 +379,7 @@ intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv) active_domains = 0; - for_each_fw_domain(domain, dev_priv, tmp) { + for_each_fw_domain(domain, uncore, tmp) { smp_store_mb(domain->active, false); if (hrtimer_cancel(&domain->timer) == 0) continue; @@ -396,9 +387,9 @@ intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv) intel_uncore_fw_release_timer(&domain->timer); } - spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); + spin_lock_irqsave(&uncore->lock, irqflags); - for_each_fw_domain(domain, dev_priv, tmp) { + for_each_fw_domain(domain, uncore, tmp) { if (hrtimer_active(&domain->timer)) active_domains |= domain->mask; } @@ -411,20 +402,20 @@ intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv) break; } - spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); + spin_unlock_irqrestore(&uncore->lock, irqflags); cond_resched(); } WARN_ON(active_domains); - fw = dev_priv->uncore.fw_domains_active; + fw = uncore->fw_domains_active; if (fw) - dev_priv->uncore.funcs.force_wake_put(dev_priv, fw); + uncore->funcs.force_wake_put(uncore, fw); - fw_domains_reset(dev_priv, dev_priv->uncore.fw_domains); - assert_forcewakes_inactive(dev_priv); + fw_domains_reset(uncore, uncore->fw_domains); + assert_forcewakes_inactive(uncore); - spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); + spin_unlock_irqrestore(&uncore->lock, irqflags); return fw; /* track the lost user forcewake domains */ } @@ -460,8 +451,8 @@ static void intel_uncore_edram_detect(struct drm_i915_private *dev_priv) if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) { - dev_priv->edram_cap = __raw_i915_read32(dev_priv, - HSW_EDRAM_CAP); + dev_priv->edram_cap = __raw_uncore_read32(&dev_priv->uncore, + HSW_EDRAM_CAP); /* NB: We can't write IDICR yet because we do not have gt funcs * set up */ @@ -475,121 +466,115 @@ static void intel_uncore_edram_detect(struct drm_i915_private *dev_priv) } static bool -fpga_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv) +fpga_check_for_unclaimed_mmio(struct intel_uncore *uncore) { u32 dbg; - dbg = __raw_i915_read32(dev_priv, FPGA_DBG); + dbg = __raw_uncore_read32(uncore, FPGA_DBG); if (likely(!(dbg & FPGA_DBG_RM_NOCLAIM))) return false; - __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM); + __raw_uncore_write32(uncore, FPGA_DBG, FPGA_DBG_RM_NOCLAIM); return true; } static bool -vlv_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv) +vlv_check_for_unclaimed_mmio(struct intel_uncore *uncore) { u32 cer; - cer = __raw_i915_read32(dev_priv, CLAIM_ER); + cer = __raw_uncore_read32(uncore, CLAIM_ER); if (likely(!(cer & (CLAIM_ER_OVERFLOW | CLAIM_ER_CTR_MASK)))) return false; - __raw_i915_write32(dev_priv, CLAIM_ER, CLAIM_ER_CLR); + __raw_uncore_write32(uncore, CLAIM_ER, CLAIM_ER_CLR); return true; } static bool -gen6_check_for_fifo_debug(struct drm_i915_private *dev_priv) +gen6_check_for_fifo_debug(struct intel_uncore *uncore) { u32 fifodbg; - fifodbg = __raw_i915_read32(dev_priv, GTFIFODBG); + fifodbg = __raw_uncore_read32(uncore, GTFIFODBG); if (unlikely(fifodbg)) { DRM_DEBUG_DRIVER("GTFIFODBG = 0x08%x\n", fifodbg); - __raw_i915_write32(dev_priv, GTFIFODBG, fifodbg); + __raw_uncore_write32(uncore, GTFIFODBG, fifodbg); } return fifodbg; } static bool 
-check_for_unclaimed_mmio(struct drm_i915_private *dev_priv) +check_for_unclaimed_mmio(struct intel_uncore *uncore) { bool ret = false; - if (HAS_FPGA_DBG_UNCLAIMED(dev_priv)) - ret |= fpga_check_for_unclaimed_mmio(dev_priv); + if (intel_uncore_has_fpga_dbg_unclaimed(uncore)) + ret |= fpga_check_for_unclaimed_mmio(uncore); - if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) - ret |= vlv_check_for_unclaimed_mmio(dev_priv); + if (intel_uncore_has_dbg_unclaimed(uncore)) + ret |= vlv_check_for_unclaimed_mmio(uncore); - if (IS_GEN_RANGE(dev_priv, 6, 7)) - ret |= gen6_check_for_fifo_debug(dev_priv); + if (intel_uncore_has_fifo(uncore)) + ret |= gen6_check_for_fifo_debug(uncore); return ret; } -static void __intel_uncore_early_sanitize(struct drm_i915_private *dev_priv, +static void __intel_uncore_early_sanitize(struct intel_uncore *uncore, unsigned int restore_forcewake) { /* clear out unclaimed reg detection bit */ - if (check_for_unclaimed_mmio(dev_priv)) + if (check_for_unclaimed_mmio(uncore)) DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n"); /* WaDisableShadowRegForCpd:chv */ - if (IS_CHERRYVIEW(dev_priv)) { - __raw_i915_write32(dev_priv, GTFIFOCTL, - __raw_i915_read32(dev_priv, GTFIFOCTL) | - GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL | - GT_FIFO_CTL_RC6_POLICY_STALL); + if (IS_CHERRYVIEW(uncore_to_i915(uncore))) { + __raw_uncore_write32(uncore, GTFIFOCTL, + __raw_uncore_read32(uncore, GTFIFOCTL) | + GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL | + GT_FIFO_CTL_RC6_POLICY_STALL); } iosf_mbi_punit_acquire(); - intel_uncore_forcewake_reset(dev_priv); + intel_uncore_forcewake_reset(uncore); if (restore_forcewake) { - spin_lock_irq(&dev_priv->uncore.lock); - dev_priv->uncore.funcs.force_wake_get(dev_priv, - restore_forcewake); - - if (IS_GEN_RANGE(dev_priv, 6, 7)) - dev_priv->uncore.fifo_count = - fifo_free_entries(dev_priv); - spin_unlock_irq(&dev_priv->uncore.lock); + spin_lock_irq(&uncore->lock); + uncore->funcs.force_wake_get(uncore, restore_forcewake); + + if (intel_uncore_has_fifo(uncore)) + uncore->fifo_count = fifo_free_entries(uncore); + spin_unlock_irq(&uncore->lock); } iosf_mbi_punit_release(); } -void intel_uncore_suspend(struct drm_i915_private *dev_priv) +void intel_uncore_suspend(struct intel_uncore *uncore) { iosf_mbi_punit_acquire(); iosf_mbi_unregister_pmic_bus_access_notifier_unlocked( - &dev_priv->uncore.pmic_bus_access_nb); - dev_priv->uncore.fw_domains_saved = - intel_uncore_forcewake_reset(dev_priv); + &uncore->pmic_bus_access_nb); + uncore->fw_domains_saved = intel_uncore_forcewake_reset(uncore); iosf_mbi_punit_release(); } -void intel_uncore_resume_early(struct drm_i915_private *dev_priv) +void intel_uncore_resume_early(struct intel_uncore *uncore) { unsigned int restore_forcewake; - restore_forcewake = fetch_and_zero(&dev_priv->uncore.fw_domains_saved); - __intel_uncore_early_sanitize(dev_priv, restore_forcewake); + restore_forcewake = fetch_and_zero(&uncore->fw_domains_saved); + __intel_uncore_early_sanitize(uncore, restore_forcewake); - iosf_mbi_register_pmic_bus_access_notifier( - &dev_priv->uncore.pmic_bus_access_nb); - i915_check_and_clear_faults(dev_priv); + iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb); } -void intel_uncore_runtime_resume(struct drm_i915_private *dev_priv) +void intel_uncore_runtime_resume(struct intel_uncore *uncore) { - iosf_mbi_register_pmic_bus_access_notifier( - &dev_priv->uncore.pmic_bus_access_nb); + iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb); } void intel_uncore_sanitize(struct 
drm_i915_private *dev_priv) @@ -598,15 +583,15 @@ void intel_uncore_sanitize(struct drm_i915_private *dev_priv) intel_sanitize_gt_powersave(dev_priv); } -static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv, +static void __intel_uncore_forcewake_get(struct intel_uncore *uncore, enum forcewake_domains fw_domains) { struct intel_uncore_forcewake_domain *domain; unsigned int tmp; - fw_domains &= dev_priv->uncore.fw_domains; + fw_domains &= uncore->fw_domains; - for_each_fw_domain_masked(domain, fw_domains, dev_priv, tmp) { + for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) { if (domain->wake_count++) { fw_domains &= ~domain->mask; domain->active = true; @@ -614,12 +599,12 @@ static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv, } if (fw_domains) - dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains); + uncore->funcs.force_wake_get(uncore, fw_domains); } /** * intel_uncore_forcewake_get - grab forcewake domain references - * @dev_priv: i915 device instance + * @uncore: the intel_uncore structure * @fw_domains: forcewake domains to get reference on * * This function can be used get GT's forcewake domain references. @@ -630,100 +615,100 @@ static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv, * call to intel_unforce_forcewake_put(). Usually caller wants all the domains * to be kept awake so the @fw_domains would be then FORCEWAKE_ALL. */ -void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv, +void intel_uncore_forcewake_get(struct intel_uncore *uncore, enum forcewake_domains fw_domains) { unsigned long irqflags; - if (!dev_priv->uncore.funcs.force_wake_get) + if (!uncore->funcs.force_wake_get) return; - assert_rpm_wakelock_held(dev_priv); + __assert_rpm_wakelock_held(uncore->rpm); - spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); - __intel_uncore_forcewake_get(dev_priv, fw_domains); - spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); + spin_lock_irqsave(&uncore->lock, irqflags); + __intel_uncore_forcewake_get(uncore, fw_domains); + spin_unlock_irqrestore(&uncore->lock, irqflags); } /** * intel_uncore_forcewake_user_get - claim forcewake on behalf of userspace - * @dev_priv: i915 device instance + * @uncore: the intel_uncore structure * * This function is a wrapper around intel_uncore_forcewake_get() to acquire * the GT powerwell and in the process disable our debugging for the * duration of userspace's bypass. 
*/ -void intel_uncore_forcewake_user_get(struct drm_i915_private *dev_priv) +void intel_uncore_forcewake_user_get(struct intel_uncore *uncore) { - spin_lock_irq(&dev_priv->uncore.lock); - if (!dev_priv->uncore.user_forcewake.count++) { - intel_uncore_forcewake_get__locked(dev_priv, FORCEWAKE_ALL); + spin_lock_irq(&uncore->lock); + if (!uncore->user_forcewake.count++) { + intel_uncore_forcewake_get__locked(uncore, FORCEWAKE_ALL); /* Save and disable mmio debugging for the user bypass */ - dev_priv->uncore.user_forcewake.saved_mmio_check = - dev_priv->uncore.unclaimed_mmio_check; - dev_priv->uncore.user_forcewake.saved_mmio_debug = + uncore->user_forcewake.saved_mmio_check = + uncore->unclaimed_mmio_check; + uncore->user_forcewake.saved_mmio_debug = i915_modparams.mmio_debug; - dev_priv->uncore.unclaimed_mmio_check = 0; + uncore->unclaimed_mmio_check = 0; i915_modparams.mmio_debug = 0; } - spin_unlock_irq(&dev_priv->uncore.lock); + spin_unlock_irq(&uncore->lock); } /** * intel_uncore_forcewake_user_put - release forcewake on behalf of userspace - * @dev_priv: i915 device instance + * @uncore: the intel_uncore structure * * This function complements intel_uncore_forcewake_user_get() and releases * the GT powerwell taken on behalf of the userspace bypass. */ -void intel_uncore_forcewake_user_put(struct drm_i915_private *dev_priv) +void intel_uncore_forcewake_user_put(struct intel_uncore *uncore) { - spin_lock_irq(&dev_priv->uncore.lock); - if (!--dev_priv->uncore.user_forcewake.count) { - if (intel_uncore_unclaimed_mmio(dev_priv)) - dev_info(dev_priv->drm.dev, + spin_lock_irq(&uncore->lock); + if (!--uncore->user_forcewake.count) { + if (intel_uncore_unclaimed_mmio(uncore)) + dev_info(uncore_to_i915(uncore)->drm.dev, "Invalid mmio detected during user access\n"); - dev_priv->uncore.unclaimed_mmio_check = - dev_priv->uncore.user_forcewake.saved_mmio_check; + uncore->unclaimed_mmio_check = + uncore->user_forcewake.saved_mmio_check; i915_modparams.mmio_debug = - dev_priv->uncore.user_forcewake.saved_mmio_debug; + uncore->user_forcewake.saved_mmio_debug; - intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_put__locked(uncore, FORCEWAKE_ALL); } - spin_unlock_irq(&dev_priv->uncore.lock); + spin_unlock_irq(&uncore->lock); } /** * intel_uncore_forcewake_get__locked - grab forcewake domain references - * @dev_priv: i915 device instance + * @uncore: the intel_uncore structure * @fw_domains: forcewake domains to get reference on * * See intel_uncore_forcewake_get(). This variant places the onus * on the caller to explicitly handle the dev_priv->uncore.lock spinlock. 
*/ -void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv, +void intel_uncore_forcewake_get__locked(struct intel_uncore *uncore, enum forcewake_domains fw_domains) { - lockdep_assert_held(&dev_priv->uncore.lock); + lockdep_assert_held(&uncore->lock); - if (!dev_priv->uncore.funcs.force_wake_get) + if (!uncore->funcs.force_wake_get) return; - __intel_uncore_forcewake_get(dev_priv, fw_domains); + __intel_uncore_forcewake_get(uncore, fw_domains); } -static void __intel_uncore_forcewake_put(struct drm_i915_private *dev_priv, +static void __intel_uncore_forcewake_put(struct intel_uncore *uncore, enum forcewake_domains fw_domains) { struct intel_uncore_forcewake_domain *domain; unsigned int tmp; - fw_domains &= dev_priv->uncore.fw_domains; + fw_domains &= uncore->fw_domains; - for_each_fw_domain_masked(domain, fw_domains, dev_priv, tmp) { + for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) { if (WARN_ON(domain->wake_count == 0)) continue; @@ -738,66 +723,66 @@ static void __intel_uncore_forcewake_put(struct drm_i915_private *dev_priv, /** * intel_uncore_forcewake_put - release a forcewake domain reference - * @dev_priv: i915 device instance + * @uncore: the intel_uncore structure * @fw_domains: forcewake domains to put references * * This function drops the device-level forcewakes for specified * domains obtained by intel_uncore_forcewake_get(). */ -void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv, +void intel_uncore_forcewake_put(struct intel_uncore *uncore, enum forcewake_domains fw_domains) { unsigned long irqflags; - if (!dev_priv->uncore.funcs.force_wake_put) + if (!uncore->funcs.force_wake_put) return; - spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); - __intel_uncore_forcewake_put(dev_priv, fw_domains); - spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); + spin_lock_irqsave(&uncore->lock, irqflags); + __intel_uncore_forcewake_put(uncore, fw_domains); + spin_unlock_irqrestore(&uncore->lock, irqflags); } /** * intel_uncore_forcewake_put__locked - grab forcewake domain references - * @dev_priv: i915 device instance + * @uncore: the intel_uncore structure * @fw_domains: forcewake domains to get reference on * * See intel_uncore_forcewake_put(). This variant places the onus * on the caller to explicitly handle the dev_priv->uncore.lock spinlock. 
*/ -void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv, +void intel_uncore_forcewake_put__locked(struct intel_uncore *uncore, enum forcewake_domains fw_domains) { - lockdep_assert_held(&dev_priv->uncore.lock); + lockdep_assert_held(&uncore->lock); - if (!dev_priv->uncore.funcs.force_wake_put) + if (!uncore->funcs.force_wake_put) return; - __intel_uncore_forcewake_put(dev_priv, fw_domains); + __intel_uncore_forcewake_put(uncore, fw_domains); } -void assert_forcewakes_inactive(struct drm_i915_private *dev_priv) +void assert_forcewakes_inactive(struct intel_uncore *uncore) { - if (!dev_priv->uncore.funcs.force_wake_get) + if (!uncore->funcs.force_wake_get) return; - WARN(dev_priv->uncore.fw_domains_active, + WARN(uncore->fw_domains_active, "Expected all fw_domains to be inactive, but %08x are still on\n", - dev_priv->uncore.fw_domains_active); + uncore->fw_domains_active); } -void assert_forcewakes_active(struct drm_i915_private *dev_priv, +void assert_forcewakes_active(struct intel_uncore *uncore, enum forcewake_domains fw_domains) { - if (!dev_priv->uncore.funcs.force_wake_get) + if (!uncore->funcs.force_wake_get) return; - assert_rpm_wakelock_held(dev_priv); + __assert_rpm_wakelock_held(uncore->rpm); - fw_domains &= dev_priv->uncore.fw_domains; - WARN(fw_domains & ~dev_priv->uncore.fw_domains_active, + fw_domains &= uncore->fw_domains; + WARN(fw_domains & ~uncore->fw_domains_active, "Expected %08x fw_domains to be active, but %08x are off\n", - fw_domains, fw_domains & ~dev_priv->uncore.fw_domains_active); + fw_domains, fw_domains & ~uncore->fw_domains_active); } /* We give fast paths for the really cool registers */ @@ -806,7 +791,7 @@ void assert_forcewakes_active(struct drm_i915_private *dev_priv, #define GEN11_NEEDS_FORCE_WAKE(reg) \ ((reg) < 0x40000 || ((reg) >= 0x1c0000 && (reg) < 0x1dc000)) -#define __gen6_reg_read_fw_domains(offset) \ +#define __gen6_reg_read_fw_domains(uncore, offset) \ ({ \ enum forcewake_domains __fwd; \ if (NEEDS_FORCE_WAKE(offset)) \ @@ -846,13 +831,13 @@ static int fw_range_cmp(u32 offset, const struct intel_forcewake_range *entry) }) static enum forcewake_domains -find_fw_domain(struct drm_i915_private *dev_priv, u32 offset) +find_fw_domain(struct intel_uncore *uncore, u32 offset) { const struct intel_forcewake_range *entry; entry = BSEARCH(offset, - dev_priv->uncore.fw_domains_table, - dev_priv->uncore.fw_domains_table_entries, + uncore->fw_domains_table, + uncore->fw_domains_table_entries, fw_range_cmp); if (!entry) @@ -864,11 +849,11 @@ find_fw_domain(struct drm_i915_private *dev_priv, u32 offset) * translate it here to the list of available domains. 
*/ if (entry->domains == FORCEWAKE_ALL) - return dev_priv->uncore.fw_domains; + return uncore->fw_domains; - WARN(entry->domains & ~dev_priv->uncore.fw_domains, + WARN(entry->domains & ~uncore->fw_domains, "Uninitialized forcewake domain(s) 0x%x accessed at 0x%x\n", - entry->domains & ~dev_priv->uncore.fw_domains, offset); + entry->domains & ~uncore->fw_domains, offset); return entry->domains; } @@ -892,19 +877,19 @@ static const struct intel_forcewake_range __vlv_fw_ranges[] = { GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA), }; -#define __fwtable_reg_read_fw_domains(offset) \ +#define __fwtable_reg_read_fw_domains(uncore, offset) \ ({ \ enum forcewake_domains __fwd = 0; \ if (NEEDS_FORCE_WAKE((offset))) \ - __fwd = find_fw_domain(dev_priv, offset); \ + __fwd = find_fw_domain(uncore, offset); \ __fwd; \ }) -#define __gen11_fwtable_reg_read_fw_domains(offset) \ +#define __gen11_fwtable_reg_read_fw_domains(uncore, offset) \ ({ \ enum forcewake_domains __fwd = 0; \ if (GEN11_NEEDS_FORCE_WAKE((offset))) \ - __fwd = find_fw_domain(dev_priv, offset); \ + __fwd = find_fw_domain(uncore, offset); \ __fwd; \ }) @@ -956,7 +941,7 @@ static bool is_gen##x##_shadowed(u32 offset) \ __is_genX_shadowed(8) __is_genX_shadowed(11) -#define __gen8_reg_write_fw_domains(offset) \ +#define __gen8_reg_write_fw_domains(uncore, offset) \ ({ \ enum forcewake_domains __fwd; \ if (NEEDS_FORCE_WAKE(offset) && !is_gen8_shadowed(offset)) \ @@ -986,19 +971,19 @@ static const struct intel_forcewake_range __chv_fw_ranges[] = { GEN_FW_RANGE(0x30000, 0x37fff, FORCEWAKE_MEDIA), }; -#define __fwtable_reg_write_fw_domains(offset) \ +#define __fwtable_reg_write_fw_domains(uncore, offset) \ ({ \ enum forcewake_domains __fwd = 0; \ if (NEEDS_FORCE_WAKE((offset)) && !is_gen8_shadowed(offset)) \ - __fwd = find_fw_domain(dev_priv, offset); \ + __fwd = find_fw_domain(uncore, offset); \ __fwd; \ }) -#define __gen11_fwtable_reg_write_fw_domains(offset) \ +#define __gen11_fwtable_reg_write_fw_domains(uncore, offset) \ ({ \ enum forcewake_domains __fwd = 0; \ if (GEN11_NEEDS_FORCE_WAKE((offset)) && !is_gen11_shadowed(offset)) \ - __fwd = find_fw_domain(dev_priv, offset); \ + __fwd = find_fw_domain(uncore, offset); \ __fwd; \ }) @@ -1073,21 +1058,21 @@ static const struct intel_forcewake_range __gen11_fw_ranges[] = { }; static void -ilk_dummy_write(struct drm_i915_private *dev_priv) +ilk_dummy_write(struct intel_uncore *uncore) { /* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up * the chip from rc6 before touching it for real. MI_MODE is masked, * hence harmless to write 0 into. */ - __raw_i915_write32(dev_priv, MI_MODE, 0); + __raw_uncore_write32(uncore, MI_MODE, 0); } static void -__unclaimed_reg_debug(struct drm_i915_private *dev_priv, +__unclaimed_reg_debug(struct intel_uncore *uncore, const i915_reg_t reg, const bool read, const bool before) { - if (WARN(check_for_unclaimed_mmio(dev_priv) && !before, + if (WARN(check_for_unclaimed_mmio(uncore) && !before, "Unclaimed %s register 0x%x\n", read ? 
"read from" : "write to", i915_mmio_reg_offset(reg))) @@ -1096,7 +1081,7 @@ __unclaimed_reg_debug(struct drm_i915_private *dev_priv, } static inline void -unclaimed_reg_debug(struct drm_i915_private *dev_priv, +unclaimed_reg_debug(struct intel_uncore *uncore, const i915_reg_t reg, const bool read, const bool before) @@ -1104,12 +1089,12 @@ unclaimed_reg_debug(struct drm_i915_private *dev_priv, if (likely(!i915_modparams.mmio_debug)) return; - __unclaimed_reg_debug(dev_priv, reg, read, before); + __unclaimed_reg_debug(uncore, reg, read, before); } #define GEN2_READ_HEADER(x) \ u##x val = 0; \ - assert_rpm_wakelock_held(dev_priv); + __assert_rpm_wakelock_held(uncore->rpm); #define GEN2_READ_FOOTER \ trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \ @@ -1117,18 +1102,18 @@ unclaimed_reg_debug(struct drm_i915_private *dev_priv, #define __gen2_read(x) \ static u##x \ -gen2_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \ +gen2_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \ GEN2_READ_HEADER(x); \ - val = __raw_i915_read##x(dev_priv, reg); \ + val = __raw_uncore_read##x(uncore, reg); \ GEN2_READ_FOOTER; \ } #define __gen5_read(x) \ static u##x \ -gen5_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \ +gen5_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \ GEN2_READ_HEADER(x); \ - ilk_dummy_write(dev_priv); \ - val = __raw_i915_read##x(dev_priv, reg); \ + ilk_dummy_write(uncore); \ + val = __raw_uncore_read##x(uncore, reg); \ GEN2_READ_FOOTER; \ } @@ -1151,53 +1136,53 @@ __gen2_read(64) u32 offset = i915_mmio_reg_offset(reg); \ unsigned long irqflags; \ u##x val = 0; \ - assert_rpm_wakelock_held(dev_priv); \ - spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \ - unclaimed_reg_debug(dev_priv, reg, true, true) + __assert_rpm_wakelock_held(uncore->rpm); \ + spin_lock_irqsave(&uncore->lock, irqflags); \ + unclaimed_reg_debug(uncore, reg, true, true) #define GEN6_READ_FOOTER \ - unclaimed_reg_debug(dev_priv, reg, true, false); \ - spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \ + unclaimed_reg_debug(uncore, reg, true, false); \ + spin_unlock_irqrestore(&uncore->lock, irqflags); \ trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \ return val -static noinline void ___force_wake_auto(struct drm_i915_private *dev_priv, +static noinline void ___force_wake_auto(struct intel_uncore *uncore, enum forcewake_domains fw_domains) { struct intel_uncore_forcewake_domain *domain; unsigned int tmp; - GEM_BUG_ON(fw_domains & ~dev_priv->uncore.fw_domains); + GEM_BUG_ON(fw_domains & ~uncore->fw_domains); - for_each_fw_domain_masked(domain, fw_domains, dev_priv, tmp) + for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) fw_domain_arm_timer(domain); - dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains); + uncore->funcs.force_wake_get(uncore, fw_domains); } -static inline void __force_wake_auto(struct drm_i915_private *dev_priv, +static inline void __force_wake_auto(struct intel_uncore *uncore, enum forcewake_domains fw_domains) { if (WARN_ON(!fw_domains)) return; /* Turn on all requested but inactive supported forcewake domains. 
*/ - fw_domains &= dev_priv->uncore.fw_domains; - fw_domains &= ~dev_priv->uncore.fw_domains_active; + fw_domains &= uncore->fw_domains; + fw_domains &= ~uncore->fw_domains_active; if (fw_domains) - ___force_wake_auto(dev_priv, fw_domains); + ___force_wake_auto(uncore, fw_domains); } #define __gen_read(func, x) \ static u##x \ -func##_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \ +func##_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \ enum forcewake_domains fw_engine; \ GEN6_READ_HEADER(x); \ - fw_engine = __##func##_reg_read_fw_domains(offset); \ + fw_engine = __##func##_reg_read_fw_domains(uncore, offset); \ if (fw_engine) \ - __force_wake_auto(dev_priv, fw_engine); \ - val = __raw_i915_read##x(dev_priv, reg); \ + __force_wake_auto(uncore, fw_engine); \ + val = __raw_uncore_read##x(uncore, reg); \ GEN6_READ_FOOTER; \ } #define __gen6_read(x) __gen_read(gen6, x) @@ -1225,24 +1210,24 @@ __gen6_read(64) #define GEN2_WRITE_HEADER \ trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \ - assert_rpm_wakelock_held(dev_priv); \ + __assert_rpm_wakelock_held(uncore->rpm); \ #define GEN2_WRITE_FOOTER #define __gen2_write(x) \ static void \ -gen2_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \ +gen2_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \ GEN2_WRITE_HEADER; \ - __raw_i915_write##x(dev_priv, reg, val); \ + __raw_uncore_write##x(uncore, reg, val); \ GEN2_WRITE_FOOTER; \ } #define __gen5_write(x) \ static void \ -gen5_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \ +gen5_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \ GEN2_WRITE_HEADER; \ - ilk_dummy_write(dev_priv); \ - __raw_i915_write##x(dev_priv, reg, val); \ + ilk_dummy_write(uncore); \ + __raw_uncore_write##x(uncore, reg, val); \ GEN2_WRITE_FOOTER; \ } @@ -1263,33 +1248,33 @@ __gen2_write(32) u32 offset = i915_mmio_reg_offset(reg); \ unsigned long irqflags; \ trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \ - assert_rpm_wakelock_held(dev_priv); \ - spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \ - unclaimed_reg_debug(dev_priv, reg, false, true) + __assert_rpm_wakelock_held(uncore->rpm); \ + spin_lock_irqsave(&uncore->lock, irqflags); \ + unclaimed_reg_debug(uncore, reg, false, true) #define GEN6_WRITE_FOOTER \ - unclaimed_reg_debug(dev_priv, reg, false, false); \ - spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags) + unclaimed_reg_debug(uncore, reg, false, false); \ + spin_unlock_irqrestore(&uncore->lock, irqflags) #define __gen6_write(x) \ static void \ -gen6_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \ +gen6_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \ GEN6_WRITE_HEADER; \ if (NEEDS_FORCE_WAKE(offset)) \ - __gen6_gt_wait_for_fifo(dev_priv); \ - __raw_i915_write##x(dev_priv, reg, val); \ + __gen6_gt_wait_for_fifo(uncore); \ + __raw_uncore_write##x(uncore, reg, val); \ GEN6_WRITE_FOOTER; \ } #define __gen_write(func, x) \ static void \ -func##_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \ +func##_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \ enum forcewake_domains fw_engine; \ GEN6_WRITE_HEADER; \ - fw_engine = __##func##_reg_write_fw_domains(offset); \ + fw_engine = __##func##_reg_write_fw_domains(uncore, offset); \ if (fw_engine) \ - __force_wake_auto(dev_priv, fw_engine); \ - 
__raw_i915_write##x(dev_priv, reg, val); \ + __force_wake_auto(uncore, fw_engine); \ + __raw_uncore_write##x(uncore, reg, val); \ GEN6_WRITE_FOOTER; \ } #define __gen8_write(x) __gen_write(gen8, x) @@ -1316,23 +1301,23 @@ __gen6_write(32) #undef GEN6_WRITE_FOOTER #undef GEN6_WRITE_HEADER -#define ASSIGN_WRITE_MMIO_VFUNCS(i915, x) \ +#define ASSIGN_WRITE_MMIO_VFUNCS(uncore, x) \ do { \ - (i915)->uncore.funcs.mmio_writeb = x##_write8; \ - (i915)->uncore.funcs.mmio_writew = x##_write16; \ - (i915)->uncore.funcs.mmio_writel = x##_write32; \ + (uncore)->funcs.mmio_writeb = x##_write8; \ + (uncore)->funcs.mmio_writew = x##_write16; \ + (uncore)->funcs.mmio_writel = x##_write32; \ } while (0) -#define ASSIGN_READ_MMIO_VFUNCS(i915, x) \ +#define ASSIGN_READ_MMIO_VFUNCS(uncore, x) \ do { \ - (i915)->uncore.funcs.mmio_readb = x##_read8; \ - (i915)->uncore.funcs.mmio_readw = x##_read16; \ - (i915)->uncore.funcs.mmio_readl = x##_read32; \ - (i915)->uncore.funcs.mmio_readq = x##_read64; \ + (uncore)->funcs.mmio_readb = x##_read8; \ + (uncore)->funcs.mmio_readw = x##_read16; \ + (uncore)->funcs.mmio_readl = x##_read32; \ + (uncore)->funcs.mmio_readq = x##_read64; \ } while (0) -static void fw_domain_init(struct drm_i915_private *dev_priv, +static void fw_domain_init(struct intel_uncore *uncore, enum forcewake_domain_id domain_id, i915_reg_t reg_set, i915_reg_t reg_ack) @@ -1342,7 +1327,7 @@ static void fw_domain_init(struct drm_i915_private *dev_priv, if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT)) return; - d = &dev_priv->uncore.fw_domain[domain_id]; + d = &uncore->fw_domain[domain_id]; WARN_ON(d->wake_count); @@ -1350,8 +1335,8 @@ static void fw_domain_init(struct drm_i915_private *dev_priv, WARN_ON(!i915_mmio_reg_valid(reg_ack)); d->wake_count = 0; - d->reg_set = reg_set; - d->reg_ack = reg_ack; + d->reg_set = uncore->regs + i915_mmio_reg_offset(reg_set); + d->reg_ack = uncore->regs + i915_mmio_reg_offset(reg_ack); d->id = domain_id; @@ -1371,12 +1356,12 @@ static void fw_domain_init(struct drm_i915_private *dev_priv, hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); d->timer.function = intel_uncore_fw_release_timer; - dev_priv->uncore.fw_domains |= BIT(domain_id); + uncore->fw_domains |= BIT(domain_id); - fw_domain_reset(dev_priv, d); + fw_domain_reset(d); } -static void fw_domain_fini(struct drm_i915_private *dev_priv, +static void fw_domain_fini(struct intel_uncore *uncore, enum forcewake_domain_id domain_id) { struct intel_uncore_forcewake_domain *d; @@ -1384,85 +1369,76 @@ static void fw_domain_fini(struct drm_i915_private *dev_priv, if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT)) return; - d = &dev_priv->uncore.fw_domain[domain_id]; + d = &uncore->fw_domain[domain_id]; WARN_ON(d->wake_count); WARN_ON(hrtimer_cancel(&d->timer)); memset(d, 0, sizeof(*d)); - dev_priv->uncore.fw_domains &= ~BIT(domain_id); + uncore->fw_domains &= ~BIT(domain_id); } -static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv) +static void intel_uncore_fw_domains_init(struct intel_uncore *uncore) { - if (INTEL_GEN(dev_priv) <= 5 || intel_vgpu_active(dev_priv)) - return; + struct drm_i915_private *i915 = uncore_to_i915(uncore); - if (IS_GEN(dev_priv, 6)) { - dev_priv->uncore.fw_reset = 0; - dev_priv->uncore.fw_set = FORCEWAKE_KERNEL; - dev_priv->uncore.fw_clear = 0; - } else { - /* WaRsClearFWBitsAtReset:bdw,skl */ - dev_priv->uncore.fw_reset = _MASKED_BIT_DISABLE(0xffff); - dev_priv->uncore.fw_set = _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL); - dev_priv->uncore.fw_clear = 
_MASKED_BIT_DISABLE(FORCEWAKE_KERNEL); - } + if (!intel_uncore_has_forcewake(uncore)) + return; - if (INTEL_GEN(dev_priv) >= 11) { + if (INTEL_GEN(i915) >= 11) { int i; - dev_priv->uncore.funcs.force_wake_get = + uncore->funcs.force_wake_get = fw_domains_get_with_fallback; - dev_priv->uncore.funcs.force_wake_put = fw_domains_put; - fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER, + uncore->funcs.force_wake_put = fw_domains_put; + fw_domain_init(uncore, FW_DOMAIN_ID_RENDER, FORCEWAKE_RENDER_GEN9, FORCEWAKE_ACK_RENDER_GEN9); - fw_domain_init(dev_priv, FW_DOMAIN_ID_BLITTER, + fw_domain_init(uncore, FW_DOMAIN_ID_BLITTER, FORCEWAKE_BLITTER_GEN9, FORCEWAKE_ACK_BLITTER_GEN9); for (i = 0; i < I915_MAX_VCS; i++) { - if (!HAS_ENGINE(dev_priv, _VCS(i))) + if (!HAS_ENGINE(i915, _VCS(i))) continue; - fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA_VDBOX0 + i, + fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA_VDBOX0 + i, FORCEWAKE_MEDIA_VDBOX_GEN11(i), FORCEWAKE_ACK_MEDIA_VDBOX_GEN11(i)); } for (i = 0; i < I915_MAX_VECS; i++) { - if (!HAS_ENGINE(dev_priv, _VECS(i))) + if (!HAS_ENGINE(i915, _VECS(i))) continue; - fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA_VEBOX0 + i, + fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA_VEBOX0 + i, FORCEWAKE_MEDIA_VEBOX_GEN11(i), FORCEWAKE_ACK_MEDIA_VEBOX_GEN11(i)); } - } else if (IS_GEN_RANGE(dev_priv, 9, 10)) { - dev_priv->uncore.funcs.force_wake_get = + } else if (IS_GEN_RANGE(i915, 9, 10)) { + uncore->funcs.force_wake_get = fw_domains_get_with_fallback; - dev_priv->uncore.funcs.force_wake_put = fw_domains_put; - fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER, + uncore->funcs.force_wake_put = fw_domains_put; + fw_domain_init(uncore, FW_DOMAIN_ID_RENDER, FORCEWAKE_RENDER_GEN9, FORCEWAKE_ACK_RENDER_GEN9); - fw_domain_init(dev_priv, FW_DOMAIN_ID_BLITTER, + fw_domain_init(uncore, FW_DOMAIN_ID_BLITTER, FORCEWAKE_BLITTER_GEN9, FORCEWAKE_ACK_BLITTER_GEN9); - fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA, + fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA, FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9); - } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { - dev_priv->uncore.funcs.force_wake_get = fw_domains_get; - dev_priv->uncore.funcs.force_wake_put = fw_domains_put; - fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER, + } else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) { + uncore->funcs.force_wake_get = fw_domains_get; + uncore->funcs.force_wake_put = fw_domains_put; + fw_domain_init(uncore, FW_DOMAIN_ID_RENDER, FORCEWAKE_VLV, FORCEWAKE_ACK_VLV); - fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA, + fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA, FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV); - } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { - dev_priv->uncore.funcs.force_wake_get = + } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) { + uncore->funcs.force_wake_get = fw_domains_get_with_thread_status; - dev_priv->uncore.funcs.force_wake_put = fw_domains_put; - fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER, + uncore->funcs.force_wake_put = fw_domains_put; + fw_domain_init(uncore, FW_DOMAIN_ID_RENDER, FORCEWAKE_MT, FORCEWAKE_ACK_HSW); - } else if (IS_IVYBRIDGE(dev_priv)) { + } else if (IS_IVYBRIDGE(i915)) { u32 ecobus; /* IVB configs may use multi-threaded forcewake */ @@ -1474,9 +1450,9 @@ static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv) * (correctly) interpreted by the test below as MT * forcewake being disabled. 
*/ - dev_priv->uncore.funcs.force_wake_get = + uncore->funcs.force_wake_get = fw_domains_get_with_thread_status; - dev_priv->uncore.funcs.force_wake_put = fw_domains_put; + uncore->funcs.force_wake_put = fw_domains_put; /* We need to init first for ECOBUS access and then * determine later if we want to reinit, in case of MT access is @@ -1485,41 +1461,41 @@ static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv) * before the ecobus check. */ - __raw_i915_write32(dev_priv, FORCEWAKE, 0); - __raw_posting_read(dev_priv, ECOBUS); + __raw_uncore_write32(uncore, FORCEWAKE, 0); + __raw_posting_read(uncore, ECOBUS); - fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER, + fw_domain_init(uncore, FW_DOMAIN_ID_RENDER, FORCEWAKE_MT, FORCEWAKE_MT_ACK); - spin_lock_irq(&dev_priv->uncore.lock); - fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_RENDER); - ecobus = __raw_i915_read32(dev_priv, ECOBUS); - fw_domains_put(dev_priv, FORCEWAKE_RENDER); - spin_unlock_irq(&dev_priv->uncore.lock); + spin_lock_irq(&uncore->lock); + fw_domains_get_with_thread_status(uncore, FORCEWAKE_RENDER); + ecobus = __raw_uncore_read32(uncore, ECOBUS); + fw_domains_put(uncore, FORCEWAKE_RENDER); + spin_unlock_irq(&uncore->lock); if (!(ecobus & FORCEWAKE_MT_ENABLE)) { DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n"); DRM_INFO("when using vblank-synced partial screen updates.\n"); - fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER, + fw_domain_init(uncore, FW_DOMAIN_ID_RENDER, FORCEWAKE, FORCEWAKE_ACK); } - } else if (IS_GEN(dev_priv, 6)) { - dev_priv->uncore.funcs.force_wake_get = + } else if (IS_GEN(i915, 6)) { + uncore->funcs.force_wake_get = fw_domains_get_with_thread_status; - dev_priv->uncore.funcs.force_wake_put = fw_domains_put; - fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER, + uncore->funcs.force_wake_put = fw_domains_put; + fw_domain_init(uncore, FW_DOMAIN_ID_RENDER, FORCEWAKE, FORCEWAKE_ACK); } /* All future platforms are expected to require complex power gating */ - WARN_ON(dev_priv->uncore.fw_domains == 0); + WARN_ON(uncore->fw_domains == 0); } -#define ASSIGN_FW_DOMAINS_TABLE(d) \ +#define ASSIGN_FW_DOMAINS_TABLE(uncore, d) \ { \ - dev_priv->uncore.fw_domains_table = \ + (uncore)->fw_domains_table = \ (struct intel_forcewake_range *)(d); \ - dev_priv->uncore.fw_domains_table_entries = ARRAY_SIZE((d)); \ + (uncore)->fw_domains_table_entries = ARRAY_SIZE((d)); \ } static int i915_pmic_bus_access_notifier(struct notifier_block *nb, @@ -1544,66 +1520,129 @@ static int i915_pmic_bus_access_notifier(struct notifier_block *nb, * the access. */ disable_rpm_wakeref_asserts(dev_priv); - intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); enable_rpm_wakeref_asserts(dev_priv); break; case MBI_PMIC_BUS_ACCESS_END: - intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); break; } return NOTIFY_OK; } -void intel_uncore_init(struct drm_i915_private *dev_priv) +static int uncore_mmio_setup(struct intel_uncore *uncore) +{ + struct drm_i915_private *i915 = uncore_to_i915(uncore); + struct pci_dev *pdev = i915->drm.pdev; + int mmio_bar; + int mmio_size; + + mmio_bar = IS_GEN(i915, 2) ? 1 : 0; + /* + * Before gen4, the registers and the GTT are behind different BARs. + * However, from gen4 onwards, the registers and the GTT are shared + * in the same BAR, so we want to restrict this ioremap from + * clobbering the GTT which we want ioremap_wc instead. 
Fortunately, + * the register BAR remains the same size for all the earlier + * generations up to Ironlake. + */ + if (INTEL_GEN(i915) < 5) + mmio_size = 512 * 1024; + else + mmio_size = 2 * 1024 * 1024; + uncore->regs = pci_iomap(pdev, mmio_bar, mmio_size); + if (uncore->regs == NULL) { + DRM_ERROR("failed to map registers\n"); + + return -EIO; + } + + return 0; +} + +static void uncore_mmio_cleanup(struct intel_uncore *uncore) +{ + struct drm_i915_private *i915 = uncore_to_i915(uncore); + struct pci_dev *pdev = i915->drm.pdev; + + pci_iounmap(pdev, uncore->regs); +} + + +int intel_uncore_init(struct intel_uncore *uncore) { - i915_check_vgpu(dev_priv); + struct drm_i915_private *i915 = uncore_to_i915(uncore); + int ret; + + ret = uncore_mmio_setup(uncore); + if (ret) + return ret; - intel_uncore_edram_detect(dev_priv); - intel_uncore_fw_domains_init(dev_priv); - __intel_uncore_early_sanitize(dev_priv, 0); + i915_check_vgpu(i915); - dev_priv->uncore.unclaimed_mmio_check = 1; - dev_priv->uncore.pmic_bus_access_nb.notifier_call = + if (INTEL_GEN(i915) > 5 && !intel_vgpu_active(i915)) + uncore->flags |= UNCORE_HAS_FORCEWAKE; + + intel_uncore_edram_detect(i915); + intel_uncore_fw_domains_init(uncore); + __intel_uncore_early_sanitize(uncore, 0); + + uncore->unclaimed_mmio_check = 1; + uncore->pmic_bus_access_nb.notifier_call = i915_pmic_bus_access_notifier; - if (IS_GEN_RANGE(dev_priv, 2, 4) || intel_vgpu_active(dev_priv)) { - ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen2); - ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen2); - } else if (IS_GEN(dev_priv, 5)) { - ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen5); - ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen5); - } else if (IS_GEN_RANGE(dev_priv, 6, 7)) { - ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen6); - - if (IS_VALLEYVIEW(dev_priv)) { - ASSIGN_FW_DOMAINS_TABLE(__vlv_fw_ranges); - ASSIGN_READ_MMIO_VFUNCS(dev_priv, fwtable); + uncore->rpm = &i915->runtime_pm; + + if (!intel_uncore_has_forcewake(uncore)) { + if (IS_GEN(i915, 5)) { + ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen5); + ASSIGN_READ_MMIO_VFUNCS(uncore, gen5); + } else { + ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen2); + ASSIGN_READ_MMIO_VFUNCS(uncore, gen2); + } + } else if (IS_GEN_RANGE(i915, 6, 7)) { + ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen6); + + if (IS_VALLEYVIEW(i915)) { + ASSIGN_FW_DOMAINS_TABLE(uncore, __vlv_fw_ranges); + ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable); } else { - ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen6); + ASSIGN_READ_MMIO_VFUNCS(uncore, gen6); } - } else if (IS_GEN(dev_priv, 8)) { - if (IS_CHERRYVIEW(dev_priv)) { - ASSIGN_FW_DOMAINS_TABLE(__chv_fw_ranges); - ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, fwtable); - ASSIGN_READ_MMIO_VFUNCS(dev_priv, fwtable); + } else if (IS_GEN(i915, 8)) { + if (IS_CHERRYVIEW(i915)) { + ASSIGN_FW_DOMAINS_TABLE(uncore, __chv_fw_ranges); + ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable); + ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable); } else { - ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen8); - ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen6); + ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen8); + ASSIGN_READ_MMIO_VFUNCS(uncore, gen6); } - } else if (IS_GEN_RANGE(dev_priv, 9, 10)) { - ASSIGN_FW_DOMAINS_TABLE(__gen9_fw_ranges); - ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, fwtable); - ASSIGN_READ_MMIO_VFUNCS(dev_priv, fwtable); + } else if (IS_GEN_RANGE(i915, 9, 10)) { + ASSIGN_FW_DOMAINS_TABLE(uncore, __gen9_fw_ranges); + ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable); + ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable); } else { - ASSIGN_FW_DOMAINS_TABLE(__gen11_fw_ranges); - ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen11_fwtable); - 
ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen11_fwtable); + ASSIGN_FW_DOMAINS_TABLE(uncore, __gen11_fw_ranges); + ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen11_fwtable); + ASSIGN_READ_MMIO_VFUNCS(uncore, gen11_fwtable); } - iosf_mbi_register_pmic_bus_access_notifier( - &dev_priv->uncore.pmic_bus_access_nb); + if (HAS_FPGA_DBG_UNCLAIMED(i915)) + uncore->flags |= UNCORE_HAS_FPGA_DBG_UNCLAIMED; + + if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) + uncore->flags |= UNCORE_HAS_DBG_UNCLAIMED; + + if (IS_GEN_RANGE(i915, 6, 7)) + uncore->flags |= UNCORE_HAS_FIFO; + + iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb); + + return 0; } /* @@ -1611,45 +1650,48 @@ void intel_uncore_init(struct drm_i915_private *dev_priv) * the forcewake domains. Prune them, to make sure they only reference existing * engines. */ -void intel_uncore_prune(struct drm_i915_private *dev_priv) +void intel_uncore_prune(struct intel_uncore *uncore) { - if (INTEL_GEN(dev_priv) >= 11) { - enum forcewake_domains fw_domains = dev_priv->uncore.fw_domains; + struct drm_i915_private *i915 = uncore_to_i915(uncore); + + if (INTEL_GEN(i915) >= 11) { + enum forcewake_domains fw_domains = uncore->fw_domains; enum forcewake_domain_id domain_id; int i; for (i = 0; i < I915_MAX_VCS; i++) { domain_id = FW_DOMAIN_ID_MEDIA_VDBOX0 + i; - if (HAS_ENGINE(dev_priv, _VCS(i))) + if (HAS_ENGINE(i915, _VCS(i))) continue; if (fw_domains & BIT(domain_id)) - fw_domain_fini(dev_priv, domain_id); + fw_domain_fini(uncore, domain_id); } for (i = 0; i < I915_MAX_VECS; i++) { domain_id = FW_DOMAIN_ID_MEDIA_VEBOX0 + i; - if (HAS_ENGINE(dev_priv, _VECS(i))) + if (HAS_ENGINE(i915, _VECS(i))) continue; if (fw_domains & BIT(domain_id)) - fw_domain_fini(dev_priv, domain_id); + fw_domain_fini(uncore, domain_id); } } } -void intel_uncore_fini(struct drm_i915_private *dev_priv) +void intel_uncore_fini(struct intel_uncore *uncore) { /* Paranoia: make sure we have disabled everything before we exit. */ - intel_uncore_sanitize(dev_priv); + intel_uncore_sanitize(uncore_to_i915(uncore)); iosf_mbi_punit_acquire(); iosf_mbi_unregister_pmic_bus_access_notifier_unlocked( - &dev_priv->uncore.pmic_bus_access_nb); - intel_uncore_forcewake_reset(dev_priv); + &uncore->pmic_bus_access_nb); + intel_uncore_forcewake_reset(uncore); iosf_mbi_punit_release(); + uncore_mmio_cleanup(uncore); } static const struct reg_whitelist { @@ -1717,7 +1759,7 @@ int i915_reg_read_ioctl(struct drm_device *dev, /** * __intel_wait_for_register_fw - wait until register matches expected state - * @dev_priv: the i915 device + * @uncore: the struct intel_uncore * @reg: the register to read * @mask: mask to apply to register value * @value: expected value @@ -1741,7 +1783,7 @@ int i915_reg_read_ioctl(struct drm_device *dev, * * Returns 0 if the register matches the desired condition, or -ETIMEOUT. 
*/ -int __intel_wait_for_register_fw(struct drm_i915_private *dev_priv, +int __intel_wait_for_register_fw(struct intel_uncore *uncore, i915_reg_t reg, u32 mask, u32 value, @@ -1750,7 +1792,7 @@ int __intel_wait_for_register_fw(struct drm_i915_private *dev_priv, u32 *out_value) { u32 uninitialized_var(reg_value); -#define done (((reg_value = I915_READ_FW(reg)) & mask) == value) +#define done (((reg_value = intel_uncore_read_fw(uncore, reg)) & mask) == value) int ret; /* Catch any overuse of this function */ @@ -1772,7 +1814,7 @@ int __intel_wait_for_register_fw(struct drm_i915_private *dev_priv, /** * __intel_wait_for_register - wait until register matches expected state - * @dev_priv: the i915 device + * @uncore: the struct intel_uncore * @reg: the register to read * @mask: mask to apply to register value * @value: expected value @@ -1789,33 +1831,34 @@ int __intel_wait_for_register_fw(struct drm_i915_private *dev_priv, * * Returns 0 if the register matches the desired condition, or -ETIMEOUT. */ -int __intel_wait_for_register(struct drm_i915_private *dev_priv, - i915_reg_t reg, - u32 mask, - u32 value, - unsigned int fast_timeout_us, - unsigned int slow_timeout_ms, - u32 *out_value) +int __intel_wait_for_register(struct intel_uncore *uncore, + i915_reg_t reg, + u32 mask, + u32 value, + unsigned int fast_timeout_us, + unsigned int slow_timeout_ms, + u32 *out_value) { unsigned fw = - intel_uncore_forcewake_for_reg(dev_priv, reg, FW_REG_READ); + intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ); u32 reg_value; int ret; might_sleep_if(slow_timeout_ms); - spin_lock_irq(&dev_priv->uncore.lock); - intel_uncore_forcewake_get__locked(dev_priv, fw); + spin_lock_irq(&uncore->lock); + intel_uncore_forcewake_get__locked(uncore, fw); - ret = __intel_wait_for_register_fw(dev_priv, + ret = __intel_wait_for_register_fw(uncore, reg, mask, value, fast_timeout_us, 0, &reg_value); - intel_uncore_forcewake_put__locked(dev_priv, fw); - spin_unlock_irq(&dev_priv->uncore.lock); + intel_uncore_forcewake_put__locked(uncore, fw); + spin_unlock_irq(&uncore->lock); if (ret && slow_timeout_ms) - ret = __wait_for(reg_value = I915_READ_NOTRACE(reg), + ret = __wait_for(reg_value = intel_uncore_read_notrace(uncore, + reg), (reg_value & mask) == value, slow_timeout_ms * 1000, 10, 1000); @@ -1828,82 +1871,90 @@ int __intel_wait_for_register(struct drm_i915_private *dev_priv, return ret; } -bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv) +bool intel_uncore_unclaimed_mmio(struct intel_uncore *uncore) { - return check_for_unclaimed_mmio(dev_priv); + return check_for_unclaimed_mmio(uncore); } bool -intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv) +intel_uncore_arm_unclaimed_mmio_detection(struct intel_uncore *uncore) { bool ret = false; - spin_lock_irq(&dev_priv->uncore.lock); + spin_lock_irq(&uncore->lock); - if (unlikely(dev_priv->uncore.unclaimed_mmio_check <= 0)) + if (unlikely(uncore->unclaimed_mmio_check <= 0)) goto out; - if (unlikely(intel_uncore_unclaimed_mmio(dev_priv))) { + if (unlikely(intel_uncore_unclaimed_mmio(uncore))) { if (!i915_modparams.mmio_debug) { DRM_DEBUG("Unclaimed register detected, " "enabling oneshot unclaimed register reporting. 
" "Please use i915.mmio_debug=N for more information.\n"); i915_modparams.mmio_debug++; } - dev_priv->uncore.unclaimed_mmio_check--; + uncore->unclaimed_mmio_check--; ret = true; } out: - spin_unlock_irq(&dev_priv->uncore.lock); + spin_unlock_irq(&uncore->lock); return ret; } static enum forcewake_domains -intel_uncore_forcewake_for_read(struct drm_i915_private *dev_priv, +intel_uncore_forcewake_for_read(struct intel_uncore *uncore, i915_reg_t reg) { + struct drm_i915_private *i915 = uncore_to_i915(uncore); u32 offset = i915_mmio_reg_offset(reg); enum forcewake_domains fw_domains; - if (INTEL_GEN(dev_priv) >= 11) { - fw_domains = __gen11_fwtable_reg_read_fw_domains(offset); - } else if (HAS_FWTABLE(dev_priv)) { - fw_domains = __fwtable_reg_read_fw_domains(offset); - } else if (INTEL_GEN(dev_priv) >= 6) { - fw_domains = __gen6_reg_read_fw_domains(offset); + if (INTEL_GEN(i915) >= 11) { + fw_domains = __gen11_fwtable_reg_read_fw_domains(uncore, offset); + } else if (HAS_FWTABLE(i915)) { + fw_domains = __fwtable_reg_read_fw_domains(uncore, offset); + } else if (INTEL_GEN(i915) >= 6) { + fw_domains = __gen6_reg_read_fw_domains(uncore, offset); } else { - WARN_ON(!IS_GEN_RANGE(dev_priv, 2, 5)); + /* on devices with FW we expect to hit one of the above cases */ + if (intel_uncore_has_forcewake(uncore)) + MISSING_CASE(INTEL_GEN(i915)); + fw_domains = 0; } - WARN_ON(fw_domains & ~dev_priv->uncore.fw_domains); + WARN_ON(fw_domains & ~uncore->fw_domains); return fw_domains; } static enum forcewake_domains -intel_uncore_forcewake_for_write(struct drm_i915_private *dev_priv, +intel_uncore_forcewake_for_write(struct intel_uncore *uncore, i915_reg_t reg) { + struct drm_i915_private *i915 = uncore_to_i915(uncore); u32 offset = i915_mmio_reg_offset(reg); enum forcewake_domains fw_domains; - if (INTEL_GEN(dev_priv) >= 11) { - fw_domains = __gen11_fwtable_reg_write_fw_domains(offset); - } else if (HAS_FWTABLE(dev_priv) && !IS_VALLEYVIEW(dev_priv)) { - fw_domains = __fwtable_reg_write_fw_domains(offset); - } else if (IS_GEN(dev_priv, 8)) { - fw_domains = __gen8_reg_write_fw_domains(offset); - } else if (IS_GEN_RANGE(dev_priv, 6, 7)) { + if (INTEL_GEN(i915) >= 11) { + fw_domains = __gen11_fwtable_reg_write_fw_domains(uncore, offset); + } else if (HAS_FWTABLE(i915) && !IS_VALLEYVIEW(i915)) { + fw_domains = __fwtable_reg_write_fw_domains(uncore, offset); + } else if (IS_GEN(i915, 8)) { + fw_domains = __gen8_reg_write_fw_domains(uncore, offset); + } else if (IS_GEN_RANGE(i915, 6, 7)) { fw_domains = FORCEWAKE_RENDER; } else { - WARN_ON(!IS_GEN_RANGE(dev_priv, 2, 5)); + /* on devices with FW we expect to hit one of the above cases */ + if (intel_uncore_has_forcewake(uncore)) + MISSING_CASE(INTEL_GEN(i915)); + fw_domains = 0; } - WARN_ON(fw_domains & ~dev_priv->uncore.fw_domains); + WARN_ON(fw_domains & ~uncore->fw_domains); return fw_domains; } @@ -1911,7 +1962,7 @@ intel_uncore_forcewake_for_write(struct drm_i915_private *dev_priv, /** * intel_uncore_forcewake_for_reg - which forcewake domains are needed to access * a register - * @dev_priv: pointer to struct drm_i915_private + * @uncore: pointer to struct intel_uncore * @reg: register in question * @op: operation bitmask of FW_REG_READ and/or FW_REG_WRITE * @@ -1923,21 +1974,21 @@ intel_uncore_forcewake_for_write(struct drm_i915_private *dev_priv, * callers to do FIFO management on their own or risk losing writes. 
*/ enum forcewake_domains -intel_uncore_forcewake_for_reg(struct drm_i915_private *dev_priv, +intel_uncore_forcewake_for_reg(struct intel_uncore *uncore, i915_reg_t reg, unsigned int op) { enum forcewake_domains fw_domains = 0; WARN_ON(!op); - if (intel_vgpu_active(dev_priv)) + if (!intel_uncore_has_forcewake(uncore)) return 0; if (op & FW_REG_READ) - fw_domains = intel_uncore_forcewake_for_read(dev_priv, reg); + fw_domains = intel_uncore_forcewake_for_read(uncore, reg); if (op & FW_REG_WRITE) - fw_domains |= intel_uncore_forcewake_for_write(dev_priv, reg); + fw_domains |= intel_uncore_forcewake_for_write(uncore, reg); return fw_domains; } diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h index e5e157d288de..50d226f68753 100644 --- a/drivers/gpu/drm/i915/intel_uncore.h +++ b/drivers/gpu/drm/i915/intel_uncore.h @@ -28,10 +28,13 @@ #include <linux/spinlock.h> #include <linux/notifier.h> #include <linux/hrtimer.h> +#include <linux/io-64-nonatomic-lo-hi.h> #include "i915_reg.h" struct drm_i915_private; +struct i915_runtime_pm; +struct intel_uncore; enum forcewake_domain_id { FW_DOMAIN_ID_RENDER = 0, @@ -62,25 +65,25 @@ enum forcewake_domains { }; struct intel_uncore_funcs { - void (*force_wake_get)(struct drm_i915_private *dev_priv, + void (*force_wake_get)(struct intel_uncore *uncore, enum forcewake_domains domains); - void (*force_wake_put)(struct drm_i915_private *dev_priv, + void (*force_wake_put)(struct intel_uncore *uncore, enum forcewake_domains domains); - u8 (*mmio_readb)(struct drm_i915_private *dev_priv, + u8 (*mmio_readb)(struct intel_uncore *uncore, i915_reg_t r, bool trace); - u16 (*mmio_readw)(struct drm_i915_private *dev_priv, + u16 (*mmio_readw)(struct intel_uncore *uncore, i915_reg_t r, bool trace); - u32 (*mmio_readl)(struct drm_i915_private *dev_priv, + u32 (*mmio_readl)(struct intel_uncore *uncore, i915_reg_t r, bool trace); - u64 (*mmio_readq)(struct drm_i915_private *dev_priv, + u64 (*mmio_readq)(struct intel_uncore *uncore, i915_reg_t r, bool trace); - void (*mmio_writeb)(struct drm_i915_private *dev_priv, + void (*mmio_writeb)(struct intel_uncore *uncore, i915_reg_t r, u8 val, bool trace); - void (*mmio_writew)(struct drm_i915_private *dev_priv, + void (*mmio_writew)(struct intel_uncore *uncore, i915_reg_t r, u16 val, bool trace); - void (*mmio_writel)(struct drm_i915_private *dev_priv, + void (*mmio_writel)(struct intel_uncore *uncore, i915_reg_t r, u32 val, bool trace); }; @@ -92,8 +95,18 @@ struct intel_forcewake_range { }; struct intel_uncore { + void __iomem *regs; + + struct i915_runtime_pm *rpm; + spinlock_t lock; /** lock is also taken in irq contexts. 
*/ + unsigned int flags; +#define UNCORE_HAS_FORCEWAKE BIT(0) +#define UNCORE_HAS_FPGA_DBG_UNCLAIMED BIT(1) +#define UNCORE_HAS_DBG_UNCLAIMED BIT(2) +#define UNCORE_HAS_FIFO BIT(3) + const struct intel_forcewake_range *fw_domains_table; unsigned int fw_domains_table_entries; @@ -106,18 +119,14 @@ struct intel_uncore { enum forcewake_domains fw_domains_active; enum forcewake_domains fw_domains_saved; /* user domains saved for S3 */ - u32 fw_set; - u32 fw_clear; - u32 fw_reset; - struct intel_uncore_forcewake_domain { enum forcewake_domain_id id; enum forcewake_domains mask; unsigned int wake_count; bool active; struct hrtimer timer; - i915_reg_t reg_set; - i915_reg_t reg_ack; + u32 __iomem *reg_set; + u32 __iomem *reg_ack; } fw_domain[FW_DOMAIN_ID_COUNT]; struct { @@ -131,86 +140,242 @@ struct intel_uncore { }; /* Iterate over initialised fw domains */ -#define for_each_fw_domain_masked(domain__, mask__, dev_priv__, tmp__) \ +#define for_each_fw_domain_masked(domain__, mask__, uncore__, tmp__) \ for (tmp__ = (mask__); \ - tmp__ ? (domain__ = &(dev_priv__)->uncore.fw_domain[__mask_next_bit(tmp__)]), 1 : 0;) + tmp__ ? (domain__ = &(uncore__)->fw_domain[__mask_next_bit(tmp__)]), 1 : 0;) + +#define for_each_fw_domain(domain__, uncore__, tmp__) \ + for_each_fw_domain_masked(domain__, (uncore__)->fw_domains, uncore__, tmp__) + +static inline struct intel_uncore * +forcewake_domain_to_uncore(const struct intel_uncore_forcewake_domain *d) +{ + return container_of(d, struct intel_uncore, fw_domain[d->id]); +} + +static inline bool +intel_uncore_has_forcewake(const struct intel_uncore *uncore) +{ + return uncore->flags & UNCORE_HAS_FORCEWAKE; +} + +static inline bool +intel_uncore_has_fpga_dbg_unclaimed(const struct intel_uncore *uncore) +{ + return uncore->flags & UNCORE_HAS_FPGA_DBG_UNCLAIMED; +} -#define for_each_fw_domain(domain__, dev_priv__, tmp__) \ - for_each_fw_domain_masked(domain__, (dev_priv__)->uncore.fw_domains, dev_priv__, tmp__) +static inline bool +intel_uncore_has_dbg_unclaimed(const struct intel_uncore *uncore) +{ + return uncore->flags & UNCORE_HAS_DBG_UNCLAIMED; +} +static inline bool +intel_uncore_has_fifo(const struct intel_uncore *uncore) +{ + return uncore->flags & UNCORE_HAS_FIFO; +} void intel_uncore_sanitize(struct drm_i915_private *dev_priv); -void intel_uncore_init(struct drm_i915_private *dev_priv); -void intel_uncore_prune(struct drm_i915_private *dev_priv); -bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv); -bool intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv); -void intel_uncore_fini(struct drm_i915_private *dev_priv); -void intel_uncore_suspend(struct drm_i915_private *dev_priv); -void intel_uncore_resume_early(struct drm_i915_private *dev_priv); -void intel_uncore_runtime_resume(struct drm_i915_private *dev_priv); +int intel_uncore_init(struct intel_uncore *uncore); +void intel_uncore_prune(struct intel_uncore *uncore); +bool intel_uncore_unclaimed_mmio(struct intel_uncore *uncore); +bool intel_uncore_arm_unclaimed_mmio_detection(struct intel_uncore *uncore); +void intel_uncore_fini(struct intel_uncore *uncore); +void intel_uncore_suspend(struct intel_uncore *uncore); +void intel_uncore_resume_early(struct intel_uncore *uncore); +void intel_uncore_runtime_resume(struct intel_uncore *uncore); u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv); -void assert_forcewakes_inactive(struct drm_i915_private *dev_priv); -void assert_forcewakes_active(struct drm_i915_private *dev_priv, +void 
assert_forcewakes_inactive(struct intel_uncore *uncore); +void assert_forcewakes_active(struct intel_uncore *uncore, enum forcewake_domains fw_domains); const char *intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id); enum forcewake_domains -intel_uncore_forcewake_for_reg(struct drm_i915_private *dev_priv, +intel_uncore_forcewake_for_reg(struct intel_uncore *uncore, i915_reg_t reg, unsigned int op); #define FW_REG_READ (1) #define FW_REG_WRITE (2) -void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv, +void intel_uncore_forcewake_get(struct intel_uncore *uncore, enum forcewake_domains domains); -void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv, +void intel_uncore_forcewake_put(struct intel_uncore *uncore, enum forcewake_domains domains); /* Like above but the caller must manage the uncore.lock itself. * Must be used with I915_READ_FW and friends. */ -void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv, +void intel_uncore_forcewake_get__locked(struct intel_uncore *uncore, enum forcewake_domains domains); -void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv, +void intel_uncore_forcewake_put__locked(struct intel_uncore *uncore, enum forcewake_domains domains); -void intel_uncore_forcewake_user_get(struct drm_i915_private *dev_priv); -void intel_uncore_forcewake_user_put(struct drm_i915_private *dev_priv); +void intel_uncore_forcewake_user_get(struct intel_uncore *uncore); +void intel_uncore_forcewake_user_put(struct intel_uncore *uncore); -int __intel_wait_for_register(struct drm_i915_private *dev_priv, +int __intel_wait_for_register(struct intel_uncore *uncore, i915_reg_t reg, u32 mask, u32 value, unsigned int fast_timeout_us, unsigned int slow_timeout_ms, u32 *out_value); -static inline -int intel_wait_for_register(struct drm_i915_private *dev_priv, - i915_reg_t reg, - u32 mask, - u32 value, - unsigned int timeout_ms) +static inline int +intel_wait_for_register(struct intel_uncore *uncore, + i915_reg_t reg, + u32 mask, + u32 value, + unsigned int timeout_ms) { - return __intel_wait_for_register(dev_priv, reg, mask, value, 2, + return __intel_wait_for_register(uncore, reg, mask, value, 2, timeout_ms, NULL); } -int __intel_wait_for_register_fw(struct drm_i915_private *dev_priv, + +int __intel_wait_for_register_fw(struct intel_uncore *uncore, i915_reg_t reg, u32 mask, u32 value, unsigned int fast_timeout_us, unsigned int slow_timeout_ms, u32 *out_value); -static inline -int intel_wait_for_register_fw(struct drm_i915_private *dev_priv, - i915_reg_t reg, - u32 mask, - u32 value, +static inline int +intel_wait_for_register_fw(struct intel_uncore *uncore, + i915_reg_t reg, + u32 mask, + u32 value, unsigned int timeout_ms) { - return __intel_wait_for_register_fw(dev_priv, reg, mask, value, + return __intel_wait_for_register_fw(uncore, reg, mask, value, 2, timeout_ms, NULL); } +/* register access functions */ +#define __raw_read(x__, s__) \ +static inline u##x__ __raw_uncore_read##x__(const struct intel_uncore *uncore, \ + i915_reg_t reg) \ +{ \ + return read##s__(uncore->regs + i915_mmio_reg_offset(reg)); \ +} + +#define __raw_write(x__, s__) \ +static inline void __raw_uncore_write##x__(const struct intel_uncore *uncore, \ + i915_reg_t reg, u##x__ val) \ +{ \ + write##s__(val, uncore->regs + i915_mmio_reg_offset(reg)); \ +} +__raw_read(8, b) +__raw_read(16, w) +__raw_read(32, l) +__raw_read(64, q) + +__raw_write(8, b) +__raw_write(16, w) +__raw_write(32, l) +__raw_write(64, q) + +#undef __raw_read +#undef 
__raw_write + +#define __uncore_read(name__, x__, s__, trace__) \ +static inline u##x__ intel_uncore_##name__(struct intel_uncore *uncore, \ + i915_reg_t reg) \ +{ \ + return uncore->funcs.mmio_read##s__(uncore, reg, (trace__)); \ +} + +#define __uncore_write(name__, x__, s__, trace__) \ +static inline void intel_uncore_##name__(struct intel_uncore *uncore, \ + i915_reg_t reg, u##x__ val) \ +{ \ + uncore->funcs.mmio_write##s__(uncore, reg, val, (trace__)); \ +} + +__uncore_read(read8, 8, b, true) +__uncore_read(read16, 16, w, true) +__uncore_read(read, 32, l, true) +__uncore_read(read16_notrace, 16, w, false) +__uncore_read(read_notrace, 32, l, false) + +__uncore_write(write8, 8, b, true) +__uncore_write(write16, 16, w, true) +__uncore_write(write, 32, l, true) +__uncore_write(write_notrace, 32, l, false) + +/* Be very careful with read/write 64-bit values. On 32-bit machines, they + * will be implemented using 2 32-bit writes in an arbitrary order with + * an arbitrary delay between them. This can cause the hardware to + * act upon the intermediate value, possibly leading to corruption and + * machine death. For this reason we do not support I915_WRITE64, or + * uncore->funcs.mmio_writeq. + * + * When reading a 64-bit value as two 32-bit values, the delay may cause + * the two reads to mismatch, e.g. a timestamp overflowing. Also note that + * occasionally a 64-bit register does not actually support a full readq + * and must be read using two 32-bit reads. + * + * You have been warned. + */ +__uncore_read(read64, 64, q, true) + +static inline u64 +intel_uncore_read64_2x32(struct intel_uncore *uncore, + i915_reg_t lower_reg, i915_reg_t upper_reg) +{ + u32 upper, lower, old_upper, loop = 0; + upper = intel_uncore_read(uncore, upper_reg); + do { + old_upper = upper; + lower = intel_uncore_read(uncore, lower_reg); + upper = intel_uncore_read(uncore, upper_reg); + } while (upper != old_upper && loop++ < 2); + return (u64)upper << 32 | lower; +} + +#define intel_uncore_posting_read(...) ((void)intel_uncore_read_notrace(__VA_ARGS__)) +#define intel_uncore_posting_read16(...) ((void)intel_uncore_read16_notrace(__VA_ARGS__)) + +#undef __uncore_read +#undef __uncore_write + +/* These are untraced mmio-accessors that are only valid to be used inside + * critical sections, such as inside IRQ handlers, where forcewake is explicitly + * controlled. + * + * Think twice, and think again, before using these. + * + * As an example, these accessors can possibly be used between: + * + * spin_lock_irq(&uncore->lock); + * intel_uncore_forcewake_get__locked(); + * + * and + * + * intel_uncore_forcewake_put__locked(); + * spin_unlock_irq(&uncore->lock); + * + * + * Note: some registers may not need forcewake held, so + * intel_uncore_forcewake_{get,put} can be omitted, see + * intel_uncore_forcewake_for_reg(). + * + * Certain architectures will die if the same cacheline is concurrently accessed + * by different clients (e.g. on Ivybridge). Access to registers should + * therefore generally be serialised, by either the dev_priv->uncore.lock or + * a more localised lock guarding all access to that bank of registers. + */ +#define intel_uncore_read_fw(...) __raw_uncore_read32(__VA_ARGS__) +#define intel_uncore_write_fw(...) __raw_uncore_write32(__VA_ARGS__) +#define intel_uncore_write64_fw(...) __raw_uncore_write64(__VA_ARGS__) +#define intel_uncore_posting_read_fw(...) 
((void)intel_uncore_read_fw(__VA_ARGS__)) + +static inline void intel_uncore_rmw_or_fw(struct intel_uncore *uncore, + i915_reg_t reg, u32 or_val) +{ + intel_uncore_write_fw(uncore, reg, + intel_uncore_read_fw(uncore, reg) | or_val); +} + #define raw_reg_read(base, reg) \ readl(base + i915_mmio_reg_offset(reg)) #define raw_reg_write(base, reg, value) \ diff --git a/drivers/gpu/drm/i915/intel_vbt_defs.h b/drivers/gpu/drm/i915/intel_vbt_defs.h index bf3662ad5fed..fdbbb9a53804 100644 --- a/drivers/gpu/drm/i915/intel_vbt_defs.h +++ b/drivers/gpu/drm/i915/intel_vbt_defs.h @@ -772,6 +772,9 @@ struct psr_table { /* TP wake up time in multiple of 100 */ u16 tp1_wakeup_time; u16 tp2_tp3_wakeup_time; + + /* PSR2 TP2/TP3 wakeup time for 16 panels */ + u32 psr2_tp2_tp3_wakeup_time; } __packed; struct bdb_psr { diff --git a/drivers/gpu/drm/i915/intel_workarounds.c b/drivers/gpu/drm/i915/intel_workarounds.c index 15f4a6dee5aa..a04dbc58ec1c 100644 --- a/drivers/gpu/drm/i915/intel_workarounds.c +++ b/drivers/gpu/drm/i915/intel_workarounds.c @@ -555,6 +555,11 @@ static void icl_ctx_workarounds_init(struct intel_engine_cs *engine) GEN10_CACHE_MODE_SS, 0, /* write-only, so skip validation */ _MASKED_BIT_ENABLE(FLOAT_BLEND_OPTIMIZATION_ENABLE)); + + /* WaDisableGPGPUMidThreadPreemption:icl */ + WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1, + GEN9_PREEMPT_GPGPU_LEVEL_MASK, + GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL); } void intel_engine_init_ctx_wa(struct intel_engine_cs *engine) @@ -564,26 +569,26 @@ void intel_engine_init_ctx_wa(struct intel_engine_cs *engine) wa_init_start(wal, "context"); - if (INTEL_GEN(i915) < 8) - return; - else if (IS_BROADWELL(i915)) - bdw_ctx_workarounds_init(engine); - else if (IS_CHERRYVIEW(i915)) - chv_ctx_workarounds_init(engine); - else if (IS_SKYLAKE(i915)) - skl_ctx_workarounds_init(engine); - else if (IS_BROXTON(i915)) - bxt_ctx_workarounds_init(engine); - else if (IS_KABYLAKE(i915)) - kbl_ctx_workarounds_init(engine); - else if (IS_GEMINILAKE(i915)) - glk_ctx_workarounds_init(engine); - else if (IS_COFFEELAKE(i915)) - cfl_ctx_workarounds_init(engine); + if (IS_ICELAKE(i915)) + icl_ctx_workarounds_init(engine); else if (IS_CANNONLAKE(i915)) cnl_ctx_workarounds_init(engine); - else if (IS_ICELAKE(i915)) - icl_ctx_workarounds_init(engine); + else if (IS_COFFEELAKE(i915)) + cfl_ctx_workarounds_init(engine); + else if (IS_GEMINILAKE(i915)) + glk_ctx_workarounds_init(engine); + else if (IS_KABYLAKE(i915)) + kbl_ctx_workarounds_init(engine); + else if (IS_BROXTON(i915)) + bxt_ctx_workarounds_init(engine); + else if (IS_SKYLAKE(i915)) + skl_ctx_workarounds_init(engine); + else if (IS_CHERRYVIEW(i915)) + chv_ctx_workarounds_init(engine); + else if (IS_BROADWELL(i915)) + bdw_ctx_workarounds_init(engine); + else if (INTEL_GEN(i915) < 8) + return; else MISSING_CASE(INTEL_GEN(i915)); @@ -862,26 +867,22 @@ icl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal) static void gt_init_workarounds(struct drm_i915_private *i915, struct i915_wa_list *wal) { - if (INTEL_GEN(i915) < 8) - return; - else if (IS_BROADWELL(i915)) - return; - else if (IS_CHERRYVIEW(i915)) - return; - else if (IS_SKYLAKE(i915)) - skl_gt_workarounds_init(i915, wal); - else if (IS_BROXTON(i915)) - bxt_gt_workarounds_init(i915, wal); - else if (IS_KABYLAKE(i915)) - kbl_gt_workarounds_init(i915, wal); - else if (IS_GEMINILAKE(i915)) - glk_gt_workarounds_init(i915, wal); - else if (IS_COFFEELAKE(i915)) - cfl_gt_workarounds_init(i915, wal); + if (IS_ICELAKE(i915)) + icl_gt_workarounds_init(i915, wal); else if 
(IS_CANNONLAKE(i915)) cnl_gt_workarounds_init(i915, wal); - else if (IS_ICELAKE(i915)) - icl_gt_workarounds_init(i915, wal); + else if (IS_COFFEELAKE(i915)) + cfl_gt_workarounds_init(i915, wal); + else if (IS_GEMINILAKE(i915)) + glk_gt_workarounds_init(i915, wal); + else if (IS_KABYLAKE(i915)) + kbl_gt_workarounds_init(i915, wal); + else if (IS_BROXTON(i915)) + bxt_gt_workarounds_init(i915, wal); + else if (IS_SKYLAKE(i915)) + skl_gt_workarounds_init(i915, wal); + else if (INTEL_GEN(i915) <= 8) + return; else MISSING_CASE(INTEL_GEN(i915)); } @@ -904,7 +905,7 @@ wal_get_fw_for_rmw(struct drm_i915_private *dev_priv, unsigned int i; for (i = 0, wa = wal->list; i < wal->count; i++, wa++) - fw |= intel_uncore_forcewake_for_reg(dev_priv, + fw |= intel_uncore_forcewake_for_reg(&dev_priv->uncore, wa->reg, FW_REG_READ | FW_REG_WRITE); @@ -926,7 +927,7 @@ wa_list_apply(struct drm_i915_private *dev_priv, const struct i915_wa_list *wal) fw = wal_get_fw_for_rmw(dev_priv, wal); spin_lock_irqsave(&dev_priv->uncore.lock, flags); - intel_uncore_forcewake_get__locked(dev_priv, fw); + intel_uncore_forcewake_get__locked(&dev_priv->uncore, fw); for (i = 0, wa = wal->list; i < wal->count; i++, wa++) { u32 val = I915_READ_FW(wa->reg); @@ -937,7 +938,7 @@ wa_list_apply(struct drm_i915_private *dev_priv, const struct i915_wa_list *wal) I915_WRITE_FW(wa->reg, val); } - intel_uncore_forcewake_put__locked(dev_priv, fw); + intel_uncore_forcewake_put__locked(&dev_priv->uncore, fw); spin_unlock_irqrestore(&dev_priv->uncore.lock, flags); } @@ -1059,30 +1060,26 @@ void intel_engine_init_whitelist(struct intel_engine_cs *engine) struct drm_i915_private *i915 = engine->i915; struct i915_wa_list *w = &engine->whitelist; - GEM_BUG_ON(engine->id != RCS); + GEM_BUG_ON(engine->id != RCS0); wa_init_start(w, "whitelist"); - if (INTEL_GEN(i915) < 8) - return; - else if (IS_BROADWELL(i915)) - return; - else if (IS_CHERRYVIEW(i915)) - return; - else if (IS_SKYLAKE(i915)) - skl_whitelist_build(w); - else if (IS_BROXTON(i915)) - bxt_whitelist_build(w); - else if (IS_KABYLAKE(i915)) - kbl_whitelist_build(w); - else if (IS_GEMINILAKE(i915)) - glk_whitelist_build(w); - else if (IS_COFFEELAKE(i915)) - cfl_whitelist_build(w); + if (IS_ICELAKE(i915)) + icl_whitelist_build(w); else if (IS_CANNONLAKE(i915)) cnl_whitelist_build(w); - else if (IS_ICELAKE(i915)) - icl_whitelist_build(w); + else if (IS_COFFEELAKE(i915)) + cfl_whitelist_build(w); + else if (IS_GEMINILAKE(i915)) + glk_whitelist_build(w); + else if (IS_KABYLAKE(i915)) + kbl_whitelist_build(w); + else if (IS_BROXTON(i915)) + bxt_whitelist_build(w); + else if (IS_SKYLAKE(i915)) + skl_whitelist_build(w); + else if (INTEL_GEN(i915) <= 8) + return; else MISSING_CASE(INTEL_GEN(i915)); @@ -1170,8 +1167,8 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) GEN7_DISABLE_SAMPLER_PREFETCH); } - if (IS_GEN(i915, 9) || IS_CANNONLAKE(i915)) { - /* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,cnl */ + if (IS_GEN_RANGE(i915, 9, 11)) { + /* FtrPerCtxtPreemptionGranularityControl:skl,bxt,kbl,cfl,cnl,icl */ wa_masked_en(wal, GEN7_FF_SLICE_CS_CHICKEN1, GEN9_FFSC_PERCTX_PREEMPT_CTRL); @@ -1236,7 +1233,7 @@ engine_init_workarounds(struct intel_engine_cs *engine, struct i915_wa_list *wal if (I915_SELFTEST_ONLY(INTEL_GEN(engine->i915) < 8)) return; - if (engine->id == RCS) + if (engine->id == RCS0) rcs_engine_wa_init(engine, wal); else xcs_engine_wa_init(engine, wal); diff --git a/drivers/gpu/drm/i915/intel_workarounds.h b/drivers/gpu/drm/i915/intel_workarounds.h 
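Aside on the wal_get_fw_for_rmw()/wa_list_apply() hunks in intel_workarounds.c above: the list is walked once to union the forcewake domains every register needs, and only then are the read-modify-writes performed, bracketed by a single forcewake get/put while uncore.lock is held. The stand-alone sketch below (not part of the patch) models only that shape; the wa struct, regfile array and forcewake_get/put printouts are hypothetical stand-ins, not the i915 API.

#include <stdio.h>

/* Hypothetical stand-in for an i915_wa list entry, plus a fake MMIO space. */
struct wa {
	unsigned int reg;	/* register index                     */
	unsigned int clear;	/* bits to clear                      */
	unsigned int set;	/* bits to set                        */
	unsigned int fw;	/* forcewake domains this reg needs   */
};

static unsigned int regfile[16];

static unsigned int fw_for_rmw(const struct wa *list, int count)
{
	unsigned int fw = 0;

	/* Union of the domains required by every register in the list. */
	for (int i = 0; i < count; i++)
		fw |= list[i].fw;

	return fw;
}

static void wa_list_apply_sketch(const struct wa *list, int count)
{
	unsigned int fw = fw_for_rmw(list, count);

	/* One get/put pair brackets the whole list rather than one per register. */
	printf("forcewake_get(%#x)\n", fw);

	for (int i = 0; i < count; i++) {
		unsigned int val = regfile[list[i].reg];	/* read */

		val &= ~list[i].clear;				/* modify */
		val |= list[i].set;
		regfile[list[i].reg] = val;			/* write back */
	}

	printf("forcewake_put(%#x)\n", fw);
}

int main(void)
{
	const struct wa wal[] = {
		{ .reg = 1, .clear = 0xf, .set = 0x3, .fw = 0x1 },
		{ .reg = 2, .clear = 0x1, .set = 0x1, .fw = 0x2 },
	};

	wa_list_apply_sketch(wal, 2);
	printf("reg1=%#x reg2=%#x\n", regfile[1], regfile[2]);
	return 0;
}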
index 7c734714b05e..a1bf51c611a9 100644 --- a/drivers/gpu/drm/i915/intel_workarounds.h +++ b/drivers/gpu/drm/i915/intel_workarounds.h @@ -9,18 +9,7 @@ #include <linux/slab.h> -struct i915_wa { - i915_reg_t reg; - u32 mask; - u32 val; -}; - -struct i915_wa_list { - const char *name; - struct i915_wa *list; - unsigned int count; - unsigned int wa_count; -}; +#include "intel_workarounds_types.h" static inline void intel_wa_list_free(struct i915_wa_list *wal) { diff --git a/drivers/gpu/drm/i915/intel_workarounds_types.h b/drivers/gpu/drm/i915/intel_workarounds_types.h new file mode 100644 index 000000000000..30918da180ff --- /dev/null +++ b/drivers/gpu/drm/i915/intel_workarounds_types.h @@ -0,0 +1,27 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2014-2018 Intel Corporation + */ + +#ifndef __INTEL_WORKAROUNDS_TYPES_H__ +#define __INTEL_WORKAROUNDS_TYPES_H__ + +#include <linux/types.h> + +#include "i915_reg.h" + +struct i915_wa { + i915_reg_t reg; + u32 mask; + u32 val; +}; + +struct i915_wa_list { + const char *name; + struct i915_wa *list; + unsigned int count; + unsigned int wa_count; +}; + +#endif /* __INTEL_WORKAROUNDS_TYPES_H__ */ diff --git a/drivers/gpu/drm/i915/selftests/huge_gem_object.c b/drivers/gpu/drm/i915/selftests/huge_gem_object.c index 391f3d9ffdf1..419fd4d6a8f0 100644 --- a/drivers/gpu/drm/i915/selftests/huge_gem_object.c +++ b/drivers/gpu/drm/i915/selftests/huge_gem_object.c @@ -122,7 +122,7 @@ huge_gem_object(struct drm_i915_private *i915, if (overflows_type(dma_size, obj->base.size)) return ERR_PTR(-E2BIG); - obj = i915_gem_object_alloc(i915); + obj = i915_gem_object_alloc(); if (!obj) return ERR_PTR(-ENOMEM); diff --git a/drivers/gpu/drm/i915/selftests/huge_pages.c b/drivers/gpu/drm/i915/selftests/huge_pages.c index a9a2fa35876f..90721b54e7ae 100644 --- a/drivers/gpu/drm/i915/selftests/huge_pages.c +++ b/drivers/gpu/drm/i915/selftests/huge_pages.c @@ -171,7 +171,7 @@ huge_pages_object(struct drm_i915_private *i915, if (overflows_type(size, obj->base.size)) return ERR_PTR(-E2BIG); - obj = i915_gem_object_alloc(i915); + obj = i915_gem_object_alloc(); if (!obj) return ERR_PTR(-ENOMEM); @@ -320,7 +320,7 @@ fake_huge_pages_object(struct drm_i915_private *i915, u64 size, bool single) if (overflows_type(size, obj->base.size)) return ERR_PTR(-E2BIG); - obj = i915_gem_object_alloc(i915); + obj = i915_gem_object_alloc(); if (!obj) return ERR_PTR(-ENOMEM); @@ -908,10 +908,6 @@ gpu_write_dw(struct i915_vma *vma, u64 offset, u32 val) if (IS_ERR(obj)) return ERR_CAST(obj); - err = i915_gem_object_set_to_wc_domain(obj, true); - if (err) - goto err; - cmd = i915_gem_object_pin_map(obj, I915_MAP_WC); if (IS_ERR(cmd)) { err = PTR_ERR(cmd); @@ -1449,7 +1445,7 @@ static int igt_ppgtt_pin_update(void *arg) * huge-gtt-pages. */ - if (!ppgtt || !i915_vm_is_48bit(&ppgtt->vm)) { + if (!ppgtt || !i915_vm_is_4lvl(&ppgtt->vm)) { pr_info("48b PPGTT not supported, skipping\n"); return 0; } @@ -1535,7 +1531,7 @@ static int igt_ppgtt_pin_update(void *arg) * land in the now stale 2M page. 
*/ - err = gpu_write(vma, ctx, dev_priv->engine[RCS], 0, 0xdeadbeaf); + err = gpu_write(vma, ctx, dev_priv->engine[RCS0], 0, 0xdeadbeaf); if (err) goto out_unpin; @@ -1584,6 +1580,7 @@ static int igt_tmpfs_fallback(void *arg) } *vaddr = 0xdeadbeaf; + __i915_gem_object_flush_map(obj, 0, 64); i915_gem_object_unpin_map(obj); vma = i915_vma_instance(obj, vm, NULL); @@ -1653,7 +1650,7 @@ static int igt_shrink_thp(void *arg) if (err) goto out_unpin; - err = gpu_write(vma, ctx, i915->engine[RCS], 0, 0xdeadbeaf); + err = gpu_write(vma, ctx, i915->engine[RCS0], 0, 0xdeadbeaf); if (err) goto out_unpin; @@ -1709,16 +1706,17 @@ int i915_gem_huge_page_mock_selftests(void) return -ENOMEM; /* Pretend to be a device which supports the 48b PPGTT */ - mkwrite_device_info(dev_priv)->ppgtt = INTEL_PPGTT_FULL_4LVL; + mkwrite_device_info(dev_priv)->ppgtt_type = INTEL_PPGTT_FULL; + mkwrite_device_info(dev_priv)->ppgtt_size = 48; mutex_lock(&dev_priv->drm.struct_mutex); - ppgtt = i915_ppgtt_create(dev_priv, ERR_PTR(-ENODEV)); + ppgtt = i915_ppgtt_create(dev_priv); if (IS_ERR(ppgtt)) { err = PTR_ERR(ppgtt); goto out_unlock; } - if (!i915_vm_is_48bit(&ppgtt->vm)) { + if (!i915_vm_is_4lvl(&ppgtt->vm)) { pr_err("failed to create 48b PPGTT\n"); err = -EINVAL; goto out_close; @@ -1734,7 +1732,6 @@ int i915_gem_huge_page_mock_selftests(void) err = i915_subtests(tests, ppgtt); out_close: - i915_ppgtt_close(&ppgtt->vm); i915_ppgtt_put(ppgtt); out_unlock: @@ -1764,7 +1761,7 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv) return 0; } - if (i915_terminally_wedged(&dev_priv->gpu_error)) + if (i915_terminally_wedged(dev_priv)) return 0; file = mock_file(dev_priv); diff --git a/drivers/gpu/drm/i915/selftests/i915_active.c b/drivers/gpu/drm/i915/selftests/i915_active.c index 337b1f98b923..27d8f853111b 100644 --- a/drivers/gpu/drm/i915/selftests/i915_active.c +++ b/drivers/gpu/drm/i915/selftests/i915_active.c @@ -150,7 +150,7 @@ int i915_active_live_selftests(struct drm_i915_private *i915) SUBTEST(live_active_retire), }; - if (i915_terminally_wedged(&i915->gpu_error)) + if (i915_terminally_wedged(i915)) return 0; return i915_subtests(tests, i915); diff --git a/drivers/gpu/drm/i915/selftests/i915_gem.c b/drivers/gpu/drm/i915/selftests/i915_gem.c index e77b7ed449ae..50bb7bbd26d3 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem.c @@ -84,14 +84,9 @@ static void simulate_hibernate(struct drm_i915_private *i915) static int pm_prepare(struct drm_i915_private *i915) { - int err = 0; - - if (i915_gem_suspend(i915)) { - pr_err("i915_gem_suspend failed\n"); - err = -EINVAL; - } + i915_gem_suspend(i915); - return err; + return 0; } static void pm_suspend(struct drm_i915_private *i915) diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c index fd89a5a33c1a..e43630b40fce 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c @@ -202,7 +202,7 @@ static int gpu_set(struct drm_i915_gem_object *obj, if (IS_ERR(vma)) return PTR_ERR(vma); - rq = i915_request_alloc(i915->engine[RCS], i915->kernel_context); + rq = i915_request_alloc(i915->engine[RCS0], i915->kernel_context); if (IS_ERR(rq)) { i915_vma_unpin(vma); return PTR_ERR(rq); @@ -248,15 +248,15 @@ static bool always_valid(struct drm_i915_private *i915) static bool needs_fence_registers(struct drm_i915_private *i915) { - return !i915_terminally_wedged(&i915->gpu_error); + return 
!i915_terminally_wedged(i915); } static bool needs_mi_store_dword(struct drm_i915_private *i915) { - if (i915_terminally_wedged(&i915->gpu_error)) + if (i915_terminally_wedged(i915)) return false; - return intel_engine_can_store_dword(i915->engine[RCS]); + return intel_engine_can_store_dword(i915->engine[RCS0]); } static const struct igt_coherency_mode { diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/selftests/i915_gem_context.c index d00d0bb07784..45f73b8b4e6d 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_context.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_context.c @@ -76,7 +76,7 @@ static int live_nop_switch(void *arg) } for (n = 0; n < nctx; n++) { - ctx[n] = i915_gem_create_context(i915, file->driver_priv); + ctx[n] = live_context(i915, file); if (IS_ERR(ctx[n])) { err = PTR_ERR(ctx[n]); goto out_unlock; @@ -220,6 +220,7 @@ gpu_fill_dw(struct i915_vma *vma, u64 offset, unsigned long count, u32 value) offset += PAGE_SIZE; } *cmd = MI_BATCH_BUFFER_END; + i915_gem_object_flush_map(obj); i915_gem_object_unpin_map(obj); err = i915_gem_object_set_to_gtt_domain(obj, false); @@ -372,7 +373,8 @@ static int cpu_fill(struct drm_i915_gem_object *obj, u32 value) return 0; } -static int cpu_check(struct drm_i915_gem_object *obj, unsigned int max) +static noinline int cpu_check(struct drm_i915_gem_object *obj, + unsigned int idx, unsigned int max) { unsigned int n, m, needs_flush; int err; @@ -390,8 +392,10 @@ static int cpu_check(struct drm_i915_gem_object *obj, unsigned int max) for (m = 0; m < max; m++) { if (map[m] != m) { - pr_err("Invalid value at page %d, offset %d: found %x expected %x\n", - n, m, map[m], m); + pr_err("%pS: Invalid value at object %d page %d/%ld, offset %d/%d: found %x expected %x\n", + __builtin_return_address(0), idx, + n, real_page_count(obj), m, max, + map[m], m); err = -EINVAL; goto out_unmap; } @@ -399,8 +403,9 @@ static int cpu_check(struct drm_i915_gem_object *obj, unsigned int max) for (; m < DW_PER_PAGE; m++) { if (map[m] != STACK_MAGIC) { - pr_err("Invalid value at page %d, offset %d: found %x expected %x\n", - n, m, map[m], STACK_MAGIC); + pr_err("%pS: Invalid value at object %d page %d, offset %d: found %x expected %x (uninitialised)\n", + __builtin_return_address(0), idx, n, m, + map[m], STACK_MAGIC); err = -EINVAL; goto out_unmap; } @@ -478,12 +483,8 @@ static unsigned long max_dwords(struct drm_i915_gem_object *obj) static int igt_ctx_exec(void *arg) { struct drm_i915_private *i915 = arg; - struct drm_i915_gem_object *obj = NULL; - unsigned long ncontexts, ndwords, dw; - struct igt_live_test t; - struct drm_file *file; - IGT_TIMEOUT(end_time); - LIST_HEAD(objects); + struct intel_engine_cs *engine; + enum intel_engine_id id; int err = -ENODEV; /* @@ -495,44 +496,167 @@ static int igt_ctx_exec(void *arg) if (!DRIVER_CAPS(i915)->has_logical_contexts) return 0; + for_each_engine(engine, i915, id) { + struct drm_i915_gem_object *obj = NULL; + unsigned long ncontexts, ndwords, dw; + struct igt_live_test t; + struct drm_file *file; + IGT_TIMEOUT(end_time); + LIST_HEAD(objects); + + if (!intel_engine_can_store_dword(engine)) + continue; + + if (!engine->context_size) + continue; /* No logical context support in HW */ + + file = mock_file(i915); + if (IS_ERR(file)) + return PTR_ERR(file); + + mutex_lock(&i915->drm.struct_mutex); + + err = igt_live_test_begin(&t, i915, __func__, engine->name); + if (err) + goto out_unlock; + + ncontexts = 0; + ndwords = 0; + dw = 0; + while (!time_after(jiffies, end_time)) { + struct 
i915_gem_context *ctx; + intel_wakeref_t wakeref; + + ctx = live_context(i915, file); + if (IS_ERR(ctx)) { + err = PTR_ERR(ctx); + goto out_unlock; + } + + if (!obj) { + obj = create_test_object(ctx, file, &objects); + if (IS_ERR(obj)) { + err = PTR_ERR(obj); + goto out_unlock; + } + } + + with_intel_runtime_pm(i915, wakeref) + err = gpu_fill(obj, ctx, engine, dw); + if (err) { + pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n", + ndwords, dw, max_dwords(obj), + engine->name, ctx->hw_id, + yesno(!!ctx->ppgtt), err); + goto out_unlock; + } + + if (++dw == max_dwords(obj)) { + obj = NULL; + dw = 0; + } + + ndwords++; + ncontexts++; + } + + pr_info("Submitted %lu contexts to %s, filling %lu dwords\n", + ncontexts, engine->name, ndwords); + + ncontexts = dw = 0; + list_for_each_entry(obj, &objects, st_link) { + unsigned int rem = + min_t(unsigned int, ndwords - dw, max_dwords(obj)); + + err = cpu_check(obj, ncontexts++, rem); + if (err) + break; + + dw += rem; + } + +out_unlock: + if (igt_live_test_end(&t)) + err = -EIO; + mutex_unlock(&i915->drm.struct_mutex); + + mock_file_free(i915, file); + if (err) + return err; + } + + return 0; +} + +static int igt_shared_ctx_exec(void *arg) +{ + struct drm_i915_private *i915 = arg; + struct i915_gem_context *parent; + struct intel_engine_cs *engine; + enum intel_engine_id id; + struct igt_live_test t; + struct drm_file *file; + int err = 0; + + /* + * Create a few different contexts with the same mm and write + * through each ctx using the GPU making sure those writes end + * up in the expected pages of our obj. + */ + if (!DRIVER_CAPS(i915)->has_logical_contexts) + return 0; + file = mock_file(i915); if (IS_ERR(file)) return PTR_ERR(file); mutex_lock(&i915->drm.struct_mutex); + parent = live_context(i915, file); + if (IS_ERR(parent)) { + err = PTR_ERR(parent); + goto out_unlock; + } + + if (!parent->ppgtt) { /* not full-ppgtt; nothing to share */ + err = 0; + goto out_unlock; + } + err = igt_live_test_begin(&t, i915, __func__, ""); if (err) goto out_unlock; - ncontexts = 0; - ndwords = 0; - dw = 0; - while (!time_after(jiffies, end_time)) { - struct intel_engine_cs *engine; - struct i915_gem_context *ctx; - unsigned int id; + for_each_engine(engine, i915, id) { + unsigned long ncontexts, ndwords, dw; + struct drm_i915_gem_object *obj = NULL; + IGT_TIMEOUT(end_time); + LIST_HEAD(objects); - ctx = i915_gem_create_context(i915, file->driver_priv); - if (IS_ERR(ctx)) { - err = PTR_ERR(ctx); - goto out_unlock; - } + if (!intel_engine_can_store_dword(engine)) + continue; - for_each_engine(engine, i915, id) { + dw = 0; + ndwords = 0; + ncontexts = 0; + while (!time_after(jiffies, end_time)) { + struct i915_gem_context *ctx; intel_wakeref_t wakeref; - if (!engine->context_size) - continue; /* No logical context support in HW */ + ctx = kernel_context(i915); + if (IS_ERR(ctx)) { + err = PTR_ERR(ctx); + goto out_test; + } - if (!intel_engine_can_store_dword(engine)) - continue; + __assign_ppgtt(ctx, parent->ppgtt); if (!obj) { - obj = create_test_object(ctx, file, &objects); + obj = create_test_object(parent, file, &objects); if (IS_ERR(obj)) { err = PTR_ERR(obj); - goto out_unlock; + kernel_context_close(ctx); + goto out_test; } } @@ -544,35 +668,39 @@ static int igt_ctx_exec(void *arg) ndwords, dw, max_dwords(obj), engine->name, ctx->hw_id, yesno(!!ctx->ppgtt), err); - goto out_unlock; + kernel_context_close(ctx); + goto out_test; } if (++dw == max_dwords(obj)) { obj = NULL; dw = 0; } + ndwords++; + ncontexts++; + 
+ kernel_context_close(ctx); } - ncontexts++; - } - pr_info("Submitted %lu contexts (across %u engines), filling %lu dwords\n", - ncontexts, RUNTIME_INFO(i915)->num_rings, ndwords); + pr_info("Submitted %lu contexts to %s, filling %lu dwords\n", + ncontexts, engine->name, ndwords); - dw = 0; - list_for_each_entry(obj, &objects, st_link) { - unsigned int rem = - min_t(unsigned int, ndwords - dw, max_dwords(obj)); + ncontexts = dw = 0; + list_for_each_entry(obj, &objects, st_link) { + unsigned int rem = + min_t(unsigned int, ndwords - dw, max_dwords(obj)); - err = cpu_check(obj, rem); - if (err) - break; + err = cpu_check(obj, ncontexts++, rem); + if (err) + goto out_test; - dw += rem; + dw += rem; + } } - -out_unlock: +out_test: if (igt_live_test_end(&t)) err = -EIO; +out_unlock: mutex_unlock(&i915->drm.struct_mutex); mock_file_free(i915, file); @@ -604,12 +732,9 @@ static struct i915_vma *rpcs_query_batch(struct i915_vma *vma) *cmd++ = upper_32_bits(vma->node.start); *cmd = MI_BATCH_BUFFER_END; + __i915_gem_object_flush_map(obj, 0, 64); i915_gem_object_unpin_map(obj); - err = i915_gem_object_set_to_gtt_domain(obj, false); - if (err) - goto err; - vma = i915_vma_instance(obj, vma->vm, NULL); if (IS_ERR(vma)) { err = PTR_ERR(vma); @@ -710,47 +835,45 @@ __sseu_prepare(struct drm_i915_private *i915, unsigned int flags, struct i915_gem_context *ctx, struct intel_engine_cs *engine, - struct igt_spinner **spin_out) + struct igt_spinner **spin) { - int ret = 0; - - if (flags & (TEST_BUSY | TEST_RESET)) { - struct igt_spinner *spin; - struct i915_request *rq; + struct i915_request *rq; + int ret; - spin = kzalloc(sizeof(*spin), GFP_KERNEL); - if (!spin) { - ret = -ENOMEM; - goto out; - } + *spin = NULL; + if (!(flags & (TEST_BUSY | TEST_RESET))) + return 0; - ret = igt_spinner_init(spin, i915); - if (ret) - return ret; + *spin = kzalloc(sizeof(**spin), GFP_KERNEL); + if (!*spin) + return -ENOMEM; - rq = igt_spinner_create_request(spin, ctx, engine, MI_NOOP); - if (IS_ERR(rq)) { - ret = PTR_ERR(rq); - igt_spinner_fini(spin); - kfree(spin); - goto out; - } + ret = igt_spinner_init(*spin, i915); + if (ret) + goto err_free; - i915_request_add(rq); + rq = igt_spinner_create_request(*spin, ctx, engine, MI_NOOP); + if (IS_ERR(rq)) { + ret = PTR_ERR(rq); + goto err_fini; + } - if (!igt_wait_for_spinner(spin, rq)) { - pr_err("%s: Spinner failed to start!\n", name); - igt_spinner_end(spin); - igt_spinner_fini(spin); - kfree(spin); - ret = -ETIMEDOUT; - goto out; - } + i915_request_add(rq); - *spin_out = spin; + if (!igt_wait_for_spinner(*spin, rq)) { + pr_err("%s: Spinner failed to start!\n", name); + ret = -ETIMEDOUT; + goto err_end; } -out: + return 0; + +err_end: + igt_spinner_end(*spin); +err_fini: + igt_spinner_fini(*spin); +err_free: + kfree(fetch_and_zero(spin)); return ret; } @@ -897,22 +1020,23 @@ __sseu_test(struct drm_i915_private *i915, ret = __sseu_prepare(i915, name, flags, ctx, engine, &spin); if (ret) - goto out; + goto out_context; ret = __i915_gem_context_reconfigure_sseu(ctx, engine, sseu); if (ret) - goto out; + goto out_spin; ret = __sseu_finish(i915, name, flags, ctx, kctx, engine, obj, hweight32(sseu.slice_mask), spin); -out: +out_spin: if (spin) { igt_spinner_end(spin); igt_spinner_fini(spin); kfree(spin); } +out_context: kernel_context_close(kctx); return ret; @@ -924,7 +1048,7 @@ __igt_ctx_sseu(struct drm_i915_private *i915, unsigned int flags) { struct intel_sseu default_sseu = intel_device_default_sseu(i915); - struct intel_engine_cs *engine = i915->engine[RCS]; + struct 
intel_engine_cs *engine = i915->engine[RCS0]; struct drm_i915_gem_object *obj; struct i915_gem_context *ctx; struct intel_sseu pg_sseu; @@ -963,11 +1087,12 @@ __igt_ctx_sseu(struct drm_i915_private *i915, mutex_lock(&i915->drm.struct_mutex); - ctx = i915_gem_create_context(i915, file->driver_priv); + ctx = live_context(i915, file); if (IS_ERR(ctx)) { ret = PTR_ERR(ctx); goto out_unlock; } + i915_gem_context_clear_bannable(ctx); /* to reset and beyond! */ obj = i915_gem_object_create_internal(i915, PAGE_SIZE); if (IS_ERR(obj)) { @@ -1048,7 +1173,7 @@ static int igt_ctx_readonly(void *arg) struct drm_i915_gem_object *obj = NULL; struct i915_gem_context *ctx; struct i915_hw_ppgtt *ppgtt; - unsigned long ndwords, dw; + unsigned long idx, ndwords, dw; struct igt_live_test t; struct drm_file *file; I915_RND_STATE(prng); @@ -1072,7 +1197,7 @@ static int igt_ctx_readonly(void *arg) if (err) goto out_unlock; - ctx = i915_gem_create_context(i915, file->driver_priv); + ctx = live_context(i915, file); if (IS_ERR(ctx)) { err = PTR_ERR(ctx); goto out_unlock; @@ -1126,9 +1251,10 @@ static int igt_ctx_readonly(void *arg) } } pr_info("Submitted %lu dwords (across %u engines)\n", - ndwords, RUNTIME_INFO(i915)->num_rings); + ndwords, RUNTIME_INFO(i915)->num_engines); dw = 0; + idx = 0; list_for_each_entry(obj, &objects, st_link) { unsigned int rem = min_t(unsigned int, ndwords - dw, max_dwords(obj)); @@ -1138,7 +1264,7 @@ static int igt_ctx_readonly(void *arg) if (i915_gem_object_is_readonly(obj)) num_writes = 0; - err = cpu_check(obj, num_writes); + err = cpu_check(obj, idx++, num_writes); if (err) break; @@ -1202,12 +1328,9 @@ static int write_to_scratch(struct i915_gem_context *ctx, } *cmd++ = value; *cmd = MI_BATCH_BUFFER_END; + __i915_gem_object_flush_map(obj, 0, 64); i915_gem_object_unpin_map(obj); - err = i915_gem_object_set_to_gtt_domain(obj, false); - if (err) - goto err; - vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL); if (IS_ERR(vma)) { err = PTR_ERR(vma); @@ -1299,11 +1422,9 @@ static int read_from_scratch(struct i915_gem_context *ctx, *cmd++ = result; } *cmd = MI_BATCH_BUFFER_END; - i915_gem_object_unpin_map(obj); - err = i915_gem_object_set_to_gtt_domain(obj, false); - if (err) - goto err; + i915_gem_object_flush_map(obj); + i915_gem_object_unpin_map(obj); vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL); if (IS_ERR(vma)) { @@ -1397,13 +1518,13 @@ static int igt_vm_isolation(void *arg) if (err) goto out_unlock; - ctx_a = i915_gem_create_context(i915, file->driver_priv); + ctx_a = live_context(i915, file); if (IS_ERR(ctx_a)) { err = PTR_ERR(ctx_a); goto out_unlock; } - ctx_b = i915_gem_create_context(i915, file->driver_priv); + ctx_b = live_context(i915, file); if (IS_ERR(ctx_b)) { err = PTR_ERR(ctx_b); goto out_unlock; @@ -1433,7 +1554,7 @@ static int igt_vm_isolation(void *arg) div64_u64_rem(i915_prandom_u64_state(&prng), vm_total, &offset); - offset &= ~sizeof(u32); + offset &= -sizeof(u32); offset += I915_GTT_PAGE_SIZE; err = write_to_scratch(ctx_a, engine, @@ -1459,7 +1580,7 @@ static int igt_vm_isolation(void *arg) count += this; } pr_info("Checked %lu scratch offsets across %d engines\n", - count, RUNTIME_INFO(i915)->num_rings); + count, RUNTIME_INFO(i915)->num_engines); out_rpm: intel_runtime_pm_put(i915, wakeref); @@ -1493,63 +1614,56 @@ static int __igt_switch_to_kernel_context(struct drm_i915_private *i915, { struct intel_engine_cs *engine; unsigned int tmp; - int err; + int pass; GEM_TRACE("Testing %s\n", __engine_name(i915, engines)); - for_each_engine_masked(engine, i915, 
engines, tmp) { - struct i915_request *rq; + for (pass = 0; pass < 4; pass++) { /* Once busy; once idle; repeat */ + bool from_idle = pass & 1; + int err; - rq = i915_request_alloc(engine, ctx); - if (IS_ERR(rq)) - return PTR_ERR(rq); + if (!from_idle) { + for_each_engine_masked(engine, i915, engines, tmp) { + struct i915_request *rq; - i915_request_add(rq); - } - - err = i915_gem_switch_to_kernel_context(i915); - if (err) - return err; + rq = i915_request_alloc(engine, ctx); + if (IS_ERR(rq)) + return PTR_ERR(rq); - for_each_engine_masked(engine, i915, engines, tmp) { - if (!engine_has_kernel_context_barrier(engine)) { - pr_err("kernel context not last on engine %s!\n", - engine->name); - return -EINVAL; + i915_request_add(rq); + } } - } - err = i915_gem_wait_for_idle(i915, - I915_WAIT_LOCKED, - MAX_SCHEDULE_TIMEOUT); - if (err) - return err; + err = i915_gem_switch_to_kernel_context(i915, + i915->gt.active_engines); + if (err) + return err; - GEM_BUG_ON(i915->gt.active_requests); - for_each_engine_masked(engine, i915, engines, tmp) { - if (engine->last_retired_context->gem_context != i915->kernel_context) { - pr_err("engine %s not idling in kernel context!\n", - engine->name); + if (!from_idle) { + err = i915_gem_wait_for_idle(i915, + I915_WAIT_LOCKED, + MAX_SCHEDULE_TIMEOUT); + if (err) + return err; + } + + if (i915->gt.active_requests) { + pr_err("%d active requests remain after switching to kernel context, pass %d (%s) on %s engine%s\n", + i915->gt.active_requests, + pass, from_idle ? "idle" : "busy", + __engine_name(i915, engines), + is_power_of_2(engines) ? "" : "s"); return -EINVAL; } - } - err = i915_gem_switch_to_kernel_context(i915); - if (err) - return err; + /* XXX Bonus points for proving we are the kernel context! */ - if (i915->gt.active_requests) { - pr_err("switch-to-kernel-context emitted %d requests even though it should already be idling in the kernel context\n", - i915->gt.active_requests); - return -EINVAL; + mutex_unlock(&i915->drm.struct_mutex); + drain_delayed_work(&i915->gt.idle_work); + mutex_lock(&i915->drm.struct_mutex); } - for_each_engine_masked(engine, i915, engines, tmp) { - if (!intel_engine_has_kernel_context(engine)) { - pr_err("kernel context not last on engine %s!\n", - engine->name); - return -EINVAL; - } - } + if (igt_flush_test(i915, I915_WAIT_LOCKED)) + return -EIO; return 0; } @@ -1593,8 +1707,6 @@ static int igt_switch_to_kernel_context(void *arg) out_unlock: GEM_TRACE_DUMP_ON(err); - if (igt_flush_test(i915, I915_WAIT_LOCKED)) - err = -EIO; intel_runtime_pm_put(i915, wakeref); mutex_unlock(&i915->drm.struct_mutex); @@ -1603,10 +1715,117 @@ out_unlock: return err; } +static void mock_barrier_task(void *data) +{ + unsigned int *counter = data; + + ++*counter; +} + +static int mock_context_barrier(void *arg) +{ +#undef pr_fmt +#define pr_fmt(x) "context_barrier_task():" # x + struct drm_i915_private *i915 = arg; + struct i915_gem_context *ctx; + struct i915_request *rq; + intel_wakeref_t wakeref; + unsigned int counter; + int err; + + /* + * The context barrier provides us with a callback after it emits + * a request; useful for retiring old state after loading new. 
+ */ + + mutex_lock(&i915->drm.struct_mutex); + + ctx = mock_context(i915, "mock"); + if (!ctx) { + err = -ENOMEM; + goto unlock; + } + + counter = 0; + err = context_barrier_task(ctx, 0, + NULL, mock_barrier_task, &counter); + if (err) { + pr_err("Failed at line %d, err=%d\n", __LINE__, err); + goto out; + } + if (counter == 0) { + pr_err("Did not retire immediately with 0 engines\n"); + err = -EINVAL; + goto out; + } + + counter = 0; + err = context_barrier_task(ctx, ALL_ENGINES, + NULL, mock_barrier_task, &counter); + if (err) { + pr_err("Failed at line %d, err=%d\n", __LINE__, err); + goto out; + } + if (counter == 0) { + pr_err("Did not retire immediately for all inactive engines\n"); + err = -EINVAL; + goto out; + } + + rq = ERR_PTR(-ENODEV); + with_intel_runtime_pm(i915, wakeref) + rq = i915_request_alloc(i915->engine[RCS0], ctx); + if (IS_ERR(rq)) { + pr_err("Request allocation failed!\n"); + goto out; + } + i915_request_add(rq); + GEM_BUG_ON(list_empty(&ctx->active_engines)); + + counter = 0; + context_barrier_inject_fault = BIT(RCS0); + err = context_barrier_task(ctx, ALL_ENGINES, + NULL, mock_barrier_task, &counter); + context_barrier_inject_fault = 0; + if (err == -ENXIO) + err = 0; + else + pr_err("Did not hit fault injection!\n"); + if (counter != 0) { + pr_err("Invoked callback on error!\n"); + err = -EIO; + } + if (err) + goto out; + + counter = 0; + err = context_barrier_task(ctx, ALL_ENGINES, + NULL, mock_barrier_task, &counter); + if (err) { + pr_err("Failed at line %d, err=%d\n", __LINE__, err); + goto out; + } + mock_device_flush(i915); + if (counter == 0) { + pr_err("Did not retire on each active engines\n"); + err = -EINVAL; + goto out; + } + +out: + mock_context_close(ctx); +unlock: + mutex_unlock(&i915->drm.struct_mutex); + return err; +#undef pr_fmt +#define pr_fmt(x) x +} + int i915_gem_context_mock_selftests(void) { static const struct i915_subtest tests[] = { SUBTEST(igt_switch_to_kernel_context), + SUBTEST(mock_context_barrier), }; struct drm_i915_private *i915; int err; @@ -1629,10 +1848,11 @@ int i915_gem_context_live_selftests(struct drm_i915_private *dev_priv) SUBTEST(igt_ctx_exec), SUBTEST(igt_ctx_readonly), SUBTEST(igt_ctx_sseu), + SUBTEST(igt_shared_ctx_exec), SUBTEST(igt_vm_isolation), }; - if (i915_terminally_wedged(&dev_priv->gpu_error)) + if (i915_terminally_wedged(dev_priv)) return 0; return i915_subtests(tests, dev_priv); diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c index a7055b12e53c..2b943ee246c9 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c @@ -315,6 +315,7 @@ static int igt_dmabuf_export_kmap(void *arg) goto err; } memset(ptr + PAGE_SIZE, 0xaa, PAGE_SIZE); + i915_gem_object_flush_map(obj); i915_gem_object_unpin_map(obj); ptr = dma_buf_kmap(dmabuf, 1); diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c index 32dce7176f63..89766688e420 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c @@ -274,7 +274,7 @@ static int igt_evict_for_cache_color(void *arg) err = PTR_ERR(obj); goto cleanup; } - i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); + i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC); quirk_add(obj, &objects); vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, @@ -290,7 +290,7 @@ static int igt_evict_for_cache_color(void *arg) err = PTR_ERR(obj); goto cleanup; } - 
i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); + i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC); quirk_add(obj, &objects); /* Neighbouring; same colour - should fit */ @@ -455,7 +455,7 @@ static int igt_evict_contexts(void *arg) struct i915_gem_context *ctx; ctx = live_context(i915, file); - if (!ctx) + if (IS_ERR(ctx)) break; /* We will need some GGTT space for the rq's context */ @@ -547,7 +547,7 @@ int i915_gem_evict_live_selftests(struct drm_i915_private *i915) SUBTEST(igt_evict_contexts), }; - if (i915_terminally_wedged(&i915->gpu_error)) + if (i915_terminally_wedged(i915)) return 0; return i915_subtests(tests, i915); diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c index 3850ef4a5ec8..9cca66e4420a 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c @@ -120,7 +120,7 @@ fake_dma_object(struct drm_i915_private *i915, u64 size) if (overflows_type(size, obj->base.size)) return ERR_PTR(-E2BIG); - obj = i915_gem_object_alloc(i915); + obj = i915_gem_object_alloc(); if (!obj) goto err; @@ -1010,7 +1010,7 @@ static int exercise_ppgtt(struct drm_i915_private *dev_priv, return PTR_ERR(file); mutex_lock(&dev_priv->drm.struct_mutex); - ppgtt = i915_ppgtt_create(dev_priv, file->driver_priv); + ppgtt = i915_ppgtt_create(dev_priv); if (IS_ERR(ppgtt)) { err = PTR_ERR(ppgtt); goto out_unlock; @@ -1020,7 +1020,6 @@ static int exercise_ppgtt(struct drm_i915_private *dev_priv, err = func(dev_priv, &ppgtt->vm, 0, ppgtt->vm.total, end_time); - i915_ppgtt_close(&ppgtt->vm); i915_ppgtt_put(ppgtt); out_unlock: mutex_unlock(&dev_priv->drm.struct_mutex); @@ -1681,25 +1680,31 @@ int i915_gem_gtt_mock_selftests(void) SUBTEST(igt_gtt_insert), }; struct drm_i915_private *i915; - struct i915_ggtt ggtt; + struct i915_ggtt *ggtt; int err; i915 = mock_gem_device(); if (!i915) return -ENOMEM; - mock_init_ggtt(i915, &ggtt); + ggtt = kmalloc(sizeof(*ggtt), GFP_KERNEL); + if (!ggtt) { + err = -ENOMEM; + goto out_put; + } + mock_init_ggtt(i915, ggtt); mutex_lock(&i915->drm.struct_mutex); - err = i915_subtests(tests, &ggtt); + err = i915_subtests(tests, ggtt); mock_device_flush(i915); mutex_unlock(&i915->drm.struct_mutex); i915_gem_drain_freed_objects(i915); - mock_fini_ggtt(&ggtt); + mock_fini_ggtt(ggtt); + kfree(ggtt); +out_put: drm_dev_put(&i915->drm); - return err; } diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_object.c b/drivers/gpu/drm/i915/selftests/i915_gem_object.c index 395ae878e0f7..971148fbe6f5 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_object.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_object.c @@ -468,7 +468,7 @@ static int make_obj_busy(struct drm_i915_gem_object *obj) if (err) return err; - rq = i915_request_alloc(i915->engine[RCS], i915->kernel_context); + rq = i915_request_alloc(i915->engine[RCS0], i915->kernel_context); if (IS_ERR(rq)) { i915_vma_unpin(vma); return PTR_ERR(rq); @@ -583,7 +583,7 @@ static int igt_mmap_offset_exhaustion(void *arg) for (loop = 0; loop < 3; loop++) { intel_wakeref_t wakeref; - if (i915_terminally_wedged(&i915->gpu_error)) + if (i915_terminally_wedged(i915)) break; obj = i915_gem_object_create_internal(i915, PAGE_SIZE); diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c index 6733dc5b6b4c..e6ffe2240126 100644 --- a/drivers/gpu/drm/i915/selftests/i915_request.c +++ b/drivers/gpu/drm/i915/selftests/i915_request.c @@ -42,7 +42,7 @@ static int igt_add_request(void *arg) /* 
Basic preliminary test to create a request and let it loose! */ mutex_lock(&i915->drm.struct_mutex); - request = mock_request(i915->engine[RCS], + request = mock_request(i915->engine[RCS0], i915->kernel_context, HZ / 10); if (!request) @@ -66,7 +66,7 @@ static int igt_wait_request(void *arg) /* Submit a request, then wait upon it */ mutex_lock(&i915->drm.struct_mutex); - request = mock_request(i915->engine[RCS], i915->kernel_context, T); + request = mock_request(i915->engine[RCS0], i915->kernel_context, T); if (!request) { err = -ENOMEM; goto out_unlock; @@ -136,19 +136,17 @@ static int igt_fence_wait(void *arg) /* Submit a request, treat it as a fence and wait upon it */ mutex_lock(&i915->drm.struct_mutex); - request = mock_request(i915->engine[RCS], i915->kernel_context, T); + request = mock_request(i915->engine[RCS0], i915->kernel_context, T); if (!request) { err = -ENOMEM; goto out_locked; } - mutex_unlock(&i915->drm.struct_mutex); /* safe as we are single user */ if (dma_fence_wait_timeout(&request->fence, false, T) != -ETIME) { pr_err("fence wait success before submit (expected timeout)!\n"); - goto out_device; + goto out_locked; } - mutex_lock(&i915->drm.struct_mutex); i915_request_add(request); mutex_unlock(&i915->drm.struct_mutex); @@ -195,7 +193,7 @@ static int igt_request_rewind(void *arg) mutex_lock(&i915->drm.struct_mutex); ctx[0] = mock_context(i915, "A"); - request = mock_request(i915->engine[RCS], ctx[0], 2 * HZ); + request = mock_request(i915->engine[RCS0], ctx[0], 2 * HZ); if (!request) { err = -ENOMEM; goto err_context_0; @@ -205,7 +203,7 @@ static int igt_request_rewind(void *arg) i915_request_add(request); ctx[1] = mock_context(i915, "B"); - vip = mock_request(i915->engine[RCS], ctx[1], 0); + vip = mock_request(i915->engine[RCS0], ctx[1], 0); if (!vip) { err = -ENOMEM; goto err_context_1; @@ -226,8 +224,7 @@ static int igt_request_rewind(void *arg) mutex_unlock(&i915->drm.struct_mutex); if (i915_request_wait(vip, 0, HZ) == -ETIME) { - pr_err("timed out waiting for high priority request, vip.seqno=%d, current seqno=%d\n", - vip->global_seqno, intel_engine_get_seqno(i915->engine[RCS])); + pr_err("timed out waiting for high priority request\n"); goto err; } @@ -418,7 +415,7 @@ static int mock_breadcrumbs_smoketest(void *arg) { struct drm_i915_private *i915 = arg; struct smoketest t = { - .engine = i915->engine[RCS], + .engine = i915->engine[RCS0], .ncontexts = 1024, .max_batch = 1024, .request_alloc = __mock_request_alloc @@ -622,13 +619,11 @@ static struct i915_vma *empty_batch(struct drm_i915_private *i915) } *cmd = MI_BATCH_BUFFER_END; - i915_gem_chipset_flush(i915); + __i915_gem_object_flush_map(obj, 0, 64); i915_gem_object_unpin_map(obj); - err = i915_gem_object_set_to_gtt_domain(obj, false); - if (err) - goto err; + i915_gem_chipset_flush(i915); vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL); if (IS_ERR(vma)) { @@ -780,10 +775,6 @@ static struct i915_vma *recursive_batch(struct drm_i915_private *i915) if (err) goto err; - err = i915_gem_object_set_to_wc_domain(obj, true); - if (err) - goto err; - cmd = i915_gem_object_pin_map(obj, I915_MAP_WC); if (IS_ERR(cmd)) { err = PTR_ERR(cmd); @@ -802,10 +793,12 @@ static struct i915_vma *recursive_batch(struct drm_i915_private *i915) *cmd++ = lower_32_bits(vma->node.start); } *cmd++ = MI_BATCH_BUFFER_END; /* terminate early in case of error */ - i915_gem_chipset_flush(i915); + __i915_gem_object_flush_map(obj, 0, 64); i915_gem_object_unpin_map(obj); + i915_gem_chipset_flush(i915); + return vma; err: @@ -1219,7 +1212,7 @@ 
out_flush: num_fences += atomic_long_read(&t[id].num_fences); } pr_info("Completed %lu waits for %lu fences across %d engines and %d cpus\n", - num_waits, num_fences, RUNTIME_INFO(i915)->num_rings, ncpus); + num_waits, num_fences, RUNTIME_INFO(i915)->num_engines, ncpus); mutex_lock(&i915->drm.struct_mutex); ret = igt_live_test_end(&live) ?: ret; @@ -1246,7 +1239,7 @@ int i915_request_live_selftests(struct drm_i915_private *i915) SUBTEST(live_breadcrumbs_smoketest), }; - if (i915_terminally_wedged(&i915->gpu_error)) + if (i915_terminally_wedged(i915)) return 0; return i915_subtests(tests, i915); diff --git a/drivers/gpu/drm/i915/selftests/i915_selftest.c b/drivers/gpu/drm/i915/selftests/i915_selftest.c index 10ef0e636a24..b18eaefef798 100644 --- a/drivers/gpu/drm/i915/selftests/i915_selftest.c +++ b/drivers/gpu/drm/i915/selftests/i915_selftest.c @@ -133,7 +133,7 @@ static int __run_selftests(const char *name, if (signal_pending(current)) return -EINTR; - pr_debug(DRIVER_NAME ": Running %s\n", st->name); + pr_info(DRIVER_NAME ": Running %s\n", st->name); if (data) err = st->live(data); else @@ -255,7 +255,7 @@ int __i915_subtests(const char *caller, if (!apply_subtest_filter(caller, st->name)) continue; - pr_debug(DRIVER_NAME ": Running %s/%s\n", caller, st->name); + pr_info(DRIVER_NAME ": Running %s/%s\n", caller, st->name); GEM_TRACE("Running %s/%s\n", caller, st->name); err = st->func(data); diff --git a/drivers/gpu/drm/i915/selftests/i915_sw_fence.c b/drivers/gpu/drm/i915/selftests/i915_sw_fence.c index cdbc8f134e5e..cbf45d85cbff 100644 --- a/drivers/gpu/drm/i915/selftests/i915_sw_fence.c +++ b/drivers/gpu/drm/i915/selftests/i915_sw_fence.c @@ -571,21 +571,27 @@ static int test_timer(void *arg) unsigned long target, delay; struct timed_fence tf; + preempt_disable(); timed_fence_init(&tf, target = jiffies); if (!i915_sw_fence_done(&tf.fence)) { pr_err("Fence with immediate expiration not signaled\n"); goto err; } + preempt_enable(); timed_fence_fini(&tf); for_each_prime_number(delay, i915_selftest.timeout_jiffies/2) { + preempt_disable(); timed_fence_init(&tf, target = jiffies + delay); if (i915_sw_fence_done(&tf.fence)) { pr_err("Fence with future expiration (%lu jiffies) already signaled\n", delay); goto err; } + preempt_enable(); i915_sw_fence_wait(&tf.fence); + + preempt_disable(); if (!i915_sw_fence_done(&tf.fence)) { pr_err("Fence not signaled after wait\n"); goto err; @@ -595,13 +601,14 @@ static int test_timer(void *arg) target, jiffies); goto err; } - + preempt_enable(); timed_fence_fini(&tf); } return 0; err: + preempt_enable(); timed_fence_fini(&tf); return -EINVAL; } diff --git a/drivers/gpu/drm/i915/selftests/i915_timeline.c b/drivers/gpu/drm/i915/selftests/i915_timeline.c index 12ea69b1a1e5..8e7bcaa1eb66 100644 --- a/drivers/gpu/drm/i915/selftests/i915_timeline.c +++ b/drivers/gpu/drm/i915/selftests/i915_timeline.c @@ -64,7 +64,7 @@ static int __mock_hwsp_timeline(struct mock_hwsp_freelist *state, unsigned long cacheline; int err; - tl = i915_timeline_create(state->i915, "mock", NULL); + tl = i915_timeline_create(state->i915, NULL); if (IS_ERR(tl)) return PTR_ERR(tl); @@ -476,7 +476,7 @@ checked_i915_timeline_create(struct drm_i915_private *i915) { struct i915_timeline *tl; - tl = i915_timeline_create(i915, "live", NULL); + tl = i915_timeline_create(i915, NULL); if (IS_ERR(tl)) return tl; @@ -641,6 +641,118 @@ out: #undef NUM_TIMELINES } +static int live_hwsp_wrap(void *arg) +{ + struct drm_i915_private *i915 = arg; + struct intel_engine_cs *engine; + struct i915_timeline 
*tl; + enum intel_engine_id id; + intel_wakeref_t wakeref; + int err = 0; + + /* + * Across a seqno wrap, we need to keep the old cacheline alive for + * foreign GPU references. + */ + + mutex_lock(&i915->drm.struct_mutex); + wakeref = intel_runtime_pm_get(i915); + + tl = i915_timeline_create(i915, NULL); + if (IS_ERR(tl)) { + err = PTR_ERR(tl); + goto out_rpm; + } + if (!tl->has_initial_breadcrumb || !tl->hwsp_cacheline) + goto out_free; + + err = i915_timeline_pin(tl); + if (err) + goto out_free; + + for_each_engine(engine, i915, id) { + const u32 *hwsp_seqno[2]; + struct i915_request *rq; + u32 seqno[2]; + + if (!intel_engine_can_store_dword(engine)) + continue; + + rq = i915_request_alloc(engine, i915->kernel_context); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto out; + } + + tl->seqno = -4u; + + err = i915_timeline_get_seqno(tl, rq, &seqno[0]); + if (err) { + i915_request_add(rq); + goto out; + } + pr_debug("seqno[0]:%08x, hwsp_offset:%08x\n", + seqno[0], tl->hwsp_offset); + + err = emit_ggtt_store_dw(rq, tl->hwsp_offset, seqno[0]); + if (err) { + i915_request_add(rq); + goto out; + } + hwsp_seqno[0] = tl->hwsp_seqno; + + err = i915_timeline_get_seqno(tl, rq, &seqno[1]); + if (err) { + i915_request_add(rq); + goto out; + } + pr_debug("seqno[1]:%08x, hwsp_offset:%08x\n", + seqno[1], tl->hwsp_offset); + + err = emit_ggtt_store_dw(rq, tl->hwsp_offset, seqno[1]); + if (err) { + i915_request_add(rq); + goto out; + } + hwsp_seqno[1] = tl->hwsp_seqno; + + /* With wrap should come a new hwsp */ + GEM_BUG_ON(seqno[1] >= seqno[0]); + GEM_BUG_ON(hwsp_seqno[0] == hwsp_seqno[1]); + + i915_request_add(rq); + + if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0) { + pr_err("Wait for timeline writes timed out!\n"); + err = -EIO; + goto out; + } + + if (*hwsp_seqno[0] != seqno[0] || *hwsp_seqno[1] != seqno[1]) { + pr_err("Bad timeline values: found (%x, %x), expected (%x, %x)\n", + *hwsp_seqno[0], *hwsp_seqno[1], + seqno[0], seqno[1]); + err = -EINVAL; + goto out; + } + + i915_retire_requests(i915); /* recycle HWSP */ + } + +out: + if (igt_flush_test(i915, I915_WAIT_LOCKED)) + err = -EIO; + + i915_timeline_unpin(tl); +out_free: + i915_timeline_put(tl); +out_rpm: + intel_runtime_pm_put(i915, wakeref); + mutex_unlock(&i915->drm.struct_mutex); + + return err; +} + static int live_hwsp_recycle(void *arg) { struct drm_i915_private *i915 = arg; @@ -723,6 +835,7 @@ int i915_timeline_live_selftests(struct drm_i915_private *i915) SUBTEST(live_hwsp_recycle), SUBTEST(live_hwsp_engine), SUBTEST(live_hwsp_alternate), + SUBTEST(live_hwsp_wrap), }; return i915_subtests(tests, i915); diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c index cf1de82741fa..fc594b030f5a 100644 --- a/drivers/gpu/drm/i915/selftests/i915_vma.c +++ b/drivers/gpu/drm/i915/selftests/i915_vma.c @@ -725,24 +725,30 @@ int i915_vma_mock_selftests(void) SUBTEST(igt_vma_partial), }; struct drm_i915_private *i915; - struct i915_ggtt ggtt; + struct i915_ggtt *ggtt; int err; i915 = mock_gem_device(); if (!i915) return -ENOMEM; - mock_init_ggtt(i915, &ggtt); + ggtt = kmalloc(sizeof(*ggtt), GFP_KERNEL); + if (!ggtt) { + err = -ENOMEM; + goto out_put; + } + mock_init_ggtt(i915, ggtt); mutex_lock(&i915->drm.struct_mutex); - err = i915_subtests(tests, &ggtt); + err = i915_subtests(tests, ggtt); mock_device_flush(i915); mutex_unlock(&i915->drm.struct_mutex); i915_gem_drain_freed_objects(i915); - mock_fini_ggtt(&ggtt); + mock_fini_ggtt(ggtt); + kfree(ggtt); +out_put: drm_dev_put(&i915->drm); - return 
err; } diff --git a/drivers/gpu/drm/i915/selftests/igt_flush_test.c b/drivers/gpu/drm/i915/selftests/igt_flush_test.c index af66e3d4e23a..94aee4071a66 100644 --- a/drivers/gpu/drm/i915/selftests/igt_flush_test.c +++ b/drivers/gpu/drm/i915/selftests/igt_flush_test.c @@ -14,7 +14,7 @@ int igt_flush_test(struct drm_i915_private *i915, unsigned int flags) cond_resched(); if (flags & I915_WAIT_LOCKED && - i915_gem_switch_to_kernel_context(i915)) { + i915_gem_switch_to_kernel_context(i915, i915->gt.active_engines)) { pr_err("Failed to switch back to kernel context; declaring wedged\n"); i915_gem_set_wedged(i915); } @@ -29,5 +29,5 @@ int igt_flush_test(struct drm_i915_private *i915, unsigned int flags) i915_gem_set_wedged(i915); } - return i915_terminally_wedged(&i915->gpu_error) ? -EIO : 0; + return i915_terminally_wedged(i915); } diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.c b/drivers/gpu/drm/i915/selftests/igt_spinner.c index 9ebd9225684e..16890dfe74c0 100644 --- a/drivers/gpu/drm/i915/selftests/igt_spinner.c +++ b/drivers/gpu/drm/i915/selftests/igt_spinner.c @@ -29,7 +29,7 @@ int igt_spinner_init(struct igt_spinner *spin, struct drm_i915_private *i915) goto err_hws; } - i915_gem_object_set_cache_level(spin->hws, I915_CACHE_LLC); + i915_gem_object_set_cache_coherency(spin->hws, I915_CACHE_LLC); vaddr = i915_gem_object_pin_map(spin->hws, I915_MAP_WB); if (IS_ERR(vaddr)) { err = PTR_ERR(vaddr); @@ -144,6 +144,13 @@ igt_spinner_create_request(struct igt_spinner *spin, i915_gem_chipset_flush(spin->i915); + if (engine->emit_init_breadcrumb && + rq->timeline->has_initial_breadcrumb) { + err = engine->emit_init_breadcrumb(rq); + if (err) + goto cancel_rq; + } + err = engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, 0); cancel_rq: diff --git a/drivers/gpu/drm/i915/selftests/intel_guc.c b/drivers/gpu/drm/i915/selftests/intel_guc.c index c5e0a0e98fcb..b05a21eaa8f4 100644 --- a/drivers/gpu/drm/i915/selftests/intel_guc.c +++ b/drivers/gpu/drm/i915/selftests/intel_guc.c @@ -111,7 +111,7 @@ static int validate_client(struct intel_guc_client *client, dev_priv->preempt_context : dev_priv->kernel_context; if (client->owner != ctx_owner || - client->engines != INTEL_INFO(dev_priv)->ring_mask || + client->engines != INTEL_INFO(dev_priv)->engine_mask || client->priority != client_priority || client->doorbell_id == GUC_DOORBELL_INVALID) return -EINVAL; @@ -261,7 +261,7 @@ static int igt_guc_doorbells(void *arg) for (i = 0; i < ATTEMPTS; i++) { clients[i] = guc_client_alloc(dev_priv, - INTEL_INFO(dev_priv)->ring_mask, + INTEL_INFO(dev_priv)->engine_mask, i % GUC_CLIENT_PRIORITY_NUM, dev_priv->kernel_context); diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c index 7b6f3bea9ef8..76b4fa150f2e 100644 --- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c +++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c @@ -56,6 +56,8 @@ static int hang_init(struct hang *h, struct drm_i915_private *i915) if (IS_ERR(h->ctx)) return PTR_ERR(h->ctx); + GEM_BUG_ON(i915_gem_context_is_bannable(h->ctx)); + h->hws = i915_gem_object_create_internal(i915, PAGE_SIZE); if (IS_ERR(h->hws)) { err = PTR_ERR(h->hws); @@ -68,7 +70,7 @@ static int hang_init(struct hang *h, struct drm_i915_private *i915) goto err_hws; } - i915_gem_object_set_cache_level(h->hws, I915_CACHE_LLC); + i915_gem_object_set_cache_coherency(h->hws, I915_CACHE_LLC); vaddr = i915_gem_object_pin_map(h->hws, I915_MAP_WB); if (IS_ERR(vaddr)) { err = PTR_ERR(vaddr); @@ -242,6 +244,12 @@ 
hang_create_request(struct hang *h, struct intel_engine_cs *engine) *batch++ = MI_BATCH_BUFFER_END; /* not reached */ i915_gem_chipset_flush(h->i915); + if (rq->engine->emit_init_breadcrumb) { + err = rq->engine->emit_init_breadcrumb(rq); + if (err) + goto cancel_rq; + } + flags = 0; if (INTEL_GEN(vm->i915) <= 5) flags |= I915_DISPATCH_SECURE; @@ -334,7 +342,7 @@ static int igt_hang_sanitycheck(void *arg) timeout = i915_request_wait(rq, I915_WAIT_LOCKED, MAX_SCHEDULE_TIMEOUT); - if (i915_terminally_wedged(&i915->gpu_error)) + if (i915_reset_failed(i915)) timeout = -EIO; i915_request_put(rq); @@ -375,7 +383,7 @@ static int igt_global_reset(void *arg) igt_global_reset_unlock(i915); - if (i915_terminally_wedged(&i915->gpu_error)) + if (i915_reset_failed(i915)) err = -EIO; return err; @@ -393,15 +401,13 @@ static int igt_wedged_reset(void *arg) i915_gem_set_wedged(i915); - mutex_lock(&i915->drm.struct_mutex); - GEM_BUG_ON(!i915_terminally_wedged(&i915->gpu_error)); + GEM_BUG_ON(!i915_reset_failed(i915)); i915_reset(i915, ALL_ENGINES, NULL); - mutex_unlock(&i915->drm.struct_mutex); intel_runtime_pm_put(i915, wakeref); igt_global_reset_unlock(i915); - return i915_terminally_wedged(&i915->gpu_error) ? -EIO : 0; + return i915_reset_failed(i915) ? -EIO : 0; } static bool wait_for_idle(struct intel_engine_cs *engine) @@ -409,6 +415,222 @@ static bool wait_for_idle(struct intel_engine_cs *engine) return wait_for(intel_engine_is_idle(engine), IGT_IDLE_TIMEOUT) == 0; } +static int igt_reset_nop(void *arg) +{ + struct drm_i915_private *i915 = arg; + struct intel_engine_cs *engine; + struct i915_gem_context *ctx; + unsigned int reset_count, count; + enum intel_engine_id id; + intel_wakeref_t wakeref; + struct drm_file *file; + IGT_TIMEOUT(end_time); + int err = 0; + + /* Check that we can reset during non-user portions of requests */ + + file = mock_file(i915); + if (IS_ERR(file)) + return PTR_ERR(file); + + mutex_lock(&i915->drm.struct_mutex); + ctx = live_context(i915, file); + mutex_unlock(&i915->drm.struct_mutex); + if (IS_ERR(ctx)) { + err = PTR_ERR(ctx); + goto out; + } + + i915_gem_context_clear_bannable(ctx); + wakeref = intel_runtime_pm_get(i915); + reset_count = i915_reset_count(&i915->gpu_error); + count = 0; + do { + mutex_lock(&i915->drm.struct_mutex); + for_each_engine(engine, i915, id) { + int i; + + for (i = 0; i < 16; i++) { + struct i915_request *rq; + + rq = i915_request_alloc(engine, ctx); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + break; + } + + i915_request_add(rq); + } + } + mutex_unlock(&i915->drm.struct_mutex); + + igt_global_reset_lock(i915); + i915_reset(i915, ALL_ENGINES, NULL); + igt_global_reset_unlock(i915); + if (i915_reset_failed(i915)) { + err = -EIO; + break; + } + + if (i915_reset_count(&i915->gpu_error) != + reset_count + ++count) { + pr_err("Full GPU reset not recorded!\n"); + err = -EINVAL; + break; + } + + if (!i915_reset_flush(i915)) { + struct drm_printer p = + drm_info_printer(i915->drm.dev); + + pr_err("%s failed to idle after reset\n", + engine->name); + intel_engine_dump(engine, &p, + "%s\n", engine->name); + + err = -EIO; + break; + } + + err = igt_flush_test(i915, 0); + if (err) + break; + } while (time_before(jiffies, end_time)); + pr_info("%s: %d resets\n", __func__, count); + + mutex_lock(&i915->drm.struct_mutex); + err = igt_flush_test(i915, I915_WAIT_LOCKED); + mutex_unlock(&i915->drm.struct_mutex); + + intel_runtime_pm_put(i915, wakeref); + +out: + mock_file_free(i915, file); + if (i915_reset_failed(i915)) + err = -EIO; + return err; +} + +static int 
igt_reset_nop_engine(void *arg) +{ + struct drm_i915_private *i915 = arg; + struct intel_engine_cs *engine; + struct i915_gem_context *ctx; + enum intel_engine_id id; + intel_wakeref_t wakeref; + struct drm_file *file; + int err = 0; + + /* Check that we can engine-reset during non-user portions */ + + if (!intel_has_reset_engine(i915)) + return 0; + + file = mock_file(i915); + if (IS_ERR(file)) + return PTR_ERR(file); + + mutex_lock(&i915->drm.struct_mutex); + ctx = live_context(i915, file); + mutex_unlock(&i915->drm.struct_mutex); + if (IS_ERR(ctx)) { + err = PTR_ERR(ctx); + goto out; + } + + i915_gem_context_clear_bannable(ctx); + wakeref = intel_runtime_pm_get(i915); + for_each_engine(engine, i915, id) { + unsigned int reset_count, reset_engine_count; + unsigned int count; + IGT_TIMEOUT(end_time); + + reset_count = i915_reset_count(&i915->gpu_error); + reset_engine_count = i915_reset_engine_count(&i915->gpu_error, + engine); + count = 0; + + set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags); + do { + int i; + + if (!wait_for_idle(engine)) { + pr_err("%s failed to idle before reset\n", + engine->name); + err = -EIO; + break; + } + + mutex_lock(&i915->drm.struct_mutex); + for (i = 0; i < 16; i++) { + struct i915_request *rq; + + rq = i915_request_alloc(engine, ctx); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + break; + } + + i915_request_add(rq); + } + mutex_unlock(&i915->drm.struct_mutex); + + err = i915_reset_engine(engine, NULL); + if (err) { + pr_err("i915_reset_engine failed\n"); + break; + } + + if (i915_reset_count(&i915->gpu_error) != reset_count) { + pr_err("Full GPU reset recorded! (engine reset expected)\n"); + err = -EINVAL; + break; + } + + if (i915_reset_engine_count(&i915->gpu_error, engine) != + reset_engine_count + ++count) { + pr_err("%s engine reset not recorded!\n", + engine->name); + err = -EINVAL; + break; + } + + if (!i915_reset_flush(i915)) { + struct drm_printer p = + drm_info_printer(i915->drm.dev); + + pr_err("%s failed to idle after reset\n", + engine->name); + intel_engine_dump(engine, &p, + "%s\n", engine->name); + + err = -EIO; + break; + } + } while (time_before(jiffies, end_time)); + clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags); + pr_info("%s(%s): %d resets\n", __func__, engine->name, count); + + if (err) + break; + + err = igt_flush_test(i915, 0); + if (err) + break; + } + + mutex_lock(&i915->drm.struct_mutex); + err = igt_flush_test(i915, I915_WAIT_LOCKED); + mutex_unlock(&i915->drm.struct_mutex); + + intel_runtime_pm_put(i915, wakeref); +out: + mock_file_free(i915, file); + if (i915_reset_failed(i915)) + err = -EIO; + return err; +} + static int __igt_reset_engine(struct drm_i915_private *i915, bool active) { struct intel_engine_cs *engine; @@ -523,7 +745,7 @@ static int __igt_reset_engine(struct drm_i915_private *i915, bool active) break; } - if (i915_terminally_wedged(&i915->gpu_error)) + if (i915_reset_failed(i915)) err = -EIO; if (active) { @@ -565,11 +787,10 @@ static int active_request_put(struct i915_request *rq) return 0; if (i915_request_wait(rq, 0, 5 * HZ) < 0) { - GEM_TRACE("%s timed out waiting for completion of fence %llx:%lld, seqno %d.\n", + GEM_TRACE("%s timed out waiting for completion of fence %llx:%lld\n", rq->engine->name, rq->fence.context, - rq->fence.seqno, - i915_request_global_seqno(rq)); + rq->fence.seqno); GEM_TRACE_DUMP(); i915_gem_set_wedged(rq->i915); @@ -762,7 +983,23 @@ static int __igt_reset_engines(struct drm_i915_private *i915, count++; if (rq) { - i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT); + if 
(i915_request_wait(rq, 0, HZ / 5) < 0) { + struct drm_printer p = + drm_info_printer(i915->drm.dev); + + pr_err("i915_reset_engine(%s:%s):" + " failed to complete request after reset\n", + engine->name, test_name); + intel_engine_dump(engine, &p, + "%s\n", engine->name); + i915_request_put(rq); + + GEM_TRACE_DUMP(); + i915_gem_set_wedged(i915); + err = -EIO; + break; + } + i915_request_put(rq); } @@ -837,7 +1074,7 @@ unwind: break; } - if (i915_terminally_wedged(&i915->gpu_error)) + if (i915_reset_failed(i915)) err = -EIO; if (flags & TEST_ACTIVE) { @@ -905,7 +1142,7 @@ static int igt_reset_wait(void *arg) long timeout; int err; - if (!intel_engine_can_store_dword(i915->engine[RCS])) + if (!intel_engine_can_store_dword(i915->engine[RCS0])) return 0; /* Check that we detect a stuck waiter and issue a reset */ @@ -917,7 +1154,7 @@ static int igt_reset_wait(void *arg) if (err) goto unlock; - rq = hang_create_request(&h, i915->engine[RCS]); + rq = hang_create_request(&h, i915->engine[RCS0]); if (IS_ERR(rq)) { err = PTR_ERR(rq); goto fini; @@ -963,7 +1200,7 @@ unlock: mutex_unlock(&i915->drm.struct_mutex); igt_global_reset_unlock(i915); - if (i915_terminally_wedged(&i915->gpu_error)) + if (i915_reset_failed(i915)) return -EIO; return err; @@ -1034,13 +1271,11 @@ static int __igt_reset_evict_vma(struct drm_i915_private *i915, struct hang h; int err; - if (!intel_engine_can_store_dword(i915->engine[RCS])) + if (!intel_engine_can_store_dword(i915->engine[RCS0])) return 0; /* Check that we can recover an unbind stuck on a hanging request */ - igt_global_reset_lock(i915); - mutex_lock(&i915->drm.struct_mutex); err = hang_init(&h, i915); if (err) @@ -1066,7 +1301,7 @@ static int __igt_reset_evict_vma(struct drm_i915_private *i915, goto out_obj; } - rq = hang_create_request(&h, i915->engine[RCS]); + rq = hang_create_request(&h, i915->engine[RCS0]); if (IS_ERR(rq)) { err = PTR_ERR(rq); goto out_obj; @@ -1138,7 +1373,9 @@ static int __igt_reset_evict_vma(struct drm_i915_private *i915, } out_reset: - fake_hangcheck(rq->i915, intel_engine_flag(rq->engine)); + igt_global_reset_lock(i915); + fake_hangcheck(rq->i915, rq->engine->mask); + igt_global_reset_unlock(i915); if (tsk) { struct igt_wedge_me w; @@ -1159,9 +1396,8 @@ fini: hang_fini(&h); unlock: mutex_unlock(&i915->drm.struct_mutex); - igt_global_reset_unlock(i915); - if (i915_terminally_wedged(&i915->gpu_error)) + if (i915_reset_failed(i915)) return -EIO; return err; @@ -1317,7 +1553,7 @@ static int igt_reset_queue(void *arg) goto fini; } - reset_count = fake_hangcheck(i915, ENGINE_MASK(id)); + reset_count = fake_hangcheck(i915, BIT(id)); if (prev->fence.error != -EIO) { pr_err("GPU reset not recorded on hanging request [fence.error=%d]!\n", @@ -1367,7 +1603,7 @@ unlock: mutex_unlock(&i915->drm.struct_mutex); igt_global_reset_unlock(i915); - if (i915_terminally_wedged(&i915->gpu_error)) + if (i915_reset_failed(i915)) return -EIO; return err; @@ -1376,7 +1612,7 @@ unlock: static int igt_handle_error(void *arg) { struct drm_i915_private *i915 = arg; - struct intel_engine_cs *engine = i915->engine[RCS]; + struct intel_engine_cs *engine = i915->engine[RCS0]; struct hang h; struct i915_request *rq; struct i915_gpu_state *error; @@ -1423,7 +1659,7 @@ static int igt_handle_error(void *arg) /* Temporarily disable error capture */ error = xchg(&i915->gpu_error.first_error, (void *)-1); - i915_handle_error(i915, ENGINE_MASK(engine->id), 0, NULL); + i915_handle_error(i915, engine->mask, 0, NULL); xchg(&i915->gpu_error.first_error, error); @@ -1547,7 +1783,7 @@ 
static int igt_atomic_reset_engine(struct intel_engine_cs *engine, i915_request_wait(rq, I915_WAIT_LOCKED, MAX_SCHEDULE_TIMEOUT); - if (i915_terminally_wedged(&i915->gpu_error)) + if (i915_reset_failed(i915)) err = -EIO; } @@ -1586,7 +1822,7 @@ static int igt_atomic_reset(void *arg) /* Flush any requests before we get started and check basics */ force_reset(i915); - if (i915_terminally_wedged(&i915->gpu_error)) + if (i915_reset_failed(i915)) goto unlock; if (intel_has_gpu_reset(i915)) { @@ -1642,6 +1878,8 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915) SUBTEST(igt_global_reset), /* attempt to recover GPU first */ SUBTEST(igt_wedged_reset), SUBTEST(igt_hang_sanitycheck), + SUBTEST(igt_reset_nop), + SUBTEST(igt_reset_nop_engine), SUBTEST(igt_reset_idle_engine), SUBTEST(igt_reset_active_engine), SUBTEST(igt_reset_engines), @@ -1660,7 +1898,7 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915) if (!intel_has_gpu_reset(i915)) return 0; - if (i915_terminally_wedged(&i915->gpu_error)) + if (i915_terminally_wedged(i915)) return -EIO; /* we're long past hope of a successful reset */ wakeref = intel_runtime_pm_get(i915); diff --git a/drivers/gpu/drm/i915/selftests/intel_lrc.c b/drivers/gpu/drm/i915/selftests/intel_lrc.c index 58144e024751..0d3cae564db8 100644 --- a/drivers/gpu/drm/i915/selftests/intel_lrc.c +++ b/drivers/gpu/drm/i915/selftests/intel_lrc.c @@ -10,6 +10,7 @@ #include "../i915_selftest.h" #include "igt_flush_test.h" +#include "igt_live_test.h" #include "igt_spinner.h" #include "i915_random.h" @@ -88,6 +89,9 @@ static int live_preempt(void *arg) if (!HAS_LOGICAL_RING_PREEMPTION(i915)) return 0; + if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION)) + pr_err("Logical preemption supported, but not exposed\n"); + mutex_lock(&i915->drm.struct_mutex); wakeref = intel_runtime_pm_get(i915); @@ -110,8 +114,17 @@ static int live_preempt(void *arg) I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY); for_each_engine(engine, i915, id) { + struct igt_live_test t; struct i915_request *rq; + if (!intel_engine_has_preemption(engine)) + continue; + + if (igt_live_test_begin(&t, i915, __func__, engine->name)) { + err = -EIO; + goto err_ctx_lo; + } + rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine, MI_ARB_CHECK); if (IS_ERR(rq)) { @@ -147,7 +160,8 @@ static int live_preempt(void *arg) igt_spinner_end(&spin_hi); igt_spinner_end(&spin_lo); - if (igt_flush_test(i915, I915_WAIT_LOCKED)) { + + if (igt_live_test_end(&t)) { err = -EIO; goto err_ctx_lo; } @@ -201,8 +215,17 @@ static int live_late_preempt(void *arg) goto err_ctx_hi; for_each_engine(engine, i915, id) { + struct igt_live_test t; struct i915_request *rq; + if (!intel_engine_has_preemption(engine)) + continue; + + if (igt_live_test_begin(&t, i915, __func__, engine->name)) { + err = -EIO; + goto err_ctx_lo; + } + rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine, MI_ARB_CHECK); if (IS_ERR(rq)) { @@ -241,7 +264,8 @@ static int live_late_preempt(void *arg) igt_spinner_end(&spin_hi); igt_spinner_end(&spin_lo); - if (igt_flush_test(i915, I915_WAIT_LOCKED)) { + + if (igt_live_test_end(&t)) { err = -EIO; goto err_ctx_lo; } @@ -335,6 +359,9 @@ static int live_suppress_self_preempt(void *arg) struct i915_request *rq_a, *rq_b; int depth; + if (!intel_engine_has_preemption(engine)) + continue; + engine->execlists.preempt_hang.count = 0; rq_a = igt_spinner_create_request(&a.spin, @@ -407,6 +434,171 @@ err_wedged: goto err_client_b; } +static int __i915_sw_fence_call +dummy_notify(struct i915_sw_fence 
*fence, enum i915_sw_fence_notify state) +{ + return NOTIFY_DONE; +} + +static struct i915_request *dummy_request(struct intel_engine_cs *engine) +{ + struct i915_request *rq; + + rq = kzalloc(sizeof(*rq), GFP_KERNEL); + if (!rq) + return NULL; + + INIT_LIST_HEAD(&rq->active_list); + rq->engine = engine; + + i915_sched_node_init(&rq->sched); + + /* mark this request as permanently incomplete */ + rq->fence.seqno = 1; + BUILD_BUG_ON(sizeof(rq->fence.seqno) != 8); /* upper 32b == 0 */ + rq->hwsp_seqno = (u32 *)&rq->fence.seqno + 1; + GEM_BUG_ON(i915_request_completed(rq)); + + i915_sw_fence_init(&rq->submit, dummy_notify); + i915_sw_fence_commit(&rq->submit); + + return rq; +} + +static void dummy_request_free(struct i915_request *dummy) +{ + i915_request_mark_complete(dummy); + i915_sched_node_fini(&dummy->sched); + i915_sw_fence_fini(&dummy->submit); + + dma_fence_free(&dummy->fence); +} + +static int live_suppress_wait_preempt(void *arg) +{ + struct drm_i915_private *i915 = arg; + struct preempt_client client[4]; + struct intel_engine_cs *engine; + enum intel_engine_id id; + intel_wakeref_t wakeref; + int err = -ENOMEM; + int i; + + /* + * Waiters are given a little priority nudge, but not enough + * to actually cause any preemption. Double check that we do + * not needlessly generate preempt-to-idle cycles. + */ + + if (!HAS_LOGICAL_RING_PREEMPTION(i915)) + return 0; + + mutex_lock(&i915->drm.struct_mutex); + wakeref = intel_runtime_pm_get(i915); + + if (preempt_client_init(i915, &client[0])) /* ELSP[0] */ + goto err_unlock; + if (preempt_client_init(i915, &client[1])) /* ELSP[1] */ + goto err_client_0; + if (preempt_client_init(i915, &client[2])) /* head of queue */ + goto err_client_1; + if (preempt_client_init(i915, &client[3])) /* bystander */ + goto err_client_2; + + for_each_engine(engine, i915, id) { + int depth; + + if (!intel_engine_has_preemption(engine)) + continue; + + if (!engine->emit_init_breadcrumb) + continue; + + for (depth = 0; depth < ARRAY_SIZE(client); depth++) { + struct i915_request *rq[ARRAY_SIZE(client)]; + struct i915_request *dummy; + + engine->execlists.preempt_hang.count = 0; + + dummy = dummy_request(engine); + if (!dummy) + goto err_client_3; + + for (i = 0; i < ARRAY_SIZE(client); i++) { + rq[i] = igt_spinner_create_request(&client[i].spin, + client[i].ctx, engine, + MI_NOOP); + if (IS_ERR(rq[i])) { + err = PTR_ERR(rq[i]); + goto err_wedged; + } + + /* Disable NEWCLIENT promotion */ + __i915_active_request_set(&rq[i]->timeline->last_request, + dummy); + i915_request_add(rq[i]); + } + + dummy_request_free(dummy); + + GEM_BUG_ON(i915_request_completed(rq[0])); + if (!igt_wait_for_spinner(&client[0].spin, rq[0])) { + pr_err("%s: First client failed to start\n", + engine->name); + goto err_wedged; + } + GEM_BUG_ON(!i915_request_started(rq[0])); + + if (i915_request_wait(rq[depth], + I915_WAIT_LOCKED | + I915_WAIT_PRIORITY, + 1) != -ETIME) { + pr_err("%s: Waiter depth:%d completed!\n", + engine->name, depth); + goto err_wedged; + } + + for (i = 0; i < ARRAY_SIZE(client); i++) + igt_spinner_end(&client[i].spin); + + if (igt_flush_test(i915, I915_WAIT_LOCKED)) + goto err_wedged; + + if (engine->execlists.preempt_hang.count) { + pr_err("%s: Preemption recorded x%d, depth %d; should have been suppressed!\n", + engine->name, + engine->execlists.preempt_hang.count, + depth); + err = -EINVAL; + goto err_client_3; + } + } + } + + err = 0; +err_client_3: + preempt_client_fini(&client[3]); +err_client_2: + preempt_client_fini(&client[2]); +err_client_1: + 
preempt_client_fini(&client[1]); +err_client_0: + preempt_client_fini(&client[0]); +err_unlock: + if (igt_flush_test(i915, I915_WAIT_LOCKED)) + err = -EIO; + intel_runtime_pm_put(i915, wakeref); + mutex_unlock(&i915->drm.struct_mutex); + return err; + +err_wedged: + for (i = 0; i < ARRAY_SIZE(client); i++) + igt_spinner_end(&client[i].spin); + i915_gem_set_wedged(i915); + err = -EIO; + goto err_client_3; +} + static int live_chain_preempt(void *arg) { struct drm_i915_private *i915 = arg; @@ -438,11 +630,39 @@ static int live_chain_preempt(void *arg) struct i915_sched_attr attr = { .priority = I915_USER_PRIORITY(I915_PRIORITY_MAX), }; - int count, i; + struct igt_live_test t; + struct i915_request *rq; + int ring_size, count, i; - for_each_prime_number_from(count, 1, 32) { /* must fit ring! */ - struct i915_request *rq; + if (!intel_engine_has_preemption(engine)) + continue; + rq = igt_spinner_create_request(&lo.spin, + lo.ctx, engine, + MI_ARB_CHECK); + if (IS_ERR(rq)) + goto err_wedged; + i915_request_add(rq); + + ring_size = rq->wa_tail - rq->head; + if (ring_size < 0) + ring_size += rq->ring->size; + ring_size = rq->ring->size / ring_size; + pr_debug("%s(%s): Using maximum of %d requests\n", + __func__, engine->name, ring_size); + + igt_spinner_end(&lo.spin); + if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 2) < 0) { + pr_err("Timed out waiting to flush %s\n", engine->name); + goto err_wedged; + } + + if (igt_live_test_begin(&t, i915, __func__, engine->name)) { + err = -EIO; + goto err_wedged; + } + + for_each_prime_number_from(count, 1, ring_size) { rq = igt_spinner_create_request(&hi.spin, hi.ctx, engine, MI_ARB_CHECK); @@ -484,6 +704,26 @@ static int live_chain_preempt(void *arg) goto err_wedged; } igt_spinner_end(&lo.spin); + + rq = i915_request_alloc(engine, lo.ctx); + if (IS_ERR(rq)) + goto err_wedged; + i915_request_add(rq); + if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0) { + struct drm_printer p = + drm_info_printer(i915->drm.dev); + + pr_err("Failed to flush low priority chain of %d requests\n", + count); + intel_engine_dump(engine, &p, + "%s\n", engine->name); + goto err_wedged; + } + } + + if (igt_live_test_end(&t)) { + err = -EIO; + goto err_wedged; } } @@ -767,7 +1007,7 @@ static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags) pr_info("Submitted %lu crescendo:%x requests across %d engines and %d contexts\n", count, flags, - RUNTIME_INFO(smoke->i915)->num_rings, smoke->ncontext); + RUNTIME_INFO(smoke->i915)->num_engines, smoke->ncontext); return 0; } @@ -795,7 +1035,7 @@ static int smoke_random(struct preempt_smoke *smoke, unsigned int flags) pr_info("Submitted %lu random:%x requests across %d engines and %d contexts\n", count, flags, - RUNTIME_INFO(smoke->i915)->num_rings, smoke->ncontext); + RUNTIME_INFO(smoke->i915)->num_engines, smoke->ncontext); return 0; } @@ -808,6 +1048,7 @@ static int live_preempt_smoke(void *arg) }; const unsigned int phase[] = { 0, BATCH }; intel_wakeref_t wakeref; + struct igt_live_test t; int err = -ENOMEM; u32 *cs; int n; @@ -838,11 +1079,13 @@ static int live_preempt_smoke(void *arg) for (n = 0; n < PAGE_SIZE / sizeof(*cs) - 1; n++) cs[n] = MI_ARB_CHECK; cs[n] = MI_BATCH_BUFFER_END; + i915_gem_object_flush_map(smoke.batch); i915_gem_object_unpin_map(smoke.batch); - err = i915_gem_object_set_to_gtt_domain(smoke.batch, false); - if (err) + if (igt_live_test_begin(&t, smoke.i915, __func__, "all")) { + err = -EIO; goto err_batch; + } for (n = 0; n < smoke.ncontext; n++) { smoke.contexts[n] = 
kernel_context(smoke.i915); @@ -861,7 +1104,7 @@ static int live_preempt_smoke(void *arg) } err_ctx: - if (igt_flush_test(smoke.i915, I915_WAIT_LOCKED)) + if (igt_live_test_end(&t)) err = -EIO; for (n = 0; n < smoke.ncontext; n++) { @@ -887,6 +1130,7 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915) SUBTEST(live_preempt), SUBTEST(live_late_preempt), SUBTEST(live_suppress_self_preempt), + SUBTEST(live_suppress_wait_preempt), SUBTEST(live_chain_preempt), SUBTEST(live_preempt_hang), SUBTEST(live_preempt_smoke), @@ -895,7 +1139,7 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915) if (!HAS_EXECLISTS(i915)) return 0; - if (i915_terminally_wedged(&i915->gpu_error)) + if (i915_terminally_wedged(i915)) return 0; return i915_subtests(tests, i915); diff --git a/drivers/gpu/drm/i915/selftests/intel_uncore.c b/drivers/gpu/drm/i915/selftests/intel_uncore.c index 81d9d31042a9..ee0bc91f7664 100644 --- a/drivers/gpu/drm/i915/selftests/intel_uncore.c +++ b/drivers/gpu/drm/i915/selftests/intel_uncore.c @@ -119,9 +119,132 @@ int intel_uncore_mock_selftests(void) return 0; } -static int intel_uncore_check_forcewake_domains(struct drm_i915_private *dev_priv) +static int live_forcewake_ops(void *arg) +{ + static const struct reg { + const char *name; + unsigned long platforms; + unsigned int offset; + } registers[] = { + { + "RING_START", + INTEL_GEN_MASK(6, 7), + 0x38, + }, + { + "RING_MI_MODE", + INTEL_GEN_MASK(8, BITS_PER_LONG), + 0x9c, + } + }; + const struct reg *r; + struct drm_i915_private *i915 = arg; + struct intel_uncore_forcewake_domain *domain; + struct intel_uncore *uncore = &i915->uncore; + struct intel_engine_cs *engine; + enum intel_engine_id id; + intel_wakeref_t wakeref; + unsigned int tmp; + int err = 0; + + GEM_BUG_ON(i915->gt.awake); + + /* vlv/chv with their pcu behave differently wrt reads */ + if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) { + pr_debug("PCU fakes forcewake badly; skipping\n"); + return 0; + } + + /* We have to pick carefully to get the exact behaviour we need */ + for (r = registers; r->name; r++) + if (r->platforms & INTEL_INFO(i915)->gen_mask) + break; + if (!r->name) { + pr_debug("Forcewaked register not known for %s; skipping\n", + intel_platform_name(INTEL_INFO(i915)->platform)); + return 0; + } + + wakeref = intel_runtime_pm_get(i915); + + for_each_fw_domain(domain, uncore, tmp) { + smp_store_mb(domain->active, false); + if (!hrtimer_cancel(&domain->timer)) + continue; + + intel_uncore_fw_release_timer(&domain->timer); + } + + for_each_engine(engine, i915, id) { + i915_reg_t mmio = _MMIO(engine->mmio_base + r->offset); + u32 __iomem *reg = uncore->regs + engine->mmio_base + r->offset; + enum forcewake_domains fw_domains; + u32 val; + + if (!engine->default_state) + continue; + + fw_domains = intel_uncore_forcewake_for_reg(uncore, mmio, + FW_REG_READ); + if (!fw_domains) + continue; + + for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) { + if (!domain->wake_count) + continue; + + pr_err("fw_domain %s still active, aborting test!\n", + intel_uncore_forcewake_domain_to_str(domain->id)); + err = -EINVAL; + goto out_rpm; + } + + intel_uncore_forcewake_get(uncore, fw_domains); + val = readl(reg); + intel_uncore_forcewake_put(uncore, fw_domains); + + /* Flush the forcewake release (delayed onto a timer) */ + for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) { + smp_store_mb(domain->active, false); + if (hrtimer_cancel(&domain->timer)) + intel_uncore_fw_release_timer(&domain->timer); + + preempt_disable(); + err = 
wait_ack_clear(domain, FORCEWAKE_KERNEL); + preempt_enable(); + if (err) { + pr_err("Failed to clear fw_domain %s\n", + intel_uncore_forcewake_domain_to_str(domain->id)); + goto out_rpm; + } + } + + if (!val) { + pr_err("%s:%s was zero while fw was held!\n", + engine->name, r->name); + err = -EINVAL; + goto out_rpm; + } + + /* We then expect the read to return 0 outside of the fw */ + if (wait_for(readl(reg) == 0, 100)) { + pr_err("%s:%s=%0x, fw_domains 0x%x still up after 100ms!\n", + engine->name, r->name, readl(reg), fw_domains); + err = -ETIMEDOUT; + goto out_rpm; + } + } + +out_rpm: + intel_runtime_pm_put(i915, wakeref); + return err; +} + +static int live_forcewake_domains(void *arg) { #define FW_RANGE 0x40000 + struct drm_i915_private *dev_priv = arg; + struct intel_uncore *uncore = &dev_priv->uncore; unsigned long *valid; u32 offset; int err; @@ -137,48 +260,52 @@ static int intel_uncore_check_forcewake_domains(struct drm_i915_private *dev_pri if (!IS_ENABLED(CONFIG_DRM_I915_SELFTEST_BROKEN)) return 0; - valid = kcalloc(BITS_TO_LONGS(FW_RANGE), sizeof(*valid), - GFP_KERNEL); + valid = bitmap_zalloc(FW_RANGE, GFP_KERNEL); if (!valid) return -ENOMEM; - intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL); - check_for_unclaimed_mmio(dev_priv); + check_for_unclaimed_mmio(uncore); for (offset = 0; offset < FW_RANGE; offset += 4) { i915_reg_t reg = { offset }; (void)I915_READ_FW(reg); - if (!check_for_unclaimed_mmio(dev_priv)) + if (!check_for_unclaimed_mmio(uncore)) set_bit(offset, valid); } - intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); + intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL); err = 0; for_each_set_bit(offset, valid, FW_RANGE) { i915_reg_t reg = { offset }; iosf_mbi_punit_acquire(); - intel_uncore_forcewake_reset(dev_priv); + intel_uncore_forcewake_reset(uncore); iosf_mbi_punit_release(); - check_for_unclaimed_mmio(dev_priv); + check_for_unclaimed_mmio(uncore); (void)I915_READ(reg); - if (check_for_unclaimed_mmio(dev_priv)) { + if (check_for_unclaimed_mmio(uncore)) { pr_err("Unclaimed mmio read to register 0x%04x\n", offset); err = -EINVAL; } } - kfree(valid); + bitmap_free(valid); return err; } int intel_uncore_live_selftests(struct drm_i915_private *i915) { + static const struct i915_subtest tests[] = { + SUBTEST(live_forcewake_ops), + SUBTEST(live_forcewake_domains), + }; + int err; /* Confirm the table we load is still valid */ @@ -188,9 +315,5 @@ int intel_uncore_live_selftests(struct drm_i915_private *i915) if (err) return err; - err = intel_uncore_check_forcewake_domains(i915); - if (err) - return err; - - return 0; + return i915_subtests(tests, i915); } diff --git a/drivers/gpu/drm/i915/selftests/intel_workarounds.c b/drivers/gpu/drm/i915/selftests/intel_workarounds.c index b15c4f26c593..3baed59008d7 100644 --- a/drivers/gpu/drm/i915/selftests/intel_workarounds.c +++ b/drivers/gpu/drm/i915/selftests/intel_workarounds.c @@ -12,6 +12,14 @@ #include "igt_spinner.h" #include "igt_wedge_me.h" #include "mock_context.h" +#include "mock_drm.h" + +static const struct wo_register { + enum intel_platform platform; + u32 reg; +} wo_registers[] = { + { INTEL_GEMINILAKE, 0x731c } +}; #define REF_NAME_MAX (INTEL_ENGINE_CS_MAX_NAME + 4) struct wa_lists { @@ -74,7 +82,7 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine) if (IS_ERR(result)) return result; - i915_gem_object_set_cache_level(result, I915_CACHE_LLC); + i915_gem_object_set_cache_coherency(result, I915_CACHE_LLC); cs = 
i915_gem_object_pin_map(result, I915_MAP_WB); if (IS_ERR(cs)) { @@ -82,6 +90,7 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine) goto err_obj; } memset(cs, 0xc5, PAGE_SIZE); + i915_gem_object_flush_map(result); i915_gem_object_unpin_map(result); vma = i915_vma_instance(result, &engine->i915->ggtt.vm, NULL); @@ -181,7 +190,7 @@ static int check_whitelist(struct i915_gem_context *ctx, err = 0; igt_wedge_on_timeout(&wedge, ctx->i915, HZ / 5) /* a safety net! */ err = i915_gem_object_set_to_cpu_domain(results, false); - if (i915_terminally_wedged(&ctx->i915->gpu_error)) + if (i915_terminally_wedged(ctx->i915)) err = -EIO; if (err) goto out_put; @@ -214,7 +223,7 @@ out_put: static int do_device_reset(struct intel_engine_cs *engine) { - i915_reset(engine->i915, ENGINE_MASK(engine->id), "live_workarounds"); + i915_reset(engine->i915, engine->mask, "live_workarounds"); return 0; } @@ -236,15 +245,11 @@ switch_to_scratch_context(struct intel_engine_cs *engine, if (IS_ERR(ctx)) return PTR_ERR(ctx); + GEM_BUG_ON(i915_gem_context_is_bannable(ctx)); + rq = ERR_PTR(-ENODEV); - with_intel_runtime_pm(engine->i915, wakeref) { - if (spin) - rq = igt_spinner_create_request(spin, - ctx, engine, - MI_NOOP); - else - rq = i915_request_alloc(engine, ctx); - } + with_intel_runtime_pm(engine->i915, wakeref) + rq = igt_spinner_create_request(spin, ctx, engine, MI_NOOP); kernel_context_close(ctx); @@ -273,7 +278,6 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine, const char *name) { struct drm_i915_private *i915 = engine->i915; - bool want_spin = reset == do_engine_reset; struct i915_gem_context *ctx; struct igt_spinner spin; intel_wakeref_t wakeref; @@ -282,11 +286,9 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine, pr_info("Checking %d whitelisted registers (RING_NONPRIV) [%s]\n", engine->whitelist.count, name); - if (want_spin) { - err = igt_spinner_init(&spin, i915); - if (err) - return err; - } + err = igt_spinner_init(&spin, i915); + if (err) + return err; ctx = kernel_context(i915); if (IS_ERR(ctx)) @@ -298,17 +300,15 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine, goto out; } - err = switch_to_scratch_context(engine, want_spin ? 
&spin : NULL); + err = switch_to_scratch_context(engine, &spin); if (err) goto out; with_intel_runtime_pm(i915, wakeref) err = reset(engine); - if (want_spin) { - igt_spinner_end(&spin); - igt_spinner_fini(&spin); - } + igt_spinner_end(&spin); + igt_spinner_fini(&spin); if (err) { pr_err("%s reset failed\n", name); @@ -340,10 +340,379 @@ out: return err; } +static struct i915_vma *create_scratch(struct i915_gem_context *ctx) +{ + struct drm_i915_gem_object *obj; + struct i915_vma *vma; + void *ptr; + int err; + + obj = i915_gem_object_create_internal(ctx->i915, PAGE_SIZE); + if (IS_ERR(obj)) + return ERR_CAST(obj); + + i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC); + + ptr = i915_gem_object_pin_map(obj, I915_MAP_WB); + if (IS_ERR(ptr)) { + err = PTR_ERR(ptr); + goto err_obj; + } + memset(ptr, 0xc5, PAGE_SIZE); + i915_gem_object_flush_map(obj); + i915_gem_object_unpin_map(obj); + + vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL); + if (IS_ERR(vma)) { + err = PTR_ERR(vma); + goto err_obj; + } + + err = i915_vma_pin(vma, 0, 0, PIN_USER); + if (err) + goto err_obj; + + err = i915_gem_object_set_to_cpu_domain(obj, false); + if (err) + goto err_obj; + + return vma; + +err_obj: + i915_gem_object_put(obj); + return ERR_PTR(err); +} + +static struct i915_vma *create_batch(struct i915_gem_context *ctx) +{ + struct drm_i915_gem_object *obj; + struct i915_vma *vma; + int err; + + obj = i915_gem_object_create_internal(ctx->i915, 16 * PAGE_SIZE); + if (IS_ERR(obj)) + return ERR_CAST(obj); + + vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL); + if (IS_ERR(vma)) { + err = PTR_ERR(vma); + goto err_obj; + } + + err = i915_vma_pin(vma, 0, 0, PIN_USER); + if (err) + goto err_obj; + + err = i915_gem_object_set_to_wc_domain(obj, true); + if (err) + goto err_obj; + + return vma; + +err_obj: + i915_gem_object_put(obj); + return ERR_PTR(err); +} + +static u32 reg_write(u32 old, u32 new, u32 rsvd) +{ + if (rsvd == 0x0000ffff) { + old &= ~(new >> 16); + old |= new & (new >> 16); + } else { + old &= ~rsvd; + old |= new & rsvd; + } + + return old; +} + +static bool wo_register(struct intel_engine_cs *engine, u32 reg) +{ + enum intel_platform platform = INTEL_INFO(engine->i915)->platform; + int i; + + for (i = 0; i < ARRAY_SIZE(wo_registers); i++) { + if (wo_registers[i].platform == platform && + wo_registers[i].reg == reg) + return true; + } + + return false; +} + +static int check_dirty_whitelist(struct i915_gem_context *ctx, + struct intel_engine_cs *engine) +{ + const u32 values[] = { + 0x00000000, + 0x01010101, + 0x10100101, + 0x03030303, + 0x30300303, + 0x05050505, + 0x50500505, + 0x0f0f0f0f, + 0xf00ff00f, + 0x10101010, + 0xf0f01010, + 0x30303030, + 0xa0a03030, + 0x50505050, + 0xc0c05050, + 0xf0f0f0f0, + 0x11111111, + 0x33333333, + 0x55555555, + 0x0000ffff, + 0x00ff00ff, + 0xff0000ff, + 0xffff00ff, + 0xffffffff, + }; + struct i915_vma *scratch; + struct i915_vma *batch; + int err = 0, i, v; + u32 *cs, *results; + + scratch = create_scratch(ctx); + if (IS_ERR(scratch)) + return PTR_ERR(scratch); + + batch = create_batch(ctx); + if (IS_ERR(batch)) { + err = PTR_ERR(batch); + goto out_scratch; + } + + for (i = 0; i < engine->whitelist.count; i++) { + u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg); + u64 addr = scratch->node.start; + struct i915_request *rq; + u32 srm, lrm, rsvd; + u32 expect; + int idx; + + if (wo_register(engine, reg)) + continue; + + srm = MI_STORE_REGISTER_MEM; + lrm = MI_LOAD_REGISTER_MEM; + if (INTEL_GEN(ctx->i915) >= 8) + lrm++, srm++; + + pr_debug("%s: Writing 
garbage to %x\n", + engine->name, reg); + + cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC); + if (IS_ERR(cs)) { + err = PTR_ERR(cs); + goto out_batch; + } + + /* SRM original */ + *cs++ = srm; + *cs++ = reg; + *cs++ = lower_32_bits(addr); + *cs++ = upper_32_bits(addr); + + idx = 1; + for (v = 0; v < ARRAY_SIZE(values); v++) { + /* LRI garbage */ + *cs++ = MI_LOAD_REGISTER_IMM(1); + *cs++ = reg; + *cs++ = values[v]; + + /* SRM result */ + *cs++ = srm; + *cs++ = reg; + *cs++ = lower_32_bits(addr + sizeof(u32) * idx); + *cs++ = upper_32_bits(addr + sizeof(u32) * idx); + idx++; + } + for (v = 0; v < ARRAY_SIZE(values); v++) { + /* LRI garbage */ + *cs++ = MI_LOAD_REGISTER_IMM(1); + *cs++ = reg; + *cs++ = ~values[v]; + + /* SRM result */ + *cs++ = srm; + *cs++ = reg; + *cs++ = lower_32_bits(addr + sizeof(u32) * idx); + *cs++ = upper_32_bits(addr + sizeof(u32) * idx); + idx++; + } + GEM_BUG_ON(idx * sizeof(u32) > scratch->size); + + /* LRM original -- don't leave garbage in the context! */ + *cs++ = lrm; + *cs++ = reg; + *cs++ = lower_32_bits(addr); + *cs++ = upper_32_bits(addr); + + *cs++ = MI_BATCH_BUFFER_END; + + i915_gem_object_flush_map(batch->obj); + i915_gem_object_unpin_map(batch->obj); + i915_gem_chipset_flush(ctx->i915); + + rq = i915_request_alloc(engine, ctx); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto out_batch; + } + + if (engine->emit_init_breadcrumb) { /* Be nice if we hang */ + err = engine->emit_init_breadcrumb(rq); + if (err) + goto err_request; + } + + err = engine->emit_bb_start(rq, + batch->node.start, PAGE_SIZE, + 0); + if (err) + goto err_request; + +err_request: + i915_request_add(rq); + if (err) + goto out_batch; + + if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0) { + pr_err("%s: Futzing %x timedout; cancelling test\n", + engine->name, reg); + i915_gem_set_wedged(ctx->i915); + err = -EIO; + goto out_batch; + } + + results = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB); + if (IS_ERR(results)) { + err = PTR_ERR(results); + goto out_batch; + } + + GEM_BUG_ON(values[ARRAY_SIZE(values) - 1] != 0xffffffff); + rsvd = results[ARRAY_SIZE(values)]; /* detect write masking */ + if (!rsvd) { + pr_err("%s: Unable to write to whitelisted register %x\n", + engine->name, reg); + err = -EINVAL; + goto out_unpin; + } + + expect = results[0]; + idx = 1; + for (v = 0; v < ARRAY_SIZE(values); v++) { + expect = reg_write(expect, values[v], rsvd); + if (results[idx] != expect) + err++; + idx++; + } + for (v = 0; v < ARRAY_SIZE(values); v++) { + expect = reg_write(expect, ~values[v], rsvd); + if (results[idx] != expect) + err++; + idx++; + } + if (err) { + pr_err("%s: %d mismatch between values written to whitelisted register [%x], and values read back!\n", + engine->name, err, reg); + + pr_info("%s: Whitelisted register: %x, original value %08x, rsvd %08x\n", + engine->name, reg, results[0], rsvd); + + expect = results[0]; + idx = 1; + for (v = 0; v < ARRAY_SIZE(values); v++) { + u32 w = values[v]; + + expect = reg_write(expect, w, rsvd); + pr_info("Wrote %08x, read %08x, expect %08x\n", + w, results[idx], expect); + idx++; + } + for (v = 0; v < ARRAY_SIZE(values); v++) { + u32 w = ~values[v]; + + expect = reg_write(expect, w, rsvd); + pr_info("Wrote %08x, read %08x, expect %08x\n", + w, results[idx], expect); + idx++; + } + + err = -EINVAL; + } +out_unpin: + i915_gem_object_unpin_map(scratch->obj); + if (err) + break; + } + + if (igt_flush_test(ctx->i915, I915_WAIT_LOCKED)) + err = -EIO; +out_batch: + i915_vma_unpin_and_release(&batch, 0); +out_scratch: + 
i915_vma_unpin_and_release(&scratch, 0); + return err; +} + +static int live_dirty_whitelist(void *arg) +{ + struct drm_i915_private *i915 = arg; + struct intel_engine_cs *engine; + struct i915_gem_context *ctx; + enum intel_engine_id id; + intel_wakeref_t wakeref; + struct drm_file *file; + int err = 0; + + /* Can the user write to the whitelisted registers? */ + + if (INTEL_GEN(i915) < 7) /* minimum requirement for LRI, SRM, LRM */ + return 0; + + wakeref = intel_runtime_pm_get(i915); + + mutex_unlock(&i915->drm.struct_mutex); + file = mock_file(i915); + mutex_lock(&i915->drm.struct_mutex); + if (IS_ERR(file)) { + err = PTR_ERR(file); + goto out_rpm; + } + + ctx = live_context(i915, file); + if (IS_ERR(ctx)) { + err = PTR_ERR(ctx); + goto out_file; + } + + for_each_engine(engine, i915, id) { + if (engine->whitelist.count == 0) + continue; + + err = check_dirty_whitelist(ctx, engine); + if (err) + goto out_file; + } + +out_file: + mutex_unlock(&i915->drm.struct_mutex); + mock_file_free(i915, file); + mutex_lock(&i915->drm.struct_mutex); +out_rpm: + intel_runtime_pm_put(i915, wakeref); + return err; +} + static int live_reset_whitelist(void *arg) { struct drm_i915_private *i915 = arg; - struct intel_engine_cs *engine = i915->engine[RCS]; + struct intel_engine_cs *engine = i915->engine[RCS0]; int err = 0; /* If we reset the gpu, we should not lose the RING_NONPRIV */ @@ -513,13 +882,14 @@ err: int intel_workarounds_live_selftests(struct drm_i915_private *i915) { static const struct i915_subtest tests[] = { + SUBTEST(live_dirty_whitelist), SUBTEST(live_reset_whitelist), SUBTEST(live_gpu_reset_gt_engine_workarounds), SUBTEST(live_engine_reset_gt_engine_workarounds), }; int err; - if (i915_terminally_wedged(&i915->gpu_error)) + if (i915_terminally_wedged(i915)) return 0; mutex_lock(&i915->drm.struct_mutex); diff --git a/drivers/gpu/drm/i915/selftests/mock_context.c b/drivers/gpu/drm/i915/selftests/mock_context.c index b646cdcdd602..0426093bf1d9 100644 --- a/drivers/gpu/drm/i915/selftests/mock_context.c +++ b/drivers/gpu/drm/i915/selftests/mock_context.c @@ -30,7 +30,6 @@ mock_context(struct drm_i915_private *i915, const char *name) { struct i915_gem_context *ctx; - unsigned int n; int ret; ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); @@ -41,25 +40,31 @@ mock_context(struct drm_i915_private *i915, INIT_LIST_HEAD(&ctx->link); ctx->i915 = i915; + ctx->hw_contexts = RB_ROOT; + spin_lock_init(&ctx->hw_contexts_lock); + INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL); INIT_LIST_HEAD(&ctx->handles_list); INIT_LIST_HEAD(&ctx->hw_id_link); - - for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) - intel_context_init(&ctx->__engine[n], ctx, i915->engine[n]); + INIT_LIST_HEAD(&ctx->active_engines); + mutex_init(&ctx->mutex); ret = i915_gem_context_pin_hw_id(ctx); if (ret < 0) goto err_handles; if (name) { + struct i915_hw_ppgtt *ppgtt; + ctx->name = kstrdup(name, GFP_KERNEL); if (!ctx->name) goto err_put; - ctx->ppgtt = mock_ppgtt(i915, name); - if (!ctx->ppgtt) + ppgtt = mock_ppgtt(i915, name); + if (!ppgtt) goto err_put; + + __set_ppgtt(ctx, ppgtt); } return ctx; @@ -87,9 +92,24 @@ void mock_init_contexts(struct drm_i915_private *i915) struct i915_gem_context * live_context(struct drm_i915_private *i915, struct drm_file *file) { + struct i915_gem_context *ctx; + int err; + lockdep_assert_held(&i915->drm.struct_mutex); - return i915_gem_create_context(i915, file->driver_priv); + ctx = i915_gem_create_context(i915, 0); + if (IS_ERR(ctx)) + return ctx; + + err = gem_context_register(ctx, file->driver_priv); + if 
(err < 0) + goto err_ctx; + + return ctx; + +err_ctx: + context_close(ctx); + return ERR_PTR(err); } struct i915_gem_context * diff --git a/drivers/gpu/drm/i915/selftests/mock_engine.c b/drivers/gpu/drm/i915/selftests/mock_engine.c index 08f0cab02e0f..61a8206ed677 100644 --- a/drivers/gpu/drm/i915/selftests/mock_engine.c +++ b/drivers/gpu/drm/i915/selftests/mock_engine.c @@ -50,13 +50,12 @@ static struct intel_ring *mock_ring(struct intel_engine_cs *engine) if (!ring) return NULL; - if (i915_timeline_init(engine->i915, - &ring->timeline, engine->name, - NULL)) { + if (i915_timeline_init(engine->i915, &ring->timeline, NULL)) { kfree(ring); return NULL; } + kref_init(&ring->base.ref); ring->base.size = sz; ring->base.effective_size = sz; ring->base.vaddr = (void *)(ring + 1); @@ -76,28 +75,26 @@ static void mock_ring_free(struct intel_ring *base) kfree(ring); } -static struct mock_request *first_request(struct mock_engine *engine) +static struct i915_request *first_request(struct mock_engine *engine) { return list_first_entry_or_null(&engine->hw_queue, - struct mock_request, - link); + struct i915_request, + mock.link); } -static void advance(struct mock_request *request) +static void advance(struct i915_request *request) { - list_del_init(&request->link); - intel_engine_write_global_seqno(request->base.engine, - request->base.global_seqno); - i915_request_mark_complete(&request->base); - GEM_BUG_ON(!i915_request_completed(&request->base)); + list_del_init(&request->mock.link); + i915_request_mark_complete(request); + GEM_BUG_ON(!i915_request_completed(request)); - intel_engine_queue_breadcrumbs(request->base.engine); + intel_engine_queue_breadcrumbs(request->engine); } static void hw_delay_complete(struct timer_list *t) { struct mock_engine *engine = from_timer(engine, t, hw_delay); - struct mock_request *request; + struct i915_request *request; unsigned long flags; spin_lock_irqsave(&engine->hw_lock, flags); @@ -112,8 +109,9 @@ static void hw_delay_complete(struct timer_list *t) * requeue the timer for the next delayed request. 
*/ while ((request = first_request(engine))) { - if (request->delay) { - mod_timer(&engine->hw_delay, jiffies + request->delay); + if (request->mock.delay) { + mod_timer(&engine->hw_delay, + jiffies + request->mock.delay); break; } @@ -126,55 +124,43 @@ static void hw_delay_complete(struct timer_list *t) static void mock_context_unpin(struct intel_context *ce) { mock_timeline_unpin(ce->ring->timeline); - i915_gem_context_put(ce->gem_context); } -static void mock_context_destroy(struct intel_context *ce) +static void mock_context_destroy(struct kref *ref) { - GEM_BUG_ON(ce->pin_count); + struct intel_context *ce = container_of(ref, typeof(*ce), ref); + + GEM_BUG_ON(intel_context_is_pinned(ce)); if (ce->ring) mock_ring_free(ce->ring); -} -static const struct intel_context_ops mock_context_ops = { - .unpin = mock_context_unpin, - .destroy = mock_context_destroy, -}; + intel_context_free(ce); +} -static struct intel_context * -mock_context_pin(struct intel_engine_cs *engine, - struct i915_gem_context *ctx) +static int mock_context_pin(struct intel_context *ce) { - struct intel_context *ce = to_intel_context(ctx, engine); - int err = -ENOMEM; - - if (ce->pin_count++) - return ce; - if (!ce->ring) { - ce->ring = mock_ring(engine); + ce->ring = mock_ring(ce->engine); if (!ce->ring) - goto err; + return -ENOMEM; } mock_timeline_pin(ce->ring->timeline); + return 0; +} - ce->ops = &mock_context_ops; - i915_gem_context_get(ctx); - return ce; +static const struct intel_context_ops mock_context_ops = { + .pin = mock_context_pin, + .unpin = mock_context_unpin, -err: - ce->pin_count = 0; - return ERR_PTR(err); -} + .destroy = mock_context_destroy, +}; static int mock_request_alloc(struct i915_request *request) { - struct mock_request *mock = container_of(request, typeof(*mock), base); - - INIT_LIST_HEAD(&mock->link); - mock->delay = 0; + INIT_LIST_HEAD(&request->mock.link); + request->mock.delay = 0; return 0; } @@ -192,25 +178,55 @@ static u32 *mock_emit_breadcrumb(struct i915_request *request, u32 *cs) static void mock_submit_request(struct i915_request *request) { - struct mock_request *mock = container_of(request, typeof(*mock), base); struct mock_engine *engine = container_of(request->engine, typeof(*engine), base); unsigned long flags; i915_request_submit(request); - GEM_BUG_ON(!request->global_seqno); spin_lock_irqsave(&engine->hw_lock, flags); - list_add_tail(&mock->link, &engine->hw_queue); - if (mock->link.prev == &engine->hw_queue) { - if (mock->delay) - mod_timer(&engine->hw_delay, jiffies + mock->delay); + list_add_tail(&request->mock.link, &engine->hw_queue); + if (list_is_first(&request->mock.link, &engine->hw_queue)) { + if (request->mock.delay) + mod_timer(&engine->hw_delay, + jiffies + request->mock.delay); else - advance(mock); + advance(request); } spin_unlock_irqrestore(&engine->hw_lock, flags); } +static void mock_reset_prepare(struct intel_engine_cs *engine) +{ +} + +static void mock_reset(struct intel_engine_cs *engine, bool stalled) +{ + GEM_BUG_ON(stalled); +} + +static void mock_reset_finish(struct intel_engine_cs *engine) +{ +} + +static void mock_cancel_requests(struct intel_engine_cs *engine) +{ + struct i915_request *request; + unsigned long flags; + + spin_lock_irqsave(&engine->timeline.lock, flags); + + /* Mark all submitted requests as skipped. 
*/ + list_for_each_entry(request, &engine->timeline.requests, sched.link) { + if (!i915_request_signaled(request)) + dma_fence_set_error(&request->fence, -EIO); + + i915_request_mark_complete(request); + } + + spin_unlock_irqrestore(&engine->timeline.lock, flags); +} + struct intel_engine_cs *mock_engine(struct drm_i915_private *i915, const char *name, int id) @@ -227,18 +243,21 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915, engine->base.i915 = i915; snprintf(engine->base.name, sizeof(engine->base.name), "%s", name); engine->base.id = id; + engine->base.mask = BIT(id); engine->base.status_page.addr = (void *)(engine + 1); - engine->base.context_pin = mock_context_pin; + engine->base.cops = &mock_context_ops; engine->base.request_alloc = mock_request_alloc; engine->base.emit_flush = mock_emit_flush; engine->base.emit_fini_breadcrumb = mock_emit_breadcrumb; engine->base.submit_request = mock_submit_request; - if (i915_timeline_init(i915, - &engine->base.timeline, - engine->base.name, - NULL)) + engine->base.reset.prepare = mock_reset_prepare; + engine->base.reset.reset = mock_reset; + engine->base.reset.finish = mock_reset_finish; + engine->base.cancel_requests = mock_cancel_requests; + + if (i915_timeline_init(i915, &engine->base.timeline, NULL)) goto err_free; i915_timeline_set_subclass(&engine->base.timeline, TIMELINE_ENGINE); @@ -249,7 +268,8 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915, timer_setup(&engine->hw_delay, hw_delay_complete, 0); INIT_LIST_HEAD(&engine->hw_queue); - if (IS_ERR(intel_context_pin(i915->kernel_context, &engine->base))) + if (pin_context(i915->kernel_context, &engine->base, + &engine->base.kernel_context)) goto err_breadcrumbs; return &engine->base; @@ -266,19 +286,18 @@ void mock_engine_flush(struct intel_engine_cs *engine) { struct mock_engine *mock = container_of(engine, typeof(*mock), base); - struct mock_request *request, *rn; + struct i915_request *request, *rn; del_timer_sync(&mock->hw_delay); spin_lock_irq(&mock->hw_lock); - list_for_each_entry_safe(request, rn, &mock->hw_queue, link) + list_for_each_entry_safe(request, rn, &mock->hw_queue, mock.link) advance(request); spin_unlock_irq(&mock->hw_lock); } void mock_engine_reset(struct intel_engine_cs *engine) { - intel_engine_write_global_seqno(engine, 0); } void mock_engine_free(struct intel_engine_cs *engine) @@ -293,7 +312,7 @@ void mock_engine_free(struct intel_engine_cs *engine) if (ce) intel_context_unpin(ce); - __intel_context_unpin(engine->i915->kernel_context, engine); + intel_context_unpin(engine->kernel_context); intel_engine_fini_breadcrumbs(engine); i915_timeline_fini(&engine->timeline); diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c index 14ae46fda49f..60bbf8b4df40 100644 --- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c +++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c @@ -79,12 +79,6 @@ static void mock_device_release(struct drm_device *dev) destroy_workqueue(i915->wq); - kmem_cache_destroy(i915->priorities); - kmem_cache_destroy(i915->dependencies); - kmem_cache_destroy(i915->requests); - kmem_cache_destroy(i915->vmas); - kmem_cache_destroy(i915->objects); - i915_gemfs_fini(i915); drm_mode_config_cleanup(&i915->drm); @@ -115,6 +109,10 @@ static void mock_retire_work_handler(struct work_struct *work) static void mock_idle_work_handler(struct work_struct *work) { + struct drm_i915_private *i915 = + container_of(work, typeof(*i915), gt.idle_work.work); + + 
i915->gt.active_engines = 0; } static int pm_domain_resume(struct device *dev) @@ -184,11 +182,12 @@ struct drm_i915_private *mock_gem_device(void) I915_GTT_PAGE_SIZE_64K | I915_GTT_PAGE_SIZE_2M; - mock_uncore_init(i915); + mock_uncore_init(&i915->uncore); i915_gem_init__mm(i915); init_waitqueue_head(&i915->gpu_error.wait_queue); init_waitqueue_head(&i915->gpu_error.reset_queue); + init_srcu_struct(&i915->gpu_error.reset_backoff_srcu); mutex_init(&i915->gpu_error.wedge_mutex); i915->wq = alloc_ordered_workqueue("mock", 0); @@ -202,31 +201,6 @@ struct drm_i915_private *mock_gem_device(void) i915->gt.awake = true; - i915->objects = KMEM_CACHE(mock_object, SLAB_HWCACHE_ALIGN); - if (!i915->objects) - goto err_wq; - - i915->vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN); - if (!i915->vmas) - goto err_objects; - - i915->requests = KMEM_CACHE(mock_request, - SLAB_HWCACHE_ALIGN | - SLAB_RECLAIM_ACCOUNT | - SLAB_TYPESAFE_BY_RCU); - if (!i915->requests) - goto err_vmas; - - i915->dependencies = KMEM_CACHE(i915_dependency, - SLAB_HWCACHE_ALIGN | - SLAB_RECLAIM_ACCOUNT); - if (!i915->dependencies) - goto err_requests; - - i915->priorities = KMEM_CACHE(i915_priolist, SLAB_HWCACHE_ALIGN); - if (!i915->priorities) - goto err_dependencies; - i915_timelines_init(i915); INIT_LIST_HEAD(&i915->gt.active_rings); @@ -236,13 +210,13 @@ struct drm_i915_private *mock_gem_device(void) mock_init_ggtt(i915, &i915->ggtt); - mkwrite_device_info(i915)->ring_mask = BIT(0); + mkwrite_device_info(i915)->engine_mask = BIT(0); i915->kernel_context = mock_context(i915, NULL); if (!i915->kernel_context) goto err_unlock; - i915->engine[RCS] = mock_engine(i915, "mock", RCS); - if (!i915->engine[RCS]) + i915->engine[RCS0] = mock_engine(i915, "mock", RCS0); + if (!i915->engine[RCS0]) goto err_context; mutex_unlock(&i915->drm.struct_mutex); @@ -256,16 +230,6 @@ err_context: err_unlock: mutex_unlock(&i915->drm.struct_mutex); i915_timelines_fini(i915); - kmem_cache_destroy(i915->priorities); -err_dependencies: - kmem_cache_destroy(i915->dependencies); -err_requests: - kmem_cache_destroy(i915->requests); -err_vmas: - kmem_cache_destroy(i915->vmas); -err_objects: - kmem_cache_destroy(i915->objects); -err_wq: destroy_workqueue(i915->wq); err_drv: drm_mode_config_cleanup(&i915->drm); diff --git a/drivers/gpu/drm/i915/selftests/mock_request.c b/drivers/gpu/drm/i915/selftests/mock_request.c index 0dc29e242597..d1a7c9608712 100644 --- a/drivers/gpu/drm/i915/selftests/mock_request.c +++ b/drivers/gpu/drm/i915/selftests/mock_request.c @@ -31,29 +31,25 @@ mock_request(struct intel_engine_cs *engine, unsigned long delay) { struct i915_request *request; - struct mock_request *mock; /* NB the i915->requests slab cache is enlarged to fit mock_request */ request = i915_request_alloc(engine, context); if (IS_ERR(request)) return NULL; - mock = container_of(request, typeof(*mock), base); - mock->delay = delay; - - return &mock->base; + request->mock.delay = delay; + return request; } bool mock_cancel_request(struct i915_request *request) { - struct mock_request *mock = container_of(request, typeof(*mock), base); struct mock_engine *engine = container_of(request->engine, typeof(*engine), base); bool was_queued; spin_lock_irq(&engine->hw_lock); - was_queued = !list_empty(&mock->link); - list_del_init(&mock->link); + was_queued = !list_empty(&request->mock.link); + list_del_init(&request->mock.link); spin_unlock_irq(&engine->hw_lock); if (was_queued) diff --git a/drivers/gpu/drm/i915/selftests/mock_request.h 
b/drivers/gpu/drm/i915/selftests/mock_request.h index 995fb728380c..4acf0211df20 100644 --- a/drivers/gpu/drm/i915/selftests/mock_request.h +++ b/drivers/gpu/drm/i915/selftests/mock_request.h @@ -29,13 +29,6 @@ #include "../i915_request.h" -struct mock_request { - struct i915_request base; - - struct list_head link; - unsigned long delay; -}; - struct i915_request * mock_request(struct intel_engine_cs *engine, struct i915_gem_context *context, diff --git a/drivers/gpu/drm/i915/selftests/mock_timeline.c b/drivers/gpu/drm/i915/selftests/mock_timeline.c index d2de9ece2118..416d85233263 100644 --- a/drivers/gpu/drm/i915/selftests/mock_timeline.c +++ b/drivers/gpu/drm/i915/selftests/mock_timeline.c @@ -14,6 +14,7 @@ void mock_timeline_init(struct i915_timeline *timeline, u64 context) timeline->fence_context = context; spin_lock_init(&timeline->lock); + mutex_init(&timeline->mutex); INIT_ACTIVE_REQUEST(&timeline->barrier); INIT_ACTIVE_REQUEST(&timeline->last_request); diff --git a/drivers/gpu/drm/i915/selftests/mock_uncore.c b/drivers/gpu/drm/i915/selftests/mock_uncore.c index 8ef14c7e5e38..ff8999c63a12 100644 --- a/drivers/gpu/drm/i915/selftests/mock_uncore.c +++ b/drivers/gpu/drm/i915/selftests/mock_uncore.c @@ -26,21 +26,21 @@ #define __nop_write(x) \ static void \ -nop_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { } +nop_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { } __nop_write(8) __nop_write(16) __nop_write(32) #define __nop_read(x) \ static u##x \ -nop_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { return 0; } +nop_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { return 0; } __nop_read(8) __nop_read(16) __nop_read(32) __nop_read(64) -void mock_uncore_init(struct drm_i915_private *i915) +void mock_uncore_init(struct intel_uncore *uncore) { - ASSIGN_WRITE_MMIO_VFUNCS(i915, nop); - ASSIGN_READ_MMIO_VFUNCS(i915, nop); + ASSIGN_WRITE_MMIO_VFUNCS(uncore, nop); + ASSIGN_READ_MMIO_VFUNCS(uncore, nop); } diff --git a/drivers/gpu/drm/i915/selftests/mock_uncore.h b/drivers/gpu/drm/i915/selftests/mock_uncore.h index d79aa3ca4d51..dacb36b5ffcd 100644 --- a/drivers/gpu/drm/i915/selftests/mock_uncore.h +++ b/drivers/gpu/drm/i915/selftests/mock_uncore.h @@ -25,6 +25,6 @@ #ifndef __MOCK_UNCORE_H #define __MOCK_UNCORE_H -void mock_uncore_init(struct drm_i915_private *i915); +void mock_uncore_init(struct intel_uncore *uncore); #endif /* !__MOCK_UNCORE_H */ diff --git a/drivers/gpu/drm/i915/test_i915_active_types_standalone.c b/drivers/gpu/drm/i915/test_i915_active_types_standalone.c new file mode 100644 index 000000000000..144ebd153e57 --- /dev/null +++ b/drivers/gpu/drm/i915/test_i915_active_types_standalone.c @@ -0,0 +1,7 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#include "i915_active_types.h" diff --git a/drivers/gpu/drm/i915/test_i915_gem_context_types_standalone.c b/drivers/gpu/drm/i915/test_i915_gem_context_types_standalone.c new file mode 100644 index 000000000000..4e4da4860bc2 --- /dev/null +++ b/drivers/gpu/drm/i915/test_i915_gem_context_types_standalone.c @@ -0,0 +1,7 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#include "i915_gem_context_types.h" diff --git a/drivers/gpu/drm/i915/test_i915_timeline_types_standalone.c b/drivers/gpu/drm/i915/test_i915_timeline_types_standalone.c new file mode 100644 index 000000000000..f58e148e8946 --- /dev/null +++ 
b/drivers/gpu/drm/i915/test_i915_timeline_types_standalone.c @@ -0,0 +1,7 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#include "i915_timeline_types.h" diff --git a/drivers/gpu/drm/i915/test_intel_context_types_standalone.c b/drivers/gpu/drm/i915/test_intel_context_types_standalone.c new file mode 100644 index 000000000000..b39e3c4e6551 --- /dev/null +++ b/drivers/gpu/drm/i915/test_intel_context_types_standalone.c @@ -0,0 +1,7 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#include "intel_context_types.h" diff --git a/drivers/gpu/drm/i915/test_intel_engine_types_standalone.c b/drivers/gpu/drm/i915/test_intel_engine_types_standalone.c new file mode 100644 index 000000000000..d05e4cdcbcf9 --- /dev/null +++ b/drivers/gpu/drm/i915/test_intel_engine_types_standalone.c @@ -0,0 +1,7 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#include "intel_engine_types.h" diff --git a/drivers/gpu/drm/i915/test_intel_workarounds_types_standalone.c b/drivers/gpu/drm/i915/test_intel_workarounds_types_standalone.c new file mode 100644 index 000000000000..4f658bb00825 --- /dev/null +++ b/drivers/gpu/drm/i915/test_intel_workarounds_types_standalone.c @@ -0,0 +1,7 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#include "intel_workarounds_types.h" diff --git a/drivers/gpu/drm/i915/vlv_dsi.c b/drivers/gpu/drm/i915/vlv_dsi.c index 6403728fe778..0a950c976bbb 100644 --- a/drivers/gpu/drm/i915/vlv_dsi.c +++ b/drivers/gpu/drm/i915/vlv_dsi.c @@ -78,7 +78,7 @@ void vlv_dsi_wait_for_fifo_empty(struct intel_dsi *intel_dsi, enum port port) mask = LP_CTRL_FIFO_EMPTY | HS_CTRL_FIFO_EMPTY | LP_DATA_FIFO_EMPTY | HS_DATA_FIFO_EMPTY; - if (intel_wait_for_register(dev_priv, + if (intel_wait_for_register(&dev_priv->uncore, MIPI_GEN_FIFO_STAT(port), mask, mask, 100)) DRM_ERROR("DPI FIFOs are not empty\n"); @@ -148,7 +148,7 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host, /* note: this is never true for reads */ if (packet.payload_length) { - if (intel_wait_for_register(dev_priv, + if (intel_wait_for_register(&dev_priv->uncore, MIPI_GEN_FIFO_STAT(port), data_mask, 0, 50)) @@ -162,7 +162,7 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host, I915_WRITE(MIPI_INTR_STAT(port), GEN_READ_DATA_AVAIL); } - if (intel_wait_for_register(dev_priv, + if (intel_wait_for_register(&dev_priv->uncore, MIPI_GEN_FIFO_STAT(port), ctrl_mask, 0, 50)) { @@ -174,7 +174,7 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host, /* ->rx_len is set only for reads */ if (msg->rx_len) { data_mask = GEN_READ_DATA_AVAIL; - if (intel_wait_for_register(dev_priv, + if (intel_wait_for_register(&dev_priv->uncore, MIPI_INTR_STAT(port), data_mask, data_mask, 50)) @@ -234,7 +234,7 @@ static int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs, I915_WRITE(MIPI_DPI_CONTROL(port), cmd); mask = SPL_PKT_SENT_INTERRUPT; - if (intel_wait_for_register(dev_priv, + if (intel_wait_for_register(&dev_priv->uncore, MIPI_INTR_STAT(port), mask, mask, 100)) DRM_ERROR("Video mode command 0x%08x send failed.\n", cmd); @@ -353,16 +353,18 @@ static bool glk_dsi_enable_io(struct intel_encoder *encoder) /* Wait for Pwr ACK */ for_each_dsi_port(port, intel_dsi->ports) { - if (intel_wait_for_register(dev_priv, - MIPI_CTRL(port), GLK_MIPIIO_PORT_POWERED, - GLK_MIPIIO_PORT_POWERED, 20)) + if (intel_wait_for_register(&dev_priv->uncore, + MIPI_CTRL(port), + 
GLK_MIPIIO_PORT_POWERED, + GLK_MIPIIO_PORT_POWERED, + 20)) DRM_ERROR("MIPIO port is powergated\n"); } /* Check for cold boot scenario */ for_each_dsi_port(port, intel_dsi->ports) { - cold_boot |= !(I915_READ(MIPI_DEVICE_READY(port)) & - DEVICE_READY); + cold_boot |= + !(I915_READ(MIPI_DEVICE_READY(port)) & DEVICE_READY); } return cold_boot; @@ -377,9 +379,11 @@ static void glk_dsi_device_ready(struct intel_encoder *encoder) /* Wait for MIPI PHY status bit to set */ for_each_dsi_port(port, intel_dsi->ports) { - if (intel_wait_for_register(dev_priv, - MIPI_CTRL(port), GLK_PHY_STATUS_PORT_READY, - GLK_PHY_STATUS_PORT_READY, 20)) + if (intel_wait_for_register(&dev_priv->uncore, + MIPI_CTRL(port), + GLK_PHY_STATUS_PORT_READY, + GLK_PHY_STATUS_PORT_READY, + 20)) DRM_ERROR("PHY is not ON\n"); } @@ -403,8 +407,11 @@ static void glk_dsi_device_ready(struct intel_encoder *encoder) I915_WRITE(MIPI_DEVICE_READY(port), val); /* Wait for ULPS active */ - if (intel_wait_for_register(dev_priv, - MIPI_CTRL(port), GLK_ULPS_NOT_ACTIVE, 0, 20)) + if (intel_wait_for_register(&dev_priv->uncore, + MIPI_CTRL(port), + GLK_ULPS_NOT_ACTIVE, + 0, + 20)) DRM_ERROR("ULPS not active\n"); /* Exit ULPS */ @@ -427,17 +434,21 @@ static void glk_dsi_device_ready(struct intel_encoder *encoder) /* Wait for Stop state */ for_each_dsi_port(port, intel_dsi->ports) { - if (intel_wait_for_register(dev_priv, - MIPI_CTRL(port), GLK_DATA_LANE_STOP_STATE, - GLK_DATA_LANE_STOP_STATE, 20)) + if (intel_wait_for_register(&dev_priv->uncore, + MIPI_CTRL(port), + GLK_DATA_LANE_STOP_STATE, + GLK_DATA_LANE_STOP_STATE, + 20)) DRM_ERROR("Date lane not in STOP state\n"); } /* Wait for AFE LATCH */ for_each_dsi_port(port, intel_dsi->ports) { - if (intel_wait_for_register(dev_priv, - BXT_MIPI_PORT_CTRL(port), AFE_LATCHOUT, - AFE_LATCHOUT, 20)) + if (intel_wait_for_register(&dev_priv->uncore, + BXT_MIPI_PORT_CTRL(port), + AFE_LATCHOUT, + AFE_LATCHOUT, + 20)) DRM_ERROR("D-PHY not entering LP-11 state\n"); } } @@ -537,7 +548,7 @@ static void glk_dsi_enter_low_power_mode(struct intel_encoder *encoder) /* Wait for MIPI PHY status bit to unset */ for_each_dsi_port(port, intel_dsi->ports) { - if (intel_wait_for_register(dev_priv, + if (intel_wait_for_register(&dev_priv->uncore, MIPI_CTRL(port), GLK_PHY_STATUS_PORT_READY, 0, 20)) DRM_ERROR("PHY is not turning OFF\n"); @@ -545,7 +556,7 @@ static void glk_dsi_enter_low_power_mode(struct intel_encoder *encoder) /* Wait for Pwr ACK bit to unset */ for_each_dsi_port(port, intel_dsi->ports) { - if (intel_wait_for_register(dev_priv, + if (intel_wait_for_register(&dev_priv->uncore, MIPI_CTRL(port), GLK_MIPIIO_PORT_POWERED, 0, 20)) DRM_ERROR("MIPI IO Port is not powergated\n"); @@ -566,7 +577,7 @@ static void glk_dsi_disable_mipi_io(struct intel_encoder *encoder) /* Wait for MIPI PHY status bit to unset */ for_each_dsi_port(port, intel_dsi->ports) { - if (intel_wait_for_register(dev_priv, + if (intel_wait_for_register(&dev_priv->uncore, MIPI_CTRL(port), GLK_PHY_STATUS_PORT_READY, 0, 20)) DRM_ERROR("PHY is not turning OFF\n"); @@ -616,7 +627,7 @@ static void vlv_dsi_clear_device_ready(struct intel_encoder *encoder) * Port A only. MIPI Port C has no similar bit for checking. 
*/ if ((IS_GEN9_LP(dev_priv) || port == PORT_A) && - intel_wait_for_register(dev_priv, + intel_wait_for_register(&dev_priv->uncore, port_ctrl, AFE_LATCHOUT, 0, 30)) DRM_ERROR("DSI LP not going Low\n"); @@ -1658,7 +1669,7 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv) struct drm_encoder *encoder; struct intel_connector *intel_connector; struct drm_connector *connector; - struct drm_display_mode *scan, *fixed_mode = NULL; + struct drm_display_mode *fixed_mode; enum port port; DRM_DEBUG_KMS("\n"); @@ -1769,13 +1780,7 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv) intel_connector_attach_encoder(intel_connector, intel_encoder); mutex_lock(&dev->mode_config.mutex); - intel_dsi_vbt_get_modes(intel_dsi); - list_for_each_entry(scan, &connector->probed_modes, head) { - if ((scan->type & DRM_MODE_TYPE_PREFERRED)) { - fixed_mode = drm_mode_duplicate(dev, scan); - break; - } - } + fixed_mode = intel_panel_vbt_fixed_mode(intel_connector); mutex_unlock(&dev->mode_config.mutex); if (!fixed_mode) { @@ -1783,9 +1788,6 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv) goto err; } - connector->display_info.width_mm = fixed_mode->width_mm; - connector->display_info.height_mm = fixed_mode->height_mm; - intel_panel_init(&intel_connector->panel, fixed_mode, NULL); intel_panel_setup_backlight(connector, INVALID_PIPE); diff --git a/drivers/gpu/drm/i915/vlv_dsi_pll.c b/drivers/gpu/drm/i915/vlv_dsi_pll.c index 954d5a8c4fa7..5e7b1fb2db5d 100644 --- a/drivers/gpu/drm/i915/vlv_dsi_pll.c +++ b/drivers/gpu/drm/i915/vlv_dsi_pll.c @@ -244,7 +244,7 @@ void bxt_dsi_pll_disable(struct intel_encoder *encoder) * PLL lock should deassert within 200us. * Wait up to 1ms before timing out. */ - if (intel_wait_for_register(dev_priv, + if (intel_wait_for_register(&dev_priv->uncore, BXT_DSI_PLL_ENABLE, BXT_DSI_PLL_LOCKED, 0, @@ -528,7 +528,7 @@ void bxt_dsi_pll_enable(struct intel_encoder *encoder, I915_WRITE(BXT_DSI_PLL_ENABLE, val); /* Timeout and fail if PLL not locked */ - if (intel_wait_for_register(dev_priv, + if (intel_wait_for_register(&dev_priv->uncore, BXT_DSI_PLL_ENABLE, BXT_DSI_PLL_LOCKED, BXT_DSI_PLL_LOCKED, diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c index ec3602ebbc1c..311a20c942eb 100644 --- a/drivers/gpu/drm/imx/ipuv3-crtc.c +++ b/drivers/gpu/drm/imx/ipuv3-crtc.c @@ -295,7 +295,7 @@ static void ipu_crtc_mode_set_nofb(struct drm_crtc *crtc) sig_cfg.enable_pol = !(imx_crtc_state->bus_flags & DRM_BUS_FLAG_DE_LOW); /* Default to driving pixel data on negative clock edges */ sig_cfg.clk_pol = !!(imx_crtc_state->bus_flags & - DRM_BUS_FLAG_PIXDATA_POSEDGE); + DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE); sig_cfg.bus_format = imx_crtc_state->bus_format; sig_cfg.v_to_h_sync = 0; sig_cfg.hsync_pin = imx_crtc_state->di_hsync_pin; diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig index cf549f1ed403..78c9e5a5e793 100644 --- a/drivers/gpu/drm/msm/Kconfig +++ b/drivers/gpu/drm/msm/Kconfig @@ -5,6 +5,7 @@ config DRM_MSM depends on ARCH_QCOM || SOC_IMX5 || (ARM && COMPILE_TEST) depends on OF && COMMON_CLK depends on MMU + depends on INTERCONNECT || !INTERCONNECT select QCOM_MDT_LOADER if ARCH_QCOM select REGULATOR select DRM_KMS_HELPER diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c index ce1b3cc4bf6d..d1662a75c7ec 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c @@ -2,6 +2,7 @@ /* Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. 
*/ #include <linux/clk.h> +#include <linux/interconnect.h> #include <linux/pm_opp.h> #include <soc/qcom/cmd-db.h> @@ -84,6 +85,9 @@ bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu) static void __a6xx_gmu_set_freq(struct a6xx_gmu *gmu, int index) { + struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); + struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; + struct msm_gpu *gpu = &adreno_gpu->base; int ret; gmu_write(gmu, REG_A6XX_GMU_DCVS_ACK_OPTION, 0); @@ -106,6 +110,12 @@ static void __a6xx_gmu_set_freq(struct a6xx_gmu *gmu, int index) dev_err(gmu->dev, "GMU set GPU frequency error: %d\n", ret); gmu->freq = gmu->gpu_freqs[index]; + + /* + * Eventually we will want to scale the path vote with the frequency but + * for now leave it at max so that the performance is nominal. + */ + icc_set_bw(gpu->icc_path, 0, MBps_to_icc(7216)); } void a6xx_gmu_set_freq(struct msm_gpu *gpu, unsigned long freq) @@ -705,6 +715,8 @@ out: int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu) { + struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; + struct msm_gpu *gpu = &adreno_gpu->base; struct a6xx_gmu *gmu = &a6xx_gpu->gmu; int status, ret; @@ -720,6 +732,9 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu) if (ret) goto out; + /* Set the bus quota to a reasonable value for boot */ + icc_set_bw(gpu->icc_path, 0, MBps_to_icc(3072)); + a6xx_gmu_irq_enable(gmu); /* Check to see if we are doing a cold or warm boot */ @@ -760,6 +775,8 @@ bool a6xx_gmu_isidle(struct a6xx_gmu *gmu) int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu) { + struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; + struct msm_gpu *gpu = &adreno_gpu->base; struct a6xx_gmu *gmu = &a6xx_gpu->gmu; u32 val; @@ -806,6 +823,9 @@ int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu) /* Tell RPMh to power off the GPU */ a6xx_rpmh_stop(gmu); + /* Remove the bus vote */ + icc_set_bw(gpu->icc_path, 0, 0); + clk_bulk_disable_unprepare(gmu->nr_clocks, gmu->clocks); pm_runtime_put_sync(gmu->dev); diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index 2cfee1a4fe0b..27898475cdf4 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c @@ -18,6 +18,7 @@ */ #include <linux/ascii85.h> +#include <linux/interconnect.h> #include <linux/kernel.h> #include <linux/pm_opp.h> #include <linux/slab.h> @@ -747,6 +748,11 @@ static int adreno_get_pwrlevels(struct device *dev, DBG("fast_rate=%u, slow_rate=27000000", gpu->fast_rate); + /* Check for an interconnect path for the bus */ + gpu->icc_path = of_icc_get(dev, NULL); + if (IS_ERR(gpu->icc_path)) + gpu->icc_path = NULL; + return 0; } @@ -787,10 +793,13 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev, void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu) { + struct msm_gpu *gpu = &adreno_gpu->base; unsigned int i; for (i = 0; i < ARRAY_SIZE(adreno_gpu->info->fw); i++) release_firmware(adreno_gpu->fw[i]); + icc_put(gpu->icc_path); + msm_gpu_cleanup(&adreno_gpu->base); } diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h index ca17086f72c9..6241986bab51 100644 --- a/drivers/gpu/drm/msm/msm_gpu.h +++ b/drivers/gpu/drm/msm/msm_gpu.h @@ -19,6 +19,7 @@ #define __MSM_GPU_H__ #include <linux/clk.h> +#include <linux/interconnect.h> #include <linux/regulator/consumer.h> #include "msm_drv.h" @@ -118,6 +119,8 @@ struct msm_gpu { struct clk *ebi1_clk, *core_clk, *rbbmtimer_clk; uint32_t fast_rate; + struct icc_path *icc_path; + /* Hang and Inactivity Detection: */ #define DRM_MSM_INACTIVE_PERIOD 66 /* in ms 
(roughly four frames) */ diff --git a/drivers/gpu/drm/mxsfb/mxsfb_crtc.c b/drivers/gpu/drm/mxsfb/mxsfb_crtc.c index 0ee1ca8a316a..98e9bda91e80 100644 --- a/drivers/gpu/drm/mxsfb/mxsfb_crtc.c +++ b/drivers/gpu/drm/mxsfb/mxsfb_crtc.c @@ -253,12 +253,12 @@ static void mxsfb_crtc_mode_set_nofb(struct mxsfb_drm_private *mxsfb) if (!(bus_flags & DRM_BUS_FLAG_DE_LOW)) vdctrl0 |= VDCTRL0_ENABLE_ACT_HIGH; /* - * DRM_BUS_FLAG_PIXDATA_ defines are controller centric, + * DRM_BUS_FLAG_PIXDATA_DRIVE_ defines are controller centric, * controllers VDCTRL0_DOTCLK is display centric. * Drive on positive edge -> display samples on falling edge - * DRM_BUS_FLAG_PIXDATA_POSEDGE -> VDCTRL0_DOTCLK_ACT_FALLING + * DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE -> VDCTRL0_DOTCLK_ACT_FALLING */ - if (bus_flags & DRM_BUS_FLAG_PIXDATA_POSEDGE) + if (bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE) vdctrl0 |= VDCTRL0_DOTCLK_ACT_FALLING; writel(vdctrl0, mxsfb->base + LCDC_VDCTRL0); diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c index 88a52f6b39fe..7dfbbbc1beea 100644 --- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c +++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c @@ -181,7 +181,7 @@ nouveau_debugfs_pstate_set(struct file *file, const char __user *ubuf, } ret = pm_runtime_get_sync(drm->dev); - if (IS_ERR_VALUE(ret) && ret != -EACCES) + if (ret < 0 && ret != -EACCES) return ret; ret = nvif_mthd(ctrl, NVIF_CONTROL_PSTATE_USER, &args, sizeof(args)); pm_runtime_put_autosuspend(drm->dev); diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c index 8be7a83ced9b..40c47d6a7d78 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dmem.c +++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c @@ -100,12 +100,10 @@ static void nouveau_dmem_free(struct hmm_devmem *devmem, struct page *page) { struct nouveau_dmem_chunk *chunk; - struct nouveau_drm *drm; unsigned long idx; chunk = (void *)hmm_devmem_page_get_drvdata(page); idx = page_to_pfn(page) - chunk->pfn_first; - drm = chunk->drm; /* * FIXME: @@ -261,7 +259,7 @@ static const struct migrate_vma_ops nouveau_dmem_fault_migrate_ops = { .finalize_and_map = nouveau_dmem_fault_finalize_and_map, }; -static int +static vm_fault_t nouveau_dmem_fault(struct hmm_devmem *devmem, struct vm_area_struct *vma, unsigned long addr, @@ -456,11 +454,6 @@ nouveau_dmem_resume(struct nouveau_drm *drm) /* FIXME handle pin failure */ WARN_ON(ret); } - list_for_each_entry (chunk, &drm->dmem->chunk_empty, list) { - ret = nouveau_bo_pin(chunk->bo, TTM_PL_FLAG_VRAM, false); - /* FIXME handle pin failure */ - WARN_ON(ret); - } mutex_unlock(&drm->dmem->mutex); } @@ -479,9 +472,6 @@ nouveau_dmem_suspend(struct nouveau_drm *drm) list_for_each_entry (chunk, &drm->dmem->chunk_full, list) { nouveau_bo_unpin(chunk->bo); } - list_for_each_entry (chunk, &drm->dmem->chunk_empty, list) { - nouveau_bo_unpin(chunk->bo); - } mutex_unlock(&drm->dmem->mutex); } @@ -623,7 +613,7 @@ nouveau_dmem_init(struct nouveau_drm *drm) */ drm->dmem->devmem = hmm_devmem_add(&nouveau_dmem_devmem_ops, device, size); - if (drm->dmem->devmem == NULL) { + if (IS_ERR(drm->dmem->devmem)) { kfree(drm->dmem); drm->dmem = NULL; return; diff --git a/drivers/gpu/drm/omapdrm/displays/Kconfig b/drivers/gpu/drm/omapdrm/displays/Kconfig index a349cb61961e..7b0bcb494b5c 100644 --- a/drivers/gpu/drm/omapdrm/displays/Kconfig +++ b/drivers/gpu/drm/omapdrm/displays/Kconfig @@ -6,23 +6,12 @@ config DRM_OMAP_ENCODER_OPA362 Driver for OPA362 external analog TV amplifier controlled through a GPIO. 
-config DRM_OMAP_ENCODER_TFP410 - tristate "TFP410 DPI to DVI Encoder" - help - Driver for TFP410 DPI to DVI encoder. - config DRM_OMAP_ENCODER_TPD12S015 tristate "TPD12S015 HDMI ESD protection and level shifter" help Driver for TPD12S015, which offers HDMI ESD protection and level shifting. -config DRM_OMAP_CONNECTOR_DVI - tristate "DVI Connector" - depends on I2C - help - Driver for a generic DVI connector. - config DRM_OMAP_CONNECTOR_HDMI tristate "HDMI Connector" help @@ -33,12 +22,6 @@ config DRM_OMAP_CONNECTOR_ANALOG_TV help Driver for a generic analog TV connector. -config DRM_OMAP_PANEL_DPI - tristate "Generic DPI panel" - depends on BACKLIGHT_CLASS_DEVICE - help - Driver for generic DPI panels. - config DRM_OMAP_PANEL_DSI_CM tristate "Generic DSI Command Mode Panel" depends on BACKLIGHT_CLASS_DEVICE diff --git a/drivers/gpu/drm/omapdrm/displays/Makefile b/drivers/gpu/drm/omapdrm/displays/Makefile index d99659e1381b..1db34d4fed64 100644 --- a/drivers/gpu/drm/omapdrm/displays/Makefile +++ b/drivers/gpu/drm/omapdrm/displays/Makefile @@ -1,11 +1,8 @@ # SPDX-License-Identifier: GPL-2.0 obj-$(CONFIG_DRM_OMAP_ENCODER_OPA362) += encoder-opa362.o -obj-$(CONFIG_DRM_OMAP_ENCODER_TFP410) += encoder-tfp410.o obj-$(CONFIG_DRM_OMAP_ENCODER_TPD12S015) += encoder-tpd12s015.o -obj-$(CONFIG_DRM_OMAP_CONNECTOR_DVI) += connector-dvi.o obj-$(CONFIG_DRM_OMAP_CONNECTOR_HDMI) += connector-hdmi.o obj-$(CONFIG_DRM_OMAP_CONNECTOR_ANALOG_TV) += connector-analog-tv.o -obj-$(CONFIG_DRM_OMAP_PANEL_DPI) += panel-dpi.o obj-$(CONFIG_DRM_OMAP_PANEL_DSI_CM) += panel-dsi-cm.o obj-$(CONFIG_DRM_OMAP_PANEL_SONY_ACX565AKM) += panel-sony-acx565akm.o obj-$(CONFIG_DRM_OMAP_PANEL_LGPHILIPS_LB035Q02) += panel-lgphilips-lb035q02.o diff --git a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c index 28a3ce8f88d2..6c0561101874 100644 --- a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c +++ b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c @@ -35,50 +35,9 @@ static void tvc_disconnect(struct omap_dss_device *src, { } -static int tvc_enable(struct omap_dss_device *dssdev) -{ - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *src = dssdev->src; - int r; - - dev_dbg(ddata->dev, "enable\n"); - - if (!omapdss_device_is_connected(dssdev)) - return -ENODEV; - - if (omapdss_device_is_enabled(dssdev)) - return 0; - - r = src->ops->enable(src); - if (r) - return r; - - dssdev->state = OMAP_DSS_DISPLAY_ACTIVE; - - return r; -} - -static void tvc_disable(struct omap_dss_device *dssdev) -{ - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *src = dssdev->src; - - dev_dbg(ddata->dev, "disable\n"); - - if (!omapdss_device_is_enabled(dssdev)) - return; - - src->ops->disable(src); - - dssdev->state = OMAP_DSS_DISPLAY_DISABLED; -} - static const struct omap_dss_device_ops tvc_ops = { .connect = tvc_connect, .disconnect = tvc_disconnect, - - .enable = tvc_enable, - .disable = tvc_disable, }; static int tvc_probe(struct platform_device *pdev) @@ -97,6 +56,7 @@ static int tvc_probe(struct platform_device *pdev) dssdev->ops = &tvc_ops; dssdev->dev = &pdev->dev; dssdev->type = OMAP_DISPLAY_TYPE_VENC; + dssdev->display = true; dssdev->owner = THIS_MODULE; dssdev->of_ports = BIT(0); @@ -109,12 +69,9 @@ static int tvc_probe(struct platform_device *pdev) static int __exit tvc_remove(struct platform_device *pdev) { struct panel_drv_data *ddata = platform_get_drvdata(pdev); - struct omap_dss_device *dssdev = &ddata->dssdev; 
omapdss_device_unregister(&ddata->dssdev); - tvc_disable(dssdev); - return 0; } diff --git a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c deleted file mode 100644 index 24b14f44248e..000000000000 --- a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c +++ /dev/null @@ -1,330 +0,0 @@ -/* - * Generic DVI Connector driver - * - * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/ - * Author: Tomi Valkeinen <tomi.valkeinen@ti.com> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published by - * the Free Software Foundation. - */ - -#include <linux/gpio/consumer.h> -#include <linux/i2c.h> -#include <linux/module.h> -#include <linux/platform_device.h> -#include <linux/slab.h> - -#include <drm/drm_edid.h> - -#include "../dss/omapdss.h" - -struct panel_drv_data { - struct omap_dss_device dssdev; - - struct i2c_adapter *i2c_adapter; - - struct gpio_desc *hpd_gpio; - - void (*hpd_cb)(void *cb_data, enum drm_connector_status status); - void *hpd_cb_data; - bool hpd_enabled; - /* mutex for hpd fields above */ - struct mutex hpd_lock; -}; - -#define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev) - -static int dvic_connect(struct omap_dss_device *src, - struct omap_dss_device *dst) -{ - return 0; -} - -static void dvic_disconnect(struct omap_dss_device *src, - struct omap_dss_device *dst) -{ -} - -static int dvic_enable(struct omap_dss_device *dssdev) -{ - struct omap_dss_device *src = dssdev->src; - int r; - - if (!omapdss_device_is_connected(dssdev)) - return -ENODEV; - - if (omapdss_device_is_enabled(dssdev)) - return 0; - - r = src->ops->enable(src); - if (r) - return r; - - dssdev->state = OMAP_DSS_DISPLAY_ACTIVE; - - return 0; -} - -static void dvic_disable(struct omap_dss_device *dssdev) -{ - struct omap_dss_device *src = dssdev->src; - - if (!omapdss_device_is_enabled(dssdev)) - return; - - src->ops->disable(src); - - dssdev->state = OMAP_DSS_DISPLAY_DISABLED; -} - -static int dvic_ddc_read(struct i2c_adapter *adapter, - unsigned char *buf, u16 count, u8 offset) -{ - int r, retries; - - for (retries = 3; retries > 0; retries--) { - struct i2c_msg msgs[] = { - { - .addr = DDC_ADDR, - .flags = 0, - .len = 1, - .buf = &offset, - }, { - .addr = DDC_ADDR, - .flags = I2C_M_RD, - .len = count, - .buf = buf, - } - }; - - r = i2c_transfer(adapter, msgs, 2); - if (r == 2) - return 0; - - if (r != -EAGAIN) - break; - } - - return r < 0 ? 
r : -EIO; -} - -static int dvic_read_edid(struct omap_dss_device *dssdev, - u8 *edid, int len) -{ - struct panel_drv_data *ddata = to_panel_data(dssdev); - int r, l, bytes_read; - - l = min(EDID_LENGTH, len); - r = dvic_ddc_read(ddata->i2c_adapter, edid, l, 0); - if (r) - return r; - - bytes_read = l; - - /* if there are extensions, read second block */ - if (len > EDID_LENGTH && edid[0x7e] > 0) { - l = min(EDID_LENGTH, len - EDID_LENGTH); - - r = dvic_ddc_read(ddata->i2c_adapter, edid + EDID_LENGTH, - l, EDID_LENGTH); - if (r) - return r; - - bytes_read += l; - } - - return bytes_read; -} - -static bool dvic_detect(struct omap_dss_device *dssdev) -{ - struct panel_drv_data *ddata = to_panel_data(dssdev); - unsigned char out; - int r; - - if (ddata->hpd_gpio) - return gpiod_get_value_cansleep(ddata->hpd_gpio); - - if (!ddata->i2c_adapter) - return true; - - r = dvic_ddc_read(ddata->i2c_adapter, &out, 1, 0); - - return r == 0; -} - -static void dvic_register_hpd_cb(struct omap_dss_device *dssdev, - void (*cb)(void *cb_data, - enum drm_connector_status status), - void *cb_data) -{ - struct panel_drv_data *ddata = to_panel_data(dssdev); - - mutex_lock(&ddata->hpd_lock); - ddata->hpd_cb = cb; - ddata->hpd_cb_data = cb_data; - mutex_unlock(&ddata->hpd_lock); -} - -static void dvic_unregister_hpd_cb(struct omap_dss_device *dssdev) -{ - struct panel_drv_data *ddata = to_panel_data(dssdev); - - mutex_lock(&ddata->hpd_lock); - ddata->hpd_cb = NULL; - ddata->hpd_cb_data = NULL; - mutex_unlock(&ddata->hpd_lock); -} - -static const struct omap_dss_device_ops dvic_ops = { - .connect = dvic_connect, - .disconnect = dvic_disconnect, - - .enable = dvic_enable, - .disable = dvic_disable, - - .read_edid = dvic_read_edid, - .detect = dvic_detect, - - .register_hpd_cb = dvic_register_hpd_cb, - .unregister_hpd_cb = dvic_unregister_hpd_cb, -}; - -static irqreturn_t dvic_hpd_isr(int irq, void *data) -{ - struct panel_drv_data *ddata = data; - - mutex_lock(&ddata->hpd_lock); - if (ddata->hpd_enabled && ddata->hpd_cb) { - enum drm_connector_status status; - - if (dvic_detect(&ddata->dssdev)) - status = connector_status_connected; - else - status = connector_status_disconnected; - - ddata->hpd_cb(ddata->hpd_cb_data, status); - } - mutex_unlock(&ddata->hpd_lock); - - return IRQ_HANDLED; -} - -static int dvic_probe_of(struct platform_device *pdev) -{ - struct panel_drv_data *ddata = platform_get_drvdata(pdev); - struct device_node *node = pdev->dev.of_node; - struct device_node *adapter_node; - struct i2c_adapter *adapter; - struct gpio_desc *gpio; - int r; - - gpio = devm_gpiod_get_optional(&pdev->dev, "hpd", GPIOD_IN); - if (IS_ERR(gpio)) { - dev_err(&pdev->dev, "failed to parse HPD gpio\n"); - return PTR_ERR(gpio); - } - - ddata->hpd_gpio = gpio; - - mutex_init(&ddata->hpd_lock); - - if (ddata->hpd_gpio) { - r = devm_request_threaded_irq(&pdev->dev, - gpiod_to_irq(ddata->hpd_gpio), NULL, dvic_hpd_isr, - IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT, - "DVI HPD", ddata); - if (r) - return r; - } - - adapter_node = of_parse_phandle(node, "ddc-i2c-bus", 0); - if (adapter_node) { - adapter = of_get_i2c_adapter_by_node(adapter_node); - of_node_put(adapter_node); - if (adapter == NULL) { - dev_err(&pdev->dev, "failed to parse ddc-i2c-bus\n"); - return -EPROBE_DEFER; - } - - ddata->i2c_adapter = adapter; - } - - return 0; -} - -static int dvic_probe(struct platform_device *pdev) -{ - struct panel_drv_data *ddata; - struct omap_dss_device *dssdev; - int r; - - ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), 
GFP_KERNEL); - if (!ddata) - return -ENOMEM; - - platform_set_drvdata(pdev, ddata); - - r = dvic_probe_of(pdev); - if (r) - return r; - - dssdev = &ddata->dssdev; - dssdev->ops = &dvic_ops; - dssdev->dev = &pdev->dev; - dssdev->type = OMAP_DISPLAY_TYPE_DVI; - dssdev->owner = THIS_MODULE; - dssdev->of_ports = BIT(0); - - if (ddata->hpd_gpio) - dssdev->ops_flags |= OMAP_DSS_DEVICE_OP_DETECT - | OMAP_DSS_DEVICE_OP_HPD; - if (ddata->i2c_adapter) - dssdev->ops_flags |= OMAP_DSS_DEVICE_OP_DETECT - | OMAP_DSS_DEVICE_OP_EDID; - - omapdss_display_init(dssdev); - omapdss_device_register(dssdev); - - return 0; -} - -static int __exit dvic_remove(struct platform_device *pdev) -{ - struct panel_drv_data *ddata = platform_get_drvdata(pdev); - struct omap_dss_device *dssdev = &ddata->dssdev; - - omapdss_device_unregister(&ddata->dssdev); - - dvic_disable(dssdev); - - i2c_put_adapter(ddata->i2c_adapter); - - mutex_destroy(&ddata->hpd_lock); - - return 0; -} - -static const struct of_device_id dvic_of_match[] = { - { .compatible = "omapdss,dvi-connector", }, - {}, -}; - -MODULE_DEVICE_TABLE(of, dvic_of_match); - -static struct platform_driver dvi_connector_driver = { - .probe = dvic_probe, - .remove = __exit_p(dvic_remove), - .driver = { - .name = "connector-dvi", - .of_match_table = dvic_of_match, - .suppress_bind_attrs = true, - }, -}; - -module_platform_driver(dvi_connector_driver); - -MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>"); -MODULE_DESCRIPTION("Generic DVI Connector driver"); -MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c index e602fa4a50a4..68d6f6e44b03 100644 --- a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c +++ b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c @@ -41,44 +41,6 @@ static void hdmic_disconnect(struct omap_dss_device *src, { } -static int hdmic_enable(struct omap_dss_device *dssdev) -{ - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *src = dssdev->src; - int r; - - dev_dbg(ddata->dev, "enable\n"); - - if (!omapdss_device_is_connected(dssdev)) - return -ENODEV; - - if (omapdss_device_is_enabled(dssdev)) - return 0; - - r = src->ops->enable(src); - if (r) - return r; - - dssdev->state = OMAP_DSS_DISPLAY_ACTIVE; - - return r; -} - -static void hdmic_disable(struct omap_dss_device *dssdev) -{ - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *src = dssdev->src; - - dev_dbg(ddata->dev, "disable\n"); - - if (!omapdss_device_is_enabled(dssdev)) - return; - - src->ops->disable(src); - - dssdev->state = OMAP_DSS_DISPLAY_DISABLED; -} - static bool hdmic_detect(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); @@ -113,9 +75,6 @@ static const struct omap_dss_device_ops hdmic_ops = { .connect = hdmic_connect, .disconnect = hdmic_disconnect, - .enable = hdmic_enable, - .disable = hdmic_disable, - .detect = hdmic_detect, .register_hpd_cb = hdmic_register_hpd_cb, .unregister_hpd_cb = hdmic_unregister_hpd_cb, @@ -181,6 +140,7 @@ static int hdmic_probe(struct platform_device *pdev) dssdev->ops = &hdmic_ops; dssdev->dev = &pdev->dev; dssdev->type = OMAP_DISPLAY_TYPE_HDMI; + dssdev->display = true; dssdev->owner = THIS_MODULE; dssdev->of_ports = BIT(0); dssdev->ops_flags = ddata->hpd_gpio @@ -196,12 +156,9 @@ static int hdmic_probe(struct platform_device *pdev) static int __exit hdmic_remove(struct platform_device *pdev) { struct panel_drv_data *ddata = platform_get_drvdata(pdev); - struct 
omap_dss_device *dssdev = &ddata->dssdev; omapdss_device_unregister(&ddata->dssdev); - hdmic_disable(dssdev); - return 0; } diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c index 4fefd80f53bb..29a5a130ebd1 100644 --- a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c +++ b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c @@ -41,48 +41,20 @@ static void opa362_disconnect(struct omap_dss_device *src, omapdss_device_disconnect(dst, dst->next); } -static int opa362_enable(struct omap_dss_device *dssdev) +static void opa362_enable(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *src = dssdev->src; - int r; - - dev_dbg(dssdev->dev, "enable\n"); - - if (!omapdss_device_is_connected(dssdev)) - return -ENODEV; - - if (omapdss_device_is_enabled(dssdev)) - return 0; - - r = src->ops->enable(src); - if (r) - return r; if (ddata->enable_gpio) gpiod_set_value_cansleep(ddata->enable_gpio, 1); - - dssdev->state = OMAP_DSS_DISPLAY_ACTIVE; - - return 0; } static void opa362_disable(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *src = dssdev->src; - - dev_dbg(dssdev->dev, "disable\n"); - - if (!omapdss_device_is_enabled(dssdev)) - return; if (ddata->enable_gpio) gpiod_set_value_cansleep(ddata->enable_gpio, 0); - - src->ops->disable(src); - - dssdev->state = OMAP_DSS_DISPLAY_DISABLED; } static const struct omap_dss_device_ops opa362_ops = { @@ -116,7 +88,6 @@ static int opa362_probe(struct platform_device *pdev) dssdev->ops = &opa362_ops; dssdev->dev = &pdev->dev; dssdev->type = OMAP_DISPLAY_TYPE_VENC; - dssdev->output_type = OMAP_DISPLAY_TYPE_VENC; dssdev->owner = THIS_MODULE; dssdev->of_ports = BIT(1) | BIT(0); @@ -141,13 +112,7 @@ static int __exit opa362_remove(struct platform_device *pdev) omapdss_device_put(dssdev->next); omapdss_device_unregister(&ddata->dssdev); - WARN_ON(omapdss_device_is_enabled(dssdev)); - if (omapdss_device_is_enabled(dssdev)) - opa362_disable(dssdev); - - WARN_ON(omapdss_device_is_connected(dssdev)); - if (omapdss_device_is_connected(dssdev)) - omapdss_device_disconnect(NULL, dssdev); + opa362_disable(dssdev); return 0; } diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c deleted file mode 100644 index f1a748353279..000000000000 --- a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c +++ /dev/null @@ -1,170 +0,0 @@ -/* - * TFP410 DPI-to-DVI encoder driver - * - * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/ - * Author: Tomi Valkeinen <tomi.valkeinen@ti.com> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published by - * the Free Software Foundation. 
- */ - -#include <linux/gpio/consumer.h> -#include <linux/module.h> -#include <linux/platform_device.h> -#include <linux/slab.h> - -#include "../dss/omapdss.h" - -struct panel_drv_data { - struct omap_dss_device dssdev; - - struct gpio_desc *pd_gpio; -}; - -#define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev) - -static int tfp410_connect(struct omap_dss_device *src, - struct omap_dss_device *dst) -{ - return omapdss_device_connect(dst->dss, dst, dst->next); -} - -static void tfp410_disconnect(struct omap_dss_device *src, - struct omap_dss_device *dst) -{ - omapdss_device_disconnect(dst, dst->next); -} - -static int tfp410_enable(struct omap_dss_device *dssdev) -{ - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *src = dssdev->src; - int r; - - if (!omapdss_device_is_connected(dssdev)) - return -ENODEV; - - if (omapdss_device_is_enabled(dssdev)) - return 0; - - r = src->ops->enable(src); - if (r) - return r; - - if (ddata->pd_gpio) - gpiod_set_value_cansleep(ddata->pd_gpio, 0); - - dssdev->state = OMAP_DSS_DISPLAY_ACTIVE; - - return 0; -} - -static void tfp410_disable(struct omap_dss_device *dssdev) -{ - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *src = dssdev->src; - - if (!omapdss_device_is_enabled(dssdev)) - return; - - if (ddata->pd_gpio) - gpiod_set_value_cansleep(ddata->pd_gpio, 0); - - src->ops->disable(src); - - dssdev->state = OMAP_DSS_DISPLAY_DISABLED; -} - -static const struct omap_dss_device_ops tfp410_ops = { - .connect = tfp410_connect, - .disconnect = tfp410_disconnect, - .enable = tfp410_enable, - .disable = tfp410_disable, -}; - -static int tfp410_probe(struct platform_device *pdev) -{ - struct panel_drv_data *ddata; - struct omap_dss_device *dssdev; - struct gpio_desc *gpio; - - ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL); - if (!ddata) - return -ENOMEM; - - platform_set_drvdata(pdev, ddata); - - /* Powerdown GPIO */ - gpio = devm_gpiod_get_optional(&pdev->dev, "powerdown", GPIOD_OUT_HIGH); - if (IS_ERR(gpio)) { - dev_err(&pdev->dev, "failed to parse powerdown gpio\n"); - return PTR_ERR(gpio); - } - - ddata->pd_gpio = gpio; - - dssdev = &ddata->dssdev; - dssdev->ops = &tfp410_ops; - dssdev->dev = &pdev->dev; - dssdev->type = OMAP_DISPLAY_TYPE_DPI; - dssdev->output_type = OMAP_DISPLAY_TYPE_DVI; - dssdev->owner = THIS_MODULE; - dssdev->of_ports = BIT(1) | BIT(0); - dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_SYNC_POSEDGE - | DRM_BUS_FLAG_PIXDATA_POSEDGE; - - dssdev->next = omapdss_of_find_connected_device(pdev->dev.of_node, 1); - if (IS_ERR(dssdev->next)) { - if (PTR_ERR(dssdev->next) != -EPROBE_DEFER) - dev_err(&pdev->dev, "failed to find video sink\n"); - return PTR_ERR(dssdev->next); - } - - omapdss_device_register(dssdev); - - return 0; -} - -static int __exit tfp410_remove(struct platform_device *pdev) -{ - struct panel_drv_data *ddata = platform_get_drvdata(pdev); - struct omap_dss_device *dssdev = &ddata->dssdev; - - if (dssdev->next) - omapdss_device_put(dssdev->next); - omapdss_device_unregister(&ddata->dssdev); - - WARN_ON(omapdss_device_is_enabled(dssdev)); - if (omapdss_device_is_enabled(dssdev)) - tfp410_disable(dssdev); - - WARN_ON(omapdss_device_is_connected(dssdev)); - if (omapdss_device_is_connected(dssdev)) - omapdss_device_disconnect(NULL, dssdev); - - return 0; -} - -static const struct of_device_id tfp410_of_match[] = { - { .compatible = "omapdss,ti,tfp410", }, - {}, -}; - -MODULE_DEVICE_TABLE(of, tfp410_of_match); - -static struct 
platform_driver tfp410_driver = { - .probe = tfp410_probe, - .remove = __exit_p(tfp410_remove), - .driver = { - .name = "tfp410", - .of_match_table = tfp410_of_match, - .suppress_bind_attrs = true, - }, -}; - -module_platform_driver(tfp410_driver); - -MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>"); -MODULE_DESCRIPTION("TFP410 DPI to DVI encoder driver"); -MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c index 94de55fd8884..bc03752d2762 100644 --- a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c +++ b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c @@ -62,35 +62,6 @@ static void tpd_disconnect(struct omap_dss_device *src, omapdss_device_disconnect(dst, dst->next); } -static int tpd_enable(struct omap_dss_device *dssdev) -{ - struct omap_dss_device *src = dssdev->src; - int r; - - if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) - return 0; - - r = src->ops->enable(src); - if (r) - return r; - - dssdev->state = OMAP_DSS_DISPLAY_ACTIVE; - - return r; -} - -static void tpd_disable(struct omap_dss_device *dssdev) -{ - struct omap_dss_device *src = dssdev->src; - - if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE) - return; - - src->ops->disable(src); - - dssdev->state = OMAP_DSS_DISPLAY_DISABLED; -} - static bool tpd_detect(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); @@ -124,8 +95,6 @@ static void tpd_unregister_hpd_cb(struct omap_dss_device *dssdev) static const struct omap_dss_device_ops tpd_ops = { .connect = tpd_connect, .disconnect = tpd_disconnect, - .enable = tpd_enable, - .disable = tpd_disable, .detect = tpd_detect, .register_hpd_cb = tpd_register_hpd_cb, .unregister_hpd_cb = tpd_unregister_hpd_cb, @@ -198,7 +167,6 @@ static int tpd_probe(struct platform_device *pdev) dssdev->ops = &tpd_ops; dssdev->dev = &pdev->dev; dssdev->type = OMAP_DISPLAY_TYPE_HDMI; - dssdev->output_type = OMAP_DISPLAY_TYPE_HDMI; dssdev->owner = THIS_MODULE; dssdev->of_ports = BIT(1) | BIT(0); dssdev->ops_flags = OMAP_DSS_DEVICE_OP_DETECT @@ -225,14 +193,6 @@ static int __exit tpd_remove(struct platform_device *pdev) omapdss_device_put(dssdev->next); omapdss_device_unregister(&ddata->dssdev); - WARN_ON(omapdss_device_is_enabled(dssdev)); - if (omapdss_device_is_enabled(dssdev)) - tpd_disable(dssdev); - - WARN_ON(omapdss_device_is_connected(dssdev)); - if (omapdss_device_is_connected(dssdev)) - omapdss_device_disconnect(NULL, dssdev); - return 0; } diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c deleted file mode 100644 index 465120809eb3..000000000000 --- a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c +++ /dev/null @@ -1,221 +0,0 @@ -/* - * Generic MIPI DPI Panel Driver - * - * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/ - * Author: Tomi Valkeinen <tomi.valkeinen@ti.com> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published by - * the Free Software Foundation. 
- */ - -#include <linux/gpio/consumer.h> -#include <linux/module.h> -#include <linux/platform_device.h> -#include <linux/slab.h> -#include <linux/of.h> -#include <linux/regulator/consumer.h> -#include <linux/backlight.h> - -#include <video/of_display_timing.h> - -#include "../dss/omapdss.h" - -struct panel_drv_data { - struct omap_dss_device dssdev; - - struct videomode vm; - - struct backlight_device *backlight; - - struct gpio_desc *enable_gpio; - struct regulator *vcc_supply; -}; - -#define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev) - -static int panel_dpi_connect(struct omap_dss_device *src, - struct omap_dss_device *dst) -{ - return 0; -} - -static void panel_dpi_disconnect(struct omap_dss_device *src, - struct omap_dss_device *dst) -{ -} - -static int panel_dpi_enable(struct omap_dss_device *dssdev) -{ - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *src = dssdev->src; - int r; - - if (!omapdss_device_is_connected(dssdev)) - return -ENODEV; - - if (omapdss_device_is_enabled(dssdev)) - return 0; - - r = src->ops->enable(src); - if (r) - return r; - - r = regulator_enable(ddata->vcc_supply); - if (r) { - src->ops->disable(src); - return r; - } - - gpiod_set_value_cansleep(ddata->enable_gpio, 1); - backlight_enable(ddata->backlight); - - dssdev->state = OMAP_DSS_DISPLAY_ACTIVE; - - return 0; -} - -static void panel_dpi_disable(struct omap_dss_device *dssdev) -{ - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *src = dssdev->src; - - if (!omapdss_device_is_enabled(dssdev)) - return; - - backlight_disable(ddata->backlight); - - gpiod_set_value_cansleep(ddata->enable_gpio, 0); - regulator_disable(ddata->vcc_supply); - - src->ops->disable(src); - - dssdev->state = OMAP_DSS_DISPLAY_DISABLED; -} - -static void panel_dpi_get_timings(struct omap_dss_device *dssdev, - struct videomode *vm) -{ - struct panel_drv_data *ddata = to_panel_data(dssdev); - - *vm = ddata->vm; -} - -static const struct omap_dss_device_ops panel_dpi_ops = { - .connect = panel_dpi_connect, - .disconnect = panel_dpi_disconnect, - - .enable = panel_dpi_enable, - .disable = panel_dpi_disable, - - .get_timings = panel_dpi_get_timings, -}; - -static int panel_dpi_probe_of(struct platform_device *pdev) -{ - struct panel_drv_data *ddata = platform_get_drvdata(pdev); - struct device_node *node = pdev->dev.of_node; - int r; - struct display_timing timing; - struct gpio_desc *gpio; - - gpio = devm_gpiod_get_optional(&pdev->dev, "enable", GPIOD_OUT_LOW); - if (IS_ERR(gpio)) - return PTR_ERR(gpio); - - ddata->enable_gpio = gpio; - - /* - * Many different panels are supported by this driver and there are - * probably very different needs for their reset pins in regards to - * timing and order relative to the enable gpio. So for now it's just - * ensured that the reset line isn't active. 
- */ - gpio = devm_gpiod_get_optional(&pdev->dev, "reset", GPIOD_OUT_LOW); - if (IS_ERR(gpio)) - return PTR_ERR(gpio); - - ddata->vcc_supply = devm_regulator_get(&pdev->dev, "vcc"); - if (IS_ERR(ddata->vcc_supply)) - return PTR_ERR(ddata->vcc_supply); - - ddata->backlight = devm_of_find_backlight(&pdev->dev); - - if (IS_ERR(ddata->backlight)) - return PTR_ERR(ddata->backlight); - - r = of_get_display_timing(node, "panel-timing", &timing); - if (r) { - dev_err(&pdev->dev, "failed to get video timing\n"); - return r; - } - - videomode_from_timing(&timing, &ddata->vm); - - return 0; -} - -static int panel_dpi_probe(struct platform_device *pdev) -{ - struct panel_drv_data *ddata; - struct omap_dss_device *dssdev; - int r; - - ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL); - if (ddata == NULL) - return -ENOMEM; - - platform_set_drvdata(pdev, ddata); - - r = panel_dpi_probe_of(pdev); - if (r) - return r; - - dssdev = &ddata->dssdev; - dssdev->dev = &pdev->dev; - dssdev->ops = &panel_dpi_ops; - dssdev->type = OMAP_DISPLAY_TYPE_DPI; - dssdev->owner = THIS_MODULE; - dssdev->of_ports = BIT(0); - drm_bus_flags_from_videomode(&ddata->vm, &dssdev->bus_flags); - - omapdss_display_init(dssdev); - omapdss_device_register(dssdev); - - return 0; -} - -static int __exit panel_dpi_remove(struct platform_device *pdev) -{ - struct panel_drv_data *ddata = platform_get_drvdata(pdev); - struct omap_dss_device *dssdev = &ddata->dssdev; - - omapdss_device_unregister(dssdev); - - panel_dpi_disable(dssdev); - - return 0; -} - -static const struct of_device_id panel_dpi_of_match[] = { - { .compatible = "omapdss,panel-dpi", }, - {}, -}; - -MODULE_DEVICE_TABLE(of, panel_dpi_of_match); - -static struct platform_driver panel_dpi_driver = { - .probe = panel_dpi_probe, - .remove = __exit_p(panel_dpi_remove), - .driver = { - .name = "panel-dpi", - .of_match_table = panel_dpi_of_match, - .suppress_bind_attrs = true, - }, -}; - -module_platform_driver(panel_dpi_driver); - -MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>"); -MODULE_DESCRIPTION("Generic MIPI DPI Panel Driver"); -MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c index 29692a5217c5..741a5e324767 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c @@ -24,6 +24,8 @@ #include <linux/of_device.h> #include <linux/regulator/consumer.h> +#include <drm/drm_connector.h> + #include <video/mipi_display.h> #include <video/of_display_timing.h> @@ -41,6 +43,7 @@ struct panel_drv_data { struct omap_dss_device dssdev; + struct omap_dss_device *src; struct videomode vm; @@ -141,7 +144,7 @@ static void hw_guard_wait(struct panel_drv_data *ddata) static int dsicm_dcs_read_1(struct panel_drv_data *ddata, u8 dcs_cmd, u8 *data) { - struct omap_dss_device *src = ddata->dssdev.src; + struct omap_dss_device *src = ddata->src; int r; u8 buf[1]; @@ -157,14 +160,14 @@ static int dsicm_dcs_read_1(struct panel_drv_data *ddata, u8 dcs_cmd, u8 *data) static int dsicm_dcs_write_0(struct panel_drv_data *ddata, u8 dcs_cmd) { - struct omap_dss_device *src = ddata->dssdev.src; + struct omap_dss_device *src = ddata->src; return src->ops->dsi.dcs_write(src, ddata->channel, &dcs_cmd, 1); } static int dsicm_dcs_write_1(struct panel_drv_data *ddata, u8 dcs_cmd, u8 param) { - struct omap_dss_device *src = ddata->dssdev.src; + struct omap_dss_device *src = ddata->src; u8 buf[2] = { dcs_cmd, param }; return src->ops->dsi.dcs_write(src, 
ddata->channel, buf, 2); @@ -173,7 +176,7 @@ static int dsicm_dcs_write_1(struct panel_drv_data *ddata, u8 dcs_cmd, u8 param) static int dsicm_sleep_in(struct panel_drv_data *ddata) { - struct omap_dss_device *src = ddata->dssdev.src; + struct omap_dss_device *src = ddata->src; u8 cmd; int r; @@ -228,7 +231,7 @@ static int dsicm_get_id(struct panel_drv_data *ddata, u8 *id1, u8 *id2, u8 *id3) static int dsicm_set_update_window(struct panel_drv_data *ddata, u16 x, u16 y, u16 w, u16 h) { - struct omap_dss_device *src = ddata->dssdev.src; + struct omap_dss_device *src = ddata->src; int r; u16 x1 = x; u16 x2 = x + w - 1; @@ -275,7 +278,7 @@ static void dsicm_cancel_ulps_work(struct panel_drv_data *ddata) static int dsicm_enter_ulps(struct panel_drv_data *ddata) { - struct omap_dss_device *src = ddata->dssdev.src; + struct omap_dss_device *src = ddata->src; int r; if (ddata->ulps_enabled) @@ -309,18 +312,13 @@ err: static int dsicm_exit_ulps(struct panel_drv_data *ddata) { - struct omap_dss_device *src = ddata->dssdev.src; + struct omap_dss_device *src = ddata->src; int r; if (!ddata->ulps_enabled) return 0; - r = src->ops->enable(src); - if (r) { - dev_err(&ddata->pdev->dev, "failed to enable DSI\n"); - goto err1; - } - + src->ops->enable(src); src->ops->dsi.enable_hs(src, ddata->channel, true); r = _dsicm_enable_te(ddata, true); @@ -347,7 +345,7 @@ err2: enable_irq(gpiod_to_irq(ddata->ext_te_gpio)); ddata->ulps_enabled = false; } -err1: + dsicm_queue_ulps_work(ddata); return r; @@ -366,7 +364,7 @@ static int dsicm_wake_up(struct panel_drv_data *ddata) static int dsicm_bl_update_status(struct backlight_device *dev) { struct panel_drv_data *ddata = dev_get_drvdata(&dev->dev); - struct omap_dss_device *src = ddata->dssdev.src; + struct omap_dss_device *src = ddata->src; int r = 0; int level; @@ -414,7 +412,7 @@ static ssize_t dsicm_num_errors_show(struct device *dev, { struct platform_device *pdev = to_platform_device(dev); struct panel_drv_data *ddata = platform_get_drvdata(pdev); - struct omap_dss_device *src = ddata->dssdev.src; + struct omap_dss_device *src = ddata->src; u8 errors = 0; int r; @@ -446,7 +444,7 @@ static ssize_t dsicm_hw_revision_show(struct device *dev, { struct platform_device *pdev = to_platform_device(dev); struct panel_drv_data *ddata = platform_get_drvdata(pdev); - struct omap_dss_device *src = ddata->dssdev.src; + struct omap_dss_device *src = ddata->src; u8 id1, id2, id3; int r; @@ -478,7 +476,7 @@ static ssize_t dsicm_store_ulps(struct device *dev, { struct platform_device *pdev = to_platform_device(dev); struct panel_drv_data *ddata = platform_get_drvdata(pdev); - struct omap_dss_device *src = ddata->dssdev.src; + struct omap_dss_device *src = ddata->src; unsigned long t; int r; @@ -528,7 +526,7 @@ static ssize_t dsicm_store_ulps_timeout(struct device *dev, { struct platform_device *pdev = to_platform_device(dev); struct panel_drv_data *ddata = platform_get_drvdata(pdev); - struct omap_dss_device *src = ddata->dssdev.src; + struct omap_dss_device *src = ddata->src; unsigned long t; int r; @@ -603,7 +601,7 @@ static void dsicm_hw_reset(struct panel_drv_data *ddata) static int dsicm_power_on(struct panel_drv_data *ddata) { - struct omap_dss_device *src = ddata->dssdev.src; + struct omap_dss_device *src = ddata->src; u8 id1, id2, id3; int r; struct omap_dss_dsi_config dsi_config = { @@ -649,11 +647,7 @@ static int dsicm_power_on(struct panel_drv_data *ddata) goto err_vddi; } - r = src->ops->enable(src); - if (r) { - dev_err(&ddata->pdev->dev, "failed to enable DSI\n"); - 
goto err_vddi; - } + src->ops->enable(src); dsicm_hw_reset(ddata); @@ -722,7 +716,7 @@ err_vpnl: static void dsicm_power_off(struct panel_drv_data *ddata) { - struct omap_dss_device *src = ddata->dssdev.src; + struct omap_dss_device *src = ddata->src; int r; src->ops->dsi.disable_video_output(src, ddata->channel); @@ -776,6 +770,7 @@ static int dsicm_connect(struct omap_dss_device *src, return r; } + ddata->src = src; return 0; } @@ -785,28 +780,17 @@ static void dsicm_disconnect(struct omap_dss_device *src, struct panel_drv_data *ddata = to_panel_data(dst); src->ops->dsi.release_vc(src, ddata->channel); + ddata->src = NULL; } -static int dsicm_enable(struct omap_dss_device *dssdev) +static void dsicm_enable(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *src = dssdev->src; + struct omap_dss_device *src = ddata->src; int r; - dev_dbg(&ddata->pdev->dev, "enable\n"); - mutex_lock(&ddata->lock); - if (!omapdss_device_is_connected(dssdev)) { - r = -ENODEV; - goto err; - } - - if (omapdss_device_is_enabled(dssdev)) { - r = 0; - goto err; - } - src->ops->dsi.bus_lock(src); r = dsicm_power_on(ddata); @@ -816,27 +800,22 @@ static int dsicm_enable(struct omap_dss_device *dssdev) if (r) goto err; - dssdev->state = OMAP_DSS_DISPLAY_ACTIVE; - mutex_unlock(&ddata->lock); dsicm_bl_power(ddata, true); - return 0; + return; err: - dev_dbg(&ddata->pdev->dev, "enable failed\n"); + dev_dbg(&ddata->pdev->dev, "enable failed (%d)\n", r); mutex_unlock(&ddata->lock); - return r; } static void dsicm_disable(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *src = dssdev->src; + struct omap_dss_device *src = ddata->src; int r; - dev_dbg(&ddata->pdev->dev, "disable\n"); - dsicm_bl_power(ddata, false); mutex_lock(&ddata->lock); @@ -845,23 +824,19 @@ static void dsicm_disable(struct omap_dss_device *dssdev) src->ops->dsi.bus_lock(src); - if (omapdss_device_is_enabled(dssdev)) { - r = dsicm_wake_up(ddata); - if (!r) - dsicm_power_off(ddata); - } + r = dsicm_wake_up(ddata); + if (!r) + dsicm_power_off(ddata); src->ops->dsi.bus_unlock(src); - dssdev->state = OMAP_DSS_DISPLAY_DISABLED; - mutex_unlock(&ddata->lock); } static void dsicm_framedone_cb(int err, void *data) { struct panel_drv_data *ddata = data; - struct omap_dss_device *src = ddata->dssdev.src; + struct omap_dss_device *src = ddata->src; dev_dbg(&ddata->pdev->dev, "framedone, err %d\n", err); src->ops->dsi.bus_unlock(src); @@ -870,7 +845,7 @@ static void dsicm_framedone_cb(int err, void *data) static irqreturn_t dsicm_te_isr(int irq, void *data) { struct panel_drv_data *ddata = data; - struct omap_dss_device *src = ddata->dssdev.src; + struct omap_dss_device *src = ddata->src; int old; int r; @@ -896,7 +871,7 @@ static void dsicm_te_timeout_work_callback(struct work_struct *work) { struct panel_drv_data *ddata = container_of(work, struct panel_drv_data, te_timeout_work.work); - struct omap_dss_device *src = ddata->dssdev.src; + struct omap_dss_device *src = ddata->src; dev_err(&ddata->pdev->dev, "TE not received for 250ms!\n"); @@ -908,7 +883,7 @@ static int dsicm_update(struct omap_dss_device *dssdev, u16 x, u16 y, u16 w, u16 h) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *src = dssdev->src; + struct omap_dss_device *src = ddata->src; int r; dev_dbg(&ddata->pdev->dev, "update %d, %d, %d x %d\n", x, y, w, h); @@ -954,7 +929,7 @@ err: static int dsicm_sync(struct omap_dss_device *dssdev) { struct 
panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *src = dssdev->src; + struct omap_dss_device *src = ddata->src; dev_dbg(&ddata->pdev->dev, "sync\n"); @@ -970,7 +945,7 @@ static int dsicm_sync(struct omap_dss_device *dssdev) static int _dsicm_enable_te(struct panel_drv_data *ddata, bool enable) { - struct omap_dss_device *src = ddata->dssdev.src; + struct omap_dss_device *src = ddata->src; int r; if (enable) @@ -990,7 +965,7 @@ static int _dsicm_enable_te(struct panel_drv_data *ddata, bool enable) static int dsicm_enable_te(struct omap_dss_device *dssdev, bool enable) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *src = dssdev->src; + struct omap_dss_device *src = ddata->src; int r; mutex_lock(&ddata->lock); @@ -1041,7 +1016,7 @@ static int dsicm_memory_read(struct omap_dss_device *dssdev, u16 x, u16 y, u16 w, u16 h) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *src = dssdev->src; + struct omap_dss_device *src = ddata->src; int r; int first = 1; int plen; @@ -1123,7 +1098,7 @@ static void dsicm_ulps_work(struct work_struct *work) struct panel_drv_data *ddata = container_of(work, struct panel_drv_data, ulps_work.work); struct omap_dss_device *dssdev = &ddata->dssdev; - struct omap_dss_device *src = dssdev->src; + struct omap_dss_device *src = ddata->src; mutex_lock(&ddata->lock); @@ -1140,29 +1115,32 @@ static void dsicm_ulps_work(struct work_struct *work) mutex_unlock(&ddata->lock); } -static void dsicm_get_timings(struct omap_dss_device *dssdev, - struct videomode *vm) +static int dsicm_get_modes(struct omap_dss_device *dssdev, + struct drm_connector *connector) { struct panel_drv_data *ddata = to_panel_data(dssdev); - *vm = ddata->vm; + connector->display_info.width_mm = ddata->width_mm; + connector->display_info.height_mm = ddata->height_mm; + + return omapdss_display_get_modes(connector, &ddata->vm); } static int dsicm_check_timings(struct omap_dss_device *dssdev, - struct videomode *vm) + struct drm_display_mode *mode) { struct panel_drv_data *ddata = to_panel_data(dssdev); int ret = 0; - if (vm->hactive != ddata->vm.hactive) + if (mode->hdisplay != ddata->vm.hactive) ret = -EINVAL; - if (vm->vactive != ddata->vm.vactive) + if (mode->vdisplay != ddata->vm.vactive) ret = -EINVAL; if (ret) { dev_warn(dssdev->dev, "wrong resolution: %d x %d", - vm->hactive, vm->vactive); + mode->hdisplay, mode->vdisplay); dev_warn(dssdev->dev, "panel resolution: %d x %d", ddata->vm.hactive, ddata->vm.vactive); } @@ -1170,15 +1148,6 @@ static int dsicm_check_timings(struct omap_dss_device *dssdev, return ret; } -static void dsicm_get_size(struct omap_dss_device *dssdev, - unsigned int *width, unsigned int *height) -{ - struct panel_drv_data *ddata = to_panel_data(dssdev); - - *width = ddata->width_mm; - *height = ddata->height_mm; -} - static const struct omap_dss_device_ops dsicm_ops = { .connect = dsicm_connect, .disconnect = dsicm_disconnect, @@ -1186,7 +1155,7 @@ static const struct omap_dss_device_ops dsicm_ops = { .enable = dsicm_enable, .disable = dsicm_disable, - .get_timings = dsicm_get_timings, + .get_modes = dsicm_get_modes, .check_timings = dsicm_check_timings, }; @@ -1194,8 +1163,6 @@ static const struct omap_dss_driver dsicm_dss_driver = { .update = dsicm_update, .sync = dsicm_sync, - .get_size = dsicm_get_size, - .enable_te = dsicm_enable_te, .get_te = dsicm_get_te, @@ -1305,8 +1272,10 @@ static int dsicm_probe(struct platform_device *pdev) dssdev->ops = &dsicm_ops; dssdev->driver = 
&dsicm_dss_driver; dssdev->type = OMAP_DISPLAY_TYPE_DSI; + dssdev->display = true; dssdev->owner = THIS_MODULE; dssdev->of_ports = BIT(0); + dssdev->ops_flags = OMAP_DSS_DEVICE_OP_MODES; dssdev->caps = OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE | OMAP_DSS_DISPLAY_CAP_TEAR_ELIM; @@ -1385,8 +1354,9 @@ static int __exit dsicm_remove(struct platform_device *pdev) omapdss_device_unregister(dssdev); - dsicm_disable(dssdev); - omapdss_device_disconnect(dssdev->src, dssdev); + if (omapdss_device_is_enabled(dssdev)) + dsicm_disable(dssdev); + omapdss_device_disconnect(ddata->src, dssdev); sysfs_remove_group(&pdev->dev.kobj, &dsicm_attr_group); diff --git a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c index f6ef8ff964dd..99f2350d462c 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c @@ -123,52 +123,28 @@ static void lb035q02_disconnect(struct omap_dss_device *src, { } -static int lb035q02_enable(struct omap_dss_device *dssdev) +static void lb035q02_enable(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *src = dssdev->src; - int r; - - if (!omapdss_device_is_connected(dssdev)) - return -ENODEV; - - if (omapdss_device_is_enabled(dssdev)) - return 0; - - r = src->ops->enable(src); - if (r) - return r; if (ddata->enable_gpio) gpiod_set_value_cansleep(ddata->enable_gpio, 1); - - dssdev->state = OMAP_DSS_DISPLAY_ACTIVE; - - return 0; } static void lb035q02_disable(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *src = dssdev->src; - - if (!omapdss_device_is_enabled(dssdev)) - return; if (ddata->enable_gpio) gpiod_set_value_cansleep(ddata->enable_gpio, 0); - - src->ops->disable(src); - - dssdev->state = OMAP_DSS_DISPLAY_DISABLED; } -static void lb035q02_get_timings(struct omap_dss_device *dssdev, - struct videomode *vm) +static int lb035q02_get_modes(struct omap_dss_device *dssdev, + struct drm_connector *connector) { struct panel_drv_data *ddata = to_panel_data(dssdev); - *vm = ddata->vm; + return omapdss_display_get_modes(connector, &ddata->vm); } static const struct omap_dss_device_ops lb035q02_ops = { @@ -178,7 +154,7 @@ static const struct omap_dss_device_ops lb035q02_ops = { .enable = lb035q02_enable, .disable = lb035q02_disable, - .get_timings = lb035q02_get_timings, + .get_modes = lb035q02_get_modes, }; static int lb035q02_probe_of(struct spi_device *spi) @@ -221,16 +197,19 @@ static int lb035q02_panel_spi_probe(struct spi_device *spi) dssdev->dev = &spi->dev; dssdev->ops = &lb035q02_ops; dssdev->type = OMAP_DISPLAY_TYPE_DPI; + dssdev->display = true; dssdev->owner = THIS_MODULE; dssdev->of_ports = BIT(0); + dssdev->ops_flags = OMAP_DSS_DEVICE_OP_MODES; /* * Note: According to the panel documentation: * DE is active LOW * DATA needs to be driven on the FALLING edge */ - dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_SYNC_NEGEDGE - | DRM_BUS_FLAG_PIXDATA_POSEDGE; + dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH + | DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE + | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE; omapdss_display_init(dssdev); omapdss_device_register(dssdev); diff --git a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c index f445de6369f7..c2409815a204 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c +++ 
b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c @@ -118,50 +118,26 @@ static void nec_8048_disconnect(struct omap_dss_device *src, { } -static int nec_8048_enable(struct omap_dss_device *dssdev) +static void nec_8048_enable(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *src = dssdev->src; - int r; - - if (!omapdss_device_is_connected(dssdev)) - return -ENODEV; - - if (omapdss_device_is_enabled(dssdev)) - return 0; - - r = src->ops->enable(src); - if (r) - return r; gpiod_set_value_cansleep(ddata->res_gpio, 1); - - dssdev->state = OMAP_DSS_DISPLAY_ACTIVE; - - return 0; } static void nec_8048_disable(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *src = dssdev->src; - - if (!omapdss_device_is_enabled(dssdev)) - return; gpiod_set_value_cansleep(ddata->res_gpio, 0); - - src->ops->disable(src); - - dssdev->state = OMAP_DSS_DISPLAY_DISABLED; } -static void nec_8048_get_timings(struct omap_dss_device *dssdev, - struct videomode *vm) +static int nec_8048_get_modes(struct omap_dss_device *dssdev, + struct drm_connector *connector) { struct panel_drv_data *ddata = to_panel_data(dssdev); - *vm = ddata->vm; + return omapdss_display_get_modes(connector, &ddata->vm); } static const struct omap_dss_device_ops nec_8048_ops = { @@ -171,7 +147,7 @@ static const struct omap_dss_device_ops nec_8048_ops = { .enable = nec_8048_enable, .disable = nec_8048_disable, - .get_timings = nec_8048_get_timings, + .get_modes = nec_8048_get_modes, }; static int nec_8048_probe(struct spi_device *spi) @@ -216,10 +192,13 @@ static int nec_8048_probe(struct spi_device *spi) dssdev->dev = &spi->dev; dssdev->ops = &nec_8048_ops; dssdev->type = OMAP_DISPLAY_TYPE_DPI; + dssdev->display = true; dssdev->owner = THIS_MODULE; dssdev->of_ports = BIT(0); - dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_SYNC_POSEDGE - | DRM_BUS_FLAG_PIXDATA_POSEDGE; + dssdev->ops_flags = OMAP_DSS_DEVICE_OP_MODES; + dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH + | DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE + | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE; omapdss_display_init(dssdev); omapdss_device_register(dssdev); diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c index 64b1369cb274..9c545de430f6 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c @@ -62,29 +62,22 @@ static void sharp_ls_disconnect(struct omap_dss_device *src, { } -static int sharp_ls_enable(struct omap_dss_device *dssdev) +static void sharp_ls_pre_enable(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *src = dssdev->src; int r; - if (!omapdss_device_is_connected(dssdev)) - return -ENODEV; - - if (omapdss_device_is_enabled(dssdev)) - return 0; - if (ddata->vcc) { r = regulator_enable(ddata->vcc); - if (r != 0) - return r; + if (r) + dev_err(dssdev->dev, "%s: failed to enable regulator\n", + __func__); } +} - r = src->ops->enable(src); - if (r) { - regulator_disable(ddata->vcc); - return r; - } +static void sharp_ls_enable(struct omap_dss_device *dssdev) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); /* wait couple of vsyncs until enabling the LCD */ msleep(50); @@ -94,19 +87,11 @@ static int sharp_ls_enable(struct omap_dss_device *dssdev) if (ddata->ini_gpio) gpiod_set_value_cansleep(ddata->ini_gpio, 1); - - dssdev->state 
= OMAP_DSS_DISPLAY_ACTIVE; - - return 0; } static void sharp_ls_disable(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *src = dssdev->src; - - if (!omapdss_device_is_enabled(dssdev)) - return; if (ddata->ini_gpio) gpiod_set_value_cansleep(ddata->ini_gpio, 0); @@ -115,33 +100,35 @@ static void sharp_ls_disable(struct omap_dss_device *dssdev) gpiod_set_value_cansleep(ddata->resb_gpio, 0); /* wait at least 5 vsyncs after disabling the LCD */ - msleep(100); +} - src->ops->disable(src); +static void sharp_ls_post_disable(struct omap_dss_device *dssdev) +{ + struct panel_drv_data *ddata = to_panel_data(dssdev); if (ddata->vcc) regulator_disable(ddata->vcc); - - dssdev->state = OMAP_DSS_DISPLAY_DISABLED; } -static void sharp_ls_get_timings(struct omap_dss_device *dssdev, - struct videomode *vm) +static int sharp_ls_get_modes(struct omap_dss_device *dssdev, + struct drm_connector *connector) { struct panel_drv_data *ddata = to_panel_data(dssdev); - *vm = ddata->vm; + return omapdss_display_get_modes(connector, &ddata->vm); } static const struct omap_dss_device_ops sharp_ls_ops = { .connect = sharp_ls_connect, .disconnect = sharp_ls_disconnect, + .pre_enable = sharp_ls_pre_enable, .enable = sharp_ls_enable, .disable = sharp_ls_disable, + .post_disable = sharp_ls_post_disable, - .get_timings = sharp_ls_get_timings, + .get_modes = sharp_ls_get_modes, }; static int sharp_ls_get_gpio_of(struct device *dev, int index, int val, @@ -220,15 +207,18 @@ static int sharp_ls_probe(struct platform_device *pdev) dssdev->dev = &pdev->dev; dssdev->ops = &sharp_ls_ops; dssdev->type = OMAP_DISPLAY_TYPE_DPI; + dssdev->display = true; dssdev->owner = THIS_MODULE; dssdev->of_ports = BIT(0); + dssdev->ops_flags = OMAP_DSS_DEVICE_OP_MODES; /* * Note: According to the panel documentation: * DATA needs to be driven on the FALLING edge */ - dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_SYNC_NEGEDGE - | DRM_BUS_FLAG_PIXDATA_POSEDGE; + dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH + | DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE + | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE; omapdss_display_init(dssdev); omapdss_device_register(dssdev); @@ -243,7 +233,10 @@ static int __exit sharp_ls_remove(struct platform_device *pdev) omapdss_device_unregister(dssdev); - sharp_ls_disable(dssdev); + if (omapdss_device_is_enabled(dssdev)) { + sharp_ls_disable(dssdev); + sharp_ls_post_disable(dssdev); + } return 0; } diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c index e04663856b31..2038def14ba1 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c @@ -516,17 +516,9 @@ static void acx565akm_disconnect(struct omap_dss_device *src, static int acx565akm_panel_power_on(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *src = dssdev->src; - int r; dev_dbg(&ddata->spi->dev, "%s\n", __func__); - r = src->ops->enable(src); - if (r) { - pr_err("%s sdi enable failed\n", __func__); - return r; - } - /*FIXME tweak me */ msleep(50); @@ -562,7 +554,6 @@ static int acx565akm_panel_power_on(struct omap_dss_device *dssdev) static void acx565akm_panel_power_off(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *src = dssdev->src; dev_dbg(dssdev->dev, "%s\n", __func__); @@ -585,56 +576,32 @@ static void 
acx565akm_panel_power_off(struct omap_dss_device *dssdev) /* FIXME need to tweak this delay */ msleep(100); - - src->ops->disable(src); } -static int acx565akm_enable(struct omap_dss_device *dssdev) +static void acx565akm_enable(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - int r; - - dev_dbg(dssdev->dev, "%s\n", __func__); - - if (!omapdss_device_is_connected(dssdev)) - return -ENODEV; - - if (omapdss_device_is_enabled(dssdev)) - return 0; mutex_lock(&ddata->mutex); - r = acx565akm_panel_power_on(dssdev); + acx565akm_panel_power_on(dssdev); mutex_unlock(&ddata->mutex); - if (r) - return r; - - dssdev->state = OMAP_DSS_DISPLAY_ACTIVE; - - return 0; } static void acx565akm_disable(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - dev_dbg(dssdev->dev, "%s\n", __func__); - - if (!omapdss_device_is_enabled(dssdev)) - return; - mutex_lock(&ddata->mutex); acx565akm_panel_power_off(dssdev); mutex_unlock(&ddata->mutex); - - dssdev->state = OMAP_DSS_DISPLAY_DISABLED; } -static void acx565akm_get_timings(struct omap_dss_device *dssdev, - struct videomode *vm) +static int acx565akm_get_modes(struct omap_dss_device *dssdev, + struct drm_connector *connector) { struct panel_drv_data *ddata = to_panel_data(dssdev); - *vm = ddata->vm; + return omapdss_display_get_modes(connector, &ddata->vm); } static const struct omap_dss_device_ops acx565akm_ops = { @@ -644,7 +611,7 @@ static const struct omap_dss_device_ops acx565akm_ops = { .enable = acx565akm_enable, .disable = acx565akm_disable, - .get_timings = acx565akm_get_timings, + .get_modes = acx565akm_get_modes, }; static int acx565akm_probe(struct spi_device *spi) @@ -739,10 +706,13 @@ static int acx565akm_probe(struct spi_device *spi) dssdev->dev = &spi->dev; dssdev->ops = &acx565akm_ops; dssdev->type = OMAP_DISPLAY_TYPE_SDI; + dssdev->display = true; dssdev->owner = THIS_MODULE; dssdev->of_ports = BIT(0); - dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_SYNC_NEGEDGE - | DRM_BUS_FLAG_PIXDATA_POSEDGE; + dssdev->ops_flags = OMAP_DSS_DEVICE_OP_MODES; + dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH + | DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE + | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE; omapdss_display_init(dssdev); omapdss_device_register(dssdev); @@ -766,7 +736,8 @@ static int acx565akm_remove(struct spi_device *spi) omapdss_device_unregister(dssdev); - acx565akm_disable(dssdev); + if (omapdss_device_is_enabled(dssdev)) + acx565akm_disable(dssdev); return 0; } diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c index 7ddc8c574a61..2ad161e33106 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c @@ -35,6 +35,8 @@ struct panel_drv_data { struct videomode vm; + struct backlight_device *backlight; + struct spi_device *spi_dev; }; @@ -169,24 +171,12 @@ static void td028ttec1_panel_disconnect(struct omap_dss_device *src, { } -static int td028ttec1_panel_enable(struct omap_dss_device *dssdev) +static void td028ttec1_panel_enable(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *src = dssdev->src; - int r; - - if (!omapdss_device_is_connected(dssdev)) - return -ENODEV; - - if (omapdss_device_is_enabled(dssdev)) - return 0; - - r = src->ops->enable(src); - if (r) - return r; + int r = 0; - dev_dbg(dssdev->dev, "td028ttec1_panel_enable() - state %d\n", - dssdev->state); + 
dev_dbg(dssdev->dev, "%s: state %d\n", __func__, dssdev->state); /* three times command zero */ r |= jbt_ret_write_0(ddata, 0x00); @@ -197,8 +187,8 @@ static int td028ttec1_panel_enable(struct omap_dss_device *dssdev) usleep_range(1000, 2000); if (r) { - dev_warn(dssdev->dev, "transfer error\n"); - goto transfer_err; + dev_warn(dssdev->dev, "%s: transfer error\n", __func__); + return; } /* deep standby out */ @@ -268,20 +258,17 @@ static int td028ttec1_panel_enable(struct omap_dss_device *dssdev) r |= jbt_ret_write_0(ddata, JBT_REG_DISPLAY_ON); - dssdev->state = OMAP_DSS_DISPLAY_ACTIVE; - -transfer_err: + if (r) + dev_err(dssdev->dev, "%s: write error\n", __func__); - return r ? -EIO : 0; + backlight_enable(ddata->backlight); } static void td028ttec1_panel_disable(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *src = dssdev->src; - if (!omapdss_device_is_enabled(dssdev)) - return; + backlight_disable(ddata->backlight); dev_dbg(dssdev->dev, "td028ttec1_panel_disable()\n"); @@ -289,18 +276,14 @@ static void td028ttec1_panel_disable(struct omap_dss_device *dssdev) jbt_reg_write_2(ddata, JBT_REG_OUTPUT_CONTROL, 0x8002); jbt_ret_write_0(ddata, JBT_REG_SLEEP_IN); jbt_reg_write_1(ddata, JBT_REG_POWER_ON_OFF, 0x00); - - src->ops->disable(src); - - dssdev->state = OMAP_DSS_DISPLAY_DISABLED; } -static void td028ttec1_panel_get_timings(struct omap_dss_device *dssdev, - struct videomode *vm) +static int td028ttec1_panel_get_modes(struct omap_dss_device *dssdev, + struct drm_connector *connector) { struct panel_drv_data *ddata = to_panel_data(dssdev); - *vm = ddata->vm; + return omapdss_display_get_modes(connector, &ddata->vm); } static const struct omap_dss_device_ops td028ttec1_ops = { @@ -310,7 +293,7 @@ static const struct omap_dss_device_ops td028ttec1_ops = { .enable = td028ttec1_panel_enable, .disable = td028ttec1_panel_disable, - .get_timings = td028ttec1_panel_get_timings, + .get_modes = td028ttec1_panel_get_modes, }; static int td028ttec1_panel_probe(struct spi_device *spi) @@ -334,6 +317,10 @@ static int td028ttec1_panel_probe(struct spi_device *spi) if (ddata == NULL) return -ENOMEM; + ddata->backlight = devm_of_find_backlight(&spi->dev); + if (IS_ERR(ddata->backlight)) + return PTR_ERR(ddata->backlight); + dev_set_drvdata(&spi->dev, ddata); ddata->spi_dev = spi; @@ -344,15 +331,18 @@ static int td028ttec1_panel_probe(struct spi_device *spi) dssdev->dev = &spi->dev; dssdev->ops = &td028ttec1_ops; dssdev->type = OMAP_DISPLAY_TYPE_DPI; + dssdev->display = true; dssdev->owner = THIS_MODULE; dssdev->of_ports = BIT(0); + dssdev->ops_flags = OMAP_DSS_DEVICE_OP_MODES; /* * Note: According to the panel documentation: * SYNC needs to be driven on the FALLING edge */ - dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_SYNC_POSEDGE - | DRM_BUS_FLAG_PIXDATA_NEGEDGE; + dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH + | DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE + | DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE; omapdss_display_init(dssdev); omapdss_device_register(dssdev); diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c index 8440fcb744d9..0b692fc7e5ea 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c @@ -320,22 +320,11 @@ static void tpo_td043_disconnect(struct omap_dss_device *src, { } -static int tpo_td043_enable(struct omap_dss_device *dssdev) +static void tpo_td043_enable(struct omap_dss_device 
*dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *src = dssdev->src; int r; - if (!omapdss_device_is_connected(dssdev)) - return -ENODEV; - - if (omapdss_device_is_enabled(dssdev)) - return 0; - - r = src->ops->enable(src); - if (r) - return r; - /* * If we are resuming from system suspend, SPI clocks might not be * enabled yet, so we'll program the LCD from SPI PM resume callback. @@ -343,38 +332,27 @@ static int tpo_td043_enable(struct omap_dss_device *dssdev) if (!ddata->spi_suspended) { r = tpo_td043_power_on(ddata); if (r) { - src->ops->disable(src); - return r; + dev_err(&ddata->spi->dev, "%s: power on failed (%d)\n", + __func__, r); + return; } } - - dssdev->state = OMAP_DSS_DISPLAY_ACTIVE; - - return 0; } static void tpo_td043_disable(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *src = dssdev->src; - - if (!omapdss_device_is_enabled(dssdev)) - return; - - src->ops->disable(src); if (!ddata->spi_suspended) tpo_td043_power_off(ddata); - - dssdev->state = OMAP_DSS_DISPLAY_DISABLED; } -static void tpo_td043_get_timings(struct omap_dss_device *dssdev, - struct videomode *vm) +static int tpo_td043_get_modes(struct omap_dss_device *dssdev, + struct drm_connector *connector) { struct panel_drv_data *ddata = to_panel_data(dssdev); - *vm = ddata->vm; + return omapdss_display_get_modes(connector, &ddata->vm); } static const struct omap_dss_device_ops tpo_td043_ops = { @@ -384,7 +362,7 @@ static const struct omap_dss_device_ops tpo_td043_ops = { .enable = tpo_td043_enable, .disable = tpo_td043_disable, - .get_timings = tpo_td043_get_timings, + .get_modes = tpo_td043_get_modes, }; static int tpo_td043_probe(struct spi_device *spi) @@ -442,15 +420,18 @@ static int tpo_td043_probe(struct spi_device *spi) dssdev->dev = &spi->dev; dssdev->ops = &tpo_td043_ops; dssdev->type = OMAP_DISPLAY_TYPE_DPI; + dssdev->display = true; dssdev->owner = THIS_MODULE; dssdev->of_ports = BIT(0); + dssdev->ops_flags = OMAP_DSS_DEVICE_OP_MODES; /* * Note: According to the panel documentation: * SYNC needs to be driven on the FALLING edge */ - dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_SYNC_POSEDGE - | DRM_BUS_FLAG_PIXDATA_NEGEDGE; + dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH + | DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE + | DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE; omapdss_display_init(dssdev); omapdss_device_register(dssdev); @@ -467,7 +448,8 @@ static int tpo_td043_remove(struct spi_device *spi) omapdss_device_unregister(dssdev); - tpo_td043_disable(dssdev); + if (omapdss_device_is_enabled(dssdev)) + tpo_td043_disable(dssdev); sysfs_remove_group(&spi->dev.kobj, &tpo_td043_attr_group); diff --git a/drivers/gpu/drm/omapdrm/dss/base.c b/drivers/gpu/drm/omapdrm/dss/base.c index 472f56e3de70..f8dad99013e8 100644 --- a/drivers/gpu/drm/omapdrm/dss/base.c +++ b/drivers/gpu/drm/omapdrm/dss/base.c @@ -19,6 +19,7 @@ #include <linux/mutex.h> #include <linux/of.h> #include <linux/of_graph.h> +#include <linux/platform_device.h> #include "dss.h" #include "omapdss.h" @@ -112,13 +113,12 @@ void omapdss_device_put(struct omap_dss_device *dssdev) } EXPORT_SYMBOL(omapdss_device_put); -struct omap_dss_device *omapdss_find_device_by_port(struct device_node *src, - unsigned int port) +struct omap_dss_device *omapdss_find_device_by_node(struct device_node *node) { struct omap_dss_device *dssdev; list_for_each_entry(dssdev, &omapdss_devices_list, list) { - if (dssdev->dev->of_node == src && dssdev->of_ports & BIT(port)) + if 
(dssdev->dev->of_node == node) return omapdss_device_get(dssdev); } @@ -126,13 +126,10 @@ struct omap_dss_device *omapdss_find_device_by_port(struct device_node *src, } /* - * Search for the next device starting at @from. The type argument specfies - * which device types to consider when searching. Searching for multiple types - * is supported by and'ing their type flags. Release the reference to the @from - * device, and acquire a reference to the returned device if found. + * Search for the next output device starting at @from. Release the reference to + * the @from device, and acquire a reference to the returned device if found. */ -struct omap_dss_device *omapdss_device_get_next(struct omap_dss_device *from, - enum omap_dss_device_type type) +struct omap_dss_device *omapdss_device_next_output(struct omap_dss_device *from) { struct omap_dss_device *dssdev; struct list_head *list; @@ -160,15 +157,8 @@ struct omap_dss_device *omapdss_device_get_next(struct omap_dss_device *from, goto done; } - /* - * Accept display entities if the display type is requested, - * and output entities if the output type is requested. - */ - if ((type & OMAP_DSS_DEVICE_TYPE_DISPLAY) && - !dssdev->output_type) - goto done; - if ((type & OMAP_DSS_DEVICE_TYPE_OUTPUT) && dssdev->id && - dssdev->next) + if (dssdev->id && + (dssdev->next || dssdev->bridge || dssdev->panel)) goto done; } @@ -183,7 +173,12 @@ done: mutex_unlock(&omapdss_devices_lock); return dssdev; } -EXPORT_SYMBOL(omapdss_device_get_next); +EXPORT_SYMBOL(omapdss_device_next_output); + +static bool omapdss_device_is_connected(struct omap_dss_device *dssdev) +{ + return dssdev->dss; +} int omapdss_device_connect(struct dss_device *dss, struct omap_dss_device *src, @@ -191,7 +186,19 @@ int omapdss_device_connect(struct dss_device *dss, { int ret; - dev_dbg(dst->dev, "connect\n"); + dev_dbg(&dss->pdev->dev, "connect(%s, %s)\n", + src ? dev_name(src->dev) : "NULL", + dst ? dev_name(dst->dev) : "NULL"); + + if (!dst) { + /* + * The destination is NULL when the source is connected to a + * bridge or panel instead of a DSS device. Stop here, we will + * attach the bridge or panel later when we will have a DRM + * encoder. + */ + return src && (src->bridge || src->panel) ? 0 : -EINVAL; + } if (omapdss_device_is_connected(dst)) return -EBUSY; @@ -204,12 +211,6 @@ int omapdss_device_connect(struct dss_device *dss, return ret; } - if (src) { - WARN_ON(src->dst); - dst->src = src; - src->dst = dst; - } - return 0; } EXPORT_SYMBOL_GPL(omapdss_device_connect); @@ -217,19 +218,20 @@ EXPORT_SYMBOL_GPL(omapdss_device_connect); void omapdss_device_disconnect(struct omap_dss_device *src, struct omap_dss_device *dst) { - dev_dbg(dst->dev, "disconnect\n"); + struct dss_device *dss = src ? src->dss : dst->dss; - if (!dst->id && !omapdss_device_is_connected(dst)) { - WARN_ON(dst->output_type); + dev_dbg(&dss->pdev->dev, "disconnect(%s, %s)\n", + src ? dev_name(src->dev) : "NULL", + dst ? 
dev_name(dst->dev) : "NULL"); + + if (!dst) { + WARN_ON(!src->bridge && !src->panel); return; } - if (src) { - if (WARN_ON(dst != src->dst)) - return; - - dst->src = NULL; - src->dst = NULL; + if (!dst->id && !omapdss_device_is_connected(dst)) { + WARN_ON(!dst->display); + return; } WARN_ON(dst->state != OMAP_DSS_DISPLAY_DISABLED); @@ -239,6 +241,58 @@ void omapdss_device_disconnect(struct omap_dss_device *src, } EXPORT_SYMBOL_GPL(omapdss_device_disconnect); +void omapdss_device_pre_enable(struct omap_dss_device *dssdev) +{ + if (!dssdev) + return; + + omapdss_device_pre_enable(dssdev->next); + + if (dssdev->ops->pre_enable) + dssdev->ops->pre_enable(dssdev); +} +EXPORT_SYMBOL_GPL(omapdss_device_pre_enable); + +void omapdss_device_enable(struct omap_dss_device *dssdev) +{ + if (!dssdev) + return; + + if (dssdev->ops->enable) + dssdev->ops->enable(dssdev); + + omapdss_device_enable(dssdev->next); + + dssdev->state = OMAP_DSS_DISPLAY_ACTIVE; +} +EXPORT_SYMBOL_GPL(omapdss_device_enable); + +void omapdss_device_disable(struct omap_dss_device *dssdev) +{ + if (!dssdev) + return; + + omapdss_device_disable(dssdev->next); + + if (dssdev->ops->disable) + dssdev->ops->disable(dssdev); +} +EXPORT_SYMBOL_GPL(omapdss_device_disable); + +void omapdss_device_post_disable(struct omap_dss_device *dssdev) +{ + if (!dssdev) + return; + + if (dssdev->ops->post_disable) + dssdev->ops->post_disable(dssdev); + + omapdss_device_post_disable(dssdev->next); + + dssdev->state = OMAP_DSS_DISPLAY_DISABLED; +} +EXPORT_SYMBOL_GPL(omapdss_device_post_disable); + /* ----------------------------------------------------------------------------- * Components Handling */ @@ -249,6 +303,7 @@ struct omapdss_comp_node { struct list_head list; struct device_node *node; bool dss_core_component; + const char *compat; }; static bool omapdss_list_contains(const struct device_node *node) @@ -266,13 +321,20 @@ static bool omapdss_list_contains(const struct device_node *node) static void omapdss_walk_device(struct device *dev, struct device_node *node, bool dss_core) { + struct omapdss_comp_node *comp; struct device_node *n; - struct omapdss_comp_node *comp = devm_kzalloc(dev, sizeof(*comp), - GFP_KERNEL); + const char *compat; + int ret; + ret = of_property_read_string(node, "compatible", &compat); + if (ret < 0) + return; + + comp = devm_kzalloc(dev, sizeof(*comp), GFP_KERNEL); if (comp) { comp->node = node; comp->dss_core_component = dss_core; + comp->compat = compat; list_add(&comp->list, &omapdss_comp_list); } @@ -312,12 +374,8 @@ void omapdss_gather_components(struct device *dev) omapdss_walk_device(dev, dev->of_node, true); - for_each_available_child_of_node(dev->of_node, child) { - if (!of_find_property(child, "compatible", NULL)) - continue; - + for_each_available_child_of_node(dev->of_node, child) omapdss_walk_device(dev, child, true); - } } EXPORT_SYMBOL(omapdss_gather_components); @@ -325,6 +383,8 @@ static bool omapdss_component_is_loaded(struct omapdss_comp_node *comp) { if (comp->dss_core_component) return true; + if (!strstarts(comp->compat, "omapdss,")) + return true; if (omapdss_device_is_registered(comp->node)) return true; diff --git a/drivers/gpu/drm/omapdrm/dss/display.c b/drivers/gpu/drm/omapdrm/dss/display.c index 34b2a4ef63a4..e93f61a567a8 100644 --- a/drivers/gpu/drm/omapdrm/dss/display.c +++ b/drivers/gpu/drm/omapdrm/dss/display.c @@ -23,6 +23,9 @@ #include <linux/kernel.h> #include <linux/of.h> +#include <drm/drm_connector.h> +#include <drm/drm_modes.h> + #include "omapdss.h" static int disp_num_counter; @@ 
-39,8 +42,6 @@ void omapdss_display_init(struct omap_dss_device *dssdev) if (id < 0) id = disp_num_counter++; - dssdev->alias_id = id; - /* Use 'label' property for name, if it exists */ of_property_read_string(dssdev->dev->of_node, "label", &dssdev->name); @@ -58,3 +59,22 @@ struct omap_dss_device *omapdss_display_get(struct omap_dss_device *output) return omapdss_device_get(output); } EXPORT_SYMBOL_GPL(omapdss_display_get); + +int omapdss_display_get_modes(struct drm_connector *connector, + const struct videomode *vm) +{ + struct drm_display_mode *mode; + + mode = drm_mode_create(connector->dev); + if (!mode) + return 0; + + drm_display_mode_from_videomode(vm, mode); + + mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED; + drm_mode_set_name(mode); + drm_mode_probed_add(connector, mode); + + return 1; +} +EXPORT_SYMBOL_GPL(omapdss_display_get_modes); diff --git a/drivers/gpu/drm/omapdrm/dss/dpi.c b/drivers/gpu/drm/omapdrm/dss/dpi.c index ca4f3c4c6318..cc78dfa07f04 100644 --- a/drivers/gpu/drm/omapdrm/dss/dpi.c +++ b/drivers/gpu/drm/omapdrm/dss/dpi.c @@ -47,8 +47,8 @@ struct dpi_data { struct mutex lock; - struct videomode vm; struct dss_lcd_mgr_config mgr_config; + unsigned long pixelclock; int data_lines; struct omap_dss_device output; @@ -347,16 +347,15 @@ static int dpi_set_dispc_clk(struct dpi_data *dpi, unsigned long pck_req, static int dpi_set_mode(struct dpi_data *dpi) { - const struct videomode *vm = &dpi->vm; int lck_div = 0, pck_div = 0; unsigned long fck = 0; int r = 0; if (dpi->pll) r = dpi_set_pll_clk(dpi, dpi->output.dispc_channel, - vm->pixelclock, &fck, &lck_div, &pck_div); + dpi->pixelclock, &fck, &lck_div, &pck_div); else - r = dpi_set_dispc_clk(dpi, vm->pixelclock, &fck, + r = dpi_set_dispc_clk(dpi, dpi->pixelclock, &fck, &lck_div, &pck_div); if (r) return r; @@ -378,7 +377,7 @@ static void dpi_config_lcd_manager(struct dpi_data *dpi) dss_mgr_set_lcd_config(&dpi->output, &dpi->mgr_config); } -static int dpi_display_enable(struct omap_dss_device *dssdev) +static void dpi_display_enable(struct omap_dss_device *dssdev) { struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev); struct omap_dss_device *out = &dpi->output; @@ -386,12 +385,6 @@ static int dpi_display_enable(struct omap_dss_device *dssdev) mutex_lock(&dpi->lock); - if (!out->dispc_channel_connected) { - DSSERR("failed to enable display: no output/manager\n"); - r = -ENODEV; - goto err_no_out_mgr; - } - if (dpi->vdds_dsi_reg) { r = regulator_enable(dpi->vdds_dsi_reg); if (r) @@ -426,7 +419,7 @@ static int dpi_display_enable(struct omap_dss_device *dssdev) mutex_unlock(&dpi->lock); - return 0; + return; err_mgr_enable: err_set_mode: @@ -439,9 +432,7 @@ err_get_dispc: if (dpi->vdds_dsi_reg) regulator_disable(dpi->vdds_dsi_reg); err_reg_enable: -err_no_out_mgr: mutex_unlock(&dpi->lock); - return r; } static void dpi_display_disable(struct omap_dss_device *dssdev) @@ -467,7 +458,7 @@ static void dpi_display_disable(struct omap_dss_device *dssdev) } static void dpi_set_timings(struct omap_dss_device *dssdev, - const struct videomode *vm) + const struct drm_display_mode *mode) { struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev); @@ -475,13 +466,13 @@ static void dpi_set_timings(struct omap_dss_device *dssdev, mutex_lock(&dpi->lock); - dpi->vm = *vm; + dpi->pixelclock = mode->clock * 1000; mutex_unlock(&dpi->lock); } static int dpi_check_timings(struct omap_dss_device *dssdev, - struct videomode *vm) + struct drm_display_mode *mode) { struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev); int lck_div, 
pck_div; @@ -490,20 +481,20 @@ static int dpi_check_timings(struct omap_dss_device *dssdev, struct dpi_clk_calc_ctx ctx; bool ok; - if (vm->hactive % 8 != 0) + if (mode->hdisplay % 8 != 0) return -EINVAL; - if (vm->pixelclock == 0) + if (mode->clock == 0) return -EINVAL; if (dpi->pll) { - ok = dpi_pll_clk_calc(dpi, vm->pixelclock, &ctx); + ok = dpi_pll_clk_calc(dpi, mode->clock * 1000, &ctx); if (!ok) return -EINVAL; fck = ctx.pll_cinfo.clkout[ctx.clkout_idx]; } else { - ok = dpi_dss_clk_calc(dpi, vm->pixelclock, &ctx); + ok = dpi_dss_clk_calc(dpi, mode->clock * 1000, &ctx); if (!ok) return -EINVAL; @@ -515,7 +506,7 @@ static int dpi_check_timings(struct omap_dss_device *dssdev, pck = fck / lck_div / pck_div; - vm->pixelclock = pck; + mode->clock = pck / 1000; return 0; } @@ -596,23 +587,15 @@ static int dpi_connect(struct omap_dss_device *src, struct omap_dss_device *dst) { struct dpi_data *dpi = dpi_get_data_from_dssdev(dst); - int r; dpi_init_pll(dpi); - r = omapdss_device_connect(dst->dss, dst, dst->next); - if (r) - return r; - - dst->dispc_channel_connected = true; - return 0; + return omapdss_device_connect(dst->dss, dst, dst->next); } static void dpi_disconnect(struct omap_dss_device *src, struct omap_dss_device *dst) { - dst->dispc_channel_connected = false; - omapdss_device_disconnect(dst, dst->next); } @@ -651,25 +634,15 @@ static int dpi_init_output_port(struct dpi_data *dpi, struct device_node *port) out->dev = &dpi->pdev->dev; out->id = OMAP_DSS_OUTPUT_DPI; - out->output_type = OMAP_DISPLAY_TYPE_DPI; + out->type = OMAP_DISPLAY_TYPE_DPI; out->dispc_channel = dpi_get_channel(dpi); out->of_ports = BIT(port_num); out->ops = &dpi_ops; out->owner = THIS_MODULE; - out->next = omapdss_of_find_connected_device(out->dev->of_node, 0); - if (IS_ERR(out->next)) { - if (PTR_ERR(out->next) != -EPROBE_DEFER) - dev_err(out->dev, "failed to find video sink\n"); - return PTR_ERR(out->next); - } - - r = omapdss_output_validate(out); - if (r) { - omapdss_device_put(out->next); - out->next = NULL; + r = omapdss_device_init_output(out); + if (r < 0) return r; - } omapdss_device_register(out); @@ -681,9 +654,8 @@ static void dpi_uninit_output_port(struct device_node *port) struct dpi_data *dpi = port->data; struct omap_dss_device *out = &dpi->output; - if (out->next) - omapdss_device_put(out->next); omapdss_device_unregister(out); + omapdss_device_cleanup_output(out); } static const struct soc_device_attribute dpi_soc_devices[] = { diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c index 64fb788b6647..5202862d89b5 100644 --- a/drivers/gpu/drm/omapdrm/dss/dsi.c +++ b/drivers/gpu/drm/omapdrm/dss/dsi.c @@ -1342,12 +1342,9 @@ static int dsi_pll_enable(struct dss_pll *pll) */ dsi_enable_scp_clk(dsi); - if (!dsi->vdds_dsi_enabled) { - r = regulator_enable(dsi->vdds_dsi_reg); - if (r) - goto err0; - dsi->vdds_dsi_enabled = true; - } + r = regulator_enable(dsi->vdds_dsi_reg); + if (r) + goto err0; /* XXX PLL does not come out of reset without this... 
*/ dispc_pck_free_enable(dsi->dss->dispc, 1); @@ -1372,36 +1369,25 @@ static int dsi_pll_enable(struct dss_pll *pll) return 0; err1: - if (dsi->vdds_dsi_enabled) { - regulator_disable(dsi->vdds_dsi_reg); - dsi->vdds_dsi_enabled = false; - } + regulator_disable(dsi->vdds_dsi_reg); err0: dsi_disable_scp_clk(dsi); dsi_runtime_put(dsi); return r; } -static void dsi_pll_uninit(struct dsi_data *dsi, bool disconnect_lanes) +static void dsi_pll_disable(struct dss_pll *pll) { + struct dsi_data *dsi = container_of(pll, struct dsi_data, pll); + dsi_pll_power(dsi, DSI_PLL_POWER_OFF); - if (disconnect_lanes) { - WARN_ON(!dsi->vdds_dsi_enabled); - regulator_disable(dsi->vdds_dsi_reg); - dsi->vdds_dsi_enabled = false; - } + + regulator_disable(dsi->vdds_dsi_reg); dsi_disable_scp_clk(dsi); dsi_runtime_put(dsi); - DSSDBG("PLL uninit done\n"); -} - -static void dsi_pll_disable(struct dss_pll *pll) -{ - struct dsi_data *dsi = container_of(pll, struct dsi_data, pll); - - dsi_pll_uninit(dsi, true); + DSSDBG("PLL disable done\n"); } static int dsi_dump_dsi_clocks(struct seq_file *s, void *p) @@ -3753,19 +3739,13 @@ static int dsi_enable_video_output(struct omap_dss_device *dssdev, int channel) { struct dsi_data *dsi = to_dsi_data(dssdev); int bpp = dsi_get_pixel_size(dsi->pix_fmt); - struct omap_dss_device *out = &dsi->output; u8 data_type; u16 word_count; int r; - if (!out->dispc_channel_connected) { - DSSERR("failed to enable display: no output/manager\n"); - return -ENODEV; - } - r = dsi_display_init_dispc(dsi); if (r) - goto err_init_dispc; + return r; if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) { switch (dsi->pix_fmt) { @@ -3814,7 +3794,6 @@ err_mgr_enable: } err_pix_fmt: dsi_display_uninit_dispc(dsi); -err_init_dispc: return r; } @@ -4096,11 +4075,11 @@ static int dsi_display_init_dsi(struct dsi_data *dsi) r = dss_pll_enable(&dsi->pll); if (r) - goto err0; + return r; r = dsi_configure_dsi_clocks(dsi); if (r) - goto err1; + goto err0; dss_select_dsi_clk_source(dsi->dss, dsi->module_id, dsi->module_id == 0 ? 
@@ -4108,6 +4087,14 @@ static int dsi_display_init_dsi(struct dsi_data *dsi) DSSDBG("PLL OK\n"); + if (!dsi->vdds_dsi_enabled) { + r = regulator_enable(dsi->vdds_dsi_reg); + if (r) + goto err1; + + dsi->vdds_dsi_enabled = true; + } + r = dsi_cio_init(dsi); if (r) goto err2; @@ -4136,10 +4123,13 @@ static int dsi_display_init_dsi(struct dsi_data *dsi) err3: dsi_cio_uninit(dsi); err2: - dss_select_dsi_clk_source(dsi->dss, dsi->module_id, DSS_CLK_SRC_FCK); + regulator_disable(dsi->vdds_dsi_reg); + dsi->vdds_dsi_enabled = false; err1: - dss_pll_disable(&dsi->pll); + dss_select_dsi_clk_source(dsi->dss, dsi->module_id, DSS_CLK_SRC_FCK); err0: + dss_pll_disable(&dsi->pll); + return r; } @@ -4158,13 +4148,18 @@ static void dsi_display_uninit_dsi(struct dsi_data *dsi, bool disconnect_lanes, dss_select_dsi_clk_source(dsi->dss, dsi->module_id, DSS_CLK_SRC_FCK); dsi_cio_uninit(dsi); - dsi_pll_uninit(dsi, disconnect_lanes); + dss_pll_disable(&dsi->pll); + + if (disconnect_lanes) { + regulator_disable(dsi->vdds_dsi_reg); + dsi->vdds_dsi_enabled = false; + } } -static int dsi_display_enable(struct omap_dss_device *dssdev) +static void dsi_display_enable(struct omap_dss_device *dssdev) { struct dsi_data *dsi = to_dsi_data(dssdev); - int r = 0; + int r; DSSDBG("dsi_display_enable\n"); @@ -4184,14 +4179,13 @@ static int dsi_display_enable(struct omap_dss_device *dssdev) mutex_unlock(&dsi->lock); - return 0; + return; err_init_dsi: dsi_runtime_put(dsi); err_get_dsi: mutex_unlock(&dsi->lock); DSSDBG("dsi_display_enable FAILED\n"); - return r; } static void dsi_display_disable(struct omap_dss_device *dssdev, @@ -4888,21 +4882,12 @@ static int dsi_get_clocks(struct dsi_data *dsi) static int dsi_connect(struct omap_dss_device *src, struct omap_dss_device *dst) { - int r; - - r = omapdss_device_connect(dst->dss, dst, dst->next); - if (r) - return r; - - dst->dispc_channel_connected = true; - return 0; + return omapdss_device_connect(dst->dss, dst, dst->next); } static void dsi_disconnect(struct omap_dss_device *src, struct omap_dss_device *dst) { - dst->dispc_channel_connected = false; - omapdss_device_disconnect(dst, dst->next); } @@ -5138,29 +5123,19 @@ static int dsi_init_output(struct dsi_data *dsi) out->id = dsi->module_id == 0 ? OMAP_DSS_OUTPUT_DSI1 : OMAP_DSS_OUTPUT_DSI2; - out->output_type = OMAP_DISPLAY_TYPE_DSI; + out->type = OMAP_DISPLAY_TYPE_DSI; out->name = dsi->module_id == 0 ? 
"dsi.0" : "dsi.1"; out->dispc_channel = dsi_get_channel(dsi); out->ops = &dsi_ops; out->owner = THIS_MODULE; out->of_ports = BIT(0); - out->bus_flags = DRM_BUS_FLAG_PIXDATA_POSEDGE + out->bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE | DRM_BUS_FLAG_DE_HIGH - | DRM_BUS_FLAG_SYNC_NEGEDGE; - - out->next = omapdss_of_find_connected_device(out->dev->of_node, 0); - if (IS_ERR(out->next)) { - if (PTR_ERR(out->next) != -EPROBE_DEFER) - dev_err(out->dev, "failed to find video sink\n"); - return PTR_ERR(out->next); - } + | DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE; - r = omapdss_output_validate(out); - if (r) { - omapdss_device_put(out->next); - out->next = NULL; + r = omapdss_device_init_output(out); + if (r < 0) return r; - } omapdss_device_register(out); @@ -5171,9 +5146,8 @@ static void dsi_uninit_output(struct dsi_data *dsi) { struct omap_dss_device *out = &dsi->output; - if (out->next) - omapdss_device_put(out->next); omapdss_device_unregister(out); + omapdss_device_cleanup_output(out); } static int dsi_probe_of(struct dsi_data *dsi) diff --git a/drivers/gpu/drm/omapdrm/dss/dss-of.c b/drivers/gpu/drm/omapdrm/dss/dss-of.c index 0422597ac6b0..b2094055c5fc 100644 --- a/drivers/gpu/drm/omapdrm/dss/dss-of.c +++ b/drivers/gpu/drm/omapdrm/dss/dss-of.c @@ -12,71 +12,25 @@ * more details. */ -#include <linux/device.h> #include <linux/err.h> -#include <linux/module.h> #include <linux/of.h> #include <linux/of_graph.h> -#include <linux/seq_file.h> #include "omapdss.h" -static struct device_node * -dss_of_port_get_parent_device(struct device_node *port) -{ - struct device_node *np; - int i; - - if (!port) - return NULL; - - np = of_get_parent(port); - - for (i = 0; i < 2 && np; ++i) { - struct property *prop; - - prop = of_find_property(np, "compatible", NULL); - - if (prop) - return np; - - np = of_get_next_parent(np); - } - - return NULL; -} - struct omap_dss_device * omapdss_of_find_connected_device(struct device_node *node, unsigned int port) { - struct device_node *src_node; - struct device_node *src_port; - struct device_node *ep; - struct omap_dss_device *src; - u32 port_number = 0; + struct device_node *remote_node; + struct omap_dss_device *dssdev; - /* Get the endpoint... */ - ep = of_graph_get_endpoint_by_regs(node, port, 0); - if (!ep) + remote_node = of_graph_get_remote_node(node, port, 0); + if (!remote_node) return NULL; - /* ... and its remote port... */ - src_port = of_graph_get_remote_port(ep); - of_node_put(ep); - if (!src_port) - return NULL; - - /* ... and the remote port's number and parent... */ - of_property_read_u32(src_port, "reg", &port_number); - src_node = dss_of_port_get_parent_device(src_port); - of_node_put(src_port); - if (!src_node) - return ERR_PTR(-EINVAL); - - /* ... and finally the connected device. */ - src = omapdss_find_device_by_port(src_node, port_number); - of_node_put(src_node); + dssdev = omapdss_find_device_by_node(remote_node); + of_node_put(remote_node); - return src ? src : ERR_PTR(-EPROBE_DEFER); + return dssdev ? 
dssdev : ERR_PTR(-EPROBE_DEFER); } EXPORT_SYMBOL_GPL(omapdss_of_find_connected_device); diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c index 7553c7fc1c45..55e68863ef15 100644 --- a/drivers/gpu/drm/omapdrm/dss/dss.c +++ b/drivers/gpu/drm/omapdrm/dss/dss.c @@ -1560,7 +1560,7 @@ static void dss_shutdown(struct platform_device *pdev) DSSDBG("shutdown\n"); - for_each_dss_display(dssdev) { + for_each_dss_output(dssdev) { if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) dssdev->ops->disable(dssdev); } diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c index aabdda394c9c..6339e2756b34 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c @@ -249,15 +249,15 @@ static void hdmi_power_off_full(struct omap_hdmi *hdmi) } static void hdmi_display_set_timings(struct omap_dss_device *dssdev, - const struct videomode *vm) + const struct drm_display_mode *mode) { struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev); mutex_lock(&hdmi->lock); - hdmi->cfg.vm = *vm; + drm_display_mode_to_videomode(mode, &hdmi->cfg.vm); - dispc_set_tv_pclk(hdmi->dss->dispc, vm->pixelclock); + dispc_set_tv_pclk(hdmi->dss->dispc, mode->clock * 1000); mutex_unlock(&hdmi->lock); } @@ -312,26 +312,20 @@ static void hdmi_stop_audio_stream(struct omap_hdmi *hd) hdmi_wp_audio_enable(&hd->wp, false); } -static int hdmi_display_enable(struct omap_dss_device *dssdev) +static void hdmi_display_enable(struct omap_dss_device *dssdev) { struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev); unsigned long flags; - int r = 0; + int r; DSSDBG("ENTER hdmi_display_enable\n"); mutex_lock(&hdmi->lock); - if (!dssdev->dispc_channel_connected) { - DSSERR("failed to enable display: no output/manager\n"); - r = -ENODEV; - goto err0; - } - r = hdmi_power_on_full(hdmi); if (r) { DSSERR("failed to power on device\n"); - goto err0; + goto done; } if (hdmi->audio_configured) { @@ -351,12 +345,8 @@ static int hdmi_display_enable(struct omap_dss_device *dssdev) hdmi->display_enabled = true; spin_unlock_irqrestore(&hdmi->audio_playing_lock, flags); +done: mutex_unlock(&hdmi->lock); - return 0; - -err0: - mutex_unlock(&hdmi->lock); - return r; } static void hdmi_display_disable(struct omap_dss_device *dssdev) @@ -417,21 +407,12 @@ void hdmi4_core_disable(struct hdmi_core_data *core) static int hdmi_connect(struct omap_dss_device *src, struct omap_dss_device *dst) { - int r; - - r = omapdss_device_connect(dst->dss, dst, dst->next); - if (r) - return r; - - dst->dispc_channel_connected = true; - return 0; + return omapdss_device_connect(dst->dss, dst, dst->next); } static void hdmi_disconnect(struct omap_dss_device *src, struct omap_dss_device *dst) { - dst->dispc_channel_connected = false; - omapdss_device_disconnect(dst, dst->next); } @@ -698,7 +679,7 @@ static int hdmi4_init_output(struct omap_hdmi *hdmi) out->dev = &hdmi->pdev->dev; out->id = OMAP_DSS_OUTPUT_HDMI; - out->output_type = OMAP_DISPLAY_TYPE_HDMI; + out->type = OMAP_DISPLAY_TYPE_HDMI; out->name = "hdmi.0"; out->dispc_channel = OMAP_DSS_CHANNEL_DIGIT; out->ops = &hdmi_ops; @@ -706,19 +687,9 @@ static int hdmi4_init_output(struct omap_hdmi *hdmi) out->of_ports = BIT(0); out->ops_flags = OMAP_DSS_DEVICE_OP_EDID; - out->next = omapdss_of_find_connected_device(out->dev->of_node, 0); - if (IS_ERR(out->next)) { - if (PTR_ERR(out->next) != -EPROBE_DEFER) - dev_err(out->dev, "failed to find video sink\n"); - return PTR_ERR(out->next); - } - - r = omapdss_output_validate(out); - if (r) { - 
omapdss_device_put(out->next); - out->next = NULL; + r = omapdss_device_init_output(out); + if (r < 0) return r; - } omapdss_device_register(out); @@ -729,9 +700,8 @@ static void hdmi4_uninit_output(struct omap_hdmi *hdmi) { struct omap_dss_device *out = &hdmi->output; - if (out->next) - omapdss_device_put(out->next); omapdss_device_unregister(out); + omapdss_device_cleanup_output(out); } static int hdmi4_probe_of(struct omap_hdmi *hdmi) diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c index 9e8556f67a29..2955bbad13bb 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c @@ -248,15 +248,15 @@ static void hdmi_power_off_full(struct omap_hdmi *hdmi) } static void hdmi_display_set_timings(struct omap_dss_device *dssdev, - const struct videomode *vm) + const struct drm_display_mode *mode) { struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev); mutex_lock(&hdmi->lock); - hdmi->cfg.vm = *vm; + drm_display_mode_to_videomode(mode, &hdmi->cfg.vm); - dispc_set_tv_pclk(hdmi->dss->dispc, vm->pixelclock); + dispc_set_tv_pclk(hdmi->dss->dispc, mode->clock * 1000); mutex_unlock(&hdmi->lock); } @@ -320,26 +320,20 @@ static void hdmi_stop_audio_stream(struct omap_hdmi *hd) REG_FLD_MOD(hd->wp.base, HDMI_WP_SYSCONFIG, hd->wp_idlemode, 3, 2); } -static int hdmi_display_enable(struct omap_dss_device *dssdev) +static void hdmi_display_enable(struct omap_dss_device *dssdev) { struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev); unsigned long flags; - int r = 0; + int r; DSSDBG("ENTER hdmi_display_enable\n"); mutex_lock(&hdmi->lock); - if (!dssdev->dispc_channel_connected) { - DSSERR("failed to enable display: no output/manager\n"); - r = -ENODEV; - goto err0; - } - r = hdmi_power_on_full(hdmi); if (r) { DSSERR("failed to power on device\n"); - goto err0; + goto done; } if (hdmi->audio_configured) { @@ -359,12 +353,8 @@ static int hdmi_display_enable(struct omap_dss_device *dssdev) hdmi->display_enabled = true; spin_unlock_irqrestore(&hdmi->audio_playing_lock, flags); +done: mutex_unlock(&hdmi->lock); - return 0; - -err0: - mutex_unlock(&hdmi->lock); - return r; } static void hdmi_display_disable(struct omap_dss_device *dssdev) @@ -422,21 +412,12 @@ static void hdmi_core_disable(struct omap_hdmi *hdmi) static int hdmi_connect(struct omap_dss_device *src, struct omap_dss_device *dst) { - int r; - - r = omapdss_device_connect(dst->dss, dst, dst->next); - if (r) - return r; - - dst->dispc_channel_connected = true; - return 0; + return omapdss_device_connect(dst->dss, dst, dst->next); } static void hdmi_disconnect(struct omap_dss_device *src, struct omap_dss_device *dst) { - dst->dispc_channel_connected = false; - omapdss_device_disconnect(dst, dst->next); } @@ -682,7 +663,7 @@ static int hdmi5_init_output(struct omap_hdmi *hdmi) out->dev = &hdmi->pdev->dev; out->id = OMAP_DSS_OUTPUT_HDMI; - out->output_type = OMAP_DISPLAY_TYPE_HDMI; + out->type = OMAP_DISPLAY_TYPE_HDMI; out->name = "hdmi.0"; out->dispc_channel = OMAP_DSS_CHANNEL_DIGIT; out->ops = &hdmi_ops; @@ -690,19 +671,9 @@ static int hdmi5_init_output(struct omap_hdmi *hdmi) out->of_ports = BIT(0); out->ops_flags = OMAP_DSS_DEVICE_OP_EDID; - out->next = omapdss_of_find_connected_device(out->dev->of_node, 0); - if (IS_ERR(out->next)) { - if (PTR_ERR(out->next) != -EPROBE_DEFER) - dev_err(out->dev, "failed to find video sink\n"); - return PTR_ERR(out->next); - } - - r = omapdss_output_validate(out); - if (r) { - omapdss_device_put(out->next); - out->next = NULL; + r = 
omapdss_device_init_output(out); + if (r < 0) return r; - } omapdss_device_register(out); @@ -713,9 +684,8 @@ static void hdmi5_uninit_output(struct omap_hdmi *hdmi) { struct omap_dss_device *out = &hdmi->output; - if (out->next) - omapdss_device_put(out->next); omapdss_device_unregister(out); + omapdss_device_cleanup_output(out); } static int hdmi5_probe_of(struct omap_hdmi *hdmi) diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c b/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c index 3bfb95d230e0..2b41c75ce988 100644 --- a/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c +++ b/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c @@ -184,6 +184,22 @@ static const struct of_device_id omapdss_of_match[] __initconst = { {}, }; +static const struct of_device_id omapdss_of_fixups_whitelist[] __initconst = { + { .compatible = "composite-video-connector" }, + { .compatible = "hdmi-connector" }, + { .compatible = "lgphilips,lb035q02" }, + { .compatible = "nec,nl8048hl11" }, + { .compatible = "panel-dsi-cm" }, + { .compatible = "sharp,ls037v7dw01" }, + { .compatible = "sony,acx565akm" }, + { .compatible = "svideo-connector" }, + { .compatible = "ti,opa362" }, + { .compatible = "ti,tpd12s015" }, + { .compatible = "toppoly,td028ttec1" }, + { .compatible = "tpo,td028ttec1" }, + { .compatible = "tpo,td043mtea1" }, +}; + static int __init omapdss_boot_init(void) { struct device_node *dss, *child; @@ -210,7 +226,7 @@ static int __init omapdss_boot_init(void) n = list_first_entry(&dss_conv_list, struct dss_conv_node, list); - if (!n->root) + if (of_match_node(omapdss_of_fixups_whitelist, n->node)) omapdss_omapify_node(n->node); list_del(&n->list); diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h index 33e15cb77efa..0c734d1f89e1 100644 --- a/drivers/gpu/drm/omapdrm/dss/omapdss.h +++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h @@ -19,7 +19,6 @@ #define __OMAP_DRM_DSS_H #include <linux/list.h> -#include <linux/kobject.h> #include <linux/device.h> #include <linux/interrupt.h> #include <video/videomode.h> @@ -68,6 +67,7 @@ struct dss_lcd_mgr_config; struct snd_aes_iec958; struct snd_cea_861_aud_if; struct hdmi_avi_infoframe; +struct drm_connector; enum omap_display_type { OMAP_DISPLAY_TYPE_NONE = 0, @@ -360,15 +360,15 @@ struct omap_dss_device_ops { void (*disconnect)(struct omap_dss_device *dssdev, struct omap_dss_device *dst); - int (*enable)(struct omap_dss_device *dssdev); + void (*pre_enable)(struct omap_dss_device *dssdev); + void (*enable)(struct omap_dss_device *dssdev); void (*disable)(struct omap_dss_device *dssdev); + void (*post_disable)(struct omap_dss_device *dssdev); int (*check_timings)(struct omap_dss_device *dssdev, - struct videomode *vm); - void (*get_timings)(struct omap_dss_device *dssdev, - struct videomode *vm); + struct drm_display_mode *mode); void (*set_timings)(struct omap_dss_device *dssdev, - const struct videomode *vm); + const struct drm_display_mode *mode); bool (*detect)(struct omap_dss_device *dssdev); @@ -380,6 +380,9 @@ struct omap_dss_device_ops { int (*read_edid)(struct omap_dss_device *dssdev, u8 *buf, int len); + int (*get_modes)(struct omap_dss_device *dssdev, + struct drm_connector *connector); + union { const struct omapdss_hdmi_ops hdmi; const struct omapdss_dsi_ops dsi; @@ -390,42 +393,40 @@ struct omap_dss_device_ops { * enum omap_dss_device_ops_flag - Indicates which device ops are supported * @OMAP_DSS_DEVICE_OP_DETECT: The device supports output connection detection * @OMAP_DSS_DEVICE_OP_HPD: The device supports all 
hot-plug-related operations - * @OMAP_DSS_DEVICE_OP_EDID: The device supports readind EDID + * @OMAP_DSS_DEVICE_OP_EDID: The device supports reading EDID + * @OMAP_DSS_DEVICE_OP_MODES: The device supports reading modes */ enum omap_dss_device_ops_flag { OMAP_DSS_DEVICE_OP_DETECT = BIT(0), OMAP_DSS_DEVICE_OP_HPD = BIT(1), OMAP_DSS_DEVICE_OP_EDID = BIT(2), -}; - -enum omap_dss_device_type { - OMAP_DSS_DEVICE_TYPE_OUTPUT = (1 << 0), - OMAP_DSS_DEVICE_TYPE_DISPLAY = (1 << 1), + OMAP_DSS_DEVICE_OP_MODES = BIT(3), }; struct omap_dss_device { - struct kobject kobj; struct device *dev; struct module *owner; struct dss_device *dss; - struct omap_dss_device *src; - struct omap_dss_device *dst; struct omap_dss_device *next; + struct drm_bridge *bridge; + struct drm_panel *panel; struct list_head list; - unsigned int alias_id; - + /* + * DSS type that this device generates (for DSS internal devices) or + * requires (for external encoders, connectors and panels). Must be a + * non-zero (different than OMAP_DISPLAY_TYPE_NONE) value. + */ enum omap_display_type type; + /* - * DSS output type that this device generates (for DSS internal devices) - * or requires (for external encoders). Must be OMAP_DISPLAY_TYPE_NONE - * for display devices (connectors and panels) and to non-zero value for - * all other devices. + * True if the device is a display (panel or connector) at the end of + * the pipeline, false otherwise. */ - enum omap_display_type output_type; + bool display; const char *name; @@ -434,9 +435,6 @@ struct omap_dss_device { unsigned long ops_flags; u32 bus_flags; - /* helper variable for driver suspend/resume */ - bool activate_after_resume; - enum omap_display_caps caps; enum omap_dss_display_state state; @@ -445,7 +443,6 @@ struct omap_dss_device { /* DISPC channel for this output */ enum omap_channel dispc_channel; - bool dispc_channel_connected; /* output instance */ enum omap_dss_output_id id; @@ -465,9 +462,6 @@ struct omap_dss_driver { int (*memory_read)(struct omap_dss_device *dssdev, void *buf, size_t size, u16 x, u16 y, u16 w, u16 h); - - void (*get_size)(struct omap_dss_device *dssdev, - unsigned int *width, unsigned int *height); }; struct dss_device *omapdss_get_dss(void); @@ -477,32 +471,35 @@ static inline bool omapdss_is_initialized(void) return !!omapdss_get_dss(); } -#define for_each_dss_display(d) \ - while ((d = omapdss_device_get_next(d, OMAP_DSS_DEVICE_TYPE_DISPLAY)) != NULL) void omapdss_display_init(struct omap_dss_device *dssdev); struct omap_dss_device *omapdss_display_get(struct omap_dss_device *output); +int omapdss_display_get_modes(struct drm_connector *connector, + const struct videomode *vm); void omapdss_device_register(struct omap_dss_device *dssdev); void omapdss_device_unregister(struct omap_dss_device *dssdev); struct omap_dss_device *omapdss_device_get(struct omap_dss_device *dssdev); void omapdss_device_put(struct omap_dss_device *dssdev); -struct omap_dss_device *omapdss_find_device_by_port(struct device_node *src, - unsigned int port); -struct omap_dss_device *omapdss_device_get_next(struct omap_dss_device *from, - enum omap_dss_device_type type); +struct omap_dss_device *omapdss_find_device_by_node(struct device_node *node); int omapdss_device_connect(struct dss_device *dss, struct omap_dss_device *src, struct omap_dss_device *dst); void omapdss_device_disconnect(struct omap_dss_device *src, struct omap_dss_device *dst); +void omapdss_device_pre_enable(struct omap_dss_device *dssdev); +void omapdss_device_enable(struct omap_dss_device *dssdev); +void 
omapdss_device_disable(struct omap_dss_device *dssdev); +void omapdss_device_post_disable(struct omap_dss_device *dssdev); int omap_dss_get_num_overlay_managers(void); int omap_dss_get_num_overlays(void); #define for_each_dss_output(d) \ - while ((d = omapdss_device_get_next(d, OMAP_DSS_DEVICE_TYPE_OUTPUT)) != NULL) -int omapdss_output_validate(struct omap_dss_device *out); + while ((d = omapdss_device_next_output(d)) != NULL) +struct omap_dss_device *omapdss_device_next_output(struct omap_dss_device *from); +int omapdss_device_init_output(struct omap_dss_device *out); +void omapdss_device_cleanup_output(struct omap_dss_device *out); typedef void (*omap_dispc_isr_t) (void *arg, u32 mask); int omap_dispc_register_isr(omap_dispc_isr_t isr, void *arg, u32 mask); @@ -511,11 +508,6 @@ int omap_dispc_unregister_isr(omap_dispc_isr_t isr, void *arg, u32 mask); int omapdss_compat_init(void); void omapdss_compat_uninit(void); -static inline bool omapdss_device_is_connected(struct omap_dss_device *dssdev) -{ - return dssdev->src; -} - static inline bool omapdss_device_is_enabled(struct omap_dss_device *dssdev) { return dssdev->state == OMAP_DSS_DISPLAY_ACTIVE; diff --git a/drivers/gpu/drm/omapdrm/dss/output.c b/drivers/gpu/drm/omapdrm/dss/output.c index 18505bc70f7e..10a9ee5cdc61 100644 --- a/drivers/gpu/drm/omapdrm/dss/output.c +++ b/drivers/gpu/drm/omapdrm/dss/output.c @@ -20,20 +20,48 @@ #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/of.h> +#include <linux/of_graph.h> + +#include <drm/drm_panel.h> #include "dss.h" #include "omapdss.h" -int omapdss_output_validate(struct omap_dss_device *out) +int omapdss_device_init_output(struct omap_dss_device *out) { - if (out->next && out->output_type != out->next->type) { + struct device_node *remote_node; + + remote_node = of_graph_get_remote_node(out->dev->of_node, 0, 0); + if (!remote_node) { + dev_dbg(out->dev, "failed to find video sink\n"); + return 0; + } + + out->next = omapdss_find_device_by_node(remote_node); + out->bridge = of_drm_find_bridge(remote_node); + out->panel = of_drm_find_panel(remote_node); + if (IS_ERR(out->panel)) + out->panel = NULL; + + of_node_put(remote_node); + + if (out->next && out->type != out->next->type) { dev_err(out->dev, "output type and display type don't match\n"); + omapdss_device_put(out->next); + out->next = NULL; return -EINVAL; } - return 0; + return out->next || out->bridge || out->panel ? 
0 : -EPROBE_DEFER; +} +EXPORT_SYMBOL(omapdss_device_init_output); + +void omapdss_device_cleanup_output(struct omap_dss_device *out) +{ + if (out->next) + omapdss_device_put(out->next); } -EXPORT_SYMBOL(omapdss_output_validate); +EXPORT_SYMBOL(omapdss_device_cleanup_output); int dss_install_mgr_ops(struct dss_device *dss, const struct dss_mgr_ops *mgr_ops, diff --git a/drivers/gpu/drm/omapdrm/dss/sdi.c b/drivers/gpu/drm/omapdrm/dss/sdi.c index b2fe2387037a..7aae52984fed 100644 --- a/drivers/gpu/drm/omapdrm/dss/sdi.c +++ b/drivers/gpu/drm/omapdrm/dss/sdi.c @@ -37,7 +37,7 @@ struct sdi_device { struct regulator *vdds_sdi_reg; struct dss_lcd_mgr_config mgr_config; - struct videomode vm; + unsigned long pixelclock; int datapairs; struct omap_dss_device output; @@ -129,27 +129,22 @@ static void sdi_config_lcd_manager(struct sdi_device *sdi) dss_mgr_set_lcd_config(&sdi->output, &sdi->mgr_config); } -static int sdi_display_enable(struct omap_dss_device *dssdev) +static void sdi_display_enable(struct omap_dss_device *dssdev) { struct sdi_device *sdi = dssdev_to_sdi(dssdev); struct dispc_clock_info dispc_cinfo; unsigned long fck; int r; - if (!sdi->output.dispc_channel_connected) { - DSSERR("failed to enable display: no output/manager\n"); - return -ENODEV; - } - r = regulator_enable(sdi->vdds_sdi_reg); if (r) - goto err_reg_enable; + return; r = dispc_runtime_get(sdi->dss->dispc); if (r) goto err_get_dispc; - r = sdi_calc_clock_div(sdi, sdi->vm.pixelclock, &fck, &dispc_cinfo); + r = sdi_calc_clock_div(sdi, sdi->pixelclock, &fck, &dispc_cinfo); if (r) goto err_calc_clock_div; @@ -185,7 +180,7 @@ static int sdi_display_enable(struct omap_dss_device *dssdev) if (r) goto err_mgr_enable; - return 0; + return; err_mgr_enable: dss_sdi_disable(sdi->dss); @@ -195,8 +190,6 @@ err_calc_clock_div: dispc_runtime_put(sdi->dss->dispc); err_get_dispc: regulator_disable(sdi->vdds_sdi_reg); -err_reg_enable: - return r; } static void sdi_display_disable(struct omap_dss_device *dssdev) @@ -213,36 +206,37 @@ static void sdi_display_disable(struct omap_dss_device *dssdev) } static void sdi_set_timings(struct omap_dss_device *dssdev, - const struct videomode *vm) + const struct drm_display_mode *mode) { struct sdi_device *sdi = dssdev_to_sdi(dssdev); - sdi->vm = *vm; + sdi->pixelclock = mode->clock * 1000; } static int sdi_check_timings(struct omap_dss_device *dssdev, - struct videomode *vm) + struct drm_display_mode *mode) { struct sdi_device *sdi = dssdev_to_sdi(dssdev); struct dispc_clock_info dispc_cinfo; + unsigned long pixelclock = mode->clock * 1000; unsigned long fck; unsigned long pck; int r; - if (vm->pixelclock == 0) + if (pixelclock == 0) return -EINVAL; - r = sdi_calc_clock_div(sdi, vm->pixelclock, &fck, &dispc_cinfo); + r = sdi_calc_clock_div(sdi, pixelclock, &fck, &dispc_cinfo); if (r) return r; pck = fck / dispc_cinfo.lck_div / dispc_cinfo.pck_div; - if (pck != vm->pixelclock) { + if (pck != pixelclock) { DSSWARN("Pixel clock adjusted from %lu Hz to %lu Hz\n", - vm->pixelclock, pck); + pixelclock, pck); - vm->pixelclock = pck; + mode->clock = pck / 1000; } return 0; @@ -251,21 +245,12 @@ static int sdi_check_timings(struct omap_dss_device *dssdev, static int sdi_connect(struct omap_dss_device *src, struct omap_dss_device *dst) { - int r; - - r = omapdss_device_connect(dst->dss, dst, dst->next); - if (r) - return r; - - dst->dispc_channel_connected = true; - return 0; + return omapdss_device_connect(dst->dss, dst, dst->next); } static void sdi_disconnect(struct omap_dss_device *src, struct omap_dss_device 
*dst) { - dst->dispc_channel_connected = false; - omapdss_device_disconnect(dst, dst->next); } @@ -287,29 +272,19 @@ static int sdi_init_output(struct sdi_device *sdi) out->dev = &sdi->pdev->dev; out->id = OMAP_DSS_OUTPUT_SDI; - out->output_type = OMAP_DISPLAY_TYPE_SDI; + out->type = OMAP_DISPLAY_TYPE_SDI; out->name = "sdi.0"; out->dispc_channel = OMAP_DSS_CHANNEL_LCD; /* We have SDI only on OMAP3, where it's on port 1 */ out->of_ports = BIT(1); out->ops = &sdi_ops; out->owner = THIS_MODULE; - out->bus_flags = DRM_BUS_FLAG_PIXDATA_POSEDGE /* 15.5.9.1.2 */ - | DRM_BUS_FLAG_SYNC_POSEDGE; - - out->next = omapdss_of_find_connected_device(out->dev->of_node, 1); - if (IS_ERR(out->next)) { - if (PTR_ERR(out->next) != -EPROBE_DEFER) - dev_err(out->dev, "failed to find video sink\n"); - return PTR_ERR(out->next); - } + out->bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE /* 15.5.9.1.2 */ + | DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE; - r = omapdss_output_validate(out); - if (r) { - omapdss_device_put(out->next); - out->next = NULL; + r = omapdss_device_init_output(out); + if (r < 0) return r; - } omapdss_device_register(out); @@ -318,9 +293,8 @@ static int sdi_init_output(struct sdi_device *sdi) static void sdi_uninit_output(struct sdi_device *sdi) { - if (sdi->output.next) - omapdss_device_put(sdi->output.next); omapdss_device_unregister(&sdi->output); + omapdss_device_cleanup_output(&sdi->output); } int sdi_init_port(struct dss_device *dss, struct platform_device *pdev, diff --git a/drivers/gpu/drm/omapdrm/dss/venc.c b/drivers/gpu/drm/omapdrm/dss/venc.c index b5f52727f8b1..da43b865d973 100644 --- a/drivers/gpu/drm/omapdrm/dss/venc.c +++ b/drivers/gpu/drm/omapdrm/dss/venc.c @@ -267,63 +267,40 @@ enum venc_videomode { VENC_MODE_NTSC, }; -static const struct videomode omap_dss_pal_vm = { - .hactive = 720, - .vactive = 574, - .pixelclock = 13500000, - .hsync_len = 64, - .hfront_porch = 12, - .hback_porch = 68, - .vsync_len = 5, - .vfront_porch = 5, - .vback_porch = 41, - - .flags = DISPLAY_FLAGS_INTERLACED | DISPLAY_FLAGS_HSYNC_LOW | - DISPLAY_FLAGS_VSYNC_LOW | DISPLAY_FLAGS_DE_HIGH | - DISPLAY_FLAGS_PIXDATA_POSEDGE | - DISPLAY_FLAGS_SYNC_NEGEDGE, +static const struct drm_display_mode omap_dss_pal_mode = { + .hdisplay = 720, + .hsync_start = 732, + .hsync_end = 796, + .htotal = 864, + .vdisplay = 574, + .vsync_start = 579, + .vsync_end = 584, + .vtotal = 625, + .clock = 13500, + + .flags = DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_NHSYNC | + DRM_MODE_FLAG_NVSYNC, }; -static const struct videomode omap_dss_ntsc_vm = { - .hactive = 720, - .vactive = 482, - .pixelclock = 13500000, - .hsync_len = 64, - .hfront_porch = 16, - .hback_porch = 58, - .vsync_len = 6, - .vfront_porch = 6, - .vback_porch = 31, - - .flags = DISPLAY_FLAGS_INTERLACED | DISPLAY_FLAGS_HSYNC_LOW | - DISPLAY_FLAGS_VSYNC_LOW | DISPLAY_FLAGS_DE_HIGH | - DISPLAY_FLAGS_PIXDATA_POSEDGE | - DISPLAY_FLAGS_SYNC_NEGEDGE, +static const struct drm_display_mode omap_dss_ntsc_mode = { + .hdisplay = 720, + .hsync_start = 736, + .hsync_end = 800, + .htotal = 858, + .vdisplay = 482, + .vsync_start = 488, + .vsync_end = 494, + .vtotal = 525, + .clock = 13500, + + .flags = DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_NHSYNC | + DRM_MODE_FLAG_NVSYNC, }; -static enum venc_videomode venc_get_videomode(const struct videomode *vm) -{ - if (!(vm->flags & DISPLAY_FLAGS_INTERLACED)) - return VENC_MODE_UNKNOWN; - - if (vm->pixelclock == omap_dss_pal_vm.pixelclock && - vm->hactive == omap_dss_pal_vm.hactive && - vm->vactive == omap_dss_pal_vm.vactive) - return VENC_MODE_PAL; - - if 
(vm->pixelclock == omap_dss_ntsc_vm.pixelclock && - vm->hactive == omap_dss_ntsc_vm.hactive && - vm->vactive == omap_dss_ntsc_vm.vactive) - return VENC_MODE_NTSC; - - return VENC_MODE_UNKNOWN; -} - struct venc_device { struct platform_device *pdev; void __iomem *base; struct mutex venc_lock; - u32 wss_data; struct regulator *vdda_dac_reg; struct dss_device *dss; @@ -331,7 +308,7 @@ struct venc_device { struct clk *tv_dac_clk; - struct videomode vm; + const struct venc_config *config; enum omap_dss_venc_type type; bool invert_polarity; bool requires_tv_dac_clk; @@ -367,8 +344,7 @@ static void venc_write_config(struct venc_device *venc, venc_write_reg(venc, VENC_BLACK_LEVEL, config->black_level); venc_write_reg(venc, VENC_BLANK_LEVEL, config->blank_level); venc_write_reg(venc, VENC_M_CONTROL, config->m_control); - venc_write_reg(venc, VENC_BSTAMP_WSS_DATA, config->bstamp_wss_data | - venc->wss_data); + venc_write_reg(venc, VENC_BSTAMP_WSS_DATA, config->bstamp_wss_data); venc_write_reg(venc, VENC_S_CARR, config->s_carr); venc_write_reg(venc, VENC_L21__WC_CTL, config->l21__wc_ctl); venc_write_reg(venc, VENC_SAVID__EAVID, config->savid__eavid); @@ -452,18 +428,6 @@ static void venc_runtime_put(struct venc_device *venc) WARN_ON(r < 0 && r != -ENOSYS); } -static const struct venc_config *venc_timings_to_config(const struct videomode *vm) -{ - switch (venc_get_videomode(vm)) { - default: - WARN_ON_ONCE(1); - case VENC_MODE_PAL: - return &venc_config_pal_trm; - case VENC_MODE_NTSC: - return &venc_config_ntsc_trm; - } -} - static int venc_power_on(struct venc_device *venc) { u32 l; @@ -474,7 +438,7 @@ static int venc_power_on(struct venc_device *venc) goto err0; venc_reset(venc); - venc_write_config(venc, venc_timings_to_config(&venc->vm)); + venc_write_config(venc, venc->config); dss_set_venc_output(venc->dss, venc->type); dss_set_dac_pwrdn_bgz(venc->dss, 1); @@ -524,33 +488,17 @@ static void venc_power_off(struct venc_device *venc) venc_runtime_put(venc); } -static int venc_display_enable(struct omap_dss_device *dssdev) +static void venc_display_enable(struct omap_dss_device *dssdev) { struct venc_device *venc = dssdev_to_venc(dssdev); - int r; DSSDBG("venc_display_enable\n"); mutex_lock(&venc->venc_lock); - if (!dssdev->dispc_channel_connected) { - DSSERR("Failed to enable display: no output/manager\n"); - r = -ENODEV; - goto err0; - } - - r = venc_power_on(venc); - if (r) - goto err0; - - venc->wss_data = 0; + venc_power_on(venc); mutex_unlock(&venc->venc_lock); - - return 0; -err0: - mutex_unlock(&venc->venc_lock); - return r; } static void venc_display_disable(struct omap_dss_device *dssdev) @@ -566,30 +514,70 @@ static void venc_display_disable(struct omap_dss_device *dssdev) mutex_unlock(&venc->venc_lock); } -static void venc_get_timings(struct omap_dss_device *dssdev, - struct videomode *vm) +static int venc_get_modes(struct omap_dss_device *dssdev, + struct drm_connector *connector) { - struct venc_device *venc = dssdev_to_venc(dssdev); + static const struct drm_display_mode *modes[] = { + &omap_dss_pal_mode, + &omap_dss_ntsc_mode, + }; + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(modes); ++i) { + struct drm_display_mode *mode; + + mode = drm_mode_duplicate(connector->dev, modes[i]); + if (!mode) + return i; + + mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED; + drm_mode_set_name(mode); + drm_mode_probed_add(connector, mode); + } - mutex_lock(&venc->venc_lock); - *vm = venc->vm; - mutex_unlock(&venc->venc_lock); + return ARRAY_SIZE(modes); +} + +static enum venc_videomode 
venc_get_videomode(const struct drm_display_mode *mode) +{ + if (!(mode->flags & DRM_MODE_FLAG_INTERLACE)) + return VENC_MODE_UNKNOWN; + + if (mode->clock == omap_dss_pal_mode.clock && + mode->hdisplay == omap_dss_pal_mode.hdisplay && + mode->vdisplay == omap_dss_pal_mode.vdisplay) + return VENC_MODE_PAL; + + if (mode->clock == omap_dss_ntsc_mode.clock && + mode->hdisplay == omap_dss_ntsc_mode.hdisplay && + mode->vdisplay == omap_dss_ntsc_mode.vdisplay) + return VENC_MODE_NTSC; + + return VENC_MODE_UNKNOWN; } static void venc_set_timings(struct omap_dss_device *dssdev, - const struct videomode *vm) + const struct drm_display_mode *mode) { struct venc_device *venc = dssdev_to_venc(dssdev); + enum venc_videomode venc_mode = venc_get_videomode(mode); DSSDBG("venc_set_timings\n"); mutex_lock(&venc->venc_lock); - /* Reset WSS data when the TV standard changes. */ - if (memcmp(&venc->vm, vm, sizeof(*vm))) - venc->wss_data = 0; + switch (venc_mode) { + default: + WARN_ON_ONCE(1); + /* Fall-through */ + case VENC_MODE_PAL: + venc->config = &venc_config_pal_trm; + break; - venc->vm = *vm; + case VENC_MODE_NTSC: + venc->config = &venc_config_ntsc_trm; + break; + } dispc_set_tv_pclk(venc->dss->dispc, 13500000); @@ -597,22 +585,26 @@ static void venc_set_timings(struct omap_dss_device *dssdev, } static int venc_check_timings(struct omap_dss_device *dssdev, - struct videomode *vm) + struct drm_display_mode *mode) { DSSDBG("venc_check_timings\n"); - switch (venc_get_videomode(vm)) { + switch (venc_get_videomode(mode)) { case VENC_MODE_PAL: - *vm = omap_dss_pal_vm; - return 0; + drm_mode_copy(mode, &omap_dss_pal_mode); + break; case VENC_MODE_NTSC: - *vm = omap_dss_ntsc_vm; - return 0; + drm_mode_copy(mode, &omap_dss_ntsc_mode); + break; default: return -EINVAL; } + + drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V); + drm_mode_set_name(mode); + return 0; } static int venc_dump_regs(struct seq_file *s, void *p) @@ -695,21 +687,12 @@ static int venc_get_clocks(struct venc_device *venc) static int venc_connect(struct omap_dss_device *src, struct omap_dss_device *dst) { - int r; - - r = omapdss_device_connect(dst->dss, dst, dst->next); - if (r) - return r; - - dst->dispc_channel_connected = true; - return 0; + return omapdss_device_connect(dst->dss, dst, dst->next); } static void venc_disconnect(struct omap_dss_device *src, struct omap_dss_device *dst) { - dst->dispc_channel_connected = false; - omapdss_device_disconnect(dst, dst->next); } @@ -721,8 +704,9 @@ static const struct omap_dss_device_ops venc_ops = { .disable = venc_display_disable, .check_timings = venc_check_timings, - .get_timings = venc_get_timings, .set_timings = venc_set_timings, + + .get_modes = venc_get_modes, }; /* ----------------------------------------------------------------------------- @@ -776,26 +760,17 @@ static int venc_init_output(struct venc_device *venc) out->dev = &venc->pdev->dev; out->id = OMAP_DSS_OUTPUT_VENC; - out->output_type = OMAP_DISPLAY_TYPE_VENC; + out->type = OMAP_DISPLAY_TYPE_VENC; out->name = "venc.0"; out->dispc_channel = OMAP_DSS_CHANNEL_DIGIT; out->ops = &venc_ops; out->owner = THIS_MODULE; out->of_ports = BIT(0); + out->ops_flags = OMAP_DSS_DEVICE_OP_MODES; - out->next = omapdss_of_find_connected_device(out->dev->of_node, 0); - if (IS_ERR(out->next)) { - if (PTR_ERR(out->next) != -EPROBE_DEFER) - dev_err(out->dev, "failed to find video sink\n"); - return PTR_ERR(out->next); - } - - r = omapdss_output_validate(out); - if (r) { - omapdss_device_put(out->next); - out->next = NULL; + r = 
omapdss_device_init_output(out); + if (r < 0) return r; - } omapdss_device_register(out); @@ -804,9 +779,8 @@ static void venc_uninit_output(struct venc_device *venc) { - if (venc->output.next) - omapdss_device_put(venc->output.next); omapdss_device_unregister(&venc->output); + omapdss_device_cleanup_output(&venc->output); } static int venc_probe_of(struct venc_device *venc) @@ -878,8 +852,7 @@ static int venc_probe(struct platform_device *pdev) mutex_init(&venc->venc_lock); - venc->wss_data = 0; - venc->vm = omap_dss_pal_vm; + venc->config = &venc_config_pal_trm; venc_mem = platform_get_resource(venc->pdev, IORESOURCE_MEM, 0); venc->base = devm_ioremap_resource(&pdev->dev, venc_mem); diff --git a/drivers/gpu/drm/omapdrm/omap_connector.c b/drivers/gpu/drm/omapdrm/omap_connector.c index 9da94d10782a..5967283934e1 100644 --- a/drivers/gpu/drm/omapdrm/omap_connector.c +++ b/drivers/gpu/drm/omapdrm/omap_connector.c @@ -17,6 +17,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_crtc.h> +#include <drm/drm_panel.h> #include <drm/drm_probe_helper.h> #include "omap_drv.h" @@ -30,24 +31,27 @@ struct omap_connector { struct drm_connector base; struct omap_dss_device *output; - struct omap_dss_device *display; struct omap_dss_device *hpd; bool hdmi_mode; }; static void omap_connector_hpd_notify(struct drm_connector *connector, - struct omap_dss_device *src, enum drm_connector_status status) { - if (status == connector_status_disconnected) { - /* - * If the source is an HDMI encoder, notify it of disconnection. - * This is required to let the HDMI encoder reset any internal - * state related to connection status, such as the CEC address. - */ - if (src && src->type == OMAP_DISPLAY_TYPE_HDMI && - src->ops->hdmi.lost_hotplug) - src->ops->hdmi.lost_hotplug(src); + struct omap_connector *omap_connector = to_omap_connector(connector); + struct omap_dss_device *dssdev; + + if (status != connector_status_disconnected) + return; + + /* + * Notify all devices in the pipeline of disconnection. This is required + * to let the HDMI encoders reset their internal state related to + * connection status, such as the CEC address. + */ + for (dssdev = omap_connector->output; dssdev; dssdev = dssdev->next) { + if (dssdev->ops && dssdev->ops->hdmi.lost_hotplug) + dssdev->ops->hdmi.lost_hotplug(dssdev); } } @@ -67,7 +71,7 @@ static void omap_connector_hpd_cb(void *cb_data, if (old_status == status) return; - omap_connector_hpd_notify(connector, omap_connector->hpd, status); + omap_connector_hpd_notify(connector, status); drm_kms_helper_hotplug_event(dev); } @@ -103,20 +107,20 @@ omap_connector_find_device(struct drm_connector *connector, enum omap_dss_device_ops_flag op) { struct omap_connector *omap_connector = to_omap_connector(connector); - struct omap_dss_device *dssdev; + struct omap_dss_device *dssdev = NULL; + struct omap_dss_device *d; - for (dssdev = omap_connector->display; dssdev; dssdev = dssdev->src) { - if (dssdev->ops_flags & op) - return dssdev; + for (d = omap_connector->output; d; d = d->next) { + if (d->ops_flags & op) + dssdev = d; } - return NULL; + return dssdev; } static enum drm_connector_status omap_connector_detect( struct drm_connector *connector, bool force) { - struct omap_connector *omap_connector = to_omap_connector(connector); struct omap_dss_device *dssdev; enum drm_connector_status status; @@ -128,13 +132,12 @@ static enum drm_connector_status omap_connector_detect( ? 
connector_status_connected : connector_status_disconnected; - omap_connector_hpd_notify(connector, dssdev->src, status); + omap_connector_hpd_notify(connector, status); } else { - switch (omap_connector->display->type) { - case OMAP_DISPLAY_TYPE_DPI: - case OMAP_DISPLAY_TYPE_DBI: - case OMAP_DISPLAY_TYPE_SDI: - case OMAP_DISPLAY_TYPE_DSI: + switch (connector->connector_type) { + case DRM_MODE_CONNECTOR_DPI: + case DRM_MODE_CONNECTOR_LVDS: + case DRM_MODE_CONNECTOR_DSI: status = connector_status_connected; break; default: @@ -143,7 +146,7 @@ static enum drm_connector_status omap_connector_detect( } } - VERB("%s: %d (force=%d)", omap_connector->display->name, status, force); + VERB("%s: %d (force=%d)", connector->name, status, force); return status; } @@ -152,7 +155,7 @@ static void omap_connector_destroy(struct drm_connector *connector) { struct omap_connector *omap_connector = to_omap_connector(connector); - DBG("%s", omap_connector->display->name); + DBG("%s", connector->name); if (omap_connector->hpd) { struct omap_dss_device *hpd = omap_connector->hpd; @@ -166,7 +169,6 @@ static void omap_connector_destroy(struct drm_connector *connector) drm_connector_cleanup(connector); omapdss_device_put(omap_connector->output); - omapdss_device_put(omap_connector->display); kfree(omap_connector); } @@ -212,10 +214,8 @@ static int omap_connector_get_modes(struct drm_connector *connector) { struct omap_connector *omap_connector = to_omap_connector(connector); struct omap_dss_device *dssdev; - struct drm_display_mode *mode; - struct videomode vm = {0}; - DBG("%s", omap_connector->display->name); + DBG("%s", connector->name); /* * If display exposes EDID, then we parse that in the normal way to @@ -227,89 +227,71 @@ static int omap_connector_get_modes(struct drm_connector *connector) return omap_connector_get_modes_edid(connector, dssdev); /* - * Otherwise we have either a fixed resolution panel or an output that - * doesn't support modes discovery (e.g. DVI or VGA with the DDC bus - * unconnected, or analog TV). Start by querying the size. + * Otherwise if the display pipeline reports modes (e.g. with a fixed + * resolution panel or an analog TV output), query it. */ - dssdev = omap_connector->display; - if (dssdev->driver && dssdev->driver->get_size) - dssdev->driver->get_size(dssdev, - &connector->display_info.width_mm, - &connector->display_info.height_mm); + dssdev = omap_connector_find_device(connector, + OMAP_DSS_DEVICE_OP_MODES); + if (dssdev) + return dssdev->ops->get_modes(dssdev, connector); /* - * Iterate over the pipeline to find the first device that can provide - * timing information. If we can't find any, we just let the KMS core - * add the default modes. + * Otherwise if the display pipeline uses a drm_panel, we delegate the + * operation to the panel API. */ - for (dssdev = omap_connector->display; dssdev; dssdev = dssdev->src) { - if (dssdev->ops->get_timings) - break; - } - if (!dssdev) - return 0; + if (omap_connector->output->panel) + return drm_panel_get_modes(omap_connector->output->panel); - /* Add a single mode corresponding to the fixed panel timings. */ - mode = drm_mode_create(connector->dev); - if (!mode) - return 0; + /* + * We can't retrieve modes, which can happen for instance for a DVI or + * VGA output with the DDC bus unconnected. The KMS core will add the + * default modes. 
+ */ + return 0; +} - dssdev->ops->get_timings(dssdev, &vm); +enum drm_mode_status omap_connector_mode_fixup(struct omap_dss_device *dssdev, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + int ret; - drm_display_mode_from_videomode(&vm, mode); + drm_mode_copy(adjusted_mode, mode); - mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED; - drm_mode_set_name(mode); - drm_mode_probed_add(connector, mode); + for (; dssdev; dssdev = dssdev->next) { + if (!dssdev->ops->check_timings) + continue; + + ret = dssdev->ops->check_timings(dssdev, adjusted_mode); + if (ret) + return MODE_BAD; + } - return 1; + return MODE_OK; } -static int omap_connector_mode_valid(struct drm_connector *connector, +static enum drm_mode_status omap_connector_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { struct omap_connector *omap_connector = to_omap_connector(connector); - enum omap_channel channel = omap_connector->output->dispc_channel; - struct omap_drm_private *priv = connector->dev->dev_private; - struct omap_dss_device *dssdev; - struct videomode vm = {0}; - struct drm_device *dev = connector->dev; - struct drm_display_mode *new_mode; - int r, ret = MODE_BAD; - - drm_display_mode_to_videomode(mode, &vm); - mode->vrefresh = drm_mode_vrefresh(mode); + struct drm_display_mode new_mode = { { 0 } }; + enum drm_mode_status status; - r = priv->dispc_ops->mgr_check_timings(priv->dispc, channel, &vm); - if (r) + status = omap_connector_mode_fixup(omap_connector->output, mode, + &new_mode); + if (status != MODE_OK) goto done; - for (dssdev = omap_connector->output; dssdev; dssdev = dssdev->next) { - if (!dssdev->ops->check_timings) - continue; - - r = dssdev->ops->check_timings(dssdev, &vm); - if (r) - goto done; - } - - /* check if vrefresh is still valid */ - new_mode = drm_mode_duplicate(dev, mode); - if (!new_mode) - return MODE_BAD; - - new_mode->clock = vm.pixelclock / 1000; - new_mode->vrefresh = 0; - if (mode->vrefresh == drm_mode_vrefresh(new_mode)) - ret = MODE_OK; - drm_mode_destroy(dev, new_mode); + /* Check if vrefresh is still valid. */ + if (drm_mode_vrefresh(mode) != drm_mode_vrefresh(&new_mode)) + status = MODE_NOCLOCK; done: DBG("connector: mode %s: " DRM_MODE_FMT, - (ret == MODE_OK) ? "valid" : "invalid", + (status == MODE_OK) ? 
"valid" : "invalid", DRM_MODE_ARG(mode)); - return ret; + return status; } static const struct drm_connector_funcs omap_connector_funcs = { @@ -326,9 +308,16 @@ static const struct drm_connector_helper_funcs omap_connector_helper_funcs = { .mode_valid = omap_connector_mode_valid, }; -static int omap_connector_get_type(struct omap_dss_device *display) +static int omap_connector_get_type(struct omap_dss_device *output) { - switch (display->type) { + struct omap_dss_device *display; + enum omap_display_type type; + + display = omapdss_display_get(output); + type = display->type; + omapdss_device_put(display); + + switch (type) { case OMAP_DISPLAY_TYPE_HDMI: return DRM_MODE_CONNECTOR_HDMIA; case OMAP_DISPLAY_TYPE_DVI: @@ -351,28 +340,26 @@ static int omap_connector_get_type(struct omap_dss_device *display) /* initialize connector */ struct drm_connector *omap_connector_init(struct drm_device *dev, struct omap_dss_device *output, - struct omap_dss_device *display, struct drm_encoder *encoder) { struct drm_connector *connector = NULL; struct omap_connector *omap_connector; struct omap_dss_device *dssdev; - DBG("%s", display->name); + DBG("%s", output->name); omap_connector = kzalloc(sizeof(*omap_connector), GFP_KERNEL); if (!omap_connector) goto fail; omap_connector->output = omapdss_device_get(output); - omap_connector->display = omapdss_device_get(display); connector = &omap_connector->base; connector->interlace_allowed = 1; connector->doublescan_allowed = 0; drm_connector_init(dev, connector, &omap_connector_funcs, - omap_connector_get_type(display)); + omap_connector_get_type(output)); drm_connector_helper_add(connector, &omap_connector_helper_funcs); /* diff --git a/drivers/gpu/drm/omapdrm/omap_connector.h b/drivers/gpu/drm/omapdrm/omap_connector.h index 854099801649..608085219336 100644 --- a/drivers/gpu/drm/omapdrm/omap_connector.h +++ b/drivers/gpu/drm/omapdrm/omap_connector.h @@ -22,6 +22,8 @@ #include <linux/types.h> +enum drm_mode_status; + struct drm_connector; struct drm_device; struct drm_encoder; @@ -29,12 +31,12 @@ struct omap_dss_device; struct drm_connector *omap_connector_init(struct drm_device *dev, struct omap_dss_device *output, - struct omap_dss_device *display, struct drm_encoder *encoder); -struct drm_encoder *omap_connector_attached_encoder( - struct drm_connector *connector); bool omap_connector_get_hdmi_mode(struct drm_connector *connector); void omap_connector_enable_hpd(struct drm_connector *connector); void omap_connector_disable_hpd(struct drm_connector *connector); +enum drm_mode_status omap_connector_mode_fixup(struct omap_dss_device *dssdev, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode); #endif /* __OMAPDRM_CONNECTOR_H__ */ diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c index d99e24dcc0bf..5a29bf01c0e8 100644 --- a/drivers/gpu/drm/omapdrm/omap_crtc.c +++ b/drivers/gpu/drm/omapdrm/omap_crtc.c @@ -128,7 +128,7 @@ static void omap_crtc_set_enabled(struct drm_crtc *crtc, bool enable) if (WARN_ON(omap_crtc->enabled == enable)) return; - if (omap_crtc->pipe->output->output_type == OMAP_DISPLAY_TYPE_HDMI) { + if (omap_crtc->pipe->output->type == OMAP_DISPLAY_TYPE_HDMI) { priv->dispc_ops->mgr_enable(priv->dispc, channel, enable); omap_crtc->enabled = enable; return; @@ -390,6 +390,15 @@ static enum drm_mode_status omap_crtc_mode_valid(struct drm_crtc *crtc, const struct drm_display_mode *mode) { struct omap_drm_private *priv = crtc->dev->dev_private; + struct omap_crtc *omap_crtc = 
to_omap_crtc(crtc); + struct videomode vm = {0}; + int r; + + drm_display_mode_to_videomode(mode, &vm); + r = priv->dispc_ops->mgr_check_timings(priv->dispc, omap_crtc->channel, + &vm); + if (r) + return r; /* Check for bandwidth limit */ if (priv->max_bandwidth) { @@ -657,7 +666,7 @@ struct drm_crtc *omap_crtc_init(struct drm_device *dev, &omap_crtc_funcs, NULL); if (ret < 0) { dev_err(dev->dev, "%s(): could not init crtc for: %s\n", - __func__, pipe->display->name); + __func__, pipe->output->name); kfree(omap_crtc); return ERR_PTR(ret); } diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c index f8292278f57d..1b9b6f5e48e1 100644 --- a/drivers/gpu/drm/omapdrm/omap_drv.c +++ b/drivers/gpu/drm/omapdrm/omap_drv.c @@ -23,6 +23,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_fb_helper.h> #include <drm/drm_probe_helper.h> +#include <drm/drm_panel.h> #include "omap_dmm_tiler.h" #include "omap_drv.h" @@ -137,12 +138,13 @@ static void omap_disconnect_pipelines(struct drm_device *ddev) for (i = 0; i < priv->num_pipes; i++) { struct omap_drm_pipeline *pipe = &priv->pipes[i]; + if (pipe->output->panel) + drm_panel_detach(pipe->output->panel); + omapdss_device_disconnect(NULL, pipe->output); omapdss_device_put(pipe->output); - omapdss_device_put(pipe->display); pipe->output = NULL; - pipe->display = NULL; } memset(&priv->channels, 0, sizeof(priv->channels)); @@ -150,33 +152,17 @@ static void omap_disconnect_pipelines(struct drm_device *ddev) priv->num_pipes = 0; } -static int omap_compare_pipes(const void *a, const void *b) -{ - const struct omap_drm_pipeline *pipe1 = a; - const struct omap_drm_pipeline *pipe2 = b; - - if (pipe1->display->alias_id > pipe2->display->alias_id) - return 1; - else if (pipe1->display->alias_id < pipe2->display->alias_id) - return -1; - return 0; -} - static int omap_connect_pipelines(struct drm_device *ddev) { struct omap_drm_private *priv = ddev->dev_private; struct omap_dss_device *output = NULL; - unsigned int i; int r; - if (!omapdss_stack_is_ready()) - return -EPROBE_DEFER; - for_each_dss_output(output) { r = omapdss_device_connect(priv->dss, NULL, output); if (r == -EPROBE_DEFER) { omapdss_device_put(output); - goto cleanup; + return r; } else if (r) { dev_warn(output->dev, "could not connect output %s\n", output->name); @@ -185,7 +171,6 @@ static int omap_connect_pipelines(struct drm_device *ddev) pipe = &priv->pipes[priv->num_pipes++]; pipe->output = omapdss_device_get(output); - pipe->display = omapdss_display_get(output); if (priv->num_pipes == ARRAY_SIZE(priv->pipes)) { /* To balance the 'for_each_dss_output' loop */ @@ -195,36 +180,19 @@ static int omap_connect_pipelines(struct drm_device *ddev) } } - /* Sort the list by DT aliases */ - sort(priv->pipes, priv->num_pipes, sizeof(priv->pipes[0]), - omap_compare_pipes, NULL); - - /* - * Populate the pipeline lookup table by DISPC channel. Only one display - * is allowed per channel. 
- */ - for (i = 0; i < priv->num_pipes; ++i) { - struct omap_drm_pipeline *pipe = &priv->pipes[i]; - enum omap_channel channel = pipe->output->dispc_channel; - - if (WARN_ON(priv->channels[channel] != NULL)) { - r = -EINVAL; - goto cleanup; - } - - priv->channels[channel] = pipe; - } - return 0; +} -cleanup: - /* - * if we are deferring probe, we disconnect the devices we previously - * connected - */ - omap_disconnect_pipelines(ddev); +static int omap_compare_pipelines(const void *a, const void *b) +{ + const struct omap_drm_pipeline *pipe1 = a; + const struct omap_drm_pipeline *pipe2 = b; - return r; + if (pipe1->alias_id > pipe2->alias_id) + return 1; + else if (pipe1->alias_id < pipe2->alias_id) + return -1; + return 0; } static int omap_modeset_init_properties(struct drm_device *dev) @@ -240,6 +208,30 @@ static int omap_modeset_init_properties(struct drm_device *dev) return 0; } +static int omap_display_id(struct omap_dss_device *output) +{ + struct device_node *node = NULL; + + if (output->next) { + struct omap_dss_device *display; + + display = omapdss_display_get(output); + node = display->dev->of_node; + omapdss_device_put(display); + } else if (output->bridge) { + struct drm_bridge *bridge = output->bridge; + + while (bridge->next) + bridge = bridge->next; + + node = bridge->of_node; + } else if (output->panel) { + node = output->panel->dev->of_node; + } + + return node ? of_alias_get_id(node, "display") : -ENODEV; +} + static int omap_modeset_init(struct drm_device *dev) { struct omap_drm_private *priv = dev->dev_private; @@ -249,6 +241,9 @@ static int omap_modeset_init(struct drm_device *dev) int ret; u32 plane_crtc_mask; + if (!omapdss_stack_is_ready()) + return -EPROBE_DEFER; + drm_mode_config_init(dev); ret = omap_modeset_init_properties(dev); @@ -263,6 +258,10 @@ static int omap_modeset_init(struct drm_device *dev) * configuration does not match the expectations or exceeds * the available resources, the configuration is rejected. */ + ret = omap_connect_pipelines(dev); + if (ret < 0) + return ret; + if (priv->num_pipes > num_mgrs || priv->num_pipes > num_ovls) { dev_err(dev->dev, "%s(): Too many connected displays\n", __func__); @@ -288,33 +287,75 @@ static int omap_modeset_init(struct drm_device *dev) priv->planes[priv->num_planes++] = plane; } - /* Create the CRTCs, encoders and connectors. */ + /* + * Create the encoders, attach the bridges and get the pipeline alias + * IDs. + */ for (i = 0; i < priv->num_pipes; i++) { struct omap_drm_pipeline *pipe = &priv->pipes[i]; - struct omap_dss_device *display = pipe->display; - struct drm_connector *connector; - struct drm_encoder *encoder; - struct drm_crtc *crtc; + int id; - encoder = omap_encoder_init(dev, pipe->output, display); - if (!encoder) + pipe->encoder = omap_encoder_init(dev, pipe->output); + if (!pipe->encoder) return -ENOMEM; - connector = omap_connector_init(dev, pipe->output, display, - encoder); - if (!connector) - return -ENOMEM; + if (pipe->output->bridge) { + ret = drm_bridge_attach(pipe->encoder, + pipe->output->bridge, NULL); + if (ret < 0) + return ret; + } + + id = omap_display_id(pipe->output); + pipe->alias_id = id >= 0 ? id : i; + } + + /* Sort the pipelines by DT aliases. */ + sort(priv->pipes, priv->num_pipes, sizeof(priv->pipes[0]), + omap_compare_pipelines, NULL); + + /* + * Populate the pipeline lookup table by DISPC channel. Only one display + * is allowed per channel. 
+ */ + for (i = 0; i < priv->num_pipes; ++i) { + struct omap_drm_pipeline *pipe = &priv->pipes[i]; + enum omap_channel channel = pipe->output->dispc_channel; + + if (WARN_ON(priv->channels[channel] != NULL)) + return -EINVAL; + + priv->channels[channel] = pipe; + } + + /* Create the connectors and CRTCs. */ + for (i = 0; i < priv->num_pipes; i++) { + struct omap_drm_pipeline *pipe = &priv->pipes[i]; + struct drm_encoder *encoder = pipe->encoder; + struct drm_crtc *crtc; + + if (!pipe->output->bridge) { + pipe->connector = omap_connector_init(dev, pipe->output, + encoder); + if (!pipe->connector) + return -ENOMEM; + + drm_connector_attach_encoder(pipe->connector, encoder); + + if (pipe->output->panel) { + ret = drm_panel_attach(pipe->output->panel, + pipe->connector); + if (ret < 0) + return ret; + } + } crtc = omap_crtc_init(dev, pipe, priv->planes[i]); if (IS_ERR(crtc)) return PTR_ERR(crtc); - drm_connector_attach_encoder(connector, encoder); encoder->possible_crtcs = 1 << i; - pipe->crtc = crtc; - pipe->encoder = encoder; - pipe->connector = connector; } DBG("registered %u planes, %u crtcs/encoders/connectors\n", @@ -351,10 +392,12 @@ static int omap_modeset_init(struct drm_device *dev) static void omap_modeset_enable_external_hpd(struct drm_device *ddev) { struct omap_drm_private *priv = ddev->dev_private; - int i; + unsigned int i; - for (i = 0; i < priv->num_pipes; i++) - omap_connector_enable_hpd(priv->pipes[i].connector); + for (i = 0; i < priv->num_pipes; i++) { + if (priv->pipes[i].connector) + omap_connector_enable_hpd(priv->pipes[i].connector); + } } /* @@ -363,10 +406,12 @@ static void omap_modeset_enable_external_hpd(struct drm_device *ddev) static void omap_modeset_disable_external_hpd(struct drm_device *ddev) { struct omap_drm_private *priv = ddev->dev_private; - int i; + unsigned int i; - for (i = 0; i < priv->num_pipes; i++) - omap_connector_disable_hpd(priv->pipes[i].connector); + for (i = 0; i < priv->num_pipes; i++) { + if (priv->pipes[i].connector) + omap_connector_disable_hpd(priv->pipes[i].connector); + } } /* @@ -551,10 +596,6 @@ static int omapdrm_init(struct omap_drm_private *priv, struct device *dev) omap_crtc_pre_init(priv); - ret = omap_connect_pipelines(ddev); - if (ret) - goto err_crtc_uninit; - soc = soc_device_match(omapdrm_soc_devices); priv->omaprev = soc ? 
(unsigned int)soc->data : 0; priv->wq = alloc_ordered_workqueue("omapdrm", 0); @@ -612,7 +653,6 @@ err_gem_deinit: omap_gem_deinit(ddev); destroy_workqueue(priv->wq); omap_disconnect_pipelines(ddev); -err_crtc_uninit: omap_crtc_pre_uninit(priv); drm_dev_put(ddev); return ret; @@ -685,54 +725,12 @@ static int pdev_remove(struct platform_device *pdev) } #ifdef CONFIG_PM_SLEEP -static int omap_drm_suspend_all_displays(struct drm_device *ddev) -{ - struct omap_drm_private *priv = ddev->dev_private; - int i; - - for (i = 0; i < priv->num_pipes; i++) { - struct omap_dss_device *display = priv->pipes[i].display; - - if (display->state == OMAP_DSS_DISPLAY_ACTIVE) { - display->ops->disable(display); - display->activate_after_resume = true; - } else { - display->activate_after_resume = false; - } - } - - return 0; -} - -static int omap_drm_resume_all_displays(struct drm_device *ddev) -{ - struct omap_drm_private *priv = ddev->dev_private; - int i; - - for (i = 0; i < priv->num_pipes; i++) { - struct omap_dss_device *display = priv->pipes[i].display; - - if (display->activate_after_resume) { - display->ops->enable(display); - display->activate_after_resume = false; - } - } - - return 0; -} - static int omap_drm_suspend(struct device *dev) { struct omap_drm_private *priv = dev_get_drvdata(dev); struct drm_device *drm_dev = priv->ddev; - drm_kms_helper_poll_disable(drm_dev); - - drm_modeset_lock_all(drm_dev); - omap_drm_suspend_all_displays(drm_dev); - drm_modeset_unlock_all(drm_dev); - - return 0; + return drm_mode_config_helper_suspend(drm_dev); } static int omap_drm_resume(struct device *dev) @@ -740,11 +738,7 @@ static int omap_drm_resume(struct device *dev) struct omap_drm_private *priv = dev_get_drvdata(dev); struct drm_device *drm_dev = priv->ddev; - drm_modeset_lock_all(drm_dev); - omap_drm_resume_all_displays(drm_dev); - drm_modeset_unlock_all(drm_dev); - - drm_kms_helper_poll_enable(drm_dev); + drm_mode_config_helper_resume(drm_dev); return omap_gem_resume(drm_dev); } diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h index 0c57d2814c51..3cca45cb25f3 100644 --- a/drivers/gpu/drm/omapdrm/omap_drv.h +++ b/drivers/gpu/drm/omapdrm/omap_drv.h @@ -49,7 +49,7 @@ struct omap_drm_pipeline { struct drm_encoder *encoder; struct drm_connector *connector; struct omap_dss_device *output; - struct omap_dss_device *display; + unsigned int alias_id; }; struct omap_drm_private { diff --git a/drivers/gpu/drm/omapdrm/omap_encoder.c b/drivers/gpu/drm/omapdrm/omap_encoder.c index 0d85b3a35767..40512419642b 100644 --- a/drivers/gpu/drm/omapdrm/omap_encoder.c +++ b/drivers/gpu/drm/omapdrm/omap_encoder.c @@ -20,6 +20,7 @@ #include <drm/drm_crtc.h> #include <drm/drm_modeset_helper_vtables.h> #include <drm/drm_edid.h> +#include <drm/drm_panel.h> #include "omap_drv.h" @@ -37,7 +38,6 @@ struct omap_encoder { struct drm_encoder base; struct omap_dss_device *output; - struct omap_dss_device *display; }; static void omap_encoder_destroy(struct drm_encoder *encoder) @@ -52,22 +52,43 @@ static const struct drm_encoder_funcs omap_encoder_funcs = { .destroy = omap_encoder_destroy, }; -static void omap_encoder_hdmi_mode_set(struct drm_encoder *encoder, +static void omap_encoder_update_videomode_flags(struct videomode *vm, + u32 bus_flags) +{ + if (!(vm->flags & (DISPLAY_FLAGS_DE_LOW | + DISPLAY_FLAGS_DE_HIGH))) { + if (bus_flags & DRM_BUS_FLAG_DE_LOW) + vm->flags |= DISPLAY_FLAGS_DE_LOW; + else if (bus_flags & DRM_BUS_FLAG_DE_HIGH) + vm->flags |= DISPLAY_FLAGS_DE_HIGH; + } + + if (!(vm->flags & 
(DISPLAY_FLAGS_PIXDATA_POSEDGE | + DISPLAY_FLAGS_PIXDATA_NEGEDGE))) { + if (bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE) + vm->flags |= DISPLAY_FLAGS_PIXDATA_POSEDGE; + else if (bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE) + vm->flags |= DISPLAY_FLAGS_PIXDATA_NEGEDGE; + } + + if (!(vm->flags & (DISPLAY_FLAGS_SYNC_POSEDGE | + DISPLAY_FLAGS_SYNC_NEGEDGE))) { + if (bus_flags & DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE) + vm->flags |= DISPLAY_FLAGS_SYNC_POSEDGE; + else if (bus_flags & DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE) + vm->flags |= DISPLAY_FLAGS_SYNC_NEGEDGE; + } +} + +static void omap_encoder_hdmi_mode_set(struct drm_connector *connector, + struct drm_encoder *encoder, struct drm_display_mode *adjusted_mode) { - struct drm_device *dev = encoder->dev; struct omap_encoder *omap_encoder = to_omap_encoder(encoder); struct omap_dss_device *dssdev = omap_encoder->output; - struct drm_connector *connector; bool hdmi_mode; - hdmi_mode = false; - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { - if (connector->encoder == encoder) { - hdmi_mode = omap_connector_get_hdmi_mode(connector); - break; - } - } + hdmi_mode = omap_connector_get_hdmi_mode(connector); if (dssdev->ops->hdmi.set_hdmi_mode) dssdev->ops->hdmi.set_hdmi_mode(dssdev, hdmi_mode); @@ -88,8 +109,18 @@ static void omap_encoder_mode_set(struct drm_encoder *encoder, struct drm_display_mode *adjusted_mode) { struct omap_encoder *omap_encoder = to_omap_encoder(encoder); + struct omap_dss_device *output = omap_encoder->output; struct omap_dss_device *dssdev; + struct drm_device *dev = encoder->dev; + struct drm_connector *connector; + struct drm_bridge *bridge; struct videomode vm = { 0 }; + u32 bus_flags; + + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + if (connector->encoder == encoder) + break; + } drm_display_mode_to_videomode(adjusted_mode, &vm); @@ -102,66 +133,102 @@ static void omap_encoder_mode_set(struct drm_encoder *encoder, * * A better solution is to use DRM's bus-flags through the whole driver. */ - for (dssdev = omap_encoder->output; dssdev; dssdev = dssdev->next) { - unsigned long bus_flags = dssdev->bus_flags; - - if (!(vm.flags & (DISPLAY_FLAGS_DE_LOW | - DISPLAY_FLAGS_DE_HIGH))) { - if (bus_flags & DRM_BUS_FLAG_DE_LOW) - vm.flags |= DISPLAY_FLAGS_DE_LOW; - else if (bus_flags & DRM_BUS_FLAG_DE_HIGH) - vm.flags |= DISPLAY_FLAGS_DE_HIGH; - } - - if (!(vm.flags & (DISPLAY_FLAGS_PIXDATA_POSEDGE | - DISPLAY_FLAGS_PIXDATA_NEGEDGE))) { - if (bus_flags & DRM_BUS_FLAG_PIXDATA_POSEDGE) - vm.flags |= DISPLAY_FLAGS_PIXDATA_POSEDGE; - else if (bus_flags & DRM_BUS_FLAG_PIXDATA_NEGEDGE) - vm.flags |= DISPLAY_FLAGS_PIXDATA_NEGEDGE; - } - - if (!(vm.flags & (DISPLAY_FLAGS_SYNC_POSEDGE | - DISPLAY_FLAGS_SYNC_NEGEDGE))) { - if (bus_flags & DRM_BUS_FLAG_SYNC_POSEDGE) - vm.flags |= DISPLAY_FLAGS_SYNC_POSEDGE; - else if (bus_flags & DRM_BUS_FLAG_SYNC_NEGEDGE) - vm.flags |= DISPLAY_FLAGS_SYNC_NEGEDGE; - } + for (dssdev = output; dssdev; dssdev = dssdev->next) + omap_encoder_update_videomode_flags(&vm, dssdev->bus_flags); + + for (bridge = output->bridge; bridge; bridge = bridge->next) { + if (!bridge->timings) + continue; + + bus_flags = bridge->timings->input_bus_flags; + omap_encoder_update_videomode_flags(&vm, bus_flags); } + bus_flags = connector->display_info.bus_flags; + omap_encoder_update_videomode_flags(&vm, bus_flags); + /* Set timings for all devices in the display pipeline. 
*/ - dss_mgr_set_timings(omap_encoder->output, &vm); + dss_mgr_set_timings(output, &vm); - for (dssdev = omap_encoder->output; dssdev; dssdev = dssdev->next) { + for (dssdev = output; dssdev; dssdev = dssdev->next) { if (dssdev->ops->set_timings) - dssdev->ops->set_timings(dssdev, &vm); + dssdev->ops->set_timings(dssdev, adjusted_mode); } /* Set the HDMI mode and HDMI infoframe if applicable. */ - if (omap_encoder->output->output_type == OMAP_DISPLAY_TYPE_HDMI) - omap_encoder_hdmi_mode_set(encoder, adjusted_mode); + if (output->type == OMAP_DISPLAY_TYPE_HDMI) + omap_encoder_hdmi_mode_set(connector, encoder, adjusted_mode); } static void omap_encoder_disable(struct drm_encoder *encoder) { struct omap_encoder *omap_encoder = to_omap_encoder(encoder); - struct omap_dss_device *dssdev = omap_encoder->display; + struct omap_dss_device *dssdev = omap_encoder->output; + struct drm_device *dev = encoder->dev; + + dev_dbg(dev->dev, "disable(%s)\n", dssdev->name); + + /* Disable the panel if present. */ + if (dssdev->panel) { + drm_panel_disable(dssdev->panel); + drm_panel_unprepare(dssdev->panel); + } + + /* + * Disable the chain of external devices, starting at the one at the + * internal encoder's output. + */ + omapdss_device_disable(dssdev->next); + + /* + * Disable the internal encoder. This will disable the DSS output. The + * DSI is treated as an exception as DSI pipelines still use the legacy + * flow where the pipeline output controls the encoder. + */ + if (dssdev->type != OMAP_DISPLAY_TYPE_DSI) { + dssdev->ops->disable(dssdev); + dssdev->state = OMAP_DSS_DISPLAY_DISABLED; + } - dssdev->ops->disable(dssdev); + /* + * Perform the post-disable operations on the chain of external devices + * to complete the display pipeline disable. + */ + omapdss_device_post_disable(dssdev->next); } static void omap_encoder_enable(struct drm_encoder *encoder) { struct omap_encoder *omap_encoder = to_omap_encoder(encoder); - struct omap_dss_device *dssdev = omap_encoder->display; - int r; - - r = dssdev->ops->enable(dssdev); - if (r) - dev_err(encoder->dev->dev, - "Failed to enable display '%s': %d\n", - dssdev->name, r); + struct omap_dss_device *dssdev = omap_encoder->output; + struct drm_device *dev = encoder->dev; + + dev_dbg(dev->dev, "enable(%s)\n", dssdev->name); + + /* Prepare the chain of external devices for pipeline enable. */ + omapdss_device_pre_enable(dssdev->next); + + /* + * Enable the internal encoder. This will enable the DSS output. The + * DSI is treated as an exception as DSI pipelines still use the legacy + * flow where the pipeline output controls the encoder. + */ + if (dssdev->type != OMAP_DISPLAY_TYPE_DSI) { + dssdev->ops->enable(dssdev); + dssdev->state = OMAP_DSS_DISPLAY_ACTIVE; + } + + /* + * Enable the chain of external devices, starting at the one at the + * internal encoder's output. + */ + omapdss_device_enable(dssdev->next); + + /* Enable the panel if present. 
*/ + if (dssdev->panel) { + drm_panel_prepare(dssdev->panel); + drm_panel_enable(dssdev->panel); + } } static int omap_encoder_atomic_check(struct drm_encoder *encoder, @@ -169,35 +236,17 @@ static int omap_encoder_atomic_check(struct drm_encoder *encoder, struct drm_connector_state *conn_state) { struct omap_encoder *omap_encoder = to_omap_encoder(encoder); - enum omap_channel channel = omap_encoder->output->dispc_channel; - struct drm_device *dev = encoder->dev; - struct omap_drm_private *priv = dev->dev_private; - struct omap_dss_device *dssdev; - struct videomode vm = { 0 }; - int ret; - - drm_display_mode_to_videomode(&crtc_state->mode, &vm); - - ret = priv->dispc_ops->mgr_check_timings(priv->dispc, channel, &vm); - if (ret) - goto done; - - for (dssdev = omap_encoder->output; dssdev; dssdev = dssdev->next) { - if (!dssdev->ops->check_timings) - continue; - - ret = dssdev->ops->check_timings(dssdev, &vm); - if (ret) - goto done; + enum drm_mode_status status; + + status = omap_connector_mode_fixup(omap_encoder->output, + &crtc_state->mode, + &crtc_state->adjusted_mode); + if (status != MODE_OK) { + dev_err(encoder->dev->dev, "invalid timings: %d\n", status); + return -EINVAL; } - drm_display_mode_from_videomode(&vm, &crtc_state->adjusted_mode); - -done: - if (ret) - dev_err(dev->dev, "invalid timings: %d\n", ret); - - return ret; + return 0; } static const struct drm_encoder_helper_funcs omap_encoder_helper_funcs = { @@ -209,8 +258,7 @@ static const struct drm_encoder_helper_funcs omap_encoder_helper_funcs = { /* initialize encoder */ struct drm_encoder *omap_encoder_init(struct drm_device *dev, - struct omap_dss_device *output, - struct omap_dss_device *display) + struct omap_dss_device *output) { struct drm_encoder *encoder = NULL; struct omap_encoder *omap_encoder; @@ -220,7 +268,6 @@ struct drm_encoder *omap_encoder_init(struct drm_device *dev, goto fail; omap_encoder->output = output; - omap_encoder->display = display; encoder = &omap_encoder->base; diff --git a/drivers/gpu/drm/omapdrm/omap_encoder.h b/drivers/gpu/drm/omapdrm/omap_encoder.h index a7b5dde63ecb..4aefb3142886 100644 --- a/drivers/gpu/drm/omapdrm/omap_encoder.h +++ b/drivers/gpu/drm/omapdrm/omap_encoder.h @@ -25,7 +25,6 @@ struct drm_encoder; struct omap_dss_device; struct drm_encoder *omap_encoder_init(struct drm_device *dev, - struct omap_dss_device *output, - struct omap_dss_device *display); + struct omap_dss_device *output); #endif /* __OMAPDRM_ENCODER_H__ */ diff --git a/drivers/gpu/drm/panel/panel-arm-versatile.c b/drivers/gpu/drm/panel/panel-arm-versatile.c index 72ce50581ef4..a79908dfa3c8 100644 --- a/drivers/gpu/drm/panel/panel-arm-versatile.c +++ b/drivers/gpu/drm/panel/panel-arm-versatile.c @@ -191,7 +191,7 @@ static const struct versatile_panel_type versatile_panels[] = { .vrefresh = 390, .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC, }, - .bus_flags = DRM_BUS_FLAG_PIXDATA_NEGEDGE, + .bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE, }, /* * Sanyo ALR252RGT 240x320 portrait display found on the @@ -215,7 +215,7 @@ static const struct versatile_panel_type versatile_panels[] = { .vrefresh = 116, .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, }, - .bus_flags = DRM_BUS_FLAG_PIXDATA_NEGEDGE, + .bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE, .ib2 = true, }, }; diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9322.c b/drivers/gpu/drm/panel/panel-ilitek-ili9322.c index 6126735c2a78..a1c4cd2940fb 100644 --- a/drivers/gpu/drm/panel/panel-ilitek-ili9322.c +++ 
b/drivers/gpu/drm/panel/panel-ilitek-ili9322.c @@ -412,11 +412,11 @@ static int ili9322_init(struct drm_panel *panel, struct ili9322 *ili) if (ili->conf->dclk_active_high) { reg = ILI9322_POL_DCLK; connector->display_info.bus_flags |= - DRM_BUS_FLAG_PIXDATA_POSEDGE; + DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE; } else { reg = 0; connector->display_info.bus_flags |= - DRM_BUS_FLAG_PIXDATA_NEGEDGE; + DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE; } if (ili->conf->de_active_high) { reg |= ILI9322_POL_DE; diff --git a/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c b/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c index 2d99e28ff117..bdcc5d80823d 100644 --- a/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c +++ b/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c @@ -328,7 +328,7 @@ static const struct seiko_panel_desc seiko_43wvf1g = { .height = 57, }, .bus_format = MEDIA_BUS_FMT_RGB888_1X24, - .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_NEGEDGE, + .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE, }; static const struct of_device_id platform_of_match[] = { diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c index 9e8218f6a3f2..8fee7a8b29d9 100644 --- a/drivers/gpu/drm/panel/panel-simple.c +++ b/drivers/gpu/drm/panel/panel-simple.c @@ -914,7 +914,7 @@ static const struct panel_desc cdtech_s043wq26h_ct7 = { .width = 95, .height = 54, }, - .bus_flags = DRM_BUS_FLAG_PIXDATA_POSEDGE, + .bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE, }; static const struct drm_display_mode cdtech_s070wv95_ct16_mode = { @@ -1034,7 +1034,7 @@ static const struct panel_desc dataimage_scf0700c48ggu18 = { .height = 91, }, .bus_format = MEDIA_BUS_FMT_RGB888_1X24, - .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_POSEDGE, + .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE, }; static const struct display_timing dlc_dlc0700yzg_1_timing = { @@ -1119,7 +1119,7 @@ static const struct panel_desc edt_et057090dhu = { .height = 86, }, .bus_format = MEDIA_BUS_FMT_RGB666_1X18, - .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_NEGEDGE, + .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE, }; static const struct drm_display_mode edt_etm0700g0dh6_mode = { @@ -1145,7 +1145,7 @@ static const struct panel_desc edt_etm0700g0dh6 = { .height = 91, }, .bus_format = MEDIA_BUS_FMT_RGB666_1X18, - .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_NEGEDGE, + .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE, }; static const struct panel_desc edt_etm0700g0bdh6 = { @@ -1157,7 +1157,7 @@ static const struct panel_desc edt_etm0700g0bdh6 = { .height = 91, }, .bus_format = MEDIA_BUS_FMT_RGB666_1X18, - .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_POSEDGE, + .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE, }; static const struct drm_display_mode foxlink_fl500wvr00_a0t_mode = { @@ -1311,7 +1311,7 @@ static const struct panel_desc innolux_at043tn24 = { .height = 54, }, .bus_format = MEDIA_BUS_FMT_RGB888_1X24, - .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_POSEDGE, + .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE, }; static const struct drm_display_mode innolux_at070tn92_mode = { @@ -1818,7 +1818,7 @@ static const struct panel_desc nec_nl4827hc19_05b = { .height = 54, }, .bus_format = MEDIA_BUS_FMT_RGB888_1X24, - .bus_flags = DRM_BUS_FLAG_PIXDATA_POSEDGE, + .bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE, }; static const struct drm_display_mode netron_dy_e231732_mode = 
{ @@ -1867,8 +1867,8 @@ static const struct panel_desc newhaven_nhd_43_480272ef_atxl = { .height = 54, }, .bus_format = MEDIA_BUS_FMT_RGB888_1X24, - .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_POSEDGE | - DRM_BUS_FLAG_SYNC_POSEDGE, + .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE | + DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE, }; static const struct display_timing nlt_nl192108ac18_02d_timing = { @@ -2029,7 +2029,33 @@ static const struct panel_desc ortustech_com43h4m85ulc = { .height = 93, }, .bus_format = MEDIA_BUS_FMT_RGB888_1X24, - .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_POSEDGE, + .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE, +}; + +static const struct drm_display_mode osddisplays_osd070t1718_19ts_mode = { + .clock = 33000, + .hdisplay = 800, + .hsync_start = 800 + 210, + .hsync_end = 800 + 210 + 30, + .htotal = 800 + 210 + 30 + 16, + .vdisplay = 480, + .vsync_start = 480 + 22, + .vsync_end = 480 + 22 + 13, + .vtotal = 480 + 22 + 13 + 10, + .vrefresh = 60, + .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC, +}; + +static const struct panel_desc osddisplays_osd070t1718_19ts = { + .modes = &osddisplays_osd070t1718_19ts_mode, + .num_modes = 1, + .bpc = 8, + .size = { + .width = 152, + .height = 91, + }, + .bus_format = MEDIA_BUS_FMT_RGB888_1X24, + .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE, }; static const struct drm_display_mode pda_91_00156_a0_mode = { @@ -2398,7 +2424,7 @@ static const struct panel_desc toshiba_lt089ac29000 = { .height = 116, }, .bus_format = MEDIA_BUS_FMT_RGB888_1X24, - .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_POSEDGE, + .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE, }; static const struct drm_display_mode tpk_f07a_0102_mode = { @@ -2421,7 +2447,7 @@ static const struct panel_desc tpk_f07a_0102 = { .width = 152, .height = 91, }, - .bus_flags = DRM_BUS_FLAG_PIXDATA_POSEDGE, + .bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE, }; static const struct drm_display_mode tpk_f10a_0102_mode = { @@ -2737,6 +2763,9 @@ static const struct of_device_id platform_of_match[] = { .compatible = "ortustech,com43h4m85ulc", .data = &ortustech_com43h4m85ulc, }, { + .compatible = "osddisplays,osd070t1718-19ts", + .data = &osddisplays_osd070t1718_19ts, + }, { .compatible = "pda,91-00156-a0", .data = &pda_91_00156_a0, }, { diff --git a/drivers/gpu/drm/panel/panel-tpo-tpg110.c b/drivers/gpu/drm/panel/panel-tpo-tpg110.c index 36b9e8d6b989..71591e5f5938 100644 --- a/drivers/gpu/drm/panel/panel-tpo-tpg110.c +++ b/drivers/gpu/drm/panel/panel-tpo-tpg110.c @@ -118,7 +118,7 @@ static const struct tpg110_panel_mode tpg110_modes[] = { .vtotal = 480 + 10 + 1 + 35, .vrefresh = 60, }, - .bus_flags = DRM_BUS_FLAG_PIXDATA_POSEDGE, + .bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE, }, { .name = "640x480 RGB", @@ -135,7 +135,7 @@ static const struct tpg110_panel_mode tpg110_modes[] = { .vtotal = 480 + 18 + 1 + 27, .vrefresh = 60, }, - .bus_flags = DRM_BUS_FLAG_PIXDATA_POSEDGE, + .bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE, }, { .name = "480x272 RGB", @@ -152,7 +152,7 @@ static const struct tpg110_panel_mode tpg110_modes[] = { .vtotal = 272 + 2 + 1 + 12, .vrefresh = 60, }, - .bus_flags = DRM_BUS_FLAG_PIXDATA_POSEDGE, + .bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE, }, { .name = "480x640 RGB", @@ -169,7 +169,7 @@ static const struct tpg110_panel_mode tpg110_modes[] = { .vtotal = 640 + 4 + 1 + 8, .vrefresh = 60, }, - .bus_flags = DRM_BUS_FLAG_PIXDATA_POSEDGE, + 
.bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE, }, { .name = "400x240 RGB", @@ -186,7 +186,7 @@ static const struct tpg110_panel_mode tpg110_modes[] = { .vtotal = 240 + 2 + 1 + 20, .vrefresh = 60, }, - .bus_flags = DRM_BUS_FLAG_PIXDATA_POSEDGE, + .bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE, }, }; diff --git a/drivers/gpu/drm/pl111/pl111_display.c b/drivers/gpu/drm/pl111/pl111_display.c index 754f6b25f265..0c5d391f0a8f 100644 --- a/drivers/gpu/drm/pl111/pl111_display.c +++ b/drivers/gpu/drm/pl111/pl111_display.c @@ -188,7 +188,7 @@ static void pl111_display_enable(struct drm_simple_display_pipe *pipe, tim2 |= TIM2_IOE; if (connector->display_info.bus_flags & - DRM_BUS_FLAG_PIXDATA_NEGEDGE) + DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE) tim2 |= TIM2_IPC; } diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c index bb81e310eb6d..578d867a81d5 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.c +++ b/drivers/gpu/drm/qxl/qxl_drv.c @@ -79,6 +79,10 @@ qxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (ret) goto free_dev; + ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 0, "qxl"); + if (ret) + goto disable_pci; + ret = qxl_device_init(qdev, &qxl_driver, pdev); if (ret) goto disable_pci; @@ -94,7 +98,6 @@ qxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (ret) goto modeset_cleanup; - drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 0, "qxl"); drm_fbdev_generic_setup(&qdev->ddev, 32); return 0; diff --git a/drivers/gpu/drm/rcar-du/Kconfig b/drivers/gpu/drm/rcar-du/Kconfig index 7c36e2777a15..1529849e217e 100644 --- a/drivers/gpu/drm/rcar-du/Kconfig +++ b/drivers/gpu/drm/rcar-du/Kconfig @@ -36,3 +36,7 @@ config DRM_RCAR_VSP depends on VIDEO_RENESAS_VSP1=y || (VIDEO_RENESAS_VSP1 && DRM_RCAR_DU=m) help Enable support to expose the R-Car VSP Compositor as KMS planes. 
+ +config DRM_RCAR_WRITEBACK + bool + default y if ARM64 diff --git a/drivers/gpu/drm/rcar-du/Makefile b/drivers/gpu/drm/rcar-du/Makefile index 2a3b8d7972b5..6c2ed9c46467 100644 --- a/drivers/gpu/drm/rcar-du/Makefile +++ b/drivers/gpu/drm/rcar-du/Makefile @@ -4,7 +4,7 @@ rcar-du-drm-y := rcar_du_crtc.o \ rcar_du_encoder.o \ rcar_du_group.o \ rcar_du_kms.o \ - rcar_du_plane.o + rcar_du_plane.o \ rcar-du-drm-$(CONFIG_DRM_RCAR_LVDS) += rcar_du_of.o \ rcar_du_of_lvds_r8a7790.dtb.o \ @@ -13,6 +13,7 @@ rcar-du-drm-$(CONFIG_DRM_RCAR_LVDS) += rcar_du_of.o \ rcar_du_of_lvds_r8a7795.dtb.o \ rcar_du_of_lvds_r8a7796.dtb.o rcar-du-drm-$(CONFIG_DRM_RCAR_VSP) += rcar_du_vsp.o +rcar-du-drm-$(CONFIG_DRM_RCAR_WRITEBACK) += rcar_du_writeback.o obj-$(CONFIG_DRM_RCAR_DU) += rcar-du-drm.o obj-$(CONFIG_DRM_RCAR_DW_HDMI) += rcar_dw_hdmi.o diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c index 4cdea14d552f..2da46e3dc4ae 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c @@ -32,21 +32,21 @@ static u32 rcar_du_crtc_read(struct rcar_du_crtc *rcrtc, u32 reg) { - struct rcar_du_device *rcdu = rcrtc->group->dev; + struct rcar_du_device *rcdu = rcrtc->dev; return rcar_du_read(rcdu, rcrtc->mmio_offset + reg); } static void rcar_du_crtc_write(struct rcar_du_crtc *rcrtc, u32 reg, u32 data) { - struct rcar_du_device *rcdu = rcrtc->group->dev; + struct rcar_du_device *rcdu = rcrtc->dev; rcar_du_write(rcdu, rcrtc->mmio_offset + reg, data); } static void rcar_du_crtc_clr(struct rcar_du_crtc *rcrtc, u32 reg, u32 clr) { - struct rcar_du_device *rcdu = rcrtc->group->dev; + struct rcar_du_device *rcdu = rcrtc->dev; rcar_du_write(rcdu, rcrtc->mmio_offset + reg, rcar_du_read(rcdu, rcrtc->mmio_offset + reg) & ~clr); @@ -54,7 +54,7 @@ static void rcar_du_crtc_clr(struct rcar_du_crtc *rcrtc, u32 reg, u32 clr) static void rcar_du_crtc_set(struct rcar_du_crtc *rcrtc, u32 reg, u32 set) { - struct rcar_du_device *rcdu = rcrtc->group->dev; + struct rcar_du_device *rcdu = rcrtc->dev; rcar_du_write(rcdu, rcrtc->mmio_offset + reg, rcar_du_read(rcdu, rcrtc->mmio_offset + reg) | set); @@ -62,7 +62,7 @@ static void rcar_du_crtc_set(struct rcar_du_crtc *rcrtc, u32 reg, u32 set) void rcar_du_crtc_dsysr_clr_set(struct rcar_du_crtc *rcrtc, u32 clr, u32 set) { - struct rcar_du_device *rcdu = rcrtc->group->dev; + struct rcar_du_device *rcdu = rcrtc->dev; rcrtc->dsysr = (rcrtc->dsysr & ~clr) | set; rcar_du_write(rcdu, rcrtc->mmio_offset + DSYSR, rcrtc->dsysr); @@ -157,10 +157,9 @@ static void rcar_du_dpll_divider(struct rcar_du_crtc *rcrtc, } done: - dev_dbg(rcrtc->group->dev->dev, + dev_dbg(rcrtc->dev->dev, "output:%u, fdpll:%u, n:%u, m:%u, diff:%lu\n", - dpll->output, dpll->fdpll, dpll->n, dpll->m, - best_diff); + dpll->output, dpll->fdpll, dpll->n, dpll->m, best_diff); } struct du_clk_params { @@ -212,7 +211,7 @@ static const struct soc_device_attribute rcar_du_r8a7795_es1[] = { static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc) { const struct drm_display_mode *mode = &rcrtc->crtc.state->adjusted_mode; - struct rcar_du_device *rcdu = rcrtc->group->dev; + struct rcar_du_device *rcdu = rcrtc->dev; unsigned long mode_clock = mode->clock * 1000; u32 dsmr; u32 escr; @@ -277,7 +276,7 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc) rcar_du_escr_divider(rcrtc->extclock, mode_clock, ESCR_DCLKSEL_DCLKIN, &params); - dev_dbg(rcrtc->group->dev->dev, "mode clock %lu %s rate 
%lu\n", mode_clock, params.clk == rcrtc->clock ? "cpg" : "ext", params.rate); @@ -285,7 +284,7 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc) escr = params.escr; } - dev_dbg(rcrtc->group->dev->dev, "%s: ESCR 0x%08x\n", __func__, escr); + dev_dbg(rcrtc->dev->dev, "%s: ESCR 0x%08x\n", __func__, escr); rcar_du_crtc_write(rcrtc, rcrtc->index % 2 ? ESCR13 : ESCR02, escr); rcar_du_crtc_write(rcrtc, rcrtc->index % 2 ? OTAR13 : OTAR02, 0); @@ -333,7 +332,7 @@ plane_format(struct rcar_du_plane *plane) static void rcar_du_crtc_update_planes(struct rcar_du_crtc *rcrtc) { struct rcar_du_plane *planes[RCAR_DU_NUM_HW_PLANES]; - struct rcar_du_device *rcdu = rcrtc->group->dev; + struct rcar_du_device *rcdu = rcrtc->dev; unsigned int num_planes = 0; unsigned int dptsr_planes; unsigned int hwplanes = 0; @@ -463,7 +462,7 @@ static bool rcar_du_crtc_page_flip_pending(struct rcar_du_crtc *rcrtc) static void rcar_du_crtc_wait_page_flip(struct rcar_du_crtc *rcrtc) { - struct rcar_du_device *rcdu = rcrtc->group->dev; + struct rcar_du_device *rcdu = rcrtc->dev; if (wait_event_timeout(rcrtc->flip_wait, !rcar_du_crtc_page_flip_pending(rcrtc), @@ -493,7 +492,7 @@ static void rcar_du_crtc_setup(struct rcar_du_crtc *rcrtc) rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? DS2PR : DS1PR, 0); /* Enable the VSP compositor. */ - if (rcar_du_has(rcrtc->group->dev, RCAR_DU_FEATURE_VSP1_SOURCE)) + if (rcar_du_has(rcrtc->dev, RCAR_DU_FEATURE_VSP1_SOURCE)) rcar_du_vsp_enable(rcrtc); /* Turn vertical blanking interrupt reporting on. */ @@ -564,7 +563,7 @@ static void rcar_du_crtc_start(struct rcar_du_crtc *rcrtc) static void rcar_du_crtc_disable_planes(struct rcar_du_crtc *rcrtc) { - struct rcar_du_device *rcdu = rcrtc->group->dev; + struct rcar_du_device *rcdu = rcrtc->dev; struct drm_crtc *crtc = &rcrtc->crtc; u32 status; @@ -617,7 +616,7 @@ static void rcar_du_crtc_stop(struct rcar_du_crtc *rcrtc) drm_crtc_vblank_off(crtc); /* Disable the VSP compositor. */ - if (rcar_du_has(rcrtc->group->dev, RCAR_DU_FEATURE_VSP1_SOURCE)) + if (rcar_du_has(rcrtc->dev, RCAR_DU_FEATURE_VSP1_SOURCE)) rcar_du_vsp_disable(rcrtc); /* @@ -627,7 +626,7 @@ static void rcar_du_crtc_stop(struct rcar_du_crtc *rcrtc) * TODO: Find another way to stop the display for DUs that don't support * TVM sync. */ - if (rcar_du_has(rcrtc->group->dev, RCAR_DU_FEATURE_TVM_SYNC)) + if (rcar_du_has(rcrtc->dev, RCAR_DU_FEATURE_TVM_SYNC)) rcar_du_crtc_dsysr_clr_set(rcrtc, DSYSR_TVM_MASK, DSYSR_TVM_SWITCH); @@ -648,8 +647,13 @@ static int rcar_du_crtc_atomic_check(struct drm_crtc *crtc, rstate->outputs = 0; drm_for_each_encoder_mask(encoder, crtc->dev, state->encoder_mask) { - struct rcar_du_encoder *renc = to_rcar_encoder(encoder); + struct rcar_du_encoder *renc; + /* Skip the writeback encoder. 
*/ + if (encoder->encoder_type == DRM_MODE_ENCODER_VIRTUAL) + continue; + + renc = to_rcar_encoder(encoder); rstate->outputs |= BIT(renc->output); } @@ -661,7 +665,7 @@ static void rcar_du_crtc_atomic_enable(struct drm_crtc *crtc, { struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc); struct rcar_du_crtc_state *rstate = to_rcar_crtc_state(crtc->state); - struct rcar_du_device *rcdu = rcrtc->group->dev; + struct rcar_du_device *rcdu = rcrtc->dev; rcar_du_crtc_get(rcrtc); @@ -689,7 +693,7 @@ static void rcar_du_crtc_atomic_disable(struct drm_crtc *crtc, { struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc); struct rcar_du_crtc_state *rstate = to_rcar_crtc_state(old_state); - struct rcar_du_device *rcdu = rcrtc->group->dev; + struct rcar_du_device *rcdu = rcrtc->dev; rcar_du_crtc_stop(rcrtc); rcar_du_crtc_put(rcrtc); @@ -735,7 +739,7 @@ static void rcar_du_crtc_atomic_begin(struct drm_crtc *crtc, */ rcar_du_crtc_get(rcrtc); - if (rcar_du_has(rcrtc->group->dev, RCAR_DU_FEATURE_VSP1_SOURCE)) + if (rcar_du_has(rcrtc->dev, RCAR_DU_FEATURE_VSP1_SOURCE)) rcar_du_vsp_atomic_begin(rcrtc); } @@ -757,15 +761,16 @@ static void rcar_du_crtc_atomic_flush(struct drm_crtc *crtc, spin_unlock_irqrestore(&dev->event_lock, flags); } - if (rcar_du_has(rcrtc->group->dev, RCAR_DU_FEATURE_VSP1_SOURCE)) + if (rcar_du_has(rcrtc->dev, RCAR_DU_FEATURE_VSP1_SOURCE)) rcar_du_vsp_atomic_flush(rcrtc); } -enum drm_mode_status rcar_du_crtc_mode_valid(struct drm_crtc *crtc, - const struct drm_display_mode *mode) +static enum drm_mode_status +rcar_du_crtc_mode_valid(struct drm_crtc *crtc, + const struct drm_display_mode *mode) { struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc); - struct rcar_du_device *rcdu = rcrtc->group->dev; + struct rcar_du_device *rcdu = rcrtc->dev; bool interlaced = mode->flags & DRM_MODE_FLAG_INTERLACE; unsigned int vbp; @@ -797,7 +802,7 @@ static const struct drm_crtc_helper_funcs crtc_helper_funcs = { static void rcar_du_crtc_crc_init(struct rcar_du_crtc *rcrtc) { - struct rcar_du_device *rcdu = rcrtc->group->dev; + struct rcar_du_device *rcdu = rcrtc->dev; const char **sources; unsigned int count; int i = -1; @@ -981,8 +986,8 @@ static int rcar_du_crtc_verify_crc_source(struct drm_crtc *crtc, return 0; } -const char *const *rcar_du_crtc_get_crc_sources(struct drm_crtc *crtc, - size_t *count) +static const char *const * +rcar_du_crtc_get_crc_sources(struct drm_crtc *crtc, size_t *count) { struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc); @@ -1079,7 +1084,7 @@ static const struct drm_crtc_funcs crtc_funcs_gen3 = { static irqreturn_t rcar_du_crtc_irq(int irq, void *arg) { struct rcar_du_crtc *rcrtc = arg; - struct rcar_du_device *rcdu = rcrtc->group->dev; + struct rcar_du_device *rcdu = rcrtc->dev; irqreturn_t ret = IRQ_NONE; u32 status; @@ -1171,6 +1176,7 @@ int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int swindex, init_waitqueue_head(&rcrtc->vblank_wait); spin_lock_init(&rcrtc->vblank_lock); + rcrtc->dev = rcdu; rcrtc->group = rgrp; rcrtc->mmio_offset = mmio_offsets[hwindex]; rcrtc->index = hwindex; diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h index bcb35b0b7612..3b7fc668996f 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h +++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h @@ -15,6 +15,7 @@ #include <linux/wait.h> #include <drm/drm_crtc.h> +#include <drm/drm_writeback.h> #include <media/vsp1.h> @@ -24,10 +25,11 @@ struct rcar_du_vsp; /** * struct rcar_du_crtc - the CRTC, representing a DU superposition processor * @crtc: base DRM CRTC + * @dev: the 
DU device * @clock: the CRTC functional clock * @extclock: external pixel dot clock (optional) * @mmio_offset: offset of the CRTC registers in the DU MMIO block - * @index: CRTC software and hardware index + * @index: CRTC hardware index * @initialized: whether the CRTC has been initialized and clocks enabled * @dsysr: cached value of the DSYSR register * @vblank_enable: whether vblank events are enabled on this CRTC @@ -39,10 +41,12 @@ struct rcar_du_vsp; * @group: CRTC group this CRTC belongs to * @vsp: VSP feeding video to this CRTC * @vsp_pipe: index of the VSP pipeline feeding video to this CRTC + * @writeback: the writeback connector */ struct rcar_du_crtc { struct drm_crtc crtc; + struct rcar_du_device *dev; struct clk *clock; struct clk *extclock; unsigned int mmio_offset; @@ -65,9 +69,12 @@ struct rcar_du_crtc { const char *const *sources; unsigned int sources_count; + + struct drm_writeback_connector writeback; }; -#define to_rcar_crtc(c) container_of(c, struct rcar_du_crtc, crtc) +#define to_rcar_crtc(c) container_of(c, struct rcar_du_crtc, crtc) +#define wb_to_rcar_crtc(c) container_of(c, struct rcar_du_crtc, writeback) /** * struct rcar_du_crtc_state - Driver-specific CRTC state @@ -97,8 +104,6 @@ enum rcar_du_output { int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int swindex, unsigned int hwindex); -void rcar_du_crtc_suspend(struct rcar_du_crtc *rcrtc); -void rcar_du_crtc_resume(struct rcar_du_crtc *rcrtc); void rcar_du_crtc_finish_page_flip(struct rcar_du_crtc *rcrtc); diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c index 8ee4e762f4e5..6c91753af7bc 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c @@ -28,13 +28,33 @@ static const struct drm_encoder_funcs encoder_funcs = { .destroy = drm_encoder_cleanup, }; +static unsigned int rcar_du_encoder_count_ports(struct device_node *node) +{ + struct device_node *ports; + struct device_node *port; + unsigned int num_ports = 0; + + ports = of_get_child_by_name(node, "ports"); + if (!ports) + ports = of_node_get(node); + + for_each_child_of_node(ports, port) { + if (of_node_name_eq(port, "port")) + num_ports++; + } + + of_node_put(ports); + + return num_ports; +} + int rcar_du_encoder_init(struct rcar_du_device *rcdu, enum rcar_du_output output, struct device_node *enc_node) { struct rcar_du_encoder *renc; struct drm_encoder *encoder; - struct drm_bridge *bridge = NULL; + struct drm_bridge *bridge; int ret; renc = devm_kzalloc(rcdu->dev, sizeof(*renc), GFP_KERNEL); @@ -48,11 +68,33 @@ int rcar_du_encoder_init(struct rcar_du_device *rcdu, dev_dbg(rcdu->dev, "initializing encoder %pOF for output %u\n", enc_node, output); - /* Locate the DRM bridge from the encoder DT node. */ - bridge = of_drm_find_bridge(enc_node); - if (!bridge) { - ret = -EPROBE_DEFER; - goto done; + /* + * Locate the DRM bridge from the DT node. For the DPAD outputs, if the + * DT node has a single port, assume that it describes a panel and + * create a panel bridge. 
+ */ + if ((output == RCAR_DU_OUTPUT_DPAD0 || + output == RCAR_DU_OUTPUT_DPAD1) && + rcar_du_encoder_count_ports(enc_node) == 1) { + struct drm_panel *panel = of_drm_find_panel(enc_node); + + if (IS_ERR(panel)) { + ret = PTR_ERR(panel); + goto done; + } + + bridge = devm_drm_panel_bridge_add(rcdu->dev, panel, + DRM_MODE_CONNECTOR_DPI); + if (IS_ERR(bridge)) { + ret = PTR_ERR(bridge); + goto done; + } + } else { + bridge = of_drm_find_bridge(enc_node); + if (!bridge) { + ret = -EPROBE_DEFER; + goto done; + } } ret = drm_encoder_init(rcdu->ddev, encoder, &encoder_funcs, diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c index 3b7d50a8fb9b..f8f7fff34dff 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c @@ -26,6 +26,7 @@ #include "rcar_du_kms.h" #include "rcar_du_regs.h" #include "rcar_du_vsp.h" +#include "rcar_du_writeback.h" /* ----------------------------------------------------------------------------- * Format helpers @@ -34,60 +35,70 @@ static const struct rcar_du_format_info rcar_du_format_infos[] = { { .fourcc = DRM_FORMAT_RGB565, + .v4l2 = V4L2_PIX_FMT_RGB565, .bpp = 16, .planes = 1, .pnmr = PnMR_SPIM_TP | PnMR_DDDF_16BPP, .edf = PnDDCR4_EDF_NONE, }, { .fourcc = DRM_FORMAT_ARGB1555, + .v4l2 = V4L2_PIX_FMT_ARGB555, .bpp = 16, .planes = 1, .pnmr = PnMR_SPIM_ALP | PnMR_DDDF_ARGB, .edf = PnDDCR4_EDF_NONE, }, { .fourcc = DRM_FORMAT_XRGB1555, + .v4l2 = V4L2_PIX_FMT_XRGB555, .bpp = 16, .planes = 1, .pnmr = PnMR_SPIM_ALP | PnMR_DDDF_ARGB, .edf = PnDDCR4_EDF_NONE, }, { .fourcc = DRM_FORMAT_XRGB8888, + .v4l2 = V4L2_PIX_FMT_XBGR32, .bpp = 32, .planes = 1, .pnmr = PnMR_SPIM_TP | PnMR_DDDF_16BPP, .edf = PnDDCR4_EDF_RGB888, }, { .fourcc = DRM_FORMAT_ARGB8888, + .v4l2 = V4L2_PIX_FMT_ABGR32, .bpp = 32, .planes = 1, .pnmr = PnMR_SPIM_ALP | PnMR_DDDF_16BPP, .edf = PnDDCR4_EDF_ARGB8888, }, { .fourcc = DRM_FORMAT_UYVY, + .v4l2 = V4L2_PIX_FMT_UYVY, .bpp = 16, .planes = 1, .pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC, .edf = PnDDCR4_EDF_NONE, }, { .fourcc = DRM_FORMAT_YUYV, + .v4l2 = V4L2_PIX_FMT_YUYV, .bpp = 16, .planes = 1, .pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC, .edf = PnDDCR4_EDF_NONE, }, { .fourcc = DRM_FORMAT_NV12, + .v4l2 = V4L2_PIX_FMT_NV12M, .bpp = 12, .planes = 2, .pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC, .edf = PnDDCR4_EDF_NONE, }, { .fourcc = DRM_FORMAT_NV21, + .v4l2 = V4L2_PIX_FMT_NV21M, .bpp = 12, .planes = 2, .pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC, .edf = PnDDCR4_EDF_NONE, }, { .fourcc = DRM_FORMAT_NV16, + .v4l2 = V4L2_PIX_FMT_NV16M, .bpp = 16, .planes = 2, .pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC, @@ -99,62 +110,77 @@ static const struct rcar_du_format_info rcar_du_format_infos[] = { */ { .fourcc = DRM_FORMAT_RGB332, + .v4l2 = V4L2_PIX_FMT_RGB332, .bpp = 8, .planes = 1, }, { .fourcc = DRM_FORMAT_ARGB4444, + .v4l2 = V4L2_PIX_FMT_ARGB444, .bpp = 16, .planes = 1, }, { .fourcc = DRM_FORMAT_XRGB4444, + .v4l2 = V4L2_PIX_FMT_XRGB444, .bpp = 16, .planes = 1, }, { .fourcc = DRM_FORMAT_BGR888, + .v4l2 = V4L2_PIX_FMT_RGB24, .bpp = 24, .planes = 1, }, { .fourcc = DRM_FORMAT_RGB888, + .v4l2 = V4L2_PIX_FMT_BGR24, .bpp = 24, .planes = 1, }, { .fourcc = DRM_FORMAT_BGRA8888, + .v4l2 = V4L2_PIX_FMT_ARGB32, .bpp = 32, .planes = 1, }, { .fourcc = DRM_FORMAT_BGRX8888, + .v4l2 = V4L2_PIX_FMT_XRGB32, .bpp = 32, .planes = 1, }, { .fourcc = DRM_FORMAT_YVYU, + .v4l2 = V4L2_PIX_FMT_YVYU, .bpp = 16, .planes = 1, }, { .fourcc = DRM_FORMAT_NV61, + .v4l2 = V4L2_PIX_FMT_NV61M, .bpp = 16, .planes = 2, }, { .fourcc = DRM_FORMAT_YUV420, + .v4l2 = 
V4L2_PIX_FMT_YUV420M, .bpp = 12, .planes = 3, }, { .fourcc = DRM_FORMAT_YVU420, + .v4l2 = V4L2_PIX_FMT_YVU420M, .bpp = 12, .planes = 3, }, { .fourcc = DRM_FORMAT_YUV422, + .v4l2 = V4L2_PIX_FMT_YUV422M, .bpp = 16, .planes = 3, }, { .fourcc = DRM_FORMAT_YVU422, + .v4l2 = V4L2_PIX_FMT_YVU422M, .bpp = 16, .planes = 3, }, { .fourcc = DRM_FORMAT_YUV444, + .v4l2 = V4L2_PIX_FMT_YUV444M, .bpp = 24, .planes = 3, }, { .fourcc = DRM_FORMAT_YVU444, + .v4l2 = V4L2_PIX_FMT_YVU444M, .bpp = 24, .planes = 3, }, @@ -639,6 +665,17 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu) encoder->possible_clones = (1 << num_encoders) - 1; } + /* Create the writeback connectors. */ + if (rcdu->info->gen >= 3) { + for (i = 0; i < rcdu->num_crtcs; ++i) { + struct rcar_du_crtc *rcrtc = &rcdu->crtcs[i]; + + ret = rcar_du_writeback_init(rcdu, rcrtc); + if (ret < 0) + return ret; + } + } + /* * Initialize the default DPAD0 source to the index of the first DU * channel that can be connected to DPAD0. The exact value doesn't diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.h b/drivers/gpu/drm/rcar-du/rcar_du_kms.h index e171527abdaa..0346504d8c59 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_kms.h +++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.h @@ -19,6 +19,7 @@ struct rcar_du_device; struct rcar_du_format_info { u32 fourcc; + u32 v4l2; unsigned int bpp; unsigned int planes; unsigned int pnmr; diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c index 0878accbd134..5e4faf258c31 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c @@ -10,6 +10,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_crtc.h> #include <drm/drm_fb_cma_helper.h> +#include <drm/drm_fourcc.h> #include <drm/drm_gem_cma_helper.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_plane_helper.h> @@ -26,16 +27,19 @@ #include "rcar_du_drv.h" #include "rcar_du_kms.h" #include "rcar_du_vsp.h" +#include "rcar_du_writeback.h" -static void rcar_du_vsp_complete(void *private, bool completed, u32 crc) +static void rcar_du_vsp_complete(void *private, unsigned int status, u32 crc) { struct rcar_du_crtc *crtc = private; if (crtc->vblank_enable) drm_crtc_handle_vblank(&crtc->crtc); - if (completed) + if (status & VSP1_DU_STATUS_COMPLETE) rcar_du_crtc_finish_page_flip(crtc); + if (status & VSP1_DU_STATUS_WRITEBACK) + rcar_du_writeback_complete(crtc); drm_crtc_add_crc_entry(&crtc->crtc, false, 0, &crc); } @@ -43,7 +47,7 @@ static void rcar_du_vsp_complete(void *private, bool completed, u32 crc) void rcar_du_vsp_enable(struct rcar_du_crtc *crtc) { const struct drm_display_mode *mode = &crtc->crtc.state->adjusted_mode; - struct rcar_du_device *rcdu = crtc->group->dev; + struct rcar_du_device *rcdu = crtc->dev; struct vsp1_du_lif_config cfg = { .width = mode->hdisplay, .height = mode->vdisplay, @@ -107,11 +111,12 @@ void rcar_du_vsp_atomic_flush(struct rcar_du_crtc *crtc) state = to_rcar_crtc_state(crtc->crtc.state); cfg.crc = state->crc; + rcar_du_writeback_setup(crtc, &cfg.writeback); + vsp1_du_atomic_flush(crtc->vsp->vsp, crtc->vsp_pipe, &cfg); } -/* Keep the two tables in sync. 
*/ -static const u32 formats_kms[] = { +static const u32 rcar_du_vsp_formats[] = { DRM_FORMAT_RGB332, DRM_FORMAT_ARGB4444, DRM_FORMAT_XRGB4444, @@ -139,40 +144,13 @@ static const u32 formats_kms[] = { DRM_FORMAT_YVU444, }; -static const u32 formats_v4l2[] = { - V4L2_PIX_FMT_RGB332, - V4L2_PIX_FMT_ARGB444, - V4L2_PIX_FMT_XRGB444, - V4L2_PIX_FMT_ARGB555, - V4L2_PIX_FMT_XRGB555, - V4L2_PIX_FMT_RGB565, - V4L2_PIX_FMT_RGB24, - V4L2_PIX_FMT_BGR24, - V4L2_PIX_FMT_ARGB32, - V4L2_PIX_FMT_XRGB32, - V4L2_PIX_FMT_ABGR32, - V4L2_PIX_FMT_XBGR32, - V4L2_PIX_FMT_UYVY, - V4L2_PIX_FMT_YUYV, - V4L2_PIX_FMT_YVYU, - V4L2_PIX_FMT_NV12M, - V4L2_PIX_FMT_NV21M, - V4L2_PIX_FMT_NV16M, - V4L2_PIX_FMT_NV61M, - V4L2_PIX_FMT_YUV420M, - V4L2_PIX_FMT_YVU420M, - V4L2_PIX_FMT_YUV422M, - V4L2_PIX_FMT_YVU422M, - V4L2_PIX_FMT_YUV444M, - V4L2_PIX_FMT_YVU444M, -}; - static void rcar_du_vsp_plane_setup(struct rcar_du_vsp_plane *plane) { struct rcar_du_vsp_plane_state *state = to_rcar_vsp_plane_state(plane->plane.state); struct rcar_du_crtc *crtc = to_rcar_crtc(state->state.crtc); struct drm_framebuffer *fb = plane->plane.state->fb; + const struct rcar_du_format_info *format; struct vsp1_du_atomic_config cfg = { .pixelformat = 0, .pitch = fb->pitches[0], @@ -195,37 +173,23 @@ static void rcar_du_vsp_plane_setup(struct rcar_du_vsp_plane *plane) cfg.mem[i] = sg_dma_address(state->sg_tables[i].sgl) + fb->offsets[i]; - for (i = 0; i < ARRAY_SIZE(formats_kms); ++i) { - if (formats_kms[i] == state->format->fourcc) { - cfg.pixelformat = formats_v4l2[i]; - break; - } - } + format = rcar_du_format_info(state->format->fourcc); + cfg.pixelformat = format->v4l2; vsp1_du_atomic_update(plane->vsp->vsp, crtc->vsp_pipe, plane->index, &cfg); } -static int rcar_du_vsp_plane_prepare_fb(struct drm_plane *plane, - struct drm_plane_state *state) +int rcar_du_vsp_map_fb(struct rcar_du_vsp *vsp, struct drm_framebuffer *fb, + struct sg_table sg_tables[3]) { - struct rcar_du_vsp_plane_state *rstate = to_rcar_vsp_plane_state(state); - struct rcar_du_vsp *vsp = to_rcar_vsp_plane(plane)->vsp; struct rcar_du_device *rcdu = vsp->dev; unsigned int i; int ret; - /* - * There's no need to prepare (and unprepare) the framebuffer when the - * plane is not visible, as it will not be displayed. 
- */ - if (!state->visible) - return 0; - - for (i = 0; i < rstate->format->planes; ++i) { - struct drm_gem_cma_object *gem = - drm_fb_cma_get_gem_obj(state->fb, i); - struct sg_table *sgt = &rstate->sg_tables[i]; + for (i = 0; i < fb->format->num_planes; ++i) { + struct drm_gem_cma_object *gem = drm_fb_cma_get_gem_obj(fb, i); + struct sg_table *sgt = &sg_tables[i]; ret = dma_get_sgtable(rcdu->dev, sgt, gem->vaddr, gem->paddr, gem->base.size); @@ -240,15 +204,11 @@ static int rcar_du_vsp_plane_prepare_fb(struct drm_plane *plane, } } - ret = drm_gem_fb_prepare_fb(plane, state); - if (ret) - goto fail; - return 0; fail: while (i--) { - struct sg_table *sgt = &rstate->sg_tables[i]; + struct sg_table *sgt = &sg_tables[i]; vsp1_du_unmap_sg(vsp->vsp, sgt); sg_free_table(sgt); @@ -257,24 +217,52 @@ fail: return ret; } -static void rcar_du_vsp_plane_cleanup_fb(struct drm_plane *plane, - struct drm_plane_state *state) +static int rcar_du_vsp_plane_prepare_fb(struct drm_plane *plane, + struct drm_plane_state *state) { struct rcar_du_vsp_plane_state *rstate = to_rcar_vsp_plane_state(state); struct rcar_du_vsp *vsp = to_rcar_vsp_plane(plane)->vsp; - unsigned int i; + int ret; + /* + * There's no need to prepare (and unprepare) the framebuffer when the + * plane is not visible, as it will not be displayed. + */ if (!state->visible) - return; + return 0; + + ret = rcar_du_vsp_map_fb(vsp, state->fb, rstate->sg_tables); + if (ret < 0) + return ret; + + return drm_gem_fb_prepare_fb(plane, state); +} + +void rcar_du_vsp_unmap_fb(struct rcar_du_vsp *vsp, struct drm_framebuffer *fb, + struct sg_table sg_tables[3]) +{ + unsigned int i; - for (i = 0; i < rstate->format->planes; ++i) { - struct sg_table *sgt = &rstate->sg_tables[i]; + for (i = 0; i < fb->format->num_planes; ++i) { + struct sg_table *sgt = &sg_tables[i]; vsp1_du_unmap_sg(vsp->vsp, sgt); sg_free_table(sgt); } } +static void rcar_du_vsp_plane_cleanup_fb(struct drm_plane *plane, + struct drm_plane_state *state) +{ + struct rcar_du_vsp_plane_state *rstate = to_rcar_vsp_plane_state(state); + struct rcar_du_vsp *vsp = to_rcar_vsp_plane(plane)->vsp; + + if (!state->visible) + return; + + rcar_du_vsp_unmap_fb(vsp, state->fb, rstate->sg_tables); +} + static int rcar_du_vsp_plane_atomic_check(struct drm_plane *plane, struct drm_plane_state *state) { @@ -395,8 +383,8 @@ int rcar_du_vsp_init(struct rcar_du_vsp *vsp, struct device_node *np, ret = drm_universal_plane_init(rcdu->ddev, &plane->plane, crtcs, &rcar_du_vsp_plane_funcs, - formats_kms, - ARRAY_SIZE(formats_kms), + rcar_du_vsp_formats, + ARRAY_SIZE(rcar_du_vsp_formats), NULL, type, NULL); if (ret < 0) return ret; diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vsp.h b/drivers/gpu/drm/rcar-du/rcar_du_vsp.h index db232037f24a..9b4724159378 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_vsp.h +++ b/drivers/gpu/drm/rcar-du/rcar_du_vsp.h @@ -12,8 +12,10 @@ #include <drm/drm_plane.h> +struct drm_framebuffer; struct rcar_du_format_info; struct rcar_du_vsp; +struct sg_table; struct rcar_du_vsp_plane { struct drm_plane plane; @@ -60,6 +62,10 @@ void rcar_du_vsp_enable(struct rcar_du_crtc *crtc); void rcar_du_vsp_disable(struct rcar_du_crtc *crtc); void rcar_du_vsp_atomic_begin(struct rcar_du_crtc *crtc); void rcar_du_vsp_atomic_flush(struct rcar_du_crtc *crtc); +int rcar_du_vsp_map_fb(struct rcar_du_vsp *vsp, struct drm_framebuffer *fb, + struct sg_table sg_tables[3]); +void rcar_du_vsp_unmap_fb(struct rcar_du_vsp *vsp, struct drm_framebuffer *fb, + struct sg_table sg_tables[3]); #else static inline int 
rcar_du_vsp_init(struct rcar_du_vsp *vsp, struct device_node *np, @@ -71,6 +77,17 @@ static inline void rcar_du_vsp_enable(struct rcar_du_crtc *crtc) { }; static inline void rcar_du_vsp_disable(struct rcar_du_crtc *crtc) { }; static inline void rcar_du_vsp_atomic_begin(struct rcar_du_crtc *crtc) { }; static inline void rcar_du_vsp_atomic_flush(struct rcar_du_crtc *crtc) { }; +static inline int rcar_du_vsp_map_fb(struct rcar_du_vsp *vsp, + struct drm_framebuffer *fb, + struct sg_table sg_tables[3]) +{ + return -ENXIO; +} +static inline void rcar_du_vsp_unmap_fb(struct rcar_du_vsp *vsp, + struct drm_framebuffer *fb, + struct sg_table sg_tables[3]) +{ +} #endif #endif /* __RCAR_DU_VSP_H__ */ diff --git a/drivers/gpu/drm/rcar-du/rcar_du_writeback.c b/drivers/gpu/drm/rcar-du/rcar_du_writeback.c new file mode 100644 index 000000000000..989a0be94131 --- /dev/null +++ b/drivers/gpu/drm/rcar-du/rcar_du_writeback.c @@ -0,0 +1,243 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * rcar_du_writeback.c -- R-Car Display Unit Writeback Support + * + * Copyright (C) 2019 Laurent Pinchart <laurent.pinchart@ideasonboard.com> + */ + +#include <drm/drm_atomic_helper.h> +#include <drm/drm_device.h> +#include <drm/drm_fourcc.h> +#include <drm/drm_probe_helper.h> +#include <drm/drm_writeback.h> + +#include "rcar_du_crtc.h" +#include "rcar_du_drv.h" +#include "rcar_du_kms.h" + +/** + * struct rcar_du_wb_conn_state - Driver-specific writeback connector state + * @state: base DRM connector state + * @format: format of the writeback framebuffer + */ +struct rcar_du_wb_conn_state { + struct drm_connector_state state; + const struct rcar_du_format_info *format; +}; + +#define to_rcar_wb_conn_state(s) \ + container_of(s, struct rcar_du_wb_conn_state, state) + +/** + * struct rcar_du_wb_job - Driver-private data for writeback jobs + * @sg_tables: scatter-gather tables for the framebuffer memory + */ +struct rcar_du_wb_job { + struct sg_table sg_tables[3]; +}; + +static int rcar_du_wb_conn_get_modes(struct drm_connector *connector) +{ + struct drm_device *dev = connector->dev; + + return drm_add_modes_noedid(connector, dev->mode_config.max_width, + dev->mode_config.max_height); +} + +static int rcar_du_wb_prepare_job(struct drm_writeback_connector *connector, + struct drm_writeback_job *job) +{ + struct rcar_du_crtc *rcrtc = wb_to_rcar_crtc(connector); + struct rcar_du_wb_job *rjob; + int ret; + + if (!job->fb) + return 0; + + rjob = kzalloc(sizeof(*rjob), GFP_KERNEL); + if (!rjob) + return -ENOMEM; + + /* Map the framebuffer to the VSP. 
+ */ + ret = rcar_du_vsp_map_fb(rcrtc->vsp, job->fb, rjob->sg_tables); + if (ret < 0) { + kfree(rjob); + return ret; + } + + job->priv = rjob; + return 0; +} + +static void rcar_du_wb_cleanup_job(struct drm_writeback_connector *connector, + struct drm_writeback_job *job) +{ + struct rcar_du_crtc *rcrtc = wb_to_rcar_crtc(connector); + struct rcar_du_wb_job *rjob = job->priv; + + if (!job->fb) + return; + + rcar_du_vsp_unmap_fb(rcrtc->vsp, job->fb, rjob->sg_tables); + kfree(rjob); +} + +static const struct drm_connector_helper_funcs rcar_du_wb_conn_helper_funcs = { + .get_modes = rcar_du_wb_conn_get_modes, + .prepare_writeback_job = rcar_du_wb_prepare_job, + .cleanup_writeback_job = rcar_du_wb_cleanup_job, +}; + +static struct drm_connector_state * +rcar_du_wb_conn_duplicate_state(struct drm_connector *connector) +{ + struct rcar_du_wb_conn_state *copy; + + if (WARN_ON(!connector->state)) + return NULL; + + copy = kzalloc(sizeof(*copy), GFP_KERNEL); + if (!copy) + return NULL; + + __drm_atomic_helper_connector_duplicate_state(connector, &copy->state); + + return &copy->state; +} + +static void rcar_du_wb_conn_destroy_state(struct drm_connector *connector, + struct drm_connector_state *state) +{ + __drm_atomic_helper_connector_destroy_state(state); + kfree(to_rcar_wb_conn_state(state)); +} + +static void rcar_du_wb_conn_reset(struct drm_connector *connector) +{ + struct rcar_du_wb_conn_state *state; + + if (connector->state) { + rcar_du_wb_conn_destroy_state(connector, connector->state); + connector->state = NULL; + } + + state = kzalloc(sizeof(*state), GFP_KERNEL); + if (state == NULL) + return; + + __drm_atomic_helper_connector_reset(connector, &state->state); +} + +static const struct drm_connector_funcs rcar_du_wb_conn_funcs = { + .reset = rcar_du_wb_conn_reset, + .fill_modes = drm_helper_probe_single_connector_modes, + .destroy = drm_connector_cleanup, + .atomic_duplicate_state = rcar_du_wb_conn_duplicate_state, + .atomic_destroy_state = rcar_du_wb_conn_destroy_state, +}; + +static int rcar_du_wb_enc_atomic_check(struct drm_encoder *encoder, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state) +{ + struct rcar_du_wb_conn_state *wb_state = + to_rcar_wb_conn_state(conn_state); + const struct drm_display_mode *mode = &crtc_state->mode; + struct drm_device *dev = encoder->dev; + struct drm_framebuffer *fb; + + if (!conn_state->writeback_job || !conn_state->writeback_job->fb) + return 0; + + fb = conn_state->writeback_job->fb; + + /* + * Verify that the framebuffer format is supported and that its size + * matches the current mode. + */ + if (fb->width != mode->hdisplay || fb->height != mode->vdisplay) { + dev_dbg(dev->dev, "%s: invalid framebuffer size %ux%u\n", + __func__, fb->width, fb->height); + return -EINVAL; + } + + wb_state->format = rcar_du_format_info(fb->format->format); + if (wb_state->format == NULL) { + dev_dbg(dev->dev, "%s: unsupported format %08x\n", __func__, + fb->format->format); + return -EINVAL; + } + + return 0; +} + +static const struct drm_encoder_helper_funcs rcar_du_wb_enc_helper_funcs = { + .atomic_check = rcar_du_wb_enc_atomic_check, +}; + +/* + * Only RGB formats are currently supported as the VSP outputs RGB to the DU + * and can't convert to YUV separately for writeback. 
+ */ +static const u32 writeback_formats[] = { + DRM_FORMAT_RGB332, + DRM_FORMAT_ARGB4444, + DRM_FORMAT_XRGB4444, + DRM_FORMAT_ARGB1555, + DRM_FORMAT_XRGB1555, + DRM_FORMAT_RGB565, + DRM_FORMAT_BGR888, + DRM_FORMAT_RGB888, + DRM_FORMAT_BGRA8888, + DRM_FORMAT_BGRX8888, + DRM_FORMAT_ARGB8888, + DRM_FORMAT_XRGB8888, +}; + +int rcar_du_writeback_init(struct rcar_du_device *rcdu, + struct rcar_du_crtc *rcrtc) +{ + struct drm_writeback_connector *wb_conn = &rcrtc->writeback; + + wb_conn->encoder.possible_crtcs = 1 << drm_crtc_index(&rcrtc->crtc); + drm_connector_helper_add(&wb_conn->base, + &rcar_du_wb_conn_helper_funcs); + + return drm_writeback_connector_init(rcdu->ddev, wb_conn, + &rcar_du_wb_conn_funcs, + &rcar_du_wb_enc_helper_funcs, + writeback_formats, + ARRAY_SIZE(writeback_formats)); +} + +void rcar_du_writeback_setup(struct rcar_du_crtc *rcrtc, + struct vsp1_du_writeback_config *cfg) +{ + struct rcar_du_wb_conn_state *wb_state; + struct drm_connector_state *state; + struct rcar_du_wb_job *rjob; + struct drm_framebuffer *fb; + unsigned int i; + + state = rcrtc->writeback.base.state; + if (!state || !state->writeback_job || !state->writeback_job->fb) + return; + + fb = state->writeback_job->fb; + rjob = state->writeback_job->priv; + wb_state = to_rcar_wb_conn_state(state); + + cfg->pixelformat = wb_state->format->v4l2; + cfg->pitch = fb->pitches[0]; + + for (i = 0; i < wb_state->format->planes; ++i) + cfg->mem[i] = sg_dma_address(rjob->sg_tables[i].sgl) + + fb->offsets[i]; + + drm_writeback_queue_job(&rcrtc->writeback, state); +} + +void rcar_du_writeback_complete(struct rcar_du_crtc *rcrtc) +{ + drm_writeback_signal_completion(&rcrtc->writeback, 0); +} diff --git a/drivers/gpu/drm/rcar-du/rcar_du_writeback.h b/drivers/gpu/drm/rcar-du/rcar_du_writeback.h new file mode 100644 index 000000000000..fa87ebf8d21f --- /dev/null +++ b/drivers/gpu/drm/rcar-du/rcar_du_writeback.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * rcar_du_writeback.h -- R-Car Display Unit Writeback Support + * + * Copyright (C) 2019 Laurent Pinchart <laurent.pinchart@ideasonboard.com> + */ + +#ifndef __RCAR_DU_WRITEBACK_H__ +#define __RCAR_DU_WRITEBACK_H__ + +#include <drm/drm_plane.h> + +struct rcar_du_crtc; +struct rcar_du_device; +struct vsp1_du_atomic_pipe_config; + +#ifdef CONFIG_DRM_RCAR_WRITEBACK +int rcar_du_writeback_init(struct rcar_du_device *rcdu, + struct rcar_du_crtc *rcrtc); +void rcar_du_writeback_setup(struct rcar_du_crtc *rcrtc, + struct vsp1_du_writeback_config *cfg); +void rcar_du_writeback_complete(struct rcar_du_crtc *rcrtc); +#else +static inline int rcar_du_writeback_init(struct rcar_du_device *rcdu, + struct rcar_du_crtc *rcrtc) +{ + return -ENXIO; +} +static inline void +rcar_du_writeback_setup(struct rcar_du_crtc *rcrtc, + struct vsp1_du_writeback_config *cfg) +{ +} +static inline void rcar_du_writeback_complete(struct rcar_du_crtc *rcrtc) +{ +} +#endif + +#endif /* __RCAR_DU_WRITEBACK_H__ */ diff --git a/drivers/gpu/drm/rcar-du/rcar_lvds.c b/drivers/gpu/drm/rcar-du/rcar_lvds.c index 7ef97b2a6eda..620b51aab291 100644 --- a/drivers/gpu/drm/rcar-du/rcar_lvds.c +++ b/drivers/gpu/drm/rcar-du/rcar_lvds.c @@ -283,7 +283,7 @@ static void rcar_lvds_d3_e3_pll_calc(struct rcar_lvds *lvds, struct clk *clk, * divider. 
*/ fout = fvco / (1 << e) / div7; - div = DIV_ROUND_CLOSEST(fout, target); + div = max(1UL, DIV_ROUND_CLOSEST(fout, target)); diff = abs(fout / div - target); if (diff < pll->diff) { @@ -485,9 +485,13 @@ static void rcar_lvds_enable(struct drm_bridge *bridge) } if (lvds->info->quirks & RCAR_LVDS_QUIRK_GEN3_LVEN) { - /* Turn on the LVDS PHY. */ + /* + * Turn on the LVDS PHY. On D3, the LVEN and LVRES bit must be + * set at the same time, so don't write the register yet. + */ lvdcr0 |= LVDCR0_LVEN; - rcar_lvds_write(lvds, LVDCR0, lvdcr0); + if (!(lvds->info->quirks & RCAR_LVDS_QUIRK_PWD)) + rcar_lvds_write(lvds, LVDCR0, lvdcr0); } if (!(lvds->info->quirks & RCAR_LVDS_QUIRK_EXT_PLL)) { @@ -531,11 +535,16 @@ static bool rcar_lvds_mode_fixup(struct drm_bridge *bridge, const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { + struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge); + int min_freq; + /* * The internal LVDS encoder has a restricted clock frequency operating - * range (31MHz to 148.5MHz). Clamp the clock accordingly. + * range, from 5MHz to 148.5MHz on D3 and E3, and from 31MHz to + * 148.5MHz on all other platforms. Clamp the clock accordingly. */ - adjusted_mode->clock = clamp(adjusted_mode->clock, 31000, 148500); + min_freq = lvds->info->quirks & RCAR_LVDS_QUIRK_EXT_PLL ? 5000 : 31000; + adjusted_mode->clock = clamp(adjusted_mode->clock, min_freq, 148500); return true; } diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c index e75f77ff8e0f..fa92e992a282 100644 --- a/drivers/gpu/drm/sun4i/sun4i_tcon.c +++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c @@ -561,10 +561,10 @@ static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon, * Following code is a way to avoid quirks all around TCON * and DOTCLOCK drivers. 
*/ - if (display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_POSEDGE) + if (display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE) clk_set_phase(tcon->dclk, 240); - if (display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_NEGEDGE) + if (display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE) clk_set_phase(tcon->dclk, 0); regmap_update_bits(tcon->regs, SUN4I_TCON0_IO_POL_REG, diff --git a/drivers/gpu/drm/tve200/tve200_display.c b/drivers/gpu/drm/tve200/tve200_display.c index e8723a2412a6..d775d10dbe6a 100644 --- a/drivers/gpu/drm/tve200/tve200_display.c +++ b/drivers/gpu/drm/tve200/tve200_display.c @@ -149,7 +149,8 @@ static void tve200_display_enable(struct drm_simple_display_pipe *pipe, /* Vsync IRQ at start of Vsync at first */ ctrl1 |= TVE200_VSTSTYPE_VSYNC; - if (connector->display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_NEGEDGE) + if (connector->display_info.bus_flags & + DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE) ctrl1 |= TVE200_CTRL_TVCLKP; if ((mode->hdisplay == 352 && mode->vdisplay == 240) || /* SIF(525) */ diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c index d5a23295dd80..bb7b58407039 100644 --- a/drivers/gpu/drm/udl/udl_gem.c +++ b/drivers/gpu/drm/udl/udl_gem.c @@ -224,7 +224,7 @@ int udl_gem_mmap(struct drm_file *file, struct drm_device *dev, *offset = drm_vma_node_offset_addr(&gobj->base.vma_node); out: - drm_gem_object_put(&gobj->base); + drm_gem_object_put_unlocked(&gobj->base); unlock: mutex_unlock(&udl->gem_lock); return ret; diff --git a/drivers/gpu/drm/vc4/vc4_txp.c b/drivers/gpu/drm/vc4/vc4_txp.c index afb1c4ec4f18..cc2888dd7171 100644 --- a/drivers/gpu/drm/vc4/vc4_txp.c +++ b/drivers/gpu/drm/vc4/vc4_txp.c @@ -324,7 +324,7 @@ static void vc4_txp_connector_atomic_commit(struct drm_connector *conn, TXP_WRITE(TXP_DST_CTRL, ctrl); - drm_writeback_queue_job(&txp->connector, conn_state->writeback_job); + drm_writeback_queue_job(&txp->connector, conn_state); } static const struct drm_connector_helper_funcs vc4_txp_connector_helper_funcs = { diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c index b913a56f3426..2a9112515f46 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c @@ -564,11 +564,9 @@ static int vmw_fb_set_par(struct fb_info *info) 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }; - struct drm_display_mode *old_mode; struct drm_display_mode *mode; int ret; - old_mode = par->set_mode; mode = drm_mode_duplicate(vmw_priv->dev, &new_mode); if (!mode) { DRM_ERROR("Could not create new fb mode.\n"); @@ -579,11 +577,7 @@ static int vmw_fb_set_par(struct fb_info *info) mode->vdisplay = var->yres; vmw_guess_mode_timing(mode); - if (old_mode && drm_mode_equal(old_mode, mode)) { - drm_mode_destroy(vmw_priv->dev, mode); - mode = old_mode; - old_mode = NULL; - } else if (!vmw_kms_validate_mode_vram(vmw_priv, + if (!vmw_kms_validate_mode_vram(vmw_priv, mode->hdisplay * DIV_ROUND_UP(var->bits_per_pixel, 8), mode->vdisplay)) { @@ -620,8 +614,8 @@ static int vmw_fb_set_par(struct fb_info *info) schedule_delayed_work(&par->local_work, 0); out_unlock: - if (old_mode) - drm_mode_destroy(vmw_priv->dev, old_mode); + if (par->set_mode) + drm_mode_destroy(vmw_priv->dev, par->set_mode); par->set_mode = mode; mutex_unlock(&par->bo_mutex); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c index b93c558dd86e..7da752ca1c34 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c +++ 
b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c @@ -57,7 +57,7 @@ static int vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man, id = ida_alloc_max(&gman->gmr_ida, gman->max_gmr_ids - 1, GFP_KERNEL); if (id < 0) - return id; + return (id != -ENOMEM ? 0 : id); spin_lock(&gman->lock); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c index 31786b200afc..a3357ff7540d 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c @@ -311,7 +311,13 @@ static dma_addr_t __vmw_piter_dma_addr(struct vmw_piter *viter) static dma_addr_t __vmw_piter_sg_addr(struct vmw_piter *viter) { - return sg_page_iter_dma_address(&viter->iter); + /* + * FIXME: This driver wrongly mixes DMA and CPU SG list iteration and + * needs revision. See + * https://lore.kernel.org/lkml/20190104223531.GA1705@ziepe.ca/ + */ + return sg_page_iter_dma_address( + container_of(&viter->iter, struct sg_dma_page_iter, base)); }
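
The omap_drv.c hunks at the top of this patch drop the driver's hand-rolled per-display suspend/resume loops in favour of drm_mode_config_helper_suspend() and drm_mode_config_helper_resume(), which commit an "all outputs off" state on suspend, stash the previous atomic state in mode_config.suspend_state, and restore it on resume while also handling connector polling. A minimal sketch of that pattern follows; the foo_* names and the drvdata wiring are placeholders, not omapdrm symbols.

#include <linux/device.h>
#include <linux/pm.h>
#include <drm/drm_device.h>
#include <drm/drm_modeset_helper.h>

static int foo_drm_suspend(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);

	/* Disables output polling and saves the current atomic state
	 * before switching every CRTC off. */
	return drm_mode_config_helper_suspend(drm);
}

static int foo_drm_resume(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);

	/* Re-commits the saved state and re-enables output polling. */
	return drm_mode_config_helper_resume(drm);
}

static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_drm_suspend, foo_drm_resume);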
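
The rcar_lvds.c change guards the D3/E3 PLL post-divider: when the candidate output frequency is less than half of the requested dot clock, DIV_ROUND_CLOSEST() rounds down to zero and the following "fout / div" would divide by zero. A self-contained illustration with made-up frequencies (not values taken from the driver) is shown below.

#include <linux/kernel.h>	/* DIV_ROUND_CLOSEST(), max() */

static unsigned long lvds_div_example(void)
{
	unsigned long fout = 30000;	/* kHz, hypothetical post-PLL output */
	unsigned long target = 74250;	/* kHz, hypothetical dot clock */
	unsigned long div;

	/* Unclamped: (30000 + 74250 / 2) / 74250 == 0. */
	div = DIV_ROUND_CLOSEST(fout, target);

	/* The patch clamps the divider to at least 1, so the difference
	 * computed below can no longer divide by zero. */
	div = max(1UL, div);

	return fout / div > target ? fout / div - target : target - fout / div;
}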
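
The FIXME added in __vmw_piter_sg_addr() refers to the split between CPU-page and DMA-address iterators in the scatterlist API: the driver walks the list with a struct sg_page_iter while asking for DMA addresses, and the container_of() cast only works because struct sg_dma_page_iter wraps a struct sg_page_iter as its "base" member. The sketch below shows the two iterators each used on their own side of the mapping; it is an illustration of the API, not vmwgfx code, and handle_page()/handle_dma() are placeholder callbacks.

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static void handle_page(struct page *page) { /* placeholder */ }
static void handle_dma(dma_addr_t addr) { /* placeholder */ }

static void sg_walk_example(struct device *dev, struct sg_table *sgt)
{
	struct sg_page_iter cpu_iter;
	struct sg_dma_page_iter dma_iter;
	int nents;

	nents = dma_map_sg(dev, sgt->sgl, sgt->orig_nents, DMA_BIDIRECTIONAL);
	if (nents <= 0)
		return;

	/* CPU side: one step per struct page in the list. */
	for_each_sg_page(sgt->sgl, &cpu_iter, sgt->orig_nents, 0)
		handle_page(sg_page_iter_page(&cpu_iter));

	/* DMA side: one step per IOVA page; with an IOMMU the layout may
	 * differ from the CPU walk, which is why mixing the two iterators
	 * is fragile. */
	for_each_sg_dma_page(sgt->sgl, &dma_iter, nents, 0)
		handle_dma(sg_page_iter_dma_address(&dma_iter));

	dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, DMA_BIDIRECTIONAL);
}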