Diffstat (limited to 'drivers/gpu/drm/amd')
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/Makefile | 9
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/aldebaran.c | 407
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/aldebaran.h | 32
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 36
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 8
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 63
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c | 153
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h | 12
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 450
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 284
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_display.h | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 109
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 50
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 45
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 22
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 80
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h | 21
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c | 19
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c | 17
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 14
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h | 12
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h | 19
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 113
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 21
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 73
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 318
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c | 8
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c | 98
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h | 85
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c | 10
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_test.c | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 16
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c | 17
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h | 9
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 10
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h | 28
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 27
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | 66
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c | 26
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h | 7
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h | 17
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/cik_sdma.c | 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 9
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 9
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/dce_v6_0.c | 8
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 9
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/dce_virtual.c | 15
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/df_v3_6.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 153
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 26
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 104
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c | 36
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h | 12
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c | 45
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.h | 9
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c | 24
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c | 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 13
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 57
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/mes_v10_1.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c | 9
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c | 32
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.h | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c | 19
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.h | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c | 128
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c | 141
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/nbio_v7_4.h | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/nv.c | 40
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h | 36
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/psp_v11_0.c | 8
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c | 8
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | 8
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/sdma_v4_4.c | 35
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c | 8
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c | 33
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/si_dma.c | 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/soc15.c | 27
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/soc15_common.h | 75
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/umc_v6_1.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/umc_v6_1.h | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/umc_v6_7.c | 281
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/umc_v6_7.h | 37
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/umc_v8_7.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/umc_v8_7.h | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vce_v2_0.c | 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vce_v3_0.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vce_v4_0.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c | 147
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vega20_ih.c | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vi.c | 193
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 116
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c | 7
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 6
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h | 2
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c | 129
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_iommu.c | 12
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 28
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_process.c | 135
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c | 6
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 312
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | 18
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c | 259
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c | 14
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c | 37
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c | 72
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 9
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c | 134
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h | 40
-rw-r--r-- drivers/gpu/drm/amd/display/dc/Makefile | 5
-rw-r--r-- drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c | 190
-rw-r--r-- drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c | 1154
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c | 120
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c | 71
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c | 7
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/dcn301_smu.c | 29
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c | 105
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.h | 28
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc.c | 86
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_link.c | 111
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 270
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c | 303
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c | 28
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 40
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc.h | 14
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_bios_types.h | 6
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_link.h | 20
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_stream.h | 17
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_types.h | 19
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/dce_abm.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c | 6
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c | 31
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | 19
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c | 16
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 40
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h | 7
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c | 16
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c | 15
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c | 16
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c | 41
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.h | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c | 5
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dm_helpers.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c | 28
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c | 28
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c | 28
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c | 28
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c | 8
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c | 28
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/core_types.h | 27
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h | 8
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h | 10
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h | 13
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h | 5
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/link_enc_cfg.h | 86
-rw-r--r-- drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c | 29
-rw-r--r-- drivers/gpu/drm/amd/display/dc/irq/dcn30/irq_service_dcn30.c | 25
-rw-r--r-- drivers/gpu/drm/amd/display/dc/irq/dcn302/irq_service_dcn302.c | 30
-rw-r--r-- drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h | 81
-rw-r--r-- drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c | 146
-rw-r--r-- drivers/gpu/drm/amd/display/include/logger_types.h | 3
-rw-r--r-- drivers/gpu/drm/amd/display/modules/color/color_gamma.c | 6
-rw-r--r-- drivers/gpu/drm/amd/display/modules/color/color_gamma.h | 4
-rw-r--r-- drivers/gpu/drm/amd/display/modules/freesync/freesync.c | 41
-rw-r--r-- drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c | 10
-rw-r--r-- drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h | 4
-rw-r--r-- drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c | 35
-rw-r--r-- drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c | 6
-rw-r--r-- drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c | 8
-rw-r--r-- drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c | 10
-rw-r--r-- drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c | 12
-rw-r--r-- drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h | 7
-rw-r--r-- drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h | 12
-rw-r--r-- drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_offset.h | 6
-rw-r--r-- drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_sh_mask.h | 9
-rw-r--r-- drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_1_sh_mask.h | 16
-rw-r--r-- drivers/gpu/drm/amd/include/atombios.h | 4
-rw-r--r-- drivers/gpu/drm/amd/include/atomfirmware.h | 79
-rw-r--r-- drivers/gpu/drm/amd/include/ivsrcid/dcn/irqsrcs_dcn_1_0.h | 2
-rw-r--r-- drivers/gpu/drm/amd/include/kgd_pp_interface.h | 14
-rw-r--r-- drivers/gpu/drm/amd/pm/amdgpu_dpm.c | 14
-rw-r--r-- drivers/gpu/drm/amd/pm/amdgpu_pm.c | 212
-rw-r--r-- drivers/gpu/drm/amd/pm/inc/aldebaran_ppsmc.h | 3
-rw-r--r-- drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h | 128
-rw-r--r-- drivers/gpu/drm/amd/pm/inc/smu11_driver_if_vangogh.h | 40
-rw-r--r-- drivers/gpu/drm/amd/pm/inc/smu13_driver_if_aldebaran.h | 11
-rw-r--r-- drivers/gpu/drm/amd/pm/inc/smu_types.h | 1
-rw-r--r-- drivers/gpu/drm/amd/pm/inc/smu_v11_0.h | 6
-rw-r--r-- drivers/gpu/drm/amd/pm/inc/smu_v12_0.h | 2
-rw-r--r-- drivers/gpu/drm/amd/pm/inc/smu_v13_0.h | 5
-rw-r--r-- drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c | 13
-rw-r--r-- drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c | 6
-rw-r--r-- drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c | 4
-rw-r--r-- drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c | 4
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 407
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c | 1
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c | 30
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c | 22
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c | 1
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c | 384
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c | 1
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c | 123
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c | 355
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c | 60
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c | 32
-rw-r--r-- drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h | 5
269 files changed, 8594 insertions, 3191 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 741b68874e53..ee85e8aba636 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -71,7 +71,7 @@ amdgpu-y += \
vi.o mxgpu_vi.o nbio_v6_1.o soc15.o emu_soc.o mxgpu_ai.o nbio_v7_0.o vega10_reg_init.o \
vega20_reg_init.o nbio_v7_4.o nbio_v2_3.o nv.o navi10_reg_init.o navi14_reg_init.o \
arct_reg_init.o navi12_reg_init.o mxgpu_nv.o sienna_cichlid_reg_init.o vangogh_reg_init.o \
- nbio_v7_2.o dimgrey_cavefish_reg_init.o hdp_v4_0.o hdp_v5_0.o aldebaran_reg_init.o
+ nbio_v7_2.o dimgrey_cavefish_reg_init.o hdp_v4_0.o hdp_v5_0.o aldebaran_reg_init.o aldebaran.o
# add DF block
amdgpu-y += \
@@ -88,7 +88,7 @@ amdgpu-y += \
# add UMC block
amdgpu-y += \
- umc_v6_1.o umc_v6_0.o umc_v8_7.o
+ umc_v6_0.o umc_v6_1.o umc_v6_7.o umc_v8_7.o
# add IH block
amdgpu-y += \
@@ -179,9 +179,14 @@ amdgpu-y += \
smuio_v11_0_6.o \
smuio_v13_0.o
+# add reset block
+amdgpu-y += \
+ amdgpu_reset.o
+
# add amdkfd interfaces
amdgpu-y += amdgpu_amdkfd.o
+
ifneq ($(CONFIG_HSA_AMD),)
AMDKFD_PATH := ../amdkfd
include $(FULL_AMD_PATH)/amdkfd/Makefile
diff --git a/drivers/gpu/drm/amd/amdgpu/aldebaran.c b/drivers/gpu/drm/amd/amdgpu/aldebaran.c
new file mode 100644
index 000000000000..65b1dca4b02e
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/aldebaran.c
@@ -0,0 +1,407 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "aldebaran.h"
+#include "amdgpu_reset.h"
+#include "amdgpu_amdkfd.h"
+#include "amdgpu_dpm.h"
+#include "amdgpu_job.h"
+#include "amdgpu_ring.h"
+#include "amdgpu_ras.h"
+#include "amdgpu_psp.h"
+#include "amdgpu_xgmi.h"
+
+static struct amdgpu_reset_handler *
+aldebaran_get_reset_handler(struct amdgpu_reset_control *reset_ctl,
+ struct amdgpu_reset_context *reset_context)
+{
+ struct amdgpu_reset_handler *handler;
+ struct amdgpu_device *adev = (struct amdgpu_device *)reset_ctl->handle;
+
+ if (reset_context->method != AMD_RESET_METHOD_NONE) {
+ dev_dbg(adev->dev, "Getting reset handler for method %d\n",
+ reset_context->method);
+ list_for_each_entry(handler, &reset_ctl->reset_handlers,
+ handler_list) {
+ if (handler->reset_method == reset_context->method)
+ return handler;
+ }
+ }
+
+ if (adev->gmc.xgmi.connected_to_cpu) {
+ list_for_each_entry(handler, &reset_ctl->reset_handlers,
+ handler_list) {
+ if (handler->reset_method == AMD_RESET_METHOD_MODE2) {
+ reset_context->method = AMD_RESET_METHOD_MODE2;
+ return handler;
+ }
+ }
+ }
+
+ dev_dbg(adev->dev, "Reset handler not found!\n");
+
+ return NULL;
+}
+
+static int aldebaran_mode2_suspend_ip(struct amdgpu_device *adev)
+{
+ int r, i;
+
+ amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
+ amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
+
+ for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
+ if (!(adev->ip_blocks[i].version->type ==
+ AMD_IP_BLOCK_TYPE_GFX ||
+ adev->ip_blocks[i].version->type ==
+ AMD_IP_BLOCK_TYPE_SDMA))
+ continue;
+
+ r = adev->ip_blocks[i].version->funcs->suspend(adev);
+
+ if (r) {
+ dev_err(adev->dev,
+ "suspend of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
+ return r;
+ }
+
+ adev->ip_blocks[i].status.hw = false;
+ }
+
+ return r;
+}
+
+static int
+aldebaran_mode2_prepare_hwcontext(struct amdgpu_reset_control *reset_ctl,
+ struct amdgpu_reset_context *reset_context)
+{
+ int r = 0;
+ struct amdgpu_device *adev = (struct amdgpu_device *)reset_ctl->handle;
+
+ dev_dbg(adev->dev, "Aldebaran prepare hw context\n");
+ /* Don't suspend on bare metal if we are not going to HW reset the ASIC */
+ if (!amdgpu_sriov_vf(adev))
+ r = aldebaran_mode2_suspend_ip(adev);
+
+ return r;
+}
+
+static void aldebaran_async_reset(struct work_struct *work)
+{
+ struct amdgpu_reset_handler *handler;
+ struct amdgpu_reset_control *reset_ctl =
+ container_of(work, struct amdgpu_reset_control, reset_work);
+ struct amdgpu_device *adev = (struct amdgpu_device *)reset_ctl->handle;
+
+ list_for_each_entry(handler, &reset_ctl->reset_handlers,
+ handler_list) {
+ if (handler->reset_method == reset_ctl->active_reset) {
+ dev_dbg(adev->dev, "Resetting device\n");
+ handler->do_reset(adev);
+ break;
+ }
+ }
+}
+
+static int aldebaran_mode2_reset(struct amdgpu_device *adev)
+{
+ /* disable BM */
+ pci_clear_master(adev->pdev);
+ adev->asic_reset_res = amdgpu_dpm_mode2_reset(adev);
+ return adev->asic_reset_res;
+}
+
+static int
+aldebaran_mode2_perform_reset(struct amdgpu_reset_control *reset_ctl,
+ struct amdgpu_reset_context *reset_context)
+{
+ struct amdgpu_device *tmp_adev = NULL;
+ struct amdgpu_device *adev = (struct amdgpu_device *)reset_ctl->handle;
+ int r = 0;
+
+ dev_dbg(adev->dev, "aldebaran perform hw reset\n");
+ if (reset_context->hive == NULL) {
+ /* Wrong context, return error */
+ return -EINVAL;
+ }
+
+ list_for_each_entry(tmp_adev, &reset_context->hive->device_list,
+ gmc.xgmi.head) {
+ mutex_lock(&tmp_adev->reset_cntl->reset_lock);
+ tmp_adev->reset_cntl->active_reset = AMD_RESET_METHOD_MODE2;
+ }
+ /*
+ * Mode2 reset doesn't need any sync between nodes in XGMI hive, instead launch
+ * them together so that they can be completed asynchronously on multiple nodes
+ */
+ list_for_each_entry(tmp_adev, &reset_context->hive->device_list,
+ gmc.xgmi.head) {
+ /* For XGMI run all resets in parallel to speed up the process */
+ if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
+ if (!queue_work(system_unbound_wq,
+ &tmp_adev->reset_cntl->reset_work))
+ r = -EALREADY;
+ } else
+ r = aldebaran_mode2_reset(tmp_adev);
+ if (r) {
+ dev_err(tmp_adev->dev,
+ "ASIC reset failed with error, %d for drm dev, %s",
+ r, adev_to_drm(tmp_adev)->unique);
+ break;
+ }
+ }
+
+ /* For XGMI wait for all resets to complete before proceed */
+ if (!r) {
+ list_for_each_entry(tmp_adev,
+ &reset_context->hive->device_list,
+ gmc.xgmi.head) {
+ if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
+ flush_work(&tmp_adev->reset_cntl->reset_work);
+ r = tmp_adev->asic_reset_res;
+ if (r)
+ break;
+ }
+ }
+ }
+
+ list_for_each_entry(tmp_adev, &reset_context->hive->device_list,
+ gmc.xgmi.head) {
+ mutex_unlock(&tmp_adev->reset_cntl->reset_lock);
+ tmp_adev->reset_cntl->active_reset = AMD_RESET_METHOD_NONE;
+ }
+
+ return r;
+}
+
+static int aldebaran_mode2_restore_ip(struct amdgpu_device *adev)
+{
+ struct amdgpu_firmware_info *ucode_list[AMDGPU_UCODE_ID_MAXIMUM];
+ struct amdgpu_firmware_info *ucode;
+ struct amdgpu_ip_block *cmn_block;
+ int ucode_count = 0;
+ int i, r;
+
+ dev_dbg(adev->dev, "Reloading ucodes after reset\n");
+ for (i = 0; i < adev->firmware.max_ucodes; i++) {
+ ucode = &adev->firmware.ucode[i];
+ if (!ucode->fw)
+ continue;
+ switch (ucode->ucode_id) {
+ case AMDGPU_UCODE_ID_SDMA0:
+ case AMDGPU_UCODE_ID_SDMA1:
+ case AMDGPU_UCODE_ID_SDMA2:
+ case AMDGPU_UCODE_ID_SDMA3:
+ case AMDGPU_UCODE_ID_SDMA4:
+ case AMDGPU_UCODE_ID_SDMA5:
+ case AMDGPU_UCODE_ID_SDMA6:
+ case AMDGPU_UCODE_ID_SDMA7:
+ case AMDGPU_UCODE_ID_CP_MEC1:
+ case AMDGPU_UCODE_ID_CP_MEC1_JT:
+ case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL:
+ case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM:
+ case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM:
+ case AMDGPU_UCODE_ID_RLC_G:
+ ucode_list[ucode_count++] = ucode;
+ break;
+ default:
+ break;
+ };
+ }
+
+ /* Reinit NBIF block */
+ cmn_block =
+ amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_COMMON);
+ if (unlikely(!cmn_block)) {
+ dev_err(adev->dev, "Failed to get BIF handle\n");
+ return -EINVAL;
+ }
+ r = cmn_block->version->funcs->resume(adev);
+ if (r)
+ return r;
+
+ /* Reinit GFXHUB */
+ adev->gfxhub.funcs->init(adev);
+ r = adev->gfxhub.funcs->gart_enable(adev);
+ if (r) {
+ dev_err(adev->dev, "GFXHUB gart reenable failed after reset\n");
+ return r;
+ }
+
+ /* Reload GFX firmware */
+ r = psp_load_fw_list(&adev->psp, ucode_list, ucode_count);
+ if (r) {
+ dev_err(adev->dev, "GFX ucode load failed after reset\n");
+ return r;
+ }
+
+ /* Resume RLC, FW needs RLC alive to complete reset process */
+ adev->gfx.rlc.funcs->resume(adev);
+
+ /* Wait for FW reset event complete */
+ r = smu_wait_for_event(adev, SMU_EVENT_RESET_COMPLETE, 0);
+ if (r) {
+ dev_err(adev->dev,
+ "Failed to get response from firmware after reset\n");
+ return r;
+ }
+
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (!(adev->ip_blocks[i].version->type ==
+ AMD_IP_BLOCK_TYPE_GFX ||
+ adev->ip_blocks[i].version->type ==
+ AMD_IP_BLOCK_TYPE_SDMA))
+ continue;
+ r = adev->ip_blocks[i].version->funcs->resume(adev);
+ if (r) {
+ dev_err(adev->dev,
+ "resume of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
+ return r;
+ }
+
+ adev->ip_blocks[i].status.hw = true;
+ }
+
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (!(adev->ip_blocks[i].version->type ==
+ AMD_IP_BLOCK_TYPE_GFX ||
+ adev->ip_blocks[i].version->type ==
+ AMD_IP_BLOCK_TYPE_SDMA ||
+ adev->ip_blocks[i].version->type ==
+ AMD_IP_BLOCK_TYPE_COMMON))
+ continue;
+
+ if (adev->ip_blocks[i].version->funcs->late_init) {
+ r = adev->ip_blocks[i].version->funcs->late_init(
+ (void *)adev);
+ if (r) {
+ dev_err(adev->dev,
+ "late_init of IP block <%s> failed %d after reset\n",
+ adev->ip_blocks[i].version->funcs->name,
+ r);
+ return r;
+ }
+ }
+ adev->ip_blocks[i].status.late_initialized = true;
+ }
+
+ amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
+ amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
+
+ return r;
+}
+
+static int
+aldebaran_mode2_restore_hwcontext(struct amdgpu_reset_control *reset_ctl,
+ struct amdgpu_reset_context *reset_context)
+{
+ int r;
+ struct amdgpu_device *tmp_adev = NULL;
+
+ if (reset_context->hive == NULL) {
+ /* Wrong context, return error */
+ return -EINVAL;
+ }
+
+ list_for_each_entry(tmp_adev, &reset_context->hive->device_list,
+ gmc.xgmi.head) {
+ dev_info(tmp_adev->dev,
+ "GPU reset succeeded, trying to resume\n");
+ r = aldebaran_mode2_restore_ip(tmp_adev);
+ if (r)
+ goto end;
+
+ /*
+ * Add this ASIC as tracked as reset was already
+ * complete successfully.
+ */
+ amdgpu_register_gpu_instance(tmp_adev);
+
+ /* Resume RAS */
+ amdgpu_ras_resume(tmp_adev);
+
+ /* Update PSP FW topology after reset */
+ if (reset_context->hive &&
+ tmp_adev->gmc.xgmi.num_physical_nodes > 1)
+ r = amdgpu_xgmi_update_topology(reset_context->hive,
+ tmp_adev);
+
+ if (!r) {
+ amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
+
+ r = amdgpu_ib_ring_tests(tmp_adev);
+ if (r) {
+ dev_err(tmp_adev->dev,
+ "ib ring test failed (%d).\n", r);
+ r = -EAGAIN;
+ tmp_adev->asic_reset_res = r;
+ goto end;
+ }
+ }
+ }
+
+end:
+ return r;
+}
+
+static struct amdgpu_reset_handler aldebaran_mode2_handler = {
+ .reset_method = AMD_RESET_METHOD_MODE2,
+ .prepare_env = NULL,
+ .prepare_hwcontext = aldebaran_mode2_prepare_hwcontext,
+ .perform_reset = aldebaran_mode2_perform_reset,
+ .restore_hwcontext = aldebaran_mode2_restore_hwcontext,
+ .restore_env = NULL,
+ .do_reset = aldebaran_mode2_reset,
+};
+
+int aldebaran_reset_init(struct amdgpu_device *adev)
+{
+ struct amdgpu_reset_control *reset_ctl;
+
+ reset_ctl = kzalloc(sizeof(*reset_ctl), GFP_KERNEL);
+ if (!reset_ctl)
+ return -ENOMEM;
+
+ reset_ctl->handle = adev;
+ reset_ctl->async_reset = aldebaran_async_reset;
+ reset_ctl->active_reset = AMD_RESET_METHOD_NONE;
+ reset_ctl->get_reset_handler = aldebaran_get_reset_handler;
+
+ INIT_LIST_HEAD(&reset_ctl->reset_handlers);
+ INIT_WORK(&reset_ctl->reset_work, reset_ctl->async_reset);
+ /* Only mode2 is handled through reset control now */
+ amdgpu_reset_add_handler(reset_ctl, &aldebaran_mode2_handler);
+
+ adev->reset_cntl = reset_ctl;
+
+ return 0;
+}
+
+int aldebaran_reset_fini(struct amdgpu_device *adev)
+{
+ kfree(adev->reset_cntl);
+ adev->reset_cntl = NULL;
+ return 0;
+}
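
[Editor's note] The core structure introduced by aldebaran.c above is the handler registry: each reset method is a plain struct on a list owned by amdgpu_reset_control, looked up first by the method the caller explicitly requested and then by a device-specific fallback (mode2 when the GPU is XGMI-connected to the CPU). Below is a minimal user-space model of that dispatch, compilable on its own; the fake handler and main() are illustrative only, not driver code.

#include <stdio.h>

enum reset_method { METHOD_NONE = -1, METHOD_MODE1, METHOD_MODE2 };

struct reset_handler {
	enum reset_method method;
	int (*do_reset)(void);
	struct reset_handler *next;	/* the kernel uses a list_head instead */
};

struct reset_control {
	struct reset_handler *handlers;
};

static void add_handler(struct reset_control *ctl, struct reset_handler *h)
{
	h->next = ctl->handlers;
	ctl->handlers = h;
}

/* Mirrors aldebaran_get_reset_handler(): honour an explicitly requested
 * method first, otherwise fall back to mode2. */
static struct reset_handler *get_handler(struct reset_control *ctl,
					 enum reset_method wanted)
{
	struct reset_handler *h;

	if (wanted != METHOD_NONE)
		for (h = ctl->handlers; h; h = h->next)
			if (h->method == wanted)
				return h;
	for (h = ctl->handlers; h; h = h->next)
		if (h->method == METHOD_MODE2)
			return h;
	return NULL;
}

static int fake_mode2_reset(void)
{
	puts("mode2 reset performed");
	return 0;
}

int main(void)
{
	struct reset_control ctl = { .handlers = NULL };
	struct reset_handler mode2 = { METHOD_MODE2, fake_mode2_reset, NULL };
	struct reset_handler *h;

	add_handler(&ctl, &mode2);
	h = get_handler(&ctl, METHOD_NONE);	/* no explicit request: falls back */
	return h ? h->do_reset() : 1;
}

The same lookup happens twice in the driver, once synchronously and once from the work item queued per XGMI node, which is why the handler is stored by method rather than by pointer.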
diff --git a/drivers/gpu/drm/amd/amdgpu/aldebaran.h b/drivers/gpu/drm/amd/amdgpu/aldebaran.h
new file mode 100644
index 000000000000..a07db5454d49
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/aldebaran.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __ALDEBARAN_H__
+#define __ALDEBARAN_H__
+
+#include "amdgpu.h"
+
+int aldebaran_reset_init(struct amdgpu_device *adev);
+int aldebaran_reset_fini(struct amdgpu_device *adev);
+
+#endif
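
[Editor's note] amdgpu_reset.c, which consumes these two entry points, appears in the diffstat (98 lines) but is not shown in this excerpt. Its per-ASIC wiring is presumably a plain switch on the chip type; a compilable sketch of that shape follows, with the chip enum and prints as stand-ins rather than the file's actual contents.

#include <stdio.h>

enum chip_type { CHIP_VEGA20, CHIP_ALDEBARAN };

static int aldebaran_reset_init_model(void)
{
	puts("aldebaran: mode2 reset control registered");
	return 0;
}

/* Sketch of an amdgpu_reset_init()-style dispatcher: only ASICs that
 * opt in get a reset control; everything else keeps the legacy paths. */
static int reset_init_model(enum chip_type type)
{
	switch (type) {
	case CHIP_ALDEBARAN:
		return aldebaran_reset_init_model();
	default:
		return 0;
	}
}

int main(void)
{
	return reset_init_model(CHIP_ALDEBARAN);
}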
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index a037c223c251..dc3a69296321 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -107,7 +107,6 @@
#include "amdgpu_gfxhub.h"
#include "amdgpu_df.h"
#include "amdgpu_smuio.h"
-#include "amdgpu_hdp.h"
#define MAX_GPU_INSTANCE 16
@@ -271,6 +270,8 @@ struct amdgpu_bo_va_mapping;
struct amdgpu_atif;
struct kfd_vm_fault_info;
struct amdgpu_hive_info;
+struct amdgpu_reset_context;
+struct amdgpu_reset_control;
enum amdgpu_cp_irq {
AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP = 0,
@@ -589,6 +590,7 @@ struct amdgpu_allowed_register_entry {
};
enum amd_reset_method {
+ AMD_RESET_METHOD_NONE = -1,
AMD_RESET_METHOD_LEGACY = 0,
AMD_RESET_METHOD_MODE0,
AMD_RESET_METHOD_MODE1,
@@ -920,6 +922,7 @@ struct amdgpu_device {
struct amdgpu_irq_src pageflip_irq;
struct amdgpu_irq_src hpd_irq;
struct amdgpu_irq_src dmub_trace_irq;
+ struct amdgpu_irq_src dmub_outbox_irq;
/* rings */
u64 fence_context;
@@ -1030,13 +1033,9 @@ struct amdgpu_device {
/* s3/s4 mask */
bool in_suspend;
- bool in_hibernate;
-
- /*
- * The combination flag in_poweroff_reboot_com used to identify the poweroff
- * and reboot opt in the s0i3 system-wide suspend.
- */
- bool in_poweroff_reboot_com;
+ bool in_s3;
+ bool in_s4;
+ bool in_s0ix;
atomic_t in_gpu_reset;
enum pp_mp1_state mp1_state;
@@ -1078,6 +1077,8 @@ struct amdgpu_device {
bool in_pci_err_recovery;
struct pci_saved_state *pci_state;
+
+ struct amdgpu_reset_control *reset_cntl;
};
static inline struct amdgpu_device *drm_to_adev(struct drm_device *ddev)
@@ -1129,13 +1130,10 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type);
bool amdgpu_device_has_dc_support(struct amdgpu_device *adev);
int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
- struct amdgpu_job *job,
- bool *need_full_reset_arg);
+ struct amdgpu_reset_context *reset_context);
-int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
- struct list_head *device_list_handle,
- bool *need_full_reset_arg,
- bool skip_hw_reset);
+int amdgpu_do_asic_reset(struct list_head *device_list_handle,
+ struct amdgpu_reset_context *reset_context);
int emu_soc_asic_init(struct amdgpu_device *adev);
@@ -1275,8 +1273,9 @@ void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
const u32 *registers,
const u32 array_size);
-bool amdgpu_device_supports_atpx(struct drm_device *dev);
int amdgpu_device_mode1_reset(struct amdgpu_device *adev);
+bool amdgpu_device_supports_atpx(struct drm_device *dev);
+bool amdgpu_device_supports_px(struct drm_device *dev);
bool amdgpu_device_supports_boco(struct drm_device *dev);
bool amdgpu_device_supports_baco(struct drm_device *dev);
bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
@@ -1390,6 +1389,13 @@ void amdgpu_pci_resume(struct pci_dev *pdev);
bool amdgpu_device_cache_pci_state(struct pci_dev *pdev);
bool amdgpu_device_load_pci_state(struct pci_dev *pdev);
+bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev);
+
+int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
+ enum amd_clockgating_state state);
+int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
+ enum amd_powergating_state state);
+
#include "amdgpu_object.h"
static inline bool amdgpu_is_tmz(struct amdgpu_device *adev)
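
[Editor's note] The amdgpu.h hunk above replaces the ad-hoc in_hibernate and in_poweroff_reboot_com flags with explicit in_s3/in_s4/in_s0ix state, and later hunks in amdgpu_device.c key off in_s0ix to leave GFX and PSP untouched during suspend. A self-contained model of that policy, with block names and output purely illustrative:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the new suspend-state flags on struct amdgpu_device. */
struct dev_state {
	bool in_s3, in_s4, in_s0ix;
};

/* In S0ix, GFX sits in gfxoff and PSP is always-on hardware, so neither
 * is suspended; full S3/S4 suspends everything.  This mirrors the checks
 * added to amdgpu_device_ip_suspend_phase2() later in this patch. */
static void suspend_ip_blocks(const struct dev_state *s)
{
	static const char * const blocks[] = { "GFX", "PSP", "SDMA" };

	for (int i = 0; i < 3; i++) {
		bool skip = s->in_s0ix && i < 2;	/* GFX and PSP */

		printf("%s %s\n", skip ? "skip" : "suspend", blocks[i]);
	}
}

int main(void)
{
	struct dev_state s0ix = { .in_s0ix = true };

	suspend_ip_blocks(&s0ix);
	return 0;
}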
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 1c6be53313a8..5f6696a3c778 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -246,6 +246,7 @@ int amdgpu_amdkfd_alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
bp.type = ttm_bo_type_kernel;
bp.resv = NULL;
+ bp.bo_ptr_size = sizeof(struct amdgpu_bo);
if (cp_mqd_gfx9)
bp.flags |= AMDGPU_GEM_CREATE_CP_MQD_GFX9;
@@ -317,6 +318,7 @@ int amdgpu_amdkfd_alloc_gws(struct kgd_dev *kgd, size_t size,
{
struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
struct amdgpu_bo *bo = NULL;
+ struct amdgpu_bo_user *ubo;
struct amdgpu_bo_param bp;
int r;
@@ -327,14 +329,16 @@ int amdgpu_amdkfd_alloc_gws(struct kgd_dev *kgd, size_t size,
bp.flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
bp.type = ttm_bo_type_device;
bp.resv = NULL;
+ bp.bo_ptr_size = sizeof(struct amdgpu_bo);
- r = amdgpu_bo_create(adev, &bp, &bo);
+ r = amdgpu_bo_create_user(adev, &bp, &ubo);
if (r) {
dev_err(adev->dev,
"failed to allocate gws BO for amdkfd (%d)\n", r);
return r;
}
+ bo = &ubo->bo;
*mem_obj = bo;
return 0;
}
@@ -495,8 +499,6 @@ int amdgpu_amdkfd_get_dmabuf_info(struct kgd_dev *kgd, int dma_buf_fd,
*dma_buf_kgd = (struct kgd_dev *)adev;
if (bo_size)
*bo_size = amdgpu_bo_size(bo);
- if (metadata_size)
- *metadata_size = bo->metadata_size;
if (metadata_buffer)
r = amdgpu_bo_get_metadata(bo, metadata_buffer, buffer_size,
metadata_size, &metadata_flags);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index 14f68c028126..5ffb07b02810 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -234,14 +234,10 @@ uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *s
})
/* GPUVM API */
-int amdgpu_amdkfd_gpuvm_create_process_vm(struct kgd_dev *kgd, u32 pasid,
- void **vm, void **process_info,
- struct dma_fence **ef);
int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
struct file *filp, u32 pasid,
void **vm, void **process_info,
struct dma_fence **ef);
-void amdgpu_amdkfd_gpuvm_destroy_process_vm(struct kgd_dev *kgd, void *vm);
void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *vm);
uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *vm);
int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index e93850f2f3b1..7d4118c8128a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -445,22 +445,19 @@ static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem)
mapping_flags |= AMDGPU_VM_MTYPE_UC;
} else if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
if (bo_adev == adev) {
- mapping_flags |= AMDGPU_VM_MTYPE_RW;
+ mapping_flags |= coherent ?
+ AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
if (adev->gmc.xgmi.connected_to_cpu)
snoop = true;
} else {
- mapping_flags |= AMDGPU_VM_MTYPE_NC;
+ mapping_flags |= AMDGPU_VM_MTYPE_UC;
if (amdgpu_xgmi_same_hive(adev, bo_adev))
snoop = true;
}
} else {
snoop = true;
- if (adev->gmc.xgmi.connected_to_cpu)
- /* system memory uses NC on A+A */
- mapping_flags |= AMDGPU_VM_MTYPE_NC;
- else
- mapping_flags |= coherent ?
- AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
+ mapping_flags |= coherent ?
+ AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
}
break;
default:
@@ -1037,41 +1034,6 @@ create_evict_fence_fail:
return ret;
}
-int amdgpu_amdkfd_gpuvm_create_process_vm(struct kgd_dev *kgd, u32 pasid,
- void **vm, void **process_info,
- struct dma_fence **ef)
-{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
- struct amdgpu_vm *new_vm;
- int ret;
-
- new_vm = kzalloc(sizeof(*new_vm), GFP_KERNEL);
- if (!new_vm)
- return -ENOMEM;
-
- /* Initialize AMDGPU part of the VM */
- ret = amdgpu_vm_init(adev, new_vm, AMDGPU_VM_CONTEXT_COMPUTE, pasid);
- if (ret) {
- pr_err("Failed init vm ret %d\n", ret);
- goto amdgpu_vm_init_fail;
- }
-
- /* Initialize KFD part of the VM and process info */
- ret = init_kfd_vm(new_vm, process_info, ef);
- if (ret)
- goto init_kfd_vm_fail;
-
- *vm = (void *) new_vm;
-
- return 0;
-
-init_kfd_vm_fail:
- amdgpu_vm_fini(adev, new_vm);
-amdgpu_vm_init_fail:
- kfree(new_vm);
- return ret;
-}
-
int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
struct file *filp, u32 pasid,
void **vm, void **process_info,
@@ -1138,21 +1100,6 @@ void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
}
}
-void amdgpu_amdkfd_gpuvm_destroy_process_vm(struct kgd_dev *kgd, void *vm)
-{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
- struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
-
- if (WARN_ON(!kgd || !vm))
- return;
-
- pr_debug("Destroying process vm %p\n", vm);
-
- /* Release the VM context */
- amdgpu_vm_fini(adev, avm);
- kfree(vm);
-}
-
void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *vm)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
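
[Editor's note] The get_pte_flags() change above reworks the memory-type policy: local VRAM becomes CC when coherence is requested (RW otherwise), remote VRAM moves from NC to UC, and the A+A system-memory special case goes away. A compact user-space restatement of the resulting decision table; the enum values and prints are illustrative, not the driver's encodings.

#include <stdbool.h>
#include <stdio.h>

enum mtype { MTYPE_NC, MTYPE_UC, MTYPE_CC, MTYPE_RW };

/* Decision table after this hunk: local VRAM is CC when the allocation
 * asked for coherence and RW otherwise, any remote VRAM is UC, and
 * system memory is UC or NC purely on the coherence flag. */
static enum mtype pick_mtype(bool is_vram, bool local_bo, bool coherent)
{
	if (is_vram)
		return local_bo ? (coherent ? MTYPE_CC : MTYPE_RW)
				: MTYPE_UC;
	return coherent ? MTYPE_UC : MTYPE_NC;
}

int main(void)
{
	static const char * const names[] = { "NC", "UC", "CC", "RW" };

	printf("local coherent VRAM : %s\n", names[pick_mtype(true, true, true)]);
	printf("remote VRAM         : %s\n", names[pick_mtype(true, false, false)]);
	printf("system, noncoherent : %s\n", names[pick_mtype(false, false, false)]);
	return 0;
}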
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index e05648a8a145..494b2e1717d5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -1232,157 +1232,6 @@ int amdgpu_atombios_get_leakage_vddc_based_on_leakage_idx(struct amdgpu_device *
return amdgpu_atombios_get_max_vddc(adev, VOLTAGE_TYPE_VDDC, leakage_idx, voltage);
}
-int amdgpu_atombios_get_leakage_id_from_vbios(struct amdgpu_device *adev,
- u16 *leakage_id)
-{
- union set_voltage args;
- int index = GetIndexIntoMasterTable(COMMAND, SetVoltage);
- u8 frev, crev;
-
- if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index, &frev, &crev))
- return -EINVAL;
-
- switch (crev) {
- case 3:
- case 4:
- args.v3.ucVoltageType = 0;
- args.v3.ucVoltageMode = ATOM_GET_LEAKAGE_ID;
- args.v3.usVoltageLevel = 0;
-
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
-
- *leakage_id = le16_to_cpu(args.v3.usVoltageLevel);
- break;
- default:
- DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
- return -EINVAL;
- }
-
- return 0;
-}
-
-int amdgpu_atombios_get_leakage_vddc_based_on_leakage_params(struct amdgpu_device *adev,
- u16 *vddc, u16 *vddci,
- u16 virtual_voltage_id,
- u16 vbios_voltage_id)
-{
- int index = GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo);
- u8 frev, crev;
- u16 data_offset, size;
- int i, j;
- ATOM_ASIC_PROFILING_INFO_V2_1 *profile;
- u16 *leakage_bin, *vddc_id_buf, *vddc_buf, *vddci_id_buf, *vddci_buf;
-
- *vddc = 0;
- *vddci = 0;
-
- if (!amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, &size,
- &frev, &crev, &data_offset))
- return -EINVAL;
-
- profile = (ATOM_ASIC_PROFILING_INFO_V2_1 *)
- (adev->mode_info.atom_context->bios + data_offset);
-
- switch (frev) {
- case 1:
- return -EINVAL;
- case 2:
- switch (crev) {
- case 1:
- if (size < sizeof(ATOM_ASIC_PROFILING_INFO_V2_1))
- return -EINVAL;
- leakage_bin = (u16 *)
- (adev->mode_info.atom_context->bios + data_offset +
- le16_to_cpu(profile->usLeakageBinArrayOffset));
- vddc_id_buf = (u16 *)
- (adev->mode_info.atom_context->bios + data_offset +
- le16_to_cpu(profile->usElbVDDC_IdArrayOffset));
- vddc_buf = (u16 *)
- (adev->mode_info.atom_context->bios + data_offset +
- le16_to_cpu(profile->usElbVDDC_LevelArrayOffset));
- vddci_id_buf = (u16 *)
- (adev->mode_info.atom_context->bios + data_offset +
- le16_to_cpu(profile->usElbVDDCI_IdArrayOffset));
- vddci_buf = (u16 *)
- (adev->mode_info.atom_context->bios + data_offset +
- le16_to_cpu(profile->usElbVDDCI_LevelArrayOffset));
-
- if (profile->ucElbVDDC_Num > 0) {
- for (i = 0; i < profile->ucElbVDDC_Num; i++) {
- if (vddc_id_buf[i] == virtual_voltage_id) {
- for (j = 0; j < profile->ucLeakageBinNum; j++) {
- if (vbios_voltage_id <= leakage_bin[j]) {
- *vddc = vddc_buf[j * profile->ucElbVDDC_Num + i];
- break;
- }
- }
- break;
- }
- }
- }
- if (profile->ucElbVDDCI_Num > 0) {
- for (i = 0; i < profile->ucElbVDDCI_Num; i++) {
- if (vddci_id_buf[i] == virtual_voltage_id) {
- for (j = 0; j < profile->ucLeakageBinNum; j++) {
- if (vbios_voltage_id <= leakage_bin[j]) {
- *vddci = vddci_buf[j * profile->ucElbVDDCI_Num + i];
- break;
- }
- }
- break;
- }
- }
- }
- break;
- default:
- DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
- return -EINVAL;
- }
- break;
- default:
- DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
- return -EINVAL;
- }
-
- return 0;
-}
-
-union get_voltage_info {
- struct _GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_2 in;
- struct _GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2 evv_out;
-};
-
-int amdgpu_atombios_get_voltage_evv(struct amdgpu_device *adev,
- u16 virtual_voltage_id,
- u16 *voltage)
-{
- int index = GetIndexIntoMasterTable(COMMAND, GetVoltageInfo);
- u32 entry_id;
- u32 count = adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count;
- union get_voltage_info args;
-
- for (entry_id = 0; entry_id < count; entry_id++) {
- if (adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[entry_id].v ==
- virtual_voltage_id)
- break;
- }
-
- if (entry_id >= count)
- return -EINVAL;
-
- args.in.ucVoltageType = VOLTAGE_TYPE_VDDC;
- args.in.ucVoltageMode = ATOM_GET_VOLTAGE_EVV_VOLTAGE;
- args.in.usVoltageLevel = cpu_to_le16(virtual_voltage_id);
- args.in.ulSCLKFreq =
- cpu_to_le32(adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[entry_id].clk);
-
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
-
- *voltage = le16_to_cpu(args.evv_out.usVoltageLevel);
-
- return 0;
-}
-
union voltage_object_info {
struct _ATOM_VOLTAGE_OBJECT_INFO v1;
struct _ATOM_VOLTAGE_OBJECT_INFO_V2 v2;
@@ -1913,7 +1762,7 @@ static ssize_t amdgpu_atombios_get_vbios_version(struct device *dev,
struct amdgpu_device *adev = drm_to_adev(ddev);
struct atom_context *ctx = adev->mode_info.atom_context;
- return snprintf(buf, PAGE_SIZE, "%s\n", ctx->vbios_version);
+ return sysfs_emit(buf, "%s\n", ctx->vbios_version);
}
static DEVICE_ATTR(vbios_version, 0444, amdgpu_atombios_get_vbios_version,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
index 1321ec09c734..8cc0222dba19 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
@@ -168,18 +168,6 @@ int amdgpu_atombios_get_memory_pll_dividers(struct amdgpu_device *adev,
void amdgpu_atombios_set_engine_dram_timings(struct amdgpu_device *adev,
u32 eng_clock, u32 mem_clock);
-int amdgpu_atombios_get_leakage_id_from_vbios(struct amdgpu_device *adev,
- u16 *leakage_id);
-
-int amdgpu_atombios_get_leakage_vddc_based_on_leakage_params(struct amdgpu_device *adev,
- u16 *vddc, u16 *vddci,
- u16 virtual_voltage_id,
- u16 vbios_voltage_id);
-
-int amdgpu_atombios_get_voltage_evv(struct amdgpu_device *adev,
- u16 virtual_voltage_id,
- u16 *voltage);
-
bool
amdgpu_atombios_is_voltage_gpio(struct amdgpu_device *adev,
u8 voltage_type, u8 voltage_mode);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
index d9b35df33806..313517f7cf10 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
@@ -85,6 +85,8 @@ static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
bp.flags = 0;
bp.type = ttm_bo_type_kernel;
bp.resv = NULL;
+ bp.bo_ptr_size = sizeof(struct amdgpu_bo);
+
n = AMDGPU_BENCHMARK_ITERATIONS;
r = amdgpu_bo_create(adev, &bp, &sobj);
if (r) {
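
[Editor's note] Several hunks in this series (benchmark and amdkfd above, amdgpu_object.c per the diffstat) now fill in bp.bo_ptr_size before calling the BO constructors. The parameter lets one allocator serve wrapper structures that embed struct amdgpu_bo as their first member, as amdgpu_bo_create_user does with struct amdgpu_bo_user. A user-space sketch of the idiom; the types and sizes here are stand-ins.

#include <stdio.h>
#include <stdlib.h>

struct bo {
	size_t size;
};

struct bo_user {
	struct bo bo;			/* base must be the first member */
	unsigned int tiling_flags;	/* extra per-user-BO state */
};

/* One constructor allocates whatever wrapper size the caller asked for;
 * the embedded base struct sits at offset 0, so the downcast back to the
 * wrapper stays valid.  The kernel allocates with kzalloc here. */
static struct bo *bo_create(size_t bo_ptr_size, size_t bytes)
{
	struct bo *b = calloc(1, bo_ptr_size);

	if (b)
		b->size = bytes;
	return b;
}

int main(void)
{
	struct bo *kbo = bo_create(sizeof(struct bo), 4096);
	struct bo_user *ubo =
		(struct bo_user *)bo_create(sizeof(struct bo_user), 4096);

	if (!kbo || !ubo)
		return 1;
	printf("%zu %zu\n", kbo->size, ubo->bo.size);
	free(kbo);
	free(ubo);
	return 0;
}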
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 0f82c5d21237..7d3b54615147 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -65,6 +65,7 @@
#include "amdgpu_ras.h"
#include "amdgpu_pmu.h"
#include "amdgpu_fru_eeprom.h"
+#include "amdgpu_reset.h"
#include <linux/suspend.h>
#include <drm/task_barrier.h>
@@ -137,7 +138,7 @@ static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
struct amdgpu_device *adev = drm_to_adev(ddev);
uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev);
- return snprintf(buf, PAGE_SIZE, "%llu\n", cnt);
+ return sysfs_emit(buf, "%llu\n", cnt);
}
static DEVICE_ATTR(pcie_replay_count, S_IRUGO,
@@ -161,7 +162,7 @@ static ssize_t amdgpu_device_get_product_name(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
- return snprintf(buf, PAGE_SIZE, "%s\n", adev->product_name);
+ return sysfs_emit(buf, "%s\n", adev->product_name);
}
static DEVICE_ATTR(product_name, S_IRUGO,
@@ -183,7 +184,7 @@ static ssize_t amdgpu_device_get_product_number(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
- return snprintf(buf, PAGE_SIZE, "%s\n", adev->product_number);
+ return sysfs_emit(buf, "%s\n", adev->product_number);
}
static DEVICE_ATTR(product_number, S_IRUGO,
@@ -205,25 +206,25 @@ static ssize_t amdgpu_device_get_serial_number(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
- return snprintf(buf, PAGE_SIZE, "%s\n", adev->serial);
+ return sysfs_emit(buf, "%s\n", adev->serial);
}
static DEVICE_ATTR(serial_number, S_IRUGO,
amdgpu_device_get_serial_number, NULL);
/**
- * amdgpu_device_supports_atpx - Is the device a dGPU with HG/PX power control
+ * amdgpu_device_supports_px - Is the device a dGPU with ATPX power control
*
* @dev: drm_device pointer
*
- * Returns true if the device is a dGPU with HG/PX power control,
+ * Returns true if the device is a dGPU with ATPX power control,
* otherwise return false.
*/
-bool amdgpu_device_supports_atpx(struct drm_device *dev)
+bool amdgpu_device_supports_px(struct drm_device *dev)
{
struct amdgpu_device *adev = drm_to_adev(dev);
- if (adev->flags & AMD_IS_PX)
+ if ((adev->flags & AMD_IS_PX) && !amdgpu_is_atpx_hybrid())
return true;
return false;
}
@@ -233,14 +234,15 @@ bool amdgpu_device_supports_atpx(struct drm_device *dev)
*
* @dev: drm_device pointer
*
- * Returns true if the device is a dGPU with HG/PX power control,
+ * Returns true if the device is a dGPU with ACPI power control,
* otherwise return false.
*/
bool amdgpu_device_supports_boco(struct drm_device *dev)
{
struct amdgpu_device *adev = drm_to_adev(dev);
- if (adev->has_pr3)
+ if (adev->has_pr3 ||
+ ((adev->flags & AMD_IS_PX) && amdgpu_is_atpx_hybrid()))
return true;
return false;
}
@@ -326,6 +328,35 @@ void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
/*
* register access helper functions.
*/
+
+/* Check if hw access should be skipped because of hotplug or device error */
+bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev)
+{
+ if (adev->in_pci_err_recovery)
+ return true;
+
+#ifdef CONFIG_LOCKDEP
+ /*
+ * This is a bit complicated to understand, so worth a comment. What we assert
+ * here is that the GPU reset is not running on another thread in parallel.
+ *
+ * For this we trylock the read side of the reset semaphore, if that succeeds
+ * we know that the reset is not running in paralell.
+ *
+ * If the trylock fails we assert that we are either already holding the read
+ * side of the lock or are the reset thread itself and hold the write side of
+ * the lock.
+ */
+ if (in_task()) {
+ if (down_read_trylock(&adev->reset_sem))
+ up_read(&adev->reset_sem);
+ else
+ lockdep_assert_held(&adev->reset_sem);
+ }
+#endif
+ return false;
+}
+
/**
* amdgpu_device_rreg - read a memory mapped IO or indirect register
*
@@ -340,7 +371,7 @@ uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
{
uint32_t ret;
- if (adev->in_pci_err_recovery)
+ if (amdgpu_device_skip_hw_access(adev))
return 0;
if ((reg * 4) < adev->rmmio_size) {
@@ -377,7 +408,7 @@ uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
*/
uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
{
- if (adev->in_pci_err_recovery)
+ if (amdgpu_device_skip_hw_access(adev))
return 0;
if (offset < adev->rmmio_size)
@@ -402,7 +433,7 @@ uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
*/
void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
{
- if (adev->in_pci_err_recovery)
+ if (amdgpu_device_skip_hw_access(adev))
return;
if (offset < adev->rmmio_size)
@@ -425,7 +456,7 @@ void amdgpu_device_wreg(struct amdgpu_device *adev,
uint32_t reg, uint32_t v,
uint32_t acc_flags)
{
- if (adev->in_pci_err_recovery)
+ if (amdgpu_device_skip_hw_access(adev))
return;
if ((reg * 4) < adev->rmmio_size) {
@@ -452,14 +483,14 @@ void amdgpu_device_wreg(struct amdgpu_device *adev,
void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
uint32_t reg, uint32_t v)
{
- if (adev->in_pci_err_recovery)
+ if (amdgpu_device_skip_hw_access(adev))
return;
if (amdgpu_sriov_fullaccess(adev) &&
adev->gfx.rlc.funcs &&
adev->gfx.rlc.funcs->is_rlcg_access_range) {
if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
- return adev->gfx.rlc.funcs->rlcg_wreg(adev, reg, v);
+ return adev->gfx.rlc.funcs->rlcg_wreg(adev, reg, v, 0);
} else {
writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
}
@@ -476,7 +507,7 @@ void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
*/
u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
{
- if (adev->in_pci_err_recovery)
+ if (amdgpu_device_skip_hw_access(adev))
return 0;
if (index < adev->doorbell.num_doorbells) {
@@ -499,7 +530,7 @@ u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
*/
void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
{
- if (adev->in_pci_err_recovery)
+ if (amdgpu_device_skip_hw_access(adev))
return;
if (index < adev->doorbell.num_doorbells) {
@@ -520,7 +551,7 @@ void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
*/
u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
{
- if (adev->in_pci_err_recovery)
+ if (amdgpu_device_skip_hw_access(adev))
return 0;
if (index < adev->doorbell.num_doorbells) {
@@ -543,7 +574,7 @@ u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
*/
void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
{
- if (adev->in_pci_err_recovery)
+ if (amdgpu_device_skip_hw_access(adev))
return;
if (index < adev->doorbell.num_doorbells) {
@@ -1391,7 +1422,7 @@ static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
struct drm_device *dev = pci_get_drvdata(pdev);
int r;
- if (amdgpu_device_supports_atpx(dev) && state == VGA_SWITCHEROO_OFF)
+ if (amdgpu_device_supports_px(dev) && state == VGA_SWITCHEROO_OFF)
return;
if (state == VGA_SWITCHEROO_ON) {
@@ -2049,6 +2080,11 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
return r;
}
+
+ /*get pf2vf msg info at it's earliest time*/
+ if (amdgpu_sriov_vf(adev))
+ amdgpu_virt_init_data_exchange(adev);
+
}
}
@@ -2331,8 +2367,8 @@ static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
* Returns 0 on success, negative error code on failure.
*/
-static int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
- enum amd_clockgating_state state)
+int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
+ enum amd_clockgating_state state)
{
int i, j, r;
@@ -2343,6 +2379,10 @@ static int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
if (!adev->ip_blocks[i].status.late_initialized)
continue;
+ /* skip CG for GFX on S0ix */
+ if (adev->in_s0ix &&
+ adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)
+ continue;
/* skip CG for VCE/UVD, it's handled specially */
if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
@@ -2363,7 +2403,8 @@ static int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
return 0;
}
-static int amdgpu_device_set_pg_state(struct amdgpu_device *adev, enum amd_powergating_state state)
+int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
+ enum amd_powergating_state state)
{
int i, j, r;
@@ -2374,6 +2415,10 @@ static int amdgpu_device_set_pg_state(struct amdgpu_device *adev, enum amd_power
i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
if (!adev->ip_blocks[i].status.late_initialized)
continue;
+ /* skip PG for GFX on S0ix */
+ if (adev->in_s0ix &&
+ adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)
+ continue;
/* skip CG for VCE/UVD, it's handled specially */
if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
@@ -2655,11 +2700,8 @@ static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
{
int i, r;
- if (adev->in_poweroff_reboot_com ||
- !amdgpu_acpi_is_s0ix_supported(adev) || amdgpu_in_reset(adev)) {
- amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
- amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
- }
+ amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
+ amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
if (!adev->ip_blocks[i].status.valid)
@@ -2699,6 +2741,9 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
{
int i, r;
+ if (adev->in_s0ix)
+ amdgpu_gfx_state_change_set(adev, sGpuChangeState_D3Entry);
+
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
if (!adev->ip_blocks[i].status.valid)
continue;
@@ -2721,6 +2766,17 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
adev->ip_blocks[i].status.hw = false;
continue;
}
+
+ /* skip suspend of gfx and psp for S0ix
+ * gfx is in gfxoff state, so on resume it will exit gfxoff just
+ * like at runtime. PSP is also part of the always on hardware
+ * so no need to suspend it.
+ */
+ if (adev->in_s0ix &&
+ (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP ||
+ adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX))
+ continue;
+
/* XXX handle errors */
r = adev->ip_blocks[i].version->funcs->suspend(adev);
/* XXX handle errors */
@@ -3086,8 +3142,9 @@ static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
if (adev->asic_reset_res)
goto fail;
- if (adev->mmhub.funcs && adev->mmhub.funcs->reset_ras_error_count)
- adev->mmhub.funcs->reset_ras_error_count(adev);
+ if (adev->mmhub.ras_funcs &&
+ adev->mmhub.ras_funcs->reset_ras_error_count)
+ adev->mmhub.ras_funcs->reset_ras_error_count(adev);
} else {
task_barrier_full(&hive->tb);
@@ -3197,7 +3254,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
struct drm_device *ddev = adev_to_drm(adev);
struct pci_dev *pdev = adev->pdev;
int r, i;
- bool atpx = false;
+ bool px = false;
u32 max_MBps;
adev->shutdown = false;
@@ -3353,29 +3410,14 @@ int amdgpu_device_init(struct amdgpu_device *adev,
/* doorbell bar mapping and doorbell index init*/
amdgpu_device_doorbell_init(adev);
- /* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
- /* this will fail for cards that aren't VGA class devices, just
- * ignore it */
- if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
- vga_client_register(adev->pdev, adev, NULL, amdgpu_device_vga_set_decode);
-
- if (amdgpu_device_supports_atpx(ddev))
- atpx = true;
- if (amdgpu_has_atpx() &&
- (amdgpu_is_atpx_hybrid() ||
- amdgpu_has_atpx_dgpu_power_cntl()) &&
- !pci_is_thunderbolt_attached(adev->pdev))
- vga_switcheroo_register_client(adev->pdev,
- &amdgpu_switcheroo_ops, atpx);
- if (atpx)
- vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
-
if (amdgpu_emu_mode == 1) {
/* post the asic on emulation mode */
emu_soc_asic_init(adev);
goto fence_driver_init;
}
+ amdgpu_reset_init(adev);
+
/* detect if we are with an SRIOV vbios */
amdgpu_device_detect_sriov_bios(adev);
@@ -3564,6 +3606,19 @@ fence_driver_init:
if (amdgpu_device_cache_pci_state(adev->pdev))
pci_restore_state(pdev);
+	/* if we have more than one VGA card, then disable the amdgpu VGA
+	 * resources. This will fail for cards that aren't VGA class devices,
+	 * so just ignore it.
+	 */
+ if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
+ vga_client_register(adev->pdev, adev, NULL, amdgpu_device_vga_set_decode);
+
+ if (amdgpu_device_supports_px(ddev)) {
+ px = true;
+ vga_switcheroo_register_client(adev->pdev,
+ &amdgpu_switcheroo_ops, px);
+ vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
+ }
+
if (adev->gmc.xgmi.pending_reset)
queue_delayed_work(system_wq, &mgpu_info.delayed_reset_work,
msecs_to_jiffies(AMDGPU_RESUME_MS));
@@ -3575,8 +3630,6 @@ release_ras_con:
failed:
amdgpu_vf_error_trans_all(adev);
- if (atpx)
- vga_switcheroo_fini_domain_pm_ops(adev->dev);
failed_unmap:
iounmap(adev->rmmio);
@@ -3626,6 +3679,9 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
release_firmware(adev->firmware.gpu_info_fw);
adev->firmware.gpu_info_fw = NULL;
adev->accel_working = false;
+
+ amdgpu_reset_fini(adev);
+
/* free i2c buses */
if (!amdgpu_device_has_dc_support(adev))
amdgpu_i2c_fini(adev);
@@ -3635,13 +3691,10 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
kfree(adev->bios);
adev->bios = NULL;
- if (amdgpu_has_atpx() &&
- (amdgpu_is_atpx_hybrid() ||
- amdgpu_has_atpx_dgpu_power_cntl()) &&
- !pci_is_thunderbolt_attached(adev->pdev))
+ if (amdgpu_device_supports_px(adev_to_drm(adev))) {
vga_switcheroo_unregister_client(adev->pdev);
- if (amdgpu_device_supports_atpx(adev_to_drm(adev)))
vga_switcheroo_fini_domain_pm_ops(adev->dev);
+ }
if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
vga_client_register(adev->pdev, NULL, NULL, NULL);
iounmap(adev->rmmio);
@@ -3674,14 +3727,9 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
*/
int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
{
- struct amdgpu_device *adev;
- struct drm_crtc *crtc;
- struct drm_connector *connector;
- struct drm_connector_list_iter iter;
+ struct amdgpu_device *adev = drm_to_adev(dev);
int r;
- adev = drm_to_adev(dev);
-
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
@@ -3693,61 +3741,19 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
cancel_delayed_work_sync(&adev->delayed_init_work);
- if (!amdgpu_device_has_dc_support(adev)) {
- /* turn off display hw */
- drm_modeset_lock_all(dev);
- drm_connector_list_iter_begin(dev, &iter);
- drm_for_each_connector_iter(connector, &iter)
- drm_helper_connector_dpms(connector,
- DRM_MODE_DPMS_OFF);
- drm_connector_list_iter_end(&iter);
- drm_modeset_unlock_all(dev);
- /* unpin the front buffers and cursors */
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct drm_framebuffer *fb = crtc->primary->fb;
- struct amdgpu_bo *robj;
-
- if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
- struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
- r = amdgpu_bo_reserve(aobj, true);
- if (r == 0) {
- amdgpu_bo_unpin(aobj);
- amdgpu_bo_unreserve(aobj);
- }
- }
-
- if (fb == NULL || fb->obj[0] == NULL) {
- continue;
- }
- robj = gem_to_amdgpu_bo(fb->obj[0]);
- /* don't unpin kernel fb objects */
- if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
- r = amdgpu_bo_reserve(robj, true);
- if (r == 0) {
- amdgpu_bo_unpin(robj);
- amdgpu_bo_unreserve(robj);
- }
- }
- }
- }
-
amdgpu_ras_suspend(adev);
r = amdgpu_device_ip_suspend_phase1(adev);
- amdgpu_amdkfd_suspend(adev, adev->in_runpm);
+ if (!adev->in_s0ix)
+ amdgpu_amdkfd_suspend(adev, adev->in_runpm);
/* evict vram memory */
amdgpu_bo_evict_vram(adev);
amdgpu_fence_driver_suspend(adev);
- if (adev->in_poweroff_reboot_com ||
- !amdgpu_acpi_is_s0ix_supported(adev) || amdgpu_in_reset(adev))
- r = amdgpu_device_ip_suspend_phase2(adev);
- else
- amdgpu_gfx_state_change_set(adev, sGpuChangeState_D3Entry);
+ r = amdgpu_device_ip_suspend_phase2(adev);
/* evict remaining vram memory
* This second call to evict vram is to evict the gart page table
* using the CPU.
@@ -3769,16 +3775,13 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
*/
int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
{
- struct drm_connector *connector;
- struct drm_connector_list_iter iter;
struct amdgpu_device *adev = drm_to_adev(dev);
- struct drm_crtc *crtc;
int r = 0;
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
- if (amdgpu_acpi_is_s0ix_supported(adev))
+ if (adev->in_s0ix)
amdgpu_gfx_state_change_set(adev, sGpuChangeState_D0Entry);
/* post card */
@@ -3803,50 +3806,17 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
queue_delayed_work(system_wq, &adev->delayed_init_work,
msecs_to_jiffies(AMDGPU_RESUME_MS));
- if (!amdgpu_device_has_dc_support(adev)) {
- /* pin cursors */
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-
- if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
- struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
- r = amdgpu_bo_reserve(aobj, true);
- if (r == 0) {
- r = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
- if (r != 0)
- dev_err(adev->dev, "Failed to pin cursor BO (%d)\n", r);
- amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
- amdgpu_bo_unreserve(aobj);
- }
- }
- }
+ if (!adev->in_s0ix) {
+ r = amdgpu_amdkfd_resume(adev, adev->in_runpm);
+ if (r)
+ return r;
}
- r = amdgpu_amdkfd_resume(adev, adev->in_runpm);
- if (r)
- return r;
/* Make sure IB tests flushed */
flush_delayed_work(&adev->delayed_init_work);
- /* blat the mode back in */
- if (fbcon) {
- if (!amdgpu_device_has_dc_support(adev)) {
- /* pre DCE11 */
- drm_helper_resume_force_mode(dev);
-
- /* turn on display hw */
- drm_modeset_lock_all(dev);
-
- drm_connector_list_iter_begin(dev, &iter);
- drm_for_each_connector_iter(connector, &iter)
- drm_helper_connector_dpms(connector,
- DRM_MODE_DPMS_ON);
- drm_connector_list_iter_end(&iter);
-
- drm_modeset_unlock_all(dev);
- }
+ if (fbcon)
amdgpu_fbdev_set_suspend(adev, 0);
- }
drm_kms_helper_poll_enable(dev);
@@ -4144,11 +4114,11 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
amdgpu_amdkfd_post_reset(adev);
error:
- amdgpu_virt_release_full_gpu(adev, true);
if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
amdgpu_inc_vram_lost(adev);
r = amdgpu_device_recover_vram(adev);
}
+ amdgpu_virt_release_full_gpu(adev, true);
return r;
}
@@ -4225,6 +4195,8 @@ bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
case CHIP_SIENNA_CICHLID:
case CHIP_NAVY_FLOUNDER:
case CHIP_DIMGREY_CAVEFISH:
+ case CHIP_VANGOGH:
+ case CHIP_ALDEBARAN:
break;
default:
goto disabled;
@@ -4279,11 +4251,15 @@ int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
}
int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
- struct amdgpu_job *job,
- bool *need_full_reset_arg)
+ struct amdgpu_reset_context *reset_context)
{
int i, r = 0;
- bool need_full_reset = *need_full_reset_arg;
+ struct amdgpu_job *job = NULL;
+ bool need_full_reset =
+ test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
+
+ if (reset_context->reset_req_dev == adev)
+ job = reset_context->job;
/* no need to dump if device is not in good state during probe period */
if (!adev->gmc.xgmi.pending_reset)
@@ -4308,6 +4284,13 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
if(job)
drm_sched_increase_karma(&job->base);
+ r = amdgpu_reset_prepare_hwcontext(adev, reset_context);
+ /* If reset handler not implemented, continue; otherwise return */
+ if (r == -ENOSYS)
+ r = 0;
+ else
+ return r;
+
/* Don't suspend on bare metal if we are not going to HW reset the ASIC */
if (!amdgpu_sriov_vf(adev)) {
@@ -4326,22 +4309,38 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
if (need_full_reset)
r = amdgpu_device_ip_suspend(adev);
-
- *need_full_reset_arg = need_full_reset;
+ if (need_full_reset)
+ set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
+ else
+ clear_bit(AMDGPU_NEED_FULL_RESET,
+ &reset_context->flags);
}
return r;
}
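
The refactored reset paths now communicate through bit flags on reset_context->flags instead of a bool out-parameter. A small userspace model of that pattern, using plain bit operations in place of the kernel's set_bit()/clear_bit()/test_bit() (the names below are illustrative):

#include <stdbool.h>
#include <stdio.h>

#define NEED_FULL_RESET 0
#define SKIP_HW_RESET   1

struct reset_context { unsigned long flags; };

static void ctx_set(struct reset_context *c, int bit)   { c->flags |= 1UL << bit; }
static void ctx_clear(struct reset_context *c, int bit) { c->flags &= ~(1UL << bit); }
static bool ctx_test(const struct reset_context *c, int bit) { return c->flags & (1UL << bit); }

int main(void)
{
	struct reset_context ctx = { 0 };

	ctx_set(&ctx, NEED_FULL_RESET);
	if (ctx_test(&ctx, NEED_FULL_RESET))
		printf("full reset requested\n");
	ctx_clear(&ctx, NEED_FULL_RESET);
	printf("flags now: %lu\n", ctx.flags); /* 0 */
	return 0;
}
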
-int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
- struct list_head *device_list_handle,
- bool *need_full_reset_arg,
- bool skip_hw_reset)
+int amdgpu_do_asic_reset(struct list_head *device_list_handle,
+ struct amdgpu_reset_context *reset_context)
{
struct amdgpu_device *tmp_adev = NULL;
- bool need_full_reset = *need_full_reset_arg, vram_lost = false;
+ bool need_full_reset, skip_hw_reset, vram_lost = false;
int r = 0;
+ /* Try reset handler method first */
+ tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
+ reset_list);
+ r = amdgpu_reset_perform_reset(tmp_adev, reset_context);
+ /* If reset handler not implemented, continue; otherwise return */
+ if (r == -ENOSYS)
+ r = 0;
+ else
+ return r;
+
+ /* Reset handler not implemented, use the default method */
+ need_full_reset =
+ test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
+ skip_hw_reset = test_bit(AMDGPU_SKIP_HW_RESET, &reset_context->flags);
+
/*
* ASIC reset has to be done on all XGMI hive nodes ASAP
* to allow proper links negotiation in FW (within 1 sec)
@@ -4378,9 +4377,9 @@ int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
if (!r && amdgpu_ras_intr_triggered()) {
list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
- if (tmp_adev->mmhub.funcs &&
- tmp_adev->mmhub.funcs->reset_ras_error_count)
- tmp_adev->mmhub.funcs->reset_ras_error_count(tmp_adev);
+ if (tmp_adev->mmhub.ras_funcs &&
+ tmp_adev->mmhub.ras_funcs->reset_ras_error_count)
+ tmp_adev->mmhub.ras_funcs->reset_ras_error_count(tmp_adev);
}
amdgpu_ras_intr_cleared();
@@ -4425,7 +4424,8 @@ int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
*/
amdgpu_register_gpu_instance(tmp_adev);
- if (!hive && tmp_adev->gmc.xgmi.num_physical_nodes > 1)
+ if (!reset_context->hive &&
+ tmp_adev->gmc.xgmi.num_physical_nodes > 1)
amdgpu_xgmi_add_device(tmp_adev);
r = amdgpu_device_ip_late_init(tmp_adev);
@@ -4453,8 +4453,10 @@ int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
}
/* Update PSP FW topology after reset */
- if (hive && tmp_adev->gmc.xgmi.num_physical_nodes > 1)
- r = amdgpu_xgmi_update_topology(hive, tmp_adev);
+ if (reset_context->hive &&
+ tmp_adev->gmc.xgmi.num_physical_nodes > 1)
+ r = amdgpu_xgmi_update_topology(
+ reset_context->hive, tmp_adev);
}
}
@@ -4478,7 +4480,10 @@ out:
}
end:
- *need_full_reset_arg = need_full_reset;
+ if (need_full_reset)
+ set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
+ else
+ clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
return r;
}
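
Both amdgpu_device_pre_asic_reset() and amdgpu_do_asic_reset() now use the same dispatch idiom: try an ASIC-specific reset handler first and fall through to the generic path only when the handler returns -ENOSYS. A hedged sketch of that idiom (the handler names are hypothetical):

#include <errno.h>
#include <stdio.h>

/* Hypothetical ASIC-specific handler: -ENOSYS means "not implemented". */
static int asic_specific_reset(void) { return -ENOSYS; }

static int generic_reset(void) { printf("generic reset path\n"); return 0; }

static int perform_reset(void)
{
	int r = asic_specific_reset();

	/* If the reset handler is not implemented, continue; otherwise return. */
	if (r != -ENOSYS)
		return r;
	return generic_reset();
}

int main(void) { return perform_reset(); }
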
@@ -4615,6 +4620,74 @@ static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
return 0;
}
+void amdgpu_device_recheck_guilty_jobs(
+ struct amdgpu_device *adev, struct list_head *device_list_handle,
+ struct amdgpu_reset_context *reset_context)
+{
+ int i, r = 0;
+
+ for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+ struct amdgpu_ring *ring = adev->rings[i];
+ int ret = 0;
+ struct drm_sched_job *s_job;
+
+ if (!ring || !ring->sched.thread)
+ continue;
+
+ s_job = list_first_entry_or_null(&ring->sched.pending_list,
+ struct drm_sched_job, list);
+ if (s_job == NULL)
+ continue;
+
+		/* clear the job's guilty flag and rely on the following step to decide the real one */
+ drm_sched_reset_karma(s_job);
+ drm_sched_resubmit_jobs_ext(&ring->sched, 1);
+
+ ret = dma_fence_wait_timeout(s_job->s_fence->parent, false, ring->sched.timeout);
+ if (ret == 0) { /* timeout */
+ DRM_ERROR("Found the real bad job! ring:%s, job_id:%llx\n",
+ ring->sched.name, s_job->id);
+
+ /* set guilty */
+ drm_sched_increase_karma(s_job);
+retry:
+ /* do hw reset */
+ if (amdgpu_sriov_vf(adev)) {
+ amdgpu_virt_fini_data_exchange(adev);
+ r = amdgpu_device_reset_sriov(adev, false);
+ if (r)
+ adev->asic_reset_res = r;
+ } else {
+ clear_bit(AMDGPU_SKIP_HW_RESET,
+ &reset_context->flags);
+ r = amdgpu_do_asic_reset(device_list_handle,
+ reset_context);
+ if (r && r == -EAGAIN)
+ goto retry;
+ }
+
+			/*
+			 * bump the reset counter so that the following
+			 * resubmitted jobs can flush the VMID
+			 */
+ atomic_inc(&adev->gpu_reset_counter);
+ continue;
+ }
+
+ /* got the hw fence, signal finished fence */
+ atomic_dec(ring->sched.score);
+ dma_fence_get(&s_job->s_fence->finished);
+ dma_fence_signal(&s_job->s_fence->finished);
+ dma_fence_put(&s_job->s_fence->finished);
+
+ /* remove node from list and free the job */
+ spin_lock(&ring->sched.job_list_lock);
+ list_del_init(&s_job->list);
+ spin_unlock(&ring->sched.job_list_lock);
+ ring->sched.ops->free_job(s_job);
+ }
+}
+
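
A toy model of the recheck loop above: pending jobs from a hung ring are resubmitted one at a time, and the first one that fails to signal within the timeout is marked as the real culprit. The job array and hang flag below are fabricated purely for illustration:

#include <stdbool.h>
#include <stdio.h>

struct job { const char *name; bool hangs; };

/* Simulated synchronous resubmit-and-wait: returns false on "timeout". */
static bool resubmit_and_wait(const struct job *j)
{
	return !j->hangs;
}

int main(void)
{
	struct job pending[] = {
		{ "gfx_job_1", false },
		{ "compute_job_2", true },  /* the real guilty job */
		{ "gfx_job_3", false },
	};

	for (unsigned i = 0; i < sizeof(pending) / sizeof(pending[0]); i++) {
		if (!resubmit_and_wait(&pending[i])) {
			printf("found the real bad job: %s\n", pending[i].name);
			/* kernel: drm_sched_increase_karma() + HW reset here */
			continue;
		}
		printf("%s signaled, freeing it\n", pending[i].name);
	}
	return 0;
}
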
/**
* amdgpu_device_gpu_recover - reset the asic and recover scheduler
*
@@ -4630,13 +4703,16 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
struct amdgpu_job *job)
{
struct list_head device_list, *device_list_handle = NULL;
- bool need_full_reset = false;
bool job_signaled = false;
struct amdgpu_hive_info *hive = NULL;
struct amdgpu_device *tmp_adev = NULL;
int i, r = 0;
bool need_emergency_restart = false;
bool audio_suspended = false;
+ int tmp_vram_lost_counter;
+ struct amdgpu_reset_context reset_context;
+
+ memset(&reset_context, 0, sizeof(reset_context));
/*
* Special case: RAS triggered and full reset isn't supported
@@ -4677,6 +4753,12 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
mutex_lock(&hive->hive_lock);
}
+ reset_context.method = AMD_RESET_METHOD_NONE;
+ reset_context.reset_req_dev = adev;
+ reset_context.job = job;
+ reset_context.hive = hive;
+ clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
+
/*
* lock the device before we try to operate the linked list
* if didn't get the device lock, don't touch the linked list since
@@ -4777,9 +4859,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
retry: /* Rest of adevs pre asic reset from XGMI hive. */
list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
- r = amdgpu_device_pre_asic_reset(tmp_adev,
- (tmp_adev == adev) ? job : NULL,
- &need_full_reset);
+ r = amdgpu_device_pre_asic_reset(tmp_adev, &reset_context);
/*TODO Should we stop ?*/
if (r) {
dev_err(tmp_adev->dev, "GPU pre asic reset failed with err, %d for drm dev, %s ",
@@ -4788,6 +4868,7 @@ retry: /* Rest of adevs pre asic reset from XGMI hive. */
}
}
+ tmp_vram_lost_counter = atomic_read(&((adev)->vram_lost_counter));
/* Actual ASIC resets if needed.*/
/* TODO Implement XGMI hive reset logic for SRIOV */
if (amdgpu_sriov_vf(adev)) {
@@ -4795,7 +4876,7 @@ retry: /* Rest of adevs pre asic reset from XGMI hive. */
if (r)
adev->asic_reset_res = r;
} else {
- r = amdgpu_do_asic_reset(hive, device_list_handle, &need_full_reset, false);
+ r = amdgpu_do_asic_reset(device_list_handle, &reset_context);
if (r && r == -EAGAIN)
goto retry;
}
@@ -4805,6 +4886,18 @@ skip_hw_reset:
/* Post ASIC reset for all devs .*/
list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
+		/*
+		 * Sometimes a later bad compute job can block a good gfx job
+		 * because the gfx and compute rings share internal GC hardware.
+		 * Add an additional guilty-job recheck step to find the real
+		 * culprit: each job is resubmitted synchronously and we wait
+		 * for it to signal. A job that hits the timeout is identified
+		 * as the real guilty job.
+		 */
+ if (amdgpu_gpu_recovery == 2 &&
+ !(tmp_vram_lost_counter < atomic_read(&adev->vram_lost_counter)))
+ amdgpu_device_recheck_guilty_jobs(
+ tmp_adev, device_list_handle, &reset_context);
+
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = tmp_adev->rings[i];
@@ -5148,12 +5241,14 @@ pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
struct drm_device *dev = pci_get_drvdata(pdev);
struct amdgpu_device *adev = drm_to_adev(dev);
int r, i;
- bool need_full_reset = true;
+ struct amdgpu_reset_context reset_context;
u32 memsize;
struct list_head device_list;
DRM_INFO("PCI error: slot reset callback!!\n");
+ memset(&reset_context, 0, sizeof(reset_context));
+
INIT_LIST_HEAD(&device_list);
list_add_tail(&adev->reset_list, &device_list);
@@ -5176,13 +5271,18 @@ pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
goto out;
}
+ reset_context.method = AMD_RESET_METHOD_NONE;
+ reset_context.reset_req_dev = adev;
+ set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
+ set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);
+
adev->in_pci_err_recovery = true;
- r = amdgpu_device_pre_asic_reset(adev, NULL, &need_full_reset);
+ r = amdgpu_device_pre_asic_reset(adev, &reset_context);
adev->in_pci_err_recovery = false;
if (r)
goto out;
- r = amdgpu_do_asic_reset(NULL, &device_list, &need_full_reset, true);
+ r = amdgpu_do_asic_reset(&device_list, &reset_context);
out:
if (!r) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index b05301e1815c..8a1fb8b6606e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -837,6 +837,174 @@ static int convert_tiling_flags_to_modifier(struct amdgpu_framebuffer *afb)
return 0;
}
+static void get_block_dimensions(unsigned int block_log2, unsigned int cpp,
+ unsigned int *width, unsigned int *height)
+{
+ unsigned int cpp_log2 = ilog2(cpp);
+ unsigned int pixel_log2 = block_log2 - cpp_log2;
+ unsigned int width_log2 = (pixel_log2 + 1) / 2;
+ unsigned int height_log2 = pixel_log2 - width_log2;
+
+ *width = 1 << width_log2;
+ *height = 1 << height_log2;
+}
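
get_block_dimensions() splits the pixels-per-block exponent into width and height exponents, giving the width the extra bit when the exponent is odd. A standalone check of that arithmetic: a 64 KiB block (block_log2 = 16) of 32-bit pixels (cpp = 4) yields a 128x128-pixel block:

#include <stdio.h>

static unsigned ilog2u(unsigned v) { unsigned l = 0; while (v >>= 1) l++; return l; }

static void get_block_dimensions(unsigned block_log2, unsigned cpp,
				 unsigned *width, unsigned *height)
{
	unsigned pixel_log2 = block_log2 - ilog2u(cpp);
	unsigned width_log2 = (pixel_log2 + 1) / 2; /* width gets the extra bit */
	unsigned height_log2 = pixel_log2 - width_log2;

	*width = 1u << width_log2;
	*height = 1u << height_log2;
}

int main(void)
{
	unsigned w, h;

	get_block_dimensions(16, 4, &w, &h); /* 64 KiB block, 32bpp */
	printf("64K/32bpp: %ux%u\n", w, h);  /* 128x128 */
	get_block_dimensions(12, 2, &w, &h); /* 4 KiB block, 16bpp */
	printf("4K/16bpp: %ux%u\n", w, h);   /* 64x32 */
	return 0;
}
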
+
+static unsigned int get_dcc_block_size(uint64_t modifier, bool rb_aligned,
+ bool pipe_aligned)
+{
+ unsigned int ver = AMD_FMT_MOD_GET(TILE_VERSION, modifier);
+
+ switch (ver) {
+ case AMD_FMT_MOD_TILE_VER_GFX9: {
+ /*
+ * TODO: for pipe aligned we may need to check the alignment of the
+ * total size of the surface, which may need to be bigger than the
+ * natural alignment due to some HW workarounds
+ */
+ return max(10 + (rb_aligned ? (int)AMD_FMT_MOD_GET(RB, modifier) : 0), 12);
+ }
+ case AMD_FMT_MOD_TILE_VER_GFX10:
+ case AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS: {
+ int pipes_log2 = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
+
+ if (ver == AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS && pipes_log2 > 1 &&
+ AMD_FMT_MOD_GET(PACKERS, modifier) == pipes_log2)
+ ++pipes_log2;
+
+ return max(8 + (pipe_aligned ? pipes_log2 : 0), 12);
+ }
+ default:
+ return 0;
+ }
+}
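
Plugging numbers into the GFX9 branch above: with rb_aligned and RB = 2 the block size exponent is max(10 + 2, 12) = 12, i.e. a 4 KiB DCC block; only RB counts above 2 push the block past the 4 KiB floor. A quick check of that arithmetic:

#include <stdio.h>

static int max_i(int a, int b) { return a > b ? a : b; }

/* GFX9 branch of get_dcc_block_size(): 10 + RB when RB-aligned, floor 12. */
static unsigned gfx9_dcc_block_log2(int rb_aligned, int rb)
{
	return max_i(10 + (rb_aligned ? rb : 0), 12);
}

int main(void)
{
	printf("rb=2: %u (4 KiB)\n", gfx9_dcc_block_log2(1, 2));   /* 12 */
	printf("rb=4: %u (16 KiB)\n", gfx9_dcc_block_log2(1, 4));  /* 14 */
	printf("unaligned: %u\n", gfx9_dcc_block_log2(0, 6));      /* 12 */
	return 0;
}
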
+
+static int amdgpu_display_verify_plane(struct amdgpu_framebuffer *rfb, int plane,
+ const struct drm_format_info *format,
+ unsigned int block_width, unsigned int block_height,
+ unsigned int block_size_log2)
+{
+ unsigned int width = rfb->base.width /
+ ((plane && plane < format->num_planes) ? format->hsub : 1);
+ unsigned int height = rfb->base.height /
+ ((plane && plane < format->num_planes) ? format->vsub : 1);
+ unsigned int cpp = plane < format->num_planes ? format->cpp[plane] : 1;
+ unsigned int block_pitch = block_width * cpp;
+ unsigned int min_pitch = ALIGN(width * cpp, block_pitch);
+ unsigned int block_size = 1 << block_size_log2;
+ uint64_t size;
+
+ if (rfb->base.pitches[plane] % block_pitch) {
+ drm_dbg_kms(rfb->base.dev,
+ "pitch %d for plane %d is not a multiple of block pitch %d\n",
+ rfb->base.pitches[plane], plane, block_pitch);
+ return -EINVAL;
+ }
+ if (rfb->base.pitches[plane] < min_pitch) {
+ drm_dbg_kms(rfb->base.dev,
+ "pitch %d for plane %d is less than minimum pitch %d\n",
+ rfb->base.pitches[plane], plane, min_pitch);
+ return -EINVAL;
+ }
+
+ /* Force at least natural alignment. */
+ if (rfb->base.offsets[plane] % block_size) {
+ drm_dbg_kms(rfb->base.dev,
+ "offset 0x%x for plane %d is not a multiple of block pitch 0x%x\n",
+ rfb->base.offsets[plane], plane, block_size);
+ return -EINVAL;
+ }
+
+ size = rfb->base.offsets[plane] +
+ (uint64_t)rfb->base.pitches[plane] / block_pitch *
+ block_size * DIV_ROUND_UP(height, block_height);
+
+ if (rfb->base.obj[0]->size < size) {
+ drm_dbg_kms(rfb->base.dev,
+ "BO size 0x%zx is less than 0x%llx required for plane %d\n",
+ rfb->base.obj[0]->size, size, plane);
+ return -EINVAL;
+ }
+
+ return 0;
+}
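
The checks in amdgpu_display_verify_plane() reduce to a little arithmetic: the minimum pitch is width * cpp rounded up to the block pitch, and the required BO size is offset + (pitch / block_pitch) * block_size * ceil(height / block_height). A sketch with made-up framebuffer numbers, using the linear-modifier parameters from amdgpu_display_verify_sizes() (block_width = 256 / cpp, 256-byte blocks):

#include <stdint.h>
#include <stdio.h>

#define ALIGN_UP(x, a)     (((x) + (a) - 1) / (a) * (a))
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* Fabricated example: 1920x1080, 32bpp, linear layout. */
	unsigned width = 1920, height = 1080, cpp = 4;
	unsigned block_width = 256 / cpp, block_height = 1, block_size = 256;
	unsigned block_pitch = block_width * cpp;                /* 256 bytes */
	unsigned min_pitch = ALIGN_UP(width * cpp, block_pitch); /* 7680 */
	unsigned pitch = 7680, offset = 0;

	uint64_t size = offset + (uint64_t)pitch / block_pitch *
			block_size * DIV_ROUND_UP(height, block_height);

	printf("min_pitch=%u required_size=%llu\n",
	       min_pitch, (unsigned long long)size); /* 7680, 8294400 */
	return 0;
}
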
+
+static int amdgpu_display_verify_sizes(struct amdgpu_framebuffer *rfb)
+{
+ const struct drm_format_info *format_info = drm_format_info(rfb->base.format->format);
+ uint64_t modifier = rfb->base.modifier;
+ int ret;
+ unsigned int i, block_width, block_height, block_size_log2;
+
+ if (!rfb->base.dev->mode_config.allow_fb_modifiers)
+ return 0;
+
+ for (i = 0; i < format_info->num_planes; ++i) {
+ if (modifier == DRM_FORMAT_MOD_LINEAR) {
+ block_width = 256 / format_info->cpp[i];
+ block_height = 1;
+ block_size_log2 = 8;
+ } else {
+ int swizzle = AMD_FMT_MOD_GET(TILE, modifier);
+
+ switch ((swizzle & ~3) + 1) {
+ case DC_SW_256B_S:
+ block_size_log2 = 8;
+ break;
+ case DC_SW_4KB_S:
+ case DC_SW_4KB_S_X:
+ block_size_log2 = 12;
+ break;
+ case DC_SW_64KB_S:
+ case DC_SW_64KB_S_T:
+ case DC_SW_64KB_S_X:
+ block_size_log2 = 16;
+ break;
+ default:
+ drm_dbg_kms(rfb->base.dev,
+ "Swizzle mode with unknown block size: %d\n", swizzle);
+ return -EINVAL;
+ }
+
+ get_block_dimensions(block_size_log2, format_info->cpp[i],
+ &block_width, &block_height);
+ }
+
+ ret = amdgpu_display_verify_plane(rfb, i, format_info,
+ block_width, block_height, block_size_log2);
+ if (ret)
+ return ret;
+ }
+
+ if (AMD_FMT_MOD_GET(DCC, modifier)) {
+ if (AMD_FMT_MOD_GET(DCC_RETILE, modifier)) {
+ block_size_log2 = get_dcc_block_size(modifier, false, false);
+ get_block_dimensions(block_size_log2 + 8, format_info->cpp[0],
+ &block_width, &block_height);
+ ret = amdgpu_display_verify_plane(rfb, i, format_info,
+ block_width, block_height,
+ block_size_log2);
+ if (ret)
+ return ret;
+
+ ++i;
+ block_size_log2 = get_dcc_block_size(modifier, true, true);
+ } else {
+ bool pipe_aligned = AMD_FMT_MOD_GET(DCC_PIPE_ALIGN, modifier);
+
+ block_size_log2 = get_dcc_block_size(modifier, true, pipe_aligned);
+ }
+ get_block_dimensions(block_size_log2 + 8, format_info->cpp[0],
+ &block_width, &block_height);
+ ret = amdgpu_display_verify_plane(rfb, i, format_info,
+ block_width, block_height, block_size_log2);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
static int amdgpu_display_get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
uint64_t *tiling_flags, bool *tmz_surface)
{
@@ -902,12 +1070,23 @@ int amdgpu_display_gem_fb_verify_and_init(
int ret;
rfb->base.obj[0] = obj;
-
- /* Verify that bo size can fit the fb size. */
- ret = drm_gem_fb_init_with_funcs(dev, &rfb->base, file_priv, mode_cmd,
- &amdgpu_fb_funcs);
+ drm_helper_mode_fill_fb_struct(dev, &rfb->base, mode_cmd);
+ ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
if (ret)
goto err;
+ /* Verify that the modifier is supported. */
+ if (!drm_any_plane_has_format(dev, mode_cmd->pixel_format,
+ mode_cmd->modifier[0])) {
+ struct drm_format_name_buf format_name;
+ drm_dbg_kms(dev,
+ "unsupported pixel format %s / modifier 0x%llx\n",
+ drm_get_format_name(mode_cmd->pixel_format,
+ &format_name),
+ mode_cmd->modifier[0]);
+
+ ret = -EINVAL;
+ goto err;
+ }
ret = amdgpu_display_framebuffer_init(dev, rfb, mode_cmd, obj);
if (ret)
@@ -954,9 +1133,12 @@ int amdgpu_display_framebuffer_init(struct drm_device *dev,
}
}
- for (i = 1; i < rfb->base.format->num_planes; ++i) {
+ ret = amdgpu_display_verify_sizes(rfb);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < rfb->base.format->num_planes; ++i) {
drm_gem_object_get(rfb->base.obj[0]);
- drm_gem_object_put(rfb->base.obj[i]);
rfb->base.obj[i] = rfb->base.obj[0];
}
@@ -986,6 +1168,7 @@ amdgpu_display_user_framebuffer_create(struct drm_device *dev,
domains = amdgpu_display_supported_domains(drm_to_adev(dev), bo->flags);
if (obj->import_attach && !(domains & AMDGPU_GEM_DOMAIN_GTT)) {
drm_dbg_kms(dev, "Cannot create framebuffer from imported dma_buf\n");
+ drm_gem_object_put(obj);
return ERR_PTR(-EINVAL);
}
@@ -1354,3 +1537,92 @@ bool amdgpu_crtc_get_scanout_position(struct drm_crtc *crtc,
return amdgpu_display_get_crtc_scanoutpos(dev, pipe, 0, vpos, hpos,
stime, etime, mode);
}
+
+int amdgpu_display_suspend_helper(struct amdgpu_device *adev)
+{
+ struct drm_device *dev = adev_to_drm(adev);
+ struct drm_crtc *crtc;
+ struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
+ int r;
+
+ /* turn off display hw */
+ drm_modeset_lock_all(dev);
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter)
+ drm_helper_connector_dpms(connector,
+ DRM_MODE_DPMS_OFF);
+ drm_connector_list_iter_end(&iter);
+ drm_modeset_unlock_all(dev);
+ /* unpin the front buffers and cursors */
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ struct drm_framebuffer *fb = crtc->primary->fb;
+ struct amdgpu_bo *robj;
+
+ if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
+ struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
+ r = amdgpu_bo_reserve(aobj, true);
+ if (r == 0) {
+ amdgpu_bo_unpin(aobj);
+ amdgpu_bo_unreserve(aobj);
+ }
+ }
+
+ if (fb == NULL || fb->obj[0] == NULL) {
+ continue;
+ }
+ robj = gem_to_amdgpu_bo(fb->obj[0]);
+ /* don't unpin kernel fb objects */
+ if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
+ r = amdgpu_bo_reserve(robj, true);
+ if (r == 0) {
+ amdgpu_bo_unpin(robj);
+ amdgpu_bo_unreserve(robj);
+ }
+ }
+ }
+ return 0;
+}
+
+int amdgpu_display_resume_helper(struct amdgpu_device *adev)
+{
+ struct drm_device *dev = adev_to_drm(adev);
+ struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
+ struct drm_crtc *crtc;
+ int r;
+
+ /* pin cursors */
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+ if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
+ struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
+ r = amdgpu_bo_reserve(aobj, true);
+ if (r == 0) {
+ r = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
+ if (r != 0)
+ dev_err(adev->dev, "Failed to pin cursor BO (%d)\n", r);
+ amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
+ amdgpu_bo_unreserve(aobj);
+ }
+ }
+ }
+
+ drm_helper_resume_force_mode(dev);
+
+ /* turn on display hw */
+ drm_modeset_lock_all(dev);
+
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter)
+ drm_helper_connector_dpms(connector,
+ DRM_MODE_DPMS_ON);
+ drm_connector_list_iter_end(&iter);
+
+ drm_modeset_unlock_all(dev);
+
+ return 0;
+}
+
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h
index dc7b7d116549..7b6d83e2b13c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h
@@ -47,4 +47,7 @@ amdgpu_display_user_framebuffer_create(struct drm_device *dev,
const struct drm_format_info *
amdgpu_lookup_format_info(u32 format, uint64_t modifier);
+int amdgpu_display_suspend_helper(struct amdgpu_device *adev);
+int amdgpu_display_resume_helper(struct amdgpu_device *adev);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index e0c4f7c7f1b9..baa980a477d9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -291,8 +291,8 @@ static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
break;
case TTM_PL_VRAM:
- r = amdgpu_vram_mgr_alloc_sgt(adev, &bo->tbo.mem, attach->dev,
- dir, &sgt);
+ r = amdgpu_vram_mgr_alloc_sgt(adev, &bo->tbo.mem, 0,
+ bo->tbo.base.size, attach->dev, dir, &sgt);
if (r)
return ERR_PTR(r);
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 33991b4a5627..f93883db2b46 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -36,6 +36,7 @@
#include <linux/vga_switcheroo.h>
#include <drm/drm_probe_helper.h>
#include <linux/mmu_notifier.h>
+#include <linux/suspend.h>
#include "amdgpu.h"
#include "amdgpu_irq.h"
@@ -46,6 +47,7 @@
#include "amdgpu_ras.h"
#include "amdgpu_xgmi.h"
+#include "amdgpu_reset.h"
/*
* KMS wrapper.
@@ -183,7 +185,7 @@ uint amdgpu_ras_mask = 0xffffffff;
int amdgpu_bad_page_threshold = -1;
struct amdgpu_watchdog_timer amdgpu_watchdog_timer = {
.timeout_fatal_disable = false,
- .period = 0x23, /* default to max. timeout = 1 << 0x23 cycles */
+	.period = 0x0, /* default to 0x0 (timeout disabled) */
};
/**
@@ -515,7 +517,7 @@ module_param_named(compute_multipipe, amdgpu_compute_multipipe, int, 0444);
* DOC: gpu_recovery (int)
* Set to enable GPU recovery mechanism (1 = enable, 0 = disable). The default is -1 (auto, disabled except SRIOV).
*/
-MODULE_PARM_DESC(gpu_recovery, "Enable GPU recovery mechanism, (1 = enable, 0 = disable, -1 = auto)");
+MODULE_PARM_DESC(gpu_recovery, "Enable GPU recovery mechanism, (2 = advanced tdr mode, 1 = enable, 0 = disable, -1 = auto)");
module_param_named(gpu_recovery, amdgpu_gpu_recovery, int, 0444);
/**
@@ -551,7 +553,7 @@ module_param_named(timeout_fatal_disable, amdgpu_watchdog_timer.timeout_fatal_di
* DOC: timeout_period (uint)
* Modify the watchdog timeout max_cycles as (1 << period)
*/
-MODULE_PARM_DESC(timeout_period, "watchdog timeout period (1 to 0x23(default), timeout maxCycles = (1 << period)");
+MODULE_PARM_DESC(timeout_period, "watchdog timeout period (0 = timeout disabled, 1 ~ 0x23 = timeout maxcycles = (1 << period))");
module_param_named(timeout_period, amdgpu_watchdog_timer.period, uint, 0644);
/**
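
The new parameter semantics map directly to max_cycles = 1 << period, with 0 now meaning the watchdog timeout is disabled rather than defaulting to the maximum. A quick illustration:

#include <stdio.h>

int main(void)
{
	unsigned period = 0x23;

	if (period == 0)
		printf("watchdog timeout disabled\n");
	else
		printf("max_cycles = 1 << 0x%x = %llu\n",
		       period, 1ULL << period); /* 34359738368 cycles */
	return 0;
}
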
@@ -1161,6 +1163,7 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x73A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
{0x1002, 0x73AB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
{0x1002, 0x73AE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
+ {0x1002, 0x73AF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
{0x1002, 0x73BF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
/* Van Gogh */
@@ -1333,9 +1336,7 @@ amdgpu_pci_shutdown(struct pci_dev *pdev)
*/
if (!amdgpu_passthrough(adev))
adev->mp1_state = PP_MP1_STATE_UNLOAD;
- adev->in_poweroff_reboot_com = true;
amdgpu_device_ip_suspend(adev);
- adev->in_poweroff_reboot_com = false;
adev->mp1_state = PP_MP1_STATE_NONE;
}
@@ -1349,7 +1350,9 @@ static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work)
struct list_head device_list;
struct amdgpu_device *adev;
int i, r;
- bool need_full_reset = true;
+ struct amdgpu_reset_context reset_context;
+
+ memset(&reset_context, 0, sizeof(reset_context));
mutex_lock(&mgpu_info.mutex);
if (mgpu_info.pending_reset == true) {
@@ -1359,9 +1362,14 @@ static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work)
mgpu_info.pending_reset = true;
mutex_unlock(&mgpu_info.mutex);
+	/* Use a common context; we just need to make sure a full reset is done */
+ reset_context.method = AMD_RESET_METHOD_NONE;
+ set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
+
for (i = 0; i < mgpu_info.num_dgpu; i++) {
adev = mgpu_info.gpu_ins[i].adev;
- r = amdgpu_device_pre_asic_reset(adev, NULL, &need_full_reset);
+ reset_context.reset_req_dev = adev;
+ r = amdgpu_device_pre_asic_reset(adev, &reset_context);
if (r) {
dev_err(adev->dev, "GPU pre asic reset failed with err, %d for drm dev, %s ",
r, adev_to_drm(adev)->unique);
@@ -1388,7 +1396,10 @@ static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work)
list_for_each_entry(adev, &device_list, reset_list)
amdgpu_unregister_gpu_instance(adev);
- r = amdgpu_do_asic_reset(NULL, &device_list, &need_full_reset, true);
+	/* Use a common context; we just need to make sure a full reset is done */
+ set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);
+ r = amdgpu_do_asic_reset(&device_list, &reset_context);
+
if (r) {
DRM_ERROR("reinit gpus failure");
return;
@@ -1402,18 +1413,50 @@ static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work)
return;
}
+static int amdgpu_pmops_prepare(struct device *dev)
+{
+ struct drm_device *drm_dev = dev_get_drvdata(dev);
+
+ /* Return a positive number here so
+ * DPM_FLAG_SMART_SUSPEND works properly
+ */
+ if (amdgpu_device_supports_boco(drm_dev))
+ return pm_runtime_suspended(dev) &&
+ pm_suspend_via_firmware();
+
+ return 0;
+}
+
+static void amdgpu_pmops_complete(struct device *dev)
+{
+ /* nothing to do */
+}
+
static int amdgpu_pmops_suspend(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(drm_dev);
+ int r;
- return amdgpu_device_suspend(drm_dev, true);
+ if (amdgpu_acpi_is_s0ix_supported(adev))
+ adev->in_s0ix = true;
+ adev->in_s3 = true;
+ r = amdgpu_device_suspend(drm_dev, true);
+ adev->in_s3 = false;
+
+ return r;
}
static int amdgpu_pmops_resume(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(drm_dev);
+ int r;
- return amdgpu_device_resume(drm_dev, true);
+ r = amdgpu_device_resume(drm_dev, true);
+ if (amdgpu_acpi_is_s0ix_supported(adev))
+ adev->in_s0ix = false;
+ return r;
}
static int amdgpu_pmops_freeze(struct device *dev)
@@ -1422,9 +1465,9 @@ static int amdgpu_pmops_freeze(struct device *dev)
struct amdgpu_device *adev = drm_to_adev(drm_dev);
int r;
- adev->in_hibernate = true;
+ adev->in_s4 = true;
r = amdgpu_device_suspend(drm_dev, true);
- adev->in_hibernate = false;
+ adev->in_s4 = false;
if (r)
return r;
return amdgpu_asic_reset(adev);
@@ -1440,13 +1483,8 @@ static int amdgpu_pmops_thaw(struct device *dev)
static int amdgpu_pmops_poweroff(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = drm_to_adev(drm_dev);
- int r;
- adev->in_poweroff_reboot_com = true;
- r = amdgpu_device_suspend(drm_dev, true);
- adev->in_poweroff_reboot_com = false;
- return r;
+ return amdgpu_device_suspend(drm_dev, true);
}
static int amdgpu_pmops_restore(struct device *dev)
@@ -1479,7 +1517,7 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev)
}
adev->in_runpm = true;
- if (amdgpu_device_supports_atpx(drm_dev))
+ if (amdgpu_device_supports_px(drm_dev))
drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
ret = amdgpu_device_suspend(drm_dev, false);
@@ -1488,16 +1526,14 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev)
return ret;
}
- if (amdgpu_device_supports_atpx(drm_dev)) {
+ if (amdgpu_device_supports_px(drm_dev)) {
/* Only need to handle PCI state in the driver for ATPX
* PCI core handles it for _PR3.
*/
- if (!amdgpu_is_atpx_hybrid()) {
- amdgpu_device_cache_pci_state(pdev);
- pci_disable_device(pdev);
- pci_ignore_hotplug(pdev);
- pci_set_power_state(pdev, PCI_D3cold);
- }
+ amdgpu_device_cache_pci_state(pdev);
+ pci_disable_device(pdev);
+ pci_ignore_hotplug(pdev);
+ pci_set_power_state(pdev, PCI_D3cold);
drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF;
} else if (amdgpu_device_supports_baco(drm_dev)) {
amdgpu_device_baco_enter(drm_dev);
@@ -1516,19 +1552,17 @@ static int amdgpu_pmops_runtime_resume(struct device *dev)
if (!adev->runpm)
return -EINVAL;
- if (amdgpu_device_supports_atpx(drm_dev)) {
+ if (amdgpu_device_supports_px(drm_dev)) {
drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
/* Only need to handle PCI state in the driver for ATPX
* PCI core handles it for _PR3.
*/
- if (!amdgpu_is_atpx_hybrid()) {
- pci_set_power_state(pdev, PCI_D0);
- amdgpu_device_load_pci_state(pdev);
- ret = pci_enable_device(pdev);
- if (ret)
- return ret;
- }
+ pci_set_power_state(pdev, PCI_D0);
+ amdgpu_device_load_pci_state(pdev);
+ ret = pci_enable_device(pdev);
+ if (ret)
+ return ret;
pci_set_master(pdev);
} else if (amdgpu_device_supports_boco(drm_dev)) {
/* Only need to handle PCI state in the driver for ATPX
@@ -1539,7 +1573,10 @@ static int amdgpu_pmops_runtime_resume(struct device *dev)
amdgpu_device_baco_exit(drm_dev);
}
ret = amdgpu_device_resume(drm_dev, false);
- if (amdgpu_device_supports_atpx(drm_dev))
+ if (ret)
+ return ret;
+
+ if (amdgpu_device_supports_px(drm_dev))
drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;
adev->in_runpm = false;
return 0;
@@ -1620,6 +1657,8 @@ out:
}
static const struct dev_pm_ops amdgpu_pm_ops = {
+ .prepare = amdgpu_pmops_prepare,
+ .complete = amdgpu_pmops_complete,
.suspend = amdgpu_pmops_suspend,
.resume = amdgpu_pmops_resume,
.freeze = amdgpu_pmops_freeze,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
index 4c5c19820d37..4f10c4529840 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
@@ -205,7 +205,6 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
struct drm_gem_object *gobj = NULL;
struct amdgpu_bo *abo = NULL;
int ret;
- unsigned long tmp;
memset(&mode_cmd, 0, sizeof(mode_cmd));
mode_cmd.width = sizes->surface_width;
@@ -246,8 +245,7 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
info->fbops = &amdgpufb_ops;
- tmp = amdgpu_bo_gpu_offset(abo) - adev->gmc.vram_start;
- info->fix.smem_start = adev->gmc.aper_base + tmp;
+ info->fix.smem_start = amdgpu_gmc_vram_cpu_pa(adev, abo);
info->fix.smem_len = amdgpu_bo_size(abo);
info->screen_base = amdgpu_bo_kptr(abo);
info->screen_size = amdgpu_bo_size(abo);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 1a4809d9e850..47ea46859618 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -439,7 +439,8 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
* Helper function for amdgpu_fence_driver_init().
*/
int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
- unsigned num_hw_submission)
+ unsigned num_hw_submission,
+ atomic_t *sched_score)
{
struct amdgpu_device *adev = ring->adev;
long timeout;
@@ -467,30 +468,31 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
return -ENOMEM;
/* No need to setup the GPU scheduler for rings that don't need it */
- if (!ring->no_scheduler) {
- switch (ring->funcs->type) {
- case AMDGPU_RING_TYPE_GFX:
- timeout = adev->gfx_timeout;
- break;
- case AMDGPU_RING_TYPE_COMPUTE:
- timeout = adev->compute_timeout;
- break;
- case AMDGPU_RING_TYPE_SDMA:
- timeout = adev->sdma_timeout;
- break;
- default:
- timeout = adev->video_timeout;
- break;
- }
+ if (ring->no_scheduler)
+ return 0;
- r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
- num_hw_submission, amdgpu_job_hang_limit,
- timeout, NULL, ring->name);
- if (r) {
- DRM_ERROR("Failed to create scheduler on ring %s.\n",
- ring->name);
- return r;
- }
+ switch (ring->funcs->type) {
+ case AMDGPU_RING_TYPE_GFX:
+ timeout = adev->gfx_timeout;
+ break;
+ case AMDGPU_RING_TYPE_COMPUTE:
+ timeout = adev->compute_timeout;
+ break;
+ case AMDGPU_RING_TYPE_SDMA:
+ timeout = adev->sdma_timeout;
+ break;
+ default:
+ timeout = adev->video_timeout;
+ break;
+ }
+
+ r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
+ num_hw_submission, amdgpu_job_hang_limit,
+ timeout, sched_score, ring->name);
+ if (r) {
+ DRM_ERROR("Failed to create scheduler on ring %s.\n",
+ ring->name);
+ return r;
}
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index 5807cad833d3..c5a9a4fb10d2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -126,6 +126,8 @@ int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev)
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
bp.type = ttm_bo_type_kernel;
bp.resv = NULL;
+ bp.bo_ptr_size = sizeof(struct amdgpu_bo);
+
r = amdgpu_bo_create(adev, &bp, &adev->gart.bo);
if (r) {
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index fb7171e5507c..311bcdc59eda 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -58,6 +58,7 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
struct drm_gem_object **obj)
{
struct amdgpu_bo *bo;
+ struct amdgpu_bo_user *ubo;
struct amdgpu_bo_param bp;
int r;
@@ -71,10 +72,13 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
bp.preferred_domain = initial_domain;
bp.flags = flags;
bp.domain = initial_domain;
- r = amdgpu_bo_create(adev, &bp, &bo);
+ bp.bo_ptr_size = sizeof(struct amdgpu_bo);
+
+ r = amdgpu_bo_create_user(adev, &bp, &ubo);
if (r)
return r;
+ bo = &ubo->bo;
*obj = &bo->tbo.base;
(*obj)->funcs = &amdgpu_gem_object_funcs;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index 689addb1520d..95d4f43a03df 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -310,9 +310,8 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
ring->eop_gpu_addr = kiq->eop_gpu_addr;
ring->no_scheduler = true;
sprintf(ring->name, "kiq_%d.%d.%d", ring->me, ring->pipe, ring->queue);
- r = amdgpu_ring_init(adev, ring, 1024,
- irq, AMDGPU_CP_KIQ_IRQ_DRIVER0,
- AMDGPU_RING_PRIO_DEFAULT);
+ r = amdgpu_ring_init(adev, ring, 1024, irq, AMDGPU_CP_KIQ_IRQ_DRIVER0,
+ AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
dev_warn(adev->dev, "(%d) failed to init kiq ring\n", r);
@@ -463,20 +462,25 @@ int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev)
{
struct amdgpu_kiq *kiq = &adev->gfx.kiq;
struct amdgpu_ring *kiq_ring = &kiq->ring;
- int i;
+ int i, r;
if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
return -EINVAL;
+ spin_lock(&adev->gfx.kiq.ring_lock);
if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size *
- adev->gfx.num_compute_rings))
+ adev->gfx.num_compute_rings)) {
+ spin_unlock(&adev->gfx.kiq.ring_lock);
return -ENOMEM;
+ }
for (i = 0; i < adev->gfx.num_compute_rings; i++)
kiq->pmf->kiq_unmap_queues(kiq_ring, &adev->gfx.compute_ring[i],
RESET_QUEUES, 0, 0);
+ r = amdgpu_ring_test_helper(kiq_ring);
+ spin_unlock(&adev->gfx.kiq.ring_lock);
- return amdgpu_ring_test_helper(kiq_ring);
+ return r;
}
int amdgpu_queue_mask_bit_to_set_resource_bit(struct amdgpu_device *adev,
@@ -519,12 +523,13 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev)
DRM_INFO("kiq ring mec %d pipe %d q %d\n", kiq_ring->me, kiq_ring->pipe,
kiq_ring->queue);
-
+ spin_lock(&adev->gfx.kiq.ring_lock);
r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size *
adev->gfx.num_compute_rings +
kiq->pmf->set_resources_size);
if (r) {
DRM_ERROR("Failed to lock KIQ (%d).\n", r);
+ spin_unlock(&adev->gfx.kiq.ring_lock);
return r;
}
@@ -533,6 +538,7 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev)
kiq->pmf->kiq_map_queues(kiq_ring, &adev->gfx.compute_ring[i]);
r = amdgpu_ring_test_helper(kiq_ring);
+ spin_unlock(&adev->gfx.kiq.ring_lock);
if (r)
DRM_ERROR("KCQ enable failed\n");
@@ -671,8 +677,9 @@ int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev,
*/
if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) {
kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
- if (adev->gfx.funcs->query_ras_error_count)
- adev->gfx.funcs->query_ras_error_count(adev, err_data);
+ if (adev->gfx.ras_funcs &&
+ adev->gfx.ras_funcs->query_ras_error_count)
+ adev->gfx.ras_funcs->query_ras_error_count(adev, err_data);
amdgpu_ras_reset_gpu(adev);
}
return AMDGPU_RAS_SUCCESS;
@@ -705,7 +712,7 @@ uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
struct amdgpu_kiq *kiq = &adev->gfx.kiq;
struct amdgpu_ring *ring = &kiq->ring;
- if (adev->in_pci_err_recovery)
+ if (amdgpu_device_skip_hw_access(adev))
return 0;
BUG_ON(!ring->funcs->emit_rreg);
@@ -772,7 +779,7 @@ void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
BUG_ON(!ring->funcs->emit_wreg);
- if (adev->in_pci_err_recovery)
+ if (amdgpu_device_skip_hw_access(adev))
return;
spin_lock_irqsave(&kiq->ring_lock, flags);
@@ -836,14 +843,10 @@ int amdgpu_gfx_get_num_kcq(struct amdgpu_device *adev)
void amdgpu_gfx_state_change_set(struct amdgpu_device *adev, enum gfx_change_state state)
{
- if (is_support_sw_smu(adev)) {
- smu_gfx_state_change_set(&adev->smu, state);
- } else {
- mutex_lock(&adev->pm.mutex);
- if (adev->powerplay.pp_funcs &&
- adev->powerplay.pp_funcs->gfx_state_change_set)
- ((adev)->powerplay.pp_funcs->gfx_state_change_set(
- (adev)->powerplay.pp_handle, state));
- mutex_unlock(&adev->pm.mutex);
- }
+ mutex_lock(&adev->pm.mutex);
+ if (adev->powerplay.pp_funcs &&
+ adev->powerplay.pp_funcs->gfx_state_change_set)
+ ((adev)->powerplay.pp_funcs->gfx_state_change_set(
+ (adev)->powerplay.pp_handle, state));
+ mutex_unlock(&adev->pm.mutex);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index 38af93f501e1..d43fe2ed8116 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -205,6 +205,19 @@ struct amdgpu_cu_info {
uint32_t bitmap[4][4];
};
+struct amdgpu_gfx_ras_funcs {
+ int (*ras_late_init)(struct amdgpu_device *adev);
+ void (*ras_fini)(struct amdgpu_device *adev);
+ int (*ras_error_inject)(struct amdgpu_device *adev,
+ void *inject_if);
+ int (*query_ras_error_count)(struct amdgpu_device *adev,
+ void *ras_error_status);
+ void (*reset_ras_error_count)(struct amdgpu_device *adev);
+ void (*query_ras_error_status)(struct amdgpu_device *adev);
+ void (*reset_ras_error_status)(struct amdgpu_device *adev);
+ void (*enable_watchdog_timer)(struct amdgpu_device *adev);
+};
+
struct amdgpu_gfx_funcs {
/* get the gpu clock counter */
uint64_t (*get_gpu_clock_counter)(struct amdgpu_device *adev);
@@ -220,14 +233,8 @@ struct amdgpu_gfx_funcs {
uint32_t *dst);
void (*select_me_pipe_q)(struct amdgpu_device *adev, u32 me, u32 pipe,
u32 queue, u32 vmid);
- int (*ras_error_inject)(struct amdgpu_device *adev, void *inject_if);
- int (*query_ras_error_count) (struct amdgpu_device *adev, void *ras_error_status);
- void (*reset_ras_error_count) (struct amdgpu_device *adev);
void (*init_spm_golden)(struct amdgpu_device *adev);
- void (*query_ras_error_status) (struct amdgpu_device *adev);
- void (*reset_ras_error_status) (struct amdgpu_device *adev);
void (*update_perfmon_mgcg)(struct amdgpu_device *adev, bool enable);
- void (*enable_watchdog_timer)(struct amdgpu_device *adev);
};
struct sq_work {
@@ -330,7 +337,8 @@ struct amdgpu_gfx {
DECLARE_BITMAP (pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
/*ras */
- struct ras_common_if *ras_if;
+ struct ras_common_if *ras_if;
+ const struct amdgpu_gfx_ras_funcs *ras_funcs;
};
#define amdgpu_gfx_get_gpu_clock_counter(adev) (adev)->gfx.funcs->get_gpu_clock_counter((adev))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index 6f7995293a1e..c39ed9eb0987 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -55,6 +55,8 @@ int amdgpu_gmc_pdb0_alloc(struct amdgpu_device *adev)
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
bp.type = ttm_bo_type_kernel;
bp.resv = NULL;
+ bp.bo_ptr_size = sizeof(struct amdgpu_bo);
+
r = amdgpu_bo_create(adev, &bp, &adev->gmc.pdb0_bo);
if (r)
return r;
@@ -389,26 +391,46 @@ int amdgpu_gmc_ras_late_init(struct amdgpu_device *adev)
{
int r;
- if (adev->umc.funcs && adev->umc.funcs->ras_late_init) {
- r = adev->umc.funcs->ras_late_init(adev);
+ if (adev->umc.ras_funcs &&
+ adev->umc.ras_funcs->ras_late_init) {
+ r = adev->umc.ras_funcs->ras_late_init(adev);
+ if (r)
+ return r;
+ }
+
+ if (adev->mmhub.ras_funcs &&
+ adev->mmhub.ras_funcs->ras_late_init) {
+ r = adev->mmhub.ras_funcs->ras_late_init(adev);
if (r)
return r;
}
- if (adev->mmhub.funcs && adev->mmhub.funcs->ras_late_init) {
- r = adev->mmhub.funcs->ras_late_init(adev);
+ if (!adev->gmc.xgmi.connected_to_cpu)
+ adev->gmc.xgmi.ras_funcs = &xgmi_ras_funcs;
+
+ if (adev->gmc.xgmi.ras_funcs &&
+ adev->gmc.xgmi.ras_funcs->ras_late_init) {
+ r = adev->gmc.xgmi.ras_funcs->ras_late_init(adev);
if (r)
return r;
}
- return amdgpu_xgmi_ras_late_init(adev);
+ return 0;
}
void amdgpu_gmc_ras_fini(struct amdgpu_device *adev)
{
- amdgpu_umc_ras_fini(adev);
- amdgpu_mmhub_ras_fini(adev);
- amdgpu_xgmi_ras_fini(adev);
+ if (adev->umc.ras_funcs &&
+ adev->umc.ras_funcs->ras_fini)
+ adev->umc.ras_funcs->ras_fini(adev);
+
+ if (adev->mmhub.ras_funcs &&
+ adev->mmhub.ras_funcs->ras_fini)
+ amdgpu_mmhub_ras_fini(adev);
+
+ if (adev->gmc.xgmi.ras_funcs &&
+ adev->gmc.xgmi.ras_funcs->ras_fini)
+ adev->gmc.xgmi.ras_funcs->ras_fini(adev);
}
/*
@@ -465,6 +487,7 @@ void amdgpu_gmc_tmz_set(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
case CHIP_RAVEN:
+ case CHIP_RENOIR:
if (amdgpu_tmz == 0) {
adev->gmc.tmz_enabled = false;
dev_info(adev->dev,
@@ -475,7 +498,6 @@ void amdgpu_gmc_tmz_set(struct amdgpu_device *adev)
"Trusted Memory Zone (TMZ) feature enabled\n");
}
break;
- case CHIP_RENOIR:
case CHIP_NAVI10:
case CHIP_NAVI14:
case CHIP_NAVI12:
@@ -514,6 +536,7 @@ void amdgpu_gmc_noretry_set(struct amdgpu_device *adev)
switch (adev->asic_type) {
case CHIP_VEGA10:
case CHIP_VEGA20:
+ case CHIP_ARCTURUS:
case CHIP_ALDEBARAN:
/*
* noretry = 0 will cause kfd page fault tests fail
@@ -638,8 +661,7 @@ void amdgpu_gmc_init_pdb0(struct amdgpu_device *adev)
u64 vram_addr = adev->vm_manager.vram_base_offset -
adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
u64 vram_end = vram_addr + vram_size;
- u64 gart_ptb_gpu_pa = amdgpu_bo_gpu_offset(adev->gart.bo) +
- adev->vm_manager.vram_base_offset - adev->gmc.vram_start;
+ u64 gart_ptb_gpu_pa = amdgpu_gmc_vram_pa(adev, adev->gart.bo);
flags |= AMDGPU_PTE_VALID | AMDGPU_PTE_READABLE;
flags |= AMDGPU_PTE_WRITEABLE;
@@ -662,3 +684,39 @@ void amdgpu_gmc_init_pdb0(struct amdgpu_device *adev)
/* Requires gart_ptb_gpu_pa to be 4K aligned */
amdgpu_gmc_set_pte_pde(adev, adev->gmc.ptr_pdb0, i, gart_ptb_gpu_pa, flags);
}
+
+/**
+ * amdgpu_gmc_vram_mc2pa - calculate vram buffer's physical address from MC
+ * address
+ *
+ * @adev: amdgpu_device pointer
+ * @mc_addr: MC address of buffer
+ */
+uint64_t amdgpu_gmc_vram_mc2pa(struct amdgpu_device *adev, uint64_t mc_addr)
+{
+ return mc_addr - adev->gmc.vram_start + adev->vm_manager.vram_base_offset;
+}
+
+/**
+ * amdgpu_gmc_vram_pa - calculate vram buffer object's physical address from
+ * GPU's view
+ *
+ * @adev: amdgpu_device pointer
+ * @bo: amdgpu buffer object
+ */
+uint64_t amdgpu_gmc_vram_pa(struct amdgpu_device *adev, struct amdgpu_bo *bo)
+{
+ return amdgpu_gmc_vram_mc2pa(adev, amdgpu_bo_gpu_offset(bo));
+}
+
+/**
+ * amdgpu_gmc_vram_cpu_pa - calculate vram buffer object's physical address
+ * from CPU's view
+ *
+ * @adev: amdgpu_device pointer
+ * @bo: amdgpu buffer object
+ */
+uint64_t amdgpu_gmc_vram_cpu_pa(struct amdgpu_device *adev, struct amdgpu_bo *bo)
+{
+ return amdgpu_bo_gpu_offset(bo) - adev->gmc.vram_start + adev->gmc.aper_base;
+}
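
The three new helpers are simple offset translations between the MC (GPU virtual), GPU-physical, and CPU (BAR) views of VRAM. A userspace sketch of the arithmetic with fabricated base addresses:

#include <stdint.h>
#include <stdio.h>

/* Fabricated layout for illustration only. */
static const uint64_t vram_start = 0x0000008000000000ULL;  /* MC view base */
static const uint64_t vram_base_offset = 0x0ULL;           /* GPU phys base */
static const uint64_t aper_base = 0x00000000e0000000ULL;   /* CPU BAR base */

static uint64_t vram_mc2pa(uint64_t mc_addr)
{
	return mc_addr - vram_start + vram_base_offset;
}

static uint64_t vram_cpu_pa(uint64_t mc_addr)
{
	return mc_addr - vram_start + aper_base;
}

int main(void)
{
	uint64_t bo_mc = vram_start + 0x100000; /* BO at 1 MiB into VRAM */

	printf("GPU PA: 0x%llx\n", (unsigned long long)vram_mc2pa(bo_mc)); /* 0x100000 */
	printf("CPU PA: 0x%llx\n", (unsigned long long)vram_cpu_pa(bo_mc)); /* 0xe0100000 */
	return 0;
}
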
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
index 7e248a4e2fa3..9d11c02a3938 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
@@ -135,6 +135,14 @@ struct amdgpu_gmc_funcs {
unsigned int (*get_vbios_fb_size)(struct amdgpu_device *adev);
};
+struct amdgpu_xgmi_ras_funcs {
+ int (*ras_late_init)(struct amdgpu_device *adev);
+ void (*ras_fini)(struct amdgpu_device *adev);
+ int (*query_ras_error_count)(struct amdgpu_device *adev,
+ void *ras_error_status);
+ void (*reset_ras_error_count)(struct amdgpu_device *adev);
+};
+
struct amdgpu_xgmi {
/* from psp */
u64 node_id;
@@ -151,6 +159,7 @@ struct amdgpu_xgmi {
struct ras_common_if *ras_if;
bool connected_to_cpu;
bool pending_reset;
+ const struct amdgpu_xgmi_ras_funcs *ras_funcs;
};
struct amdgpu_gmc {
@@ -209,15 +218,6 @@ struct amdgpu_gmc {
*/
u64 fb_start;
u64 fb_end;
- /* In the case of use GART table for vmid0 FB access, [fb_start, fb_end]
- * will be squeezed to GART aperture. But we have a PSP FW issue to fix
- * for now. To temporarily workaround the PSP FW issue, added below two
- * variables to remember the original fb_start/end to re-enable FB
- * aperture to workaround the PSP FW issue. Will delete it after we
- * get a proper PSP FW fix.
- */
- u64 fb_start_original;
- u64 fb_end_original;
unsigned vram_width;
u64 real_vram_size;
int vram_mtrr;
@@ -332,4 +332,7 @@ amdgpu_gmc_set_vm_fault_masks(struct amdgpu_device *adev, int hub_type,
void amdgpu_gmc_get_vbios_allocations(struct amdgpu_device *adev);
void amdgpu_gmc_init_pdb0(struct amdgpu_device *adev);
+uint64_t amdgpu_gmc_vram_mc2pa(struct amdgpu_device *adev, uint64_t mc_addr);
+uint64_t amdgpu_gmc_vram_pa(struct amdgpu_device *adev, struct amdgpu_bo *bo);
+uint64_t amdgpu_gmc_vram_cpu_pa(struct amdgpu_device *adev, struct amdgpu_bo *bo);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
index 8980329cded0..540c01052b21 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -49,8 +49,7 @@ static ssize_t amdgpu_mem_info_gtt_total_show(struct device *dev,
struct amdgpu_device *adev = drm_to_adev(ddev);
struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT);
- return snprintf(buf, PAGE_SIZE, "%llu\n",
- man->size * PAGE_SIZE);
+ return sysfs_emit(buf, "%llu\n", man->size * PAGE_SIZE);
}
/**
@@ -68,8 +67,7 @@ static ssize_t amdgpu_mem_info_gtt_used_show(struct device *dev,
struct amdgpu_device *adev = drm_to_adev(ddev);
struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT);
- return snprintf(buf, PAGE_SIZE, "%llu\n",
- amdgpu_gtt_mgr_usage(man));
+ return sysfs_emit(buf, "%llu\n", amdgpu_gtt_mgr_usage(man));
}
static DEVICE_ATTR(mem_info_gtt_total, S_IRUGO,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 148a3b481b12..a2fe2dac32c1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -76,6 +76,8 @@ int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
}
ib->ptr = amdgpu_sa_bo_cpu_addr(ib->sa_bo);
+	/* flush the cache before committing the IB */
+ ib->flags = AMDGPU_IB_FLAG_EMIT_MEM_SYNC;
if (!vm)
ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index 94b069630db3..b4971e90b98c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -215,7 +215,11 @@ static int amdgpu_vmid_grab_idle(struct amdgpu_vm *vm,
/* Check if we have an idle VMID */
i = 0;
list_for_each_entry((*idle), &id_mgr->ids_lru, list) {
- fences[i] = amdgpu_sync_peek_fence(&(*idle)->active, ring);
+		/* Don't use per-engine and per-process VMIDs at the same time */
+ struct amdgpu_ring *r = adev->vm_manager.concurrent_flush ?
+ NULL : ring;
+
+ fences[i] = amdgpu_sync_peek_fence(&(*idle)->active, r);
if (!fences[i])
break;
++i;
@@ -281,7 +285,7 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
if (updates && (*id)->flushed_updates &&
updates->context == (*id)->flushed_updates->context &&
!dma_fence_is_later(updates, (*id)->flushed_updates))
- updates = NULL;
+ updates = NULL;
if ((*id)->owner != vm->immediate.fence_context ||
job->vm_pd_addr != (*id)->pd_gpu_addr ||
@@ -290,6 +294,10 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
!dma_fence_is_signaled((*id)->last_flush))) {
struct dma_fence *tmp;
+		/* Don't use per-engine and per-process VMIDs at the same time */
+ if (adev->vm_manager.concurrent_flush)
+ ring = NULL;
+
/* to prevent one context starved by another context */
(*id)->pd_gpu_addr = 0;
tmp = amdgpu_sync_peek_fence(&(*id)->active, ring);
@@ -365,12 +373,7 @@ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
if (updates && (!flushed || dma_fence_is_later(updates, flushed)))
needs_flush = true;
- /* Concurrent flushes are only possible starting with Vega10 and
- * are broken on Navi10 and Navi14.
- */
- if (needs_flush && (adev->asic_type < CHIP_VEGA10 ||
- adev->asic_type == CHIP_NAVI10 ||
- adev->asic_type == CHIP_NAVI14))
+ if (needs_flush && !adev->vm_manager.concurrent_flush)
continue;
/* Good, we can use this VMID. Remember this submission as
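
The removed comment states the rule the new concurrent_flush flag replaces: concurrent flushes are possible from Vega10 onward but broken on Navi10 and Navi14. A hedged sketch of how such a flag could be derived from that rule (the enum values below are stand-ins, not the kernel's amd_asic_type):

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins ordered like the kernel's chip enum. */
enum chip { CHIP_POLARIS10, CHIP_VEGA10, CHIP_NAVI10, CHIP_NAVI14, CHIP_NAVI12 };

static bool concurrent_flush(enum chip asic)
{
	/* Possible starting with Vega10, but broken on Navi10/Navi14. */
	return asic >= CHIP_VEGA10 &&
	       asic != CHIP_NAVI10 && asic != CHIP_NAVI14;
}

int main(void)
{
	printf("vega10: %d\n", concurrent_flush(CHIP_VEGA10)); /* 1 */
	printf("navi10: %d\n", concurrent_flush(CHIP_NAVI10)); /* 0 */
	printf("navi12: %d\n", concurrent_flush(CHIP_NAVI12)); /* 1 */
	return 0;
}
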
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index af026109421a..90f50561b43a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -199,13 +199,13 @@ irqreturn_t amdgpu_irq_handler(int irq, void *arg)
* ack the interrupt if it is there
*/
if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__PCIE_BIF)) {
- if (adev->nbio.funcs &&
- adev->nbio.funcs->handle_ras_controller_intr_no_bifring)
- adev->nbio.funcs->handle_ras_controller_intr_no_bifring(adev);
+ if (adev->nbio.ras_funcs &&
+ adev->nbio.ras_funcs->handle_ras_controller_intr_no_bifring)
+ adev->nbio.ras_funcs->handle_ras_controller_intr_no_bifring(adev);
- if (adev->nbio.funcs &&
- adev->nbio.funcs->handle_ras_err_event_athub_intr_no_bifring)
- adev->nbio.funcs->handle_ras_err_event_athub_intr_no_bifring(adev);
+ if (adev->nbio.ras_funcs &&
+ adev->nbio.ras_funcs->handle_ras_err_event_athub_intr_no_bifring)
+ adev->nbio.ras_funcs->handle_ras_err_event_athub_intr_no_bifring(adev);
}
return ret;
@@ -382,11 +382,6 @@ void amdgpu_irq_fini(struct amdgpu_device *adev)
kfree(src->enabled_types);
src->enabled_types = NULL;
- if (src->data) {
- kfree(src->data);
- kfree(src);
- adev->irq.client[i].sources[j] = NULL;
- }
}
kfree(adev->irq.client[i].sources);
adev->irq.client[i].sources = NULL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
index ac527e5deae6..cf6116648322 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
@@ -62,7 +62,6 @@ struct amdgpu_irq_src {
unsigned num_types;
atomic_t *enabled_types;
const struct amdgpu_irq_src_funcs *funcs;
- void *data;
};
struct amdgpu_irq_client {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index ada807de978b..39ee88d29cca 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -159,7 +159,7 @@ int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags)
goto out;
}
- if (amdgpu_device_supports_atpx(dev) &&
+ if (amdgpu_device_supports_px(dev) &&
(amdgpu_runtime_pm != 0)) { /* enable runpm by default for atpx */
adev->runpm = true;
dev_info(adev->dev, "Using ATPX for runtime pm\n");
@@ -200,9 +200,13 @@ int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags)
if (adev->runpm) {
/* only need to skip on ATPX */
- if (amdgpu_device_supports_atpx(dev) &&
- !amdgpu_is_atpx_hybrid())
+ if (amdgpu_device_supports_px(dev))
dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NO_DIRECT_COMPLETE);
+ /* we want direct complete for BOCO */
+ if (amdgpu_device_supports_boco(dev))
+ dev_pm_set_driver_flags(dev->dev, DPM_FLAG_SMART_PREPARE |
+ DPM_FLAG_SMART_SUSPEND |
+ DPM_FLAG_MAY_SKIP_RESUME);
pm_runtime_use_autosuspend(dev->dev);
pm_runtime_set_autosuspend_delay(dev->dev, 5000);
pm_runtime_allow(dev->dev);
@@ -785,9 +789,9 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
dev_info->high_va_offset = AMDGPU_GMC_HOLE_END;
dev_info->high_va_max = AMDGPU_GMC_HOLE_END | vm_size;
}
- dev_info->virtual_address_alignment = max((int)PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
+ dev_info->virtual_address_alignment = max_t(u32, PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
dev_info->pte_fragment_size = (1 << adev->vm_manager.fragment_size) * AMDGPU_GPU_PAGE_SIZE;
- dev_info->gart_page_size = AMDGPU_GPU_PAGE_SIZE;
+ dev_info->gart_page_size = max_t(u32, PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
dev_info->cu_active_number = adev->gfx.cu_info.number;
dev_info->cu_ao_mask = adev->gfx.cu_info.ao_cu_mask;
dev_info->ce_ram_size = adev->gfx.ce_ram_size;
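Because gart_page_size is now max(PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE), userspace
on 64K-page kernels has to align GTT mappings to the CPU page size instead of
a hard-coded 4K. A minimal userspace sketch, assuming the usual uapi header
and a hypothetical align_for_gart() helper:

	#include <stdint.h>
	#include "amdgpu_drm.h"	/* struct drm_amdgpu_info_device */

	static uint64_t align_for_gart(uint64_t size,
				       const struct drm_amdgpu_info_device *info)
	{
		uint64_t a = info->gart_page_size;

		/* round up to the page size the GART actually uses */
		return (size + a - 1) & ~(a - 1);
	}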
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h
index 1ae9bdae7311..11aa29933c1f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h
@@ -21,12 +21,16 @@
#ifndef __AMDGPU_MMHUB_H__
#define __AMDGPU_MMHUB_H__
-struct amdgpu_mmhub_funcs {
- void (*ras_init)(struct amdgpu_device *adev);
+struct amdgpu_mmhub_ras_funcs {
int (*ras_late_init)(struct amdgpu_device *adev);
+ void (*ras_fini)(struct amdgpu_device *adev);
void (*query_ras_error_count)(struct amdgpu_device *adev,
- void *ras_error_status);
+ void *ras_error_status);
+ void (*query_ras_error_status)(struct amdgpu_device *adev);
void (*reset_ras_error_count)(struct amdgpu_device *adev);
+};
+
+struct amdgpu_mmhub_funcs {
u64 (*get_fb_location)(struct amdgpu_device *adev);
void (*init)(struct amdgpu_device *adev);
int (*gart_enable)(struct amdgpu_device *adev);
@@ -40,12 +44,12 @@ struct amdgpu_mmhub_funcs {
uint64_t page_table_base);
void (*update_power_gating)(struct amdgpu_device *adev,
bool enable);
- void (*query_ras_error_status)(struct amdgpu_device *adev);
};
struct amdgpu_mmhub {
struct ras_common_if *ras_if;
const struct amdgpu_mmhub_funcs *funcs;
+ const struct amdgpu_mmhub_ras_funcs *ras_funcs;
};
int amdgpu_mmhub_ras_late_init(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
index 7c11bce4514b..25ee53545837 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
@@ -47,6 +47,17 @@ struct nbio_hdp_flush_reg {
u32 ref_and_mask_sdma7;
};
+struct amdgpu_nbio_ras_funcs {
+ void (*handle_ras_controller_intr_no_bifring)(struct amdgpu_device *adev);
+ void (*handle_ras_err_event_athub_intr_no_bifring)(struct amdgpu_device *adev);
+ int (*init_ras_controller_interrupt)(struct amdgpu_device *adev);
+ int (*init_ras_err_event_athub_interrupt)(struct amdgpu_device *adev);
+ void (*query_ras_error_count)(struct amdgpu_device *adev,
+ void *ras_error_status);
+ int (*ras_late_init)(struct amdgpu_device *adev);
+ void (*ras_fini)(struct amdgpu_device *adev);
+};
+
struct amdgpu_nbio_funcs {
const struct nbio_hdp_flush_reg *hdp_flush_reg;
u32 (*get_hdp_flush_req_offset)(struct amdgpu_device *adev);
@@ -79,13 +90,6 @@ struct amdgpu_nbio_funcs {
void (*ih_control)(struct amdgpu_device *adev);
void (*init_registers)(struct amdgpu_device *adev);
void (*remap_hdp_registers)(struct amdgpu_device *adev);
- void (*handle_ras_controller_intr_no_bifring)(struct amdgpu_device *adev);
- void (*handle_ras_err_event_athub_intr_no_bifring)(struct amdgpu_device *adev);
- int (*init_ras_controller_interrupt)(struct amdgpu_device *adev);
- int (*init_ras_err_event_athub_interrupt)(struct amdgpu_device *adev);
- void (*query_ras_error_count)(struct amdgpu_device *adev,
- void *ras_error_status);
- int (*ras_late_init)(struct amdgpu_device *adev);
void (*enable_aspm)(struct amdgpu_device *adev,
bool enable);
void (*program_aspm)(struct amdgpu_device *adev);
@@ -97,6 +101,7 @@ struct amdgpu_nbio {
struct amdgpu_irq_src ras_err_event_athub_irq;
struct ras_common_if *ras_if;
const struct amdgpu_nbio_funcs *funcs;
+ const struct amdgpu_nbio_ras_funcs *ras_funcs;
};
int amdgpu_nbio_ras_late_init(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index ac1bb5089260..1345f7eba011 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -77,6 +77,7 @@ static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);
+ struct amdgpu_bo_user *ubo;
if (bo->tbo.pin_count > 0)
amdgpu_bo_subtract_pin_size(bo);
@@ -94,7 +95,11 @@ static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
}
amdgpu_bo_unref(&bo->parent);
- kfree(bo->metadata);
+ if (bo->tbo.type == ttm_bo_type_device) {
+ ubo = to_amdgpu_bo_user(bo);
+ kfree(ubo->metadata);
+ }
+
kfree(bo);
}
@@ -248,6 +253,7 @@ int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
bp.flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
bp.type = ttm_bo_type_kernel;
bp.resv = NULL;
+ bp.bo_ptr_size = sizeof(struct amdgpu_bo);
if (!*bo_ptr) {
r = amdgpu_bo_create(adev, &bp, bo_ptr);
@@ -543,9 +549,10 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
if (!amdgpu_bo_validate_size(adev, size, bp->domain))
return -ENOMEM;
- *bo_ptr = NULL;
+ BUG_ON(bp->bo_ptr_size < sizeof(struct amdgpu_bo));
- bo = kzalloc(sizeof(struct amdgpu_bo), GFP_KERNEL);
+ *bo_ptr = NULL;
+ bo = kzalloc(bp->bo_ptr_size, GFP_KERNEL);
if (bo == NULL)
return -ENOMEM;
drm_gem_private_object_init(adev_to_drm(adev), &bo->tbo.base, size);
@@ -635,6 +642,7 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
AMDGPU_GEM_CREATE_SHADOW;
bp.type = ttm_bo_type_kernel;
bp.resv = bo->tbo.base.resv;
+ bp.bo_ptr_size = sizeof(struct amdgpu_bo);
r = amdgpu_bo_do_create(adev, &bp, &bo->shadow);
if (!r) {
@@ -669,6 +677,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
int r;
bp->flags = bp->flags & ~AMDGPU_GEM_CREATE_SHADOW;
+
r = amdgpu_bo_do_create(adev, bp, bo_ptr);
if (r)
return r;
@@ -691,6 +700,34 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
}
/**
+ * amdgpu_bo_create_user - create an &amdgpu_bo_user buffer object
+ * @adev: amdgpu device object
+ * @bp: parameters to be used for the buffer object
+ * @ubo_ptr: pointer to the buffer object pointer
+ *
+ * Create a BO to be used by a user application.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
+int amdgpu_bo_create_user(struct amdgpu_device *adev,
+ struct amdgpu_bo_param *bp,
+ struct amdgpu_bo_user **ubo_ptr)
+{
+ struct amdgpu_bo *bo_ptr;
+ int r;
+
+ bp->flags = bp->flags & ~AMDGPU_GEM_CREATE_SHADOW;
+ bp->bo_ptr_size = sizeof(struct amdgpu_bo_user);
+ r = amdgpu_bo_do_create(adev, bp, &bo_ptr);
+ if (r)
+ return r;
+
+ *ubo_ptr = to_amdgpu_bo_user(bo_ptr);
+ return r;
+}
+
+/**
* amdgpu_bo_validate - validate an &amdgpu_bo buffer object
* @bo: pointer to the buffer object
*
@@ -1024,13 +1061,10 @@ int amdgpu_bo_evict_vram(struct amdgpu_device *adev)
{
struct ttm_resource_manager *man;
- /* late 2.6.33 fix IGP hibernate - we need pm ops to do this correct */
-#ifndef CONFIG_HIBERNATION
- if (adev->flags & AMD_IS_APU) {
- /* Useless to evict on IGP chips */
+ if (adev->in_s3 && (adev->flags & AMD_IS_APU)) {
+ /* No need to evict vram on APUs for suspend to ram */
return 0;
}
-#endif
man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
return ttm_resource_manager_evict_all(&adev->mman.bdev, man);
@@ -1096,25 +1130,6 @@ void amdgpu_bo_fini(struct amdgpu_device *adev)
}
/**
- * amdgpu_bo_fbdev_mmap - mmap fbdev memory
- * @bo: &amdgpu_bo buffer object
- * @vma: vma as input from the fbdev mmap method
- *
- * Calls ttm_fbdev_mmap() to mmap fbdev memory if it is backed by a bo.
- *
- * Returns:
- * 0 for success or a negative error code on failure.
- */
-int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
- struct vm_area_struct *vma)
-{
- if (vma->vm_pgoff != 0)
- return -EACCES;
-
- return ttm_bo_mmap_obj(vma, &bo->tbo);
-}
-
-/**
* amdgpu_bo_set_tiling_flags - set tiling flags
* @bo: &amdgpu_bo buffer object
* @tiling_flags: new flags
@@ -1128,12 +1143,15 @@ int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ struct amdgpu_bo_user *ubo;
+ BUG_ON(bo->tbo.type == ttm_bo_type_kernel);
if (adev->family <= AMDGPU_FAMILY_CZ &&
AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT) > 6)
return -EINVAL;
- bo->tiling_flags = tiling_flags;
+ ubo = to_amdgpu_bo_user(bo);
+ ubo->tiling_flags = tiling_flags;
return 0;
}
@@ -1147,10 +1165,14 @@ int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
*/
void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
{
+ struct amdgpu_bo_user *ubo;
+
+ BUG_ON(bo->tbo.type == ttm_bo_type_kernel);
dma_resv_assert_held(bo->tbo.base.resv);
+ ubo = to_amdgpu_bo_user(bo);
if (tiling_flags)
- *tiling_flags = bo->tiling_flags;
+ *tiling_flags = ubo->tiling_flags;
}
/**
@@ -1169,13 +1191,16 @@ void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
int amdgpu_bo_set_metadata (struct amdgpu_bo *bo, void *metadata,
uint32_t metadata_size, uint64_t flags)
{
+ struct amdgpu_bo_user *ubo;
void *buffer;
+ BUG_ON(bo->tbo.type == ttm_bo_type_kernel);
+ ubo = to_amdgpu_bo_user(bo);
if (!metadata_size) {
- if (bo->metadata_size) {
- kfree(bo->metadata);
- bo->metadata = NULL;
- bo->metadata_size = 0;
+ if (ubo->metadata_size) {
+ kfree(ubo->metadata);
+ ubo->metadata = NULL;
+ ubo->metadata_size = 0;
}
return 0;
}
@@ -1187,10 +1212,10 @@ int amdgpu_bo_set_metadata (struct amdgpu_bo *bo, void *metadata,
if (buffer == NULL)
return -ENOMEM;
- kfree(bo->metadata);
- bo->metadata_flags = flags;
- bo->metadata = buffer;
- bo->metadata_size = metadata_size;
+ kfree(ubo->metadata);
+ ubo->metadata_flags = flags;
+ ubo->metadata = buffer;
+ ubo->metadata_size = metadata_size;
return 0;
}
@@ -1214,21 +1239,25 @@ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
size_t buffer_size, uint32_t *metadata_size,
uint64_t *flags)
{
+ struct amdgpu_bo_user *ubo;
+
if (!buffer && !metadata_size)
return -EINVAL;
+ BUG_ON(bo->tbo.type == ttm_bo_type_kernel);
+ ubo = to_amdgpu_bo_user(bo);
if (buffer) {
- if (buffer_size < bo->metadata_size)
+ if (buffer_size < ubo->metadata_size)
return -EINVAL;
- if (bo->metadata_size)
- memcpy(buffer, bo->metadata, bo->metadata_size);
+ if (ubo->metadata_size)
+ memcpy(buffer, ubo->metadata, ubo->metadata_size);
}
if (metadata_size)
- *metadata_size = bo->metadata_size;
+ *metadata_size = ubo->metadata_size;
if (flags)
- *flags = bo->metadata_flags;
+ *flags = ubo->metadata_flags;
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 54ceb065e546..2d1fefbe1e99 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -37,9 +37,12 @@
#define AMDGPU_BO_INVALID_OFFSET LONG_MAX
#define AMDGPU_BO_MAX_PLACEMENTS 3
+#define to_amdgpu_bo_user(abo) container_of((abo), struct amdgpu_bo_user, bo)
+
struct amdgpu_bo_param {
unsigned long size;
int byte_align;
+ u32 bo_ptr_size;
u32 domain;
u32 preferred_domain;
u64 flags;
@@ -89,10 +92,6 @@ struct amdgpu_bo {
struct ttm_buffer_object tbo;
struct ttm_bo_kmap_obj kmap;
u64 flags;
- u64 tiling_flags;
- u64 metadata_flags;
- void *metadata;
- u32 metadata_size;
unsigned prime_shared_count;
/* per VM structure for page tables and with virtual addresses */
struct amdgpu_vm_bo_base *vm_bo;
@@ -111,6 +110,15 @@ struct amdgpu_bo {
struct kgd_mem *kfd_bo;
};
+struct amdgpu_bo_user {
+ struct amdgpu_bo bo;
+ u64 tiling_flags;
+ u64 metadata_flags;
+ void *metadata;
+ u32 metadata_size;
+};
+
static inline struct amdgpu_bo *ttm_to_amdgpu_bo(struct ttm_buffer_object *tbo)
{
return container_of(tbo, struct amdgpu_bo, tbo);
@@ -254,6 +262,9 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
uint64_t offset, uint64_t size, uint32_t domain,
struct amdgpu_bo **bo_ptr, void **cpu_addr);
+int amdgpu_bo_create_user(struct amdgpu_device *adev,
+ struct amdgpu_bo_param *bp,
+ struct amdgpu_bo_user **ubo_ptr);
void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
void **cpu_addr);
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr);
@@ -268,8 +279,6 @@ void amdgpu_bo_unpin(struct amdgpu_bo *bo);
int amdgpu_bo_evict_vram(struct amdgpu_device *adev);
int amdgpu_bo_init(struct amdgpu_device *adev);
void amdgpu_bo_fini(struct amdgpu_device *adev);
-int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
- struct vm_area_struct *vma);
int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags);
void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags);
int amdgpu_bo_set_metadata (struct amdgpu_bo *bo, void *metadata,
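For illustration, a minimal sketch of the new allocation path for a
user-visible BO; the parameter values are placeholders, not taken from this
patch:

	static int example_create_user_bo(struct amdgpu_device *adev,
					  struct amdgpu_bo_user **ubo)
	{
		struct amdgpu_bo_param bp = {
			.size = PAGE_SIZE,
			.byte_align = PAGE_SIZE,
			.domain = AMDGPU_GEM_DOMAIN_GTT,
			.type = ttm_bo_type_device,
		};

		/* amdgpu_bo_create_user() sets bp.bo_ptr_size itself, so the
		 * kzalloc() in amdgpu_bo_do_create() reserves room for the
		 * tiling/metadata fields that moved into amdgpu_bo_user */
		return amdgpu_bo_create_user(adev, &bp, ubo);
	}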
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c
index 19c0a3655228..82e9ecf84352 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c
@@ -519,8 +519,10 @@ static int init_pmu_entry_by_type_and_add(struct amdgpu_pmu_entry *pmu_entry,
pmu_entry->pmu.attr_groups = kmemdup(attr_groups, sizeof(attr_groups),
GFP_KERNEL);
- if (!pmu_entry->pmu.attr_groups)
+ if (!pmu_entry->pmu.attr_groups) {
+ ret = -ENOMEM;
goto err_attr_group;
+ }
snprintf(pmu_name, PMU_NAME_SIZE, "%s_%d", pmu_entry->pmu_file_prefix,
adev_to_drm(pmu_entry->adev)->primary->index);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index bae304b0d67a..a09483beb968 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -328,8 +328,12 @@ psp_cmd_submit_buf(struct psp_context *psp,
static void psp_prep_tmr_cmd_buf(struct psp_context *psp,
struct psp_gfx_cmd_resp *cmd,
- uint64_t tmr_mc, uint32_t size)
+ uint64_t tmr_mc, struct amdgpu_bo *tmr_bo)
{
+ struct amdgpu_device *adev = psp->adev;
+ uint32_t size = amdgpu_bo_size(tmr_bo);
+ uint64_t tmr_pa = amdgpu_gmc_vram_pa(adev, tmr_bo);
+
if (amdgpu_sriov_vf(psp->adev))
cmd->cmd_id = GFX_CMD_ID_SETUP_VMR;
else
@@ -337,6 +341,9 @@ static void psp_prep_tmr_cmd_buf(struct psp_context *psp,
cmd->cmd.cmd_setup_tmr.buf_phy_addr_lo = lower_32_bits(tmr_mc);
cmd->cmd.cmd_setup_tmr.buf_phy_addr_hi = upper_32_bits(tmr_mc);
cmd->cmd.cmd_setup_tmr.buf_size = size;
+ cmd->cmd.cmd_setup_tmr.bitfield.virt_phy_addr = 1;
+ cmd->cmd.cmd_setup_tmr.system_phy_addr_lo = lower_32_bits(tmr_pa);
+ cmd->cmd.cmd_setup_tmr.system_phy_addr_hi = upper_32_bits(tmr_pa);
}
static void psp_prep_load_toc_cmd_buf(struct psp_gfx_cmd_resp *cmd,
@@ -407,16 +414,6 @@ static int psp_tmr_init(struct psp_context *psp)
AMDGPU_GEM_DOMAIN_VRAM,
&psp->tmr_bo, &psp->tmr_mc_addr, pptr);
- /* workaround the tmr_mc_addr:
- * PSP requires an address in FB aperture. Right now driver produce
- * tmr_mc_addr in the GART aperture. Convert it back to FB aperture
- * for PSP. Will revert it after we get a fix from PSP FW.
- */
- if (psp->adev->asic_type == CHIP_ALDEBARAN) {
- psp->tmr_mc_addr -= psp->adev->gmc.fb_start;
- psp->tmr_mc_addr += psp->adev->gmc.fb_start_original;
- }
-
return ret;
}
@@ -466,8 +463,7 @@ static int psp_tmr_load(struct psp_context *psp)
if (!cmd)
return -ENOMEM;
- psp_prep_tmr_cmd_buf(psp, cmd, psp->tmr_mc_addr,
- amdgpu_bo_size(psp->tmr_bo));
+ psp_prep_tmr_cmd_buf(psp, cmd, psp->tmr_mc_addr, psp->tmr_bo);
DRM_INFO("reserve 0x%lx from 0x%llx for PSP TMR\n",
amdgpu_bo_size(psp->tmr_bo), psp->tmr_mc_addr);
@@ -556,6 +552,24 @@ int psp_get_fw_attestation_records_addr(struct psp_context *psp,
return ret;
}
+static int psp_boot_config_set(struct amdgpu_device *adev)
+{
+ struct psp_context *psp = &adev->psp;
+ struct psp_gfx_cmd_resp *cmd = psp->cmd;
+
+ if (adev->asic_type != CHIP_SIENNA_CICHLID || amdgpu_sriov_vf(adev))
+ return 0;
+
+ memset(cmd, 0, sizeof(struct psp_gfx_cmd_resp));
+
+ cmd->cmd_id = GFX_CMD_ID_BOOT_CFG;
+ cmd->cmd.boot_cfg.sub_cmd = BOOTCFG_CMD_SET;
+ cmd->cmd.boot_cfg.boot_config = BOOT_CONFIG_GECC;
+ cmd->cmd.boot_cfg.boot_config_valid = BOOT_CONFIG_GECC;
+
+ return psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
+}
+
static int psp_rl_load(struct amdgpu_device *adev)
{
struct psp_context *psp = &adev->psp;
@@ -1912,6 +1926,11 @@ static int psp_hw_start(struct psp_context *psp)
return ret;
}
+ ret = psp_boot_config_set(adev);
+ if (ret)
+ DRM_WARN("PSP set boot config failed\n");
+
ret = psp_tmr_init(psp);
if (ret) {
DRM_ERROR("PSP tmr init failed!\n");
@@ -2146,9 +2165,13 @@ static int psp_load_smu_fw(struct psp_context *psp)
if (!ucode->fw || amdgpu_sriov_vf(psp->adev))
return 0;
-
- if (amdgpu_in_reset(adev) && ras && ras->supported &&
- adev->asic_type == CHIP_ARCTURUS) {
+ if ((amdgpu_in_reset(adev) &&
+ ras && ras->supported &&
+ (adev->asic_type == CHIP_ARCTURUS ||
+ adev->asic_type == CHIP_VEGA20)) ||
+ (adev->in_runpm &&
+ adev->asic_type >= CHIP_NAVI10 &&
+ adev->asic_type <= CHIP_NAVI12)) {
ret = amdgpu_dpm_set_mp1_state(adev, PP_MP1_STATE_UNLOAD);
if (ret) {
DRM_WARN("Failed to set MP1 state prepare for reload\n");
@@ -2201,6 +2224,22 @@ static bool fw_load_skip_check(struct psp_context *psp,
return false;
}
+int psp_load_fw_list(struct psp_context *psp,
+ struct amdgpu_firmware_info **ucode_list, int ucode_count)
+{
+ int ret = 0, i;
+ struct amdgpu_firmware_info *ucode;
+
+ for (i = 0; i < ucode_count; ++i) {
+ ucode = ucode_list[i];
+ psp_print_fw_hdr(psp, ucode);
+ ret = psp_execute_np_fw_load(psp, ucode);
+ if (ret)
+ return ret;
+ }
+ return ret;
+}
+
static int psp_np_fw_load(struct psp_context *psp)
{
int i, ret;
@@ -2967,7 +3006,7 @@ static ssize_t psp_usbc_pd_fw_sysfs_read(struct device *dev,
return ret;
}
- return snprintf(buf, PAGE_SIZE, "%x\n", fw_ver);
+ return sysfs_emit(buf, "%x\n", fw_ver);
}
static ssize_t psp_usbc_pd_fw_sysfs_write(struct device *dev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index 64f143390835..46a5328e00e0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -420,4 +420,7 @@ int psp_init_ta_microcode(struct psp_context *psp,
const char *chip_name);
int psp_get_fw_attestation_records_addr(struct psp_context *psp,
uint64_t *output_ptr);
+
+int psp_load_fw_list(struct psp_context *psp,
+ struct amdgpu_firmware_info **ucode_list, int ucode_count);
#endif
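A hedged sketch of a psp_load_fw_list() caller; the SDMA ucode indices are
illustrative and example_reload_sdma() is hypothetical:

	static int example_reload_sdma(struct psp_context *psp)
	{
		struct amdgpu_device *adev = psp->adev;
		struct amdgpu_firmware_info *list[] = {
			&adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0],
			&adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA1],
		};

		/* prints each firmware header and submits the loads in
		 * order, returning on the first failure */
		return psp_load_fw_list(psp, list, ARRAY_SIZE(list));
	}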
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index 0e16683876aa..b0d2fc9454ca 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -99,6 +99,49 @@ static bool amdgpu_ras_get_error_query_ready(struct amdgpu_device *adev)
return false;
}
+static int amdgpu_reserve_page_direct(struct amdgpu_device *adev, uint64_t address)
+{
+ struct ras_err_data err_data = {0, 0, 0, NULL};
+ struct eeprom_table_record err_rec;
+
+ if ((address >= adev->gmc.mc_vram_size) ||
+ (address >= RAS_UMC_INJECT_ADDR_LIMIT)) {
+ dev_warn(adev->dev,
+ "RAS WARN: input address 0x%llx is invalid.\n",
+ address);
+ return -EINVAL;
+ }
+
+ if (amdgpu_ras_check_bad_page(adev, address)) {
+ dev_warn(adev->dev,
+ "RAS WARN: 0x%llx has already been marked as bad page!\n",
+ address);
+ return 0;
+ }
+
+ memset(&err_rec, 0x0, sizeof(struct eeprom_table_record));
+
+ err_rec.address = address;
+ err_rec.retired_page = address >> AMDGPU_GPU_PAGE_SHIFT;
+ err_rec.ts = (uint64_t)ktime_get_real_seconds();
+ err_rec.err_type = AMDGPU_RAS_EEPROM_ERR_NON_RECOVERABLE;
+
+ err_data.err_addr = &err_rec;
+ err_data.err_addr_cnt = 1;
+
+ if (amdgpu_bad_page_threshold != 0) {
+ amdgpu_ras_add_bad_pages(adev, err_data.err_addr,
+ err_data.err_addr_cnt);
+ amdgpu_ras_save_bad_pages(adev);
+ }
+
+ dev_warn(adev->dev, "WARNING: THIS IS ONLY FOR TEST PURPOSES AND WILL CORRUPT RAS EEPROM\n");
+ dev_warn(adev->dev, "Clear EEPROM:\n");
+ dev_warn(adev->dev, " echo 1 > /sys/kernel/debug/dri/0/ras/ras_eeprom_reset\n");
+
+ return 0;
+}
+
static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
{
@@ -178,11 +221,24 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
op = 1;
else if (sscanf(str, "inject %32s %8s", block_name, err) == 2)
op = 2;
+ else if (strstr(str, "retire_page") != NULL)
+ op = 3;
else if (str[0] && str[1] && str[2] && str[3])
/* ascii string, but commands are not matched. */
return -EINVAL;
if (op != -1) {
+ if (op == 3) {
+ if (sscanf(str, "%*s 0x%llx", &address) != 1 &&
+ sscanf(str, "%*s %llu", &address) != 1)
+ return -EINVAL;
+
+ data->op = op;
+ data->inject.address = address;
+
+ return 0;
+ }
+
if (amdgpu_ras_find_block_id_by_name(block_name, &block_id))
return -EINVAL;
@@ -198,11 +254,11 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
data->op = op;
if (op == 2) {
- if (sscanf(str, "%*s %*s %*s %u %llu %llu",
- &sub_block, &address, &value) != 3)
- if (sscanf(str, "%*s %*s %*s 0x%x 0x%llx 0x%llx",
- &sub_block, &address, &value) != 3)
- return -EINVAL;
+ if (sscanf(str, "%*s %*s %*s 0x%x 0x%llx 0x%llx",
+ &sub_block, &address, &value) != 3 &&
+ sscanf(str, "%*s %*s %*s %u %llu %llu",
+ &sub_block, &address, &value) != 3)
+ return -EINVAL;
data->head.sub_block_index = sub_block;
data->inject.address = address;
data->inject.value = value;
@@ -221,7 +277,7 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
/**
* DOC: AMDGPU RAS debugfs control interface
*
- * It accepts struct ras_debug_if who has two members.
+ * The control interface accepts struct ras_debug_if which has two members.
*
* First member: ras_debug_if::head or ras_debug_if::inject.
*
@@ -246,32 +302,33 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
*
* How to use the interface?
*
- * Programs
+ * In a program
*
- * Copy the struct ras_debug_if in your codes and initialize it.
- * Write the struct to the control node.
+ * Copy the struct ras_debug_if in your code and initialize it.
+ * Write the struct to the control interface.
*
- * Shells
+ * From a shell
*
* .. code-block:: bash
*
- * echo op block [error [sub_block address value]] > .../ras/ras_ctrl
+ * echo "disable <block>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
+ * echo "enable <block> <error>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
+ * echo "inject <block> <error> <sub-block> <address> <value> > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
*
- * Parameters:
+ * Where N is the index of the card you want to affect.
*
- * op: disable, enable, inject
- * disable: only block is needed
- * enable: block and error are needed
- * inject: error, address, value are needed
- * block: umc, sdma, gfx, .........
+ * "disable" requires only the block.
+ * "enable" requires the block and error type.
+ * "inject" requires the block, error type, address, and value.
+ * The block is one of: umc, sdma, gfx, etc.
* see ras_block_string[] for details
- * error: ue, ce
- * ue: multi_uncorrectable
- * ce: single_correctable
- * sub_block:
- * sub block index, pass 0 if there is no sub block
+ * The error type is one of: ue, ce, where
+ * ue is multi-uncorrectable
+ * ce is single-correctable
+ * The sub-block is the sub-block index; pass 0 if there is no sub-block.
+ * The address and value are hexadecimal numbers; the leading 0x is optional.
*
- * here are some examples for bash commands:
+ * For instance,
*
* .. code-block:: bash
*
@@ -279,17 +336,17 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
* echo inject umc ce 0 0 0 > /sys/kernel/debug/dri/0/ras/ras_ctrl
* echo disable umc > /sys/kernel/debug/dri/0/ras/ras_ctrl
*
- * How to check the result?
+ * How to check the result of the operation?
*
- * For disable/enable, please check ras features at
+ * To check disable/enable, see the "ras" features at
* /sys/class/drm/card[0/1/2...]/device/ras/features
*
- * For inject, please check corresponding err count at
- * /sys/class/drm/card[0/1/2...]/device/ras/[gfx/sdma/...]_err_count
+ * To check inject, see the corresponding error count at
+ * /sys/class/drm/card[0/1/2...]/device/ras/[gfx|sdma|umc|...]_err_count
*
* .. note::
* Operations are only allowed on blocks which are supported.
- * Please check ras mask at /sys/module/amdgpu/parameters/ras_mask
+ * Check the "ras" mask at /sys/module/amdgpu/parameters/ras_mask
* to see which blocks support RAS on a particular asic.
*
*/
@@ -310,6 +367,14 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user *
if (ret)
return -EINVAL;
+ if (data.op == 3) {
+ ret = amdgpu_reserve_page_direct(adev, data.inject.address);
+ if (!ret)
+ return size;
+ else
+ return ret;
+ }
+
if (!amdgpu_ras_is_supported(adev, data.head.block))
return -EINVAL;
@@ -431,15 +496,19 @@ static ssize_t amdgpu_ras_sysfs_read(struct device *dev,
};
if (!amdgpu_ras_get_error_query_ready(obj->adev))
- return snprintf(buf, PAGE_SIZE,
- "Query currently inaccessible\n");
+ return sysfs_emit(buf, "Query currently inaccessible\n");
if (amdgpu_ras_query_error_status(obj->adev, &info))
return -EINVAL;
- return snprintf(buf, PAGE_SIZE, "%s: %lu\n%s: %lu\n",
- "ue", info.ue_count,
- "ce", info.ce_count);
+
+ if (obj->adev->asic_type == CHIP_ALDEBARAN) {
+ if (amdgpu_ras_reset_error_status(obj->adev, info.head.block))
+ DRM_WARN("Failed to reset error counter and error status");
+ }
+
+ return sysfs_emit(buf, "%s: %lu\n%s: %lu\n", "ue", info.ue_count,
+ "ce", info.ce_count);
}
/* obj begin */
@@ -449,11 +518,10 @@ static ssize_t amdgpu_ras_sysfs_read(struct device *dev,
static inline void put_obj(struct ras_manager *obj)
{
- if (obj && --obj->use == 0)
+ if (obj && (--obj->use == 0))
list_del(&obj->node);
- if (obj && obj->use < 0) {
- DRM_ERROR("RAS ERROR: Unbalance obj(%s) use\n", obj->head.name);
- }
+ if (obj && (obj->use < 0))
+ DRM_ERROR("RAS ERROR: Unbalance obj(%s) use\n", obj->head.name);
}
/* make one obj and return it. */
@@ -777,13 +845,15 @@ int amdgpu_ras_query_error_status(struct amdgpu_device *adev,
switch (info->head.block) {
case AMDGPU_RAS_BLOCK__UMC:
- if (adev->umc.funcs->query_ras_error_count)
- adev->umc.funcs->query_ras_error_count(adev, &err_data);
+ if (adev->umc.ras_funcs &&
+ adev->umc.ras_funcs->query_ras_error_count)
+ adev->umc.ras_funcs->query_ras_error_count(adev, &err_data);
/* umc query_ras_error_address is also responsible for clearing
* error status
*/
- if (adev->umc.funcs->query_ras_error_address)
- adev->umc.funcs->query_ras_error_address(adev, &err_data);
+ if (adev->umc.ras_funcs &&
+ adev->umc.ras_funcs->query_ras_error_address)
+ adev->umc.ras_funcs->query_ras_error_address(adev, &err_data);
break;
case AMDGPU_RAS_BLOCK__SDMA:
if (adev->sdma.funcs->query_ras_error_count) {
@@ -793,25 +863,32 @@ int amdgpu_ras_query_error_status(struct amdgpu_device *adev,
}
break;
case AMDGPU_RAS_BLOCK__GFX:
- if (adev->gfx.funcs->query_ras_error_count)
- adev->gfx.funcs->query_ras_error_count(adev, &err_data);
+ if (adev->gfx.ras_funcs &&
+ adev->gfx.ras_funcs->query_ras_error_count)
+ adev->gfx.ras_funcs->query_ras_error_count(adev, &err_data);
- if (adev->gfx.funcs->query_ras_error_status)
- adev->gfx.funcs->query_ras_error_status(adev);
+ if (adev->gfx.ras_funcs &&
+ adev->gfx.ras_funcs->query_ras_error_status)
+ adev->gfx.ras_funcs->query_ras_error_status(adev);
break;
case AMDGPU_RAS_BLOCK__MMHUB:
- if (adev->mmhub.funcs->query_ras_error_count)
- adev->mmhub.funcs->query_ras_error_count(adev, &err_data);
+ if (adev->mmhub.ras_funcs &&
+ adev->mmhub.ras_funcs->query_ras_error_count)
+ adev->mmhub.ras_funcs->query_ras_error_count(adev, &err_data);
- if (adev->mmhub.funcs->query_ras_error_status)
- adev->mmhub.funcs->query_ras_error_status(adev);
+ if (adev->mmhub.ras_funcs &&
+ adev->mmhub.ras_funcs->query_ras_error_status)
+ adev->mmhub.ras_funcs->query_ras_error_status(adev);
break;
case AMDGPU_RAS_BLOCK__PCIE_BIF:
- if (adev->nbio.funcs->query_ras_error_count)
- adev->nbio.funcs->query_ras_error_count(adev, &err_data);
+ if (adev->nbio.ras_funcs &&
+ adev->nbio.ras_funcs->query_ras_error_count)
+ adev->nbio.ras_funcs->query_ras_error_count(adev, &err_data);
break;
case AMDGPU_RAS_BLOCK__XGMI_WAFL:
- amdgpu_xgmi_query_ras_error_count(adev, &err_data);
+ if (adev->gmc.xgmi.ras_funcs &&
+ adev->gmc.xgmi.ras_funcs->query_ras_error_count)
+ adev->gmc.xgmi.ras_funcs->query_ras_error_count(adev, &err_data);
break;
default:
break;
@@ -848,15 +925,18 @@ int amdgpu_ras_reset_error_status(struct amdgpu_device *adev,
switch (block) {
case AMDGPU_RAS_BLOCK__GFX:
- if (adev->gfx.funcs->reset_ras_error_count)
- adev->gfx.funcs->reset_ras_error_count(adev);
+ if (adev->gfx.ras_funcs &&
+ adev->gfx.ras_funcs->reset_ras_error_count)
+ adev->gfx.ras_funcs->reset_ras_error_count(adev);
- if (adev->gfx.funcs->reset_ras_error_status)
- adev->gfx.funcs->reset_ras_error_status(adev);
+ if (adev->gfx.ras_funcs &&
+ adev->gfx.ras_funcs->reset_ras_error_status)
+ adev->gfx.ras_funcs->reset_ras_error_status(adev);
break;
case AMDGPU_RAS_BLOCK__MMHUB:
- if (adev->mmhub.funcs->reset_ras_error_count)
- adev->mmhub.funcs->reset_ras_error_count(adev);
+ if (adev->mmhub.ras_funcs &&
+ adev->mmhub.ras_funcs->reset_ras_error_count)
+ adev->mmhub.ras_funcs->reset_ras_error_count(adev);
break;
case AMDGPU_RAS_BLOCK__SDMA:
if (adev->sdma.funcs->reset_ras_error_count)
@@ -921,12 +1001,14 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev,
switch (info->head.block) {
case AMDGPU_RAS_BLOCK__GFX:
- if (adev->gfx.funcs->ras_error_inject)
- ret = adev->gfx.funcs->ras_error_inject(adev, info);
+ if (adev->gfx.ras_funcs &&
+ adev->gfx.ras_funcs->ras_error_inject)
+ ret = adev->gfx.ras_funcs->ras_error_inject(adev, info);
else
ret = -EINVAL;
break;
case AMDGPU_RAS_BLOCK__UMC:
+ case AMDGPU_RAS_BLOCK__SDMA:
case AMDGPU_RAS_BLOCK__MMHUB:
case AMDGPU_RAS_BLOCK__PCIE_BIF:
ret = psp_ras_trigger_error(&adev->psp, &block_info);
@@ -1191,6 +1273,8 @@ static struct dentry *amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *
&amdgpu_ras_debugfs_ctrl_ops);
debugfs_create_file("ras_eeprom_reset", S_IWUGO | S_IRUGO, dir, adev,
&amdgpu_ras_debugfs_eeprom_ops);
+ debugfs_create_u32("bad_page_cnt_threshold", 0444, dir,
+ &con->bad_page_cnt_threshold);
/*
* After one uncorrectable error happens, usually GPU recovery will
@@ -1508,12 +1592,14 @@ static void amdgpu_ras_error_status_query(struct amdgpu_device *adev,
*/
switch (info->head.block) {
case AMDGPU_RAS_BLOCK__GFX:
- if (adev->gfx.funcs->query_ras_error_status)
- adev->gfx.funcs->query_ras_error_status(adev);
+ if (adev->gfx.ras_funcs &&
+ adev->gfx.ras_funcs->query_ras_error_status)
+ adev->gfx.ras_funcs->query_ras_error_status(adev);
break;
case AMDGPU_RAS_BLOCK__MMHUB:
- if (adev->mmhub.funcs->query_ras_error_status)
- adev->mmhub.funcs->query_ras_error_status(adev);
+ if (adev->mmhub.ras_funcs &&
+ adev->mmhub.ras_funcs->query_ras_error_status)
+ adev->mmhub.ras_funcs->query_ras_error_status(adev);
break;
default:
break;
@@ -1933,15 +2019,13 @@ int amdgpu_ras_request_reset_on_boot(struct amdgpu_device *adev,
return 0;
}
-static int amdgpu_ras_check_asic_type(struct amdgpu_device *adev)
+static bool amdgpu_ras_asic_supported(struct amdgpu_device *adev)
{
- if (adev->asic_type != CHIP_VEGA10 &&
- adev->asic_type != CHIP_VEGA20 &&
- adev->asic_type != CHIP_ARCTURUS &&
- adev->asic_type != CHIP_SIENNA_CICHLID)
- return 1;
- else
- return 0;
+ return adev->asic_type == CHIP_VEGA10 ||
+ adev->asic_type == CHIP_VEGA20 ||
+ adev->asic_type == CHIP_ARCTURUS ||
+ adev->asic_type == CHIP_ALDEBARAN ||
+ adev->asic_type == CHIP_SIENNA_CICHLID;
}
/*
@@ -1960,22 +2044,32 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev,
*supported = 0;
if (amdgpu_sriov_vf(adev) || !adev->is_atom_fw ||
- amdgpu_ras_check_asic_type(adev))
+ !amdgpu_ras_asic_supported(adev))
return;
- if (amdgpu_atomfirmware_mem_ecc_supported(adev)) {
- dev_info(adev->dev, "MEM ECC is active.\n");
- *hw_supported |= (1 << AMDGPU_RAS_BLOCK__UMC |
- 1 << AMDGPU_RAS_BLOCK__DF);
- } else
- dev_info(adev->dev, "MEM ECC is not presented.\n");
+ if (!adev->gmc.xgmi.connected_to_cpu) {
+ if (amdgpu_atomfirmware_mem_ecc_supported(adev)) {
+ dev_info(adev->dev, "MEM ECC is active.\n");
+ *hw_supported |= (1 << AMDGPU_RAS_BLOCK__UMC |
+ 1 << AMDGPU_RAS_BLOCK__DF);
+ } else {
+ dev_info(adev->dev, "MEM ECC is not presented.\n");
+ }
- if (amdgpu_atomfirmware_sram_ecc_supported(adev)) {
- dev_info(adev->dev, "SRAM ECC is active.\n");
- *hw_supported |= ~(1 << AMDGPU_RAS_BLOCK__UMC |
- 1 << AMDGPU_RAS_BLOCK__DF);
- } else
- dev_info(adev->dev, "SRAM ECC is not presented.\n");
+ if (amdgpu_atomfirmware_sram_ecc_supported(adev)) {
+ dev_info(adev->dev, "SRAM ECC is active.\n");
+ *hw_supported |= ~(1 << AMDGPU_RAS_BLOCK__UMC |
+ 1 << AMDGPU_RAS_BLOCK__DF);
+ } else {
+ dev_info(adev->dev, "SRAM ECC is not presented.\n");
+ }
+ } else {
+ /* the driver only manages the RAS features of a few IP blocks
+ * when the GPU is connected to the CPU through XGMI */
+ *hw_supported |= (1 << AMDGPU_RAS_BLOCK__GFX |
+ 1 << AMDGPU_RAS_BLOCK__SDMA |
+ 1 << AMDGPU_RAS_BLOCK__MMHUB);
+ }
/* hw_supported needs to be aligned with RAS block mask. */
*hw_supported &= AMDGPU_RAS_BLOCK_MASK;
@@ -2024,14 +2118,31 @@ int amdgpu_ras_init(struct amdgpu_device *adev)
/* Might need get this flag from vbios. */
con->flags = RAS_DEFAULT_FLAGS;
- if (adev->nbio.funcs->init_ras_controller_interrupt) {
- r = adev->nbio.funcs->init_ras_controller_interrupt(adev);
+ /* initialize the nbio ras functions ahead of any other
+ * ras functions so the hardware fatal error interrupt
+ * can be enabled as early as possible */
+ switch (adev->asic_type) {
+ case CHIP_VEGA20:
+ case CHIP_ARCTURUS:
+ case CHIP_ALDEBARAN:
+ if (!adev->gmc.xgmi.connected_to_cpu)
+ adev->nbio.ras_funcs = &nbio_v7_4_ras_funcs;
+ break;
+ default:
+ /* nbio ras is not available */
+ break;
+ }
+
+ if (adev->nbio.ras_funcs &&
+ adev->nbio.ras_funcs->init_ras_controller_interrupt) {
+ r = adev->nbio.ras_funcs->init_ras_controller_interrupt(adev);
if (r)
goto release_con;
}
- if (adev->nbio.funcs->init_ras_err_event_athub_interrupt) {
- r = adev->nbio.funcs->init_ras_err_event_athub_interrupt(adev);
+ if (adev->nbio.ras_funcs &&
+ adev->nbio.ras_funcs->init_ras_err_event_athub_interrupt) {
+ r = adev->nbio.ras_funcs->init_ras_err_event_athub_interrupt(adev);
if (r)
goto release_con;
}
@@ -2052,6 +2163,32 @@ release_con:
return r;
}
+static int amdgpu_persistent_edc_harvesting_supported(struct amdgpu_device *adev)
+{
+ if (adev->gmc.xgmi.connected_to_cpu)
+ return 1;
+ return 0;
+}
+
+static int amdgpu_persistent_edc_harvesting(struct amdgpu_device *adev,
+ struct ras_common_if *ras_block)
+{
+ struct ras_query_if info = {
+ .head = *ras_block,
+ };
+
+ if (!amdgpu_persistent_edc_harvesting_supported(adev))
+ return 0;
+
+ if (amdgpu_ras_query_error_status(adev, &info) != 0)
+ DRM_WARN("RAS init harvest failure");
+
+ if (amdgpu_ras_reset_error_status(adev, ras_block->block) != 0)
+ DRM_WARN("RAS init harvest reset failure");
+
+ return 0;
+}
+
/* helper function to handle common stuff in ip late init phase */
int amdgpu_ras_late_init(struct amdgpu_device *adev,
struct ras_common_if *ras_block,
@@ -2081,6 +2218,9 @@ int amdgpu_ras_late_init(struct amdgpu_device *adev,
return r;
}
+ /* check for errors on ASICs that support persistent EDC harvesting across a warm reset */
+ amdgpu_persistent_edc_harvesting(adev, ras_block);
+
/* in resume phase, no need to create ras fs node */
if (adev->in_suspend || amdgpu_in_reset(adev))
return 0;
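The new "retire_page" op is driven through the same ras_ctrl debugfs node as
the existing commands. A minimal userspace sketch (the card index and address
are placeholders):

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		const char *cmd = "retire_page 0x1000\n";
		int fd = open("/sys/kernel/debug/dri/0/ras/ras_ctrl", O_WRONLY);

		/* amdgpu_reserve_page_direct() validates the address and
		 * records it through the bad-page mechanism */
		if (fd < 0 || write(fd, cmd, strlen(cmd)) < 0)
			perror("ras_ctrl");
		if (fd >= 0)
			close(fd);
		return 0;
	}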
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
index a05dbbbd9803..f40c871da0c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
@@ -31,6 +31,7 @@
#define EEPROM_I2C_TARGET_ADDR_ARCTURUS 0xA8
#define EEPROM_I2C_TARGET_ADDR_ARCTURUS_D342 0xA0
#define EEPROM_I2C_TARGET_ADDR_SIENNA_CICHLID 0xA0
+#define EEPROM_I2C_TARGET_ADDR_ALDEBARAN 0xA0
/*
* The 2 macros bellow represent the actual size in bytes that
@@ -64,7 +65,8 @@ static bool __is_ras_eeprom_supported(struct amdgpu_device *adev)
{
if ((adev->asic_type == CHIP_VEGA20) ||
(adev->asic_type == CHIP_ARCTURUS) ||
- (adev->asic_type == CHIP_SIENNA_CICHLID))
+ (adev->asic_type == CHIP_SIENNA_CICHLID) ||
+ (adev->asic_type == CHIP_ALDEBARAN))
return true;
return false;
@@ -106,6 +108,10 @@ static bool __get_eeprom_i2c_addr(struct amdgpu_device *adev,
*i2c_addr = EEPROM_I2C_TARGET_ADDR_SIENNA_CICHLID;
break;
+ case CHIP_ALDEBARAN:
+ *i2c_addr = EEPROM_I2C_TARGET_ADDR_ALDEBARAN;
+ break;
+
default:
return false;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
index b49a61d07d60..40f2adf305bc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
@@ -64,7 +64,7 @@ static inline void amdgpu_res_first(struct ttm_resource *res,
BUG_ON(start + size > res->num_pages << PAGE_SHIFT);
node = res->mm_node;
- while (start > node->size << PAGE_SHIFT)
+ while (start >= node->size << PAGE_SHIFT)
start -= node++->size << PAGE_SHIFT;
cur->start = (node->start << PAGE_SHIFT) + start;
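The '>' to '>=' change fixes an off-by-one at node boundaries: a start offset
of exactly one node must advance to the next node. A standalone re-creation of
the walk with hypothetical values:

	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_SHIFT 12
	struct node { uint64_t start, size; };	/* size in pages */

	int main(void)
	{
		struct node nodes[] = { { 0, 4 }, { 8, 4 } }, *node = nodes;
		uint64_t start = 4ull << PAGE_SHIFT; /* first byte of node 1 */

		while (start >= node->size << PAGE_SHIFT) /* old code: '>' */
			start -= node++->size << PAGE_SHIFT;

		/* prints node.start=8 offset=0; with '>' the loop would stop
		 * on node 0 with an offset one full node out of range */
		printf("node.start=%llu offset=%llu\n",
		       (unsigned long long)node->start,
		       (unsigned long long)start);
		return 0;
	}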
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c
new file mode 100644
index 000000000000..02afd4115675
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c
@@ -0,0 +1,98 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "amdgpu_reset.h"
+#include "aldebaran.h"
+
+int amdgpu_reset_add_handler(struct amdgpu_reset_control *reset_ctl,
+ struct amdgpu_reset_handler *handler)
+{
+ /* TODO: Check if handler exists? */
+ list_add_tail(&handler->handler_list, &reset_ctl->reset_handlers);
+ return 0;
+}
+
+int amdgpu_reset_init(struct amdgpu_device *adev)
+{
+ int ret = 0;
+
+ switch (adev->asic_type) {
+ case CHIP_ALDEBARAN:
+ ret = aldebaran_reset_init(adev);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+int amdgpu_reset_fini(struct amdgpu_device *adev)
+{
+ int ret = 0;
+
+ switch (adev->asic_type) {
+ case CHIP_ALDEBARAN:
+ ret = aldebaran_reset_fini(adev);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+int amdgpu_reset_prepare_hwcontext(struct amdgpu_device *adev,
+ struct amdgpu_reset_context *reset_context)
+{
+ struct amdgpu_reset_handler *reset_handler = NULL;
+
+ if (adev->reset_cntl && adev->reset_cntl->get_reset_handler)
+ reset_handler = adev->reset_cntl->get_reset_handler(
+ adev->reset_cntl, reset_context);
+ if (!reset_handler)
+ return -ENOSYS;
+
+ return reset_handler->prepare_hwcontext(adev->reset_cntl,
+ reset_context);
+}
+
+int amdgpu_reset_perform_reset(struct amdgpu_device *adev,
+ struct amdgpu_reset_context *reset_context)
+{
+ int ret;
+ struct amdgpu_reset_handler *reset_handler = NULL;
+
+ if (adev->reset_cntl)
+ reset_handler = adev->reset_cntl->get_reset_handler(
+ adev->reset_cntl, reset_context);
+ if (!reset_handler)
+ return -ENOSYS;
+
+ ret = reset_handler->perform_reset(adev->reset_cntl, reset_context);
+ if (ret)
+ return ret;
+
+ return reset_handler->restore_hwcontext(adev->reset_cntl,
+ reset_context);
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
new file mode 100644
index 000000000000..e00d38d9160a
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_RESET_H__
+#define __AMDGPU_RESET_H__
+
+#include "amdgpu.h"
+
+enum AMDGPU_RESET_FLAGS {
+
+ AMDGPU_NEED_FULL_RESET = 0,
+ AMDGPU_SKIP_HW_RESET = 1,
+};
+
+struct amdgpu_reset_context {
+ enum amd_reset_method method;
+ struct amdgpu_device *reset_req_dev;
+ struct amdgpu_job *job;
+ struct amdgpu_hive_info *hive;
+ unsigned long flags;
+};
+
+struct amdgpu_reset_handler {
+ enum amd_reset_method reset_method;
+ struct list_head handler_list;
+ int (*prepare_env)(struct amdgpu_reset_control *reset_ctl,
+ struct amdgpu_reset_context *context);
+ int (*prepare_hwcontext)(struct amdgpu_reset_control *reset_ctl,
+ struct amdgpu_reset_context *context);
+ int (*perform_reset)(struct amdgpu_reset_control *reset_ctl,
+ struct amdgpu_reset_context *context);
+ int (*restore_hwcontext)(struct amdgpu_reset_control *reset_ctl,
+ struct amdgpu_reset_context *context);
+ int (*restore_env)(struct amdgpu_reset_control *reset_ctl,
+ struct amdgpu_reset_context *context);
+
+ int (*do_reset)(struct amdgpu_device *adev);
+};
+
+struct amdgpu_reset_control {
+ void *handle;
+ struct work_struct reset_work;
+ struct mutex reset_lock;
+ struct list_head reset_handlers;
+ atomic_t in_reset;
+ enum amd_reset_method active_reset;
+ struct amdgpu_reset_handler *(*get_reset_handler)(
+ struct amdgpu_reset_control *reset_ctl,
+ struct amdgpu_reset_context *context);
+ void (*async_reset)(struct work_struct *work);
+};
+
+int amdgpu_reset_init(struct amdgpu_device *adev);
+int amdgpu_reset_fini(struct amdgpu_device *adev);
+
+int amdgpu_reset_prepare_hwcontext(struct amdgpu_device *adev,
+ struct amdgpu_reset_context *reset_context);
+
+int amdgpu_reset_perform_reset(struct amdgpu_device *adev,
+ struct amdgpu_reset_context *reset_context);
+
+int amdgpu_reset_add_handler(struct amdgpu_reset_control *reset_ctl,
+ struct amdgpu_reset_handler *handler);
+
+#endif
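A hedged sketch of how an ASIC backend (such as the aldebaran code this series
adds) is expected to plug into the framework; every example_* symbol is
hypothetical and assumed to be defined elsewhere:

	static struct amdgpu_reset_handler example_mode2_handler = {
		.reset_method = AMD_RESET_METHOD_MODE2,
		.prepare_hwcontext = example_prepare_hwcontext,
		.perform_reset = example_perform_reset,
		.restore_hwcontext = example_restore_hwcontext,
	};

	int example_reset_init(struct amdgpu_device *adev)
	{
		struct amdgpu_reset_control *ctl;

		ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);
		if (!ctl)
			return -ENOMEM;

		ctl->handle = adev;
		INIT_LIST_HEAD(&ctl->reset_handlers);
		ctl->get_reset_handler = example_get_reset_handler;

		/* amdgpu_reset_prepare_hwcontext() and _perform_reset()
		 * locate this handler through get_reset_handler() */
		amdgpu_reset_add_handler(ctl, &example_mode2_handler);
		adev->reset_cntl = ctl;
		return 0;
	}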
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index b644c78475fd..688624ebe421 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -164,7 +164,8 @@ void amdgpu_ring_undo(struct amdgpu_ring *ring)
*/
int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
unsigned int max_dw, struct amdgpu_irq_src *irq_src,
- unsigned int irq_type, unsigned int hw_prio)
+ unsigned int irq_type, unsigned int hw_prio,
+ atomic_t *sched_score)
{
int r;
int sched_hw_submission = amdgpu_sched_hw_submission;
@@ -189,7 +190,8 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
ring->adev = adev;
ring->idx = adev->num_rings++;
adev->rings[ring->idx] = ring;
- r = amdgpu_fence_driver_init_ring(ring, sched_hw_submission);
+ r = amdgpu_fence_driver_init_ring(ring, sched_hw_submission,
+ sched_score);
if (r)
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index 56acec1075ac..ca1622835296 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -111,7 +111,8 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev);
void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring);
int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
- unsigned num_hw_submission);
+ unsigned num_hw_submission,
+ atomic_t *sched_score);
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
struct amdgpu_irq_src *irq_src,
unsigned irq_type);
@@ -282,7 +283,8 @@ void amdgpu_ring_commit(struct amdgpu_ring *ring);
void amdgpu_ring_undo(struct amdgpu_ring *ring);
int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
unsigned int ring_size, struct amdgpu_irq_src *irq_src,
- unsigned int irq_type, unsigned int prio);
+ unsigned int irq_type, unsigned int prio,
+ atomic_t *sched_score);
void amdgpu_ring_fini(struct amdgpu_ring *ring);
void amdgpu_ring_emit_reg_write_reg_wait_helper(struct amdgpu_ring *ring,
uint32_t reg0, uint32_t val0,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
index aeaaae713c59..4fc2ce8ce8ab 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
@@ -127,7 +127,8 @@ struct amdgpu_rlc_funcs {
void (*reset)(struct amdgpu_device *adev);
void (*start)(struct amdgpu_device *adev);
void (*update_spm_vmid)(struct amdgpu_device *adev, unsigned vmid);
- void (*rlcg_wreg)(struct amdgpu_device *adev, u32 offset, u32 v);
+ void (*rlcg_wreg)(struct amdgpu_device *adev, u32 offset, u32 v, u32 flag);
+ u32 (*rlcg_rreg)(struct amdgpu_device *adev, u32 offset, u32 flag);
bool (*is_rlcg_access_range)(struct amdgpu_device *adev, uint32_t reg);
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c
index 9cf856c94f94..5369c8dd0764 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c
@@ -95,9 +95,7 @@ static ssize_t amdgpu_securedisplay_debugfs_write(struct file *f, const char __u
struct drm_device *dev = adev_to_drm(adev);
uint32_t phy_id;
uint32_t op;
- int i;
char str[64];
- char i2c_output[256];
int ret;
if (*pos || size > sizeof(str) - 1)
@@ -139,11 +137,9 @@ static ssize_t amdgpu_securedisplay_debugfs_write(struct file *f, const char __u
ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC);
if (!ret) {
if (securedisplay_cmd->status == TA_SECUREDISPLAY_STATUS__SUCCESS) {
- memset(i2c_output, 0, sizeof(i2c_output));
- for (i = 0; i < TA_SECUREDISPLAY_I2C_BUFFER_SIZE; i++)
- sprintf(i2c_output, "%s 0x%X", i2c_output,
- securedisplay_cmd->securedisplay_out_message.send_roi_crc.i2c_buf[i]);
- dev_info(adev->dev, "SECUREDISPLAY: I2C buffer out put is :%s\n", i2c_output);
+ dev_info(adev->dev, "SECUREDISPLAY: I2C buffer out put is: %*ph\n",
+ TA_SECUREDISPLAY_I2C_BUFFER_SIZE,
+ securedisplay_cmd->securedisplay_out_message.send_roi_crc.i2c_buf);
} else {
psp_securedisplay_parse_resp_status(psp, securedisplay_cmd->status);
}
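%*ph is the kernel's built-in hex-dump format for small buffers (up to 64
bytes), which is what lets the open-coded sprintf loop collapse into a single
call. A one-line sketch with placeholder data:

	u8 crc[4] = { 0xde, 0xad, 0xbe, 0xef };

	dev_info(adev->dev, "crc bytes: %*ph\n", (int)sizeof(crc), crc);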
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
index 7b230bcbf2c6..909d830b513e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
@@ -62,6 +62,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
bp.flags = 0;
bp.type = ttm_bo_type_kernel;
bp.resv = NULL;
+ bp.bo_ptr_size = sizeof(struct amdgpu_bo);
r = amdgpu_bo_create(adev, &bp, &vram_obj);
if (r) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 1c6131489a85..3bef0432cac2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -823,15 +823,14 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_device *bdev,
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
struct amdgpu_ttm_tt *gtt = (void *)ttm;
- int r;
-
int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
enum dma_data_direction direction = write ?
DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
+ int r;
/* Allocate an SG array and squash pages into it */
r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
- ttm->num_pages << PAGE_SHIFT,
+ (u64)ttm->num_pages << PAGE_SHIFT,
GFP_KERNEL);
if (r)
goto release_sg;
@@ -861,13 +860,12 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_device *bdev,
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
struct amdgpu_ttm_tt *gtt = (void *)ttm;
-
int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
enum dma_data_direction direction = write ?
DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
/* double check that we don't free the table twice */
- if (!ttm->sg->sgl)
+ if (!ttm->sg || !ttm->sg->sgl)
return;
/* unmap the pages mapped to the device */
@@ -1087,13 +1085,13 @@ static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev,
struct amdgpu_ttm_tt *gtt = (void *)ttm;
int r;
- if (!gtt->bound)
- return;
-
/* if the pages have userptr pinning then clear that first */
if (gtt->userptr)
amdgpu_ttm_tt_unpin_userptr(bdev, ttm);
+ if (!gtt->bound)
+ return;
+
if (gtt->offset == AMDGPU_BO_INVALID_OFFSET)
return;
@@ -1503,7 +1501,7 @@ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
memcpy(buf, &value, bytes);
}
} else {
- bytes = cursor.size & 0x3ull;
+ bytes = cursor.size & ~0x3ULL;
amdgpu_device_vram_access(adev, cursor.start,
(uint32_t *)buf, bytes,
write);
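The mask flip in amdgpu_ttm_access_memory() is easy to misread, so a
standalone check with an arbitrary size value:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t size = 0x1007;

		/* old mask kept only the low two bits: at most 3 bytes */
		printf("old: %#llx\n", (unsigned long long)(size & 0x3ULL));
		/* new mask rounds down to a dword multiple, leaving just the
		 * unaligned tail for the byte-wise path */
		printf("new: %#llx\n", (unsigned long long)(size & ~0x3ULL));
		return 0;
	}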
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index dec0db8b0b13..9e38475e0f8d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -112,6 +112,7 @@ int amdgpu_gtt_mgr_recover(struct ttm_resource_manager *man);
u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo);
int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
struct ttm_resource *mem,
+ u64 offset, u64 size,
struct device *dev,
enum dma_data_direction dir,
struct sg_table **sgt);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
index a2975c8092a9..ea6f99be070b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
@@ -60,8 +60,9 @@ int amdgpu_umc_ras_late_init(struct amdgpu_device *adev)
}
/* ras init of specific umc version */
- if (adev->umc.funcs && adev->umc.funcs->err_cnt_init)
- adev->umc.funcs->err_cnt_init(adev);
+ if (adev->umc.ras_funcs &&
+ adev->umc.ras_funcs->err_cnt_init)
+ adev->umc.ras_funcs->err_cnt_init(adev);
return 0;
@@ -95,12 +96,12 @@ int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev,
struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
- if (adev->umc.funcs &&
- adev->umc.funcs->query_ras_error_count)
- adev->umc.funcs->query_ras_error_count(adev, ras_error_status);
+ if (adev->umc.ras_funcs &&
+ adev->umc.ras_funcs->query_ras_error_count)
+ adev->umc.ras_funcs->query_ras_error_count(adev, ras_error_status);
- if (adev->umc.funcs &&
- adev->umc.funcs->query_ras_error_address &&
+ if (adev->umc.ras_funcs &&
+ adev->umc.ras_funcs->query_ras_error_address &&
adev->umc.max_ras_err_cnt_per_query) {
err_data->err_addr =
kcalloc(adev->umc.max_ras_err_cnt_per_query,
@@ -116,7 +117,7 @@ int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev,
/* umc query_ras_error_address is also responsible for clearing
* error status
*/
- adev->umc.funcs->query_ras_error_address(adev, ras_error_status);
+ adev->umc.ras_funcs->query_ras_error_address(adev, ras_error_status);
}
/* only uncorrectable error needs gpu reset */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
index 183814493658..bbcccf53080d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
@@ -35,13 +35,17 @@
#define LOOP_UMC_CH_INST(ch_inst) for ((ch_inst) = 0; (ch_inst) < adev->umc.channel_inst_num; (ch_inst)++)
#define LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) LOOP_UMC_INST((umc_inst)) LOOP_UMC_CH_INST((ch_inst))
-struct amdgpu_umc_funcs {
+struct amdgpu_umc_ras_funcs {
void (*err_cnt_init)(struct amdgpu_device *adev);
int (*ras_late_init)(struct amdgpu_device *adev);
+ void (*ras_fini)(struct amdgpu_device *adev);
void (*query_ras_error_count)(struct amdgpu_device *adev,
- void *ras_error_status);
+ void *ras_error_status);
void (*query_ras_error_address)(struct amdgpu_device *adev,
void *ras_error_status);
+};
+
+struct amdgpu_umc_funcs {
void (*init_registers)(struct amdgpu_device *adev);
};
@@ -59,6 +63,7 @@ struct amdgpu_umc {
struct ras_common_if *ras_if;
const struct amdgpu_umc_funcs *funcs;
+ const struct amdgpu_umc_ras_funcs *ras_funcs;
};
int amdgpu_umc_ras_late_init(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index e2ed4689118a..c6dbc0801604 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -259,7 +259,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
if ((adev->asic_type == CHIP_POLARIS10 ||
adev->asic_type == CHIP_POLARIS11) &&
(adev->uvd.fw_version < FW_1_66_16))
- DRM_ERROR("POLARIS10/11 UVD firmware version %hu.%hu is too old.\n",
+ DRM_ERROR("POLARIS10/11 UVD firmware version %u.%u is too old.\n",
version_major, version_minor);
} else {
unsigned int enc_major, enc_minor, dec_minor;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index 1843bf8de0cf..bc76cab67697 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -212,6 +212,7 @@ struct amdgpu_vcn_inst {
void *saved_bo;
struct amdgpu_ring ring_dec;
struct amdgpu_ring ring_enc[AMDGPU_VCN_MAX_ENC_RINGS];
+ atomic_t sched_score;
struct amdgpu_irq_src irq;
struct amdgpu_vcn_reg external;
struct amdgpu_bo *dpg_sram_bo;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index d9ffff8eb41d..0c9c5255aa42 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -466,6 +466,8 @@ static int amdgpu_virt_read_pf2vf_data(struct amdgpu_device *adev)
((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->vf2pf_update_interval_ms;
adev->virt.gim_feature =
((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->feature_flags.all;
+ adev->virt.reg_access =
+ ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->reg_access_flags.all;
break;
default:
@@ -617,6 +619,14 @@ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
if (adev->virt.ras_init_done)
amdgpu_virt_add_bad_page(adev, bp_block_offset, bp_block_size);
}
+ } else if (adev->bios != NULL) {
+ adev->virt.fw_reserve.p_pf2vf =
+ (struct amd_sriov_msg_pf2vf_info_header *)
+ (adev->bios + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10));
+
+ amdgpu_virt_read_pf2vf_data(adev);
+
+ return;
}
if (adev->virt.vf2pf_update_interval_ms != 0) {
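For reference, the new fallback reads the PF2VF page straight out of the VBIOS image when the dedicated FB region is not yet available; the shift converts the KiB constant into a byte offset (the value below is illustrative, not the real AMD_SRIOV_MSG_PF2VF_OFFSET_KB):

	/* Worked example: a hypothetical 2048 KiB offset lands at
	 * adev->bios + (2048 << 10) == adev->bios + 0x200000.
	 */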
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index 8dd624c20f89..383d4bdc3fb5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -104,6 +104,17 @@ enum AMDGIM_FEATURE_FLAG {
AMDGIM_FEATURE_GIM_MM_BW_MGR = 0x8,
/* PP ONE VF MODE in GIM */
AMDGIM_FEATURE_PP_ONE_VF = (1 << 4),
+ /* Indirect Reg Access enabled */
+ AMDGIM_FEATURE_INDIRECT_REG_ACCESS = (1 << 5),
+};
+
+enum AMDGIM_REG_ACCESS_FLAG {
+ /* Use PSP to program IH_RB_CNTL */
+ AMDGIM_FEATURE_IH_REG_PSP_EN = (1 << 0),
+ /* Use RLC to program MMHUB regs */
+ AMDGIM_FEATURE_MMHUB_REG_RLC_EN = (1 << 1),
+ /* Use RLC to program GC regs */
+ AMDGIM_FEATURE_GC_REG_RLC_EN = (1 << 2),
};
struct amdgim_pf2vf_info_v1 {
@@ -217,6 +228,7 @@ struct amdgpu_virt {
bool tdr_debug;
struct amdgpu_virt_ras_err_handler_data *virt_eh_data;
bool ras_init_done;
+ uint32_t reg_access;
/* vf2pf message */
struct delayed_work vf2pf_work;
@@ -238,6 +250,22 @@ struct amdgpu_virt {
#define amdgpu_sriov_fullaccess(adev) \
(amdgpu_sriov_vf((adev)) && !amdgpu_sriov_runtime((adev)))
+#define amdgpu_sriov_reg_indirect_en(adev) \
+(amdgpu_sriov_vf((adev)) && \
+ ((adev)->virt.gim_feature & (AMDGIM_FEATURE_INDIRECT_REG_ACCESS)))
+
+#define amdgpu_sriov_reg_indirect_ih(adev) \
+(amdgpu_sriov_vf((adev)) && \
+ ((adev)->virt.reg_access & (AMDGIM_FEATURE_IH_REG_PSP_EN)))
+
+#define amdgpu_sriov_reg_indirect_mmhub(adev) \
+(amdgpu_sriov_vf((adev)) && \
+ ((adev)->virt.reg_access & (AMDGIM_FEATURE_MMHUB_REG_RLC_EN)))
+
+#define amdgpu_sriov_reg_indirect_gc(adev) \
+(amdgpu_sriov_vf((adev)) && \
+ ((adev)->virt.reg_access & (AMDGIM_FEATURE_GC_REG_RLC_EN)))
+
#define amdgpu_passthrough(adev) \
((adev)->virt.caps & AMDGPU_PASSTHROUGH_MODE)
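The new per-block macros let IP code pick the indirect programming path per register class. A sketch of an expected call site (psp_program_ih_rb_cntl() is a hypothetical stand-in for the PSP-assisted path; WREG32_SOC15 is the usual direct MMIO write):

	/* Sketch: choosing the IH_RB_CNTL programming path on a VF. */
	static void ih_program_rb_cntl_sketch(struct amdgpu_device *adev, u32 val)
	{
		if (amdgpu_sriov_reg_indirect_ih(adev))
			psp_program_ih_rb_cntl(adev, val);	/* hypothetical */
		else
			WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, val);
	}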
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index f314e1e269cd..9acee4a5b2ba 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -869,6 +869,7 @@ static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm,
bp->domain = amdgpu_bo_get_preferred_pin_domain(adev, bp->domain);
bp->flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
AMDGPU_GEM_CREATE_CPU_GTT_USWC;
+ bp->bo_ptr_size = sizeof(struct amdgpu_bo);
if (vm->use_cpu_for_update)
bp->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
else if (!vm->root.base.bo || vm->root.base.bo->shadow)
@@ -2197,8 +2198,8 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
uint64_t eaddr;
/* validate the parameters */
- if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
- size == 0 || size & AMDGPU_GPU_PAGE_MASK)
+ if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK ||
+ size == 0 || size & ~PAGE_MASK)
return -EINVAL;
/* make sure object fit at this offset */
@@ -2263,8 +2264,8 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
int r;
/* validate the parameters */
- if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
- size == 0 || size & AMDGPU_GPU_PAGE_MASK)
+ if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK ||
+ size == 0 || size & ~PAGE_MASK)
return -EINVAL;
/* make sure object fit at this offset */
@@ -2409,7 +2410,7 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
after->start = eaddr + 1;
after->last = tmp->last;
after->offset = tmp->offset;
- after->offset += after->start - tmp->start;
+ after->offset += (after->start - tmp->start) << PAGE_SHIFT;
after->flags = tmp->flags;
after->bo_va = tmp->bo_va;
list_add(&after->list, &tmp->bo_va->invalids);
@@ -3147,6 +3148,12 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
{
unsigned i;
+ /* Concurrent flushes are only possible starting with Vega10 and
+ * are broken on Navi10 and Navi14.
+ */
+ adev->vm_manager.concurrent_flush = !(adev->asic_type < CHIP_VEGA10 ||
+ adev->asic_type == CHIP_NAVI10 ||
+ adev->asic_type == CHIP_NAVI14);
amdgpu_vmid_mgr_init(adev);
adev->vm_manager.fence_context =
@@ -3300,7 +3307,7 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
struct amdgpu_bo *root;
uint64_t value, flags;
struct amdgpu_vm *vm;
- long r;
+ int r;
spin_lock(&adev->vm_manager.pasid_lock);
vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
@@ -3349,6 +3356,12 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
value = 0;
}
+ r = dma_resv_reserve_shared(root->tbo.base.resv, 1);
+ if (r) {
+ pr_debug("failed %d to reserve fence slot\n", r);
+ goto error_unlock;
+ }
+
r = amdgpu_vm_bo_update_mapping(adev, adev, vm, true, false, NULL, addr,
addr, flags, value, NULL, NULL,
NULL);
@@ -3360,7 +3373,7 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
error_unlock:
amdgpu_bo_unreserve(root);
if (r < 0)
- DRM_ERROR("Can't handle page fault (%ld)\n", r);
+ DRM_ERROR("Can't handle page fault (%d)\n", r);
error_unref:
amdgpu_bo_unref(&root);
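The bo_clear_mappings change above fixes a unit mismatch: map ranges (start/last) count GPU pages while the BO offset is in bytes, so the delta must be scaled by PAGE_SHIFT. A worked example under 4 KiB pages (values illustrative):

	/* Clearing pages [0..3] of a mapping that starts at page 0 leaves
	 * an "after" piece at page 4; its BO offset must advance in bytes:
	 *
	 *   after->start - tmp->start = 4        (pages)
	 *   (4 << PAGE_SHIFT)         = 16384    (bytes, 4 KiB pages)
	 *
	 * The old code added the raw page count (4) instead.
	 */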
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 976a12e5a8b9..4e140288159c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -331,6 +331,7 @@ struct amdgpu_vm_manager {
/* Handling of VMIDs */
struct amdgpu_vmid_mgr id_mgr[AMDGPU_MAX_VMHUBS];
unsigned int first_kfd_vmid;
+ bool concurrent_flush;
/* Handling of VM fences */
u64 fence_context;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index b2fc475ce6f7..bce105e2973e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -25,6 +25,7 @@
#include <linux/dma-mapping.h>
#include "amdgpu.h"
#include "amdgpu_vm.h"
+#include "amdgpu_res_cursor.h"
#include "amdgpu_atomfirmware.h"
#include "atom.h"
@@ -52,7 +53,7 @@ static ssize_t amdgpu_mem_info_vram_total_show(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
- return snprintf(buf, PAGE_SIZE, "%llu\n", adev->gmc.real_vram_size);
+ return sysfs_emit(buf, "%llu\n", adev->gmc.real_vram_size);
}
/**
@@ -69,7 +70,7 @@ static ssize_t amdgpu_mem_info_vis_vram_total_show(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
- return snprintf(buf, PAGE_SIZE, "%llu\n", adev->gmc.visible_vram_size);
+ return sysfs_emit(buf, "%llu\n", adev->gmc.visible_vram_size);
}
/**
@@ -87,8 +88,7 @@ static ssize_t amdgpu_mem_info_vram_used_show(struct device *dev,
struct amdgpu_device *adev = drm_to_adev(ddev);
struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
- return snprintf(buf, PAGE_SIZE, "%llu\n",
- amdgpu_vram_mgr_usage(man));
+ return sysfs_emit(buf, "%llu\n", amdgpu_vram_mgr_usage(man));
}
/**
@@ -106,8 +106,7 @@ static ssize_t amdgpu_mem_info_vis_vram_used_show(struct device *dev,
struct amdgpu_device *adev = drm_to_adev(ddev);
struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
- return snprintf(buf, PAGE_SIZE, "%llu\n",
- amdgpu_vram_mgr_vis_usage(man));
+ return sysfs_emit(buf, "%llu\n", amdgpu_vram_mgr_vis_usage(man));
}
static ssize_t amdgpu_mem_info_vram_vendor(struct device *dev,
@@ -119,27 +118,27 @@ static ssize_t amdgpu_mem_info_vram_vendor(struct device *dev,
switch (adev->gmc.vram_vendor) {
case SAMSUNG:
- return snprintf(buf, PAGE_SIZE, "samsung\n");
+ return sysfs_emit(buf, "samsung\n");
case INFINEON:
- return snprintf(buf, PAGE_SIZE, "infineon\n");
+ return sysfs_emit(buf, "infineon\n");
case ELPIDA:
- return snprintf(buf, PAGE_SIZE, "elpida\n");
+ return sysfs_emit(buf, "elpida\n");
case ETRON:
- return snprintf(buf, PAGE_SIZE, "etron\n");
+ return sysfs_emit(buf, "etron\n");
case NANYA:
- return snprintf(buf, PAGE_SIZE, "nanya\n");
+ return sysfs_emit(buf, "nanya\n");
case HYNIX:
- return snprintf(buf, PAGE_SIZE, "hynix\n");
+ return sysfs_emit(buf, "hynix\n");
case MOSEL:
- return snprintf(buf, PAGE_SIZE, "mosel\n");
+ return sysfs_emit(buf, "mosel\n");
case WINBOND:
- return snprintf(buf, PAGE_SIZE, "winbond\n");
+ return sysfs_emit(buf, "winbond\n");
case ESMT:
- return snprintf(buf, PAGE_SIZE, "esmt\n");
+ return sysfs_emit(buf, "esmt\n");
case MICRON:
- return snprintf(buf, PAGE_SIZE, "micron\n");
+ return sysfs_emit(buf, "micron\n");
default:
- return snprintf(buf, PAGE_SIZE, "unknown\n");
+ return sysfs_emit(buf, "unknown\n");
}
}
@@ -567,6 +566,8 @@ static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
*
* @adev: amdgpu device pointer
* @mem: TTM memory object
+ * @offset: byte offset from the base of VRAM BO
+ * @length: number of bytes to export in sg_table
* @dev: the other device
* @dir: dma direction
* @sgt: resulting sg table
@@ -575,39 +576,47 @@ static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
*/
int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
struct ttm_resource *mem,
+ u64 offset, u64 length,
struct device *dev,
enum dma_data_direction dir,
struct sg_table **sgt)
{
- struct drm_mm_node *node;
+ struct amdgpu_res_cursor cursor;
struct scatterlist *sg;
int num_entries = 0;
- unsigned int pages;
int i, r;
*sgt = kmalloc(sizeof(**sgt), GFP_KERNEL);
if (!*sgt)
return -ENOMEM;
- for (pages = mem->num_pages, node = mem->mm_node;
- pages; pages -= node->size, ++node)
- ++num_entries;
+ /* Determine the number of DRM_MM nodes to export */
+ amdgpu_res_first(mem, offset, length, &cursor);
+ while (cursor.remaining) {
+ num_entries++;
+ amdgpu_res_next(&cursor, cursor.size);
+ }
r = sg_alloc_table(*sgt, num_entries, GFP_KERNEL);
if (r)
goto error_free;
+ /* Initialize scatterlist nodes of sg_table */
for_each_sgtable_sg((*sgt), sg, i)
sg->length = 0;
- node = mem->mm_node;
+ /*
+ * Walk down DRM_MM nodes to populate scatterlist nodes
+ * @note: Use the iterator API to get the first DRM_MM node
+ * and the number of bytes from it. Access the following
+ * DRM_MM node(s) if more of the buffer needs to be exported
+ */
+ amdgpu_res_first(mem, offset, length, &cursor);
for_each_sgtable_sg((*sgt), sg, i) {
- phys_addr_t phys = (node->start << PAGE_SHIFT) +
- adev->gmc.aper_base;
- size_t size = node->size << PAGE_SHIFT;
+ phys_addr_t phys = cursor.start + adev->gmc.aper_base;
+ size_t size = cursor.size;
dma_addr_t addr;
- ++node;
addr = dma_map_resource(dev, phys, size, dir,
DMA_ATTR_SKIP_CPU_SYNC);
r = dma_mapping_error(dev, addr);
@@ -617,7 +626,10 @@ int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
sg_set_page(sg, NULL, size, 0);
sg_dma_address(sg) = addr;
sg_dma_len(sg) = size;
+
+ amdgpu_res_next(&cursor, cursor.size);
}
+
return 0;
error_unmap:
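For context, a sketch of the two-pass amdgpu_res_cursor idiom that drives alloc_sgt() above, using only the iterator calls and fields shown in this hunk (the enclosing function is illustrative):

	/* Sketch only: count contiguous VRAM pieces in a range. */
	static unsigned int count_pieces_sketch(struct ttm_resource *mem,
						u64 offset, u64 length)
	{
		struct amdgpu_res_cursor cursor;
		unsigned int n = 0;

		amdgpu_res_first(mem, offset, length, &cursor);
		while (cursor.remaining) {
			n++;				/* one sg entry per piece */
			amdgpu_res_next(&cursor, cursor.size);
		}
		return n;
	}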
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
index 33f748e5bbfc..8567d5d77346 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
@@ -24,7 +24,6 @@
#include <linux/list.h>
#include "amdgpu.h"
#include "amdgpu_xgmi.h"
-#include "amdgpu_smu.h"
#include "amdgpu_ras.h"
#include "soc15.h"
#include "df/df_3_6_offset.h"
@@ -217,7 +216,7 @@ static ssize_t amdgpu_xgmi_show_device_id(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
- return snprintf(buf, PAGE_SIZE, "%llu\n", adev->gmc.xgmi.node_id);
+ return sysfs_emit(buf, "%llu\n", adev->gmc.xgmi.node_id);
}
@@ -246,7 +245,7 @@ static ssize_t amdgpu_xgmi_show_error(struct device *dev,
adev->df.funcs->set_fica(adev, ficaa_pie_status_in, 0, 0);
- return snprintf(buf, PAGE_SIZE, "%u\n", error_count);
+ return sysfs_emit(buf, "%u\n", error_count);
}
@@ -629,7 +628,7 @@ int amdgpu_xgmi_remove_device(struct amdgpu_device *adev)
return psp_xgmi_terminate(&adev->psp);
}
-int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev)
+static int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev)
{
int r;
struct ras_ih_if ih_info = {
@@ -643,7 +642,7 @@ int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev)
adev->gmc.xgmi.num_physical_nodes == 0)
return 0;
- amdgpu_xgmi_reset_ras_error_count(adev);
+ adev->gmc.xgmi.ras_funcs->reset_ras_error_count(adev);
if (!adev->gmc.xgmi.ras_if) {
adev->gmc.xgmi.ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL);
@@ -665,7 +664,7 @@ int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev)
return r;
}
-void amdgpu_xgmi_ras_fini(struct amdgpu_device *adev)
+static void amdgpu_xgmi_ras_fini(struct amdgpu_device *adev)
{
if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__XGMI_WAFL) &&
adev->gmc.xgmi.ras_if) {
@@ -692,7 +691,7 @@ static void pcs_clear_status(struct amdgpu_device *adev, uint32_t pcs_status_reg
WREG32_PCIE(pcs_status_reg, 0);
}
-void amdgpu_xgmi_reset_ras_error_count(struct amdgpu_device *adev)
+static void amdgpu_xgmi_reset_ras_error_count(struct amdgpu_device *adev)
{
uint32_t i;
@@ -752,8 +751,8 @@ static int amdgpu_xgmi_query_pcs_error_status(struct amdgpu_device *adev,
return 0;
}
-int amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev,
- void *ras_error_status)
+static int amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev,
+ void *ras_error_status)
{
struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
int i;
@@ -802,10 +801,17 @@ int amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev,
break;
}
- amdgpu_xgmi_reset_ras_error_count(adev);
+ adev->gmc.xgmi.ras_funcs->reset_ras_error_count(adev);
err_data->ue_count += ue_cnt;
err_data->ce_count += ce_cnt;
return 0;
}
+
+const struct amdgpu_xgmi_ras_funcs xgmi_ras_funcs = {
+ .ras_late_init = amdgpu_xgmi_ras_late_init,
+ .ras_fini = amdgpu_xgmi_ras_fini,
+ .query_ras_error_count = amdgpu_xgmi_query_ras_error_count,
+ .reset_ras_error_count = amdgpu_xgmi_reset_ras_error_count,
+};
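With the helpers now static, external code reaches them only through the exported table; GMC init code would presumably attach it roughly like this (the enclosing function is illustrative):

	/* Sketch: attaching the exported XGMI RAS table. */
	static void gmc_set_xgmi_ras_funcs_sketch(struct amdgpu_device *adev)
	{
		adev->gmc.xgmi.ras_funcs = &xgmi_ras_funcs;
	}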
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
index 148560d63554..12969c0830d5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
@@ -50,6 +50,7 @@ struct amdgpu_pcs_ras_field {
uint32_t pcs_err_shift;
};
+extern const struct amdgpu_xgmi_ras_funcs xgmi_ras_funcs;
struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev);
void amdgpu_put_xgmi_hive(struct amdgpu_hive_info *hive);
int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_device *adev);
@@ -58,14 +59,8 @@ int amdgpu_xgmi_remove_device(struct amdgpu_device *adev);
int amdgpu_xgmi_set_pstate(struct amdgpu_device *adev, int pstate);
int amdgpu_xgmi_get_hops_count(struct amdgpu_device *adev,
struct amdgpu_device *peer_adev);
-int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev);
-void amdgpu_xgmi_ras_fini(struct amdgpu_device *adev);
uint64_t amdgpu_xgmi_get_relative_phy_addr(struct amdgpu_device *adev,
uint64_t addr);
-int amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev,
- void *ras_error_status);
-void amdgpu_xgmi_reset_ras_error_count(struct amdgpu_device *adev);
-
static inline bool amdgpu_xgmi_same_hive(struct amdgpu_device *adev,
struct amdgpu_device *bo_adev)
{
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
index 5355827ed0ae..1a8f6d4baab2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
@@ -90,11 +90,22 @@ union amd_sriov_msg_feature_flags {
uint32_t host_flr_vramlost : 1;
uint32_t mm_bw_management : 1;
uint32_t pp_one_vf_mode : 1;
- uint32_t reserved : 27;
+ uint32_t reg_indirect_acc : 1;
+ uint32_t reserved : 26;
} flags;
uint32_t all;
};
+union amd_sriov_reg_access_flags {
+ struct {
+ uint32_t vf_reg_access_ih : 1;
+ uint32_t vf_reg_access_mmhub : 1;
+ uint32_t vf_reg_access_gc : 1;
+ uint32_t reserved : 29;
+ } flags;
+ uint32_t all;
+};
+
union amd_sriov_msg_os_info {
struct {
uint32_t windows : 1;
@@ -149,8 +160,10 @@ struct amd_sriov_msg_pf2vf_info {
/* identification in ROCm SMI */
uint64_t uuid;
uint32_t fcn_idx;
+ /* flags which indicate the register access method the VF should use */
+ union amd_sriov_reg_access_flags reg_access_flags;
/* reserved */
- uint32_t reserved[256-26];
+ uint32_t reserved[256-27];
};
struct amd_sriov_msg_vf2pf_info_header {
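Note the reserved[] bookkeeping above: reg_access_flags adds one uint32_t, so the pad shrinks from 256-26 to 256-27 and the message stays 256 dwords overall. A compile-time check in that spirit could read (assuming, as the arithmetic implies, that the 256-dword budget covers the header and all fields):

	/* Sketch: keep the pf2vf message at exactly 256 dwords (1 KiB). */
	_Static_assert(sizeof(struct amd_sriov_msg_pf2vf_info) == 256 * 4,
		       "pf2vf info must stay 1 KiB");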
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index 43b978144b79..c4bb8eed246d 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -984,10 +984,9 @@ static int cik_sdma_sw_init(void *handle)
sprintf(ring->name, "sdma%d", i);
r = amdgpu_ring_init(adev, ring, 1024,
&adev->sdma.trap_irq,
- (i == 0) ?
- AMDGPU_SDMA_IRQ_INSTANCE0 :
+ (i == 0) ? AMDGPU_SDMA_IRQ_INSTANCE0 :
AMDGPU_SDMA_IRQ_INSTANCE1,
- AMDGPU_RING_PRIO_DEFAULT);
+ AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
return r;
}
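Every amdgpu_ring_init() call in this series gains a trailing argument which, together with the new atomic_t sched_score in amdgpu_vcn_inst above, is presumably an optional shared scheduler score (NULL meaning none). A sketch of a call that would pass one (the VCN wiring itself is not part of this hunk; ring size and IRQ source are illustrative):

	/* Sketch: passing the optional score pointer instead of NULL. */
	r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[0].irq, 0,
			     AMDGPU_RING_PRIO_DEFAULT,
			     &adev->vcn.inst[0].sched_score);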
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index ea825b4f8ee8..d1570a462a51 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -2896,6 +2896,11 @@ static int dce_v10_0_hw_fini(void *handle)
static int dce_v10_0_suspend(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int r;
+
+ r = amdgpu_display_suspend_helper(adev);
+ if (r)
+ return r;
adev->mode_info.bl_level =
amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
@@ -2920,8 +2925,10 @@ static int dce_v10_0_resume(void *handle)
amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder,
bl_level);
}
+ if (ret)
+ return ret;
- return ret;
+ return amdgpu_display_resume_helper(adev);
}
static bool dce_v10_0_is_idle(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index a360a6dec198..18a7b3bd633b 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -3026,6 +3026,11 @@ static int dce_v11_0_hw_fini(void *handle)
static int dce_v11_0_suspend(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int r;
+
+ r = amdgpu_display_suspend_helper(adev);
+ if (r)
+ return r;
adev->mode_info.bl_level =
amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
@@ -3050,8 +3055,10 @@ static int dce_v11_0_resume(void *handle)
amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder,
bl_level);
}
+ if (ret)
+ return ret;
- return ret;
+ return amdgpu_display_resume_helper(adev);
}
static bool dce_v11_0_is_idle(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index ef124ac853b6..dbcb09cf83e6 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -2769,7 +2769,11 @@ static int dce_v6_0_hw_fini(void *handle)
static int dce_v6_0_suspend(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int r;
+ r = amdgpu_display_suspend_helper(adev);
+ if (r)
+ return r;
adev->mode_info.bl_level =
amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
@@ -2793,8 +2797,10 @@ static int dce_v6_0_resume(void *handle)
amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder,
bl_level);
}
+ if (ret)
+ return ret;
- return ret;
+ return amdgpu_display_resume_helper(adev);
}
static bool dce_v6_0_is_idle(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index c98650183828..b200b9e722d9 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -2795,6 +2795,11 @@ static int dce_v8_0_hw_fini(void *handle)
static int dce_v8_0_suspend(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int r;
+
+ r = amdgpu_display_suspend_helper(adev);
+ if (r)
+ return r;
adev->mode_info.bl_level =
amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
@@ -2819,8 +2824,10 @@ static int dce_v8_0_resume(void *handle)
amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder,
bl_level);
}
+ if (ret)
+ return ret;
- return ret;
+ return amdgpu_display_resume_helper(adev);
}
static bool dce_v8_0_is_idle(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
index 9810af712cc0..5c11144da051 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
@@ -39,6 +39,7 @@
#include "dce_v11_0.h"
#include "dce_virtual.h"
#include "ivsrcid/ivsrcid_vislands30.h"
+#include "amdgpu_display.h"
#define DCE_VIRTUAL_VBLANK_PERIOD 16666666
@@ -491,12 +492,24 @@ static int dce_virtual_hw_fini(void *handle)
static int dce_virtual_suspend(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int r;
+
+ r = amdgpu_display_suspend_helper(adev);
+ if (r)
+ return r;
return dce_virtual_hw_fini(handle);
}
static int dce_virtual_resume(void *handle)
{
- return dce_virtual_hw_init(handle);
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int r;
+
+ r = dce_virtual_hw_init(handle);
+ if (r)
+ return r;
+ return amdgpu_display_resume_helper(adev);
}
static bool dce_virtual_is_idle(void *handle)
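All five DCE variants in this series adopt the same ordering: suspend display state before the hardware, and resume the hardware before restoring display state. A sketch of the suspend half (dce_hw_fini_sketch() stands in for the per-variant hw_fini callback):

	/* Sketch of the common DCE suspend ordering introduced here. */
	static int dce_suspend_sketch(void *handle)
	{
		struct amdgpu_device *adev = (struct amdgpu_device *)handle;
		int r;

		r = amdgpu_display_suspend_helper(adev);	/* displays first */
		if (r)
			return r;
		return dce_hw_fini_sketch(handle);		/* hypothetical */
	}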
diff --git a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
index 44109a6b8f44..0d8459d63bac 100644
--- a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
+++ b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
@@ -205,7 +205,7 @@ static ssize_t df_v3_6_get_df_cntr_avail(struct device *dev,
count++;
}
- return snprintf(buf, PAGE_SIZE, "%i\n", count);
+ return sysfs_emit(buf, "%i\n", count);
}
/* device attr for available perfmon counters */
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index 45d1172b7bff..2408ed4c7d84 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -29,7 +29,6 @@
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "amdgpu_psp.h"
-#include "amdgpu_smu.h"
#include "nv.h"
#include "nvd.h"
@@ -174,6 +173,11 @@
#define mmGC_THROTTLE_CTRL_Sienna_Cichlid 0x2030
#define mmGC_THROTTLE_CTRL_Sienna_Cichlid_BASE_IDX 0
+#define GFX_RLCG_GC_WRITE_OLD (0x8 << 28)
+#define GFX_RLCG_GC_WRITE (0x0 << 28)
+#define GFX_RLCG_GC_READ (0x1 << 28)
+#define GFX_RLCG_MMHUB_WRITE (0x2 << 28)
+
MODULE_FIRMWARE("amdgpu/navi10_ce.bin");
MODULE_FIRMWARE("amdgpu/navi10_pfp.bin");
MODULE_FIRMWARE("amdgpu/navi10_me.bin");
@@ -1419,38 +1423,127 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_2[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffffffff, 0x00800000)
};
-static void gfx_v10_rlcg_wreg(struct amdgpu_device *adev, u32 offset, u32 v)
+static bool gfx_v10_is_rlcg_rw(struct amdgpu_device *adev, u32 offset, uint32_t *flag, bool write)
+{
+ /* always programmed by RLCG, only for GC */
+ if (offset == SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_HI) ||
+ offset == SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_LO) ||
+ offset == SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_LENGTH) ||
+ offset == SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_CNTL) ||
+ offset == SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_INDEX) ||
+ offset == SOC15_REG_OFFSET(GC, 0, mmCP_ME_CNTL)) {
+ if (!amdgpu_sriov_reg_indirect_gc(adev))
+ *flag = GFX_RLCG_GC_WRITE_OLD;
+ else
+ *flag = write ? GFX_RLCG_GC_WRITE : GFX_RLCG_GC_READ;
+
+ return true;
+ }
+
+ /* currently support gc read/write, mmhub write */
+ if (offset >= SOC15_REG_OFFSET(GC, 0, mmSDMA0_DEC_START) &&
+ offset <= SOC15_REG_OFFSET(GC, 0, mmRLC_GTS_OFFSET_MSB)) {
+ if (amdgpu_sriov_reg_indirect_gc(adev))
+ *flag = write ? GFX_RLCG_GC_WRITE : GFX_RLCG_GC_READ;
+ else
+ return false;
+ } else {
+ if (amdgpu_sriov_reg_indirect_mmhub(adev))
+ *flag = GFX_RLCG_MMHUB_WRITE;
+ else
+ return false;
+ }
+
+ return true;
+}
+
+static u32 gfx_v10_rlcg_rw(struct amdgpu_device *adev, u32 offset, u32 v, uint32_t flag)
{
static void *scratch_reg0;
static void *scratch_reg1;
+ static void *scratch_reg2;
+ static void *scratch_reg3;
static void *spare_int;
+ static uint32_t grbm_cntl;
+ static uint32_t grbm_idx;
uint32_t i = 0;
uint32_t retries = 50000;
+ u32 ret = 0;
+
+ scratch_reg0 = adev->rmmio +
+ (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG0_BASE_IDX] + mmSCRATCH_REG0) * 4;
+ scratch_reg1 = adev->rmmio +
+ (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG1) * 4;
+ scratch_reg2 = adev->rmmio +
+ (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG0_BASE_IDX] + mmSCRATCH_REG2) * 4;
+ scratch_reg3 = adev->rmmio +
+ (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG3) * 4;
+ spare_int = adev->rmmio +
+ (adev->reg_offset[GC_HWIP][0][mmRLC_SPARE_INT_BASE_IDX] + mmRLC_SPARE_INT) * 4;
+
+ grbm_cntl = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_CNTL_BASE_IDX] + mmGRBM_GFX_CNTL;
+ grbm_idx = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_INDEX_BASE_IDX] + mmGRBM_GFX_INDEX;
+
+ if (offset == grbm_cntl || offset == grbm_idx) {
+ if (offset == grbm_cntl)
+ writel(v, scratch_reg2);
+ else if (offset == grbm_idx)
+ writel(v, scratch_reg3);
+
+ writel(v, ((void __iomem *)adev->rmmio) + (offset * 4));
+ } else {
+ writel(v, scratch_reg0);
+ writel(offset | flag, scratch_reg1);
+ writel(1, spare_int);
+ for (i = 0; i < retries; i++) {
+ u32 tmp;
+
+ tmp = readl(scratch_reg1);
+ if (!(tmp & flag))
+ break;
- scratch_reg0 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG0_BASE_IDX] + mmSCRATCH_REG0)*4;
- scratch_reg1 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG1)*4;
- spare_int = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmRLC_SPARE_INT_BASE_IDX] + mmRLC_SPARE_INT)*4;
+ udelay(10);
+ }
- if (amdgpu_sriov_runtime(adev)) {
- pr_err("shouldn't call rlcg write register during runtime\n");
- return;
+ if (i >= retries)
+ pr_err("timeout: rlcg program reg:0x%05x failed !\n", offset);
}
- writel(v, scratch_reg0);
- writel(offset | 0x80000000, scratch_reg1);
- writel(1, spare_int);
- for (i = 0; i < retries; i++) {
- u32 tmp;
+ ret = readl(scratch_reg0);
- tmp = readl(scratch_reg1);
- if (!(tmp & 0x80000000))
- break;
+ return ret;
+}
+
+static void gfx_v10_rlcg_wreg(struct amdgpu_device *adev, u32 offset, u32 value, u32 flag)
+{
+ uint32_t rlcg_flag;
- udelay(10);
+ if (amdgpu_sriov_fullaccess(adev) &&
+ gfx_v10_is_rlcg_rw(adev, offset, &rlcg_flag, 1)) {
+ gfx_v10_rlcg_rw(adev, offset, value, rlcg_flag);
+
+ return;
}
+ if (flag & AMDGPU_REGS_NO_KIQ)
+ WREG32_NO_KIQ(offset, value);
+ else
+ WREG32(offset, value);
+}
- if (i >= retries)
- pr_err("timeout: rlcg program reg:0x%05x failed !\n", offset);
+static u32 gfx_v10_rlcg_rreg(struct amdgpu_device *adev, u32 offset, u32 flag)
+{
+ uint32_t rlcg_flag;
+
+ if (amdgpu_sriov_fullaccess(adev) &&
+ gfx_v10_is_rlcg_rw(adev, offset, &rlcg_flag, 0))
+ return gfx_v10_rlcg_rw(adev, offset, 0, rlcg_flag);
+
+ if (flag & AMDGPU_REGS_NO_KIQ)
+ return RREG32_NO_KIQ(offset);
+ else
+ return RREG32(offset);
}
static const struct soc15_reg_golden golden_settings_gc_10_1_nv14[] =
@@ -3280,7 +3373,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_3_4[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_GCR_CNTL, 0x0007ffff, 0x0000c000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0x00000280, 0x00000280),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0x07800000, 0x00800000),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL, 0x00001d00, 0x00000500),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL_Sienna_Cichlid, 0x00001d00, 0x00000500),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGE_PC_CNTL, 0x003c0000, 0x00280400),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2A_ADDR_MATCH_MASK, 0xffffffff, 0xffffffcf),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_ADDR_MATCH_MASK, 0xffffffff, 0xffffffcf),
@@ -4459,9 +4552,8 @@ static int gfx_v10_0_gfx_ring_init(struct amdgpu_device *adev, int ring_id,
sprintf(ring->name, "gfx_%d.%d.%d", ring->me, ring->pipe, ring->queue);
irq_type = AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP + ring->pipe;
- r = amdgpu_ring_init(adev, ring, 1024,
- &adev->gfx.eop_irq, irq_type,
- AMDGPU_RING_PRIO_DEFAULT);
+ r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type,
+ AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
return r;
return 0;
@@ -4495,8 +4587,8 @@ static int gfx_v10_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ?
AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL;
/* type-2 packets are deprecated on MEC, use type-3 instead */
- r = amdgpu_ring_init(adev, ring, 1024,
- &adev->gfx.eop_irq, irq_type, hw_prio);
+ r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type,
+ hw_prio, NULL);
if (r)
return r;
@@ -7172,16 +7264,10 @@ static int gfx_v10_0_hw_init(void *handle)
* loaded firstly, so in direct type, it has to load smc ucode
* here before rlc.
*/
- if (adev->smu.ppt_funcs != NULL && !(adev->flags & AMD_IS_APU)) {
- r = smu_load_microcode(&adev->smu);
+ if (!(adev->flags & AMD_IS_APU)) {
+ r = amdgpu_pm_load_smu_firmware(adev, NULL);
if (r)
return r;
-
- r = smu_check_fw_status(&adev->smu);
- if (r) {
- pr_err("SMC firmware status is not correct\n");
- return r;
- }
}
gfx_v10_0_disable_gpa_mode(adev);
}
@@ -7892,6 +7978,7 @@ static const struct amdgpu_rlc_funcs gfx_v10_0_rlc_funcs_sriov = {
.start = gfx_v10_0_rlc_start,
.update_spm_vmid = gfx_v10_0_update_spm_vmid,
.rlcg_wreg = gfx_v10_rlcg_wreg,
+ .rlcg_rreg = gfx_v10_rlcg_rreg,
.is_rlcg_access_range = gfx_v10_0_is_rlcg_access_range,
};
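For context, the core of gfx_v10_rlcg_rw() above is a scratch-register handshake: payload in SCRATCH_REG0, address plus operation flag in SCRATCH_REG1, a kick via RLC_SPARE_INT, then poll until the RLC clears the flag. A condensed sketch using only operations shown in the hunk (the wrapper function and its parameters are illustrative):

	/* Condensed sketch of the RLCG scratch-register handshake. */
	static u32 rlcg_handshake_sketch(void __iomem *scratch_reg0,
					 void __iomem *scratch_reg1,
					 void __iomem *spare_int,
					 u32 offset, u32 v, u32 flag)
	{
		int i;

		writel(v, scratch_reg0);		/* payload            */
		writel(offset | flag, scratch_reg1);	/* address + op flag  */
		writel(1, spare_int);			/* kick the RLC       */
		for (i = 0; i < 50000; i++) {		/* bounded poll       */
			if (!(readl(scratch_reg1) & flag))
				break;			/* RLC cleared flag   */
			udelay(10);
		}
		return readl(scratch_reg0);		/* result for reads   */
	}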
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
index ca74638dec9b..3a8d52a54873 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
@@ -3114,7 +3114,7 @@ static int gfx_v6_0_sw_init(void *handle)
r = amdgpu_ring_init(adev, ring, 1024,
&adev->gfx.eop_irq,
AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP,
- AMDGPU_RING_PRIO_DEFAULT);
+ AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
return r;
}
@@ -3137,7 +3137,7 @@ static int gfx_v6_0_sw_init(void *handle)
irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ring->pipe;
r = amdgpu_ring_init(adev, ring, 1024,
&adev->gfx.eop_irq, irq_type,
- AMDGPU_RING_PRIO_DEFAULT);
+ AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index a368724c3dfc..c35fdd2ef2d4 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -1877,7 +1877,7 @@ static void gfx_v7_0_init_compute_vmid(struct amdgpu_device *adev)
mutex_unlock(&adev->srbm_mutex);
/* Initialize all compute VMIDs to have no GDS, GWS, or OA
- acccess. These should be enabled by FW for target VMIDs. */
+ access. These should be enabled by FW for target VMIDs. */
for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
WREG32(amdgpu_gds_reg_offset[i].mem_base, 0);
WREG32(amdgpu_gds_reg_offset[i].mem_size, 0);
@@ -2058,7 +2058,7 @@ static void gfx_v7_0_constants_init(struct amdgpu_device *adev)
* @adev: amdgpu_device pointer
*
* Set up the number and offset of the CP scratch registers.
- * NOTE: use of CP scratch registers is a legacy inferface and
+ * NOTE: use of CP scratch registers is a legacy interface and
* is not used by default on newer asics (r6xx+). On newer asics,
* memory buffers are used for fences rather than scratch regs.
*/
@@ -2172,7 +2172,7 @@ static void gfx_v7_0_ring_emit_vgt_flush(struct amdgpu_ring *ring)
* @seq: sequence number
* @flags: fence related flags
*
- * Emits a fence sequnce number on the gfx ring and flushes
+ * Emits a fence sequence number on the gfx ring and flushes
* GPU caches.
*/
static void gfx_v7_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
@@ -2215,7 +2215,7 @@ static void gfx_v7_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
* @seq: sequence number
* @flags: fence related flags
*
- * Emits a fence sequnce number on the compute ring and flushes
+ * Emits a fence sequence number on the compute ring and flushes
* GPU caches.
*/
static void gfx_v7_0_ring_emit_fence_compute(struct amdgpu_ring *ring,
@@ -2245,14 +2245,14 @@ static void gfx_v7_0_ring_emit_fence_compute(struct amdgpu_ring *ring,
* gfx_v7_0_ring_emit_ib - emit an IB (Indirect Buffer) on the ring
*
* @ring: amdgpu_ring structure holding ring information
- * @job: job to retrive vmid from
+ * @job: job to retrieve vmid from
* @ib: amdgpu indirect buffer object
* @flags: options (AMDGPU_HAVE_CTX_SWITCH)
*
* Emits an DE (drawing engine) or CE (constant engine) IB
* on the gfx ring. IBs are usually generated by userspace
* acceleration drivers and submitted to the kernel for
- * sheduling on the ring. This function schedules the IB
+ * scheduling on the ring. This function schedules the IB
* on the gfx ring for execution by the GPU.
*/
static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
@@ -2402,7 +2402,7 @@ err1:
/*
* CP.
- * On CIK, gfx and compute now have independant command processors.
+ * On CIK, gfx and compute now have independent command processors.
*
* GFX
* Gfx consists of a single ring and can process both gfx jobs and
@@ -2630,7 +2630,7 @@ static int gfx_v7_0_cp_gfx_resume(struct amdgpu_device *adev)
ring->wptr = 0;
WREG32(mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
- /* set the wb address wether it's enabled or not */
+ /* set the wb address whether it's enabled or not */
rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
WREG32(mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
WREG32(mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF);
@@ -2985,7 +2985,7 @@ static void gfx_v7_0_mqd_init(struct amdgpu_device *adev,
mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
- /* set the wb address wether it's enabled or not */
+ /* set the wb address whether it's enabled or not */
wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
mqd->cp_hqd_pq_rptr_report_addr_hi =
@@ -3198,7 +3198,7 @@ static int gfx_v7_0_cp_resume(struct amdgpu_device *adev)
/**
* gfx_v7_0_ring_emit_vm_flush - cik vm flush using the CP
*
- * @ring: the ring to emmit the commands to
+ * @ring: the ring to emit the commands to
*
* Sync the command pipeline with the PFP. E.g. wait for everything
* to be completed.
@@ -3220,7 +3220,7 @@ static void gfx_v7_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, 4); /* poll interval */
if (usepfp) {
- /* synce CE with ME to prevent CE fetch CEIB before context switch done */
+ /* sync CE with ME to prevent CE fetch CEIB before context switch done */
amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
@@ -4438,7 +4438,7 @@ static int gfx_v7_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
/* type-2 packets are deprecated on MEC, use type-3 instead */
r = amdgpu_ring_init(adev, ring, 1024,
&adev->gfx.eop_irq, irq_type,
- AMDGPU_RING_PRIO_DEFAULT);
+ AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
return r;
@@ -4512,7 +4512,7 @@ static int gfx_v7_0_sw_init(void *handle)
r = amdgpu_ring_init(adev, ring, 1024,
&adev->gfx.eop_irq,
AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP,
- AMDGPU_RING_PRIO_DEFAULT);
+ AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 4d45844a1d92..c26e06059466 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -1927,8 +1927,8 @@ static int gfx_v8_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ?
AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_RING_PRIO_DEFAULT;
/* type-2 packets are deprecated on MEC, use type-3 instead */
- r = amdgpu_ring_init(adev, ring, 1024,
- &adev->gfx.eop_irq, irq_type, hw_prio);
+ r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type,
+ hw_prio, NULL);
if (r)
return r;
@@ -2033,7 +2033,7 @@ static int gfx_v8_0_sw_init(void *handle)
r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq,
AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP,
- AMDGPU_RING_PRIO_DEFAULT);
+ AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 1393ccae7418..a078a38c2cee 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -734,7 +734,7 @@ static const u32 GFX_RLC_SRM_INDEX_CNTL_DATA_OFFSETS[] =
mmRLC_SRM_INDEX_CNTL_DATA_7 - mmRLC_SRM_INDEX_CNTL_DATA_0,
};
-static void gfx_v9_0_rlcg_wreg(struct amdgpu_device *adev, u32 offset, u32 v)
+static void gfx_v9_0_rlcg_rw(struct amdgpu_device *adev, u32 offset, u32 v, u32 flag)
{
static void *scratch_reg0;
static void *scratch_reg1;
@@ -787,6 +787,20 @@ static void gfx_v9_0_rlcg_wreg(struct amdgpu_device *adev, u32 offset, u32 v)
}
+static void gfx_v9_0_rlcg_wreg(struct amdgpu_device *adev, u32 offset, u32 v, u32 flag)
+{
+ if (amdgpu_sriov_fullaccess(adev)) {
+ gfx_v9_0_rlcg_rw(adev, offset, v, flag);
+
+ return;
+ }
+
+ if (flag & AMDGPU_REGS_NO_KIQ)
+ WREG32_NO_KIQ(offset, v);
+ else
+ WREG32(offset, v);
+}
+
#define VEGA10_GB_ADDR_CONFIG_GOLDEN 0x2a114042
#define VEGA12_GB_ADDR_CONFIG_GOLDEN 0x24104041
#define RAVEN_GB_ADDR_CONFIG_GOLDEN 0x24000042
@@ -1573,6 +1587,9 @@ static int gfx_v9_0_init_cp_compute_microcode(struct amdgpu_device *adev,
err = 0;
adev->gfx.mec2_fw = NULL;
}
+ } else {
+ adev->gfx.mec2_fw_version = adev->gfx.mec_fw_version;
+ adev->gfx.mec2_feature_version = adev->gfx.mec_feature_version;
}
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
@@ -2089,45 +2106,22 @@ static void gfx_v9_0_select_me_pipe_q(struct amdgpu_device *adev,
}
static const struct amdgpu_gfx_funcs gfx_v9_0_gfx_funcs = {
- .get_gpu_clock_counter = &gfx_v9_0_get_gpu_clock_counter,
- .select_se_sh = &gfx_v9_0_select_se_sh,
- .read_wave_data = &gfx_v9_0_read_wave_data,
- .read_wave_sgprs = &gfx_v9_0_read_wave_sgprs,
- .read_wave_vgprs = &gfx_v9_0_read_wave_vgprs,
- .select_me_pipe_q = &gfx_v9_0_select_me_pipe_q,
+ .get_gpu_clock_counter = &gfx_v9_0_get_gpu_clock_counter,
+ .select_se_sh = &gfx_v9_0_select_se_sh,
+ .read_wave_data = &gfx_v9_0_read_wave_data,
+ .read_wave_sgprs = &gfx_v9_0_read_wave_sgprs,
+ .read_wave_vgprs = &gfx_v9_0_read_wave_vgprs,
+ .select_me_pipe_q = &gfx_v9_0_select_me_pipe_q,
+};
+
+static const struct amdgpu_gfx_ras_funcs gfx_v9_0_ras_funcs = {
+ .ras_late_init = amdgpu_gfx_ras_late_init,
+ .ras_fini = amdgpu_gfx_ras_fini,
.ras_error_inject = &gfx_v9_0_ras_error_inject,
.query_ras_error_count = &gfx_v9_0_query_ras_error_count,
.reset_ras_error_count = &gfx_v9_0_reset_ras_error_count,
};
-static const struct amdgpu_gfx_funcs gfx_v9_4_gfx_funcs = {
- .get_gpu_clock_counter = &gfx_v9_0_get_gpu_clock_counter,
- .select_se_sh = &gfx_v9_0_select_se_sh,
- .read_wave_data = &gfx_v9_0_read_wave_data,
- .read_wave_sgprs = &gfx_v9_0_read_wave_sgprs,
- .read_wave_vgprs = &gfx_v9_0_read_wave_vgprs,
- .select_me_pipe_q = &gfx_v9_0_select_me_pipe_q,
- .ras_error_inject = &gfx_v9_4_ras_error_inject,
- .query_ras_error_count = &gfx_v9_4_query_ras_error_count,
- .reset_ras_error_count = &gfx_v9_4_reset_ras_error_count,
- .query_ras_error_status = &gfx_v9_4_query_ras_error_status,
-};
-
-static const struct amdgpu_gfx_funcs gfx_v9_4_2_gfx_funcs = {
- .get_gpu_clock_counter = &gfx_v9_0_get_gpu_clock_counter,
- .select_se_sh = &gfx_v9_0_select_se_sh,
- .read_wave_data = &gfx_v9_0_read_wave_data,
- .read_wave_sgprs = &gfx_v9_0_read_wave_sgprs,
- .read_wave_vgprs = &gfx_v9_0_read_wave_vgprs,
- .select_me_pipe_q = &gfx_v9_0_select_me_pipe_q,
- .ras_error_inject = &gfx_v9_4_2_ras_error_inject,
- .query_ras_error_count = &gfx_v9_4_2_query_ras_error_count,
- .reset_ras_error_count = &gfx_v9_4_2_reset_ras_error_count,
- .query_ras_error_status = &gfx_v9_4_2_query_ras_error_status,
- .reset_ras_error_status = &gfx_v9_4_2_reset_ras_error_status,
- .enable_watchdog_timer = &gfx_v9_4_2_enable_watchdog_timer,
-};
-
static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
{
u32 gb_addr_config;
@@ -2154,6 +2148,7 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
DRM_INFO("fix gfx.config for vega12\n");
break;
case CHIP_VEGA20:
+ adev->gfx.ras_funcs = &gfx_v9_0_ras_funcs;
adev->gfx.config.max_hw_contexts = 8;
adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
@@ -2179,7 +2174,7 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
gb_addr_config = RAVEN_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_ARCTURUS:
- adev->gfx.funcs = &gfx_v9_4_gfx_funcs;
+ adev->gfx.ras_funcs = &gfx_v9_4_ras_funcs;
adev->gfx.config.max_hw_contexts = 8;
adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
@@ -2200,7 +2195,7 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
gb_addr_config |= 0x22010042;
break;
case CHIP_ALDEBARAN:
- adev->gfx.funcs = &gfx_v9_4_2_gfx_funcs;
+ adev->gfx.ras_funcs = &gfx_v9_4_2_ras_funcs;
adev->gfx.config.max_hw_contexts = 8;
adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
@@ -2286,8 +2281,8 @@ static int gfx_v9_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ?
AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL;
/* type-2 packets are deprecated on MEC, use type-3 instead */
- return amdgpu_ring_init(adev, ring, 1024,
- &adev->gfx.eop_irq, irq_type, hw_prio);
+ return amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type,
+ hw_prio, NULL);
}
static int gfx_v9_0_sw_init(void *handle)
@@ -2376,10 +2371,9 @@ static int gfx_v9_0_sw_init(void *handle)
sprintf(ring->name, "gfx_%d", i);
ring->use_doorbell = true;
ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1;
- r = amdgpu_ring_init(adev, ring, 1024,
- &adev->gfx.eop_irq,
+ r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq,
AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP,
- AMDGPU_RING_PRIO_DEFAULT);
+ AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
return r;
}
@@ -2434,7 +2428,9 @@ static int gfx_v9_0_sw_fini(void *handle)
int i;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- amdgpu_gfx_ras_fini(adev);
+ if (adev->gfx.ras_funcs &&
+ adev->gfx.ras_funcs->ras_fini)
+ adev->gfx.ras_funcs->ras_fini(adev);
for (i = 0; i < adev->gfx.num_gfx_rings; i++)
amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
@@ -4025,8 +4021,14 @@ static int gfx_v9_0_hw_fini(void *handle)
}
gfx_v9_0_cp_enable(adev, false);
- adev->gfx.rlc.funcs->stop(adev);
+ /* Skip suspend with A+A reset */
+ if (adev->gmc.xgmi.connected_to_cpu && amdgpu_in_reset(adev)) {
+ dev_dbg(adev->dev, "Device in reset. Skipping RLC halt\n");
+ return 0;
+ }
+
+ adev->gfx.rlc.funcs->stop(adev);
return 0;
}
@@ -4747,12 +4749,16 @@ static int gfx_v9_0_ecc_late_init(void *handle)
if (r)
return r;
- r = amdgpu_gfx_ras_late_init(adev);
- if (r)
- return r;
+ if (adev->gfx.ras_funcs &&
+ adev->gfx.ras_funcs->ras_late_init) {
+ r = adev->gfx.ras_funcs->ras_late_init(adev);
+ if (r)
+ return r;
+ }
- if (adev->gfx.funcs->enable_watchdog_timer)
- adev->gfx.funcs->enable_watchdog_timer(adev);
+ if (adev->gfx.ras_funcs &&
+ adev->gfx.ras_funcs->enable_watchdog_timer)
+ adev->gfx.ras_funcs->enable_watchdog_timer(adev);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c
index bc699d680ce8..b4789dfc2bb9 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c
@@ -863,8 +863,8 @@ static int gfx_v9_4_ras_error_count(struct amdgpu_device *adev,
return 0;
}
-int gfx_v9_4_query_ras_error_count(struct amdgpu_device *adev,
- void *ras_error_status)
+static int gfx_v9_4_query_ras_error_count(struct amdgpu_device *adev,
+ void *ras_error_status)
{
struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
uint32_t sec_count = 0, ded_count = 0;
@@ -906,7 +906,7 @@ int gfx_v9_4_query_ras_error_count(struct amdgpu_device *adev,
return 0;
}
-void gfx_v9_4_reset_ras_error_count(struct amdgpu_device *adev)
+static void gfx_v9_4_reset_ras_error_count(struct amdgpu_device *adev)
{
int i, j, k;
@@ -971,7 +971,8 @@ void gfx_v9_4_reset_ras_error_count(struct amdgpu_device *adev)
WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_INDEX, 255);
}
-int gfx_v9_4_ras_error_inject(struct amdgpu_device *adev, void *inject_if)
+static int gfx_v9_4_ras_error_inject(struct amdgpu_device *adev,
+ void *inject_if)
{
struct ras_inject_if *info = (struct ras_inject_if *)inject_if;
int ret;
@@ -993,10 +994,10 @@ int gfx_v9_4_ras_error_inject(struct amdgpu_device *adev, void *inject_if)
return ret;
}
-static const struct soc15_reg_entry gfx_v9_4_rdrsp_status_regs =
+static const struct soc15_reg_entry gfx_v9_4_ea_err_status_regs =
{ SOC15_REG_ENTRY(GC, 0, mmGCEA_ERR_STATUS), 0, 1, 32 };
-void gfx_v9_4_query_ras_error_status(struct amdgpu_device *adev)
+static void gfx_v9_4_query_ras_error_status(struct amdgpu_device *adev)
{
uint32_t i, j;
uint32_t reg_value;
@@ -1006,18 +1007,33 @@ void gfx_v9_4_query_ras_error_status(struct amdgpu_device *adev)
mutex_lock(&adev->grbm_idx_mutex);
- for (i = 0; i < gfx_v9_4_rdrsp_status_regs.se_num; i++) {
- for (j = 0; j < gfx_v9_4_rdrsp_status_regs.instance;
+ for (i = 0; i < gfx_v9_4_ea_err_status_regs.se_num; i++) {
+ for (j = 0; j < gfx_v9_4_ea_err_status_regs.instance;
j++) {
gfx_v9_4_select_se_sh(adev, i, 0, j);
reg_value = RREG32(SOC15_REG_ENTRY_OFFSET(
- gfx_v9_4_rdrsp_status_regs));
- if (reg_value)
+ gfx_v9_4_ea_err_status_regs));
+ if (REG_GET_FIELD(reg_value, GCEA_ERR_STATUS, SDP_RDRSP_STATUS) ||
+ REG_GET_FIELD(reg_value, GCEA_ERR_STATUS, SDP_WRRSP_STATUS) ||
+ REG_GET_FIELD(reg_value, GCEA_ERR_STATUS, SDP_RDRSP_DATAPARITY_ERROR)) {
+ /* SDP read/write error/parity error in FUE_IS_FATAL mode
+ * can cause a system fatal error on Arcturus. Harvest the error
+ * status before GPU reset */
dev_warn(adev->dev, "GCEA err detected at instance: %d, status: 0x%x!\n",
j, reg_value);
+ }
}
}
gfx_v9_4_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
mutex_unlock(&adev->grbm_idx_mutex);
}
+
+const struct amdgpu_gfx_ras_funcs gfx_v9_4_ras_funcs = {
+ .ras_late_init = amdgpu_gfx_ras_late_init,
+ .ras_fini = amdgpu_gfx_ras_fini,
+ .ras_error_inject = &gfx_v9_4_ras_error_inject,
+ .query_ras_error_count = &gfx_v9_4_query_ras_error_count,
+ .reset_ras_error_count = &gfx_v9_4_reset_ras_error_count,
+ .query_ras_error_status = &gfx_v9_4_query_ras_error_status,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h
index 875f18473a98..bdd16b568021 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h
@@ -24,16 +24,6 @@
#ifndef __GFX_V9_4_H__
#define __GFX_V9_4_H__
-void gfx_v9_4_clear_ras_edc_counter(struct amdgpu_device *adev);
-
-int gfx_v9_4_query_ras_error_count(struct amdgpu_device *adev,
- void *ras_error_status);
-
-int gfx_v9_4_ras_error_inject(struct amdgpu_device *adev,
- void *inject_if);
-
-void gfx_v9_4_reset_ras_error_count(struct amdgpu_device *adev);
-
-void gfx_v9_4_query_ras_error_status(struct amdgpu_device *adev);
+extern const struct amdgpu_gfx_ras_funcs gfx_v9_4_ras_funcs;
#endif /* __GFX_V9_4_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
index 2e94998c9812..a30c7c10cd9a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
@@ -808,7 +808,7 @@ static struct gfx_v9_4_2_utc_block gfx_v9_4_2_utc_blocks[] = {
REG_SET_FIELD(0, ATC_L2_CACHE_4K_DSM_CNTL, WRITE_COUNTERS, 1) },
};
-static const struct soc15_reg_entry gfx_v9_4_2_rdrsp_status_regs =
+static const struct soc15_reg_entry gfx_v9_4_2_ea_err_status_regs =
{ SOC15_REG_ENTRY(GC, 0, regGCEA_ERR_STATUS), 0, 1, 16 };
static int gfx_v9_4_2_get_reg_error_count(struct amdgpu_device *adev,
@@ -997,8 +997,9 @@ static int gfx_v9_4_2_query_utc_edc_count(struct amdgpu_device *adev,
blk->clear);
/* print the edc count */
- gfx_v9_4_2_log_utc_edc_count(adev, blk, j, sec_cnt,
- ded_cnt);
+ if (sec_cnt || ded_cnt)
+ gfx_v9_4_2_log_utc_edc_count(adev, blk, j, sec_cnt,
+ ded_cnt);
}
}
@@ -1040,11 +1041,11 @@ static void gfx_v9_4_2_reset_ea_err_status(struct amdgpu_device *adev)
uint32_t i, j;
mutex_lock(&adev->grbm_idx_mutex);
- for (i = 0; i < gfx_v9_4_2_rdrsp_status_regs.se_num; i++) {
- for (j = 0; j < gfx_v9_4_2_rdrsp_status_regs.instance;
+ for (i = 0; i < gfx_v9_4_2_ea_err_status_regs.se_num; i++) {
+ for (j = 0; j < gfx_v9_4_2_ea_err_status_regs.instance;
j++) {
gfx_v9_4_2_select_se_sh(adev, i, 0, j);
- WREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_4_2_rdrsp_status_regs), 0x10);
+ WREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_4_2_ea_err_status_regs), 0x10);
}
}
gfx_v9_4_2_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
@@ -1089,17 +1090,20 @@ static void gfx_v9_4_2_query_ea_err_status(struct amdgpu_device *adev)
mutex_lock(&adev->grbm_idx_mutex);
- for (i = 0; i < gfx_v9_4_2_rdrsp_status_regs.se_num; i++) {
- for (j = 0; j < gfx_v9_4_2_rdrsp_status_regs.instance;
+ for (i = 0; i < gfx_v9_4_2_ea_err_status_regs.se_num; i++) {
+ for (j = 0; j < gfx_v9_4_2_ea_err_status_regs.instance;
j++) {
gfx_v9_4_2_select_se_sh(adev, i, 0, j);
reg_value = RREG32(SOC15_REG_ENTRY_OFFSET(
- gfx_v9_4_2_rdrsp_status_regs));
- if (reg_value)
+ gfx_v9_4_2_ea_err_status_regs));
+ if (REG_GET_FIELD(reg_value, GCEA_ERR_STATUS, SDP_RDRSP_STATUS) ||
+ REG_GET_FIELD(reg_value, GCEA_ERR_STATUS, SDP_WRRSP_STATUS) ||
+ REG_GET_FIELD(reg_value, GCEA_ERR_STATUS, SDP_RDRSP_DATAPARITY_ERROR)) {
dev_warn(adev->dev, "GCEA err detected at instance: %d, status: 0x%x!\n",
j, reg_value);
+ }
/* clear after read */
- WREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_4_2_rdrsp_status_regs), 0x10);
+ WREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_4_2_ea_err_status_regs), 0x10);
}
}
@@ -1112,19 +1116,19 @@ static void gfx_v9_4_2_query_utc_err_status(struct amdgpu_device *adev)
uint32_t data;
data = RREG32_SOC15(GC, 0, regUTCL2_MEM_ECC_STATUS);
- if (!data) {
+ if (data) {
dev_warn(adev->dev, "GFX UTCL2 Mem Ecc Status: 0x%x!\n", data);
WREG32_SOC15(GC, 0, regUTCL2_MEM_ECC_STATUS, 0x3);
}
data = RREG32_SOC15(GC, 0, regVML2_MEM_ECC_STATUS);
- if (!data) {
+ if (data) {
dev_warn(adev->dev, "GFX VML2 Mem Ecc Status: 0x%x!\n", data);
WREG32_SOC15(GC, 0, regVML2_MEM_ECC_STATUS, 0x3);
}
data = RREG32_SOC15(GC, 0, regVML2_WALKER_MEM_ECC_STATUS);
- if (!data) {
+ if (data) {
dev_warn(adev->dev, "GFX VML2 Walker Mem Ecc Status: 0x%x!\n", data);
WREG32_SOC15(GC, 0, regVML2_WALKER_MEM_ECC_STATUS, 0x3);
}
@@ -1283,4 +1287,15 @@ static void gfx_v9_4_2_reset_sq_timeout_status(struct amdgpu_device *adev)
}
gfx_v9_4_2_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
mutex_unlock(&adev->grbm_idx_mutex);
-}
\ No newline at end of file
+}
+
+const struct amdgpu_gfx_ras_funcs gfx_v9_4_2_ras_funcs = {
+ .ras_late_init = amdgpu_gfx_ras_late_init,
+ .ras_fini = amdgpu_gfx_ras_fini,
+ .ras_error_inject = &gfx_v9_4_2_ras_error_inject,
+ .query_ras_error_count = &gfx_v9_4_2_query_ras_error_count,
+ .reset_ras_error_count = &gfx_v9_4_2_reset_ras_error_count,
+ .query_ras_error_status = &gfx_v9_4_2_query_ras_error_status,
+ .reset_ras_error_status = &gfx_v9_4_2_reset_ras_error_status,
+ .enable_watchdog_timer = &gfx_v9_4_2_enable_watchdog_timer,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.h b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.h
index c143d178ef98..81c5833b6b9f 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.h
@@ -30,11 +30,6 @@ void gfx_v9_4_2_init_golden_registers(struct amdgpu_device *adev,
uint32_t die_id);
void gfx_v9_4_2_set_power_brake_sequence(struct amdgpu_device *adev);
-void gfx_v9_4_2_reset_ras_error_count(struct amdgpu_device *adev);
-int gfx_v9_4_2_ras_error_inject(struct amdgpu_device *adev, void *inject_if);
-void gfx_v9_4_2_query_ras_error_status(struct amdgpu_device *adev);
-int gfx_v9_4_2_query_ras_error_count(struct amdgpu_device *adev,
- void *ras_error_status);
-void gfx_v9_4_2_reset_ras_error_status(struct amdgpu_device *adev);
-void gfx_v9_4_2_enable_watchdog_timer(struct amdgpu_device *adev);
+extern const struct amdgpu_gfx_ras_funcs gfx_v9_4_2_ras_funcs;
+
#endif /* __GFX_V9_4_2_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
index d189507dcef0..1e4678cb98f0 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
@@ -120,8 +120,7 @@ static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
/* Set default page address. */
- value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start +
- adev->vm_manager.vram_base_offset;
+ value = amdgpu_gmc_vram_mc2pa(adev, adev->vram_scratch.gpu_addr);
WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
(u32)(value >> 12));
WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
@@ -141,21 +140,12 @@ static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
* FB aperture and AGP aperture. Disable them.
*/
if (adev->gmc.pdb0_bo) {
- if (adev->asic_type == CHIP_ALDEBARAN) {
- WREG32_SOC15(GC, 0, mmMC_VM_FB_LOCATION_TOP, adev->gmc.fb_end_original >> 24);
- WREG32_SOC15(GC, 0, mmMC_VM_FB_LOCATION_BASE, adev->gmc.fb_start_original >> 24);
- WREG32_SOC15(GC, 0, mmMC_VM_AGP_TOP, 0);
- WREG32_SOC15(GC, 0, mmMC_VM_AGP_BOT, 0xFFFFFF);
- WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR, adev->gmc.fb_start_original >> 18);
- WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR, adev->gmc.fb_end_original >> 18);
- } else {
- WREG32_SOC15(GC, 0, mmMC_VM_FB_LOCATION_TOP, 0);
- WREG32_SOC15(GC, 0, mmMC_VM_FB_LOCATION_BASE, 0x00FFFFFF);
- WREG32_SOC15(GC, 0, mmMC_VM_AGP_TOP, 0);
- WREG32_SOC15(GC, 0, mmMC_VM_AGP_BOT, 0xFFFFFF);
- WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR, 0x3FFFFFFF);
- WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR, 0);
- }
+ WREG32_SOC15(GC, 0, mmMC_VM_FB_LOCATION_TOP, 0);
+ WREG32_SOC15(GC, 0, mmMC_VM_FB_LOCATION_BASE, 0x00FFFFFF);
+ WREG32_SOC15(GC, 0, mmMC_VM_AGP_TOP, 0);
+ WREG32_SOC15(GC, 0, mmMC_VM_AGP_BOT, 0xFFFFFF);
+ WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR, 0x3FFFFFFF);
+ WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR, 0);
}
}
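The three gfxhub variants (and the GMC get_vm_pde paths below) replace the open-coded MC-address translation with amdgpu_gmc_vram_mc2pa(). Reconstructed from the removed lines, the helper presumably computes (the real definition lives in amdgpu_gmc.h per this series' diffstat):

	/* Sketch reconstructed from the removed open-coded lines. */
	static inline u64 amdgpu_gmc_vram_mc2pa_sketch(struct amdgpu_device *adev,
						       u64 mc_addr)
	{
		return mc_addr - adev->gmc.vram_start +
		       adev->vm_manager.vram_base_offset;
	}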
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c
index 2aecc6a243e8..14c1c1a297dd 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c
@@ -165,8 +165,7 @@ static void gfxhub_v2_0_init_system_aperture_regs(struct amdgpu_device *adev)
max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
/* Set default page address. */
- value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start
- + adev->vm_manager.vram_base_offset;
+ value = amdgpu_gmc_vram_mc2pa(adev, adev->vram_scratch.gpu_addr);
WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
(u32)(value >> 12));
WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c
index 410fd3a1a388..41807817de7d 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c
@@ -164,8 +164,7 @@ static void gfxhub_v2_1_init_system_aperture_regs(struct amdgpu_device *adev)
max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
/* Set default page address. */
- value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start
- + adev->vm_manager.vram_base_offset;
+ value = amdgpu_gmc_vram_mc2pa(adev, adev->vram_scratch.gpu_addr);
WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
(u32)(value >> 12));
WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index 33e54eed2eec..498b28a35f5b 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -568,8 +568,7 @@ static void gmc_v10_0_get_vm_pde(struct amdgpu_device *adev, int level,
uint64_t *addr, uint64_t *flags)
{
if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM))
- *addr = adev->vm_manager.vram_base_offset + *addr -
- adev->gmc.vram_start;
+ *addr = amdgpu_gmc_vram_mc2pa(adev, *addr);
BUG_ON(*addr & 0xFFFF00000000003FULL);
if (!adev->gmc.translate_further)
@@ -655,7 +654,7 @@ static void gmc_v10_0_set_umc_funcs(struct amdgpu_device *adev)
adev->umc.umc_inst_num = UMC_V8_7_UMC_INSTANCE_NUM;
adev->umc.channel_offs = UMC_V8_7_PER_CHANNEL_OFFSET_SIENNA;
adev->umc.channel_idx_tbl = &umc_v8_7_channel_idx_tbl[0][0];
- adev->umc.funcs = &umc_v8_7_funcs;
+ adev->umc.ras_funcs = &umc_v8_7_ras_funcs;
break;
default:
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index c1bd190841f8..e4f27b3f28fb 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -59,6 +59,7 @@ MODULE_FIRMWARE("amdgpu/tonga_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris11_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris10_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris12_mc.bin");
+MODULE_FIRMWARE("amdgpu/polaris12_32_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris11_k_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris10_k_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris12_k_mc.bin");
@@ -243,10 +244,16 @@ static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
chip_name = "polaris10";
break;
case CHIP_POLARIS12:
- if (ASICID_IS_P23(adev->pdev->device, adev->pdev->revision))
+ if (ASICID_IS_P23(adev->pdev->device, adev->pdev->revision)) {
chip_name = "polaris12_k";
- else
- chip_name = "polaris12";
+ } else {
+ WREG32(mmMC_SEQ_IO_DEBUG_INDEX, ixMC_IO_DEBUG_UP_159);
+			/* Polaris12 32-bit ASIC needs a special MC firmware */
+ if (RREG32(mmMC_SEQ_IO_DEBUG_DATA) == 0x05b4dc40)
+ chip_name = "polaris12_32";
+ else
+ chip_name = "polaris12";
+ }
break;
case CHIP_FIJI:
case CHIP_CARRIZO:
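
For context on how the chip_name selected above is consumed: microcode init routines in this driver conventionally build the firmware path from it roughly as below (a sketch; the exact code in gmc_v8_0_init_microcode sits outside this hunk):

	char fw_name[30];
	int err;

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
	err = request_firmware(&adev->gmc.fw, fw_name, adev->dev);

which is why the new MODULE_FIRMWARE("amdgpu/polaris12_32_mc.bin") line is needed for the 32-bit variant's firmware to be discoverable by initramfs tooling.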
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 468acc06b681..455bb91060d0 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -574,7 +574,8 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
* be updated to avoid reading an incorrect value due to
* the new fast GRBM interface.
*/
- if (entry->vmid_src == AMDGPU_GFXHUB_0)
+ if ((entry->vmid_src == AMDGPU_GFXHUB_0) &&
+ (adev->asic_type < CHIP_ALDEBARAN))
RREG32(hub->vm_l2_pro_fault_status);
status = RREG32(hub->vm_l2_pro_fault_status);
@@ -653,7 +654,8 @@ static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
adev->gmc.vm_fault.num_types = 1;
adev->gmc.vm_fault.funcs = &gmc_v9_0_irq_funcs;
- if (!amdgpu_sriov_vf(adev)) {
+ if (!amdgpu_sriov_vf(adev) &&
+ !adev->gmc.xgmi.connected_to_cpu) {
adev->gmc.ecc_irq.num_types = 1;
adev->gmc.ecc_irq.funcs = &gmc_v9_0_ecc_funcs;
}
@@ -801,7 +803,8 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
* be cleared to avoid a false ACK due to the new fast
* GRBM interface.
*/
- if (vmhub == AMDGPU_GFXHUB_0)
+ if ((vmhub == AMDGPU_GFXHUB_0) &&
+ (adev->asic_type < CHIP_ALDEBARAN))
RREG32_NO_KIQ(hub->vm_inv_eng0_req +
hub->eng_distance * eng);
@@ -1047,8 +1050,7 @@ static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
uint64_t *addr, uint64_t *flags)
{
if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM))
- *addr = adev->vm_manager.vram_base_offset + *addr -
- adev->gmc.vram_start;
+ *addr = amdgpu_gmc_vram_mc2pa(adev, *addr);
BUG_ON(*addr & 0xFFFF00000000003FULL);
if (!adev->gmc.translate_further)
@@ -1155,7 +1157,7 @@ static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_VG20;
adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
- adev->umc.funcs = &umc_v6_1_funcs;
+ adev->umc.ras_funcs = &umc_v6_1_ras_funcs;
break;
case CHIP_ARCTURUS:
adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM;
@@ -1163,7 +1165,7 @@ static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_ARCT;
adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
- adev->umc.funcs = &umc_v6_1_funcs;
+ adev->umc.ras_funcs = &umc_v6_1_ras_funcs;
break;
default:
break;
@@ -1185,6 +1187,24 @@ static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev)
}
}
+static void gmc_v9_0_set_mmhub_ras_funcs(struct amdgpu_device *adev)
+{
+ switch (adev->asic_type) {
+ case CHIP_VEGA20:
+ adev->mmhub.ras_funcs = &mmhub_v1_0_ras_funcs;
+ break;
+ case CHIP_ARCTURUS:
+ adev->mmhub.ras_funcs = &mmhub_v9_4_ras_funcs;
+ break;
+ case CHIP_ALDEBARAN:
+ adev->mmhub.ras_funcs = &mmhub_v1_7_ras_funcs;
+ break;
+ default:
+ /* mmhub ras is not available */
+ break;
+ }
+}
+
static void gmc_v9_0_set_gfxhub_funcs(struct amdgpu_device *adev)
{
adev->gfxhub.funcs = &gfxhub_v1_0_funcs;
@@ -1194,12 +1214,6 @@ static int gmc_v9_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- gmc_v9_0_set_gmc_funcs(adev);
- gmc_v9_0_set_irq_funcs(adev);
- gmc_v9_0_set_umc_funcs(adev);
- gmc_v9_0_set_mmhub_funcs(adev);
- gmc_v9_0_set_gfxhub_funcs(adev);
-
if (adev->asic_type == CHIP_VEGA20 ||
adev->asic_type == CHIP_ARCTURUS)
adev->gmc.xgmi.supported = true;
@@ -1208,7 +1222,14 @@ static int gmc_v9_0_early_init(void *handle)
adev->gmc.xgmi.supported = true;
adev->gmc.xgmi.connected_to_cpu =
adev->smuio.funcs->is_host_gpu_xgmi_supported(adev);
- }
+ }
+
+ gmc_v9_0_set_gmc_funcs(adev);
+ gmc_v9_0_set_irq_funcs(adev);
+ gmc_v9_0_set_umc_funcs(adev);
+ gmc_v9_0_set_mmhub_funcs(adev);
+ gmc_v9_0_set_mmhub_ras_funcs(adev);
+ gmc_v9_0_set_gfxhub_funcs(adev);
adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
adev->gmc.shared_aperture_end =
@@ -1240,8 +1261,9 @@ static int gmc_v9_0_late_init(void *handle)
}
}
- if (adev->mmhub.funcs && adev->mmhub.funcs->reset_ras_error_count)
- adev->mmhub.funcs->reset_ras_error_count(adev);
+ if (adev->mmhub.ras_funcs &&
+ adev->mmhub.ras_funcs->reset_ras_error_count)
+ adev->mmhub.ras_funcs->reset_ras_error_count(adev);
r = amdgpu_gmc_ras_late_init(adev);
if (r)
@@ -1506,7 +1528,8 @@ static int gmc_v9_0_sw_init(void *handle)
if (r)
return r;
- if (!amdgpu_sriov_vf(adev)) {
+ if (!amdgpu_sriov_vf(adev) &&
+ !adev->gmc.xgmi.connected_to_cpu) {
/* interrupt sent to DF. */
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0,
&adev->gmc.ecc_irq);
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
index 7332a320ede8..9360204da7fb 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
@@ -487,7 +487,7 @@ int jpeg_v1_0_sw_init(void *handle)
ring = &adev->jpeg.inst->ring_dec;
sprintf(ring->name, "jpeg_dec");
r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq,
- 0, AMDGPU_RING_PRIO_DEFAULT);
+ 0, AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
return r;
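
The NULL appended to this amdgpu_ring_init() call, and to the jpeg/mes/sdma call sites below, tracks a widened prototype. A hedged sketch of the signature implied by these call sites, where the name and type of the trailing parameter (a scheduler score pointer in later kernels) are assumptions:

int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
		     unsigned int max_dw, struct amdgpu_irq_src *irq_src,
		     unsigned int irq_type, unsigned int hw_prio,
		     atomic_t *sched_score);

Rings that do not participate in load balancing simply pass NULL.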
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
index 3b22953aa62e..de5abceced0d 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
@@ -108,7 +108,7 @@ static int jpeg_v2_0_sw_init(void *handle)
ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1;
sprintf(ring->name, "jpeg_dec");
r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq,
- 0, AMDGPU_RING_PRIO_DEFAULT);
+ 0, AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
index 072774ae16bd..83531997aeba 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
@@ -115,7 +115,7 @@ static int jpeg_v2_5_sw_init(void *handle)
ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1 + 8 * i;
sprintf(ring->name, "jpeg_dec_%d", i);
r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst[i].irq,
- 0, AMDGPU_RING_PRIO_DEFAULT);
+ 0, AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
index e8fbb2a0de34..de5dfcfb3859 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
@@ -94,7 +94,7 @@ static int jpeg_v3_0_sw_init(void *handle)
ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1;
sprintf(ring->name, "jpeg_dec");
r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0,
- AMDGPU_RING_PRIO_DEFAULT);
+ AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c b/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c
index 7f30629f21a2..a7ec4ac89da5 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c
@@ -848,7 +848,8 @@ static int mes_v10_1_ring_init(struct amdgpu_device *adev)
ring->no_scheduler = true;
sprintf(ring->name, "mes_%d.%d.%d", ring->me, ring->pipe, ring->queue);
- return amdgpu_ring_init(adev, ring, 1024, NULL, 0, AMDGPU_RING_PRIO_DEFAULT);
+ return amdgpu_ring_init(adev, ring, 1024, NULL, 0,
+ AMDGPU_RING_PRIO_DEFAULT, NULL);
}
static int mes_v10_1_mqd_sw_init(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
index d7b39c07de20..a99953833820 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
@@ -114,8 +114,7 @@ static void mmhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
return;
/* Set default page address. */
- value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start +
- adev->vm_manager.vram_base_offset;
+ value = amdgpu_gmc_vram_mc2pa(adev, adev->vram_scratch.gpu_addr);
WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
(u32)(value >> 12));
WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
@@ -776,10 +775,14 @@ static void mmhub_v1_0_reset_ras_error_count(struct amdgpu_device *adev)
}
}
-const struct amdgpu_mmhub_funcs mmhub_v1_0_funcs = {
+const struct amdgpu_mmhub_ras_funcs mmhub_v1_0_ras_funcs = {
.ras_late_init = amdgpu_mmhub_ras_late_init,
+ .ras_fini = amdgpu_mmhub_ras_fini,
.query_ras_error_count = mmhub_v1_0_query_ras_error_count,
.reset_ras_error_count = mmhub_v1_0_reset_ras_error_count,
+};
+
+const struct amdgpu_mmhub_funcs mmhub_v1_0_funcs = {
.get_fb_location = mmhub_v1_0_get_fb_location,
.init = mmhub_v1_0_init,
.gart_enable = mmhub_v1_0_gart_enable,
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h
index d77f5b65a618..4661b094e007 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h
@@ -24,5 +24,6 @@
#define __MMHUB_V1_0_H__
extern const struct amdgpu_mmhub_funcs mmhub_v1_0_funcs;
+extern const struct amdgpu_mmhub_ras_funcs mmhub_v1_0_ras_funcs;
#endif
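
The mmhub_v1_0 change above is the first instance of a pattern this patch repeats for mmhub v1.7/v9.4, UMC, and NBIO: RAS entry points are split out of the general IP vtable into a dedicated ras_funcs structure, so callers can NULL-check adev->mmhub.ras_funcs once instead of probing individual members. A sketch of the structure's shape, inferred from the initializers in these hunks rather than copied from the header:

struct amdgpu_mmhub_ras_funcs {
	int (*ras_late_init)(struct amdgpu_device *adev);
	void (*ras_fini)(struct amdgpu_device *adev);
	void (*query_ras_error_count)(struct amdgpu_device *adev,
				      void *ras_error_status);
	void (*reset_ras_error_count)(struct amdgpu_device *adev);
	void (*query_ras_error_status)(struct amdgpu_device *adev);
};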
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c
index ae7d8a1738a3..0103a5ab28e6 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c
@@ -47,8 +47,6 @@ static u64 mmhub_v1_7_get_fb_location(struct amdgpu_device *adev)
adev->gmc.fb_start = base;
adev->gmc.fb_end = top;
- adev->gmc.fb_start_original = base;
- adev->gmc.fb_end_original = top;
return base;
}
@@ -126,17 +124,16 @@ static void mmhub_v1_7_init_system_aperture_regs(struct amdgpu_device *adev)
if (adev->gmc.pdb0_bo) {
WREG32_SOC15(MMHUB, 0, regMC_VM_AGP_BOT, 0xFFFFFF);
WREG32_SOC15(MMHUB, 0, regMC_VM_AGP_TOP, 0);
- WREG32_SOC15(MMHUB, 0, regMC_VM_FB_LOCATION_TOP, adev->gmc.fb_end_original >> 24);
- WREG32_SOC15(MMHUB, 0, regMC_VM_FB_LOCATION_BASE, adev->gmc.fb_start_original >> 24);
- WREG32_SOC15(MMHUB, 0, regMC_VM_SYSTEM_APERTURE_LOW_ADDR, adev->gmc.fb_start_original >> 18);
- WREG32_SOC15(MMHUB, 0, regMC_VM_SYSTEM_APERTURE_HIGH_ADDR, adev->gmc.fb_end_original >> 18);
+ WREG32_SOC15(MMHUB, 0, regMC_VM_FB_LOCATION_TOP, 0);
+ WREG32_SOC15(MMHUB, 0, regMC_VM_FB_LOCATION_BASE, 0x00FFFFFF);
+ WREG32_SOC15(MMHUB, 0, regMC_VM_SYSTEM_APERTURE_LOW_ADDR, 0x3FFFFFFF);
+ WREG32_SOC15(MMHUB, 0, regMC_VM_SYSTEM_APERTURE_HIGH_ADDR, 0);
}
if (amdgpu_sriov_vf(adev))
return;
/* Set default page address. */
- value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start +
- adev->vm_manager.vram_base_offset;
+ value = amdgpu_gmc_vram_mc2pa(adev, adev->vram_scratch.gpu_addr);
WREG32_SOC15(MMHUB, 0, regMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
(u32)(value >> 12));
WREG32_SOC15(MMHUB, 0, regMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
@@ -1287,7 +1284,7 @@ static void mmhub_v1_7_reset_ras_error_count(struct amdgpu_device *adev)
}
}
-static const struct soc15_reg_entry mmhub_v1_7_err_status_regs[] = {
+static const struct soc15_reg_entry mmhub_v1_7_ea_err_status_regs[] = {
{ SOC15_REG_ENTRY(MMHUB, 0, regMMEA0_ERR_STATUS), 0, 0, 0 },
{ SOC15_REG_ENTRY(MMHUB, 0, regMMEA1_ERR_STATUS), 0, 0, 0 },
{ SOC15_REG_ENTRY(MMHUB, 0, regMMEA2_ERR_STATUS), 0, 0, 0 },
@@ -1304,19 +1301,27 @@ static void mmhub_v1_7_query_ras_error_status(struct amdgpu_device *adev)
if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB))
return;
- for (i = 0; i < ARRAY_SIZE(mmhub_v1_7_err_status_regs); i++) {
+ for (i = 0; i < ARRAY_SIZE(mmhub_v1_7_ea_err_status_regs); i++) {
reg_value =
- RREG32(SOC15_REG_ENTRY_OFFSET(mmhub_v1_7_err_status_regs[i]));
- if (reg_value)
+ RREG32(SOC15_REG_ENTRY_OFFSET(mmhub_v1_7_ea_err_status_regs[i]));
+ if (REG_GET_FIELD(reg_value, MMEA0_ERR_STATUS, SDP_RDRSP_STATUS) ||
+ REG_GET_FIELD(reg_value, MMEA0_ERR_STATUS, SDP_WRRSP_STATUS) ||
+ REG_GET_FIELD(reg_value, MMEA0_ERR_STATUS, SDP_RDRSP_DATAPARITY_ERROR)) {
dev_warn(adev->dev, "MMHUB EA err detected at instance: %d, status: 0x%x!\n",
i, reg_value);
+ }
}
}
-const struct amdgpu_mmhub_funcs mmhub_v1_7_funcs = {
+const struct amdgpu_mmhub_ras_funcs mmhub_v1_7_ras_funcs = {
.ras_late_init = amdgpu_mmhub_ras_late_init,
+ .ras_fini = amdgpu_mmhub_ras_fini,
.query_ras_error_count = mmhub_v1_7_query_ras_error_count,
.reset_ras_error_count = mmhub_v1_7_reset_ras_error_count,
+ .query_ras_error_status = mmhub_v1_7_query_ras_error_status,
+};
+
+const struct amdgpu_mmhub_funcs mmhub_v1_7_funcs = {
.get_fb_location = mmhub_v1_7_get_fb_location,
.init = mmhub_v1_7_init,
.gart_enable = mmhub_v1_7_gart_enable,
@@ -1325,5 +1330,4 @@ const struct amdgpu_mmhub_funcs mmhub_v1_7_funcs = {
.set_clockgating = mmhub_v1_7_set_clockgating,
.get_clockgating = mmhub_v1_7_get_clockgating,
.setup_vm_pt_regs = mmhub_v1_7_setup_vm_pt_regs,
- .query_ras_error_status = mmhub_v1_7_query_ras_error_status,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.h b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.h
index bf2fbeb172d1..a7f9dfc24697 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.h
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.h
@@ -24,5 +24,6 @@
#define __MMHUB_V1_7_H__
extern const struct amdgpu_mmhub_funcs mmhub_v1_7_funcs;
+extern const struct amdgpu_mmhub_ras_funcs mmhub_v1_7_ras_funcs;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
index f107385faba2..ac76081b91d5 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
@@ -210,8 +210,7 @@ static void mmhub_v2_0_init_system_aperture_regs(struct amdgpu_device *adev)
}
/* Set default page address. */
- value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start +
- adev->vm_manager.vram_base_offset;
+ value = amdgpu_gmc_vram_mc2pa(adev, adev->vram_scratch.gpu_addr);
WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
(u32)(value >> 12));
WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
@@ -689,7 +688,6 @@ static void mmhub_v2_0_get_clockgating(struct amdgpu_device *adev, u32 *flags)
}
const struct amdgpu_mmhub_funcs mmhub_v2_0_funcs = {
- .ras_late_init = amdgpu_mmhub_ras_late_init,
.init = mmhub_v2_0_init,
.gart_enable = mmhub_v2_0_gart_enable,
.set_fault_enable_default = mmhub_v2_0_set_fault_enable_default,
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c
index ab9be5ad5a5f..a9899335d0b1 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c
@@ -162,8 +162,7 @@ static void mmhub_v2_3_init_system_aperture_regs(struct amdgpu_device *adev)
max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
/* Set default page address. */
- value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start +
- adev->vm_manager.vram_base_offset;
+ value = amdgpu_gmc_vram_mc2pa(adev, adev->vram_scratch.gpu_addr);
WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
(u32)(value >> 12));
WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
@@ -616,7 +615,6 @@ static void mmhub_v2_3_get_clockgating(struct amdgpu_device *adev, u32 *flags)
}
const struct amdgpu_mmhub_funcs mmhub_v2_3_funcs = {
- .ras_late_init = amdgpu_mmhub_ras_late_init,
.init = mmhub_v2_3_init,
.gart_enable = mmhub_v2_3_gart_enable,
.set_fault_enable_default = mmhub_v2_3_set_fault_enable_default,
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
index 4a31737b6bb0..47c8dd9d1c78 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
@@ -136,8 +136,7 @@ static void mmhub_v9_4_init_system_aperture_regs(struct amdgpu_device *adev,
max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
/* Set default page address. */
- value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start +
- adev->vm_manager.vram_base_offset;
+ value = amdgpu_gmc_vram_mc2pa(adev, adev->vram_scratch.gpu_addr);
WREG32_SOC15_OFFSET(
MMHUB, 0,
mmVMSHAREDPF0_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
@@ -1646,16 +1645,27 @@ static void mmhub_v9_4_query_ras_error_status(struct amdgpu_device *adev)
for (i = 0; i < ARRAY_SIZE(mmhub_v9_4_err_status_regs); i++) {
reg_value =
RREG32(SOC15_REG_ENTRY_OFFSET(mmhub_v9_4_err_status_regs[i]));
- if (reg_value)
+ if (REG_GET_FIELD(reg_value, MMEA0_ERR_STATUS, SDP_RDRSP_STATUS) ||
+ REG_GET_FIELD(reg_value, MMEA0_ERR_STATUS, SDP_WRRSP_STATUS) ||
+ REG_GET_FIELD(reg_value, MMEA0_ERR_STATUS, SDP_RDRSP_DATAPARITY_ERROR)) {
+			/* An SDP read/write error or parity error in FUE_IS_FATAL
+			 * mode can cause a system fatal error on Arcturus. Harvest
+			 * the error status before GPU reset. */
dev_warn(adev->dev, "MMHUB EA err detected at instance: %d, status: 0x%x!\n",
i, reg_value);
+ }
}
}
-const struct amdgpu_mmhub_funcs mmhub_v9_4_funcs = {
+const struct amdgpu_mmhub_ras_funcs mmhub_v9_4_ras_funcs = {
.ras_late_init = amdgpu_mmhub_ras_late_init,
+ .ras_fini = amdgpu_mmhub_ras_fini,
.query_ras_error_count = mmhub_v9_4_query_ras_error_count,
.reset_ras_error_count = mmhub_v9_4_reset_ras_error_count,
+ .query_ras_error_status = mmhub_v9_4_query_ras_error_status,
+};
+
+const struct amdgpu_mmhub_funcs mmhub_v9_4_funcs = {
.get_fb_location = mmhub_v9_4_get_fb_location,
.init = mmhub_v9_4_init,
.gart_enable = mmhub_v9_4_gart_enable,
@@ -1664,5 +1674,4 @@ const struct amdgpu_mmhub_funcs mmhub_v9_4_funcs = {
.set_clockgating = mmhub_v9_4_set_clockgating,
.get_clockgating = mmhub_v9_4_get_clockgating,
.setup_vm_pt_regs = mmhub_v9_4_setup_vm_pt_regs,
- .query_ras_error_status = mmhub_v9_4_query_ras_error_status,
};
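
Both query_ras_error_status implementations now decode specific SDP status fields instead of warning on any nonzero register value, which filters out benign bits. For reference, REG_GET_FIELD is the driver's usual mask-and-shift helper; it expands to roughly:

#define REG_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
#define REG_FIELD_MASK(reg, field) reg##__##field##_MASK
#define REG_GET_FIELD(value, reg, field) \
	(((value) & REG_FIELD_MASK(reg, field)) >> REG_FIELD_SHIFT(reg, field))

so the checks above fire only when one of the three SDP response/parity fields is actually set.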
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.h b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.h
index 92404a8f66f3..90436efa92ef 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.h
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.h
@@ -24,5 +24,6 @@
#define __MMHUB_V9_4_H__
extern const struct amdgpu_mmhub_funcs mmhub_v9_4_funcs;
+extern const struct amdgpu_mmhub_ras_funcs mmhub_v9_4_ras_funcs;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
index 83ea063388fd..0d2d629e2d6a 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
@@ -31,6 +31,28 @@
#include "vega10_enum.h"
#include <uapi/linux/kfd_ioctl.h>
+#define smnPCIE_LC_CNTL 0x11140280
+#define smnPCIE_LC_CNTL3 0x111402d4
+#define smnPCIE_LC_CNTL6 0x111402ec
+#define smnPCIE_LC_CNTL7 0x111402f0
+#define smnNBIF_MGCG_CTRL_LCLK 0x1013a05c
+#define NBIF_MGCG_CTRL_LCLK__NBIF_MGCG_REG_DIS_LCLK_MASK 0x00001000L
+#define RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER_MASK 0x0000FFFFL
+#define RCC_BIF_STRAP3__STRAP_VLINK_PM_L1_ENTRY_TIMER_MASK 0xFFFF0000L
+#define smnRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL 0x10123530
+#define smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2 0x1014008c
+#define smnBIF_CFG_DEV0_EPF0_PCIE_LTR_CAP 0x10140324
+#define smnPSWUSP0_PCIE_LC_CNTL2 0x111402c4
+#define smnRCC_BIF_STRAP2 0x10123488
+#define smnRCC_BIF_STRAP3 0x1012348c
+#define smnRCC_BIF_STRAP5 0x10123494
+#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__LTR_EN_MASK 0x0400L
+#define RCC_BIF_STRAP5__STRAP_VLINK_LDN_ENTRY_TIMER_MASK 0x0000FFFFL
+#define RCC_BIF_STRAP2__STRAP_LTR_IN_ASPML1_DIS_MASK 0x00004000L
+#define RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER__SHIFT 0x0
+#define RCC_BIF_STRAP3__STRAP_VLINK_PM_L1_ENTRY_TIMER__SHIFT 0x10
+#define RCC_BIF_STRAP5__STRAP_VLINK_LDN_ENTRY_TIMER__SHIFT 0x0
+
static void nbio_v6_1_remap_hdp_registers(struct amdgpu_device *adev)
{
WREG32_SOC15(NBIO, 0, mmREMAP_HDP_MEM_FLUSH_CNTL,
@@ -256,6 +278,111 @@ static void nbio_v6_1_init_registers(struct amdgpu_device *adev)
WREG32_PCIE(smnPCIE_CI_CNTL, data);
}
+static void nbio_v6_1_program_ltr(struct amdgpu_device *adev)
+{
+ uint32_t def, data;
+
+ WREG32_PCIE(smnRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL, 0x75EB);
+
+ def = data = RREG32_PCIE(smnRCC_BIF_STRAP2);
+ data &= ~RCC_BIF_STRAP2__STRAP_LTR_IN_ASPML1_DIS_MASK;
+ if (def != data)
+ WREG32_PCIE(smnRCC_BIF_STRAP2, data);
+
+ def = data = RREG32_PCIE(smnRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL);
+ data &= ~EP_PCIE_TX_LTR_CNTL__LTR_PRIV_MSG_DIS_IN_PM_NON_D0_MASK;
+ if (def != data)
+ WREG32_PCIE(smnRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL, data);
+
+ def = data = RREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2);
+ data |= BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__LTR_EN_MASK;
+ if (def != data)
+ WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2, data);
+}
+
+static void nbio_v6_1_program_aspm(struct amdgpu_device *adev)
+{
+ uint32_t def, data;
+
+ def = data = RREG32_PCIE(smnPCIE_LC_CNTL);
+ data &= ~PCIE_LC_CNTL__LC_L1_INACTIVITY_MASK;
+ data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK;
+ data |= PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK;
+ if (def != data)
+ WREG32_PCIE(smnPCIE_LC_CNTL, data);
+
+ def = data = RREG32_PCIE(smnPCIE_LC_CNTL7);
+ data |= PCIE_LC_CNTL7__LC_NBIF_ASPM_INPUT_EN_MASK;
+ if (def != data)
+ WREG32_PCIE(smnPCIE_LC_CNTL7, data);
+
+ def = data = RREG32_PCIE(smnNBIF_MGCG_CTRL_LCLK);
+ data |= NBIF_MGCG_CTRL_LCLK__NBIF_MGCG_REG_DIS_LCLK_MASK;
+ if (def != data)
+ WREG32_PCIE(smnNBIF_MGCG_CTRL_LCLK, data);
+
+ def = data = RREG32_PCIE(smnPCIE_LC_CNTL3);
+ data |= PCIE_LC_CNTL3__LC_DSC_DONT_ENTER_L23_AFTER_PME_ACK_MASK;
+ if (def != data)
+ WREG32_PCIE(smnPCIE_LC_CNTL3, data);
+
+ def = data = RREG32_PCIE(smnRCC_BIF_STRAP3);
+ data &= ~RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER_MASK;
+ data &= ~RCC_BIF_STRAP3__STRAP_VLINK_PM_L1_ENTRY_TIMER_MASK;
+ if (def != data)
+ WREG32_PCIE(smnRCC_BIF_STRAP3, data);
+
+ def = data = RREG32_PCIE(smnRCC_BIF_STRAP5);
+ data &= ~RCC_BIF_STRAP5__STRAP_VLINK_LDN_ENTRY_TIMER_MASK;
+ if (def != data)
+ WREG32_PCIE(smnRCC_BIF_STRAP5, data);
+
+ def = data = RREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2);
+ data &= ~BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__LTR_EN_MASK;
+ if (def != data)
+ WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2, data);
+
+ WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_PCIE_LTR_CAP, 0x10011001);
+
+ def = data = RREG32_PCIE(smnPSWUSP0_PCIE_LC_CNTL2);
+ data |= PSWUSP0_PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L1_MASK |
+ PSWUSP0_PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L23_MASK;
+ data &= ~PSWUSP0_PCIE_LC_CNTL2__LC_RCV_L0_TO_RCV_L0S_DIS_MASK;
+ if (def != data)
+ WREG32_PCIE(smnPSWUSP0_PCIE_LC_CNTL2, data);
+
+ def = data = RREG32_PCIE(smnPCIE_LC_CNTL6);
+ data |= PCIE_LC_CNTL6__LC_L1_POWERDOWN_MASK |
+ PCIE_LC_CNTL6__LC_RX_L0S_STANDBY_EN_MASK;
+ if (def != data)
+ WREG32_PCIE(smnPCIE_LC_CNTL6, data);
+
+ nbio_v6_1_program_ltr(adev);
+
+ def = data = RREG32_PCIE(smnRCC_BIF_STRAP3);
+ data |= 0x5DE0 << RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER__SHIFT;
+ data |= 0x0010 << RCC_BIF_STRAP3__STRAP_VLINK_PM_L1_ENTRY_TIMER__SHIFT;
+ if (def != data)
+ WREG32_PCIE(smnRCC_BIF_STRAP3, data);
+
+ def = data = RREG32_PCIE(smnRCC_BIF_STRAP5);
+ data |= 0x0010 << RCC_BIF_STRAP5__STRAP_VLINK_LDN_ENTRY_TIMER__SHIFT;
+ if (def != data)
+ WREG32_PCIE(smnRCC_BIF_STRAP5, data);
+
+ def = data = RREG32_PCIE(smnPCIE_LC_CNTL);
+ data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK;
+ data |= 0x9 << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;
+ data |= 0x1 << PCIE_LC_CNTL__LC_PMI_TO_L1_DIS__SHIFT;
+ if (def != data)
+ WREG32_PCIE(smnPCIE_LC_CNTL, data);
+
+ def = data = RREG32_PCIE(smnPCIE_LC_CNTL3);
+ data &= ~PCIE_LC_CNTL3__LC_DSC_DONT_ENTER_L23_AFTER_PME_ACK_MASK;
+ if (def != data)
+ WREG32_PCIE(smnPCIE_LC_CNTL3, data);
+}
+
const struct amdgpu_nbio_funcs nbio_v6_1_funcs = {
.get_hdp_flush_req_offset = nbio_v6_1_get_hdp_flush_req_offset,
.get_hdp_flush_done_offset = nbio_v6_1_get_hdp_flush_done_offset,
@@ -274,4 +401,5 @@ const struct amdgpu_nbio_funcs nbio_v6_1_funcs = {
.ih_control = nbio_v6_1_ih_control,
.init_registers = nbio_v6_1_init_registers,
.remap_hdp_registers = nbio_v6_1_remap_hdp_registers,
+ .program_aspm = nbio_v6_1_program_aspm,
};
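
nbio_v6_1_program_aspm() repeats one idiom throughout: read a PCIE register, clear and set fields, and write back only if the value changed. Factored into a standalone helper it would look roughly like this (helper name hypothetical; the driver keeps the idiom open-coded):

static void nbio_pcie_rmw(struct amdgpu_device *adev, u32 reg,
			  u32 clear_mask, u32 set_mask)
{
	u32 def, data;

	def = data = RREG32_PCIE(reg);
	data &= ~clear_mask;
	data |= set_mask;
	/* skip the write when nothing changed, avoiding needless
	 * traffic on the SMN register path */
	if (def != data)
		WREG32_PCIE(reg, data);
}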
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
index c477f8972d5d..cef929746739 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
@@ -31,7 +31,26 @@
#include "ivsrcid/nbio/irqsrcs_nbif_7_4.h"
#include <uapi/linux/kfd_ioctl.h>
+#define smnPCIE_LC_CNTL 0x11140280
+#define smnPCIE_LC_CNTL3 0x111402d4
+#define smnPCIE_LC_CNTL6 0x111402ec
+#define smnPCIE_LC_CNTL7 0x111402f0
#define smnNBIF_MGCG_CTRL_LCLK 0x1013a21c
+#define smnRCC_BIF_STRAP3 0x1012348c
+#define RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER_MASK 0x0000FFFFL
+#define RCC_BIF_STRAP3__STRAP_VLINK_PM_L1_ENTRY_TIMER_MASK 0xFFFF0000L
+#define smnRCC_BIF_STRAP5 0x10123494
+#define RCC_BIF_STRAP5__STRAP_VLINK_LDN_ENTRY_TIMER_MASK 0x0000FFFFL
+#define smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2 0x1014008c
+#define BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__LTR_EN_MASK 0x0400L
+#define smnBIF_CFG_DEV0_EPF0_PCIE_LTR_CAP 0x10140324
+#define smnPSWUSP0_PCIE_LC_CNTL2 0x111402c4
+#define smnRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL 0x10123538
+#define smnRCC_BIF_STRAP2 0x10123488
+#define RCC_BIF_STRAP2__STRAP_LTR_IN_ASPML1_DIS_MASK 0x00004000L
+#define RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER__SHIFT 0x0
+#define RCC_BIF_STRAP3__STRAP_VLINK_PM_L1_ENTRY_TIMER__SHIFT 0x10
+#define RCC_BIF_STRAP5__STRAP_VLINK_LDN_ENTRY_TIMER__SHIFT 0x0
/*
 * These are nbio v7_4_1 register masks. Temporarily define these here since
@@ -557,6 +576,121 @@ static void nbio_v7_4_enable_doorbell_interrupt(struct amdgpu_device *adev,
DOORBELL_INTERRUPT_DISABLE, enable ? 0 : 1);
}
+const struct amdgpu_nbio_ras_funcs nbio_v7_4_ras_funcs = {
+ .handle_ras_controller_intr_no_bifring = nbio_v7_4_handle_ras_controller_intr_no_bifring,
+ .handle_ras_err_event_athub_intr_no_bifring = nbio_v7_4_handle_ras_err_event_athub_intr_no_bifring,
+ .init_ras_controller_interrupt = nbio_v7_4_init_ras_controller_interrupt,
+ .init_ras_err_event_athub_interrupt = nbio_v7_4_init_ras_err_event_athub_interrupt,
+ .query_ras_error_count = nbio_v7_4_query_ras_error_count,
+ .ras_late_init = amdgpu_nbio_ras_late_init,
+ .ras_fini = amdgpu_nbio_ras_fini,
+};
+
+static void nbio_v7_4_program_ltr(struct amdgpu_device *adev)
+{
+ uint32_t def, data;
+
+ WREG32_PCIE(smnRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL, 0x75EB);
+
+ def = data = RREG32_PCIE(smnRCC_BIF_STRAP2);
+ data &= ~RCC_BIF_STRAP2__STRAP_LTR_IN_ASPML1_DIS_MASK;
+ if (def != data)
+ WREG32_PCIE(smnRCC_BIF_STRAP2, data);
+
+ def = data = RREG32_PCIE(smnRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL);
+ data &= ~EP_PCIE_TX_LTR_CNTL__LTR_PRIV_MSG_DIS_IN_PM_NON_D0_MASK;
+ if (def != data)
+ WREG32_PCIE(smnRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL, data);
+
+ def = data = RREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2);
+ data |= BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__LTR_EN_MASK;
+ if (def != data)
+ WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2, data);
+}
+
+static void nbio_v7_4_program_aspm(struct amdgpu_device *adev)
+{
+ uint32_t def, data;
+
+ def = data = RREG32_PCIE(smnPCIE_LC_CNTL);
+ data &= ~PCIE_LC_CNTL__LC_L1_INACTIVITY_MASK;
+ data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK;
+ data |= PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK;
+ if (def != data)
+ WREG32_PCIE(smnPCIE_LC_CNTL, data);
+
+ def = data = RREG32_PCIE(smnPCIE_LC_CNTL7);
+ data |= PCIE_LC_CNTL7__LC_NBIF_ASPM_INPUT_EN_MASK;
+ if (def != data)
+ WREG32_PCIE(smnPCIE_LC_CNTL7, data);
+
+ def = data = RREG32_PCIE(smnNBIF_MGCG_CTRL_LCLK);
+ data |= NBIF_MGCG_CTRL_LCLK__NBIF_MGCG_REG_DIS_LCLK_MASK;
+ if (def != data)
+ WREG32_PCIE(smnNBIF_MGCG_CTRL_LCLK, data);
+
+ def = data = RREG32_PCIE(smnPCIE_LC_CNTL3);
+ data |= PCIE_LC_CNTL3__LC_DSC_DONT_ENTER_L23_AFTER_PME_ACK_MASK;
+ if (def != data)
+ WREG32_PCIE(smnPCIE_LC_CNTL3, data);
+
+ def = data = RREG32_PCIE(smnRCC_BIF_STRAP3);
+ data &= ~RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER_MASK;
+ data &= ~RCC_BIF_STRAP3__STRAP_VLINK_PM_L1_ENTRY_TIMER_MASK;
+ if (def != data)
+ WREG32_PCIE(smnRCC_BIF_STRAP3, data);
+
+ def = data = RREG32_PCIE(smnRCC_BIF_STRAP5);
+ data &= ~RCC_BIF_STRAP5__STRAP_VLINK_LDN_ENTRY_TIMER_MASK;
+ if (def != data)
+ WREG32_PCIE(smnRCC_BIF_STRAP5, data);
+
+ def = data = RREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2);
+ data &= ~BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__LTR_EN_MASK;
+ if (def != data)
+ WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2, data);
+
+ WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_PCIE_LTR_CAP, 0x10011001);
+
+ def = data = RREG32_PCIE(smnPSWUSP0_PCIE_LC_CNTL2);
+ data |= PSWUSP0_PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L1_MASK |
+ PSWUSP0_PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L23_MASK;
+ data &= ~PSWUSP0_PCIE_LC_CNTL2__LC_RCV_L0_TO_RCV_L0S_DIS_MASK;
+ if (def != data)
+ WREG32_PCIE(smnPSWUSP0_PCIE_LC_CNTL2, data);
+
+ def = data = RREG32_PCIE(smnPCIE_LC_CNTL6);
+ data |= PCIE_LC_CNTL6__LC_L1_POWERDOWN_MASK |
+ PCIE_LC_CNTL6__LC_RX_L0S_STANDBY_EN_MASK;
+ if (def != data)
+ WREG32_PCIE(smnPCIE_LC_CNTL6, data);
+
+ nbio_v7_4_program_ltr(adev);
+
+ def = data = RREG32_PCIE(smnRCC_BIF_STRAP3);
+ data |= 0x5DE0 << RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER__SHIFT;
+ data |= 0x0010 << RCC_BIF_STRAP3__STRAP_VLINK_PM_L1_ENTRY_TIMER__SHIFT;
+ if (def != data)
+ WREG32_PCIE(smnRCC_BIF_STRAP3, data);
+
+ def = data = RREG32_PCIE(smnRCC_BIF_STRAP5);
+ data |= 0x0010 << RCC_BIF_STRAP5__STRAP_VLINK_LDN_ENTRY_TIMER__SHIFT;
+ if (def != data)
+ WREG32_PCIE(smnRCC_BIF_STRAP5, data);
+
+ def = data = RREG32_PCIE(smnPCIE_LC_CNTL);
+ data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK;
+ data |= 0x9 << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;
+ data |= 0x1 << PCIE_LC_CNTL__LC_PMI_TO_L1_DIS__SHIFT;
+ if (def != data)
+ WREG32_PCIE(smnPCIE_LC_CNTL, data);
+
+ def = data = RREG32_PCIE(smnPCIE_LC_CNTL3);
+ data &= ~PCIE_LC_CNTL3__LC_DSC_DONT_ENTER_L23_AFTER_PME_ACK_MASK;
+ if (def != data)
+ WREG32_PCIE(smnPCIE_LC_CNTL3, data);
+}
+
const struct amdgpu_nbio_funcs nbio_v7_4_funcs = {
.get_hdp_flush_req_offset = nbio_v7_4_get_hdp_flush_req_offset,
.get_hdp_flush_done_offset = nbio_v7_4_get_hdp_flush_done_offset,
@@ -577,10 +711,5 @@ const struct amdgpu_nbio_funcs nbio_v7_4_funcs = {
.ih_control = nbio_v7_4_ih_control,
.init_registers = nbio_v7_4_init_registers,
.remap_hdp_registers = nbio_v7_4_remap_hdp_registers,
- .handle_ras_controller_intr_no_bifring = nbio_v7_4_handle_ras_controller_intr_no_bifring,
- .handle_ras_err_event_athub_intr_no_bifring = nbio_v7_4_handle_ras_err_event_athub_intr_no_bifring,
- .init_ras_controller_interrupt = nbio_v7_4_init_ras_controller_interrupt,
- .init_ras_err_event_athub_interrupt = nbio_v7_4_init_ras_err_event_athub_interrupt,
- .query_ras_error_count = nbio_v7_4_query_ras_error_count,
- .ras_late_init = amdgpu_nbio_ras_late_init,
+ .program_aspm = nbio_v7_4_program_aspm,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.h b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.h
index b1ac82872752..b8216581ec8d 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.h
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.h
@@ -28,5 +28,6 @@
extern const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg;
extern const struct amdgpu_nbio_funcs nbio_v7_4_funcs;
+extern const struct amdgpu_nbio_ras_funcs nbio_v7_4_ras_funcs;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
index 5846eac292c3..d54af7f8801b 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/nv.c
@@ -34,7 +34,6 @@
#include "amdgpu_vce.h"
#include "amdgpu_ucode.h"
#include "amdgpu_psp.h"
-#include "amdgpu_smu.h"
#include "atom.h"
#include "amd_pcie.h"
@@ -516,21 +515,9 @@ static int nv_asic_mode2_reset(struct amdgpu_device *adev)
return ret;
}
-static bool nv_asic_supports_baco(struct amdgpu_device *adev)
-{
- struct smu_context *smu = &adev->smu;
-
- if (smu_baco_is_support(smu))
- return true;
- else
- return false;
-}
-
static enum amd_reset_method
nv_asic_reset_method(struct amdgpu_device *adev)
{
- struct smu_context *smu = &adev->smu;
-
if (amdgpu_reset_method == AMD_RESET_METHOD_MODE1 ||
amdgpu_reset_method == AMD_RESET_METHOD_MODE2 ||
amdgpu_reset_method == AMD_RESET_METHOD_BACO ||
@@ -549,7 +536,7 @@ nv_asic_reset_method(struct amdgpu_device *adev)
case CHIP_DIMGREY_CAVEFISH:
return AMD_RESET_METHOD_MODE1;
default:
- if (smu_baco_is_support(smu))
+ if (amdgpu_dpm_is_baco_supported(adev))
return AMD_RESET_METHOD_BACO;
else
return AMD_RESET_METHOD_MODE1;
@@ -559,11 +546,6 @@ nv_asic_reset_method(struct amdgpu_device *adev)
static int nv_asic_reset(struct amdgpu_device *adev)
{
int ret = 0;
- struct smu_context *smu = &adev->smu;
-
- /* skip reset on vangogh for now */
- if (adev->asic_type == CHIP_VANGOGH)
- return 0;
switch (nv_asic_reset_method(adev)) {
case AMD_RESET_METHOD_PCI:
@@ -572,13 +554,7 @@ static int nv_asic_reset(struct amdgpu_device *adev)
break;
case AMD_RESET_METHOD_BACO:
dev_info(adev->dev, "BACO reset\n");
-
- ret = smu_baco_enter(smu);
- if (ret)
- return ret;
- ret = smu_baco_exit(smu);
- if (ret)
- return ret;
+ ret = amdgpu_dpm_baco_reset(adev);
break;
case AMD_RESET_METHOD_MODE2:
dev_info(adev->dev, "MODE2 reset\n");
@@ -625,8 +601,7 @@ static void nv_program_aspm(struct amdgpu_device *adev)
if (amdgpu_aspm != 1)
return;
- if ((adev->asic_type >= CHIP_SIENNA_CICHLID) &&
- !(adev->flags & AMD_IS_APU) &&
+ if (!(adev->flags & AMD_IS_APU) &&
(adev->nbio.funcs->program_aspm))
adev->nbio.funcs->program_aspm(adev);
@@ -958,12 +933,7 @@ static int nv_update_umd_stable_pstate(struct amdgpu_device *adev,
if (adev->gfx.funcs->update_perfmon_mgcg)
adev->gfx.funcs->update_perfmon_mgcg(adev, !enter);
- /*
- * The ASPM function is not fully enabled and verified on
- * Navi yet. Temporarily skip this until ASPM enabled.
- */
- if ((adev->asic_type >= CHIP_SIENNA_CICHLID) &&
- !(adev->flags & AMD_IS_APU) &&
+ if (!(adev->flags & AMD_IS_APU) &&
(adev->nbio.funcs->enable_aspm))
adev->nbio.funcs->enable_aspm(adev, !enter);
@@ -986,7 +956,7 @@ static const struct amdgpu_asic_funcs nv_asic_funcs =
.need_full_reset = &nv_need_full_reset,
.need_reset_on_init = &nv_need_reset_on_init,
.get_pcie_replay_count = &nv_get_pcie_replay_count,
- .supports_baco = &nv_asic_supports_baco,
+ .supports_baco = &amdgpu_dpm_is_baco_supported,
.pre_asic_init = &nv_pre_asic_init,
.update_umd_stable_pstate = &nv_update_umd_stable_pstate,
.query_video_codecs = &nv_query_video_codecs,
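
The BACO branch collapses the explicit enter/exit pair into amdgpu_dpm_baco_reset(). A hedged sketch of the equivalent behavior for swSMU parts like these, mirroring the deleted calls (the real wrapper dispatches through the dpm layer and also covers powerplay-based ASICs):

static int nv_baco_reset_sketch(struct amdgpu_device *adev)
{
	int ret;

	ret = smu_baco_enter(&adev->smu);	/* drop the ASIC into BACO */
	if (ret)
		return ret;
	return smu_baco_exit(&adev->smu);	/* exiting completes the reset */
}

Routing through the dpm wrapper is also what lets nv.c drop its amdgpu_smu.h include entirely.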
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
index a41b054fe0ba..96064c343163 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
+++ b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
@@ -102,6 +102,21 @@ enum psp_gfx_cmd_id
	/* IDs up to 0x1F are reserved for older programs (Raven, Vega 10/12/20) */
GFX_CMD_ID_LOAD_TOC = 0x00000020, /* Load TOC and obtain TMR size */
GFX_CMD_ID_AUTOLOAD_RLC = 0x00000021, /* Indicates all graphics fw loaded, start RLC autoload */
+ GFX_CMD_ID_BOOT_CFG = 0x00000022, /* Boot Config */
+};
+
+/* PSP boot config sub-commands */
+enum psp_gfx_boot_config_cmd
+{
+ BOOTCFG_CMD_SET = 1, /* Set boot configuration settings */
+ BOOTCFG_CMD_GET = 2, /* Get boot configuration settings */
+ BOOTCFG_CMD_INVALIDATE = 3 /* Reset current boot configuration settings to VBIOS defaults */
+};
+
+/* PSP boot config bitmask values */
+enum psp_gfx_boot_config
+{
+ BOOT_CONFIG_GECC = 0x1,
};
/* Command to load Trusted Application binary into PSP OS. */
@@ -170,10 +185,19 @@ struct psp_gfx_cmd_setup_tmr
uint32_t buf_phy_addr_lo; /* bits [31:0] of GPU Virtual address of TMR buffer (must be 4 KB aligned) */
uint32_t buf_phy_addr_hi; /* bits [63:32] of GPU Virtual address of TMR buffer */
uint32_t buf_size; /* buffer size in bytes (must be multiple of 4 KB) */
+ union {
+ struct {
+			uint32_t sriov_enabled:1; /* whether the device runs under SR-IOV */
+			uint32_t virt_phy_addr:1; /* driver passes both virtual and physical addresses to PSP */
+ uint32_t reserved:30;
+ } bitfield;
+ uint32_t tmr_flags;
+ };
+ uint32_t system_phy_addr_lo; /* bits [31:0] of system physical address of TMR buffer (must be 4 KB aligned) */
+ uint32_t system_phy_addr_hi; /* bits [63:32] of system physical address of TMR buffer */
};
-
/* FW types for GFX_CMD_ID_LOAD_IP_FW command. Limit 31. */
enum psp_gfx_fw_type {
GFX_FW_TYPE_NONE = 0, /* */
@@ -273,6 +297,15 @@ struct psp_gfx_cmd_load_toc
uint32_t toc_size; /* FW buffer size in bytes */
};
+/* Dynamic boot configuration */
+struct psp_gfx_cmd_boot_cfg
+{
+ uint32_t timestamp; /* calendar time as number of seconds */
+ enum psp_gfx_boot_config_cmd sub_cmd; /* sub-command indicating how to process command data */
+ uint32_t boot_config; /* dynamic boot configuration bitmask */
+ uint32_t boot_config_valid; /* dynamic boot configuration valid bits bitmask */
+};
+
/* All GFX ring buffer commands. */
union psp_gfx_commands
{
@@ -285,6 +318,7 @@ union psp_gfx_commands
struct psp_gfx_cmd_reg_prog cmd_setup_reg_prog;
struct psp_gfx_cmd_setup_tmr cmd_setup_vmr;
struct psp_gfx_cmd_load_toc cmd_load_toc;
+ struct psp_gfx_cmd_boot_cfg boot_cfg;
};
struct psp_gfx_uresp_reserved
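
As a usage illustration for the interface added above, a caller would issue GFX_CMD_ID_BOOT_CFG roughly as follows; cmd is assumed to point at a zeroed command buffer (struct psp_gfx_cmd_resp), and the ring-submission machinery is omitted:

	/* enable GECC through the SET sub-command; only bits flagged in
	 * boot_config_valid are applied by the PSP */
	cmd->cmd_id = GFX_CMD_ID_BOOT_CFG;
	cmd->cmd.boot_cfg.sub_cmd = BOOTCFG_CMD_SET;
	cmd->cmd.boot_cfg.boot_config = BOOT_CONFIG_GECC;
	cmd->cmd.boot_cfg.boot_config_valid = BOOT_CONFIG_GECC;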
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
index c325d6f53a71..589410c32d09 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
@@ -598,7 +598,7 @@ static int psp_v11_0_memory_training_send_msg(struct psp_context *psp, int msg)
}
/*
- * save and restore proces
+ * save and restore process
*/
static int psp_v11_0_memory_training(struct psp_context *psp, uint32_t ops)
{
@@ -661,9 +661,9 @@ static int psp_v11_0_memory_training(struct psp_context *psp, uint32_t ops)
if (ops & PSP_MEM_TRAIN_SEND_LONG_MSG) {
/*
- * Long traing will encroach certain mount of bottom VRAM,
- * saving the content of this bottom VRAM to system memory
- * before training, and restoring it after training to avoid
+ * Long training will encroach a certain amount on the bottom of VRAM;
+ * save the content from the bottom of VRAM to system memory
+ * before training, and restore it after training to avoid
* VRAM corruption.
*/
sz = GDDR6_MEM_TRAINING_ENCROACHED_SIZE;
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index eb5dc6c5b46e..9f0dda040ec8 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -876,12 +876,10 @@ static int sdma_v2_4_sw_init(void *handle)
ring->ring_obj = NULL;
ring->use_doorbell = false;
sprintf(ring->name, "sdma%d", i);
- r = amdgpu_ring_init(adev, ring, 1024,
- &adev->sdma.trap_irq,
- (i == 0) ?
- AMDGPU_SDMA_IRQ_INSTANCE0 :
+ r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq,
+ (i == 0) ? AMDGPU_SDMA_IRQ_INSTANCE0 :
AMDGPU_SDMA_IRQ_INSTANCE1,
- AMDGPU_RING_PRIO_DEFAULT);
+ AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index ad308d8c6d30..135727b59c41 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -1160,12 +1160,10 @@ static int sdma_v3_0_sw_init(void *handle)
}
sprintf(ring->name, "sdma%d", i);
- r = amdgpu_ring_init(adev, ring, 1024,
- &adev->sdma.trap_irq,
- (i == 0) ?
- AMDGPU_SDMA_IRQ_INSTANCE0 :
+ r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq,
+ (i == 0) ? AMDGPU_SDMA_IRQ_INSTANCE0 :
AMDGPU_SDMA_IRQ_INSTANCE1,
- AMDGPU_RING_PRIO_DEFAULT);
+ AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 98b7db516438..5715be6770ec 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -1968,7 +1968,7 @@ static int sdma_v4_0_sw_init(void *handle)
sprintf(ring->name, "sdma%d", i);
r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq,
AMDGPU_SDMA_IRQ_INSTANCE0 + i,
- AMDGPU_RING_PRIO_DEFAULT);
+ AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
return r;
@@ -1987,7 +1987,7 @@ static int sdma_v4_0_sw_init(void *handle)
r = amdgpu_ring_init(adev, ring, 1024,
&adev->sdma.trap_irq,
AMDGPU_SDMA_IRQ_INSTANCE0 + i,
- AMDGPU_RING_PRIO_DEFAULT);
+ AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4.c
index 6fcb95c89999..bf95007f0843 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4.c
@@ -160,6 +160,7 @@ static const struct soc15_ras_field_entry sdma_v4_4_ras_fields[] = {
};
static void sdma_v4_4_get_ras_error_count(struct amdgpu_device *adev,
+ uint32_t reg_offset,
uint32_t value,
uint32_t instance,
uint32_t *sec_count)
@@ -169,6 +170,9 @@ static void sdma_v4_4_get_ras_error_count(struct amdgpu_device *adev,
	/* double bit (multi-bit) error detection is not supported */
for (i = 0; i < ARRAY_SIZE(sdma_v4_4_ras_fields); i++) {
+ if (sdma_v4_4_ras_fields[i].reg_offset != reg_offset)
+ continue;
+
/* the SDMA_EDC_COUNTER register in each sdma instance
* shares the same sed shift_mask
	 */
@@ -197,13 +201,30 @@ static int sdma_v4_4_query_ras_error_count(struct amdgpu_device *adev,
reg_value = RREG32(reg_offset);
/* double bit error is not supported */
if (reg_value)
- sdma_v4_4_get_ras_error_count(adev, reg_value, instance, &sec_count);
- /* err_data->ce_count should be initialized to 0
- * before calling into this function */
- err_data->ce_count += sec_count;
- /* double bit error is not supported
- * set ue count to 0 */
- err_data->ue_count = 0;
+ sdma_v4_4_get_ras_error_count(adev, regSDMA0_EDC_COUNTER, reg_value,
+ instance, &sec_count);
+
+ reg_offset = sdma_v4_4_get_reg_offset(adev, instance, regSDMA0_EDC_COUNTER2);
+ reg_value = RREG32(reg_offset);
+ /* double bit error is not supported */
+ if (reg_value)
+ sdma_v4_4_get_ras_error_count(adev, regSDMA0_EDC_COUNTER2, reg_value,
+ instance, &sec_count);
+
+ /*
+ * err_data->ue_count should be initialized to 0
+ * before calling into this function
+ *
+	 * SDMA RAS supports single-bit uncorrectable error detection,
+	 * so accumulate the result into the uncorrectable error count.
+ */
+ err_data->ue_count += sec_count;
+
+ /*
+ * SDMA RAS does not support correctable errors.
+ * Set ce count to 0.
+ */
+ err_data->ce_count = 0;
return 0;
};
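
The rewritten comments pin down the calling convention: the counters must be zeroed before the per-IP query runs, and harvested SEC counts are now reported as uncorrectable. A hedged usage sketch (the real call site lives in the RAS core; the argument order is assumed from the function body above):

	struct ras_err_data err_data = { 0 };

	/* ue_count accumulates the harvested SEC counts; ce_count is
	 * forced to zero by the callback, as documented above */
	sdma_v4_4_query_ras_error_count(adev, instance, &err_data);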
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
index d345e324837d..920fc6d4a127 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
@@ -1273,12 +1273,10 @@ static int sdma_v5_0_sw_init(void *handle)
: (adev->doorbell_index.sdma_engine[1] << 1); // get DWORD offset
sprintf(ring->name, "sdma%d", i);
- r = amdgpu_ring_init(adev, ring, 1024,
- &adev->sdma.trap_irq,
- (i == 0) ?
- AMDGPU_SDMA_IRQ_INSTANCE0 :
+ r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq,
+ (i == 0) ? AMDGPU_SDMA_IRQ_INSTANCE0 :
AMDGPU_SDMA_IRQ_INSTANCE1,
- AMDGPU_RING_PRIO_DEFAULT);
+ AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
index b39e7db0e299..b1ad9e52b234 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
@@ -370,6 +370,33 @@ static void sdma_v5_2_ring_emit_ib(struct amdgpu_ring *ring,
}
/**
+ * sdma_v5_2_ring_emit_mem_sync - flush the IB by graphics cache rinse
+ *
+ * @ring: amdgpu ring pointer
+ *
+ * Flush the IB through a graphics cache rinse (GCR) request.
+ */
+static void sdma_v5_2_ring_emit_mem_sync(struct amdgpu_ring *ring)
+{
+ uint32_t gcr_cntl =
+ SDMA_GCR_GL2_INV | SDMA_GCR_GL2_WB | SDMA_GCR_GLM_INV |
+ SDMA_GCR_GL1_INV | SDMA_GCR_GLV_INV | SDMA_GCR_GLK_INV |
+ SDMA_GCR_GLI_INV(1);
+
+	/* flush the entire L0/L1/L2 cache; this can be narrowed later based on performance requirements */
+ amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_GCR_REQ));
+ amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD1_BASE_VA_31_7(0));
+ amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD2_GCR_CONTROL_15_0(gcr_cntl) |
+ SDMA_PKT_GCR_REQ_PAYLOAD2_BASE_VA_47_32(0));
+ amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD3_LIMIT_VA_31_7(0) |
+ SDMA_PKT_GCR_REQ_PAYLOAD3_GCR_CONTROL_18_16(gcr_cntl >> 16));
+ amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD4_LIMIT_VA_47_32(0) |
+ SDMA_PKT_GCR_REQ_PAYLOAD4_VMID(0));
+}
+
+/**
* sdma_v5_2_ring_emit_hdp_flush - emit an hdp flush on the DMA ring
*
* @ring: amdgpu ring pointer
@@ -1283,10 +1310,9 @@ static int sdma_v5_2_sw_init(void *handle)
			(adev->doorbell_index.sdma_engine[i] << 1); // get DWORD offset
sprintf(ring->name, "sdma%d", i);
- r = amdgpu_ring_init(adev, ring, 1024,
- &adev->sdma.trap_irq,
+ r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq,
AMDGPU_SDMA_IRQ_INSTANCE0 + i,
- AMDGPU_RING_PRIO_DEFAULT);
+ AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
return r;
}
@@ -1664,6 +1690,7 @@ static const struct amdgpu_ring_funcs sdma_v5_2_ring_funcs = {
10 + 10 + 10, /* sdma_v5_2_ring_emit_fence x3 for user fence, vm fence */
.emit_ib_size = 7 + 6, /* sdma_v5_2_ring_emit_ib */
.emit_ib = sdma_v5_2_ring_emit_ib,
+ .emit_mem_sync = sdma_v5_2_ring_emit_mem_sync,
.emit_fence = sdma_v5_2_ring_emit_fence,
.emit_pipeline_sync = sdma_v5_2_ring_emit_pipeline_sync,
.emit_vm_flush = sdma_v5_2_ring_emit_vm_flush,
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c
index 488497ad5e0c..cb703e307238 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dma.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c
@@ -507,10 +507,9 @@ static int si_dma_sw_init(void *handle)
sprintf(ring->name, "sdma%d", i);
r = amdgpu_ring_init(adev, ring, 1024,
&adev->sdma.trap_irq,
- (i == 0) ?
- AMDGPU_SDMA_IRQ_INSTANCE0 :
+ (i == 0) ? AMDGPU_SDMA_IRQ_INSTANCE0 :
AMDGPU_SDMA_IRQ_INSTANCE1,
- AMDGPU_RING_PRIO_DEFAULT);
+ AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 3808402cd964..d80e12b80c7e 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -76,7 +76,6 @@
#include "smuio_v13_0.h"
#include "dce_virtual.h"
#include "mxgpu_ai.h"
-#include "amdgpu_smu.h"
#include "amdgpu_ras.h"
#include "amdgpu_xgmi.h"
#include <uapi/linux/kfd_ioctl.h>
@@ -817,11 +816,12 @@ static void soc15_pcie_gen3_enable(struct amdgpu_device *adev)
static void soc15_program_aspm(struct amdgpu_device *adev)
{
-
- if (amdgpu_aspm == 0)
+ if (amdgpu_aspm != 1)
return;
- /* todo */
+ if (!(adev->flags & AMD_IS_APU) &&
+ (adev->nbio.funcs->program_aspm))
+ adev->nbio.funcs->program_aspm(adev);
}
static void soc15_enable_doorbell_aperture(struct amdgpu_device *adev,
@@ -1495,8 +1495,8 @@ static int soc15_common_early_init(void *handle)
AMD_CG_SUPPORT_HDP_LS |
AMD_CG_SUPPORT_SDMA_MGCG |
AMD_CG_SUPPORT_SDMA_LS |
- AMD_CG_SUPPORT_IH_CG;
- /*AMD_CG_SUPPORT_VCN_MGCG |AMD_CG_SUPPORT_JPEG_MGCG;*/
+ AMD_CG_SUPPORT_IH_CG |
+ AMD_CG_SUPPORT_VCN_MGCG | AMD_CG_SUPPORT_JPEG_MGCG;
adev->pg_flags = AMD_PG_SUPPORT_VCN_DPG;
adev->external_rev_id = adev->rev_id + 0x3c;
break;
@@ -1524,8 +1524,9 @@ static int soc15_common_late_init(void *handle)
if (adev->hdp.funcs->reset_ras_error_count)
adev->hdp.funcs->reset_ras_error_count(adev);
- if (adev->nbio.funcs->ras_late_init)
- r = adev->nbio.funcs->ras_late_init(adev);
+ if (adev->nbio.ras_funcs &&
+ adev->nbio.ras_funcs->ras_late_init)
+ r = adev->nbio.ras_funcs->ras_late_init(adev);
return r;
}
@@ -1546,7 +1547,9 @@ static int soc15_common_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- amdgpu_nbio_ras_fini(adev);
+ if (adev->nbio.ras_funcs &&
+ adev->nbio.ras_funcs->ras_fini)
+ adev->nbio.ras_funcs->ras_fini(adev);
adev->df.funcs->sw_fini(adev);
return 0;
}
@@ -1610,9 +1613,11 @@ static int soc15_common_hw_fini(void *handle)
if (adev->nbio.ras_if &&
amdgpu_ras_is_supported(adev, adev->nbio.ras_if->block)) {
- if (adev->nbio.funcs->init_ras_controller_interrupt)
+ if (adev->nbio.ras_funcs &&
+ adev->nbio.ras_funcs->init_ras_controller_interrupt)
amdgpu_irq_put(adev, &adev->nbio.ras_controller_irq, 0);
- if (adev->nbio.funcs->init_ras_err_event_athub_interrupt)
+ if (adev->nbio.ras_funcs &&
+ adev->nbio.ras_funcs->init_ras_err_event_athub_interrupt)
amdgpu_irq_put(adev, &adev->nbio.ras_err_event_athub_irq, 0);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15_common.h b/drivers/gpu/drm/amd/amdgpu/soc15_common.h
index 8cdf5d1685cb..14bd794bbea6 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15_common.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15_common.h
@@ -77,27 +77,11 @@
})
#define WREG32_RLC(reg, value) \
- do { \
- if (amdgpu_sriov_fullaccess(adev)) { \
- uint32_t i = 0; \
- uint32_t retries = 50000; \
- uint32_t r0 = adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG0_BASE_IDX] + mmSCRATCH_REG0; \
- uint32_t r1 = adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG1; \
- uint32_t spare_int = adev->reg_offset[GC_HWIP][0][mmRLC_SPARE_INT_BASE_IDX] + mmRLC_SPARE_INT; \
- WREG32(r0, value); \
- WREG32(r1, (reg | 0x80000000)); \
- WREG32(spare_int, 0x1); \
- for (i = 0; i < retries; i++) { \
- u32 tmp = RREG32(r1); \
- if (!(tmp & 0x80000000)) \
- break; \
- udelay(10); \
- } \
- if (i >= retries) \
- pr_err("timeout: rlcg program reg:0x%05x failed !\n", reg); \
- } else { \
- WREG32(reg, value); \
- } \
+ do { \
+ if (adev->gfx.rlc.funcs->rlcg_wreg) \
+ adev->gfx.rlc.funcs->rlcg_wreg(adev, reg, value, 0); \
+ else \
+ WREG32(reg, value); \
} while (0)
#define WREG32_RLC_EX(prefix, reg, value) \
@@ -125,23 +109,24 @@
} while (0)
#define WREG32_SOC15_RLC_SHADOW(ip, inst, reg, value) \
- do { \
- uint32_t target_reg = adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg;\
- if (amdgpu_sriov_fullaccess(adev)) { \
- uint32_t r2 = adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG2; \
- uint32_t r3 = adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG3; \
- uint32_t grbm_cntl = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_CNTL_BASE_IDX] + mmGRBM_GFX_CNTL; \
- uint32_t grbm_idx = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_INDEX_BASE_IDX] + mmGRBM_GFX_INDEX; \
- if (target_reg == grbm_cntl) \
- WREG32(r2, value); \
- else if (target_reg == grbm_idx) \
- WREG32(r3, value); \
- WREG32(target_reg, value); \
- } else { \
- WREG32(target_reg, value); \
- } \
+ WREG32_RLC((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg), value)
+
+#define RREG32_RLC(reg) \
+ (adev->gfx.rlc.funcs->rlcg_rreg ? \
+ adev->gfx.rlc.funcs->rlcg_rreg(adev, reg, 0) : RREG32(reg))
+
+#define WREG32_RLC_NO_KIQ(reg, value) \
+ do { \
+ if (adev->gfx.rlc.funcs->rlcg_wreg) \
+ adev->gfx.rlc.funcs->rlcg_wreg(adev, reg, value, AMDGPU_REGS_NO_KIQ); \
+ else \
+ WREG32_NO_KIQ(reg, value); \
} while (0)
+#define RREG32_RLC_NO_KIQ(reg) \
+ (adev->gfx.rlc.funcs->rlcg_rreg ? \
+ adev->gfx.rlc.funcs->rlcg_rreg(adev, reg, AMDGPU_REGS_NO_KIQ) : RREG32_NO_KIQ(reg))
+
#define WREG32_SOC15_RLC_SHADOW_EX(prefix, ip, inst, reg, value) \
do { \
uint32_t target_reg = adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg;\
@@ -160,10 +145,13 @@
} \
} while (0)
+#define RREG32_SOC15_RLC(ip, inst, reg) \
+ RREG32_RLC(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg)
+
#define WREG32_SOC15_RLC(ip, inst, reg, value) \
do { \
- uint32_t target_reg = adev->reg_offset[GC_HWIP][0][reg##_BASE_IDX] + reg;\
- WREG32_RLC(target_reg, value); \
+ uint32_t target_reg = adev->reg_offset[ip##_HWIP][0][reg##_BASE_IDX] + reg;\
+ WREG32_RLC(target_reg, value); \
} while (0)
#define WREG32_SOC15_RLC_EX(prefix, ip, inst, reg, value) \
@@ -173,11 +161,14 @@
} while (0)
#define WREG32_FIELD15_RLC(ip, idx, reg, field, val) \
- WREG32_RLC((adev->reg_offset[ip##_HWIP][idx][mm##reg##_BASE_IDX] + mm##reg), \
- (RREG32(adev->reg_offset[ip##_HWIP][idx][mm##reg##_BASE_IDX] + mm##reg) \
- & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field))
+ WREG32_RLC((adev->reg_offset[ip##_HWIP][idx][mm##reg##_BASE_IDX] + mm##reg), \
+ (RREG32_RLC(adev->reg_offset[ip##_HWIP][idx][mm##reg##_BASE_IDX] + mm##reg) \
+ & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field))
#define WREG32_SOC15_OFFSET_RLC(ip, inst, reg, offset, value) \
- WREG32_RLC(((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + offset), value)
+ WREG32_RLC(((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + offset), value)
+
+#define RREG32_SOC15_OFFSET_RLC(ip, inst, reg, offset) \
+ RREG32_RLC(((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + offset))
#endif
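
The WREG32_RLC rework replaces an open-coded SR-IOV handshake with a per-ASIC rlcg_wreg callback. Reconstructed from the removed macro body, such a callback would implement roughly this sequence (function name hypothetical):

static void gfx_rlcg_wreg_sketch(struct amdgpu_device *adev, u32 reg,
				 u32 value, u32 flag)
{
	u32 r0 = adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG0_BASE_IDX] + mmSCRATCH_REG0;
	u32 r1 = adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG1;
	u32 spare_int = adev->reg_offset[GC_HWIP][0][mmRLC_SPARE_INT_BASE_IDX] + mmRLC_SPARE_INT;
	u32 i, retries = 50000;

	/* hand the write to the RLC: value goes in SCRATCH_REG0, the
	 * target offset (busy bit set) in SCRATCH_REG1, then ring the
	 * spare interrupt and poll until the busy bit clears */
	WREG32(r0, value);
	WREG32(r1, reg | 0x80000000);
	WREG32(spare_int, 0x1);
	for (i = 0; i < retries; i++) {
		if (!(RREG32(r1) & 0x80000000))
			break;
		udelay(10);
	}
	if (i >= retries)
		pr_err("timeout: rlcg program reg:0x%05x failed!\n", reg);
}

Moving this behind a function pointer also lets the NO_KIQ variants and the new RREG32_RLC read path share one implementation per ASIC.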
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c
index 96d7769609f4..20b44983ac94 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c
@@ -22,6 +22,7 @@
*/
#include "umc_v6_1.h"
#include "amdgpu_ras.h"
+#include "amdgpu_umc.h"
#include "amdgpu.h"
#include "rsmu/rsmu_0_0_2_offset.h"
@@ -464,9 +465,10 @@ static void umc_v6_1_err_cnt_init(struct amdgpu_device *adev)
umc_v6_1_enable_umc_index_mode(adev);
}
-const struct amdgpu_umc_funcs umc_v6_1_funcs = {
+const struct amdgpu_umc_ras_funcs umc_v6_1_ras_funcs = {
.err_cnt_init = umc_v6_1_err_cnt_init,
.ras_late_init = amdgpu_umc_ras_late_init,
+ .ras_fini = amdgpu_umc_ras_fini,
.query_ras_error_count = umc_v6_1_query_ras_error_count,
.query_ras_error_address = umc_v6_1_query_ras_error_address,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.h b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.h
index 0ce1d323cfdd..5dc36c730bb2 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.h
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.h
@@ -45,7 +45,7 @@
/* umc ce count initial value */
#define UMC_V6_1_CE_CNT_INIT (UMC_V6_1_CE_CNT_MAX - UMC_V6_1_CE_INT_THRESHOLD)
-extern const struct amdgpu_umc_funcs umc_v6_1_funcs;
+extern const struct amdgpu_umc_ras_funcs umc_v6_1_ras_funcs;
extern const uint32_t
umc_v6_1_channel_idx_tbl[UMC_V6_1_UMC_INSTANCE_NUM][UMC_V6_1_CHANNEL_INSTANCE_NUM];
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c
new file mode 100644
index 000000000000..3a8f787374c0
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c
@@ -0,0 +1,281 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "umc_v6_7.h"
+#include "amdgpu_ras.h"
+#include "amdgpu_umc.h"
+#include "amdgpu.h"
+
+#include "umc/umc_6_7_0_offset.h"
+#include "umc/umc_6_7_0_sh_mask.h"
+
+static inline uint32_t get_umc_v6_7_reg_offset(struct amdgpu_device *adev,
+ uint32_t umc_inst,
+ uint32_t ch_inst)
+{
+ return adev->umc.channel_offs * ch_inst + UMC_V6_7_INST_DIST * umc_inst;
+}
+
+static void umc_v6_7_query_correctable_error_count(struct amdgpu_device *adev,
+ uint32_t umc_reg_offset,
+ unsigned long *error_count)
+{
+ uint32_t ecc_err_cnt_sel, ecc_err_cnt_sel_addr;
+ uint32_t ecc_err_cnt, ecc_err_cnt_addr;
+ uint64_t mc_umc_status;
+ uint32_t mc_umc_status_addr;
+
+ /* UMC 6_7 registers */
+ ecc_err_cnt_sel_addr =
+ SOC15_REG_OFFSET(UMC, 0, regUMCCH0_0_EccErrCntSel);
+ ecc_err_cnt_addr =
+ SOC15_REG_OFFSET(UMC, 0, regUMCCH0_0_EccErrCnt);
+ mc_umc_status_addr =
+ SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_STATUST0);
+
+ /* select the lower chip and check the error count */
+ ecc_err_cnt_sel = RREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4);
+ ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_EccErrCntSel,
+ EccErrCntCsSel, 0);
+ WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4, ecc_err_cnt_sel);
+
+ ecc_err_cnt = RREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4);
+ *error_count +=
+ (REG_GET_FIELD(ecc_err_cnt, UMCCH0_0_EccErrCnt, EccErrCnt) -
+ UMC_V6_7_CE_CNT_INIT);
+
+ /* select the higher chip and check the error count */
+ ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_EccErrCntSel,
+ EccErrCntCsSel, 1);
+ WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4, ecc_err_cnt_sel);
+
+ ecc_err_cnt = RREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4);
+ *error_count +=
+ (REG_GET_FIELD(ecc_err_cnt, UMCCH0_0_EccErrCnt, EccErrCnt) -
+ UMC_V6_7_CE_CNT_INIT);
+
+ /* check for SRAM correctable error;
+ * MCUMC_STATUS is a 64-bit register
+ */
+ mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);
+ if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)
+ *error_count += 1;
+}
+
+static void umc_v6_7_query_uncorrectable_error_count(struct amdgpu_device *adev,
+ uint32_t umc_reg_offset,
+ unsigned long *error_count)
+{
+ uint64_t mc_umc_status;
+ uint32_t mc_umc_status_addr;
+
+ mc_umc_status_addr =
+ SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_STATUST0);
+
+ /* check the MCUMC_STATUS */
+ mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);
+ if ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&
+ (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1 ||
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC) == 1 ||
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 1 ||
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, TCC) == 1))
+ *error_count += 1;
+}
+
+static void umc_v6_7_reset_error_count_per_channel(struct amdgpu_device *adev,
+ uint32_t umc_reg_offset)
+{
+ uint32_t ecc_err_cnt_addr;
+ uint32_t ecc_err_cnt_sel, ecc_err_cnt_sel_addr;
+
+ ecc_err_cnt_sel_addr =
+ SOC15_REG_OFFSET(UMC, 0,
+ regUMCCH0_0_EccErrCntSel);
+ ecc_err_cnt_addr =
+ SOC15_REG_OFFSET(UMC, 0,
+ regUMCCH0_0_EccErrCnt);
+
+ /* select the lower chip */
+ ecc_err_cnt_sel = RREG32_PCIE((ecc_err_cnt_sel_addr +
+ umc_reg_offset) * 4);
+ ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel,
+ UMCCH0_0_EccErrCntSel,
+ EccErrCntCsSel, 0);
+ WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4,
+ ecc_err_cnt_sel);
+
+ /* clear lower chip error count */
+ WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4,
+ UMC_V6_7_CE_CNT_INIT);
+
+ /* select the higher chip */
+ ecc_err_cnt_sel = RREG32_PCIE((ecc_err_cnt_sel_addr +
+ umc_reg_offset) * 4);
+ ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel,
+ UMCCH0_0_EccErrCntSel,
+ EccErrCntCsSel, 1);
+ WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4,
+ ecc_err_cnt_sel);
+
+ /* clear higher chip error count */
+ WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4,
+ UMC_V6_7_CE_CNT_INIT);
+}
+
+static void umc_v6_7_reset_error_count(struct amdgpu_device *adev)
+{
+ uint32_t umc_inst = 0;
+ uint32_t ch_inst = 0;
+ uint32_t umc_reg_offset = 0;
+
+ LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
+ umc_reg_offset = get_umc_v6_7_reg_offset(adev,
+ umc_inst,
+ ch_inst);
+
+ umc_v6_7_reset_error_count_per_channel(adev,
+ umc_reg_offset);
+ }
+}
+
+static void umc_v6_7_query_ras_error_count(struct amdgpu_device *adev,
+ void *ras_error_status)
+{
+ struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+
+ uint32_t umc_inst = 0;
+ uint32_t ch_inst = 0;
+ uint32_t umc_reg_offset = 0;
+
+ /* TODO: driver needs to toggle DF C-state to ensure
+ * safe access of UMC registers; the protection will be
+ * added later.
+ */
+ LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
+ umc_reg_offset = get_umc_v6_7_reg_offset(adev,
+ umc_inst,
+ ch_inst);
+ umc_v6_7_query_correctable_error_count(adev,
+ umc_reg_offset,
+ &(err_data->ce_count));
+ umc_v6_7_query_uncorrectable_error_count(adev,
+ umc_reg_offset,
+ &(err_data->ue_count));
+ }
+
+ umc_v6_7_reset_error_count(adev);
+}
+
+static void umc_v6_7_query_error_address(struct amdgpu_device *adev,
+ struct ras_err_data *err_data,
+ uint32_t umc_reg_offset,
+ uint32_t ch_inst,
+ uint32_t umc_inst)
+{
+ uint32_t mc_umc_status_addr;
+ uint64_t mc_umc_status, err_addr, retired_page, mc_umc_addrt0;
+ struct eeprom_table_record *err_rec;
+ uint32_t channel_index;
+
+ mc_umc_status_addr =
+ SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_STATUST0);
+ mc_umc_addrt0 =
+ SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_ADDRT0);
+
+ mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);
+
+ if (mc_umc_status == 0)
+ return;
+
+ if (!err_data->err_addr) {
+ /* clear umc status */
+ WREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL);
+ return;
+ }
+
+ err_rec = &err_data->err_addr[err_data->err_addr_cnt];
+
+ channel_index =
+ adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst];
+
+ /* calculate error address if ue/ce error is detected */
+ if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
+ (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
+ REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)) {
+
+ err_addr = RREG64_PCIE((mc_umc_addrt0 + umc_reg_offset) * 4);
+ err_addr = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
+
+ /* translate umc channel address to soc pa, assembled from 3 parts */
+ retired_page = ADDR_OF_8KB_BLOCK(err_addr) |
+ ADDR_OF_256B_BLOCK(channel_index) |
+ OFFSET_IN_256B_BLOCK(err_addr);
+
+ /* we only save ue error information currently, ce is skipped */
+ if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC)
+ == 1) {
+ err_rec->address = err_addr;
+ /* page frame address is saved */
+ err_rec->retired_page = retired_page >> AMDGPU_GPU_PAGE_SHIFT;
+ err_rec->ts = (uint64_t)ktime_get_real_seconds();
+ err_rec->err_type = AMDGPU_RAS_EEPROM_ERR_NON_RECOVERABLE;
+ err_rec->cu = 0;
+ err_rec->mem_channel = channel_index;
+ err_rec->mcumc_id = umc_inst;
+
+ err_data->err_addr_cnt++;
+ }
+ }
+
+ /* clear umc status */
+ WREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL);
+}
+
+static void umc_v6_7_query_ras_error_address(struct amdgpu_device *adev,
+ void *ras_error_status)
+{
+ struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+
+ uint32_t umc_inst = 0;
+ uint32_t ch_inst = 0;
+ uint32_t umc_reg_offset = 0;
+
+ /* TODO: driver needs to toggle DF C-state to ensure
+ * safe access of UMC registers; the protection will be
+ * added when the firmware interface is ready.
+ */
+ LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
+ umc_reg_offset = get_umc_v6_7_reg_offset(adev,
+ umc_inst,
+ ch_inst);
+ umc_v6_7_query_error_address(adev,
+ err_data,
+ umc_reg_offset,
+ ch_inst,
+ umc_inst);
+ }
+}
+
+const struct amdgpu_umc_ras_funcs umc_v6_7_ras_funcs = {
+ .ras_late_init = amdgpu_umc_ras_late_init,
+ .ras_fini = amdgpu_umc_ras_fini,
+ .query_ras_error_count = umc_v6_7_query_ras_error_count,
+ .query_ras_error_address = umc_v6_7_query_ras_error_address,
+};
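
Two conventions run through the whole file: per-channel registers are located by a linear offset (channel stride times channel plus instance stride times instance), and every access goes through the PCIe indirect accessors, which take byte addresses while SOC15_REG_OFFSET yields dword offsets, hence the "* 4" on every read and write. A minimal sketch making both explicit; channel_offs is assumed to be the per-ASIC stride that the setup code programs into adev->umc.channel_offs elsewhere:

static uint32_t umc_v6_7_byte_addr(uint32_t channel_offs, uint32_t umc_inst,
				   uint32_t ch_inst, uint32_t dword_reg)
{
	/* same math as get_umc_v6_7_reg_offset() above */
	uint32_t reg_offset = channel_offs * ch_inst +
			      UMC_V6_7_INST_DIST * umc_inst;

	/* RREG32_PCIE()/RREG64_PCIE() want byte addresses */
	return (dword_reg + reg_offset) * 4;
}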
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_7.h b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.h
new file mode 100644
index 000000000000..4eb85f247e96
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __UMC_V6_7_H__
+#define __UMC_V6_7_H__
+
+/* EccErrCnt max value */
+#define UMC_V6_7_CE_CNT_MAX 0xffff
+/* umc ce interrupt threshold */
+#define UMC_V6_7_CE_INT_THRESHOLD 0xffff
+/* umc ce count initial value */
+#define UMC_V6_7_CE_CNT_INIT (UMC_V6_7_CE_CNT_MAX - UMC_V6_7_CE_INT_THRESHOLD)
+
+#define UMC_V6_7_INST_DIST 0x40000
+
+extern const struct amdgpu_umc_ras_funcs umc_v6_7_ras_funcs;
+
+#endif
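
The seed constant is worth working through with this header's values:

/* Seed arithmetic for the correctable-error counter:
 *   UMC_V6_7_CE_CNT_INIT = UMC_V6_7_CE_CNT_MAX - UMC_V6_7_CE_INT_THRESHOLD
 *                        = 0xffff - 0xffff = 0
 * The counter register is preloaded with CE_CNT_INIT so it saturates after
 * CE_INT_THRESHOLD further errors, and the query path subtracts
 * CE_CNT_INIT back out to recover the number of new correctable errors.
 */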
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c b/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c
index a064c097690c..89d20adfa001 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c
@@ -22,6 +22,7 @@
*/
#include "umc_v8_7.h"
#include "amdgpu_ras.h"
+#include "amdgpu_umc.h"
#include "amdgpu.h"
#include "rsmu/rsmu_0_0_2_offset.h"
@@ -323,9 +324,10 @@ static void umc_v8_7_err_cnt_init(struct amdgpu_device *adev)
}
}
-const struct amdgpu_umc_funcs umc_v8_7_funcs = {
+const struct amdgpu_umc_ras_funcs umc_v8_7_ras_funcs = {
.err_cnt_init = umc_v8_7_err_cnt_init,
.ras_late_init = amdgpu_umc_ras_late_init,
+ .ras_fini = amdgpu_umc_ras_fini,
.query_ras_error_count = umc_v8_7_query_ras_error_count,
.query_ras_error_address = umc_v8_7_query_ras_error_address,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v8_7.h b/drivers/gpu/drm/amd/amdgpu/umc_v8_7.h
index d4d0468e3df5..37e6dc7c28e0 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v8_7.h
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v8_7.h
@@ -44,7 +44,7 @@
/* umc ce count initial value */
#define UMC_V8_7_CE_CNT_INIT (UMC_V8_7_CE_CNT_MAX - UMC_V8_7_CE_INT_THRESHOLD)
-extern const struct amdgpu_umc_funcs umc_v8_7_funcs;
+extern const struct amdgpu_umc_ras_funcs umc_v8_7_ras_funcs;
extern const uint32_t
umc_v8_7_channel_idx_tbl[UMC_V8_7_UMC_INSTANCE_NUM][UMC_V8_7_CHANNEL_INSTANCE_NUM];
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c b/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c
index 10ecae257b18..284447d7a579 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c
@@ -562,7 +562,7 @@ static int uvd_v3_1_sw_init(void *handle)
ring = &adev->uvd.inst->ring;
sprintf(ring->name, "uvd");
r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0,
- AMDGPU_RING_PRIO_DEFAULT);
+ AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index a70d2a0de316..a301518e4957 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -119,7 +119,7 @@ static int uvd_v4_2_sw_init(void *handle)
ring = &adev->uvd.inst->ring;
sprintf(ring->name, "uvd");
r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0,
- AMDGPU_RING_PRIO_DEFAULT);
+ AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index f3b0a927101b..a4d5bd21c83c 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -117,7 +117,7 @@ static int uvd_v5_0_sw_init(void *handle)
ring = &adev->uvd.inst->ring;
sprintf(ring->name, "uvd");
r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0,
- AMDGPU_RING_PRIO_DEFAULT);
+ AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index 760859880c1e..2bab9c77952f 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -420,7 +420,7 @@ static int uvd_v6_0_sw_init(void *handle)
ring = &adev->uvd.inst->ring;
sprintf(ring->name, "uvd");
r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0,
- AMDGPU_RING_PRIO_DEFAULT);
+ AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
return r;
@@ -434,7 +434,7 @@ static int uvd_v6_0_sw_init(void *handle)
sprintf(ring->name, "uvd_enc%d", i);
r = amdgpu_ring_init(adev, ring, 512,
&adev->uvd.inst->irq, 0,
- AMDGPU_RING_PRIO_DEFAULT);
+ AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index 7cd67cb2ac5f..0cd98fcb1f9f 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -454,7 +454,7 @@ static int uvd_v7_0_sw_init(void *handle)
sprintf(ring->name, "uvd_%d", ring->me);
r = amdgpu_ring_init(adev, ring, 512,
&adev->uvd.inst[j].irq, 0,
- AMDGPU_RING_PRIO_DEFAULT);
+ AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
return r;
}
@@ -475,7 +475,7 @@ static int uvd_v7_0_sw_init(void *handle)
}
r = amdgpu_ring_init(adev, ring, 512,
&adev->uvd.inst[j].irq, 0,
- AMDGPU_RING_PRIO_DEFAULT);
+ AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
index 0e2945baf0f1..c7d28c169be5 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
@@ -433,9 +433,8 @@ static int vce_v2_0_sw_init(void *handle)
for (i = 0; i < adev->vce.num_rings; i++) {
ring = &adev->vce.ring[i];
sprintf(ring->name, "vce%d", i);
- r = amdgpu_ring_init(adev, ring, 512,
- &adev->vce.irq, 0,
- AMDGPU_RING_PRIO_DEFAULT);
+ r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0,
+ AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index 6d9108fa22e0..3b82fb289ef6 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -443,7 +443,7 @@ static int vce_v3_0_sw_init(void *handle)
ring = &adev->vce.ring[i];
sprintf(ring->name, "vce%d", i);
r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0,
- AMDGPU_RING_PRIO_DEFAULT);
+ AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
index 37fa163393fd..8e238dea7bef 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
@@ -477,7 +477,7 @@ static int vce_v4_0_sw_init(void *handle)
ring->doorbell_index = adev->doorbell_index.uvd_vce.vce_ring2_3 * 2 + 1;
}
r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0,
- AMDGPU_RING_PRIO_DEFAULT);
+ AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index 6117931fa8d7..51a773a37a35 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -129,7 +129,7 @@ static int vcn_v1_0_sw_init(void *handle)
ring = &adev->vcn.inst->ring_dec;
sprintf(ring->name, "vcn_dec");
r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
- AMDGPU_RING_PRIO_DEFAULT);
+ AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
return r;
@@ -148,7 +148,7 @@ static int vcn_v1_0_sw_init(void *handle)
ring = &adev->vcn.inst->ring_enc[i];
sprintf(ring->name, "vcn_enc%d", i);
r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
- AMDGPU_RING_PRIO_DEFAULT);
+ AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
index d63198c945bf..116b9643d5ba 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
@@ -136,7 +136,7 @@ static int vcn_v2_0_sw_init(void *handle)
sprintf(ring->name, "vcn_dec");
r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
- AMDGPU_RING_PRIO_DEFAULT);
+ AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
return r;
@@ -167,7 +167,7 @@ static int vcn_v2_0_sw_init(void *handle)
ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1 + i;
sprintf(ring->name, "vcn_enc%d", i);
r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
- AMDGPU_RING_PRIO_DEFAULT);
+ AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
index 87ec883f7e06..948813d7caa0 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
@@ -189,7 +189,7 @@ static int vcn_v2_5_sw_init(void *handle)
(amdgpu_sriov_vf(adev) ? 2*j : 8*j);
sprintf(ring->name, "vcn_dec_%d", j);
r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq,
- 0, AMDGPU_RING_PRIO_DEFAULT);
+ 0, AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
return r;
@@ -203,7 +203,7 @@ static int vcn_v2_5_sw_init(void *handle)
sprintf(ring->name, "vcn_enc_%d.%d", j, i);
r = amdgpu_ring_init(adev, ring, 512,
&adev->vcn.inst[j].irq, 0,
- AMDGPU_RING_PRIO_DEFAULT);
+ AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
index b61d1ba1aa9d..cf165ab5dd26 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
@@ -50,6 +50,9 @@
#define VCN_INSTANCES_SIENNA_CICHLID 2
#define DEC_SW_RING_ENABLED FALSE
+#define RDECODE_MSG_CREATE 0x00000000
+#define RDECODE_MESSAGE_CREATE 0x00000001
+
static int amdgpu_ih_clientid_vcns[] = {
SOC15_IH_CLIENTID_VCN,
SOC15_IH_CLIENTID_VCN1
@@ -171,6 +174,7 @@ static int vcn_v3_0_sw_init(void *handle)
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
volatile struct amdgpu_fw_shared *fw_shared;
+
if (adev->vcn.harvest_config & (1 << i))
continue;
@@ -198,6 +202,8 @@ static int vcn_v3_0_sw_init(void *handle)
if (r)
return r;
+ atomic_set(&adev->vcn.inst[i].sched_score, 0);
+
ring = &adev->vcn.inst[i].ring_dec;
ring->use_doorbell = true;
if (amdgpu_sriov_vf(adev)) {
@@ -205,11 +211,10 @@ static int vcn_v3_0_sw_init(void *handle)
} else {
ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8 * i;
}
- if (adev->asic_type == CHIP_SIENNA_CICHLID && i != 0)
- ring->no_scheduler = true;
sprintf(ring->name, "vcn_dec_%d", i);
r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0,
- AMDGPU_RING_PRIO_DEFAULT);
+ AMDGPU_RING_PRIO_DEFAULT,
+ &adev->vcn.inst[i].sched_score);
if (r)
return r;
@@ -227,11 +232,10 @@ static int vcn_v3_0_sw_init(void *handle)
} else {
ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + j + 8 * i;
}
- if (adev->asic_type == CHIP_SIENNA_CICHLID && i != 1)
- ring->no_scheduler = true;
sprintf(ring->name, "vcn_enc_%d.%d", i, j);
r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0,
- AMDGPU_RING_PRIO_DEFAULT);
+ AMDGPU_RING_PRIO_DEFAULT,
+ &adev->vcn.inst[i].sched_score);
if (r)
return r;
}
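
Passing the same &adev->vcn.inst[i].sched_score atomic to both the decode and encode rings of an instance (instead of marking rings no_scheduler) lets the DRM scheduler weigh how busy each VCN instance is. A minimal sketch of score-based instance selection, assuming each instance exposes an atomic_t job count as wired up above; this illustrates the policy, it is not the scheduler's actual code:

#include <linux/atomic.h>

static int vcn_pick_least_loaded(atomic_t *scores, int num_inst)
{
	int i, best = 0, best_score = atomic_read(&scores[0]);

	for (i = 1; i < num_inst; i++) {
		int s = atomic_read(&scores[i]);

		if (s < best_score) {
			best_score = s;
			best = i;
		}
	}
	return best;
}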
@@ -585,6 +589,10 @@ static void vcn_v3_0_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx
WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
VCN, inst_idx, mmUVD_VCPU_NONCACHE_SIZE0),
AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)), 0, indirect);
+
+ /* VCN global tiling registers */
+ WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
+ UVD, 0, mmUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
}
static void vcn_v3_0_disable_static_power_gating(struct amdgpu_device *adev, int inst)
@@ -1844,6 +1852,132 @@ static const struct amdgpu_ring_funcs vcn_v3_0_dec_sw_ring_vm_funcs = {
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};
+static int vcn_v3_0_limit_sched(struct amdgpu_cs_parser *p)
+{
+ struct drm_gpu_scheduler **scheds;
+
+ /* The create msg must be in the first IB submitted */
+ if (atomic_read(&p->entity->fence_seq))
+ return -EINVAL;
+
+ scheds = p->adev->gpu_sched[AMDGPU_HW_IP_VCN_DEC]
+ [AMDGPU_RING_PRIO_DEFAULT].sched;
+ drm_sched_entity_modify_sched(p->entity, scheds, 1);
+ return 0;
+}
+
+static int vcn_v3_0_dec_msg(struct amdgpu_cs_parser *p, uint64_t addr)
+{
+ struct ttm_operation_ctx ctx = { false, false };
+ struct amdgpu_bo_va_mapping *map;
+ uint32_t *msg, num_buffers;
+ struct amdgpu_bo *bo;
+ uint64_t start, end;
+ unsigned int i;
+ void *ptr;
+ int r;
+
+ addr &= AMDGPU_GMC_HOLE_MASK;
+ r = amdgpu_cs_find_mapping(p, addr, &bo, &map);
+ if (r) {
+ DRM_ERROR("Can't find BO for addr 0x%08Lx\n", addr);
+ return r;
+ }
+
+ start = map->start * AMDGPU_GPU_PAGE_SIZE;
+ end = (map->last + 1) * AMDGPU_GPU_PAGE_SIZE;
+ if (addr & 0x7) {
+ DRM_ERROR("VCN messages must be 8-byte aligned!\n");
+ return -EINVAL;
+ }
+
+ bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+ amdgpu_bo_placement_from_domain(bo, bo->allowed_domains);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+ if (r) {
+ DRM_ERROR("Failed validating the VCN message BO (%d)!\n", r);
+ return r;
+ }
+
+ r = amdgpu_bo_kmap(bo, &ptr);
+ if (r) {
+ DRM_ERROR("Failed mapping the VCN message (%d)!\n", r);
+ return r;
+ }
+
+ msg = ptr + addr - start;
+
+ /* Check length */
+ if (msg[1] > end - addr) {
+ r = -EINVAL;
+ goto out;
+ }
+
+ if (msg[3] != RDECODE_MSG_CREATE)
+ goto out;
+
+ num_buffers = msg[2];
+ for (i = 0, msg = &msg[6]; i < num_buffers; ++i, msg += 4) {
+ uint32_t offset, size, *create;
+
+ if (msg[0] != RDECODE_MESSAGE_CREATE)
+ continue;
+
+ offset = msg[1];
+ size = msg[2];
+
+ if (offset + size > end) {
+ r = -EINVAL;
+ goto out;
+ }
+
+ create = ptr + addr + offset - start;
+
+ /* H264, HEVC and VP9 can run on any instance */
+ if (create[0] == 0x7 || create[0] == 0x10 || create[0] == 0x11)
+ continue;
+
+ r = vcn_v3_0_limit_sched(p);
+ if (r)
+ goto out;
+ }
+
+out:
+ amdgpu_bo_kunmap(bo);
+ return r;
+}
+
+static int vcn_v3_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
+ uint32_t ib_idx)
+{
+ struct amdgpu_ring *ring = to_amdgpu_ring(p->entity->rq->sched);
+ struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
+ uint32_t msg_lo = 0, msg_hi = 0;
+ unsigned i;
+ int r;
+
+ /* The first instance can decode anything */
+ if (!ring->me)
+ return 0;
+
+ for (i = 0; i < ib->length_dw; i += 2) {
+ uint32_t reg = amdgpu_get_ib_value(p, ib_idx, i);
+ uint32_t val = amdgpu_get_ib_value(p, ib_idx, i + 1);
+
+ if (reg == PACKET0(p->adev->vcn.internal.data0, 0)) {
+ msg_lo = val;
+ } else if (reg == PACKET0(p->adev->vcn.internal.data1, 0)) {
+ msg_hi = val;
+ } else if (reg == PACKET0(p->adev->vcn.internal.cmd, 0) &&
+ val == 0) {
+ r = vcn_v3_0_dec_msg(p, ((u64)msg_hi) << 32 | msg_lo);
+ if (r)
+ return r;
+ }
+ }
+ return 0;
+}
+
static const struct amdgpu_ring_funcs vcn_v3_0_dec_ring_vm_funcs = {
.type = AMDGPU_RING_TYPE_VCN_DEC,
.align_mask = 0xf,
@@ -1851,6 +1985,7 @@ static const struct amdgpu_ring_funcs vcn_v3_0_dec_ring_vm_funcs = {
.get_rptr = vcn_v3_0_dec_ring_get_rptr,
.get_wptr = vcn_v3_0_dec_ring_get_wptr,
.set_wptr = vcn_v3_0_dec_ring_set_wptr,
+ .patch_cs_in_place = vcn_v3_0_ring_patch_cs_in_place,
.emit_frame_size =
SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
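
The patch_cs_in_place hook registered below walks the decode IB as (register, value) dword pairs: PACKET0 writes to the instance's data0/data1 registers carry the low and high halves of the message address, and a write of 0 to the cmd register marks "process message", at which point vcn_v3_0_dec_msg() inspects the create message. A sketch of that stream layout; the register numbers are the internal ones cached in adev->vcn.internal, and the struct here is illustrative only:

struct vcn_dec_reg_write {
	uint32_t packet0; /* PACKET0(reg, 0) header        */
	uint32_t value;   /* value written to the register */
};
/* stream: ..., {data0, msg_lo}, {data1, msg_hi}, {cmd, 0}, ... */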
diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
index 6c3cb3513b98..8a122b413bf5 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
@@ -264,10 +264,10 @@ static void vega20_ih_reroute_ih(struct amdgpu_device *adev)
{
uint32_t tmp;
- /* vega20 ih reroute will go through psp
- * this function is only used for arcturus
+ /* vega20 ih reroute will go through psp; this function
+ * is only used for newer asics, starting with arcturus
*/
- if (adev->asic_type == CHIP_ARCTURUS) {
+ if (adev->asic_type >= CHIP_ARCTURUS) {
/* Reroute to IH ring 1 for VMC */
WREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_INDEX, 0x12);
tmp = RREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_DATA);
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index ea338de5818a..735ebbd1148f 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -81,6 +81,30 @@
#include "mxgpu_vi.h"
#include "amdgpu_dm.h"
+#define ixPCIE_LC_L1_PM_SUBSTATE 0x100100C6
+#define PCIE_LC_L1_PM_SUBSTATE__LC_L1_SUBSTATES_OVERRIDE_EN_MASK 0x00000001L
+#define PCIE_LC_L1_PM_SUBSTATE__LC_PCI_PM_L1_2_OVERRIDE_MASK 0x00000002L
+#define PCIE_LC_L1_PM_SUBSTATE__LC_PCI_PM_L1_1_OVERRIDE_MASK 0x00000004L
+#define PCIE_LC_L1_PM_SUBSTATE__LC_ASPM_L1_2_OVERRIDE_MASK 0x00000008L
+#define PCIE_LC_L1_PM_SUBSTATE__LC_ASPM_L1_1_OVERRIDE_MASK 0x00000010L
+#define ixPCIE_L1_PM_SUB_CNTL 0x378
+#define PCIE_L1_PM_SUB_CNTL__ASPM_L1_2_EN_MASK 0x00000004L
+#define PCIE_L1_PM_SUB_CNTL__ASPM_L1_1_EN_MASK 0x00000008L
+#define PCIE_L1_PM_SUB_CNTL__PCI_PM_L1_2_EN_MASK 0x00000001L
+#define PCIE_L1_PM_SUB_CNTL__PCI_PM_L1_1_EN_MASK 0x00000002L
+#define PCIE_LC_CNTL6__LC_L1_POWERDOWN_MASK 0x00200000L
+#define LINK_CAP 0x64
+#define PCIE_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK 0x00040000L
+#define ixCPM_CONTROL 0x1400118
+#define ixPCIE_LC_CNTL7 0x100100BC
+#define PCIE_LC_CNTL7__LC_L1_SIDEBAND_CLKREQ_PDWN_EN_MASK 0x00000400L
+#define PCIE_LC_CNTL__LC_L0S_INACTIVITY_DEFAULT 0x00000007
+#define PCIE_LC_CNTL__LC_L1_INACTIVITY_DEFAULT 0x00000009
+#define CPM_CONTROL__CLKREQb_UNGATE_TXCLK_ENABLE_MASK 0x01000000L
+#define PCIE_L1_PM_SUB_CNTL 0x378
+#define ASIC_IS_P22(asic_type, rid) ((asic_type >= CHIP_POLARIS10) && \
+ (asic_type <= CHIP_POLARIS12) && \
+ (rid >= 0x6E))
/* Topaz */
static const struct amdgpu_video_codecs topaz_video_codecs_encode =
{
@@ -1091,13 +1115,178 @@ static void vi_pcie_gen3_enable(struct amdgpu_device *adev)
/* todo */
}
+static void vi_enable_aspm(struct amdgpu_device *adev)
+{
+ u32 data, orig;
+
+ orig = data = RREG32_PCIE(ixPCIE_LC_CNTL);
+ data |= PCIE_LC_CNTL__LC_L0S_INACTIVITY_DEFAULT <<
+ PCIE_LC_CNTL__LC_L0S_INACTIVITY__SHIFT;
+ data |= PCIE_LC_CNTL__LC_L1_INACTIVITY_DEFAULT <<
+ PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;
+ data &= ~PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK;
+ data |= PCIE_LC_CNTL__LC_DELAY_L1_EXIT_MASK;
+ if (orig != data)
+ WREG32_PCIE(ixPCIE_LC_CNTL, data);
+}
+
static void vi_program_aspm(struct amdgpu_device *adev)
{
+ u32 data, data1, orig;
+ bool bL1SS = false;
+ bool bClkReqSupport = true;
- if (amdgpu_aspm == 0)
+ if (amdgpu_aspm != 1)
return;
- /* todo */
+ if (adev->flags & AMD_IS_APU ||
+ adev->asic_type < CHIP_POLARIS10)
+ return;
+
+ orig = data = RREG32_PCIE(ixPCIE_LC_CNTL);
+ data &= ~PCIE_LC_CNTL__LC_L1_INACTIVITY_MASK;
+ data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK;
+ data |= PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK;
+ if (orig != data)
+ WREG32_PCIE(ixPCIE_LC_CNTL, data);
+
+ orig = data = RREG32_PCIE(ixPCIE_LC_N_FTS_CNTL);
+ data &= ~PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS_MASK;
+ data |= 0x0024 << PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS__SHIFT;
+ data |= PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS_OVERRIDE_EN_MASK;
+ if (orig != data)
+ WREG32_PCIE(ixPCIE_LC_N_FTS_CNTL, data);
+
+ orig = data = RREG32_PCIE(ixPCIE_LC_CNTL3);
+ data |= PCIE_LC_CNTL3__LC_GO_TO_RECOVERY_MASK;
+ if (orig != data)
+ WREG32_PCIE(ixPCIE_LC_CNTL3, data);
+
+ orig = data = RREG32_PCIE(ixPCIE_P_CNTL);
+ data |= PCIE_P_CNTL__P_IGNORE_EDB_ERR_MASK;
+ if (orig != data)
+ WREG32_PCIE(ixPCIE_P_CNTL, data);
+
+ data = RREG32_PCIE(ixPCIE_LC_L1_PM_SUBSTATE);
+ pci_read_config_dword(adev->pdev, PCIE_L1_PM_SUB_CNTL, &data1);
+ if (data & PCIE_LC_L1_PM_SUBSTATE__LC_L1_SUBSTATES_OVERRIDE_EN_MASK &&
+ (data & (PCIE_LC_L1_PM_SUBSTATE__LC_PCI_PM_L1_2_OVERRIDE_MASK |
+ PCIE_LC_L1_PM_SUBSTATE__LC_PCI_PM_L1_1_OVERRIDE_MASK |
+ PCIE_LC_L1_PM_SUBSTATE__LC_ASPM_L1_2_OVERRIDE_MASK |
+ PCIE_LC_L1_PM_SUBSTATE__LC_ASPM_L1_1_OVERRIDE_MASK))) {
+ bL1SS = true;
+ } else if (data1 & (PCIE_L1_PM_SUB_CNTL__ASPM_L1_2_EN_MASK |
+ PCIE_L1_PM_SUB_CNTL__ASPM_L1_1_EN_MASK |
+ PCIE_L1_PM_SUB_CNTL__PCI_PM_L1_2_EN_MASK |
+ PCIE_L1_PM_SUB_CNTL__PCI_PM_L1_1_EN_MASK)) {
+ bL1SS = true;
+ }
+
+ orig = data = RREG32_PCIE(ixPCIE_LC_CNTL6);
+ data |= PCIE_LC_CNTL6__LC_L1_POWERDOWN_MASK;
+ if (orig != data)
+ WREG32_PCIE(ixPCIE_LC_CNTL6, data);
+
+ orig = data = RREG32_PCIE(ixPCIE_LC_LINK_WIDTH_CNTL);
+ data |= PCIE_LC_LINK_WIDTH_CNTL__LC_DYN_LANES_PWR_STATE_MASK;
+ if (orig != data)
+ WREG32_PCIE(ixPCIE_LC_LINK_WIDTH_CNTL, data);
+
+ pci_read_config_dword(adev->pdev, LINK_CAP, &data);
+ if (!(data & PCIE_LINK_CAP__CLOCK_POWER_MANAGEMENT_MASK))
+ bClkReqSupport = false;
+
+ if (bClkReqSupport) {
+ orig = data = RREG32_SMC(ixTHM_CLK_CNTL);
+ data &= ~(THM_CLK_CNTL__CMON_CLK_SEL_MASK | THM_CLK_CNTL__TMON_CLK_SEL_MASK);
+ data |= (1 << THM_CLK_CNTL__CMON_CLK_SEL__SHIFT) |
+ (1 << THM_CLK_CNTL__TMON_CLK_SEL__SHIFT);
+ if (orig != data)
+ WREG32_SMC(ixTHM_CLK_CNTL, data);
+
+ orig = data = RREG32_SMC(ixMISC_CLK_CTRL);
+ data &= ~(MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL_MASK |
+ MISC_CLK_CTRL__ZCLK_SEL_MASK | MISC_CLK_CTRL__DFT_SMS_PG_CLK_SEL_MASK);
+ data |= (1 << MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL__SHIFT) |
+ (1 << MISC_CLK_CTRL__ZCLK_SEL__SHIFT);
+ data |= (0x20 << MISC_CLK_CTRL__DFT_SMS_PG_CLK_SEL__SHIFT);
+ if (orig != data)
+ WREG32_SMC(ixMISC_CLK_CTRL, data);
+
+ orig = data = RREG32_SMC(ixCG_CLKPIN_CNTL);
+ data |= CG_CLKPIN_CNTL__XTALIN_DIVIDE_MASK;
+ if (orig != data)
+ WREG32_SMC(ixCG_CLKPIN_CNTL, data);
+
+ orig = data = RREG32_SMC(ixCG_CLKPIN_CNTL_2);
+ data |= CG_CLKPIN_CNTL_2__ENABLE_XCLK_MASK;
+ if (orig != data)
+ WREG32_SMC(ixCG_CLKPIN_CNTL_2, data);
+
+ orig = data = RREG32_SMC(ixMPLL_BYPASSCLK_SEL);
+ data &= ~MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK;
+ data |= (4 << MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT);
+ if (orig != data)
+ WREG32_SMC(ixMPLL_BYPASSCLK_SEL, data);
+
+ orig = data = RREG32_PCIE(ixCPM_CONTROL);
+ data |= (CPM_CONTROL__REFCLK_XSTCLK_ENABLE_MASK |
+ CPM_CONTROL__CLKREQb_UNGATE_TXCLK_ENABLE_MASK);
+ if (orig != data)
+ WREG32_PCIE(ixCPM_CONTROL, data);
+
+ orig = data = RREG32_PCIE(ixPCIE_CONFIG_CNTL);
+ data &= ~PCIE_CONFIG_CNTL__DYN_CLK_LATENCY_MASK;
+ data |= (0xE << PCIE_CONFIG_CNTL__DYN_CLK_LATENCY__SHIFT);
+ if (orig != data)
+ WREG32_PCIE(ixPCIE_CONFIG_CNTL, data);
+
+ orig = data = RREG32(mmBIF_CLK_CTRL);
+ data |= BIF_CLK_CTRL__BIF_XSTCLK_READY_MASK;
+ if (orig != data)
+ WREG32(mmBIF_CLK_CTRL, data);
+
+ orig = data = RREG32_PCIE(ixPCIE_LC_CNTL7);
+ data |= PCIE_LC_CNTL7__LC_L1_SIDEBAND_CLKREQ_PDWN_EN_MASK;
+ if (orig != data)
+ WREG32_PCIE(ixPCIE_LC_CNTL7, data);
+
+ orig = data = RREG32_PCIE(ixPCIE_HW_DEBUG);
+ data |= PCIE_HW_DEBUG__HW_01_DEBUG_MASK;
+ if (orig != data)
+ WREG32_PCIE(ixPCIE_HW_DEBUG, data);
+
+ orig = data = RREG32_PCIE(ixPCIE_LC_CNTL2);
+ data |= PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L23_MASK;
+ data |= PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L1_MASK;
+ if (bL1SS)
+ data &= ~PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L1_MASK;
+ if (orig != data)
+ WREG32_PCIE(ixPCIE_LC_CNTL2, data);
+
+ }
+
+ vi_enable_aspm(adev);
+
+ data = RREG32_PCIE(ixPCIE_LC_N_FTS_CNTL);
+ data1 = RREG32_PCIE(ixPCIE_LC_STATUS1);
+ if (((data & PCIE_LC_N_FTS_CNTL__LC_N_FTS_MASK) == PCIE_LC_N_FTS_CNTL__LC_N_FTS_MASK) &&
+ data1 & PCIE_LC_STATUS1__LC_REVERSE_XMIT_MASK &&
+ data1 & PCIE_LC_STATUS1__LC_REVERSE_RCVR_MASK) {
+ orig = data = RREG32_PCIE(ixPCIE_LC_CNTL);
+ data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK;
+ if (orig != data)
+ WREG32_PCIE(ixPCIE_LC_CNTL, data);
+ }
+
+ if ((adev->asic_type == CHIP_POLARIS12 &&
+ !(ASICID_IS_P23(adev->pdev->device, adev->pdev->revision))) ||
+ ASIC_IS_P22(adev->asic_type, adev->external_rev_id)) {
+ orig = data = RREG32_PCIE(ixPCIE_LC_TRAINING_CNTL);
+ data &= ~PCIE_LC_TRAINING_CNTL__LC_DISABLE_TRAINING_BIT_ARCH_MASK;
+ if (orig != data)
+ WREG32_PCIE(ixPCIE_LC_TRAINING_CNTL, data);
+ }
}
static void vi_enable_doorbell_aperture(struct amdgpu_device *adev,
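
vi_program_aspm() repeats one idiom throughout: read a register, compute the desired value, and write back only when it changed, avoiding redundant indirect-register traffic. A condensed sketch of that read-modify-write pattern; the helper name and set/clr split are illustrative, not driver code:

static void pcie_rmw(struct amdgpu_device *adev, u32 reg, u32 set, u32 clr)
{
	u32 orig, data;

	orig = data = RREG32_PCIE(reg);
	data |= set;
	data &= ~clr;
	if (orig != data)	/* skip the write when nothing changed */
		WREG32_PCIE(reg, data);
}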
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 6802c616e10e..43de260b2230 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -870,52 +870,47 @@ static int kfd_ioctl_get_process_apertures(struct file *filp,
{
struct kfd_ioctl_get_process_apertures_args *args = data;
struct kfd_process_device_apertures *pAperture;
- struct kfd_process_device *pdd;
+ int i;
dev_dbg(kfd_device, "get apertures for PASID 0x%x", p->pasid);
args->num_of_nodes = 0;
mutex_lock(&p->mutex);
+ /* Run over all pdd of the process */
+ for (i = 0; i < p->n_pdds; i++) {
+ struct kfd_process_device *pdd = p->pdds[i];
+
+ pAperture =
+ &args->process_apertures[args->num_of_nodes];
+ pAperture->gpu_id = pdd->dev->id;
+ pAperture->lds_base = pdd->lds_base;
+ pAperture->lds_limit = pdd->lds_limit;
+ pAperture->gpuvm_base = pdd->gpuvm_base;
+ pAperture->gpuvm_limit = pdd->gpuvm_limit;
+ pAperture->scratch_base = pdd->scratch_base;
+ pAperture->scratch_limit = pdd->scratch_limit;
- /*if the process-device list isn't empty*/
- if (kfd_has_process_device_data(p)) {
- /* Run over all pdd of the process */
- pdd = kfd_get_first_process_device_data(p);
- do {
- pAperture =
- &args->process_apertures[args->num_of_nodes];
- pAperture->gpu_id = pdd->dev->id;
- pAperture->lds_base = pdd->lds_base;
- pAperture->lds_limit = pdd->lds_limit;
- pAperture->gpuvm_base = pdd->gpuvm_base;
- pAperture->gpuvm_limit = pdd->gpuvm_limit;
- pAperture->scratch_base = pdd->scratch_base;
- pAperture->scratch_limit = pdd->scratch_limit;
-
- dev_dbg(kfd_device,
- "node id %u\n", args->num_of_nodes);
- dev_dbg(kfd_device,
- "gpu id %u\n", pdd->dev->id);
- dev_dbg(kfd_device,
- "lds_base %llX\n", pdd->lds_base);
- dev_dbg(kfd_device,
- "lds_limit %llX\n", pdd->lds_limit);
- dev_dbg(kfd_device,
- "gpuvm_base %llX\n", pdd->gpuvm_base);
- dev_dbg(kfd_device,
- "gpuvm_limit %llX\n", pdd->gpuvm_limit);
- dev_dbg(kfd_device,
- "scratch_base %llX\n", pdd->scratch_base);
- dev_dbg(kfd_device,
- "scratch_limit %llX\n", pdd->scratch_limit);
-
- args->num_of_nodes++;
-
- pdd = kfd_get_next_process_device_data(p, pdd);
- } while (pdd && (args->num_of_nodes < NUM_OF_SUPPORTED_GPUS));
- }
+ dev_dbg(kfd_device,
+ "node id %u\n", args->num_of_nodes);
+ dev_dbg(kfd_device,
+ "gpu id %u\n", pdd->dev->id);
+ dev_dbg(kfd_device,
+ "lds_base %llX\n", pdd->lds_base);
+ dev_dbg(kfd_device,
+ "lds_limit %llX\n", pdd->lds_limit);
+ dev_dbg(kfd_device,
+ "gpuvm_base %llX\n", pdd->gpuvm_base);
+ dev_dbg(kfd_device,
+ "gpuvm_limit %llX\n", pdd->gpuvm_limit);
+ dev_dbg(kfd_device,
+ "scratch_base %llX\n", pdd->scratch_base);
+ dev_dbg(kfd_device,
+ "scratch_limit %llX\n", pdd->scratch_limit);
+ if (++args->num_of_nodes >= NUM_OF_SUPPORTED_GPUS)
+ break;
+ }
mutex_unlock(&p->mutex);
return 0;
@@ -926,9 +921,8 @@ static int kfd_ioctl_get_process_apertures_new(struct file *filp,
{
struct kfd_ioctl_get_process_apertures_new_args *args = data;
struct kfd_process_device_apertures *pa;
- struct kfd_process_device *pdd;
- uint32_t nodes = 0;
int ret;
+ int i;
dev_dbg(kfd_device, "get apertures for PASID 0x%x", p->pasid);
@@ -937,17 +931,7 @@ static int kfd_ioctl_get_process_apertures_new(struct file *filp,
* sufficient memory
*/
mutex_lock(&p->mutex);
-
- if (!kfd_has_process_device_data(p))
- goto out_unlock;
-
- /* Run over all pdd of the process */
- pdd = kfd_get_first_process_device_data(p);
- do {
- args->num_of_nodes++;
- pdd = kfd_get_next_process_device_data(p, pdd);
- } while (pdd);
-
+ args->num_of_nodes = p->n_pdds;
goto out_unlock;
}
@@ -962,22 +946,23 @@ static int kfd_ioctl_get_process_apertures_new(struct file *filp,
mutex_lock(&p->mutex);
- if (!kfd_has_process_device_data(p)) {
+ if (!p->n_pdds) {
args->num_of_nodes = 0;
kfree(pa);
goto out_unlock;
}
/* Run over all pdd of the process */
- pdd = kfd_get_first_process_device_data(p);
- do {
- pa[nodes].gpu_id = pdd->dev->id;
- pa[nodes].lds_base = pdd->lds_base;
- pa[nodes].lds_limit = pdd->lds_limit;
- pa[nodes].gpuvm_base = pdd->gpuvm_base;
- pa[nodes].gpuvm_limit = pdd->gpuvm_limit;
- pa[nodes].scratch_base = pdd->scratch_base;
- pa[nodes].scratch_limit = pdd->scratch_limit;
+ for (i = 0; i < min(p->n_pdds, args->num_of_nodes); i++) {
+ struct kfd_process_device *pdd = p->pdds[i];
+
+ pa[i].gpu_id = pdd->dev->id;
+ pa[i].lds_base = pdd->lds_base;
+ pa[i].lds_limit = pdd->lds_limit;
+ pa[i].gpuvm_base = pdd->gpuvm_base;
+ pa[i].gpuvm_limit = pdd->gpuvm_limit;
+ pa[i].scratch_base = pdd->scratch_base;
+ pa[i].scratch_limit = pdd->scratch_limit;
dev_dbg(kfd_device,
"gpu id %u\n", pdd->dev->id);
@@ -993,17 +978,14 @@ static int kfd_ioctl_get_process_apertures_new(struct file *filp,
"scratch_base %llX\n", pdd->scratch_base);
dev_dbg(kfd_device,
"scratch_limit %llX\n", pdd->scratch_limit);
- nodes++;
-
- pdd = kfd_get_next_process_device_data(p, pdd);
- } while (pdd && (nodes < args->num_of_nodes));
+ }
mutex_unlock(&p->mutex);
- args->num_of_nodes = nodes;
+ args->num_of_nodes = i;
ret = copy_to_user(
(void __user *)args->kfd_process_device_apertures_ptr,
pa,
- (nodes * sizeof(struct kfd_process_device_apertures)));
+ (i * sizeof(struct kfd_process_device_apertures)));
kfree(pa);
return ret ? -EFAULT : 0;
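
Both ioctls now walk the process's device data through a plain array instead of a linked list, which removes the first/next iterator helpers and turns the NUM_OF_SUPPORTED_GPUS bound into a simple loop condition. The pattern, reduced to its core; fill_one() is a hypothetical per-device callback, and the fields mirror the new p->pdds[]/p->n_pdds layout:

static uint32_t for_each_pdd(struct kfd_process *p, uint32_t max_nodes,
			     void (*fill_one)(struct kfd_process_device *pdd))
{
	uint32_t i;

	for (i = 0; i < p->n_pdds && i < max_nodes; i++)
		fill_one(p->pdds[i]);

	return i; /* number of nodes actually reported */
}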
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
index b258a3dae767..159add0f5aaa 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
@@ -155,7 +155,7 @@ static int dbgdev_diq_submit_ib(struct kfd_dbgdev *dbgdev,
/* Wait till CP writes sync code: */
status = amdkfd_fence_wait_timeout(
- (unsigned int *) rm_state,
+ rm_state,
QUEUESTATE__ACTIVE, 1500);
kfd_gtt_sa_free(dbgdev->dev, mem_obj);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c b/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
index 511712c2e382..673d5e34f213 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
@@ -33,6 +33,11 @@ static int kfd_debugfs_open(struct inode *inode, struct file *file)
return single_open(file, show, NULL);
}
+static int kfd_debugfs_hang_hws_read(struct seq_file *m, void *data)
+{
+ seq_printf(m, "echo gpu_id > hang_hws\n");
+ return 0;
+}
static ssize_t kfd_debugfs_hang_hws_write(struct file *file,
const char __user *user_buf, size_t size, loff_t *ppos)
@@ -94,7 +99,7 @@ void kfd_debugfs_init(void)
debugfs_create_file("rls", S_IFREG | 0444, debugfs_root,
kfd_debugfs_rls_by_device, &kfd_debugfs_fops);
debugfs_create_file("hang_hws", S_IFREG | 0200, debugfs_root,
- NULL, &kfd_debugfs_hang_hws_fops);
+ kfd_debugfs_hang_hws_read, &kfd_debugfs_hang_hws_fops);
}
void kfd_debugfs_fini(void)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index f860cd705961..357b9bf62a1c 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -1322,7 +1322,7 @@ void kfd_dec_compute_active(struct kfd_dev *kfd)
void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint32_t throttle_bitmask)
{
- if (kfd)
+ if (kfd && kfd->init_complete)
kfd_smi_event_update_thermal_throttling(kfd, throttle_bitmask);
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 965f9f230045..d3eaa1549bd7 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -1180,7 +1180,7 @@ static int start_cpsch(struct device_queue_manager *dqm)
if (retval)
goto fail_allocate_vidmem;
- dqm->fence_addr = dqm->fence_mem->cpu_ptr;
+ dqm->fence_addr = (uint64_t *)dqm->fence_mem->cpu_ptr;
dqm->fence_gpu_addr = dqm->fence_mem->gpu_addr;
init_interrupts(dqm);
@@ -1353,8 +1353,8 @@ out:
return retval;
}
-int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
- unsigned int fence_value,
+int amdkfd_fence_wait_timeout(uint64_t *fence_addr,
+ uint64_t fence_value,
unsigned int timeout_ms)
{
unsigned long end_jiffies = msecs_to_jiffies(timeout_ms) + jiffies;
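
The fence the scheduler firmware writes back is widened from 32 to 64 bits end to end: the storage in device_queue_manager, the wait helper, and the query-status packet writers below. A hedged sketch of the polling side, simplified from amdkfd_fence_wait_timeout(); the error handling is an assumption:

#include <linux/jiffies.h>
#include <linux/sched.h>

static int fence_wait_sketch(volatile uint64_t *fence_addr,
			     uint64_t fence_value, unsigned long end_jiffies)
{
	/* the HWS writes fence_value to *fence_addr when the query lands */
	while (*fence_addr != fence_value) {
		if (time_after(jiffies, end_jiffies))
			return -ETIME;
		schedule();
	}
	return 0;
}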
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
index dd0d8834b432..71e2fde56b2b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
@@ -187,7 +187,7 @@ struct device_queue_manager {
uint16_t vmid_pasid[VMID_NUM];
uint64_t pipelines_addr;
uint64_t fence_gpu_addr;
- unsigned int *fence_addr;
+ uint64_t *fence_addr;
struct kfd_mem_obj *fence_mem;
bool active_runlist;
int sched_policy;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
index 1c20458f3962..696944fa0177 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
@@ -25,6 +25,70 @@
#include "soc15_int.h"
#include "kfd_device_queue_manager.h"
#include "kfd_smi_events.h"
+#include "amdgpu.h"
+
+enum SQ_INTERRUPT_WORD_ENCODING {
+ SQ_INTERRUPT_WORD_ENCODING_AUTO = 0x0,
+ SQ_INTERRUPT_WORD_ENCODING_INST,
+ SQ_INTERRUPT_WORD_ENCODING_ERROR,
+};
+
+enum SQ_INTERRUPT_ERROR_TYPE {
+ SQ_INTERRUPT_ERROR_TYPE_EDC_FUE = 0x0,
+ SQ_INTERRUPT_ERROR_TYPE_ILLEGAL_INST,
+ SQ_INTERRUPT_ERROR_TYPE_MEMVIOL,
+ SQ_INTERRUPT_ERROR_TYPE_EDC_FED,
+};
+
+/* SQ_INTERRUPT_WORD_AUTO_CTXID */
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__THREAD_TRACE__SHIFT 0
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__WLT__SHIFT 1
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__THREAD_TRACE_BUF_FULL__SHIFT 2
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__REG_TIMESTAMP__SHIFT 3
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__CMD_TIMESTAMP__SHIFT 4
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__HOST_CMD_OVERFLOW__SHIFT 5
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__HOST_REG_OVERFLOW__SHIFT 6
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__IMMED_OVERFLOW__SHIFT 7
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__THREAD_TRACE_UTC_ERROR__SHIFT 8
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__SE_ID__SHIFT 24
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__ENCODING__SHIFT 26
+
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__THREAD_TRACE_MASK 0x00000001
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__WLT_MASK 0x00000002
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__THREAD_TRACE_BUF_FULL_MASK 0x00000004
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__REG_TIMESTAMP_MASK 0x00000008
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__CMD_TIMESTAMP_MASK 0x00000010
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__HOST_CMD_OVERFLOW_MASK 0x00000020
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__HOST_REG_OVERFLOW_MASK 0x00000040
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__IMMED_OVERFLOW_MASK 0x00000080
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__THREAD_TRACE_UTC_ERROR_MASK 0x00000100
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__SE_ID_MASK 0x03000000
+#define SQ_INTERRUPT_WORD_AUTO_CTXID__ENCODING_MASK 0x0c000000
+
+/* SQ_INTERRUPT_WORD_WAVE_CTXID */
+#define SQ_INTERRUPT_WORD_WAVE_CTXID__DATA__SHIFT 0
+#define SQ_INTERRUPT_WORD_WAVE_CTXID__SH_ID__SHIFT 12
+#define SQ_INTERRUPT_WORD_WAVE_CTXID__PRIV__SHIFT 13
+#define SQ_INTERRUPT_WORD_WAVE_CTXID__WAVE_ID__SHIFT 14
+#define SQ_INTERRUPT_WORD_WAVE_CTXID__SIMD_ID__SHIFT 18
+#define SQ_INTERRUPT_WORD_WAVE_CTXID__CU_ID__SHIFT 20
+#define SQ_INTERRUPT_WORD_WAVE_CTXID__SE_ID__SHIFT 24
+#define SQ_INTERRUPT_WORD_WAVE_CTXID__ENCODING__SHIFT 26
+
+#define SQ_INTERRUPT_WORD_WAVE_CTXID__DATA_MASK 0x00000fff
+#define SQ_INTERRUPT_WORD_WAVE_CTXID__SH_ID_MASK 0x00001000
+#define SQ_INTERRUPT_WORD_WAVE_CTXID__PRIV_MASK 0x00002000
+#define SQ_INTERRUPT_WORD_WAVE_CTXID__WAVE_ID_MASK 0x0003c000
+#define SQ_INTERRUPT_WORD_WAVE_CTXID__SIMD_ID_MASK 0x000c0000
+#define SQ_INTERRUPT_WORD_WAVE_CTXID__CU_ID_MASK 0x00f00000
+#define SQ_INTERRUPT_WORD_WAVE_CTXID__SE_ID_MASK 0x03000000
+#define SQ_INTERRUPT_WORD_WAVE_CTXID__ENCODING_MASK 0x0c000000
+
+#define KFD_CONTEXT_ID_GET_SQ_INT_DATA(ctx0, ctx1) \
+ ((ctx0 & 0xfff) | ((ctx0 >> 16) & 0xf000) | ((ctx1 << 16) & 0xff0000))
+
+#define KFD_SQ_INT_DATA__ERR_TYPE_MASK 0xF00000
+#define KFD_SQ_INT_DATA__ERR_TYPE__SHIFT 20
static bool event_interrupt_isr_v9(struct kfd_dev *dev,
const uint32_t *ih_ring_entry,
@@ -108,13 +172,15 @@ static void event_interrupt_wq_v9(struct kfd_dev *dev,
const uint32_t *ih_ring_entry)
{
uint16_t source_id, client_id, pasid, vmid;
- uint32_t context_id;
+ uint32_t context_id0, context_id1;
+ uint32_t sq_intr_err, sq_int_data, encoding;
source_id = SOC15_SOURCE_ID_FROM_IH_ENTRY(ih_ring_entry);
client_id = SOC15_CLIENT_ID_FROM_IH_ENTRY(ih_ring_entry);
pasid = SOC15_PASID_FROM_IH_ENTRY(ih_ring_entry);
vmid = SOC15_VMID_FROM_IH_ENTRY(ih_ring_entry);
- context_id = SOC15_CONTEXT_ID0_FROM_IH_ENTRY(ih_ring_entry);
+ context_id0 = SOC15_CONTEXT_ID0_FROM_IH_ENTRY(ih_ring_entry);
+ context_id1 = SOC15_CONTEXT_ID1_FROM_IH_ENTRY(ih_ring_entry);
if (client_id == SOC15_IH_CLIENTID_GRBM_CP ||
client_id == SOC15_IH_CLIENTID_SE0SH ||
@@ -122,10 +188,59 @@ static void event_interrupt_wq_v9(struct kfd_dev *dev,
client_id == SOC15_IH_CLIENTID_SE2SH ||
client_id == SOC15_IH_CLIENTID_SE3SH) {
if (source_id == SOC15_INTSRC_CP_END_OF_PIPE)
- kfd_signal_event_interrupt(pasid, context_id, 32);
- else if (source_id == SOC15_INTSRC_SQ_INTERRUPT_MSG)
- kfd_signal_event_interrupt(pasid, context_id & 0xffffff, 24);
- else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE)
+ kfd_signal_event_interrupt(pasid, context_id0, 32);
+ else if (source_id == SOC15_INTSRC_SQ_INTERRUPT_MSG) {
+ sq_int_data = KFD_CONTEXT_ID_GET_SQ_INT_DATA(context_id0, context_id1);
+ encoding = REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, ENCODING);
+ switch (encoding) {
+ case SQ_INTERRUPT_WORD_ENCODING_AUTO:
+ pr_debug(
+ "sq_intr: auto, se %d, ttrace %d, wlt %d, ttrace_buf_full %d, reg_tms %d, cmd_tms %d, host_cmd_ovf %d, host_reg_ovf %d, immed_ovf %d, ttrace_utc_err %d\n",
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, SE_ID),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, THREAD_TRACE),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, WLT),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, THREAD_TRACE_BUF_FULL),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, REG_TIMESTAMP),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, CMD_TIMESTAMP),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, HOST_CMD_OVERFLOW),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, HOST_REG_OVERFLOW),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, IMMED_OVERFLOW),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, THREAD_TRACE_UTC_ERROR));
+ break;
+ case SQ_INTERRUPT_WORD_ENCODING_INST:
+ pr_debug("sq_intr: inst, se %d, data 0x%x, sh %d, priv %d, wave_id %d, simd_id %d, cu_id %d, intr_data 0x%x\n",
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SE_ID),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, DATA),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SH_ID),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, PRIV),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, WAVE_ID),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SIMD_ID),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, CU_ID),
+ sq_int_data);
+ break;
+ case SQ_INTERRUPT_WORD_ENCODING_ERROR:
+ sq_intr_err = REG_GET_FIELD(sq_int_data, KFD_SQ_INT_DATA, ERR_TYPE);
+ pr_warn("sq_intr: error, se %d, data 0x%x, sh %d, priv %d, wave_id %d, simd_id %d, cu_id %d, err_type %d\n",
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SE_ID),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, DATA),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SH_ID),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, PRIV),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, WAVE_ID),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SIMD_ID),
+ REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, CU_ID),
+ sq_intr_err);
+ if (sq_intr_err != SQ_INTERRUPT_ERROR_TYPE_ILLEGAL_INST &&
+ sq_intr_err != SQ_INTERRUPT_ERROR_TYPE_MEMVIOL) {
+ kfd_signal_hw_exception_event(pasid);
+ amdgpu_amdkfd_gpu_reset(dev->kgd);
+ return;
+ }
+ break;
+ default:
+ break;
+ }
+ kfd_signal_event_interrupt(pasid, context_id0 & 0xffffff, 24);
+ } else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE)
kfd_signal_hw_exception_event(pasid);
} else if (client_id == SOC15_IH_CLIENTID_SDMA0 ||
client_id == SOC15_IH_CLIENTID_SDMA1 ||
@@ -136,7 +251,7 @@ static void event_interrupt_wq_v9(struct kfd_dev *dev,
client_id == SOC15_IH_CLIENTID_SDMA6 ||
client_id == SOC15_IH_CLIENTID_SDMA7) {
if (source_id == SOC15_INTSRC_SDMA_TRAP)
- kfd_signal_event_interrupt(pasid, context_id & 0xfffffff, 28);
+ kfd_signal_event_interrupt(pasid, context_id0 & 0xfffffff, 28);
} else if (client_id == SOC15_IH_CLIENTID_VMC ||
client_id == SOC15_IH_CLIENTID_VMC1 ||
client_id == SOC15_IH_CLIENTID_UTCL2) {
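
KFD_CONTEXT_ID_GET_SQ_INT_DATA reassembles the 24-bit SQ interrupt payload that the hardware scatters across both context-id dwords; worked through on sample values:

/* data[11:0]  = ctx0[11:0]
 * data[15:12] = ctx0[31:28]   via (ctx0 >> 16) & 0xf000
 * data[23:16] = ctx1[7:0]     via (ctx1 << 16) & 0xff0000
 *
 * e.g. ctx0 = 0xa0000123, ctx1 = 0x000000cd:
 *   0x123 | 0xa000 | 0xcd0000 = 0x00cda123
 * and ERR_TYPE = data[23:20] = 0xc for this sample
 * (mask 0xf00000, shift 20, per the KFD_SQ_INT_DATA defines above).
 */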
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
index 9318936aa805..73f2257acc23 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
@@ -135,11 +135,11 @@ int kfd_iommu_bind_process_to_device(struct kfd_process_device *pdd)
*/
void kfd_iommu_unbind_process(struct kfd_process *p)
{
- struct kfd_process_device *pdd;
+ int i;
- list_for_each_entry(pdd, &p->per_device_data, per_device_list)
- if (pdd->bound == PDD_BOUND)
- amd_iommu_unbind_pasid(pdd->dev->pdev, p->pasid);
+ for (i = 0; i < p->n_pdds; i++)
+ if (p->pdds[i]->bound == PDD_BOUND)
+ amd_iommu_unbind_pasid(p->pdds[i]->dev->pdev, p->pasid);
}
/* Callback for process shutdown invoked by the IOMMU driver */
@@ -333,10 +333,6 @@ int kfd_iommu_resume(struct kfd_dev *kfd)
return 0;
}
-extern bool amd_iommu_pc_supported(void);
-extern u8 amd_iommu_pc_get_max_banks(u16 devid);
-extern u8 amd_iommu_pc_get_max_counters(u16 devid);
-
/** kfd_iommu_add_perf_counters - Add IOMMU performance counters to topology
*/
int kfd_iommu_add_perf_counters(struct kfd_topology_device *kdev)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
index d903f694acba..e840dd581719 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
@@ -348,7 +348,7 @@ fail_create_runlist_ib:
}
int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
- uint32_t fence_value)
+ uint64_t fence_value)
{
uint32_t *buffer, size;
int retval = 0;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
index dfaf771a42e6..e3ba0cd3b6fa 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
@@ -283,7 +283,7 @@ static int pm_unmap_queues_v9(struct packet_manager *pm, uint32_t *buffer,
}
static int pm_query_status_v9(struct packet_manager *pm, uint32_t *buffer,
- uint64_t fence_address, uint32_t fence_value)
+ uint64_t fence_address, uint64_t fence_value)
{
struct pm4_mes_query_status *packet;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c
index a852e0d7d804..08442e7d9944 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c
@@ -263,7 +263,7 @@ static int pm_unmap_queues_vi(struct packet_manager *pm, uint32_t *buffer,
}
static int pm_query_status_vi(struct packet_manager *pm, uint32_t *buffer,
- uint64_t fence_address, uint32_t fence_value)
+ uint64_t fence_address, uint64_t fence_value)
{
struct pm4_mes_query_status *packet;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index d8c8b5ff449a..0b6595f7acda 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -45,6 +45,7 @@
#include <linux/swap.h>
#include "amd_shared.h"
+#include "amdgpu.h"
#define KFD_MAX_RING_ENTRY_SIZE 8
@@ -649,12 +650,6 @@ enum kfd_pdd_bound {
/* Data that is per-process-per device. */
struct kfd_process_device {
- /*
- * List of all per-device data for a process.
- * Starts from kfd_process.per_device_data.
- */
- struct list_head per_device_list;
-
/* The device that owns this data. */
struct kfd_dev *dev;
@@ -771,10 +766,11 @@ struct kfd_process {
u32 pasid;
/*
- * List of kfd_process_device structures,
+ * Array of kfd_process_device pointers,
* one for each device the process is using.
*/
- struct list_head per_device_data;
+ struct kfd_process_device *pdds[MAX_GPU_INSTANCE];
+ uint32_t n_pdds;
struct process_queue_manager pqm;
@@ -872,14 +868,6 @@ void *kfd_process_device_translate_handle(struct kfd_process_device *p,
void kfd_process_device_remove_obj_handle(struct kfd_process_device *pdd,
int handle);
-/* Process device data iterator */
-struct kfd_process_device *kfd_get_first_process_device_data(
- struct kfd_process *p);
-struct kfd_process_device *kfd_get_next_process_device_data(
- struct kfd_process *p,
- struct kfd_process_device *pdd);
-bool kfd_has_process_device_data(struct kfd_process *p);
-
/* PASIDs */
int kfd_pasid_init(void);
void kfd_pasid_exit(void);
@@ -1012,8 +1000,8 @@ int pqm_get_wave_state(struct process_queue_manager *pqm,
u32 *ctl_stack_used_size,
u32 *save_area_used_size);
-int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
- unsigned int fence_value,
+int amdkfd_fence_wait_timeout(uint64_t *fence_addr,
+ uint64_t fence_value,
unsigned int timeout_ms);
/* Packet Manager */
@@ -1049,7 +1037,7 @@ struct packet_manager_funcs {
uint32_t filter_param, bool reset,
unsigned int sdma_engine);
int (*query_status)(struct packet_manager *pm, uint32_t *buffer,
- uint64_t fence_address, uint32_t fence_value);
+ uint64_t fence_address, uint64_t fence_value);
int (*release_mem)(uint64_t gpu_addr, uint32_t *buffer);
/* Packet sizes */
@@ -1071,7 +1059,7 @@ int pm_send_set_resources(struct packet_manager *pm,
struct scheduling_resources *res);
int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues);
int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
- uint32_t fence_value);
+ uint64_t fence_value);
int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
enum kfd_unmap_queues_filter mode,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index f5237997fa18..d97e330a5022 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -505,7 +505,7 @@ static int kfd_sysfs_create_file(struct kfd_process *p, struct attribute *attr,
static int kfd_procfs_add_sysfs_stats(struct kfd_process *p)
{
int ret = 0;
- struct kfd_process_device *pdd;
+ int i;
char stats_dir_filename[MAX_SYSFS_FILENAME_LEN];
if (!p)
@@ -520,7 +520,8 @@ static int kfd_procfs_add_sysfs_stats(struct kfd_process *p)
* - proc/<pid>/stats_<gpuid>/evicted_ms
* - proc/<pid>/stats_<gpuid>/cu_occupancy
*/
- list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
+ for (i = 0; i < p->n_pdds; i++) {
+ struct kfd_process_device *pdd = p->pdds[i];
struct kobject *kobj_stats;
snprintf(stats_dir_filename, MAX_SYSFS_FILENAME_LEN,
@@ -571,7 +572,7 @@ err:
static int kfd_procfs_add_sysfs_files(struct kfd_process *p)
{
int ret = 0;
- struct kfd_process_device *pdd;
+ int i;
if (!p)
return -EINVAL;
@@ -584,7 +585,9 @@ static int kfd_procfs_add_sysfs_files(struct kfd_process *p)
* - proc/<pid>/vram_<gpuid>
* - proc/<pid>/sdma_<gpuid>
*/
- list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
+ for (i = 0; i < p->n_pdds; i++) {
+ struct kfd_process_device *pdd = p->pdds[i];
+
snprintf(pdd->vram_filename, MAX_SYSFS_FILENAME_LEN, "vram_%u",
pdd->dev->id);
ret = kfd_sysfs_create_file(p, &pdd->attr_vram, pdd->vram_filename);
@@ -881,21 +884,23 @@ void kfd_unref_process(struct kfd_process *p)
kref_put(&p->ref, kfd_process_ref_release);
}
+
static void kfd_process_device_free_bos(struct kfd_process_device *pdd)
{
struct kfd_process *p = pdd->process;
void *mem;
int id;
+ int i;
/*
* Remove all handles from idr and release appropriate
* local memory object
*/
idr_for_each_entry(&pdd->alloc_idr, mem, id) {
- struct kfd_process_device *peer_pdd;
- list_for_each_entry(peer_pdd, &p->per_device_data,
- per_device_list) {
+ for (i = 0; i < p->n_pdds; i++) {
+ struct kfd_process_device *peer_pdd = p->pdds[i];
+
if (!peer_pdd->vm)
continue;
amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
@@ -909,18 +914,19 @@ static void kfd_process_device_free_bos(struct kfd_process_device *pdd)
static void kfd_process_free_outstanding_kfd_bos(struct kfd_process *p)
{
- struct kfd_process_device *pdd;
+ int i;
- list_for_each_entry(pdd, &p->per_device_data, per_device_list)
- kfd_process_device_free_bos(pdd);
+ for (i = 0; i < p->n_pdds; i++)
+ kfd_process_device_free_bos(p->pdds[i]);
}
static void kfd_process_destroy_pdds(struct kfd_process *p)
{
- struct kfd_process_device *pdd, *temp;
+ int i;
+
+ for (i = 0; i < p->n_pdds; i++) {
+ struct kfd_process_device *pdd = p->pdds[i];
- list_for_each_entry_safe(pdd, temp, &p->per_device_data,
- per_device_list) {
pr_debug("Releasing pdd (topology id %d) for process (pasid 0x%x)\n",
pdd->dev->id, p->pasid);
@@ -929,11 +935,6 @@ static void kfd_process_destroy_pdds(struct kfd_process *p)
pdd->dev->kgd, pdd->vm);
fput(pdd->drm_file);
}
- else if (pdd->vm)
- amdgpu_amdkfd_gpuvm_destroy_process_vm(
- pdd->dev->kgd, pdd->vm);
-
- list_del(&pdd->per_device_list);
if (pdd->qpd.cwsr_kaddr && !pdd->qpd.cwsr_base)
free_pages((unsigned long)pdd->qpd.cwsr_kaddr,
@@ -955,7 +956,9 @@ static void kfd_process_destroy_pdds(struct kfd_process *p)
}
kfree(pdd);
+ p->pdds[i] = NULL;
}
+ p->n_pdds = 0;
}
/* No process locking is needed in this function, because the process
@@ -967,7 +970,7 @@ static void kfd_process_wq_release(struct work_struct *work)
{
struct kfd_process *p = container_of(work, struct kfd_process,
release_work);
- struct kfd_process_device *pdd;
+ int i;
/* Remove the procfs files */
if (p->kobj) {
@@ -976,7 +979,9 @@ static void kfd_process_wq_release(struct work_struct *work)
kobject_put(p->kobj_queues);
p->kobj_queues = NULL;
- list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
+ for (i = 0; i < p->n_pdds; i++) {
+ struct kfd_process_device *pdd = p->pdds[i];
+
sysfs_remove_file(p->kobj, &pdd->attr_vram);
sysfs_remove_file(p->kobj, &pdd->attr_sdma);
sysfs_remove_file(p->kobj, &pdd->attr_evict);
@@ -1036,7 +1041,7 @@ static void kfd_process_notifier_release(struct mmu_notifier *mn,
struct mm_struct *mm)
{
struct kfd_process *p;
- struct kfd_process_device *pdd = NULL;
+ int i;
/*
* The kfd_process structure can not be free because the
@@ -1060,8 +1065,8 @@ static void kfd_process_notifier_release(struct mmu_notifier *mn,
* pdd is in debug mode, we should first force unregistration,
* then we will be able to destroy the queues
*/
- list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
- struct kfd_dev *dev = pdd->dev;
+ for (i = 0; i < p->n_pdds; i++) {
+ struct kfd_dev *dev = p->pdds[i]->dev;
mutex_lock(kfd_get_dbgmgr_mutex());
if (dev && dev->dbgmgr && dev->dbgmgr->pasid == p->pasid) {
@@ -1098,11 +1103,11 @@ static const struct mmu_notifier_ops kfd_process_mmu_notifier_ops = {
static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep)
{
unsigned long offset;
- struct kfd_process_device *pdd;
+ int i;
- list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
- struct kfd_dev *dev = pdd->dev;
- struct qcm_process_device *qpd = &pdd->qpd;
+ for (i = 0; i < p->n_pdds; i++) {
+ struct kfd_dev *dev = p->pdds[i]->dev;
+ struct qcm_process_device *qpd = &p->pdds[i]->qpd;
if (!dev->cwsr_enabled || qpd->cwsr_kaddr || qpd->cwsr_base)
continue;
@@ -1199,7 +1204,7 @@ static struct kfd_process *create_process(const struct task_struct *thread)
mutex_init(&process->mutex);
process->mm = thread->mm;
process->lead_thread = thread->group_leader;
- INIT_LIST_HEAD(&process->per_device_data);
+ process->n_pdds = 0;
INIT_DELAYED_WORK(&process->eviction_work, evict_process_worker);
INIT_DELAYED_WORK(&process->restore_work, restore_process_worker);
process->last_restore_timestamp = get_jiffies_64();
@@ -1290,11 +1295,11 @@ static int init_doorbell_bitmap(struct qcm_process_device *qpd,
struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev,
struct kfd_process *p)
{
- struct kfd_process_device *pdd = NULL;
+ int i;
- list_for_each_entry(pdd, &p->per_device_data, per_device_list)
- if (pdd->dev == dev)
- return pdd;
+ for (i = 0; i < p->n_pdds; i++)
+ if (p->pdds[i]->dev == dev)
+ return p->pdds[i];
return NULL;
}
@@ -1304,6 +1309,8 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
{
struct kfd_process_device *pdd = NULL;
+ if (WARN_ON_ONCE(p->n_pdds >= MAX_GPU_INSTANCE))
+ return NULL;
pdd = kzalloc(sizeof(*pdd), GFP_KERNEL);
if (!pdd)
return NULL;
@@ -1332,7 +1339,7 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
pdd->vram_usage = 0;
pdd->sdma_past_activity_counter = 0;
atomic64_set(&pdd->evict_duration_counter, 0);
- list_add(&pdd->per_device_list, &p->per_device_data);
+ p->pdds[p->n_pdds++] = pdd;
/* Init idr used for memory handle translation */
idr_init(&pdd->alloc_idr);
@@ -1365,19 +1372,18 @@ int kfd_process_device_init_vm(struct kfd_process_device *pdd,
struct kfd_dev *dev;
int ret;
+ if (!drm_file)
+ return -EINVAL;
+
if (pdd->vm)
- return drm_file ? -EBUSY : 0;
+ return -EBUSY;
p = pdd->process;
dev = pdd->dev;
- if (drm_file)
- ret = amdgpu_amdkfd_gpuvm_acquire_process_vm(
- dev->kgd, drm_file, p->pasid,
- &pdd->vm, &p->kgd_process_info, &p->ef);
- else
- ret = amdgpu_amdkfd_gpuvm_create_process_vm(dev->kgd, p->pasid,
- &pdd->vm, &p->kgd_process_info, &p->ef);
+ ret = amdgpu_amdkfd_gpuvm_acquire_process_vm(
+ dev->kgd, drm_file, p->pasid,
+ &pdd->vm, &p->kgd_process_info, &p->ef);
if (ret) {
pr_err("Failed to create process VM object\n");
return ret;
@@ -1399,8 +1405,6 @@ int kfd_process_device_init_vm(struct kfd_process_device *pdd,
err_init_cwsr:
err_reserve_ib_mem:
kfd_process_device_free_bos(pdd);
- if (!drm_file)
- amdgpu_amdkfd_gpuvm_destroy_process_vm(dev->kgd, pdd->vm);
pdd->vm = NULL;
return ret;
@@ -1425,6 +1429,9 @@ struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
return ERR_PTR(-ENOMEM);
}
+ if (!pdd->vm)
+ return ERR_PTR(-ENODEV);
+
/*
* signal runtime-pm system to auto resume and prevent
* further runtime suspend once device pdd is created until
@@ -1442,10 +1449,6 @@ struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
if (err)
goto out;
- err = kfd_process_device_init_vm(pdd, NULL);
- if (err)
- goto out;
-
/*
* make sure that runtime_usage counter is incremented just once
* per pdd
@@ -1464,28 +1467,6 @@ out:
return ERR_PTR(err);
}
-struct kfd_process_device *kfd_get_first_process_device_data(
- struct kfd_process *p)
-{
- return list_first_entry(&p->per_device_data,
- struct kfd_process_device,
- per_device_list);
-}
-
-struct kfd_process_device *kfd_get_next_process_device_data(
- struct kfd_process *p,
- struct kfd_process_device *pdd)
-{
- if (list_is_last(&pdd->per_device_list, &p->per_device_data))
- return NULL;
- return list_next_entry(pdd, per_device_list);
-}
-
-bool kfd_has_process_device_data(struct kfd_process *p)
-{
- return !(list_empty(&p->per_device_data));
-}
-
/* Create specific handle mapped to mem from process local memory idr
* Assumes that the process lock is held.
*/
@@ -1561,11 +1542,13 @@ struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm)
*/
int kfd_process_evict_queues(struct kfd_process *p)
{
- struct kfd_process_device *pdd;
int r = 0;
+ int i;
unsigned int n_evicted = 0;
- list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
+ for (i = 0; i < p->n_pdds; i++) {
+ struct kfd_process_device *pdd = p->pdds[i];
+
r = pdd->dev->dqm->ops.evict_process_queues(pdd->dev->dqm,
&pdd->qpd);
if (r) {
@@ -1581,7 +1564,9 @@ fail:
/* To keep state consistent, roll back partial eviction by
* restoring queues
*/
- list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
+ for (i = 0; i < p->n_pdds; i++) {
+ struct kfd_process_device *pdd = p->pdds[i];
+
if (n_evicted == 0)
break;
if (pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm,
@@ -1597,10 +1582,12 @@ fail:
/* kfd_process_restore_queues - Restore all user queues of a process */
int kfd_process_restore_queues(struct kfd_process *p)
{
- struct kfd_process_device *pdd;
int r, ret = 0;
+ int i;
+
+ for (i = 0; i < p->n_pdds; i++) {
+ struct kfd_process_device *pdd = p->pdds[i];
- list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
r = pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm,
&pdd->qpd);
if (r) {
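Editor's note: taken together, the kfd_process.c hunks replace the per_device_data linked list with a fixed pdds[] array plus an n_pdds count. Creation appends under a MAX_GPU_INSTANCE bound, lookup and teardown become indexed walks, and the first/next/has iterator helpers disappear outright. A self-contained sketch of the pattern follows, with the structures cut down to what the conversion touches (MAX_GPU_INSTANCE mirrors the kernel constant; the dev_id payload is hypothetical).

#include <stdio.h>
#include <stdlib.h>

#define MAX_GPU_INSTANCE 64             /* mirrors the kernel bound */

struct kfd_process_device {
    int dev_id;                         /* stand-in for the real fields */
};

struct kfd_process {
    struct kfd_process_device *pdds[MAX_GPU_INSTANCE];
    unsigned int n_pdds;
};

/* Append, guarded the way kfd_create_process_device_data() now is. */
static struct kfd_process_device *pdd_create(struct kfd_process *p, int dev_id)
{
    struct kfd_process_device *pdd;

    if (p->n_pdds >= MAX_GPU_INSTANCE)
        return NULL;
    pdd = calloc(1, sizeof(*pdd));
    if (!pdd)
        return NULL;
    pdd->dev_id = dev_id;
    p->pdds[p->n_pdds++] = pdd;
    return pdd;
}

/* Lookup by device replaces the list walk in
 * kfd_get_process_device_data(). */
static struct kfd_process_device *pdd_lookup(struct kfd_process *p, int dev_id)
{
    unsigned int i;

    for (i = 0; i < p->n_pdds; i++)
        if (p->pdds[i]->dev_id == dev_id)
            return p->pdds[i];
    return NULL;
}

/* Teardown replaces list_for_each_entry_safe() plus list_del(). */
static void pdd_destroy_all(struct kfd_process *p)
{
    unsigned int i;

    for (i = 0; i < p->n_pdds; i++) {
        free(p->pdds[i]);
        p->pdds[i] = NULL;
    }
    p->n_pdds = 0;
}

int main(void)
{
    struct kfd_process p = { .n_pdds = 0 };

    pdd_create(&p, 1);
    pdd_create(&p, 2);
    printf("pdd for dev 2: %p\n", (void *)pdd_lookup(&p, 2));
    pdd_destroy_all(&p);
    return 0;
}

Indexed iteration also frees entries in place safely, which is why the _safe list variant and list_del() drop out of kfd_process_destroy_pdds().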
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index eb1635ac8988..95a6c36cea4c 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -126,10 +126,10 @@ int pqm_set_gws(struct process_queue_manager *pqm, unsigned int qid,
void kfd_process_dequeue_from_all_devices(struct kfd_process *p)
{
- struct kfd_process_device *pdd;
+ int i;
- list_for_each_entry(pdd, &p->per_device_data, per_device_list)
- kfd_process_dequeue_from_device(pdd);
+ for (i = 0; i < p->n_pdds; i++)
+ kfd_process_dequeue_from_device(p->pdds[i]);
}
int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
index 17d1736367ea..246522423559 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
@@ -81,7 +81,7 @@ static ssize_t kfd_smi_ev_read(struct file *filep, char __user *user,
struct kfd_smi_client *client = filep->private_data;
unsigned char *buf;
- buf = kmalloc(MAX_KFIFO_SIZE * sizeof(*buf), GFP_KERNEL);
+ buf = kmalloc_array(MAX_KFIFO_SIZE, sizeof(*buf), GFP_KERNEL);
if (!buf)
return -ENOMEM;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 00edf78975b1..389eff96fcf6 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -121,7 +121,7 @@ MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
* DOC: overview
*
* The AMDgpu display manager, **amdgpu_dm** (or even simpler,
- * **dm**) sits between DRM and DC. It acts as a liason, converting DRM
+ * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
* requests into DC requests, and DC responses into DRM responses.
*
* The root control structure is &struct amdgpu_display_manager.
@@ -130,6 +130,7 @@ MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);
+static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);
static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
{
@@ -371,14 +372,14 @@ static void dm_pflip_high_irq(void *interrupt_params)
/* IRQ could occur when in initial stage */
/* TODO work and BO cleanup */
if (amdgpu_crtc == NULL) {
- DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
+ DC_LOG_PFLIP("CRTC is null, returning.\n");
return;
}
spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
- DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
+ DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
amdgpu_crtc->pflip_status,
AMDGPU_FLIP_SUBMITTED,
amdgpu_crtc->crtc_id,
@@ -449,9 +450,9 @@ static void dm_pflip_high_irq(void *interrupt_params)
amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
- DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
- amdgpu_crtc->crtc_id, amdgpu_crtc,
- vrr_active, (int) !e);
+ DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
+ amdgpu_crtc->crtc_id, amdgpu_crtc,
+ vrr_active, (int) !e);
}
static void dm_vupdate_high_irq(void *interrupt_params)
@@ -459,6 +460,9 @@ static void dm_vupdate_high_irq(void *interrupt_params)
struct common_irq_params *irq_params = interrupt_params;
struct amdgpu_device *adev = irq_params->adev;
struct amdgpu_crtc *acrtc;
+ struct drm_device *drm_dev;
+ struct drm_vblank_crtc *vblank;
+ ktime_t frame_duration_ns, previous_timestamp;
unsigned long flags;
int vrr_active;
@@ -466,8 +470,19 @@ static void dm_vupdate_high_irq(void *interrupt_params)
if (acrtc) {
vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
+ drm_dev = acrtc->base.dev;
+ vblank = &drm_dev->vblank[acrtc->base.index];
+ previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
+ frame_duration_ns = vblank->time - previous_timestamp;
- DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
+ if (frame_duration_ns > 0) {
+ trace_amdgpu_refresh_rate_track(acrtc->base.index,
+ frame_duration_ns,
+ ktime_divns(NSEC_PER_SEC, frame_duration_ns));
+ atomic64_set(&irq_params->previous_timestamp, vblank->time);
+ }
+
+ DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
acrtc->crtc_id,
vrr_active);
@@ -520,7 +535,7 @@ static void dm_crtc_high_irq(void *interrupt_params)
vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
- DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
+ DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
vrr_active, acrtc->dm_irq_params.active_planes);
/**
@@ -923,6 +938,32 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
}
#if defined(CONFIG_DRM_AMD_DC_DCN)
+#define DMUB_TRACE_MAX_READ 64
+static void dm_dmub_trace_high_irq(void *interrupt_params)
+{
+ struct common_irq_params *irq_params = interrupt_params;
+ struct amdgpu_device *adev = irq_params->adev;
+ struct amdgpu_display_manager *dm = &adev->dm;
+ struct dmcub_trace_buf_entry entry = { 0 };
+ uint32_t count = 0;
+
+ do {
+ if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
+ trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
+ entry.param0, entry.param1);
+
+ DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
+ entry.trace_code, entry.tick_count, entry.param0, entry.param1);
+ } else
+ break;
+
+ count++;
+
+ } while (count <= DMUB_TRACE_MAX_READ);
+
+ ASSERT(count <= DMUB_TRACE_MAX_READ);
+}
+
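Editor's note: dm_dmub_trace_high_irq(), added above, drains the DMUB outbox0 ring from the interrupt handler but caps the work per invocation so a constantly refilling ring cannot monopolize the CPU. Note the kernel loop runs while count <= DMUB_TRACE_MAX_READ, so it can consume up to 65 entries, and the trailing ASSERT fires exactly when that cap was hit. A reduced sketch of the bounded-drain shape with a strict bound, where fetch_entry() is a hypothetical stand-in for dc_dmub_srv_get_dmub_outbox0_msg():

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TRACE_MAX_READ 64               /* mirrors DMUB_TRACE_MAX_READ */

struct trace_entry {
    uint32_t trace_code, tick_count, param0, param1;
};

/* Hypothetical producer: pretends the ring holds 100 entries. */
static bool fetch_entry(struct trace_entry *e)
{
    static uint32_t remaining = 100;

    if (!remaining)
        return false;
    e->trace_code = e->tick_count = e->param0 = e->param1 = --remaining;
    return true;
}

/* Drain at most TRACE_MAX_READ entries per invocation. */
static void drain_trace_ring(void)
{
    struct trace_entry entry;
    uint32_t count = 0;

    while (count < TRACE_MAX_READ && fetch_entry(&entry)) {
        printf("trace_code:%u tick:%u\n", entry.trace_code, entry.tick_count);
        count++;
    }
}

int main(void)
{
    drain_trace_ring();   /* consumes 64 entries */
    drain_trace_ring();   /* consumes the remaining 36 */
    return 0;
}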
static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
{
uint64_t pt_base;
@@ -987,13 +1028,12 @@ static void event_mall_stutter(struct work_struct *work)
if (vblank_work->enable)
dm->active_vblank_irq_count++;
- else
+ else if (dm->active_vblank_irq_count)
dm->active_vblank_irq_count--;
dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);
- DRM_DEBUG_DRIVER("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
-
+ DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
mutex_unlock(&dm->dc_lock);
}
@@ -1809,8 +1849,8 @@ static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
if (acrtc && state->stream_status[i].plane_count != 0) {
irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
- DRM_DEBUG("crtc %d - vupdate irq %sabling: r=%d\n",
- acrtc->crtc_id, enable ? "en" : "dis", rc);
+ DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
+ acrtc->crtc_id, enable ? "en" : "dis", rc);
if (rc)
DRM_WARN("Failed to %s pflip interrupts\n",
enable ? "enable" : "disable");
@@ -2512,11 +2552,14 @@ static void handle_hpd_irq(void *param)
struct drm_connector *connector = &aconnector->base;
struct drm_device *dev = connector->dev;
enum dc_connection_type new_connection_type = dc_connection_none;
-#ifdef CONFIG_DRM_AMD_DC_HDCP
struct amdgpu_device *adev = drm_to_adev(dev);
+#ifdef CONFIG_DRM_AMD_DC_HDCP
struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
#endif
+ if (adev->dm.disable_hpd_irq)
+ return;
+
/*
* In case of failure or MST no need to update connector status or notify the OS
* since (for MST case) MST does this in its own context.
@@ -2656,6 +2699,10 @@ static void handle_hpd_rx_irq(void *param)
memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
+ if (adev->dm.disable_hpd_irq)
+ return;
+
+
/*
* TODO:Temporary add mutex to protect hpd interrupt not have a gpio
* conflict, after implement i2c helper, this mutex should be
@@ -3104,6 +3151,28 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
}
+ if (dc->ctx->dmub_srv) {
+ i = DCN_1_0__SRCID__DMCUB_OUTBOX_HIGH_PRIORITY_READY_INT;
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->dmub_trace_irq);
+
+ if (r) {
+ DRM_ERROR("Failed to add dmub trace irq id!\n");
+ return r;
+ }
+
+ int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
+ int_params.irq_source =
+ dc_interrupt_to_irq_source(dc, i, 0);
+
+ c_irq_params = &adev->dm.dmub_trace_params[0];
+
+ c_irq_params->adev = adev;
+ c_irq_params->irq_src = int_params.irq_source;
+
+ amdgpu_dm_irq_register_interrupt(adev, &int_params,
+ dm_dmub_trace_high_irq, c_irq_params);
+ }
+
/* HPD */
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
&adev->hpd_irq);
@@ -3946,6 +4015,23 @@ static int fill_dc_scaling_info(const struct drm_plane_state *state,
scaling_info->src_rect.x = state->src_x >> 16;
scaling_info->src_rect.y = state->src_y >> 16;
+ /*
+ * For reasons we don't (yet) fully understand, a non-zero
+ * src_y coordinate into an NV12 buffer can cause a
+ * system hang. To avoid hangs (and maybe be overly cautious)
+ * let's reject both non-zero src_x and src_y.
+ *
+ * We currently know of only one use-case to reproduce a
+ * scenario with non-zero src_x and src_y for NV12, which
+ * is to gesture the YouTube Android app into full screen
+ * on ChromeOS.
+ */
+ if (state->fb &&
+ state->fb->format->format == DRM_FORMAT_NV12 &&
+ (scaling_info->src_rect.x != 0 ||
+ scaling_info->src_rect.y != 0))
+ return -EINVAL;
+
scaling_info->src_rect.width = state->src_w >> 16;
if (scaling_info->src_rect.width == 0)
return -EINVAL;
@@ -4163,6 +4249,7 @@ static bool dm_plane_format_mod_supported(struct drm_plane *plane,
{
struct amdgpu_device *adev = drm_to_adev(plane->dev);
const struct drm_format_info *info = drm_format_info(format);
+ int i;
enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
@@ -4170,17 +4257,21 @@ static bool dm_plane_format_mod_supported(struct drm_plane *plane,
return false;
/*
- * We always have to allow this modifier, because core DRM still
- * checks LINEAR support if userspace does not provide modifers.
+ * We always have to allow these modifiers:
+ * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
+ * 2. Not passing any modifiers is the same as explicitly passing INVALID.
*/
- if (modifier == DRM_FORMAT_MOD_LINEAR)
+ if (modifier == DRM_FORMAT_MOD_LINEAR ||
+ modifier == DRM_FORMAT_MOD_INVALID) {
return true;
+ }
- /*
- * The arbitrary tiling support for multiplane formats has not been hooked
- * up.
- */
- if (info->num_planes > 1)
+ /* Check that the modifier is on the list of the plane's supported modifiers. */
+ for (i = 0; i < plane->modifier_count; i++) {
+ if (modifier == plane->modifiers[i])
+ break;
+ }
+ if (i == plane->modifier_count)
return false;
/*
@@ -4201,6 +4292,10 @@ static bool dm_plane_format_mod_supported(struct drm_plane *plane,
/* Per radeonsi comments 16/64 bpp are more complicated. */
if (info->cpp[0] != 4)
return false;
+ /* We support multi-planar formats, but not when combined with
+ * additional DCC metadata planes. */
+ if (info->num_planes > 1)
+ return false;
}
return true;
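Editor's note: with this hunk dm_plane_format_mod_supported() accepts LINEAR and INVALID unconditionally and otherwise requires the modifier to appear on the plane's advertised list, instead of blanket-rejecting multi-plane formats. A sketch of that acceptance logic in isolation; the two DRM_FORMAT_MOD_* values match the DRM UAPI, the plane struct is reduced to the two fields the check needs, and the modifier in main() is a made-up placeholder:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Values as in the DRM UAPI (include/uapi/drm/drm_fourcc.h). */
#define DRM_FORMAT_MOD_LINEAR  0ULL
#define DRM_FORMAT_MOD_INVALID 0x00ffffffffffffffULL

struct plane_caps {
    const uint64_t *modifiers;
    unsigned int modifier_count;
};

static bool modifier_supported(const struct plane_caps *caps, uint64_t modifier)
{
    unsigned int i;

    /* Always allowed: core DRM probes LINEAR when userspace passes no
     * modifiers, and passing none is equivalent to passing INVALID. */
    if (modifier == DRM_FORMAT_MOD_LINEAR ||
        modifier == DRM_FORMAT_MOD_INVALID)
        return true;

    /* Otherwise the modifier must be on the plane's advertised list. */
    for (i = 0; i < caps->modifier_count; i++)
        if (caps->modifiers[i] == modifier)
            return true;
    return false;
}

int main(void)
{
    const uint64_t mods[] = { 0x0100000000000001ULL };  /* placeholder value */
    struct plane_caps caps = { mods, 1 };

    printf("linear:%d listed:%d other:%d\n",
           modifier_supported(&caps, DRM_FORMAT_MOD_LINEAR),
           modifier_supported(&caps, mods[0]),
           modifier_supported(&caps, 0x42ULL));
    return 0;
}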
@@ -4401,7 +4496,7 @@ add_gfx10_3_modifiers(const struct amdgpu_device *adev,
AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
- AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
+ AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
add_modifier(mods, size, capacity, AMD_FMT_MOD |
AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
@@ -4413,7 +4508,7 @@ add_gfx10_3_modifiers(const struct amdgpu_device *adev,
AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
- AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
+ AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
add_modifier(mods, size, capacity, AMD_FMT_MOD |
AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
@@ -4892,8 +4987,8 @@ static void update_stream_scaling_settings(const struct drm_display_mode *mode,
stream->src = src;
stream->dst = dst;
- DRM_DEBUG_DRIVER("Destination Rectangle x:%d y:%d width:%d height:%d\n",
- dst.x, dst.y, dst.width, dst.height);
+ DRM_DEBUG_KMS("Destination Rectangle x:%d y:%d width:%d height:%d\n",
+ dst.x, dst.y, dst.width, dst.height);
}
@@ -5106,15 +5201,27 @@ static void fill_stream_properties_from_drm_display_mode(
timing_out->hdmi_vic = hv_frame.vic;
}
- timing_out->h_addressable = mode_in->hdisplay;
- timing_out->h_total = mode_in->htotal;
- timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
- timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
- timing_out->v_total = mode_in->vtotal;
- timing_out->v_addressable = mode_in->vdisplay;
- timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
- timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
- timing_out->pix_clk_100hz = mode_in->clock * 10;
+ if (is_freesync_video_mode(mode_in, aconnector)) {
+ timing_out->h_addressable = mode_in->hdisplay;
+ timing_out->h_total = mode_in->htotal;
+ timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
+ timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
+ timing_out->v_total = mode_in->vtotal;
+ timing_out->v_addressable = mode_in->vdisplay;
+ timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
+ timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
+ timing_out->pix_clk_100hz = mode_in->clock * 10;
+ } else {
+ timing_out->h_addressable = mode_in->crtc_hdisplay;
+ timing_out->h_total = mode_in->crtc_htotal;
+ timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
+ timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
+ timing_out->v_total = mode_in->crtc_vtotal;
+ timing_out->v_addressable = mode_in->crtc_vdisplay;
+ timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
+ timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
+ timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
+ }
timing_out->aspect_ratio = get_aspect_ratio(mode_in);
@@ -5234,9 +5341,14 @@ create_fake_sink(struct amdgpu_dm_connector *aconnector)
static void set_multisync_trigger_params(
struct dc_stream_state *stream)
{
+ struct dc_stream_state *master = NULL;
+
if (stream->triggered_crtc_reset.enabled) {
- stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
- stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
+ master = stream->triggered_crtc_reset.event_source;
+ stream->triggered_crtc_reset.event =
+ master->timing.flags.VSYNC_POSITIVE_POLARITY ?
+ CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
+ stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
}
}
@@ -5266,6 +5378,7 @@ static void set_master_stream(struct dc_stream_state *stream_set[],
static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
{
int i = 0;
+ struct dc_stream_state *stream;
if (context->stream_count < 2)
return;
@@ -5277,9 +5390,18 @@ static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
* crtc_sync_master.multi_sync_enabled flag
* For now it's set to false
*/
- set_multisync_trigger_params(context->streams[i]);
}
+
set_master_stream(context->streams, context->stream_count);
+
+ for (i = 0; i < context->stream_count ; i++) {
+ stream = context->streams[i];
+
+ if (!stream)
+ continue;
+
+ set_multisync_trigger_params(stream);
+ }
}
static struct drm_display_mode *
@@ -5335,7 +5457,7 @@ get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
return m_pref;
}
-static bool is_freesync_video_mode(struct drm_display_mode *mode,
+static bool is_freesync_video_mode(const struct drm_display_mode *mode,
struct amdgpu_dm_connector *aconnector)
{
struct drm_display_mode *high_mode;
@@ -5458,7 +5580,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
if (recalculate_timing)
drm_mode_set_crtcinfo(&saved_mode, 0);
- else
+ else if (!dm_state)
drm_mode_set_crtcinfo(&mode, 0);
/*
@@ -5636,8 +5758,8 @@ static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
- DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
- acrtc->crtc_id, enable ? "en" : "dis", rc);
+ DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
+ acrtc->crtc_id, enable ? "en" : "dis", rc);
return rc;
}
@@ -6075,6 +6197,15 @@ create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
} while (stream == NULL && requested_bpc >= 6);
+ if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
+ DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
+
+ aconnector->force_yuv420_output = true;
+ stream = create_validate_stream_for_sink(aconnector, drm_mode,
+ dm_state, old_stream);
+ aconnector->force_yuv420_output = false;
+ }
+
return stream;
}
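Editor's note: the retry added above is a one-shot fallback. When validation fails at the encoder (DC_FAIL_ENC_VALIDATE), the connector is temporarily forced to YCbCr 4:2:0 — roughly half the link bandwidth of 4:4:4 at the same mode — and the whole create/validate path reruns once; the !force_yuv420_output guard keeps the recursion from looping. The flag-guarded shape, sketched with hypothetical names:

#include <stdbool.h>
#include <stdio.h>

struct sink_ctx {
    bool force_yuv420;          /* mirrors aconnector->force_yuv420_output */
};

/* Hypothetical validator: pretends RGB fails on a bandwidth-limited link. */
static void *try_create_stream(struct sink_ctx *ctx)
{
    static int stream_obj;

    return ctx->force_yuv420 ? (void *)&stream_obj : NULL;
}

static void *create_validated_stream(struct sink_ctx *ctx)
{
    void *stream = try_create_stream(ctx);

    /* One-shot fallback: force 4:2:0, retry once, restore the flag so
     * later attempts start from the caller's state. */
    if (!stream && !ctx->force_yuv420) {
        ctx->force_yuv420 = true;
        stream = try_create_stream(ctx);
        ctx->force_yuv420 = false;
    }
    return stream;
}

int main(void)
{
    struct sink_ctx ctx = { .force_yuv420 = false };

    printf("stream: %p\n", create_validated_stream(&ctx));
    return 0;
}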
@@ -6577,7 +6708,7 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
int r;
if (!new_state->fb) {
- DRM_DEBUG_DRIVER("No FB bound\n");
+ DRM_DEBUG_KMS("No FB bound\n");
return 0;
}
@@ -7295,7 +7426,7 @@ static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connect
if (!(amdgpu_freesync_vid_mode && edid))
return;
-
+
if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
amdgpu_dm_connector->num_modes +=
add_fs_modes(amdgpu_dm_connector);
@@ -7810,11 +7941,11 @@ static void handle_cursor_update(struct drm_plane *plane,
if (!plane->state->fb && !old_plane_state->fb)
return;
- DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
- __func__,
- amdgpu_crtc->crtc_id,
- plane->state->crtc_w,
- plane->state->crtc_h);
+ DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n",
+ __func__,
+ amdgpu_crtc->crtc_id,
+ plane->state->crtc_w,
+ plane->state->crtc_h);
ret = get_cursor_position(plane, crtc, &position);
if (ret)
@@ -7872,8 +8003,8 @@ static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
/* Mark this event as consumed */
acrtc->base.state->event = NULL;
- DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
- acrtc->crtc_id);
+ DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
+ acrtc->crtc_id);
}
static void update_freesync_state_on_stream(
@@ -8179,7 +8310,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
&bundle->flip_addrs[planes_count].address,
afb->tmz_surface, false);
- DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
+ DRM_DEBUG_ATOMIC("plane: id=%d dcc_en=%d\n",
new_plane_state->plane->index,
bundle->plane_infos[planes_count].dcc.enable);
@@ -8213,7 +8344,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
dc_plane,
bundle->flip_addrs[planes_count].flip_timestamp_in_us);
- DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
+ DRM_DEBUG_ATOMIC("%s Flipping to hi: 0x%x, low: 0x%x\n",
__func__,
bundle->flip_addrs[planes_count].address.grph.addr.high_part,
bundle->flip_addrs[planes_count].address.grph.addr.low_part);
@@ -8535,7 +8666,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
- DRM_DEBUG_DRIVER(
+ DRM_DEBUG_ATOMIC(
"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
"planes_changed:%d, mode_changed:%d,active_changed:%d,"
"connectors_changed:%d\n",
@@ -8569,7 +8700,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
- DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
+ DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
if (!dm_new_crtc_state->stream) {
/*
@@ -8602,7 +8733,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
crtc->hwmode = new_crtc_state->mode;
mode_set_reset_required = true;
} else if (modereset_required(new_crtc_state)) {
- DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
+ DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
/* i.e. reset mode */
if (dm_old_crtc_state->stream)
remove_stream(adev, acrtc, dm_old_crtc_state->stream);
@@ -8619,6 +8750,11 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
dm_enable_per_frame_crtc_master_sync(dc_state);
mutex_lock(&dm->dc_lock);
WARN_ON(!dc_commit_state(dm->dc, dc_state));
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ /* Allow idle optimization when vblank count is 0 for display off */
+ if (dm->active_vblank_irq_count == 0)
+ dc_allow_idle_optimizations(dm->dc, true);
+#endif
mutex_unlock(&dm->dc_lock);
}
@@ -9207,7 +9343,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
goto skip_modeset;
- DRM_DEBUG_DRIVER(
+ DRM_DEBUG_ATOMIC(
"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
"planes_changed:%d, mode_changed:%d,active_changed:%d,"
"connectors_changed:%d\n",
@@ -9291,8 +9427,8 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
dc_stream_retain(new_stream);
- DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
- crtc->base.id);
+ DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
+ crtc->base.id);
if (dc_add_stream_to_ctx(
dm->dc,
@@ -9637,8 +9773,8 @@ static int dm_update_plane_state(struct dc *dc,
if (!dc_new_plane_state)
return -ENOMEM;
- DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
- plane->base.id, new_plane_crtc->base.id);
+ DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
+ plane->base.id, new_plane_crtc->base.id);
ret = fill_dc_plane_attributes(
drm_to_adev(new_plane_crtc->dev),
@@ -9701,7 +9837,8 @@ static int dm_check_crtc_cursor(struct drm_atomic_state *state,
new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
- if (!new_cursor_state || !new_primary_state || !new_cursor_state->fb) {
+ if (!new_cursor_state || !new_primary_state ||
+ !new_cursor_state->fb || !new_primary_state->fb) {
return 0;
}
@@ -9749,6 +9886,53 @@ static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm
}
#endif
+static int validate_overlay(struct drm_atomic_state *state)
+{
+ int i;
+ struct drm_plane *plane;
+ struct drm_plane_state *old_plane_state, *new_plane_state;
+ struct drm_plane_state *primary_state, *overlay_state = NULL;
+
+ /* Check if primary plane is contained inside overlay */
+ for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
+ if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
+ if (drm_atomic_plane_disabling(plane->state, new_plane_state))
+ return 0;
+
+ overlay_state = new_plane_state;
+ continue;
+ }
+ }
+
+ /* check if we're making changes to the overlay plane */
+ if (!overlay_state)
+ return 0;
+
+ /* check if overlay plane is enabled */
+ if (!overlay_state->crtc)
+ return 0;
+
+ /* find the primary plane for the CRTC that the overlay is enabled on */
+ primary_state = drm_atomic_get_plane_state(state, overlay_state->crtc->primary);
+ if (IS_ERR(primary_state))
+ return PTR_ERR(primary_state);
+
+ /* check if primary plane is enabled */
+ if (!primary_state->crtc)
+ return 0;
+
+ /* Perform the bounds check to ensure the overlay plane covers the primary */
+ if (primary_state->crtc_x < overlay_state->crtc_x ||
+ primary_state->crtc_y < overlay_state->crtc_y ||
+ primary_state->crtc_x + primary_state->crtc_w > overlay_state->crtc_x + overlay_state->crtc_w ||
+ primary_state->crtc_y + primary_state->crtc_h > overlay_state->crtc_y + overlay_state->crtc_h) {
+ DRM_DEBUG_ATOMIC("Overlay plane is enabled with hardware cursor but does not fully cover primary plane\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
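Editor's note: validate_overlay(), added above, rejects an atomic state in which an enabled overlay plane fails to fully cover the primary plane on the same CRTC — the situation its debug message ties to hardware-cursor handling. The bounds test itself is plain rectangle containment in CRTC coordinates; a standalone sketch:

#include <stdbool.h>
#include <stdio.h>

struct rect { int x, y, w, h; };

/* True when `inner` lies entirely within `outer`. */
static bool rect_contains(const struct rect *outer, const struct rect *inner)
{
    return inner->x >= outer->x &&
           inner->y >= outer->y &&
           inner->x + inner->w <= outer->x + outer->w &&
           inner->y + inner->h <= outer->y + outer->h;
}

int main(void)
{
    struct rect overlay = { 0, 0, 1920, 1080 };
    struct rect primary = { 100, 100, 1920, 1080 };   /* sticks out */

    printf("primary covered: %d\n", rect_contains(&overlay, &primary));
    return 0;
}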
/**
* amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
* @dev: The DRM device
@@ -9923,6 +10107,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
goto fail;
}
+ ret = validate_overlay(state);
+ if (ret)
+ goto fail;
+
/* Add new/modified planes */
for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
ret = dm_update_plane_state(dc, state, plane,
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 8f98d44490aa..b2f2ccfc20bb 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2015 Advanced Micro Devices, Inc.
+ * Copyright (C) 2015-2020 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -66,6 +66,7 @@ struct dc_plane_state;
struct common_irq_params {
struct amdgpu_device *adev;
enum dc_irq_source irq_src;
+ atomic64_t previous_timestamp;
};
/**
@@ -339,6 +340,15 @@ struct amdgpu_display_manager {
struct common_irq_params
vupdate_params[DC_IRQ_SOURCE_VUPDATE6 - DC_IRQ_SOURCE_VUPDATE1 + 1];
+ /**
+ * @dmub_trace_params:
+ *
+ * DMUB trace event IRQ parameters, passed to registered handlers when
+ * triggered.
+ */
+ struct common_irq_params
+ dmub_trace_params[1];
+
spinlock_t irq_handler_list_table_lock;
struct backlight_device *backlight_dev;
@@ -385,6 +395,11 @@ struct amdgpu_display_manager {
#endif
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+ /**
+ * @crc_rd_wrk:
+ *
+ * Work to be executed in a separate thread to communicate with PSP.
+ */
struct crc_rd_work *crc_rd_wrk;
#endif
@@ -395,6 +410,7 @@ struct amdgpu_display_manager {
*/
struct amdgpu_encoder mst_encoders[AMDGPU_DM_MAX_CRTC];
bool force_timing_sync;
+ bool disable_hpd_irq;
bool dmcub_trace_event_en;
/**
* @da_list:
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
index c6d6baab106e..5cd788b20c21 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
@@ -307,7 +307,7 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
goto cleanup;
}
- aux = &aconn->dm_dp_aux.aux;
+ aux = (aconn->port) ? &aconn->port->aux : &aconn->dm_dp_aux.aux;
if (!aux) {
DRM_DEBUG_DRIVER("No dp aux for amd connector\n");
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index 927de7678a4f..1b6b15708b96 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -34,6 +34,7 @@
#include "resource.h"
#include "dsc.h"
#include "dc_link_dp.h"
+#include "link_hwss.h"
#include "dc/dc_dmub_srv.h"
struct dmub_debugfs_trace_header {
@@ -149,7 +150,7 @@ static int parse_write_buffer_into_params(char *wr_buf, uint32_t wr_buf_size,
*
* --- to get dp configuration
*
- * cat link_settings
+ * cat /sys/kernel/debug/dri/0/DP-x/link_settings
*
* It will list current, verified, reported, preferred dp configuration.
* current -- for current video mode
@@ -162,7 +163,7 @@ static int parse_write_buffer_into_params(char *wr_buf, uint32_t wr_buf_size,
* echo <lane_count> <link_rate> > link_settings
*
* for example, to force to 2 lane, 2.7GHz,
- * echo 4 0xa > link_settings
+ * echo 4 0xa > /sys/kernel/debug/dri/0/DP-x/link_settings
*
* spread_spectrum could not be changed dynamically.
*
@@ -170,7 +171,7 @@ static int parse_write_buffer_into_params(char *wr_buf, uint32_t wr_buf_size,
* done. please check link settings after force operation to see if HW get
* programming.
*
- * cat link_settings
+ * cat /sys/kernel/debug/dri/0/DP-x/link_settings
*
* check current and preferred settings.
*
@@ -246,7 +247,6 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf,
{
struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
struct dc_link *link = connector->dc_link;
- struct dc *dc = (struct dc *)link->dc;
struct dc_link_settings prefer_link_settings;
char *wr_buf = NULL;
const uint32_t wr_buf_size = 40;
@@ -254,7 +254,7 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf,
int max_param_num = 2;
uint8_t param_nums = 0;
long param[2];
- bool valid_input = false;
+ bool valid_input = true;
if (size == 0)
return -EINVAL;
@@ -281,9 +281,9 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf,
case LANE_COUNT_ONE:
case LANE_COUNT_TWO:
case LANE_COUNT_FOUR:
- valid_input = true;
break;
default:
+ valid_input = false;
break;
}
@@ -293,9 +293,9 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf,
case LINK_RATE_RBR2:
case LINK_RATE_HIGH2:
case LINK_RATE_HIGH3:
- valid_input = true;
break;
default:
+ valid_input = false;
break;
}
@@ -309,10 +309,11 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf,
* spread spectrum will not be changed
*/
prefer_link_settings.link_spread = link->cur_link_settings.link_spread;
+ prefer_link_settings.use_link_rate_set = false;
prefer_link_settings.lane_count = param[0];
prefer_link_settings.link_rate = param[1];
- dc_link_set_preferred_link_settings(dc, &prefer_link_settings, link);
+ dp_retrain_link_dp_test(link, &prefer_link_settings, false);
kfree(wr_buf);
return size;
@@ -399,6 +400,70 @@ static ssize_t dp_phy_settings_read(struct file *f, char __user *buf,
return result;
}
+static int dp_lttpr_status_show(struct seq_file *m, void *d)
+{
+ char *data;
+ struct amdgpu_dm_connector *connector = file_inode(m->file)->i_private;
+ struct dc_link *link = connector->dc_link;
+ uint32_t read_size = 1;
+ uint8_t repeater_count = 0;
+
+ data = kzalloc(read_size, GFP_KERNEL);
+ if (!data)
+ return 0;
+
+ dm_helpers_dp_read_dpcd(link->ctx, link, 0xF0002, data, read_size);
+
+ switch ((uint8_t)*data) {
+ case 0x80:
+ repeater_count = 1;
+ break;
+ case 0x40:
+ repeater_count = 2;
+ break;
+ case 0x20:
+ repeater_count = 3;
+ break;
+ case 0x10:
+ repeater_count = 4;
+ break;
+ case 0x8:
+ repeater_count = 5;
+ break;
+ case 0x4:
+ repeater_count = 6;
+ break;
+ case 0x2:
+ repeater_count = 7;
+ break;
+ case 0x1:
+ repeater_count = 8;
+ break;
+ case 0x0:
+ repeater_count = 0;
+ break;
+ default:
+ repeater_count = (uint8_t)*data;
+ break;
+ }
+
+ seq_printf(m, "phy repeater count: %d\n", repeater_count);
+
+ dm_helpers_dp_read_dpcd(link->ctx, link, 0xF0003, data, read_size);
+
+ if ((uint8_t)*data == 0x55)
+ seq_printf(m, "phy repeater mode: transparent\n");
+ else if ((uint8_t)*data == 0xAA)
+ seq_printf(m, "phy repeater mode: non-transparent\n");
+ else if ((uint8_t)*data == 0x00)
+ seq_printf(m, "phy repeater mode: non lttpr\n");
+ else
+ seq_printf(m, "phy repeater mode: read error\n");
+
+ kfree(data);
+ return 0;
+}
+
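Editor's note: dp_lttpr_status_show() decodes DPCD register 0xF0002 (PHY_REPEATER_CNT), whose encoding is one-hot: 0x80 means one link-training-tunable PHY repeater, 0x40 two, down to 0x01 for eight. The switch above spells the encoding out; the same decode can be derived from the bit position, as in this sketch (the raw pass-through on malformed values mirrors the default case):

#include <stdint.h>
#include <stdio.h>

static uint8_t decode_repeater_count(uint8_t raw)
{
    int shifts = 0;

    /* Anything other than a single set bit passes through raw,
     * like the default: case above (0x0 stays 0). */
    if (raw == 0 || (raw & (raw - 1)) != 0)
        return raw;

    while (!(raw & 0x80)) {     /* 0x80 -> 1, 0x40 -> 2, ... 0x01 -> 8 */
        raw <<= 1;
        shifts++;
    }
    return (uint8_t)(shifts + 1);
}

int main(void)
{
    printf("%u %u %u\n",
           decode_repeater_count(0x80),   /* 1 */
           decode_repeater_count(0x01),   /* 8 */
           decode_repeater_count(0x00));  /* 0 */
    return 0;
}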
static ssize_t dp_phy_settings_write(struct file *f, const char __user *buf,
size_t size, loff_t *pos)
{
@@ -2300,6 +2365,7 @@ DEFINE_SHOW_ATTRIBUTE(dp_dsc_fec_support);
DEFINE_SHOW_ATTRIBUTE(dmub_fw_state);
DEFINE_SHOW_ATTRIBUTE(dmub_tracebuffer);
DEFINE_SHOW_ATTRIBUTE(output_bpc);
+DEFINE_SHOW_ATTRIBUTE(dp_lttpr_status);
#ifdef CONFIG_DRM_AMD_DC_HDCP
DEFINE_SHOW_ATTRIBUTE(hdcp_sink_capability);
#endif
@@ -2420,6 +2486,7 @@ static const struct {
} dp_debugfs_entries[] = {
{"link_settings", &dp_link_settings_debugfs_fops},
{"phy_settings", &dp_phy_settings_debugfs_fop},
+ {"lttpr_status", &dp_lttpr_status_fops},
{"test_pattern", &dp_phy_test_pattern_fops},
#ifdef CONFIG_DRM_AMD_DC_HDCP
{"hdcp_sink_capability", &hdcp_sink_capability_fops},
@@ -2900,6 +2967,10 @@ static int mst_topo_show(struct seq_file *m, void *unused)
aconnector = to_amdgpu_dm_connector(connector);
+ /* Ensure we're only dumping the topology of a root mst node */
+ if (!aconnector->mst_mgr.mst_state)
+ continue;
+
seq_printf(m, "\nMST topology for connector %d\n", aconnector->connector_id);
drm_dp_mst_dump_topology(m, &aconnector->mst_mgr);
}
@@ -2909,7 +2980,73 @@ static int mst_topo_show(struct seq_file *m, void *unused)
}
/*
- * Sets the force_timing_sync debug optino from the given string.
+ * Triggers HPD (hot-plug detect) for MST topologies.
+ * All connected connectors will be rediscovered and restarted as needed if a value of 1 is sent.
+ * All topologies will be disconnected if a value of 0 is set.
+ * Usage to enable topologies: echo 1 > /sys/kernel/debug/dri/0/amdgpu_dm_trigger_hpd_mst
+ * Usage to disable topologies: echo 0 > /sys/kernel/debug/dri/0/amdgpu_dm_trigger_hpd_mst
+ */
+static int trigger_hpd_mst_set(void *data, u64 val)
+{
+ struct amdgpu_device *adev = data;
+ struct drm_device *dev = adev_to_drm(adev);
+ struct drm_connector_list_iter iter;
+ struct amdgpu_dm_connector *aconnector;
+ struct drm_connector *connector;
+ struct dc_link *link = NULL;
+
+ if (val == 1) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
+ aconnector = to_amdgpu_dm_connector(connector);
+ if (aconnector->dc_link->type == dc_connection_mst_branch &&
+ aconnector->mst_mgr.aux) {
+ dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
+ drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
+ }
+ }
+ } else if (val == 0) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
+ aconnector = to_amdgpu_dm_connector(connector);
+ if (!aconnector->dc_link)
+ continue;
+
+ if (!aconnector->mst_port)
+ continue;
+
+ link = aconnector->dc_link;
+ dp_receiver_power_ctrl(link, false);
+ drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_port->mst_mgr, false);
+ link->mst_stream_alloc_table.stream_count = 0;
+ memset(link->mst_stream_alloc_table.stream_allocations, 0,
+ sizeof(link->mst_stream_alloc_table.stream_allocations));
+ }
+ } else {
+ return 0;
+ }
+ drm_kms_helper_hotplug_event(dev);
+
+ return 0;
+}
+
+/*
+ * The interface doesn't need a get function, so it always returns
+ * zero.
+ * Usage: cat /sys/kernel/debug/dri/0/amdgpu_dm_trigger_hpd_mst
+ */
+static int trigger_hpd_mst_get(void *data, u64 *val)
+{
+ *val = 0;
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(trigger_hpd_mst_ops, trigger_hpd_mst_get,
+ trigger_hpd_mst_set, "%llu\n");
+
+
+/*
+ * Sets the force_timing_sync debug option from the given string.
* All connected displays will be force synchronized immediately.
* Usage: echo 1 > /sys/kernel/debug/dri/0/amdgpu_dm_force_timing_sync
*/
@@ -2940,6 +3077,37 @@ static int force_timing_sync_get(void *data, u64 *val)
DEFINE_DEBUGFS_ATTRIBUTE(force_timing_sync_ops, force_timing_sync_get,
force_timing_sync_set, "%llu\n");
+
+/*
+ * Disables all HPD and HPD RX interrupt handling in the
+ * driver when set to 1. Default is 0.
+ */
+static int disable_hpd_set(void *data, u64 val)
+{
+ struct amdgpu_device *adev = data;
+
+ adev->dm.disable_hpd_irq = (bool)val;
+
+ return 0;
+}
+
+
+/*
+ * Returns 1 if HPD and HPD RX interrupt handling is disabled,
+ * 0 otherwise.
+ */
+static int disable_hpd_get(void *data, u64 *val)
+{
+ struct amdgpu_device *adev = data;
+
+ *val = adev->dm.disable_hpd_irq;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(disable_hpd_ops, disable_hpd_get,
+ disable_hpd_set, "%llu\n");
+
/*
* Sets the DC visual confirm debug option from the given string.
* Example usage: echo 1 > /sys/kernel/debug/dri/0/amdgpu_visual_confirm
@@ -2972,6 +3140,64 @@ DEFINE_SHOW_ATTRIBUTE(mst_topo);
DEFINE_DEBUGFS_ATTRIBUTE(visual_confirm_fops, visual_confirm_get,
visual_confirm_set, "%llu\n");
+/*
+ * Dumps the DCC_EN bit for each pipe.
+ * Example usage: cat /sys/kernel/debug/dri/0/amdgpu_dm_dcc_en
+ */
+static ssize_t dcc_en_bits_read(
+ struct file *f,
+ char __user *buf,
+ size_t size,
+ loff_t *pos)
+{
+ struct amdgpu_device *adev = file_inode(f)->i_private;
+ struct dc *dc = adev->dm.dc;
+ char *rd_buf = NULL;
+ const uint32_t rd_buf_size = 32;
+ uint32_t result = 0;
+ int offset = 0;
+ int num_pipes = dc->res_pool->pipe_count;
+ int *dcc_en_bits;
+ int i, r;
+
+ dcc_en_bits = kcalloc(num_pipes, sizeof(int), GFP_KERNEL);
+ if (!dcc_en_bits)
+ return -ENOMEM;
+
+ if (!dc->hwss.get_dcc_en_bits) {
+ kfree(dcc_en_bits);
+ return 0;
+ }
+
+ dc->hwss.get_dcc_en_bits(dc, dcc_en_bits);
+
+ rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL);
+ if (!rd_buf)
+ return -ENOMEM;
+
+ for (i = 0; i < num_pipes; i++)
+ offset += snprintf(rd_buf + offset, rd_buf_size - offset,
+ "%d ", dcc_en_bits[i]);
+ rd_buf[strlen(rd_buf)] = '\n';
+
+ kfree(dcc_en_bits);
+
+ while (size) {
+ if (*pos >= rd_buf_size)
+ break;
+ r = put_user(*(rd_buf + result), buf);
+ if (r)
+ return r; /* r = -EFAULT */
+ buf += 1;
+ size -= 1;
+ *pos += 1;
+ result += 1;
+ }
+
+ kfree(rd_buf);
+ return result;
+}
+
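Editor's note: dcc_en_bits_read(), added above, collects one DCC-enable flag per pipe, renders them space-separated into a short buffer, and hands the buffer to userspace one byte at a time via put_user(). The formatting half — snprintf with a running offset into a bounded buffer — in a standalone sketch:

#include <stdio.h>

/* Format `n` flags as "b0 b1 ... bn-1\n" into dst, returning the length
 * written.  Stops early rather than overflowing the buffer. */
static size_t format_en_bits(char *dst, size_t cap, const int *bits, size_t n)
{
    size_t off = 0, i;

    for (i = 0; i < n && off < cap; i++) {
        int r = snprintf(dst + off, cap - off, "%d ", bits[i]);

        if (r < 0 || (size_t)r >= cap - off)
            break;                  /* truncated: keep what fits */
        off += (size_t)r;
    }
    if (off > 0)
        dst[off - 1] = '\n';        /* replace the trailing space */
    return off;
}

int main(void)
{
    int bits[4] = { 1, 0, 1, 1 };
    char buf[32];
    size_t len = format_en_bits(buf, sizeof(buf), bits, 4);

    fwrite(buf, 1, len, stdout);
    return 0;
}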
void dtn_debugfs_init(struct amdgpu_device *adev)
{
static const struct file_operations dtn_log_fops = {
@@ -2980,6 +3206,11 @@ void dtn_debugfs_init(struct amdgpu_device *adev)
.write = dtn_log_write,
.llseek = default_llseek
};
+ static const struct file_operations dcc_en_bits_fops = {
+ .owner = THIS_MODULE,
+ .read = dcc_en_bits_read,
+ .llseek = default_llseek
+ };
struct drm_minor *minor = adev_to_drm(adev)->primary;
struct dentry *root = minor->debugfs_root;
@@ -3007,4 +3238,14 @@ void dtn_debugfs_init(struct amdgpu_device *adev)
debugfs_create_file_unsafe("amdgpu_dm_dmcub_trace_event_en", 0644, root,
adev, &dmcub_trace_event_state_fops);
+
+ debugfs_create_file_unsafe("amdgpu_dm_trigger_hpd_mst", 0644, root,
+ adev, &trigger_hpd_mst_ops);
+
+ debugfs_create_file_unsafe("amdgpu_dm_dcc_en", 0644, root, adev,
+ &dcc_en_bits_fops);
+
+ debugfs_create_file_unsafe("amdgpu_dm_disable_hpd", 0644, root, adev,
+ &disable_hpd_ops);
+
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
index 0cdbfcd475ec..616f5b1ea3a8 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
@@ -191,7 +191,7 @@ void hdcp_update_display(struct hdcp_workqueue *hdcp_work,
psp_set_srm(hdcp_work->hdcp.config.psp.handle, hdcp_work->srm, hdcp_work->srm_size,
&hdcp_work->srm_version);
- display->adjust.disable = 0;
+ display->adjust.disable = MOD_HDCP_DISPLAY_NOT_DISABLE;
if (content_type == DRM_MODE_HDCP_CONTENT_TYPE0) {
hdcp_w->link.adjust.hdcp1.disable = 0;
hdcp_w->link.adjust.hdcp2.force_type = MOD_HDCP_FORCE_TYPE_0;
@@ -203,7 +203,7 @@ void hdcp_update_display(struct hdcp_workqueue *hdcp_work,
schedule_delayed_work(&hdcp_w->property_validate_dwork,
msecs_to_jiffies(DRM_HDCP_CHECK_PERIOD_MS));
} else {
- display->adjust.disable = 1;
+ display->adjust.disable = MOD_HDCP_DISPLAY_DISABLE_AUTHENTICATION;
hdcp_w->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
cancel_delayed_work(&hdcp_w->property_validate_dwork);
}
@@ -434,6 +434,7 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
int link_index = aconnector->dc_link->link_index;
struct mod_hdcp_display *display = &hdcp_work[link_index].display;
struct mod_hdcp_link *link = &hdcp_work[link_index].link;
+ struct drm_connector_state *conn_state;
if (config->dpms_off) {
hdcp_remove_display(hdcp_work, link_index, aconnector);
@@ -456,11 +457,16 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
link->dp.rev = aconnector->dc_link->dpcd_caps.dpcd_rev.raw;
link->dp.assr_enabled = config->assr_enabled;
link->dp.mst_enabled = config->mst_enabled;
- display->adjust.disable = 1;
+ display->adjust.disable = MOD_HDCP_DISPLAY_DISABLE_AUTHENTICATION;
link->adjust.auth_delay = 3;
link->adjust.hdcp1.disable = 0;
+ conn_state = aconnector->base.state;
- hdcp_update_display(hdcp_work, link_index, aconnector, DRM_MODE_HDCP_CONTENT_TYPE0, false);
+ pr_debug("[HDCP_DM] display %d, CP %d, type %d\n", aconnector->base.index,
+ (!!aconnector->base.state) ? aconnector->base.state->content_protection : -1,
+ (!!aconnector->base.state) ? aconnector->base.state->hdcp_content_type : -1);
+
+ hdcp_update_display(hdcp_work, link_index, aconnector, conn_state->hdcp_content_type, false);
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index 09bdffb3a09e..e8b325a828c1 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -700,6 +700,39 @@ void dm_helpers_free_gpu_mem(
bool dm_helpers_dmub_outbox0_interrupt_control(struct dc_context *ctx, bool enable)
{
- // TODO
- return true;
+ enum dc_irq_source irq_source;
+ bool ret;
+
+ irq_source = DC_IRQ_SOURCE_DMCUB_OUTBOX0;
+
+ ret = dc_interrupt_set(ctx->dc, irq_source, enable);
+
+ DRM_DEBUG_DRIVER("Dmub trace irq %sabling: r=%d\n",
+ enable ? "en" : "dis", ret);
+ return ret;
+}
+
+void dm_helpers_mst_enable_stream_features(const struct dc_stream_state *stream)
+{
+ /* TODO: virtual DPCD */
+ struct dc_link *link = stream->link;
+ union down_spread_ctrl old_downspread;
+ union down_spread_ctrl new_downspread;
+
+ if (link->aux_access_disabled)
+ return;
+
+ if (!dm_helpers_dp_read_dpcd(link->ctx, link, DP_DOWNSPREAD_CTRL,
+ &old_downspread.raw,
+ sizeof(old_downspread)))
+ return;
+
+ new_downspread.raw = old_downspread.raw;
+ new_downspread.bits.IGNORE_MSA_TIMING_PARAM =
+ (stream->ignore_msa_timing_param) ? 1 : 0;
+
+ if (new_downspread.raw != old_downspread.raw)
+ dm_helpers_dp_write_dpcd(link->ctx, link, DP_DOWNSPREAD_CTRL,
+ &new_downspread.raw,
+ sizeof(new_downspread));
}
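Editor's note: dm_helpers_mst_enable_stream_features() is a read-modify-write over the AUX channel: read DP_DOWNSPREAD_CTRL (DPCD 0x107), set IGNORE_MSA_TIMING_PARAM (bit 7) to match the stream, and write back only if the byte actually changed, sparing a redundant AUX transaction. The shape of that helper, with hypothetical register accessors standing in for dm_helpers_dp_read_dpcd()/dm_helpers_dp_write_dpcd():

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef bool (*dpcd_read_fn)(uint32_t addr, uint8_t *val);
typedef bool (*dpcd_write_fn)(uint32_t addr, uint8_t val);

static void update_bit_if_changed(dpcd_read_fn read_reg,
                                  dpcd_write_fn write_reg,
                                  uint32_t addr, uint8_t mask, bool set)
{
    uint8_t old_val, new_val;

    if (!read_reg(addr, &old_val))
        return;                     /* bail on AUX failure, like the helper */

    new_val = set ? (uint8_t)(old_val | mask) : (uint8_t)(old_val & ~mask);
    if (new_val != old_val)         /* skip the redundant AUX write */
        write_reg(addr, new_val);
}

/* Toy backing store so the sketch runs standalone. */
static uint8_t regs[0x200];
static bool toy_read(uint32_t a, uint8_t *v) { *v = regs[a]; return true; }
static bool toy_write(uint32_t a, uint8_t v) { regs[a] = v; return true; }

int main(void)
{
    update_bit_if_changed(toy_read, toy_write, 0x107, 0x80, true);
    printf("DPCD 0x107 = 0x%02x\n", regs[0x107]);
    return 0;
}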
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
index d3c687d07ee6..b3ed7e777720 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
@@ -73,6 +73,7 @@
* @handler_arg: Argument passed to the handler when triggered
* @dm: DM which this handler belongs to
* @irq_source: DC interrupt source that this handler is registered for
+ * @work: work struct used to defer handler execution to a workqueue
*/
struct amdgpu_dm_irq_handler_data {
struct list_head list;
@@ -184,6 +185,55 @@ static struct list_head *remove_irq_handler(struct amdgpu_device *adev,
return hnd_list;
}
+/**
+ * unregister_all_irq_handlers() - Cleans up handlers from the DM IRQ table
+ * @adev: The base driver device containing the DM device
+ *
+ * Go through low and high context IRQ tables and deallocate handlers.
+ */
+static void unregister_all_irq_handlers(struct amdgpu_device *adev)
+{
+ struct list_head *hnd_list_low;
+ struct list_head *hnd_list_high;
+ struct list_head *entry, *tmp;
+ struct amdgpu_dm_irq_handler_data *handler;
+ unsigned long irq_table_flags;
+ int i;
+
+ DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
+
+ for (i = 0; i < DAL_IRQ_SOURCES_NUMBER; i++) {
+ hnd_list_low = &adev->dm.irq_handler_list_low_tab[i];
+ hnd_list_high = &adev->dm.irq_handler_list_high_tab[i];
+
+ list_for_each_safe(entry, tmp, hnd_list_low) {
+
+ handler = list_entry(entry, struct amdgpu_dm_irq_handler_data,
+ list);
+
+ if (handler == NULL || handler->handler == NULL)
+ continue;
+
+ list_del(&handler->list);
+ kfree(handler);
+ }
+
+ list_for_each_safe(entry, tmp, hnd_list_high) {
+
+ handler = list_entry(entry, struct amdgpu_dm_irq_handler_data,
+ list);
+
+ if (handler == NULL || handler->handler == NULL)
+ continue;
+
+ list_del(&handler->list);
+ kfree(handler);
+ }
+ }
+
+ DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
+}
+
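Editor's note: unregister_all_irq_handlers() walks each handler list with list_for_each_safe(), which caches the next pointer before the body runs so the current node can be unlinked and freed mid-iteration. The same pattern in a self-contained form, with a tiny circular doubly-linked list in place of the kernel's <linux/list.h>:

#include <stdio.h>
#include <stdlib.h>

struct node {
    struct node *next, *prev;
    int payload;
};

static void list_init(struct node *head) { head->next = head->prev = head; }

static void list_add_tail(struct node *head, struct node *n)
{
    n->prev = head->prev;
    n->next = head;
    head->prev->next = n;
    head->prev = n;
}

static void list_del(struct node *n)
{
    n->prev->next = n->next;
    n->next->prev = n->prev;
}

/* Free every node: `tmp` holds the successor before the body
 * invalidates `entry`, exactly what list_for_each_safe() does. */
static void free_all(struct node *head)
{
    struct node *entry, *tmp;

    for (entry = head->next, tmp = entry->next;
         entry != head;
         entry = tmp, tmp = entry->next) {
        list_del(entry);
        free(entry);
    }
}

int main(void)
{
    struct node head;
    int i;

    list_init(&head);
    for (i = 0; i < 3; i++) {
        struct node *n = calloc(1, sizeof(*n));

        n->payload = i;
        list_add_tail(&head, n);
    }
    free_all(&head);
    printf("list empty: %d\n", head.next == &head);
    return 0;
}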
static bool
validate_irq_registration_params(struct dc_interrupt_params *int_params,
void (*ih)(void *))
@@ -414,6 +464,8 @@ void amdgpu_dm_irq_fini(struct amdgpu_device *adev)
}
}
}
+ /* Deallocate handlers from the table. */
+ unregister_all_irq_handlers(adev);
}
int amdgpu_dm_irq_suspend(struct amdgpu_device *adev)
@@ -731,6 +783,18 @@ static int amdgpu_dm_set_vupdate_irq_state(struct amdgpu_device *adev,
__func__);
}
+static int amdgpu_dm_set_dmub_trace_irq_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned int type,
+ enum amdgpu_interrupt_state state)
+{
+ enum dc_irq_source irq_source = DC_IRQ_SOURCE_DMCUB_OUTBOX0;
+ bool st = (state == AMDGPU_IRQ_STATE_ENABLE);
+
+ dc_interrupt_set(adev->dm.dc, irq_source, st);
+ return 0;
+}
+
static const struct amdgpu_irq_src_funcs dm_crtc_irq_funcs = {
.set = amdgpu_dm_set_crtc_irq_state,
.process = amdgpu_dm_irq_handler,
@@ -746,6 +810,11 @@ static const struct amdgpu_irq_src_funcs dm_vupdate_irq_funcs = {
.process = amdgpu_dm_irq_handler,
};
+static const struct amdgpu_irq_src_funcs dm_dmub_trace_irq_funcs = {
+ .set = amdgpu_dm_set_dmub_trace_irq_state,
+ .process = amdgpu_dm_irq_handler,
+};
+
static const struct amdgpu_irq_src_funcs dm_pageflip_irq_funcs = {
.set = amdgpu_dm_set_pflip_irq_state,
.process = amdgpu_dm_irq_handler,
@@ -768,6 +837,9 @@ void amdgpu_dm_set_irq_funcs(struct amdgpu_device *adev)
adev->vupdate_irq.num_types = adev->mode_info.num_crtc;
adev->vupdate_irq.funcs = &dm_vupdate_irq_funcs;
+ adev->dmub_trace_irq.num_types = 1;
+ adev->dmub_trace_irq.funcs = &dm_dmub_trace_irq_funcs;
+
adev->pageflip_irq.num_types = adev->mode_info.num_crtc;
adev->pageflip_irq.funcs = &dm_pageflip_irq_funcs;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 73cdb9fe981a..9b221db526dc 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -229,6 +229,11 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
(aconnector->edid->extensions + 1) * EDID_LENGTH,
&init_params);
+ if (!dc_sink) {
+ DRM_ERROR("Unable to add a remote sink\n");
+ return 0;
+ }
+
dc_sink->priv = aconnector;
/* dc_link_add_remote_sink returns a new reference */
aconnector->dc_sink = dc_sink;
@@ -745,8 +750,8 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
if (!dc_dsc_compute_bandwidth_range(
stream->sink->ctx->dc->res_pool->dscs[0],
stream->sink->ctx->dc->debug.dsc_min_slice_height_override,
- dsc_policy.min_target_bpp,
- dsc_policy.max_target_bpp,
+ dsc_policy.min_target_bpp * 16,
+ dsc_policy.max_target_bpp * 16,
&stream->sink->dsc_caps.dsc_dec_caps,
&stream->timing, &params[count].bw_range))
params[count].bw_range.stream_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
index 607ec0999445..eba270121698 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
@@ -32,15 +32,12 @@
#include "amdgpu_dm_irq.h"
#include "amdgpu_pm.h"
#include "dm_pp_smu.h"
-#include "amdgpu_smu.h"
-
bool dm_pp_apply_display_requirements(
const struct dc_context *ctx,
const struct dm_pp_display_configuration *pp_display_cfg)
{
struct amdgpu_device *adev = ctx->driver_context;
- struct smu_context *smu = &adev->smu;
int i;
if (adev->pm.dpm_enabled) {
@@ -106,9 +103,6 @@ bool dm_pp_apply_display_requirements(
adev->powerplay.pp_funcs->display_configuration_change(
adev->powerplay.pp_handle,
&adev->pm.pm_display_cfg);
- else if (adev->smu.ppt_funcs)
- smu_display_configuration_change(smu,
- &adev->pm.pm_display_cfg);
amdgpu_pm_compute_clocks(adev);
}
@@ -148,36 +142,6 @@ static void get_default_clock_levels(
}
}
-static enum smu_clk_type dc_to_smu_clock_type(
- enum dm_pp_clock_type dm_pp_clk_type)
-{
- enum smu_clk_type smu_clk_type = SMU_CLK_COUNT;
-
- switch (dm_pp_clk_type) {
- case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
- smu_clk_type = SMU_DISPCLK;
- break;
- case DM_PP_CLOCK_TYPE_ENGINE_CLK:
- smu_clk_type = SMU_GFXCLK;
- break;
- case DM_PP_CLOCK_TYPE_MEMORY_CLK:
- smu_clk_type = SMU_MCLK;
- break;
- case DM_PP_CLOCK_TYPE_DCEFCLK:
- smu_clk_type = SMU_DCEFCLK;
- break;
- case DM_PP_CLOCK_TYPE_SOCCLK:
- smu_clk_type = SMU_SOCCLK;
- break;
- default:
- DRM_ERROR("DM_PPLIB: invalid clock type: %d!\n",
- dm_pp_clk_type);
- break;
- }
-
- return smu_clk_type;
-}
-
static enum amd_pp_clock_type dc_to_pp_clock_type(
enum dm_pp_clock_type dm_pp_clk_type)
{
@@ -417,14 +381,8 @@ bool dm_pp_get_clock_levels_by_type_with_latency(
&pp_clks);
if (ret)
return false;
- } else if (adev->smu.ppt_funcs && adev->smu.ppt_funcs->get_clock_by_type_with_latency) {
- if (smu_get_clock_by_type_with_latency(&adev->smu,
- dc_to_smu_clock_type(clk_type),
- &pp_clks))
- return false;
}
-
pp_to_dc_clock_levels_with_latency(&pp_clks, clk_level_info, clk_type);
return true;
@@ -502,10 +460,6 @@ bool dm_pp_apply_clock_for_voltage_request(
ret = adev->powerplay.pp_funcs->display_clock_voltage_request(
adev->powerplay.pp_handle,
&pp_clock_request);
- else if (adev->smu.ppt_funcs &&
- adev->smu.ppt_funcs->display_clock_voltage_request)
- ret = smu_display_clock_voltage_request(&adev->smu,
- &pp_clock_request);
if (ret)
return false;
return true;
@@ -655,8 +609,11 @@ static enum pp_smu_status pp_nv_set_wm_ranges(struct pp_smu *pp,
{
const struct dc_context *ctx = pp->dm;
struct amdgpu_device *adev = ctx->driver_context;
+ void *pp_handle = adev->powerplay.pp_handle;
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
- smu_set_watermarks_for_clock_ranges(&adev->smu, ranges);
+ if (pp_funcs && pp_funcs->set_watermarks_for_clocks_ranges)
+ pp_funcs->set_watermarks_for_clocks_ranges(pp_handle, ranges);
return PP_SMU_RESULT_OK;
}
@@ -665,13 +622,14 @@ static enum pp_smu_status pp_nv_set_display_count(struct pp_smu *pp, int count)
{
const struct dc_context *ctx = pp->dm;
struct amdgpu_device *adev = ctx->driver_context;
- struct smu_context *smu = &adev->smu;
+ void *pp_handle = adev->powerplay.pp_handle;
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
- if (!smu->ppt_funcs)
+ if (!pp_funcs || !pp_funcs->set_active_display_count)
return PP_SMU_RESULT_UNSUPPORTED;
/* 0: successful or smu.ppt_funcs->set_display_count = NULL; 1: fail */
- if (smu_set_display_count(smu, count))
+ if (pp_funcs->set_active_display_count(pp_handle, count))
return PP_SMU_RESULT_FAIL;
return PP_SMU_RESULT_OK;
@@ -682,13 +640,14 @@ pp_nv_set_min_deep_sleep_dcfclk(struct pp_smu *pp, int mhz)
{
const struct dc_context *ctx = pp->dm;
struct amdgpu_device *adev = ctx->driver_context;
- struct smu_context *smu = &adev->smu;
+ void *pp_handle = adev->powerplay.pp_handle;
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
- if (!smu->ppt_funcs)
+ if (!pp_funcs || !pp_funcs->set_min_deep_sleep_dcefclk)
return PP_SMU_RESULT_UNSUPPORTED;
/* 0: successful or smu.ppt_funcs->set_deep_sleep_dcefclk = NULL;1: fail */
- if (smu_set_deep_sleep_dcefclk(smu, mhz))
+ if (pp_funcs->set_min_deep_sleep_dcefclk(pp_handle, mhz))
return PP_SMU_RESULT_FAIL;
return PP_SMU_RESULT_OK;
@@ -699,10 +658,11 @@ static enum pp_smu_status pp_nv_set_hard_min_dcefclk_by_freq(
{
const struct dc_context *ctx = pp->dm;
struct amdgpu_device *adev = ctx->driver_context;
- struct smu_context *smu = &adev->smu;
+ void *pp_handle = adev->powerplay.pp_handle;
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
struct pp_display_clock_request clock_req;
- if (!smu->ppt_funcs)
+ if (!pp_funcs || !pp_funcs->display_clock_voltage_request)
return PP_SMU_RESULT_UNSUPPORTED;
clock_req.clock_type = amd_pp_dcef_clock;
@@ -711,7 +671,7 @@ static enum pp_smu_status pp_nv_set_hard_min_dcefclk_by_freq(
/* 0: successful or smu.ppt_funcs->display_clock_voltage_request = NULL
* 1: fail
*/
- if (smu_display_clock_voltage_request(smu, &clock_req))
+ if (pp_funcs->display_clock_voltage_request(pp_handle, &clock_req))
return PP_SMU_RESULT_FAIL;
return PP_SMU_RESULT_OK;
@@ -722,10 +682,11 @@ pp_nv_set_hard_min_uclk_by_freq(struct pp_smu *pp, int mhz)
{
const struct dc_context *ctx = pp->dm;
struct amdgpu_device *adev = ctx->driver_context;
- struct smu_context *smu = &adev->smu;
+ void *pp_handle = adev->powerplay.pp_handle;
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
struct pp_display_clock_request clock_req;
- if (!smu->ppt_funcs)
+ if (!pp_funcs || !pp_funcs->display_clock_voltage_request)
return PP_SMU_RESULT_UNSUPPORTED;
clock_req.clock_type = amd_pp_mem_clock;
@@ -734,7 +695,7 @@ pp_nv_set_hard_min_uclk_by_freq(struct pp_smu *pp, int mhz)
/* 0: successful or smu.ppt_funcs->display_clock_voltage_request = NULL
* 1: fail
*/
- if (smu_display_clock_voltage_request(smu, &clock_req))
+ if (pp_funcs->display_clock_voltage_request(pp_handle, &clock_req))
return PP_SMU_RESULT_FAIL;
return PP_SMU_RESULT_OK;
@@ -745,10 +706,14 @@ static enum pp_smu_status pp_nv_set_pstate_handshake_support(
{
const struct dc_context *ctx = pp->dm;
struct amdgpu_device *adev = ctx->driver_context;
- struct smu_context *smu = &adev->smu;
+ void *pp_handle = adev->powerplay.pp_handle;
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
- if (smu_display_disable_memory_clock_switch(smu, !pstate_handshake_supported))
- return PP_SMU_RESULT_FAIL;
+ if (pp_funcs && pp_funcs->display_disable_memory_clock_switch) {
+ if (pp_funcs->display_disable_memory_clock_switch(pp_handle,
+ !pstate_handshake_supported))
+ return PP_SMU_RESULT_FAIL;
+ }
return PP_SMU_RESULT_OK;
}
@@ -758,10 +723,11 @@ static enum pp_smu_status pp_nv_set_voltage_by_freq(struct pp_smu *pp,
{
const struct dc_context *ctx = pp->dm;
struct amdgpu_device *adev = ctx->driver_context;
- struct smu_context *smu = &adev->smu;
+ void *pp_handle = adev->powerplay.pp_handle;
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
struct pp_display_clock_request clock_req;
- if (!smu->ppt_funcs)
+ if (!pp_funcs || !pp_funcs->display_clock_voltage_request)
return PP_SMU_RESULT_UNSUPPORTED;
switch (clock_id) {
@@ -782,7 +748,7 @@ static enum pp_smu_status pp_nv_set_voltage_by_freq(struct pp_smu *pp,
/* 0: successful or smu.ppt_funcs->display_clock_voltage_request = NULL
* 1: fail
*/
- if (smu_display_clock_voltage_request(smu, &clock_req))
+ if (pp_funcs->display_clock_voltage_request(pp_handle, &clock_req))
return PP_SMU_RESULT_FAIL;
return PP_SMU_RESULT_OK;
@@ -793,15 +759,13 @@ static enum pp_smu_status pp_nv_get_maximum_sustainable_clocks(
{
const struct dc_context *ctx = pp->dm;
struct amdgpu_device *adev = ctx->driver_context;
- struct smu_context *smu = &adev->smu;
-
- if (!smu->ppt_funcs)
- return PP_SMU_RESULT_UNSUPPORTED;
+ void *pp_handle = adev->powerplay.pp_handle;
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
- if (!smu->ppt_funcs->get_max_sustainable_clocks_by_dc)
+ if (!pp_funcs || !pp_funcs->get_max_sustainable_clocks_by_dc)
return PP_SMU_RESULT_UNSUPPORTED;
- if (!smu_get_max_sustainable_clocks_by_dc(smu, max_clocks))
+ if (!pp_funcs->get_max_sustainable_clocks_by_dc(pp_handle, max_clocks))
return PP_SMU_RESULT_OK;
return PP_SMU_RESULT_FAIL;
@@ -812,16 +776,15 @@ static enum pp_smu_status pp_nv_get_uclk_dpm_states(struct pp_smu *pp,
{
const struct dc_context *ctx = pp->dm;
struct amdgpu_device *adev = ctx->driver_context;
- struct smu_context *smu = &adev->smu;
-
- if (!smu->ppt_funcs)
- return PP_SMU_RESULT_UNSUPPORTED;
+ void *pp_handle = adev->powerplay.pp_handle;
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
- if (!smu->ppt_funcs->get_uclk_dpm_states)
+ if (!pp_funcs || !pp_funcs->get_uclk_dpm_states)
return PP_SMU_RESULT_UNSUPPORTED;
- if (!smu_get_uclk_dpm_states(smu,
- clock_values_in_khz, num_states))
+ if (!pp_funcs->get_uclk_dpm_states(pp_handle,
+ clock_values_in_khz,
+ num_states))
return PP_SMU_RESULT_OK;
return PP_SMU_RESULT_FAIL;
@@ -832,15 +795,13 @@ static enum pp_smu_status pp_rn_get_dpm_clock_table(
{
const struct dc_context *ctx = pp->dm;
struct amdgpu_device *adev = ctx->driver_context;
- struct smu_context *smu = &adev->smu;
-
- if (!smu->ppt_funcs)
- return PP_SMU_RESULT_UNSUPPORTED;
+ void *pp_handle = adev->powerplay.pp_handle;
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
- if (!smu->ppt_funcs->get_dpm_clock_table)
+ if (!pp_funcs || !pp_funcs->get_dpm_clock_table)
return PP_SMU_RESULT_UNSUPPORTED;
- if (!smu_get_dpm_clock_table(smu, clock_table))
+ if (!pp_funcs->get_dpm_clock_table(pp_handle, clock_table))
return PP_SMU_RESULT_OK;
return PP_SMU_RESULT_FAIL;
@@ -851,8 +812,11 @@ static enum pp_smu_status pp_rn_set_wm_ranges(struct pp_smu *pp,
{
const struct dc_context *ctx = pp->dm;
struct amdgpu_device *adev = ctx->driver_context;
+ void *pp_handle = adev->powerplay.pp_handle;
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
- smu_set_watermarks_for_clock_ranges(&adev->smu, ranges);
+ if (pp_funcs && pp_funcs->set_watermarks_for_clocks_ranges)
+ pp_funcs->set_watermarks_for_clocks_ranges(pp_handle, ranges);
return PP_SMU_RESULT_OK;
}
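Throughout this file the conversion replaces direct smu_* calls with indirect calls through adev->powerplay.pp_funcs, guarding both the table pointer and the individual callback before dispatching, so an unimplemented backend degrades to "unsupported" instead of crashing. A self-contained sketch of that NULL-checked ops-table pattern is below; struct pm_ops and the function names are illustrative stand-ins, not the driver's types:

/* Sketch of the NULL-checked ops-table dispatch used above. */
#include <stdio.h>

struct pm_ops {
	int (*set_display_count)(void *handle, int count);
};

static int dispatch_set_display_count(const struct pm_ops *ops,
				      void *handle, int count)
{
	/* Guard the table and the slot, as the hunks above do. */
	if (!ops || !ops->set_display_count)
		return -1;	/* PP_SMU_RESULT_UNSUPPORTED analogue */
	return ops->set_display_count(handle, count);
}

static int fake_set_display_count(void *handle, int count)
{
	(void)handle;
	printf("display count = %d\n", count);
	return 0;	/* 0 == success, matching the driver convention */
}

int main(void)
{
	struct pm_ops ops = { .set_display_count = fake_set_display_count };

	dispatch_set_display_count(&ops, NULL, 2);	/* dispatches */
	dispatch_set_display_count(NULL, NULL, 2);	/* safely unsupported */
	return 0;
}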
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h
index 86960476823c..46a33f64cf8e 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h
@@ -597,6 +597,46 @@ TRACE_EVENT(amdgpu_dm_dce_clocks_state,
)
);
+TRACE_EVENT(amdgpu_dmub_trace_high_irq,
+ TP_PROTO(uint32_t trace_code, uint32_t tick_count, uint32_t param0,
+ uint32_t param1),
+ TP_ARGS(trace_code, tick_count, param0, param1),
+ TP_STRUCT__entry(
+ __field(uint32_t, trace_code)
+ __field(uint32_t, tick_count)
+ __field(uint32_t, param0)
+ __field(uint32_t, param1)
+ ),
+ TP_fast_assign(
+ __entry->trace_code = trace_code;
+ __entry->tick_count = tick_count;
+ __entry->param0 = param0;
+ __entry->param1 = param1;
+ ),
+ TP_printk("trace_code=%u tick_count=%u param0=%u param1=%u",
+ __entry->trace_code, __entry->tick_count,
+ __entry->param0, __entry->param1)
+);
+
+TRACE_EVENT(amdgpu_refresh_rate_track,
+ TP_PROTO(int crtc_index, ktime_t refresh_rate_ns, uint32_t refresh_rate_hz),
+ TP_ARGS(crtc_index, refresh_rate_ns, refresh_rate_hz),
+ TP_STRUCT__entry(
+ __field(int, crtc_index)
+ __field(ktime_t, refresh_rate_ns)
+ __field(uint32_t, refresh_rate_hz)
+ ),
+ TP_fast_assign(
+ __entry->crtc_index = crtc_index;
+ __entry->refresh_rate_ns = refresh_rate_ns;
+ __entry->refresh_rate_hz = refresh_rate_hz;
+ ),
+ TP_printk("crtc_index=%d refresh_rate=%dHz (%lld)",
+ __entry->crtc_index,
+ __entry->refresh_rate_hz,
+ __entry->refresh_rate_ns)
+);
+
#endif /* _AMDGPU_DM_TRACE_H_ */
#undef TRACE_INCLUDE_PATH
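TRACE_EVENT() only declares the tracepoints; the driver fires them through the generated trace_<event>() functions. The user-space sketch below simply mirrors the TP_printk() format of amdgpu_dmub_trace_high_irq so the emitted record is easy to see; the function body and the sample values are illustrative, not part of the patch:

/* User-space mock mirroring the TP_printk format of the new event. */
#include <stdint.h>
#include <stdio.h>

static void trace_amdgpu_dmub_trace_high_irq(uint32_t trace_code,
					     uint32_t tick_count,
					     uint32_t param0,
					     uint32_t param1)
{
	printf("trace_code=%u tick_count=%u param0=%u param1=%u\n",
	       trace_code, tick_count, param0, param1);
}

int main(void)
{
	/* Values a DMUB trace-buffer IRQ handler might report (made up). */
	trace_amdgpu_dmub_trace_high_irq(3, 123456, 0, 0);
	return 0;
}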
diff --git a/drivers/gpu/drm/amd/display/dc/Makefile b/drivers/gpu/drm/amd/display/dc/Makefile
index bbde6e6a4e43..f33847299bca 100644
--- a/drivers/gpu/drm/amd/display/dc/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/Makefile
@@ -54,8 +54,9 @@ AMD_DC = $(addsuffix /Makefile, $(addprefix $(FULL_AMD_DISPLAY_PATH)/dc/,$(DC_LI
include $(AMD_DC)
-DISPLAY_CORE = dc.o dc_stat.o dc_link.o dc_resource.o dc_hw_sequencer.o dc_sink.o \
-dc_surface.o dc_link_hwss.o dc_link_dp.o dc_link_ddc.o dc_debug.o dc_stream.o
+DISPLAY_CORE = dc.o dc_link.o dc_resource.o dc_hw_sequencer.o dc_sink.o \
+dc_surface.o dc_link_hwss.o dc_link_dp.o dc_link_ddc.o dc_debug.o dc_stream.o \
+dc_link_enc_cfg.o
ifdef CONFIG_DRM_AMD_DC_DCN
DISPLAY_CORE += dc_vm_helper.o
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index b208f06ed514..d79f4fe06c47 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -916,6 +916,192 @@ static enum bp_result bios_parser_get_soc_bb_info(
return result;
}
+static enum bp_result get_disp_caps_v4_1(
+ struct bios_parser *bp,
+ uint8_t *dce_caps)
+{
+ enum bp_result result = BP_RESULT_OK;
+ struct atom_display_controller_info_v4_1 *disp_cntl_tbl = NULL;
+
+ if (!dce_caps)
+ return BP_RESULT_BADINPUT;
+
+ if (!DATA_TABLES(dce_info))
+ return BP_RESULT_BADBIOSTABLE;
+
+ disp_cntl_tbl = GET_IMAGE(struct atom_display_controller_info_v4_1,
+ DATA_TABLES(dce_info));
+
+ if (!disp_cntl_tbl)
+ return BP_RESULT_BADBIOSTABLE;
+
+ *dce_caps = disp_cntl_tbl->display_caps;
+
+ return result;
+}
+
+static enum bp_result get_disp_caps_v4_2(
+ struct bios_parser *bp,
+ uint8_t *dce_caps)
+{
+ enum bp_result result = BP_RESULT_OK;
+ struct atom_display_controller_info_v4_2 *disp_cntl_tbl = NULL;
+
+ if (!dce_caps)
+ return BP_RESULT_BADINPUT;
+
+ if (!DATA_TABLES(dce_info))
+ return BP_RESULT_BADBIOSTABLE;
+
+ disp_cntl_tbl = GET_IMAGE(struct atom_display_controller_info_v4_2,
+ DATA_TABLES(dce_info));
+
+ if (!disp_cntl_tbl)
+ return BP_RESULT_BADBIOSTABLE;
+
+ *dce_caps = disp_cntl_tbl->display_caps;
+
+ return result;
+}
+
+static enum bp_result get_disp_caps_v4_3(
+ struct bios_parser *bp,
+ uint8_t *dce_caps)
+{
+ enum bp_result result = BP_RESULT_OK;
+ struct atom_display_controller_info_v4_3 *disp_cntl_tbl = NULL;
+
+ if (!dce_caps)
+ return BP_RESULT_BADINPUT;
+
+ if (!DATA_TABLES(dce_info))
+ return BP_RESULT_BADBIOSTABLE;
+
+ disp_cntl_tbl = GET_IMAGE(struct atom_display_controller_info_v4_3,
+ DATA_TABLES(dce_info));
+
+ if (!disp_cntl_tbl)
+ return BP_RESULT_BADBIOSTABLE;
+
+ *dce_caps = disp_cntl_tbl->display_caps;
+
+ return result;
+}
+
+static enum bp_result get_disp_caps_v4_4(
+ struct bios_parser *bp,
+ uint8_t *dce_caps)
+{
+ enum bp_result result = BP_RESULT_OK;
+ struct atom_display_controller_info_v4_4 *disp_cntl_tbl = NULL;
+
+ if (!dce_caps)
+ return BP_RESULT_BADINPUT;
+
+ if (!DATA_TABLES(dce_info))
+ return BP_RESULT_BADBIOSTABLE;
+
+ disp_cntl_tbl = GET_IMAGE(struct atom_display_controller_info_v4_4,
+ DATA_TABLES(dce_info));
+
+ if (!disp_cntl_tbl)
+ return BP_RESULT_BADBIOSTABLE;
+
+ *dce_caps = disp_cntl_tbl->display_caps;
+
+ return result;
+}
+
+static enum bp_result bios_parser_get_lttpr_interop(
+ struct dc_bios *dcb,
+ uint8_t *dce_caps)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+ enum bp_result result = BP_RESULT_UNSUPPORTED;
+ struct atom_common_table_header *header;
+ struct atom_data_revision tbl_revision;
+
+ if (!DATA_TABLES(dce_info))
+ return BP_RESULT_UNSUPPORTED;
+
+ header = GET_IMAGE(struct atom_common_table_header,
+ DATA_TABLES(dce_info));
+ get_atom_data_table_revision(header, &tbl_revision);
+ switch (tbl_revision.major) {
+ case 4:
+ switch (tbl_revision.minor) {
+ case 1:
+ result = get_disp_caps_v4_1(bp, dce_caps);
+ *dce_caps = !!(*dce_caps & DCE_INFO_CAPS_VBIOS_LTTPR_TRANSPARENT_ENABLE);
+ break;
+ case 2:
+ result = get_disp_caps_v4_2(bp, dce_caps);
+ *dce_caps = !!(*dce_caps & DCE_INFO_CAPS_VBIOS_LTTPR_TRANSPARENT_ENABLE);
+ break;
+ case 3:
+ result = get_disp_caps_v4_3(bp, dce_caps);
+ *dce_caps = !!(*dce_caps & DCE_INFO_CAPS_VBIOS_LTTPR_TRANSPARENT_ENABLE);
+ break;
+ case 4:
+ result = get_disp_caps_v4_4(bp, dce_caps);
+ *dce_caps = !!(*dce_caps & DCE_INFO_CAPS_VBIOS_LTTPR_TRANSPARENT_ENABLE);
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return result;
+}
+
+static enum bp_result bios_parser_get_lttpr_caps(
+ struct dc_bios *dcb,
+ uint8_t *dce_caps)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+ enum bp_result result = BP_RESULT_UNSUPPORTED;
+ struct atom_common_table_header *header;
+ struct atom_data_revision tbl_revision;
+
+ if (!DATA_TABLES(dce_info))
+ return BP_RESULT_UNSUPPORTED;
+
+ header = GET_IMAGE(struct atom_common_table_header,
+ DATA_TABLES(dce_info));
+ get_atom_data_table_revision(header, &tbl_revision);
+ switch (tbl_revision.major) {
+ case 4:
+ switch (tbl_revision.minor) {
+ case 1:
+ result = get_disp_caps_v4_1(bp, dce_caps);
+ *dce_caps = !!(*dce_caps & DCE_INFO_CAPS_LTTPR_SUPPORT_ENABLE);
+ break;
+ case 2:
+ result = get_disp_caps_v4_2(bp, dce_caps);
+ *dce_caps = !!(*dce_caps & DCE_INFO_CAPS_LTTPR_SUPPORT_ENABLE);
+ break;
+ case 3:
+ result = get_disp_caps_v4_3(bp, dce_caps);
+ *dce_caps = !!(*dce_caps & DCE_INFO_CAPS_LTTPR_SUPPORT_ENABLE);
+ break;
+ case 4:
+ result = get_disp_caps_v4_4(bp, dce_caps);
+ *dce_caps = !!(*dce_caps & DCE_INFO_CAPS_LTTPR_SUPPORT_ENABLE);
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return result;
+}
+
static enum bp_result get_embedded_panel_info_v2_1(
struct bios_parser *bp,
struct embedded_panel_info *info)
@@ -2531,6 +2717,10 @@ static const struct dc_vbios_funcs vbios_funcs = {
.get_soc_bb_info = bios_parser_get_soc_bb_info,
.get_disp_connector_caps_info = bios_parser_get_disp_connector_caps_info,
+
+ .get_lttpr_caps = bios_parser_get_lttpr_caps,
+
+ .get_lttpr_interop = bios_parser_get_lttpr_interop,
};
static bool bios_parser2_construct(
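The four get_disp_caps_v4_N helpers above are identical except for the atom_display_controller_info_v4_N row type, and the two exported parsers differ only in which DCE_INFO_CAPS_* bit they mask down to a boolean. As a sketch of how that duplication could be stamped out with a macro, reusing the file's DATA_TABLES()/GET_IMAGE() helpers (this is an editorial illustration meant to sit in the same file; the patch itself keeps the explicit per-revision functions):

/* Illustrative only: generate the per-revision accessors in one place. */
#define DEFINE_GET_DISP_CAPS(ver)					\
static enum bp_result get_disp_caps_v4_##ver(struct bios_parser *bp,	\
					     uint8_t *dce_caps)		\
{									\
	struct atom_display_controller_info_v4_##ver *tbl;		\
									\
	if (!dce_caps)							\
		return BP_RESULT_BADINPUT;				\
	if (!DATA_TABLES(dce_info))					\
		return BP_RESULT_BADBIOSTABLE;				\
	tbl = GET_IMAGE(struct atom_display_controller_info_v4_##ver,	\
			DATA_TABLES(dce_info));				\
	if (!tbl)							\
		return BP_RESULT_BADBIOSTABLE;				\
	*dce_caps = tbl->display_caps;					\
	return BP_RESULT_OK;						\
}

DEFINE_GET_DISP_CAPS(1)
DEFINE_GET_DISP_CAPS(2)
DEFINE_GET_DISP_CAPS(3)
DEFINE_GET_DISP_CAPS(4)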
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
index e633f8a51edb..1244fcb0f446 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
@@ -98,16 +98,16 @@ static void calculate_bandwidth(
int32_t num_cursor_lines;
int32_t i, j, k;
- struct bw_fixed yclk[3];
- struct bw_fixed sclk[8];
+ struct bw_fixed *yclk;
+ struct bw_fixed *sclk;
bool d0_underlay_enable;
bool d1_underlay_enable;
bool fbc_enabled;
bool lpt_enabled;
enum bw_defines sclk_message;
enum bw_defines yclk_message;
- enum bw_defines tiling_mode[maximum_number_of_surfaces];
- enum bw_defines surface_type[maximum_number_of_surfaces];
+ enum bw_defines *tiling_mode;
+ enum bw_defines *surface_type;
enum bw_defines voltage;
enum bw_defines pipe_check;
enum bw_defines hsr_check;
@@ -122,6 +122,22 @@ static void calculate_bandwidth(
int32_t number_of_displays_enabled_with_margin = 0;
int32_t number_of_aligned_displays_with_no_margin = 0;
+ yclk = kcalloc(3, sizeof(*yclk), GFP_KERNEL);
+ if (!yclk)
+ return;
+
+ sclk = kcalloc(8, sizeof(*sclk), GFP_KERNEL);
+ if (!sclk)
+ goto free_yclk;
+
+ tiling_mode = kcalloc(maximum_number_of_surfaces, sizeof(*tiling_mode), GFP_KERNEL);
+ if (!tiling_mode)
+ goto free_sclk;
+
+ surface_type = kcalloc(maximum_number_of_surfaces, sizeof(*surface_type), GFP_KERNEL);
+ if (!surface_type)
+ goto free_tiling_mode;
+
yclk[low] = vbios->low_yclk;
yclk[mid] = vbios->mid_yclk;
yclk[high] = vbios->high_yclk;
@@ -2013,6 +2029,14 @@ static void calculate_bandwidth(
}
}
}
+
+ kfree(surface_type);
+free_tiling_mode:
+ kfree(tiling_mode);
+free_sclk:
+	kfree(sclk);
+free_yclk:
+	kfree(yclk);
}
/*******************************************************************************
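The hunk above trades large on-stack arrays for kcalloc() allocations and unwinds through goto labels; for that to be leak-free the labels must run in reverse allocation order, so every failure path frees exactly what was allocated before it. The bw_calcs_init() hunk that follows applies the same idea to two large structs via kzalloc() to shrink the stack frame. A self-contained user-space sketch of the unwind ordering, with illustrative names:

/* Sketch of the alloc + goto-unwind pattern: labels in reverse
 * allocation order, so each goto frees all earlier buffers. */
#include <stdlib.h>

static int compute(int n)
{
	int *a, *b, *c, ret = -1;

	a = calloc(3, sizeof(*a));
	if (!a)
		return -1;
	b = calloc(8, sizeof(*b));
	if (!b)
		goto free_a;
	c = calloc(n, sizeof(*c));
	if (!c)
		goto free_b;

	ret = 0;		/* ... real work would happen here ... */

	free(c);
free_b:
	free(b);
free_a:
	free(a);
	return ret;
}

int main(void)
{
	return compute(16) ? 1 : 0;
}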
@@ -2022,707 +2046,719 @@ void bw_calcs_init(struct bw_calcs_dceip *bw_dceip,
struct bw_calcs_vbios *bw_vbios,
struct hw_asic_id asic_id)
{
- struct bw_calcs_dceip dceip = { 0 };
- struct bw_calcs_vbios vbios = { 0 };
+ struct bw_calcs_dceip *dceip;
+ struct bw_calcs_vbios *vbios;
enum bw_calcs_version version = bw_calcs_version_from_asic_id(asic_id);
- dceip.version = version;
+ dceip = kzalloc(sizeof(*dceip), GFP_KERNEL);
+ if (!dceip)
+ return;
+
+ vbios = kzalloc(sizeof(*vbios), GFP_KERNEL);
+ if (!vbios) {
+ kfree(dceip);
+ return;
+ }
+
+ dceip->version = version;
switch (version) {
case BW_CALCS_VERSION_CARRIZO:
- vbios.memory_type = bw_def_gddr5;
- vbios.dram_channel_width_in_bits = 64;
- vbios.number_of_dram_channels = asic_id.vram_width / vbios.dram_channel_width_in_bits;
- vbios.number_of_dram_banks = 8;
- vbios.high_yclk = bw_int_to_fixed(1600);
- vbios.mid_yclk = bw_int_to_fixed(1600);
- vbios.low_yclk = bw_frc_to_fixed(66666, 100);
- vbios.low_sclk = bw_int_to_fixed(200);
- vbios.mid1_sclk = bw_int_to_fixed(300);
- vbios.mid2_sclk = bw_int_to_fixed(300);
- vbios.mid3_sclk = bw_int_to_fixed(300);
- vbios.mid4_sclk = bw_int_to_fixed(300);
- vbios.mid5_sclk = bw_int_to_fixed(300);
- vbios.mid6_sclk = bw_int_to_fixed(300);
- vbios.high_sclk = bw_frc_to_fixed(62609, 100);
- vbios.low_voltage_max_dispclk = bw_int_to_fixed(352);
- vbios.mid_voltage_max_dispclk = bw_int_to_fixed(467);
- vbios.high_voltage_max_dispclk = bw_int_to_fixed(643);
- vbios.low_voltage_max_phyclk = bw_int_to_fixed(540);
- vbios.mid_voltage_max_phyclk = bw_int_to_fixed(810);
- vbios.high_voltage_max_phyclk = bw_int_to_fixed(810);
- vbios.data_return_bus_width = bw_int_to_fixed(32);
- vbios.trc = bw_int_to_fixed(50);
- vbios.dmifmc_urgent_latency = bw_int_to_fixed(4);
- vbios.stutter_self_refresh_exit_latency = bw_frc_to_fixed(153, 10);
- vbios.stutter_self_refresh_entry_latency = bw_int_to_fixed(0);
- vbios.nbp_state_change_latency = bw_frc_to_fixed(19649, 1000);
- vbios.mcifwrmc_urgent_latency = bw_int_to_fixed(10);
- vbios.scatter_gather_enable = true;
- vbios.down_spread_percentage = bw_frc_to_fixed(5, 10);
- vbios.cursor_width = 32;
- vbios.average_compression_rate = 4;
- vbios.number_of_request_slots_gmc_reserves_for_dmif_per_channel = 256;
- vbios.blackout_duration = bw_int_to_fixed(0); /* us */
- vbios.maximum_blackout_recovery_time = bw_int_to_fixed(0);
-
- dceip.max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation = 100;
- dceip.max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation = 100;
- dceip.percent_of_ideal_port_bw_received_after_urgent_latency = 100;
- dceip.large_cursor = false;
- dceip.dmif_request_buffer_size = bw_int_to_fixed(768);
- dceip.dmif_pipe_en_fbc_chunk_tracker = false;
- dceip.cursor_max_outstanding_group_num = 1;
- dceip.lines_interleaved_into_lb = 2;
- dceip.chunk_width = 256;
- dceip.number_of_graphics_pipes = 3;
- dceip.number_of_underlay_pipes = 1;
- dceip.low_power_tiling_mode = 0;
- dceip.display_write_back_supported = false;
- dceip.argb_compression_support = false;
- dceip.underlay_vscaler_efficiency6_bit_per_component =
+ vbios->memory_type = bw_def_gddr5;
+ vbios->dram_channel_width_in_bits = 64;
+ vbios->number_of_dram_channels = asic_id.vram_width / vbios->dram_channel_width_in_bits;
+ vbios->number_of_dram_banks = 8;
+ vbios->high_yclk = bw_int_to_fixed(1600);
+ vbios->mid_yclk = bw_int_to_fixed(1600);
+ vbios->low_yclk = bw_frc_to_fixed(66666, 100);
+ vbios->low_sclk = bw_int_to_fixed(200);
+ vbios->mid1_sclk = bw_int_to_fixed(300);
+ vbios->mid2_sclk = bw_int_to_fixed(300);
+ vbios->mid3_sclk = bw_int_to_fixed(300);
+ vbios->mid4_sclk = bw_int_to_fixed(300);
+ vbios->mid5_sclk = bw_int_to_fixed(300);
+ vbios->mid6_sclk = bw_int_to_fixed(300);
+ vbios->high_sclk = bw_frc_to_fixed(62609, 100);
+ vbios->low_voltage_max_dispclk = bw_int_to_fixed(352);
+ vbios->mid_voltage_max_dispclk = bw_int_to_fixed(467);
+ vbios->high_voltage_max_dispclk = bw_int_to_fixed(643);
+ vbios->low_voltage_max_phyclk = bw_int_to_fixed(540);
+ vbios->mid_voltage_max_phyclk = bw_int_to_fixed(810);
+ vbios->high_voltage_max_phyclk = bw_int_to_fixed(810);
+ vbios->data_return_bus_width = bw_int_to_fixed(32);
+ vbios->trc = bw_int_to_fixed(50);
+ vbios->dmifmc_urgent_latency = bw_int_to_fixed(4);
+ vbios->stutter_self_refresh_exit_latency = bw_frc_to_fixed(153, 10);
+ vbios->stutter_self_refresh_entry_latency = bw_int_to_fixed(0);
+ vbios->nbp_state_change_latency = bw_frc_to_fixed(19649, 1000);
+ vbios->mcifwrmc_urgent_latency = bw_int_to_fixed(10);
+ vbios->scatter_gather_enable = true;
+ vbios->down_spread_percentage = bw_frc_to_fixed(5, 10);
+ vbios->cursor_width = 32;
+ vbios->average_compression_rate = 4;
+ vbios->number_of_request_slots_gmc_reserves_for_dmif_per_channel = 256;
+ vbios->blackout_duration = bw_int_to_fixed(0); /* us */
+ vbios->maximum_blackout_recovery_time = bw_int_to_fixed(0);
+
+ dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation = 100;
+ dceip->max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation = 100;
+ dceip->percent_of_ideal_port_bw_received_after_urgent_latency = 100;
+ dceip->large_cursor = false;
+ dceip->dmif_request_buffer_size = bw_int_to_fixed(768);
+ dceip->dmif_pipe_en_fbc_chunk_tracker = false;
+ dceip->cursor_max_outstanding_group_num = 1;
+ dceip->lines_interleaved_into_lb = 2;
+ dceip->chunk_width = 256;
+ dceip->number_of_graphics_pipes = 3;
+ dceip->number_of_underlay_pipes = 1;
+ dceip->low_power_tiling_mode = 0;
+ dceip->display_write_back_supported = false;
+ dceip->argb_compression_support = false;
+ dceip->underlay_vscaler_efficiency6_bit_per_component =
bw_frc_to_fixed(35556, 10000);
- dceip.underlay_vscaler_efficiency8_bit_per_component =
+ dceip->underlay_vscaler_efficiency8_bit_per_component =
bw_frc_to_fixed(34286, 10000);
- dceip.underlay_vscaler_efficiency10_bit_per_component =
+ dceip->underlay_vscaler_efficiency10_bit_per_component =
bw_frc_to_fixed(32, 10);
- dceip.underlay_vscaler_efficiency12_bit_per_component =
+ dceip->underlay_vscaler_efficiency12_bit_per_component =
bw_int_to_fixed(3);
- dceip.graphics_vscaler_efficiency6_bit_per_component =
+ dceip->graphics_vscaler_efficiency6_bit_per_component =
bw_frc_to_fixed(35, 10);
- dceip.graphics_vscaler_efficiency8_bit_per_component =
+ dceip->graphics_vscaler_efficiency8_bit_per_component =
bw_frc_to_fixed(34286, 10000);
- dceip.graphics_vscaler_efficiency10_bit_per_component =
+ dceip->graphics_vscaler_efficiency10_bit_per_component =
bw_frc_to_fixed(32, 10);
- dceip.graphics_vscaler_efficiency12_bit_per_component =
+ dceip->graphics_vscaler_efficiency12_bit_per_component =
bw_int_to_fixed(3);
- dceip.alpha_vscaler_efficiency = bw_int_to_fixed(3);
- dceip.max_dmif_buffer_allocated = 2;
- dceip.graphics_dmif_size = 12288;
- dceip.underlay_luma_dmif_size = 19456;
- dceip.underlay_chroma_dmif_size = 23552;
- dceip.pre_downscaler_enabled = true;
- dceip.underlay_downscale_prefetch_enabled = true;
- dceip.lb_write_pixels_per_dispclk = bw_int_to_fixed(1);
- dceip.lb_size_per_component444 = bw_int_to_fixed(82176);
- dceip.graphics_lb_nodownscaling_multi_line_prefetching = false;
- dceip.stutter_and_dram_clock_state_change_gated_before_cursor =
+ dceip->alpha_vscaler_efficiency = bw_int_to_fixed(3);
+ dceip->max_dmif_buffer_allocated = 2;
+ dceip->graphics_dmif_size = 12288;
+ dceip->underlay_luma_dmif_size = 19456;
+ dceip->underlay_chroma_dmif_size = 23552;
+ dceip->pre_downscaler_enabled = true;
+ dceip->underlay_downscale_prefetch_enabled = true;
+ dceip->lb_write_pixels_per_dispclk = bw_int_to_fixed(1);
+ dceip->lb_size_per_component444 = bw_int_to_fixed(82176);
+ dceip->graphics_lb_nodownscaling_multi_line_prefetching = false;
+ dceip->stutter_and_dram_clock_state_change_gated_before_cursor =
bw_int_to_fixed(0);
- dceip.underlay420_luma_lb_size_per_component = bw_int_to_fixed(
+ dceip->underlay420_luma_lb_size_per_component = bw_int_to_fixed(
82176);
- dceip.underlay420_chroma_lb_size_per_component =
+ dceip->underlay420_chroma_lb_size_per_component =
bw_int_to_fixed(164352);
- dceip.underlay422_lb_size_per_component = bw_int_to_fixed(
+ dceip->underlay422_lb_size_per_component = bw_int_to_fixed(
82176);
- dceip.cursor_chunk_width = bw_int_to_fixed(64);
- dceip.cursor_dcp_buffer_lines = bw_int_to_fixed(4);
- dceip.underlay_maximum_width_efficient_for_tiling =
+ dceip->cursor_chunk_width = bw_int_to_fixed(64);
+ dceip->cursor_dcp_buffer_lines = bw_int_to_fixed(4);
+ dceip->underlay_maximum_width_efficient_for_tiling =
bw_int_to_fixed(1920);
- dceip.underlay_maximum_height_efficient_for_tiling =
+ dceip->underlay_maximum_height_efficient_for_tiling =
bw_int_to_fixed(1080);
- dceip.peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display =
+ dceip->peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display =
bw_frc_to_fixed(3, 10);
- dceip.peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation =
+ dceip->peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation =
bw_int_to_fixed(25);
- dceip.minimum_outstanding_pte_request_limit = bw_int_to_fixed(
+ dceip->minimum_outstanding_pte_request_limit = bw_int_to_fixed(
2);
- dceip.maximum_total_outstanding_pte_requests_allowed_by_saw =
+ dceip->maximum_total_outstanding_pte_requests_allowed_by_saw =
bw_int_to_fixed(128);
- dceip.limit_excessive_outstanding_dmif_requests = true;
- dceip.linear_mode_line_request_alternation_slice =
+ dceip->limit_excessive_outstanding_dmif_requests = true;
+ dceip->linear_mode_line_request_alternation_slice =
bw_int_to_fixed(64);
- dceip.scatter_gather_lines_of_pte_prefetching_in_linear_mode =
+ dceip->scatter_gather_lines_of_pte_prefetching_in_linear_mode =
32;
- dceip.display_write_back420_luma_mcifwr_buffer_size = 12288;
- dceip.display_write_back420_chroma_mcifwr_buffer_size = 8192;
- dceip.request_efficiency = bw_frc_to_fixed(8, 10);
- dceip.dispclk_per_request = bw_int_to_fixed(2);
- dceip.dispclk_ramping_factor = bw_frc_to_fixed(105, 100);
- dceip.display_pipe_throughput_factor = bw_frc_to_fixed(105, 100);
- dceip.scatter_gather_pte_request_rows_in_tiling_mode = 2;
- dceip.mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0); /* todo: this is a bug*/
+ dceip->display_write_back420_luma_mcifwr_buffer_size = 12288;
+ dceip->display_write_back420_chroma_mcifwr_buffer_size = 8192;
+ dceip->request_efficiency = bw_frc_to_fixed(8, 10);
+ dceip->dispclk_per_request = bw_int_to_fixed(2);
+ dceip->dispclk_ramping_factor = bw_frc_to_fixed(105, 100);
+ dceip->display_pipe_throughput_factor = bw_frc_to_fixed(105, 100);
+ dceip->scatter_gather_pte_request_rows_in_tiling_mode = 2;
+ dceip->mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0); /* todo: this is a bug*/
break;
case BW_CALCS_VERSION_POLARIS10:
/* TODO: Treat VEGAM the same as P10 for now
* Need to tune the para for VEGAM if needed */
case BW_CALCS_VERSION_VEGAM:
- vbios.memory_type = bw_def_gddr5;
- vbios.dram_channel_width_in_bits = 32;
- vbios.number_of_dram_channels = asic_id.vram_width / vbios.dram_channel_width_in_bits;
- vbios.number_of_dram_banks = 8;
- vbios.high_yclk = bw_int_to_fixed(6000);
- vbios.mid_yclk = bw_int_to_fixed(3200);
- vbios.low_yclk = bw_int_to_fixed(1000);
- vbios.low_sclk = bw_int_to_fixed(300);
- vbios.mid1_sclk = bw_int_to_fixed(400);
- vbios.mid2_sclk = bw_int_to_fixed(500);
- vbios.mid3_sclk = bw_int_to_fixed(600);
- vbios.mid4_sclk = bw_int_to_fixed(700);
- vbios.mid5_sclk = bw_int_to_fixed(800);
- vbios.mid6_sclk = bw_int_to_fixed(974);
- vbios.high_sclk = bw_int_to_fixed(1154);
- vbios.low_voltage_max_dispclk = bw_int_to_fixed(459);
- vbios.mid_voltage_max_dispclk = bw_int_to_fixed(654);
- vbios.high_voltage_max_dispclk = bw_int_to_fixed(1108);
- vbios.low_voltage_max_phyclk = bw_int_to_fixed(540);
- vbios.mid_voltage_max_phyclk = bw_int_to_fixed(810);
- vbios.high_voltage_max_phyclk = bw_int_to_fixed(810);
- vbios.data_return_bus_width = bw_int_to_fixed(32);
- vbios.trc = bw_int_to_fixed(48);
- vbios.dmifmc_urgent_latency = bw_int_to_fixed(3);
- vbios.stutter_self_refresh_exit_latency = bw_int_to_fixed(5);
- vbios.stutter_self_refresh_entry_latency = bw_int_to_fixed(0);
- vbios.nbp_state_change_latency = bw_int_to_fixed(45);
- vbios.mcifwrmc_urgent_latency = bw_int_to_fixed(10);
- vbios.scatter_gather_enable = true;
- vbios.down_spread_percentage = bw_frc_to_fixed(5, 10);
- vbios.cursor_width = 32;
- vbios.average_compression_rate = 4;
- vbios.number_of_request_slots_gmc_reserves_for_dmif_per_channel = 256;
- vbios.blackout_duration = bw_int_to_fixed(0); /* us */
- vbios.maximum_blackout_recovery_time = bw_int_to_fixed(0);
-
- dceip.max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation = 100;
- dceip.max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation = 100;
- dceip.percent_of_ideal_port_bw_received_after_urgent_latency = 100;
- dceip.large_cursor = false;
- dceip.dmif_request_buffer_size = bw_int_to_fixed(768);
- dceip.dmif_pipe_en_fbc_chunk_tracker = false;
- dceip.cursor_max_outstanding_group_num = 1;
- dceip.lines_interleaved_into_lb = 2;
- dceip.chunk_width = 256;
- dceip.number_of_graphics_pipes = 6;
- dceip.number_of_underlay_pipes = 0;
- dceip.low_power_tiling_mode = 0;
- dceip.display_write_back_supported = false;
- dceip.argb_compression_support = true;
- dceip.underlay_vscaler_efficiency6_bit_per_component =
+ vbios->memory_type = bw_def_gddr5;
+ vbios->dram_channel_width_in_bits = 32;
+ vbios->number_of_dram_channels = asic_id.vram_width / vbios->dram_channel_width_in_bits;
+ vbios->number_of_dram_banks = 8;
+ vbios->high_yclk = bw_int_to_fixed(6000);
+ vbios->mid_yclk = bw_int_to_fixed(3200);
+ vbios->low_yclk = bw_int_to_fixed(1000);
+ vbios->low_sclk = bw_int_to_fixed(300);
+ vbios->mid1_sclk = bw_int_to_fixed(400);
+ vbios->mid2_sclk = bw_int_to_fixed(500);
+ vbios->mid3_sclk = bw_int_to_fixed(600);
+ vbios->mid4_sclk = bw_int_to_fixed(700);
+ vbios->mid5_sclk = bw_int_to_fixed(800);
+ vbios->mid6_sclk = bw_int_to_fixed(974);
+ vbios->high_sclk = bw_int_to_fixed(1154);
+ vbios->low_voltage_max_dispclk = bw_int_to_fixed(459);
+ vbios->mid_voltage_max_dispclk = bw_int_to_fixed(654);
+ vbios->high_voltage_max_dispclk = bw_int_to_fixed(1108);
+ vbios->low_voltage_max_phyclk = bw_int_to_fixed(540);
+ vbios->mid_voltage_max_phyclk = bw_int_to_fixed(810);
+ vbios->high_voltage_max_phyclk = bw_int_to_fixed(810);
+ vbios->data_return_bus_width = bw_int_to_fixed(32);
+ vbios->trc = bw_int_to_fixed(48);
+ vbios->dmifmc_urgent_latency = bw_int_to_fixed(3);
+ vbios->stutter_self_refresh_exit_latency = bw_int_to_fixed(5);
+ vbios->stutter_self_refresh_entry_latency = bw_int_to_fixed(0);
+ vbios->nbp_state_change_latency = bw_int_to_fixed(45);
+ vbios->mcifwrmc_urgent_latency = bw_int_to_fixed(10);
+ vbios->scatter_gather_enable = true;
+ vbios->down_spread_percentage = bw_frc_to_fixed(5, 10);
+ vbios->cursor_width = 32;
+ vbios->average_compression_rate = 4;
+ vbios->number_of_request_slots_gmc_reserves_for_dmif_per_channel = 256;
+ vbios->blackout_duration = bw_int_to_fixed(0); /* us */
+ vbios->maximum_blackout_recovery_time = bw_int_to_fixed(0);
+
+ dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation = 100;
+ dceip->max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation = 100;
+ dceip->percent_of_ideal_port_bw_received_after_urgent_latency = 100;
+ dceip->large_cursor = false;
+ dceip->dmif_request_buffer_size = bw_int_to_fixed(768);
+ dceip->dmif_pipe_en_fbc_chunk_tracker = false;
+ dceip->cursor_max_outstanding_group_num = 1;
+ dceip->lines_interleaved_into_lb = 2;
+ dceip->chunk_width = 256;
+ dceip->number_of_graphics_pipes = 6;
+ dceip->number_of_underlay_pipes = 0;
+ dceip->low_power_tiling_mode = 0;
+ dceip->display_write_back_supported = false;
+ dceip->argb_compression_support = true;
+ dceip->underlay_vscaler_efficiency6_bit_per_component =
bw_frc_to_fixed(35556, 10000);
- dceip.underlay_vscaler_efficiency8_bit_per_component =
+ dceip->underlay_vscaler_efficiency8_bit_per_component =
bw_frc_to_fixed(34286, 10000);
- dceip.underlay_vscaler_efficiency10_bit_per_component =
+ dceip->underlay_vscaler_efficiency10_bit_per_component =
bw_frc_to_fixed(32, 10);
- dceip.underlay_vscaler_efficiency12_bit_per_component =
+ dceip->underlay_vscaler_efficiency12_bit_per_component =
bw_int_to_fixed(3);
- dceip.graphics_vscaler_efficiency6_bit_per_component =
+ dceip->graphics_vscaler_efficiency6_bit_per_component =
bw_frc_to_fixed(35, 10);
- dceip.graphics_vscaler_efficiency8_bit_per_component =
+ dceip->graphics_vscaler_efficiency8_bit_per_component =
bw_frc_to_fixed(34286, 10000);
- dceip.graphics_vscaler_efficiency10_bit_per_component =
+ dceip->graphics_vscaler_efficiency10_bit_per_component =
bw_frc_to_fixed(32, 10);
- dceip.graphics_vscaler_efficiency12_bit_per_component =
+ dceip->graphics_vscaler_efficiency12_bit_per_component =
bw_int_to_fixed(3);
- dceip.alpha_vscaler_efficiency = bw_int_to_fixed(3);
- dceip.max_dmif_buffer_allocated = 4;
- dceip.graphics_dmif_size = 12288;
- dceip.underlay_luma_dmif_size = 19456;
- dceip.underlay_chroma_dmif_size = 23552;
- dceip.pre_downscaler_enabled = true;
- dceip.underlay_downscale_prefetch_enabled = true;
- dceip.lb_write_pixels_per_dispclk = bw_int_to_fixed(1);
- dceip.lb_size_per_component444 = bw_int_to_fixed(245952);
- dceip.graphics_lb_nodownscaling_multi_line_prefetching = true;
- dceip.stutter_and_dram_clock_state_change_gated_before_cursor =
+ dceip->alpha_vscaler_efficiency = bw_int_to_fixed(3);
+ dceip->max_dmif_buffer_allocated = 4;
+ dceip->graphics_dmif_size = 12288;
+ dceip->underlay_luma_dmif_size = 19456;
+ dceip->underlay_chroma_dmif_size = 23552;
+ dceip->pre_downscaler_enabled = true;
+ dceip->underlay_downscale_prefetch_enabled = true;
+ dceip->lb_write_pixels_per_dispclk = bw_int_to_fixed(1);
+ dceip->lb_size_per_component444 = bw_int_to_fixed(245952);
+ dceip->graphics_lb_nodownscaling_multi_line_prefetching = true;
+ dceip->stutter_and_dram_clock_state_change_gated_before_cursor =
bw_int_to_fixed(1);
- dceip.underlay420_luma_lb_size_per_component = bw_int_to_fixed(
+ dceip->underlay420_luma_lb_size_per_component = bw_int_to_fixed(
82176);
- dceip.underlay420_chroma_lb_size_per_component =
+ dceip->underlay420_chroma_lb_size_per_component =
bw_int_to_fixed(164352);
- dceip.underlay422_lb_size_per_component = bw_int_to_fixed(
+ dceip->underlay422_lb_size_per_component = bw_int_to_fixed(
82176);
- dceip.cursor_chunk_width = bw_int_to_fixed(64);
- dceip.cursor_dcp_buffer_lines = bw_int_to_fixed(4);
- dceip.underlay_maximum_width_efficient_for_tiling =
+ dceip->cursor_chunk_width = bw_int_to_fixed(64);
+ dceip->cursor_dcp_buffer_lines = bw_int_to_fixed(4);
+ dceip->underlay_maximum_width_efficient_for_tiling =
bw_int_to_fixed(1920);
- dceip.underlay_maximum_height_efficient_for_tiling =
+ dceip->underlay_maximum_height_efficient_for_tiling =
bw_int_to_fixed(1080);
- dceip.peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display =
+ dceip->peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display =
bw_frc_to_fixed(3, 10);
- dceip.peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation =
+ dceip->peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation =
bw_int_to_fixed(25);
- dceip.minimum_outstanding_pte_request_limit = bw_int_to_fixed(
+ dceip->minimum_outstanding_pte_request_limit = bw_int_to_fixed(
2);
- dceip.maximum_total_outstanding_pte_requests_allowed_by_saw =
+ dceip->maximum_total_outstanding_pte_requests_allowed_by_saw =
bw_int_to_fixed(128);
- dceip.limit_excessive_outstanding_dmif_requests = true;
- dceip.linear_mode_line_request_alternation_slice =
+ dceip->limit_excessive_outstanding_dmif_requests = true;
+ dceip->linear_mode_line_request_alternation_slice =
bw_int_to_fixed(64);
- dceip.scatter_gather_lines_of_pte_prefetching_in_linear_mode =
+ dceip->scatter_gather_lines_of_pte_prefetching_in_linear_mode =
32;
- dceip.display_write_back420_luma_mcifwr_buffer_size = 12288;
- dceip.display_write_back420_chroma_mcifwr_buffer_size = 8192;
- dceip.request_efficiency = bw_frc_to_fixed(8, 10);
- dceip.dispclk_per_request = bw_int_to_fixed(2);
- dceip.dispclk_ramping_factor = bw_frc_to_fixed(105, 100);
- dceip.display_pipe_throughput_factor = bw_frc_to_fixed(105, 100);
- dceip.scatter_gather_pte_request_rows_in_tiling_mode = 2;
- dceip.mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0);
+ dceip->display_write_back420_luma_mcifwr_buffer_size = 12288;
+ dceip->display_write_back420_chroma_mcifwr_buffer_size = 8192;
+ dceip->request_efficiency = bw_frc_to_fixed(8, 10);
+ dceip->dispclk_per_request = bw_int_to_fixed(2);
+ dceip->dispclk_ramping_factor = bw_frc_to_fixed(105, 100);
+ dceip->display_pipe_throughput_factor = bw_frc_to_fixed(105, 100);
+ dceip->scatter_gather_pte_request_rows_in_tiling_mode = 2;
+ dceip->mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0);
break;
case BW_CALCS_VERSION_POLARIS11:
- vbios.memory_type = bw_def_gddr5;
- vbios.dram_channel_width_in_bits = 32;
- vbios.number_of_dram_channels = asic_id.vram_width / vbios.dram_channel_width_in_bits;
- vbios.number_of_dram_banks = 8;
- vbios.high_yclk = bw_int_to_fixed(6000);
- vbios.mid_yclk = bw_int_to_fixed(3200);
- vbios.low_yclk = bw_int_to_fixed(1000);
- vbios.low_sclk = bw_int_to_fixed(300);
- vbios.mid1_sclk = bw_int_to_fixed(400);
- vbios.mid2_sclk = bw_int_to_fixed(500);
- vbios.mid3_sclk = bw_int_to_fixed(600);
- vbios.mid4_sclk = bw_int_to_fixed(700);
- vbios.mid5_sclk = bw_int_to_fixed(800);
- vbios.mid6_sclk = bw_int_to_fixed(974);
- vbios.high_sclk = bw_int_to_fixed(1154);
- vbios.low_voltage_max_dispclk = bw_int_to_fixed(459);
- vbios.mid_voltage_max_dispclk = bw_int_to_fixed(654);
- vbios.high_voltage_max_dispclk = bw_int_to_fixed(1108);
- vbios.low_voltage_max_phyclk = bw_int_to_fixed(540);
- vbios.mid_voltage_max_phyclk = bw_int_to_fixed(810);
- vbios.high_voltage_max_phyclk = bw_int_to_fixed(810);
- vbios.data_return_bus_width = bw_int_to_fixed(32);
- vbios.trc = bw_int_to_fixed(48);
- if (vbios.number_of_dram_channels == 2) // 64-bit
- vbios.dmifmc_urgent_latency = bw_int_to_fixed(4);
+ vbios->memory_type = bw_def_gddr5;
+ vbios->dram_channel_width_in_bits = 32;
+ vbios->number_of_dram_channels = asic_id.vram_width / vbios->dram_channel_width_in_bits;
+ vbios->number_of_dram_banks = 8;
+ vbios->high_yclk = bw_int_to_fixed(6000);
+ vbios->mid_yclk = bw_int_to_fixed(3200);
+ vbios->low_yclk = bw_int_to_fixed(1000);
+ vbios->low_sclk = bw_int_to_fixed(300);
+ vbios->mid1_sclk = bw_int_to_fixed(400);
+ vbios->mid2_sclk = bw_int_to_fixed(500);
+ vbios->mid3_sclk = bw_int_to_fixed(600);
+ vbios->mid4_sclk = bw_int_to_fixed(700);
+ vbios->mid5_sclk = bw_int_to_fixed(800);
+ vbios->mid6_sclk = bw_int_to_fixed(974);
+ vbios->high_sclk = bw_int_to_fixed(1154);
+ vbios->low_voltage_max_dispclk = bw_int_to_fixed(459);
+ vbios->mid_voltage_max_dispclk = bw_int_to_fixed(654);
+ vbios->high_voltage_max_dispclk = bw_int_to_fixed(1108);
+ vbios->low_voltage_max_phyclk = bw_int_to_fixed(540);
+ vbios->mid_voltage_max_phyclk = bw_int_to_fixed(810);
+ vbios->high_voltage_max_phyclk = bw_int_to_fixed(810);
+ vbios->data_return_bus_width = bw_int_to_fixed(32);
+ vbios->trc = bw_int_to_fixed(48);
+ if (vbios->number_of_dram_channels == 2) // 64-bit
+ vbios->dmifmc_urgent_latency = bw_int_to_fixed(4);
else
- vbios.dmifmc_urgent_latency = bw_int_to_fixed(3);
- vbios.stutter_self_refresh_exit_latency = bw_int_to_fixed(5);
- vbios.stutter_self_refresh_entry_latency = bw_int_to_fixed(0);
- vbios.nbp_state_change_latency = bw_int_to_fixed(45);
- vbios.mcifwrmc_urgent_latency = bw_int_to_fixed(10);
- vbios.scatter_gather_enable = true;
- vbios.down_spread_percentage = bw_frc_to_fixed(5, 10);
- vbios.cursor_width = 32;
- vbios.average_compression_rate = 4;
- vbios.number_of_request_slots_gmc_reserves_for_dmif_per_channel = 256;
- vbios.blackout_duration = bw_int_to_fixed(0); /* us */
- vbios.maximum_blackout_recovery_time = bw_int_to_fixed(0);
-
- dceip.max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation = 100;
- dceip.max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation = 100;
- dceip.percent_of_ideal_port_bw_received_after_urgent_latency = 100;
- dceip.large_cursor = false;
- dceip.dmif_request_buffer_size = bw_int_to_fixed(768);
- dceip.dmif_pipe_en_fbc_chunk_tracker = false;
- dceip.cursor_max_outstanding_group_num = 1;
- dceip.lines_interleaved_into_lb = 2;
- dceip.chunk_width = 256;
- dceip.number_of_graphics_pipes = 5;
- dceip.number_of_underlay_pipes = 0;
- dceip.low_power_tiling_mode = 0;
- dceip.display_write_back_supported = false;
- dceip.argb_compression_support = true;
- dceip.underlay_vscaler_efficiency6_bit_per_component =
+ vbios->dmifmc_urgent_latency = bw_int_to_fixed(3);
+ vbios->stutter_self_refresh_exit_latency = bw_int_to_fixed(5);
+ vbios->stutter_self_refresh_entry_latency = bw_int_to_fixed(0);
+ vbios->nbp_state_change_latency = bw_int_to_fixed(45);
+ vbios->mcifwrmc_urgent_latency = bw_int_to_fixed(10);
+ vbios->scatter_gather_enable = true;
+ vbios->down_spread_percentage = bw_frc_to_fixed(5, 10);
+ vbios->cursor_width = 32;
+ vbios->average_compression_rate = 4;
+ vbios->number_of_request_slots_gmc_reserves_for_dmif_per_channel = 256;
+ vbios->blackout_duration = bw_int_to_fixed(0); /* us */
+ vbios->maximum_blackout_recovery_time = bw_int_to_fixed(0);
+
+ dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation = 100;
+ dceip->max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation = 100;
+ dceip->percent_of_ideal_port_bw_received_after_urgent_latency = 100;
+ dceip->large_cursor = false;
+ dceip->dmif_request_buffer_size = bw_int_to_fixed(768);
+ dceip->dmif_pipe_en_fbc_chunk_tracker = false;
+ dceip->cursor_max_outstanding_group_num = 1;
+ dceip->lines_interleaved_into_lb = 2;
+ dceip->chunk_width = 256;
+ dceip->number_of_graphics_pipes = 5;
+ dceip->number_of_underlay_pipes = 0;
+ dceip->low_power_tiling_mode = 0;
+ dceip->display_write_back_supported = false;
+ dceip->argb_compression_support = true;
+ dceip->underlay_vscaler_efficiency6_bit_per_component =
bw_frc_to_fixed(35556, 10000);
- dceip.underlay_vscaler_efficiency8_bit_per_component =
+ dceip->underlay_vscaler_efficiency8_bit_per_component =
bw_frc_to_fixed(34286, 10000);
- dceip.underlay_vscaler_efficiency10_bit_per_component =
+ dceip->underlay_vscaler_efficiency10_bit_per_component =
bw_frc_to_fixed(32, 10);
- dceip.underlay_vscaler_efficiency12_bit_per_component =
+ dceip->underlay_vscaler_efficiency12_bit_per_component =
bw_int_to_fixed(3);
- dceip.graphics_vscaler_efficiency6_bit_per_component =
+ dceip->graphics_vscaler_efficiency6_bit_per_component =
bw_frc_to_fixed(35, 10);
- dceip.graphics_vscaler_efficiency8_bit_per_component =
+ dceip->graphics_vscaler_efficiency8_bit_per_component =
bw_frc_to_fixed(34286, 10000);
- dceip.graphics_vscaler_efficiency10_bit_per_component =
+ dceip->graphics_vscaler_efficiency10_bit_per_component =
bw_frc_to_fixed(32, 10);
- dceip.graphics_vscaler_efficiency12_bit_per_component =
+ dceip->graphics_vscaler_efficiency12_bit_per_component =
bw_int_to_fixed(3);
- dceip.alpha_vscaler_efficiency = bw_int_to_fixed(3);
- dceip.max_dmif_buffer_allocated = 4;
- dceip.graphics_dmif_size = 12288;
- dceip.underlay_luma_dmif_size = 19456;
- dceip.underlay_chroma_dmif_size = 23552;
- dceip.pre_downscaler_enabled = true;
- dceip.underlay_downscale_prefetch_enabled = true;
- dceip.lb_write_pixels_per_dispclk = bw_int_to_fixed(1);
- dceip.lb_size_per_component444 = bw_int_to_fixed(245952);
- dceip.graphics_lb_nodownscaling_multi_line_prefetching = true;
- dceip.stutter_and_dram_clock_state_change_gated_before_cursor =
+ dceip->alpha_vscaler_efficiency = bw_int_to_fixed(3);
+ dceip->max_dmif_buffer_allocated = 4;
+ dceip->graphics_dmif_size = 12288;
+ dceip->underlay_luma_dmif_size = 19456;
+ dceip->underlay_chroma_dmif_size = 23552;
+ dceip->pre_downscaler_enabled = true;
+ dceip->underlay_downscale_prefetch_enabled = true;
+ dceip->lb_write_pixels_per_dispclk = bw_int_to_fixed(1);
+ dceip->lb_size_per_component444 = bw_int_to_fixed(245952);
+ dceip->graphics_lb_nodownscaling_multi_line_prefetching = true;
+ dceip->stutter_and_dram_clock_state_change_gated_before_cursor =
bw_int_to_fixed(1);
- dceip.underlay420_luma_lb_size_per_component = bw_int_to_fixed(
+ dceip->underlay420_luma_lb_size_per_component = bw_int_to_fixed(
82176);
- dceip.underlay420_chroma_lb_size_per_component =
+ dceip->underlay420_chroma_lb_size_per_component =
bw_int_to_fixed(164352);
- dceip.underlay422_lb_size_per_component = bw_int_to_fixed(
+ dceip->underlay422_lb_size_per_component = bw_int_to_fixed(
82176);
- dceip.cursor_chunk_width = bw_int_to_fixed(64);
- dceip.cursor_dcp_buffer_lines = bw_int_to_fixed(4);
- dceip.underlay_maximum_width_efficient_for_tiling =
+ dceip->cursor_chunk_width = bw_int_to_fixed(64);
+ dceip->cursor_dcp_buffer_lines = bw_int_to_fixed(4);
+ dceip->underlay_maximum_width_efficient_for_tiling =
bw_int_to_fixed(1920);
- dceip.underlay_maximum_height_efficient_for_tiling =
+ dceip->underlay_maximum_height_efficient_for_tiling =
bw_int_to_fixed(1080);
- dceip.peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display =
+ dceip->peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display =
bw_frc_to_fixed(3, 10);
- dceip.peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation =
+ dceip->peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation =
bw_int_to_fixed(25);
- dceip.minimum_outstanding_pte_request_limit = bw_int_to_fixed(
+ dceip->minimum_outstanding_pte_request_limit = bw_int_to_fixed(
2);
- dceip.maximum_total_outstanding_pte_requests_allowed_by_saw =
+ dceip->maximum_total_outstanding_pte_requests_allowed_by_saw =
bw_int_to_fixed(128);
- dceip.limit_excessive_outstanding_dmif_requests = true;
- dceip.linear_mode_line_request_alternation_slice =
+ dceip->limit_excessive_outstanding_dmif_requests = true;
+ dceip->linear_mode_line_request_alternation_slice =
bw_int_to_fixed(64);
- dceip.scatter_gather_lines_of_pte_prefetching_in_linear_mode =
+ dceip->scatter_gather_lines_of_pte_prefetching_in_linear_mode =
32;
- dceip.display_write_back420_luma_mcifwr_buffer_size = 12288;
- dceip.display_write_back420_chroma_mcifwr_buffer_size = 8192;
- dceip.request_efficiency = bw_frc_to_fixed(8, 10);
- dceip.dispclk_per_request = bw_int_to_fixed(2);
- dceip.dispclk_ramping_factor = bw_frc_to_fixed(105, 100);
- dceip.display_pipe_throughput_factor = bw_frc_to_fixed(105, 100);
- dceip.scatter_gather_pte_request_rows_in_tiling_mode = 2;
- dceip.mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0);
+ dceip->display_write_back420_luma_mcifwr_buffer_size = 12288;
+ dceip->display_write_back420_chroma_mcifwr_buffer_size = 8192;
+ dceip->request_efficiency = bw_frc_to_fixed(8, 10);
+ dceip->dispclk_per_request = bw_int_to_fixed(2);
+ dceip->dispclk_ramping_factor = bw_frc_to_fixed(105, 100);
+ dceip->display_pipe_throughput_factor = bw_frc_to_fixed(105, 100);
+ dceip->scatter_gather_pte_request_rows_in_tiling_mode = 2;
+ dceip->mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0);
break;
case BW_CALCS_VERSION_POLARIS12:
- vbios.memory_type = bw_def_gddr5;
- vbios.dram_channel_width_in_bits = 32;
- vbios.number_of_dram_channels = asic_id.vram_width / vbios.dram_channel_width_in_bits;
- vbios.number_of_dram_banks = 8;
- vbios.high_yclk = bw_int_to_fixed(6000);
- vbios.mid_yclk = bw_int_to_fixed(3200);
- vbios.low_yclk = bw_int_to_fixed(1000);
- vbios.low_sclk = bw_int_to_fixed(678);
- vbios.mid1_sclk = bw_int_to_fixed(864);
- vbios.mid2_sclk = bw_int_to_fixed(900);
- vbios.mid3_sclk = bw_int_to_fixed(920);
- vbios.mid4_sclk = bw_int_to_fixed(940);
- vbios.mid5_sclk = bw_int_to_fixed(960);
- vbios.mid6_sclk = bw_int_to_fixed(980);
- vbios.high_sclk = bw_int_to_fixed(1049);
- vbios.low_voltage_max_dispclk = bw_int_to_fixed(459);
- vbios.mid_voltage_max_dispclk = bw_int_to_fixed(654);
- vbios.high_voltage_max_dispclk = bw_int_to_fixed(1108);
- vbios.low_voltage_max_phyclk = bw_int_to_fixed(540);
- vbios.mid_voltage_max_phyclk = bw_int_to_fixed(810);
- vbios.high_voltage_max_phyclk = bw_int_to_fixed(810);
- vbios.data_return_bus_width = bw_int_to_fixed(32);
- vbios.trc = bw_int_to_fixed(48);
- if (vbios.number_of_dram_channels == 2) // 64-bit
- vbios.dmifmc_urgent_latency = bw_int_to_fixed(4);
+ vbios->memory_type = bw_def_gddr5;
+ vbios->dram_channel_width_in_bits = 32;
+ vbios->number_of_dram_channels = asic_id.vram_width / vbios->dram_channel_width_in_bits;
+ vbios->number_of_dram_banks = 8;
+ vbios->high_yclk = bw_int_to_fixed(6000);
+ vbios->mid_yclk = bw_int_to_fixed(3200);
+ vbios->low_yclk = bw_int_to_fixed(1000);
+ vbios->low_sclk = bw_int_to_fixed(678);
+ vbios->mid1_sclk = bw_int_to_fixed(864);
+ vbios->mid2_sclk = bw_int_to_fixed(900);
+ vbios->mid3_sclk = bw_int_to_fixed(920);
+ vbios->mid4_sclk = bw_int_to_fixed(940);
+ vbios->mid5_sclk = bw_int_to_fixed(960);
+ vbios->mid6_sclk = bw_int_to_fixed(980);
+ vbios->high_sclk = bw_int_to_fixed(1049);
+ vbios->low_voltage_max_dispclk = bw_int_to_fixed(459);
+ vbios->mid_voltage_max_dispclk = bw_int_to_fixed(654);
+ vbios->high_voltage_max_dispclk = bw_int_to_fixed(1108);
+ vbios->low_voltage_max_phyclk = bw_int_to_fixed(540);
+ vbios->mid_voltage_max_phyclk = bw_int_to_fixed(810);
+ vbios->high_voltage_max_phyclk = bw_int_to_fixed(810);
+ vbios->data_return_bus_width = bw_int_to_fixed(32);
+ vbios->trc = bw_int_to_fixed(48);
+ if (vbios->number_of_dram_channels == 2) // 64-bit
+ vbios->dmifmc_urgent_latency = bw_int_to_fixed(4);
else
- vbios.dmifmc_urgent_latency = bw_int_to_fixed(3);
- vbios.stutter_self_refresh_exit_latency = bw_int_to_fixed(5);
- vbios.stutter_self_refresh_entry_latency = bw_int_to_fixed(0);
- vbios.nbp_state_change_latency = bw_int_to_fixed(250);
- vbios.mcifwrmc_urgent_latency = bw_int_to_fixed(10);
- vbios.scatter_gather_enable = false;
- vbios.down_spread_percentage = bw_frc_to_fixed(5, 10);
- vbios.cursor_width = 32;
- vbios.average_compression_rate = 4;
- vbios.number_of_request_slots_gmc_reserves_for_dmif_per_channel = 256;
- vbios.blackout_duration = bw_int_to_fixed(0); /* us */
- vbios.maximum_blackout_recovery_time = bw_int_to_fixed(0);
-
- dceip.max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation = 100;
- dceip.max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation = 100;
- dceip.percent_of_ideal_port_bw_received_after_urgent_latency = 100;
- dceip.large_cursor = false;
- dceip.dmif_request_buffer_size = bw_int_to_fixed(768);
- dceip.dmif_pipe_en_fbc_chunk_tracker = false;
- dceip.cursor_max_outstanding_group_num = 1;
- dceip.lines_interleaved_into_lb = 2;
- dceip.chunk_width = 256;
- dceip.number_of_graphics_pipes = 5;
- dceip.number_of_underlay_pipes = 0;
- dceip.low_power_tiling_mode = 0;
- dceip.display_write_back_supported = true;
- dceip.argb_compression_support = true;
- dceip.underlay_vscaler_efficiency6_bit_per_component =
+ vbios->dmifmc_urgent_latency = bw_int_to_fixed(3);
+ vbios->stutter_self_refresh_exit_latency = bw_int_to_fixed(5);
+ vbios->stutter_self_refresh_entry_latency = bw_int_to_fixed(0);
+ vbios->nbp_state_change_latency = bw_int_to_fixed(250);
+ vbios->mcifwrmc_urgent_latency = bw_int_to_fixed(10);
+ vbios->scatter_gather_enable = false;
+ vbios->down_spread_percentage = bw_frc_to_fixed(5, 10);
+ vbios->cursor_width = 32;
+ vbios->average_compression_rate = 4;
+ vbios->number_of_request_slots_gmc_reserves_for_dmif_per_channel = 256;
+ vbios->blackout_duration = bw_int_to_fixed(0); /* us */
+ vbios->maximum_blackout_recovery_time = bw_int_to_fixed(0);
+
+ dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation = 100;
+ dceip->max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation = 100;
+ dceip->percent_of_ideal_port_bw_received_after_urgent_latency = 100;
+ dceip->large_cursor = false;
+ dceip->dmif_request_buffer_size = bw_int_to_fixed(768);
+ dceip->dmif_pipe_en_fbc_chunk_tracker = false;
+ dceip->cursor_max_outstanding_group_num = 1;
+ dceip->lines_interleaved_into_lb = 2;
+ dceip->chunk_width = 256;
+ dceip->number_of_graphics_pipes = 5;
+ dceip->number_of_underlay_pipes = 0;
+ dceip->low_power_tiling_mode = 0;
+ dceip->display_write_back_supported = true;
+ dceip->argb_compression_support = true;
+ dceip->underlay_vscaler_efficiency6_bit_per_component =
bw_frc_to_fixed(35556, 10000);
- dceip.underlay_vscaler_efficiency8_bit_per_component =
+ dceip->underlay_vscaler_efficiency8_bit_per_component =
bw_frc_to_fixed(34286, 10000);
- dceip.underlay_vscaler_efficiency10_bit_per_component =
+ dceip->underlay_vscaler_efficiency10_bit_per_component =
bw_frc_to_fixed(32, 10);
- dceip.underlay_vscaler_efficiency12_bit_per_component =
+ dceip->underlay_vscaler_efficiency12_bit_per_component =
bw_int_to_fixed(3);
- dceip.graphics_vscaler_efficiency6_bit_per_component =
+ dceip->graphics_vscaler_efficiency6_bit_per_component =
bw_frc_to_fixed(35, 10);
- dceip.graphics_vscaler_efficiency8_bit_per_component =
+ dceip->graphics_vscaler_efficiency8_bit_per_component =
bw_frc_to_fixed(34286, 10000);
- dceip.graphics_vscaler_efficiency10_bit_per_component =
+ dceip->graphics_vscaler_efficiency10_bit_per_component =
bw_frc_to_fixed(32, 10);
- dceip.graphics_vscaler_efficiency12_bit_per_component =
+ dceip->graphics_vscaler_efficiency12_bit_per_component =
bw_int_to_fixed(3);
- dceip.alpha_vscaler_efficiency = bw_int_to_fixed(3);
- dceip.max_dmif_buffer_allocated = 4;
- dceip.graphics_dmif_size = 12288;
- dceip.underlay_luma_dmif_size = 19456;
- dceip.underlay_chroma_dmif_size = 23552;
- dceip.pre_downscaler_enabled = true;
- dceip.underlay_downscale_prefetch_enabled = true;
- dceip.lb_write_pixels_per_dispclk = bw_int_to_fixed(1);
- dceip.lb_size_per_component444 = bw_int_to_fixed(245952);
- dceip.graphics_lb_nodownscaling_multi_line_prefetching = true;
- dceip.stutter_and_dram_clock_state_change_gated_before_cursor =
+ dceip->alpha_vscaler_efficiency = bw_int_to_fixed(3);
+ dceip->max_dmif_buffer_allocated = 4;
+ dceip->graphics_dmif_size = 12288;
+ dceip->underlay_luma_dmif_size = 19456;
+ dceip->underlay_chroma_dmif_size = 23552;
+ dceip->pre_downscaler_enabled = true;
+ dceip->underlay_downscale_prefetch_enabled = true;
+ dceip->lb_write_pixels_per_dispclk = bw_int_to_fixed(1);
+ dceip->lb_size_per_component444 = bw_int_to_fixed(245952);
+ dceip->graphics_lb_nodownscaling_multi_line_prefetching = true;
+ dceip->stutter_and_dram_clock_state_change_gated_before_cursor =
bw_int_to_fixed(1);
- dceip.underlay420_luma_lb_size_per_component = bw_int_to_fixed(
+ dceip->underlay420_luma_lb_size_per_component = bw_int_to_fixed(
82176);
- dceip.underlay420_chroma_lb_size_per_component =
+ dceip->underlay420_chroma_lb_size_per_component =
bw_int_to_fixed(164352);
- dceip.underlay422_lb_size_per_component = bw_int_to_fixed(
+ dceip->underlay422_lb_size_per_component = bw_int_to_fixed(
82176);
- dceip.cursor_chunk_width = bw_int_to_fixed(64);
- dceip.cursor_dcp_buffer_lines = bw_int_to_fixed(4);
- dceip.underlay_maximum_width_efficient_for_tiling =
+ dceip->cursor_chunk_width = bw_int_to_fixed(64);
+ dceip->cursor_dcp_buffer_lines = bw_int_to_fixed(4);
+ dceip->underlay_maximum_width_efficient_for_tiling =
bw_int_to_fixed(1920);
- dceip.underlay_maximum_height_efficient_for_tiling =
+ dceip->underlay_maximum_height_efficient_for_tiling =
bw_int_to_fixed(1080);
- dceip.peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display =
+ dceip->peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display =
bw_frc_to_fixed(3, 10);
- dceip.peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation =
+ dceip->peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation =
bw_int_to_fixed(25);
- dceip.minimum_outstanding_pte_request_limit = bw_int_to_fixed(
+ dceip->minimum_outstanding_pte_request_limit = bw_int_to_fixed(
2);
- dceip.maximum_total_outstanding_pte_requests_allowed_by_saw =
+ dceip->maximum_total_outstanding_pte_requests_allowed_by_saw =
bw_int_to_fixed(128);
- dceip.limit_excessive_outstanding_dmif_requests = true;
- dceip.linear_mode_line_request_alternation_slice =
+ dceip->limit_excessive_outstanding_dmif_requests = true;
+ dceip->linear_mode_line_request_alternation_slice =
bw_int_to_fixed(64);
- dceip.scatter_gather_lines_of_pte_prefetching_in_linear_mode =
+ dceip->scatter_gather_lines_of_pte_prefetching_in_linear_mode =
32;
- dceip.display_write_back420_luma_mcifwr_buffer_size = 12288;
- dceip.display_write_back420_chroma_mcifwr_buffer_size = 8192;
- dceip.request_efficiency = bw_frc_to_fixed(8, 10);
- dceip.dispclk_per_request = bw_int_to_fixed(2);
- dceip.dispclk_ramping_factor = bw_frc_to_fixed(105, 100);
- dceip.display_pipe_throughput_factor = bw_frc_to_fixed(105, 100);
- dceip.scatter_gather_pte_request_rows_in_tiling_mode = 2;
- dceip.mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0);
+ dceip->display_write_back420_luma_mcifwr_buffer_size = 12288;
+ dceip->display_write_back420_chroma_mcifwr_buffer_size = 8192;
+ dceip->request_efficiency = bw_frc_to_fixed(8, 10);
+ dceip->dispclk_per_request = bw_int_to_fixed(2);
+ dceip->dispclk_ramping_factor = bw_frc_to_fixed(105, 100);
+ dceip->display_pipe_throughput_factor = bw_frc_to_fixed(105, 100);
+ dceip->scatter_gather_pte_request_rows_in_tiling_mode = 2;
+ dceip->mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0);
break;
case BW_CALCS_VERSION_STONEY:
- vbios.memory_type = bw_def_gddr5;
- vbios.dram_channel_width_in_bits = 64;
- vbios.number_of_dram_channels = asic_id.vram_width / vbios.dram_channel_width_in_bits;
- vbios.number_of_dram_banks = 8;
- vbios.high_yclk = bw_int_to_fixed(1866);
- vbios.mid_yclk = bw_int_to_fixed(1866);
- vbios.low_yclk = bw_int_to_fixed(1333);
- vbios.low_sclk = bw_int_to_fixed(200);
- vbios.mid1_sclk = bw_int_to_fixed(600);
- vbios.mid2_sclk = bw_int_to_fixed(600);
- vbios.mid3_sclk = bw_int_to_fixed(600);
- vbios.mid4_sclk = bw_int_to_fixed(600);
- vbios.mid5_sclk = bw_int_to_fixed(600);
- vbios.mid6_sclk = bw_int_to_fixed(600);
- vbios.high_sclk = bw_int_to_fixed(800);
- vbios.low_voltage_max_dispclk = bw_int_to_fixed(352);
- vbios.mid_voltage_max_dispclk = bw_int_to_fixed(467);
- vbios.high_voltage_max_dispclk = bw_int_to_fixed(643);
- vbios.low_voltage_max_phyclk = bw_int_to_fixed(540);
- vbios.mid_voltage_max_phyclk = bw_int_to_fixed(810);
- vbios.high_voltage_max_phyclk = bw_int_to_fixed(810);
- vbios.data_return_bus_width = bw_int_to_fixed(32);
- vbios.trc = bw_int_to_fixed(50);
- vbios.dmifmc_urgent_latency = bw_int_to_fixed(4);
- vbios.stutter_self_refresh_exit_latency = bw_frc_to_fixed(158, 10);
- vbios.stutter_self_refresh_entry_latency = bw_int_to_fixed(0);
- vbios.nbp_state_change_latency = bw_frc_to_fixed(2008, 100);
- vbios.mcifwrmc_urgent_latency = bw_int_to_fixed(10);
- vbios.scatter_gather_enable = true;
- vbios.down_spread_percentage = bw_frc_to_fixed(5, 10);
- vbios.cursor_width = 32;
- vbios.average_compression_rate = 4;
- vbios.number_of_request_slots_gmc_reserves_for_dmif_per_channel = 256;
- vbios.blackout_duration = bw_int_to_fixed(0); /* us */
- vbios.maximum_blackout_recovery_time = bw_int_to_fixed(0);
-
- dceip.max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation = 100;
- dceip.max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation = 100;
- dceip.percent_of_ideal_port_bw_received_after_urgent_latency = 100;
- dceip.large_cursor = false;
- dceip.dmif_request_buffer_size = bw_int_to_fixed(768);
- dceip.dmif_pipe_en_fbc_chunk_tracker = false;
- dceip.cursor_max_outstanding_group_num = 1;
- dceip.lines_interleaved_into_lb = 2;
- dceip.chunk_width = 256;
- dceip.number_of_graphics_pipes = 2;
- dceip.number_of_underlay_pipes = 1;
- dceip.low_power_tiling_mode = 0;
- dceip.display_write_back_supported = false;
- dceip.argb_compression_support = true;
- dceip.underlay_vscaler_efficiency6_bit_per_component =
+ vbios->memory_type = bw_def_gddr5;
+ vbios->dram_channel_width_in_bits = 64;
+ vbios->number_of_dram_channels = asic_id.vram_width / vbios->dram_channel_width_in_bits;
+ vbios->number_of_dram_banks = 8;
+ vbios->high_yclk = bw_int_to_fixed(1866);
+ vbios->mid_yclk = bw_int_to_fixed(1866);
+ vbios->low_yclk = bw_int_to_fixed(1333);
+ vbios->low_sclk = bw_int_to_fixed(200);
+ vbios->mid1_sclk = bw_int_to_fixed(600);
+ vbios->mid2_sclk = bw_int_to_fixed(600);
+ vbios->mid3_sclk = bw_int_to_fixed(600);
+ vbios->mid4_sclk = bw_int_to_fixed(600);
+ vbios->mid5_sclk = bw_int_to_fixed(600);
+ vbios->mid6_sclk = bw_int_to_fixed(600);
+ vbios->high_sclk = bw_int_to_fixed(800);
+ vbios->low_voltage_max_dispclk = bw_int_to_fixed(352);
+ vbios->mid_voltage_max_dispclk = bw_int_to_fixed(467);
+ vbios->high_voltage_max_dispclk = bw_int_to_fixed(643);
+ vbios->low_voltage_max_phyclk = bw_int_to_fixed(540);
+ vbios->mid_voltage_max_phyclk = bw_int_to_fixed(810);
+ vbios->high_voltage_max_phyclk = bw_int_to_fixed(810);
+ vbios->data_return_bus_width = bw_int_to_fixed(32);
+ vbios->trc = bw_int_to_fixed(50);
+ vbios->dmifmc_urgent_latency = bw_int_to_fixed(4);
+ vbios->stutter_self_refresh_exit_latency = bw_frc_to_fixed(158, 10);
+ vbios->stutter_self_refresh_entry_latency = bw_int_to_fixed(0);
+ vbios->nbp_state_change_latency = bw_frc_to_fixed(2008, 100);
+ vbios->mcifwrmc_urgent_latency = bw_int_to_fixed(10);
+ vbios->scatter_gather_enable = true;
+ vbios->down_spread_percentage = bw_frc_to_fixed(5, 10);
+ vbios->cursor_width = 32;
+ vbios->average_compression_rate = 4;
+ vbios->number_of_request_slots_gmc_reserves_for_dmif_per_channel = 256;
+ vbios->blackout_duration = bw_int_to_fixed(0); /* us */
+ vbios->maximum_blackout_recovery_time = bw_int_to_fixed(0);
+
+ dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation = 100;
+ dceip->max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation = 100;
+ dceip->percent_of_ideal_port_bw_received_after_urgent_latency = 100;
+ dceip->large_cursor = false;
+ dceip->dmif_request_buffer_size = bw_int_to_fixed(768);
+ dceip->dmif_pipe_en_fbc_chunk_tracker = false;
+ dceip->cursor_max_outstanding_group_num = 1;
+ dceip->lines_interleaved_into_lb = 2;
+ dceip->chunk_width = 256;
+ dceip->number_of_graphics_pipes = 2;
+ dceip->number_of_underlay_pipes = 1;
+ dceip->low_power_tiling_mode = 0;
+ dceip->display_write_back_supported = false;
+ dceip->argb_compression_support = true;
+ dceip->underlay_vscaler_efficiency6_bit_per_component =
bw_frc_to_fixed(35556, 10000);
- dceip.underlay_vscaler_efficiency8_bit_per_component =
+ dceip->underlay_vscaler_efficiency8_bit_per_component =
bw_frc_to_fixed(34286, 10000);
- dceip.underlay_vscaler_efficiency10_bit_per_component =
+ dceip->underlay_vscaler_efficiency10_bit_per_component =
bw_frc_to_fixed(32, 10);
- dceip.underlay_vscaler_efficiency12_bit_per_component =
+ dceip->underlay_vscaler_efficiency12_bit_per_component =
bw_int_to_fixed(3);
- dceip.graphics_vscaler_efficiency6_bit_per_component =
+ dceip->graphics_vscaler_efficiency6_bit_per_component =
bw_frc_to_fixed(35, 10);
- dceip.graphics_vscaler_efficiency8_bit_per_component =
+ dceip->graphics_vscaler_efficiency8_bit_per_component =
bw_frc_to_fixed(34286, 10000);
- dceip.graphics_vscaler_efficiency10_bit_per_component =
+ dceip->graphics_vscaler_efficiency10_bit_per_component =
bw_frc_to_fixed(32, 10);
- dceip.graphics_vscaler_efficiency12_bit_per_component =
+ dceip->graphics_vscaler_efficiency12_bit_per_component =
bw_int_to_fixed(3);
- dceip.alpha_vscaler_efficiency = bw_int_to_fixed(3);
- dceip.max_dmif_buffer_allocated = 2;
- dceip.graphics_dmif_size = 12288;
- dceip.underlay_luma_dmif_size = 19456;
- dceip.underlay_chroma_dmif_size = 23552;
- dceip.pre_downscaler_enabled = true;
- dceip.underlay_downscale_prefetch_enabled = true;
- dceip.lb_write_pixels_per_dispclk = bw_int_to_fixed(1);
- dceip.lb_size_per_component444 = bw_int_to_fixed(82176);
- dceip.graphics_lb_nodownscaling_multi_line_prefetching = false;
- dceip.stutter_and_dram_clock_state_change_gated_before_cursor =
+ dceip->alpha_vscaler_efficiency = bw_int_to_fixed(3);
+ dceip->max_dmif_buffer_allocated = 2;
+ dceip->graphics_dmif_size = 12288;
+ dceip->underlay_luma_dmif_size = 19456;
+ dceip->underlay_chroma_dmif_size = 23552;
+ dceip->pre_downscaler_enabled = true;
+ dceip->underlay_downscale_prefetch_enabled = true;
+ dceip->lb_write_pixels_per_dispclk = bw_int_to_fixed(1);
+ dceip->lb_size_per_component444 = bw_int_to_fixed(82176);
+ dceip->graphics_lb_nodownscaling_multi_line_prefetching = false;
+ dceip->stutter_and_dram_clock_state_change_gated_before_cursor =
bw_int_to_fixed(0);
- dceip.underlay420_luma_lb_size_per_component = bw_int_to_fixed(
+ dceip->underlay420_luma_lb_size_per_component = bw_int_to_fixed(
82176);
- dceip.underlay420_chroma_lb_size_per_component =
+ dceip->underlay420_chroma_lb_size_per_component =
bw_int_to_fixed(164352);
- dceip.underlay422_lb_size_per_component = bw_int_to_fixed(
+ dceip->underlay422_lb_size_per_component = bw_int_to_fixed(
82176);
- dceip.cursor_chunk_width = bw_int_to_fixed(64);
- dceip.cursor_dcp_buffer_lines = bw_int_to_fixed(4);
- dceip.underlay_maximum_width_efficient_for_tiling =
+ dceip->cursor_chunk_width = bw_int_to_fixed(64);
+ dceip->cursor_dcp_buffer_lines = bw_int_to_fixed(4);
+ dceip->underlay_maximum_width_efficient_for_tiling =
bw_int_to_fixed(1920);
- dceip.underlay_maximum_height_efficient_for_tiling =
+ dceip->underlay_maximum_height_efficient_for_tiling =
bw_int_to_fixed(1080);
- dceip.peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display =
+ dceip->peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display =
bw_frc_to_fixed(3, 10);
- dceip.peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation =
+ dceip->peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation =
bw_int_to_fixed(25);
- dceip.minimum_outstanding_pte_request_limit = bw_int_to_fixed(
+ dceip->minimum_outstanding_pte_request_limit = bw_int_to_fixed(
2);
- dceip.maximum_total_outstanding_pte_requests_allowed_by_saw =
+ dceip->maximum_total_outstanding_pte_requests_allowed_by_saw =
bw_int_to_fixed(128);
- dceip.limit_excessive_outstanding_dmif_requests = true;
- dceip.linear_mode_line_request_alternation_slice =
+ dceip->limit_excessive_outstanding_dmif_requests = true;
+ dceip->linear_mode_line_request_alternation_slice =
bw_int_to_fixed(64);
- dceip.scatter_gather_lines_of_pte_prefetching_in_linear_mode =
+ dceip->scatter_gather_lines_of_pte_prefetching_in_linear_mode =
32;
- dceip.display_write_back420_luma_mcifwr_buffer_size = 12288;
- dceip.display_write_back420_chroma_mcifwr_buffer_size = 8192;
- dceip.request_efficiency = bw_frc_to_fixed(8, 10);
- dceip.dispclk_per_request = bw_int_to_fixed(2);
- dceip.dispclk_ramping_factor = bw_frc_to_fixed(105, 100);
- dceip.display_pipe_throughput_factor = bw_frc_to_fixed(105, 100);
- dceip.scatter_gather_pte_request_rows_in_tiling_mode = 2;
- dceip.mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0);
+ dceip->display_write_back420_luma_mcifwr_buffer_size = 12288;
+ dceip->display_write_back420_chroma_mcifwr_buffer_size = 8192;
+ dceip->request_efficiency = bw_frc_to_fixed(8, 10);
+ dceip->dispclk_per_request = bw_int_to_fixed(2);
+ dceip->dispclk_ramping_factor = bw_frc_to_fixed(105, 100);
+ dceip->display_pipe_throughput_factor = bw_frc_to_fixed(105, 100);
+ dceip->scatter_gather_pte_request_rows_in_tiling_mode = 2;
+ dceip->mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0);
break;
case BW_CALCS_VERSION_VEGA10:
- vbios.memory_type = bw_def_hbm;
- vbios.dram_channel_width_in_bits = 128;
- vbios.number_of_dram_channels = asic_id.vram_width / vbios.dram_channel_width_in_bits;
- vbios.number_of_dram_banks = 16;
- vbios.high_yclk = bw_int_to_fixed(2400);
- vbios.mid_yclk = bw_int_to_fixed(1700);
- vbios.low_yclk = bw_int_to_fixed(1000);
- vbios.low_sclk = bw_int_to_fixed(300);
- vbios.mid1_sclk = bw_int_to_fixed(350);
- vbios.mid2_sclk = bw_int_to_fixed(400);
- vbios.mid3_sclk = bw_int_to_fixed(500);
- vbios.mid4_sclk = bw_int_to_fixed(600);
- vbios.mid5_sclk = bw_int_to_fixed(700);
- vbios.mid6_sclk = bw_int_to_fixed(760);
- vbios.high_sclk = bw_int_to_fixed(776);
- vbios.low_voltage_max_dispclk = bw_int_to_fixed(460);
- vbios.mid_voltage_max_dispclk = bw_int_to_fixed(670);
- vbios.high_voltage_max_dispclk = bw_int_to_fixed(1133);
- vbios.low_voltage_max_phyclk = bw_int_to_fixed(540);
- vbios.mid_voltage_max_phyclk = bw_int_to_fixed(810);
- vbios.high_voltage_max_phyclk = bw_int_to_fixed(810);
- vbios.data_return_bus_width = bw_int_to_fixed(32);
- vbios.trc = bw_int_to_fixed(48);
- vbios.dmifmc_urgent_latency = bw_int_to_fixed(3);
- vbios.stutter_self_refresh_exit_latency = bw_frc_to_fixed(75, 10);
- vbios.stutter_self_refresh_entry_latency = bw_frc_to_fixed(19, 10);
- vbios.nbp_state_change_latency = bw_int_to_fixed(39);
- vbios.mcifwrmc_urgent_latency = bw_int_to_fixed(10);
- vbios.scatter_gather_enable = false;
- vbios.down_spread_percentage = bw_frc_to_fixed(5, 10);
- vbios.cursor_width = 32;
- vbios.average_compression_rate = 4;
- vbios.number_of_request_slots_gmc_reserves_for_dmif_per_channel = 8;
- vbios.blackout_duration = bw_int_to_fixed(0); /* us */
- vbios.maximum_blackout_recovery_time = bw_int_to_fixed(0);
-
- dceip.max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation = 100;
- dceip.max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation = 100;
- dceip.percent_of_ideal_port_bw_received_after_urgent_latency = 100;
- dceip.large_cursor = false;
- dceip.dmif_request_buffer_size = bw_int_to_fixed(2304);
- dceip.dmif_pipe_en_fbc_chunk_tracker = true;
- dceip.cursor_max_outstanding_group_num = 1;
- dceip.lines_interleaved_into_lb = 2;
- dceip.chunk_width = 256;
- dceip.number_of_graphics_pipes = 6;
- dceip.number_of_underlay_pipes = 0;
- dceip.low_power_tiling_mode = 0;
- dceip.display_write_back_supported = true;
- dceip.argb_compression_support = true;
- dceip.underlay_vscaler_efficiency6_bit_per_component =
+ vbios->memory_type = bw_def_hbm;
+ vbios->dram_channel_width_in_bits = 128;
+ vbios->number_of_dram_channels = asic_id.vram_width / vbios->dram_channel_width_in_bits;
+ vbios->number_of_dram_banks = 16;
+ vbios->high_yclk = bw_int_to_fixed(2400);
+ vbios->mid_yclk = bw_int_to_fixed(1700);
+ vbios->low_yclk = bw_int_to_fixed(1000);
+ vbios->low_sclk = bw_int_to_fixed(300);
+ vbios->mid1_sclk = bw_int_to_fixed(350);
+ vbios->mid2_sclk = bw_int_to_fixed(400);
+ vbios->mid3_sclk = bw_int_to_fixed(500);
+ vbios->mid4_sclk = bw_int_to_fixed(600);
+ vbios->mid5_sclk = bw_int_to_fixed(700);
+ vbios->mid6_sclk = bw_int_to_fixed(760);
+ vbios->high_sclk = bw_int_to_fixed(776);
+ vbios->low_voltage_max_dispclk = bw_int_to_fixed(460);
+ vbios->mid_voltage_max_dispclk = bw_int_to_fixed(670);
+ vbios->high_voltage_max_dispclk = bw_int_to_fixed(1133);
+ vbios->low_voltage_max_phyclk = bw_int_to_fixed(540);
+ vbios->mid_voltage_max_phyclk = bw_int_to_fixed(810);
+ vbios->high_voltage_max_phyclk = bw_int_to_fixed(810);
+ vbios->data_return_bus_width = bw_int_to_fixed(32);
+ vbios->trc = bw_int_to_fixed(48);
+ vbios->dmifmc_urgent_latency = bw_int_to_fixed(3);
+ vbios->stutter_self_refresh_exit_latency = bw_frc_to_fixed(75, 10);
+ vbios->stutter_self_refresh_entry_latency = bw_frc_to_fixed(19, 10);
+ vbios->nbp_state_change_latency = bw_int_to_fixed(39);
+ vbios->mcifwrmc_urgent_latency = bw_int_to_fixed(10);
+ vbios->scatter_gather_enable = false;
+ vbios->down_spread_percentage = bw_frc_to_fixed(5, 10);
+ vbios->cursor_width = 32;
+ vbios->average_compression_rate = 4;
+ vbios->number_of_request_slots_gmc_reserves_for_dmif_per_channel = 8;
+ vbios->blackout_duration = bw_int_to_fixed(0); /* us */
+ vbios->maximum_blackout_recovery_time = bw_int_to_fixed(0);
+
+ dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation = 100;
+ dceip->max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation = 100;
+ dceip->percent_of_ideal_port_bw_received_after_urgent_latency = 100;
+ dceip->large_cursor = false;
+ dceip->dmif_request_buffer_size = bw_int_to_fixed(2304);
+ dceip->dmif_pipe_en_fbc_chunk_tracker = true;
+ dceip->cursor_max_outstanding_group_num = 1;
+ dceip->lines_interleaved_into_lb = 2;
+ dceip->chunk_width = 256;
+ dceip->number_of_graphics_pipes = 6;
+ dceip->number_of_underlay_pipes = 0;
+ dceip->low_power_tiling_mode = 0;
+ dceip->display_write_back_supported = true;
+ dceip->argb_compression_support = true;
+ dceip->underlay_vscaler_efficiency6_bit_per_component =
bw_frc_to_fixed(35556, 10000);
- dceip.underlay_vscaler_efficiency8_bit_per_component =
+ dceip->underlay_vscaler_efficiency8_bit_per_component =
bw_frc_to_fixed(34286, 10000);
- dceip.underlay_vscaler_efficiency10_bit_per_component =
+ dceip->underlay_vscaler_efficiency10_bit_per_component =
bw_frc_to_fixed(32, 10);
- dceip.underlay_vscaler_efficiency12_bit_per_component =
+ dceip->underlay_vscaler_efficiency12_bit_per_component =
bw_int_to_fixed(3);
- dceip.graphics_vscaler_efficiency6_bit_per_component =
+ dceip->graphics_vscaler_efficiency6_bit_per_component =
bw_frc_to_fixed(35, 10);
- dceip.graphics_vscaler_efficiency8_bit_per_component =
+ dceip->graphics_vscaler_efficiency8_bit_per_component =
bw_frc_to_fixed(34286, 10000);
- dceip.graphics_vscaler_efficiency10_bit_per_component =
+ dceip->graphics_vscaler_efficiency10_bit_per_component =
bw_frc_to_fixed(32, 10);
- dceip.graphics_vscaler_efficiency12_bit_per_component =
+ dceip->graphics_vscaler_efficiency12_bit_per_component =
bw_int_to_fixed(3);
- dceip.alpha_vscaler_efficiency = bw_int_to_fixed(3);
- dceip.max_dmif_buffer_allocated = 4;
- dceip.graphics_dmif_size = 24576;
- dceip.underlay_luma_dmif_size = 19456;
- dceip.underlay_chroma_dmif_size = 23552;
- dceip.pre_downscaler_enabled = true;
- dceip.underlay_downscale_prefetch_enabled = false;
- dceip.lb_write_pixels_per_dispclk = bw_int_to_fixed(1);
- dceip.lb_size_per_component444 = bw_int_to_fixed(245952);
- dceip.graphics_lb_nodownscaling_multi_line_prefetching = true;
- dceip.stutter_and_dram_clock_state_change_gated_before_cursor =
+ dceip->alpha_vscaler_efficiency = bw_int_to_fixed(3);
+ dceip->max_dmif_buffer_allocated = 4;
+ dceip->graphics_dmif_size = 24576;
+ dceip->underlay_luma_dmif_size = 19456;
+ dceip->underlay_chroma_dmif_size = 23552;
+ dceip->pre_downscaler_enabled = true;
+ dceip->underlay_downscale_prefetch_enabled = false;
+ dceip->lb_write_pixels_per_dispclk = bw_int_to_fixed(1);
+ dceip->lb_size_per_component444 = bw_int_to_fixed(245952);
+ dceip->graphics_lb_nodownscaling_multi_line_prefetching = true;
+ dceip->stutter_and_dram_clock_state_change_gated_before_cursor =
bw_int_to_fixed(1);
- dceip.underlay420_luma_lb_size_per_component = bw_int_to_fixed(
+ dceip->underlay420_luma_lb_size_per_component = bw_int_to_fixed(
82176);
- dceip.underlay420_chroma_lb_size_per_component =
+ dceip->underlay420_chroma_lb_size_per_component =
bw_int_to_fixed(164352);
- dceip.underlay422_lb_size_per_component = bw_int_to_fixed(
+ dceip->underlay422_lb_size_per_component = bw_int_to_fixed(
82176);
- dceip.cursor_chunk_width = bw_int_to_fixed(64);
- dceip.cursor_dcp_buffer_lines = bw_int_to_fixed(4);
- dceip.underlay_maximum_width_efficient_for_tiling =
+ dceip->cursor_chunk_width = bw_int_to_fixed(64);
+ dceip->cursor_dcp_buffer_lines = bw_int_to_fixed(4);
+ dceip->underlay_maximum_width_efficient_for_tiling =
bw_int_to_fixed(1920);
- dceip.underlay_maximum_height_efficient_for_tiling =
+ dceip->underlay_maximum_height_efficient_for_tiling =
bw_int_to_fixed(1080);
- dceip.peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display =
+ dceip->peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display =
bw_frc_to_fixed(3, 10);
- dceip.peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation =
+ dceip->peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation =
bw_int_to_fixed(25);
- dceip.minimum_outstanding_pte_request_limit = bw_int_to_fixed(
+ dceip->minimum_outstanding_pte_request_limit = bw_int_to_fixed(
2);
- dceip.maximum_total_outstanding_pte_requests_allowed_by_saw =
+ dceip->maximum_total_outstanding_pte_requests_allowed_by_saw =
bw_int_to_fixed(128);
- dceip.limit_excessive_outstanding_dmif_requests = true;
- dceip.linear_mode_line_request_alternation_slice =
+ dceip->limit_excessive_outstanding_dmif_requests = true;
+ dceip->linear_mode_line_request_alternation_slice =
bw_int_to_fixed(64);
- dceip.scatter_gather_lines_of_pte_prefetching_in_linear_mode =
+ dceip->scatter_gather_lines_of_pte_prefetching_in_linear_mode =
32;
- dceip.display_write_back420_luma_mcifwr_buffer_size = 12288;
- dceip.display_write_back420_chroma_mcifwr_buffer_size = 8192;
- dceip.request_efficiency = bw_frc_to_fixed(8, 10);
- dceip.dispclk_per_request = bw_int_to_fixed(2);
- dceip.dispclk_ramping_factor = bw_frc_to_fixed(105, 100);
- dceip.display_pipe_throughput_factor = bw_frc_to_fixed(105, 100);
- dceip.scatter_gather_pte_request_rows_in_tiling_mode = 2;
- dceip.mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0);
+ dceip->display_write_back420_luma_mcifwr_buffer_size = 12288;
+ dceip->display_write_back420_chroma_mcifwr_buffer_size = 8192;
+ dceip->request_efficiency = bw_frc_to_fixed(8, 10);
+ dceip->dispclk_per_request = bw_int_to_fixed(2);
+ dceip->dispclk_ramping_factor = bw_frc_to_fixed(105, 100);
+ dceip->display_pipe_throughput_factor = bw_frc_to_fixed(105, 100);
+ dceip->scatter_gather_pte_request_rows_in_tiling_mode = 2;
+ dceip->mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0);
break;
default:
break;
}
- *bw_dceip = dceip;
- *bw_vbios = vbios;
+ *bw_dceip = *dceip;
+ *bw_vbios = *vbios;
+ kfree(dceip);
+ kfree(vbios);
}
/*
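The hunk above completes a stack-to-heap conversion: bw_calcs_init()'s two large bandwidth-calculation structs are now built in kzalloc()ed temporaries (hence every dceip. becoming dceip->), copied into the caller's buffers, and freed. A minimal userspace sketch of the same pattern, with calloc()/free() standing in for kzalloc()/kfree() and a hypothetical struct name:

    #include <stdlib.h>

    struct big_params { long table[4096]; };     /* ~32 KiB: too big for a kernel stack */

    static int params_init(struct big_params *out)
    {
            struct big_params *tmp = calloc(1, sizeof(*tmp));  /* kzalloc() in-kernel */

            if (!tmp)
                    return -1;                   /* -ENOMEM in-kernel */
            tmp->table[0] = 42;                  /* ...many field assignments... */
            *out = *tmp;                         /* hand the finished struct to the caller */
            free(tmp);                           /* kfree() */
            return 0;
    }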
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
index f7c728d4f50a..7d6c68c5dea9 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
@@ -125,87 +125,136 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p
{
struct hw_asic_id asic_id = ctx->asic_id;
- struct clk_mgr_internal *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL);
-
- if (clk_mgr == NULL) {
- BREAK_TO_DEBUGGER();
- return NULL;
- }
-
switch (asic_id.chip_family) {
#if defined(CONFIG_DRM_AMD_DC_SI)
- case FAMILY_SI:
+ case FAMILY_SI: {
+ struct clk_mgr_internal *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL);
+
+ if (clk_mgr == NULL) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
dce60_clk_mgr_construct(ctx, clk_mgr);
- break;
+ dce_clk_mgr_construct(ctx, clk_mgr);
+ return &clk_mgr->base;
+ }
#endif
case FAMILY_CI:
- case FAMILY_KV:
+ case FAMILY_KV: {
+ struct clk_mgr_internal *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL);
+
+ if (clk_mgr == NULL) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
dce_clk_mgr_construct(ctx, clk_mgr);
- break;
- case FAMILY_CZ:
+ return &clk_mgr->base;
+ }
+ case FAMILY_CZ: {
+ struct clk_mgr_internal *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL);
+
+ if (clk_mgr == NULL) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
dce110_clk_mgr_construct(ctx, clk_mgr);
- break;
- case FAMILY_VI:
+ return &clk_mgr->base;
+ }
+ case FAMILY_VI: {
+ struct clk_mgr_internal *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL);
+
+ if (clk_mgr == NULL) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
if (ASIC_REV_IS_TONGA_P(asic_id.hw_internal_rev) ||
ASIC_REV_IS_FIJI_P(asic_id.hw_internal_rev)) {
dce_clk_mgr_construct(ctx, clk_mgr);
- break;
+ return &clk_mgr->base;
}
if (ASIC_REV_IS_POLARIS10_P(asic_id.hw_internal_rev) ||
ASIC_REV_IS_POLARIS11_M(asic_id.hw_internal_rev) ||
ASIC_REV_IS_POLARIS12_V(asic_id.hw_internal_rev)) {
dce112_clk_mgr_construct(ctx, clk_mgr);
- break;
+ return &clk_mgr->base;
}
if (ASIC_REV_IS_VEGAM(asic_id.hw_internal_rev)) {
dce112_clk_mgr_construct(ctx, clk_mgr);
- break;
+ return &clk_mgr->base;
+ }
+ return &clk_mgr->base;
+ }
+ case FAMILY_AI: {
+ struct clk_mgr_internal *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL);
+
+ if (clk_mgr == NULL) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
}
- break;
- case FAMILY_AI:
if (ASICREV_IS_VEGA20_P(asic_id.hw_internal_rev))
dce121_clk_mgr_construct(ctx, clk_mgr);
else
dce120_clk_mgr_construct(ctx, clk_mgr);
- break;
-
+ return &clk_mgr->base;
+ }
#if defined(CONFIG_DRM_AMD_DC_DCN)
- case FAMILY_RV:
+ case FAMILY_RV: {
+ struct clk_mgr_internal *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL);
+
+ if (clk_mgr == NULL) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
if (ASICREV_IS_RENOIR(asic_id.hw_internal_rev)) {
rn_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
- break;
+ return &clk_mgr->base;
}
if (ASICREV_IS_GREEN_SARDINE(asic_id.hw_internal_rev)) {
rn_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
- break;
+ return &clk_mgr->base;
}
if (ASICREV_IS_RAVEN2(asic_id.hw_internal_rev)) {
rv2_clk_mgr_construct(ctx, clk_mgr, pp_smu);
- break;
+ return &clk_mgr->base;
}
if (ASICREV_IS_RAVEN(asic_id.hw_internal_rev) ||
ASICREV_IS_PICASSO(asic_id.hw_internal_rev)) {
rv1_clk_mgr_construct(ctx, clk_mgr, pp_smu);
- break;
+ return &clk_mgr->base;
}
- break;
+ return &clk_mgr->base;
+ }
- case FAMILY_NV:
+ case FAMILY_NV: {
+ struct clk_mgr_internal *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL);
+
+ if (clk_mgr == NULL) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
if (ASICREV_IS_SIENNA_CICHLID_P(asic_id.hw_internal_rev)) {
dcn3_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
- break;
+ return &clk_mgr->base;
}
if (ASICREV_IS_DIMGREY_CAVEFISH_P(asic_id.hw_internal_rev)) {
dcn3_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
- break;
+ return &clk_mgr->base;
}
dcn20_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
- break;
-
+ return &clk_mgr->base;
+ }
case FAMILY_VGH:
- if (ASICREV_IS_VANGOGH(asic_id.hw_internal_rev))
+ if (ASICREV_IS_VANGOGH(asic_id.hw_internal_rev)) {
+ struct clk_mgr_vgh *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL);
+
+ if (clk_mgr == NULL) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
vg_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
+ return &clk_mgr->base.base;
+ }
break;
#endif
default:
@@ -213,7 +262,7 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p
break;
}
- return &clk_mgr->base;
+ return NULL;
}
void dc_destroy_clk_mgr(struct clk_mgr *clk_mgr_base)
@@ -226,6 +275,9 @@ void dc_destroy_clk_mgr(struct clk_mgr *clk_mgr_base)
if (ASICREV_IS_SIENNA_CICHLID_P(clk_mgr_base->ctx->asic_id.hw_internal_rev)) {
dcn3_clk_mgr_destroy(clk_mgr);
}
+ if (ASICREV_IS_DIMGREY_CAVEFISH_P(clk_mgr_base->ctx->asic_id.hw_internal_rev)) {
+ dcn3_clk_mgr_destroy(clk_mgr);
+ }
break;
case FAMILY_VGH:
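dc_clk_mgr_create() above now allocates inside each family's case and returns immediately, instead of doing one kzalloc(sizeof(struct clk_mgr_internal)) up front; the default path returns NULL. That lets Van Gogh allocate the larger wrapper type (struct clk_mgr_vgh, introduced later in this patch) without handing its constructor a too-small buffer. A compressed sketch of the shape, with calloc() standing in for kzalloc() and illustrative struct contents:

    #include <stdlib.h>

    struct clk_mgr { int dprefclk_khz; };                      /* base, seen by callers */
    struct clk_mgr_internal { struct clk_mgr base; };          /* common DC-internal state */
    struct clk_mgr_vgh { struct clk_mgr_internal base; void *smu_wm_set; };

    struct clk_mgr *create_vgh(void)
    {
            struct clk_mgr_vgh *mgr = calloc(1, sizeof(*mgr)); /* sized for the variant */

            if (!mgr)
                    return NULL;
            /* ...construct... */
            return &mgr->base.base;                            /* callers only see the base */
    }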
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
index 01b1853b7750..a06e86853bb9 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
@@ -128,7 +128,7 @@ void rn_update_clocks(struct clk_mgr *clk_mgr_base,
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
struct dc *dc = clk_mgr_base->ctx->dc;
- int display_count;
+ int display_count, i;
bool update_dppclk = false;
bool update_dispclk = false;
bool dpp_clock_lowered = false;
@@ -210,6 +210,14 @@ void rn_update_clocks(struct clk_mgr *clk_mgr_base,
clk_mgr_base->clks.dppclk_khz,
safe_to_lower);
+ for (i = 0; i < context->stream_count; i++) {
+ if (context->streams[i]->signal == SIGNAL_TYPE_EDP &&
+ context->streams[i]->apply_seamless_boot_optimization) {
+ dc_wait_for_vblank(dc, context->streams[i]);
+ break;
+ }
+ }
+
clk_mgr_base->clks.actual_dppclk_khz =
rn_vbios_smu_set_dppclk(clk_mgr, clk_mgr_base->clks.dppclk_khz);
@@ -761,6 +769,43 @@ static struct wm_table ddr4_wm_table_rn = {
}
};
+static struct wm_table ddr4_1R_wm_table_rn = {
+ .entries = {
+ {
+ .wm_inst = WM_A,
+ .wm_type = WM_TYPE_PSTATE_CHG,
+ .pstate_latency_us = 11.72,
+ .sr_exit_time_us = 13.90,
+ .sr_enter_plus_exit_time_us = 14.80,
+ .valid = true,
+ },
+ {
+ .wm_inst = WM_B,
+ .wm_type = WM_TYPE_PSTATE_CHG,
+ .pstate_latency_us = 11.72,
+ .sr_exit_time_us = 13.90,
+ .sr_enter_plus_exit_time_us = 14.80,
+ .valid = true,
+ },
+ {
+ .wm_inst = WM_C,
+ .wm_type = WM_TYPE_PSTATE_CHG,
+ .pstate_latency_us = 11.72,
+ .sr_exit_time_us = 13.90,
+ .sr_enter_plus_exit_time_us = 14.80,
+ .valid = true,
+ },
+ {
+ .wm_inst = WM_D,
+ .wm_type = WM_TYPE_PSTATE_CHG,
+ .pstate_latency_us = 11.72,
+ .sr_exit_time_us = 13.90,
+ .sr_enter_plus_exit_time_us = 14.80,
+ .valid = true,
+ },
+ }
+};
+
static struct wm_table lpddr4_wm_table_rn = {
.entries = {
{
@@ -797,7 +842,18 @@ static struct wm_table lpddr4_wm_table_rn = {
},
}
};
+static unsigned int find_socclk_for_voltage(struct dpm_clocks *clock_table, unsigned int voltage)
+{
+ int i;
+ for (i = 0; i < PP_SMU_NUM_SOCCLK_DPM_LEVELS; i++) {
+ if (clock_table->SocClocks[i].Vol == voltage)
+ return clock_table->SocClocks[i].Freq;
+ }
+
+ ASSERT(0);
+ return 0;
+}
static unsigned int find_dcfclk_for_voltage(struct dpm_clocks *clock_table, unsigned int voltage)
{
int i;
@@ -841,6 +897,8 @@ static void rn_clk_mgr_helper_populate_bw_params(struct clk_bw_params *bw_params
bw_params->clk_table.entries[i].memclk_mhz = clock_table->MemClocks[j].Freq;
bw_params->clk_table.entries[i].voltage = clock_table->FClocks[j].Vol;
bw_params->clk_table.entries[i].dcfclk_mhz = find_dcfclk_for_voltage(clock_table, clock_table->FClocks[j].Vol);
+ bw_params->clk_table.entries[i].socclk_mhz = find_socclk_for_voltage(clock_table,
+ bw_params->clk_table.entries[i].voltage);
}
bw_params->vram_type = bios_info->memory_type;
@@ -932,8 +990,12 @@ void rn_clk_mgr_construct(
} else {
if (is_green_sardine)
rn_bw_params.wm_table = ddr4_wm_table_gs;
- else
- rn_bw_params.wm_table = ddr4_wm_table_rn;
+ else {
+ if (ctx->dc->config.is_single_rank_dimm)
+ rn_bw_params.wm_table = ddr4_1R_wm_table_rn;
+ else
+ rn_bw_params.wm_table = ddr4_wm_table_rn;
+ }
}
/* Saved clocks configured at boot for debug purposes */
rn_dump_clk_registers(&clk_mgr->base.boot_snapshot, &clk_mgr->base, &log_info);
@@ -951,6 +1013,9 @@ void rn_clk_mgr_construct(
if (status == PP_SMU_RESULT_OK &&
ctx->dc_bios && ctx->dc_bios->integrated_info) {
rn_clk_mgr_helper_populate_bw_params (clk_mgr->base.bw_params, &clock_table, ctx->dc_bios->integrated_info);
+ /* treat memory config as single channel if memory is asymmetric. */
+ if (ctx->dc->config.is_asymmetric_memory)
+ clk_mgr->base.bw_params->num_channels = 1;
}
}
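rn_clk_mgr.c gains a find_socclk_for_voltage() lookup (mirroring the existing find_dcfclk_for_voltage()), a dedicated watermark table for single-rank DDR4, a vblank wait on seamless-boot eDP streams before DPPCLK is lowered, and a single-channel fallback for asymmetric memory. The lookup is a plain linear scan keyed on voltage; a simplified sketch, with the table type reduced to the two fields used:

    struct dpm_level { unsigned int Freq, Vol; };

    /* Return the SOC clock of the DPM level matching this voltage, or 0
     * (after an ASSERT in the driver) when no level matches. */
    static unsigned int socclk_for_voltage(const struct dpm_level *lvls,
                                           int n, unsigned int voltage)
    {
            for (int i = 0; i < n; i++)
                    if (lvls[i].Vol == voltage)
                            return lvls[i].Freq;
            return 0;
    }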
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
index 81ea5d3a1947..652fa89fae5f 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
@@ -432,6 +432,12 @@ static void dcn3_get_memclk_states_from_smu(struct clk_mgr *clk_mgr_base)
clk_mgr->base.ctx->dc, clk_mgr_base->bw_params);
}
+static bool dcn3_is_smu_present(struct clk_mgr *clk_mgr_base)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+ return clk_mgr->smu_present;
+}
+
static bool dcn3_are_clock_states_equal(struct dc_clocks *a,
struct dc_clocks *b)
{
@@ -494,6 +500,7 @@ static struct clk_mgr_funcs dcn3_funcs = {
.are_clock_states_equal = dcn3_are_clock_states_equal,
.enable_pme_wa = dcn3_enable_pme_wa,
.notify_link_rate_change = dcn30_notify_link_rate_change,
+ .is_smu_present = dcn3_is_smu_present
};
static void dcn3_init_clocks_fpga(struct clk_mgr *clk_mgr)
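The new is_smu_present callback simply surfaces clk_mgr_internal's smu_present flag through clk_mgr_funcs; the dc.c hunk later in this patch uses it to skip idle optimizations when the SMU never answered the version query. A sketch of that guard, with flattened hypothetical types:

    struct clk_funcs { int (*is_smu_present)(void *clk_mgr); };
    struct clk { struct clk_funcs *funcs; };

    static void allow_idle_optimizations(struct clk *clk_mgr)
    {
            /* optional capability: only proceed if the hook exists and says yes */
            if (clk_mgr->funcs->is_smu_present &&
                !clk_mgr->funcs->is_smu_present(clk_mgr))
                    return;
            /* ...message the SMU... */
    }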
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/dcn301_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/dcn301_smu.c
index 68942bbc7472..07774fa2c2cf 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/dcn301_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/dcn301_smu.c
@@ -113,10 +113,13 @@ int dcn301_smu_send_msg_with_param(
int dcn301_smu_get_smu_version(struct clk_mgr_internal *clk_mgr)
{
- return dcn301_smu_send_msg_with_param(
- clk_mgr,
- VBIOSSMC_MSG_GetSmuVersion,
- 0);
+ int smu_version = dcn301_smu_send_msg_with_param(clk_mgr,
+ VBIOSSMC_MSG_GetSmuVersion,
+ 0);
+
+ DC_LOG_DEBUG("%s %x\n", __func__, smu_version);
+
+ return smu_version;
}
@@ -124,6 +127,8 @@ int dcn301_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_dispc
{
int actual_dispclk_set_mhz = -1;
+ DC_LOG_DEBUG("%s(%d)\n", __func__, requested_dispclk_khz);
+
/* Unit of SMU msg parameter is Mhz */
actual_dispclk_set_mhz = dcn301_smu_send_msg_with_param(
clk_mgr,
@@ -137,6 +142,8 @@ int dcn301_smu_set_dprefclk(struct clk_mgr_internal *clk_mgr)
{
int actual_dprefclk_set_mhz = -1;
+ DC_LOG_DEBUG("%s %d\n", __func__, clk_mgr->base.dprefclk_khz / 1000);
+
actual_dprefclk_set_mhz = dcn301_smu_send_msg_with_param(
clk_mgr,
VBIOSSMC_MSG_SetDprefclkFreq,
@@ -151,6 +158,8 @@ int dcn301_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int request
{
int actual_dcfclk_set_mhz = -1;
+ DC_LOG_DEBUG("%s(%d)\n", __func__, requested_dcfclk_khz);
+
actual_dcfclk_set_mhz = dcn301_smu_send_msg_with_param(
clk_mgr,
VBIOSSMC_MSG_SetHardMinDcfclkByFreq,
@@ -163,6 +172,8 @@ int dcn301_smu_set_min_deep_sleep_dcfclk(struct clk_mgr_internal *clk_mgr, int r
{
int actual_min_ds_dcfclk_mhz = -1;
+ DC_LOG_DEBUG("%s(%d)\n", __func__, requested_min_ds_dcfclk_khz);
+
actual_min_ds_dcfclk_mhz = dcn301_smu_send_msg_with_param(
clk_mgr,
VBIOSSMC_MSG_SetMinDeepSleepDcfclk,
@@ -175,6 +186,8 @@ int dcn301_smu_set_dppclk(struct clk_mgr_internal *clk_mgr, int requested_dpp_kh
{
int actual_dppclk_set_mhz = -1;
+ DC_LOG_DEBUG("%s(%d)\n", __func__, requested_dpp_khz);
+
actual_dppclk_set_mhz = dcn301_smu_send_msg_with_param(
clk_mgr,
VBIOSSMC_MSG_SetDppclkFreq,
@@ -187,6 +200,8 @@ void dcn301_smu_set_display_idle_optimization(struct clk_mgr_internal *clk_mgr,
{
//TODO: Work with smu team to define optimization options.
+ DC_LOG_DEBUG("%s(%x)\n", __func__, idle_info);
+
dcn301_smu_send_msg_with_param(
clk_mgr,
VBIOSSMC_MSG_SetDisplayIdleOptimizations,
@@ -202,6 +217,8 @@ void dcn301_smu_enable_phy_refclk_pwrdwn(struct clk_mgr_internal *clk_mgr, bool
idle_info.idle_info.phy_ref_clk_off = 1;
}
+ DC_LOG_DEBUG("%s(%d)\n", __func__, enable);
+
dcn301_smu_send_msg_with_param(
clk_mgr,
VBIOSSMC_MSG_SetDisplayIdleOptimizations,
@@ -218,12 +235,16 @@ void dcn301_smu_enable_pme_wa(struct clk_mgr_internal *clk_mgr)
void dcn301_smu_set_dram_addr_high(struct clk_mgr_internal *clk_mgr, uint32_t addr_high)
{
+ DC_LOG_DEBUG("%s(%x)\n", __func__, addr_high);
+
dcn301_smu_send_msg_with_param(clk_mgr,
VBIOSSMC_MSG_SetVbiosDramAddrHigh, addr_high);
}
void dcn301_smu_set_dram_addr_low(struct clk_mgr_internal *clk_mgr, uint32_t addr_low)
{
+ DC_LOG_DEBUG("%s(%x)\n", __func__, addr_low);
+
dcn301_smu_send_msg_with_param(clk_mgr,
VBIOSSMC_MSG_SetVbiosDramAddrLow, addr_low);
}
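Every dcn301 SMU wrapper above gains a DC_LOG_DEBUG line recording the function and its argument, and dcn301_smu_get_smu_version() now captures the reply so it can be logged before being returned. A stand-alone sketch of the tracing pattern, with printf() standing in for DC_LOG_DEBUG and an illustrative message ID:

    #include <stdio.h>

    static int smu_send(int msg_id, int param) { return param; }  /* stand-in */

    static int smu_set_dispclk(int requested_khz)
    {
            printf("%s(%d)\n", __func__, requested_khz);  /* trace the request */
            /* SMU message parameters are in MHz */
            return smu_send(/* VBIOSSMC_MSG_SetDispclkFreq */ 4,
                            requested_khz / 1000);
    }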
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
index aadb801447a7..c636b589d69d 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
@@ -32,9 +32,8 @@
// For dcn20_update_clocks_update_dpp_dto
#include "dcn20/dcn20_clk_mgr.h"
-
-
#include "vg_clk_mgr.h"
+#include "dcn301_smu.h"
#include "reg_helper.h"
#include "core_types.h"
#include "dm_helpers.h"
@@ -50,11 +49,14 @@
/* Macros */
+#define TO_CLK_MGR_VGH(clk_mgr)\
+ container_of(clk_mgr, struct clk_mgr_vgh, base)
+
#define REG(reg_name) \
(CLK_BASE.instance[0].segment[mm ## reg_name ## _BASE_IDX] + mm ## reg_name)
/* TODO: evaluate how to lower or disable all dcn clocks in screen off case */
-int vg_get_active_display_cnt_wa(
+static int vg_get_active_display_cnt_wa(
struct dc *dc,
struct dc_state *context)
{
@@ -134,13 +136,13 @@ void vg_update_clocks(struct clk_mgr *clk_mgr_base,
}
}
- if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, clk_mgr_base->clks.dcfclk_khz)) {
+ if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, clk_mgr_base->clks.dcfclk_khz) && !dc->debug.disable_min_fclk) {
clk_mgr_base->clks.dcfclk_khz = new_clocks->dcfclk_khz;
dcn301_smu_set_hard_min_dcfclk(clk_mgr, clk_mgr_base->clks.dcfclk_khz);
}
if (should_set_clock(safe_to_lower,
- new_clocks->dcfclk_deep_sleep_khz, clk_mgr_base->clks.dcfclk_deep_sleep_khz)) {
+ new_clocks->dcfclk_deep_sleep_khz, clk_mgr_base->clks.dcfclk_deep_sleep_khz) && !dc->debug.disable_min_fclk) {
clk_mgr_base->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz;
dcn301_smu_set_min_deep_sleep_dcfclk(clk_mgr, clk_mgr_base->clks.dcfclk_deep_sleep_khz);
}
@@ -377,7 +379,7 @@ void vg_get_clk_states(struct clk_mgr *clk_mgr_base, struct clk_states *s)
s->dprefclk_khz = sb.dprefclk * 1000;
}
-void vg_enable_pme_wa(struct clk_mgr *clk_mgr_base)
+static void vg_enable_pme_wa(struct clk_mgr *clk_mgr_base)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
@@ -449,15 +451,16 @@ static void vg_build_watermark_ranges(struct clk_bw_params *bw_params, struct wa
}
-void vg_notify_wm_ranges(struct clk_mgr *clk_mgr_base)
+static void vg_notify_wm_ranges(struct clk_mgr *clk_mgr_base)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
- struct watermarks *table = clk_mgr_base->smu_wm_set.wm_set;
+ struct clk_mgr_vgh *clk_mgr_vgh = TO_CLK_MGR_VGH(clk_mgr);
+ struct watermarks *table = clk_mgr_vgh->smu_wm_set.wm_set;
if (!clk_mgr->smu_ver)
return;
- if (!table || clk_mgr_base->smu_wm_set.mc_address.quad_part == 0)
+ if (!table || clk_mgr_vgh->smu_wm_set.mc_address.quad_part == 0)
return;
memset(table, 0, sizeof(*table));
@@ -465,9 +468,9 @@ void vg_notify_wm_ranges(struct clk_mgr *clk_mgr_base)
vg_build_watermark_ranges(clk_mgr_base->bw_params, table);
dcn301_smu_set_dram_addr_high(clk_mgr,
- clk_mgr_base->smu_wm_set.mc_address.high_part);
+ clk_mgr_vgh->smu_wm_set.mc_address.high_part);
dcn301_smu_set_dram_addr_low(clk_mgr,
- clk_mgr_base->smu_wm_set.mc_address.low_part);
+ clk_mgr_vgh->smu_wm_set.mc_address.low_part);
dcn301_smu_transfer_wm_table_dram_2_smu(clk_mgr);
}
@@ -625,7 +628,7 @@ static unsigned int find_dcfclk_for_voltage(const struct vg_dpm_clocks *clock_ta
return 0;
}
-void vg_clk_mgr_helper_populate_bw_params(
+static void vg_clk_mgr_helper_populate_bw_params(
struct clk_mgr_internal *clk_mgr,
struct integrated_info *bios_info,
const struct vg_dpm_clocks *clock_table)
@@ -703,7 +706,7 @@ static struct vg_dpm_clocks dummy_clocks = {
static struct watermarks dummy_wms = { 0 };
-void vg_get_dpm_table_from_smu(struct clk_mgr_internal *clk_mgr,
+static void vg_get_dpm_table_from_smu(struct clk_mgr_internal *clk_mgr,
struct smu_dpm_clks *smu_dpm_clks)
{
struct vg_dpm_clocks *table = smu_dpm_clks->dpm_clks;
@@ -725,39 +728,39 @@ void vg_get_dpm_table_from_smu(struct clk_mgr_internal *clk_mgr,
void vg_clk_mgr_construct(
struct dc_context *ctx,
- struct clk_mgr_internal *clk_mgr,
+ struct clk_mgr_vgh *clk_mgr,
struct pp_smu_funcs *pp_smu,
struct dccg *dccg)
{
struct smu_dpm_clks smu_dpm_clks = { 0 };
- clk_mgr->base.ctx = ctx;
- clk_mgr->base.funcs = &vg_funcs;
+ clk_mgr->base.base.ctx = ctx;
+ clk_mgr->base.base.funcs = &vg_funcs;
- clk_mgr->pp_smu = pp_smu;
+ clk_mgr->base.pp_smu = pp_smu;
- clk_mgr->dccg = dccg;
- clk_mgr->dfs_bypass_disp_clk = 0;
+ clk_mgr->base.dccg = dccg;
+ clk_mgr->base.dfs_bypass_disp_clk = 0;
- clk_mgr->dprefclk_ss_percentage = 0;
- clk_mgr->dprefclk_ss_divider = 1000;
- clk_mgr->ss_on_dprefclk = false;
- clk_mgr->dfs_ref_freq_khz = 48000;
+ clk_mgr->base.dprefclk_ss_percentage = 0;
+ clk_mgr->base.dprefclk_ss_divider = 1000;
+ clk_mgr->base.ss_on_dprefclk = false;
+ clk_mgr->base.dfs_ref_freq_khz = 48000;
- clk_mgr->base.smu_wm_set.wm_set = (struct watermarks *)dm_helpers_allocate_gpu_mem(
- clk_mgr->base.ctx,
+ clk_mgr->smu_wm_set.wm_set = (struct watermarks *)dm_helpers_allocate_gpu_mem(
+ clk_mgr->base.base.ctx,
DC_MEM_ALLOC_TYPE_FRAME_BUFFER,
sizeof(struct watermarks),
- &clk_mgr->base.smu_wm_set.mc_address.quad_part);
+ &clk_mgr->smu_wm_set.mc_address.quad_part);
- if (clk_mgr->base.smu_wm_set.wm_set == 0) {
- clk_mgr->base.smu_wm_set.wm_set = &dummy_wms;
- clk_mgr->base.smu_wm_set.mc_address.quad_part = 0;
+ if (clk_mgr->smu_wm_set.wm_set == 0) {
+ clk_mgr->smu_wm_set.wm_set = &dummy_wms;
+ clk_mgr->smu_wm_set.mc_address.quad_part = 0;
}
- ASSERT(clk_mgr->base.smu_wm_set.wm_set);
+ ASSERT(clk_mgr->smu_wm_set.wm_set);
smu_dpm_clks.dpm_clks = (struct vg_dpm_clocks *)dm_helpers_allocate_gpu_mem(
- clk_mgr->base.ctx,
+ clk_mgr->base.base.ctx,
DC_MEM_ALLOC_TYPE_FRAME_BUFFER,
sizeof(struct vg_dpm_clocks),
&smu_dpm_clks.mc_address.quad_part);
@@ -771,21 +774,21 @@ void vg_clk_mgr_construct(
if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) {
vg_funcs.update_clocks = dcn2_update_clocks_fpga;
- clk_mgr->base.dentist_vco_freq_khz = 3600000;
+ clk_mgr->base.base.dentist_vco_freq_khz = 3600000;
} else {
struct clk_log_info log_info = {0};
- clk_mgr->smu_ver = dcn301_smu_get_smu_version(clk_mgr);
+ clk_mgr->base.smu_ver = dcn301_smu_get_smu_version(&clk_mgr->base);
- if (clk_mgr->smu_ver)
- clk_mgr->smu_present = true;
+ if (clk_mgr->base.smu_ver)
+ clk_mgr->base.smu_present = true;
/* TODO: Check we get what we expect during bringup */
- clk_mgr->base.dentist_vco_freq_khz = get_vco_frequency_from_reg(clk_mgr);
+ clk_mgr->base.base.dentist_vco_freq_khz = get_vco_frequency_from_reg(&clk_mgr->base);
/* in case we don't get a value from the register, use default */
- if (clk_mgr->base.dentist_vco_freq_khz == 0)
- clk_mgr->base.dentist_vco_freq_khz = 3600000;
+ if (clk_mgr->base.base.dentist_vco_freq_khz == 0)
+ clk_mgr->base.base.dentist_vco_freq_khz = 3600000;
if (ctx->dc_bios->integrated_info->memory_type == LpDdr5MemType) {
vg_bw_params.wm_table = lpddr5_wm_table;
@@ -793,36 +796,38 @@ void vg_clk_mgr_construct(
vg_bw_params.wm_table = ddr4_wm_table;
}
/* Saved clocks configured at boot for debug purposes */
- vg_dump_clk_registers(&clk_mgr->base.boot_snapshot, &clk_mgr->base, &log_info);
+ vg_dump_clk_registers(&clk_mgr->base.base.boot_snapshot, &clk_mgr->base.base, &log_info);
}
- clk_mgr->base.dprefclk_khz = 600000;
- dce_clock_read_ss_info(clk_mgr);
+ clk_mgr->base.base.dprefclk_khz = 600000;
+ dce_clock_read_ss_info(&clk_mgr->base);
- clk_mgr->base.bw_params = &vg_bw_params;
+ clk_mgr->base.base.bw_params = &vg_bw_params;
- vg_get_dpm_table_from_smu(clk_mgr, &smu_dpm_clks);
+ vg_get_dpm_table_from_smu(&clk_mgr->base, &smu_dpm_clks);
if (ctx->dc_bios && ctx->dc_bios->integrated_info) {
vg_clk_mgr_helper_populate_bw_params(
- clk_mgr,
+ &clk_mgr->base,
ctx->dc_bios->integrated_info,
smu_dpm_clks.dpm_clks);
}
if (smu_dpm_clks.dpm_clks && smu_dpm_clks.mc_address.quad_part != 0)
- dm_helpers_free_gpu_mem(clk_mgr->base.ctx, DC_MEM_ALLOC_TYPE_FRAME_BUFFER,
+ dm_helpers_free_gpu_mem(clk_mgr->base.base.ctx, DC_MEM_ALLOC_TYPE_FRAME_BUFFER,
smu_dpm_clks.dpm_clks);
/*
- if (!IS_FPGA_MAXIMUS_DC(ctx->dce_environment) && clk_mgr->smu_ver) {
+ if (!IS_FPGA_MAXIMUS_DC(ctx->dce_environment) && clk_mgr->base.smu_ver) {
enable powerfeatures when displaycount goes to 0
dcn301_smu_enable_phy_refclk_pwrdwn(clk_mgr, !debug->disable_48mhz_pwrdwn);
}
*/
}
-void vg_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr)
+void vg_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr_int)
{
- if (clk_mgr->base.smu_wm_set.wm_set && clk_mgr->base.smu_wm_set.mc_address.quad_part != 0)
- dm_helpers_free_gpu_mem(clk_mgr->base.ctx, DC_MEM_ALLOC_TYPE_FRAME_BUFFER,
- clk_mgr->base.smu_wm_set.wm_set);
+ struct clk_mgr_vgh *clk_mgr = TO_CLK_MGR_VGH(clk_mgr_int);
+
+ if (clk_mgr->smu_wm_set.wm_set && clk_mgr->smu_wm_set.mc_address.quad_part != 0)
+ dm_helpers_free_gpu_mem(clk_mgr_int->base.ctx, DC_MEM_ALLOC_TYPE_FRAME_BUFFER,
+ clk_mgr->smu_wm_set.wm_set);
}
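The watermark-set bookkeeping moves out of the shared struct clk_mgr_internal into the Van Gogh-only struct clk_mgr_vgh, recovered from the base pointer with the new TO_CLK_MGR_VGH() container_of wrapper. A minimal container_of sketch showing why the downcast is safe only for objects actually embedded in a clk_mgr_vgh:

    #include <stddef.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct clk_mgr_internal { int smu_ver; };
    struct clk_mgr_vgh { struct clk_mgr_internal base; void *smu_wm_set; };

    #define TO_CLK_MGR_VGH(p) container_of(p, struct clk_mgr_vgh, base)

    static void *wm_table(struct clk_mgr_internal *clk_mgr)
    {
            /* valid only when clk_mgr is the .base of a clk_mgr_vgh */
            return TO_CLK_MGR_VGH(clk_mgr)->smu_wm_set;
    }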
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.h
index b5115b3123a1..7255477307f1 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.h
@@ -25,29 +25,25 @@
#ifndef __VG_CLK_MGR_H__
#define __VG_CLK_MGR_H__
+#include "clk_mgr_internal.h"
-int vg_get_active_display_cnt_wa(
- struct dc *dc,
- struct dc_state *context);
+struct watermarks;
-void vg_enable_pme_wa(struct clk_mgr *clk_mgr_base);
+struct smu_watermark_set {
+ struct watermarks *wm_set;
+ union large_integer mc_address;
+};
+
+struct clk_mgr_vgh {
+ struct clk_mgr_internal base;
+ struct smu_watermark_set smu_wm_set;
+};
void vg_clk_mgr_construct(struct dc_context *ctx,
- struct clk_mgr_internal *clk_mgr,
+ struct clk_mgr_vgh *clk_mgr,
struct pp_smu_funcs *pp_smu,
struct dccg *dccg);
void vg_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr);
-#include "dcn301_smu.h"
-void vg_notify_wm_ranges(struct clk_mgr *clk_mgr_base);
-
-void vg_get_dpm_table_from_smu(struct clk_mgr_internal *clk_mgr,
- struct smu_dpm_clks *smu_dpm_clks);
-
-void vg_clk_mgr_helper_populate_bw_params(
- struct clk_mgr_internal *clk_mgr,
- struct integrated_info *bios_info,
- const struct vg_dpm_clocks *clock_table);
-
#endif //__VG_CLK_MGR_H__
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 8e6c815b55d2..4713f09bcbf1 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -49,10 +49,13 @@
#include "timing_generator.h"
#include "abm.h"
#include "virtual/virtual_link_encoder.h"
+#include "hubp.h"
#include "link_hwss.h"
#include "link_encoder.h"
+#include "link_enc_cfg.h"
+#include "dc_link.h"
#include "dc_link_ddc.h"
#include "dm_helpers.h"
#include "mem_input.h"
@@ -304,7 +307,10 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc,
int i = 0;
bool ret = false;
- stream->adjust = *adjust;
+ stream->adjust.v_total_max = adjust->v_total_max;
+ stream->adjust.v_total_mid = adjust->v_total_mid;
+ stream->adjust.v_total_mid_frame_num = adjust->v_total_mid_frame_num;
+ stream->adjust.v_total_min = adjust->v_total_min;
for (i = 0; i < MAX_PIPES; i++) {
struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
@@ -312,10 +318,7 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc,
if (pipe->stream == stream && pipe->stream_res.tg) {
dc->hwss.set_drr(&pipe,
1,
- adjust->v_total_min,
- adjust->v_total_max,
- adjust->v_total_mid,
- adjust->v_total_mid_frame_num);
+ *adjust);
ret = true;
}
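The hunk above also changes the set_drr() hook to take the whole adjustment struct instead of four scalar arguments, and copies the DRR fields into stream->adjust individually rather than overwriting the entire struct. A sketch of the narrowed-interface idea, with hypothetical names:

    struct drr_params {
            int v_total_min, v_total_max, v_total_mid, v_total_mid_frame_num;
    };

    /* One struct parameter replaces four positional ints, so call sites
     * cannot swap arguments and new fields need no signature change. */
    static void set_drr(const struct drr_params *p)
    {
            /* program the timing generator from *p */
    }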
@@ -870,6 +873,9 @@ static bool dc_construct(struct dc *dc,
if (!create_links(dc, init_params->num_virtual_links))
goto fail;
+ /* Initialise DIG link encoder resource tracking variables. */
+ link_enc_cfg_init(dc, dc->current_state);
+
return true;
fail:
@@ -1317,11 +1323,10 @@ bool dc_validate_seamless_boot_timing(const struct dc *dc,
struct dc_link *link = sink->link;
unsigned int i, enc_inst, tg_inst = 0;
- // Seamless port only support single DP and EDP so far
- if ((sink->sink_signal != SIGNAL_TYPE_DISPLAY_PORT &&
- sink->sink_signal != SIGNAL_TYPE_EDP) ||
- sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
+ /* Support seamless boot on EDP displays only */
+ if (sink->sink_signal != SIGNAL_TYPE_EDP) {
return false;
+ }
/* Check for enabled DIG to identify enabled display */
if (!link->link_enc->funcs->is_dig_enabled(link->link_enc))
@@ -1394,6 +1399,10 @@ bool dc_validate_seamless_boot_timing(const struct dc *dc,
if (crtc_timing->v_sync_width != hw_crtc_timing.v_sync_width)
return false;
+ /* block DSC for now, as VBIOS does not currently support DSC timings */
+ if (crtc_timing->flags.DSC)
+ return false;
+
if (dc_is_dp_signal(link->connector_signal)) {
unsigned int pix_clk_100hz;
@@ -1424,6 +1433,11 @@ bool dc_validate_seamless_boot_timing(const struct dc *dc,
return false;
}
+ if (is_edp_ilr_optimization_required(link, crtc_timing)) {
+ DC_LOG_EVENT_LINK_TRAINING("Seamless boot disabled to optimize eDP link rate\n");
+ return false;
+ }
+
return true;
}
@@ -2091,6 +2105,10 @@ static enum surface_update_type check_update_surfaces_for_stream(
if (stream_status == NULL || stream_status->plane_count != surface_count)
overall_type = UPDATE_TYPE_FULL;
+ if (stream_update && stream_update->pending_test_pattern) {
+ overall_type = UPDATE_TYPE_FULL;
+ }
+
/* some stream updates require passive update */
if (stream_update) {
union stream_update_flags *su_flags = &stream_update->stream->update_flags;
@@ -2390,6 +2408,8 @@ static void copy_stream_update_to_stream(struct dc *dc,
if (update->dither_option)
stream->dither_option = *update->dither_option;
+ if (update->pending_test_pattern)
+ stream->test_pattern = *update->pending_test_pattern;
/* update current stream with writeback info */
if (update->wb_update) {
int i;
@@ -2485,6 +2505,7 @@ static void commit_planes_do_stream_update(struct dc *dc,
}
}
+
/* Full fe update*/
if (update_type == UPDATE_TYPE_FAST)
continue;
@@ -2492,6 +2513,15 @@ static void commit_planes_do_stream_update(struct dc *dc,
if (stream_update->dsc_config)
dp_update_dsc_config(pipe_ctx);
+ if (stream_update->pending_test_pattern) {
+ dc_link_dp_set_test_pattern(stream->link,
+ stream->test_pattern.type,
+ stream->test_pattern.color_space,
+ stream->test_pattern.p_link_settings,
+ stream->test_pattern.p_custom_pattern,
+ stream->test_pattern.cust_pattern_size);
+ }
+
if (stream_update->dpms_off) {
if (*stream_update->dpms_off) {
core_link_disable_stream(pipe_ctx);
@@ -2578,6 +2608,17 @@ static void commit_planes_for_stream(struct dc *dc,
}
}
+#ifdef CONFIG_DRM_AMD_DC_DCN
+ if (stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE) {
+ struct pipe_ctx *mpcc_pipe;
+ struct pipe_ctx *odm_pipe;
+
+ for (mpcc_pipe = top_pipe_to_program; mpcc_pipe; mpcc_pipe = mpcc_pipe->bottom_pipe)
+ for (odm_pipe = mpcc_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
+ odm_pipe->ttu_regs.min_ttu_vblank = MAX_TTU;
+ }
+#endif
+
if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
if (top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
if (should_use_dmub_lock(stream->link)) {
@@ -2646,6 +2687,10 @@ static void commit_planes_for_stream(struct dc *dc,
plane_state->triplebuffer_flips = true;
}
}
+ if (update_type == UPDATE_TYPE_FULL) {
+ /* force vsync flip when reconfiguring pipes to prevent underflow */
+ plane_state->flip_immediate = false;
+ }
}
}
@@ -2784,9 +2829,13 @@ static void commit_planes_for_stream(struct dc *dc,
for (j = 0; j < dc->res_pool->pipe_count; j++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
+ if (!pipe_ctx->plane_state)
+ continue;
+
if (pipe_ctx->bottom_pipe || pipe_ctx->next_odm_pipe ||
!pipe_ctx->stream || pipe_ctx->stream != stream ||
- !pipe_ctx->plane_state->update_flags.bits.addr_update)
+ !pipe_ctx->plane_state->update_flags.bits.addr_update ||
+ pipe_ctx->plane_state->skip_manual_trigger)
continue;
if (pipe_ctx->stream_res.tg->funcs->program_manual_trigger)
@@ -3170,6 +3219,19 @@ void dc_link_remove_remote_sink(struct dc_link *link, struct dc_sink *sink)
}
}
+void dc_wait_for_vblank(struct dc *dc, struct dc_stream_state *stream)
+{
+ int i;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++)
+ if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) {
+ struct timing_generator *tg =
+ dc->current_state->res_ctx.pipe_ctx[i].stream_res.tg;
+ tg->funcs->wait_for_state(tg, CRTC_STATE_VBLANK);
+ break;
+ }
+}
+
void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx *info)
{
info->displayClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dispclk_khz;
@@ -3225,6 +3287,10 @@ void dc_allow_idle_optimizations(struct dc *dc, bool allow)
if (dc->debug.disable_idle_power_optimizations)
return;
+ if (dc->clk_mgr->funcs->is_smu_present)
+ if (!dc->clk_mgr->funcs->is_smu_present(dc->clk_mgr))
+ return;
+
if (allow == dc->idle_optimizations_allowed)
return;
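dc_wait_for_vblank() scans the current state's pipes for the given stream and blocks on its timing generator until it reports VBLANK; rn_update_clocks() earlier in this patch uses it so a seamless-boot eDP stream finishes its frame before DPPCLK drops. A reduced sketch of the scan-and-wait shape, with flattened types and an illustrative enum value:

    struct tg { void (*wait_for_state)(struct tg *tg, int state); };
    struct pipe { const void *stream; struct tg *tg; };

    enum { CRTC_STATE_VBLANK = 1 };   /* illustrative value */

    static void wait_for_vblank(struct pipe *pipes, int n, const void *stream)
    {
            for (int i = 0; i < n; i++)
                    if (pipes[i].stream == stream) {
                            pipes[i].tg->wait_for_state(pipes[i].tg, CRTC_STATE_VBLANK);
                            break;
                    }
    }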
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index f9a33dc52c45..f4374d83662a 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -92,11 +92,14 @@ static void dc_link_destruct(struct dc_link *link)
link->panel_cntl->funcs->destroy(&link->panel_cntl);
if (link->link_enc) {
- /* Update link encoder tracking variables. These are used for the dynamic
- * assignment of link encoders to streams.
+ /* Update link encoder resource tracking variables. These are used for
+ * the dynamic assignment of link encoders to streams. Virtual links
+ * are not assigned encoder resources on creation.
*/
- link->dc->res_pool->link_encoders[link->link_enc->preferred_engine] = NULL;
- link->dc->res_pool->dig_link_enc_count--;
+ if (link->link_id.id != CONNECTOR_ID_VIRTUAL) {
+ link->dc->res_pool->link_encoders[link->eng_id - ENGINE_ID_DIGA] = NULL;
+ link->dc->res_pool->dig_link_enc_count--;
+ }
link->link_enc->funcs->destroy(&link->link_enc);
}
@@ -1407,6 +1410,8 @@ static bool dc_link_construct(struct dc_link *link,
link->link_id =
bios->funcs->get_connector_id(bios, init_params->connector_index);
+ link->ep_type = DISPLAY_ENDPOINT_PHY;
+
DC_LOG_DC("BIOS object table - link_id: %d", link->link_id.id);
if (bios->funcs->get_disp_connector_caps_info) {
@@ -1506,10 +1511,12 @@ static bool dc_link_construct(struct dc_link *link,
(link->link_id.id == CONNECTOR_ID_EDP ||
link->link_id.id == CONNECTOR_ID_LVDS)) {
panel_cntl_init_data.ctx = dc_ctx;
- panel_cntl_init_data.inst = link->link_index;
+ panel_cntl_init_data.inst =
+ panel_cntl_init_data.ctx->dc_edp_id_count;
link->panel_cntl =
link->dc->res_pool->funcs->panel_cntl_create(
&panel_cntl_init_data);
+ panel_cntl_init_data.ctx->dc_edp_id_count++;
if (link->panel_cntl == NULL) {
DC_ERROR("Failed to create link panel_cntl!\n");
@@ -1541,7 +1548,8 @@ static bool dc_link_construct(struct dc_link *link,
/* Update link encoder tracking variables. These are used for the dynamic
* assignment of link encoders to streams.
*/
- link->dc->res_pool->link_encoders[link->link_enc->preferred_engine] = link->link_enc;
+ link->eng_id = link->link_enc->preferred_engine;
+ link->dc->res_pool->link_encoders[link->eng_id - ENGINE_ID_DIGA] = link->link_enc;
link->dc->res_pool->dig_link_enc_count++;
link->link_enc_hw_inst = link->link_enc->transmitter;
@@ -1671,21 +1679,27 @@ void link_destroy(struct dc_link **link)
static void enable_stream_features(struct pipe_ctx *pipe_ctx)
{
struct dc_stream_state *stream = pipe_ctx->stream;
- struct dc_link *link = stream->link;
- union down_spread_ctrl old_downspread;
- union down_spread_ctrl new_downspread;
- core_link_read_dpcd(link, DP_DOWNSPREAD_CTRL,
- &old_downspread.raw, sizeof(old_downspread));
+ if (pipe_ctx->stream->signal != SIGNAL_TYPE_DISPLAY_PORT_MST) {
+ struct dc_link *link = stream->link;
+ union down_spread_ctrl old_downspread;
+ union down_spread_ctrl new_downspread;
- new_downspread.raw = old_downspread.raw;
+ core_link_read_dpcd(link, DP_DOWNSPREAD_CTRL,
+ &old_downspread.raw, sizeof(old_downspread));
- new_downspread.bits.IGNORE_MSA_TIMING_PARAM =
- (stream->ignore_msa_timing_param) ? 1 : 0;
+ new_downspread.raw = old_downspread.raw;
- if (new_downspread.raw != old_downspread.raw) {
- core_link_write_dpcd(link, DP_DOWNSPREAD_CTRL,
- &new_downspread.raw, sizeof(new_downspread));
+ new_downspread.bits.IGNORE_MSA_TIMING_PARAM =
+ (stream->ignore_msa_timing_param) ? 1 : 0;
+
+ if (new_downspread.raw != old_downspread.raw) {
+ core_link_write_dpcd(link, DP_DOWNSPREAD_CTRL,
+ &new_downspread.raw, sizeof(new_downspread));
+ }
+
+ } else {
+ dm_helpers_mst_enable_stream_features(stream);
}
}
@@ -2805,12 +2819,9 @@ bool dc_link_setup_psr(struct dc_link *link,
psr_context->psr_level.u32all = 0;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
/*skip power down the single pipe since it blocks the cstate*/
- if ((link->ctx->asic_id.chip_family == FAMILY_RV) &&
- ASICREV_IS_RAVEN(link->ctx->asic_id.hw_internal_rev))
+ if (link->ctx->asic_id.chip_family >= FAMILY_RV)
psr_context->psr_level.bits.SKIP_CRTC_DISABLE = true;
-#endif
/* SMU will perform additional powerdown sequence.
* For unsupported ASICs, set psr_level flag to skip PSR
@@ -2883,8 +2894,8 @@ static struct fixed31_32 get_pbn_per_slot(struct dc_stream_state *stream)
static struct fixed31_32 get_pbn_from_bw_in_kbps(uint64_t kbps)
{
struct fixed31_32 peak_kbps;
- uint32_t numerator;
- uint32_t denominator;
+ uint32_t numerator = 0;
+ uint32_t denominator = 1;
/*
* margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
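
The 1.006 factor folds the 5300 ppm + 300 ppm (~0.56%) overhead margin into the bandwidth-to-PBN conversion, with one PBN defined as 54/64 MBps. A minimal worked sketch of the arithmetic (rounding up; the fixed-point plumbing of the real function is omitted):

#include <linux/math64.h>

static u32 pbn_from_kbps(u64 kbps)
{
	/* bytes/s = kbps * 1000 / 8; one PBN = 54/64 MBps; margin = 1006/1000 */
	return (u32)DIV_ROUND_UP_ULL(kbps * 64 * 1006,
				     8ULL * 54 * 1000 * 1000);
}
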
@@ -3131,50 +3142,6 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
return DC_OK;
}
-enum dc_status dc_link_reallocate_mst_payload(struct dc_link *link)
-{
- int i;
- struct pipe_ctx *pipe_ctx;
-
- // Clear all of MST payload then reallocate
- for (i = 0; i < MAX_PIPES; i++) {
- pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
-
- /* driver enable split pipe for external monitors
- * we have to check pipe_ctx is split pipe or not
- * If it's split pipe, driver using top pipe to
- * reallocate.
- */
- if (!pipe_ctx || pipe_ctx->top_pipe)
- continue;
-
- if (pipe_ctx->stream && pipe_ctx->stream->link == link &&
- pipe_ctx->stream->dpms_off == false &&
- pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
- deallocate_mst_payload(pipe_ctx);
- }
- }
-
- for (i = 0; i < MAX_PIPES; i++) {
- pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
-
- if (!pipe_ctx || pipe_ctx->top_pipe)
- continue;
-
- if (pipe_ctx->stream && pipe_ctx->stream->link == link &&
- pipe_ctx->stream->dpms_off == false &&
- pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
- /* enable/disable PHY will clear connection between BE and FE
- * need to restore it.
- */
- link->link_enc->funcs->connect_dig_be_to_fe(link->link_enc,
- pipe_ctx->stream_res.stream_enc->id, true);
- dc_link_allocate_mst_payload(pipe_ctx);
- }
- }
-
- return DC_OK;
-}
#if defined(CONFIG_DRM_AMD_DC_HDCP)
static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off)
@@ -3288,7 +3255,8 @@ void core_link_enable_stream(
/* eDP lit up by bios already, no need to enable again. */
if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP &&
- apply_edp_fast_boot_optimization) {
+ apply_edp_fast_boot_optimization &&
+ !pipe_ctx->stream->timing.flags.DSC) {
pipe_ctx->stream->dpms_off = false;
#if defined(CONFIG_DRM_AMD_DC_HDCP)
update_psp_stream_config(pipe_ctx, false);
@@ -3350,8 +3318,10 @@ void core_link_enable_stream(
/* Set DPS PPS SDP (AKA "info frames") */
if (pipe_ctx->stream->timing.flags.DSC) {
if (dc_is_dp_signal(pipe_ctx->stream->signal) ||
- dc_is_virtual_signal(pipe_ctx->stream->signal))
+ dc_is_virtual_signal(pipe_ctx->stream->signal)) {
+ dp_set_dsc_on_rx(pipe_ctx, true);
dp_set_dsc_pps_sdp(pipe_ctx, true);
+ }
}
if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
@@ -3746,7 +3716,8 @@ bool dc_link_should_enable_fec(const struct dc_link *link)
if ((link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT_MST &&
link->local_sink &&
link->local_sink->edid_caps.panel_patch.disable_fec) ||
- link->connector_signal == SIGNAL_TYPE_EDP) // Disable FEC for eDP
+ (link->connector_signal == SIGNAL_TYPE_EDP &&
+ link->dc->debug.force_enable_edp_fec == false)) // Disable FEC for eDP
is_fec_disable = true;
if (dc_link_is_fec_supported(link) && !link->dc->debug.disable_fec && !is_fec_disable)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 47e6c33f73cb..3ff3d9e90983 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -284,7 +284,7 @@ static uint8_t dc_dp_initialize_scrambling_data_symbols(
static inline bool is_repeater(struct dc_link *link, uint32_t offset)
{
- return (link->lttpr_non_transparent_mode && offset != 0);
+ return (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) && (offset != 0);
}
static void dpcd_set_lt_pattern_and_lane_settings(
@@ -1072,7 +1072,7 @@ static enum link_training_result perform_clock_recovery_sequence(
/* 3. wait receiver to lock-on*/
wait_time_microsec = lt_settings->cr_pattern_time;
- if (link->lttpr_non_transparent_mode)
+ if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT)
wait_time_microsec = TRAINING_AUX_RD_INTERVAL;
wait_for_training_aux_rd_interval(
@@ -1098,11 +1098,13 @@ static enum link_training_result perform_clock_recovery_sequence(
if (is_max_vs_reached(lt_settings))
break;
- /* 7. same voltage*/
- /* Note: VS same for all lanes,
- * so comparing first lane is sufficient*/
- if (lt_settings->lane_settings[0].VOLTAGE_SWING ==
+ /* 7. same lane settings*/
+ /* Note: settings are the same for all lanes,
+ * so comparing first lane is sufficient*/
+ if ((lt_settings->lane_settings[0].VOLTAGE_SWING ==
req_settings.lane_settings[0].VOLTAGE_SWING)
+ && (lt_settings->lane_settings[0].PRE_EMPHASIS ==
+ req_settings.lane_settings[0].PRE_EMPHASIS))
retries_cr++;
else
retries_cr = 0;
@@ -1130,11 +1132,6 @@ static inline enum link_training_result perform_link_training_int(
enum link_training_result status)
{
union lane_count_set lane_count_set = { {0} };
- union dpcd_training_pattern dpcd_pattern = { {0} };
-
- /* 3. set training not in progress*/
- dpcd_pattern.v1_4.TRAINING_PATTERN_SET = DPCD_TRAINING_PATTERN_VIDEOIDLE;
- dpcd_set_training_pattern(link, dpcd_pattern);
/* 4. mainlink output idle pattern*/
dp_set_hw_test_pattern(link, DP_TEST_PATTERN_VIDEO_MODE, NULL, 0);
@@ -1324,7 +1321,17 @@ static uint8_t convert_to_count(uint8_t lttpr_repeater_count)
return 0; // invalid value
}
-static void configure_lttpr_mode(struct dc_link *link)
+static void configure_lttpr_mode_transparent(struct dc_link *link)
+{
+ uint8_t repeater_mode = DP_PHY_REPEATER_MODE_TRANSPARENT;
+
+ core_link_write_dpcd(link,
+ DP_PHY_REPEATER_MODE,
+ (uint8_t *)&repeater_mode,
+ sizeof(repeater_mode));
+}
+
+static void configure_lttpr_mode_non_transparent(struct dc_link *link)
{
/* aux timeout is already set to extended */
/* RESET/SET lttpr mode to enable non transparent mode */
@@ -1344,7 +1351,7 @@ static void configure_lttpr_mode(struct dc_link *link)
link->dpcd_caps.lttpr_caps.mode = repeater_mode;
}
- if (link->lttpr_non_transparent_mode) {
+ if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) {
DC_LOG_HW_LINK_TRAINING("%s\n Set LTTPR to Non Transparent Mode\n", __func__);
@@ -1548,6 +1555,7 @@ enum link_training_result dc_link_dp_perform_link_training(
{
enum link_training_result status = LINK_TRAINING_SUCCESS;
struct link_training_settings lt_settings;
+ union dpcd_training_pattern dpcd_pattern = { { 0 } };
bool fec_enable;
uint8_t repeater_cnt;
@@ -1560,8 +1568,10 @@ enum link_training_result dc_link_dp_perform_link_training(
&lt_settings);
/* Configure lttpr mode */
- if (link->lttpr_non_transparent_mode)
- configure_lttpr_mode(link);
+ if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT)
+ configure_lttpr_mode_non_transparent(link);
+ else if (link->lttpr_mode == LTTPR_MODE_TRANSPARENT)
+ configure_lttpr_mode_transparent(link);
if (link->ctx->dc->work_arounds.lt_early_cr_pattern)
start_clock_recovery_pattern_early(link, &lt_settings, DPRX);
@@ -1576,7 +1586,7 @@ enum link_training_result dc_link_dp_perform_link_training(
dp_set_fec_ready(link, fec_enable);
- if (link->lttpr_non_transparent_mode) {
+ if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) {
/* 2. perform link training (set link training done
* to false is done as well)
@@ -1610,6 +1620,9 @@ enum link_training_result dc_link_dp_perform_link_training(
}
}
+ /* 3. set training not in progress*/
+ dpcd_pattern.v1_4.TRAINING_PATTERN_SET = DPCD_TRAINING_PATTERN_VIDEOIDLE;
+ dpcd_set_training_pattern(link, dpcd_pattern);
if ((status == LINK_TRAINING_SUCCESS) || !skip_video_pattern) {
status = perform_link_training_int(link,
&lt_settings,
@@ -1633,6 +1646,42 @@ enum link_training_result dc_link_dp_perform_link_training(
return status;
}
+static enum dp_panel_mode try_enable_assr(struct dc_stream_state *stream)
+{
+ struct dc_link *link = stream->link;
+ enum dp_panel_mode panel_mode = dp_get_panel_mode(link);
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+ struct cp_psp *cp_psp = &stream->ctx->cp_psp;
+#endif
+
+ /* ASSR must be supported on the panel */
+ if (panel_mode == DP_PANEL_MODE_DEFAULT)
+ return panel_mode;
+
+ /* eDP or internal DP only */
+ if (link->connector_signal != SIGNAL_TYPE_EDP &&
+ !(link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT &&
+ link->is_internal_display))
+ return DP_PANEL_MODE_DEFAULT;
+
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+ if (cp_psp && cp_psp->funcs.enable_assr) {
+ if (!cp_psp->funcs.enable_assr(cp_psp->handle, link)) {
+ /* since eDP implies ASSR on, change panel
+ * mode to disable ASSR
+ */
+ panel_mode = DP_PANEL_MODE_DEFAULT;
+ }
+ } else
+ panel_mode = DP_PANEL_MODE_DEFAULT;
+
+#else
+ /* turn off ASSR if the implementation is not compiled in */
+ panel_mode = DP_PANEL_MODE_DEFAULT;
+#endif
+ return panel_mode;
+}
+
bool perform_link_training_with_retries(
const struct dc_link_settings *link_setting,
bool skip_video_pattern,
@@ -1644,7 +1693,7 @@ bool perform_link_training_with_retries(
uint8_t delay_between_attempts = LINK_TRAINING_RETRY_DELAY;
struct dc_stream_state *stream = pipe_ctx->stream;
struct dc_link *link = stream->link;
- enum dp_panel_mode panel_mode = dp_get_panel_mode(link);
+ enum dp_panel_mode panel_mode;
/* We need to do this before the link training to ensure the idle pattern in SST
* mode will be sent right after the link training
@@ -1669,32 +1718,25 @@ bool perform_link_training_with_retries(
msleep(delay_dp_power_up_in_ms);
}
-#ifdef CONFIG_DRM_AMD_DC_HDCP
- if (panel_mode == DP_PANEL_MODE_EDP) {
- struct cp_psp *cp_psp = &stream->ctx->cp_psp;
-
- if (cp_psp && cp_psp->funcs.enable_assr) {
- if (!cp_psp->funcs.enable_assr(cp_psp->handle, link)) {
- /* since eDP implies ASSR on, change panel
- * mode to disable ASSR
- */
- panel_mode = DP_PANEL_MODE_DEFAULT;
- }
- } else
- panel_mode = DP_PANEL_MODE_DEFAULT;
- }
-#endif
-
+ panel_mode = try_enable_assr(stream);
dp_set_panel_mode(link, panel_mode);
+ DC_LOG_DETECTION_DP_CAPS("Link: %d ASSR enabled: %d\n",
+ link->link_index,
+ panel_mode != DP_PANEL_MODE_DEFAULT);
if (link->aux_access_disabled) {
dc_link_dp_perform_link_training_skip_aux(link, link_setting);
return true;
- } else if (dc_link_dp_perform_link_training(
- link,
- link_setting,
- skip_video_pattern) == LINK_TRAINING_SUCCESS)
- return true;
+ } else {
+ enum link_training_result status = LINK_TRAINING_CR_FAIL_LANE0;
+
+ status = dc_link_dp_perform_link_training(
+ link,
+ link_setting,
+ skip_video_pattern);
+ if (status == LINK_TRAINING_SUCCESS)
+ return true;
+ }
/* latest link training still fail, skip delay and keep PHY on
*/
@@ -1873,7 +1915,7 @@ static struct dc_link_settings get_max_link_cap(struct dc_link *link)
* account for lttpr repeaters cap
* notes: repeaters do not snoop in the DPRX Capabilities addresses (3.6.3).
*/
- if (link->lttpr_non_transparent_mode) {
+ if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) {
if (link->dpcd_caps.lttpr_caps.max_lane_count < max_link_cap.lane_count)
max_link_cap.lane_count = link->dpcd_caps.lttpr_caps.max_lane_count;
@@ -2031,7 +2073,7 @@ bool dp_verify_link_cap(
max_link_cap = get_max_link_cap(link);
/* Grant extended timeout request */
- if (link->lttpr_non_transparent_mode && link->dpcd_caps.lttpr_caps.max_ext_timeout > 0) {
+ if ((link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) && (link->dpcd_caps.lttpr_caps.max_ext_timeout > 0)) {
uint8_t grant = link->dpcd_caps.lttpr_caps.max_ext_timeout & 0x80;
core_link_write_dpcd(link, DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT, &grant, sizeof(grant));
@@ -2447,7 +2489,7 @@ static bool decide_dp_link_settings(struct dc_link *link, struct dc_link_setting
return false;
}
-static bool decide_edp_link_settings(struct dc_link *link, struct dc_link_settings *link_setting, uint32_t req_bw)
+bool decide_edp_link_settings(struct dc_link *link, struct dc_link_settings *link_setting, uint32_t req_bw)
{
struct dc_link_settings initial_link_setting;
struct dc_link_settings current_link_setting;
@@ -2782,10 +2824,27 @@ static void dp_test_send_link_test_pattern(struct dc_link *link)
enum dp_test_pattern test_pattern;
enum dp_test_pattern_color_space test_pattern_color_space =
DP_TEST_PATTERN_COLOR_SPACE_UNDEFINED;
+ enum dc_color_depth requestColorDepth = COLOR_DEPTH_UNDEFINED;
+ struct pipe_ctx *pipes = link->dc->current_state->res_ctx.pipe_ctx;
+ struct pipe_ctx *pipe_ctx = NULL;
+ int i;
memset(&dpcd_test_pattern, 0, sizeof(dpcd_test_pattern));
memset(&dpcd_test_params, 0, sizeof(dpcd_test_params));
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (pipes[i].stream == NULL)
+ continue;
+
+ if (pipes[i].stream->link == link && !pipes[i].top_pipe && !pipes[i].prev_odm_pipe) {
+ pipe_ctx = &pipes[i];
+ break;
+ }
+ }
+
+ if (pipe_ctx == NULL)
+ return;
+
/* get link test pattern and pattern parameters */
core_link_read_dpcd(
link,
@@ -2823,6 +2882,33 @@ static void dp_test_send_link_test_pattern(struct dc_link *link)
DP_TEST_PATTERN_COLOR_SPACE_YCBCR709 :
DP_TEST_PATTERN_COLOR_SPACE_YCBCR601;
+ switch (dpcd_test_params.bits.BPC) {
+ case 0: // 6 bits
+ requestColorDepth = COLOR_DEPTH_666;
+ break;
+ case 1: // 8 bits
+ requestColorDepth = COLOR_DEPTH_888;
+ break;
+ case 2: // 10 bits
+ requestColorDepth = COLOR_DEPTH_101010;
+ break;
+ case 3: // 12 bits
+ requestColorDepth = COLOR_DEPTH_121212;
+ break;
+ default:
+ break;
+ }
+
+ if (requestColorDepth != COLOR_DEPTH_UNDEFINED
+ && pipe_ctx->stream->timing.display_color_depth != requestColorDepth) {
+ DC_LOG_DEBUG("%s: original bpc %d, changing to %d\n",
+ __func__,
+ pipe_ctx->stream->timing.display_color_depth,
+ requestColorDepth);
+ pipe_ctx->stream->timing.display_color_depth = requestColorDepth;
+ dp_update_dsc_config(pipe_ctx);
+ }
+
dc_link_dp_set_test_pattern(
link,
test_pattern,
@@ -3369,6 +3455,9 @@ static bool retrieve_link_cap(struct dc_link *link)
struct dp_sink_hw_fw_revision dp_hw_fw_revision;
bool is_lttpr_present = false;
const uint32_t post_oui_delay = 30; // 30ms
+ bool vbios_lttpr_enable = false;
+ bool vbios_lttpr_interop = false;
+ struct dc_bios *bios = link->dc->ctx->dc_bios;
memset(dpcd_data, '\0', sizeof(dpcd_data));
memset(lttpr_dpcd_data, '\0', sizeof(lttpr_dpcd_data));
@@ -3416,13 +3505,45 @@ static bool retrieve_link_cap(struct dc_link *link)
return false;
}
- if (link->dc->caps.extended_aux_timeout_support &&
- link->dc->config.allow_lttpr_non_transparent_mode) {
+ /* Query BIOS to determine if LTTPR functionality is forced on by system */
+ if (bios->funcs->get_lttpr_caps) {
+ enum bp_result bp_query_result;
+ uint8_t is_vbios_lttpr_enable = 0;
+
+ bp_query_result = bios->funcs->get_lttpr_caps(bios, &is_vbios_lttpr_enable);
+ vbios_lttpr_enable = (bp_query_result == BP_RESULT_OK) && !!is_vbios_lttpr_enable;
+ }
+
+ if (bios->funcs->get_lttpr_interop) {
+ enum bp_result bp_query_result;
+ uint8_t is_vbios_interop_enabled = 0;
+
+ bp_query_result = bios->funcs->get_lttpr_interop(bios, &is_vbios_interop_enabled);
+ vbios_lttpr_interop = (bp_query_result == BP_RESULT_OK) && !!is_vbios_interop_enabled;
+ }
+
+ /*
+ * Logic to determine LTTPR mode
+ */
+ link->lttpr_mode = LTTPR_MODE_NON_LTTPR;
+ if (vbios_lttpr_enable && vbios_lttpr_interop)
+ link->lttpr_mode = LTTPR_MODE_NON_TRANSPARENT;
+ else if (!vbios_lttpr_enable && vbios_lttpr_interop) {
+ if (link->dc->config.allow_lttpr_non_transparent_mode)
+ link->lttpr_mode = LTTPR_MODE_NON_TRANSPARENT;
+ else
+ link->lttpr_mode = LTTPR_MODE_TRANSPARENT;
+ } else if (!vbios_lttpr_enable && !vbios_lttpr_interop) {
+ if (!link->dc->config.allow_lttpr_non_transparent_mode
+ || !link->dc->caps.extended_aux_timeout_support)
+ link->lttpr_mode = LTTPR_MODE_NON_LTTPR;
+ else
+ link->lttpr_mode = LTTPR_MODE_NON_TRANSPARENT;
+ }
+
+ if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT || link->lttpr_mode == LTTPR_MODE_TRANSPARENT) {
/* By reading LTTPR capability, RX assumes that we will enable
- * LTTPR non transparent if LTTPR is present.
- * Therefore, only query LTTPR capability when both LTTPR
- * extended aux timeout and
- * non transparent mode is supported by hardware
+ * LTTPR extended aux timeout if LTTPR is present.
*/
status = core_link_read_dpcd(
link,
@@ -3460,11 +3581,10 @@ static bool retrieve_link_cap(struct dc_link *link)
link->dpcd_caps.lttpr_caps.revision.raw >= 0x14);
if (is_lttpr_present)
CONN_DATA_DETECT(link, lttpr_dpcd_data, sizeof(lttpr_dpcd_data), "LTTPR Caps: ");
+ else
+ link->lttpr_mode = LTTPR_MODE_NON_LTTPR;
}
- /* decide lttpr non transparent mode */
- link->lttpr_non_transparent_mode = is_lttpr_present;
-
if (!is_lttpr_present)
dc_link_aux_try_to_configure_timeout(link->ddc, LINK_AUX_DEFAULT_TIMEOUT_PERIOD);
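
The selection above reduces to a small decision table over the two VBIOS bits and the driver's config/caps, with a later downgrade to LTTPR_MODE_NON_LTTPR when no repeater is actually detected. Restated as a hypothetical pure function carrying the same logic:

static enum lttpr_mode decide_lttpr_mode(bool vbios_enable, bool vbios_interop,
					 bool allow_non_transparent,
					 bool ext_aux_timeout_support)
{
	if (vbios_enable && vbios_interop)
		return LTTPR_MODE_NON_TRANSPARENT;

	if (!vbios_enable && vbios_interop)
		return allow_non_transparent ?
				LTTPR_MODE_NON_TRANSPARENT : LTTPR_MODE_TRANSPARENT;

	if (!vbios_enable && !vbios_interop)
		return (allow_non_transparent && ext_aux_timeout_support) ?
				LTTPR_MODE_NON_TRANSPARENT : LTTPR_MODE_NON_LTTPR;

	/* vbios_lttpr_enable && !vbios_lttpr_interop: left at the default */
	return LTTPR_MODE_NON_LTTPR;
}
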
@@ -3773,7 +3893,7 @@ void detect_edp_sink_caps(struct dc_link *link)
memset(supported_link_rates, 0, sizeof(supported_link_rates));
if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_14 &&
- (link->dc->config.optimize_edp_link_rate ||
+ (link->dc->debug.optimize_edp_link_rate ||
link->reported_link_cap.link_rate == LINK_RATE_UNKNOWN)) {
// Read DPCD 00010h - 0001Fh 16 bytes at one shot
core_link_read_dpcd(link, DP_SUPPORTED_LINK_RATES,
@@ -4599,3 +4719,51 @@ bool dc_link_set_default_brightness_aux(struct dc_link *link)
}
return false;
}
+
+bool is_edp_ilr_optimization_required(struct dc_link *link, struct dc_crtc_timing *crtc_timing)
+{
+ struct dc_link_settings link_setting;
+ uint8_t link_bw_set;
+ uint8_t link_rate_set;
+ uint32_t req_bw;
+ union lane_count_set lane_count_set = { {0} };
+
+ ASSERT(link || crtc_timing); // invalid input
+
+ if (link->dpcd_caps.edp_supported_link_rates_count == 0 ||
+ !link->dc->debug.optimize_edp_link_rate)
+ return false;
+
+
+ // Read DPCD 00100h to find if standard link rates are set
+ core_link_read_dpcd(link, DP_LINK_BW_SET,
+ &link_bw_set, sizeof(link_bw_set));
+
+ if (link_bw_set) {
+ DC_LOG_EVENT_LINK_TRAINING("eDP ILR: Optimization required, VBIOS used link_bw_set\n");
+ return true;
+ }
+
+ // Read DPCD 00115h to find the edp link rate set used
+ core_link_read_dpcd(link, DP_LINK_RATE_SET,
+ &link_rate_set, sizeof(link_rate_set));
+
+ // Read DPCD 00101h to find out the number of lanes currently set
+ core_link_read_dpcd(link, DP_LANE_COUNT_SET,
+ &lane_count_set.raw, sizeof(lane_count_set));
+
+ req_bw = dc_bandwidth_in_kbps_from_timing(crtc_timing);
+
+ decide_edp_link_settings(link, &link_setting, req_bw);
+
+ if (link->dpcd_caps.edp_supported_link_rates[link_rate_set] != link_setting.link_rate ||
+ lane_count_set.bits.LANE_COUNT_SET != link_setting.lane_count) {
+ DC_LOG_EVENT_LINK_TRAINING("eDP ILR: Optimization required, VBIOS link_rate_set not optimal\n");
+ return true;
+ }
+
+ DC_LOG_EVENT_LINK_TRAINING("eDP ILR: No optimization required, VBIOS set optimal link_rate_set\n");
+ return false;
+}
+
+
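
One consumer of this check is the eDP fast-boot path (see the dce110_enable_accelerated_mode hunk further down): the VBIOS-lit link cannot be reused as-is when it still needs ILR retraining. A minimal sketch of that gating, with the wrapper name hypothetical:

static bool edp_fast_boot_allowed(struct dc_stream_state *edp_stream)
{
	/* a link on a non-optimal ILR setting must retrain */
	return !is_edp_ilr_optimization_required(edp_stream->link,
						 &edp_stream->timing);
}
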
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c
new file mode 100644
index 000000000000..1361b87d86d7
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c
@@ -0,0 +1,303 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "link_enc_cfg.h"
+#include "resource.h"
+#include "dc_link_dp.h"
+
+/* Check whether stream is supported by DIG link encoders. */
+static bool is_dig_link_enc_stream(struct dc_stream_state *stream)
+{
+ bool is_dig_stream = false;
+ struct link_encoder *link_enc = NULL;
+ int i;
+
+ /* Loop over created link encoder objects. */
+ for (i = 0; i < stream->ctx->dc->res_pool->res_cap->num_dig_link_enc; i++) {
+ link_enc = stream->ctx->dc->res_pool->link_encoders[i];
+
+ if (link_enc &&
+ ((uint32_t)stream->signal & link_enc->output_signals)) {
+ if (dc_is_dp_signal(stream->signal)) {
+ /* DIGs do not support DP2.0 streams with 128b/132b encoding. */
+ struct dc_link_settings link_settings = {0};
+
+ decide_link_settings(stream, &link_settings);
+ if ((link_settings.link_rate >= LINK_RATE_LOW) &&
+ link_settings.link_rate <= LINK_RATE_HIGH3) {
+ is_dig_stream = true;
+ break;
+ }
+ } else {
+ is_dig_stream = true;
+ break;
+ }
+ }
+ }
+
+ return is_dig_stream;
+}
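
The support test above treats output_signals as a bitmask of signal types, so stream compatibility is a single AND. Sketch, assuming signal_type enumerators are power-of-two flags as the cast implies:

static bool enc_supports_signal(const struct link_encoder *enc,
				enum signal_type signal)
{
	return ((uint32_t)signal & enc->output_signals) != 0;
}
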
+
+/* Update DIG link encoder resource tracking variables in dc_state. */
+static void update_link_enc_assignment(
+ struct dc_state *state,
+ struct dc_stream_state *stream,
+ enum engine_id eng_id,
+ bool add_enc)
+{
+ int eng_idx;
+ int stream_idx;
+ int i;
+
+ if (eng_id != ENGINE_ID_UNKNOWN) {
+ eng_idx = eng_id - ENGINE_ID_DIGA;
+ stream_idx = -1;
+
+ /* Index of stream in dc_state used to update correct entry in
+ * link_enc_assignments table.
+ */
+ for (i = 0; i < state->stream_count; i++) {
+ if (stream == state->streams[i]) {
+ stream_idx = i;
+ break;
+ }
+ }
+
+ /* Update link encoder assignments table, link encoder availability
+ * pool and link encoder assigned to stream in state.
+ * Add/remove encoder resource to/from stream.
+ */
+ if (stream_idx != -1) {
+ if (add_enc) {
+ state->res_ctx.link_enc_assignments[stream_idx] = (struct link_enc_assignment){
+ .valid = true,
+ .ep_id = (struct display_endpoint_id) {
+ .link_id = stream->link->link_id,
+ .ep_type = stream->link->ep_type},
+ .eng_id = eng_id};
+ state->res_ctx.link_enc_avail[eng_idx] = ENGINE_ID_UNKNOWN;
+ stream->link_enc = stream->ctx->dc->res_pool->link_encoders[eng_idx];
+ } else {
+ state->res_ctx.link_enc_assignments[stream_idx].valid = false;
+ state->res_ctx.link_enc_avail[eng_idx] = eng_id;
+ stream->link_enc = NULL;
+ }
+ } else {
+ dm_output_to_console("%s: Stream not found in dc_state.\n", __func__);
+ }
+ }
+}
+
+/* Return first available DIG link encoder. */
+static enum engine_id find_first_avail_link_enc(
+ struct dc_context *ctx,
+ struct dc_state *state)
+{
+ enum engine_id eng_id = ENGINE_ID_UNKNOWN;
+ int i;
+
+ for (i = 0; i < ctx->dc->res_pool->res_cap->num_dig_link_enc; i++) {
+ eng_id = state->res_ctx.link_enc_avail[i];
+ if (eng_id != ENGINE_ID_UNKNOWN)
+ break;
+ }
+
+ return eng_id;
+}
+
+/* Return stream using DIG link encoder resource. NULL if unused. */
+static struct dc_stream_state *get_stream_using_link_enc(
+ struct dc_state *state,
+ enum engine_id eng_id)
+{
+ struct dc_stream_state *stream = NULL;
+ int stream_idx = -1;
+ int i;
+
+ for (i = 0; i < state->stream_count; i++) {
+ struct link_enc_assignment assignment = state->res_ctx.link_enc_assignments[i];
+
+ if (assignment.valid && (assignment.eng_id == eng_id)) {
+ stream_idx = i;
+ break;
+ }
+ }
+
+ if (stream_idx != -1)
+ stream = state->streams[stream_idx];
+ else
+ dm_output_to_console("%s: No stream using DIG(%d).\n", __func__, eng_id);
+
+ return stream;
+}
+
+void link_enc_cfg_init(
+ struct dc *dc,
+ struct dc_state *state)
+{
+ int i;
+
+ for (i = 0; i < dc->res_pool->res_cap->num_dig_link_enc; i++) {
+ if (dc->res_pool->link_encoders[i])
+ state->res_ctx.link_enc_avail[i] = (enum engine_id) i;
+ else
+ state->res_ctx.link_enc_avail[i] = ENGINE_ID_UNKNOWN;
+ }
+}
+
+void link_enc_cfg_link_encs_assign(
+ struct dc *dc,
+ struct dc_state *state,
+ struct dc_stream_state *streams[],
+ uint8_t stream_count)
+{
+ enum engine_id eng_id = ENGINE_ID_UNKNOWN;
+ int i;
+
+ /* Release DIG link encoder resources before running assignment algorithm. */
+ for (i = 0; i < stream_count; i++)
+ dc->res_pool->funcs->link_enc_unassign(state, streams[i]);
+
+ /* (a) Assign DIG link encoders to physical (unmappable) endpoints first. */
+ for (i = 0; i < stream_count; i++) {
+ struct dc_stream_state *stream = streams[i];
+
+ /* Skip stream if not supported by DIG link encoder. */
+ if (!is_dig_link_enc_stream(stream))
+ continue;
+
+ /* Physical endpoints have a fixed mapping to DIG link encoders. */
+ if (!stream->link->is_dig_mapping_flexible) {
+ eng_id = stream->link->eng_id;
+ update_link_enc_assignment(state, stream, eng_id, true);
+ }
+ }
+
+ /* (b) Then assign encoders to mappable endpoints. */
+ eng_id = ENGINE_ID_UNKNOWN;
+
+ for (i = 0; i < stream_count; i++) {
+ struct dc_stream_state *stream = streams[i];
+
+ /* Skip stream if not supported by DIG link encoder. */
+ if (!is_dig_link_enc_stream(stream))
+ continue;
+
+ /* Mappable endpoints have a flexible mapping to DIG link encoders. */
+ if (stream->link->is_dig_mapping_flexible) {
+ eng_id = find_first_avail_link_enc(stream->ctx, state);
+ update_link_enc_assignment(state, stream, eng_id, true);
+ }
+ }
+}
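
Taken together, the expected lifecycle is: seed the availability pool once per state, run the two-phase assignment (fixed physical endpoints before flexible ones), and hand encoders back on stream removal. A usage sketch built from the functions defined in this file:

static void example_reassign(struct dc *dc, struct dc_state *state,
			     struct dc_stream_state *streams[],
			     uint8_t stream_count)
{
	/* seed link_enc_avail[] from the resource pool */
	link_enc_cfg_init(dc, state);

	/* phase (a): fixed DIG mappings; phase (b): flexible mappings */
	link_enc_cfg_link_encs_assign(dc, state, streams, stream_count);

	/* on removal, a stream returns its encoder to the pool, e.g.: */
	if (stream_count)
		link_enc_cfg_link_enc_unassign(state, streams[0]);
}
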
+
+void link_enc_cfg_link_enc_unassign(
+ struct dc_state *state,
+ struct dc_stream_state *stream)
+{
+ enum engine_id eng_id = ENGINE_ID_UNKNOWN;
+
+ /* Only DIG link encoders. */
+ if (!is_dig_link_enc_stream(stream))
+ return;
+
+ if (stream->link_enc)
+ eng_id = stream->link_enc->preferred_engine;
+
+ update_link_enc_assignment(state, stream, eng_id, false);
+}
+
+bool link_enc_cfg_is_transmitter_mappable(
+ struct dc_state *state,
+ struct link_encoder *link_enc)
+{
+ bool is_mappable = false;
+ enum engine_id eng_id = link_enc->preferred_engine;
+ struct dc_stream_state *stream = get_stream_using_link_enc(state, eng_id);
+
+ if (stream)
+ is_mappable = stream->link->is_dig_mapping_flexible;
+
+ return is_mappable;
+}
+
+struct dc_link *link_enc_cfg_get_link_using_link_enc(
+ struct dc_state *state,
+ enum engine_id eng_id)
+{
+ struct dc_link *link = NULL;
+ int stream_idx = -1;
+ int i;
+
+ for (i = 0; i < state->stream_count; i++) {
+ struct link_enc_assignment assignment = state->res_ctx.link_enc_assignments[i];
+
+ if (assignment.valid && (assignment.eng_id == eng_id)) {
+ stream_idx = i;
+ break;
+ }
+ }
+
+ if (stream_idx != -1)
+ link = state->streams[stream_idx]->link;
+ else
+ dm_output_to_console("%s: No link using DIG(%d).\n", __func__, eng_id);
+
+ return link;
+}
+
+struct link_encoder *link_enc_cfg_get_link_enc_used_by_link(
+ struct dc_state *state,
+ struct dc_link *link)
+{
+ struct link_encoder *link_enc = NULL;
+ struct display_endpoint_id ep_id;
+ int stream_idx = -1;
+ int i;
+
+ ep_id = (struct display_endpoint_id) {
+ .link_id = link->link_id,
+ .ep_type = link->ep_type};
+
+ for (i = 0; i < state->stream_count; i++) {
+ struct link_enc_assignment assignment = state->res_ctx.link_enc_assignments[i];
+
+ if (assignment.valid &&
+ assignment.ep_id.link_id.id == ep_id.link_id.id &&
+ assignment.ep_id.link_id.enum_id == ep_id.link_id.enum_id &&
+ assignment.ep_id.link_id.type == ep_id.link_id.type &&
+ assignment.ep_id.ep_type == ep_id.ep_type) {
+ stream_idx = i;
+ break;
+ }
+ }
+
+ if (stream_idx != -1)
+ link_enc = state->streams[stream_idx]->link_enc;
+ else
+ dm_output_to_console("%s: No link encoder used by link(%d).\n", __func__, link->link_index);
+
+ return link_enc;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
index 124ce215fca5..b426f878fb99 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
@@ -14,6 +14,7 @@
#include "dpcd_defs.h"
#include "dsc.h"
#include "resource.h"
+#include "link_enc_cfg.h"
#include "clk_mgr.h"
static uint8_t convert_to_count(uint8_t lttpr_repeater_count)
@@ -95,7 +96,7 @@ void dp_enable_link_phy(
enum clock_source_id clock_source,
const struct dc_link_settings *link_settings)
{
- struct link_encoder *link_enc = link->link_enc;
+ struct link_encoder *link_enc;
struct dc *dc = link->ctx->dc;
struct dmcu *dmcu = dc->res_pool->dmcu;
@@ -105,6 +106,13 @@ void dp_enable_link_phy(
link->dc->res_pool->dp_clock_source;
unsigned int i;
+ /* Link should always be assigned encoder when en-/disabling. */
+ if (link->is_dig_mapping_flexible && dc->res_pool->funcs->link_encs_assign)
+ link_enc = link_enc_cfg_get_link_enc_used_by_link(link->dc->current_state, link);
+ else
+ link_enc = link->link_enc;
+ ASSERT(link_enc);
+
if (link->connector_signal == SIGNAL_TYPE_EDP) {
link->dc->hwss.edp_power_control(link, true);
link->dc->hwss.edp_wait_for_hpd_ready(link, true);
@@ -227,6 +235,14 @@ void dp_disable_link_phy(struct dc_link *link, enum signal_type signal)
{
struct dc *dc = link->ctx->dc;
struct dmcu *dmcu = dc->res_pool->dmcu;
+ struct link_encoder *link_enc;
+
+ /* Link should always be assigned encoder when en-/disabling. */
+ if (link->is_dig_mapping_flexible && dc->res_pool->funcs->link_encs_assign)
+ link_enc = link_enc_cfg_get_link_enc_used_by_link(link->dc->current_state, link);
+ else
+ link_enc = link->link_enc;
+ ASSERT(link_enc);
if (!link->wa_flags.dp_keep_receiver_powered)
dp_receiver_power_ctrl(link, false);
@@ -234,13 +250,13 @@ void dp_disable_link_phy(struct dc_link *link, enum signal_type signal)
if (signal == SIGNAL_TYPE_EDP) {
if (link->dc->hwss.edp_backlight_control)
link->dc->hwss.edp_backlight_control(link, false);
- link->link_enc->funcs->disable_output(link->link_enc, signal);
+ link_enc->funcs->disable_output(link_enc, signal);
link->dc->hwss.edp_power_control(link, false);
} else {
if (dmcu != NULL && dmcu->funcs->lock_phy)
dmcu->funcs->lock_phy(dmcu);
- link->link_enc->funcs->disable_output(link->link_enc, signal);
+ link_enc->funcs->disable_output(link_enc, signal);
if (dmcu != NULL && dmcu->funcs->unlock_phy)
dmcu->funcs->unlock_phy(dmcu);
@@ -302,7 +318,7 @@ void dp_set_hw_lane_settings(
{
struct link_encoder *encoder = link->link_enc;
- if (link->lttpr_non_transparent_mode && !is_immediate_downstream(link, offset))
+ if ((link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) && !is_immediate_downstream(link, offset))
return;
/* call Encoder to set lane settings */
@@ -415,7 +431,7 @@ static void dsc_optc_config_log(struct display_stream_compressor *dsc,
DC_LOG_DSC("\tslice_width %d", config->slice_width);
}
-static bool dp_set_dsc_on_rx(struct pipe_ctx *pipe_ctx, bool enable)
+bool dp_set_dsc_on_rx(struct pipe_ctx *pipe_ctx, bool enable)
{
struct dc *dc = pipe_ctx->stream->ctx->dc;
struct dc_stream_state *stream = pipe_ctx->stream;
@@ -525,7 +541,7 @@ bool dp_set_dsc_enable(struct pipe_ctx *pipe_ctx, bool enable)
goto out;
if (enable) {
- if (dp_set_dsc_on_rx(pipe_ctx, true)) {
+ {
dp_set_dsc_on_stream(pipe_ctx, true);
result = true;
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 3c91d16c2710..8cb937c046aa 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -1930,6 +1930,9 @@ enum dc_status dc_remove_stream_from_ctx(
dc->res_pool,
del_pipe->stream_res.stream_enc,
false);
+ /* Release link encoder from stream in new dc_state. */
+ if (dc->res_pool->funcs->link_enc_unassign)
+ dc->res_pool->funcs->link_enc_unassign(new_ctx, del_pipe->stream);
if (del_pipe->stream_res.audio)
update_audio_usage(
@@ -2503,26 +2506,31 @@ static void set_avi_info_frame(
hdmi_info.bits.ITC = itc_value;
}
+ if (stream->qs_bit == 1) {
+ if (color_space == COLOR_SPACE_SRGB ||
+ color_space == COLOR_SPACE_2020_RGB_FULLRANGE)
+ hdmi_info.bits.Q0_Q1 = RGB_QUANTIZATION_FULL_RANGE;
+ else if (color_space == COLOR_SPACE_SRGB_LIMITED ||
+ color_space == COLOR_SPACE_2020_RGB_LIMITEDRANGE)
+ hdmi_info.bits.Q0_Q1 = RGB_QUANTIZATION_LIMITED_RANGE;
+ else
+ hdmi_info.bits.Q0_Q1 = RGB_QUANTIZATION_DEFAULT_RANGE;
+ } else
+ hdmi_info.bits.Q0_Q1 = RGB_QUANTIZATION_DEFAULT_RANGE;
+
/* TODO : We should handle YCC quantization */
/* but we do not have matrix calculation */
- if (stream->qs_bit == 1 &&
- stream->qy_bit == 1) {
+ if (stream->qy_bit == 1) {
if (color_space == COLOR_SPACE_SRGB ||
- color_space == COLOR_SPACE_2020_RGB_FULLRANGE) {
- hdmi_info.bits.Q0_Q1 = RGB_QUANTIZATION_FULL_RANGE;
+ color_space == COLOR_SPACE_2020_RGB_FULLRANGE)
hdmi_info.bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE;
- } else if (color_space == COLOR_SPACE_SRGB_LIMITED ||
- color_space == COLOR_SPACE_2020_RGB_LIMITEDRANGE) {
- hdmi_info.bits.Q0_Q1 = RGB_QUANTIZATION_LIMITED_RANGE;
+ else if (color_space == COLOR_SPACE_SRGB_LIMITED ||
+ color_space == COLOR_SPACE_2020_RGB_LIMITEDRANGE)
hdmi_info.bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE;
- } else {
- hdmi_info.bits.Q0_Q1 = RGB_QUANTIZATION_DEFAULT_RANGE;
+ else
hdmi_info.bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE;
- }
- } else {
- hdmi_info.bits.Q0_Q1 = RGB_QUANTIZATION_DEFAULT_RANGE;
- hdmi_info.bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE;
- }
+ } else
+ hdmi_info.bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE;
///VIC
format = stream->timing.timing_3d_format;
@@ -2842,6 +2850,10 @@ bool pipe_need_reprogram(
if (pipe_ctx_old->stream_res.dsc != pipe_ctx->stream_res.dsc)
return true;
+ /* DIG link encoder resource assignment for stream changed. */
+ if (pipe_ctx_old->stream->link_enc != pipe_ctx->stream->link_enc)
+ return true;
+
return false;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index d163007e057c..100d434f7a03 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -45,7 +45,7 @@
/* forward declaration */
struct aux_payload;
-#define DC_VER "3.2.127"
+#define DC_VER "3.2.132"
#define MAX_SURFACES 3
#define MAX_PLANES 6
@@ -293,7 +293,6 @@ struct dc_config {
bool gpu_vm_support;
bool disable_disp_pll_sharing;
bool fbc_support;
- bool optimize_edp_link_rate;
bool disable_fractional_pwm;
bool allow_seamless_boot_optimization;
bool power_down_display_on_boot;
@@ -309,6 +308,8 @@ struct dc_config {
#endif
uint64_t vblank_alignment_dto_params;
uint8_t vblank_alignment_max_frame_time_diff;
+ bool is_asymmetric_memory;
+ bool is_single_rank_dimm;
};
enum visual_confirm {
@@ -460,6 +461,7 @@ struct dc_debug_options {
enum pipe_split_policy pipe_split_policy;
bool force_single_disp_pipe_split;
bool voltage_align_fclk;
+ bool disable_min_fclk;
bool disable_dfs_bypass;
bool disable_dpp_power_gate;
@@ -540,6 +542,11 @@ struct dc_debug_options {
/* Enable dmub aux for legacy ddc */
bool enable_dmub_aux_for_legacy_ddc;
+ bool optimize_edp_link_rate; /* eDP ILR */
+ /* force enable edp FEC */
+ bool force_enable_edp_fec;
+ /* FEC/PSR1 sequence enable delay in 100us */
+ uint8_t fec_enable_delay_in100us;
};
struct dc_debug_data {
@@ -712,6 +719,7 @@ void dc_init_callbacks(struct dc *dc,
void dc_deinit_callbacks(struct dc *dc);
void dc_destroy(struct dc **dc);
+void dc_wait_for_vblank(struct dc *dc, struct dc_stream_state *stream);
/*******************************************************************************
* Surface Interfaces
******************************************************************************/
@@ -899,6 +907,8 @@ struct dc_plane_state {
union surface_update_flags update_flags;
bool flip_int_enabled;
+ bool skip_manual_trigger;
+
/* private to DC core */
struct dc_plane_status status;
struct dc_context *ctx;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_bios_types.h b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h
index 86ab8f16f621..67abda44eb1f 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_bios_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h
@@ -150,6 +150,12 @@ struct dc_vbios_funcs {
struct dc_bios *dcb,
struct graphics_object_id object_id,
struct bp_disp_connector_caps_info *info);
+ enum bp_result (*get_lttpr_caps)(
+ struct dc_bios *dcb,
+ uint8_t *dce_caps);
+ enum bp_result (*get_lttpr_interop)(
+ struct dc_bios *dcb,
+ uint8_t *dce_caps);
};
struct bios_registers {
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
index c50ef5a909a6..054bab45ee17 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
@@ -35,6 +35,13 @@ enum dc_link_fec_state {
dc_link_fec_ready,
dc_link_fec_enabled
};
+
+enum lttpr_mode {
+ LTTPR_MODE_NON_LTTPR,
+ LTTPR_MODE_TRANSPARENT,
+ LTTPR_MODE_NON_TRANSPARENT,
+};
+
struct dc_link_status {
bool link_active;
struct dpcd_caps *dpcd_caps;
@@ -100,7 +107,7 @@ struct dc_link {
bool link_state_valid;
bool aux_access_disabled;
bool sync_lt_in_progress;
- bool lttpr_non_transparent_mode;
+ enum lttpr_mode lttpr_mode;
bool is_internal_display;
/* TODO: Rename. Flag an endpoint as having a programmable mapping to a
@@ -125,6 +132,11 @@ struct dc_link {
uint8_t hpd_src;
uint8_t link_enc_hw_inst;
+ /* DIG link encoder ID. Used as index in link encoder resource pool.
+ * For links with fixed mapping to DIG, this is not changed after dc_link
+ * object creation.
+ */
+ enum engine_id eng_id;
bool test_pattern_enabled;
union compliance_test_state compliance_test_state;
@@ -144,6 +156,11 @@ struct dc_link {
struct panel_cntl *panel_cntl;
struct link_encoder *link_enc;
struct graphics_object_id link_id;
+ /* Endpoint type distinguishes display endpoints which do not have entries
+ * in the BIOS connector table from those that do. Helps when tracking link
+ * encoder to display endpoint assignments.
+ */
+ enum display_endpoint_type ep_type;
union ddi_channel_mapping ddi_channel_mapping;
struct connector_device_tag_info device_tag;
struct dpcd_caps dpcd_caps;
@@ -259,7 +276,6 @@ enum dc_detect_reason {
bool dc_link_detect(struct dc_link *dc_link, enum dc_detect_reason reason);
bool dc_link_get_hpd_state(struct dc_link *dc_link);
enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx);
-enum dc_status dc_link_reallocate_mst_payload(struct dc_link *link);
/* Notify DC about DP RX Interrupt (aka Short Pulse Interrupt).
* Return:
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index e747370fc43b..13dae7238a58 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -130,12 +130,24 @@ union stream_update_flags {
uint32_t raw;
};
+struct test_pattern {
+ enum dp_test_pattern type;
+ enum dp_test_pattern_color_space color_space;
+ struct link_training_settings const *p_link_settings;
+ unsigned char const *p_custom_pattern;
+ unsigned int cust_pattern_size;
+};
+
struct dc_stream_state {
// sink is deprecated, new code should not reference
// this pointer
struct dc_sink *sink;
struct dc_link *link;
+ /* For dynamic link encoder assignment, update the link encoder assigned to
+ * a stream via the volatile dc_state rather than the static dc_link.
+ */
+ struct link_encoder *link_enc;
struct dc_panel_patch sink_patches;
union display_content_support content_support;
struct dc_crtc_timing timing;
@@ -226,7 +238,8 @@ struct dc_stream_state {
bool apply_seamless_boot_optimization;
uint32_t stream_id;
- bool is_dsc_enabled;
+
+ struct test_pattern test_pattern;
union stream_update_flags update_flags;
bool has_non_synchronizable_pclk;
@@ -264,6 +277,8 @@ struct dc_stream_update {
struct dc_dsc_config *dsc_config;
struct dc_transfer_func *func_shaper;
struct dc_3dlut *lut3d_func;
+
+ struct test_pattern *pending_test_pattern;
};
bool dc_is_stream_unchanged(
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index 80757a0ea7c6..432754eaf10b 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -113,6 +113,7 @@ struct dc_context {
struct gpio_service *gpio_service;
uint32_t dc_sink_id_count;
uint32_t dc_stream_id_count;
+ uint32_t dc_edp_id_count;
uint64_t fbc_gpu_addr;
struct dc_dmub_srv *dmub_srv;
@@ -687,7 +688,8 @@ enum dc_psr_state {
PSR_STATE5,
PSR_STATE5a,
PSR_STATE5b,
- PSR_STATE5c
+ PSR_STATE5c,
+ PSR_STATE_INVALID = 0xFF
};
struct psr_config {
@@ -934,4 +936,19 @@ enum dc_psr_version {
DC_PSR_VERSION_UNSUPPORTED = 0xFFFFFFFF,
};
+/* Possible values of display_endpoint_id.endpoint */
+enum display_endpoint_type {
+ DISPLAY_ENDPOINT_PHY = 0, /* Physical connector. */
+ DISPLAY_ENDPOINT_UNKNOWN = -1
+};
+
+/* Extends graphics_object_id with an additional member 'ep_type' for
+ * distinguishing between physical endpoints (with entries in BIOS connector table) and
+ * logical endpoints.
+ */
+struct display_endpoint_id {
+ struct graphics_object_id link_id;
+ enum display_endpoint_type ep_type;
+};
+
#endif /* DC_TYPES_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
index 4e87e70237e3..874b132fe1d7 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
@@ -283,7 +283,7 @@ struct abm *dce_abm_create(
const struct dce_abm_shift *abm_shift,
const struct dce_abm_mask *abm_mask)
{
- struct dce_abm *abm_dce = kzalloc(sizeof(*abm_dce), GFP_KERNEL);
+ struct dce_abm *abm_dce = kzalloc(sizeof(*abm_dce), GFP_ATOMIC);
if (abm_dce == NULL) {
BREAK_TO_DEBUGGER();
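
This create path (like the dmcu and dccg variants below) switches from GFP_KERNEL to GFP_ATOMIC so it can be called from contexts that must not sleep, at the cost of failing sooner under memory pressure. A sketch of the trade-off, with may_sleep a hypothetical parameter:

#include <linux/slab.h>

static void *dc_zalloc(size_t size, bool may_sleep)
{
	/* GFP_KERNEL may block for reclaim; GFP_ATOMIC never sleeps but
	 * draws on reserves and fails more readily
	 */
	return kzalloc(size, may_sleep ? GFP_KERNEL : GFP_ATOMIC);
}
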
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
index 4f864501e046..8cd841320ded 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
@@ -1133,7 +1133,7 @@ struct dmcu *dcn10_dmcu_create(
const struct dce_dmcu_shift *dmcu_shift,
const struct dce_dmcu_mask *dmcu_mask)
{
- struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_KERNEL);
+ struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_ATOMIC);
if (dmcu_dce == NULL) {
BREAK_TO_DEBUGGER();
@@ -1154,7 +1154,7 @@ struct dmcu *dcn20_dmcu_create(
const struct dce_dmcu_shift *dmcu_shift,
const struct dce_dmcu_mask *dmcu_mask)
{
- struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_KERNEL);
+ struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_ATOMIC);
if (dmcu_dce == NULL) {
BREAK_TO_DEBUGGER();
@@ -1175,7 +1175,7 @@ struct dmcu *dcn21_dmcu_create(
const struct dce_dmcu_shift *dmcu_shift,
const struct dce_dmcu_mask *dmcu_mask)
{
- struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_KERNEL);
+ struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_ATOMIC);
if (dmcu_dce == NULL) {
BREAK_TO_DEBUGGER();
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
index 15ed09b7a452..5e99553fcdd4 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
@@ -80,19 +80,26 @@ static enum dc_psr_state convert_psr_state(uint32_t raw_state)
static void dmub_psr_get_state(struct dmub_psr *dmub, enum dc_psr_state *state)
{
struct dmub_srv *srv = dmub->ctx->dmub_srv->dmub;
- uint32_t raw_state;
+ uint32_t raw_state = 0;
+ uint32_t retry_count = 0;
enum dmub_status status;
- // Send gpint command and wait for ack
- status = dmub_srv_send_gpint_command(srv, DMUB_GPINT__GET_PSR_STATE, 0, 30);
-
- if (status == DMUB_STATUS_OK) {
- // GPINT was executed, get response
- dmub_srv_get_gpint_response(srv, &raw_state);
- *state = convert_psr_state(raw_state);
- } else
- // Return invalid state when GPINT times out
- *state = 0xFF;
+ do {
+ // Send gpint command and wait for ack
+ status = dmub_srv_send_gpint_command(srv, DMUB_GPINT__GET_PSR_STATE, 0, 30);
+
+ if (status == DMUB_STATUS_OK) {
+ // GPINT was executed, get response
+ dmub_srv_get_gpint_response(srv, &raw_state);
+ *state = convert_psr_state(raw_state);
+ } else
+ // Return invalid state when GPINT times out
+ *state = PSR_STATE_INVALID;
+
+ // Assert if max retry hit
+ if (retry_count >= 1000)
+ ASSERT(0);
+ } while (++retry_count <= 1000 && *state == PSR_STATE_INVALID);
}
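
The loop above bounds GPINT polling to 1000 attempts and asserts once the budget is exhausted. The same behavior restated as a sketch with simplified control flow, using the helpers already visible in this function:

static enum dc_psr_state query_psr_state(struct dmub_srv *srv)
{
	enum dc_psr_state state = PSR_STATE_INVALID;
	uint32_t raw_state = 0;
	int retries = 0;

	while (state == PSR_STATE_INVALID && retries++ < 1000) {
		if (dmub_srv_send_gpint_command(srv, DMUB_GPINT__GET_PSR_STATE,
						0, 30) == DMUB_STATUS_OK) {
			dmub_srv_get_gpint_response(srv, &raw_state);
			state = convert_psr_state(raw_state);
		}
	}

	ASSERT(state != PSR_STATE_INVALID); /* max retry hit */
	return state;
}
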
/*
@@ -277,6 +284,8 @@ static bool dmub_psr_copy_settings(struct dmub_psr *dmub,
copy_settings_data->debug.u32All = 0;
copy_settings_data->debug.bitfields.visual_confirm = dc->dc->debug.visual_confirm == VISUAL_CONFIRM_PSR;
copy_settings_data->debug.bitfields.use_hw_lock_mgr = 1;
+ copy_settings_data->fec_enable_status = (link->fec_state == dc_link_fec_enabled);
+ copy_settings_data->fec_enable_delay_in100us = link->dc->debug.fec_enable_delay_in100us;
dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
dc_dmub_srv_cmd_execute(dc->dmub_srv);
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 804092f81f85..5ddeee96bf23 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -48,6 +48,7 @@
#include "stream_encoder.h"
#include "link_encoder.h"
#include "link_hwss.h"
+#include "dc_link_dp.h"
#include "clock_source.h"
#include "clk_mgr.h"
#include "abm.h"
@@ -1694,6 +1695,8 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
bool can_apply_edp_fast_boot = false;
bool can_apply_seamless_boot = false;
bool keep_edp_vdd_on = false;
+ DC_LOGGER_INIT();
+
get_edp_links_with_sink(dc, edp_links_with_sink, &edp_with_sink_num);
get_edp_links(dc, edp_links, &edp_num);
@@ -1714,8 +1717,11 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
/* Set optimization flag on eDP stream*/
if (edp_stream_num && edp_link->link_status.link_active) {
edp_stream = edp_streams[0];
- edp_stream->apply_edp_fast_boot_optimization = true;
- can_apply_edp_fast_boot = true;
+ can_apply_edp_fast_boot = !is_edp_ilr_optimization_required(edp_stream->link, &edp_stream->timing);
+ edp_stream->apply_edp_fast_boot_optimization = can_apply_edp_fast_boot;
+ if (can_apply_edp_fast_boot)
+ DC_LOG_EVENT_LINK_TRAINING("eDP fast boot disabled to optimize link rate\n");
+
break;
}
}
@@ -1846,8 +1852,7 @@ void dce110_set_safe_displaymarks(
******************************************************************************/
static void set_drr(struct pipe_ctx **pipe_ctx,
- int num_pipes, unsigned int vmin, unsigned int vmax,
- unsigned int vmid, unsigned int vmid_frame_number)
+ int num_pipes, struct dc_crtc_timing_adjust adjust)
{
int i = 0;
struct drr_params params = {0};
@@ -1856,8 +1861,8 @@ static void set_drr(struct pipe_ctx **pipe_ctx,
// Note DRR trigger events are generated regardless of whether num frames met.
unsigned int num_frames = 2;
- params.vertical_total_max = vmax;
- params.vertical_total_min = vmin;
+ params.vertical_total_max = adjust.v_total_max;
+ params.vertical_total_min = adjust.v_total_min;
/* TODO: If multiple pipes are to be supported, you need
* some GSL stuff. Static screen triggers may be programmed differently
@@ -1867,7 +1872,7 @@ static void set_drr(struct pipe_ctx **pipe_ctx,
pipe_ctx[i]->stream_res.tg->funcs->set_drr(
pipe_ctx[i]->stream_res.tg, &params);
- if (vmax != 0 && vmin != 0)
+ if (adjust.v_total_max != 0 && adjust.v_total_min != 0)
pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control(
pipe_ctx[i]->stream_res.tg,
event_triggers, num_frames);
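
Callers of set_drr now pass a single dc_crtc_timing_adjust instead of four scalars; the dcn10 variant further down also consumes v_total_mid and v_total_mid_frame_num. A caller-side sketch, assuming the hwss hook matches the new signature:

static void request_drr(struct dc *dc, struct pipe_ctx **pipes, int num_pipes,
			unsigned int vmin, unsigned int vmax)
{
	struct dc_crtc_timing_adjust adjust = {
		.v_total_min = vmin,
		.v_total_max = vmax,
	};

	dc->hwss.set_drr(pipes, num_pipes, adjust);
}
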
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
index 612450f99278..725d92e40cd3 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
@@ -526,7 +526,7 @@ static struct output_pixel_processor *dce80_opp_create(
return &opp->base;
}
-struct dce_aux *dce80_aux_engine_create(
+static struct dce_aux *dce80_aux_engine_create(
struct dc_context *ctx,
uint32_t inst)
{
@@ -564,7 +564,7 @@ static const struct dce_i2c_mask i2c_masks = {
I2C_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
};
-struct dce_i2c_hw *dce80_i2c_hw_create(
+static struct dce_i2c_hw *dce80_i2c_hw_create(
struct dc_context *ctx,
uint32_t inst)
{
@@ -580,7 +580,7 @@ struct dce_i2c_hw *dce80_i2c_hw_create(
return dce_i2c_hw;
}
-struct dce_i2c_sw *dce80_i2c_sw_create(
+static struct dce_i2c_sw *dce80_i2c_sw_create(
struct dc_context *ctx)
{
struct dce_i2c_sw *dce_i2c_sw =
@@ -714,7 +714,7 @@ static const struct encoder_feature_support link_enc_feature = {
.flags.bits.IS_TPS3_CAPABLE = true
};
-struct link_encoder *dce80_link_encoder_create(
+static struct link_encoder *dce80_link_encoder_create(
const struct encoder_init_data *enc_init_data)
{
struct dce110_link_encoder *enc110 =
@@ -753,7 +753,7 @@ static struct panel_cntl *dce80_panel_cntl_create(const struct panel_cntl_init_d
return &panel_cntl->base;
}
-struct clock_source *dce80_clock_source_create(
+static struct clock_source *dce80_clock_source_create(
struct dc_context *ctx,
struct dc_bios *bios,
enum clock_source_id id,
@@ -777,7 +777,7 @@ struct clock_source *dce80_clock_source_create(
return NULL;
}
-void dce80_clock_source_destroy(struct clock_source **clk_src)
+static void dce80_clock_source_destroy(struct clock_source **clk_src)
{
kfree(TO_DCE110_CLK_SRC(*clk_src));
*clk_src = NULL;
@@ -867,7 +867,7 @@ static void dce80_resource_destruct(struct dce110_resource_pool *pool)
}
}
-bool dce80_validate_bandwidth(
+static bool dce80_validate_bandwidth(
struct dc *dc,
struct dc_state *context,
bool fast_validate)
@@ -912,7 +912,7 @@ static bool dce80_validate_surface_sets(
return true;
}
-enum dc_status dce80_validate_global(
+static enum dc_status dce80_validate_global(
struct dc *dc,
struct dc_state *context)
{
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 9eb33eae0e81..7c939c0a977b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -1893,7 +1893,7 @@ uint64_t reduceSizeAndFraction(
num = *numerator;
denom = *denominator;
for (i = 0; i < count; i++) {
- uint32_t num_reminder, denom_reminder;
+ uint32_t num_remainder, denom_remainder;
uint64_t num_result, denom_result;
if (checkUint32Bounary &&
num <= max_int32 && denom <= max_int32) {
@@ -1901,13 +1901,13 @@ uint64_t reduceSizeAndFraction(
break;
}
do {
- num_result = div_u64_rem(num, prime_numbers[i], &num_reminder);
- denom_result = div_u64_rem(denom, prime_numbers[i], &denom_reminder);
- if (num_reminder == 0 && denom_reminder == 0) {
+ num_result = div_u64_rem(num, prime_numbers[i], &num_remainder);
+ denom_result = div_u64_rem(denom, prime_numbers[i], &denom_remainder);
+ if (num_remainder == 0 && denom_remainder == 0) {
num = num_result;
denom = denom_result;
}
- } while (num_reminder == 0 && denom_reminder == 0);
+ } while (num_remainder == 0 && denom_remainder == 0);
}
*numerator = num;
*denominator = denom;
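
The renamed loop divides numerator and denominator by a fixed table of small primes until neither divides evenly. For comparison, the same reduction via Euclid's gcd is sketched below; a kernel implementation would avoid the raw 64-bit modulo on 32-bit targets (div64_u64_rem and friends):

static u64 gcd64(u64 a, u64 b)
{
	while (b) {
		u64 t = a % b;

		a = b;
		b = t;
	}
	return a;
}

static void reduce_fraction(u64 *num, u64 *den)
{
	u64 g = gcd64(*num, *den);

	if (g) {
		*num /= g;
		*den /= g;
	}
}
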
@@ -3271,8 +3271,7 @@ void dcn10_optimize_bandwidth(
}
void dcn10_set_drr(struct pipe_ctx **pipe_ctx,
- int num_pipes, unsigned int vmin, unsigned int vmax,
- unsigned int vmid, unsigned int vmid_frame_number)
+ int num_pipes, struct dc_crtc_timing_adjust adjust)
{
int i = 0;
struct drr_params params = {0};
@@ -3281,11 +3280,10 @@ void dcn10_set_drr(struct pipe_ctx **pipe_ctx,
// Note DRR trigger events are generated regardless of whether num frames met.
unsigned int num_frames = 2;
- params.vertical_total_max = vmax;
- params.vertical_total_min = vmin;
- params.vertical_total_mid = vmid;
- params.vertical_total_mid_frame_num = vmid_frame_number;
-
+ params.vertical_total_max = adjust.v_total_max;
+ params.vertical_total_min = adjust.v_total_min;
+ params.vertical_total_mid = adjust.v_total_mid;
+ params.vertical_total_mid_frame_num = adjust.v_total_mid_frame_num;
/* TODO: If multiple pipes are to be supported, you need
* some GSL stuff. Static screen triggers may be programmed differently
* as well.
@@ -3293,7 +3291,7 @@ void dcn10_set_drr(struct pipe_ctx **pipe_ctx,
for (i = 0; i < num_pipes; i++) {
pipe_ctx[i]->stream_res.tg->funcs->set_drr(
pipe_ctx[i]->stream_res.tg, &params);
- if (vmax != 0 && vmin != 0)
+ if (adjust.v_total_max != 0 && adjust.v_total_min != 0)
pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control(
pipe_ctx[i]->stream_res.tg,
event_triggers, num_frames);
@@ -3981,3 +3979,19 @@ void dcn10_get_clock(struct dc *dc,
dc->clk_mgr->funcs->get_clock(dc->clk_mgr, context, clock_type, clock_cfg);
}
+
+void dcn10_get_dcc_en_bits(struct dc *dc, int *dcc_en_bits)
+{
+ struct resource_pool *pool = dc->res_pool;
+ int i;
+
+ for (i = 0; i < pool->pipe_count; i++) {
+ struct hubp *hubp = pool->hubps[i];
+ struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state);
+
+ hubp->funcs->hubp_read_state(hubp);
+
+ if (!s->blank_en)
+ dcc_en_bits[i] = s->dcc_en ? 1 : 0;
+ }
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
index e0800cd1cc02..37bec421fde8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
@@ -1,5 +1,5 @@
/*
-* Copyright 2016 Advanced Micro Devices, Inc.
+* Copyright 2016-2020 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -145,8 +145,7 @@ bool dcn10_dummy_display_power_gating(
struct dc_bios *dcb,
enum pipe_gating_control power_gating);
void dcn10_set_drr(struct pipe_ctx **pipe_ctx,
- int num_pipes, unsigned int vmin, unsigned int vmax,
- unsigned int vmid, unsigned int vmid_frame_number);
+ int num_pipes, struct dc_crtc_timing_adjust adjust);
void dcn10_get_position(struct pipe_ctx **pipe_ctx,
int num_pipes,
struct crtc_position *position);
@@ -210,4 +209,6 @@ void dcn10_wait_for_pending_cleared(struct dc *dc,
void dcn10_set_hdr_multiplier(struct pipe_ctx *pipe_ctx);
void dcn10_verify_allow_pstate_change_high(struct dc *dc);
+void dcn10_get_dcc_en_bits(struct dc *dc, int *dcc_en_bits);
+
#endif /* __DC_HWSS_DCN10_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c
index 254300b06b43..d532c78ee764 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2016 Advanced Micro Devices, Inc.
+ * Copyright 2016-2020 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -79,6 +79,7 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
.set_backlight_level = dce110_set_backlight_level,
.set_abm_immediate_disable = dce110_set_abm_immediate_disable,
.set_pipe = dce110_set_pipe,
+ .get_dcc_en_bits = dcn10_get_dcc_en_bits,
};
static const struct hwseq_private_funcs dcn10_private_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
index 6138f4887de7..677663cc7bff 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
@@ -132,6 +132,22 @@ void optc1_setup_vertical_interrupt2(
}
/**
+ * Vupdate keepout defines a window in which the pipe's update lock is blocked
+ * from changing. The start offset begins at vstartup and extends for a given
+ * number of clocks; the end offset runs from the end of vupdate for a given
+ * number of clocks.
+ */
+void optc1_set_vupdate_keepout(struct timing_generator *optc,
+ struct vupdate_keepout_params *params)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+ REG_SET_3(OTG_VUPDATE_KEEPOUT, 0,
+ MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET, params->start_offset,
+ MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET, params->end_offset,
+ OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN, params->enable);
+}
+
+/**
* program_timing_generator used by mode timing set
* Program CRTC Timing Registers - OTG_H_*, OTG_V_*, Pixel repetition.
* Including SYNC. Call BIOS command table to program Timings.
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
index 2529723beeb1..cabfe83fd634 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
@@ -194,6 +194,9 @@ struct dcn_optc_registers {
SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_UPDATE_PENDING, mask_sh),\
SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_BLANK_DATA_DOUBLE_BUFFER_EN, mask_sh),\
SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_RANGE_TIMING_DBUF_UPDATE_MODE, mask_sh),\
+ SF(OTG0_OTG_VUPDATE_KEEPOUT, OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN, mask_sh), \
+ SF(OTG0_OTG_VUPDATE_KEEPOUT, MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET, mask_sh), \
+ SF(OTG0_OTG_VUPDATE_KEEPOUT, MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET, mask_sh), \
SF(OTG0_OTG_H_TOTAL, OTG_H_TOTAL, mask_sh),\
SF(OTG0_OTG_H_BLANK_START_END, OTG_H_BLANK_START, mask_sh),\
SF(OTG0_OTG_H_BLANK_START_END, OTG_H_BLANK_END, mask_sh),\
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index d079f4e491e5..f962b905e79e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -82,7 +82,7 @@ const struct _vcs_dpi_ip_params_st dcn1_0_ip = {
.meta_chunk_size_kbytes = 2,
.writeback_chunk_size_kbytes = 2,
.line_buffer_size_bits = 589824,
- .max_line_buffer_lines = 32,
+ .max_line_buffer_lines = 12,
.IsLineBufferBppFixed = 0,
.LineBufferFixedBpp = -1,
.writeback_luma_buffer_size_kbytes = 12,
@@ -619,7 +619,6 @@ static const struct dc_debug_options debug_defaults_drv = {
.recovery_enabled = false, /*enable this by default after testing.*/
.max_downscale_src_width = 3840,
.underflow_assert_delay_us = 0xFFFFFFFF,
- .use_max_lb = true
};
static const struct dc_debug_options debug_defaults_diags = {
@@ -631,7 +630,6 @@ static const struct dc_debug_options debug_defaults_diags = {
.disable_pplib_clock_request = true,
.disable_pplib_wm_range = true,
.underflow_assert_delay_us = 0xFFFFFFFF,
- .use_max_lb = true
};
static void dcn10_dpp_destroy(struct dpp **dpp)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c
index 62cc2651e00c..8774406120fc 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c
@@ -112,7 +112,7 @@ struct dccg *dccg2_create(
const struct dccg_shift *dccg_shift,
const struct dccg_mask *dccg_mask)
{
- struct dcn_dccg *dccg_dcn = kzalloc(sizeof(*dccg_dcn), GFP_KERNEL);
+ struct dcn_dccg *dccg_dcn = kzalloc(sizeof(*dccg_dcn), GFP_ATOMIC);
struct dccg *base;
if (dccg_dcn == NULL) {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
index bec7059f6d5d..a1318c31bcfa 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2012-17 Advanced Micro Devices, Inc.
+ * Copyright 2012-2021 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -181,11 +181,14 @@ void hubp2_vready_at_or_After_vsync(struct hubp *hubp,
else
Set HUBP_VREADY_AT_OR_AFTER_VSYNC = 0
*/
- if ((pipe_dest->vstartup_start - (pipe_dest->vready_offset+pipe_dest->vupdate_width
- + pipe_dest->vupdate_offset) / pipe_dest->htotal) <= pipe_dest->vblank_end) {
- value = 1;
- } else
- value = 0;
+ if (pipe_dest->htotal != 0) {
+ if ((pipe_dest->vstartup_start - (pipe_dest->vready_offset+pipe_dest->vupdate_width
+ + pipe_dest->vupdate_offset) / pipe_dest->htotal) <= pipe_dest->vblank_end) {
+ value = 1;
+ } else
+ value = 0;
+ }
+
REG_UPDATE(DCHUBP_CNTL, HUBP_VREADY_AT_OR_AFTER_VSYNC, value);
}
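
[Editor's note] A worked check of the guarded expression, using hypothetical timings: with vstartup_start = 90, vready_offset = 10, vupdate_width = 20, vupdate_offset = 10, htotal = 2200 and vblank_end = 40, the quotient (10 + 20 + 10) / 2200 truncates to 0, so the test reduces to 90 <= 40 and value is set to 0. Before this change, an htotal of 0 made the same expression divide by zero; with the guard, the computation is simply skipped and value is left at whatever it was initialized to earlier in the function.
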
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
index 7218ed9e43dc..b5bb613eed4d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
@@ -95,6 +95,7 @@ static const struct hw_sequencer_funcs dcn20_funcs = {
.optimize_timing_for_fsft = dcn20_optimize_timing_for_fsft,
#endif
.set_disp_pattern_generator = dcn20_set_disp_pattern_generator,
+ .get_dcc_en_bits = dcn10_get_dcc_en_bits,
};
static const struct hwseq_private_funcs dcn20_private_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c
index fa013496e26b..2f9bfaeaba8d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c
@@ -341,8 +341,7 @@ void enc2_hw_init(struct link_encoder *enc)
} else {
AUX_REG_WRITE(AUX_DPHY_RX_CONTROL0, 0x103d1110);
- AUX_REG_WRITE(AUX_DPHY_TX_CONTROL, 0x21c4d);
-
+ AUX_REG_WRITE(AUX_DPHY_TX_CONTROL, 0x21c7a);
}
//AUX_DPHY_TX_REF_CONTROL'AUX_TX_REF_DIV HW default is 0x32;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
index ea7eaf7d755f..3139d90017ee 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
@@ -134,22 +134,6 @@ void optc2_set_gsl_window(struct timing_generator *optc,
OTG_GSL_WINDOW_END_Y, params->gsl_window_end_y);
}
-/**
- * Vupdate keepout can be set to a window to block the update lock for that pipe from changing.
- * Start offset begins with vstartup and goes for x number of clocks,
- * end offset starts from end of vupdate to x number of clocks.
- */
-void optc2_set_vupdate_keepout(struct timing_generator *optc,
- const struct vupdate_keepout_params *params)
-{
- struct optc *optc1 = DCN10TG_FROM_TG(optc);
-
- REG_SET_3(OTG_VUPDATE_KEEPOUT, 0,
- MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET, params->start_offset,
- MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET, params->end_offset,
- OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN, params->enable);
-}
-
void optc2_set_gsl_source_select(
struct timing_generator *optc,
int group_idx,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h
index e0a0a8a8e2c6..3dee2ec2a1bb 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h
@@ -56,9 +56,6 @@
SF(OTG0_OTG_GSL_WINDOW_X, OTG_GSL_WINDOW_END_X, mask_sh), \
SF(OTG0_OTG_GSL_WINDOW_Y, OTG_GSL_WINDOW_START_Y, mask_sh),\
SF(OTG0_OTG_GSL_WINDOW_Y, OTG_GSL_WINDOW_END_Y, mask_sh),\
- SF(OTG0_OTG_VUPDATE_KEEPOUT, OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN, mask_sh), \
- SF(OTG0_OTG_VUPDATE_KEEPOUT, MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET, mask_sh), \
- SF(OTG0_OTG_VUPDATE_KEEPOUT, MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET, mask_sh), \
SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_MASTER_MODE, mask_sh), \
SF(OTG0_OTG_GSL_CONTROL, OTG_MASTER_UPDATE_LOCK_GSL_EN, mask_sh), \
SF(OTG0_OTG_DSC_START_POSITION, OTG_DSC_START_POSITION_X, mask_sh), \
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index 2307b3517821..527e56c353cb 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -112,7 +112,7 @@ struct _vcs_dpi_ip_params_st dcn2_0_ip = {
.is_line_buffer_bpp_fixed = 0,
.line_buffer_fixed_bpp = 0,
.dcc_supported = true,
- .max_line_buffer_lines = 32,
+ .max_line_buffer_lines = 12,
.writeback_luma_buffer_size_kbytes = 12,
.writeback_chroma_buffer_size_kbytes = 8,
.writeback_chroma_line_buffer_width_pixels = 4,
@@ -180,7 +180,7 @@ static struct _vcs_dpi_ip_params_st dcn2_0_nv14_ip = {
.is_line_buffer_bpp_fixed = 0,
.line_buffer_fixed_bpp = 0,
.dcc_supported = true,
- .max_line_buffer_lines = 32,
+ .max_line_buffer_lines = 12,
.writeback_luma_buffer_size_kbytes = 12,
.writeback_chroma_buffer_size_kbytes = 8,
.writeback_chroma_line_buffer_width_pixels = 4,
@@ -1075,7 +1075,6 @@ static const struct dc_debug_options debug_defaults_drv = {
.scl_reset_length10 = true,
.sanity_checks = false,
.underflow_assert_delay_us = 0xFFFFFFFF,
- .use_max_lb = true
};
static const struct dc_debug_options debug_defaults_diags = {
@@ -1092,7 +1091,6 @@ static const struct dc_debug_options debug_defaults_diags = {
.scl_reset_length10 = true,
.underflow_assert_delay_us = 0xFFFFFFFF,
.enable_tri_buf = true,
- .use_max_lb = true
};
void dcn20_dpp_destroy(struct dpp **dpp)
@@ -1106,7 +1104,7 @@ struct dpp *dcn20_dpp_create(
uint32_t inst)
{
struct dcn20_dpp *dpp =
- kzalloc(sizeof(struct dcn20_dpp), GFP_KERNEL);
+ kzalloc(sizeof(struct dcn20_dpp), GFP_ATOMIC);
if (!dpp)
return NULL;
@@ -1124,7 +1122,7 @@ struct input_pixel_processor *dcn20_ipp_create(
struct dc_context *ctx, uint32_t inst)
{
struct dcn10_ipp *ipp =
- kzalloc(sizeof(struct dcn10_ipp), GFP_KERNEL);
+ kzalloc(sizeof(struct dcn10_ipp), GFP_ATOMIC);
if (!ipp) {
BREAK_TO_DEBUGGER();
@@ -1141,7 +1139,7 @@ struct output_pixel_processor *dcn20_opp_create(
struct dc_context *ctx, uint32_t inst)
{
struct dcn20_opp *opp =
- kzalloc(sizeof(struct dcn20_opp), GFP_KERNEL);
+ kzalloc(sizeof(struct dcn20_opp), GFP_ATOMIC);
if (!opp) {
BREAK_TO_DEBUGGER();
@@ -1158,7 +1156,7 @@ struct dce_aux *dcn20_aux_engine_create(
uint32_t inst)
{
struct aux_engine_dce110 *aux_engine =
- kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL);
+ kzalloc(sizeof(struct aux_engine_dce110), GFP_ATOMIC);
if (!aux_engine)
return NULL;
@@ -1196,7 +1194,7 @@ struct dce_i2c_hw *dcn20_i2c_hw_create(
uint32_t inst)
{
struct dce_i2c_hw *dce_i2c_hw =
- kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL);
+ kzalloc(sizeof(struct dce_i2c_hw), GFP_ATOMIC);
if (!dce_i2c_hw)
return NULL;
@@ -1209,7 +1207,7 @@ struct dce_i2c_hw *dcn20_i2c_hw_create(
struct mpc *dcn20_mpc_create(struct dc_context *ctx)
{
struct dcn20_mpc *mpc20 = kzalloc(sizeof(struct dcn20_mpc),
- GFP_KERNEL);
+ GFP_ATOMIC);
if (!mpc20)
return NULL;
@@ -1227,7 +1225,7 @@ struct hubbub *dcn20_hubbub_create(struct dc_context *ctx)
{
int i;
struct dcn20_hubbub *hubbub = kzalloc(sizeof(struct dcn20_hubbub),
- GFP_KERNEL);
+ GFP_ATOMIC);
if (!hubbub)
return NULL;
@@ -1255,7 +1253,7 @@ struct timing_generator *dcn20_timing_generator_create(
uint32_t instance)
{
struct optc *tgn10 =
- kzalloc(sizeof(struct optc), GFP_KERNEL);
+ kzalloc(sizeof(struct optc), GFP_ATOMIC);
if (!tgn10)
return NULL;
@@ -1334,7 +1332,7 @@ static struct clock_source *dcn20_clock_source_create(
bool dp_clk_src)
{
struct dce110_clk_src *clk_src =
- kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL);
+ kzalloc(sizeof(struct dce110_clk_src), GFP_ATOMIC);
if (!clk_src)
return NULL;
@@ -1440,7 +1438,7 @@ struct display_stream_compressor *dcn20_dsc_create(
struct dc_context *ctx, uint32_t inst)
{
struct dcn20_dsc *dsc =
- kzalloc(sizeof(struct dcn20_dsc), GFP_KERNEL);
+ kzalloc(sizeof(struct dcn20_dsc), GFP_ATOMIC);
if (!dsc) {
BREAK_TO_DEBUGGER();
@@ -1574,7 +1572,7 @@ struct hubp *dcn20_hubp_create(
uint32_t inst)
{
struct dcn20_hubp *hubp2 =
- kzalloc(sizeof(struct dcn20_hubp), GFP_KERNEL);
+ kzalloc(sizeof(struct dcn20_hubp), GFP_ATOMIC);
if (!hubp2)
return NULL;
@@ -2203,10 +2201,11 @@ int dcn20_populate_dml_pipes_from_context(
pipes[pipe_cnt].dout.output_bpp = (output_bpc * 3.0) / 2;
break;
case PIXEL_ENCODING_YCBCR422:
- if (true) /* todo */
- pipes[pipe_cnt].dout.output_format = dm_s422;
- else
+ if (res_ctx->pipe_ctx[i].stream->timing.flags.DSC &&
+ !res_ctx->pipe_ctx[i].stream->timing.dsc_cfg.ycbcr422_simple)
pipes[pipe_cnt].dout.output_format = dm_n422;
+ else
+ pipes[pipe_cnt].dout.output_format = dm_s422;
pipes[pipe_cnt].dout.output_bpp = output_bpc * 2;
break;
default:
@@ -2218,7 +2217,7 @@ int dcn20_populate_dml_pipes_from_context(
pipes[pipe_cnt].dout.output_bpp = res_ctx->pipe_ctx[i].stream->timing.dsc_cfg.bits_per_pixel / 16.0;
/* todo: default max for now, until there is logic reflecting this in dc*/
- pipes[pipe_cnt].dout.output_bpc = 12;
+ pipes[pipe_cnt].dout.dsc_input_bpc = 12;
/*fill up the audio sample rate (unit in kHz)*/
get_audio_check(&res_ctx->pipe_ctx[i].stream->audio_info, &aud_check);
pipes[pipe_cnt].dout.max_audio_sample_rate = aud_check.max_audiosample_rate / 1000;
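
[Editor's note] The YCbCr 4:2:2 handling above replaces the old "if (true)" placeholder with a real decision: when DSC is enabled and the stream is not using simple 4:2:2 (dsc_cfg.ycbcr422_simple), DML models the output as native 4:2:2 (dm_n422); in every other case, including the non-DSC path, it falls back to simple 4:2:2 (dm_s422).
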
@@ -3396,7 +3395,7 @@ bool dcn20_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool)
static struct pp_smu_funcs *dcn20_pp_smu_create(struct dc_context *ctx)
{
- struct pp_smu_funcs *pp_smu = kzalloc(sizeof(*pp_smu), GFP_KERNEL);
+ struct pp_smu_funcs *pp_smu = kzalloc(sizeof(*pp_smu), GFP_ATOMIC);
if (!pp_smu)
return pp_smu;
@@ -4042,7 +4041,7 @@ struct resource_pool *dcn20_create_resource_pool(
struct dc *dc)
{
struct dcn20_resource_pool *pool =
- kzalloc(sizeof(struct dcn20_resource_pool), GFP_KERNEL);
+ kzalloc(sizeof(struct dcn20_resource_pool), GFP_ATOMIC);
if (!pool)
return NULL;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c
index d3b643089603..8fccee5a3036 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c
@@ -218,6 +218,8 @@ bool dcn21_set_backlight_level(struct pipe_ctx *pipe_ctx,
cmd.abm_set_backlight.header.sub_type = DMUB_CMD__ABM_SET_BACKLIGHT;
cmd.abm_set_backlight.abm_set_backlight_data.frame_ramp = frame_ramp;
cmd.abm_set_backlight.abm_set_backlight_data.backlight_user_level = backlight_pwm_u16_16;
+ cmd.abm_set_backlight.abm_set_backlight_data.version = DMUB_CMD_ABM_SET_BACKLIGHT_VERSION_1;
+ cmd.abm_set_backlight.abm_set_backlight_data.panel_mask = (0x01 << panel_cntl->inst);
cmd.abm_set_backlight.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_backlight_data);
dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
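
[Editor's note] These two additions make the backlight command self-describing for multi-eDP systems: version tells the firmware which layout of the data struct to expect (DMUB_CMD_ABM_SET_BACKLIGHT_VERSION_1, defined in the dmub_cmd.h hunk below), and panel_mask is a bitmask of panel-control HW instances, so (0x01 << panel_cntl->inst) selects instance 0 as 0x01, instance 1 as 0x02, and so on.
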
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
index 074e2713257f..4f20a85ff396 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2016 Advanced Micro Devices, Inc.
+ * Copyright 2016-2020 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -99,6 +99,7 @@ static const struct hw_sequencer_funcs dcn21_funcs = {
#endif
.is_abm_supported = dcn21_is_abm_supported,
.set_disp_pattern_generator = dcn20_set_disp_pattern_generator,
+ .get_dcc_en_bits = dcn10_get_dcc_en_bits,
};
static const struct hwseq_private_funcs dcn21_private_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
index e62f931fc269..8e3f1d0b4cc3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
@@ -55,7 +55,6 @@
#include "dce/dce_audio.h"
#include "dce/dce_hwseq.h"
#include "virtual/virtual_stream_encoder.h"
-#include "dce110/dce110_resource.h"
#include "dml/display_mode_vba.h"
#include "dcn20/dcn20_dccg.h"
#include "dcn21/dcn21_dccg.h"
@@ -115,7 +114,7 @@ struct _vcs_dpi_ip_params_st dcn2_1_ip = {
.is_line_buffer_bpp_fixed = 0,
.line_buffer_fixed_bpp = 0,
.dcc_supported = true,
- .max_line_buffer_lines = 32,
+ .max_line_buffer_lines = 12,
.writeback_luma_buffer_size_kbytes = 12,
.writeback_chroma_buffer_size_kbytes = 8,
.writeback_chroma_line_buffer_width_pixels = 4,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.h
index 705fbfc37502..8a32772d4e91 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.h
@@ -134,6 +134,7 @@
HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, SECONDARY_SURFACE_DCC_EN, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, SECONDARY_SURFACE_DCC_IND_BLK, mask_sh),\
HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, SECONDARY_SURFACE_DCC_IND_BLK_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT, SURFACE_FLIP_INT_MASK, mask_sh),\
HUBP_SF(HUBPRET0_HUBPRET_CONTROL, DET_BUF_PLANE1_BASE_ADDRESS, mask_sh),\
HUBP_SF(HUBPRET0_HUBPRET_CONTROL, CROSSBAR_SRC_CB_B, mask_sh),\
HUBP_SF(HUBPRET0_HUBPRET_CONTROL, CROSSBAR_SRC_CR_R, mask_sh),\
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c
index c4c14e9c1309..bf7fa98b39eb 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2020 Advanced Micro Devices, Inc.
+ * Copyright 2016-2020 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -98,6 +98,7 @@ static const struct hw_sequencer_funcs dcn30_funcs = {
.hardware_release = dcn30_hardware_release,
.set_pipe = dcn21_set_pipe,
.set_disp_pattern_generator = dcn30_set_disp_pattern_generator,
+ .get_dcc_en_bits = dcn10_get_dcc_en_bits,
};
static const struct hwseq_private_funcs dcn30_private_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
index 263c2986682d..4a5fa23d8e7b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
@@ -120,7 +120,7 @@ struct _vcs_dpi_ip_params_st dcn3_0_ip = {
.dcc_supported = true,
.writeback_interface_buffer_size_kbytes = 90,
.writeback_line_buffer_buffer_size = 0,
- .max_line_buffer_lines = 32,
+ .max_line_buffer_lines = 12,
.writeback_luma_buffer_size_kbytes = 12, // writeback_line_buffer_buffer_size = 656640
.writeback_chroma_buffer_size_kbytes = 8,
.writeback_chroma_line_buffer_width_pixels = 4,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c
index bdad72140cbc..70b053d9ba40 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2020 Advanced Micro Devices, Inc.
+ * Copyright 2016-2020 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -98,6 +98,9 @@ static const struct hw_sequencer_funcs dcn301_funcs = {
.set_abm_immediate_disable = dcn21_set_abm_immediate_disable,
.set_pipe = dcn21_set_pipe,
.set_disp_pattern_generator = dcn30_set_disp_pattern_generator,
+ .get_dcc_en_bits = dcn10_get_dcc_en_bits,
+ .optimize_pwr_state = dcn21_optimize_pwr_state,
+ .exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state,
};
static const struct hwseq_private_funcs dcn301_private_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
index 622a5bf9737f..5b54b7fc5105 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2020 Advanced Micro Devices, Inc.
+ * Copyright 2019-2021 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -116,7 +116,7 @@ struct _vcs_dpi_ip_params_st dcn3_01_ip = {
.dcc_supported = true,
.writeback_interface_buffer_size_kbytes = 90,
.writeback_line_buffer_buffer_size = 656640,
- .max_line_buffer_lines = 32,
+ .max_line_buffer_lines = 12,
.writeback_luma_buffer_size_kbytes = 12, // writeback_line_buffer_buffer_size = 656640
.writeback_chroma_buffer_size_kbytes = 8,
.writeback_chroma_line_buffer_width_pixels = 4,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
index 0723e29fd42e..fc2dea243d1b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
@@ -101,7 +101,7 @@ struct _vcs_dpi_ip_params_st dcn3_02_ip = {
.dcc_supported = true,
.writeback_interface_buffer_size_kbytes = 90,
.writeback_line_buffer_buffer_size = 0,
- .max_line_buffer_lines = 32,
+ .max_line_buffer_lines = 12,
.writeback_luma_buffer_size_kbytes = 12, // writeback_line_buffer_buffer_size = 656640
.writeback_chroma_buffer_size_kbytes = 8,
.writeback_chroma_line_buffer_width_pixels = 4,
@@ -164,7 +164,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_02_soc = {
.min_dcfclk = 500.0, /* TODO: set this to actual min DCFCLK */
.num_states = 1,
- .sr_exit_time_us = 12,
+ .sr_exit_time_us = 15.5,
.sr_enter_plus_exit_time_us = 20,
.urgent_latency_us = 4.0,
.urgent_latency_pixel_data_only_us = 4.0,
diff --git a/drivers/gpu/drm/amd/display/dc/dm_helpers.h b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
index f41db27c44de..7617fab9e1f9 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_helpers.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
@@ -147,6 +147,8 @@ bool dm_helpers_dp_write_dsc_enable(
bool dm_helpers_is_dp_sink_present(
struct dc_link *link);
+void dm_helpers_mst_enable_stream_features(const struct dc_stream_state *stream);
+
enum dc_edid_status dm_helpers_read_local_edid(
struct dc_context *ctx,
struct dc_link *link,
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
index 72423dc425dc..799bae229e67 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
@@ -293,13 +293,31 @@ static void handle_det_buf_split(struct display_mode_lib *mode_lib,
if (surf_linear) {
log2_swath_height_l = 0;
log2_swath_height_c = 0;
- } else if (!surf_vert) {
- log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_height) - req128_l;
- log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_height) - req128_c;
} else {
- log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_width) - req128_l;
- log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_width) - req128_c;
+ unsigned int swath_height_l;
+ unsigned int swath_height_c;
+
+ if (!surf_vert) {
+ swath_height_l = rq_param->misc.rq_l.blk256_height;
+ swath_height_c = rq_param->misc.rq_c.blk256_height;
+ } else {
+ swath_height_l = rq_param->misc.rq_l.blk256_width;
+ swath_height_c = rq_param->misc.rq_c.blk256_width;
+ }
+
+ if (swath_height_l > 0)
+ log2_swath_height_l = dml_log2(swath_height_l);
+
+ if (req128_l && log2_swath_height_l > 0)
+ log2_swath_height_l -= 1;
+
+ if (swath_height_c > 0)
+ log2_swath_height_c = dml_log2(swath_height_c);
+
+ if (req128_c && log2_swath_height_c > 0)
+ log2_swath_height_c -= 1;
}
+
rq_param->dlg.rq_l.swath_height = 1 << log2_swath_height_l;
rq_param->dlg.rq_c.swath_height = 1 << log2_swath_height_c;
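
[Editor's note] To see what this rewrite buys across all of the handle_det_buf_split() variants patched below, take a hypothetical 256-byte block 8 rows high with req128 set: both versions compute dml_log2(8) = 3, subtract 1, and end up with a swath height of 1 << 2 = 4. The difference shows at the edges: the old code would call dml_log2(0) for a zero-sized block dimension and could drive the shift count negative when dml_log2() returned 0 with req128 set, whereas the new code only takes the log of positive heights and only decrements while the log is still positive, so the shift count never goes below 0.
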
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
index 9c78446c3a9d..6a6d5970d1d5 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
@@ -293,13 +293,31 @@ static void handle_det_buf_split(struct display_mode_lib *mode_lib,
if (surf_linear) {
log2_swath_height_l = 0;
log2_swath_height_c = 0;
- } else if (!surf_vert) {
- log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_height) - req128_l;
- log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_height) - req128_c;
} else {
- log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_width) - req128_l;
- log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_width) - req128_c;
+ unsigned int swath_height_l;
+ unsigned int swath_height_c;
+
+ if (!surf_vert) {
+ swath_height_l = rq_param->misc.rq_l.blk256_height;
+ swath_height_c = rq_param->misc.rq_c.blk256_height;
+ } else {
+ swath_height_l = rq_param->misc.rq_l.blk256_width;
+ swath_height_c = rq_param->misc.rq_c.blk256_width;
+ }
+
+ if (swath_height_l > 0)
+ log2_swath_height_l = dml_log2(swath_height_l);
+
+ if (req128_l && log2_swath_height_l > 0)
+ log2_swath_height_l -= 1;
+
+ if (swath_height_c > 0)
+ log2_swath_height_c = dml_log2(swath_height_c);
+
+ if (req128_c && log2_swath_height_c > 0)
+ log2_swath_height_c -= 1;
}
+
rq_param->dlg.rq_l.swath_height = 1 << log2_swath_height_l;
rq_param->dlg.rq_c.swath_height = 1 << log2_swath_height_c;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
index edd41d358291..dc1c81a6e377 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
@@ -277,13 +277,31 @@ static void handle_det_buf_split(
if (surf_linear) {
log2_swath_height_l = 0;
log2_swath_height_c = 0;
- } else if (!surf_vert) {
- log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_height) - req128_l;
- log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_height) - req128_c;
} else {
- log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_width) - req128_l;
- log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_width) - req128_c;
+ unsigned int swath_height_l;
+ unsigned int swath_height_c;
+
+ if (!surf_vert) {
+ swath_height_l = rq_param->misc.rq_l.blk256_height;
+ swath_height_c = rq_param->misc.rq_c.blk256_height;
+ } else {
+ swath_height_l = rq_param->misc.rq_l.blk256_width;
+ swath_height_c = rq_param->misc.rq_c.blk256_width;
+ }
+
+ if (swath_height_l > 0)
+ log2_swath_height_l = dml_log2(swath_height_l);
+
+ if (req128_l && log2_swath_height_l > 0)
+ log2_swath_height_l -= 1;
+
+ if (swath_height_c > 0)
+ log2_swath_height_c = dml_log2(swath_height_c);
+
+ if (req128_c && log2_swath_height_c > 0)
+ log2_swath_height_c -= 1;
}
+
rq_param->dlg.rq_l.swath_height = 1 << log2_swath_height_l;
rq_param->dlg.rq_c.swath_height = 1 << log2_swath_height_c;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
index 0f14f205ebe5..04601a767a8f 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
@@ -237,13 +237,31 @@ static void handle_det_buf_split(struct display_mode_lib *mode_lib,
if (surf_linear) {
log2_swath_height_l = 0;
log2_swath_height_c = 0;
- } else if (!surf_vert) {
- log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_height) - req128_l;
- log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_height) - req128_c;
} else {
- log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_width) - req128_l;
- log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_width) - req128_c;
+ unsigned int swath_height_l;
+ unsigned int swath_height_c;
+
+ if (!surf_vert) {
+ swath_height_l = rq_param->misc.rq_l.blk256_height;
+ swath_height_c = rq_param->misc.rq_c.blk256_height;
+ } else {
+ swath_height_l = rq_param->misc.rq_l.blk256_width;
+ swath_height_c = rq_param->misc.rq_c.blk256_width;
+ }
+
+ if (swath_height_l > 0)
+ log2_swath_height_l = dml_log2(swath_height_l);
+
+ if (req128_l && log2_swath_height_l > 0)
+ log2_swath_height_l -= 1;
+
+ if (swath_height_c > 0)
+ log2_swath_height_c = dml_log2(swath_height_c);
+
+ if (req128_c && log2_swath_height_c > 0)
+ log2_swath_height_c -= 1;
}
+
rq_param->dlg.rq_l.swath_height = 1 << log2_swath_height_l;
rq_param->dlg.rq_c.swath_height = 1 << log2_swath_height_c;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
index 098d6433f7f3..1f7b6ddf3020 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
@@ -226,7 +226,7 @@ void dml_log_pipe_params(
dml_print("DML PARAMS: PIPE [%d] DISPLAY OUTPUT PARAMS:\n", i);
dml_print("DML PARAMS: output_type = %d\n", dout->output_type);
dml_print("DML PARAMS: output_format = %d\n", dout->output_format);
- dml_print("DML PARAMS: output_bpc = %d\n", dout->output_bpc);
+ dml_print("DML PARAMS: dsc_input_bpc = %d\n", dout->dsc_input_bpc);
dml_print("DML PARAMS: output_bpp = %3.4f\n", dout->output_bpp);
dml_print("DML PARAMS: dp_lanes = %d\n", dout->dp_lanes);
dml_print("DML PARAMS: dsc_enable = %d\n", dout->dsc_enable);
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
index 0c5128187e08..2ece3690bfa3 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
@@ -164,7 +164,7 @@ struct _vcs_dpi_ip_params_st {
double writeback_max_vscl_ratio;
double writeback_min_hscl_ratio;
double writeback_min_vscl_ratio;
- double maximum_dsc_bits_per_component;
+ unsigned int maximum_dsc_bits_per_component;
unsigned int writeback_max_hscl_taps;
unsigned int writeback_max_vscl_taps;
unsigned int writeback_line_buffer_luma_buffer_size;
@@ -292,10 +292,10 @@ struct writeback_st {
struct _vcs_dpi_display_output_params_st {
int dp_lanes;
double output_bpp;
+ unsigned int dsc_input_bpc;
int dsc_enable;
int wb_enable;
int num_active_wb;
- int output_bpc;
int output_type;
int is_virtual;
int output_format;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
index 94036a9612cf..2a967458065b 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
@@ -471,7 +471,13 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib)
mode_lib->vba.DSCEnable[mode_lib->vba.NumberOfActivePlanes] = dout->dsc_enable;
mode_lib->vba.NumberOfDSCSlices[mode_lib->vba.NumberOfActivePlanes] =
dout->dsc_slices;
- mode_lib->vba.DSCInputBitPerComponent[mode_lib->vba.NumberOfActivePlanes] = dout->output_bpc;
+ if (!dout->dsc_input_bpc) {
+ mode_lib->vba.DSCInputBitPerComponent[mode_lib->vba.NumberOfActivePlanes] =
+ ip->maximum_dsc_bits_per_component;
+ } else {
+ mode_lib->vba.DSCInputBitPerComponent[mode_lib->vba.NumberOfActivePlanes] =
+ dout->dsc_input_bpc;
+ }
mode_lib->vba.WritebackEnable[mode_lib->vba.NumberOfActivePlanes] = dout->wb_enable;
mode_lib->vba.ActiveWritebacksPerPlane[mode_lib->vba.NumberOfActivePlanes] =
dout->num_active_wb;
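
[Editor's note] With this change a dout->dsc_input_bpc of 0 effectively means "use the IP parameter default": callers that do not know the DSC input bit depth can leave the field zeroed and the VBA code substitutes ip->maximum_dsc_bits_per_component, while a nonzero value (such as the 12 set in dcn20_populate_dml_pipes_from_context() above) is passed through unchanged.
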
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
index 4c3e9cc30167..414da64f5734 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
@@ -344,13 +344,31 @@ static void handle_det_buf_split(
if (surf_linear) {
log2_swath_height_l = 0;
log2_swath_height_c = 0;
- } else if (!surf_vert) {
- log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_height) - req128_l;
- log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_height) - req128_c;
} else {
- log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_width) - req128_l;
- log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_width) - req128_c;
+ unsigned int swath_height_l;
+ unsigned int swath_height_c;
+
+ if (!surf_vert) {
+ swath_height_l = rq_param->misc.rq_l.blk256_height;
+ swath_height_c = rq_param->misc.rq_c.blk256_height;
+ } else {
+ swath_height_l = rq_param->misc.rq_l.blk256_width;
+ swath_height_c = rq_param->misc.rq_c.blk256_width;
+ }
+
+ if (swath_height_l > 0)
+ log2_swath_height_l = dml_log2(swath_height_l);
+
+ if (req128_l && log2_swath_height_l > 0)
+ log2_swath_height_l -= 1;
+
+ if (swath_height_c > 0)
+ log2_swath_height_c = dml_log2(swath_height_c);
+
+ if (req128_c && log2_swath_height_c > 0)
+ log2_swath_height_c -= 1;
}
+
rq_param->dlg.rq_l.swath_height = 1 << log2_swath_height_l;
rq_param->dlg.rq_c.swath_height = 1 << log2_swath_height_c;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index eb1a19bf0d81..81b92f20d5b6 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -118,6 +118,27 @@ struct resource_funcs {
display_e2e_pipe_params_st *pipes,
bool fast_validate);
+ /*
+ * Algorithm for assigning available link encoders to links.
+ *
+ * Update link_enc_assignments table and link_enc_avail list accordingly in
+ * struct resource_context.
+ */
+ void (*link_encs_assign)(
+ struct dc *dc,
+ struct dc_state *state,
+ struct dc_stream_state *streams[],
+ uint8_t stream_count);
+ /*
+ * Unassign a link encoder from a stream.
+ *
+ * Update link_enc_assignments table and link_enc_avail list accordingly in
+ * struct resource_context.
+ */
+ void (*link_enc_unassign)(
+ struct dc_state *state,
+ struct dc_stream_state *stream);
+
enum dc_status (*validate_global)(
struct dc *dc,
struct dc_state *context);
@@ -358,6 +379,12 @@ struct resource_context {
uint8_t clock_source_ref_count[MAX_CLOCK_SOURCES];
uint8_t dp_clock_source_ref_count;
bool is_dsc_acquired[MAX_PIPES];
+ /* A table/array of encoder-to-link assignments. One entry per stream.
+ * Indexed by stream index in dc_state.
+ */
+ struct link_enc_assignment link_enc_assignments[MAX_PIPES];
+ /* List of available link encoders. Uses engine ID as encoder identifier. */
+ enum engine_id link_enc_avail[MAX_DIG_LINK_ENCODERS];
#if defined(CONFIG_DRM_AMD_DC_DCN)
bool is_mpc_3dlut_acquired[MAX_PIPES];
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
index b970a32177af..3ae05c96d557 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
@@ -52,6 +52,10 @@ bool dp_validate_mode_timing(
struct dc_link *link,
const struct dc_crtc_timing *timing);
+bool decide_edp_link_settings(struct dc_link *link,
+ struct dc_link_settings *link_setting,
+ uint32_t req_bw);
+
void decide_link_settings(
struct dc_stream_state *stream,
struct dc_link_settings *link_setting);
@@ -71,6 +75,8 @@ void detect_edp_sink_caps(struct dc_link *link);
bool is_dp_active_dongle(const struct dc_link *link);
+bool is_edp_ilr_optimization_required(struct dc_link *link, struct dc_crtc_timing *crtc_timing);
+
void dp_enable_mst_on_sink(struct dc_link *link, bool enable);
enum dp_panel_mode dp_get_panel_mode(struct dc_link *link);
@@ -86,5 +92,7 @@ bool dp_set_dsc_enable(struct pipe_ctx *pipe_ctx, bool enable);
bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable);
void dp_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable);
bool dp_update_dsc_config(struct pipe_ctx *pipe_ctx);
+bool dp_set_dsc_on_rx(struct pipe_ctx *pipe_ctx, bool enable);
+
#endif /* __DC_LINK_DP_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
index 3a29f379d0c8..5dc8d02b40c3 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
@@ -262,14 +262,9 @@ struct clk_mgr_funcs {
/* Get current memclk states from PMFW, update relevant structures */
void (*get_memclk_states_from_smu)(struct clk_mgr *clk_mgr);
-};
-
-struct dpm_clocks;
-struct wartermarks;
-struct smu_watermark_set {
- struct watermarks *wm_set;
- union large_integer mc_address;
+ /* Check whether the SMU is present */
+ bool (*is_smu_present)(struct clk_mgr *clk_mgr);
};
struct clk_mgr {
@@ -283,7 +278,6 @@ struct clk_mgr {
struct clk_state_registers_and_bypass boot_snapshot;
struct clk_bw_params *bw_params;
struct pp_smu_wm_range_sets ranges;
- struct smu_watermark_set smu_wm_set;
};
/* forward declarations */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
index 346dcd87dc10..80e1a32bc63d 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
@@ -29,6 +29,7 @@
#include "mem_input.h"
#define OPP_ID_INVALID 0xf
+#define MAX_TTU 0xffffff
enum cursor_pitch {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
index 7f5acd8fb918..80bc99500645 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
@@ -187,4 +187,17 @@ struct link_encoder_funcs {
struct link_encoder *enc);
};
+/*
+ * Used to track assignments of links (display endpoints) to link encoders.
+ *
+ * Entry in link_enc_assignments table in struct resource_context.
+ * An entry is only marked valid once an encoder is assigned to a link, and is invalidated once unassigned.
+ * Uses the engine ID as the identifier, since the PHY ID is not relevant for USB4 DPIA endpoints.
+ */
+struct link_enc_assignment {
+ bool valid;
+ struct display_endpoint_id ep_id;
+ enum engine_id eng_id;
+};
+
#endif /* LINK_ENCODER_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
index 2fedfcac6705..1d5853c95448 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
@@ -118,8 +118,7 @@ struct hw_sequencer_funcs {
struct pipe_ctx *pipe_ctx,
enum vline_select vline);
void (*set_drr)(struct pipe_ctx **pipe_ctx, int num_pipes,
- unsigned int vmin, unsigned int vmax,
- unsigned int vmid, unsigned int vmid_frame_number);
+ struct dc_crtc_timing_adjust adjust);
void (*set_static_screen_control)(struct pipe_ctx **pipe_ctx,
int num_pipes,
const struct dc_static_screen_params *events);
@@ -218,6 +217,8 @@ struct hw_sequencer_funcs {
void (*set_pipe)(struct pipe_ctx *pipe_ctx);
+ void (*get_dcc_en_bits)(struct dc *dc, int *dcc_en_bits);
+
/* Idle Optimization Related */
bool (*apply_idle_power_optimizations)(struct dc *dc, bool enable);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/link_enc_cfg.h b/drivers/gpu/drm/amd/display/dc/inc/link_enc_cfg.h
new file mode 100644
index 000000000000..7d36e55f3097
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/link_enc_cfg.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef DC_INC_LINK_ENC_CFG_H_
+#define DC_INC_LINK_ENC_CFG_H_
+
+/* This module implements functionality for dynamically assigning DIG link
+ * encoder resources to display endpoints (links).
+ */
+
+#include "core_types.h"
+
+/*
+ * Initialise link encoder resource tracking.
+ */
+void link_enc_cfg_init(
+ struct dc *dc,
+ struct dc_state *state);
+
+/*
+ * Algorithm for assigning available DIG link encoders to streams.
+ *
+ * Update link_enc_assignments table and link_enc_avail list accordingly in
+ * struct resource_context.
+ *
+ * Loop over all streams twice:
+ * a) First assign encoders to unmappable endpoints.
+ * b) Then assign encoders to mappable endpoints.
+ */
+void link_enc_cfg_link_encs_assign(
+ struct dc *dc,
+ struct dc_state *state,
+ struct dc_stream_state *streams[],
+ uint8_t stream_count);
+
+/*
+ * Unassign a link encoder from a stream.
+ *
+ * Update link_enc_assignments table and link_enc_avail list accordingly in
+ * struct resource_context.
+ */
+void link_enc_cfg_link_enc_unassign(
+ struct dc_state *state,
+ struct dc_stream_state *stream);
+
+/*
+ * Check whether the transmitter driven by a link encoder is a mappable
+ * endpoint.
+ */
+bool link_enc_cfg_is_transmitter_mappable(
+ struct dc_state *state,
+ struct link_encoder *link_enc);
+
+/* Return link using DIG link encoder resource. NULL if unused. */
+struct dc_link *link_enc_cfg_get_link_using_link_enc(
+ struct dc_state *state,
+ enum engine_id eng_id);
+
+/* Return DIG link encoder used by link. NULL if unused. */
+struct link_encoder *link_enc_cfg_get_link_enc_used_by_link(
+ struct dc_state *state,
+ struct dc_link *link);
+
+#endif /* DC_INC_LINK_ENC_CFG_H_ */
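
[Editor's note] A minimal sketch of the two-pass shape described in the link_enc_cfg_link_encs_assign() comment above; is_unmappable() and assign_free_encoder() are hypothetical helpers standing in for the real implementation:

	int i;

	/* Pass 1: endpoints tied to a fixed encoder must claim it first. */
	for (i = 0; i < stream_count; i++)
		if (is_unmappable(streams[i]))
			assign_free_encoder(state, streams[i]);

	/* Pass 2: mappable endpoints take any encoder still available. */
	for (i = 0; i < stream_count; i++)
		if (!is_unmappable(streams[i]))
			assign_free_encoder(state, streams[i]);
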
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c
index 6ee9dd833b85..1a5be2792055 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c
@@ -187,6 +187,10 @@ static const struct irq_source_info_funcs vupdate_no_lock_irq_info_funcs = {
.ack = NULL
};
+static const struct irq_source_info_funcs dmub_trace_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
static const struct irq_source_info_funcs vline0_irq_info_funcs = {
.set = NULL,
@@ -205,6 +209,9 @@ static const struct irq_source_info_funcs vline0_irq_info_funcs = {
BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
mm ## block ## id ## _ ## reg_name
+#define SRI_DMUB(reg_name)\
+ BASE(mm ## reg_name ## _BASE_IDX) + \
+ mm ## reg_name
#define IRQ_REG_ENTRY(block, reg_num, reg1, mask1, reg2, mask2)\
.enable_reg = SRI(reg1, block, reg_num),\
@@ -220,7 +227,19 @@ static const struct irq_source_info_funcs vline0_irq_info_funcs = {
.ack_value = \
block ## reg_num ## _ ## reg2 ## __ ## mask2 ## _MASK \
-
+#define IRQ_REG_ENTRY_DMUB(reg1, mask1, reg2, mask2)\
+ .enable_reg = SRI_DMUB(reg1),\
+ .enable_mask = \
+ reg1 ## __ ## mask1 ## _MASK,\
+ .enable_value = {\
+ reg1 ## __ ## mask1 ## _MASK,\
+ ~reg1 ## __ ## mask1 ## _MASK \
+ },\
+ .ack_reg = SRI_DMUB(reg2),\
+ .ack_mask = \
+ reg2 ## __ ## mask2 ## _MASK,\
+ .ack_value = \
+ reg2 ## __ ## mask2 ## _MASK \
#define hpd_int_entry(reg_num)\
[DC_IRQ_SOURCE_HPD1 + reg_num] = {\
@@ -282,6 +301,13 @@ static const struct irq_source_info_funcs vline0_irq_info_funcs = {
.funcs = &vline0_irq_info_funcs\
}
+#define dmub_trace_int_entry()\
+ [DC_IRQ_SOURCE_DMCUB_OUTBOX0] = {\
+ IRQ_REG_ENTRY_DMUB(DMCUB_INTERRUPT_ENABLE, DMCUB_OUTBOX0_READY_INT_EN,\
+ DMCUB_INTERRUPT_ACK, DMCUB_OUTBOX0_READY_INT_ACK),\
+ .funcs = &dmub_trace_irq_info_funcs\
+ }
+
#define dummy_irq_entry() \
{\
.funcs = &dummy_irq_info_funcs\
@@ -400,6 +426,7 @@ irq_source_info_dcn21[DAL_IRQ_SOURCES_NUMBER] = {
vline0_int_entry(3),
vline0_int_entry(4),
vline0_int_entry(5),
+ dmub_trace_int_entry(),
};
static const struct irq_service_funcs irq_service_funcs_dcn21 = {
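
[Editor's note] For reference, after SRI_DMUB() and token pasting, dmub_trace_int_entry() resolves to an initializer of roughly this shape; the pasted register/mask names are shown purely to illustrate the macro:

	[DC_IRQ_SOURCE_DMCUB_OUTBOX0] = {
		.enable_reg = /* base + mmDMCUB_INTERRUPT_ENABLE */,
		.enable_mask = DMCUB_INTERRUPT_ENABLE__DMCUB_OUTBOX0_READY_INT_EN_MASK,
		.enable_value = {
			DMCUB_INTERRUPT_ENABLE__DMCUB_OUTBOX0_READY_INT_EN_MASK,
			~DMCUB_INTERRUPT_ENABLE__DMCUB_OUTBOX0_READY_INT_EN_MASK
		},
		.ack_reg = /* base + mmDMCUB_INTERRUPT_ACK */,
		.ack_mask = DMCUB_INTERRUPT_ACK__DMCUB_OUTBOX0_READY_INT_ACK_MASK,
		.ack_value = DMCUB_INTERRUPT_ACK__DMCUB_OUTBOX0_READY_INT_ACK_MASK,
		.funcs = &dmub_trace_irq_info_funcs
	},
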
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn30/irq_service_dcn30.c b/drivers/gpu/drm/amd/display/dc/irq/dcn30/irq_service_dcn30.c
index 4ec6f6ad8c48..914ce2ce1c2f 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn30/irq_service_dcn30.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn30/irq_service_dcn30.c
@@ -215,6 +215,9 @@ static const struct irq_source_info_funcs vline0_irq_info_funcs = {
BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
mm ## block ## id ## _ ## reg_name
+#define SRI_DMUB(reg_name)\
+ BASE(mm ## reg_name ## _BASE_IDX) + \
+ mm ## reg_name
#define IRQ_REG_ENTRY(block, reg_num, reg1, mask1, reg2, mask2)\
.enable_reg = SRI(reg1, block, reg_num),\
@@ -230,7 +233,19 @@ static const struct irq_source_info_funcs vline0_irq_info_funcs = {
.ack_value = \
block ## reg_num ## _ ## reg2 ## __ ## mask2 ## _MASK \
-
+#define IRQ_REG_ENTRY_DMUB(reg1, mask1, reg2, mask2)\
+ .enable_reg = SRI_DMUB(reg1),\
+ .enable_mask = \
+ reg1 ## __ ## mask1 ## _MASK,\
+ .enable_value = {\
+ reg1 ## __ ## mask1 ## _MASK,\
+ ~reg1 ## __ ## mask1 ## _MASK \
+ },\
+ .ack_reg = SRI_DMUB(reg2),\
+ .ack_mask = \
+ reg2 ## __ ## mask2 ## _MASK,\
+ .ack_value = \
+ reg2 ## __ ## mask2 ## _MASK \
#define hpd_int_entry(reg_num)\
[DC_IRQ_SOURCE_HPD1 + reg_num] = {\
@@ -284,6 +299,13 @@ static const struct irq_source_info_funcs vline0_irq_info_funcs = {
.funcs = &vline0_irq_info_funcs\
}
+#define dmub_trace_int_entry()\
+ [DC_IRQ_SOURCE_DMCUB_OUTBOX0] = {\
+ IRQ_REG_ENTRY_DMUB(DMCUB_INTERRUPT_ENABLE, DMCUB_OUTBOX0_READY_INT_EN,\
+ DMCUB_INTERRUPT_ACK, DMCUB_OUTBOX0_READY_INT_ACK),\
+ .funcs = &dmub_trace_irq_info_funcs\
+ }
+
#define dummy_irq_entry() \
{\
.funcs = &dummy_irq_info_funcs\
@@ -398,6 +420,7 @@ irq_source_info_dcn30[DAL_IRQ_SOURCES_NUMBER] = {
vline0_int_entry(3),
vline0_int_entry(4),
vline0_int_entry(5),
+ dmub_trace_int_entry(),
};
static const struct irq_service_funcs irq_service_funcs_dcn30 = {
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn302/irq_service_dcn302.c b/drivers/gpu/drm/amd/display/dc/irq/dcn302/irq_service_dcn302.c
index 2313a5664f44..40fd34fb1d5e 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn302/irq_service_dcn302.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn302/irq_service_dcn302.c
@@ -50,6 +50,8 @@ static enum dc_irq_source to_dal_irq_source_dcn302(struct irq_service *irq_servi
return DC_IRQ_SOURCE_VBLANK5;
case DCN_1_0__SRCID__DC_D6_OTG_VSTARTUP:
return DC_IRQ_SOURCE_VBLANK6;
+ case DCN_1_0__SRCID__DMCUB_OUTBOX_HIGH_PRIORITY_READY_INT:
+ return DC_IRQ_SOURCE_DMCUB_OUTBOX0;
case DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL:
return DC_IRQ_SOURCE_DC1_VLINE0;
case DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL:
@@ -166,6 +168,11 @@ static const struct irq_source_info_funcs vblank_irq_info_funcs = {
.ack = NULL
};
+static const struct irq_source_info_funcs dmub_trace_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
static const struct irq_source_info_funcs vline0_irq_info_funcs = {
.set = NULL,
.ack = NULL
@@ -181,6 +188,9 @@ static const struct irq_source_info_funcs vline0_irq_info_funcs = {
BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
mm ## block ## id ## _ ## reg_name
+#define SRI_DMUB(reg_name)\
+ BASE(mm ## reg_name ## _BASE_IDX) + \
+ mm ## reg_name
#define IRQ_REG_ENTRY(block, reg_num, reg1, mask1, reg2, mask2)\
.enable_reg = SRI(reg1, block, reg_num),\
@@ -193,7 +203,26 @@ static const struct irq_source_info_funcs vline0_irq_info_funcs = {
.ack_mask = block ## reg_num ## _ ## reg2 ## __ ## mask2 ## _MASK,\
.ack_value = block ## reg_num ## _ ## reg2 ## __ ## mask2 ## _MASK \
+#define dmub_trace_int_entry()\
+ [DC_IRQ_SOURCE_DMCUB_OUTBOX0] = {\
+ IRQ_REG_ENTRY_DMUB(DMCUB_INTERRUPT_ENABLE, DMCUB_OUTBOX0_READY_INT_EN,\
+ DMCUB_INTERRUPT_ACK, DMCUB_OUTBOX0_READY_INT_ACK),\
+ .funcs = &dmub_trace_irq_info_funcs\
+ }
+#define IRQ_REG_ENTRY_DMUB(reg1, mask1, reg2, mask2)\
+ .enable_reg = SRI_DMUB(reg1),\
+ .enable_mask = \
+ reg1 ## __ ## mask1 ## _MASK,\
+ .enable_value = {\
+ reg1 ## __ ## mask1 ## _MASK,\
+ ~reg1 ## __ ## mask1 ## _MASK \
+ },\
+ .ack_reg = SRI_DMUB(reg2),\
+ .ack_mask = \
+ reg2 ## __ ## mask2 ## _MASK,\
+ .ack_value = \
+ reg2 ## __ ## mask2 ## _MASK \
#define hpd_int_entry(reg_num)\
[DC_IRQ_SOURCE_HPD1 + reg_num] = {\
@@ -348,6 +377,7 @@ static const struct irq_source_info irq_source_info_dcn302[DAL_IRQ_SOURCES_NUMBE
vline0_int_entry(2),
vline0_int_entry(3),
vline0_int_entry(4),
+ dmub_trace_int_entry(),
};
static const struct irq_service_funcs irq_service_funcs_dcn302 = {
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index f07b348f7c29..4195ff10c514 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -47,10 +47,10 @@
/* Firmware versioning. */
#ifdef DMUB_EXPOSE_VERSION
-#define DMUB_FW_VERSION_GIT_HASH 0xc29b1734b
+#define DMUB_FW_VERSION_GIT_HASH 0x23db9b126
#define DMUB_FW_VERSION_MAJOR 0
#define DMUB_FW_VERSION_MINOR 0
-#define DMUB_FW_VERSION_REVISION 56
+#define DMUB_FW_VERSION_REVISION 62
#define DMUB_FW_VERSION_TEST 0
#define DMUB_FW_VERSION_VBIOS 0
#define DMUB_FW_VERSION_HOTFIX 0
@@ -121,6 +121,16 @@
#define TRACE_BUFFER_ENTRY_OFFSET 16
/**
+ * Legacy ABM backlight control version
+ */
+#define DMUB_CMD_ABM_SET_BACKLIGHT_VERSION_UNKNOWN 0x0
+
+/**
+ * ABM backlight control version with multi-eDP support
+ */
+#define DMUB_CMD_ABM_SET_BACKLIGHT_VERSION_1 0x1
+
+/**
* Physical framebuffer address location, 64-bit.
*/
#ifndef PHYSICAL_ADDRESS_LOC
@@ -202,12 +212,7 @@ struct dmub_feature_caps {
* Max PSR version supported by FW.
*/
uint8_t psr;
-#ifndef TRIM_FAMS
- uint8_t fw_assisted_mclk_switch;
- uint8_t reserved[6];
-#else
uint8_t reserved[7];
-#endif
};
#if defined(__cplusplus)
@@ -532,10 +537,6 @@ enum dmub_cmd_type {
* Command type used for OUTBOX1 notification enable
*/
DMUB_CMD__OUTBOX1_ENABLE = 71,
-#ifndef TRIM_FAMS
- DMUB_CMD__FW_ASSISTED_MCLK_SWITCH = 76,
-#endif
-
/**
* Command type used for all VBIOS interface commands.
*/
@@ -1115,13 +1116,6 @@ enum dmub_cmd_psr_type {
DMUB_CMD__PSR_FORCE_STATIC = 5,
};
-#ifndef TRIM_FAMS
-enum dmub_cmd_fams_type {
- DMUB_CMD__FAMS_SETUP_FW_CTRL = 0,
- DMUB_CMD__FAMS_DRR_UPDATE = 1,
-};
-#endif
-
/**
* PSR versions.
*/
@@ -1245,6 +1239,19 @@ struct dmub_cmd_psr_copy_settings_data {
* Length of each horizontal line in us.
*/
uint32_t line_time_in_us;
+ /**
+ * FEC enable status in driver
+ */
+ uint8_t fec_enable_status;
+ /**
+ * FEC re-enable delay on PSR exit.
+ * Unit is 100us; range is 0~255 (0xFF).
+ */
+ uint8_t fec_enable_delay_in100us;
+ /**
+ * Explicit padding to 4 byte boundary.
+ */
+ uint8_t pad3[2];
};
/**
@@ -1628,6 +1635,23 @@ struct dmub_cmd_abm_set_backlight_data {
* Requested backlight level from user.
*/
uint32_t backlight_user_level;
+
+ /**
+ * Backlight data version.
+ */
+ uint8_t version;
+
+ /**
+ * Panel Control HW instance mask.
+ * Bit 0 is Panel Control HW instance 0.
+ * Bit 1 is Panel Control HW instance 1.
+ */
+ uint8_t panel_mask;
+
+ /**
+ * Explicit padding to 4 byte boundary.
+ */
+ uint8_t pad[2];
};
/**
@@ -1791,24 +1815,6 @@ struct dmub_rb_cmd_drr_update {
struct dmub_optc_state dmub_optc_state_req;
};
-#ifndef TRIM_FAMS
-struct dmub_cmd_fw_assisted_mclk_switch_pipe_data {
- uint32_t pix_clk_100hz;
- uint32_t min_refresh_in_uhz;
- uint32_t max_ramp_step;
-};
-
-struct dmub_cmd_fw_assisted_mclk_switch_config {
- uint32_t fams_enabled;
- struct dmub_cmd_fw_assisted_mclk_switch_pipe_data pipe_data[DMUB_MAX_STREAMS];
-};
-
-struct dmub_rb_cmd_fw_assisted_mclk_switch {
- struct dmub_cmd_header header;
- struct dmub_cmd_fw_assisted_mclk_switch_config config_data;
-};
-#endif
-
/**
* Data passed from driver to FW in a DMUB_CMD__VBIOS_LVTMA_CONTROL command.
*/
@@ -1951,9 +1957,6 @@ union dmub_rb_cmd {
*/
struct dmub_rb_cmd_query_feature_caps query_feature_caps;
struct dmub_rb_cmd_drr_update drr_update;
-#ifndef TRIM_FAMS
- struct dmub_rb_cmd_fw_assisted_mclk_switch fw_assisted_mclk_switch;
-#endif
/**
* Definition of a DMUB_CMD__VBIOS_LVTMA_CONTROL command.
*/
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
index 8ba0a9e2da54..1cbb125b4063 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
@@ -415,6 +415,12 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
if (!dmub->sw_init)
return DMUB_STATUS_INVALID;
+ if (!inst_fb || !stack_fb || !data_fb || !bios_fb || !mail_fb ||
+ !tracebuff_fb || !fw_state_fb || !scratch_mem_fb) {
+ ASSERT(0);
+ return DMUB_STATUS_INVALID;
+ }
+
dmub->fb_base = params->fb_base;
dmub->fb_offset = params->fb_offset;
dmub->psp_version = params->psp_version;
@@ -422,101 +428,89 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
if (dmub->hw_funcs.reset)
dmub->hw_funcs.reset(dmub);
- if (inst_fb && data_fb) {
- cw0.offset.quad_part = inst_fb->gpu_addr;
- cw0.region.base = DMUB_CW0_BASE;
- cw0.region.top = cw0.region.base + inst_fb->size - 1;
-
- cw1.offset.quad_part = stack_fb->gpu_addr;
- cw1.region.base = DMUB_CW1_BASE;
- cw1.region.top = cw1.region.base + stack_fb->size - 1;
-
- if (params->load_inst_const && dmub->hw_funcs.backdoor_load) {
- /**
- * Read back all the instruction memory so we don't hang the
- * DMCUB when backdoor loading if the write from x86 hasn't been
- * flushed yet. This only occurs in backdoor loading.
- */
- dmub_flush_buffer_mem(inst_fb);
- dmub->hw_funcs.backdoor_load(dmub, &cw0, &cw1);
- }
-
- }
-
- if (inst_fb && data_fb && bios_fb && mail_fb && tracebuff_fb &&
- fw_state_fb && scratch_mem_fb) {
- cw2.offset.quad_part = data_fb->gpu_addr;
- cw2.region.base = DMUB_CW0_BASE + inst_fb->size;
- cw2.region.top = cw2.region.base + data_fb->size;
-
- cw3.offset.quad_part = bios_fb->gpu_addr;
- cw3.region.base = DMUB_CW3_BASE;
- cw3.region.top = cw3.region.base + bios_fb->size;
+ cw0.offset.quad_part = inst_fb->gpu_addr;
+ cw0.region.base = DMUB_CW0_BASE;
+ cw0.region.top = cw0.region.base + inst_fb->size - 1;
- cw4.offset.quad_part = mail_fb->gpu_addr;
- cw4.region.base = DMUB_CW4_BASE;
- cw4.region.top = cw4.region.base + mail_fb->size;
+ cw1.offset.quad_part = stack_fb->gpu_addr;
+ cw1.region.base = DMUB_CW1_BASE;
+ cw1.region.top = cw1.region.base + stack_fb->size - 1;
+ if (params->load_inst_const && dmub->hw_funcs.backdoor_load) {
/**
- * Doubled the mailbox region to accomodate inbox and outbox.
- * Note: Currently, currently total mailbox size is 16KB. It is split
- * equally into 8KB between inbox and outbox. If this config is
- * changed, then uncached base address configuration of outbox1
- * has to be updated in funcs->setup_out_mailbox.
+ * Read back all the instruction memory so we don't hang the
+ * DMCUB when backdoor loading if the write from x86 hasn't been
+ * flushed yet. This only occurs in backdoor loading.
*/
- inbox1.base = cw4.region.base;
- inbox1.top = cw4.region.base + DMUB_RB_SIZE;
- outbox1.base = inbox1.top;
- outbox1.top = cw4.region.top;
+ dmub_flush_buffer_mem(inst_fb);
+ dmub->hw_funcs.backdoor_load(dmub, &cw0, &cw1);
+ }
- cw5.offset.quad_part = tracebuff_fb->gpu_addr;
- cw5.region.base = DMUB_CW5_BASE;
- cw5.region.top = cw5.region.base + tracebuff_fb->size;
+ cw2.offset.quad_part = data_fb->gpu_addr;
+ cw2.region.base = DMUB_CW0_BASE + inst_fb->size;
+ cw2.region.top = cw2.region.base + data_fb->size;
- outbox0.base = DMUB_REGION5_BASE + TRACE_BUFFER_ENTRY_OFFSET;
- outbox0.top = outbox0.base + tracebuff_fb->size - TRACE_BUFFER_ENTRY_OFFSET;
+ cw3.offset.quad_part = bios_fb->gpu_addr;
+ cw3.region.base = DMUB_CW3_BASE;
+ cw3.region.top = cw3.region.base + bios_fb->size;
+ cw4.offset.quad_part = mail_fb->gpu_addr;
+ cw4.region.base = DMUB_CW4_BASE;
+ cw4.region.top = cw4.region.base + mail_fb->size;
- cw6.offset.quad_part = fw_state_fb->gpu_addr;
- cw6.region.base = DMUB_CW6_BASE;
- cw6.region.top = cw6.region.base + fw_state_fb->size;
+ /**
+ * The mailbox region is doubled to accommodate both inbox and outbox.
+ * Note: currently the total mailbox size is 16KB, split equally into
+ * 8KB for the inbox and 8KB for the outbox. If this config is changed,
+ * then the uncached base address configuration of outbox1 has to be
+ * updated in funcs->setup_out_mailbox.
+ */
+ inbox1.base = cw4.region.base;
+ inbox1.top = cw4.region.base + DMUB_RB_SIZE;
+ outbox1.base = inbox1.top;
+ outbox1.top = cw4.region.top;
- dmub->fw_state = fw_state_fb->cpu_addr;
+ cw5.offset.quad_part = tracebuff_fb->gpu_addr;
+ cw5.region.base = DMUB_CW5_BASE;
+ cw5.region.top = cw5.region.base + tracebuff_fb->size;
- dmub->scratch_mem_fb = *scratch_mem_fb;
+ outbox0.base = DMUB_REGION5_BASE + TRACE_BUFFER_ENTRY_OFFSET;
+ outbox0.top = outbox0.base + tracebuff_fb->size - TRACE_BUFFER_ENTRY_OFFSET;
- if (dmub->hw_funcs.setup_windows)
- dmub->hw_funcs.setup_windows(dmub, &cw2, &cw3, &cw4,
- &cw5, &cw6);
+ cw6.offset.quad_part = fw_state_fb->gpu_addr;
+ cw6.region.base = DMUB_CW6_BASE;
+ cw6.region.top = cw6.region.base + fw_state_fb->size;
- if (dmub->hw_funcs.setup_outbox0)
- dmub->hw_funcs.setup_outbox0(dmub, &outbox0);
+ dmub->fw_state = fw_state_fb->cpu_addr;
- if (dmub->hw_funcs.setup_mailbox)
- dmub->hw_funcs.setup_mailbox(dmub, &inbox1);
- if (dmub->hw_funcs.setup_out_mailbox)
- dmub->hw_funcs.setup_out_mailbox(dmub, &outbox1);
- }
+ dmub->scratch_mem_fb = *scratch_mem_fb;
- if (mail_fb) {
- dmub_memset(&rb_params, 0, sizeof(rb_params));
- rb_params.ctx = dmub;
- rb_params.base_address = mail_fb->cpu_addr;
- rb_params.capacity = DMUB_RB_SIZE;
+ if (dmub->hw_funcs.setup_windows)
+ dmub->hw_funcs.setup_windows(dmub, &cw2, &cw3, &cw4, &cw5, &cw6);
- dmub_rb_init(&dmub->inbox1_rb, &rb_params);
+ if (dmub->hw_funcs.setup_outbox0)
+ dmub->hw_funcs.setup_outbox0(dmub, &outbox0);
- // Initialize outbox1 ring buffer
- rb_params.ctx = dmub;
- rb_params.base_address = (void *) ((uint8_t *) (mail_fb->cpu_addr) + DMUB_RB_SIZE);
- rb_params.capacity = DMUB_RB_SIZE;
- dmub_rb_init(&dmub->outbox1_rb, &rb_params);
+ if (dmub->hw_funcs.setup_mailbox)
+ dmub->hw_funcs.setup_mailbox(dmub, &inbox1);
+ if (dmub->hw_funcs.setup_out_mailbox)
+ dmub->hw_funcs.setup_out_mailbox(dmub, &outbox1);
- }
+ dmub_memset(&rb_params, 0, sizeof(rb_params));
+ rb_params.ctx = dmub;
+ rb_params.base_address = mail_fb->cpu_addr;
+ rb_params.capacity = DMUB_RB_SIZE;
+ dmub_rb_init(&dmub->inbox1_rb, &rb_params);
+
+ // Initialize outbox1 ring buffer
+ rb_params.ctx = dmub;
+ rb_params.base_address = (void *) ((uint8_t *) (mail_fb->cpu_addr) + DMUB_RB_SIZE);
+ rb_params.capacity = DMUB_RB_SIZE;
+ dmub_rb_init(&dmub->outbox1_rb, &rb_params);
dmub_memset(&outbox0_rb_params, 0, sizeof(outbox0_rb_params));
outbox0_rb_params.ctx = dmub;
- outbox0_rb_params.base_address = (void *)((uint64_t)(tracebuff_fb->cpu_addr) + TRACE_BUFFER_ENTRY_OFFSET);
+ outbox0_rb_params.base_address = (void *)((uintptr_t)(tracebuff_fb->cpu_addr) + TRACE_BUFFER_ENTRY_OFFSET);
outbox0_rb_params.capacity = tracebuff_fb->size - dmub_align(TRACE_BUFFER_ENTRY_OFFSET, 64);
dmub_rb_init(&dmub->outbox0_rb, &outbox0_rb_params);
@@ -653,6 +647,8 @@ dmub_srv_send_gpint_command(struct dmub_srv *dmub,
dmub->hw_funcs.set_gpint(dmub, reg);
for (i = 0; i < timeout_us; ++i) {
+ udelay(1);
+
if (dmub->hw_funcs.is_gpint_acked(dmub, reg))
return DMUB_STATUS_OK;
}
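
Aside for reviewers: a minimal userspace sketch of the CW4 mailbox split described in the comment above. DMUB_RB_SIZE and the inbox1/outbox1 layout are taken from the hunk; the base address, struct, and function names are illustrative only.

#include <stdint.h>
#include <assert.h>

#define DMUB_RB_SIZE (8 * 1024)        /* 8KB per ring, per the comment */

struct rb_window { uint32_t base, top; };

/* Carve one 16KB mailbox window into inbox1 (first half) and outbox1. */
static void split_mailbox(uint32_t cw4_base, uint32_t cw4_top,
                          struct rb_window *inbox1,
                          struct rb_window *outbox1)
{
        inbox1->base = cw4_base;
        inbox1->top = cw4_base + DMUB_RB_SIZE;   /* first 8KB: inbox */
        outbox1->base = inbox1->top;             /* second 8KB: outbox */
        outbox1->top = cw4_top;
}

int main(void)
{
        struct rb_window in, out;

        split_mailbox(0x5000, 0x5000 + 16 * 1024, &in, &out);
        assert(out.top - out.base == DMUB_RB_SIZE); /* equal halves */
        return 0;
}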
diff --git a/drivers/gpu/drm/amd/display/include/logger_types.h b/drivers/gpu/drm/amd/display/include/logger_types.h
index 21bbee17c527..571fcf23cea9 100644
--- a/drivers/gpu/drm/amd/display/include/logger_types.h
+++ b/drivers/gpu/drm/amd/display/include/logger_types.h
@@ -36,6 +36,9 @@
#define DC_LOG_DC(...) DRM_DEBUG_KMS(__VA_ARGS__)
#define DC_LOG_DTN(...) DRM_DEBUG_KMS(__VA_ARGS__)
#define DC_LOG_SURFACE(...) pr_debug("[SURFACE]:"__VA_ARGS__)
+#define DC_LOG_CURSOR(...) pr_debug("[CURSOR]:"__VA_ARGS__)
+#define DC_LOG_PFLIP(...) pr_debug("[PFLIP]:"__VA_ARGS__)
+#define DC_LOG_VBLANK(...) pr_debug("[VBLANK]:"__VA_ARGS__)
#define DC_LOG_HW_HOTPLUG(...) DRM_DEBUG_KMS(__VA_ARGS__)
#define DC_LOG_HW_LINK_TRAINING(...) pr_debug("[HW_LINK_TRAINING]:"__VA_ARGS__)
#define DC_LOG_HW_SET_MODE(...) DRM_DEBUG_KMS(__VA_ARGS__)
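
The three new log macros follow the file's existing pr_debug()-with-prefix pattern, so they are gated by dynamic debug like the other per-topic macros. A userspace mock showing the expansion (pr_debug is stubbed with printf here):

#include <stdio.h>

/* Userspace mock: in the kernel, pr_debug() is gated by dynamic debug. */
#define pr_debug(...) printf(__VA_ARGS__)

#define DC_LOG_CURSOR(...) pr_debug("[CURSOR]:" __VA_ARGS__)
#define DC_LOG_PFLIP(...)  pr_debug("[PFLIP]:" __VA_ARGS__)

int main(void)
{
        /* The prefix and the caller's format string concatenate into
         * one literal, e.g. "[CURSOR]:cursor moved to (%d, %d)\n". */
        DC_LOG_CURSOR("cursor moved to (%d, %d)\n", 100, 200);
        DC_LOG_PFLIP("flip submitted on crtc %d\n", 0);
        return 0;
}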
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
index 5c67e12b2e55..ef742d95ef05 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
@@ -942,7 +942,7 @@ static void hermite_spline_eetf(struct fixed31_32 input_x,
static bool build_freesync_hdr(struct pwl_float_data_ex *rgb_regamma,
uint32_t hw_points_num,
const struct hw_x_point *coordinate_x,
- const struct freesync_hdr_tf_params *fs_params,
+ const struct hdr_tm_params *fs_params,
struct calculate_buffer *cal_buffer)
{
uint32_t i;
@@ -2027,7 +2027,7 @@ rgb_user_alloc_fail:
static bool calculate_curve(enum dc_transfer_func_predefined trans,
struct dc_transfer_func_distributed_points *points,
struct pwl_float_data_ex *rgb_regamma,
- const struct freesync_hdr_tf_params *fs_params,
+ const struct hdr_tm_params *fs_params,
uint32_t sdr_ref_white_level,
struct calculate_buffer *cal_buffer)
{
@@ -2106,7 +2106,7 @@ static bool calculate_curve(enum dc_transfer_func_predefined trans,
bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf,
const struct dc_gamma *ramp, bool mapUserRamp, bool canRomBeUsed,
- const struct freesync_hdr_tf_params *fs_params,
+ const struct hdr_tm_params *fs_params,
struct calculate_buffer *cal_buffer)
{
struct dc_transfer_func_distributed_points *tf_pts = &output_tf->tf_pts;
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.h b/drivers/gpu/drm/amd/display/modules/color/color_gamma.h
index 7563457e2ff4..2893abf48208 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.h
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.h
@@ -76,7 +76,7 @@ struct regamma_lut {
};
};
-struct freesync_hdr_tf_params {
+struct hdr_tm_params {
unsigned int sdr_white_level;
unsigned int min_content; // luminance in 1/10000 nits
unsigned int max_content; // luminance in nits
@@ -108,7 +108,7 @@ void precompute_de_pq(void);
bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf,
const struct dc_gamma *ramp, bool mapUserRamp, bool canRomBeUsed,
- const struct freesync_hdr_tf_params *fs_params,
+ const struct hdr_tm_params *fs_params,
struct calculate_buffer *cal_buffer);
bool mod_color_calculate_degamma_params(struct dc_color_caps *dc_caps,
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
index e5f9d7704a63..3f4f44b44e6a 100644
--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
@@ -118,7 +118,7 @@ static unsigned int calc_duration_in_us_from_v_total(
return duration_in_us;
}
-static unsigned int calc_v_total_from_refresh(
+unsigned int mod_freesync_calc_v_total_from_refresh(
const struct dc_stream_state *stream,
unsigned int refresh_in_uhz)
{
@@ -280,10 +280,10 @@ static void apply_below_the_range(struct core_freesync *core_freesync,
/* Restore FreeSync */
in_out_vrr->adjust.v_total_min =
- calc_v_total_from_refresh(stream,
+ mod_freesync_calc_v_total_from_refresh(stream,
in_out_vrr->max_refresh_in_uhz);
in_out_vrr->adjust.v_total_max =
- calc_v_total_from_refresh(stream,
+ mod_freesync_calc_v_total_from_refresh(stream,
in_out_vrr->min_refresh_in_uhz);
/* BTR set to "active" so engage */
} else {
@@ -442,16 +442,16 @@ static void apply_fixed_refresh(struct core_freesync *core_freesync,
if (update) {
if (in_out_vrr->fixed.fixed_active) {
in_out_vrr->adjust.v_total_min =
- calc_v_total_from_refresh(
+ mod_freesync_calc_v_total_from_refresh(
stream, in_out_vrr->max_refresh_in_uhz);
in_out_vrr->adjust.v_total_max =
in_out_vrr->adjust.v_total_min;
} else {
in_out_vrr->adjust.v_total_min =
- calc_v_total_from_refresh(stream,
+ mod_freesync_calc_v_total_from_refresh(stream,
in_out_vrr->max_refresh_in_uhz);
in_out_vrr->adjust.v_total_max =
- calc_v_total_from_refresh(stream,
+ mod_freesync_calc_v_total_from_refresh(stream,
in_out_vrr->min_refresh_in_uhz);
}
}
@@ -543,8 +543,8 @@ static void build_vrr_infopacket_data_v1(const struct mod_vrr_params *vrr,
infopacket->sb[6] |= 0x02;
/* PB6 = [Bit 2 = FreeSync Active] */
- if (vrr->state == VRR_STATE_ACTIVE_VARIABLE ||
- vrr->state == VRR_STATE_ACTIVE_FIXED)
+ if (vrr->state != VRR_STATE_DISABLED &&
+ vrr->state != VRR_STATE_UNSUPPORTED)
infopacket->sb[6] |= 0x04;
// For v1 & 2 infoframes program nominal if non-fs mode, otherwise full range
@@ -1082,10 +1082,10 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
refresh_range >= MIN_REFRESH_RANGE) {
in_out_vrr->adjust.v_total_min =
- calc_v_total_from_refresh(stream,
+ mod_freesync_calc_v_total_from_refresh(stream,
in_out_vrr->max_refresh_in_uhz);
in_out_vrr->adjust.v_total_max =
- calc_v_total_from_refresh(stream,
+ mod_freesync_calc_v_total_from_refresh(stream,
in_out_vrr->min_refresh_in_uhz);
} else if (in_out_vrr->state == VRR_STATE_ACTIVE_FIXED) {
in_out_vrr->fixed.target_refresh_in_uhz =
@@ -1099,7 +1099,7 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
} else {
in_out_vrr->fixed.fixed_active = true;
in_out_vrr->adjust.v_total_min =
- calc_v_total_from_refresh(stream,
+ mod_freesync_calc_v_total_from_refresh(stream,
in_out_vrr->fixed.target_refresh_in_uhz);
in_out_vrr->adjust.v_total_max =
in_out_vrr->adjust.v_total_min;
@@ -1206,10 +1206,10 @@ void mod_freesync_handle_v_update(struct mod_freesync *mod_freesync,
/* Restore FreeSync */
if (in_out_vrr->btr.frame_counter == 0) {
in_out_vrr->adjust.v_total_min =
- calc_v_total_from_refresh(stream,
+ mod_freesync_calc_v_total_from_refresh(stream,
in_out_vrr->max_refresh_in_uhz);
in_out_vrr->adjust.v_total_max =
- calc_v_total_from_refresh(stream,
+ mod_freesync_calc_v_total_from_refresh(stream,
in_out_vrr->min_refresh_in_uhz);
}
}
@@ -1267,6 +1267,21 @@ unsigned long long mod_freesync_calc_nominal_field_rate(
return nominal_field_rate_in_uhz;
}
+unsigned long long mod_freesync_calc_field_rate_from_timing(
+ unsigned int vtotal, unsigned int htotal, unsigned int pix_clk)
+{
+ unsigned long long field_rate_in_uhz = 0;
+ unsigned int total = htotal * vtotal;
+
+ /* Calculate the field rate in uHz from the given timing parameters */
+ field_rate_in_uhz = pix_clk;
+ field_rate_in_uhz *= 1000000ULL;
+
+ field_rate_in_uhz = div_u64(field_rate_in_uhz, total);
+
+ return field_rate_in_uhz;
+}
+
bool mod_freesync_is_valid_range(uint32_t min_refresh_cap_in_uhz,
uint32_t max_refresh_cap_in_uhz,
uint32_t nominal_field_rate_in_uhz)
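
The new mod_freesync_calc_field_rate_from_timing() helper reduces to pix_clk * 1000000 / (htotal * vtotal). A worked example, assuming pix_clk is passed in Hz so the result comes out in uHz (standard 1080p60 CEA timing):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        unsigned int htotal = 2200, vtotal = 1125, pix_clk = 148500000;
        unsigned int total = htotal * vtotal;          /* 2,475,000 */
        /* 64-bit intermediate, matching the div_u64() in the patch. */
        uint64_t rate_uhz = (uint64_t)pix_clk * 1000000ULL / total;

        printf("%llu uHz (= %llu Hz)\n",
               (unsigned long long)rate_uhz,
               (unsigned long long)(rate_uhz / 1000000ULL));
        /* prints: 60000000 uHz (= 60 Hz) */
        return 0;
}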
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c
index 20e554e771d1..68a6481d7f8f 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c
@@ -53,7 +53,7 @@ static uint8_t is_cp_desired_hdcp1(struct mod_hdcp *hdcp)
*/
for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) {
if (hdcp->displays[i].state != MOD_HDCP_DISPLAY_INACTIVE &&
- !hdcp->displays[i].adjust.disable) {
+ hdcp->displays[i].adjust.disable != MOD_HDCP_DISPLAY_DISABLE_AUTHENTICATION) {
is_auth_needed = 1;
break;
}
@@ -74,7 +74,7 @@ static uint8_t is_cp_desired_hdcp2(struct mod_hdcp *hdcp)
*/
for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) {
if (hdcp->displays[i].state != MOD_HDCP_DISPLAY_INACTIVE &&
- !hdcp->displays[i].adjust.disable) {
+ hdcp->displays[i].adjust.disable != MOD_HDCP_DISPLAY_DISABLE_AUTHENTICATION) {
is_auth_needed = 1;
break;
}
@@ -314,6 +314,9 @@ enum mod_hdcp_status mod_hdcp_add_display(struct mod_hdcp *hdcp,
goto out;
}
+ /* save current encryption states to restore after next authentication */
+ mod_hdcp_save_current_encryption_states(hdcp);
+
/* reset existing authentication status */
status = reset_authentication(hdcp, output);
if (status != MOD_HDCP_STATUS_SUCCESS)
@@ -360,6 +363,9 @@ enum mod_hdcp_status mod_hdcp_remove_display(struct mod_hdcp *hdcp,
goto out;
}
+ /* save current encryption states to restore after next authentication */
+ mod_hdcp_save_current_encryption_states(hdcp);
+
/* stop current authentication */
status = reset_authentication(hdcp, output);
if (status != MOD_HDCP_STATUS_SUCCESS)
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
index 5c22cf7e6118..3ce91db560d1 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
@@ -331,6 +331,8 @@ enum mod_hdcp_status mod_hdcp_add_display_to_topology(
struct mod_hdcp *hdcp, struct mod_hdcp_display *display);
enum mod_hdcp_status mod_hdcp_remove_display_from_topology(
struct mod_hdcp *hdcp, uint8_t index);
+bool mod_hdcp_is_link_encryption_enabled(struct mod_hdcp *hdcp);
+void mod_hdcp_save_current_encryption_states(struct mod_hdcp *hdcp);
enum mod_hdcp_status mod_hdcp_hdcp1_create_session(struct mod_hdcp *hdcp);
enum mod_hdcp_status mod_hdcp_hdcp1_destroy_session(struct mod_hdcp *hdcp);
enum mod_hdcp_status mod_hdcp_hdcp1_validate_rx(struct mod_hdcp *hdcp);
@@ -339,8 +341,6 @@ enum mod_hdcp_status mod_hdcp_hdcp1_validate_ksvlist_vp(struct mod_hdcp *hdcp);
enum mod_hdcp_status mod_hdcp_hdcp1_enable_dp_stream_encryption(
struct mod_hdcp *hdcp);
enum mod_hdcp_status mod_hdcp_hdcp1_link_maintenance(struct mod_hdcp *hdcp);
-enum mod_hdcp_status mod_hdcp_hdcp1_get_link_encryption_status(struct mod_hdcp *hdcp,
- enum mod_hdcp_encryption_status *encryption_status);
enum mod_hdcp_status mod_hdcp_hdcp2_create_session(struct mod_hdcp *hdcp);
enum mod_hdcp_status mod_hdcp_hdcp2_destroy_session(struct mod_hdcp *hdcp);
enum mod_hdcp_status mod_hdcp_hdcp2_prepare_ake_init(struct mod_hdcp *hdcp);
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
index 73ca49f05bd3..2cbd931363bd 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
@@ -256,10 +256,12 @@ static enum mod_hdcp_status authenticated(struct mod_hdcp *hdcp,
goto out;
}
- if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp1_link_maintenance,
+ mod_hdcp_execute_and_set(mod_hdcp_hdcp1_link_maintenance,
&input->link_maintenance, &status,
- hdcp, "link_maintenance"))
- goto out;
+ hdcp, "link_maintenance");
+
+ if (status != MOD_HDCP_STATUS_SUCCESS)
+ mod_hdcp_save_current_encryption_states(hdcp);
out:
return status;
}
@@ -426,18 +428,21 @@ static enum mod_hdcp_status authenticated_dp(struct mod_hdcp *hdcp,
goto out;
}
- if (!mod_hdcp_execute_and_set(mod_hdcp_read_bstatus,
- &input->bstatus_read, &status,
- hdcp, "bstatus_read"))
- goto out;
- if (!mod_hdcp_execute_and_set(check_link_integrity_dp,
- &input->link_integrity_check, &status,
- hdcp, "link_integrity_check"))
- goto out;
- if (!mod_hdcp_execute_and_set(check_no_reauthentication_request_dp,
- &input->reauth_request_check, &status,
- hdcp, "reauth_request_check"))
- goto out;
+ if (status == MOD_HDCP_STATUS_SUCCESS)
+ mod_hdcp_execute_and_set(mod_hdcp_read_bstatus,
+ &input->bstatus_read, &status,
+ hdcp, "bstatus_read");
+ if (status == MOD_HDCP_STATUS_SUCCESS)
+ mod_hdcp_execute_and_set(check_link_integrity_dp,
+ &input->link_integrity_check, &status,
+ hdcp, "link_integrity_check");
+ if (status == MOD_HDCP_STATUS_SUCCESS)
+ mod_hdcp_execute_and_set(check_no_reauthentication_request_dp,
+ &input->reauth_request_check, &status,
+ hdcp, "reauth_request_check");
+
+ if (status != MOD_HDCP_STATUS_SUCCESS)
+ mod_hdcp_save_current_encryption_states(hdcp);
out:
return status;
}
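
The authenticated_dp() rework above replaces the goto-on-first-failure chain with status-gated steps plus a single save-state path on failure. A self-contained sketch of the pattern; all types and helpers here are stand-ins, not the driver's:

#include <stdbool.h>
#include <stdio.h>

enum status { STATUS_SUCCESS, STATUS_FAILURE };

/* Stand-in for mod_hdcp_execute_and_set(): run a step, record status. */
static void run_step(bool (*step)(void), enum status *status, const char *name)
{
        if (!step())
                *status = STATUS_FAILURE;
        printf("%s: %s\n", name, *status == STATUS_SUCCESS ? "ok" : "failed");
}

static bool read_bstatus(void)    { return true; }
static bool check_integrity(void) { return false; } /* simulate a failure */
static bool check_reauth(void)    { return true; }
static void save_encryption_state(void) { puts("state saved"); }

int main(void)
{
        enum status status = STATUS_SUCCESS;

        /* Each step runs only while status is still SUCCESS. */
        if (status == STATUS_SUCCESS)
                run_step(read_bstatus, &status, "bstatus_read");
        if (status == STATUS_SUCCESS)
                run_step(check_integrity, &status, "link_integrity_check");
        if (status == STATUS_SUCCESS)
                run_step(check_reauth, &status, "reauth_request_check");

        /* One failure path records state, as in the hunk above. */
        if (status != STATUS_SUCCESS)
                save_encryption_state();
        return 0;
}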
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c
index 24ab95b093f7..3dda8c1d83fc 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c
@@ -93,7 +93,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_transition(struct mod_hdcp *hdcp,
}
break;
case H1_A45_AUTHENTICATED:
- if (input->link_maintenance != PASS) {
+ if (input->link_maintenance == FAIL) {
/* 1A-07: consider invalid ri' a failure */
/* 1A-07a: consider read ri' not returned a failure */
fail_and_restart_in_ms(0, &status, output);
@@ -243,8 +243,8 @@ enum mod_hdcp_status mod_hdcp_hdcp1_dp_transition(struct mod_hdcp *hdcp,
}
break;
case D1_A4_AUTHENTICATED:
- if (input->link_integrity_check != PASS ||
- input->reauth_request_check != PASS) {
+ if (input->link_integrity_check == FAIL ||
+ input->reauth_request_check == FAIL) {
/* 1A-07: restart hdcp on a link integrity failure */
fail_and_restart_in_ms(0, &status, output);
break;
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
index a0895a7efda2..c1331facdcb4 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
@@ -565,10 +565,10 @@ static enum mod_hdcp_status authenticated(struct mod_hdcp *hdcp,
goto out;
}
- if (!process_rxstatus(hdcp, event_ctx, input, &status))
- goto out;
- if (event_ctx->rx_id_list_ready)
- goto out;
+ process_rxstatus(hdcp, event_ctx, input, &status);
+
+ if (status != MOD_HDCP_STATUS_SUCCESS)
+ mod_hdcp_save_current_encryption_states(hdcp);
out:
return status;
}
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c
index e738c7ae66ec..b0306ed6d6b4 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c
@@ -245,8 +245,8 @@ enum mod_hdcp_status mod_hdcp_hdcp2_transition(struct mod_hdcp *hdcp,
HDCP_FULL_DDC_TRACE(hdcp);
break;
case H2_A5_AUTHENTICATED:
- if (input->rxstatus_read != PASS ||
- input->reauth_request_check != PASS) {
+ if (input->rxstatus_read == FAIL ||
+ input->reauth_request_check == FAIL) {
fail_and_restart_in_ms(0, &status, output);
break;
} else if (event_ctx->rx_id_list_ready && conn->is_repeater) {
@@ -562,11 +562,11 @@ enum mod_hdcp_status mod_hdcp_hdcp2_dp_transition(struct mod_hdcp *hdcp,
HDCP_FULL_DDC_TRACE(hdcp);
break;
case D2_A5_AUTHENTICATED:
- if (input->rxstatus_read != PASS ||
- input->reauth_request_check != PASS) {
+ if (input->rxstatus_read == FAIL ||
+ input->reauth_request_check == FAIL) {
fail_and_restart_in_ms(0, &status, output);
break;
- } else if (input->link_integrity_check_dp != PASS) {
+ } else if (input->link_integrity_check_dp == FAIL) {
if (hdcp->connection.hdcp2_retry_count >= 1)
adjust->hdcp2.force_type = MOD_HDCP_FORCE_TYPE_0;
fail_and_restart_in_ms(0, &status, output);
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
index 904ce9b88088..26f96c05e0ec 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
@@ -791,6 +791,8 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_rx_id_list(struct mod_hdcp *hdcp)
TA_HDCP2_MSG_AUTHENTICATION_STATUS__RECEIVERID_REVOKED) {
hdcp->connection.is_hdcp2_revoked = 1;
status = MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_REVOKED;
+ } else {
+ status = MOD_HDCP_STATUS_HDCP2_VALIDATE_RX_ID_LIST_FAILURE;
}
}
mutex_unlock(&psp->hdcp_context.mutex);
@@ -914,3 +916,13 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_stream_ready(struct mod_hdcp *hdcp)
return status;
}
+bool mod_hdcp_is_link_encryption_enabled(struct mod_hdcp *hdcp)
+{
+ /* unsupported */
+ return true;
+}
+
+void mod_hdcp_save_current_encryption_states(struct mod_hdcp *hdcp)
+{
+ /* unsupported */
+}
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
index b64cd5bdc7b5..75a158a2514c 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
@@ -171,10 +171,15 @@ void mod_freesync_handle_v_update(struct mod_freesync *mod_freesync,
unsigned long long mod_freesync_calc_nominal_field_rate(
const struct dc_stream_state *stream);
+unsigned long long mod_freesync_calc_field_rate_from_timing(
+ unsigned int vtotal, unsigned int htotal, unsigned int pix_clk);
+
bool mod_freesync_is_valid_range(uint32_t min_refresh_cap_in_uhz,
uint32_t max_refresh_cap_in_uhz,
uint32_t nominal_field_rate_in_uhz);
-
+unsigned int mod_freesync_calc_v_total_from_refresh(
+ const struct dc_stream_state *stream,
+ unsigned int refresh_in_uhz);
#endif
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
index d223ed3be5d3..acbeada5215b 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
@@ -120,6 +120,12 @@ enum mod_hdcp_display_state {
MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED
};
+enum mod_hdcp_display_disable_option {
+ MOD_HDCP_DISPLAY_NOT_DISABLE = 0,
+ MOD_HDCP_DISPLAY_DISABLE_AUTHENTICATION,
+ MOD_HDCP_DISPLAY_DISABLE_ENCRYPTION,
+};
+
struct mod_hdcp_ddc {
void *handle;
struct {
@@ -149,8 +155,8 @@ struct mod_hdcp_psp {
};
struct mod_hdcp_display_adjustment {
- uint8_t disable : 1;
- uint8_t reserved : 7;
+ uint8_t disable : 2;
+ uint8_t reserved : 6;
};
struct mod_hdcp_link_adjustment_hdcp1 {
@@ -255,8 +261,6 @@ struct mod_hdcp_config {
uint8_t index;
};
-struct mod_hdcp;
-
/* dm allocates memory of mod_hdcp per dc_link on dm init based on memory size*/
size_t mod_hdcp_get_memory_size(void);
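
The mod_hdcp_display_adjustment change above widens `disable` from a 1-bit flag to a 2-bit field so it can carry the new three-value enum. A compile-and-run sketch of why the width matters (names shortened for illustration):

#include <assert.h>
#include <stdint.h>

enum disable_option {
        NOT_DISABLE = 0,
        DISABLE_AUTHENTICATION,
        DISABLE_ENCRYPTION,
};

struct display_adjustment {
        uint8_t disable : 2;   /* 2 bits hold values 0..3 */
        uint8_t reserved : 6;
};

int main(void)
{
        struct display_adjustment adj = { .disable = DISABLE_ENCRYPTION };

        /* The old `!adj.disable` test becomes an explicit comparison,
         * so DISABLE_ENCRYPTION no longer skips authentication. */
        assert(adj.disable != DISABLE_AUTHENTICATION);
        return 0;
}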
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_offset.h
index 0102487a2c5f..f21554a1c86c 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_offset.h
@@ -6955,6 +6955,12 @@
#define mmCP_CE_IB2_BASE_HI_BASE_IDX 1
#define mmCP_CE_IB2_BUFSZ 0x20cb
#define mmCP_CE_IB2_BUFSZ_BASE_IDX 1
+#define mmCP_IB1_BASE_LO 0x20cc
+#define mmCP_IB1_BASE_LO_BASE_IDX 1
+#define mmCP_IB1_BASE_HI 0x20cd
+#define mmCP_IB1_BASE_HI_BASE_IDX 1
+#define mmCP_IB1_BUFSZ 0x20ce
+#define mmCP_IB1_BUFSZ_BASE_IDX 1
#define mmCP_IB2_BASE_LO 0x20cf
#define mmCP_IB2_BASE_LO_BASE_IDX 1
#define mmCP_IB2_BASE_HI 0x20d0
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_sh_mask.h
index 4d2a1432c121..a827b0ff8905 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_sh_mask.h
@@ -25818,6 +25818,15 @@
//CP_CE_IB2_BUFSZ
#define CP_CE_IB2_BUFSZ__IB2_BUFSZ__SHIFT 0x0
#define CP_CE_IB2_BUFSZ__IB2_BUFSZ_MASK 0x000FFFFFL
+//CP_IB1_BASE_LO
+#define CP_IB1_BASE_LO__IB1_BASE_LO__SHIFT 0x2
+#define CP_IB1_BASE_LO__IB1_BASE_LO_MASK 0xFFFFFFFCL
+//CP_IB1_BASE_HI
+#define CP_IB1_BASE_HI__IB1_BASE_HI__SHIFT 0x0
+#define CP_IB1_BASE_HI__IB1_BASE_HI_MASK 0x0000FFFFL
+//CP_IB1_BUFSZ
+#define CP_IB1_BUFSZ__IB1_BUFSZ__SHIFT 0x0
+#define CP_IB1_BUFSZ__IB1_BUFSZ_MASK 0x000FFFFFL
//CP_IB2_BASE_LO
#define CP_IB2_BASE_LO__IB2_BASE_LO__SHIFT 0x2
#define CP_IB2_BASE_LO__IB2_BASE_LO_MASK 0xFFFFFFFCL
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_1_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_1_sh_mask.h
index 4089cfa081f5..849450caca15 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_1_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_1_sh_mask.h
@@ -617,6 +617,22 @@
#define GCEA_EDC_CNT3__MAM_A3MEM_SEC_COUNT_MASK 0x30000000L
#define GCEA_EDC_CNT3__MAM_A3MEM_DED_COUNT_MASK 0xC0000000L
+//GCEA_ERR_STATUS
+#define GCEA_ERR_STATUS__SDP_RDRSP_STATUS__SHIFT 0x0
+#define GCEA_ERR_STATUS__SDP_WRRSP_STATUS__SHIFT 0x4
+#define GCEA_ERR_STATUS__SDP_RDRSP_DATASTATUS__SHIFT 0x8
+#define GCEA_ERR_STATUS__SDP_RDRSP_DATAPARITY_ERROR__SHIFT 0xa
+#define GCEA_ERR_STATUS__CLEAR_ERROR_STATUS__SHIFT 0xb
+#define GCEA_ERR_STATUS__BUSY_ON_ERROR__SHIFT 0xc
+#define GCEA_ERR_STATUS__FUE_FLAG__SHIFT 0xd
+#define GCEA_ERR_STATUS__SDP_RDRSP_STATUS_MASK 0x0000000FL
+#define GCEA_ERR_STATUS__SDP_WRRSP_STATUS_MASK 0x000000F0L
+#define GCEA_ERR_STATUS__SDP_RDRSP_DATASTATUS_MASK 0x00000300L
+#define GCEA_ERR_STATUS__SDP_RDRSP_DATAPARITY_ERROR_MASK 0x00000400L
+#define GCEA_ERR_STATUS__CLEAR_ERROR_STATUS_MASK 0x00000800L
+#define GCEA_ERR_STATUS__BUSY_ON_ERROR_MASK 0x00001000L
+#define GCEA_ERR_STATUS__FUE_FLAG_MASK 0x00002000L
+
// addressBlock: gc_gfxudec
//GRBM_GFX_INDEX
#define GRBM_GFX_INDEX__INSTANCE_INDEX__SHIFT 0x0
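
The new GCEA_ERR_STATUS shift/mask pairs follow the usual mask-then-shift pattern of these auto-generated headers. An illustrative extraction; the register value below is made up:

#include <stdio.h>
#include <stdint.h>

#define GCEA_ERR_STATUS__SDP_RDRSP_STATUS__SHIFT 0x0
#define GCEA_ERR_STATUS__SDP_RDRSP_STATUS_MASK   0x0000000FL
#define GCEA_ERR_STATUS__FUE_FLAG__SHIFT         0xd
#define GCEA_ERR_STATUS__FUE_FLAG_MASK           0x00002000L

int main(void)
{
        uint32_t reg = 0x00002003; /* hypothetical register readback */
        uint32_t rdrsp = (reg & GCEA_ERR_STATUS__SDP_RDRSP_STATUS_MASK) >>
                         GCEA_ERR_STATUS__SDP_RDRSP_STATUS__SHIFT;
        uint32_t fue = (reg & GCEA_ERR_STATUS__FUE_FLAG_MASK) >>
                       GCEA_ERR_STATUS__FUE_FLAG__SHIFT;

        printf("SDP_RDRSP_STATUS=%u FUE_FLAG=%u\n",
               (unsigned)rdrsp, (unsigned)fue); /* prints 3 and 1 */
        return 0;
}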
diff --git a/drivers/gpu/drm/amd/include/atombios.h b/drivers/gpu/drm/amd/include/atombios.h
index c1d7b1d0b952..47eb84598b96 100644
--- a/drivers/gpu/drm/amd/include/atombios.h
+++ b/drivers/gpu/drm/amd/include/atombios.h
@@ -1987,9 +1987,9 @@ typedef struct _PIXEL_CLOCK_PARAMETERS_V6
#define PIXEL_CLOCK_V6_MISC_HDMI_BPP_MASK 0x0c
#define PIXEL_CLOCK_V6_MISC_HDMI_24BPP 0x00
#define PIXEL_CLOCK_V6_MISC_HDMI_36BPP 0x04
-#define PIXEL_CLOCK_V6_MISC_HDMI_36BPP_V6 0x08 //for V6, the correct defintion for 36bpp should be 2 for 36bpp(2:1)
+#define PIXEL_CLOCK_V6_MISC_HDMI_36BPP_V6 0x08 //for V6, the correct definition for 36bpp should be 2 for 36bpp(2:1)
#define PIXEL_CLOCK_V6_MISC_HDMI_30BPP 0x08
-#define PIXEL_CLOCK_V6_MISC_HDMI_30BPP_V6 0x04 //for V6, the correct defintion for 30bpp should be 1 for 36bpp(5:4)
+#define PIXEL_CLOCK_V6_MISC_HDMI_30BPP_V6 0x04 //for V6, the correct definition for 30bpp should be 1 for 36bpp(5:4)
#define PIXEL_CLOCK_V6_MISC_HDMI_48BPP 0x0c
#define PIXEL_CLOCK_V6_MISC_REF_DIV_SRC 0x10
#define PIXEL_CLOCK_V6_MISC_GEN_DPREFCLK 0x40
diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h
index 58364a8eb1f3..f2564ba21c0b 100644
--- a/drivers/gpu/drm/amd/include/atomfirmware.h
+++ b/drivers/gpu/drm/amd/include/atomfirmware.h
@@ -981,6 +981,40 @@ struct atom_display_controller_info_v4_2
uint8_t reserved3[8];
};
+struct atom_display_controller_info_v4_3
+{
+ struct atom_common_table_header table_header;
+ uint32_t display_caps;
+ uint32_t bootup_dispclk_10khz;
+ uint16_t dce_refclk_10khz;
+ uint16_t i2c_engine_refclk_10khz;
+ uint16_t dvi_ss_percentage; // in unit of 0.001%
+ uint16_t dvi_ss_rate_10hz;
+ uint16_t hdmi_ss_percentage; // in unit of 0.001%
+ uint16_t hdmi_ss_rate_10hz;
+ uint16_t dp_ss_percentage; // in unit of 0.001%
+ uint16_t dp_ss_rate_10hz;
+ uint8_t dvi_ss_mode; // enum of atom_spread_spectrum_mode
+ uint8_t hdmi_ss_mode; // enum of atom_spread_spectrum_mode
+ uint8_t dp_ss_mode; // enum of atom_spread_spectrum_mode
+ uint8_t ss_reserved;
+ uint8_t dfp_hardcode_mode_num; // DFP hardcode mode number defined in StandardVESA_TimingTable when EDID is not available
+ uint8_t dfp_hardcode_refreshrate;// DFP hardcode mode refreshrate defined in StandardVESA_TimingTable when EDID is not available
+ uint8_t vga_hardcode_mode_num; // VGA hardcode mode number defined in StandardVESA_TimingTable when EDID is not available
+ uint8_t vga_hardcode_refreshrate;// VGA hardcode mode refreshrate defined in StandardVESA_TimingTable when EDID is not available
+ uint16_t dpphy_refclk_10khz;
+ uint16_t reserved2;
+ uint8_t dcnip_min_ver;
+ uint8_t dcnip_max_ver;
+ uint8_t max_disp_pipe_num;
+ uint8_t max_vbios_active_disp_pipe_num;
+ uint8_t max_ppll_num;
+ uint8_t max_disp_phy_num;
+ uint8_t max_aux_pairs;
+ uint8_t remotedisplayconfig;
+ uint8_t reserved3[8];
+};
+
struct atom_display_controller_info_v4_4 {
struct atom_common_table_header table_header;
uint32_t display_caps;
@@ -1043,7 +1077,9 @@ enum dce_info_caps_def
DCE_INFO_CAPS_DISABLE_DFP_DP_HBR2 =0x04,
// only for VBIOS
DCE_INFO_CAPS_ENABLE_INTERLAC_TIMING =0x08,
-
+ // only for VBIOS
+ DCE_INFO_CAPS_LTTPR_SUPPORT_ENABLE =0x20,
+ DCE_INFO_CAPS_VBIOS_LTTPR_TRANSPARENT_ENABLE = 0x40,
};
/*
@@ -3300,6 +3336,47 @@ enum atom_smu11_syspll3_1_clock_id {
SMU11_SYSPLL3_1_LCLK_ID = 2, // LCLK
};
+enum atom_smu12_syspll_id {
+ SMU12_SYSPLL0_ID = 0,
+ SMU12_SYSPLL1_ID = 1,
+ SMU12_SYSPLL2_ID = 2,
+ SMU12_SYSPLL3_0_ID = 3,
+ SMU12_SYSPLL3_1_ID = 4,
+};
+
+enum atom_smu12_syspll0_clock_id {
+ SMU12_SYSPLL0_SMNCLK_ID = 0, // SOCCLK
+ SMU12_SYSPLL0_SOCCLK_ID = 1, // SOCCLK
+ SMU12_SYSPLL0_MP0CLK_ID = 2, // MP0CLK
+ SMU12_SYSPLL0_MP1CLK_ID = 3, // MP1CLK
+ SMU12_SYSPLL0_MP2CLK_ID = 4, // MP2CLK
+ SMU12_SYSPLL0_VCLK_ID = 5, // VCLK
+ SMU12_SYSPLL0_LCLK_ID = 6, // LCLK
+ SMU12_SYSPLL0_DCLK_ID = 7, // DCLK
+ SMU12_SYSPLL0_ACLK_ID = 8, // ACLK
+ SMU12_SYSPLL0_ISPCLK_ID = 9, // ISPCLK
+ SMU12_SYSPLL0_SHUBCLK_ID = 10, // SHUBCLK
+};
+
+enum atom_smu12_syspll1_clock_id {
+ SMU12_SYSPLL1_DISPCLK_ID = 0, // DISPCLK
+ SMU12_SYSPLL1_DPPCLK_ID = 1, // DPPCLK
+ SMU12_SYSPLL1_DPREFCLK_ID = 2, // DPREFCLK
+ SMU12_SYSPLL1_DCFCLK_ID = 3, // DCFCLK
+};
+
+enum atom_smu12_syspll2_clock_id {
+ SMU12_SYSPLL2_Pre_GFXCLK_ID = 0, // Pre_GFXCLK
+};
+
+enum atom_smu12_syspll3_0_clock_id {
+ SMU12_SYSPLL3_0_FCLK_ID = 0, // FCLK
+};
+
+enum atom_smu12_syspll3_1_clock_id {
+ SMU12_SYSPLL3_1_UMCCLK_ID = 0, // UMCCLK
+};
+
struct atom_get_smu_clock_info_output_parameters_v3_1
{
union {
diff --git a/drivers/gpu/drm/amd/include/ivsrcid/dcn/irqsrcs_dcn_1_0.h b/drivers/gpu/drm/amd/include/ivsrcid/dcn/irqsrcs_dcn_1_0.h
index e2bffcae273a..754170a86ea4 100644
--- a/drivers/gpu/drm/amd/include/ivsrcid/dcn/irqsrcs_dcn_1_0.h
+++ b/drivers/gpu/drm/amd/include/ivsrcid/dcn/irqsrcs_dcn_1_0.h
@@ -1132,5 +1132,7 @@
#define DCN_1_0__SRCID__DMCUB_OUTBOX_HIGH_PRIORITY_READY_INT 0x68
#define DCN_1_0__CTXID__DMCUB_OUTBOX_HIGH_PRIORITY_READY_INT 6
+#define DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT 0x68 // DMCUB_IHC_outbox1_ready_int IHC_DMCUB_outbox1_ready_int_ack DMCUB_OUTBOX_LOW_PRIORITY_READY_INTERRUPT DISP_INTERRUPT_STATUS_CONTINUE24 Level/Pulse
+#define DCN_1_0__CTXID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT 8
#endif // __IRQSRCS_DCN_1_0_H__
diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index dd695817c481..353468667036 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -242,6 +242,9 @@ struct pp_display_clock_request;
struct pp_clock_levels_with_voltage;
struct pp_clock_levels_with_latency;
struct amd_pp_clocks;
+struct pp_smu_wm_range_sets;
+struct pp_smu_nv_clock_table;
+struct dpm_clocks;
struct amd_pm_funcs {
/* export for dpm on ci and si */
@@ -336,6 +339,17 @@ struct amd_pm_funcs {
int (*set_df_cstate)(void *handle, enum pp_df_cstate state);
int (*set_xgmi_pstate)(void *handle, uint32_t pstate);
ssize_t (*get_gpu_metrics)(void *handle, void **table);
+ int (*set_watermarks_for_clock_ranges)(void *handle,
+ struct pp_smu_wm_range_sets *ranges);
+ int (*display_disable_memory_clock_switch)(void *handle,
+ bool disable_memory_clock_switch);
+ int (*get_max_sustainable_clocks_by_dc)(void *handle,
+ struct pp_smu_nv_clock_table *max_clocks);
+ int (*get_uclk_dpm_states)(void *handle,
+ unsigned int *clock_values_in_khz,
+ unsigned int *num_states);
+ int (*get_dpm_clock_table)(void *handle,
+ struct dpm_clocks *clock_table);
};
struct metrics_table_header {
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
index 0a6bb3311f0f..03581d5b1836 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
@@ -927,7 +927,6 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block
{
int ret = 0;
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
- bool swsmu = is_support_sw_smu(adev);
switch (block_type) {
case AMD_IP_BLOCK_TYPE_UVD:
@@ -968,15 +967,7 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block
case AMD_IP_BLOCK_TYPE_GFX:
case AMD_IP_BLOCK_TYPE_VCN:
case AMD_IP_BLOCK_TYPE_SDMA:
- if (pp_funcs && pp_funcs->set_powergating_by_smu) {
- ret = (pp_funcs->set_powergating_by_smu(
- (adev)->powerplay.pp_handle, block_type, gate));
- }
- break;
case AMD_IP_BLOCK_TYPE_JPEG:
- if (swsmu)
- ret = smu_dpm_set_power_gate(&adev->smu, block_type, gate);
- break;
case AMD_IP_BLOCK_TYPE_GMC:
case AMD_IP_BLOCK_TYPE_ACP:
if (pp_funcs && pp_funcs->set_powergating_by_smu) {
@@ -1606,7 +1597,10 @@ int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_versio
pr_err("smu firmware loading failed\n");
return r;
}
- *smu_version = adev->pm.fw_version;
+
+ if (smu_version)
+ *smu_version = adev->pm.fw_version;
}
+
return 0;
}
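
The amdgpu_pm_load_smu_firmware() change above makes smu_version an optional out-parameter. A minimal sketch of the guard; names are stand-ins for the driver's:

#include <stdio.h>
#include <stdint.h>

static int load_fw(uint32_t *version_out)
{
        uint32_t fw_version = 0x2a;    /* stand-in for adev->pm.fw_version */

        if (version_out)               /* only write through non-NULL */
                *version_out = fw_version;
        return 0;
}

int main(void)
{
        uint32_t v;

        load_fw(NULL);                 /* callers may now pass NULL safely */
        load_fw(&v);
        printf("0x%x\n", v);
        return 0;
}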
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index 2627870a786e..9a54066ec0af 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -27,7 +27,6 @@
#include "amdgpu_drv.h"
#include "amdgpu_pm.h"
#include "amdgpu_dpm.h"
-#include "amdgpu_smu.h"
#include "atom.h"
#include <linux/pci.h>
#include <linux/hwmon.h>
@@ -129,6 +128,8 @@ static ssize_t amdgpu_get_power_dpm_state(struct device *dev,
if (amdgpu_in_reset(adev))
return -EPERM;
+ if (adev->in_suspend && !adev->in_runpm)
+ return -EPERM;
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0) {
@@ -145,9 +146,9 @@ static ssize_t amdgpu_get_power_dpm_state(struct device *dev,
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
- return snprintf(buf, PAGE_SIZE, "%s\n",
- (pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
- (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
+ return sysfs_emit(buf, "%s\n",
+ (pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
+ (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
}
static ssize_t amdgpu_set_power_dpm_state(struct device *dev,
@@ -162,6 +163,8 @@ static ssize_t amdgpu_set_power_dpm_state(struct device *dev,
if (amdgpu_in_reset(adev))
return -EPERM;
+ if (adev->in_suspend && !adev->in_runpm)
+ return -EPERM;
if (strncmp("battery", buf, strlen("battery")) == 0)
state = POWER_STATE_TYPE_BATTERY;
@@ -268,6 +271,8 @@ static ssize_t amdgpu_get_power_dpm_force_performance_level(struct device *dev,
if (amdgpu_in_reset(adev))
return -EPERM;
+ if (adev->in_suspend && !adev->in_runpm)
+ return -EPERM;
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0) {
@@ -283,17 +288,17 @@ static ssize_t amdgpu_get_power_dpm_force_performance_level(struct device *dev,
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
- return snprintf(buf, PAGE_SIZE, "%s\n",
- (level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" :
- (level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" :
- (level == AMD_DPM_FORCED_LEVEL_HIGH) ? "high" :
- (level == AMD_DPM_FORCED_LEVEL_MANUAL) ? "manual" :
- (level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) ? "profile_standard" :
- (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) ? "profile_min_sclk" :
- (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) ? "profile_min_mclk" :
- (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) ? "profile_peak" :
- (level == AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) ? "perf_determinism" :
- "unknown");
+ return sysfs_emit(buf, "%s\n",
+ (level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" :
+ (level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" :
+ (level == AMD_DPM_FORCED_LEVEL_HIGH) ? "high" :
+ (level == AMD_DPM_FORCED_LEVEL_MANUAL) ? "manual" :
+ (level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) ? "profile_standard" :
+ (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) ? "profile_min_sclk" :
+ (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) ? "profile_min_mclk" :
+ (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) ? "profile_peak" :
+ (level == AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) ? "perf_determinism" :
+ "unknown");
}
static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
@@ -310,6 +315,8 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
if (amdgpu_in_reset(adev))
return -EPERM;
+ if (adev->in_suspend && !adev->in_runpm)
+ return -EPERM;
if (strncmp("low", buf, strlen("low")) == 0) {
level = AMD_DPM_FORCED_LEVEL_LOW;
@@ -408,6 +415,8 @@ static ssize_t amdgpu_get_pp_num_states(struct device *dev,
if (amdgpu_in_reset(adev))
return -EPERM;
+ if (adev->in_suspend && !adev->in_runpm)
+ return -EPERM;
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0) {
@@ -442,12 +451,14 @@ static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
- struct pp_states_info data;
+ struct pp_states_info data = {0};
enum amd_pm_state_type pm = 0;
int i = 0, ret = 0;
if (amdgpu_in_reset(adev))
return -EPERM;
+ if (adev->in_suspend && !adev->in_runpm)
+ return -EPERM;
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0) {
@@ -472,7 +483,7 @@ static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
if (i == data.nums)
i = -EINVAL;
- return snprintf(buf, PAGE_SIZE, "%d\n", i);
+ return sysfs_emit(buf, "%d\n", i);
}
static ssize_t amdgpu_get_pp_force_state(struct device *dev,
@@ -484,11 +495,13 @@ static ssize_t amdgpu_get_pp_force_state(struct device *dev,
if (amdgpu_in_reset(adev))
return -EPERM;
+ if (adev->in_suspend && !adev->in_runpm)
+ return -EPERM;
if (adev->pp_force_state_enabled)
return amdgpu_get_pp_cur_state(dev, attr, buf);
else
- return snprintf(buf, PAGE_SIZE, "\n");
+ return sysfs_emit(buf, "\n");
}
static ssize_t amdgpu_set_pp_force_state(struct device *dev,
@@ -504,6 +517,8 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev,
if (amdgpu_in_reset(adev))
return -EPERM;
+ if (adev->in_suspend && !adev->in_runpm)
+ return -EPERM;
if (strlen(buf) == 1)
adev->pp_force_state_enabled = false;
@@ -564,6 +579,8 @@ static ssize_t amdgpu_get_pp_table(struct device *dev,
if (amdgpu_in_reset(adev))
return -EPERM;
+ if (adev->in_suspend && !adev->in_runpm)
+ return -EPERM;
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0) {
@@ -602,6 +619,8 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
if (amdgpu_in_reset(adev))
return -EPERM;
+ if (adev->in_suspend && !adev->in_runpm)
+ return -EPERM;
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0) {
@@ -764,6 +783,8 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
if (amdgpu_in_reset(adev))
return -EPERM;
+ if (adev->in_suspend && !adev->in_runpm)
+ return -EPERM;
if (count > 127)
return -EINVAL;
@@ -865,6 +886,8 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
if (amdgpu_in_reset(adev))
return -EPERM;
+ if (adev->in_suspend && !adev->in_runpm)
+ return -EPERM;
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0) {
@@ -916,6 +939,8 @@ static ssize_t amdgpu_set_pp_features(struct device *dev,
if (amdgpu_in_reset(adev))
return -EPERM;
+ if (adev->in_suspend && !adev->in_runpm)
+ return -EPERM;
ret = kstrtou64(buf, 0, &featuremask);
if (ret)
@@ -927,14 +952,7 @@ static ssize_t amdgpu_set_pp_features(struct device *dev,
return ret;
}
- if (is_support_sw_smu(adev)) {
- ret = smu_sys_set_pp_feature_mask(&adev->smu, featuremask);
- if (ret) {
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
- return -EINVAL;
- }
- } else if (adev->powerplay.pp_funcs->set_ppfeature_status) {
+ if (adev->powerplay.pp_funcs->set_ppfeature_status) {
ret = amdgpu_dpm_set_ppfeature_status(adev, featuremask);
if (ret) {
pm_runtime_mark_last_busy(ddev->dev);
@@ -959,6 +977,8 @@ static ssize_t amdgpu_get_pp_features(struct device *dev,
if (amdgpu_in_reset(adev))
return -EPERM;
+ if (adev->in_suspend && !adev->in_runpm)
+ return -EPERM;
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0) {
@@ -1018,6 +1038,8 @@ static ssize_t amdgpu_get_pp_dpm_clock(struct device *dev,
if (amdgpu_in_reset(adev))
return -EPERM;
+ if (adev->in_suspend && !adev->in_runpm)
+ return -EPERM;
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0) {
@@ -1083,6 +1105,8 @@ static ssize_t amdgpu_set_pp_dpm_clock(struct device *dev,
if (amdgpu_in_reset(adev))
return -EPERM;
+ if (adev->in_suspend && !adev->in_runpm)
+ return -EPERM;
ret = amdgpu_read_mask(buf, count, &mask);
if (ret)
@@ -1239,6 +1263,8 @@ static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
if (amdgpu_in_reset(adev))
return -EPERM;
+ if (adev->in_suspend && !adev->in_runpm)
+ return -EPERM;
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0) {
@@ -1254,7 +1280,7 @@ static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
- return snprintf(buf, PAGE_SIZE, "%d\n", value);
+ return sysfs_emit(buf, "%d\n", value);
}
static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
@@ -1269,6 +1295,8 @@ static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
if (amdgpu_in_reset(adev))
return -EPERM;
+ if (adev->in_suspend && !adev->in_runpm)
+ return -EPERM;
ret = kstrtol(buf, 0, &value);
@@ -1312,6 +1340,8 @@ static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
if (amdgpu_in_reset(adev))
return -EPERM;
+ if (adev->in_suspend && !adev->in_runpm)
+ return -EPERM;
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0) {
@@ -1327,7 +1357,7 @@ static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
- return snprintf(buf, PAGE_SIZE, "%d\n", value);
+ return sysfs_emit(buf, "%d\n", value);
}
static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
@@ -1342,6 +1372,8 @@ static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
if (amdgpu_in_reset(adev))
return -EPERM;
+ if (adev->in_suspend && !adev->in_runpm)
+ return -EPERM;
ret = kstrtol(buf, 0, &value);
@@ -1405,6 +1437,8 @@ static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev,
if (amdgpu_in_reset(adev))
return -EPERM;
+ if (adev->in_suspend && !adev->in_runpm)
+ return -EPERM;
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0) {
@@ -1443,6 +1477,8 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
if (amdgpu_in_reset(adev))
return -EPERM;
+ if (adev->in_suspend && !adev->in_runpm)
+ return -EPERM;
tmp[0] = *(buf);
tmp[1] = '\0';
@@ -1506,6 +1542,8 @@ static ssize_t amdgpu_get_gpu_busy_percent(struct device *dev,
if (amdgpu_in_reset(adev))
return -EPERM;
+ if (adev->in_suspend && !adev->in_runpm)
+ return -EPERM;
r = pm_runtime_get_sync(ddev->dev);
if (r < 0) {
@@ -1523,7 +1561,7 @@ static ssize_t amdgpu_get_gpu_busy_percent(struct device *dev,
if (r)
return r;
- return snprintf(buf, PAGE_SIZE, "%d\n", value);
+ return sysfs_emit(buf, "%d\n", value);
}
/**
@@ -1544,6 +1582,8 @@ static ssize_t amdgpu_get_mem_busy_percent(struct device *dev,
if (amdgpu_in_reset(adev))
return -EPERM;
+ if (adev->in_suspend && !adev->in_runpm)
+ return -EPERM;
r = pm_runtime_get_sync(ddev->dev);
if (r < 0) {
@@ -1561,7 +1601,7 @@ static ssize_t amdgpu_get_mem_busy_percent(struct device *dev,
if (r)
return r;
- return snprintf(buf, PAGE_SIZE, "%d\n", value);
+ return sysfs_emit(buf, "%d\n", value);
}
/**
@@ -1587,6 +1627,8 @@ static ssize_t amdgpu_get_pcie_bw(struct device *dev,
if (amdgpu_in_reset(adev))
return -EPERM;
+ if (adev->in_suspend && !adev->in_runpm)
+ return -EPERM;
if (adev->flags & AMD_IS_APU)
return -ENODATA;
@@ -1605,8 +1647,8 @@ static ssize_t amdgpu_get_pcie_bw(struct device *dev,
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
- return snprintf(buf, PAGE_SIZE, "%llu %llu %i\n",
- count0, count1, pcie_get_mps(adev->pdev));
+ return sysfs_emit(buf, "%llu %llu %i\n",
+ count0, count1, pcie_get_mps(adev->pdev));
}
/**
@@ -1628,9 +1670,11 @@ static ssize_t amdgpu_get_unique_id(struct device *dev,
if (amdgpu_in_reset(adev))
return -EPERM;
+ if (adev->in_suspend && !adev->in_runpm)
+ return -EPERM;
if (adev->unique_id)
- return snprintf(buf, PAGE_SIZE, "%016llx\n", adev->unique_id);
+ return sysfs_emit(buf, "%016llx\n", adev->unique_id);
return 0;
}
@@ -1657,10 +1701,10 @@ static ssize_t amdgpu_get_thermal_throttling_logging(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
- return snprintf(buf, PAGE_SIZE, "%s: thermal throttling logging %s, with interval %d seconds\n",
- adev_to_drm(adev)->unique,
- atomic_read(&adev->throttling_logging_enabled) ? "enabled" : "disabled",
- adev->throttling_logging_rs.interval / HZ + 1);
+ return sysfs_emit(buf, "%s: thermal throttling logging %s, with interval %d seconds\n",
+ adev_to_drm(adev)->unique,
+ atomic_read(&adev->throttling_logging_enabled) ? "enabled" : "disabled",
+ adev->throttling_logging_rs.interval / HZ + 1);
}
static ssize_t amdgpu_set_thermal_throttling_logging(struct device *dev,
@@ -1726,6 +1770,8 @@ static ssize_t amdgpu_get_gpu_metrics(struct device *dev,
if (amdgpu_in_reset(adev))
return -EPERM;
+ if (adev->in_suspend && !adev->in_runpm)
+ return -EPERM;
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0) {
@@ -1798,7 +1844,9 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
if (asic_type < CHIP_VEGA10)
*states = ATTR_STATE_UNSUPPORTED;
} else if (DEVICE_ATTR_IS(pp_dpm_dcefclk)) {
- if (asic_type < CHIP_VEGA10 || asic_type == CHIP_ARCTURUS)
+ if (asic_type < CHIP_VEGA10 ||
+ asic_type == CHIP_ARCTURUS ||
+ asic_type == CHIP_ALDEBARAN)
*states = ATTR_STATE_UNSUPPORTED;
} else if (DEVICE_ATTR_IS(pp_dpm_fclk)) {
if (asic_type < CHIP_VEGA20)
@@ -1845,6 +1893,14 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
}
}
+ if (DEVICE_ATTR_IS(pp_dpm_dcefclk)) {
+ /* SMU MP1 does not support dcefclk level setting */
+ if (asic_type >= CHIP_NAVI10) {
+ dev_attr->attr.mode &= ~S_IWUGO;
+ dev_attr->store = NULL;
+ }
+ }
+
#undef DEVICE_ATTR_IS
return 0;
@@ -1954,6 +2010,8 @@ static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
if (amdgpu_in_reset(adev))
return -EPERM;
+ if (adev->in_suspend && !adev->in_runpm)
+ return -EPERM;
if (channel >= PP_TEMP_MAX)
return -EINVAL;
@@ -1991,7 +2049,7 @@ static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
if (r)
return r;
- return snprintf(buf, PAGE_SIZE, "%d\n", temp);
+ return sysfs_emit(buf, "%d\n", temp);
}
static ssize_t amdgpu_hwmon_show_temp_thresh(struct device *dev,
@@ -2007,7 +2065,7 @@ static ssize_t amdgpu_hwmon_show_temp_thresh(struct device *dev,
else
temp = adev->pm.dpm.thermal.max_temp;
- return snprintf(buf, PAGE_SIZE, "%d\n", temp);
+ return sysfs_emit(buf, "%d\n", temp);
}
static ssize_t amdgpu_hwmon_show_hotspot_temp_thresh(struct device *dev,
@@ -2023,7 +2081,7 @@ static ssize_t amdgpu_hwmon_show_hotspot_temp_thresh(struct device *dev,
else
temp = adev->pm.dpm.thermal.max_hotspot_crit_temp;
- return snprintf(buf, PAGE_SIZE, "%d\n", temp);
+ return sysfs_emit(buf, "%d\n", temp);
}
static ssize_t amdgpu_hwmon_show_mem_temp_thresh(struct device *dev,
@@ -2039,7 +2097,7 @@ static ssize_t amdgpu_hwmon_show_mem_temp_thresh(struct device *dev,
else
temp = adev->pm.dpm.thermal.max_mem_crit_temp;
- return snprintf(buf, PAGE_SIZE, "%d\n", temp);
+ return sysfs_emit(buf, "%d\n", temp);
}
static ssize_t amdgpu_hwmon_show_temp_label(struct device *dev,
@@ -2051,7 +2109,7 @@ static ssize_t amdgpu_hwmon_show_temp_label(struct device *dev,
if (channel >= PP_TEMP_MAX)
return -EINVAL;
- return snprintf(buf, PAGE_SIZE, "%s\n", temp_label[channel].label);
+ return sysfs_emit(buf, "%s\n", temp_label[channel].label);
}
static ssize_t amdgpu_hwmon_show_temp_emergency(struct device *dev,
@@ -2077,7 +2135,7 @@ static ssize_t amdgpu_hwmon_show_temp_emergency(struct device *dev,
break;
}
- return snprintf(buf, PAGE_SIZE, "%d\n", temp);
+ return sysfs_emit(buf, "%d\n", temp);
}
static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
@@ -2090,6 +2148,8 @@ static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
if (amdgpu_in_reset(adev))
return -EPERM;
+ if (adev->in_suspend && !adev->in_runpm)
+ return -EPERM;
ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (ret < 0) {
@@ -2122,6 +2182,8 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
if (amdgpu_in_reset(adev))
return -EPERM;
+ if (adev->in_suspend && !adev->in_runpm)
+ return -EPERM;
err = kstrtoint(buf, 10, &value);
if (err)
@@ -2172,6 +2234,8 @@ static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
if (amdgpu_in_reset(adev))
return -EPERM;
+ if (adev->in_suspend && !adev->in_runpm)
+ return -EPERM;
err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (err < 0) {
@@ -2220,6 +2284,8 @@ static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
if (amdgpu_in_reset(adev))
return -EPERM;
+ if (adev->in_suspend && !adev->in_runpm)
+ return -EPERM;
err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (err < 0) {
@@ -2253,6 +2319,8 @@ static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
if (amdgpu_in_reset(adev))
return -EPERM;
+ if (adev->in_suspend && !adev->in_runpm)
+ return -EPERM;
err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (err < 0) {
@@ -2285,6 +2353,8 @@ static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev,
if (amdgpu_in_reset(adev))
return -EPERM;
+ if (adev->in_suspend && !adev->in_runpm)
+ return -EPERM;
r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (r < 0) {
@@ -2301,7 +2371,7 @@ static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev,
if (r)
return r;
- return snprintf(buf, PAGE_SIZE, "%d\n", min_rpm);
+ return sysfs_emit(buf, "%d\n", min_rpm);
}
static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev,
@@ -2315,6 +2385,8 @@ static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev,
if (amdgpu_in_reset(adev))
return -EPERM;
+ if (adev->in_suspend && !adev->in_runpm)
+ return -EPERM;
r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (r < 0) {
@@ -2331,7 +2403,7 @@ static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev,
if (r)
return r;
- return snprintf(buf, PAGE_SIZE, "%d\n", max_rpm);
+ return sysfs_emit(buf, "%d\n", max_rpm);
}
static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
@@ -2344,6 +2416,8 @@ static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
if (amdgpu_in_reset(adev))
return -EPERM;
+ if (adev->in_suspend && !adev->in_runpm)
+ return -EPERM;
err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (err < 0) {
@@ -2376,6 +2450,8 @@ static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
if (amdgpu_in_reset(adev))
return -EPERM;
+ if (adev->in_suspend && !adev->in_runpm)
+ return -EPERM;
err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (err < 0) {
@@ -2422,6 +2498,8 @@ static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev,
if (amdgpu_in_reset(adev))
return -EPERM;
+ if (adev->in_suspend && !adev->in_runpm)
+ return -EPERM;
ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (ret < 0) {
@@ -2455,6 +2533,8 @@ static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
if (amdgpu_in_reset(adev))
return -EPERM;
+ if (adev->in_suspend && !adev->in_runpm)
+ return -EPERM;
err = kstrtoint(buf, 10, &value);
if (err)
@@ -2496,6 +2576,8 @@ static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev,
if (amdgpu_in_reset(adev))
return -EPERM;
+ if (adev->in_suspend && !adev->in_runpm)
+ return -EPERM;
r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (r < 0) {
@@ -2513,14 +2595,14 @@ static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev,
if (r)
return r;
- return snprintf(buf, PAGE_SIZE, "%d\n", vddgfx);
+ return sysfs_emit(buf, "%d\n", vddgfx);
}
static ssize_t amdgpu_hwmon_show_vddgfx_label(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- return snprintf(buf, PAGE_SIZE, "vddgfx\n");
+ return sysfs_emit(buf, "vddgfx\n");
}
static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
@@ -2533,6 +2615,8 @@ static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
if (amdgpu_in_reset(adev))
return -EPERM;
+ if (adev->in_suspend && !adev->in_runpm)
+ return -EPERM;
/* only APUs have vddnb */
if (!(adev->flags & AMD_IS_APU))
@@ -2554,14 +2638,14 @@ static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
if (r)
return r;
- return snprintf(buf, PAGE_SIZE, "%d\n", vddnb);
+ return sysfs_emit(buf, "%d\n", vddnb);
}
static ssize_t amdgpu_hwmon_show_vddnb_label(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- return snprintf(buf, PAGE_SIZE, "vddnb\n");
+ return sysfs_emit(buf, "vddnb\n");
}
static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev,
@@ -2575,6 +2659,8 @@ static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev,
if (amdgpu_in_reset(adev))
return -EPERM;
+ if (adev->in_suspend && !adev->in_runpm)
+ return -EPERM;
r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (r < 0) {
@@ -2595,7 +2681,7 @@ static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev,
/* convert to microwatts */
uw = (query >> 8) * 1000000 + (query & 0xff) * 1000;
- return snprintf(buf, PAGE_SIZE, "%u\n", uw);
+ return sysfs_emit(buf, "%u\n", uw);
}
static ssize_t amdgpu_hwmon_show_power_cap_min(struct device *dev,
@@ -2619,6 +2705,8 @@ static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
if (amdgpu_in_reset(adev))
return -EPERM;
+ if (adev->in_suspend && !adev->in_runpm)
+ return -EPERM;
r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (r < 0) {
@@ -2656,6 +2744,8 @@ static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
if (amdgpu_in_reset(adev))
return -EPERM;
+ if (adev->in_suspend && !adev->in_runpm)
+ return -EPERM;
r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (r < 0) {
@@ -2693,6 +2783,8 @@ static ssize_t amdgpu_hwmon_show_power_cap_default(struct device *dev,
if (amdgpu_in_reset(adev))
return -EPERM;
+ if (adev->in_suspend && !adev->in_runpm)
+ return -EPERM;
r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (r < 0) {
@@ -2722,7 +2814,7 @@ static ssize_t amdgpu_hwmon_show_power_label(struct device *dev,
{
int limit_type = to_sensor_dev_attr(attr)->index;
- return snprintf(buf, PAGE_SIZE, "%s\n",
+ return sysfs_emit(buf, "%s\n",
limit_type == SMU_FAST_PPT_LIMIT ? "fastPPT" : "slowPPT");
}
@@ -2739,6 +2831,8 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
if (amdgpu_in_reset(adev))
return -EPERM;
+ if (adev->in_suspend && !adev->in_runpm)
+ return -EPERM;
if (amdgpu_sriov_vf(adev))
return -EINVAL;
@@ -2780,6 +2874,8 @@ static ssize_t amdgpu_hwmon_show_sclk(struct device *dev,
if (amdgpu_in_reset(adev))
return -EPERM;
+ if (adev->in_suspend && !adev->in_runpm)
+ return -EPERM;
r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (r < 0) {
@@ -2797,14 +2893,14 @@ static ssize_t amdgpu_hwmon_show_sclk(struct device *dev,
if (r)
return r;
- return snprintf(buf, PAGE_SIZE, "%u\n", sclk * 10 * 1000);
+ return sysfs_emit(buf, "%u\n", sclk * 10 * 1000);
}
static ssize_t amdgpu_hwmon_show_sclk_label(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- return snprintf(buf, PAGE_SIZE, "sclk\n");
+ return sysfs_emit(buf, "sclk\n");
}
static ssize_t amdgpu_hwmon_show_mclk(struct device *dev,
@@ -2817,6 +2913,8 @@ static ssize_t amdgpu_hwmon_show_mclk(struct device *dev,
if (amdgpu_in_reset(adev))
return -EPERM;
+ if (adev->in_suspend && !adev->in_runpm)
+ return -EPERM;
r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (r < 0) {
@@ -2834,14 +2932,14 @@ static ssize_t amdgpu_hwmon_show_mclk(struct device *dev,
if (r)
return r;
- return snprintf(buf, PAGE_SIZE, "%u\n", mclk * 10 * 1000);
+ return sysfs_emit(buf, "%u\n", mclk * 10 * 1000);
}
static ssize_t amdgpu_hwmon_show_mclk_label(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- return snprintf(buf, PAGE_SIZE, "mclk\n");
+ return sysfs_emit(buf, "mclk\n");
}
/**
@@ -3390,6 +3488,8 @@ static int amdgpu_debugfs_pm_info_show(struct seq_file *m, void *unused)
if (amdgpu_in_reset(adev))
return -EPERM;
+ if (adev->in_suspend && !adev->in_runpm)
+ return -EPERM;
r = pm_runtime_get_sync(dev->dev);
if (r < 0) {
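
Two changes recur throughout amdgpu_pm.c above: snprintf(buf, PAGE_SIZE, ...) becomes sysfs_emit(buf, ...), which knows the sysfs buffer is one PAGE_SIZE (and, in the kernel, also checks that buf is page-aligned), and every handler gains a guard rejecting access while the device is suspended outside runtime PM. A userspace sketch of both; sysfs_emit and the adev struct are mocked here:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE 4096

/* Mock of the kernel helper: formats into a PAGE_SIZE sysfs buffer. */
static int sysfs_emit(char *buf, const char *fmt, int value)
{
        return snprintf(buf, PAGE_SIZE, fmt, value);
}

struct mock_adev { bool in_suspend, in_runpm; };

static int show_busy_percent(struct mock_adev *adev, char *buf, int value)
{
        if (adev->in_suspend && !adev->in_runpm)
                return -EPERM;          /* the guard added throughout */
        return sysfs_emit(buf, "%d\n", value);
}

int main(void)
{
        char buf[PAGE_SIZE];
        struct mock_adev adev = { .in_suspend = false, .in_runpm = false };

        printf("%d bytes written\n", show_busy_percent(&adev, buf, 42));
        return 0;
}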
diff --git a/drivers/gpu/drm/amd/pm/inc/aldebaran_ppsmc.h b/drivers/gpu/drm/amd/pm/inc/aldebaran_ppsmc.h
index 433dd1e9ec4f..610266088ff1 100644
--- a/drivers/gpu/drm/amd/pm/inc/aldebaran_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/inc/aldebaran_ppsmc.h
@@ -100,7 +100,8 @@
#define PPSMC_MSG_SetSystemVirtualSTBtoDramAddrHigh 0x40
#define PPSMC_MSG_SetSystemVirtualSTBtoDramAddrLow 0x41
-#define PPSMC_Message_Count 0x42
+#define PPSMC_MSG_GfxDriverResetRecovery 0x42
+#define PPSMC_Message_Count 0x43
//PPSMC Reset Types
#define PPSMC_RESET_TYPE_WARM_RESET 0x00
diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h
index 25d5f03aaa67..8bb224f6c762 100644
--- a/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h
@@ -195,6 +195,11 @@ struct smu_user_dpm_profile {
uint32_t clk_dependency;
};
+enum smu_event_type {
+ SMU_EVENT_RESET_COMPLETE = 0,
+};
+
#define SMU_TABLE_INIT(tables, table_id, s, a, d) \
do { \
tables[table_id].size = s; \
@@ -338,7 +343,6 @@ struct smu_power_context {
struct smu_power_gate power_gate;
};
-
#define SMU_FEATURE_MAX (64)
struct smu_feature
{
@@ -807,6 +811,13 @@ struct pptable_funcs {
int (*check_fw_status)(struct smu_context *smu);
/**
+ * @set_mp1_state: put the SMU into the correct state for the
+ * coming resume from runpm or gpu reset.
+ */
+ int (*set_mp1_state)(struct smu_context *smu,
+ enum pp_mp1_state mp1_state);
+
+ /**
* @setup_pptable: Initialize the power play table and populate it with
* default values.
*/
@@ -1160,6 +1171,12 @@ struct pptable_funcs {
* @set_light_sbr: Set light sbr mode for the SMU.
*/
int (*set_light_sbr)(struct smu_context *smu, bool enable);
+
+ /**
+ * @wait_for_event: Wait for events from SMU.
+ */
+ int (*wait_for_event)(struct smu_context *smu,
+ enum smu_event_type event, uint64_t event_arg);
};
typedef enum {
@@ -1235,64 +1252,13 @@ enum smu_cmn2asic_mapping_type {
[profile] = {1, (workload)}
#if !defined(SWSMU_CODE_LAYER_L2) && !defined(SWSMU_CODE_LAYER_L3) && !defined(SWSMU_CODE_LAYER_L4)
-int smu_load_microcode(struct smu_context *smu);
-
-int smu_check_fw_status(struct smu_context *smu);
-
-int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled);
-
-int smu_set_fan_speed_rpm(void *handle, uint32_t speed);
-
int smu_get_power_limit(struct smu_context *smu,
uint32_t *limit,
enum smu_ppt_limit_level limit_level);
-int smu_set_power_limit(void *handle, uint32_t limit);
-int smu_print_ppclk_levels(void *handle, enum pp_clock_type type, char *buf);
-
-int smu_od_edit_dpm_table(void *handle,
- enum PP_OD_DPM_TABLE_COMMAND type,
- long *input, uint32_t size);
-
-int smu_read_sensor(void *handle, int sensor, void *data, int *size);
-int smu_get_power_profile_mode(void *handle, char *buf);
-int smu_set_power_profile_mode(void *handle, long *param, uint32_t param_size);
-u32 smu_get_fan_control_mode(void *handle);
-int smu_set_fan_control_mode(struct smu_context *smu, int value);
-void smu_pp_set_fan_control_mode(void *handle, u32 value);
-int smu_get_fan_speed_percent(void *handle, u32 *speed);
-int smu_set_fan_speed_percent(void *handle, u32 speed);
-int smu_get_fan_speed_rpm(void *handle, uint32_t *speed);
-
-int smu_set_deep_sleep_dcefclk(struct smu_context *smu, int clk);
-
-int smu_get_clock_by_type_with_latency(struct smu_context *smu,
- enum smu_clk_type clk_type,
- struct pp_clock_levels_with_latency *clocks);
-
-int smu_display_clock_voltage_request(struct smu_context *smu,
- struct pp_display_clock_request *clock_req);
-int smu_display_disable_memory_clock_switch(struct smu_context *smu, bool disable_memory_clock_switch);
-
-int smu_set_xgmi_pstate(void *handle,
- uint32_t pstate);
-
-int smu_set_azalia_d3_pme(struct smu_context *smu);
-
-bool smu_baco_is_support(struct smu_context *smu);
-int smu_get_baco_capability(void *handle, bool *cap);
-
-int smu_baco_get_state(struct smu_context *smu, enum smu_baco_state *state);
-
-int smu_baco_enter(struct smu_context *smu);
-int smu_baco_exit(struct smu_context *smu);
-int smu_baco_set_state(void *handle, int state);
-
-
bool smu_mode1_reset_is_support(struct smu_context *smu);
bool smu_mode2_reset_is_support(struct smu_context *smu);
int smu_mode1_reset(struct smu_context *smu);
-int smu_mode2_reset(void *handle);
extern const struct amd_ip_funcs smu_ip_funcs;
@@ -1302,68 +1268,24 @@ extern const struct amdgpu_ip_block_version smu_v13_0_ip_block;
bool is_support_sw_smu(struct amdgpu_device *adev);
bool is_support_cclk_dpm(struct amdgpu_device *adev);
-int smu_reset(struct smu_context *smu);
-int smu_sys_get_pp_table(void *handle, char **table);
-int smu_sys_set_pp_table(void *handle, const char *buf, size_t size);
-int smu_get_power_num_states(void *handle, struct pp_states_info *state_info);
-enum amd_pm_state_type smu_get_current_power_state(void *handle);
int smu_write_watermarks_table(struct smu_context *smu);
-int smu_set_watermarks_for_clock_ranges(
- struct smu_context *smu,
- struct pp_smu_wm_range_sets *clock_ranges);
-
-/* smu to display interface */
-extern int smu_display_configuration_change(struct smu_context *smu, const
- struct amd_pp_display_configuration
- *display_config);
-extern int smu_dpm_set_power_gate(void *handle, uint32_t block_type, bool gate);
-extern int smu_handle_task(struct smu_context *smu,
- enum amd_dpm_forced_level level,
- enum amd_pp_task task_id,
- bool lock_needed);
-extern int smu_handle_dpm_task(void *handle,
- enum amd_pp_task task_id,
- enum amd_pm_state_type *user_state);
-int smu_switch_power_profile(void *handle,
- enum PP_SMC_POWER_PROFILE type,
- bool en);
+
int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
uint32_t *min, uint32_t *max);
-u32 smu_get_mclk(void *handle, bool low);
-u32 smu_get_sclk(void *handle, bool low);
+
int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
uint32_t min, uint32_t max);
-enum amd_dpm_forced_level smu_get_performance_level(void *handle);
-int smu_force_performance_level(void *handle, enum amd_dpm_forced_level level);
-int smu_set_display_count(struct smu_context *smu, uint32_t count);
-int smu_set_ac_dc(struct smu_context *smu);
-int smu_sys_get_pp_feature_mask(void *handle, char *buf);
-int smu_sys_set_pp_feature_mask(void *handle, uint64_t new_mask);
-int smu_force_ppclk_levels(void *handle, enum pp_clock_type type, uint32_t mask);
-int smu_set_mp1_state(void *handle,
- enum pp_mp1_state mp1_state);
-int smu_set_df_cstate(void *handle,
- enum pp_df_cstate state);
-int smu_allow_xgmi_power_down(struct smu_context *smu, bool en);
-int smu_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
- struct pp_smu_nv_clock_table *max_clocks);
-
-int smu_get_uclk_dpm_states(struct smu_context *smu,
- unsigned int *clock_values_in_khz,
- unsigned int *num_states);
+int smu_set_ac_dc(struct smu_context *smu);
-int smu_get_dpm_clock_table(struct smu_context *smu,
- struct dpm_clocks *clock_table);
+int smu_allow_xgmi_power_down(struct smu_context *smu, bool en);
int smu_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value);
-ssize_t smu_sys_get_gpu_metrics(void *handle, void **table);
-
-int smu_enable_mgpu_fan_boost(void *handle);
-int smu_gfx_state_change_set(struct smu_context *smu, uint32_t state);
-
int smu_set_light_sbr(struct smu_context *smu, bool enable);
+int smu_wait_for_event(struct amdgpu_device *adev, enum smu_event_type event,
+ uint64_t event_arg);
+
#endif
#endif
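After this change the header keeps only the handful of smu_* entry points other parts of the driver still call directly; everything else becomes static in amdgpu_smu.c and is reached through the amd_pm_funcs table. A hedged caller sketch (pp_funcs/pp_handle as used elsewhere in amdgpu; the set_power_limit hook is illustrative):

    /* Hedged sketch: with most smu_* symbols now static, external code
     * goes through the amd_pm_funcs table instead. */
    const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
    int ret = -EOPNOTSUPP;

    if (pp_funcs && pp_funcs->set_power_limit)
            ret = pp_funcs->set_power_limit(adev->powerplay.pp_handle, limit);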
diff --git a/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_vangogh.h b/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_vangogh.h
index 6e23a3f803a7..8361ebd8d876 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_vangogh.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_vangogh.h
@@ -26,7 +26,7 @@
// *** IMPORTANT ***
// SMU TEAM: Always increment the interface version if
// any structure is changed in this file
-#define SMU13_DRIVER_IF_VERSION 2
+#define SMU13_DRIVER_IF_VERSION 3
typedef struct {
int32_t value;
@@ -191,6 +191,44 @@ typedef struct {
uint16_t SocTemperature; //[centi-Celsius]
uint16_t EdgeTemperature;
uint16_t ThrottlerStatus;
+} SmuMetrics_legacy_t;
+
+typedef struct {
+ uint16_t GfxclkFrequency; //[MHz]
+ uint16_t SocclkFrequency; //[MHz]
+ uint16_t VclkFrequency; //[MHz]
+ uint16_t DclkFrequency; //[MHz]
+ uint16_t MemclkFrequency; //[MHz]
+ uint16_t spare;
+
+ uint16_t GfxActivity; //[centi]
+ uint16_t UvdActivity; //[centi]
+ uint16_t C0Residency[4]; //percentage
+
+ uint16_t Voltage[3]; //[mV] indices: VDDCR_VDD, VDDCR_SOC, VDDCR_GFX
+ uint16_t Current[3]; //[mA] indices: VDDCR_VDD, VDDCR_SOC, VDDCR_GFX
+ uint16_t Power[3]; //[mW] indices: VDDCR_VDD, VDDCR_SOC, VDDCR_GFX
+ uint16_t CurrentSocketPower; //[mW]
+
+ //3rd party tools in Windows need info in the case of APUs
+ uint16_t CoreFrequency[4]; //[MHz]
+ uint16_t CorePower[4]; //[mW]
+ uint16_t CoreTemperature[4]; //[centi-Celsius]
+ uint16_t L3Frequency[1]; //[MHz]
+ uint16_t L3Temperature[1]; //[centi-Celsius]
+
+ uint16_t GfxTemperature; //[centi-Celsius]
+ uint16_t SocTemperature; //[centi-Celsius]
+ uint16_t EdgeTemperature;
+ uint16_t ThrottlerStatus;
+} SmuMetricsTable_t;
+
+typedef struct {
+ SmuMetricsTable_t Current;
+ SmuMetricsTable_t Average;
+ //uint32_t AccCnt;
+ uint32_t SampleStartTime;
+ uint32_t SampleStopTime;
} SmuMetrics_t;
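Interface version 3 wraps two full SmuMetricsTable_t snapshots, so readers now pick the instantaneous or windowed view explicitly. A minimal sketch, assuming the layout above:

    /* Minimal sketch, assuming the v3 layout above: instantaneous vs
     * windowed readings come from different nested tables. */
    SmuMetrics_t m = {0};
    uint16_t cur_gfx_mhz = m.Current.GfxclkFrequency;
    uint16_t avg_gfx_mhz = m.Average.GfxclkFrequency;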
diff --git a/drivers/gpu/drm/amd/pm/inc/smu13_driver_if_aldebaran.h b/drivers/gpu/drm/amd/pm/inc/smu13_driver_if_aldebaran.h
index df2ead254f37..d23533bda002 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu13_driver_if_aldebaran.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu13_driver_if_aldebaran.h
@@ -435,8 +435,12 @@ typedef struct {
uint8_t GpioI2cSda; // Serial Data
uint16_t spare5;
+ uint16_t XgmiMaxCurrent; // in Amps
+ int8_t XgmiOffset; // in Amps
+ uint8_t Padding_TelemetryXgmi;
+
//reserved
- uint32_t reserved[16];
+ uint32_t reserved[15];
} PPTable_t;
@@ -481,7 +485,10 @@ typedef struct {
uint16_t TemperatureAllHBM[4] ;
uint32_t GfxBusyAcc ;
uint32_t DramBusyAcc ;
- uint32_t Spare[4];
+ uint32_t EnergyAcc64bitLow ; //15.259uJ resolution
+ uint32_t EnergyAcc64bitHigh ;
+ uint32_t TimeStampLow ; //10ns resolution
+ uint32_t TimeStampHigh ;
// Padding - ignore
uint32_t MmHubPadding[8]; // SMU internal use
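The four new words split a 64-bit energy accumulator and a 64-bit timestamp across Low/High pairs. A hedged helper that reassembles the counter and applies the stated 15.259 uJ resolution (integer approximation 15259/1000; assumes the product fits in 64 bits):

    /* Hedged helper: reassemble the split counter and convert counts to
     * microjoules at the 15.259 uJ/count resolution noted above. */
    static inline uint64_t alde_energy_uj(uint32_t lo, uint32_t hi)
    {
            uint64_t acc = ((uint64_t)hi << 32) | lo;
            return acc * 15259 / 1000;
    }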
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_types.h b/drivers/gpu/drm/amd/pm/inc/smu_types.h
index 5bfb60f41dd4..89a16dcd0fff 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_types.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu_types.h
@@ -225,6 +225,7 @@
__SMU_DUMMY_MAP(DisableDeterminism), \
__SMU_DUMMY_MAP(SetUclkDpmMode), \
__SMU_DUMMY_MAP(LightSBR), \
+ __SMU_DUMMY_MAP(GfxDriverResetRecovery),
#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type) SMU_MSG_##type
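The message table is an X-macro: the same __SMU_DUMMY_MAP entries expand once into enum values (as redefined above) and once into name strings for debug output, so a new message like GfxDriverResetRecovery needs only the single list edit. A standalone sketch of the pattern (list and names hypothetical):

    #define MSG_LIST \
            X(LightSBR) \
            X(GfxDriverResetRecovery)

    #define X(n) SMU_MSG_##n,
    enum smu_msg { MSG_LIST SMU_MSG_MAX };   /* enum values */
    #undef X

    #define X(n) #n,
    static const char *smu_msg_names[] = { MSG_LIST };   /* name strings */
    #undef X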
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h b/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h
index ad4db2edf1fb..bb55a96f98e9 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h
@@ -32,7 +32,7 @@
#define SMU11_DRIVER_IF_VERSION_NV14 0x38
#define SMU11_DRIVER_IF_VERSION_Sienna_Cichlid 0x3D
#define SMU11_DRIVER_IF_VERSION_Navy_Flounder 0xE
-#define SMU11_DRIVER_IF_VERSION_VANGOGH 0x02
+#define SMU11_DRIVER_IF_VERSION_VANGOGH 0x03
#define SMU11_DRIVER_IF_VERSION_Dimgrey_Cavefish 0xF
/* MP Apertures */
@@ -61,8 +61,8 @@
#define LINK_WIDTH_MAX 6
#define LINK_SPEED_MAX 3
-static __maybe_unused uint16_t link_width[] = {0, 1, 2, 4, 8, 12, 16};
-static __maybe_unused uint16_t link_speed[] = {25, 50, 80, 160};
+static const __maybe_unused uint16_t link_width[] = {0, 1, 2, 4, 8, 12, 16};
+static const __maybe_unused uint16_t link_speed[] = {25, 50, 80, 160};
static const
struct smu_temperature_range __maybe_unused smu11_thermal_policy[] =
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v12_0.h b/drivers/gpu/drm/amd/pm/inc/smu_v12_0.h
index 02de3b6199e5..1ad2dff71090 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_v12_0.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu_v12_0.h
@@ -60,5 +60,7 @@ int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_
int smu_v12_0_set_driver_table_location(struct smu_context *smu);
+int smu_v12_0_get_vbios_bootup_values(struct smu_context *smu);
+
#endif
#endif
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/inc/smu_v13_0.h
index 80208e1eefc9..8145e1cbf181 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_v13_0.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu_v13_0.h
@@ -26,7 +26,7 @@
#include "amdgpu_smu.h"
#define SMU13_DRIVER_IF_VERSION_INV 0xFFFFFFFF
-#define SMU13_DRIVER_IF_VERSION_ALDE 0x5
+#define SMU13_DRIVER_IF_VERSION_ALDE 0x6
/* MP Apertures */
#define MP0_Public 0x03800000
@@ -268,5 +268,8 @@ int smu_v13_0_get_current_pcie_link_speed(struct smu_context *smu);
int smu_v13_0_gfx_ulv_control(struct smu_context *smu,
bool enablement);
+int smu_v13_0_wait_for_event(struct smu_context *smu, enum smu_event_type event,
+ uint64_t event_arg);
+
#endif
#endif
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
index f5d59fa3a030..f5fe540cd536 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
@@ -1297,19 +1297,18 @@ static int smu10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
*size = 4;
break;
case AMDGPU_PP_SENSOR_GPU_LOAD:
- if (has_gfx_busy) {
+ if (!has_gfx_busy)
+ ret = -EOPNOTSUPP;
+ else {
ret = smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_GetGfxBusy,
&activity_percent);
if (!ret)
- activity_percent = activity_percent > 100 ? 100 : activity_percent;
+ *((uint32_t *)value) = min(activity_percent, (u32)100);
else
- return -EIO;
- *((uint32_t *)value) = activity_percent;
- return 0;
- } else {
- return -EOPNOTSUPP;
+ ret = -EIO;
}
+ break;
default:
ret = -EOPNOTSUPP;
break;
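The rewrite clamps the SMU-reported busy figure and routes every outcome through the shared break instead of returning from inside the switch. The clamp is equivalent to:

    /* Equivalent clamp, spelled out (sketch): cap the reported load at
     * 100 before handing it to userspace. */
    uint32_t clamped = activity_percent > 100 ? 100 : activity_percent;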
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
index 7edafef095a3..0541bfc81c1b 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
@@ -1224,7 +1224,8 @@ static int smu7_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
(hwmgr->chip_id == CHIP_POLARIS10) ||
(hwmgr->chip_id == CHIP_POLARIS11) ||
(hwmgr->chip_id == CHIP_POLARIS12) ||
- (hwmgr->chip_id == CHIP_TONGA))
+ (hwmgr->chip_id == CHIP_TONGA) ||
+ (hwmgr->chip_id == CHIP_TOPAZ))
PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1);
@@ -3330,7 +3331,8 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
disable_mclk_switching_for_display = ((1 < hwmgr->display_config->num_display) &&
!hwmgr->display_config->multi_monitor_in_sync) ||
- smu7_vblank_too_short(hwmgr, hwmgr->display_config->min_vblank_time);
+ (hwmgr->display_config->num_display &&
+ smu7_vblank_too_short(hwmgr, hwmgr->display_config->min_vblank_time));
disable_mclk_switching = disable_mclk_switching_for_frame_lock ||
disable_mclk_switching_for_display;
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
index b6d7b7b224a9..1a097e608808 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
@@ -52,8 +52,8 @@
#define LINK_WIDTH_MAX 6
#define LINK_SPEED_MAX 3
-static int link_width[] = {0, 1, 2, 4, 8, 12, 16};
-static int link_speed[] = {25, 50, 80, 160};
+static const int link_width[] = {0, 1, 2, 4, 8, 12, 16};
+static const int link_speed[] = {25, 50, 80, 160};
static int vega12_force_clock_level(struct pp_hwmgr *hwmgr,
enum pp_clock_type type, uint32_t mask);
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
index 213c9c6b4462..d3177a534fdf 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
@@ -57,8 +57,8 @@
#define LINK_WIDTH_MAX 6
#define LINK_SPEED_MAX 3
-static int link_width[] = {0, 1, 2, 4, 8, 12, 16};
-static int link_speed[] = {25, 50, 80, 160};
+static const int link_width[] = {0, 1, 2, 4, 8, 12, 16};
+static const int link_speed[] = {25, 50, 80, 160};
static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr)
{
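The const-qualification applied here, in vega12_hwmgr.c and in smu_v11_0.h moves these lookup tables into read-only data and makes accidental writes a compile error. A sketch of the bounds-checked read that remains valid (helper name hypothetical; ARRAY_SIZE is the usual kernel macro):

    /* Sketch: with the tables const, a bounds-checked accessor is the
     * only sanctioned way to read them. */
    static int pcie_width_from_level(unsigned int level)
    {
            if (level >= ARRAY_SIZE(link_width))
                    return -EINVAL;
            return link_width[level];
    }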
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index e722adcf2f53..c29d8b3131b7 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -51,8 +51,19 @@ static const struct amd_pm_funcs swsmu_pm_funcs;
static int smu_force_smuclk_levels(struct smu_context *smu,
enum smu_clk_type clk_type,
uint32_t mask);
-
-int smu_sys_get_pp_feature_mask(void *handle, char *buf)
+static int smu_handle_task(struct smu_context *smu,
+ enum amd_dpm_forced_level level,
+ enum amd_pp_task task_id,
+ bool lock_needed);
+static int smu_reset(struct smu_context *smu);
+static int smu_set_fan_speed_percent(void *handle, u32 speed);
+static int smu_set_fan_control_mode(struct smu_context *smu, int value);
+static int smu_set_power_limit(void *handle, uint32_t limit);
+static int smu_set_fan_speed_rpm(void *handle, uint32_t speed);
+static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled);
+
+static int smu_sys_get_pp_feature_mask(void *handle,
+ char *buf)
{
struct smu_context *smu = handle;
int size = 0;
@@ -69,7 +80,8 @@ int smu_sys_get_pp_feature_mask(void *handle, char *buf)
return size;
}
-int smu_sys_set_pp_feature_mask(void *handle, uint64_t new_mask)
+static int smu_sys_set_pp_feature_mask(void *handle,
+ uint64_t new_mask)
{
struct smu_context *smu = handle;
int ret = 0;
@@ -142,7 +154,7 @@ int smu_get_dpm_freq_range(struct smu_context *smu,
return ret;
}
-u32 smu_get_mclk(void *handle, bool low)
+static u32 smu_get_mclk(void *handle, bool low)
{
struct smu_context *smu = handle;
uint32_t clk_freq;
@@ -156,7 +168,7 @@ u32 smu_get_mclk(void *handle, bool low)
return clk_freq * 100;
}
-u32 smu_get_sclk(void *handle, bool low)
+static u32 smu_get_sclk(void *handle, bool low)
{
struct smu_context *smu = handle;
uint32_t clk_freq;
@@ -256,8 +268,9 @@ static int smu_dpm_set_jpeg_enable(struct smu_context *smu,
 * In this case, the smu->mutex lock protection is already enforced on
* the parent API smu_force_performance_level of the call path.
*/
-int smu_dpm_set_power_gate(void *handle, uint32_t block_type,
- bool gate)
+static int smu_dpm_set_power_gate(void *handle,
+ uint32_t block_type,
+ bool gate)
{
struct smu_context *smu = handle;
int ret = 0;
@@ -406,8 +419,8 @@ static void smu_restore_dpm_user_profile(struct smu_context *smu)
smu->user_dpm_profile.flags &= ~SMU_DPM_USER_PROFILE_RESTORE;
}
-int smu_get_power_num_states(void *handle,
- struct pp_states_info *state_info)
+static int smu_get_power_num_states(void *handle,
+ struct pp_states_info *state_info)
{
if (!state_info)
return -EINVAL;
@@ -442,7 +455,8 @@ bool is_support_cclk_dpm(struct amdgpu_device *adev)
}
-int smu_sys_get_pp_table(void *handle, char **table)
+static int smu_sys_get_pp_table(void *handle,
+ char **table)
{
struct smu_context *smu = handle;
struct smu_table_context *smu_table = &smu->smu_table;
@@ -468,7 +482,9 @@ int smu_sys_get_pp_table(void *handle, char **table)
return powerplay_table_size;
}
-int smu_sys_set_pp_table(void *handle, const char *buf, size_t size)
+static int smu_sys_set_pp_table(void *handle,
+ const char *buf,
+ size_t size)
{
struct smu_context *smu = handle;
struct smu_table_context *smu_table = &smu->smu_table;
@@ -632,6 +648,7 @@ err0_out:
return ret;
}
+
static int smu_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -1337,7 +1354,7 @@ static int smu_disable_dpms(struct smu_context *smu)
bool use_baco = !smu->is_apu &&
((amdgpu_in_reset(adev) &&
(amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) ||
- ((adev->in_runpm || adev->in_hibernate) && amdgpu_asic_supports_baco(adev)));
+ ((adev->in_runpm || adev->in_s4) && amdgpu_asic_supports_baco(adev)));
/*
* For custom pptable uploading, skip the DPM features
@@ -1430,7 +1447,7 @@ static int smu_hw_fini(void *handle)
return smu_smc_hw_cleanup(smu);
}
-int smu_reset(struct smu_context *smu)
+static int smu_reset(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
int ret;
@@ -1474,7 +1491,8 @@ static int smu_suspend(void *handle)
smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);
- if (smu->is_apu)
+ /* skip CGPG when in S0ix */
+ if (smu->is_apu && !adev->in_s0ix)
smu_set_gfx_cgpg(&adev->smu, false);
return 0;
@@ -1518,9 +1536,10 @@ static int smu_resume(void *handle)
return 0;
}
-int smu_display_configuration_change(struct smu_context *smu,
- const struct amd_pp_display_configuration *display_config)
+static int smu_display_configuration_change(void *handle,
+ const struct amd_pp_display_configuration *display_config)
{
+ struct smu_context *smu = handle;
int index = 0;
int num_of_active_display = 0;
@@ -1676,10 +1695,10 @@ static int smu_adjust_power_state_dynamic(struct smu_context *smu,
return ret;
}
-int smu_handle_task(struct smu_context *smu,
- enum amd_dpm_forced_level level,
- enum amd_pp_task task_id,
- bool lock_needed)
+static int smu_handle_task(struct smu_context *smu,
+ enum amd_dpm_forced_level level,
+ enum amd_pp_task task_id,
+ bool lock_needed)
{
int ret = 0;
@@ -1711,9 +1730,9 @@ out:
return ret;
}
-int smu_handle_dpm_task(void *handle,
- enum amd_pp_task task_id,
- enum amd_pm_state_type *user_state)
+static int smu_handle_dpm_task(void *handle,
+ enum amd_pp_task task_id,
+ enum amd_pm_state_type *user_state)
{
struct smu_context *smu = handle;
struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
@@ -1722,10 +1741,9 @@ int smu_handle_dpm_task(void *handle,
}
-
-int smu_switch_power_profile(void *handle,
- enum PP_SMC_POWER_PROFILE type,
- bool en)
+static int smu_switch_power_profile(void *handle,
+ enum PP_SMC_POWER_PROFILE type,
+ bool en)
{
struct smu_context *smu = handle;
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
@@ -1761,7 +1779,7 @@ int smu_switch_power_profile(void *handle,
return 0;
}
-enum amd_dpm_forced_level smu_get_performance_level(void *handle)
+static enum amd_dpm_forced_level smu_get_performance_level(void *handle)
{
struct smu_context *smu = handle;
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
@@ -1780,7 +1798,8 @@ enum amd_dpm_forced_level smu_get_performance_level(void *handle)
return level;
}
-int smu_force_performance_level(void *handle, enum amd_dpm_forced_level level)
+static int smu_force_performance_level(void *handle,
+ enum amd_dpm_forced_level level)
{
struct smu_context *smu = handle;
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
@@ -1815,8 +1834,9 @@ int smu_force_performance_level(void *handle, enum amd_dpm_forced_level level)
return ret;
}
-int smu_set_display_count(struct smu_context *smu, uint32_t count)
+static int smu_set_display_count(void *handle, uint32_t count)
{
+ struct smu_context *smu = handle;
int ret = 0;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
@@ -1859,7 +1879,9 @@ static int smu_force_smuclk_levels(struct smu_context *smu,
return ret;
}
-int smu_force_ppclk_levels(void *handle, enum pp_clock_type type, uint32_t mask)
+static int smu_force_ppclk_levels(void *handle,
+ enum pp_clock_type type,
+ uint32_t mask)
{
struct smu_context *smu = handle;
enum smu_clk_type clk_type;
@@ -1903,48 +1925,28 @@ int smu_force_ppclk_levels(void *handle, enum pp_clock_type type, uint32_t mask)
* However, the mp1 state setting should still be granted
 * even if dpm_enabled has been cleared.
*/
-int smu_set_mp1_state(void *handle,
- enum pp_mp1_state mp1_state)
+static int smu_set_mp1_state(void *handle,
+ enum pp_mp1_state mp1_state)
{
struct smu_context *smu = handle;
- uint16_t msg;
- int ret;
+ int ret = 0;
if (!smu->pm_enabled)
return -EOPNOTSUPP;
mutex_lock(&smu->mutex);
- switch (mp1_state) {
- case PP_MP1_STATE_SHUTDOWN:
- msg = SMU_MSG_PrepareMp1ForShutdown;
- break;
- case PP_MP1_STATE_UNLOAD:
- msg = SMU_MSG_PrepareMp1ForUnload;
- break;
- case PP_MP1_STATE_RESET:
- msg = SMU_MSG_PrepareMp1ForReset;
- break;
- case PP_MP1_STATE_NONE:
- default:
- mutex_unlock(&smu->mutex);
- return 0;
- }
-
- ret = smu_send_smc_msg(smu, msg, NULL);
- /* some asics may not support those messages */
- if (ret == -EINVAL)
- ret = 0;
- if (ret)
- dev_err(smu->adev->dev, "[PrepareMp1] Failed!\n");
+ if (smu->ppt_funcs &&
+ smu->ppt_funcs->set_mp1_state)
+ ret = smu->ppt_funcs->set_mp1_state(smu, mp1_state);
mutex_unlock(&smu->mutex);
return ret;
}
-int smu_set_df_cstate(void *handle,
- enum pp_df_cstate state)
+static int smu_set_df_cstate(void *handle,
+ enum pp_df_cstate state)
{
struct smu_context *smu = handle;
int ret = 0;
@@ -2003,9 +2005,10 @@ int smu_write_watermarks_table(struct smu_context *smu)
return ret;
}
-int smu_set_watermarks_for_clock_ranges(struct smu_context *smu,
- struct pp_smu_wm_range_sets *clock_ranges)
+static int smu_set_watermarks_for_clock_ranges(void *handle,
+ struct pp_smu_wm_range_sets *clock_ranges)
{
+ struct smu_context *smu = handle;
int ret = 0;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
@@ -2092,41 +2095,39 @@ const struct amdgpu_ip_block_version smu_v13_0_ip_block =
.funcs = &smu_ip_funcs,
};
-int smu_load_microcode(struct smu_context *smu)
+static int smu_load_microcode(void *handle)
{
+ struct smu_context *smu = handle;
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
- if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+ if (!smu->pm_enabled)
return -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
+ /* This should be used for non-PSP firmware loading */
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)
+ return 0;
- if (smu->ppt_funcs->load_microcode)
+ if (smu->ppt_funcs->load_microcode) {
ret = smu->ppt_funcs->load_microcode(smu);
+ if (ret) {
+ dev_err(adev->dev, "Load microcode failed\n");
+ return ret;
+ }
+ }
- mutex_unlock(&smu->mutex);
-
- return ret;
-}
-
-int smu_check_fw_status(struct smu_context *smu)
-{
- int ret = 0;
-
- if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
- return -EOPNOTSUPP;
-
- mutex_lock(&smu->mutex);
-
- if (smu->ppt_funcs->check_fw_status)
+ if (smu->ppt_funcs->check_fw_status) {
ret = smu->ppt_funcs->check_fw_status(smu);
-
- mutex_unlock(&smu->mutex);
+ if (ret) {
+ dev_err(adev->dev, "SMC is not ready\n");
+ return ret;
+ }
+ }
return ret;
}
-int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled)
+static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled)
{
int ret = 0;
@@ -2140,7 +2141,7 @@ int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled)
return ret;
}
-int smu_set_fan_speed_rpm(void *handle, uint32_t speed)
+static int smu_set_fan_speed_rpm(void *handle, uint32_t speed)
{
struct smu_context *smu = handle;
u32 percent;
@@ -2199,7 +2200,7 @@ int smu_get_power_limit(struct smu_context *smu,
return ret;
}
-int smu_set_power_limit(void *handle, uint32_t limit)
+static int smu_set_power_limit(void *handle, uint32_t limit)
{
struct smu_context *smu = handle;
uint32_t limit_type = limit >> 24;
@@ -2220,6 +2221,7 @@ int smu_set_power_limit(void *handle, uint32_t limit)
dev_err(smu->adev->dev,
"New power limit (%d) is over the max allowed %d\n",
limit, smu->max_power_limit);
+ ret = -EINVAL;
goto out;
}
@@ -2255,7 +2257,9 @@ static int smu_print_smuclk_levels(struct smu_context *smu, enum smu_clk_type cl
return ret;
}
-int smu_print_ppclk_levels(void *handle, enum pp_clock_type type, char *buf)
+static int smu_print_ppclk_levels(void *handle,
+ enum pp_clock_type type,
+ char *buf)
{
struct smu_context *smu = handle;
enum smu_clk_type clk_type;
@@ -2296,9 +2300,9 @@ int smu_print_ppclk_levels(void *handle, enum pp_clock_type type, char *buf)
return smu_print_smuclk_levels(smu, clk_type, buf);
}
-int smu_od_edit_dpm_table(void *handle,
- enum PP_OD_DPM_TABLE_COMMAND type,
- long *input, uint32_t size)
+static int smu_od_edit_dpm_table(void *handle,
+ enum PP_OD_DPM_TABLE_COMMAND type,
+ long *input, uint32_t size)
{
struct smu_context *smu = handle;
int ret = 0;
@@ -2317,7 +2321,10 @@ int smu_od_edit_dpm_table(void *handle,
return ret;
}
-int smu_read_sensor(void *handle, int sensor, void *data, int *size_arg)
+static int smu_read_sensor(void *handle,
+ int sensor,
+ void *data,
+ int *size_arg)
{
struct smu_context *smu = handle;
struct smu_umd_pstate_table *pstate_table =
@@ -2384,7 +2391,7 @@ unlock:
return ret;
}
-int smu_get_power_profile_mode(void *handle, char *buf)
+static int smu_get_power_profile_mode(void *handle, char *buf)
{
struct smu_context *smu = handle;
int ret = 0;
@@ -2402,7 +2409,9 @@ int smu_get_power_profile_mode(void *handle, char *buf)
return ret;
}
-int smu_set_power_profile_mode(void *handle, long *param, uint32_t param_size)
+static int smu_set_power_profile_mode(void *handle,
+ long *param,
+ uint32_t param_size)
{
struct smu_context *smu = handle;
int ret = 0;
@@ -2420,7 +2429,7 @@ int smu_set_power_profile_mode(void *handle, long *param, uint32_t param_size)
}
-u32 smu_get_fan_control_mode(void *handle)
+static u32 smu_get_fan_control_mode(void *handle)
{
struct smu_context *smu = handle;
u32 ret = 0;
@@ -2438,7 +2447,7 @@ u32 smu_get_fan_control_mode(void *handle)
return ret;
}
-int smu_set_fan_control_mode(struct smu_context *smu, int value)
+static int smu_set_fan_control_mode(struct smu_context *smu, int value)
{
int ret = 0;
@@ -2463,14 +2472,15 @@ int smu_set_fan_control_mode(struct smu_context *smu, int value)
return ret;
}
-void smu_pp_set_fan_control_mode(void *handle, u32 value) {
+static void smu_pp_set_fan_control_mode(void *handle, u32 value)
+{
struct smu_context *smu = handle;
smu_set_fan_control_mode(smu, value);
}
-int smu_get_fan_speed_percent(void *handle, u32 *speed)
+static int smu_get_fan_speed_percent(void *handle, u32 *speed)
{
struct smu_context *smu = handle;
int ret = 0;
@@ -2494,7 +2504,7 @@ int smu_get_fan_speed_percent(void *handle, u32 *speed)
return ret;
}
-int smu_set_fan_speed_percent(void *handle, u32 speed)
+static int smu_set_fan_speed_percent(void *handle, u32 speed)
{
struct smu_context *smu = handle;
int ret = 0;
@@ -2517,7 +2527,7 @@ int smu_set_fan_speed_percent(void *handle, u32 speed)
return ret;
}
-int smu_get_fan_speed_rpm(void *handle, uint32_t *speed)
+static int smu_get_fan_speed_rpm(void *handle, uint32_t *speed)
{
struct smu_context *smu = handle;
int ret = 0;
@@ -2538,8 +2548,9 @@ int smu_get_fan_speed_rpm(void *handle, uint32_t *speed)
return ret;
}
-int smu_set_deep_sleep_dcefclk(struct smu_context *smu, int clk)
+static int smu_set_deep_sleep_dcefclk(void *handle, uint32_t clk)
{
+ struct smu_context *smu = handle;
int ret = 0;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
@@ -2554,10 +2565,12 @@ int smu_set_deep_sleep_dcefclk(struct smu_context *smu, int clk)
return ret;
}
-int smu_get_clock_by_type_with_latency(struct smu_context *smu,
- enum smu_clk_type clk_type,
- struct pp_clock_levels_with_latency *clocks)
+static int smu_get_clock_by_type_with_latency(void *handle,
+ enum amd_pp_clock_type type,
+ struct pp_clock_levels_with_latency *clocks)
{
+ struct smu_context *smu = handle;
+ enum smu_clk_type clk_type;
int ret = 0;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
@@ -2565,17 +2578,38 @@ int smu_get_clock_by_type_with_latency(struct smu_context *smu,
mutex_lock(&smu->mutex);
- if (smu->ppt_funcs->get_clock_by_type_with_latency)
+ if (smu->ppt_funcs->get_clock_by_type_with_latency) {
+ switch (type) {
+ case amd_pp_sys_clock:
+ clk_type = SMU_GFXCLK;
+ break;
+ case amd_pp_mem_clock:
+ clk_type = SMU_MCLK;
+ break;
+ case amd_pp_dcef_clock:
+ clk_type = SMU_DCEFCLK;
+ break;
+ case amd_pp_disp_clock:
+ clk_type = SMU_DISPCLK;
+ break;
+ default:
+ dev_err(smu->adev->dev, "Invalid clock type!\n");
+ mutex_unlock(&smu->mutex);
+ return -EINVAL;
+ }
+
ret = smu->ppt_funcs->get_clock_by_type_with_latency(smu, clk_type, clocks);
+ }
mutex_unlock(&smu->mutex);
return ret;
}
-int smu_display_clock_voltage_request(struct smu_context *smu,
- struct pp_display_clock_request *clock_req)
+static int smu_display_clock_voltage_request(void *handle,
+ struct pp_display_clock_request *clock_req)
{
+ struct smu_context *smu = handle;
int ret = 0;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
@@ -2592,8 +2626,10 @@ int smu_display_clock_voltage_request(struct smu_context *smu,
}
-int smu_display_disable_memory_clock_switch(struct smu_context *smu, bool disable_memory_clock_switch)
+static int smu_display_disable_memory_clock_switch(void *handle,
+ bool disable_memory_clock_switch)
{
+ struct smu_context *smu = handle;
int ret = -EINVAL;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
@@ -2609,8 +2645,8 @@ int smu_display_disable_memory_clock_switch(struct smu_context *smu, bool disabl
return ret;
}
-int smu_set_xgmi_pstate(void *handle,
- uint32_t pstate)
+static int smu_set_xgmi_pstate(void *handle,
+ uint32_t pstate)
{
struct smu_context *smu = handle;
int ret = 0;
@@ -2631,49 +2667,7 @@ int smu_set_xgmi_pstate(void *handle,
return ret;
}
-int smu_set_azalia_d3_pme(struct smu_context *smu)
-{
- int ret = 0;
-
- if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
- return -EOPNOTSUPP;
-
- mutex_lock(&smu->mutex);
-
- if (smu->ppt_funcs->set_azalia_d3_pme)
- ret = smu->ppt_funcs->set_azalia_d3_pme(smu);
-
- mutex_unlock(&smu->mutex);
-
- return ret;
-}
-
-/*
- * On system suspending or resetting, the dpm_enabled
- * flag will be cleared. So that those SMU services which
- * are not supported will be gated.
- *
- * However, the baco/mode1 reset should still be granted
- * as they are still supported and necessary.
- */
-bool smu_baco_is_support(struct smu_context *smu)
-{
- bool ret = false;
-
- if (!smu->pm_enabled)
- return false;
-
- mutex_lock(&smu->mutex);
-
- if (smu->ppt_funcs && smu->ppt_funcs->baco_is_support)
- ret = smu->ppt_funcs->baco_is_support(smu);
-
- mutex_unlock(&smu->mutex);
-
- return ret;
-}
-
-int smu_get_baco_capability(void *handle, bool *cap)
+static int smu_get_baco_capability(void *handle, bool *cap)
{
struct smu_context *smu = handle;
int ret = 0;
@@ -2693,60 +2687,7 @@ int smu_get_baco_capability(void *handle, bool *cap)
return ret;
}
-
-int smu_baco_get_state(struct smu_context *smu, enum smu_baco_state *state)
-{
- if (smu->ppt_funcs->baco_get_state)
- return -EINVAL;
-
- mutex_lock(&smu->mutex);
- *state = smu->ppt_funcs->baco_get_state(smu);
- mutex_unlock(&smu->mutex);
-
- return 0;
-}
-
-int smu_baco_enter(struct smu_context *smu)
-{
- int ret = 0;
-
- if (!smu->pm_enabled)
- return -EOPNOTSUPP;
-
- mutex_lock(&smu->mutex);
-
- if (smu->ppt_funcs->baco_enter)
- ret = smu->ppt_funcs->baco_enter(smu);
-
- mutex_unlock(&smu->mutex);
-
- if (ret)
- dev_err(smu->adev->dev, "Failed to enter BACO state!\n");
-
- return ret;
-}
-
-int smu_baco_exit(struct smu_context *smu)
-{
- int ret = 0;
-
- if (!smu->pm_enabled)
- return -EOPNOTSUPP;
-
- mutex_lock(&smu->mutex);
-
- if (smu->ppt_funcs->baco_exit)
- ret = smu->ppt_funcs->baco_exit(smu);
-
- mutex_unlock(&smu->mutex);
-
- if (ret)
- dev_err(smu->adev->dev, "Failed to exit BACO state!\n");
-
- return ret;
-}
-
-int smu_baco_set_state(void *handle, int state)
+static int smu_baco_set_state(void *handle, int state)
{
struct smu_context *smu = handle;
int ret = 0;
@@ -2831,7 +2772,7 @@ int smu_mode1_reset(struct smu_context *smu)
return ret;
}
-int smu_mode2_reset(void *handle)
+static int smu_mode2_reset(void *handle)
{
struct smu_context *smu = handle;
int ret = 0;
@@ -2852,9 +2793,10 @@ int smu_mode2_reset(void *handle)
return ret;
}
-int smu_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
- struct pp_smu_nv_clock_table *max_clocks)
+static int smu_get_max_sustainable_clocks_by_dc(void *handle,
+ struct pp_smu_nv_clock_table *max_clocks)
{
+ struct smu_context *smu = handle;
int ret = 0;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
@@ -2870,10 +2812,11 @@ int smu_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
return ret;
}
-int smu_get_uclk_dpm_states(struct smu_context *smu,
- unsigned int *clock_values_in_khz,
- unsigned int *num_states)
+static int smu_get_uclk_dpm_states(void *handle,
+ unsigned int *clock_values_in_khz,
+ unsigned int *num_states)
{
+ struct smu_context *smu = handle;
int ret = 0;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
@@ -2889,7 +2832,7 @@ int smu_get_uclk_dpm_states(struct smu_context *smu,
return ret;
}
-enum amd_pm_state_type smu_get_current_power_state(void *handle)
+static enum amd_pm_state_type smu_get_current_power_state(void *handle)
{
struct smu_context *smu = handle;
enum amd_pm_state_type pm_state = POWER_STATE_TYPE_DEFAULT;
@@ -2907,9 +2850,10 @@ enum amd_pm_state_type smu_get_current_power_state(void *handle)
return pm_state;
}
-int smu_get_dpm_clock_table(struct smu_context *smu,
- struct dpm_clocks *clock_table)
+static int smu_get_dpm_clock_table(void *handle,
+ struct dpm_clocks *clock_table)
{
+ struct smu_context *smu = handle;
int ret = 0;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
@@ -2925,7 +2869,7 @@ int smu_get_dpm_clock_table(struct smu_context *smu,
return ret;
}
-ssize_t smu_sys_get_gpu_metrics(void *handle, void **table)
+static ssize_t smu_sys_get_gpu_metrics(void *handle, void **table)
{
struct smu_context *smu = handle;
ssize_t size;
@@ -2945,7 +2889,7 @@ ssize_t smu_sys_get_gpu_metrics(void *handle, void **table)
return size;
}
-int smu_enable_mgpu_fan_boost(void *handle)
+static int smu_enable_mgpu_fan_boost(void *handle)
{
struct smu_context *smu = handle;
int ret = 0;
@@ -2963,8 +2907,10 @@ int smu_enable_mgpu_fan_boost(void *handle)
return ret;
}
-int smu_gfx_state_change_set(struct smu_context *smu, uint32_t state)
+static int smu_gfx_state_change_set(void *handle,
+ uint32_t state)
{
+ struct smu_context *smu = handle;
int ret = 0;
mutex_lock(&smu->mutex);
@@ -3026,4 +2972,31 @@ static const struct amd_pm_funcs swsmu_pm_funcs = {
.get_power_profile_mode = smu_get_power_profile_mode,
.force_clock_level = smu_force_ppclk_levels,
.print_clock_levels = smu_print_ppclk_levels,
+ .get_uclk_dpm_states = smu_get_uclk_dpm_states,
+ .get_dpm_clock_table = smu_get_dpm_clock_table,
+ .display_configuration_change = smu_display_configuration_change,
+ .get_clock_by_type_with_latency = smu_get_clock_by_type_with_latency,
+ .display_clock_voltage_request = smu_display_clock_voltage_request,
+ .set_active_display_count = smu_set_display_count,
+ .set_min_deep_sleep_dcefclk = smu_set_deep_sleep_dcefclk,
+ .set_watermarks_for_clock_ranges = smu_set_watermarks_for_clock_ranges,
+ .display_disable_memory_clock_switch = smu_display_disable_memory_clock_switch,
+ .get_max_sustainable_clocks_by_dc = smu_get_max_sustainable_clocks_by_dc,
+ .load_firmware = smu_load_microcode,
+ .gfx_state_change_set = smu_gfx_state_change_set,
};
+
+int smu_wait_for_event(struct amdgpu_device *adev, enum smu_event_type event,
+ uint64_t event_arg)
+{
+ int ret = -EINVAL;
+ struct smu_context *smu = &adev->smu;
+
+ if (smu->ppt_funcs && smu->ppt_funcs->wait_for_event) {
+ mutex_lock(&smu->mutex);
+ ret = smu->ppt_funcs->wait_for_event(smu, event, event_arg);
+ mutex_unlock(&smu->mutex);
+ }
+
+ return ret;
+}
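smu_wait_for_event() is the one new export in this file; reset code can use it to block until the SMU raises SMU_EVENT_RESET_COMPLETE. A hedged caller sketch (the aldebaran mode-2 recovery path is the intended consumer; the pairing below is illustrative, not lifted from this patch):

    /* Hedged caller sketch: trigger a reset, then block on the SMU's
     * completion event (event_arg is unused for RESET_COMPLETE here). */
    ret = amdgpu_asic_reset(adev);
    if (!ret)
            ret = smu_wait_for_event(adev, SMU_EVENT_RESET_COMPLETE, 0);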
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
index bbc03092b0a9..77693bf0840c 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
@@ -2365,6 +2365,7 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
.get_fan_parameters = arcturus_get_fan_parameters,
.interrupt_work = smu_v11_0_interrupt_work,
.set_light_sbr = smu_v11_0_set_light_sbr,
+ .set_mp1_state = smu_cmn_set_mp1_state,
};
void arcturus_set_ppt_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index 3d0de2c958fa..ac13042672ea 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -431,6 +431,30 @@ static int navi10_store_powerplay_table(struct smu_context *smu)
return 0;
}
+static int navi10_set_mp1_state(struct smu_context *smu,
+ enum pp_mp1_state mp1_state)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t mp1_fw_flags;
+ int ret = 0;
+
+ ret = smu_cmn_set_mp1_state(smu, mp1_state);
+ if (ret)
+ return ret;
+
+ if (mp1_state == PP_MP1_STATE_UNLOAD) {
+ mp1_fw_flags = RREG32_PCIE(MP1_Public |
+ (smnMP1_FIRMWARE_FLAGS & 0xffffffff));
+
+ mp1_fw_flags &= ~MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK;
+
+ WREG32_PCIE(MP1_Public |
+ (smnMP1_FIRMWARE_FLAGS & 0xffffffff), mp1_fw_flags);
+ }
+
+ return 0;
+}
+
static int navi10_setup_pptable(struct smu_context *smu)
{
int ret = 0;
@@ -1419,7 +1443,6 @@ static int navi10_force_clk_levels(struct smu_context *smu,
case SMU_SOCCLK:
case SMU_MCLK:
case SMU_UCLK:
- case SMU_DCEFCLK:
case SMU_FCLK:
 /* There are only 2 levels for fine-grained DPM */
if (navi10_is_support_fine_grained_dpm(smu, clk_type)) {
@@ -1439,6 +1462,10 @@ static int navi10_force_clk_levels(struct smu_context *smu,
if (ret)
return size;
break;
+ case SMU_DCEFCLK:
+ dev_info(smu->adev->dev, "Setting DCEFCLK min/max dpm level is not supported!\n");
+ break;
default:
break;
}
@@ -3031,6 +3058,7 @@ static const struct pptable_funcs navi10_ppt_funcs = {
.get_fan_parameters = navi10_get_fan_parameters,
.post_init = navi10_post_smu_init,
.interrupt_work = smu_v11_0_interrupt_work,
+ .set_mp1_state = navi10_set_mp1_state,
};
void navi10_set_ppt_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index 3621884c3293..d2fd44b903ca 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -1127,7 +1127,6 @@ static int sienna_cichlid_force_clk_levels(struct smu_context *smu,
case SMU_SOCCLK:
case SMU_MCLK:
case SMU_UCLK:
- case SMU_DCEFCLK:
case SMU_FCLK:
 /* There are only 2 levels for fine-grained DPM */
if (sienna_cichlid_is_support_fine_grained_dpm(smu, clk_type)) {
@@ -1147,6 +1146,9 @@ static int sienna_cichlid_force_clk_levels(struct smu_context *smu,
if (ret)
goto forec_level_out;
break;
+ case SMU_DCEFCLK:
+ dev_info(smu->adev->dev, "Setting DCEFCLK min/max dpm level is not supported!\n");
+ break;
default:
break;
}
@@ -3110,6 +3112,23 @@ static int sienna_cichlid_system_features_control(struct smu_context *smu,
return smu_v11_0_system_features_control(smu, en);
}
+static int sienna_cichlid_set_mp1_state(struct smu_context *smu,
+ enum pp_mp1_state mp1_state)
+{
+ int ret;
+
+ switch (mp1_state) {
+ case PP_MP1_STATE_UNLOAD:
+ ret = smu_cmn_set_mp1_state(smu, mp1_state);
+ break;
+ default:
+ /* Ignore others */
+ ret = 0;
+ }
+
+ return ret;
+}
+
static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
.get_allowed_feature_mask = sienna_cichlid_get_allowed_feature_mask,
.set_default_dpm_table = sienna_cichlid_set_default_dpm_table,
@@ -3195,6 +3214,7 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
.get_fan_parameters = sienna_cichlid_get_fan_parameters,
.interrupt_work = smu_v11_0_interrupt_work,
.gpo_control = sienna_cichlid_gpo_control,
+ .set_mp1_state = sienna_cichlid_set_mp1_state,
};
void sienna_cichlid_set_ppt_funcs(struct smu_context *smu)
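The three ASICs now pick different mp1_state policies: arcturus forwards every state to the common helper, navi10 additionally masks MP1 interrupt delivery on UNLOAD, and sienna cichlid honors only UNLOAD. The common helper presumably carries the message mapping this patch lifts out of smu_set_mp1_state:

    /* Presumed core of smu_cmn_set_mp1_state, mirroring the switch
     * removed from smu_set_mp1_state above (sketch, not the verbatim
     * helper): */
    switch (mp1_state) {
    case PP_MP1_STATE_SHUTDOWN: msg = SMU_MSG_PrepareMp1ForShutdown; break;
    case PP_MP1_STATE_UNLOAD:   msg = SMU_MSG_PrepareMp1ForUnload;   break;
    case PP_MP1_STATE_RESET:    msg = SMU_MSG_PrepareMp1ForReset;    break;
    default:                    return 0;
    }
    return smu_cmn_send_smc_msg(smu, msg, NULL);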
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
index 0d137af1a78a..6274cae4a065 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
@@ -561,6 +561,7 @@ int smu_v11_0_get_vbios_bootup_values(struct smu_context *smu)
smu->smu_table.boot_values.firmware_caps = v_3_1->firmware_capability;
break;
case 3:
+ case 4:
default:
v_3_3 = (struct atom_firmware_info_v3_3 *)header;
smu->smu_table.boot_values.revision = v_3_3->firmware_revision;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
index c5b387360f01..77f532a49e37 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
@@ -194,18 +194,34 @@ static int vangogh_tables_init(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
struct smu_table *tables = smu_table->tables;
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t if_version;
+ int ret = 0;
+
+ ret = smu_cmn_get_smc_version(smu, &if_version, NULL);
+ if (ret) {
+ dev_err(adev->dev, "Failed to get smu if version!\n");
+ goto err0_out;
+ }
SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t),
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
- SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t),
- PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
SMU_TABLE_INIT(tables, SMU_TABLE_DPMCLOCKS, sizeof(DpmClocks_t),
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
SMU_TABLE_INIT(tables, SMU_TABLE_PMSTATUSLOG, SMU11_TOOL_SIZE,
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
SMU_TABLE_INIT(tables, SMU_TABLE_ACTIVITY_MONITOR_COEFF, sizeof(DpmActivityMonitorCoeffExt_t),
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
- smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL);
+
+ if (if_version < 0x3) {
+ SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_legacy_t),
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+ smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_legacy_t), GFP_KERNEL);
+ } else {
+ SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t),
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+ smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL);
+ }
if (!smu_table->metrics_table)
goto err0_out;
smu_table->metrics_time = 0;
@@ -235,13 +251,12 @@ err0_out:
return -ENOMEM;
}
-static int vangogh_get_smu_metrics_data(struct smu_context *smu,
+static int vangogh_get_legacy_smu_metrics_data(struct smu_context *smu,
MetricsMember_t member,
uint32_t *value)
{
struct smu_table_context *smu_table = &smu->smu_table;
-
- SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table;
+ SmuMetrics_legacy_t *metrics = (SmuMetrics_legacy_t *)smu_table->metrics_table;
int ret = 0;
mutex_lock(&smu->metrics_lock);
@@ -255,7 +270,7 @@ static int vangogh_get_smu_metrics_data(struct smu_context *smu,
}
switch (member) {
- case METRICS_AVERAGE_GFXCLK:
+ case METRICS_CURR_GFXCLK:
*value = metrics->GfxclkFrequency;
break;
case METRICS_AVERAGE_SOCCLK:
@@ -267,7 +282,7 @@ static int vangogh_get_smu_metrics_data(struct smu_context *smu,
case METRICS_AVERAGE_DCLK:
*value = metrics->DclkFrequency;
break;
- case METRICS_AVERAGE_UCLK:
+ case METRICS_CURR_UCLK:
*value = metrics->MemclkFrequency;
break;
case METRICS_AVERAGE_GFXACTIVITY:
@@ -311,6 +326,103 @@ static int vangogh_get_smu_metrics_data(struct smu_context *smu,
return ret;
}
+static int vangogh_get_smu_metrics_data(struct smu_context *smu,
+ MetricsMember_t member,
+ uint32_t *value)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table;
+ int ret = 0;
+
+ mutex_lock(&smu->metrics_lock);
+
+ ret = smu_cmn_get_metrics_table_locked(smu,
+ NULL,
+ false);
+ if (ret) {
+ mutex_unlock(&smu->metrics_lock);
+ return ret;
+ }
+
+ switch (member) {
+ case METRICS_CURR_GFXCLK:
+ *value = metrics->Current.GfxclkFrequency;
+ break;
+ case METRICS_AVERAGE_SOCCLK:
+ *value = metrics->Current.SocclkFrequency;
+ break;
+ case METRICS_AVERAGE_VCLK:
+ *value = metrics->Current.VclkFrequency;
+ break;
+ case METRICS_AVERAGE_DCLK:
+ *value = metrics->Current.DclkFrequency;
+ break;
+ case METRICS_CURR_UCLK:
+ *value = metrics->Current.MemclkFrequency;
+ break;
+ case METRICS_AVERAGE_GFXACTIVITY:
+ *value = metrics->Current.GfxActivity;
+ break;
+ case METRICS_AVERAGE_VCNACTIVITY:
+ *value = metrics->Current.UvdActivity;
+ break;
+ case METRICS_AVERAGE_SOCKETPOWER:
+ *value = (metrics->Current.CurrentSocketPower << 8) /
+ 1000;
+ break;
+ case METRICS_TEMPERATURE_EDGE:
+ *value = metrics->Current.GfxTemperature / 100 *
+ SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ break;
+ case METRICS_TEMPERATURE_HOTSPOT:
+ *value = metrics->Current.SocTemperature / 100 *
+ SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ break;
+ case METRICS_THROTTLER_STATUS:
+ *value = metrics->Current.ThrottlerStatus;
+ break;
+ case METRICS_VOLTAGE_VDDGFX:
+ *value = metrics->Current.Voltage[2];
+ break;
+ case METRICS_VOLTAGE_VDDSOC:
+ *value = metrics->Current.Voltage[1];
+ break;
+ case METRICS_AVERAGE_CPUCLK:
+ memcpy(value, &metrics->Current.CoreFrequency[0],
+ smu->cpu_core_num * sizeof(uint16_t));
+ break;
+ default:
+ *value = UINT_MAX;
+ break;
+ }
+
+ mutex_unlock(&smu->metrics_lock);
+
+ return ret;
+}
+
+static int vangogh_common_get_smu_metrics_data(struct smu_context *smu,
+ MetricsMember_t member,
+ uint32_t *value)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t if_version;
+ int ret = 0;
+
+ ret = smu_cmn_get_smc_version(smu, &if_version, NULL);
+ if (ret) {
+ dev_err(adev->dev, "Failed to get smu if version!\n");
+ return ret;
+ }
+
+ if (if_version < 0x3)
+ ret = vangogh_get_legacy_smu_metrics_data(smu, member, value);
+ else
+ ret = vangogh_get_smu_metrics_data(smu, member, value);
+
+ return ret;
+}
+
static int vangogh_allocate_dpm_context(struct smu_context *smu)
{
struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
@@ -384,10 +496,15 @@ static int vangogh_dpm_set_jpeg_enable(struct smu_context *smu, bool enable)
static bool vangogh_is_dpm_running(struct smu_context *smu)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
uint32_t feature_mask[2];
uint64_t feature_enabled;
+ /* we need to re-init after suspend, so return false */
+ if (adev->in_suspend)
+ return false;
+
ret = smu_cmn_get_enabled_32_bits_mask(smu, feature_mask, 2);
if (ret)
@@ -442,11 +559,11 @@ static int vangogh_get_dpm_clk_limited(struct smu_context *smu, enum smu_clk_typ
return 0;
}
-static int vangogh_print_clk_levels(struct smu_context *smu,
+static int vangogh_print_legacy_clk_levels(struct smu_context *smu,
enum smu_clk_type clk_type, char *buf)
{
DpmClocks_t *clk_table = smu->smu_table.clocks_table;
- SmuMetrics_t metrics;
+ SmuMetrics_legacy_t metrics;
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
int i, size = 0, ret = 0;
uint32_t cur_value = 0, value = 0, count = 0;
@@ -541,6 +658,126 @@ static int vangogh_print_clk_levels(struct smu_context *smu,
return size;
}
+static int vangogh_print_clk_levels(struct smu_context *smu,
+ enum smu_clk_type clk_type, char *buf)
+{
+ DpmClocks_t *clk_table = smu->smu_table.clocks_table;
+ SmuMetrics_t metrics;
+ struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+ int i, size = 0, ret = 0;
+ uint32_t cur_value = 0, value = 0, count = 0;
+ bool cur_value_match_level = false;
+
+ memset(&metrics, 0, sizeof(metrics));
+
+ ret = smu_cmn_get_metrics_table(smu, &metrics, false);
+ if (ret)
+ return ret;
+
+ switch (clk_type) {
+ case SMU_OD_SCLK:
+ if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
+ size = sprintf(buf, "%s:\n", "OD_SCLK");
+ size += sprintf(buf + size, "0: %10uMhz\n",
+ (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq);
+ size += sprintf(buf + size, "1: %10uMhz\n",
+ (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq);
+ }
+ break;
+ case SMU_OD_CCLK:
+ if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
+ size = sprintf(buf, "CCLK_RANGE in Core%d:\n", smu->cpu_core_id_select);
+ size += sprintf(buf + size, "0: %10uMhz\n",
+ (smu->cpu_actual_soft_min_freq > 0) ? smu->cpu_actual_soft_min_freq : smu->cpu_default_soft_min_freq);
+ size += sprintf(buf + size, "1: %10uMhz\n",
+ (smu->cpu_actual_soft_max_freq > 0) ? smu->cpu_actual_soft_max_freq : smu->cpu_default_soft_max_freq);
+ }
+ break;
+ case SMU_OD_RANGE:
+ if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
+ size = sprintf(buf, "%s:\n", "OD_RANGE");
+ size += sprintf(buf + size, "SCLK: %7uMhz %10uMhz\n",
+ smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq);
+ size += sprintf(buf + size, "CCLK: %7uMhz %10uMhz\n",
+ smu->cpu_default_soft_min_freq, smu->cpu_default_soft_max_freq);
+ }
+ break;
+ case SMU_SOCCLK:
+ /* levels 3 ~ 6 of socclk use the same frequency on vangogh */
+ count = clk_table->NumSocClkLevelsEnabled;
+ cur_value = metrics.Current.SocclkFrequency;
+ break;
+ case SMU_VCLK:
+ count = clk_table->VcnClkLevelsEnabled;
+ cur_value = metrics.Current.VclkFrequency;
+ break;
+ case SMU_DCLK:
+ count = clk_table->VcnClkLevelsEnabled;
+ cur_value = metrics.Current.DclkFrequency;
+ break;
+ case SMU_MCLK:
+ count = clk_table->NumDfPstatesEnabled;
+ cur_value = metrics.Current.MemclkFrequency;
+ break;
+ case SMU_FCLK:
+ count = clk_table->NumDfPstatesEnabled;
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetFclkFrequency, 0, &cur_value);
+ if (ret)
+ return ret;
+ break;
+ default:
+ break;
+ }
+
+ switch (clk_type) {
+ case SMU_SOCCLK:
+ case SMU_VCLK:
+ case SMU_DCLK:
+ case SMU_MCLK:
+ case SMU_FCLK:
+ for (i = 0; i < count; i++) {
+ ret = vangogh_get_dpm_clk_limited(smu, clk_type, i, &value);
+ if (ret)
+ return ret;
+ if (!value)
+ continue;
+ size += sprintf(buf + size, "%d: %uMhz %s\n", i, value,
+ cur_value == value ? "*" : "");
+ if (cur_value == value)
+ cur_value_match_level = true;
+ }
+
+ if (!cur_value_match_level)
+ size += sprintf(buf + size, " %uMhz *\n", cur_value);
+ break;
+ default:
+ break;
+ }
+
+ return size;
+}
+
+static int vangogh_common_print_clk_levels(struct smu_context *smu,
+ enum smu_clk_type clk_type, char *buf)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t if_version;
+ int ret = 0;
+
+ ret = smu_cmn_get_smc_version(smu, &if_version, NULL);
+ if (ret) {
+ dev_err(adev->dev, "Failed to get smu if version!\n");
+ return ret;
+ }
+
+ if (if_version < 0x3)
+ ret = vangogh_print_legacy_clk_levels(smu, clk_type, buf);
+ else
+ ret = vangogh_print_clk_levels(smu, clk_type, buf);
+
+ return ret;
+}
+
static int vangogh_get_profiling_clk_mask(struct smu_context *smu,
enum amd_dpm_forced_level level,
uint32_t *vclk_mask,
@@ -855,7 +1092,6 @@ static int vangogh_set_soft_freq_limited_range(struct smu_context *smu,
return ret;
break;
case SMU_FCLK:
- case SMU_MCLK:
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetHardMinFclkByFreq,
min, NULL);
@@ -943,7 +1179,6 @@ static int vangogh_force_clk_levels(struct smu_context *smu,
if (ret)
return ret;
break;
- case SMU_MCLK:
case SMU_FCLK:
ret = vangogh_get_dpm_clk_limited(smu,
clk_type, soft_min_level, &min_freq);
@@ -1030,7 +1265,6 @@ static int vangogh_force_dpm_limit_value(struct smu_context *smu, bool highest)
SMU_SOCCLK,
SMU_VCLK,
SMU_DCLK,
- SMU_MCLK,
SMU_FCLK,
};
@@ -1059,7 +1293,6 @@ static int vangogh_unforce_dpm_levels(struct smu_context *smu)
enum smu_clk_type clk_type;
uint32_t feature;
} clk_feature_map[] = {
- {SMU_MCLK, SMU_FEATURE_DPM_FCLK_BIT},
{SMU_FCLK, SMU_FEATURE_DPM_FCLK_BIT},
{SMU_SOCCLK, SMU_FEATURE_DPM_SOCCLK_BIT},
{SMU_VCLK, SMU_FEATURE_VCN_DPM_BIT},
@@ -1191,7 +1424,6 @@ static int vangogh_set_performance_level(struct smu_context *smu,
if (ret)
return ret;
- vangogh_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask);
vangogh_force_clk_levels(smu, SMU_FCLK, 1 << fclk_mask);
vangogh_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask);
vangogh_force_clk_levels(smu, SMU_VCLK, 1 << vclk_mask);
@@ -1231,7 +1463,6 @@ static int vangogh_set_performance_level(struct smu_context *smu,
if (ret)
return ret;
- vangogh_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask);
vangogh_force_clk_levels(smu, SMU_FCLK, 1 << fclk_mask);
break;
case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
@@ -1273,57 +1504,57 @@ static int vangogh_read_sensor(struct smu_context *smu,
mutex_lock(&smu->sensor_lock);
switch (sensor) {
case AMDGPU_PP_SENSOR_GPU_LOAD:
- ret = vangogh_get_smu_metrics_data(smu,
+ ret = vangogh_common_get_smu_metrics_data(smu,
METRICS_AVERAGE_GFXACTIVITY,
(uint32_t *)data);
*size = 4;
break;
case AMDGPU_PP_SENSOR_GPU_POWER:
- ret = vangogh_get_smu_metrics_data(smu,
+ ret = vangogh_common_get_smu_metrics_data(smu,
METRICS_AVERAGE_SOCKETPOWER,
(uint32_t *)data);
*size = 4;
break;
case AMDGPU_PP_SENSOR_EDGE_TEMP:
- ret = vangogh_get_smu_metrics_data(smu,
+ ret = vangogh_common_get_smu_metrics_data(smu,
METRICS_TEMPERATURE_EDGE,
(uint32_t *)data);
*size = 4;
break;
case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
- ret = vangogh_get_smu_metrics_data(smu,
+ ret = vangogh_common_get_smu_metrics_data(smu,
METRICS_TEMPERATURE_HOTSPOT,
(uint32_t *)data);
*size = 4;
break;
case AMDGPU_PP_SENSOR_GFX_MCLK:
- ret = vangogh_get_smu_metrics_data(smu,
- METRICS_AVERAGE_UCLK,
+ ret = vangogh_common_get_smu_metrics_data(smu,
+ METRICS_CURR_UCLK,
(uint32_t *)data);
*(uint32_t *)data *= 100;
*size = 4;
break;
case AMDGPU_PP_SENSOR_GFX_SCLK:
- ret = vangogh_get_smu_metrics_data(smu,
- METRICS_AVERAGE_GFXCLK,
+ ret = vangogh_common_get_smu_metrics_data(smu,
+ METRICS_CURR_GFXCLK,
(uint32_t *)data);
*(uint32_t *)data *= 100;
*size = 4;
break;
case AMDGPU_PP_SENSOR_VDDGFX:
- ret = vangogh_get_smu_metrics_data(smu,
+ ret = vangogh_common_get_smu_metrics_data(smu,
METRICS_VOLTAGE_VDDGFX,
(uint32_t *)data);
*size = 4;
break;
case AMDGPU_PP_SENSOR_VDDNB:
- ret = vangogh_get_smu_metrics_data(smu,
+ ret = vangogh_common_get_smu_metrics_data(smu,
METRICS_VOLTAGE_VDDSOC,
(uint32_t *)data);
*size = 4;
break;
case AMDGPU_PP_SENSOR_CPU_CLK:
- ret = vangogh_get_smu_metrics_data(smu,
+ ret = vangogh_common_get_smu_metrics_data(smu,
METRICS_AVERAGE_CPUCLK,
(uint32_t *)data);
*size = smu->cpu_core_num * sizeof(uint16_t);
@@ -1397,13 +1628,13 @@ static int vangogh_set_watermarks_table(struct smu_context *smu,
return 0;
}
-static ssize_t vangogh_get_gpu_metrics(struct smu_context *smu,
+static ssize_t vangogh_get_legacy_gpu_metrics(struct smu_context *smu,
void **table)
{
struct smu_table_context *smu_table = &smu->smu_table;
struct gpu_metrics_v2_1 *gpu_metrics =
(struct gpu_metrics_v2_1 *)smu_table->gpu_metrics_table;
- SmuMetrics_t metrics;
+ SmuMetrics_legacy_t metrics;
int ret = 0;
ret = smu_cmn_get_metrics_table(smu, &metrics, true);
@@ -1416,9 +1647,8 @@ static ssize_t vangogh_get_gpu_metrics(struct smu_context *smu,
gpu_metrics->temperature_soc = metrics.SocTemperature;
memcpy(&gpu_metrics->temperature_core[0],
&metrics.CoreTemperature[0],
- sizeof(uint16_t) * 8);
+ sizeof(uint16_t) * 4);
gpu_metrics->temperature_l3[0] = metrics.L3Temperature[0];
- gpu_metrics->temperature_l3[1] = metrics.L3Temperature[1];
gpu_metrics->average_gfx_activity = metrics.GfxActivity;
gpu_metrics->average_mm_activity = metrics.UvdActivity;
@@ -1429,7 +1659,7 @@ static ssize_t vangogh_get_gpu_metrics(struct smu_context *smu,
gpu_metrics->average_gfx_power = metrics.Power[2];
memcpy(&gpu_metrics->average_core_power[0],
&metrics.CorePower[0],
- sizeof(uint16_t) * 8);
+ sizeof(uint16_t) * 4);
gpu_metrics->average_gfxclk_frequency = metrics.GfxclkFrequency;
gpu_metrics->average_socclk_frequency = metrics.SocclkFrequency;
@@ -1440,9 +1670,8 @@ static ssize_t vangogh_get_gpu_metrics(struct smu_context *smu,
memcpy(&gpu_metrics->current_coreclk[0],
&metrics.CoreFrequency[0],
- sizeof(uint16_t) * 8);
+ sizeof(uint16_t) * 4);
gpu_metrics->current_l3clk[0] = metrics.L3Frequency[0];
- gpu_metrics->current_l3clk[1] = metrics.L3Frequency[1];
gpu_metrics->throttle_status = metrics.ThrottlerStatus;
@@ -1453,6 +1682,88 @@ static ssize_t vangogh_get_gpu_metrics(struct smu_context *smu,
return sizeof(struct gpu_metrics_v2_1);
}
+static ssize_t vangogh_get_gpu_metrics(struct smu_context *smu,
+ void **table)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct gpu_metrics_v2_1 *gpu_metrics =
+ (struct gpu_metrics_v2_1 *)smu_table->gpu_metrics_table;
+ SmuMetrics_t metrics;
+ int ret = 0;
+
+ ret = smu_cmn_get_metrics_table(smu, &metrics, true);
+ if (ret)
+ return ret;
+
+ smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 1);
+
+ gpu_metrics->temperature_gfx = metrics.Current.GfxTemperature;
+ gpu_metrics->temperature_soc = metrics.Current.SocTemperature;
+ memcpy(&gpu_metrics->temperature_core[0],
+ &metrics.Current.CoreTemperature[0],
+ sizeof(uint16_t) * 4);
+ gpu_metrics->temperature_l3[0] = metrics.Current.L3Temperature[0];
+
+ gpu_metrics->average_gfx_activity = metrics.Current.GfxActivity;
+ gpu_metrics->average_mm_activity = metrics.Current.UvdActivity;
+
+ gpu_metrics->average_socket_power = metrics.Current.CurrentSocketPower;
+ gpu_metrics->average_cpu_power = metrics.Current.Power[0];
+ gpu_metrics->average_soc_power = metrics.Current.Power[1];
+ gpu_metrics->average_gfx_power = metrics.Current.Power[2];
+ memcpy(&gpu_metrics->average_core_power[0],
+ &metrics.Average.CorePower[0],
+ sizeof(uint16_t) * 4);
+
+ gpu_metrics->average_gfxclk_frequency = metrics.Average.GfxclkFrequency;
+ gpu_metrics->average_socclk_frequency = metrics.Average.SocclkFrequency;
+ gpu_metrics->average_uclk_frequency = metrics.Average.MemclkFrequency;
+ gpu_metrics->average_fclk_frequency = metrics.Average.MemclkFrequency;
+ gpu_metrics->average_vclk_frequency = metrics.Average.VclkFrequency;
+ gpu_metrics->average_dclk_frequency = metrics.Average.DclkFrequency;
+
+ gpu_metrics->current_gfxclk = metrics.Current.GfxclkFrequency;
+ gpu_metrics->current_socclk = metrics.Current.SocclkFrequency;
+ gpu_metrics->current_uclk = metrics.Current.MemclkFrequency;
+ gpu_metrics->current_fclk = metrics.Current.MemclkFrequency;
+ gpu_metrics->current_vclk = metrics.Current.VclkFrequency;
+ gpu_metrics->current_dclk = metrics.Current.DclkFrequency;
+
+ memcpy(&gpu_metrics->current_coreclk[0],
+ &metrics.Current.CoreFrequency[0],
+ sizeof(uint16_t) * 4);
+ gpu_metrics->current_l3clk[0] = metrics.Current.L3Frequency[0];
+
+ gpu_metrics->throttle_status = metrics.Current.ThrottlerStatus;
+
+ gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
+
+ *table = (void *)gpu_metrics;
+
+ return sizeof(struct gpu_metrics_v2_1);
+}
+
+static ssize_t vangogh_common_get_gpu_metrics(struct smu_context *smu,
+ void **table)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t if_version;
+ int ret = 0;
+
+ ret = smu_cmn_get_smc_version(smu, &if_version, NULL);
+ if (ret) {
+ dev_err(adev->dev, "Failed to get smu if version!\n");
+ return ret;
+ }
+
+ if (if_version < 0x3)
+ ret = vangogh_get_legacy_gpu_metrics(smu, table);
+ else
+ ret = vangogh_get_gpu_metrics(smu, table);
+
+ return ret;
+}
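+
+/*
+ * Version-dispatch sketch: interface versions below 0x3 keep the flat
+ * SmuMetrics_legacy_t layout, while 0x3 and newer use SmuMetrics_t with
+ * split Current/Average groups; either path fills one gpu_metrics_v2_1
+ * table, so a caller is layout-agnostic (consume_metrics() below is a
+ * made-up consumer):
+ *
+ *	void *table;
+ *	ssize_t n = vangogh_common_get_gpu_metrics(smu, &table);
+ *	if (n == sizeof(struct gpu_metrics_v2_1))
+ *		consume_metrics(table);
+ */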
+
static int vangogh_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABLE_COMMAND type,
long input[], uint32_t size)
{
@@ -1871,9 +2182,9 @@ static const struct pptable_funcs vangogh_ppt_funcs = {
.set_watermarks_table = vangogh_set_watermarks_table,
.set_driver_table_location = smu_v11_0_set_driver_table_location,
.interrupt_work = smu_v11_0_interrupt_work,
- .get_gpu_metrics = vangogh_get_gpu_metrics,
+ .get_gpu_metrics = vangogh_common_get_gpu_metrics,
.od_edit_dpm_table = vangogh_od_edit_dpm_table,
- .print_clk_levels = vangogh_print_clk_levels,
+ .print_clk_levels = vangogh_common_print_clk_levels,
.set_default_dpm_table = vangogh_set_default_dpm_tables,
.set_fine_grain_gfx_freq_parameters = vangogh_set_fine_grain_gfx_freq_parameters,
.system_features_control = vangogh_system_features_control,
@@ -1889,6 +2200,7 @@ static const struct pptable_funcs vangogh_ppt_funcs = {
.get_ppt_limit = vangogh_get_ppt_limit,
.get_power_limit = vangogh_get_power_limit,
.set_power_limit = vangogh_set_power_limit,
+ .get_vbios_bootup_values = smu_v11_0_get_vbios_bootup_values,
};
void vangogh_set_ppt_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
index e3232295f2bf..f43b4c623685 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
@@ -1332,6 +1332,7 @@ static const struct pptable_funcs renoir_ppt_funcs = {
.gfx_state_change_set = renoir_gfx_state_change_set,
.set_fine_grain_gfx_freq_parameters = renoir_set_fine_grain_gfx_freq_parameters,
.od_edit_dpm_table = renoir_od_edit_dpm_table,
+ .get_vbios_bootup_values = smu_v12_0_get_vbios_bootup_values,
};
void renoir_set_ppt_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c
index 6cc4855c8a37..d60b8c5e8715 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c
@@ -27,6 +27,7 @@
#include "amdgpu_smu.h"
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
+#include "amdgpu_atombios.h"
#include "smu_v12_0.h"
#include "soc15_common.h"
#include "atom.h"
@@ -278,3 +279,125 @@ int smu_v12_0_set_driver_table_location(struct smu_context *smu)
return ret;
}
+
+static int smu_v12_0_atom_get_smu_clockinfo(struct amdgpu_device *adev,
+ uint8_t clk_id,
+ uint8_t syspll_id,
+ uint32_t *clk_freq)
+{
+ struct atom_get_smu_clock_info_parameters_v3_1 input = {0};
+ struct atom_get_smu_clock_info_output_parameters_v3_1 *output;
+ int ret, index;
+
+ input.clk_id = clk_id;
+ input.syspll_id = syspll_id;
+ input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
+ index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
+ getsmuclockinfo);
+
+ ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
+ (uint32_t *)&input);
+ if (ret)
+ return -EINVAL;
+
+ output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
+ *clk_freq = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;
+
+ return 0;
+}
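+
+/*
+ * Note on the cast above: atom command tables write their results back
+ * into the caller's parameter buffer, which is why &input is
+ * reinterpreted as the output struct after amdgpu_atom_execute_table()
+ * returns; dividing the Hz value by 10000 yields the 10 kHz units used
+ * throughout boot_values.
+ */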
+
+int smu_v12_0_get_vbios_bootup_values(struct smu_context *smu)
+{
+ int ret, index;
+ uint16_t size;
+ uint8_t frev, crev;
+ struct atom_common_table_header *header;
+ struct atom_firmware_info_v3_1 *v_3_1;
+ struct atom_firmware_info_v3_3 *v_3_3;
+
+ index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+ firmwareinfo);
+
+ ret = amdgpu_atombios_get_data_table(smu->adev, index, &size, &frev, &crev,
+ (uint8_t **)&header);
+ if (ret)
+ return ret;
+
+ if (header->format_revision != 3) {
+ dev_err(smu->adev->dev, "unknown atom_firmware_info version for smu12!\n");
+ return -EINVAL;
+ }
+
+ switch (header->content_revision) {
+ case 0:
+ case 1:
+ case 2:
+ v_3_1 = (struct atom_firmware_info_v3_1 *)header;
+ smu->smu_table.boot_values.revision = v_3_1->firmware_revision;
+ smu->smu_table.boot_values.gfxclk = v_3_1->bootup_sclk_in10khz;
+ smu->smu_table.boot_values.uclk = v_3_1->bootup_mclk_in10khz;
+ smu->smu_table.boot_values.socclk = 0;
+ smu->smu_table.boot_values.dcefclk = 0;
+ smu->smu_table.boot_values.vddc = v_3_1->bootup_vddc_mv;
+ smu->smu_table.boot_values.vddci = v_3_1->bootup_vddci_mv;
+ smu->smu_table.boot_values.mvddc = v_3_1->bootup_mvddc_mv;
+ smu->smu_table.boot_values.vdd_gfx = v_3_1->bootup_vddgfx_mv;
+ smu->smu_table.boot_values.cooling_id = v_3_1->coolingsolution_id;
+ smu->smu_table.boot_values.pp_table_id = 0;
+ smu->smu_table.boot_values.firmware_caps = v_3_1->firmware_capability;
+ break;
+ case 3:
+ case 4:
+ default:
+ v_3_3 = (struct atom_firmware_info_v3_3 *)header;
+ smu->smu_table.boot_values.revision = v_3_3->firmware_revision;
+ smu->smu_table.boot_values.gfxclk = v_3_3->bootup_sclk_in10khz;
+ smu->smu_table.boot_values.uclk = v_3_3->bootup_mclk_in10khz;
+ smu->smu_table.boot_values.socclk = 0;
+ smu->smu_table.boot_values.dcefclk = 0;
+ smu->smu_table.boot_values.vddc = v_3_3->bootup_vddc_mv;
+ smu->smu_table.boot_values.vddci = v_3_3->bootup_vddci_mv;
+ smu->smu_table.boot_values.mvddc = v_3_3->bootup_mvddc_mv;
+ smu->smu_table.boot_values.vdd_gfx = v_3_3->bootup_vddgfx_mv;
+ smu->smu_table.boot_values.cooling_id = v_3_3->coolingsolution_id;
+ smu->smu_table.boot_values.pp_table_id = v_3_3->pplib_pptable_id;
+ smu->smu_table.boot_values.firmware_caps = v_3_3->firmware_capability;
+ }
+
+ smu->smu_table.boot_values.format_revision = header->format_revision;
+ smu->smu_table.boot_values.content_revision = header->content_revision;
+
+ smu_v12_0_atom_get_smu_clockinfo(smu->adev,
+ (uint8_t)SMU12_SYSPLL0_SOCCLK_ID,
+ (uint8_t)SMU12_SYSPLL0_ID,
+ &smu->smu_table.boot_values.socclk);
+
+ smu_v12_0_atom_get_smu_clockinfo(smu->adev,
+ (uint8_t)SMU12_SYSPLL1_DCFCLK_ID,
+ (uint8_t)SMU12_SYSPLL1_ID,
+ &smu->smu_table.boot_values.dcefclk);
+
+ smu_v12_0_atom_get_smu_clockinfo(smu->adev,
+ (uint8_t)SMU12_SYSPLL0_VCLK_ID,
+ (uint8_t)SMU12_SYSPLL0_ID,
+ &smu->smu_table.boot_values.vclk);
+
+ smu_v12_0_atom_get_smu_clockinfo(smu->adev,
+ (uint8_t)SMU12_SYSPLL0_DCLK_ID,
+ (uint8_t)SMU12_SYSPLL0_ID,
+ &smu->smu_table.boot_values.dclk);
+
+ if ((smu->smu_table.boot_values.format_revision == 3) &&
+ (smu->smu_table.boot_values.content_revision >= 2))
+ smu_v12_0_atom_get_smu_clockinfo(smu->adev,
+ (uint8_t)SMU12_SYSPLL3_0_FCLK_ID,
+ (uint8_t)SMU12_SYSPLL3_0_ID,
+ &smu->smu_table.boot_values.fclk);
+
+ smu_v12_0_atom_get_smu_clockinfo(smu->adev,
+ (uint8_t)SMU12_SYSPLL0_LCLK_ID,
+ (uint8_t)SMU12_SYSPLL0_ID,
+ &smu->smu_table.boot_values.lclk);
+
+ return 0;
+}
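+
+/*
+ * Reading note: socclk/dcefclk are deliberately zeroed while parsing
+ * firmwareinfo and then backfilled via smu_v12_0_atom_get_smu_clockinfo(),
+ * and fclk is only queried for format 3 tables with content revision
+ * >= 2, matching the guard above.
+ */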
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
index 9813a86ca31a..dcbe3a72da09 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
@@ -78,6 +78,8 @@
#define smnPCIE_ESM_CTRL 0x111003D0
+#define CLOCK_VALID (1 << 31)
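+/*
+ * Assumed usage (see aldebaran_print_clk_levels below): bit 31 tags a
+ * cached user min/max gfx clock as valid and is masked off before the
+ * MHz value is used, e.g. (default_mhz is a made-up fallback):
+ *
+ *	min_clk = freq & CLOCK_VALID ? freq & ~CLOCK_VALID : default_mhz;
+ */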
+
static const struct cmn2asic_msg_mapping aldebaran_message_map[SMU_MSG_MAX_COUNT] = {
MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 0),
MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1),
@@ -126,7 +128,8 @@ static const struct cmn2asic_msg_mapping aldebaran_message_map[SMU_MSG_MAX_COUNT
MSG_MAP(SetExecuteDMATest, PPSMC_MSG_SetExecuteDMATest, 0),
MSG_MAP(EnableDeterminism, PPSMC_MSG_EnableDeterminism, 0),
MSG_MAP(DisableDeterminism, PPSMC_MSG_DisableDeterminism, 0),
- MSG_MAP(SetUclkDpmMode, PPSMC_MSG_SetUclkDpmMode, 0),
+ MSG_MAP(SetUclkDpmMode, PPSMC_MSG_SetUclkDpmMode, 0),
+ MSG_MAP(GfxDriverResetRecovery, PPSMC_MSG_GfxDriverResetRecovery, 0),
};
static const struct cmn2asic_mapping aldebaran_clk_map[SMU_CLK_COUNT] = {
@@ -404,6 +407,9 @@ static int aldebaran_setup_pptable(struct smu_context *smu)
{
int ret = 0;
+ /* VBIOS pptable is the first choice */
+ smu->smu_table.boot_values.pp_table_id = 0;
+
ret = smu_v13_0_setup_pptable(smu);
if (ret)
return ret;
@@ -669,6 +675,7 @@ static int aldebaran_print_clk_levels(struct smu_context *smu,
struct smu_13_0_dpm_context *dpm_context = NULL;
uint32_t display_levels;
uint32_t freq_values[3] = {0};
+ uint32_t min_clk, max_clk;
if (amdgpu_ras_intr_triggered())
return snprintf(buf, PAGE_SIZE, "unavailable\n");
@@ -696,12 +703,20 @@ static int aldebaran_print_clk_levels(struct smu_context *smu,
display_levels = clocks.num_levels;
+ min_clk = smu->gfx_actual_hard_min_freq & CLOCK_VALID ?
+ smu->gfx_actual_hard_min_freq & ~CLOCK_VALID :
+ single_dpm_table->dpm_levels[0].value;
+ max_clk = smu->gfx_actual_soft_max_freq & CLOCK_VALID ?
+ smu->gfx_actual_soft_max_freq & ~CLOCK_VALID :
+ single_dpm_table->dpm_levels[1].value;
+
+ freq_values[0] = min_clk;
+ freq_values[1] = max_clk;
+
/* fine-grained dpm has only 2 levels */
- if (now > single_dpm_table->dpm_levels[0].value &&
- now < single_dpm_table->dpm_levels[1].value) {
+ if (now > min_clk && now < max_clk) {
display_levels = clocks.num_levels + 1;
- freq_values[0] = single_dpm_table->dpm_levels[0].value;
- freq_values[2] = single_dpm_table->dpm_levels[1].value;
+ freq_values[2] = max_clk;
freq_values[1] = now;
}
@@ -711,12 +726,15 @@ static int aldebaran_print_clk_levels(struct smu_context *smu,
*/
if (display_levels == clocks.num_levels) {
for (i = 0; i < clocks.num_levels; i++)
- size += sprintf(buf + size, "%d: %uMhz %s\n", i,
- clocks.data[i].clocks_in_khz / 1000,
- (clocks.num_levels == 1) ? "*" :
+ size += sprintf(
+ buf + size, "%d: %uMhz %s\n", i,
+ freq_values[i],
+ (clocks.num_levels == 1) ?
+ "*" :
(aldebaran_freqs_in_same_level(
- clocks.data[i].clocks_in_khz / 1000,
- now) ? "*" : ""));
+ freq_values[i], now) ?
+ "*" :
+ ""));
} else {
for (i = 0; i < display_levels; i++)
size += sprintf(buf + size, "%d: %uMhz %s\n", i,
@@ -1116,6 +1134,9 @@ static int aldebaran_set_performance_level(struct smu_context *smu,
&& (level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM))
smu_cmn_send_smc_msg(smu, SMU_MSG_DisableDeterminism, NULL);
+ /* Reset user min/max gfx clock */
+ smu->gfx_actual_hard_min_freq = 0;
+ smu->gfx_actual_soft_max_freq = 0;
switch (level) {
@@ -1157,7 +1178,14 @@ static int aldebaran_set_soft_freq_limited_range(struct smu_context *smu,
if (smu_dpm->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
min_clk = max(min, dpm_context->dpm_tables.gfx_table.min);
max_clk = min(max, dpm_context->dpm_tables.gfx_table.max);
- return smu_v13_0_set_soft_freq_limited_range(smu, SMU_GFXCLK, min_clk, max_clk);
+ ret = smu_v13_0_set_soft_freq_limited_range(smu, SMU_GFXCLK,
+ min_clk, max_clk);
+
+ if (!ret) {
+ smu->gfx_actual_hard_min_freq = min_clk | CLOCK_VALID;
+ smu->gfx_actual_soft_max_freq = max_clk | CLOCK_VALID;
+ }
+ return ret;
}
if (smu_dpm->dpm_level == AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
@@ -1177,9 +1205,15 @@ static int aldebaran_set_soft_freq_limited_range(struct smu_context *smu,
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_EnableDeterminism,
max, NULL);
- if (ret)
+ if (ret) {
dev_err(adev->dev,
"Failed to enable determinism at GFX clock %d MHz\n", max);
+ } else {
+ smu->gfx_actual_hard_min_freq =
+ min_clk | CLOCK_VALID;
+ smu->gfx_actual_soft_max_freq =
+ max | CLOCK_VALID;
+ }
}
}
@@ -1265,6 +1299,233 @@ static bool aldebaran_is_dpm_running(struct smu_context *smu)
return !!(feature_enabled & SMC_DPM_FEATURE);
}
+static void aldebaran_fill_i2c_req(SwI2cRequest_t *req, bool write,
+ uint8_t address, uint32_t numbytes,
+ uint8_t *data)
+{
+ int i;
+
+ req->I2CcontrollerPort = 0;
+ req->I2CSpeed = 2;
+ req->SlaveAddress = address;
+ req->NumCmds = numbytes;
+
+ for (i = 0; i < numbytes; i++) {
+ SwI2cCmd_t *cmd = &req->SwI2cCmds[i];
+
+ /* First 2 bytes are always writes, carrying the 2-byte EEPROM address */
+ if (i < 2)
+ cmd->CmdConfig = CMDCONFIG_READWRITE_MASK;
+ else
+ cmd->CmdConfig = write ? CMDCONFIG_READWRITE_MASK : 0;
+
+ /* Add RESTART for read after the address is filled */
+ cmd->CmdConfig |= (i == 2 && !write) ? CMDCONFIG_RESTART_MASK : 0;
+
+ /* Add STOP at the end */
+ cmd->CmdConfig |= (i == (numbytes - 1)) ? CMDCONFIG_STOP_MASK : 0;
+
+ /* Fill with data regardless of read or write, to simplify the code */
+ cmd->ReadWriteData = data[i];
+ }
+}
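+
+/*
+ * Illustrative (not from the patch) command stream for a 4-byte EEPROM
+ * read built by aldebaran_fill_i2c_req(): cmds 0-1 write the 16-bit
+ * EEPROM address, cmd 2 carries CMDCONFIG_RESTART_MASK to turn the bus
+ * around for reading, and the final cmd carries CMDCONFIG_STOP_MASK.
+ */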
+
+static int aldebaran_i2c_read_data(struct i2c_adapter *control,
+ uint8_t address,
+ uint8_t *data,
+ uint32_t numbytes)
+{
+ uint32_t i, ret = 0;
+ SwI2cRequest_t req;
+ struct amdgpu_device *adev = to_amdgpu_device(control);
+ struct smu_table_context *smu_table = &adev->smu.smu_table;
+ struct smu_table *table = &smu_table->driver_table;
+
+ if (numbytes > MAX_SW_I2C_COMMANDS) {
+ dev_err(adev->dev, "numbytes requested %d is over max allowed %d\n",
+ numbytes, MAX_SW_I2C_COMMANDS);
+ return -EINVAL;
+ }
+
+ memset(&req, 0, sizeof(req));
+ aldebaran_fill_i2c_req(&req, false, address, numbytes, data);
+
+ mutex_lock(&adev->smu.mutex);
+ /* Now read data starting with that address */
+ ret = smu_cmn_update_table(&adev->smu, SMU_TABLE_I2C_COMMANDS, 0, &req,
+ true);
+ mutex_unlock(&adev->smu.mutex);
+
+ if (!ret) {
+ SwI2cRequest_t *res = (SwI2cRequest_t *)table->cpu_addr;
+
+ /* Assume SMU fills res.SwI2cCmds[i].Data with read bytes */
+ for (i = 0; i < numbytes; i++)
+ data[i] = res->SwI2cCmds[i].ReadWriteData;
+
+ dev_dbg(adev->dev, "aldebaran_i2c_read_data, address = %x, bytes = %d, data :",
+ (uint16_t)address, numbytes);
+
+ print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_NONE,
+ 8, 1, data, numbytes, false);
+ } else
+ dev_err(adev->dev, "aldebaran_i2c_read_data - error occurred :%x", ret);
+
+ return ret;
+}
+
+static int aldebaran_i2c_write_data(struct i2c_adapter *control,
+ uint8_t address,
+ uint8_t *data,
+ uint32_t numbytes)
+{
+ uint32_t ret;
+ SwI2cRequest_t req;
+ struct amdgpu_device *adev = to_amdgpu_device(control);
+
+ if (numbytes > MAX_SW_I2C_COMMANDS) {
+ dev_err(adev->dev, "numbytes requested %d is over max allowed %d\n",
+ numbytes, MAX_SW_I2C_COMMANDS);
+ return -EINVAL;
+ }
+
+ memset(&req, 0, sizeof(req));
+ aldebaran_fill_i2c_req(&req, true, address, numbytes, data);
+
+ mutex_lock(&adev->smu.mutex);
+ ret = smu_cmn_update_table(&adev->smu, SMU_TABLE_I2C_COMMANDS, 0, &req, true);
+ mutex_unlock(&adev->smu.mutex);
+
+ if (!ret) {
+ dev_dbg(adev->dev, "aldebaran_i2c_write(), address = %x, bytes = %d , data: ",
+ (uint16_t)address, numbytes);
+
+ print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_NONE,
+ 8, 1, data, numbytes, false);
+ /*
+ * According to the EEPROM spec, the EEPROM needs up to 10 ms to flush
+ * its internal RX buffer after a STOP is issued at the end of a write
+ * transaction. During this time the EEPROM will not respond to any
+ * further commands, so wait a bit longer.
+ */
+ msleep(10);
+
+ } else
+ dev_err(adev->dev, "aldebaran_i2c_write- error occurred :%x", ret);
+
+ return ret;
+}
+
+static int aldebaran_i2c_xfer(struct i2c_adapter *i2c_adap,
+ struct i2c_msg *msgs, int num)
+{
+ uint32_t i, j, ret, data_size, data_chunk_size, next_eeprom_addr = 0;
+ uint8_t *data_ptr, data_chunk[MAX_SW_I2C_COMMANDS] = { 0 };
+
+ for (i = 0; i < num; i++) {
+ /*
+ * The SMU interface allows at most MAX_SW_I2C_COMMANDS bytes of data at
+ * once, so the data needs to be split into chunks and each chunk sent
+ * separately.
+ */
+ data_size = msgs[i].len - 2;
+ data_chunk_size = MAX_SW_I2C_COMMANDS - 2;
+ next_eeprom_addr = (msgs[i].buf[0] << 8 & 0xff00) | (msgs[i].buf[1] & 0xff);
+ data_ptr = msgs[i].buf + 2;
+
+ for (j = 0; j < data_size / data_chunk_size; j++) {
+ /* Insert the EEPROM dest address, bits 0-15 */
+ data_chunk[0] = ((next_eeprom_addr >> 8) & 0xff);
+ data_chunk[1] = (next_eeprom_addr & 0xff);
+
+ if (msgs[i].flags & I2C_M_RD) {
+ ret = aldebaran_i2c_read_data(i2c_adap,
+ (uint8_t)msgs[i].addr,
+ data_chunk, MAX_SW_I2C_COMMANDS);
+
+ memcpy(data_ptr, data_chunk + 2, data_chunk_size);
+ } else {
+ memcpy(data_chunk + 2, data_ptr, data_chunk_size);
+
+ ret = aldebaran_i2c_write_data(i2c_adap,
+ (uint8_t)msgs[i].addr,
+ data_chunk, MAX_SW_I2C_COMMANDS);
+ }
+
+ if (ret) {
+ num = -EIO;
+ goto fail;
+ }
+
+ next_eeprom_addr += data_chunk_size;
+ data_ptr += data_chunk_size;
+ }
+
+ if (data_size % data_chunk_size) {
+ data_chunk[0] = ((next_eeprom_addr >> 8) & 0xff);
+ data_chunk[1] = (next_eeprom_addr & 0xff);
+
+ if (msgs[i].flags & I2C_M_RD) {
+ ret = aldebaran_i2c_read_data(i2c_adap,
+ (uint8_t)msgs[i].addr,
+ data_chunk, (data_size % data_chunk_size) + 2);
+
+ memcpy(data_ptr, data_chunk + 2, data_size % data_chunk_size);
+ } else {
+ memcpy(data_chunk + 2, data_ptr, data_size % data_chunk_size);
+
+ ret = aldebaran_i2c_write_data(i2c_adap,
+ (uint8_t)msgs[i].addr,
+ data_chunk, (data_size % data_chunk_size) + 2);
+ }
+
+ if (ret) {
+ num = -EIO;
+ goto fail;
+ }
+ }
+ }
+
+fail:
+ return num;
+}
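+
+/*
+ * Chunking sketch under the constraints above: with MAX_SW_I2C_COMMANDS
+ * slots and 2 of them reserved for the EEPROM address, each transfer
+ * moves at most MAX_SW_I2C_COMMANDS - 2 payload bytes. For example, a
+ * 60-byte payload with MAX_SW_I2C_COMMANDS == 24 (hypothetical value)
+ * needs 60 / 22 = 2 full chunks plus one 16-byte remainder chunk.
+ */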
+
+static u32 aldebaran_i2c_func(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm aldebaran_i2c_algo = {
+ .master_xfer = aldebaran_i2c_xfer,
+ .functionality = aldebaran_i2c_func,
+};
+
+static int aldebaran_i2c_control_init(struct smu_context *smu, struct i2c_adapter *control)
+{
+ struct amdgpu_device *adev = to_amdgpu_device(control);
+ int res;
+
+ control->owner = THIS_MODULE;
+ control->class = I2C_CLASS_SPD;
+ control->dev.parent = &adev->pdev->dev;
+ control->algo = &aldebaran_i2c_algo;
+ snprintf(control->name, sizeof(control->name), "AMDGPU SMU");
+
+ res = i2c_add_adapter(control);
+ if (res)
+ DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
+
+ return res;
+}
+
+static void aldebaran_i2c_control_fini(struct smu_context *smu, struct i2c_adapter *control)
+{
+ i2c_del_adapter(control);
+}
+
static void aldebaran_get_unique_id(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
@@ -1432,6 +1693,57 @@ static ssize_t aldebaran_get_gpu_metrics(struct smu_context *smu,
return sizeof(struct gpu_metrics_v1_1);
}
+static int aldebaran_mode2_reset(struct smu_context *smu)
+{
+ u32 smu_version;
+ int ret = 0, index;
+ struct amdgpu_device *adev = smu->adev;
+ int timeout = 10;
+
+ smu_cmn_get_smc_version(smu, NULL, &smu_version);
+
+ index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG,
+ SMU_MSG_GfxDeviceDriverReset);
+
+ mutex_lock(&smu->message_lock);
+ if (smu_version >= 0x00441400) {
+ ret = smu_cmn_send_msg_without_waiting(smu, (uint16_t)index, SMU_RESET_MODE_2);
+ /* This is similar to FLR, wait till max FLR timeout */
+ msleep(100);
+ dev_dbg(smu->adev->dev, "restore config space...\n");
+ /* Restore the config space saved during init */
+ amdgpu_device_load_pci_state(adev->pdev);
+
+ dev_dbg(smu->adev->dev, "wait for reset ack\n");
+ while (ret == -ETIME && timeout) {
+ ret = smu_cmn_wait_for_response(smu);
+ /* Wait a bit more time for getting ACK */
+ if (ret == -ETIME) {
+ --timeout;
+ usleep_range(500, 1000);
+ continue;
+ }
+
+ if (ret != 1) {
+ dev_err(adev->dev, "failed to send mode2 message \tparam: 0x%08x response %#x\n",
+ SMU_RESET_MODE_2, ret);
+ goto out;
+ }
+ }
+
+ } else {
+ dev_err(adev->dev, "smu fw 0x%x does not support MSG_GfxDeviceDriverReset MSG\n",
+ smu_version);
+ }
+
+ if (ret == 1)
+ ret = 0;
+out:
+ mutex_unlock(&smu->message_lock);
+
+ return ret;
+}
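+
+/*
+ * Flow summary: fire SMU_RESET_MODE_2 without waiting, sleep through the
+ * FLR-like window, restore the PCI config space saved at init, then keep
+ * retrying smu_cmn_wait_for_response() (at most 10 extra attempts,
+ * 500-1000 us apart) while the mailbox still reports -ETIME, until the
+ * firmware acks with 1.
+ */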
+
static bool aldebaran_is_mode1_reset_supported(struct smu_context *smu)
{
#if 0
@@ -1460,6 +1772,19 @@ static bool aldebaran_is_mode2_reset_supported(struct smu_context *smu)
return true;
}
+static int aldebaran_set_mp1_state(struct smu_context *smu,
+ enum pp_mp1_state mp1_state)
+{
+ switch (mp1_state) {
+ case PP_MP1_STATE_UNLOAD:
+ return smu_cmn_set_mp1_state(smu, mp1_state);
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static const struct pptable_funcs aldebaran_ppt_funcs = {
/* init dpm */
.get_allowed_feature_mask = aldebaran_get_allowed_feature_mask,
@@ -1517,7 +1842,11 @@ static const struct pptable_funcs aldebaran_ppt_funcs = {
.mode1_reset_is_support = aldebaran_is_mode1_reset_supported,
.mode2_reset_is_support = aldebaran_is_mode2_reset_supported,
.mode1_reset = smu_v13_0_mode1_reset,
- .mode2_reset = smu_v13_0_mode2_reset,
+ .set_mp1_state = aldebaran_set_mp1_state,
+ .mode2_reset = aldebaran_mode2_reset,
+ .wait_for_event = smu_v13_0_wait_for_event,
+ .i2c_init = aldebaran_i2c_control_init,
+ .i2c_fini = aldebaran_i2c_control_fini,
};
void aldebaran_set_ppt_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
index bd3a9c89dc44..0864083e7435 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
@@ -72,8 +72,8 @@ MODULE_FIRMWARE("amdgpu/aldebaran_smc.bin");
#define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK 0xC000
#define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT 0xE
-static int link_width[] = {0, 1, 2, 4, 8, 12, 16};
-static int link_speed[] = {25, 50, 80, 160};
+static const int link_width[] = {0, 1, 2, 4, 8, 12, 16};
+static const int link_speed[] = {25, 50, 80, 160};
int smu_v13_0_init_microcode(struct smu_context *smu)
{
@@ -276,8 +276,6 @@ int smu_v13_0_setup_pptable(struct smu_context *smu)
void *table;
uint16_t version_major, version_minor;
- /* temporarily hardcode to use vbios pptable */
- smu->smu_table.boot_values.pp_table_id = 0;
if (amdgpu_smu_pptable_id >= 0) {
smu->smu_table.boot_values.pp_table_id = amdgpu_smu_pptable_id;
@@ -1374,19 +1372,43 @@ int smu_v13_0_mode1_reset(struct smu_context *smu)
return ret;
}
-int smu_v13_0_mode2_reset(struct smu_context *smu)
+static int smu_v13_0_wait_for_reset_complete(struct smu_context *smu,
+ uint64_t event_arg)
{
- u32 smu_version;
int ret = 0;
- struct amdgpu_device *adev = smu->adev;
- smu_cmn_get_smc_version(smu, NULL, &smu_version);
- if (smu_version >= 0x00440700)
- ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, SMU_RESET_MODE_2, NULL);
- else
- dev_err(adev->dev, "smu fw 0x%x does not support MSG_GfxDeviceDriverReset MSG\n", smu_version);
- /*TODO: mode2 reset wait time should be shorter, will modify it later*/
+
+ dev_dbg(smu->adev->dev, "waiting for smu reset complete\n");
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GfxDriverResetRecovery, NULL);
+
+ return ret;
+}
+
+int smu_v13_0_wait_for_event(struct smu_context *smu, enum smu_event_type event,
+ uint64_t event_arg)
+{
+ int ret = -EINVAL;
+
+ switch (event) {
+ case SMU_EVENT_RESET_COMPLETE:
+ ret = smu_v13_0_wait_for_reset_complete(smu, event_arg);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+int smu_v13_0_mode2_reset(struct smu_context *smu)
+{
+ int ret;
+
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset,
+ SMU_RESET_MODE_2, NULL);
+ /* TODO: mode2 reset wait time should be shorter; add an ASIC-specific function if required */
if (!ret)
msleep(SMU13_MODE1_RESET_WAIT_TIME_IN_MS);
+
return ret;
}
@@ -1686,10 +1708,14 @@ int smu_v13_0_get_dpm_level_count(struct smu_context *smu,
enum smu_clk_type clk_type,
uint32_t *value)
{
- return smu_v13_0_get_dpm_freq_by_index(smu,
- clk_type,
- 0xff,
- value);
+ int ret;
+
+ ret = smu_v13_0_get_dpm_freq_by_index(smu, clk_type, 0xff, value);
+ /* FW returns 0 based max level, increment by one */
+ if (!ret && value)
+ ++(*value);
+
+ return ret;
}
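+
+/*
+ * Worked example of the increment above: if the firmware reports a
+ * 0-based max level index of 3 for a clock domain, the driver now
+ * returns a level count of 4.
+ */
+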
int smu_v13_0_set_single_dpm_table(struct smu_context *smu,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
index 4b45953b36d8..dc7d2e71aa6f 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
@@ -76,10 +76,10 @@ static void smu_cmn_read_arg(struct smu_context *smu,
*arg = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
}
-static int smu_cmn_wait_for_response(struct smu_context *smu)
+int smu_cmn_wait_for_response(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
- uint32_t cur_value, i, timeout = adev->usec_timeout * 10;
+ uint32_t cur_value, i, timeout = adev->usec_timeout * 20;
for (i = 0; i < timeout; i++) {
cur_value = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
@@ -780,3 +780,31 @@ void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev)
header->structure_size = structure_size;
}
+
+int smu_cmn_set_mp1_state(struct smu_context *smu,
+ enum pp_mp1_state mp1_state)
+{
+ enum smu_message_type msg;
+ int ret;
+
+ switch (mp1_state) {
+ case PP_MP1_STATE_SHUTDOWN:
+ msg = SMU_MSG_PrepareMp1ForShutdown;
+ break;
+ case PP_MP1_STATE_UNLOAD:
+ msg = SMU_MSG_PrepareMp1ForUnload;
+ break;
+ case PP_MP1_STATE_RESET:
+ msg = SMU_MSG_PrepareMp1ForReset;
+ break;
+ case PP_MP1_STATE_NONE:
+ default:
+ return 0;
+ }
+
+ ret = smu_cmn_send_smc_msg(smu, msg, NULL);
+ if (ret)
+ dev_err(smu->adev->dev, "[PrepareMp1] Failed!\n");
+
+ return ret;
+}
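+
+/*
+ * ASICs can narrow this generic mapping: aldebaran_set_mp1_state() in
+ * this same series forwards only PP_MP1_STATE_UNLOAD and returns
+ * -EINVAL for the other states.
+ */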
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
index c69250185575..da6ff6f024f9 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
@@ -37,6 +37,8 @@ int smu_cmn_send_smc_msg(struct smu_context *smu,
enum smu_message_type msg,
uint32_t *read_arg);
+int smu_cmn_wait_for_response(struct smu_context *smu);
+
int smu_cmn_to_asic_specific_index(struct smu_context *smu,
enum smu_cmn2asic_mapping_type type,
uint32_t index);
@@ -99,5 +101,8 @@ int smu_cmn_get_metrics_table(struct smu_context *smu,
void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev);
+int smu_cmn_set_mp1_state(struct smu_context *smu,
+ enum pp_mp1_state mp1_state);
+
#endif
#endif