Diffstat (limited to 'drivers/gpu')
-rw-r--r--drivers/gpu/drm/Kconfig5
-rw-r--r--drivers/gpu/drm/Makefile1
-rw-r--r--drivers/gpu/drm/amd/acp/Makefile21
-rw-r--r--drivers/gpu/drm/amd/amdgpu/Makefile32
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h204
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c100
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c115
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c188
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c258
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c38
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c792
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h (renamed from drivers/gpu/drm/amd/display/dc/basics/grph_object_id.c)73
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c1498
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c21
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c22
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c49
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c459
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h91
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_job.c30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c19
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sched.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h52
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c131
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c32
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c26
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c866
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h75
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ci_dpm.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik.c212
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik_ih.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik_sdma.c24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cikd.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/clearstate_gfx9.h8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cz_ih.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v10_0.c24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v11_0.c30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c25
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c159
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c287
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c43
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c66
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c106
-rw-r--r--drivers/gpu/drm/amd/amdgpu/iceland_ih.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/kv_dpm.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c50
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c52
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c117
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h26
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c151
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_0.h22
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v10_0.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v3_1.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c38
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c113
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c276
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si.c156
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_dma.c26
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_ih.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15.c252
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15.h20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15_common.h62
-rw-r--r--drivers/gpu/drm/amd/amdgpu/tonga_ih.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c34
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c84
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v3_0.c10
-rwxr-xr-xdrivers/gpu/drm/amd/amdgpu/vce_v4_0.c26
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c78
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vega10_ih.c17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c56
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vi.c186
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vid.h2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/Makefile26
-rw-r--r--drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm1384
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_chardev.c46
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_crat.c1267
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_crat.h42
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c14
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c75
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device.c31
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c112
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h8
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c9
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_events.c14
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c7
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_module.c17
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h4
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c48
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c167
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c59
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_pasid.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_priv.h79
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process.c259
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c78
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.c1061
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.h33
-rw-r--r--drivers/gpu/drm/amd/display/Makefile21
-rw-r--r--drivers/gpu/drm/amd/display/TODO3
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/Makefile21
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c35
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h2
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c51
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/Makefile21
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/Makefile23
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/conversion.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c28
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/Makefile21
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser.c60
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table.c21
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table2.c13
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/Makefile21
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.c17
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_math.c27
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c70
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc.c51
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_debug.c22
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c145
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link.c183
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c11
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_resource.c101
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_stream.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc.h9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_helper.c22
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_hw_types.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_stream.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_types.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/Makefile21
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_abm.c18
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_abm.h8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c34
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c15
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce100/Makefile21
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.h23
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/Makefile21
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c90
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c23
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce112/Makefile21
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce120/Makefile23
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/Makefile21
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/Makefile23
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c294
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c19
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h493
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c67
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c91
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h224
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c568
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c532
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h91
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c74
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h43
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c (renamed from drivers/gpu/drm/amd/display/dc/dcn10/dcn10_timing_generator.c)353
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h (renamed from drivers/gpu/drm/amd/display/dc/dcn10/dcn10_timing_generator.h)117
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c13
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/Makefile21
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c29
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_calc.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/soc_bounding_box.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/Makefile21
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/Makefile21
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/core_types.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/abm.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h14
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h63
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h22
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h159
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/opp.h39
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h22
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h60
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h11
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/Makefile21
-rw-r--r--drivers/gpu/drm/amd/display/dc/os_types.h14
-rw-r--r--drivers/gpu/drm/amd/display/dc/virtual/Makefile21
-rw-r--r--drivers/gpu/drm/amd/display/include/fixed31_32.h7
-rw-r--r--drivers/gpu/drm/amd/display/include/grph_object_id.h4
-rw-r--r--drivers/gpu/drm/amd/display/modules/freesync/Makefile21
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_default.h7
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_offset.h14
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h45
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_1_offset.h14
-rw-r--r--drivers/gpu/drm/amd/include/kgd_kfd_interface.h52
-rw-r--r--drivers/gpu/drm/amd/include/vi_structs.h2
-rw-r--r--drivers/gpu/drm/amd/lib/Makefile21
-rw-r--r--drivers/gpu/drm/amd/powerplay/Makefile22
-rw-r--r--drivers/gpu/drm/amd/powerplay/amd_powerplay.c10
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/Makefile22
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c29
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c33
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c1
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/pp_overdriver.c24
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c4
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c182
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.h15
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c35
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c48
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h1
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h1
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/hwmgr.h35
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h7
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smu72.h24
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smu72_discrete.h24
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/Makefile22
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c6
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c13
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c5
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c7
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c5
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.c8
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c2
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c5
-rw-r--r--drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h60
-rw-r--r--drivers/gpu/drm/amd/scheduler/gpu_scheduler.h186
-rw-r--r--drivers/gpu/drm/amd/scheduler/spsc_queue.h121
-rw-r--r--drivers/gpu/drm/arm/malidp_drv.c39
-rw-r--r--drivers/gpu/drm/arm/malidp_drv.h1
-rw-r--r--drivers/gpu/drm/armada/armada_crtc.c434
-rw-r--r--drivers/gpu/drm/armada/armada_crtc.h26
-rw-r--r--drivers/gpu/drm/armada/armada_drm.h1
-rw-r--r--drivers/gpu/drm/armada/armada_drv.c8
-rw-r--r--drivers/gpu/drm/armada/armada_fb.c11
-rw-r--r--drivers/gpu/drm/armada/armada_fbdev.c8
-rw-r--r--drivers/gpu/drm/armada/armada_overlay.c302
-rw-r--r--drivers/gpu/drm/armada/armada_trace.h24
-rw-r--r--drivers/gpu/drm/ast/ast_ttm.c6
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c26
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h2
-rw-r--r--drivers/gpu/drm/bochs/bochs_mm.c1
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix_dp_core.c2
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_ttm.c6
-rw-r--r--drivers/gpu/drm/drm_atomic.c45
-rw-r--r--drivers/gpu/drm/drm_blend.c8
-rw-r--r--drivers/gpu/drm/drm_connector.c65
-rw-r--r--drivers/gpu/drm/drm_crtc_internal.h1
-rw-r--r--drivers/gpu/drm/drm_edid.c58
-rw-r--r--drivers/gpu/drm/drm_fb_cma_helper.c119
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c191
-rw-r--r--drivers/gpu/drm/drm_framebuffer.c2
-rw-r--r--drivers/gpu/drm/drm_gem_framebuffer_helper.c6
-rw-r--r--drivers/gpu/drm/drm_lease.c4
-rw-r--r--drivers/gpu/drm/drm_mm.c8
-rw-r--r--drivers/gpu/drm/drm_mode_config.c8
-rw-r--r--drivers/gpu/drm/drm_panel_orientation_quirks.c3
-rw-r--r--drivers/gpu/drm/drm_syncobj.c45
-rw-r--r--drivers/gpu/drm/etnaviv/Kconfig9
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_buffer.c40
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_cmd_parser.c1
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c29
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h18
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_drv.c22
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_drv.h14
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_dump.c23
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem.c197
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem.h22
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c7
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c199
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gpu.c215
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gpu.h11
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_iommu.c5
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c2
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_mmu.c14
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_perfmon.c4
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_perfmon.h2
-rw-r--r--drivers/gpu/drm/exynos/Kconfig11
-rw-r--r--drivers/gpu/drm/exynos/Makefile1
-rw-r--r--drivers/gpu/drm/exynos/exynos5433_drm_decon.c8
-rw-r--r--drivers/gpu/drm/exynos/exynos7_drm_decon.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c66
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.h7
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fb.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fbdev.c18
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fbdev.h2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.c15
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.h2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_ipp.c1806
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_ipp.h252
-rw-r--r--drivers/gpu/drm/exynos/regs-decon5433.h209
-rw-r--r--drivers/gpu/drm/exynos/regs-decon7.h353
-rw-r--r--drivers/gpu/drm/gma500/framebuffer.c9
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.c15
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c5
-rw-r--r--drivers/gpu/drm/i915/Kconfig.debug2
-rw-r--r--drivers/gpu/drm/i915/gvt/Makefile3
-rw-r--r--drivers/gpu/drm/i915/gvt/cfg_space.c3
-rw-r--r--drivers/gpu/drm/i915/gvt/cmd_parser.c39
-rw-r--r--drivers/gpu/drm/i915/gvt/display.c83
-rw-r--r--drivers/gpu/drm/i915/gvt/display.h2
-rw-r--r--drivers/gpu/drm/i915/gvt/dmabuf.c537
-rw-r--r--drivers/gpu/drm/i915/gvt/dmabuf.h67
-rw-r--r--drivers/gpu/drm/i915/gvt/edid.c22
-rw-r--r--drivers/gpu/drm/i915/gvt/execlist.c8
-rw-r--r--drivers/gpu/drm/i915/gvt/fb_decoder.c514
-rw-r--r--drivers/gpu/drm/i915/gvt/fb_decoder.h169
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.c37
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.h3
-rw-r--r--drivers/gpu/drm/i915/gvt/gvt.c5
-rw-r--r--drivers/gpu/drm/i915/gvt/gvt.h56
-rw-r--r--drivers/gpu/drm/i915/gvt/handlers.c756
-rw-r--r--drivers/gpu/drm/i915/gvt/hypercall.h3
-rw-r--r--drivers/gpu/drm/i915/gvt/kvmgt.c171
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio.c89
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio.h7
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio_context.c412
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio_context.h (renamed from drivers/gpu/drm/i915/gvt/render.h)9
-rw-r--r--drivers/gpu/drm/i915/gvt/mpt.h45
-rw-r--r--drivers/gpu/drm/i915/gvt/opregion.c137
-rw-r--r--drivers/gpu/drm/i915/gvt/render.c406
-rw-r--r--drivers/gpu/drm/i915/gvt/sched_policy.c5
-rw-r--r--drivers/gpu/drm/i915/gvt/sched_policy.h2
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.c21
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.h7
-rw-r--r--drivers/gpu/drm/i915/gvt/trace.h15
-rw-r--r--drivers/gpu/drm/i915/gvt/vgpu.c41
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c164
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c75
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h550
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c228
-rw-r--r--drivers/gpu/drm/i915/i915_gem_clflush.c2
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context.c6
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c2
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c106
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.h19
-rw-r--r--drivers/gpu/drm/i915/i915_gem_internal.c4
-rw-r--r--drivers/gpu/drm/i915/i915_gem_object.h2
-rw-r--r--drivers/gpu/drm/i915/i915_gem_request.c41
-rw-r--r--drivers/gpu/drm/i915/i915_gem_request.h2
-rw-r--r--drivers/gpu/drm/i915/i915_gem_stolen.c275
-rw-r--r--drivers/gpu/drm/i915/i915_gem_tiling.c10
-rw-r--r--drivers/gpu/drm/i915/i915_gem_timeline.c5
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c33
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c2
-rw-r--r--drivers/gpu/drm/i915/i915_memcpy.c7
-rw-r--r--drivers/gpu/drm/i915/i915_params.c44
-rw-r--r--drivers/gpu/drm/i915/i915_params.h11
-rw-r--r--drivers/gpu/drm/i915/i915_pci.c2
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h10
-rw-r--r--drivers/gpu/drm/i915/i915_sw_fence.c6
-rw-r--r--drivers/gpu/drm/i915/i915_trace.h40
-rw-r--r--drivers/gpu/drm/i915/i915_utils.h15
-rw-r--r--drivers/gpu/drm/i915/i915_vma.c34
-rw-r--r--drivers/gpu/drm/i915/i915_vma.h35
-rw-r--r--drivers/gpu/drm/i915/intel_breadcrumbs.c73
-rw-r--r--drivers/gpu/drm/i915/intel_csr.c4
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c6
-rw-r--r--drivers/gpu/drm/i915/intel_device_info.c103
-rw-r--r--drivers/gpu/drm/i915/intel_device_info.h183
-rw-r--r--drivers/gpu/drm/i915/intel_display.c240
-rw-r--r--drivers/gpu/drm/i915/intel_display.h321
-rw-r--r--drivers/gpu/drm/i915/intel_engine_cs.c105
-rw-r--r--drivers/gpu/drm/i915/intel_fbc.c13
-rw-r--r--drivers/gpu/drm/i915/intel_fbdev.c3
-rw-r--r--drivers/gpu/drm/i915/intel_guc.c111
-rw-r--r--drivers/gpu/drm/i915/intel_guc.h4
-rw-r--r--drivers/gpu/drm/i915/intel_guc_fw.c63
-rw-r--r--drivers/gpu/drm/i915/intel_guc_fw.h2
-rw-r--r--drivers/gpu/drm/i915/intel_guc_log.c29
-rw-r--r--drivers/gpu/drm/i915/intel_guc_submission.c300
-rw-r--r--drivers/gpu/drm/i915/intel_guc_submission.h2
-rw-r--r--drivers/gpu/drm/i915/intel_gvt.c2
-rw-r--r--drivers/gpu/drm/i915/intel_hangcheck.c17
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c2
-rw-r--r--drivers/gpu/drm/i915/intel_huc.c81
-rw-r--r--drivers/gpu/drm/i915/intel_huc.h6
-rw-r--r--drivers/gpu/drm/i915/intel_i2c.c61
-rw-r--r--drivers/gpu/drm/i915/intel_lpe_audio.c2
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.c30
-rw-r--r--drivers/gpu/drm/i915/intel_opregion.c2
-rw-r--r--drivers/gpu/drm/i915/intel_opregion.h106
-rw-r--r--drivers/gpu/drm/i915/intel_overlay.c4
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c40
-rw-r--r--drivers/gpu/drm/i915/intel_psr.c16
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.h10
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.c8
-rw-r--r--drivers/gpu/drm/i915/intel_uc.c249
-rw-r--r--drivers/gpu/drm/i915/intel_uc.h23
-rw-r--r--drivers/gpu/drm/i915/intel_uc_fw.c2
-rw-r--r--drivers/gpu/drm/i915/intel_uc_fw.h5
-rw-r--r--drivers/gpu/drm/i915/selftests/huge_pages.c2
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_gtt.c4
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_guc.c112
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_hangcheck.c322
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_gem_device.c7
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_gtt.c4
-rw-r--r--drivers/gpu/drm/imx/imx-drm-core.c34
-rw-r--r--drivers/gpu/drm/imx/imx-drm.h1
-rw-r--r--drivers/gpu/drm/imx/ipuv3-plane.c102
-rw-r--r--drivers/gpu/drm/meson/meson_drv.c9
-rw-r--r--drivers/gpu/drm/meson/meson_dw_hdmi.c13
-rw-r--r--drivers/gpu/drm/meson/meson_registers.h4
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_ttm.c6
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_gpu.c38
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_power.c8
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_device.c140
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.c85
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.h4
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h7
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c9
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c18
-rw-r--r--drivers/gpu/drm/msm/msm_drv.h3
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c14
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.c112
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.h14
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c18
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c8
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vga.c3
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c2
-rw-r--r--drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c2
-rw-r--r--drivers/gpu/drm/omapdrm/displays/connector-dvi.c2
-rw-r--r--drivers/gpu/drm/omapdrm/displays/connector-hdmi.c2
-rw-r--r--drivers/gpu/drm/omapdrm/displays/encoder-opa362.c2
-rw-r--r--drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c5
-rw-r--r--drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c2
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-dpi.c2
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c291
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c2
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c2
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c2
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c13
-rw-r--r--drivers/gpu/drm/omapdrm/dss/base.c15
-rw-r--r--drivers/gpu/drm/omapdrm/dss/core.c55
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dispc.c27
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dispc.h5
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dispc_coefs.c4
-rw-r--r--drivers/gpu/drm/omapdrm/dss/display.c16
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dpi.c8
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dsi.c14
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dss-of.c2
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dss.c25
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dss.h38
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi4.c13
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi4_core.c3
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi5.c14
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi5_core.c3
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi_phy.c2
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi_pll.c2
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi_wp.c2
-rw-r--r--drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c2
-rw-r--r--drivers/gpu/drm/omapdrm/dss/omapdss.h12
-rw-r--r--drivers/gpu/drm/omapdrm/dss/output.c2
-rw-r--r--drivers/gpu/drm/omapdrm/dss/pll.c2
-rw-r--r--drivers/gpu/drm/omapdrm/dss/sdi.c2
-rw-r--r--drivers/gpu/drm/omapdrm/dss/venc.c18
-rw-r--r--drivers/gpu/drm/omapdrm/dss/video-pll.c20
-rw-r--r--drivers/gpu/drm/omapdrm/omap_connector.c10
-rw-r--r--drivers/gpu/drm/omapdrm/omap_connector.h37
-rw-r--r--drivers/gpu/drm/omapdrm/omap_crtc.c41
-rw-r--r--drivers/gpu/drm/omapdrm/omap_crtc.h43
-rw-r--r--drivers/gpu/drm/omapdrm/omap_debugfs.c4
-rw-r--r--drivers/gpu/drm/omapdrm/omap_dmm_priv.h14
-rw-r--r--drivers/gpu/drm/omapdrm/omap_dmm_tiler.c25
-rw-r--r--drivers/gpu/drm/omapdrm/omap_dmm_tiler.h1
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.c45
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.h140
-rw-r--r--drivers/gpu/drm/omapdrm/omap_encoder.c4
-rw-r--r--drivers/gpu/drm/omapdrm/omap_encoder.h33
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fb.c4
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fb.h46
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fbdev.c7
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fbdev.h39
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem.c11
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem.h99
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c4
-rw-r--r--drivers/gpu/drm/omapdrm/omap_irq.c4
-rw-r--r--drivers/gpu/drm/omapdrm/omap_irq.h39
-rw-r--r--drivers/gpu/drm/omapdrm/omap_plane.c4
-rw-r--r--drivers/gpu/drm/omapdrm/omap_plane.h37
-rw-r--r--drivers/gpu/drm/omapdrm/tcm-sita.c4
-rw-r--r--drivers/gpu/drm/omapdrm/tcm-sita.h4
-rw-r--r--drivers/gpu/drm/omapdrm/tcm.h2
-rw-r--r--drivers/gpu/drm/panel/Kconfig8
-rw-r--r--drivers/gpu/drm/panel/Makefile1
-rw-r--r--drivers/gpu/drm/panel/panel-ilitek-ili9322.c962
-rw-r--r--drivers/gpu/drm/panel/panel-lvds.c23
-rw-r--r--drivers/gpu/drm/pl111/pl111_drm.h1
-rw-r--r--drivers/gpu/drm/pl111/pl111_drv.c16
-rw-r--r--drivers/gpu/drm/qxl/qxl_ttm.c9
-rw-r--r--drivers/gpu/drm/radeon/cik.c114
-rw-r--r--drivers/gpu/drm/radeon/cik_reg.h2
-rw-r--r--drivers/gpu/drm/radeon/radeon.h6
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c32
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c19
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_drv.c9
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_fb.c9
-rw-r--r--drivers/gpu/drm/scheduler/Makefile26
-rw-r--r--drivers/gpu/drm/scheduler/gpu_scheduler.c (renamed from drivers/gpu/drm/amd/scheduler/gpu_scheduler.c)296
-rw-r--r--drivers/gpu/drm/scheduler/sched_fence.c (renamed from drivers/gpu/drm/amd/scheduler/sched_fence.c)122
-rw-r--r--drivers/gpu/drm/sti/sti_drv.c27
-rw-r--r--drivers/gpu/drm/sti/sti_drv.h1
-rw-r--r--drivers/gpu/drm/stm/drv.c38
-rw-r--r--drivers/gpu/drm/stm/dw_mipi_dsi-stm.c5
-rw-r--r--drivers/gpu/drm/stm/ltdc.c6
-rw-r--r--drivers/gpu/drm/stm/ltdc.h1
-rw-r--r--drivers/gpu/drm/sun4i/Makefile1
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_dotclock.c10
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_drv.c15
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_drv.h2
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_framebuffer.c18
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_framebuffer.h2
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_lvds.c177
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_lvds.h12
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tcon.c244
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tcon.h31
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_mixer.c21
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_mixer.h3
-rw-r--r--drivers/gpu/drm/tegra/drm.c13
-rw-r--r--drivers/gpu/drm/tegra/drm.h4
-rw-r--r--drivers/gpu/drm/tegra/fb.c14
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_crtc.c51
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_drv.c26
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_drv.h2
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_regs.h2
-rw-r--r--drivers/gpu/drm/tinydrm/Kconfig10
-rw-r--r--drivers/gpu/drm/tinydrm/Makefile1
-rw-r--r--drivers/gpu/drm/tinydrm/core/tinydrm-core.c37
-rw-r--r--drivers/gpu/drm/tinydrm/ili9225.c7
-rw-r--r--drivers/gpu/drm/tinydrm/mi0283qt.c3
-rw-r--r--drivers/gpu/drm/tinydrm/st7586.c3
-rw-r--r--drivers/gpu/drm/tinydrm/st7735r.c215
-rw-r--r--drivers/gpu/drm/ttm/ttm_agp_backend.c4
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c125
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c23
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_vm.c20
-rw-r--r--drivers/gpu/drm/ttm/ttm_memory.c37
-rw-r--r--drivers/gpu/drm/ttm/ttm_object.c13
-rw-r--r--drivers/gpu/drm/ttm/ttm_page_alloc.c12
-rw-r--r--drivers/gpu/drm/ttm/ttm_page_alloc_dma.c7
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c5
-rw-r--r--drivers/gpu/drm/tve200/tve200_drm.h1
-rw-r--r--drivers/gpu/drm/tve200/tve200_drv.c16
-rw-r--r--drivers/gpu/drm/vc4/vc4_bo.c3
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.c13
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.h2
-rw-r--r--drivers/gpu/drm/vc4/vc4_gem.c4
-rw-r--r--drivers/gpu/drm/vc4/vc4_irq.c1
-rw-r--r--drivers/gpu/drm/vc4/vc4_kms.c18
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_object.c6
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_ttm.c6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_binding.c6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c17
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_context.c6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fence.c6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_mob.c13
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_shader.c18
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_so.c6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_surface.c12
-rw-r--r--drivers/gpu/drm/zte/zx_drm_drv.c48
-rw-r--r--drivers/gpu/ipu-v3/Kconfig4
-rw-r--r--drivers/gpu/ipu-v3/ipu-cpmem.c1
-rw-r--r--drivers/gpu/ipu-v3/ipu-ic.c1
-rw-r--r--drivers/gpu/ipu-v3/ipu-pre.c29
-rw-r--r--drivers/gpu/ipu-v3/ipu-prg.c84
-rw-r--r--drivers/gpu/ipu-v3/ipu-prv.h4
599 files changed, 23055 insertions, 14658 deletions
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index d853989848d6..0bc374459440 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -154,6 +154,10 @@ config DRM_VM
bool
depends on DRM && MMU
+config DRM_SCHED
+ tristate
+ depends on DRM
+
source "drivers/gpu/drm/i2c/Kconfig"
source "drivers/gpu/drm/arm/Kconfig"
@@ -183,6 +187,7 @@ config DRM_AMDGPU
depends on DRM && PCI && MMU
select FW_LOADER
select DRM_KMS_HELPER
+ select DRM_SCHED
select DRM_TTM
select POWER_SUPPLY
select HWMON
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index e5bf68b9c171..dd5ae67f8e2b 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -102,3 +102,4 @@ obj-$(CONFIG_DRM_MXSFB) += mxsfb/
obj-$(CONFIG_DRM_TINYDRM) += tinydrm/
obj-$(CONFIG_DRM_PL111) += pl111/
obj-$(CONFIG_DRM_TVE200) += tve200/
+obj-$(CONFIG_DRM_SCHED) += scheduler/
diff --git a/drivers/gpu/drm/amd/acp/Makefile b/drivers/gpu/drm/amd/acp/Makefile
index 8a08e81ee90d..d4176a3fb706 100644
--- a/drivers/gpu/drm/amd/acp/Makefile
+++ b/drivers/gpu/drm/amd/acp/Makefile
@@ -1,4 +1,25 @@
#
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
# Makefile for the ACP, which is a sub-component
# of AMDSOC/AMDGPU drm driver.
# It provides the HW control for ACP related functionalities.
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 78d609123420..d6e5b7273853 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -1,4 +1,24 @@
-# SPDX-License-Identifier: GPL-2.0
+#
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
#
# Makefile for the drm device driver. This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
@@ -32,7 +52,8 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
amdgpu_prime.o amdgpu_vm.o amdgpu_ib.o amdgpu_pll.o \
amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \
amdgpu_gtt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o amdgpu_atomfirmware.o \
- amdgpu_queue_mgr.o amdgpu_vf_error.o amdgpu_sched.o
+ amdgpu_queue_mgr.o amdgpu_vf_error.o amdgpu_sched.o amdgpu_debugfs.o \
+ amdgpu_ids.o
# add asic specific block
amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \
@@ -42,7 +63,7 @@ amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \
amdgpu-$(CONFIG_DRM_AMDGPU_SI)+= si.o gmc_v6_0.o gfx_v6_0.o si_ih.o si_dma.o dce_v6_0.o si_dpm.o si_smc.o
amdgpu-y += \
- vi.o mxgpu_vi.o nbio_v6_1.o soc15.o mxgpu_ai.o nbio_v7_0.o
+ vi.o mxgpu_vi.o nbio_v6_1.o soc15.o mxgpu_ai.o nbio_v7_0.o vega10_reg_init.o
# add GMC block
amdgpu-y += \
@@ -115,10 +136,7 @@ amdgpu-y += \
amdgpu-y += amdgpu_cgs.o
# GPU scheduler
-amdgpu-y += \
- ../scheduler/gpu_scheduler.o \
- ../scheduler/sched_fence.o \
- amdgpu_job.o
+amdgpu-y += amdgpu_job.o
# ACP componet
ifneq ($(CONFIG_DRM_AMD_ACP),)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 5e2958a79928..d5a2eefd6c3e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -45,6 +45,7 @@
#include <drm/drmP.h>
#include <drm/drm_gem.h>
#include <drm/amdgpu_drm.h>
+#include <drm/gpu_scheduler.h>
#include <kgd_kfd_interface.h>
#include "dm_pp_interface.h"
@@ -68,10 +69,9 @@
#include "amdgpu_vcn.h"
#include "amdgpu_mn.h"
#include "amdgpu_dm.h"
-#include "gpu_scheduler.h"
#include "amdgpu_virt.h"
#include "amdgpu_gart.h"
-
+#include "amdgpu_debugfs.h"
/*
* Modules parameters.
@@ -126,6 +126,7 @@ extern int amdgpu_param_buf_per_se;
extern int amdgpu_job_hang_limit;
extern int amdgpu_lbpw;
extern int amdgpu_compute_multipipe;
+extern int amdgpu_gpu_recovery;
#ifdef CONFIG_DRM_AMDGPU_SI
extern int amdgpu_si_support;
@@ -223,17 +224,18 @@ enum amdgpu_kiq_irq {
AMDGPU_CP_KIQ_IRQ_LAST
};
-int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
- enum amd_ip_block_type block_type,
- enum amd_clockgating_state state);
-int amdgpu_set_powergating_state(struct amdgpu_device *adev,
- enum amd_ip_block_type block_type,
- enum amd_powergating_state state);
-void amdgpu_get_clockgating_state(struct amdgpu_device *adev, u32 *flags);
-int amdgpu_wait_for_idle(struct amdgpu_device *adev,
- enum amd_ip_block_type block_type);
-bool amdgpu_is_idle(struct amdgpu_device *adev,
- enum amd_ip_block_type block_type);
+int amdgpu_device_ip_set_clockgating_state(struct amdgpu_device *adev,
+ enum amd_ip_block_type block_type,
+ enum amd_clockgating_state state);
+int amdgpu_device_ip_set_powergating_state(struct amdgpu_device *adev,
+ enum amd_ip_block_type block_type,
+ enum amd_powergating_state state);
+void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
+ u32 *flags);
+int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
+ enum amd_ip_block_type block_type);
+bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
+ enum amd_ip_block_type block_type);
#define AMDGPU_MAX_IP_NUM 16
@@ -258,15 +260,16 @@ struct amdgpu_ip_block {
const struct amdgpu_ip_block_version *version;
};
-int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
- enum amd_ip_block_type type,
- u32 major, u32 minor);
+int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
+ enum amd_ip_block_type type,
+ u32 major, u32 minor);
-struct amdgpu_ip_block * amdgpu_get_ip_block(struct amdgpu_device *adev,
- enum amd_ip_block_type type);
+struct amdgpu_ip_block *
+amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
+ enum amd_ip_block_type type);
-int amdgpu_ip_block_add(struct amdgpu_device *adev,
- const struct amdgpu_ip_block_version *ip_block_version);
+int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
+ const struct amdgpu_ip_block_version *ip_block_version);
/* provided by hw blocks that can move/clear data. e.g., gfx or sdma */
struct amdgpu_buffer_funcs {
@@ -346,8 +349,9 @@ struct amdgpu_gart_funcs {
uint64_t (*get_vm_pte_flags)(struct amdgpu_device *adev,
uint32_t flags);
/* get the pde for a given mc addr */
- u64 (*get_vm_pde)(struct amdgpu_device *adev, u64 addr);
- uint32_t (*get_invalidate_req)(unsigned int vm_id);
+ void (*get_vm_pde)(struct amdgpu_device *adev, int level,
+ u64 *dst, u64 *flags);
+ uint32_t (*get_invalidate_req)(unsigned int vmid);
};
/* provided by the ih block */
@@ -373,9 +377,6 @@ struct amdgpu_dummy_page {
struct page *page;
dma_addr_t addr;
};
-int amdgpu_dummy_page_init(struct amdgpu_device *adev);
-void amdgpu_dummy_page_fini(struct amdgpu_device *adev);
-
/*
* Clocks
@@ -423,7 +424,6 @@ struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *);
void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj);
void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
int amdgpu_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
-int amdgpu_gem_debugfs_init(struct amdgpu_device *adev);
/* sub-allocation manager, it has to be protected by another lock.
* By conception this is an helper for other part of the driver
@@ -540,6 +540,7 @@ struct amdgpu_mc {
u64 private_aperture_end;
/* protects concurrent invalidation */
spinlock_t invalidate_lock;
+ bool translate_further;
};
/*
@@ -650,12 +651,6 @@ typedef enum _AMDGPU_DOORBELL64_ASSIGNMENT
AMDGPU_DOORBELL64_INVALID = 0xFFFF
} AMDGPU_DOORBELL64_ASSIGNMENT;
-
-void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
- phys_addr_t *aperture_base,
- size_t *aperture_size,
- size_t *start_offset);
-
/*
* IRQS.
*/
@@ -689,7 +684,7 @@ struct amdgpu_ib {
uint32_t flags;
};
-extern const struct amd_sched_backend_ops amdgpu_sched_ops;
+extern const struct drm_sched_backend_ops amdgpu_sched_ops;
int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
struct amdgpu_job **job, struct amdgpu_vm *vm);
@@ -699,7 +694,7 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
void amdgpu_job_free_resources(struct amdgpu_job *job);
void amdgpu_job_free(struct amdgpu_job *job);
int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
- struct amd_sched_entity *entity, void *owner,
+ struct drm_sched_entity *entity, void *owner,
struct dma_fence **f);
/*
@@ -732,7 +727,7 @@ int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
struct amdgpu_ctx_ring {
uint64_t sequence;
struct dma_fence **fences;
- struct amd_sched_entity entity;
+ struct drm_sched_entity entity;
};
struct amdgpu_ctx {
@@ -746,8 +741,8 @@ struct amdgpu_ctx {
struct dma_fence **fences;
struct amdgpu_ctx_ring rings[AMDGPU_MAX_RINGS];
bool preamble_presented;
- enum amd_sched_priority init_priority;
- enum amd_sched_priority override_priority;
+ enum drm_sched_priority init_priority;
+ enum drm_sched_priority override_priority;
struct mutex lock;
atomic_t guilty;
};
@@ -767,7 +762,7 @@ int amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
struct amdgpu_ring *ring, uint64_t seq);
void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
- enum amd_sched_priority priority);
+ enum drm_sched_priority priority);
int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
@@ -964,6 +959,7 @@ struct amdgpu_gfx_config {
};
struct amdgpu_cu_info {
+ uint32_t simd_per_cu;
uint32_t max_waves_per_simd;
uint32_t wave_front_size;
uint32_t max_scratch_slots_per_cu;
@@ -1116,7 +1112,7 @@ struct amdgpu_cs_parser {
#define AMDGPU_HAVE_CTX_SWITCH (1 << 2) /* bit set means context switch occured */
struct amdgpu_job {
- struct amd_sched_job base;
+ struct drm_sched_job base;
struct amdgpu_device *adev;
struct amdgpu_vm *vm;
struct amdgpu_ring *ring;
@@ -1129,7 +1125,7 @@ struct amdgpu_job {
void *owner;
uint64_t fence_ctx; /* the fence_context this job uses */
bool vm_needs_flush;
- unsigned vm_id;
+ unsigned vmid;
uint64_t vm_pd_addr;
uint32_t gds_base, gds_size;
uint32_t gws_base, gws_size;
@@ -1170,10 +1166,10 @@ struct amdgpu_wb {
unsigned long used[DIV_ROUND_UP(AMDGPU_MAX_WB, BITS_PER_LONG)];
};
-int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb);
-void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb);
+int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb);
+void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb);
-void amdgpu_get_pcie_info(struct amdgpu_device *adev);
+void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
/*
* SDMA
@@ -1238,24 +1234,6 @@ void amdgpu_benchmark(struct amdgpu_device *adev, int test_number);
*/
void amdgpu_test_moves(struct amdgpu_device *adev);
-/*
- * Debugfs
- */
-struct amdgpu_debugfs {
- const struct drm_info_list *files;
- unsigned num_files;
-};
-
-int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
- const struct drm_info_list *files,
- unsigned nfiles);
-int amdgpu_debugfs_fence_init(struct amdgpu_device *adev);
-
-#if defined(CONFIG_DEBUG_FS)
-int amdgpu_debugfs_init(struct drm_minor *minor);
-#endif
-
-int amdgpu_debugfs_firmware_init(struct amdgpu_device *adev);
/*
* amdgpu smumgr functions
@@ -1410,9 +1388,6 @@ struct amdgpu_fw_vram_usage {
void *va;
};
-int amdgpu_fw_reserve_vram_init(struct amdgpu_device *adev);
-void amdgpu_fw_reserve_vram_fini(struct amdgpu_device *adev);
-
/*
* CGS
*/
@@ -1428,6 +1403,80 @@ typedef void (*amdgpu_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
typedef uint32_t (*amdgpu_block_rreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, uint32_t);
+
+/*
+ * amdgpu nbio functions
+ *
+ */
+struct nbio_hdp_flush_reg {
+ u32 ref_and_mask_cp0;
+ u32 ref_and_mask_cp1;
+ u32 ref_and_mask_cp2;
+ u32 ref_and_mask_cp3;
+ u32 ref_and_mask_cp4;
+ u32 ref_and_mask_cp5;
+ u32 ref_and_mask_cp6;
+ u32 ref_and_mask_cp7;
+ u32 ref_and_mask_cp8;
+ u32 ref_and_mask_cp9;
+ u32 ref_and_mask_sdma0;
+ u32 ref_and_mask_sdma1;
+};
+
+struct amdgpu_nbio_funcs {
+ const struct nbio_hdp_flush_reg *hdp_flush_reg;
+ u32 (*get_hdp_flush_req_offset)(struct amdgpu_device *adev);
+ u32 (*get_hdp_flush_done_offset)(struct amdgpu_device *adev);
+ u32 (*get_pcie_index_offset)(struct amdgpu_device *adev);
+ u32 (*get_pcie_data_offset)(struct amdgpu_device *adev);
+ u32 (*get_rev_id)(struct amdgpu_device *adev);
+ void (*mc_access_enable)(struct amdgpu_device *adev, bool enable);
+ void (*hdp_flush)(struct amdgpu_device *adev);
+ u32 (*get_memsize)(struct amdgpu_device *adev);
+ void (*sdma_doorbell_range)(struct amdgpu_device *adev, int instance,
+ bool use_doorbell, int doorbell_index);
+ void (*enable_doorbell_aperture)(struct amdgpu_device *adev,
+ bool enable);
+ void (*enable_doorbell_selfring_aperture)(struct amdgpu_device *adev,
+ bool enable);
+ void (*ih_doorbell_range)(struct amdgpu_device *adev,
+ bool use_doorbell, int doorbell_index);
+ void (*update_medium_grain_clock_gating)(struct amdgpu_device *adev,
+ bool enable);
+ void (*update_medium_grain_light_sleep)(struct amdgpu_device *adev,
+ bool enable);
+ void (*get_clockgating_state)(struct amdgpu_device *adev,
+ u32 *flags);
+ void (*ih_control)(struct amdgpu_device *adev);
+ void (*init_registers)(struct amdgpu_device *adev);
+ void (*detect_hw_virt)(struct amdgpu_device *adev);
+};
+
+
+/* Define the HW IP blocks will be used in driver , add more if necessary */
+enum amd_hw_ip_block_type {
+ GC_HWIP = 1,
+ HDP_HWIP,
+ SDMA0_HWIP,
+ SDMA1_HWIP,
+ MMHUB_HWIP,
+ ATHUB_HWIP,
+ NBIO_HWIP,
+ MP0_HWIP,
+ UVD_HWIP,
+ VCN_HWIP = UVD_HWIP,
+ VCE_HWIP,
+ DF_HWIP,
+ DCE_HWIP,
+ OSSSYS_HWIP,
+ SMUIO_HWIP,
+ PWR_HWIP,
+ NBIF_HWIP,
+ MAX_HWIP
+};
+
+#define HWIP_MAX_INSTANCE 6
+
struct amd_powerplay {
struct cgs_device *cgs_device;
void *pp_handle;
@@ -1620,6 +1669,11 @@ struct amdgpu_device {
/* amdkfd interface */
struct kfd_dev *kfd;
+ /* soc15 register offset based on ip, instance and segment */
+ uint32_t *reg_offset[MAX_HWIP][HWIP_MAX_INSTANCE];
+
+ const struct amdgpu_nbio_funcs *nbio_funcs;
+
/* delayed work_func for deferring clockgating during resume */
struct delayed_work late_init_work;
@@ -1785,7 +1839,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
#define amdgpu_asic_get_config_memsize(adev) (adev)->asic_funcs->get_config_memsize((adev))
#define amdgpu_gart_flush_gpu_tlb(adev, vmid) (adev)->gart.gart_funcs->flush_gpu_tlb((adev), (vmid))
#define amdgpu_gart_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gart.gart_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags))
-#define amdgpu_gart_get_vm_pde(adev, addr) (adev)->gart.gart_funcs->get_vm_pde((adev), (addr))
+#define amdgpu_gart_get_vm_pde(adev, level, dst, flags) (adev)->gart.gart_funcs->get_vm_pde((adev), (level), (dst), (flags))
#define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count)))
#define amdgpu_vm_write_pte(adev, ib, pe, value, count, incr) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (value), (count), (incr)))
#define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags)))
@@ -1796,7 +1850,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
#define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r))
#define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r))
#define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r))
-#define amdgpu_ring_emit_ib(r, ib, vm_id, c) (r)->funcs->emit_ib((r), (ib), (vm_id), (c))
+#define amdgpu_ring_emit_ib(r, ib, vmid, c) (r)->funcs->emit_ib((r), (ib), (vmid), (c))
#define amdgpu_ring_emit_pipeline_sync(r) (r)->funcs->emit_pipeline_sync((r))
#define amdgpu_ring_emit_vm_flush(r, vmid, addr) (r)->funcs->emit_vm_flush((r), (vmid), (addr))
#define amdgpu_ring_emit_fence(r, addr, seq, flags) (r)->funcs->emit_fence((r), (addr), (seq), (flags))
@@ -1835,23 +1889,25 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
#define amdgpu_psp_check_fw_loading_status(adev, i) (adev)->firmware.funcs->check_fw_loading_status((adev), (i))
/* Common functions */
-int amdgpu_gpu_recover(struct amdgpu_device *adev, struct amdgpu_job* job);
-bool amdgpu_need_backup(struct amdgpu_device *adev);
-void amdgpu_pci_config_reset(struct amdgpu_device *adev);
-bool amdgpu_need_post(struct amdgpu_device *adev);
+int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
+ struct amdgpu_job* job, bool force);
+void amdgpu_device_pci_config_reset(struct amdgpu_device *adev);
+bool amdgpu_device_need_post(struct amdgpu_device *adev);
void amdgpu_update_display_priority(struct amdgpu_device *adev);
void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
u64 num_vis_bytes);
void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain);
bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
-void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base);
-void amdgpu_gart_location(struct amdgpu_device *adev, struct amdgpu_mc *mc);
+void amdgpu_device_vram_location(struct amdgpu_device *adev,
+ struct amdgpu_mc *mc, u64 base);
+void amdgpu_device_gart_location(struct amdgpu_device *adev,
+ struct amdgpu_mc *mc);
int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev);
void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size);
int amdgpu_ttm_init(struct amdgpu_device *adev);
void amdgpu_ttm_fini(struct amdgpu_device *adev);
-void amdgpu_program_register_sequence(struct amdgpu_device *adev,
+void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
const u32 *registers,
const u32 array_size);
@@ -1885,7 +1941,7 @@ void amdgpu_driver_lastclose_kms(struct drm_device *dev);
int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv);
void amdgpu_driver_postclose_kms(struct drm_device *dev,
struct drm_file *file_priv);
-int amdgpu_suspend(struct amdgpu_device *adev);
+int amdgpu_device_ip_suspend(struct amdgpu_device *adev);
int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon);
int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon);
u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
index c04f44a90392..a29362f9ef41 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
@@ -277,7 +277,7 @@ static int acp_hw_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
const struct amdgpu_ip_block *ip_block =
- amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_ACP);
+ amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_ACP);
if (!ip_block)
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index c70cda04dbfb..1d605e1c1d66 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -93,6 +93,39 @@ void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev)
adev->pdev, kfd2kgd);
}
+/**
+ * amdgpu_doorbell_get_kfd_info - Report doorbell configuration required to
+ * setup amdkfd
+ *
+ * @adev: amdgpu_device pointer
+ * @aperture_base: output returning doorbell aperture base physical address
+ * @aperture_size: output returning doorbell aperture size in bytes
+ * @start_offset: output returning # of doorbell bytes reserved for amdgpu.
+ *
+ * amdgpu and amdkfd share the doorbell aperture. amdgpu sets it up,
+ * takes doorbells required for its own rings and reports the setup to amdkfd.
+ * amdgpu reserved doorbells are at the start of the doorbell aperture.
+ */
+static void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
+ phys_addr_t *aperture_base,
+ size_t *aperture_size,
+ size_t *start_offset)
+{
+ /*
+ * The first num_doorbells are used by amdgpu.
+ * amdkfd takes whatever's left in the aperture.
+ */
+ if (adev->doorbell.size > adev->doorbell.num_doorbells * sizeof(u32)) {
+ *aperture_base = adev->doorbell.base;
+ *aperture_size = adev->doorbell.size;
+ *start_offset = adev->doorbell.num_doorbells * sizeof(u32);
+ } else {
+ *aperture_base = 0;
+ *aperture_size = 0;
+ *start_offset = 0;
+ }
+}
+
void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
{
int i;
@@ -242,14 +275,34 @@ void free_gtt_mem(struct kgd_dev *kgd, void *mem_obj)
kfree(mem);
}
-uint64_t get_vmem_size(struct kgd_dev *kgd)
+void get_local_mem_info(struct kgd_dev *kgd,
+ struct kfd_local_mem_info *mem_info)
{
- struct amdgpu_device *adev =
- (struct amdgpu_device *)kgd;
+ struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+ uint64_t address_mask = adev->dev->dma_mask ? ~*adev->dev->dma_mask :
+ ~((1ULL << 32) - 1);
+ resource_size_t aper_limit = adev->mc.aper_base + adev->mc.aper_size;
+
+ memset(mem_info, 0, sizeof(*mem_info));
+ if (!(adev->mc.aper_base & address_mask || aper_limit & address_mask)) {
+ mem_info->local_mem_size_public = adev->mc.visible_vram_size;
+ mem_info->local_mem_size_private = adev->mc.real_vram_size -
+ adev->mc.visible_vram_size;
+ } else {
+ mem_info->local_mem_size_public = 0;
+ mem_info->local_mem_size_private = adev->mc.real_vram_size;
+ }
+ mem_info->vram_width = adev->mc.vram_width;
- BUG_ON(kgd == NULL);
+ pr_debug("Address base: %pap limit %pap public 0x%llx private 0x%llx\n",
+ &adev->mc.aper_base, &aper_limit,
+ mem_info->local_mem_size_public,
+ mem_info->local_mem_size_private);
- return adev->mc.real_vram_size;
+ if (amdgpu_sriov_vf(adev))
+ mem_info->mem_clk_max = adev->clock.default_mclk / 100;
+ else
+ mem_info->mem_clk_max = amdgpu_dpm_get_mclk(adev, false) / 100;
}
uint64_t get_gpu_clock_counter(struct kgd_dev *kgd)
@@ -265,6 +318,39 @@ uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd)
{
struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
- /* The sclk is in quantas of 10kHz */
- return adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk / 100;
+ /* the sclk is in quantas of 10kHz */
+ if (amdgpu_sriov_vf(adev))
+ return adev->clock.default_sclk / 100;
+
+ return amdgpu_dpm_get_sclk(adev, false) / 100;
+}
+
+void get_cu_info(struct kgd_dev *kgd, struct kfd_cu_info *cu_info)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+ struct amdgpu_cu_info acu_info = adev->gfx.cu_info;
+
+ memset(cu_info, 0, sizeof(*cu_info));
+ if (sizeof(cu_info->cu_bitmap) != sizeof(acu_info.bitmap))
+ return;
+
+ cu_info->cu_active_number = acu_info.number;
+ cu_info->cu_ao_mask = acu_info.ao_cu_mask;
+ memcpy(&cu_info->cu_bitmap[0], &acu_info.bitmap[0],
+ sizeof(acu_info.bitmap));
+ cu_info->num_shader_engines = adev->gfx.config.max_shader_engines;
+ cu_info->num_shader_arrays_per_engine = adev->gfx.config.max_sh_per_se;
+ cu_info->num_cu_per_sh = adev->gfx.config.max_cu_per_sh;
+ cu_info->simd_per_cu = acu_info.simd_per_cu;
+ cu_info->max_waves_per_simd = acu_info.max_waves_per_simd;
+ cu_info->wave_front_size = acu_info.wave_front_size;
+ cu_info->max_scratch_slots_per_cu = acu_info.max_scratch_slots_per_cu;
+ cu_info->lds_size = acu_info.lds_size;
+}
+
+uint64_t amdgpu_amdkfd_get_vram_usage(struct kgd_dev *kgd)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+
+ return amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index 8d689ab7e429..2a519f9062ee 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -56,10 +56,13 @@ int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
void **mem_obj, uint64_t *gpu_addr,
void **cpu_ptr);
void free_gtt_mem(struct kgd_dev *kgd, void *mem_obj);
-uint64_t get_vmem_size(struct kgd_dev *kgd);
+void get_local_mem_info(struct kgd_dev *kgd,
+ struct kfd_local_mem_info *mem_info);
uint64_t get_gpu_clock_counter(struct kgd_dev *kgd);
uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd);
+void get_cu_info(struct kgd_dev *kgd, struct kfd_cu_info *cu_info);
+uint64_t amdgpu_amdkfd_get_vram_usage(struct kgd_dev *kgd);
#define read_user_wptr(mmptr, wptr, dst) \
({ \
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
index 1e3e9be7d77e..a9e6aea0e5f8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -105,7 +105,14 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
uint32_t queue_id, uint32_t __user *wptr,
uint32_t wptr_shift, uint32_t wptr_mask,
struct mm_struct *mm);
-static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd);
+static int kgd_hqd_dump(struct kgd_dev *kgd,
+ uint32_t pipe_id, uint32_t queue_id,
+ uint32_t (**dump)[2], uint32_t *n_regs);
+static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
+ uint32_t __user *wptr, struct mm_struct *mm);
+static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
+ uint32_t engine_id, uint32_t queue_id,
+ uint32_t (**dump)[2], uint32_t *n_regs);
static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
uint32_t pipe_id, uint32_t queue_id);
@@ -166,17 +173,19 @@ static int get_tile_config(struct kgd_dev *kgd,
static const struct kfd2kgd_calls kfd2kgd = {
.init_gtt_mem_allocation = alloc_gtt_mem,
.free_gtt_mem = free_gtt_mem,
- .get_vmem_size = get_vmem_size,
+ .get_local_mem_info = get_local_mem_info,
.get_gpu_clock_counter = get_gpu_clock_counter,
.get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz,
- .alloc_pasid = amdgpu_vm_alloc_pasid,
- .free_pasid = amdgpu_vm_free_pasid,
+ .alloc_pasid = amdgpu_pasid_alloc,
+ .free_pasid = amdgpu_pasid_free,
.program_sh_mem_settings = kgd_program_sh_mem_settings,
.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
.init_pipeline = kgd_init_pipeline,
.init_interrupts = kgd_init_interrupts,
.hqd_load = kgd_hqd_load,
.hqd_sdma_load = kgd_hqd_sdma_load,
+ .hqd_dump = kgd_hqd_dump,
+ .hqd_sdma_dump = kgd_hqd_sdma_dump,
.hqd_is_occupied = kgd_hqd_is_occupied,
.hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
.hqd_destroy = kgd_hqd_destroy,
@@ -191,6 +200,8 @@ static const struct kfd2kgd_calls kfd2kgd = {
.get_fw_version = get_fw_version,
.set_scratch_backing_va = set_scratch_backing_va,
.get_tile_config = get_tile_config,
+ .get_cu_info = get_cu_info,
+ .get_vram_usage = amdgpu_amdkfd_get_vram_usage
};
struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void)
@@ -375,7 +386,44 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
return 0;
}
-static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd)
+static int kgd_hqd_dump(struct kgd_dev *kgd,
+ uint32_t pipe_id, uint32_t queue_id,
+ uint32_t (**dump)[2], uint32_t *n_regs)
+{
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+ uint32_t i = 0, reg;
+#define HQD_N_REGS (35+4)
+#define DUMP_REG(addr) do { \
+ if (WARN_ON_ONCE(i >= HQD_N_REGS)) \
+ break; \
+ (*dump)[i][0] = (addr) << 2; \
+ (*dump)[i++][1] = RREG32(addr); \
+ } while (0)
+
+ *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
+ if (*dump == NULL)
+ return -ENOMEM;
+
+ acquire_queue(kgd, pipe_id, queue_id);
+
+ DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE0);
+ DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE1);
+ DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE2);
+ DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE3);
+
+ for (reg = mmCP_MQD_BASE_ADDR; reg <= mmCP_MQD_CONTROL; reg++)
+ DUMP_REG(reg);
+
+ release_queue(kgd);
+
+ WARN_ON_ONCE(i != HQD_N_REGS);
+ *n_regs = i;
+
+ return 0;
+}
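
kgd_hqd_dump() returns an array of {byte offset, value} pairs; DUMP_REG stores the dword register index shifted left by 2. A hedged sketch of how a consumer might walk such a dump (the entries below are fabricated for illustration):

#include <stdio.h>
#include <stdint.h>

static void print_hqd_dump(uint32_t (*dump)[2], uint32_t n_regs)
{
        uint32_t i;

        for (i = 0; i < n_regs; i++)
                printf("reg byte offset 0x%05x (dword 0x%05x) = 0x%08x\n",
                       dump[i][0], dump[i][0] >> 2, dump[i][1]);
}

int main(void)
{
        /* Fabricated entries, purely for illustration. */
        uint32_t dump[2][2] = {
                { 0x2e00 << 2, 0xffffffff },
                { 0x2e01 << 2, 0x0000ffff },
        };

        print_hqd_dump(dump, 2);
        return 0;
}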
+
+static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
+ uint32_t __user *wptr, struct mm_struct *mm)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct cik_sdma_rlc_registers *m;
@@ -410,10 +458,17 @@ static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd)
WREG32(mmSDMA0_GFX_CONTEXT_CNTL, data);
}
- WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL,
- m->sdma_rlc_doorbell);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, 0);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, 0);
+ data = REG_SET_FIELD(m->sdma_rlc_doorbell, SDMA0_RLC0_DOORBELL,
+ ENABLE, 1);
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, data);
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, m->sdma_rlc_rb_rptr);
+
+ if (read_user_wptr(mm, wptr, data))
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, data);
+ else
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR,
+ m->sdma_rlc_rb_rptr);
+
WREG32(sdma_base_addr + mmSDMA0_RLC0_VIRTUAL_ADDR,
m->sdma_rlc_virtual_addr);
WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, m->sdma_rlc_rb_base);
@@ -423,8 +478,37 @@ static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd)
m->sdma_rlc_rb_rptr_addr_lo);
WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
m->sdma_rlc_rb_rptr_addr_hi);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
- m->sdma_rlc_rb_cntl);
+
+ data = REG_SET_FIELD(m->sdma_rlc_rb_cntl, SDMA0_RLC0_RB_CNTL,
+ RB_ENABLE, 1);
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, data);
+
+ return 0;
+}
+
+static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
+ uint32_t engine_id, uint32_t queue_id,
+ uint32_t (**dump)[2], uint32_t *n_regs)
+{
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+ uint32_t sdma_offset = engine_id * SDMA1_REGISTER_OFFSET +
+ queue_id * KFD_CIK_SDMA_QUEUE_OFFSET;
+ uint32_t i = 0, reg;
+#undef HQD_N_REGS
+#define HQD_N_REGS (19+4)
+
+ *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
+ if (*dump == NULL)
+ return -ENOMEM;
+
+ for (reg = mmSDMA0_RLC0_RB_CNTL; reg <= mmSDMA0_RLC0_DOORBELL; reg++)
+ DUMP_REG(sdma_offset + reg);
+ for (reg = mmSDMA0_RLC0_VIRTUAL_ADDR; reg <= mmSDMA0_RLC0_WATERMARK;
+ reg++)
+ DUMP_REG(sdma_offset + reg);
+
+ WARN_ON_ONCE(i != HQD_N_REGS);
+ *n_regs = i;
return 0;
}
@@ -575,7 +659,7 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
struct cik_sdma_rlc_registers *m;
uint32_t sdma_base_addr;
uint32_t temp;
- int timeout = utimeout;
+ unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;
m = get_sdma_mqd(mqd);
sdma_base_addr = get_sdma_base_addr(m);
@@ -588,10 +672,9 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
if (temp & SDMA0_STATUS_REG__RB_CMD_IDLE__SHIFT)
break;
- if (timeout <= 0)
+ if (time_after(jiffies, end_jiffies))
return -ETIME;
- msleep(20);
- timeout -= 20;
+ usleep_range(500, 1000);
}
WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0);
@@ -599,6 +682,8 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL) |
SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);
+ m->sdma_rlc_rb_rptr = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR);
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
index 056929b8ccd0..b127259d7d85 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
@@ -45,7 +45,7 @@ enum hqd_dequeue_request_type {
RESET_WAVES
};
-struct cik_sdma_rlc_registers;
+struct vi_sdma_mqd;
/*
* Register access functions
@@ -64,7 +64,14 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
uint32_t queue_id, uint32_t __user *wptr,
uint32_t wptr_shift, uint32_t wptr_mask,
struct mm_struct *mm);
-static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd);
+static int kgd_hqd_dump(struct kgd_dev *kgd,
+ uint32_t pipe_id, uint32_t queue_id,
+ uint32_t (**dump)[2], uint32_t *n_regs);
+static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
+ uint32_t __user *wptr, struct mm_struct *mm);
+static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
+ uint32_t engine_id, uint32_t queue_id,
+ uint32_t (**dump)[2], uint32_t *n_regs);
static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
uint32_t pipe_id, uint32_t queue_id);
static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd);
@@ -125,17 +132,19 @@ static int get_tile_config(struct kgd_dev *kgd,
static const struct kfd2kgd_calls kfd2kgd = {
.init_gtt_mem_allocation = alloc_gtt_mem,
.free_gtt_mem = free_gtt_mem,
- .get_vmem_size = get_vmem_size,
+ .get_local_mem_info = get_local_mem_info,
.get_gpu_clock_counter = get_gpu_clock_counter,
.get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz,
- .alloc_pasid = amdgpu_vm_alloc_pasid,
- .free_pasid = amdgpu_vm_free_pasid,
+ .alloc_pasid = amdgpu_pasid_alloc,
+ .free_pasid = amdgpu_pasid_free,
.program_sh_mem_settings = kgd_program_sh_mem_settings,
.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
.init_pipeline = kgd_init_pipeline,
.init_interrupts = kgd_init_interrupts,
.hqd_load = kgd_hqd_load,
.hqd_sdma_load = kgd_hqd_sdma_load,
+ .hqd_dump = kgd_hqd_dump,
+ .hqd_sdma_dump = kgd_hqd_sdma_dump,
.hqd_is_occupied = kgd_hqd_is_occupied,
.hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
.hqd_destroy = kgd_hqd_destroy,
@@ -152,6 +161,8 @@ static const struct kfd2kgd_calls kfd2kgd = {
.get_fw_version = get_fw_version,
.set_scratch_backing_va = set_scratch_backing_va,
.get_tile_config = get_tile_config,
+ .get_cu_info = get_cu_info,
+ .get_vram_usage = amdgpu_amdkfd_get_vram_usage
};
struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void)
@@ -268,9 +279,15 @@ static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
return 0;
}
-static inline uint32_t get_sdma_base_addr(struct cik_sdma_rlc_registers *m)
+static inline uint32_t get_sdma_base_addr(struct vi_sdma_mqd *m)
{
- return 0;
+ uint32_t retval;
+
+ retval = m->sdma_engine_id * SDMA1_REGISTER_OFFSET +
+ m->sdma_queue_id * KFD_VI_SDMA_QUEUE_OFFSET;
+ pr_debug("kfd: sdma base address: 0x%x\n", retval);
+
+ return retval;
}
static inline struct vi_mqd *get_mqd(void *mqd)
@@ -278,9 +295,9 @@ static inline struct vi_mqd *get_mqd(void *mqd)
return (struct vi_mqd *)mqd;
}
-static inline struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd)
+static inline struct vi_sdma_mqd *get_sdma_mqd(void *mqd)
{
- return (struct cik_sdma_rlc_registers *)mqd;
+ return (struct vi_sdma_mqd *)mqd;
}
static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
@@ -358,8 +375,138 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
return 0;
}
-static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd)
+static int kgd_hqd_dump(struct kgd_dev *kgd,
+ uint32_t pipe_id, uint32_t queue_id,
+ uint32_t (**dump)[2], uint32_t *n_regs)
{
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+ uint32_t i = 0, reg;
+#define HQD_N_REGS (54+4)
+#define DUMP_REG(addr) do { \
+ if (WARN_ON_ONCE(i >= HQD_N_REGS)) \
+ break; \
+ (*dump)[i][0] = (addr) << 2; \
+ (*dump)[i++][1] = RREG32(addr); \
+ } while (0)
+
+ *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
+ if (*dump == NULL)
+ return -ENOMEM;
+
+ acquire_queue(kgd, pipe_id, queue_id);
+
+ DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE0);
+ DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE1);
+ DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE2);
+ DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE3);
+
+ for (reg = mmCP_MQD_BASE_ADDR; reg <= mmCP_HQD_EOP_DONES; reg++)
+ DUMP_REG(reg);
+
+ release_queue(kgd);
+
+ WARN_ON_ONCE(i != HQD_N_REGS);
+ *n_regs = i;
+
+ return 0;
+}
+
+static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
+ uint32_t __user *wptr, struct mm_struct *mm)
+{
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+ struct vi_sdma_mqd *m;
+ unsigned long end_jiffies;
+ uint32_t sdma_base_addr;
+ uint32_t data;
+
+ m = get_sdma_mqd(mqd);
+ sdma_base_addr = get_sdma_base_addr(m);
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
+ m->sdmax_rlcx_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));
+
+ end_jiffies = msecs_to_jiffies(2000) + jiffies;
+ while (true) {
+ data = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
+ if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
+ break;
+ if (time_after(jiffies, end_jiffies))
+ return -ETIME;
+ usleep_range(500, 1000);
+ }
+ if (m->sdma_engine_id) {
+ data = RREG32(mmSDMA1_GFX_CONTEXT_CNTL);
+ data = REG_SET_FIELD(data, SDMA1_GFX_CONTEXT_CNTL,
+ RESUME_CTX, 0);
+ WREG32(mmSDMA1_GFX_CONTEXT_CNTL, data);
+ } else {
+ data = RREG32(mmSDMA0_GFX_CONTEXT_CNTL);
+ data = REG_SET_FIELD(data, SDMA0_GFX_CONTEXT_CNTL,
+ RESUME_CTX, 0);
+ WREG32(mmSDMA0_GFX_CONTEXT_CNTL, data);
+ }
+
+ data = REG_SET_FIELD(m->sdmax_rlcx_doorbell, SDMA0_RLC0_DOORBELL,
+ ENABLE, 1);
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, data);
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, m->sdmax_rlcx_rb_rptr);
+
+ if (read_user_wptr(mm, wptr, data))
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, data);
+ else
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR,
+ m->sdmax_rlcx_rb_rptr);
+
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_VIRTUAL_ADDR,
+ m->sdmax_rlcx_virtual_addr);
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base);
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI,
+ m->sdmax_rlcx_rb_base_hi);
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
+ m->sdmax_rlcx_rb_rptr_addr_lo);
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
+ m->sdmax_rlcx_rb_rptr_addr_hi);
+
+ data = REG_SET_FIELD(m->sdmax_rlcx_rb_cntl, SDMA0_RLC0_RB_CNTL,
+ RB_ENABLE, 1);
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, data);
+
+ return 0;
+}
+
+static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
+ uint32_t engine_id, uint32_t queue_id,
+ uint32_t (**dump)[2], uint32_t *n_regs)
+{
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+ uint32_t sdma_offset = engine_id * SDMA1_REGISTER_OFFSET +
+ queue_id * KFD_VI_SDMA_QUEUE_OFFSET;
+ uint32_t i = 0, reg;
+#undef HQD_N_REGS
+#define HQD_N_REGS (19+4+2+3+7)
+
+ *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
+ if (*dump == NULL)
+ return -ENOMEM;
+
+ for (reg = mmSDMA0_RLC0_RB_CNTL; reg <= mmSDMA0_RLC0_DOORBELL; reg++)
+ DUMP_REG(sdma_offset + reg);
+ for (reg = mmSDMA0_RLC0_VIRTUAL_ADDR; reg <= mmSDMA0_RLC0_WATERMARK;
+ reg++)
+ DUMP_REG(sdma_offset + reg);
+ for (reg = mmSDMA0_RLC0_CSA_ADDR_LO; reg <= mmSDMA0_RLC0_CSA_ADDR_HI;
+ reg++)
+ DUMP_REG(sdma_offset + reg);
+ for (reg = mmSDMA0_RLC0_IB_SUB_REMAIN; reg <= mmSDMA0_RLC0_DUMMY_REG;
+ reg++)
+ DUMP_REG(sdma_offset + reg);
+ for (reg = mmSDMA0_RLC0_MIDCMD_DATA0; reg <= mmSDMA0_RLC0_MIDCMD_CNTL;
+ reg++)
+ DUMP_REG(sdma_offset + reg);
+
+ WARN_ON_ONCE(i != HQD_N_REGS);
+ *n_regs = i;
+
return 0;
}
@@ -388,7 +535,7 @@ static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
- struct cik_sdma_rlc_registers *m;
+ struct vi_sdma_mqd *m;
uint32_t sdma_base_addr;
uint32_t sdma_rlc_rb_cntl;
@@ -509,10 +656,10 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
unsigned int utimeout)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
- struct cik_sdma_rlc_registers *m;
+ struct vi_sdma_mqd *m;
uint32_t sdma_base_addr;
uint32_t temp;
- int timeout = utimeout;
+ unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;
m = get_sdma_mqd(mqd);
sdma_base_addr = get_sdma_base_addr(m);
@@ -523,18 +670,19 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
while (true) {
temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
- if (temp & SDMA0_STATUS_REG__RB_CMD_IDLE__SHIFT)
+ if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
break;
- if (timeout <= 0)
+ if (time_after(jiffies, end_jiffies))
return -ETIME;
- msleep(20);
- timeout -= 20;
+ usleep_range(500, 1000);
}
WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, 0);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, 0);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, 0);
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
+ RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL) |
+ SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);
+
+ m->sdmax_rlcx_rb_rptr = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index 39f4d0df1ada..bf872f694f50 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -27,6 +27,7 @@
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_atombios.h"
+#include "amdgpu_atomfirmware.h"
#include "amdgpu_i2c.h"
#include "atom.h"
@@ -1699,7 +1700,7 @@ void amdgpu_atombios_scratch_regs_lock(struct amdgpu_device *adev, bool lock)
WREG32(adev->bios_scratch_reg_offset + 6, bios_6_scratch);
}
-void amdgpu_atombios_scratch_regs_init(struct amdgpu_device *adev)
+static void amdgpu_atombios_scratch_regs_init(struct amdgpu_device *adev)
{
uint32_t bios_2_scratch, bios_6_scratch;
@@ -1721,28 +1722,6 @@ void amdgpu_atombios_scratch_regs_init(struct amdgpu_device *adev)
WREG32(adev->bios_scratch_reg_offset + 6, bios_6_scratch);
}
-void amdgpu_atombios_scratch_regs_save(struct amdgpu_device *adev)
-{
- int i;
-
- for (i = 0; i < AMDGPU_BIOS_NUM_SCRATCH; i++)
- adev->bios_scratch[i] = RREG32(adev->bios_scratch_reg_offset + i);
-}
-
-void amdgpu_atombios_scratch_regs_restore(struct amdgpu_device *adev)
-{
- int i;
-
- /*
- * VBIOS will check ASIC_INIT_COMPLETE bit to decide if
- * execute ASIC_Init posting via driver
- */
- adev->bios_scratch[7] &= ~ATOM_S7_ASIC_INIT_COMPLETE_MASK;
-
- for (i = 0; i < AMDGPU_BIOS_NUM_SCRATCH; i++)
- WREG32(adev->bios_scratch_reg_offset + i, adev->bios_scratch[i]);
-}
-
void amdgpu_atombios_scratch_regs_engine_hung(struct amdgpu_device *adev,
bool hung)
{
@@ -1798,7 +1777,7 @@ void amdgpu_atombios_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le)
#endif
}
-int amdgpu_atombios_allocate_fb_scratch(struct amdgpu_device *adev)
+static int amdgpu_atombios_allocate_fb_scratch(struct amdgpu_device *adev)
{
struct atom_context *ctx = adev->mode_info.atom_context;
int index = GetIndexIntoMasterTable(DATA, VRAM_UsageByFirmware);
@@ -1841,3 +1820,234 @@ int amdgpu_atombios_allocate_fb_scratch(struct amdgpu_device *adev)
ctx->scratch_size_bytes = usage_bytes;
return 0;
}
+
+/* ATOM accessor methods */
+/*
+ * ATOM is an interpreted byte code stored in tables in the vbios. The
+ * driver registers callbacks to access registers and the interpreter
+ * in the driver parses the tables and executes then to program specific
+ * actions (set display modes, asic init, etc.). See amdgpu_atombios.c,
+ * atombios.h, and atom.c
+ */
+
+/**
+ * cail_pll_read - read PLL register
+ *
+ * @info: atom card_info pointer
+ * @reg: PLL register offset
+ *
+ * Provides a PLL register accessor for the atom interpreter (r4xx+).
+ * Returns the value of the PLL register.
+ */
+static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
+{
+ return 0;
+}
+
+/**
+ * cail_pll_write - write PLL register
+ *
+ * @info: atom card_info pointer
+ * @reg: PLL register offset
+ * @val: value to write to the pll register
+ *
+ * Provides a PLL register accessor for the atom interpreter (r4xx+).
+ */
+static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
+{
+
+}
+
+/**
+ * cail_mc_read - read MC (Memory Controller) register
+ *
+ * @info: atom card_info pointer
+ * @reg: MC register offset
+ *
+ * Provides an MC register accessor for the atom interpreter (r4xx+).
+ * Returns the value of the MC register.
+ */
+static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
+{
+ return 0;
+}
+
+/**
+ * cail_mc_write - write MC (Memory Controller) register
+ *
+ * @info: atom card_info pointer
+ * @reg: MC register offset
+ * @val: value to write to the MC register
+ *
+ * Provides an MC register accessor for the atom interpreter (r4xx+).
+ */
+static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
+{
+
+}
+
+/**
+ * cail_reg_write - write MMIO register
+ *
+ * @info: atom card_info pointer
+ * @reg: MMIO register offset
+ * @val: value to write to the MMIO register
+ *
+ * Provides an MMIO register accessor for the atom interpreter (r4xx+).
+ */
+static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
+{
+ struct amdgpu_device *adev = info->dev->dev_private;
+
+ WREG32(reg, val);
+}
+
+/**
+ * cail_reg_read - read MMIO register
+ *
+ * @info: atom card_info pointer
+ * @reg: MMIO register offset
+ *
+ * Provides an MMIO register accessor for the atom interpreter (r4xx+).
+ * Returns the value of the MMIO register.
+ */
+static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
+{
+ struct amdgpu_device *adev = info->dev->dev_private;
+ uint32_t r;
+
+ r = RREG32(reg);
+ return r;
+}
+
+/**
+ * cail_ioreg_write - write IO register
+ *
+ * @info: atom card_info pointer
+ * @reg: IO register offset
+ * @val: value to write to the IO register
+ *
+ * Provides an IO register accessor for the atom interpreter (r4xx+).
+ */
+static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
+{
+ struct amdgpu_device *adev = info->dev->dev_private;
+
+ WREG32_IO(reg, val);
+}
+
+/**
+ * cail_ioreg_read - read IO register
+ *
+ * @info: atom card_info pointer
+ * @reg: IO register offset
+ *
+ * Provides an IO register accessor for the atom interpreter (r4xx+).
+ * Returns the value of the IO register.
+ */
+static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
+{
+ struct amdgpu_device *adev = info->dev->dev_private;
+ uint32_t r;
+
+ r = RREG32_IO(reg);
+ return r;
+}
+
+static ssize_t amdgpu_atombios_get_vbios_version(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+ struct atom_context *ctx = adev->mode_info.atom_context;
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", ctx->vbios_version);
+}
+
+static DEVICE_ATTR(vbios_version, 0444, amdgpu_atombios_get_vbios_version,
+ NULL);
+
+/**
+ * amdgpu_atombios_fini - free the driver info and callbacks for atombios
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Frees the driver info and register access callbacks for the ATOM
+ * interpreter (r4xx+).
+ * Called at driver shutdown.
+ */
+void amdgpu_atombios_fini(struct amdgpu_device *adev)
+{
+ if (adev->mode_info.atom_context) {
+ kfree(adev->mode_info.atom_context->scratch);
+ kfree(adev->mode_info.atom_context->iio);
+ }
+ kfree(adev->mode_info.atom_context);
+ adev->mode_info.atom_context = NULL;
+ kfree(adev->mode_info.atom_card_info);
+ adev->mode_info.atom_card_info = NULL;
+ device_remove_file(adev->dev, &dev_attr_vbios_version);
+}
+
+/**
+ * amdgpu_atombios_init - init the driver info and callbacks for atombios
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Initializes the driver info and register access callbacks for the
+ * ATOM interpreter (r4xx+).
+ * Returns 0 on success, -ENOMEM on failure.
+ * Called at driver startup.
+ */
+int amdgpu_atombios_init(struct amdgpu_device *adev)
+{
+ struct card_info *atom_card_info =
+ kzalloc(sizeof(struct card_info), GFP_KERNEL);
+ int ret;
+
+ if (!atom_card_info)
+ return -ENOMEM;
+
+ adev->mode_info.atom_card_info = atom_card_info;
+ atom_card_info->dev = adev->ddev;
+ atom_card_info->reg_read = cail_reg_read;
+ atom_card_info->reg_write = cail_reg_write;
+ /* needed for iio ops */
+ if (adev->rio_mem) {
+ atom_card_info->ioreg_read = cail_ioreg_read;
+ atom_card_info->ioreg_write = cail_ioreg_write;
+ } else {
+ DRM_DEBUG("PCI I/O BAR is not found. Using MMIO to access ATOM BIOS\n");
+ atom_card_info->ioreg_read = cail_reg_read;
+ atom_card_info->ioreg_write = cail_reg_write;
+ }
+ atom_card_info->mc_read = cail_mc_read;
+ atom_card_info->mc_write = cail_mc_write;
+ atom_card_info->pll_read = cail_pll_read;
+ atom_card_info->pll_write = cail_pll_write;
+
+ adev->mode_info.atom_context = amdgpu_atom_parse(atom_card_info, adev->bios);
+ if (!adev->mode_info.atom_context) {
+ amdgpu_atombios_fini(adev);
+ return -ENOMEM;
+ }
+
+ mutex_init(&adev->mode_info.atom_context->mutex);
+ if (adev->is_atom_fw) {
+ amdgpu_atomfirmware_scratch_regs_init(adev);
+ amdgpu_atomfirmware_allocate_fb_scratch(adev);
+ } else {
+ amdgpu_atombios_scratch_regs_init(adev);
+ amdgpu_atombios_allocate_fb_scratch(adev);
+ }
+
+ ret = device_create_file(adev->dev, &dev_attr_vbios_version);
+ if (ret) {
+ DRM_ERROR("Failed to create device file for VBIOS version\n");
+ return ret;
+ }
+
+ return 0;
+}
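
The new vbios_version attribute is created on the GPU's struct device, so it should appear among that device's sysfs attributes; the exact path below is an assumption based on the usual card0 -> device symlink:

#include <stdio.h>

int main(void)
{
        /* Path is an assumption: the attribute sits on the GPU's PCI device,
         * which card0 normally links to via "device". */
        FILE *f = fopen("/sys/class/drm/card0/device/vbios_version", "r");
        char buf[64];

        if (!f)
                return 1;
        if (fgets(buf, sizeof(buf), f))
                printf("VBIOS version: %s", buf);
        fclose(f);
        return 0;
}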
+
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
index b0d5d1d7fdba..fd8f18074f7a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
@@ -195,9 +195,6 @@ int amdgpu_atombios_init_mc_reg_table(struct amdgpu_device *adev,
bool amdgpu_atombios_has_gpu_virtualization_table(struct amdgpu_device *adev);
void amdgpu_atombios_scratch_regs_lock(struct amdgpu_device *adev, bool lock);
-void amdgpu_atombios_scratch_regs_init(struct amdgpu_device *adev);
-void amdgpu_atombios_scratch_regs_save(struct amdgpu_device *adev);
-void amdgpu_atombios_scratch_regs_restore(struct amdgpu_device *adev);
void amdgpu_atombios_scratch_regs_engine_hung(struct amdgpu_device *adev,
bool hung);
bool amdgpu_atombios_scratch_need_asic_init(struct amdgpu_device *adev);
@@ -219,6 +216,7 @@ int amdgpu_atombios_get_svi2_info(struct amdgpu_device *adev,
u8 voltage_type,
u8 *svd_gpio_id, u8 *svc_gpio_id);
-int amdgpu_atombios_allocate_fb_scratch(struct amdgpu_device *adev);
+void amdgpu_atombios_fini(struct amdgpu_device *adev);
+int amdgpu_atombios_init(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
index 057e1ecd83ce..a5df80d50d44 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
@@ -93,7 +93,7 @@ static bool igp_read_bios_from_vram(struct amdgpu_device *adev)
resource_size_t size = 256 * 1024; /* ??? */
if (!(adev->flags & AMD_IS_APU))
- if (amdgpu_need_post(adev))
+ if (amdgpu_device_need_post(adev))
return false;
adev->bios = NULL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index 85d2149b9dbe..4466f3535e2d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -801,6 +801,11 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
else
strcpy(fw_name, "amdgpu/vega10_smc.bin");
break;
+ case CHIP_CARRIZO:
+ case CHIP_STONEY:
+ case CHIP_RAVEN:
+ adev->pm.fw_version = info->version;
+ return 0;
default:
DRM_ERROR("SMC firmware not supported\n");
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 4cea9ab237ac..5e539fc5b05f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -343,7 +343,12 @@ static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
struct amdgpu_bo *bo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
- struct ttm_operation_ctx ctx = { true, false };
+ struct ttm_operation_ctx ctx = {
+ .interruptible = true,
+ .no_wait_gpu = false,
+ .allow_reserved_eviction = false,
+ .resv = bo->tbo.resv
+ };
uint32_t domain;
int r;
@@ -1150,7 +1155,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
union drm_amdgpu_cs *cs)
{
struct amdgpu_ring *ring = p->job->ring;
- struct amd_sched_entity *entity = &p->ctx->rings[ring->idx].entity;
+ struct drm_sched_entity *entity = &p->ctx->rings[ring->idx].entity;
struct amdgpu_job *job;
unsigned i;
uint64_t seq;
@@ -1173,7 +1178,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
job = p->job;
p->job = NULL;
- r = amd_sched_job_init(&job->base, &ring->sched, entity, p->filp);
+ r = drm_sched_job_init(&job->base, &ring->sched, entity, p->filp);
if (r) {
amdgpu_job_free(job);
amdgpu_mn_unlock(p->mn);
@@ -1202,7 +1207,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
amdgpu_ring_priority_get(job->ring, job->base.s_priority);
trace_amdgpu_cs_ioctl(job);
- amd_sched_entity_push_job(&job->base, entity);
+ drm_sched_entity_push_job(&job->base, entity);
ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
amdgpu_mn_unlock(p->mn);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index d71dc164b469..09d35051fdd6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -28,10 +28,10 @@
#include "amdgpu_sched.h"
static int amdgpu_ctx_priority_permit(struct drm_file *filp,
- enum amd_sched_priority priority)
+ enum drm_sched_priority priority)
{
/* NORMAL and below are accessible by everyone */
- if (priority <= AMD_SCHED_PRIORITY_NORMAL)
+ if (priority <= DRM_SCHED_PRIORITY_NORMAL)
return 0;
if (capable(CAP_SYS_NICE))
@@ -44,14 +44,14 @@ static int amdgpu_ctx_priority_permit(struct drm_file *filp,
}
static int amdgpu_ctx_init(struct amdgpu_device *adev,
- enum amd_sched_priority priority,
+ enum drm_sched_priority priority,
struct drm_file *filp,
struct amdgpu_ctx *ctx)
{
unsigned i, j;
int r;
- if (priority < 0 || priority >= AMD_SCHED_PRIORITY_MAX)
+ if (priority < 0 || priority >= DRM_SCHED_PRIORITY_MAX)
return -EINVAL;
r = amdgpu_ctx_priority_permit(filp, priority);
@@ -78,19 +78,19 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
ctx->reset_counter_query = ctx->reset_counter;
ctx->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
ctx->init_priority = priority;
- ctx->override_priority = AMD_SCHED_PRIORITY_UNSET;
+ ctx->override_priority = DRM_SCHED_PRIORITY_UNSET;
/* create context entity for each ring */
for (i = 0; i < adev->num_rings; i++) {
struct amdgpu_ring *ring = adev->rings[i];
- struct amd_sched_rq *rq;
+ struct drm_sched_rq *rq;
rq = &ring->sched.sched_rq[priority];
if (ring == &adev->gfx.kiq.ring)
continue;
- r = amd_sched_entity_init(&ring->sched, &ctx->rings[i].entity,
+ r = drm_sched_entity_init(&ring->sched, &ctx->rings[i].entity,
rq, amdgpu_sched_jobs, &ctx->guilty);
if (r)
goto failed;
@@ -104,7 +104,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
failed:
for (j = 0; j < i; j++)
- amd_sched_entity_fini(&adev->rings[j]->sched,
+ drm_sched_entity_fini(&adev->rings[j]->sched,
&ctx->rings[j].entity);
kfree(ctx->fences);
ctx->fences = NULL;
@@ -126,7 +126,7 @@ static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
ctx->fences = NULL;
for (i = 0; i < adev->num_rings; i++)
- amd_sched_entity_fini(&adev->rings[i]->sched,
+ drm_sched_entity_fini(&adev->rings[i]->sched,
&ctx->rings[i].entity);
amdgpu_queue_mgr_fini(adev, &ctx->queue_mgr);
@@ -137,7 +137,7 @@ static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
struct amdgpu_fpriv *fpriv,
struct drm_file *filp,
- enum amd_sched_priority priority,
+ enum drm_sched_priority priority,
uint32_t *id)
{
struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
@@ -266,7 +266,7 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
{
int r;
uint32_t id;
- enum amd_sched_priority priority;
+ enum drm_sched_priority priority;
union drm_amdgpu_ctx *args = data;
struct amdgpu_device *adev = dev->dev_private;
@@ -278,8 +278,8 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
/* For backwards compatibility reasons, we need to accept
* ioctls with garbage in the priority field */
- if (priority == AMD_SCHED_PRIORITY_INVALID)
- priority = AMD_SCHED_PRIORITY_NORMAL;
+ if (priority == DRM_SCHED_PRIORITY_INVALID)
+ priority = DRM_SCHED_PRIORITY_NORMAL;
switch (args->in.op) {
case AMDGPU_CTX_OP_ALLOC_CTX:
@@ -385,18 +385,18 @@ struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
}
void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
- enum amd_sched_priority priority)
+ enum drm_sched_priority priority)
{
int i;
struct amdgpu_device *adev = ctx->adev;
- struct amd_sched_rq *rq;
- struct amd_sched_entity *entity;
+ struct drm_sched_rq *rq;
+ struct drm_sched_entity *entity;
struct amdgpu_ring *ring;
- enum amd_sched_priority ctx_prio;
+ enum drm_sched_priority ctx_prio;
ctx->override_priority = priority;
- ctx_prio = (ctx->override_priority == AMD_SCHED_PRIORITY_UNSET) ?
+ ctx_prio = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ?
ctx->init_priority : ctx->override_priority;
for (i = 0; i < adev->num_rings; i++) {
@@ -407,7 +407,7 @@ void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
continue;
- amd_sched_entity_set_rq(entity, rq);
+ drm_sched_entity_set_rq(entity, rq);
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
new file mode 100644
index 000000000000..ee76b468774a
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -0,0 +1,792 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/kthread.h>
+#include <drm/drmP.h>
+#include <linux/debugfs.h>
+#include "amdgpu.h"
+
+/*
+ * Debugfs
+ */
+int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
+ const struct drm_info_list *files,
+ unsigned nfiles)
+{
+ unsigned i;
+
+ for (i = 0; i < adev->debugfs_count; i++) {
+ if (adev->debugfs[i].files == files) {
+ /* Already registered */
+ return 0;
+ }
+ }
+
+ i = adev->debugfs_count + 1;
+ if (i > AMDGPU_DEBUGFS_MAX_COMPONENTS) {
+ DRM_ERROR("Reached maximum number of debugfs components.\n");
+ DRM_ERROR("Report so we increase "
+ "AMDGPU_DEBUGFS_MAX_COMPONENTS.\n");
+ return -EINVAL;
+ }
+ adev->debugfs[adev->debugfs_count].files = files;
+ adev->debugfs[adev->debugfs_count].num_files = nfiles;
+ adev->debugfs_count = i;
+#if defined(CONFIG_DEBUG_FS)
+ drm_debugfs_create_files(files, nfiles,
+ adev->ddev->primary->debugfs_root,
+ adev->ddev->primary);
+#endif
+ return 0;
+}
+
+#if defined(CONFIG_DEBUG_FS)
+
+static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = file_inode(f)->i_private;
+ ssize_t result = 0;
+ int r;
+ bool pm_pg_lock, use_bank;
+ unsigned instance_bank, sh_bank, se_bank;
+
+ if (size & 0x3 || *pos & 0x3)
+ return -EINVAL;
+
+ /* are we reading registers for which a PG lock is necessary? */
+ pm_pg_lock = (*pos >> 23) & 1;
+
+ if (*pos & (1ULL << 62)) {
+ se_bank = (*pos & GENMASK_ULL(33, 24)) >> 24;
+ sh_bank = (*pos & GENMASK_ULL(43, 34)) >> 34;
+ instance_bank = (*pos & GENMASK_ULL(53, 44)) >> 44;
+
+ if (se_bank == 0x3FF)
+ se_bank = 0xFFFFFFFF;
+ if (sh_bank == 0x3FF)
+ sh_bank = 0xFFFFFFFF;
+ if (instance_bank == 0x3FF)
+ instance_bank = 0xFFFFFFFF;
+ use_bank = 1;
+ } else {
+ use_bank = 0;
+ }
+
+ *pos &= (1UL << 22) - 1;
+
+ if (use_bank) {
+ if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
+ (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
+ return -EINVAL;
+ mutex_lock(&adev->grbm_idx_mutex);
+ amdgpu_gfx_select_se_sh(adev, se_bank,
+ sh_bank, instance_bank);
+ }
+
+ if (pm_pg_lock)
+ mutex_lock(&adev->pm.mutex);
+
+ while (size) {
+ uint32_t value;
+
+ if (*pos > adev->rmmio_size)
+ goto end;
+
+ value = RREG32(*pos >> 2);
+ r = put_user(value, (uint32_t *)buf);
+ if (r) {
+ result = r;
+ goto end;
+ }
+
+ result += 4;
+ buf += 4;
+ *pos += 4;
+ size -= 4;
+ }
+
+end:
+ if (use_bank) {
+ amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ mutex_unlock(&adev->grbm_idx_mutex);
+ }
+
+ if (pm_pg_lock)
+ mutex_unlock(&adev->pm.mutex);
+
+ return result;
+}
+
+static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = file_inode(f)->i_private;
+ ssize_t result = 0;
+ int r;
+ bool pm_pg_lock, use_bank;
+ unsigned instance_bank, sh_bank, se_bank;
+
+ if (size & 0x3 || *pos & 0x3)
+ return -EINVAL;
+
+ /* are we writing registers for which a PG lock is necessary? */
+ pm_pg_lock = (*pos >> 23) & 1;
+
+ if (*pos & (1ULL << 62)) {
+ se_bank = (*pos & GENMASK_ULL(33, 24)) >> 24;
+ sh_bank = (*pos & GENMASK_ULL(43, 34)) >> 34;
+ instance_bank = (*pos & GENMASK_ULL(53, 44)) >> 44;
+
+ if (se_bank == 0x3FF)
+ se_bank = 0xFFFFFFFF;
+ if (sh_bank == 0x3FF)
+ sh_bank = 0xFFFFFFFF;
+ if (instance_bank == 0x3FF)
+ instance_bank = 0xFFFFFFFF;
+ use_bank = 1;
+ } else {
+ use_bank = 0;
+ }
+
+ *pos &= (1UL << 22) - 1;
+
+ if (use_bank) {
+ if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
+ (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
+ return -EINVAL;
+ mutex_lock(&adev->grbm_idx_mutex);
+ amdgpu_gfx_select_se_sh(adev, se_bank,
+ sh_bank, instance_bank);
+ }
+
+ if (pm_pg_lock)
+ mutex_lock(&adev->pm.mutex);
+
+ while (size) {
+ uint32_t value;
+
+ if (*pos > adev->rmmio_size)
+ return result;
+
+ r = get_user(value, (uint32_t *)buf);
+ if (r)
+ return r;
+
+ WREG32(*pos >> 2, value);
+
+ result += 4;
+ buf += 4;
+ *pos += 4;
+ size -= 4;
+ }
+
+ if (use_bank) {
+ amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ mutex_unlock(&adev->grbm_idx_mutex);
+ }
+
+ if (pm_pg_lock)
+ mutex_unlock(&adev->pm.mutex);
+
+ return result;
+}
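
Both the read and write paths above decode the same layout from the file offset: bit 62 enables explicit banking, bits 24-33/34-43/44-53 carry the SE/SH/instance selectors (0x3FF meaning broadcast), bit 23 requests the PM mutex, and the low 22 bits are the MMIO byte offset. A user-space sketch that packs such an offset and reads one register; the debugfs path and the register offset are assumptions for illustration:

#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>

/* Build a file offset for amdgpu_regs with explicit SE/SH/instance banking. */
static uint64_t regs_offset(uint32_t byte_off, uint32_t se, uint32_t sh,
                            uint32_t instance, int pm_lock)
{
        uint64_t pos = byte_off & ((1ULL << 22) - 1);

        pos |= (uint64_t)(se & 0x3ff) << 24;
        pos |= (uint64_t)(sh & 0x3ff) << 34;
        pos |= (uint64_t)(instance & 0x3ff) << 44;
        pos |= 1ULL << 62;              /* enable bank selection */
        if (pm_lock)
                pos |= 1ULL << 23;      /* take the PM mutex around the access */
        return pos;
}

int main(void)
{
        /* Hypothetical debugfs path and register offset; needs root. */
        int fd = open("/sys/kernel/debug/dri/0/amdgpu_regs", O_RDONLY);
        uint32_t value;

        if (fd < 0)
                return 1;
        /* Read one register on SE0/SH0, instance 0x3ff = broadcast. */
        if (pread(fd, &value, sizeof(value),
                  regs_offset(0x8010, 0, 0, 0x3ff, 0)) == sizeof(value))
                printf("reg 0x8010 = 0x%08x\n", value);
        close(fd);
        return 0;
}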
+
+static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = file_inode(f)->i_private;
+ ssize_t result = 0;
+ int r;
+
+ if (size & 0x3 || *pos & 0x3)
+ return -EINVAL;
+
+ while (size) {
+ uint32_t value;
+
+ value = RREG32_PCIE(*pos >> 2);
+ r = put_user(value, (uint32_t *)buf);
+ if (r)
+ return r;
+
+ result += 4;
+ buf += 4;
+ *pos += 4;
+ size -= 4;
+ }
+
+ return result;
+}
+
+static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = file_inode(f)->i_private;
+ ssize_t result = 0;
+ int r;
+
+ if (size & 0x3 || *pos & 0x3)
+ return -EINVAL;
+
+ while (size) {
+ uint32_t value;
+
+ r = get_user(value, (uint32_t *)buf);
+ if (r)
+ return r;
+
+ WREG32_PCIE(*pos >> 2, value);
+
+ result += 4;
+ buf += 4;
+ *pos += 4;
+ size -= 4;
+ }
+
+ return result;
+}
+
+static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = file_inode(f)->i_private;
+ ssize_t result = 0;
+ int r;
+
+ if (size & 0x3 || *pos & 0x3)
+ return -EINVAL;
+
+ while (size) {
+ uint32_t value;
+
+ value = RREG32_DIDT(*pos >> 2);
+ r = put_user(value, (uint32_t *)buf);
+ if (r)
+ return r;
+
+ result += 4;
+ buf += 4;
+ *pos += 4;
+ size -= 4;
+ }
+
+ return result;
+}
+
+static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = file_inode(f)->i_private;
+ ssize_t result = 0;
+ int r;
+
+ if (size & 0x3 || *pos & 0x3)
+ return -EINVAL;
+
+ while (size) {
+ uint32_t value;
+
+ r = get_user(value, (uint32_t *)buf);
+ if (r)
+ return r;
+
+ WREG32_DIDT(*pos >> 2, value);
+
+ result += 4;
+ buf += 4;
+ *pos += 4;
+ size -= 4;
+ }
+
+ return result;
+}
+
+static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = file_inode(f)->i_private;
+ ssize_t result = 0;
+ int r;
+
+ if (size & 0x3 || *pos & 0x3)
+ return -EINVAL;
+
+ while (size) {
+ uint32_t value;
+
+ value = RREG32_SMC(*pos);
+ r = put_user(value, (uint32_t *)buf);
+ if (r)
+ return r;
+
+ result += 4;
+ buf += 4;
+ *pos += 4;
+ size -= 4;
+ }
+
+ return result;
+}
+
+static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = file_inode(f)->i_private;
+ ssize_t result = 0;
+ int r;
+
+ if (size & 0x3 || *pos & 0x3)
+ return -EINVAL;
+
+ while (size) {
+ uint32_t value;
+
+ r = get_user(value, (uint32_t *)buf);
+ if (r)
+ return r;
+
+ WREG32_SMC(*pos, value);
+
+ result += 4;
+ buf += 4;
+ *pos += 4;
+ size -= 4;
+ }
+
+ return result;
+}
+
+static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = file_inode(f)->i_private;
+ ssize_t result = 0;
+ int r;
+ uint32_t *config, no_regs = 0;
+
+ if (size & 0x3 || *pos & 0x3)
+ return -EINVAL;
+
+ config = kmalloc_array(256, sizeof(*config), GFP_KERNEL);
+ if (!config)
+ return -ENOMEM;
+
+ /* version, increment each time something is added */
+ config[no_regs++] = 3;
+ config[no_regs++] = adev->gfx.config.max_shader_engines;
+ config[no_regs++] = adev->gfx.config.max_tile_pipes;
+ config[no_regs++] = adev->gfx.config.max_cu_per_sh;
+ config[no_regs++] = adev->gfx.config.max_sh_per_se;
+ config[no_regs++] = adev->gfx.config.max_backends_per_se;
+ config[no_regs++] = adev->gfx.config.max_texture_channel_caches;
+ config[no_regs++] = adev->gfx.config.max_gprs;
+ config[no_regs++] = adev->gfx.config.max_gs_threads;
+ config[no_regs++] = adev->gfx.config.max_hw_contexts;
+ config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_frontend;
+ config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_backend;
+ config[no_regs++] = adev->gfx.config.sc_hiz_tile_fifo_size;
+ config[no_regs++] = adev->gfx.config.sc_earlyz_tile_fifo_size;
+ config[no_regs++] = adev->gfx.config.num_tile_pipes;
+ config[no_regs++] = adev->gfx.config.backend_enable_mask;
+ config[no_regs++] = adev->gfx.config.mem_max_burst_length_bytes;
+ config[no_regs++] = adev->gfx.config.mem_row_size_in_kb;
+ config[no_regs++] = adev->gfx.config.shader_engine_tile_size;
+ config[no_regs++] = adev->gfx.config.num_gpus;
+ config[no_regs++] = adev->gfx.config.multi_gpu_tile_size;
+ config[no_regs++] = adev->gfx.config.mc_arb_ramcfg;
+ config[no_regs++] = adev->gfx.config.gb_addr_config;
+ config[no_regs++] = adev->gfx.config.num_rbs;
+
+ /* rev==1 */
+ config[no_regs++] = adev->rev_id;
+ config[no_regs++] = adev->pg_flags;
+ config[no_regs++] = adev->cg_flags;
+
+ /* rev==2 */
+ config[no_regs++] = adev->family;
+ config[no_regs++] = adev->external_rev_id;
+
+ /* rev==3 */
+ config[no_regs++] = adev->pdev->device;
+ config[no_regs++] = adev->pdev->revision;
+ config[no_regs++] = adev->pdev->subsystem_device;
+ config[no_regs++] = adev->pdev->subsystem_vendor;
+
+ while (size && (*pos < no_regs * 4)) {
+ uint32_t value;
+
+ value = config[*pos >> 2];
+ r = put_user(value, (uint32_t *)buf);
+ if (r) {
+ kfree(config);
+ return r;
+ }
+
+ result += 4;
+ buf += 4;
+ *pos += 4;
+ size -= 4;
+ }
+
+ kfree(config);
+ return result;
+}
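
amdgpu_gca_config exposes a flat, versioned array of 32-bit words, with word 0 holding the layout version. A hedged user-space sketch that dumps whatever the file returns (the debugfs path is assumed to be the usual DRM location):

#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
        uint32_t buf[64];
        ssize_t n;
        int fd, i;

        fd = open("/sys/kernel/debug/dri/0/amdgpu_gca_config", O_RDONLY);
        if (fd < 0)
                return 1;

        n = read(fd, buf, sizeof(buf));
        close(fd);
        if (n < (ssize_t)sizeof(uint32_t))
                return 1;

        printf("layout version %u\n", buf[0]);
        for (i = 1; i < (int)(n / sizeof(uint32_t)); i++)
                printf("word %2d = %u (0x%x)\n", i, buf[i], buf[i]);
        return 0;
}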
+
+static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = file_inode(f)->i_private;
+ int idx, x, outsize, r, valuesize;
+ uint32_t values[16];
+
+ if (size & 3 || *pos & 0x3)
+ return -EINVAL;
+
+ if (amdgpu_dpm == 0)
+ return -EINVAL;
+
+ /* convert offset to sensor number */
+ idx = *pos >> 2;
+
+ valuesize = sizeof(values);
+ if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor)
+ r = amdgpu_dpm_read_sensor(adev, idx, &values[0], &valuesize);
+ else
+ return -EINVAL;
+
+ if (size > valuesize)
+ return -EINVAL;
+
+ outsize = 0;
+ x = 0;
+ if (!r) {
+ while (size) {
+ r = put_user(values[x++], (int32_t *)buf);
+ buf += 4;
+ size -= 4;
+ outsize += 4;
+ }
+ }
+
+ return !r ? outsize : r;
+}
+
+static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = f->f_inode->i_private;
+ int r, x;
+ ssize_t result=0;
+ uint32_t offset, se, sh, cu, wave, simd, data[32];
+
+ if (size & 3 || *pos & 3)
+ return -EINVAL;
+
+ /* decode offset */
+ offset = (*pos & GENMASK_ULL(6, 0));
+ se = (*pos & GENMASK_ULL(14, 7)) >> 7;
+ sh = (*pos & GENMASK_ULL(22, 15)) >> 15;
+ cu = (*pos & GENMASK_ULL(30, 23)) >> 23;
+ wave = (*pos & GENMASK_ULL(36, 31)) >> 31;
+ simd = (*pos & GENMASK_ULL(44, 37)) >> 37;
+
+ /* switch to the specific se/sh/cu */
+ mutex_lock(&adev->grbm_idx_mutex);
+ amdgpu_gfx_select_se_sh(adev, se, sh, cu);
+
+ x = 0;
+ if (adev->gfx.funcs->read_wave_data)
+ adev->gfx.funcs->read_wave_data(adev, simd, wave, data, &x);
+
+ amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
+ mutex_unlock(&adev->grbm_idx_mutex);
+
+ if (!x)
+ return -EINVAL;
+
+ while (size && (offset < x * 4)) {
+ uint32_t value;
+
+ value = data[offset >> 2];
+ r = put_user(value, (uint32_t *)buf);
+ if (r)
+ return r;
+
+ result += 4;
+ buf += 4;
+ offset += 4;
+ size -= 4;
+ }
+
+ return result;
+}
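
amdgpu_wave packs its selectors into the file offset as decoded above: a 7-bit byte offset into the returned words, then 8-bit SE/SH/CU fields, a 6-bit wave index and an 8-bit SIMD index. A small sketch that builds such an offset for pread() against the wave file (values are examples only):

#include <stdio.h>
#include <stdint.h>

/* Pack the amdgpu_wave file offset: low 7 bits are a byte offset into the
 * returned words, followed by the SE, SH, CU, wave and SIMD selectors. */
static uint64_t wave_offset(uint32_t byte_off, uint32_t se, uint32_t sh,
                            uint32_t cu, uint32_t wave, uint32_t simd)
{
        return  (uint64_t)(byte_off & 0x7f)      |
                (uint64_t)(se   & 0xff) <<  7    |
                (uint64_t)(sh   & 0xff) << 15    |
                (uint64_t)(cu   & 0xff) << 23    |
                (uint64_t)(wave & 0x3f) << 31    |
                (uint64_t)(simd & 0xff) << 37;
}

int main(void)
{
        /* Example: SE1, SH0, CU2, wave 3, SIMD 1, starting at word 0. */
        printf("pread offset = 0x%llx\n",
               (unsigned long long)wave_offset(0, 1, 0, 2, 3, 1));
        return 0;
}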
+
+static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = f->f_inode->i_private;
+ int r;
+ ssize_t result = 0;
+ uint32_t offset, se, sh, cu, wave, simd, thread, bank, *data;
+
+ if (size & 3 || *pos & 3)
+ return -EINVAL;
+
+ /* decode offset */
+ offset = *pos & GENMASK_ULL(11, 0);
+ se = (*pos & GENMASK_ULL(19, 12)) >> 12;
+ sh = (*pos & GENMASK_ULL(27, 20)) >> 20;
+ cu = (*pos & GENMASK_ULL(35, 28)) >> 28;
+ wave = (*pos & GENMASK_ULL(43, 36)) >> 36;
+ simd = (*pos & GENMASK_ULL(51, 44)) >> 44;
+ thread = (*pos & GENMASK_ULL(59, 52)) >> 52;
+ bank = (*pos & GENMASK_ULL(61, 60)) >> 60;
+
+ data = kmalloc_array(1024, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ /* switch to the specific se/sh/cu */
+ mutex_lock(&adev->grbm_idx_mutex);
+ amdgpu_gfx_select_se_sh(adev, se, sh, cu);
+
+ if (bank == 0) {
+ if (adev->gfx.funcs->read_wave_vgprs)
+ adev->gfx.funcs->read_wave_vgprs(adev, simd, wave, thread, offset, size>>2, data);
+ } else {
+ if (adev->gfx.funcs->read_wave_sgprs)
+ adev->gfx.funcs->read_wave_sgprs(adev, simd, wave, offset, size>>2, data);
+ }
+
+ amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
+ mutex_unlock(&adev->grbm_idx_mutex);
+
+ while (size) {
+ uint32_t value;
+
+ value = data[offset++];
+ r = put_user(value, (uint32_t *)buf);
+ if (r) {
+ result = r;
+ goto err;
+ }
+
+ result += 4;
+ buf += 4;
+ size -= 4;
+ }
+
+err:
+ kfree(data);
+ return result;
+}
+
+static const struct file_operations amdgpu_debugfs_regs_fops = {
+ .owner = THIS_MODULE,
+ .read = amdgpu_debugfs_regs_read,
+ .write = amdgpu_debugfs_regs_write,
+ .llseek = default_llseek
+};
+static const struct file_operations amdgpu_debugfs_regs_didt_fops = {
+ .owner = THIS_MODULE,
+ .read = amdgpu_debugfs_regs_didt_read,
+ .write = amdgpu_debugfs_regs_didt_write,
+ .llseek = default_llseek
+};
+static const struct file_operations amdgpu_debugfs_regs_pcie_fops = {
+ .owner = THIS_MODULE,
+ .read = amdgpu_debugfs_regs_pcie_read,
+ .write = amdgpu_debugfs_regs_pcie_write,
+ .llseek = default_llseek
+};
+static const struct file_operations amdgpu_debugfs_regs_smc_fops = {
+ .owner = THIS_MODULE,
+ .read = amdgpu_debugfs_regs_smc_read,
+ .write = amdgpu_debugfs_regs_smc_write,
+ .llseek = default_llseek
+};
+
+static const struct file_operations amdgpu_debugfs_gca_config_fops = {
+ .owner = THIS_MODULE,
+ .read = amdgpu_debugfs_gca_config_read,
+ .llseek = default_llseek
+};
+
+static const struct file_operations amdgpu_debugfs_sensors_fops = {
+ .owner = THIS_MODULE,
+ .read = amdgpu_debugfs_sensor_read,
+ .llseek = default_llseek
+};
+
+static const struct file_operations amdgpu_debugfs_wave_fops = {
+ .owner = THIS_MODULE,
+ .read = amdgpu_debugfs_wave_read,
+ .llseek = default_llseek
+};
+static const struct file_operations amdgpu_debugfs_gpr_fops = {
+ .owner = THIS_MODULE,
+ .read = amdgpu_debugfs_gpr_read,
+ .llseek = default_llseek
+};
+
+static const struct file_operations *debugfs_regs[] = {
+ &amdgpu_debugfs_regs_fops,
+ &amdgpu_debugfs_regs_didt_fops,
+ &amdgpu_debugfs_regs_pcie_fops,
+ &amdgpu_debugfs_regs_smc_fops,
+ &amdgpu_debugfs_gca_config_fops,
+ &amdgpu_debugfs_sensors_fops,
+ &amdgpu_debugfs_wave_fops,
+ &amdgpu_debugfs_gpr_fops,
+};
+
+static const char *debugfs_regs_names[] = {
+ "amdgpu_regs",
+ "amdgpu_regs_didt",
+ "amdgpu_regs_pcie",
+ "amdgpu_regs_smc",
+ "amdgpu_gca_config",
+ "amdgpu_sensors",
+ "amdgpu_wave",
+ "amdgpu_gpr",
+};
+
+int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
+{
+ struct drm_minor *minor = adev->ddev->primary;
+ struct dentry *ent, *root = minor->debugfs_root;
+ unsigned i, j;
+
+ for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
+ ent = debugfs_create_file(debugfs_regs_names[i],
+ S_IFREG | S_IRUGO, root,
+ adev, debugfs_regs[i]);
+ if (IS_ERR(ent)) {
+ for (j = 0; j < i; j++) {
+ debugfs_remove(adev->debugfs_regs[j]);
+ adev->debugfs_regs[j] = NULL;
+ }
+ return PTR_ERR(ent);
+ }
+
+ if (!i)
+ i_size_write(ent->d_inode, adev->rmmio_size);
+ adev->debugfs_regs[i] = ent;
+ }
+
+ return 0;
+}
+
+void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev)
+{
+ unsigned i;
+
+ for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
+ if (adev->debugfs_regs[i]) {
+ debugfs_remove(adev->debugfs_regs[i]);
+ adev->debugfs_regs[i] = NULL;
+ }
+ }
+}
+
+static int amdgpu_debugfs_test_ib(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct amdgpu_device *adev = dev->dev_private;
+ int r = 0, i;
+
+ /* hold on the scheduler */
+ for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
+ struct amdgpu_ring *ring = adev->rings[i];
+
+ if (!ring || !ring->sched.thread)
+ continue;
+ kthread_park(ring->sched.thread);
+ }
+
+ seq_printf(m, "run ib test:\n");
+ r = amdgpu_ib_ring_tests(adev);
+ if (r)
+ seq_printf(m, "ib ring tests failed (%d).\n", r);
+ else
+ seq_printf(m, "ib ring tests passed.\n");
+
+ /* go on the scheduler */
+ for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
+ struct amdgpu_ring *ring = adev->rings[i];
+
+ if (!ring || !ring->sched.thread)
+ continue;
+ kthread_unpark(ring->sched.thread);
+ }
+
+ return 0;
+}
+
+static int amdgpu_debugfs_get_vbios_dump(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct amdgpu_device *adev = dev->dev_private;
+
+ seq_write(m, adev->bios, adev->bios_size);
+ return 0;
+}
+
+static int amdgpu_debugfs_evict_vram(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *)m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct amdgpu_device *adev = dev->dev_private;
+
+ seq_printf(m, "(%d)\n", amdgpu_bo_evict_vram(adev));
+ return 0;
+}
+
+static const struct drm_info_list amdgpu_debugfs_list[] = {
+ {"amdgpu_vbios", amdgpu_debugfs_get_vbios_dump},
+ {"amdgpu_test_ib", &amdgpu_debugfs_test_ib},
+ {"amdgpu_evict_vram", &amdgpu_debugfs_evict_vram}
+};
+
+int amdgpu_debugfs_init(struct amdgpu_device *adev)
+{
+ return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_list,
+ ARRAY_SIZE(amdgpu_debugfs_list));
+}
+
+#else
+int amdgpu_debugfs_init(struct amdgpu_device *adev)
+{
+ return 0;
+}
+int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
+{
+ return 0;
+}
+void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev) { }
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/basics/grph_object_id.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h
index 147822545252..8260d8073c26 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/grph_object_id.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h
@@ -1,5 +1,7 @@
/*
- * Copyright 2012-15 Advanced Micro Devices, Inc.
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -19,57 +21,22 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
- * Authors: AMD
- *
*/
-#include "dm_services.h"
-#include "include/grph_object_id.h"
-
-static bool dal_graphics_object_id_is_valid(struct graphics_object_id id)
-{
- bool rc = true;
-
- switch (id.type) {
- case OBJECT_TYPE_UNKNOWN:
- rc = false;
- break;
- case OBJECT_TYPE_GPU:
- case OBJECT_TYPE_ENGINE:
- /* do NOT check for id.id == 0 */
- if (id.enum_id == ENUM_ID_UNKNOWN)
- rc = false;
- break;
- default:
- if (id.id == 0 || id.enum_id == ENUM_ID_UNKNOWN)
- rc = false;
- break;
- }
-
- return rc;
-}
-
-bool dal_graphics_object_id_is_equal(
- struct graphics_object_id id1,
- struct graphics_object_id id2)
-{
- if (false == dal_graphics_object_id_is_valid(id1)) {
- dm_output_to_console(
- "%s: Warning: comparing invalid object 'id1'!\n", __func__);
- return false;
- }
-
- if (false == dal_graphics_object_id_is_valid(id2)) {
- dm_output_to_console(
- "%s: Warning: comparing invalid object 'id2'!\n", __func__);
- return false;
- }
-
- if (id1.id == id2.id && id1.enum_id == id2.enum_id
- && id1.type == id2.type)
- return true;
-
- return false;
-}
-
-
+/*
+ * Debugfs
+ */
+struct amdgpu_debugfs {
+ const struct drm_info_list *files;
+ unsigned num_files;
+};
+
+int amdgpu_debugfs_regs_init(struct amdgpu_device *adev);
+void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev);
+int amdgpu_debugfs_init(struct amdgpu_device *adev);
+int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
+ const struct drm_info_list *files,
+ unsigned nfiles);
+int amdgpu_debugfs_fence_init(struct amdgpu_device *adev);
+int amdgpu_debugfs_firmware_init(struct amdgpu_device *adev);
+int amdgpu_debugfs_gem_init(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 70c9e5756b02..357cd8bf2e55 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -28,7 +28,6 @@
#include <linux/kthread.h>
#include <linux/console.h>
#include <linux/slab.h>
-#include <linux/debugfs.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_atomic_helper.h>
@@ -64,11 +63,6 @@ MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
#define AMDGPU_RESUME_MS 2000
-static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev);
-static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev);
-static int amdgpu_debugfs_test_ib_ring_init(struct amdgpu_device *adev);
-static int amdgpu_debugfs_vbios_dump_init(struct amdgpu_device *adev);
-
static const char *amdgpu_asic_name[] = {
"TAHITI",
"PITCAIRN",
@@ -333,7 +327,7 @@ static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
BUG();
}
-static int amdgpu_vram_scratch_init(struct amdgpu_device *adev)
+static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev)
{
return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
@@ -342,13 +336,13 @@ static int amdgpu_vram_scratch_init(struct amdgpu_device *adev)
(void **)&adev->vram_scratch.ptr);
}
-static void amdgpu_vram_scratch_fini(struct amdgpu_device *adev)
+static void amdgpu_device_vram_scratch_fini(struct amdgpu_device *adev)
{
amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
}
/**
- * amdgpu_program_register_sequence - program an array of registers.
+ * amdgpu_device_program_register_sequence - program an array of registers.
*
* @adev: amdgpu_device pointer
* @registers: pointer to the register array
@@ -357,9 +351,9 @@ static void amdgpu_vram_scratch_fini(struct amdgpu_device *adev)
 * Programs an array of registers with AND and OR masks.
* This is a helper for setting golden registers.
*/
-void amdgpu_program_register_sequence(struct amdgpu_device *adev,
- const u32 *registers,
- const u32 array_size)
+void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
+ const u32 *registers,
+ const u32 array_size)
{
u32 tmp, reg, and_mask, or_mask;
int i;
@@ -383,7 +377,7 @@ void amdgpu_program_register_sequence(struct amdgpu_device *adev,
}
}
-void amdgpu_pci_config_reset(struct amdgpu_device *adev)
+void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
{
pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
}
@@ -392,14 +386,14 @@ void amdgpu_pci_config_reset(struct amdgpu_device *adev)
* GPU doorbell aperture helpers function.
*/
/**
- * amdgpu_doorbell_init - Init doorbell driver information.
+ * amdgpu_device_doorbell_init - Init doorbell driver information.
*
* @adev: amdgpu_device pointer
*
* Init doorbell driver information (CIK)
* Returns 0 on success, error on failure.
*/
-static int amdgpu_doorbell_init(struct amdgpu_device *adev)
+static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
{
/* No doorbell on SI hardware generation */
if (adev->asic_type < CHIP_BONAIRE) {
@@ -432,66 +426,35 @@ static int amdgpu_doorbell_init(struct amdgpu_device *adev)
}
/**
- * amdgpu_doorbell_fini - Tear down doorbell driver information.
+ * amdgpu_device_doorbell_fini - Tear down doorbell driver information.
*
* @adev: amdgpu_device pointer
*
* Tear down doorbell driver information (CIK)
*/
-static void amdgpu_doorbell_fini(struct amdgpu_device *adev)
+static void amdgpu_device_doorbell_fini(struct amdgpu_device *adev)
{
iounmap(adev->doorbell.ptr);
adev->doorbell.ptr = NULL;
}
-/**
- * amdgpu_doorbell_get_kfd_info - Report doorbell configuration required to
- * setup amdkfd
- *
- * @adev: amdgpu_device pointer
- * @aperture_base: output returning doorbell aperture base physical address
- * @aperture_size: output returning doorbell aperture size in bytes
- * @start_offset: output returning # of doorbell bytes reserved for amdgpu.
- *
- * amdgpu and amdkfd share the doorbell aperture. amdgpu sets it up,
- * takes doorbells required for its own rings and reports the setup to amdkfd.
- * amdgpu reserved doorbells are at the start of the doorbell aperture.
- */
-void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
- phys_addr_t *aperture_base,
- size_t *aperture_size,
- size_t *start_offset)
-{
- /*
- * The first num_doorbells are used by amdgpu.
- * amdkfd takes whatever's left in the aperture.
- */
- if (adev->doorbell.size > adev->doorbell.num_doorbells * sizeof(u32)) {
- *aperture_base = adev->doorbell.base;
- *aperture_size = adev->doorbell.size;
- *start_offset = adev->doorbell.num_doorbells * sizeof(u32);
- } else {
- *aperture_base = 0;
- *aperture_size = 0;
- *start_offset = 0;
- }
-}
+
/*
- * amdgpu_wb_*()
+ * amdgpu_device_wb_*()
* Writeback is the method by which the GPU updates special pages in memory
 * with the status of certain GPU events (fences, ring pointers, etc.).
*/
/**
- * amdgpu_wb_fini - Disable Writeback and free memory
+ * amdgpu_device_wb_fini - Disable Writeback and free memory
*
* @adev: amdgpu_device pointer
*
* Disables Writeback and frees the Writeback memory (all asics).
* Used at driver shutdown.
*/
-static void amdgpu_wb_fini(struct amdgpu_device *adev)
+static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
{
if (adev->wb.wb_obj) {
amdgpu_bo_free_kernel(&adev->wb.wb_obj,
@@ -502,7 +465,7 @@ static void amdgpu_wb_fini(struct amdgpu_device *adev)
}
/**
- * amdgpu_wb_init- Init Writeback driver info and allocate memory
+ * amdgpu_device_wb_init- Init Writeback driver info and allocate memory
*
* @adev: amdgpu_device pointer
*
@@ -510,7 +473,7 @@ static void amdgpu_wb_fini(struct amdgpu_device *adev)
* Used at driver startup.
* Returns 0 on success or an -error on failure.
*/
-static int amdgpu_wb_init(struct amdgpu_device *adev)
+static int amdgpu_device_wb_init(struct amdgpu_device *adev)
{
int r;
@@ -536,7 +499,7 @@ static int amdgpu_wb_init(struct amdgpu_device *adev)
}
/**
- * amdgpu_wb_get - Allocate a wb entry
+ * amdgpu_device_wb_get - Allocate a wb entry
*
* @adev: amdgpu_device pointer
* @wb: wb index
@@ -544,7 +507,7 @@ static int amdgpu_wb_init(struct amdgpu_device *adev)
* Allocate a wb slot for use by the driver (all asics).
* Returns 0 on success or -EINVAL on failure.
*/
-int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb)
+int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
{
unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
@@ -558,21 +521,21 @@ int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb)
}
/**
- * amdgpu_wb_free - Free a wb entry
+ * amdgpu_device_wb_free - Free a wb entry
*
* @adev: amdgpu_device pointer
* @wb: wb index
*
* Free a wb slot allocated for use by the driver (all asics)
*/
-void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb)
+void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
{
if (wb < adev->wb.num_wb)
__clear_bit(wb >> 3, adev->wb.used);
}
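
For orientation, a minimal sketch of the allocate/release pattern these writeback helpers are meant for (the surrounding ring-init context is assumed and not part of this hunk):

	u32 wb;
	int r;

	/* reserve a writeback slot; the GPU will later write status into it */
	r = amdgpu_device_wb_get(adev, &wb);
	if (r)
		return r;

	/* ... hand the slot index to the ring/fence code that consumes it ... */

	/* release the slot once the consumer is torn down */
	amdgpu_device_wb_free(adev, wb);
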
/**
- * amdgpu_vram_location - try to find VRAM location
+ * amdgpu_device_vram_location - try to find VRAM location
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
* @base: base address at which to put VRAM
@@ -580,7 +543,8 @@ void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb)
* Function will try to place VRAM at base address provided
* as parameter.
*/
-void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base)
+void amdgpu_device_vram_location(struct amdgpu_device *adev,
+ struct amdgpu_mc *mc, u64 base)
{
uint64_t limit = (uint64_t)amdgpu_vram_limit << 20;
@@ -594,7 +558,7 @@ void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64
}
/**
- * amdgpu_gart_location - try to find GTT location
+ * amdgpu_device_gart_location - try to find GTT location
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
*
@@ -605,7 +569,8 @@ void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64
*
* FIXME: when reducing GTT size align new size on power of 2.
*/
-void amdgpu_gart_location(struct amdgpu_device *adev, struct amdgpu_mc *mc)
+void amdgpu_device_gart_location(struct amdgpu_device *adev,
+ struct amdgpu_mc *mc)
{
u64 size_af, size_bf;
@@ -632,101 +597,6 @@ void amdgpu_gart_location(struct amdgpu_device *adev, struct amdgpu_mc *mc)
mc->gart_size >> 20, mc->gart_start, mc->gart_end);
}
-/*
- * Firmware Reservation functions
- */
-/**
- * amdgpu_fw_reserve_vram_fini - free fw reserved vram
- *
- * @adev: amdgpu_device pointer
- *
- * free fw reserved vram if it has been reserved.
- */
-void amdgpu_fw_reserve_vram_fini(struct amdgpu_device *adev)
-{
- amdgpu_bo_free_kernel(&adev->fw_vram_usage.reserved_bo,
- NULL, &adev->fw_vram_usage.va);
-}
-
-/**
- * amdgpu_fw_reserve_vram_init - create bo vram reservation from fw
- *
- * @adev: amdgpu_device pointer
- *
- * create bo vram reservation from fw.
- */
-int amdgpu_fw_reserve_vram_init(struct amdgpu_device *adev)
-{
- struct ttm_operation_ctx ctx = { false, false };
- int r = 0;
- int i;
- u64 vram_size = adev->mc.visible_vram_size;
- u64 offset = adev->fw_vram_usage.start_offset;
- u64 size = adev->fw_vram_usage.size;
- struct amdgpu_bo *bo;
-
- adev->fw_vram_usage.va = NULL;
- adev->fw_vram_usage.reserved_bo = NULL;
-
- if (adev->fw_vram_usage.size > 0 &&
- adev->fw_vram_usage.size <= vram_size) {
-
- r = amdgpu_bo_create(adev, adev->fw_vram_usage.size,
- PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
- AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, NULL, NULL, 0,
- &adev->fw_vram_usage.reserved_bo);
- if (r)
- goto error_create;
-
- r = amdgpu_bo_reserve(adev->fw_vram_usage.reserved_bo, false);
- if (r)
- goto error_reserve;
-
- /* remove the original mem node and create a new one at the
- * request position
- */
- bo = adev->fw_vram_usage.reserved_bo;
- offset = ALIGN(offset, PAGE_SIZE);
- for (i = 0; i < bo->placement.num_placement; ++i) {
- bo->placements[i].fpfn = offset >> PAGE_SHIFT;
- bo->placements[i].lpfn = (offset + size) >> PAGE_SHIFT;
- }
-
- ttm_bo_mem_put(&bo->tbo, &bo->tbo.mem);
- r = ttm_bo_mem_space(&bo->tbo, &bo->placement,
- &bo->tbo.mem, &ctx);
- if (r)
- goto error_pin;
-
- r = amdgpu_bo_pin_restricted(adev->fw_vram_usage.reserved_bo,
- AMDGPU_GEM_DOMAIN_VRAM,
- adev->fw_vram_usage.start_offset,
- (adev->fw_vram_usage.start_offset +
- adev->fw_vram_usage.size), NULL);
- if (r)
- goto error_pin;
- r = amdgpu_bo_kmap(adev->fw_vram_usage.reserved_bo,
- &adev->fw_vram_usage.va);
- if (r)
- goto error_kmap;
-
- amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo);
- }
- return r;
-
-error_kmap:
- amdgpu_bo_unpin(adev->fw_vram_usage.reserved_bo);
-error_pin:
- amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo);
-error_reserve:
- amdgpu_bo_unref(&adev->fw_vram_usage.reserved_bo);
-error_create:
- adev->fw_vram_usage.va = NULL;
- adev->fw_vram_usage.reserved_bo = NULL;
- return r;
-}
-
/**
* amdgpu_device_resize_fb_bar - try to resize FB BAR
*
@@ -771,7 +641,7 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
cmd & ~PCI_COMMAND_MEMORY);
/* Free the VRAM and doorbell BAR, we most likely need to move both. */
- amdgpu_doorbell_fini(adev);
+ amdgpu_device_doorbell_fini(adev);
if (adev->asic_type >= CHIP_BONAIRE)
pci_release_resource(adev->pdev, 2);
@@ -788,7 +658,7 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
/* When the doorbell or fb BAR isn't available we have no chance of
* using the device.
*/
- r = amdgpu_doorbell_init(adev);
+ r = amdgpu_device_doorbell_init(adev);
if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
return -ENODEV;
@@ -801,7 +671,7 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
* GPU helpers function.
*/
/**
- * amdgpu_need_post - check if the hw need post or not
+ * amdgpu_device_need_post - check if the hw needs post or not
*
* @adev: amdgpu_device pointer
*
@@ -809,7 +679,7 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
* or post is needed if hw reset is performed.
 * Returns true if post is needed or false if not.
*/
-bool amdgpu_need_post(struct amdgpu_device *adev)
+bool amdgpu_device_need_post(struct amdgpu_device *adev)
{
uint32_t reg;
@@ -854,285 +724,9 @@ bool amdgpu_need_post(struct amdgpu_device *adev)
return true;
}
-/**
- * amdgpu_dummy_page_init - init dummy page used by the driver
- *
- * @adev: amdgpu_device pointer
- *
- * Allocate the dummy page used by the driver (all asics).
- * This dummy page is used by the driver as a filler for gart entries
- * when pages are taken out of the GART
- * Returns 0 on sucess, -ENOMEM on failure.
- */
-int amdgpu_dummy_page_init(struct amdgpu_device *adev)
-{
- if (adev->dummy_page.page)
- return 0;
- adev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
- if (adev->dummy_page.page == NULL)
- return -ENOMEM;
- adev->dummy_page.addr = pci_map_page(adev->pdev, adev->dummy_page.page,
- 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- if (pci_dma_mapping_error(adev->pdev, adev->dummy_page.addr)) {
- dev_err(&adev->pdev->dev, "Failed to DMA MAP the dummy page\n");
- __free_page(adev->dummy_page.page);
- adev->dummy_page.page = NULL;
- return -ENOMEM;
- }
- return 0;
-}
-
-/**
- * amdgpu_dummy_page_fini - free dummy page used by the driver
- *
- * @adev: amdgpu_device pointer
- *
- * Frees the dummy page used by the driver (all asics).
- */
-void amdgpu_dummy_page_fini(struct amdgpu_device *adev)
-{
- if (adev->dummy_page.page == NULL)
- return;
- pci_unmap_page(adev->pdev, adev->dummy_page.addr,
- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- __free_page(adev->dummy_page.page);
- adev->dummy_page.page = NULL;
-}
-
-
-/* ATOM accessor methods */
-/*
- * ATOM is an interpreted byte code stored in tables in the vbios. The
- * driver registers callbacks to access registers and the interpreter
- * in the driver parses the tables and executes then to program specific
- * actions (set display modes, asic init, etc.). See amdgpu_atombios.c,
- * atombios.h, and atom.c
- */
-
-/**
- * cail_pll_read - read PLL register
- *
- * @info: atom card_info pointer
- * @reg: PLL register offset
- *
- * Provides a PLL register accessor for the atom interpreter (r4xx+).
- * Returns the value of the PLL register.
- */
-static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
-{
- return 0;
-}
-
-/**
- * cail_pll_write - write PLL register
- *
- * @info: atom card_info pointer
- * @reg: PLL register offset
- * @val: value to write to the pll register
- *
- * Provides a PLL register accessor for the atom interpreter (r4xx+).
- */
-static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
-{
-
-}
-
-/**
- * cail_mc_read - read MC (Memory Controller) register
- *
- * @info: atom card_info pointer
- * @reg: MC register offset
- *
- * Provides an MC register accessor for the atom interpreter (r4xx+).
- * Returns the value of the MC register.
- */
-static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
-{
- return 0;
-}
-
-/**
- * cail_mc_write - write MC (Memory Controller) register
- *
- * @info: atom card_info pointer
- * @reg: MC register offset
- * @val: value to write to the pll register
- *
- * Provides a MC register accessor for the atom interpreter (r4xx+).
- */
-static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
-{
-
-}
-
-/**
- * cail_reg_write - write MMIO register
- *
- * @info: atom card_info pointer
- * @reg: MMIO register offset
- * @val: value to write to the pll register
- *
- * Provides a MMIO register accessor for the atom interpreter (r4xx+).
- */
-static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
-{
- struct amdgpu_device *adev = info->dev->dev_private;
-
- WREG32(reg, val);
-}
-
-/**
- * cail_reg_read - read MMIO register
- *
- * @info: atom card_info pointer
- * @reg: MMIO register offset
- *
- * Provides an MMIO register accessor for the atom interpreter (r4xx+).
- * Returns the value of the MMIO register.
- */
-static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
-{
- struct amdgpu_device *adev = info->dev->dev_private;
- uint32_t r;
-
- r = RREG32(reg);
- return r;
-}
-
-/**
- * cail_ioreg_write - write IO register
- *
- * @info: atom card_info pointer
- * @reg: IO register offset
- * @val: value to write to the pll register
- *
- * Provides a IO register accessor for the atom interpreter (r4xx+).
- */
-static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
-{
- struct amdgpu_device *adev = info->dev->dev_private;
-
- WREG32_IO(reg, val);
-}
-
-/**
- * cail_ioreg_read - read IO register
- *
- * @info: atom card_info pointer
- * @reg: IO register offset
- *
- * Provides an IO register accessor for the atom interpreter (r4xx+).
- * Returns the value of the IO register.
- */
-static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
-{
- struct amdgpu_device *adev = info->dev->dev_private;
- uint32_t r;
-
- r = RREG32_IO(reg);
- return r;
-}
-
-static ssize_t amdgpu_atombios_get_vbios_version(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
- struct atom_context *ctx = adev->mode_info.atom_context;
-
- return snprintf(buf, PAGE_SIZE, "%s\n", ctx->vbios_version);
-}
-
-static DEVICE_ATTR(vbios_version, 0444, amdgpu_atombios_get_vbios_version,
- NULL);
-
-/**
- * amdgpu_atombios_fini - free the driver info and callbacks for atombios
- *
- * @adev: amdgpu_device pointer
- *
- * Frees the driver info and register access callbacks for the ATOM
- * interpreter (r4xx+).
- * Called at driver shutdown.
- */
-static void amdgpu_atombios_fini(struct amdgpu_device *adev)
-{
- if (adev->mode_info.atom_context) {
- kfree(adev->mode_info.atom_context->scratch);
- kfree(adev->mode_info.atom_context->iio);
- }
- kfree(adev->mode_info.atom_context);
- adev->mode_info.atom_context = NULL;
- kfree(adev->mode_info.atom_card_info);
- adev->mode_info.atom_card_info = NULL;
- device_remove_file(adev->dev, &dev_attr_vbios_version);
-}
-
-/**
- * amdgpu_atombios_init - init the driver info and callbacks for atombios
- *
- * @adev: amdgpu_device pointer
- *
- * Initializes the driver info and register access callbacks for the
- * ATOM interpreter (r4xx+).
- * Returns 0 on sucess, -ENOMEM on failure.
- * Called at driver startup.
- */
-static int amdgpu_atombios_init(struct amdgpu_device *adev)
-{
- struct card_info *atom_card_info =
- kzalloc(sizeof(struct card_info), GFP_KERNEL);
- int ret;
-
- if (!atom_card_info)
- return -ENOMEM;
-
- adev->mode_info.atom_card_info = atom_card_info;
- atom_card_info->dev = adev->ddev;
- atom_card_info->reg_read = cail_reg_read;
- atom_card_info->reg_write = cail_reg_write;
- /* needed for iio ops */
- if (adev->rio_mem) {
- atom_card_info->ioreg_read = cail_ioreg_read;
- atom_card_info->ioreg_write = cail_ioreg_write;
- } else {
- DRM_DEBUG("PCI I/O BAR is not found. Using MMIO to access ATOM BIOS\n");
- atom_card_info->ioreg_read = cail_reg_read;
- atom_card_info->ioreg_write = cail_reg_write;
- }
- atom_card_info->mc_read = cail_mc_read;
- atom_card_info->mc_write = cail_mc_write;
- atom_card_info->pll_read = cail_pll_read;
- atom_card_info->pll_write = cail_pll_write;
-
- adev->mode_info.atom_context = amdgpu_atom_parse(atom_card_info, adev->bios);
- if (!adev->mode_info.atom_context) {
- amdgpu_atombios_fini(adev);
- return -ENOMEM;
- }
-
- mutex_init(&adev->mode_info.atom_context->mutex);
- if (adev->is_atom_fw) {
- amdgpu_atomfirmware_scratch_regs_init(adev);
- amdgpu_atomfirmware_allocate_fb_scratch(adev);
- } else {
- amdgpu_atombios_scratch_regs_init(adev);
- amdgpu_atombios_allocate_fb_scratch(adev);
- }
-
- ret = device_create_file(adev->dev, &dev_attr_vbios_version);
- if (ret) {
- DRM_ERROR("Failed to create device file for VBIOS version\n");
- return ret;
- }
-
- return 0;
-}
-
/* if we get transitioned to only one device, take VGA back */
/**
- * amdgpu_vga_set_decode - enable/disable vga decode
+ * amdgpu_device_vga_set_decode - enable/disable vga decode
*
* @cookie: amdgpu_device pointer
* @state: enable/disable vga decode
@@ -1140,7 +734,7 @@ static int amdgpu_atombios_init(struct amdgpu_device *adev)
* Enable/disable vga decode (all asics).
* Returns VGA resource flags.
*/
-static unsigned int amdgpu_vga_set_decode(void *cookie, bool state)
+static unsigned int amdgpu_device_vga_set_decode(void *cookie, bool state)
{
struct amdgpu_device *adev = cookie;
amdgpu_asic_set_vga_state(adev, state);
@@ -1151,7 +745,7 @@ static unsigned int amdgpu_vga_set_decode(void *cookie, bool state)
return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}
-static void amdgpu_check_block_size(struct amdgpu_device *adev)
+static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
{
/* defines number of bits in page table versus page directory,
* a page is 4KB so we have 12 bits offset, minimum 9 bits in the
@@ -1166,7 +760,7 @@ static void amdgpu_check_block_size(struct amdgpu_device *adev)
}
}
-static void amdgpu_check_vm_size(struct amdgpu_device *adev)
+static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
{
/* no need to check the default value */
if (amdgpu_vm_size == -1)
@@ -1180,14 +774,14 @@ static void amdgpu_check_vm_size(struct amdgpu_device *adev)
}
/**
- * amdgpu_check_arguments - validate module params
+ * amdgpu_device_check_arguments - validate module params
*
* @adev: amdgpu_device pointer
*
* Validates certain module parameters and updates
* the associated values used by the driver (all asics).
*/
-static void amdgpu_check_arguments(struct amdgpu_device *adev)
+static void amdgpu_device_check_arguments(struct amdgpu_device *adev)
{
if (amdgpu_sched_jobs < 4) {
dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
@@ -1220,9 +814,9 @@ static void amdgpu_check_arguments(struct amdgpu_device *adev)
amdgpu_vm_fragment_size = -1;
}
- amdgpu_check_vm_size(adev);
+ amdgpu_device_check_vm_size(adev);
- amdgpu_check_block_size(adev);
+ amdgpu_device_check_block_size(adev);
if (amdgpu_vram_page_split != -1 && (amdgpu_vram_page_split < 16 ||
!is_power_of_2(amdgpu_vram_page_split))) {
@@ -1230,6 +824,11 @@ static void amdgpu_check_arguments(struct amdgpu_device *adev)
amdgpu_vram_page_split);
amdgpu_vram_page_split = 1024;
}
+
+ if (amdgpu_lockup_timeout == 0) {
+ dev_warn(adev->dev, "lockup_timeout msut be > 0, adjusting to 10000\n");
+ amdgpu_lockup_timeout = 10000;
+ }
}
/**
@@ -1293,9 +892,9 @@ static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
.can_switch = amdgpu_switcheroo_can_switch,
};
-int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
- enum amd_ip_block_type block_type,
- enum amd_clockgating_state state)
+int amdgpu_device_ip_set_clockgating_state(struct amdgpu_device *adev,
+ enum amd_ip_block_type block_type,
+ enum amd_clockgating_state state)
{
int i, r = 0;
@@ -1315,9 +914,9 @@ int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
return r;
}
-int amdgpu_set_powergating_state(struct amdgpu_device *adev,
- enum amd_ip_block_type block_type,
- enum amd_powergating_state state)
+int amdgpu_device_ip_set_powergating_state(struct amdgpu_device *adev,
+ enum amd_ip_block_type block_type,
+ enum amd_powergating_state state)
{
int i, r = 0;
@@ -1337,7 +936,8 @@ int amdgpu_set_powergating_state(struct amdgpu_device *adev,
return r;
}
-void amdgpu_get_clockgating_state(struct amdgpu_device *adev, u32 *flags)
+void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
+ u32 *flags)
{
int i;
@@ -1349,8 +949,8 @@ void amdgpu_get_clockgating_state(struct amdgpu_device *adev, u32 *flags)
}
}
-int amdgpu_wait_for_idle(struct amdgpu_device *adev,
- enum amd_ip_block_type block_type)
+int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
+ enum amd_ip_block_type block_type)
{
int i, r;
@@ -1368,8 +968,8 @@ int amdgpu_wait_for_idle(struct amdgpu_device *adev,
}
-bool amdgpu_is_idle(struct amdgpu_device *adev,
- enum amd_ip_block_type block_type)
+bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
+ enum amd_ip_block_type block_type)
{
int i;
@@ -1383,8 +983,9 @@ bool amdgpu_is_idle(struct amdgpu_device *adev,
}
-struct amdgpu_ip_block * amdgpu_get_ip_block(struct amdgpu_device *adev,
- enum amd_ip_block_type type)
+struct amdgpu_ip_block *
+amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
+ enum amd_ip_block_type type)
{
int i;
@@ -1396,7 +997,7 @@ struct amdgpu_ip_block * amdgpu_get_ip_block(struct amdgpu_device *adev,
}
/**
- * amdgpu_ip_block_version_cmp
+ * amdgpu_device_ip_block_version_cmp
*
* @adev: amdgpu_device pointer
* @type: enum amd_ip_block_type
@@ -1406,11 +1007,11 @@ struct amdgpu_ip_block * amdgpu_get_ip_block(struct amdgpu_device *adev,
* return 0 if equal or greater
* return 1 if smaller or the ip_block doesn't exist
*/
-int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
- enum amd_ip_block_type type,
- u32 major, u32 minor)
+int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
+ enum amd_ip_block_type type,
+ u32 major, u32 minor)
{
- struct amdgpu_ip_block *ip_block = amdgpu_get_ip_block(adev, type);
+ struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);
if (ip_block && ((ip_block->version->major > major) ||
((ip_block->version->major == major) &&
@@ -1421,7 +1022,7 @@ int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
}
/**
- * amdgpu_ip_block_add
+ * amdgpu_device_ip_block_add
*
* @adev: amdgpu_device pointer
* @ip_block_version: pointer to the IP to add
@@ -1429,8 +1030,8 @@ int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
* Adds the IP block driver information to the collection of IPs
* on the asic.
*/
-int amdgpu_ip_block_add(struct amdgpu_device *adev,
- const struct amdgpu_ip_block_version *ip_block_version)
+int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
+ const struct amdgpu_ip_block_version *ip_block_version)
{
if (!ip_block_version)
return -EINVAL;
@@ -1586,7 +1187,7 @@ out:
return err;
}
-static int amdgpu_early_init(struct amdgpu_device *adev)
+static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
{
int i, r;
@@ -1695,7 +1296,7 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
return 0;
}
-static int amdgpu_init(struct amdgpu_device *adev)
+static int amdgpu_device_ip_init(struct amdgpu_device *adev)
{
int i, r;
@@ -1711,7 +1312,7 @@ static int amdgpu_init(struct amdgpu_device *adev)
adev->ip_blocks[i].status.sw = true;
/* need to do gmc hw init early so we can allocate gpu mem */
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
- r = amdgpu_vram_scratch_init(adev);
+ r = amdgpu_device_vram_scratch_init(adev);
if (r) {
DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
return r;
@@ -1721,9 +1322,9 @@ static int amdgpu_init(struct amdgpu_device *adev)
DRM_ERROR("hw_init %d failed %d\n", i, r);
return r;
}
- r = amdgpu_wb_init(adev);
+ r = amdgpu_device_wb_init(adev);
if (r) {
- DRM_ERROR("amdgpu_wb_init failed %d\n", r);
+ DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
return r;
}
adev->ip_blocks[i].status.hw = true;
@@ -1762,18 +1363,18 @@ static int amdgpu_init(struct amdgpu_device *adev)
return 0;
}
-static void amdgpu_fill_reset_magic(struct amdgpu_device *adev)
+static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
{
memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
}
-static bool amdgpu_check_vram_lost(struct amdgpu_device *adev)
+static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
{
return !!memcmp(adev->gart.ptr, adev->reset_magic,
AMDGPU_RESET_MAGIC_NUM);
}
-static int amdgpu_late_set_cg_state(struct amdgpu_device *adev)
+static int amdgpu_device_ip_late_set_cg_state(struct amdgpu_device *adev)
{
int i = 0, r;
@@ -1796,7 +1397,7 @@ static int amdgpu_late_set_cg_state(struct amdgpu_device *adev)
return 0;
}
-static int amdgpu_late_init(struct amdgpu_device *adev)
+static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
{
int i = 0, r;
@@ -1817,12 +1418,12 @@ static int amdgpu_late_init(struct amdgpu_device *adev)
mod_delayed_work(system_wq, &adev->late_init_work,
msecs_to_jiffies(AMDGPU_RESUME_MS));
- amdgpu_fill_reset_magic(adev);
+ amdgpu_device_fill_reset_magic(adev);
return 0;
}
-static int amdgpu_fini(struct amdgpu_device *adev)
+static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
{
int i, r;
@@ -1856,8 +1457,8 @@ static int amdgpu_fini(struct amdgpu_device *adev)
continue;
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
amdgpu_free_static_csa(adev);
- amdgpu_wb_fini(adev);
- amdgpu_vram_scratch_fini(adev);
+ amdgpu_device_wb_fini(adev);
+ amdgpu_device_vram_scratch_fini(adev);
}
if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
@@ -1910,14 +1511,14 @@ static int amdgpu_fini(struct amdgpu_device *adev)
return 0;
}
-static void amdgpu_late_init_func_handler(struct work_struct *work)
+static void amdgpu_device_ip_late_init_func_handler(struct work_struct *work)
{
struct amdgpu_device *adev =
container_of(work, struct amdgpu_device, late_init_work.work);
- amdgpu_late_set_cg_state(adev);
+ amdgpu_device_ip_late_set_cg_state(adev);
}
-int amdgpu_suspend(struct amdgpu_device *adev)
+int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
{
int i, r;
@@ -1925,10 +1526,10 @@ int amdgpu_suspend(struct amdgpu_device *adev)
amdgpu_virt_request_full_gpu(adev, false);
/* ungate SMC block first */
- r = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_SMC,
- AMD_CG_STATE_UNGATE);
+ r = amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_SMC,
+ AMD_CG_STATE_UNGATE);
if (r) {
- DRM_ERROR("set_clockgating_state(ungate) SMC failed %d\n",r);
+ DRM_ERROR("set_clockgating_state(ungate) SMC failed %d\n", r);
}
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
@@ -1958,7 +1559,7 @@ int amdgpu_suspend(struct amdgpu_device *adev)
return 0;
}
-static int amdgpu_sriov_reinit_early(struct amdgpu_device *adev)
+static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
{
int i, r;
@@ -1987,7 +1588,7 @@ static int amdgpu_sriov_reinit_early(struct amdgpu_device *adev)
return 0;
}
-static int amdgpu_sriov_reinit_late(struct amdgpu_device *adev)
+static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
{
int i, r;
@@ -2020,7 +1621,7 @@ static int amdgpu_sriov_reinit_late(struct amdgpu_device *adev)
return 0;
}
-static int amdgpu_resume_phase1(struct amdgpu_device *adev)
+static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
{
int i, r;
@@ -2043,7 +1644,7 @@ static int amdgpu_resume_phase1(struct amdgpu_device *adev)
return 0;
}
-static int amdgpu_resume_phase2(struct amdgpu_device *adev)
+static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
{
int i, r;
@@ -2065,14 +1666,14 @@ static int amdgpu_resume_phase2(struct amdgpu_device *adev)
return 0;
}
-static int amdgpu_resume(struct amdgpu_device *adev)
+static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
{
int r;
- r = amdgpu_resume_phase1(adev);
+ r = amdgpu_device_ip_resume_phase1(adev);
if (r)
return r;
- r = amdgpu_resume_phase2(adev);
+ r = amdgpu_device_ip_resume_phase2(adev);
return r;
}
@@ -2211,7 +1812,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
hash_init(adev->mn_hash);
mutex_init(&adev->lock_reset);
- amdgpu_check_arguments(adev);
+ amdgpu_device_check_arguments(adev);
spin_lock_init(&adev->mmio_idx_lock);
spin_lock_init(&adev->smc_idx_lock);
@@ -2229,7 +1830,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
INIT_LIST_HEAD(&adev->ring_lru_list);
spin_lock_init(&adev->ring_lru_list_lock);
- INIT_DELAYED_WORK(&adev->late_init_work, amdgpu_late_init_func_handler);
+ INIT_DELAYED_WORK(&adev->late_init_work,
+ amdgpu_device_ip_late_init_func_handler);
/* Registers mapping */
/* TODO: block userspace mapping of io register */
@@ -2249,7 +1851,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
/* doorbell bar mapping */
- amdgpu_doorbell_init(adev);
+ amdgpu_device_doorbell_init(adev);
/* io port mapping */
for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
@@ -2263,14 +1865,14 @@ int amdgpu_device_init(struct amdgpu_device *adev,
DRM_INFO("PCI I/O BAR is not found.\n");
/* early init functions */
- r = amdgpu_early_init(adev);
+ r = amdgpu_device_ip_early_init(adev);
if (r)
return r;
/* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
/* this will fail for cards that aren't VGA class devices, just
* ignore it */
- vga_client_register(adev->pdev, adev, NULL, amdgpu_vga_set_decode);
+ vga_client_register(adev->pdev, adev, NULL, amdgpu_device_vga_set_decode);
if (amdgpu_runtime_pm == 1)
runtime = true;
@@ -2299,7 +1901,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
amdgpu_device_detect_sriov_bios(adev);
/* Post card if necessary */
- if (amdgpu_need_post(adev)) {
+ if (amdgpu_device_need_post(adev)) {
if (!adev->bios) {
dev_err(adev->dev, "no vBIOS found\n");
r = -EINVAL;
@@ -2345,7 +1947,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
/* init the mode config */
drm_mode_config_init(adev->ddev);
- r = amdgpu_init(adev);
+ r = amdgpu_device_ip_init(adev);
if (r) {
/* failed in exclusive mode due to timeout */
if (amdgpu_sriov_vf(adev) &&
@@ -2359,9 +1961,9 @@ int amdgpu_device_init(struct amdgpu_device *adev,
r = -EAGAIN;
goto failed;
}
- dev_err(adev->dev, "amdgpu_init failed\n");
+ dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
- amdgpu_fini(adev);
+ amdgpu_device_ip_fini(adev);
goto failed;
}
@@ -2397,7 +1999,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
if (r)
DRM_ERROR("registering pm debugfs failed (%d).\n", r);
- r = amdgpu_gem_debugfs_init(adev);
+ r = amdgpu_debugfs_gem_init(adev);
if (r)
DRM_ERROR("registering gem debugfs failed (%d).\n", r);
@@ -2405,17 +2007,13 @@ int amdgpu_device_init(struct amdgpu_device *adev,
if (r)
DRM_ERROR("registering register debugfs failed (%d).\n", r);
- r = amdgpu_debugfs_test_ib_ring_init(adev);
- if (r)
- DRM_ERROR("registering register test ib ring debugfs failed (%d).\n", r);
-
r = amdgpu_debugfs_firmware_init(adev);
if (r)
DRM_ERROR("registering firmware debugfs failed (%d).\n", r);
- r = amdgpu_debugfs_vbios_dump_init(adev);
+ r = amdgpu_debugfs_init(adev);
if (r)
- DRM_ERROR("Creating vbios dump debugfs failed (%d).\n", r);
+ DRM_ERROR("Creating debugfs files failed (%d).\n", r);
if ((amdgpu_testing & 1)) {
if (adev->accel_working)
@@ -2433,9 +2031,9 @@ int amdgpu_device_init(struct amdgpu_device *adev,
/* enable clockgating, etc. after ib tests, etc. since some blocks require
* explicit gating rather than handling it automatically.
*/
- r = amdgpu_late_init(adev);
+ r = amdgpu_device_ip_late_init(adev);
if (r) {
- dev_err(adev->dev, "amdgpu_late_init failed\n");
+ dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
goto failed;
}
@@ -2466,12 +2064,11 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
adev->shutdown = true;
if (adev->mode_info.mode_config_initialized)
drm_crtc_force_disable_all(adev->ddev);
- /* evict vram memory */
- amdgpu_bo_evict_vram(adev);
+
amdgpu_ib_pool_fini(adev);
amdgpu_fence_driver_fini(adev);
amdgpu_fbdev_fini(adev);
- r = amdgpu_fini(adev);
+ r = amdgpu_device_ip_fini(adev);
if (adev->firmware.gpu_info_fw) {
release_firmware(adev->firmware.gpu_info_fw);
adev->firmware.gpu_info_fw = NULL;
@@ -2494,7 +2091,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
adev->rio_mem = NULL;
iounmap(adev->rmmio);
adev->rmmio = NULL;
- amdgpu_doorbell_fini(adev);
+ amdgpu_device_doorbell_fini(adev);
amdgpu_pm_sysfs_fini(adev);
amdgpu_debugfs_regs_cleanup(adev);
}
@@ -2575,7 +2172,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
amdgpu_fence_driver_suspend(adev);
- r = amdgpu_suspend(adev);
+ r = amdgpu_device_ip_suspend(adev);
/* evict remaining vram memory
* This second call to evict vram is to evict the gart page table
@@ -2583,7 +2180,6 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
*/
amdgpu_bo_evict_vram(adev);
- amdgpu_atombios_scratch_regs_save(adev);
pci_save_state(dev->pdev);
if (suspend) {
/* Shut down the device */
@@ -2632,18 +2228,17 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
if (r)
goto unlock;
}
- amdgpu_atombios_scratch_regs_restore(adev);
/* post card */
- if (amdgpu_need_post(adev)) {
+ if (amdgpu_device_need_post(adev)) {
r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
if (r)
DRM_ERROR("amdgpu asic init failed\n");
}
- r = amdgpu_resume(adev);
+ r = amdgpu_device_ip_resume(adev);
if (r) {
- DRM_ERROR("amdgpu_resume failed (%d).\n", r);
+ DRM_ERROR("amdgpu_device_ip_resume failed (%d).\n", r);
goto unlock;
}
amdgpu_fence_driver_resume(adev);
@@ -2654,7 +2249,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
DRM_ERROR("ib ring test failed (%d).\n", r);
}
- r = amdgpu_late_init(adev);
+ r = amdgpu_device_ip_late_init(adev);
if (r)
goto unlock;
@@ -2734,7 +2329,7 @@ unlock:
return r;
}
-static bool amdgpu_check_soft_reset(struct amdgpu_device *adev)
+static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
{
int i;
bool asic_hang = false;
@@ -2756,7 +2351,7 @@ static bool amdgpu_check_soft_reset(struct amdgpu_device *adev)
return asic_hang;
}
-static int amdgpu_pre_soft_reset(struct amdgpu_device *adev)
+static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
{
int i, r = 0;
@@ -2774,7 +2369,7 @@ static int amdgpu_pre_soft_reset(struct amdgpu_device *adev)
return 0;
}
-static bool amdgpu_need_full_reset(struct amdgpu_device *adev)
+static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
{
int i;
@@ -2795,7 +2390,7 @@ static bool amdgpu_need_full_reset(struct amdgpu_device *adev)
return false;
}
-static int amdgpu_soft_reset(struct amdgpu_device *adev)
+static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
{
int i, r = 0;
@@ -2813,7 +2408,7 @@ static int amdgpu_soft_reset(struct amdgpu_device *adev)
return 0;
}
-static int amdgpu_post_soft_reset(struct amdgpu_device *adev)
+static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
{
int i, r = 0;
@@ -2830,18 +2425,10 @@ static int amdgpu_post_soft_reset(struct amdgpu_device *adev)
return 0;
}
-bool amdgpu_need_backup(struct amdgpu_device *adev)
-{
- if (adev->flags & AMD_IS_APU)
- return false;
-
- return amdgpu_lockup_timeout > 0 ? true : false;
-}
-
-static int amdgpu_recover_vram_from_shadow(struct amdgpu_device *adev,
- struct amdgpu_ring *ring,
- struct amdgpu_bo *bo,
- struct dma_fence **fence)
+static int amdgpu_device_recover_vram_from_shadow(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring,
+ struct amdgpu_bo *bo,
+ struct dma_fence **fence)
{
uint32_t domain;
int r;
@@ -2874,7 +2461,7 @@ err:
}
/*
- * amdgpu_reset - reset ASIC/GPU for bare-metal or passthrough
+ * amdgpu_device_reset - reset ASIC/GPU for bare-metal or passthrough
*
* @adev: amdgpu device pointer
* @reset_flags: output param tells caller the reset result
@@ -2882,18 +2469,19 @@ err:
* attempt to do soft-reset or full-reset and reinitialize Asic
 * return 0 means succeeded, otherwise failed
*/
-static int amdgpu_reset(struct amdgpu_device *adev, uint64_t* reset_flags)
+static int amdgpu_device_reset(struct amdgpu_device *adev,
+ uint64_t *reset_flags)
{
bool need_full_reset, vram_lost = 0;
int r;
- need_full_reset = amdgpu_need_full_reset(adev);
+ need_full_reset = amdgpu_device_ip_need_full_reset(adev);
if (!need_full_reset) {
- amdgpu_pre_soft_reset(adev);
- r = amdgpu_soft_reset(adev);
- amdgpu_post_soft_reset(adev);
- if (r || amdgpu_check_soft_reset(adev)) {
+ amdgpu_device_ip_pre_soft_reset(adev);
+ r = amdgpu_device_ip_soft_reset(adev);
+ amdgpu_device_ip_post_soft_reset(adev);
+ if (r || amdgpu_device_ip_check_soft_reset(adev)) {
DRM_INFO("soft reset failed, will fallback to full reset!\n");
need_full_reset = true;
}
@@ -2901,22 +2489,20 @@ static int amdgpu_reset(struct amdgpu_device *adev, uint64_t* reset_flags)
}
if (need_full_reset) {
- r = amdgpu_suspend(adev);
+ r = amdgpu_device_ip_suspend(adev);
retry:
- amdgpu_atombios_scratch_regs_save(adev);
r = amdgpu_asic_reset(adev);
- amdgpu_atombios_scratch_regs_restore(adev);
/* post card */
amdgpu_atom_asic_init(adev->mode_info.atom_context);
if (!r) {
dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
- r = amdgpu_resume_phase1(adev);
+ r = amdgpu_device_ip_resume_phase1(adev);
if (r)
goto out;
- vram_lost = amdgpu_check_vram_lost(adev);
+ vram_lost = amdgpu_device_check_vram_lost(adev);
if (vram_lost) {
DRM_ERROR("VRAM is lost!\n");
atomic_inc(&adev->vram_lost_counter);
@@ -2927,12 +2513,12 @@ retry:
if (r)
goto out;
- r = amdgpu_resume_phase2(adev);
+ r = amdgpu_device_ip_resume_phase2(adev);
if (r)
goto out;
if (vram_lost)
- amdgpu_fill_reset_magic(adev);
+ amdgpu_device_fill_reset_magic(adev);
}
}
@@ -2942,7 +2528,7 @@ out:
r = amdgpu_ib_ring_tests(adev);
if (r) {
dev_err(adev->dev, "ib ring test failed (%d).\n", r);
- r = amdgpu_suspend(adev);
+ r = amdgpu_device_ip_suspend(adev);
need_full_reset = true;
goto retry;
}
@@ -2960,7 +2546,7 @@ out:
}
/*
- * amdgpu_reset_sriov - reset ASIC for SR-IOV vf
+ * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
*
* @adev: amdgpu device pointer
* @reset_flags: output param tells caller the reset result
@@ -2968,7 +2554,9 @@ out:
* do VF FLR and reinitialize Asic
 * return 0 means succeeded, otherwise failed
*/
-static int amdgpu_reset_sriov(struct amdgpu_device *adev, uint64_t *reset_flags, bool from_hypervisor)
+static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
+ uint64_t *reset_flags,
+ bool from_hypervisor)
{
int r;
@@ -2980,7 +2568,7 @@ static int amdgpu_reset_sriov(struct amdgpu_device *adev, uint64_t *reset_flags,
return r;
/* Resume IP prior to SMC */
- r = amdgpu_sriov_reinit_early(adev);
+ r = amdgpu_device_ip_reinit_early_sriov(adev);
if (r)
goto error;
@@ -2988,7 +2576,7 @@ static int amdgpu_reset_sriov(struct amdgpu_device *adev, uint64_t *reset_flags,
amdgpu_gtt_mgr_recover(&adev->mman.bdev.man[TTM_PL_TT]);
/* now we are okay to resume SMC/CP/SDMA */
- r = amdgpu_sriov_reinit_late(adev);
+ r = amdgpu_device_ip_reinit_late_sriov(adev);
if (r)
goto error;
@@ -3015,25 +2603,33 @@ error:
}
/**
- * amdgpu_gpu_recover - reset the asic and recover scheduler
+ * amdgpu_device_gpu_recover - reset the asic and recover scheduler
*
* @adev: amdgpu device pointer
* @job: which job trigger hang
+ * @force: forces reset regardless of amdgpu_gpu_recovery
*
* Attempt to reset the GPU if it has hung (all asics).
* Returns 0 for success or an error on failure.
*/
-int amdgpu_gpu_recover(struct amdgpu_device *adev, struct amdgpu_job *job)
+int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
+ struct amdgpu_job *job, bool force)
{
struct drm_atomic_state *state = NULL;
uint64_t reset_flags = 0;
int i, r, resched;
- if (!amdgpu_check_soft_reset(adev)) {
+ if (!amdgpu_device_ip_check_soft_reset(adev)) {
DRM_INFO("No hardware hang detected. Did some blocks stall?\n");
return 0;
}
+ if (!force && (amdgpu_gpu_recovery == 0 ||
+ (amdgpu_gpu_recovery == -1 && !amdgpu_sriov_vf(adev)))) {
+ DRM_INFO("GPU recovery disabled.\n");
+ return 0;
+ }
+
dev_info(adev->dev, "GPU reset begin!\n");
mutex_lock(&adev->lock_reset);
@@ -3058,16 +2654,16 @@ int amdgpu_gpu_recover(struct amdgpu_device *adev, struct amdgpu_job *job)
continue;
kthread_park(ring->sched.thread);
- amd_sched_hw_job_reset(&ring->sched, &job->base);
+ drm_sched_hw_job_reset(&ring->sched, &job->base);
/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
amdgpu_fence_driver_force_completion(ring);
}
if (amdgpu_sriov_vf(adev))
- r = amdgpu_reset_sriov(adev, &reset_flags, job ? false : true);
+ r = amdgpu_device_reset_sriov(adev, &reset_flags, job ? false : true);
else
- r = amdgpu_reset(adev, &reset_flags);
+ r = amdgpu_device_reset(adev, &reset_flags);
if (!r) {
if (((reset_flags & AMDGPU_RESET_INFO_FULLRESET) && !(adev->flags & AMD_IS_APU)) ||
@@ -3080,7 +2676,7 @@ int amdgpu_gpu_recover(struct amdgpu_device *adev, struct amdgpu_job *job)
mutex_lock(&adev->shadow_list_lock);
list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
next = NULL;
- amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
+ amdgpu_device_recover_vram_from_shadow(adev, ring, bo, &next);
if (fence) {
r = dma_fence_wait(fence, false);
if (r) {
@@ -3111,7 +2707,7 @@ int amdgpu_gpu_recover(struct amdgpu_device *adev, struct amdgpu_job *job)
if (job && job->ring->idx != i)
continue;
- amd_sched_job_recovery(&ring->sched);
+ drm_sched_job_recovery(&ring->sched);
kthread_unpark(ring->sched.thread);
}
} else {
@@ -3153,7 +2749,7 @@ int amdgpu_gpu_recover(struct amdgpu_device *adev, struct amdgpu_job *job)
return r;
}
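
The call sites of amdgpu_device_gpu_recover() are outside this hunk; as a hedged sketch of how the new force parameter is intended to be used (the caller names are assumptions, not taken from this diff):

	int r;

	/* job timeout path: honour the amdgpu_gpu_recovery module parameter */
	r = amdgpu_device_gpu_recover(adev, job, false);

	/* manual/forced recovery (e.g. a debug trigger): reset unconditionally */
	r = amdgpu_device_gpu_recover(adev, NULL, true);
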
-void amdgpu_get_pcie_info(struct amdgpu_device *adev)
+void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
{
u32 mask;
int ret;
@@ -3245,773 +2841,3 @@ void amdgpu_get_pcie_info(struct amdgpu_device *adev)
}
}
-/*
- * Debugfs
- */
-int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
- const struct drm_info_list *files,
- unsigned nfiles)
-{
- unsigned i;
-
- for (i = 0; i < adev->debugfs_count; i++) {
- if (adev->debugfs[i].files == files) {
- /* Already registered */
- return 0;
- }
- }
-
- i = adev->debugfs_count + 1;
- if (i > AMDGPU_DEBUGFS_MAX_COMPONENTS) {
- DRM_ERROR("Reached maximum number of debugfs components.\n");
- DRM_ERROR("Report so we increase "
- "AMDGPU_DEBUGFS_MAX_COMPONENTS.\n");
- return -EINVAL;
- }
- adev->debugfs[adev->debugfs_count].files = files;
- adev->debugfs[adev->debugfs_count].num_files = nfiles;
- adev->debugfs_count = i;
-#if defined(CONFIG_DEBUG_FS)
- drm_debugfs_create_files(files, nfiles,
- adev->ddev->primary->debugfs_root,
- adev->ddev->primary);
-#endif
- return 0;
-}
-
-#if defined(CONFIG_DEBUG_FS)
-
-static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
- size_t size, loff_t *pos)
-{
- struct amdgpu_device *adev = file_inode(f)->i_private;
- ssize_t result = 0;
- int r;
- bool pm_pg_lock, use_bank;
- unsigned instance_bank, sh_bank, se_bank;
-
- if (size & 0x3 || *pos & 0x3)
- return -EINVAL;
-
- /* are we reading registers for which a PG lock is necessary? */
- pm_pg_lock = (*pos >> 23) & 1;
-
- if (*pos & (1ULL << 62)) {
- se_bank = (*pos & GENMASK_ULL(33, 24)) >> 24;
- sh_bank = (*pos & GENMASK_ULL(43, 34)) >> 34;
- instance_bank = (*pos & GENMASK_ULL(53, 44)) >> 44;
-
- if (se_bank == 0x3FF)
- se_bank = 0xFFFFFFFF;
- if (sh_bank == 0x3FF)
- sh_bank = 0xFFFFFFFF;
- if (instance_bank == 0x3FF)
- instance_bank = 0xFFFFFFFF;
- use_bank = 1;
- } else {
- use_bank = 0;
- }
-
- *pos &= (1UL << 22) - 1;
-
- if (use_bank) {
- if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
- (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
- return -EINVAL;
- mutex_lock(&adev->grbm_idx_mutex);
- amdgpu_gfx_select_se_sh(adev, se_bank,
- sh_bank, instance_bank);
- }
-
- if (pm_pg_lock)
- mutex_lock(&adev->pm.mutex);
-
- while (size) {
- uint32_t value;
-
- if (*pos > adev->rmmio_size)
- goto end;
-
- value = RREG32(*pos >> 2);
- r = put_user(value, (uint32_t *)buf);
- if (r) {
- result = r;
- goto end;
- }
-
- result += 4;
- buf += 4;
- *pos += 4;
- size -= 4;
- }
-
-end:
- if (use_bank) {
- amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
- mutex_unlock(&adev->grbm_idx_mutex);
- }
-
- if (pm_pg_lock)
- mutex_unlock(&adev->pm.mutex);
-
- return result;
-}
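
The register file above encodes extra state in the file offset: bit 23 requests the PM/PG lock, bit 62 enables GRBM banking with SE/SH/instance selectors in bits 24-33, 34-43 and 44-53 (0x3FF meaning broadcast), and the low 22 bits are the MMIO byte offset. A hedged userspace sketch of reading one banked register; the debugfs path and the 0x2000 offset are illustrative assumptions:

	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		uint64_t pos;
		uint32_t val;
		int fd = open("/sys/kernel/debug/dri/0/amdgpu_regs", O_RDONLY);

		if (fd < 0)
			return 1;

		pos  = 0x2000;                 /* MMIO byte offset (illustrative) */
		pos |= 1ULL << 62;             /* enable SE/SH/instance banking   */
		pos |= (uint64_t)0x3FF << 24;  /* se_bank: broadcast              */
		pos |= (uint64_t)0x3FF << 34;  /* sh_bank: broadcast              */
		pos |= (uint64_t)0x3FF << 44;  /* instance_bank: broadcast        */

		if (pread(fd, &val, sizeof(val), pos) == sizeof(val))
			printf("reg = 0x%08x\n", val);
		close(fd);
		return 0;
	}
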
-
-static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
- size_t size, loff_t *pos)
-{
- struct amdgpu_device *adev = file_inode(f)->i_private;
- ssize_t result = 0;
- int r;
- bool pm_pg_lock, use_bank;
- unsigned instance_bank, sh_bank, se_bank;
-
- if (size & 0x3 || *pos & 0x3)
- return -EINVAL;
-
- /* are we reading registers for which a PG lock is necessary? */
- pm_pg_lock = (*pos >> 23) & 1;
-
- if (*pos & (1ULL << 62)) {
- se_bank = (*pos & GENMASK_ULL(33, 24)) >> 24;
- sh_bank = (*pos & GENMASK_ULL(43, 34)) >> 34;
- instance_bank = (*pos & GENMASK_ULL(53, 44)) >> 44;
-
- if (se_bank == 0x3FF)
- se_bank = 0xFFFFFFFF;
- if (sh_bank == 0x3FF)
- sh_bank = 0xFFFFFFFF;
- if (instance_bank == 0x3FF)
- instance_bank = 0xFFFFFFFF;
- use_bank = 1;
- } else {
- use_bank = 0;
- }
-
- *pos &= (1UL << 22) - 1;
-
- if (use_bank) {
- if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
- (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
- return -EINVAL;
- mutex_lock(&adev->grbm_idx_mutex);
- amdgpu_gfx_select_se_sh(adev, se_bank,
- sh_bank, instance_bank);
- }
-
- if (pm_pg_lock)
- mutex_lock(&adev->pm.mutex);
-
- while (size) {
- uint32_t value;
-
- if (*pos > adev->rmmio_size)
- return result;
-
- r = get_user(value, (uint32_t *)buf);
- if (r)
- return r;
-
- WREG32(*pos >> 2, value);
-
- result += 4;
- buf += 4;
- *pos += 4;
- size -= 4;
- }
-
- if (use_bank) {
- amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
- mutex_unlock(&adev->grbm_idx_mutex);
- }
-
- if (pm_pg_lock)
- mutex_unlock(&adev->pm.mutex);
-
- return result;
-}
-
-static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
- size_t size, loff_t *pos)
-{
- struct amdgpu_device *adev = file_inode(f)->i_private;
- ssize_t result = 0;
- int r;
-
- if (size & 0x3 || *pos & 0x3)
- return -EINVAL;
-
- while (size) {
- uint32_t value;
-
- value = RREG32_PCIE(*pos >> 2);
- r = put_user(value, (uint32_t *)buf);
- if (r)
- return r;
-
- result += 4;
- buf += 4;
- *pos += 4;
- size -= 4;
- }
-
- return result;
-}
-
-static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user *buf,
- size_t size, loff_t *pos)
-{
- struct amdgpu_device *adev = file_inode(f)->i_private;
- ssize_t result = 0;
- int r;
-
- if (size & 0x3 || *pos & 0x3)
- return -EINVAL;
-
- while (size) {
- uint32_t value;
-
- r = get_user(value, (uint32_t *)buf);
- if (r)
- return r;
-
- WREG32_PCIE(*pos >> 2, value);
-
- result += 4;
- buf += 4;
- *pos += 4;
- size -= 4;
- }
-
- return result;
-}
-
-static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
- size_t size, loff_t *pos)
-{
- struct amdgpu_device *adev = file_inode(f)->i_private;
- ssize_t result = 0;
- int r;
-
- if (size & 0x3 || *pos & 0x3)
- return -EINVAL;
-
- while (size) {
- uint32_t value;
-
- value = RREG32_DIDT(*pos >> 2);
- r = put_user(value, (uint32_t *)buf);
- if (r)
- return r;
-
- result += 4;
- buf += 4;
- *pos += 4;
- size -= 4;
- }
-
- return result;
-}
-
-static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user *buf,
- size_t size, loff_t *pos)
-{
- struct amdgpu_device *adev = file_inode(f)->i_private;
- ssize_t result = 0;
- int r;
-
- if (size & 0x3 || *pos & 0x3)
- return -EINVAL;
-
- while (size) {
- uint32_t value;
-
- r = get_user(value, (uint32_t *)buf);
- if (r)
- return r;
-
- WREG32_DIDT(*pos >> 2, value);
-
- result += 4;
- buf += 4;
- *pos += 4;
- size -= 4;
- }
-
- return result;
-}
-
-static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
- size_t size, loff_t *pos)
-{
- struct amdgpu_device *adev = file_inode(f)->i_private;
- ssize_t result = 0;
- int r;
-
- if (size & 0x3 || *pos & 0x3)
- return -EINVAL;
-
- while (size) {
- uint32_t value;
-
- value = RREG32_SMC(*pos);
- r = put_user(value, (uint32_t *)buf);
- if (r)
- return r;
-
- result += 4;
- buf += 4;
- *pos += 4;
- size -= 4;
- }
-
- return result;
-}
-
-static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *buf,
- size_t size, loff_t *pos)
-{
- struct amdgpu_device *adev = file_inode(f)->i_private;
- ssize_t result = 0;
- int r;
-
- if (size & 0x3 || *pos & 0x3)
- return -EINVAL;
-
- while (size) {
- uint32_t value;
-
- r = get_user(value, (uint32_t *)buf);
- if (r)
- return r;
-
- WREG32_SMC(*pos, value);
-
- result += 4;
- buf += 4;
- *pos += 4;
- size -= 4;
- }
-
- return result;
-}
-
-static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
- size_t size, loff_t *pos)
-{
- struct amdgpu_device *adev = file_inode(f)->i_private;
- ssize_t result = 0;
- int r;
- uint32_t *config, no_regs = 0;
-
- if (size & 0x3 || *pos & 0x3)
- return -EINVAL;
-
- config = kmalloc_array(256, sizeof(*config), GFP_KERNEL);
- if (!config)
- return -ENOMEM;
-
- /* version, increment each time something is added */
- config[no_regs++] = 3;
- config[no_regs++] = adev->gfx.config.max_shader_engines;
- config[no_regs++] = adev->gfx.config.max_tile_pipes;
- config[no_regs++] = adev->gfx.config.max_cu_per_sh;
- config[no_regs++] = adev->gfx.config.max_sh_per_se;
- config[no_regs++] = adev->gfx.config.max_backends_per_se;
- config[no_regs++] = adev->gfx.config.max_texture_channel_caches;
- config[no_regs++] = adev->gfx.config.max_gprs;
- config[no_regs++] = adev->gfx.config.max_gs_threads;
- config[no_regs++] = adev->gfx.config.max_hw_contexts;
- config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_frontend;
- config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_backend;
- config[no_regs++] = adev->gfx.config.sc_hiz_tile_fifo_size;
- config[no_regs++] = adev->gfx.config.sc_earlyz_tile_fifo_size;
- config[no_regs++] = adev->gfx.config.num_tile_pipes;
- config[no_regs++] = adev->gfx.config.backend_enable_mask;
- config[no_regs++] = adev->gfx.config.mem_max_burst_length_bytes;
- config[no_regs++] = adev->gfx.config.mem_row_size_in_kb;
- config[no_regs++] = adev->gfx.config.shader_engine_tile_size;
- config[no_regs++] = adev->gfx.config.num_gpus;
- config[no_regs++] = adev->gfx.config.multi_gpu_tile_size;
- config[no_regs++] = adev->gfx.config.mc_arb_ramcfg;
- config[no_regs++] = adev->gfx.config.gb_addr_config;
- config[no_regs++] = adev->gfx.config.num_rbs;
-
- /* rev==1 */
- config[no_regs++] = adev->rev_id;
- config[no_regs++] = adev->pg_flags;
- config[no_regs++] = adev->cg_flags;
-
- /* rev==2 */
- config[no_regs++] = adev->family;
- config[no_regs++] = adev->external_rev_id;
-
- /* rev==3 */
- config[no_regs++] = adev->pdev->device;
- config[no_regs++] = adev->pdev->revision;
- config[no_regs++] = adev->pdev->subsystem_device;
- config[no_regs++] = adev->pdev->subsystem_vendor;
-
- while (size && (*pos < no_regs * 4)) {
- uint32_t value;
-
- value = config[*pos >> 2];
- r = put_user(value, (uint32_t *)buf);
- if (r) {
- kfree(config);
- return r;
- }
-
- result += 4;
- buf += 4;
- *pos += 4;
- size -= 4;
- }
-
- kfree(config);
- return result;
-}
-
-static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
- size_t size, loff_t *pos)
-{
- struct amdgpu_device *adev = file_inode(f)->i_private;
- int idx, x, outsize, r, valuesize;
- uint32_t values[16];
-
- if (size & 3 || *pos & 0x3)
- return -EINVAL;
-
- if (amdgpu_dpm == 0)
- return -EINVAL;
-
- /* convert offset to sensor number */
- idx = *pos >> 2;
-
- valuesize = sizeof(values);
- if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor)
- r = amdgpu_dpm_read_sensor(adev, idx, &values[0], &valuesize);
- else
- return -EINVAL;
-
- if (size > valuesize)
- return -EINVAL;
-
- outsize = 0;
- x = 0;
- if (!r) {
- while (size) {
- r = put_user(values[x++], (int32_t *)buf);
- buf += 4;
- size -= 4;
- outsize += 4;
- }
- }
-
- return !r ? outsize : r;
-}
-
-static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
- size_t size, loff_t *pos)
-{
- struct amdgpu_device *adev = f->f_inode->i_private;
- int r, x;
- ssize_t result=0;
- uint32_t offset, se, sh, cu, wave, simd, data[32];
-
- if (size & 3 || *pos & 3)
- return -EINVAL;
-
- /* decode offset */
- offset = (*pos & GENMASK_ULL(6, 0));
- se = (*pos & GENMASK_ULL(14, 7)) >> 7;
- sh = (*pos & GENMASK_ULL(22, 15)) >> 15;
- cu = (*pos & GENMASK_ULL(30, 23)) >> 23;
- wave = (*pos & GENMASK_ULL(36, 31)) >> 31;
- simd = (*pos & GENMASK_ULL(44, 37)) >> 37;
-
- /* switch to the specific se/sh/cu */
- mutex_lock(&adev->grbm_idx_mutex);
- amdgpu_gfx_select_se_sh(adev, se, sh, cu);
-
- x = 0;
- if (adev->gfx.funcs->read_wave_data)
- adev->gfx.funcs->read_wave_data(adev, simd, wave, data, &x);
-
- amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
- mutex_unlock(&adev->grbm_idx_mutex);
-
- if (!x)
- return -EINVAL;
-
- while (size && (offset < x * 4)) {
- uint32_t value;
-
- value = data[offset >> 2];
- r = put_user(value, (uint32_t *)buf);
- if (r)
- return r;
-
- result += 4;
- buf += 4;
- offset += 4;
- size -= 4;
- }
-
- return result;
-}
-
-static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
- size_t size, loff_t *pos)
-{
- struct amdgpu_device *adev = f->f_inode->i_private;
- int r;
- ssize_t result = 0;
- uint32_t offset, se, sh, cu, wave, simd, thread, bank, *data;
-
- if (size & 3 || *pos & 3)
- return -EINVAL;
-
- /* decode offset */
- offset = *pos & GENMASK_ULL(11, 0);
- se = (*pos & GENMASK_ULL(19, 12)) >> 12;
- sh = (*pos & GENMASK_ULL(27, 20)) >> 20;
- cu = (*pos & GENMASK_ULL(35, 28)) >> 28;
- wave = (*pos & GENMASK_ULL(43, 36)) >> 36;
- simd = (*pos & GENMASK_ULL(51, 44)) >> 44;
- thread = (*pos & GENMASK_ULL(59, 52)) >> 52;
- bank = (*pos & GENMASK_ULL(61, 60)) >> 60;
-
- data = kmalloc_array(1024, sizeof(*data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- /* switch to the specific se/sh/cu */
- mutex_lock(&adev->grbm_idx_mutex);
- amdgpu_gfx_select_se_sh(adev, se, sh, cu);
-
- if (bank == 0) {
- if (adev->gfx.funcs->read_wave_vgprs)
- adev->gfx.funcs->read_wave_vgprs(adev, simd, wave, thread, offset, size>>2, data);
- } else {
- if (adev->gfx.funcs->read_wave_sgprs)
- adev->gfx.funcs->read_wave_sgprs(adev, simd, wave, offset, size>>2, data);
- }
-
- amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
- mutex_unlock(&adev->grbm_idx_mutex);
-
- while (size) {
- uint32_t value;
-
- value = data[offset++];
- r = put_user(value, (uint32_t *)buf);
- if (r) {
- result = r;
- goto err;
- }
-
- result += 4;
- buf += 4;
- size -= 4;
- }
-
-err:
- kfree(data);
- return result;
-}
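
Similarly, the wave/GPR files pack their selector into the offset; for amdgpu_gpr the decode above is: bits 0-11 the GPR offset, 12-19 SE, 20-27 SH, 28-35 CU, 36-43 wave, 44-51 SIMD, 52-59 thread and 60-61 the bank (0 selects VGPRs, non-zero SGPRs). A hedged snippet composing such an offset, with purely illustrative values:

	/* read the first VGPRs of wave 0 on SE0/SH0/CU0, SIMD 0, thread 0 */
	uint64_t pos = 0;

	pos |= (uint64_t)0 <<  0;   /* GPR offset                    */
	pos |= (uint64_t)0 << 12;   /* se                            */
	pos |= (uint64_t)0 << 20;   /* sh                            */
	pos |= (uint64_t)0 << 28;   /* cu                            */
	pos |= (uint64_t)0 << 36;   /* wave                          */
	pos |= (uint64_t)0 << 44;   /* simd                          */
	pos |= (uint64_t)0 << 52;   /* thread                        */
	pos |= (uint64_t)0 << 60;   /* bank: 0 = VGPRs, else SGPRs   */
	/* then pread(fd, data, len, pos) on the amdgpu_gpr debugfs file */
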
-
-static const struct file_operations amdgpu_debugfs_regs_fops = {
- .owner = THIS_MODULE,
- .read = amdgpu_debugfs_regs_read,
- .write = amdgpu_debugfs_regs_write,
- .llseek = default_llseek
-};
-static const struct file_operations amdgpu_debugfs_regs_didt_fops = {
- .owner = THIS_MODULE,
- .read = amdgpu_debugfs_regs_didt_read,
- .write = amdgpu_debugfs_regs_didt_write,
- .llseek = default_llseek
-};
-static const struct file_operations amdgpu_debugfs_regs_pcie_fops = {
- .owner = THIS_MODULE,
- .read = amdgpu_debugfs_regs_pcie_read,
- .write = amdgpu_debugfs_regs_pcie_write,
- .llseek = default_llseek
-};
-static const struct file_operations amdgpu_debugfs_regs_smc_fops = {
- .owner = THIS_MODULE,
- .read = amdgpu_debugfs_regs_smc_read,
- .write = amdgpu_debugfs_regs_smc_write,
- .llseek = default_llseek
-};
-
-static const struct file_operations amdgpu_debugfs_gca_config_fops = {
- .owner = THIS_MODULE,
- .read = amdgpu_debugfs_gca_config_read,
- .llseek = default_llseek
-};
-
-static const struct file_operations amdgpu_debugfs_sensors_fops = {
- .owner = THIS_MODULE,
- .read = amdgpu_debugfs_sensor_read,
- .llseek = default_llseek
-};
-
-static const struct file_operations amdgpu_debugfs_wave_fops = {
- .owner = THIS_MODULE,
- .read = amdgpu_debugfs_wave_read,
- .llseek = default_llseek
-};
-static const struct file_operations amdgpu_debugfs_gpr_fops = {
- .owner = THIS_MODULE,
- .read = amdgpu_debugfs_gpr_read,
- .llseek = default_llseek
-};
-
-static const struct file_operations *debugfs_regs[] = {
- &amdgpu_debugfs_regs_fops,
- &amdgpu_debugfs_regs_didt_fops,
- &amdgpu_debugfs_regs_pcie_fops,
- &amdgpu_debugfs_regs_smc_fops,
- &amdgpu_debugfs_gca_config_fops,
- &amdgpu_debugfs_sensors_fops,
- &amdgpu_debugfs_wave_fops,
- &amdgpu_debugfs_gpr_fops,
-};
-
-static const char *debugfs_regs_names[] = {
- "amdgpu_regs",
- "amdgpu_regs_didt",
- "amdgpu_regs_pcie",
- "amdgpu_regs_smc",
- "amdgpu_gca_config",
- "amdgpu_sensors",
- "amdgpu_wave",
- "amdgpu_gpr",
-};
-
-static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
-{
- struct drm_minor *minor = adev->ddev->primary;
- struct dentry *ent, *root = minor->debugfs_root;
- unsigned i, j;
-
- for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
- ent = debugfs_create_file(debugfs_regs_names[i],
- S_IFREG | S_IRUGO, root,
- adev, debugfs_regs[i]);
- if (IS_ERR(ent)) {
- for (j = 0; j < i; j++) {
- debugfs_remove(adev->debugfs_regs[i]);
- adev->debugfs_regs[i] = NULL;
- }
- return PTR_ERR(ent);
- }
-
- if (!i)
- i_size_write(ent->d_inode, adev->rmmio_size);
- adev->debugfs_regs[i] = ent;
- }
-
- return 0;
-}
-
-static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev)
-{
- unsigned i;
-
- for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
- if (adev->debugfs_regs[i]) {
- debugfs_remove(adev->debugfs_regs[i]);
- adev->debugfs_regs[i] = NULL;
- }
- }
-}
-
-static int amdgpu_debugfs_test_ib(struct seq_file *m, void *data)
-{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- struct amdgpu_device *adev = dev->dev_private;
- int r = 0, i;
-
- /* hold on the scheduler */
- for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
- struct amdgpu_ring *ring = adev->rings[i];
-
- if (!ring || !ring->sched.thread)
- continue;
- kthread_park(ring->sched.thread);
- }
-
- seq_printf(m, "run ib test:\n");
- r = amdgpu_ib_ring_tests(adev);
- if (r)
- seq_printf(m, "ib ring tests failed (%d).\n", r);
- else
- seq_printf(m, "ib ring tests passed.\n");
-
- /* go on the scheduler */
- for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
- struct amdgpu_ring *ring = adev->rings[i];
-
- if (!ring || !ring->sched.thread)
- continue;
- kthread_unpark(ring->sched.thread);
- }
-
- return 0;
-}
-
-static const struct drm_info_list amdgpu_debugfs_test_ib_ring_list[] = {
- {"amdgpu_test_ib", &amdgpu_debugfs_test_ib}
-};
-
-static int amdgpu_debugfs_test_ib_ring_init(struct amdgpu_device *adev)
-{
- return amdgpu_debugfs_add_files(adev,
- amdgpu_debugfs_test_ib_ring_list, 1);
-}
-
-int amdgpu_debugfs_init(struct drm_minor *minor)
-{
- return 0;
-}
-
-static int amdgpu_debugfs_get_vbios_dump(struct seq_file *m, void *data)
-{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- struct amdgpu_device *adev = dev->dev_private;
-
- seq_write(m, adev->bios, adev->bios_size);
- return 0;
-}
-
-static const struct drm_info_list amdgpu_vbios_dump_list[] = {
- {"amdgpu_vbios",
- amdgpu_debugfs_get_vbios_dump,
- 0, NULL},
-};
-
-static int amdgpu_debugfs_vbios_dump_init(struct amdgpu_device *adev)
-{
- return amdgpu_debugfs_add_files(adev,
- amdgpu_vbios_dump_list, 1);
-}
-#else
-static int amdgpu_debugfs_test_ib_ring_init(struct amdgpu_device *adev)
-{
- return 0;
-}
-static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
-{
- return 0;
-}
-static int amdgpu_debugfs_vbios_dump_init(struct amdgpu_device *adev)
-{
- return 0;
-}
-static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev) { }
-#endif
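The register/wave/GPR debugfs files removed above (they move into the new amdgpu_debugfs.c) multiplex their selectors through the file offset. As an illustration only, a hypothetical userspace helper could pack the selectors for the amdgpu_wave file like this, mirroring the GENMASK decoding in amdgpu_debugfs_wave_read(); amdgpu_wave_pos() is not part of the driver and the bit layout is taken directly from the code above:

#include <stdint.h>

/* Illustrative only: build the file offset expected by the amdgpu_wave
 * debugfs file. The wave data is then read with pread() at this offset. */
static uint64_t amdgpu_wave_pos(uint32_t offset, uint32_t se, uint32_t sh,
				uint32_t cu, uint32_t wave, uint32_t simd)
{
	return  ((uint64_t)offset & 0x7f)        |	/* bits  6:0  - dword offset */
		(((uint64_t)se   & 0xff) << 7)   |	/* bits 14:7  - shader engine */
		(((uint64_t)sh   & 0xff) << 15)  |	/* bits 22:15 - shader array */
		(((uint64_t)cu   & 0xff) << 23)  |	/* bits 30:23 - compute unit */
		(((uint64_t)wave & 0x3f) << 31)  |	/* bits 36:31 - wave slot */
		(((uint64_t)simd & 0xff) << 37);	/* bits 44:37 - SIMD */
}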
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 31383e004947..50afcf65181a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -90,7 +90,7 @@ int amdgpu_disp_priority = 0;
int amdgpu_hw_i2c = 0;
int amdgpu_pcie_gen2 = -1;
int amdgpu_msi = -1;
-int amdgpu_lockup_timeout = 0;
+int amdgpu_lockup_timeout = 10000;
int amdgpu_dpm = -1;
int amdgpu_fw_load_type = -1;
int amdgpu_aspm = -1;
@@ -128,6 +128,7 @@ int amdgpu_param_buf_per_se = 0;
int amdgpu_job_hang_limit = 0;
int amdgpu_lbpw = -1;
int amdgpu_compute_multipipe = -1;
+int amdgpu_gpu_recovery = -1; /* auto */
MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes");
module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);
@@ -165,7 +166,7 @@ module_param_named(pcie_gen2, amdgpu_pcie_gen2, int, 0444);
MODULE_PARM_DESC(msi, "MSI support (1 = enable, 0 = disable, -1 = auto)");
module_param_named(msi, amdgpu_msi, int, 0444);
-MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default 0 = disable)");
+MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms > 0 (default 10000)");
module_param_named(lockup_timeout, amdgpu_lockup_timeout, int, 0444);
MODULE_PARM_DESC(dpm, "DPM support (1 = enable, 0 = disable, -1 = auto)");
@@ -280,6 +281,9 @@ module_param_named(lbpw, amdgpu_lbpw, int, 0444);
MODULE_PARM_DESC(compute_multipipe, "Force compute queues to be spread across pipes (1 = enable, 0 = disable, -1 = auto)");
module_param_named(compute_multipipe, amdgpu_compute_multipipe, int, 0444);
+MODULE_PARM_DESC(gpu_recovery, "Enable GPU recovery mechanism (1 = enable, 0 = disable, -1 = auto)");
+module_param_named(gpu_recovery, amdgpu_gpu_recovery, int, 0444);
+
#ifdef CONFIG_DRM_AMDGPU_SI
#if defined(CONFIG_DRM_RADEON) || defined(CONFIG_DRM_RADEON_MODULE)
@@ -645,7 +649,7 @@ amdgpu_pci_shutdown(struct pci_dev *pdev)
* unfortunately we can't detect certain
* hypervisors so just do this all the time.
*/
- amdgpu_suspend(adev);
+ amdgpu_device_ip_suspend(adev);
}
static int amdgpu_pmops_suspend(struct device *dev)
@@ -850,9 +854,6 @@ static struct drm_driver kms_driver = {
.disable_vblank = amdgpu_disable_vblank_kms,
.get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos,
.get_scanout_position = amdgpu_get_crtc_scanout_position,
-#if defined(CONFIG_DEBUG_FS)
- .debugfs_init = amdgpu_debugfs_init,
-#endif
.irq_preinstall = amdgpu_irq_preinstall,
.irq_postinstall = amdgpu_irq_postinstall,
.irq_uninstall = amdgpu_irq_uninstall,
@@ -912,10 +913,6 @@ static int __init amdgpu_init(void)
if (r)
goto error_fence;
- r = amd_sched_fence_slab_init();
- if (r)
- goto error_sched;
-
if (vgacon_text_force()) {
DRM_ERROR("VGACON disables amdgpu kernel modesetting.\n");
return -EINVAL;
@@ -928,9 +925,6 @@ static int __init amdgpu_init(void)
/* let modprobe override vga console setting */
return pci_register_driver(pdriver);
-error_sched:
- amdgpu_fence_slab_fini();
-
error_fence:
amdgpu_sync_fini();
@@ -944,7 +938,6 @@ static void __exit amdgpu_exit(void)
pci_unregister_driver(pdriver);
amdgpu_unregister_atpx_handler();
amdgpu_sync_fini();
- amd_sched_fence_slab_fini();
amdgpu_fence_slab_fini();
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 604ac03a42e4..008e1984b7e3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -187,7 +187,7 @@ int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s)
seq = ++ring->fence_drv.sync_seq;
amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
- seq, AMDGPU_FENCE_FLAG_INT);
+ seq, 0);
*s = seq;
@@ -410,7 +410,6 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
unsigned num_hw_submission)
{
- long timeout;
int r;
/* Check that num_hw_submission is a power of two */
@@ -434,20 +433,9 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
/* No need to setup the GPU scheduler for KIQ ring */
if (ring->funcs->type != AMDGPU_RING_TYPE_KIQ) {
- timeout = msecs_to_jiffies(amdgpu_lockup_timeout);
- if (timeout == 0) {
- /*
- * FIXME:
- * Delayed workqueue cannot use it directly,
- * so the scheduler will not use delayed workqueue if
- * MAX_SCHEDULE_TIMEOUT is set.
- * Currently keep it simple and silly.
- */
- timeout = MAX_SCHEDULE_TIMEOUT;
- }
- r = amd_sched_init(&ring->sched, &amdgpu_sched_ops,
+ r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
num_hw_submission, amdgpu_job_hang_limit,
- timeout, ring->name);
+ msecs_to_jiffies(amdgpu_lockup_timeout), ring->name);
if (r) {
DRM_ERROR("Failed to create scheduler on ring %s.\n",
ring->name);
@@ -503,7 +491,7 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
}
amdgpu_irq_put(adev, ring->fence_drv.irq_src,
ring->fence_drv.irq_type);
- amd_sched_fini(&ring->sched);
+ drm_sched_fini(&ring->sched);
del_timer_sync(&ring->fence_drv.fallback_timer);
for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
dma_fence_put(ring->fence_drv.fences[j]);
@@ -705,7 +693,7 @@ static int amdgpu_debugfs_gpu_recover(struct seq_file *m, void *data)
struct amdgpu_device *adev = dev->dev_private;
seq_printf(m, "gpu recover\n");
- amdgpu_gpu_recover(adev, NULL);
+ amdgpu_device_gpu_recover(adev, NULL, true);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index 1f51897acc5b..0a4f34afaaaa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -57,6 +57,51 @@
*/
/**
+ * amdgpu_gart_dummy_page_init - init dummy page used by the driver
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Allocate the dummy page used by the driver (all asics).
+ * This dummy page is used by the driver as a filler for gart entries
+ * when pages are taken out of the GART.
+ * Returns 0 on success, -ENOMEM on failure.
+ */
+static int amdgpu_gart_dummy_page_init(struct amdgpu_device *adev)
+{
+ if (adev->dummy_page.page)
+ return 0;
+ adev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
+ if (adev->dummy_page.page == NULL)
+ return -ENOMEM;
+ adev->dummy_page.addr = pci_map_page(adev->pdev, adev->dummy_page.page,
+ 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ if (pci_dma_mapping_error(adev->pdev, adev->dummy_page.addr)) {
+ dev_err(&adev->pdev->dev, "Failed to DMA MAP the dummy page\n");
+ __free_page(adev->dummy_page.page);
+ adev->dummy_page.page = NULL;
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+/**
+ * amdgpu_gart_dummy_page_fini - free dummy page used by the driver
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Frees the dummy page used by the driver (all asics).
+ */
+static void amdgpu_gart_dummy_page_fini(struct amdgpu_device *adev)
+{
+ if (adev->dummy_page.page == NULL)
+ return;
+ pci_unmap_page(adev->pdev, adev->dummy_page.addr,
+ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ __free_page(adev->dummy_page.page);
+ adev->dummy_page.page = NULL;
+}
+
+/**
* amdgpu_gart_table_vram_alloc - allocate vram for gart page table
*
* @adev: amdgpu_device pointer
@@ -308,7 +353,7 @@ int amdgpu_gart_init(struct amdgpu_device *adev)
DRM_ERROR("Page size is smaller than GPU page size!\n");
return -EINVAL;
}
- r = amdgpu_dummy_page_init(adev);
+ r = amdgpu_gart_dummy_page_init(adev);
if (r)
return r;
/* Compute table size */
@@ -340,5 +385,5 @@ void amdgpu_gart_fini(struct amdgpu_device *adev)
vfree(adev->gart.pages);
adev->gart.pages = NULL;
#endif
- amdgpu_dummy_page_fini(adev);
+ amdgpu_gart_dummy_page_fini(adev);
}
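The dummy page set up above is the filler address written into GART entries once their backing pages are released. A minimal sketch of how an unbind path might use it follows; example_gart_unbind() and set_gart_pte() are illustrative names, not the driver's actual helpers:

/* Illustrative sketch only: repoint freed GART entries at the dummy page. */
static void example_gart_unbind(struct amdgpu_device *adev,
				uint64_t offset, int npages)
{
	/* DMA address of the page mapped in amdgpu_gart_dummy_page_init() */
	uint64_t dummy = adev->dummy_page.addr;
	unsigned int t = offset / AMDGPU_GPU_PAGE_SIZE;
	int i;

	for (i = 0; i < npages; i++, t++)
		set_gart_pte(adev, t, dummy);	/* hypothetical PTE writer */
}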
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index eb75eb44efc6..10805edcf964 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -851,7 +851,7 @@ static const struct drm_info_list amdgpu_debugfs_gem_list[] = {
};
#endif
-int amdgpu_gem_debugfs_init(struct amdgpu_device *adev)
+int amdgpu_debugfs_gem_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_gem_list, 1);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index ef043361009f..bb40d2529a30 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -203,7 +203,7 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
spin_lock_init(&kiq->ring_lock);
- r = amdgpu_wb_get(adev, &adev->virt.reg_val_offs);
+ r = amdgpu_device_wb_get(adev, &adev->virt.reg_val_offs);
if (r)
return r;
@@ -229,7 +229,7 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring,
struct amdgpu_irq_src *irq)
{
- amdgpu_wb_free(ring->adev, ring->adev->virt.reg_val_offs);
+ amdgpu_device_wb_free(ring->adev, ring->adev->virt.reg_val_offs);
amdgpu_ring_fini(ring);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 0cf86eb357d6..a162d87ca0c8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -149,7 +149,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
return -EINVAL;
}
- if (vm && !job->vm_id) {
+ if (vm && !job->vmid) {
dev_err(adev->dev, "VM IB without ID\n");
return -EINVAL;
}
@@ -211,7 +211,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
!amdgpu_sriov_vf(adev)) /* for SRIOV preemption, Preamble CE ib must be inserted anyway */
continue;
- amdgpu_ring_emit_ib(ring, ib, job ? job->vm_id : 0,
+ amdgpu_ring_emit_ib(ring, ib, job ? job->vmid : 0,
need_ctx_switch);
need_ctx_switch = false;
}
@@ -229,9 +229,8 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
r = amdgpu_fence_emit(ring, f);
if (r) {
dev_err(adev->dev, "failed to emit fence (%d)\n", r);
- if (job && job->vm_id)
- amdgpu_vm_reset_id(adev, ring->funcs->vmhub,
- job->vm_id);
+ if (job && job->vmid)
+ amdgpu_vmid_reset(adev, ring->funcs->vmhub, job->vmid);
amdgpu_ring_undo(ring);
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
new file mode 100644
index 000000000000..16884a0b677b
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -0,0 +1,459 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu_ids.h"
+
+#include <linux/idr.h>
+#include <linux/dma-fence-array.h>
+#include <drm/drmP.h>
+
+#include "amdgpu.h"
+#include "amdgpu_trace.h"
+
+/*
+ * PASID manager
+ *
+ * PASIDs are global address space identifiers that can be shared
+ * between the GPU, an IOMMU and the driver. VMs on different devices
+ * may use the same PASID if they share the same address
+ * space. Therefore PASIDs are allocated using a global IDA. VMs are
+ * looked up from the PASID per amdgpu_device.
+ */
+static DEFINE_IDA(amdgpu_pasid_ida);
+
+/**
+ * amdgpu_pasid_alloc - Allocate a PASID
+ * @bits: Maximum width of the PASID in bits, must be at least 1
+ *
+ * Allocates a PASID of the given width while keeping smaller PASIDs
+ * available if possible.
+ *
+ * Returns a positive integer on success. Returns %-EINVAL if bits==0.
+ * Returns %-ENOSPC if no PASID was available. Returns %-ENOMEM on
+ * memory allocation failure.
+ */
+int amdgpu_pasid_alloc(unsigned int bits)
+{
+ int pasid = -EINVAL;
+
+ for (bits = min(bits, 31U); bits > 0; bits--) {
+ pasid = ida_simple_get(&amdgpu_pasid_ida,
+ 1U << (bits - 1), 1U << bits,
+ GFP_KERNEL);
+ if (pasid != -ENOSPC)
+ break;
+ }
+
+ return pasid;
+}
+
+/**
+ * amdgpu_pasid_free - Free a PASID
+ * @pasid: PASID to free
+ */
+void amdgpu_pasid_free(unsigned int pasid)
+{
+ ida_simple_remove(&amdgpu_pasid_ida, pasid);
+}
+
+/*
+ * VMID manager
+ *
+ * VMIDs are per-VMHUB identifiers used for page table handling.
+ */
+
+/**
+ * amdgpu_vmid_had_gpu_reset - check if reset occurred since last use
+ *
+ * @adev: amdgpu_device pointer
+ * @id: VMID structure
+ *
+ * Check if a GPU reset occurred since the last use of the VMID.
+ */
+bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
+ struct amdgpu_vmid *id)
+{
+ return id->current_gpu_reset_count !=
+ atomic_read(&adev->gpu_reset_counter);
+}
+
+/* id_mgr->lock must be held */
+static int amdgpu_vmid_grab_reserved_locked(struct amdgpu_vm *vm,
+ struct amdgpu_ring *ring,
+ struct amdgpu_sync *sync,
+ struct dma_fence *fence,
+ struct amdgpu_job *job)
+{
+ struct amdgpu_device *adev = ring->adev;
+ unsigned vmhub = ring->funcs->vmhub;
+ uint64_t fence_context = adev->fence_context + ring->idx;
+ struct amdgpu_vmid *id = vm->reserved_vmid[vmhub];
+ struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
+ struct dma_fence *updates = sync->last_vm_update;
+ int r = 0;
+ struct dma_fence *flushed, *tmp;
+ bool needs_flush = vm->use_cpu_for_update;
+
+ flushed = id->flushed_updates;
+ if ((amdgpu_vmid_had_gpu_reset(adev, id)) ||
+ (atomic64_read(&id->owner) != vm->entity.fence_context) ||
+ (job->vm_pd_addr != id->pd_gpu_addr) ||
+ (updates && (!flushed || updates->context != flushed->context ||
+ dma_fence_is_later(updates, flushed))) ||
+ (!id->last_flush || (id->last_flush->context != fence_context &&
+ !dma_fence_is_signaled(id->last_flush)))) {
+ needs_flush = true;
+ /* to prevent one context from being starved by another */
+ id->pd_gpu_addr = 0;
+ tmp = amdgpu_sync_peek_fence(&id->active, ring);
+ if (tmp) {
+ r = amdgpu_sync_fence(adev, sync, tmp, false);
+ return r;
+ }
+ }
+
+ /* Good, we can use this VMID. Remember this submission as
+ * a user of the VMID.
+ */
+ r = amdgpu_sync_fence(ring->adev, &id->active, fence, false);
+ if (r)
+ goto out;
+
+ if (updates && (!flushed || updates->context != flushed->context ||
+ dma_fence_is_later(updates, flushed))) {
+ dma_fence_put(id->flushed_updates);
+ id->flushed_updates = dma_fence_get(updates);
+ }
+ id->pd_gpu_addr = job->vm_pd_addr;
+ atomic64_set(&id->owner, vm->entity.fence_context);
+ job->vm_needs_flush = needs_flush;
+ if (needs_flush) {
+ dma_fence_put(id->last_flush);
+ id->last_flush = NULL;
+ }
+ job->vmid = id - id_mgr->ids;
+ trace_amdgpu_vm_grab_id(vm, ring, job);
+out:
+ return r;
+}
+
+/**
+ * amdgpu_vmid_grab - allocate the next free VMID
+ *
+ * @vm: vm to allocate id for
+ * @ring: ring we want to submit job to
+ * @sync: sync object where we add dependencies
+ * @fence: fence protecting ID from reuse
+ * @job: job that will run on the allocated VMID
+ *
+ * Allocate an id for the vm, adding fences to the sync obj as necessary.
+ */
+int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
+ struct amdgpu_sync *sync, struct dma_fence *fence,
+ struct amdgpu_job *job)
+{
+ struct amdgpu_device *adev = ring->adev;
+ unsigned vmhub = ring->funcs->vmhub;
+ struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
+ uint64_t fence_context = adev->fence_context + ring->idx;
+ struct dma_fence *updates = sync->last_vm_update;
+ struct amdgpu_vmid *id, *idle;
+ struct dma_fence **fences;
+ unsigned i;
+ int r = 0;
+
+ mutex_lock(&id_mgr->lock);
+ if (vm->reserved_vmid[vmhub]) {
+ r = amdgpu_vmid_grab_reserved_locked(vm, ring, sync, fence, job);
+ mutex_unlock(&id_mgr->lock);
+ return r;
+ }
+ fences = kmalloc_array(sizeof(void *), id_mgr->num_ids, GFP_KERNEL);
+ if (!fences) {
+ mutex_unlock(&id_mgr->lock);
+ return -ENOMEM;
+ }
+ /* Check if we have an idle VMID */
+ i = 0;
+ list_for_each_entry(idle, &id_mgr->ids_lru, list) {
+ fences[i] = amdgpu_sync_peek_fence(&idle->active, ring);
+ if (!fences[i])
+ break;
+ ++i;
+ }
+
+ /* If we can't find an idle VMID to use, wait till one becomes available */
+ if (&idle->list == &id_mgr->ids_lru) {
+ u64 fence_context = adev->vm_manager.fence_context + ring->idx;
+ unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
+ struct dma_fence_array *array;
+ unsigned j;
+
+ for (j = 0; j < i; ++j)
+ dma_fence_get(fences[j]);
+
+ array = dma_fence_array_create(i, fences, fence_context,
+ seqno, true);
+ if (!array) {
+ for (j = 0; j < i; ++j)
+ dma_fence_put(fences[j]);
+ kfree(fences);
+ r = -ENOMEM;
+ goto error;
+ }
+
+
+ r = amdgpu_sync_fence(ring->adev, sync, &array->base, false);
+ dma_fence_put(&array->base);
+ if (r)
+ goto error;
+
+ mutex_unlock(&id_mgr->lock);
+ return 0;
+
+ }
+ kfree(fences);
+
+ job->vm_needs_flush = vm->use_cpu_for_update;
+ /* Check if we can use a VMID already assigned to this VM */
+ list_for_each_entry_reverse(id, &id_mgr->ids_lru, list) {
+ struct dma_fence *flushed;
+ bool needs_flush = vm->use_cpu_for_update;
+
+ /* Check all the prerequisites to using this VMID */
+ if (amdgpu_vmid_had_gpu_reset(adev, id))
+ continue;
+
+ if (atomic64_read(&id->owner) != vm->entity.fence_context)
+ continue;
+
+ if (job->vm_pd_addr != id->pd_gpu_addr)
+ continue;
+
+ if (!id->last_flush ||
+ (id->last_flush->context != fence_context &&
+ !dma_fence_is_signaled(id->last_flush)))
+ needs_flush = true;
+
+ flushed = id->flushed_updates;
+ if (updates && (!flushed || dma_fence_is_later(updates, flushed)))
+ needs_flush = true;
+
+ /* Concurrent flushes are only possible starting with Vega10 */
+ if (adev->asic_type < CHIP_VEGA10 && needs_flush)
+ continue;
+
+ /* Good, we can use this VMID. Remember this submission as
+ * a user of the VMID.
+ */
+ r = amdgpu_sync_fence(ring->adev, &id->active, fence, false);
+ if (r)
+ goto error;
+
+ if (updates && (!flushed || dma_fence_is_later(updates, flushed))) {
+ dma_fence_put(id->flushed_updates);
+ id->flushed_updates = dma_fence_get(updates);
+ }
+
+ if (needs_flush)
+ goto needs_flush;
+ else
+ goto no_flush_needed;
+
+ }
+
+ /* Still no ID to use? Then use the idle one found earlier */
+ id = idle;
+
+ /* Remember this submission as user of the VMID */
+ r = amdgpu_sync_fence(ring->adev, &id->active, fence, false);
+ if (r)
+ goto error;
+
+ id->pd_gpu_addr = job->vm_pd_addr;
+ dma_fence_put(id->flushed_updates);
+ id->flushed_updates = dma_fence_get(updates);
+ atomic64_set(&id->owner, vm->entity.fence_context);
+
+needs_flush:
+ job->vm_needs_flush = true;
+ dma_fence_put(id->last_flush);
+ id->last_flush = NULL;
+
+no_flush_needed:
+ list_move_tail(&id->list, &id_mgr->ids_lru);
+
+ job->vmid = id - id_mgr->ids;
+ trace_amdgpu_vm_grab_id(vm, ring, job);
+
+error:
+ mutex_unlock(&id_mgr->lock);
+ return r;
+}
+
+int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm,
+ unsigned vmhub)
+{
+ struct amdgpu_vmid_mgr *id_mgr;
+ struct amdgpu_vmid *idle;
+ int r = 0;
+
+ id_mgr = &adev->vm_manager.id_mgr[vmhub];
+ mutex_lock(&id_mgr->lock);
+ if (vm->reserved_vmid[vmhub])
+ goto unlock;
+ if (atomic_inc_return(&id_mgr->reserved_vmid_num) >
+ AMDGPU_VM_MAX_RESERVED_VMID) {
+ DRM_ERROR("Over limitation of reserved vmid\n");
+ atomic_dec(&id_mgr->reserved_vmid_num);
+ r = -EINVAL;
+ goto unlock;
+ }
+ /* Grab the VMID from the first entry on the LRU list */
+ idle = list_first_entry(&id_mgr->ids_lru, struct amdgpu_vmid, list);
+ list_del_init(&idle->list);
+ vm->reserved_vmid[vmhub] = idle;
+ mutex_unlock(&id_mgr->lock);
+
+ return 0;
+unlock:
+ mutex_unlock(&id_mgr->lock);
+ return r;
+}
+
+void amdgpu_vmid_free_reserved(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm,
+ unsigned vmhub)
+{
+ struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
+
+ mutex_lock(&id_mgr->lock);
+ if (vm->reserved_vmid[vmhub]) {
+ list_add(&vm->reserved_vmid[vmhub]->list,
+ &id_mgr->ids_lru);
+ vm->reserved_vmid[vmhub] = NULL;
+ atomic_dec(&id_mgr->reserved_vmid_num);
+ }
+ mutex_unlock(&id_mgr->lock);
+}
+
+/**
+ * amdgpu_vmid_reset - reset VMID to zero
+ *
+ * @adev: amdgpu device structure
+ * @vmhub: vmhub the VMID belongs to
+ * @vmid: vmid number to use
+ *
+ * Reset saved GDS, GWS and OA to force a switch on the next flush.
+ */
+void amdgpu_vmid_reset(struct amdgpu_device *adev, unsigned vmhub,
+ unsigned vmid)
+{
+ struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
+ struct amdgpu_vmid *id = &id_mgr->ids[vmid];
+
+ atomic64_set(&id->owner, 0);
+ id->gds_base = 0;
+ id->gds_size = 0;
+ id->gws_base = 0;
+ id->gws_size = 0;
+ id->oa_base = 0;
+ id->oa_size = 0;
+}
+
+/**
+ * amdgpu_vmid_reset_all - reset all VMIDs to zero
+ *
+ * @adev: amdgpu device structure
+ *
+ * Reset all VMIDs to force a flush on next use
+ */
+void amdgpu_vmid_reset_all(struct amdgpu_device *adev)
+{
+ unsigned i, j;
+
+ for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
+ struct amdgpu_vmid_mgr *id_mgr =
+ &adev->vm_manager.id_mgr[i];
+
+ for (j = 1; j < id_mgr->num_ids; ++j)
+ amdgpu_vmid_reset(adev, i, j);
+ }
+}
+
+/**
+ * amdgpu_vmid_mgr_init - init the VMID manager
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Initialize the VMID manager structures.
+ */
+void amdgpu_vmid_mgr_init(struct amdgpu_device *adev)
+{
+ unsigned i, j;
+
+ for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
+ struct amdgpu_vmid_mgr *id_mgr =
+ &adev->vm_manager.id_mgr[i];
+
+ mutex_init(&id_mgr->lock);
+ INIT_LIST_HEAD(&id_mgr->ids_lru);
+ atomic_set(&id_mgr->reserved_vmid_num, 0);
+
+ /* skip over VMID 0, since it is the system VM */
+ for (j = 1; j < id_mgr->num_ids; ++j) {
+ amdgpu_vmid_reset(adev, i, j);
+ amdgpu_sync_create(&id_mgr->ids[i].active);
+ list_add_tail(&id_mgr->ids[j].list, &id_mgr->ids_lru);
+ }
+ }
+
+ adev->vm_manager.fence_context =
+ dma_fence_context_alloc(AMDGPU_MAX_RINGS);
+ for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
+ adev->vm_manager.seqno[i] = 0;
+}
+
+/**
+ * amdgpu_vmid_mgr_fini - clean up the VMID manager
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Clean up the VMID manager and free its resources.
+ */
+void amdgpu_vmid_mgr_fini(struct amdgpu_device *adev)
+{
+ unsigned i, j;
+
+ for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
+ struct amdgpu_vmid_mgr *id_mgr =
+ &adev->vm_manager.id_mgr[i];
+
+ mutex_destroy(&id_mgr->lock);
+ for (j = 0; j < AMDGPU_NUM_VMID; ++j) {
+ struct amdgpu_vmid *id = &id_mgr->ids[j];
+
+ amdgpu_sync_free(&id->active);
+ dma_fence_put(id->flushed_updates);
+ dma_fence_put(id->last_flush);
+ }
+ }
+}
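As a usage sketch for the helpers above (illustrative only; the caller, the error handling and the vm->pasid field are assumptions here): amdgpu_pasid_alloc(16) first tries the range [1 << 15, 1 << 16) and only falls back to smaller power-of-two ranges on -ENOSPC, which keeps narrow PASIDs free for devices that cannot address wide ones.

/* Illustrative usage of the PASID helpers added above. */
static int example_assign_pasid(struct amdgpu_vm *vm)
{
	int pasid;

	pasid = amdgpu_pasid_alloc(16);	/* [32768, 65536) first, then smaller */
	if (pasid < 0)
		return pasid;		/* -EINVAL, -ENOSPC or -ENOMEM */

	vm->pasid = pasid;		/* assumes the vm tracks its PASID */
	return 0;
}

/* ...and later, when the VM goes away: amdgpu_pasid_free(vm->pasid); */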
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
new file mode 100644
index 000000000000..ad931fa570b3
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __AMDGPU_IDS_H__
+#define __AMDGPU_IDS_H__
+
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/dma-fence.h>
+
+#include "amdgpu_sync.h"
+
+/* maximum number of VMIDs */
+#define AMDGPU_NUM_VMID 16
+
+struct amdgpu_device;
+struct amdgpu_vm;
+struct amdgpu_ring;
+struct amdgpu_sync;
+struct amdgpu_job;
+
+struct amdgpu_vmid {
+ struct list_head list;
+ struct amdgpu_sync active;
+ struct dma_fence *last_flush;
+ atomic64_t owner;
+
+ uint64_t pd_gpu_addr;
+ /* last flushed PD/PT update */
+ struct dma_fence *flushed_updates;
+
+ uint32_t current_gpu_reset_count;
+
+ uint32_t gds_base;
+ uint32_t gds_size;
+ uint32_t gws_base;
+ uint32_t gws_size;
+ uint32_t oa_base;
+ uint32_t oa_size;
+};
+
+struct amdgpu_vmid_mgr {
+ struct mutex lock;
+ unsigned num_ids;
+ struct list_head ids_lru;
+ struct amdgpu_vmid ids[AMDGPU_NUM_VMID];
+ atomic_t reserved_vmid_num;
+};
+
+int amdgpu_pasid_alloc(unsigned int bits);
+void amdgpu_pasid_free(unsigned int pasid);
+
+bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
+ struct amdgpu_vmid *id);
+int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm,
+ unsigned vmhub);
+void amdgpu_vmid_free_reserved(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm,
+ unsigned vmhub);
+int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
+ struct amdgpu_sync *sync, struct dma_fence *fence,
+ struct amdgpu_job *job);
+void amdgpu_vmid_reset(struct amdgpu_device *adev, unsigned vmhub,
+ unsigned vmid);
+void amdgpu_vmid_reset_all(struct amdgpu_device *adev);
+
+void amdgpu_vmid_mgr_init(struct amdgpu_device *adev);
+void amdgpu_vmid_mgr_fini(struct amdgpu_device *adev);
+
+#endif
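A hedged sketch of the reserved-VMID API declared above; AMDGPU_GFXHUB as the vmhub index and the simplified error handling are assumptions for illustration:

/* Illustrative only: pin a dedicated VMID on the GFX hub for this VM. */
static int example_reserve_gfx_vmid(struct amdgpu_device *adev,
				    struct amdgpu_vm *vm)
{
	int r;

	r = amdgpu_vmid_alloc_reserved(adev, vm, AMDGPU_GFXHUB);
	if (r)
		return r;	/* fails once the per-hub reservation limit is hit */

	/* jobs from this VM on the GFX hub now always grab the reserved VMID */

	amdgpu_vmid_free_reserved(adev, vm, AMDGPU_GFXHUB);
	return 0;
}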
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
index f5f27e4f0f7f..06373d44b3da 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
@@ -92,15 +92,15 @@ int amdgpu_ih_ring_init(struct amdgpu_device *adev, unsigned ring_size,
}
return 0;
} else {
- r = amdgpu_wb_get(adev, &adev->irq.ih.wptr_offs);
+ r = amdgpu_device_wb_get(adev, &adev->irq.ih.wptr_offs);
if (r) {
dev_err(adev->dev, "(%d) ih wptr_offs wb alloc failed\n", r);
return r;
}
- r = amdgpu_wb_get(adev, &adev->irq.ih.rptr_offs);
+ r = amdgpu_device_wb_get(adev, &adev->irq.ih.rptr_offs);
if (r) {
- amdgpu_wb_free(adev, adev->irq.ih.wptr_offs);
+ amdgpu_device_wb_free(adev, adev->irq.ih.wptr_offs);
dev_err(adev->dev, "(%d) ih rptr_offs wb alloc failed\n", r);
return r;
}
@@ -133,8 +133,8 @@ void amdgpu_ih_ring_fini(struct amdgpu_device *adev)
amdgpu_bo_free_kernel(&adev->irq.ih.ring_obj,
&adev->irq.ih.gpu_addr,
(void **)&adev->irq.ih.ring);
- amdgpu_wb_free(adev, adev->irq.ih.wptr_offs);
- amdgpu_wb_free(adev, adev->irq.ih.rptr_offs);
+ amdgpu_device_wb_free(adev, adev->irq.ih.wptr_offs);
+ amdgpu_device_wb_free(adev, adev->irq.ih.rptr_offs);
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
index ada89358e220..29cf10927a92 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
@@ -105,8 +105,8 @@ struct amdgpu_iv_entry {
unsigned client_id;
unsigned src_id;
unsigned ring_id;
- unsigned vm_id;
- unsigned vm_id_src;
+ unsigned vmid;
+ unsigned vmid_src;
uint64_t timestamp;
unsigned timestamp_src;
unsigned pas_id;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index c340774082ea..56bcd59c3399 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -88,7 +88,7 @@ static void amdgpu_irq_reset_work_func(struct work_struct *work)
reset_work);
if (!amdgpu_sriov_vf(adev))
- amdgpu_gpu_recover(adev, NULL);
+ amdgpu_device_gpu_recover(adev, NULL, false);
}
/* Disable *all* interrupts */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index bdc210ac74f8..2bd56760c744 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -28,7 +28,7 @@
#include "amdgpu.h"
#include "amdgpu_trace.h"
-static void amdgpu_job_timedout(struct amd_sched_job *s_job)
+static void amdgpu_job_timedout(struct drm_sched_job *s_job)
{
struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);
@@ -37,7 +37,7 @@ static void amdgpu_job_timedout(struct amd_sched_job *s_job)
atomic_read(&job->ring->fence_drv.last_seq),
job->ring->fence_drv.sync_seq);
- amdgpu_gpu_recover(job->adev, job);
+ amdgpu_device_gpu_recover(job->adev, job, false);
}
int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
@@ -96,7 +96,7 @@ void amdgpu_job_free_resources(struct amdgpu_job *job)
amdgpu_ib_free(job->adev, &job->ibs[i], f);
}
-static void amdgpu_job_free_cb(struct amd_sched_job *s_job)
+static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
{
struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);
@@ -118,7 +118,7 @@ void amdgpu_job_free(struct amdgpu_job *job)
}
int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
- struct amd_sched_entity *entity, void *owner,
+ struct drm_sched_entity *entity, void *owner,
struct dma_fence **f)
{
int r;
@@ -127,7 +127,7 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
if (!f)
return -EINVAL;
- r = amd_sched_job_init(&job->base, &ring->sched, entity, owner);
+ r = drm_sched_job_init(&job->base, &ring->sched, entity, owner);
if (r)
return r;
@@ -136,13 +136,13 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
*f = dma_fence_get(&job->base.s_fence->finished);
amdgpu_job_free_resources(job);
amdgpu_ring_priority_get(job->ring, job->base.s_priority);
- amd_sched_entity_push_job(&job->base, entity);
+ drm_sched_entity_push_job(&job->base, entity);
return 0;
}
-static struct dma_fence *amdgpu_job_dependency(struct amd_sched_job *sched_job,
- struct amd_sched_entity *s_entity)
+static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
+ struct drm_sched_entity *s_entity)
{
struct amdgpu_job *job = to_amdgpu_job(sched_job);
struct amdgpu_vm *vm = job->vm;
@@ -151,19 +151,19 @@ static struct dma_fence *amdgpu_job_dependency(struct amd_sched_job *sched_job,
struct dma_fence *fence = amdgpu_sync_get_fence(&job->sync, &explicit);
if (fence && explicit) {
- if (amd_sched_dependency_optimized(fence, s_entity)) {
+ if (drm_sched_dependency_optimized(fence, s_entity)) {
r = amdgpu_sync_fence(job->adev, &job->sched_sync, fence, false);
if (r)
DRM_ERROR("Error adding fence to sync (%d)\n", r);
}
}
- while (fence == NULL && vm && !job->vm_id) {
+ while (fence == NULL && vm && !job->vmid) {
struct amdgpu_ring *ring = job->ring;
- r = amdgpu_vm_grab_id(vm, ring, &job->sync,
- &job->base.s_fence->finished,
- job);
+ r = amdgpu_vmid_grab(vm, ring, &job->sync,
+ &job->base.s_fence->finished,
+ job);
if (r)
DRM_ERROR("Error getting VM ID (%d)\n", r);
@@ -173,7 +173,7 @@ static struct dma_fence *amdgpu_job_dependency(struct amd_sched_job *sched_job,
return fence;
}
-static struct dma_fence *amdgpu_job_run(struct amd_sched_job *sched_job)
+static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
{
struct dma_fence *fence = NULL, *finished;
struct amdgpu_device *adev;
@@ -211,7 +211,7 @@ static struct dma_fence *amdgpu_job_run(struct amd_sched_job *sched_job)
return fence;
}
-const struct amd_sched_backend_ops amdgpu_sched_ops = {
+const struct drm_sched_backend_ops amdgpu_sched_ops = {
.dependency = amdgpu_job_dependency,
.run_job = amdgpu_job_run,
.timedout_job = amdgpu_job_timedout,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index dc0a8be98043..5c4c3e0d527b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -37,6 +37,18 @@
#include "amdgpu.h"
#include "amdgpu_trace.h"
+static bool amdgpu_need_backup(struct amdgpu_device *adev)
+{
+ if (adev->flags & AMD_IS_APU)
+ return false;
+
+ if (amdgpu_gpu_recovery == 0 ||
+ (amdgpu_gpu_recovery == -1 && !amdgpu_sriov_vf(adev)))
+ return false;
+
+ return true;
+}
+
static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
@@ -327,7 +339,12 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
uint64_t init_value,
struct amdgpu_bo **bo_ptr)
{
- struct ttm_operation_ctx ctx = { !kernel, false };
+ struct ttm_operation_ctx ctx = {
+ .interruptible = !kernel,
+ .no_wait_gpu = false,
+ .allow_reserved_eviction = true,
+ .resv = resv
+ };
struct amdgpu_bo *bo;
enum ttm_bo_type type;
unsigned long page_align;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 6f56ff606e43..01a996c6b802 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -1,4 +1,6 @@
/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
@@ -1276,16 +1278,16 @@ void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
/* XXX select vce level based on ring/task */
adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
mutex_unlock(&adev->pm.mutex);
- amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
- AMD_CG_STATE_UNGATE);
- amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
- AMD_PG_STATE_UNGATE);
+ amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+ AMD_CG_STATE_UNGATE);
+ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+ AMD_PG_STATE_UNGATE);
amdgpu_pm_compute_clocks(adev);
} else {
- amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
- AMD_PG_STATE_GATE);
- amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
- AMD_CG_STATE_GATE);
+ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+ AMD_PG_STATE_GATE);
+ amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+ AMD_CG_STATE_GATE);
mutex_lock(&adev->pm.mutex);
adev->pm.dpm.vce_active = false;
mutex_unlock(&adev->pm.mutex);
@@ -1582,7 +1584,7 @@ static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
struct drm_device *ddev = adev->ddev;
u32 flags = 0;
- amdgpu_get_clockgating_state(adev, &flags);
+ amdgpu_device_ip_get_clockgating_state(adev, &flags);
seq_printf(m, "Clock Gating Flags Mask: 0x%x\n", flags);
amdgpu_parse_cg_state(m, flags);
seq_printf(m, "\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index a98fbbb4739f..13044e66dcaf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -164,7 +164,7 @@ void amdgpu_ring_undo(struct amdgpu_ring *ring)
* Release a request for executing at @priority
*/
void amdgpu_ring_priority_put(struct amdgpu_ring *ring,
- enum amd_sched_priority priority)
+ enum drm_sched_priority priority)
{
int i;
@@ -175,7 +175,7 @@ void amdgpu_ring_priority_put(struct amdgpu_ring *ring,
return;
/* no need to restore if the job is already at the lowest priority */
- if (priority == AMD_SCHED_PRIORITY_NORMAL)
+ if (priority == DRM_SCHED_PRIORITY_NORMAL)
return;
mutex_lock(&ring->priority_mutex);
@@ -184,8 +184,8 @@ void amdgpu_ring_priority_put(struct amdgpu_ring *ring,
goto out_unlock;
/* decay priority to the next level with a job available */
- for (i = priority; i >= AMD_SCHED_PRIORITY_MIN; i--) {
- if (i == AMD_SCHED_PRIORITY_NORMAL
+ for (i = priority; i >= DRM_SCHED_PRIORITY_MIN; i--) {
+ if (i == DRM_SCHED_PRIORITY_NORMAL
|| atomic_read(&ring->num_jobs[i])) {
ring->priority = i;
ring->funcs->set_priority(ring, i);
@@ -206,7 +206,7 @@ out_unlock:
* Request a ring's priority to be raised to @priority (refcounted).
*/
void amdgpu_ring_priority_get(struct amdgpu_ring *ring,
- enum amd_sched_priority priority)
+ enum drm_sched_priority priority)
{
if (!ring->funcs->set_priority)
return;
@@ -263,25 +263,25 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
return r;
}
- r = amdgpu_wb_get(adev, &ring->rptr_offs);
+ r = amdgpu_device_wb_get(adev, &ring->rptr_offs);
if (r) {
dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r);
return r;
}
- r = amdgpu_wb_get(adev, &ring->wptr_offs);
+ r = amdgpu_device_wb_get(adev, &ring->wptr_offs);
if (r) {
dev_err(adev->dev, "(%d) ring wptr_offs wb alloc failed\n", r);
return r;
}
- r = amdgpu_wb_get(adev, &ring->fence_offs);
+ r = amdgpu_device_wb_get(adev, &ring->fence_offs);
if (r) {
dev_err(adev->dev, "(%d) ring fence_offs wb alloc failed\n", r);
return r;
}
- r = amdgpu_wb_get(adev, &ring->cond_exe_offs);
+ r = amdgpu_device_wb_get(adev, &ring->cond_exe_offs);
if (r) {
dev_err(adev->dev, "(%d) ring cond_exec_polling wb alloc failed\n", r);
return r;
@@ -317,12 +317,12 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
}
ring->max_dw = max_dw;
- ring->priority = AMD_SCHED_PRIORITY_NORMAL;
+ ring->priority = DRM_SCHED_PRIORITY_NORMAL;
mutex_init(&ring->priority_mutex);
INIT_LIST_HEAD(&ring->lru_list);
amdgpu_ring_lru_touch(adev, ring);
- for (i = 0; i < AMD_SCHED_PRIORITY_MAX; ++i)
+ for (i = 0; i < DRM_SCHED_PRIORITY_MAX; ++i)
atomic_set(&ring->num_jobs[i], 0);
if (amdgpu_debugfs_ring_init(adev, ring)) {
@@ -348,11 +348,11 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring)
if (!(ring->adev) || !(ring->adev->rings[ring->idx]))
return;
- amdgpu_wb_free(ring->adev, ring->rptr_offs);
- amdgpu_wb_free(ring->adev, ring->wptr_offs);
+ amdgpu_device_wb_free(ring->adev, ring->rptr_offs);
+ amdgpu_device_wb_free(ring->adev, ring->wptr_offs);
- amdgpu_wb_free(ring->adev, ring->cond_exe_offs);
- amdgpu_wb_free(ring->adev, ring->fence_offs);
+ amdgpu_device_wb_free(ring->adev, ring->cond_exe_offs);
+ amdgpu_device_wb_free(ring->adev, ring->fence_offs);
amdgpu_bo_free_kernel(&ring->ring_obj,
&ring->gpu_addr,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index a6b89e3932a5..102dad3edf6a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -25,7 +25,7 @@
#define __AMDGPU_RING_H__
#include <drm/amdgpu_drm.h>
-#include "gpu_scheduler.h"
+#include <drm/gpu_scheduler.h>
/* max number of rings */
#define AMDGPU_MAX_RINGS 18
@@ -121,11 +121,11 @@ struct amdgpu_ring_funcs {
/* command emit functions */
void (*emit_ib)(struct amdgpu_ring *ring,
struct amdgpu_ib *ib,
- unsigned vm_id, bool ctx_switch);
+ unsigned vmid, bool ctx_switch);
void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr,
uint64_t seq, unsigned flags);
void (*emit_pipeline_sync)(struct amdgpu_ring *ring);
- void (*emit_vm_flush)(struct amdgpu_ring *ring, unsigned vm_id,
+ void (*emit_vm_flush)(struct amdgpu_ring *ring, unsigned vmid,
uint64_t pd_addr);
void (*emit_hdp_flush)(struct amdgpu_ring *ring);
void (*emit_hdp_invalidate)(struct amdgpu_ring *ring);
@@ -154,14 +154,14 @@ struct amdgpu_ring_funcs {
void (*emit_tmz)(struct amdgpu_ring *ring, bool start);
/* priority functions */
void (*set_priority) (struct amdgpu_ring *ring,
- enum amd_sched_priority priority);
+ enum drm_sched_priority priority);
};
struct amdgpu_ring {
struct amdgpu_device *adev;
const struct amdgpu_ring_funcs *funcs;
struct amdgpu_fence_driver fence_drv;
- struct amd_gpu_scheduler sched;
+ struct drm_gpu_scheduler sched;
struct list_head lru_list;
struct amdgpu_bo *ring_obj;
@@ -186,6 +186,7 @@ struct amdgpu_ring {
uint64_t eop_gpu_addr;
u32 doorbell_index;
bool use_doorbell;
+ bool use_pollmem;
unsigned wptr_offs;
unsigned fence_offs;
uint64_t current_ctx;
@@ -196,7 +197,7 @@ struct amdgpu_ring {
unsigned vm_inv_eng;
bool has_compute_vm_bug;
- atomic_t num_jobs[AMD_SCHED_PRIORITY_MAX];
+ atomic_t num_jobs[DRM_SCHED_PRIORITY_MAX];
struct mutex priority_mutex;
/* protected by priority_mutex */
int priority;
@@ -212,9 +213,9 @@ void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
void amdgpu_ring_commit(struct amdgpu_ring *ring);
void amdgpu_ring_undo(struct amdgpu_ring *ring);
void amdgpu_ring_priority_get(struct amdgpu_ring *ring,
- enum amd_sched_priority priority);
+ enum drm_sched_priority priority);
void amdgpu_ring_priority_put(struct amdgpu_ring *ring,
- enum amd_sched_priority priority);
+ enum drm_sched_priority priority);
int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
unsigned ring_size, struct amdgpu_irq_src *irq_src,
unsigned irq_type);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
index 290cc3f9c433..86a0715d9431 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
@@ -29,29 +29,29 @@
#include "amdgpu_vm.h"
-enum amd_sched_priority amdgpu_to_sched_priority(int amdgpu_priority)
+enum drm_sched_priority amdgpu_to_sched_priority(int amdgpu_priority)
{
switch (amdgpu_priority) {
case AMDGPU_CTX_PRIORITY_VERY_HIGH:
- return AMD_SCHED_PRIORITY_HIGH_HW;
+ return DRM_SCHED_PRIORITY_HIGH_HW;
case AMDGPU_CTX_PRIORITY_HIGH:
- return AMD_SCHED_PRIORITY_HIGH_SW;
+ return DRM_SCHED_PRIORITY_HIGH_SW;
case AMDGPU_CTX_PRIORITY_NORMAL:
- return AMD_SCHED_PRIORITY_NORMAL;
+ return DRM_SCHED_PRIORITY_NORMAL;
case AMDGPU_CTX_PRIORITY_LOW:
case AMDGPU_CTX_PRIORITY_VERY_LOW:
- return AMD_SCHED_PRIORITY_LOW;
+ return DRM_SCHED_PRIORITY_LOW;
case AMDGPU_CTX_PRIORITY_UNSET:
- return AMD_SCHED_PRIORITY_UNSET;
+ return DRM_SCHED_PRIORITY_UNSET;
default:
WARN(1, "Invalid context priority %d\n", amdgpu_priority);
- return AMD_SCHED_PRIORITY_INVALID;
+ return DRM_SCHED_PRIORITY_INVALID;
}
}
static int amdgpu_sched_process_priority_override(struct amdgpu_device *adev,
int fd,
- enum amd_sched_priority priority)
+ enum drm_sched_priority priority)
{
struct file *filp = fcheck(fd);
struct drm_file *file;
@@ -86,11 +86,11 @@ int amdgpu_sched_ioctl(struct drm_device *dev, void *data,
{
union drm_amdgpu_sched *args = data;
struct amdgpu_device *adev = dev->dev_private;
- enum amd_sched_priority priority;
+ enum drm_sched_priority priority;
int r;
priority = amdgpu_to_sched_priority(args->in.priority);
- if (args->in.flags || priority == AMD_SCHED_PRIORITY_INVALID)
+ if (args->in.flags || priority == DRM_SCHED_PRIORITY_INVALID)
return -EINVAL;
switch (args->in.op) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.h
index b28c067d3822..2a1a0c734bdd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.h
@@ -27,7 +27,7 @@
#include <drm/drmP.h>
-enum amd_sched_priority amdgpu_to_sched_priority(int amdgpu_priority);
+enum drm_sched_priority amdgpu_to_sched_priority(int amdgpu_priority);
int amdgpu_sched_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index ebe1ffbab0c1..df65c66dc956 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -64,7 +64,7 @@ void amdgpu_sync_create(struct amdgpu_sync *sync)
static bool amdgpu_sync_same_dev(struct amdgpu_device *adev,
struct dma_fence *f)
{
- struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
+ struct drm_sched_fence *s_fence = to_drm_sched_fence(f);
if (s_fence) {
struct amdgpu_ring *ring;
@@ -85,7 +85,7 @@ static bool amdgpu_sync_same_dev(struct amdgpu_device *adev,
*/
static void *amdgpu_sync_get_owner(struct dma_fence *f)
{
- struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
+ struct drm_sched_fence *s_fence = to_drm_sched_fence(f);
if (s_fence)
return s_fence->owner;
@@ -120,7 +120,7 @@ static void amdgpu_sync_keep_later(struct dma_fence **keep,
* Tries to add the fence to an existing hash entry. Returns true when an entry
* was found, false otherwise.
*/
-static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct dma_fence *f)
+static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct dma_fence *f, bool explicit)
{
struct amdgpu_sync_entry *e;
@@ -129,6 +129,10 @@ static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct dma_fence *f)
continue;
amdgpu_sync_keep_later(&e->fence, f);
+
+ /* Preserve the explicit flag so we don't lose pipeline sync */
+ e->explicit |= explicit;
+
return true;
}
return false;
@@ -148,12 +152,11 @@ int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
if (!f)
return 0;
-
if (amdgpu_sync_same_dev(adev, f) &&
amdgpu_sync_get_owner(f) == AMDGPU_FENCE_OWNER_VM)
amdgpu_sync_keep_later(&sync->last_vm_update, f);
- if (amdgpu_sync_add_later(sync, f))
+ if (amdgpu_sync_add_later(sync, f, explicit))
return 0;
e = kmem_cache_alloc(amdgpu_sync_slab, GFP_KERNEL);
@@ -245,7 +248,7 @@ struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
hash_for_each_safe(sync->fences, i, tmp, e, node) {
struct dma_fence *f = e->fence;
- struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
+ struct drm_sched_fence *s_fence = to_drm_sched_fence(f);
if (dma_fence_is_signaled(f)) {
hash_del(&e->node);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index f337c316ec2c..cace7a93fc94 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -1,4 +1,26 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
#if !defined(_AMDGPU_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _AMDGPU_TRACE_H_
@@ -60,8 +82,8 @@ TRACE_EVENT(amdgpu_iv,
__field(unsigned, client_id)
__field(unsigned, src_id)
__field(unsigned, ring_id)
- __field(unsigned, vm_id)
- __field(unsigned, vm_id_src)
+ __field(unsigned, vmid)
+ __field(unsigned, vmid_src)
__field(uint64_t, timestamp)
__field(unsigned, timestamp_src)
__field(unsigned, pas_id)
@@ -71,8 +93,8 @@ TRACE_EVENT(amdgpu_iv,
__entry->client_id = iv->client_id;
__entry->src_id = iv->src_id;
__entry->ring_id = iv->ring_id;
- __entry->vm_id = iv->vm_id;
- __entry->vm_id_src = iv->vm_id_src;
+ __entry->vmid = iv->vmid;
+ __entry->vmid_src = iv->vmid_src;
__entry->timestamp = iv->timestamp;
__entry->timestamp_src = iv->timestamp_src;
__entry->pas_id = iv->pas_id;
@@ -81,9 +103,9 @@ TRACE_EVENT(amdgpu_iv,
__entry->src_data[2] = iv->src_data[2];
__entry->src_data[3] = iv->src_data[3];
),
- TP_printk("client_id:%u src_id:%u ring:%u vm_id:%u timestamp: %llu pas_id:%u src_data: %08x %08x %08x %08x\n",
+ TP_printk("client_id:%u src_id:%u ring:%u vmid:%u timestamp: %llu pas_id:%u src_data: %08x %08x %08x %08x\n",
__entry->client_id, __entry->src_id,
- __entry->ring_id, __entry->vm_id,
+ __entry->ring_id, __entry->vmid,
__entry->timestamp, __entry->pas_id,
__entry->src_data[0], __entry->src_data[1],
__entry->src_data[2], __entry->src_data[3])
@@ -197,7 +219,7 @@ TRACE_EVENT(amdgpu_vm_grab_id,
TP_STRUCT__entry(
__field(struct amdgpu_vm *, vm)
__field(u32, ring)
- __field(u32, vm_id)
+ __field(u32, vmid)
__field(u32, vm_hub)
__field(u64, pd_addr)
__field(u32, needs_flush)
@@ -206,13 +228,13 @@ TRACE_EVENT(amdgpu_vm_grab_id,
TP_fast_assign(
__entry->vm = vm;
__entry->ring = ring->idx;
- __entry->vm_id = job->vm_id;
+ __entry->vmid = job->vmid;
__entry->vm_hub = ring->funcs->vmhub,
__entry->pd_addr = job->vm_pd_addr;
__entry->needs_flush = job->vm_needs_flush;
),
TP_printk("vm=%p, ring=%u, id=%u, hub=%u, pd_addr=%010Lx needs_flush=%u",
- __entry->vm, __entry->ring, __entry->vm_id,
+ __entry->vm, __entry->ring, __entry->vmid,
__entry->vm_hub, __entry->pd_addr, __entry->needs_flush)
);
@@ -335,24 +357,24 @@ TRACE_EVENT(amdgpu_vm_copy_ptes,
);
TRACE_EVENT(amdgpu_vm_flush,
- TP_PROTO(struct amdgpu_ring *ring, unsigned vm_id,
+ TP_PROTO(struct amdgpu_ring *ring, unsigned vmid,
uint64_t pd_addr),
- TP_ARGS(ring, vm_id, pd_addr),
+ TP_ARGS(ring, vmid, pd_addr),
TP_STRUCT__entry(
__field(u32, ring)
- __field(u32, vm_id)
+ __field(u32, vmid)
__field(u32, vm_hub)
__field(u64, pd_addr)
),
TP_fast_assign(
__entry->ring = ring->idx;
- __entry->vm_id = vm_id;
+ __entry->vmid = vmid;
__entry->vm_hub = ring->funcs->vmhub;
__entry->pd_addr = pd_addr;
),
TP_printk("ring=%u, id=%u, hub=%u, pd_addr=%010Lx",
- __entry->ring, __entry->vm_id,
+ __entry->ring, __entry->vmid,
__entry->vm_hub,__entry->pd_addr)
);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 952e0bf3bc84..e4bb435e614b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -76,7 +76,7 @@ static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
{
struct drm_global_reference *global_ref;
struct amdgpu_ring *ring;
- struct amd_sched_rq *rq;
+ struct drm_sched_rq *rq;
int r;
adev->mman.mem_global_referenced = false;
@@ -108,8 +108,8 @@ static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
mutex_init(&adev->mman.gtt_window_lock);
ring = adev->mman.buffer_funcs_ring;
- rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL];
- r = amd_sched_entity_init(&ring->sched, &adev->mman.entity,
+ rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
+ r = drm_sched_entity_init(&ring->sched, &adev->mman.entity,
rq, amdgpu_sched_jobs, NULL);
if (r) {
DRM_ERROR("Failed setting up TTM BO move run queue.\n");
@@ -131,7 +131,7 @@ error_mem:
static void amdgpu_ttm_global_fini(struct amdgpu_device *adev)
{
if (adev->mman.mem_global_referenced) {
- amd_sched_entity_fini(adev->mman.entity.sched,
+ drm_sched_entity_fini(adev->mman.entity.sched,
&adev->mman.entity);
mutex_destroy(&adev->mman.gtt_window_lock);
drm_global_item_unref(&adev->mman.bo_global_ref.ref);
@@ -497,7 +497,7 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict,
goto out_cleanup;
}
- r = ttm_tt_bind(bo->ttm, &tmp_mem);
+ r = ttm_tt_bind(bo->ttm, &tmp_mem, ctx);
if (unlikely(r)) {
goto out_cleanup;
}
@@ -505,7 +505,7 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict,
if (unlikely(r)) {
goto out_cleanup;
}
- r = ttm_bo_move_ttm(bo, ctx->interruptible, ctx->no_wait_gpu, new_mem);
+ r = ttm_bo_move_ttm(bo, ctx, new_mem);
out_cleanup:
ttm_bo_mem_put(bo, &tmp_mem);
return r;
@@ -536,7 +536,7 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict,
if (unlikely(r)) {
return r;
}
- r = ttm_bo_move_ttm(bo, ctx->interruptible, ctx->no_wait_gpu, &tmp_mem);
+ r = ttm_bo_move_ttm(bo, ctx, &tmp_mem);
if (unlikely(r)) {
goto out_cleanup;
}
@@ -597,8 +597,7 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
if (r) {
memcpy:
- r = ttm_bo_move_memcpy(bo, ctx->interruptible,
- ctx->no_wait_gpu, new_mem);
+ r = ttm_bo_move_memcpy(bo, ctx, new_mem);
if (r) {
return r;
}
@@ -991,7 +990,8 @@ static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_bo_device *bdev,
return &gtt->ttm.ttm;
}
-static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm)
+static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm,
+ struct ttm_operation_ctx *ctx)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
struct amdgpu_ttm_tt *gtt = (void *)ttm;
@@ -1019,11 +1019,11 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm)
#ifdef CONFIG_SWIOTLB
if (swiotlb_nr_tbl()) {
- return ttm_dma_populate(&gtt->ttm, adev->dev);
+ return ttm_dma_populate(&gtt->ttm, adev->dev, ctx);
}
#endif
- return ttm_populate_and_map_pages(adev->dev, &gtt->ttm);
+ return ttm_populate_and_map_pages(adev->dev, &gtt->ttm, ctx);
}
static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
@@ -1270,6 +1270,101 @@ static struct ttm_bo_driver amdgpu_bo_driver = {
.access_memory = &amdgpu_ttm_access_memory
};
+/*
+ * Firmware Reservation functions
+ */
+/**
+ * amdgpu_ttm_fw_reserve_vram_fini - free fw reserved vram
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * free fw reserved vram if it has been reserved.
+ */
+static void amdgpu_ttm_fw_reserve_vram_fini(struct amdgpu_device *adev)
+{
+ amdgpu_bo_free_kernel(&adev->fw_vram_usage.reserved_bo,
+ NULL, &adev->fw_vram_usage.va);
+}
+
+/**
+ * amdgpu_ttm_fw_reserve_vram_init - create bo vram reservation from fw
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * create bo vram reservation from fw.
+ */
+static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
+{
+ struct ttm_operation_ctx ctx = { false, false };
+ int r = 0;
+ int i;
+ u64 vram_size = adev->mc.visible_vram_size;
+ u64 offset = adev->fw_vram_usage.start_offset;
+ u64 size = adev->fw_vram_usage.size;
+ struct amdgpu_bo *bo;
+
+ adev->fw_vram_usage.va = NULL;
+ adev->fw_vram_usage.reserved_bo = NULL;
+
+ if (adev->fw_vram_usage.size > 0 &&
+ adev->fw_vram_usage.size <= vram_size) {
+
+ r = amdgpu_bo_create(adev, adev->fw_vram_usage.size,
+ PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, NULL, NULL, 0,
+ &adev->fw_vram_usage.reserved_bo);
+ if (r)
+ goto error_create;
+
+ r = amdgpu_bo_reserve(adev->fw_vram_usage.reserved_bo, false);
+ if (r)
+ goto error_reserve;
+
+ /* remove the original mem node and create a new one at the
+ * request position
+ */
+ bo = adev->fw_vram_usage.reserved_bo;
+ offset = ALIGN(offset, PAGE_SIZE);
+ for (i = 0; i < bo->placement.num_placement; ++i) {
+ bo->placements[i].fpfn = offset >> PAGE_SHIFT;
+ bo->placements[i].lpfn = (offset + size) >> PAGE_SHIFT;
+ }
+
+ ttm_bo_mem_put(&bo->tbo, &bo->tbo.mem);
+ r = ttm_bo_mem_space(&bo->tbo, &bo->placement,
+ &bo->tbo.mem, &ctx);
+ if (r)
+ goto error_pin;
+
+ r = amdgpu_bo_pin_restricted(adev->fw_vram_usage.reserved_bo,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ adev->fw_vram_usage.start_offset,
+ (adev->fw_vram_usage.start_offset +
+ adev->fw_vram_usage.size), NULL);
+ if (r)
+ goto error_pin;
+ r = amdgpu_bo_kmap(adev->fw_vram_usage.reserved_bo,
+ &adev->fw_vram_usage.va);
+ if (r)
+ goto error_kmap;
+
+ amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo);
+ }
+ return r;
+
+error_kmap:
+ amdgpu_bo_unpin(adev->fw_vram_usage.reserved_bo);
+error_pin:
+ amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo);
+error_reserve:
+ amdgpu_bo_unref(&adev->fw_vram_usage.reserved_bo);
+error_create:
+ adev->fw_vram_usage.va = NULL;
+ adev->fw_vram_usage.reserved_bo = NULL;
+ return r;
+}
+
int amdgpu_ttm_init(struct amdgpu_device *adev)
{
uint64_t gtt_size;
@@ -1312,7 +1407,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
*The reserved vram for firmware must be pinned to the specified
*place on the VRAM, so reserve it early.
*/
- r = amdgpu_fw_reserve_vram_init(adev);
+ r = amdgpu_ttm_fw_reserve_vram_init(adev);
if (r) {
return r;
}
@@ -1330,9 +1425,11 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
struct sysinfo si;
si_meminfo(&si);
- gtt_size = max(AMDGPU_DEFAULT_GTT_SIZE_MB << 20,
- (uint64_t)si.totalram * si.mem_unit * 3/4);
- } else
+ gtt_size = min(max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
+ adev->mc.mc_vram_size),
+ ((uint64_t)si.totalram * si.mem_unit * 3/4));
+ }
+ else
gtt_size = (uint64_t)amdgpu_gtt_size << 20;
r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_TT, gtt_size >> PAGE_SHIFT);
if (r) {
@@ -1396,7 +1493,7 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
amdgpu_ttm_debugfs_fini(adev);
amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, NULL);
- amdgpu_fw_reserve_vram_fini(adev);
+ amdgpu_ttm_fw_reserve_vram_fini(adev);
ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_VRAM);
ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_TT);
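
The scheduler conversion repeated throughout this patch (amdgpu_ttm.c above, and amdgpu_uvd.c, amdgpu_vce.c, amdgpu_vcn.c and amdgpu_vm.c below) always follows the same shape: pick a run queue by priority from the ring's scheduler, then bind an entity to it with the new drm_sched_* API. The sketch below is illustrative only and not part of the patch; the helper names example_entity_setup/example_entity_teardown are hypothetical, while the drm_sched_* calls and their argument order are taken from the hunks shown here.

#include <drm/gpu_scheduler.h>
#include "amdgpu.h"

/* Minimal sketch of the drm_sched_* entity setup pattern this series
 * converts every engine to.  Assumes "ring" is an already-initialized
 * amdgpu_ring and amdgpu_sched_jobs is the module-wide job queue depth.
 */
static int example_entity_setup(struct amdgpu_ring *ring,
                                struct drm_sched_entity *entity)
{
        /* pick a run queue by priority from the ring's scheduler */
        struct drm_sched_rq *rq =
                &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
        int r;

        /* bind the entity to that run queue */
        r = drm_sched_entity_init(&ring->sched, entity, rq,
                                  amdgpu_sched_jobs, NULL);
        if (r)
                DRM_ERROR("Failed setting up scheduler entity.\n");
        return r;
}

/* teardown mirrors init */
static void example_entity_teardown(struct drm_sched_entity *entity)
{
        drm_sched_entity_fini(entity->sched, entity);
}
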
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index 4f9433e61406..167856f6080f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -25,7 +25,7 @@
#define __AMDGPU_TTM_H__
#include "amdgpu.h"
-#include "gpu_scheduler.h"
+#include <drm/gpu_scheduler.h>
#define AMDGPU_PL_GDS (TTM_PL_PRIV + 0)
#define AMDGPU_PL_GWS (TTM_PL_PRIV + 1)
@@ -55,7 +55,7 @@ struct amdgpu_mman {
struct mutex gtt_window_lock;
/* Scheduler entity for buffer moves */
- struct amd_sched_entity entity;
+ struct drm_sched_entity entity;
};
struct amdgpu_copy_mem {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 2f2a9e17fdb4..b2eae86bf906 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -116,7 +116,7 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work);
int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
{
struct amdgpu_ring *ring;
- struct amd_sched_rq *rq;
+ struct drm_sched_rq *rq;
unsigned long bo_size;
const char *fw_name;
const struct common_firmware_header *hdr;
@@ -230,8 +230,8 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
}
ring = &adev->uvd.ring;
- rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
- r = amd_sched_entity_init(&ring->sched, &adev->uvd.entity,
+ rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
+ r = drm_sched_entity_init(&ring->sched, &adev->uvd.entity,
rq, amdgpu_sched_jobs, NULL);
if (r != 0) {
DRM_ERROR("Failed setting up UVD run queue.\n");
@@ -244,7 +244,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
}
/* from uvd v5.0 HW addressing capacity increased to 64 bits */
- if (!amdgpu_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_UVD, 5, 0))
+ if (!amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_UVD, 5, 0))
adev->uvd.address_64_bit = true;
switch (adev->asic_type) {
@@ -272,7 +272,7 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
int i;
kfree(adev->uvd.saved_bo);
- amd_sched_entity_fini(&adev->uvd.ring.sched, &adev->uvd.entity);
+ drm_sched_entity_fini(&adev->uvd.ring.sched, &adev->uvd.entity);
amdgpu_bo_free_kernel(&adev->uvd.vcpu_bo,
&adev->uvd.gpu_addr,
@@ -297,6 +297,8 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
if (adev->uvd.vcpu_bo == NULL)
return 0;
+ cancel_delayed_work_sync(&adev->uvd.idle_work);
+
for (i = 0; i < adev->uvd.max_handles; ++i)
if (atomic_read(&adev->uvd.handles[i]))
break;
@@ -304,8 +306,6 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
if (i == AMDGPU_MAX_UVD_HANDLES)
return 0;
- cancel_delayed_work_sync(&adev->uvd.idle_work);
-
size = amdgpu_bo_size(adev->uvd.vcpu_bo);
ptr = adev->uvd.cpu_addr;
@@ -346,6 +346,8 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev)
ptr += le32_to_cpu(hdr->ucode_size_bytes);
}
memset_io(ptr, 0, size);
+ /* to restore uvd fence seq */
+ amdgpu_fence_driver_force_completion(&adev->uvd.ring);
}
return 0;
@@ -1153,10 +1155,10 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
} else {
amdgpu_asic_set_uvd_clocks(adev, 0, 0);
/* shutdown the UVD block */
- amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
- AMD_PG_STATE_GATE);
- amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
- AMD_CG_STATE_GATE);
+ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+ AMD_PG_STATE_GATE);
+ amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+ AMD_CG_STATE_GATE);
}
} else {
schedule_delayed_work(&adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
@@ -1176,10 +1178,10 @@ void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring)
amdgpu_dpm_enable_uvd(adev, true);
} else {
amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
- amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
- AMD_CG_STATE_UNGATE);
- amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
- AMD_PG_STATE_UNGATE);
+ amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+ AMD_CG_STATE_UNGATE);
+ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+ AMD_PG_STATE_UNGATE);
}
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
index 845eea993f75..32ea20b99e53 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
@@ -51,8 +51,8 @@ struct amdgpu_uvd {
struct amdgpu_irq_src irq;
bool address_64_bit;
bool use_ctx_buf;
- struct amd_sched_entity entity;
- struct amd_sched_entity entity_enc;
+ struct drm_sched_entity entity;
+ struct drm_sched_entity entity_enc;
uint32_t srbm_soft_reset;
unsigned num_enc_rings;
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index ba6d846b08ff..55a726a322e3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -85,7 +85,7 @@ static void amdgpu_vce_idle_work_handler(struct work_struct *work);
int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
{
struct amdgpu_ring *ring;
- struct amd_sched_rq *rq;
+ struct drm_sched_rq *rq;
const char *fw_name;
const struct common_firmware_header *hdr;
unsigned ucode_version, version_major, version_minor, binary_id;
@@ -174,8 +174,8 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
}
ring = &adev->vce.ring[0];
- rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
- r = amd_sched_entity_init(&ring->sched, &adev->vce.entity,
+ rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
+ r = drm_sched_entity_init(&ring->sched, &adev->vce.entity,
rq, amdgpu_sched_jobs, NULL);
if (r != 0) {
DRM_ERROR("Failed setting up VCE run queue.\n");
@@ -207,7 +207,7 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
if (adev->vce.vcpu_bo == NULL)
return 0;
- amd_sched_entity_fini(&adev->vce.ring[0].sched, &adev->vce.entity);
+ drm_sched_entity_fini(&adev->vce.ring[0].sched, &adev->vce.entity);
amdgpu_bo_free_kernel(&adev->vce.vcpu_bo, &adev->vce.gpu_addr,
(void **)&adev->vce.cpu_addr);
@@ -311,10 +311,10 @@ static void amdgpu_vce_idle_work_handler(struct work_struct *work)
amdgpu_dpm_enable_vce(adev, false);
} else {
amdgpu_asic_set_vce_clocks(adev, 0, 0);
- amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
- AMD_PG_STATE_GATE);
- amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
- AMD_CG_STATE_GATE);
+ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+ AMD_PG_STATE_GATE);
+ amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+ AMD_CG_STATE_GATE);
}
} else {
schedule_delayed_work(&adev->vce.idle_work, VCE_IDLE_TIMEOUT);
@@ -343,10 +343,10 @@ void amdgpu_vce_ring_begin_use(struct amdgpu_ring *ring)
amdgpu_dpm_enable_vce(adev, true);
} else {
amdgpu_asic_set_vce_clocks(adev, 53300, 40000);
- amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
- AMD_CG_STATE_UNGATE);
- amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
- AMD_PG_STATE_UNGATE);
+ amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+ AMD_CG_STATE_UNGATE);
+ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+ AMD_PG_STATE_UNGATE);
}
}
@@ -991,7 +991,7 @@ out:
*
*/
void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib,
- unsigned vm_id, bool ctx_switch)
+ unsigned vmid, bool ctx_switch)
{
amdgpu_ring_write(ring, VCE_CMD_IB);
amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
index 5ce54cde472d..0fd378ae92c3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
@@ -46,7 +46,7 @@ struct amdgpu_vce {
struct amdgpu_ring ring[AMDGPU_MAX_VCE_RINGS];
struct amdgpu_irq_src irq;
unsigned harvest_config;
- struct amd_sched_entity entity;
+ struct drm_sched_entity entity;
uint32_t srbm_soft_reset;
unsigned num_rings;
};
@@ -63,7 +63,7 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp);
int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx);
int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p, uint32_t ib_idx);
void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib,
- unsigned vm_id, bool ctx_switch);
+ unsigned vmid, bool ctx_switch);
void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
unsigned flags);
int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index d7ba048c2f80..837962118dbc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -35,7 +35,6 @@
#include "soc15d.h"
#include "soc15_common.h"
-#include "soc15ip.h"
#include "vcn/vcn_1_0_offset.h"
/* 1 second timeout */
@@ -51,7 +50,7 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work);
int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
{
struct amdgpu_ring *ring;
- struct amd_sched_rq *rq;
+ struct drm_sched_rq *rq;
unsigned long bo_size;
const char *fw_name;
const struct common_firmware_header *hdr;
@@ -104,8 +103,8 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
}
ring = &adev->vcn.ring_dec;
- rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
- r = amd_sched_entity_init(&ring->sched, &adev->vcn.entity_dec,
+ rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
+ r = drm_sched_entity_init(&ring->sched, &adev->vcn.entity_dec,
rq, amdgpu_sched_jobs, NULL);
if (r != 0) {
DRM_ERROR("Failed setting up VCN dec run queue.\n");
@@ -113,8 +112,8 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
}
ring = &adev->vcn.ring_enc[0];
- rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
- r = amd_sched_entity_init(&ring->sched, &adev->vcn.entity_enc,
+ rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
+ r = drm_sched_entity_init(&ring->sched, &adev->vcn.entity_enc,
rq, amdgpu_sched_jobs, NULL);
if (r != 0) {
DRM_ERROR("Failed setting up VCN enc run queue.\n");
@@ -130,9 +129,9 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
kfree(adev->vcn.saved_bo);
- amd_sched_entity_fini(&adev->vcn.ring_dec.sched, &adev->vcn.entity_dec);
+ drm_sched_entity_fini(&adev->vcn.ring_dec.sched, &adev->vcn.entity_dec);
- amd_sched_entity_fini(&adev->vcn.ring_enc[0].sched, &adev->vcn.entity_enc);
+ drm_sched_entity_fini(&adev->vcn.ring_enc[0].sched, &adev->vcn.entity_enc);
amdgpu_bo_free_kernel(&adev->vcn.vcpu_bo,
&adev->vcn.gpu_addr,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index d50ba0657854..2fd7db891689 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -56,8 +56,8 @@ struct amdgpu_vcn {
struct amdgpu_ring ring_dec;
struct amdgpu_ring ring_enc[AMDGPU_VCN_MAX_ENC_RINGS];
struct amdgpu_irq_src irq;
- struct amd_sched_entity entity_dec;
- struct amd_sched_entity entity_enc;
+ struct drm_sched_entity entity_dec;
+ struct drm_sched_entity entity_enc;
unsigned num_enc_rings;
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 3ecdbdfb04dd..d4510807a692 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -34,52 +34,6 @@
#include "amdgpu_trace.h"
/*
- * PASID manager
- *
- * PASIDs are global address space identifiers that can be shared
- * between the GPU, an IOMMU and the driver. VMs on different devices
- * may use the same PASID if they share the same address
- * space. Therefore PASIDs are allocated using a global IDA. VMs are
- * looked up from the PASID per amdgpu_device.
- */
-static DEFINE_IDA(amdgpu_vm_pasid_ida);
-
-/**
- * amdgpu_vm_alloc_pasid - Allocate a PASID
- * @bits: Maximum width of the PASID in bits, must be at least 1
- *
- * Allocates a PASID of the given width while keeping smaller PASIDs
- * available if possible.
- *
- * Returns a positive integer on success. Returns %-EINVAL if bits==0.
- * Returns %-ENOSPC if no PASID was available. Returns %-ENOMEM on
- * memory allocation failure.
- */
-int amdgpu_vm_alloc_pasid(unsigned int bits)
-{
- int pasid = -EINVAL;
-
- for (bits = min(bits, 31U); bits > 0; bits--) {
- pasid = ida_simple_get(&amdgpu_vm_pasid_ida,
- 1U << (bits - 1), 1U << bits,
- GFP_KERNEL);
- if (pasid != -ENOSPC)
- break;
- }
-
- return pasid;
-}
-
-/**
- * amdgpu_vm_free_pasid - Free a PASID
- * @pasid: PASID to free
- */
-void amdgpu_vm_free_pasid(unsigned int pasid)
-{
- ida_simple_remove(&amdgpu_vm_pasid_ida, pasid);
-}
-
-/*
* GPUVM
* GPUVM is similar to the legacy gart on older asics, however
* rather than there being a single global gart table
@@ -148,12 +102,23 @@ struct amdgpu_prt_cb {
static unsigned amdgpu_vm_level_shift(struct amdgpu_device *adev,
unsigned level)
{
- if (level != adev->vm_manager.num_level)
- return 9 * (adev->vm_manager.num_level - level - 1) +
+ unsigned shift = 0xff;
+
+ switch (level) {
+ case AMDGPU_VM_PDB2:
+ case AMDGPU_VM_PDB1:
+ case AMDGPU_VM_PDB0:
+ shift = 9 * (AMDGPU_VM_PDB0 - level) +
adev->vm_manager.block_size;
- else
- /* For the page tables on the leaves */
- return 0;
+ break;
+ case AMDGPU_VM_PTB:
+ shift = 0;
+ break;
+ default:
+ dev_err(adev->dev, "the level%d isn't supported.\n", level);
+ }
+
+ return shift;
}
/**
@@ -166,12 +131,13 @@ static unsigned amdgpu_vm_level_shift(struct amdgpu_device *adev,
static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev,
unsigned level)
{
- unsigned shift = amdgpu_vm_level_shift(adev, 0);
+ unsigned shift = amdgpu_vm_level_shift(adev,
+ adev->vm_manager.root_level);
- if (level == 0)
+ if (level == adev->vm_manager.root_level)
/* For the root directory */
return round_up(adev->vm_manager.max_pfn, 1 << shift) >> shift;
- else if (level != adev->vm_manager.num_level)
+ else if (level != AMDGPU_VM_PTB)
/* Everything in between */
return 512;
else
@@ -329,9 +295,6 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
to >= amdgpu_vm_num_entries(adev, level))
return -EINVAL;
- if (to > parent->last_entry_used)
- parent->last_entry_used = to;
-
++level;
saddr = saddr & ((1 << shift) - 1);
eaddr = eaddr & ((1 << shift) - 1);
@@ -346,7 +309,7 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
if (vm->pte_support_ats) {
init_value = AMDGPU_PTE_DEFAULT_ATC;
- if (level != adev->vm_manager.num_level - 1)
+ if (level != AMDGPU_VM_PTB)
init_value |= AMDGPU_PDE_PTE;
}
@@ -386,10 +349,9 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
spin_lock(&vm->status_lock);
list_add(&entry->base.vm_status, &vm->relocated);
spin_unlock(&vm->status_lock);
- entry->addr = 0;
}
- if (level < adev->vm_manager.num_level) {
+ if (level < AMDGPU_VM_PTB) {
uint64_t sub_saddr = (pt_idx == from) ? saddr : 0;
uint64_t sub_eaddr = (pt_idx == to) ? eaddr :
((1 << shift) - 1);
@@ -435,287 +397,8 @@ int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
saddr /= AMDGPU_GPU_PAGE_SIZE;
eaddr /= AMDGPU_GPU_PAGE_SIZE;
- return amdgpu_vm_alloc_levels(adev, vm, &vm->root, saddr, eaddr, 0);
-}
-
-/**
- * amdgpu_vm_had_gpu_reset - check if reset occurred since last use
- *
- * @adev: amdgpu_device pointer
- * @id: VMID structure
- *
- * Check if GPU reset occurred since last use of the VMID.
- */
-static bool amdgpu_vm_had_gpu_reset(struct amdgpu_device *adev,
- struct amdgpu_vm_id *id)
-{
- return id->current_gpu_reset_count !=
- atomic_read(&adev->gpu_reset_counter);
-}
-
-static bool amdgpu_vm_reserved_vmid_ready(struct amdgpu_vm *vm, unsigned vmhub)
-{
- return !!vm->reserved_vmid[vmhub];
-}
-
-/* idr_mgr->lock must be held */
-static int amdgpu_vm_grab_reserved_vmid_locked(struct amdgpu_vm *vm,
- struct amdgpu_ring *ring,
- struct amdgpu_sync *sync,
- struct dma_fence *fence,
- struct amdgpu_job *job)
-{
- struct amdgpu_device *adev = ring->adev;
- unsigned vmhub = ring->funcs->vmhub;
- uint64_t fence_context = adev->fence_context + ring->idx;
- struct amdgpu_vm_id *id = vm->reserved_vmid[vmhub];
- struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
- struct dma_fence *updates = sync->last_vm_update;
- int r = 0;
- struct dma_fence *flushed, *tmp;
- bool needs_flush = vm->use_cpu_for_update;
-
- flushed = id->flushed_updates;
- if ((amdgpu_vm_had_gpu_reset(adev, id)) ||
- (atomic64_read(&id->owner) != vm->client_id) ||
- (job->vm_pd_addr != id->pd_gpu_addr) ||
- (updates && (!flushed || updates->context != flushed->context ||
- dma_fence_is_later(updates, flushed))) ||
- (!id->last_flush || (id->last_flush->context != fence_context &&
- !dma_fence_is_signaled(id->last_flush)))) {
- needs_flush = true;
- /* to prevent one context starved by another context */
- id->pd_gpu_addr = 0;
- tmp = amdgpu_sync_peek_fence(&id->active, ring);
- if (tmp) {
- r = amdgpu_sync_fence(adev, sync, tmp, false);
- return r;
- }
- }
-
- /* Good we can use this VMID. Remember this submission as
- * user of the VMID.
- */
- r = amdgpu_sync_fence(ring->adev, &id->active, fence, false);
- if (r)
- goto out;
-
- if (updates && (!flushed || updates->context != flushed->context ||
- dma_fence_is_later(updates, flushed))) {
- dma_fence_put(id->flushed_updates);
- id->flushed_updates = dma_fence_get(updates);
- }
- id->pd_gpu_addr = job->vm_pd_addr;
- atomic64_set(&id->owner, vm->client_id);
- job->vm_needs_flush = needs_flush;
- if (needs_flush) {
- dma_fence_put(id->last_flush);
- id->last_flush = NULL;
- }
- job->vm_id = id - id_mgr->ids;
- trace_amdgpu_vm_grab_id(vm, ring, job);
-out:
- return r;
-}
-
-/**
- * amdgpu_vm_grab_id - allocate the next free VMID
- *
- * @vm: vm to allocate id for
- * @ring: ring we want to submit job to
- * @sync: sync object where we add dependencies
- * @fence: fence protecting ID from reuse
- *
- * Allocate an id for the vm, adding fences to the sync obj as necessary.
- */
-int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
- struct amdgpu_sync *sync, struct dma_fence *fence,
- struct amdgpu_job *job)
-{
- struct amdgpu_device *adev = ring->adev;
- unsigned vmhub = ring->funcs->vmhub;
- struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
- uint64_t fence_context = adev->fence_context + ring->idx;
- struct dma_fence *updates = sync->last_vm_update;
- struct amdgpu_vm_id *id, *idle;
- struct dma_fence **fences;
- unsigned i;
- int r = 0;
-
- mutex_lock(&id_mgr->lock);
- if (amdgpu_vm_reserved_vmid_ready(vm, vmhub)) {
- r = amdgpu_vm_grab_reserved_vmid_locked(vm, ring, sync, fence, job);
- mutex_unlock(&id_mgr->lock);
- return r;
- }
- fences = kmalloc_array(sizeof(void *), id_mgr->num_ids, GFP_KERNEL);
- if (!fences) {
- mutex_unlock(&id_mgr->lock);
- return -ENOMEM;
- }
- /* Check if we have an idle VMID */
- i = 0;
- list_for_each_entry(idle, &id_mgr->ids_lru, list) {
- fences[i] = amdgpu_sync_peek_fence(&idle->active, ring);
- if (!fences[i])
- break;
- ++i;
- }
-
- /* If we can't find an idle VMID to use, wait till one becomes available */
- if (&idle->list == &id_mgr->ids_lru) {
- u64 fence_context = adev->vm_manager.fence_context + ring->idx;
- unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
- struct dma_fence_array *array;
- unsigned j;
-
- for (j = 0; j < i; ++j)
- dma_fence_get(fences[j]);
-
- array = dma_fence_array_create(i, fences, fence_context,
- seqno, true);
- if (!array) {
- for (j = 0; j < i; ++j)
- dma_fence_put(fences[j]);
- kfree(fences);
- r = -ENOMEM;
- goto error;
- }
-
-
- r = amdgpu_sync_fence(ring->adev, sync, &array->base, false);
- dma_fence_put(&array->base);
- if (r)
- goto error;
-
- mutex_unlock(&id_mgr->lock);
- return 0;
-
- }
- kfree(fences);
-
- job->vm_needs_flush = vm->use_cpu_for_update;
- /* Check if we can use a VMID already assigned to this VM */
- list_for_each_entry_reverse(id, &id_mgr->ids_lru, list) {
- struct dma_fence *flushed;
- bool needs_flush = vm->use_cpu_for_update;
-
- /* Check all the prerequisites to using this VMID */
- if (amdgpu_vm_had_gpu_reset(adev, id))
- continue;
-
- if (atomic64_read(&id->owner) != vm->client_id)
- continue;
-
- if (job->vm_pd_addr != id->pd_gpu_addr)
- continue;
-
- if (!id->last_flush ||
- (id->last_flush->context != fence_context &&
- !dma_fence_is_signaled(id->last_flush)))
- needs_flush = true;
-
- flushed = id->flushed_updates;
- if (updates && (!flushed || dma_fence_is_later(updates, flushed)))
- needs_flush = true;
-
- /* Concurrent flushes are only possible starting with Vega10 */
- if (adev->asic_type < CHIP_VEGA10 && needs_flush)
- continue;
-
- /* Good we can use this VMID. Remember this submission as
- * user of the VMID.
- */
- r = amdgpu_sync_fence(ring->adev, &id->active, fence, false);
- if (r)
- goto error;
-
- if (updates && (!flushed || dma_fence_is_later(updates, flushed))) {
- dma_fence_put(id->flushed_updates);
- id->flushed_updates = dma_fence_get(updates);
- }
-
- if (needs_flush)
- goto needs_flush;
- else
- goto no_flush_needed;
-
- };
-
- /* Still no ID to use? Then use the idle one found earlier */
- id = idle;
-
- /* Remember this submission as user of the VMID */
- r = amdgpu_sync_fence(ring->adev, &id->active, fence, false);
- if (r)
- goto error;
-
- id->pd_gpu_addr = job->vm_pd_addr;
- dma_fence_put(id->flushed_updates);
- id->flushed_updates = dma_fence_get(updates);
- atomic64_set(&id->owner, vm->client_id);
-
-needs_flush:
- job->vm_needs_flush = true;
- dma_fence_put(id->last_flush);
- id->last_flush = NULL;
-
-no_flush_needed:
- list_move_tail(&id->list, &id_mgr->ids_lru);
-
- job->vm_id = id - id_mgr->ids;
- trace_amdgpu_vm_grab_id(vm, ring, job);
-
-error:
- mutex_unlock(&id_mgr->lock);
- return r;
-}
-
-static void amdgpu_vm_free_reserved_vmid(struct amdgpu_device *adev,
- struct amdgpu_vm *vm,
- unsigned vmhub)
-{
- struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
-
- mutex_lock(&id_mgr->lock);
- if (vm->reserved_vmid[vmhub]) {
- list_add(&vm->reserved_vmid[vmhub]->list,
- &id_mgr->ids_lru);
- vm->reserved_vmid[vmhub] = NULL;
- atomic_dec(&id_mgr->reserved_vmid_num);
- }
- mutex_unlock(&id_mgr->lock);
-}
-
-static int amdgpu_vm_alloc_reserved_vmid(struct amdgpu_device *adev,
- struct amdgpu_vm *vm,
- unsigned vmhub)
-{
- struct amdgpu_vm_id_manager *id_mgr;
- struct amdgpu_vm_id *idle;
- int r = 0;
-
- id_mgr = &adev->vm_manager.id_mgr[vmhub];
- mutex_lock(&id_mgr->lock);
- if (vm->reserved_vmid[vmhub])
- goto unlock;
- if (atomic_inc_return(&id_mgr->reserved_vmid_num) >
- AMDGPU_VM_MAX_RESERVED_VMID) {
- DRM_ERROR("Over limitation of reserved vmid\n");
- atomic_dec(&id_mgr->reserved_vmid_num);
- r = -EINVAL;
- goto unlock;
- }
- /* Select the first entry VMID */
- idle = list_first_entry(&id_mgr->ids_lru, struct amdgpu_vm_id, list);
- list_del_init(&idle->list);
- vm->reserved_vmid[vmhub] = idle;
- mutex_unlock(&id_mgr->lock);
-
- return 0;
-unlock:
- mutex_unlock(&id_mgr->lock);
- return r;
+ return amdgpu_vm_alloc_levels(adev, vm, &vm->root, saddr, eaddr,
+ adev->vm_manager.root_level);
}
/**
@@ -732,7 +415,7 @@ void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev)
has_compute_vm_bug = false;
- ip_block = amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
+ ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
if (ip_block) {
/* Compute has a VM bug for GFX version < 7.
Compute has a VM bug for GFX 8 MEC firmware version < 673.*/
@@ -758,14 +441,14 @@ bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
{
struct amdgpu_device *adev = ring->adev;
unsigned vmhub = ring->funcs->vmhub;
- struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
- struct amdgpu_vm_id *id;
+ struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
+ struct amdgpu_vmid *id;
bool gds_switch_needed;
bool vm_flush_needed = job->vm_needs_flush || ring->has_compute_vm_bug;
- if (job->vm_id == 0)
+ if (job->vmid == 0)
return false;
- id = &id_mgr->ids[job->vm_id];
+ id = &id_mgr->ids[job->vmid];
gds_switch_needed = ring->funcs->emit_gds_switch && (
id->gds_base != job->gds_base ||
id->gds_size != job->gds_size ||
@@ -774,7 +457,7 @@ bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
id->oa_base != job->oa_base ||
id->oa_size != job->oa_size);
- if (amdgpu_vm_had_gpu_reset(adev, id))
+ if (amdgpu_vmid_had_gpu_reset(adev, id))
return true;
return vm_flush_needed || gds_switch_needed;
@@ -789,7 +472,7 @@ static bool amdgpu_vm_is_large_bar(struct amdgpu_device *adev)
* amdgpu_vm_flush - hardware flush the vm
*
* @ring: ring to use for flush
- * @vm_id: vmid number to use
+ * @vmid: vmid number to use
* @pd_addr: address of the page directory
*
* Emit a VM flush when it is necessary.
@@ -798,8 +481,8 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_
{
struct amdgpu_device *adev = ring->adev;
unsigned vmhub = ring->funcs->vmhub;
- struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
- struct amdgpu_vm_id *id = &id_mgr->ids[job->vm_id];
+ struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
+ struct amdgpu_vmid *id = &id_mgr->ids[job->vmid];
bool gds_switch_needed = ring->funcs->emit_gds_switch && (
id->gds_base != job->gds_base ||
id->gds_size != job->gds_size ||
@@ -811,7 +494,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_
unsigned patch_offset = 0;
int r;
- if (amdgpu_vm_had_gpu_reset(adev, id)) {
+ if (amdgpu_vmid_had_gpu_reset(adev, id)) {
gds_switch_needed = true;
vm_flush_needed = true;
}
@@ -828,8 +511,8 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_
if (ring->funcs->emit_vm_flush && vm_flush_needed) {
struct dma_fence *fence;
- trace_amdgpu_vm_flush(ring, job->vm_id, job->vm_pd_addr);
- amdgpu_ring_emit_vm_flush(ring, job->vm_id, job->vm_pd_addr);
+ trace_amdgpu_vm_flush(ring, job->vmid, job->vm_pd_addr);
+ amdgpu_ring_emit_vm_flush(ring, job->vmid, job->vm_pd_addr);
r = amdgpu_fence_emit(ring, &fence);
if (r)
@@ -849,7 +532,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_
id->gws_size = job->gws_size;
id->oa_base = job->oa_base;
id->oa_size = job->oa_size;
- amdgpu_ring_emit_gds_switch(ring, job->vm_id, job->gds_base,
+ amdgpu_ring_emit_gds_switch(ring, job->vmid, job->gds_base,
job->gds_size, job->gws_base,
job->gws_size, job->oa_base,
job->oa_size);
@@ -867,49 +550,6 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_
}
/**
- * amdgpu_vm_reset_id - reset VMID to zero
- *
- * @adev: amdgpu device structure
- * @vm_id: vmid number to use
- *
- * Reset saved GDW, GWS and OA to force switch on next flush.
- */
-void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vmhub,
- unsigned vmid)
-{
- struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
- struct amdgpu_vm_id *id = &id_mgr->ids[vmid];
-
- atomic64_set(&id->owner, 0);
- id->gds_base = 0;
- id->gds_size = 0;
- id->gws_base = 0;
- id->gws_size = 0;
- id->oa_base = 0;
- id->oa_size = 0;
-}
-
-/**
- * amdgpu_vm_reset_all_id - reset VMID to zero
- *
- * @adev: amdgpu device structure
- *
- * Reset VMID to force flush on next use
- */
-void amdgpu_vm_reset_all_ids(struct amdgpu_device *adev)
-{
- unsigned i, j;
-
- for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
- struct amdgpu_vm_id_manager *id_mgr =
- &adev->vm_manager.id_mgr[i];
-
- for (j = 1; j < id_mgr->num_ids; ++j)
- amdgpu_vm_reset_id(adev, i, j);
- }
-}
-
-/**
* amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
*
* @vm: requested vm
@@ -1060,162 +700,52 @@ static int amdgpu_vm_wait_pd(struct amdgpu_device *adev, struct amdgpu_vm *vm,
}
/*
- * amdgpu_vm_update_level - update a single level in the hierarchy
+ * amdgpu_vm_update_pde - update a single level in the hierarchy
*
- * @adev: amdgpu_device pointer
+ * @param: parameters for the update
* @vm: requested vm
* @parent: parent directory
+ * @entry: entry to update
*
- * Makes sure all entries in @parent are up to date.
- * Returns 0 for success, error for failure.
+ * Makes sure the requested entry in parent is up to date.
*/
-static int amdgpu_vm_update_level(struct amdgpu_device *adev,
- struct amdgpu_vm *vm,
- struct amdgpu_vm_pt *parent)
+static void amdgpu_vm_update_pde(struct amdgpu_pte_update_params *params,
+ struct amdgpu_vm *vm,
+ struct amdgpu_vm_pt *parent,
+ struct amdgpu_vm_pt *entry)
{
- struct amdgpu_bo *shadow;
- struct amdgpu_ring *ring = NULL;
+ struct amdgpu_bo *bo = entry->base.bo, *shadow = NULL, *pbo;
uint64_t pd_addr, shadow_addr = 0;
- uint64_t last_pde = ~0, last_pt = ~0, last_shadow = ~0;
- unsigned count = 0, pt_idx, ndw = 0;
- struct amdgpu_job *job;
- struct amdgpu_pte_update_params params;
- struct dma_fence *fence = NULL;
- uint32_t incr;
-
- int r;
+ uint64_t pde, pt, flags;
+ unsigned level;
- if (!parent->entries)
- return 0;
-
- memset(&params, 0, sizeof(params));
- params.adev = adev;
- shadow = parent->base.bo->shadow;
+ /* Don't update huge pages here */
+ if (entry->huge)
+ return;
if (vm->use_cpu_for_update) {
pd_addr = (unsigned long)amdgpu_bo_kptr(parent->base.bo);
- r = amdgpu_vm_wait_pd(adev, vm, AMDGPU_FENCE_OWNER_VM);
- if (unlikely(r))
- return r;
-
- params.func = amdgpu_vm_cpu_set_ptes;
} else {
- ring = container_of(vm->entity.sched, struct amdgpu_ring,
- sched);
-
- /* padding, etc. */
- ndw = 64;
-
- /* assume the worst case */
- ndw += parent->last_entry_used * 6;
-
pd_addr = amdgpu_bo_gpu_offset(parent->base.bo);
-
- if (shadow) {
+ shadow = parent->base.bo->shadow;
+ if (shadow)
shadow_addr = amdgpu_bo_gpu_offset(shadow);
- ndw *= 2;
- } else {
- shadow_addr = 0;
- }
-
- r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
- if (r)
- return r;
-
- params.ib = &job->ibs[0];
- params.func = amdgpu_vm_do_set_ptes;
}
+ for (level = 0, pbo = parent->base.bo->parent; pbo; ++level)
+ pbo = pbo->parent;
- /* walk over the address space and update the directory */
- for (pt_idx = 0; pt_idx <= parent->last_entry_used; ++pt_idx) {
- struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];
- struct amdgpu_bo *bo = entry->base.bo;
- uint64_t pde, pt;
-
- if (bo == NULL)
- continue;
-
- spin_lock(&vm->status_lock);
- list_del_init(&entry->base.vm_status);
- spin_unlock(&vm->status_lock);
-
- pt = amdgpu_bo_gpu_offset(bo);
- pt = amdgpu_gart_get_vm_pde(adev, pt);
- /* Don't update huge pages here */
- if ((parent->entries[pt_idx].addr & AMDGPU_PDE_PTE) ||
- parent->entries[pt_idx].addr == (pt | AMDGPU_PTE_VALID))
- continue;
-
- parent->entries[pt_idx].addr = pt | AMDGPU_PTE_VALID;
-
- pde = pd_addr + pt_idx * 8;
- incr = amdgpu_bo_size(bo);
- if (((last_pde + 8 * count) != pde) ||
- ((last_pt + incr * count) != pt) ||
- (count == AMDGPU_VM_MAX_UPDATE_SIZE)) {
-
- if (count) {
- if (shadow)
- params.func(&params,
- last_shadow,
- last_pt, count,
- incr,
- AMDGPU_PTE_VALID);
-
- params.func(&params, last_pde,
- last_pt, count, incr,
- AMDGPU_PTE_VALID);
- }
-
- count = 1;
- last_pde = pde;
- last_shadow = shadow_addr + pt_idx * 8;
- last_pt = pt;
- } else {
- ++count;
- }
- }
-
- if (count) {
- if (vm->root.base.bo->shadow)
- params.func(&params, last_shadow, last_pt,
- count, incr, AMDGPU_PTE_VALID);
-
- params.func(&params, last_pde, last_pt,
- count, incr, AMDGPU_PTE_VALID);
- }
-
- if (!vm->use_cpu_for_update) {
- if (params.ib->length_dw == 0) {
- amdgpu_job_free(job);
- } else {
- amdgpu_ring_pad_ib(ring, params.ib);
- amdgpu_sync_resv(adev, &job->sync,
- parent->base.bo->tbo.resv,
- AMDGPU_FENCE_OWNER_VM, false);
- if (shadow)
- amdgpu_sync_resv(adev, &job->sync,
- shadow->tbo.resv,
- AMDGPU_FENCE_OWNER_VM, false);
-
- WARN_ON(params.ib->length_dw > ndw);
- r = amdgpu_job_submit(job, ring, &vm->entity,
- AMDGPU_FENCE_OWNER_VM, &fence);
- if (r)
- goto error_free;
-
- amdgpu_bo_fence(parent->base.bo, fence, true);
- dma_fence_put(vm->last_update);
- vm->last_update = fence;
- }
+ level += params->adev->vm_manager.root_level;
+ pt = amdgpu_bo_gpu_offset(bo);
+ flags = AMDGPU_PTE_VALID;
+ amdgpu_gart_get_vm_pde(params->adev, level, &pt, &flags);
+ if (shadow) {
+ pde = shadow_addr + (entry - parent->entries) * 8;
+ params->func(params, pde, pt, 1, 0, flags);
}
- return 0;
-
-error_free:
- amdgpu_job_free(job);
- return r;
+ pde = pd_addr + (entry - parent->entries) * 8;
+ params->func(params, pde, pt, 1, 0, flags);
}
/*
@@ -1225,27 +755,29 @@ error_free:
*
* Mark all PD level as invalid after an error.
*/
-static void amdgpu_vm_invalidate_level(struct amdgpu_vm *vm,
- struct amdgpu_vm_pt *parent)
+static void amdgpu_vm_invalidate_level(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm,
+ struct amdgpu_vm_pt *parent,
+ unsigned level)
{
- unsigned pt_idx;
+ unsigned pt_idx, num_entries;
/*
* Recurse into the subdirectories. This recursion is harmless because
* we only have a maximum of 5 layers.
*/
- for (pt_idx = 0; pt_idx <= parent->last_entry_used; ++pt_idx) {
+ num_entries = amdgpu_vm_num_entries(adev, level);
+ for (pt_idx = 0; pt_idx < num_entries; ++pt_idx) {
struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];
if (!entry->base.bo)
continue;
- entry->addr = ~0ULL;
spin_lock(&vm->status_lock);
if (list_empty(&entry->base.vm_status))
list_add(&entry->base.vm_status, &vm->relocated);
spin_unlock(&vm->status_lock);
- amdgpu_vm_invalidate_level(vm, entry);
+ amdgpu_vm_invalidate_level(adev, vm, entry, level + 1);
}
}
@@ -1261,38 +793,63 @@ static void amdgpu_vm_invalidate_level(struct amdgpu_vm *vm,
int amdgpu_vm_update_directories(struct amdgpu_device *adev,
struct amdgpu_vm *vm)
{
+ struct amdgpu_pte_update_params params;
+ struct amdgpu_job *job;
+ unsigned ndw = 0;
int r = 0;
+ if (list_empty(&vm->relocated))
+ return 0;
+
+restart:
+ memset(&params, 0, sizeof(params));
+ params.adev = adev;
+
+ if (vm->use_cpu_for_update) {
+ r = amdgpu_vm_wait_pd(adev, vm, AMDGPU_FENCE_OWNER_VM);
+ if (unlikely(r))
+ return r;
+
+ params.func = amdgpu_vm_cpu_set_ptes;
+ } else {
+ ndw = 512 * 8;
+ r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
+ if (r)
+ return r;
+
+ params.ib = &job->ibs[0];
+ params.func = amdgpu_vm_do_set_ptes;
+ }
+
spin_lock(&vm->status_lock);
while (!list_empty(&vm->relocated)) {
- struct amdgpu_vm_bo_base *bo_base;
+ struct amdgpu_vm_bo_base *bo_base, *parent;
+ struct amdgpu_vm_pt *pt, *entry;
struct amdgpu_bo *bo;
bo_base = list_first_entry(&vm->relocated,
struct amdgpu_vm_bo_base,
vm_status);
+ list_del_init(&bo_base->vm_status);
spin_unlock(&vm->status_lock);
bo = bo_base->bo->parent;
- if (bo) {
- struct amdgpu_vm_bo_base *parent;
- struct amdgpu_vm_pt *pt;
-
- parent = list_first_entry(&bo->va,
- struct amdgpu_vm_bo_base,
- bo_list);
- pt = container_of(parent, struct amdgpu_vm_pt, base);
-
- r = amdgpu_vm_update_level(adev, vm, pt);
- if (r) {
- amdgpu_vm_invalidate_level(vm, &vm->root);
- return r;
- }
- spin_lock(&vm->status_lock);
- } else {
+ if (!bo) {
spin_lock(&vm->status_lock);
- list_del_init(&bo_base->vm_status);
+ continue;
}
+
+ parent = list_first_entry(&bo->va, struct amdgpu_vm_bo_base,
+ bo_list);
+ pt = container_of(parent, struct amdgpu_vm_pt, base);
+ entry = container_of(bo_base, struct amdgpu_vm_pt, base);
+
+ amdgpu_vm_update_pde(&params, vm, pt, entry);
+
+ spin_lock(&vm->status_lock);
+ if (!vm->use_cpu_for_update &&
+ (ndw - params.ib->length_dw) < 32)
+ break;
}
spin_unlock(&vm->status_lock);
@@ -1300,8 +857,44 @@ int amdgpu_vm_update_directories(struct amdgpu_device *adev,
/* Flush HDP */
mb();
amdgpu_gart_flush_gpu_tlb(adev, 0);
+ } else if (params.ib->length_dw == 0) {
+ amdgpu_job_free(job);
+ } else {
+ struct amdgpu_bo *root = vm->root.base.bo;
+ struct amdgpu_ring *ring;
+ struct dma_fence *fence;
+
+ ring = container_of(vm->entity.sched, struct amdgpu_ring,
+ sched);
+
+ amdgpu_ring_pad_ib(ring, params.ib);
+ amdgpu_sync_resv(adev, &job->sync, root->tbo.resv,
+ AMDGPU_FENCE_OWNER_VM, false);
+ if (root->shadow)
+ amdgpu_sync_resv(adev, &job->sync,
+ root->shadow->tbo.resv,
+ AMDGPU_FENCE_OWNER_VM, false);
+
+ WARN_ON(params.ib->length_dw > ndw);
+ r = amdgpu_job_submit(job, ring, &vm->entity,
+ AMDGPU_FENCE_OWNER_VM, &fence);
+ if (r)
+ goto error;
+
+ amdgpu_bo_fence(root, fence, true);
+ dma_fence_put(vm->last_update);
+ vm->last_update = fence;
}
+ if (!list_empty(&vm->relocated))
+ goto restart;
+
+ return 0;
+
+error:
+ amdgpu_vm_invalidate_level(adev, vm, &vm->root,
+ adev->vm_manager.root_level);
+ amdgpu_job_free(job);
return r;
}
@@ -1319,19 +912,19 @@ void amdgpu_vm_get_entry(struct amdgpu_pte_update_params *p, uint64_t addr,
struct amdgpu_vm_pt **entry,
struct amdgpu_vm_pt **parent)
{
- unsigned level = 0;
+ unsigned level = p->adev->vm_manager.root_level;
*parent = NULL;
*entry = &p->vm->root;
while ((*entry)->entries) {
- unsigned idx = addr >> amdgpu_vm_level_shift(p->adev, level++);
+ unsigned shift = amdgpu_vm_level_shift(p->adev, level++);
- idx %= amdgpu_bo_size((*entry)->base.bo) / 8;
*parent = *entry;
- *entry = &(*entry)->entries[idx];
+ *entry = &(*entry)->entries[addr >> shift];
+ addr &= (1ULL << shift) - 1;
}
- if (level != p->adev->vm_manager.num_level)
+ if (level != AMDGPU_VM_PTB)
*entry = NULL;
}
@@ -1363,17 +956,18 @@ static void amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p,
!(flags & AMDGPU_PTE_VALID)) {
dst = amdgpu_bo_gpu_offset(entry->base.bo);
- dst = amdgpu_gart_get_vm_pde(p->adev, dst);
flags = AMDGPU_PTE_VALID;
} else {
/* Set the huge page flag to stop scanning at this PDE */
flags |= AMDGPU_PDE_PTE;
}
- if (entry->addr == (dst | flags))
+ if (!entry->huge && !(flags & AMDGPU_PDE_PTE))
return;
+ entry->huge = !!(flags & AMDGPU_PDE_PTE);
- entry->addr = (dst | flags);
+ amdgpu_gart_get_vm_pde(p->adev, AMDGPU_VM_PDB0,
+ &dst, &flags);
if (use_cpu_update) {
/* In case a huge page is replaced with a system
@@ -1447,7 +1041,7 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
amdgpu_vm_handle_huge_pages(params, entry, parent,
nptes, dst, flags);
/* We don't need to update PTEs for huge pages */
- if (entry->addr & AMDGPU_PDE_PTE)
+ if (entry->huge)
continue;
pt = entry->base.bo;
@@ -1606,13 +1200,19 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
*
* The second command is for the shadow pagetables.
*/
- ncmds = ((nptes >> min(adev->vm_manager.block_size, 11u)) + 1) * 2;
+ if (vm->root.base.bo->shadow)
+ ncmds = ((nptes >> min(adev->vm_manager.block_size, 11u)) + 1) * 2;
+ else
+ ncmds = ((nptes >> min(adev->vm_manager.block_size, 11u)) + 1);
/* padding, etc. */
ndw = 64;
/* one PDE write for each huge page */
- ndw += ((nptes >> adev->vm_manager.block_size) + 1) * 6;
+ if (vm->root.base.bo->shadow)
+ ndw += ((nptes >> adev->vm_manager.block_size) + 1) * 6 * 2;
+ else
+ ndw += ((nptes >> adev->vm_manager.block_size) + 1) * 6;
if (pages_addr) {
/* copy commands needed */
@@ -1688,7 +1288,8 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
error_free:
amdgpu_job_free(job);
- amdgpu_vm_invalidate_level(vm, &vm->root);
+ amdgpu_vm_invalidate_level(adev, vm, &vm->root,
+ adev->vm_manager.root_level);
return r;
}
@@ -2150,8 +1751,26 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
INIT_LIST_HEAD(&bo_va->valids);
INIT_LIST_HEAD(&bo_va->invalids);
- if (bo)
- list_add_tail(&bo_va->base.bo_list, &bo->va);
+ if (!bo)
+ return bo_va;
+
+ list_add_tail(&bo_va->base.bo_list, &bo->va);
+
+ if (bo->tbo.resv != vm->root.base.bo->tbo.resv)
+ return bo_va;
+
+ if (bo->preferred_domains &
+ amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type))
+ return bo_va;
+
+ /*
+ * We checked all the prerequisites, but it looks like this per VM BO
+ * is currently evicted. Add the BO to the evicted list to make sure it
+ * is validated on next VM use to avoid faults.
+ */
+ spin_lock(&vm->status_lock);
+ list_move_tail(&bo_va->base.vm_status, &vm->evicted);
+ spin_unlock(&vm->status_lock);
return bo_va;
}
@@ -2604,7 +2223,19 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t vm_size,
tmp >>= amdgpu_vm_block_size - 9;
tmp = DIV_ROUND_UP(fls64(tmp) - 1, 9) - 1;
adev->vm_manager.num_level = min(max_level, (unsigned)tmp);
-
+ switch (adev->vm_manager.num_level) {
+ case 3:
+ adev->vm_manager.root_level = AMDGPU_VM_PDB2;
+ break;
+ case 2:
+ adev->vm_manager.root_level = AMDGPU_VM_PDB1;
+ break;
+ case 1:
+ adev->vm_manager.root_level = AMDGPU_VM_PDB0;
+ break;
+ default:
+ dev_err(adev->dev, "VMPT only supports 2~4+1 levels\n");
+ }
/* block size depends on vm size and hw setup*/
if (amdgpu_vm_block_size != -1)
adev->vm_manager.block_size =
@@ -2643,13 +2274,12 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
AMDGPU_VM_PTE_COUNT(adev) * 8);
unsigned ring_instance;
struct amdgpu_ring *ring;
- struct amd_sched_rq *rq;
+ struct drm_sched_rq *rq;
int r, i;
u64 flags;
uint64_t init_pde_value = 0;
vm->va = RB_ROOT_CACHED;
- vm->client_id = atomic64_inc_return(&adev->vm_manager.client_counter);
for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
vm->reserved_vmid[i] = NULL;
spin_lock_init(&vm->status_lock);
@@ -2663,8 +2293,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
ring_instance = atomic_inc_return(&adev->vm_manager.vm_pte_next_ring);
ring_instance %= adev->vm_manager.vm_pte_num_rings;
ring = adev->vm_manager.vm_pte_rings[ring_instance];
- rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL];
- r = amd_sched_entity_init(&ring->sched, &vm->entity,
+ rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
+ r = drm_sched_entity_init(&ring->sched, &vm->entity,
rq, amdgpu_sched_jobs, NULL);
if (r)
return r;
@@ -2698,7 +2328,9 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
flags |= (AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
AMDGPU_GEM_CREATE_SHADOW);
- r = amdgpu_bo_create(adev, amdgpu_vm_bo_size(adev, 0), align, true,
+ r = amdgpu_bo_create(adev,
+ amdgpu_vm_bo_size(adev, adev->vm_manager.root_level),
+ align, true,
AMDGPU_GEM_DOMAIN_VRAM,
flags,
NULL, NULL, init_pde_value, &vm->root.base.bo);
@@ -2744,7 +2376,7 @@ error_free_root:
vm->root.base.bo = NULL;
error_free_sched_entity:
- amd_sched_entity_fini(&ring->sched, &vm->entity);
+ drm_sched_entity_fini(&ring->sched, &vm->entity);
return r;
}
@@ -2752,26 +2384,31 @@ error_free_sched_entity:
/**
* amdgpu_vm_free_levels - free PD/PT levels
*
- * @level: PD/PT starting level to free
+ * @adev: amdgpu device structure
+ * @parent: PD/PT starting level to free
+ * @level: level of parent structure
*
* Free the page directory or page table level and all sub levels.
*/
-static void amdgpu_vm_free_levels(struct amdgpu_vm_pt *level)
+static void amdgpu_vm_free_levels(struct amdgpu_device *adev,
+ struct amdgpu_vm_pt *parent,
+ unsigned level)
{
- unsigned i;
+ unsigned i, num_entries = amdgpu_vm_num_entries(adev, level);
- if (level->base.bo) {
- list_del(&level->base.bo_list);
- list_del(&level->base.vm_status);
- amdgpu_bo_unref(&level->base.bo->shadow);
- amdgpu_bo_unref(&level->base.bo);
+ if (parent->base.bo) {
+ list_del(&parent->base.bo_list);
+ list_del(&parent->base.vm_status);
+ amdgpu_bo_unref(&parent->base.bo->shadow);
+ amdgpu_bo_unref(&parent->base.bo);
}
- if (level->entries)
- for (i = 0; i <= level->last_entry_used; i++)
- amdgpu_vm_free_levels(&level->entries[i]);
+ if (parent->entries)
+ for (i = 0; i < num_entries; i++)
+ amdgpu_vm_free_levels(adev, &parent->entries[i],
+ level + 1);
- kvfree(level->entries);
+ kvfree(parent->entries);
}
/**
@@ -2803,7 +2440,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
}
- amd_sched_entity_fini(vm->entity.sched, &vm->entity);
+ drm_sched_entity_fini(vm->entity.sched, &vm->entity);
if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
dev_err(adev->dev, "still active bo inside vm\n");
@@ -2829,13 +2466,14 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
if (r) {
dev_err(adev->dev, "Leaking page tables because BO reservation failed\n");
} else {
- amdgpu_vm_free_levels(&vm->root);
+ amdgpu_vm_free_levels(adev, &vm->root,
+ adev->vm_manager.root_level);
amdgpu_bo_unreserve(root);
}
amdgpu_bo_unref(&root);
dma_fence_put(vm->last_update);
for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
- amdgpu_vm_free_reserved_vmid(adev, vm, i);
+ amdgpu_vmid_free_reserved(adev, vm, i);
}
/**
@@ -2877,23 +2515,9 @@ bool amdgpu_vm_pasid_fault_credit(struct amdgpu_device *adev,
*/
void amdgpu_vm_manager_init(struct amdgpu_device *adev)
{
- unsigned i, j;
-
- for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
- struct amdgpu_vm_id_manager *id_mgr =
- &adev->vm_manager.id_mgr[i];
+ unsigned i;
- mutex_init(&id_mgr->lock);
- INIT_LIST_HEAD(&id_mgr->ids_lru);
- atomic_set(&id_mgr->reserved_vmid_num, 0);
-
- /* skip over VMID 0, since it is the system VM */
- for (j = 1; j < id_mgr->num_ids; ++j) {
- amdgpu_vm_reset_id(adev, i, j);
- amdgpu_sync_create(&id_mgr->ids[i].active);
- list_add_tail(&id_mgr->ids[j].list, &id_mgr->ids_lru);
- }
- }
+ amdgpu_vmid_mgr_init(adev);
adev->vm_manager.fence_context =
dma_fence_context_alloc(AMDGPU_MAX_RINGS);
@@ -2901,7 +2525,6 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
adev->vm_manager.seqno[i] = 0;
atomic_set(&adev->vm_manager.vm_pte_next_ring, 0);
- atomic64_set(&adev->vm_manager.client_counter, 0);
spin_lock_init(&adev->vm_manager.prt_lock);
atomic_set(&adev->vm_manager.num_prt_users, 0);
@@ -2934,24 +2557,10 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
*/
void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
{
- unsigned i, j;
-
WARN_ON(!idr_is_empty(&adev->vm_manager.pasid_idr));
idr_destroy(&adev->vm_manager.pasid_idr);
- for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
- struct amdgpu_vm_id_manager *id_mgr =
- &adev->vm_manager.id_mgr[i];
-
- mutex_destroy(&id_mgr->lock);
- for (j = 0; j < AMDGPU_NUM_VM; ++j) {
- struct amdgpu_vm_id *id = &id_mgr->ids[j];
-
- amdgpu_sync_free(&id->active);
- dma_fence_put(id->flushed_updates);
- dma_fence_put(id->last_flush);
- }
- }
+ amdgpu_vmid_mgr_fini(adev);
}
int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
@@ -2964,13 +2573,12 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
switch (args->in.op) {
case AMDGPU_VM_OP_RESERVE_VMID:
/* current, we only have requirement to reserve vmid from gfxhub */
- r = amdgpu_vm_alloc_reserved_vmid(adev, &fpriv->vm,
- AMDGPU_GFXHUB);
+ r = amdgpu_vmid_alloc_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB);
if (r)
return r;
break;
case AMDGPU_VM_OP_UNRESERVE_VMID:
- amdgpu_vm_free_reserved_vmid(adev, &fpriv->vm, AMDGPU_GFXHUB);
+ amdgpu_vmid_free_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB);
break;
default:
return -EINVAL;
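
The reworked level handling above replaces open-coded num_level arithmetic with the AMDGPU_VM_PDB2/PDB1/PDB0/PTB enum (added to amdgpu_vm.h below) plus a per-VM-manager root_level. A minimal sketch of the shift computation, mirroring amdgpu_vm_level_shift() from the hunk above; example_level_shift is a hypothetical stand-alone helper and block_size stands in for adev->vm_manager.block_size.

/* Matches the enum introduced in amdgpu_vm.h: PDB2 is the topmost
 * directory level, PTB the leaf page table.
 */
enum amdgpu_vm_level {
        AMDGPU_VM_PDB2,
        AMDGPU_VM_PDB1,
        AMDGPU_VM_PDB0,
        AMDGPU_VM_PTB
};

/* Each directory level above the PTB resolves 9 address bits on top of
 * the PTB block size; the PTB itself needs no extra shift.
 */
static unsigned example_level_shift(enum amdgpu_vm_level level,
                                    unsigned block_size)
{
        switch (level) {
        case AMDGPU_VM_PDB2:
        case AMDGPU_VM_PDB1:
        case AMDGPU_VM_PDB0:
                return 9 * (AMDGPU_VM_PDB0 - level) + block_size;
        case AMDGPU_VM_PTB:
        default:
                return 0;
        }
}
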
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 43ea131dd411..21a80f1bb2b9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -24,12 +24,14 @@
#ifndef __AMDGPU_VM_H__
#define __AMDGPU_VM_H__
-#include <linux/rbtree.h>
#include <linux/idr.h>
+#include <linux/kfifo.h>
+#include <linux/rbtree.h>
+#include <drm/gpu_scheduler.h>
-#include "gpu_scheduler.h"
#include "amdgpu_sync.h"
#include "amdgpu_ring.h"
+#include "amdgpu_ids.h"
struct amdgpu_bo_va;
struct amdgpu_job;
@@ -39,9 +41,6 @@ struct amdgpu_bo_list_entry;
* GPUVM handling
*/
-/* maximum number of VMIDs */
-#define AMDGPU_NUM_VM 16
-
/* Maximum number of PTEs the hardware can write with one command */
#define AMDGPU_VM_MAX_UPDATE_SIZE 0x3FFFF
@@ -69,6 +68,12 @@ struct amdgpu_bo_list_entry;
/* PDE is handled as PTE for VEGA10 */
#define AMDGPU_PDE_PTE (1ULL << 54)
+/* PTE is handled as PDE for VEGA10 (Translate Further) */
+#define AMDGPU_PTE_TF (1ULL << 56)
+
+/* PDE Block Fragment Size for VEGA10 */
+#define AMDGPU_PDE_BFS(a) ((uint64_t)a << 59)
+
/* VEGA10 only */
#define AMDGPU_PTE_MTYPE(a) ((uint64_t)a << 57)
#define AMDGPU_PTE_MTYPE_MASK AMDGPU_PTE_MTYPE(3ULL)
@@ -119,6 +124,16 @@ struct amdgpu_bo_list_entry;
#define AMDGPU_VM_USE_CPU_FOR_GFX (1 << 0)
#define AMDGPU_VM_USE_CPU_FOR_COMPUTE (1 << 1)
+/* VMPT level enumeration; the hierarchy is:
+ * PDB2->PDB1->PDB0->PTB
+ */
+enum amdgpu_vm_level {
+ AMDGPU_VM_PDB2,
+ AMDGPU_VM_PDB1,
+ AMDGPU_VM_PDB0,
+ AMDGPU_VM_PTB
+};
+
/* base structure for tracking BO usage in a VM */
struct amdgpu_vm_bo_base {
/* constant after initialization */
@@ -137,11 +152,10 @@ struct amdgpu_vm_bo_base {
struct amdgpu_vm_pt {
struct amdgpu_vm_bo_base base;
- uint64_t addr;
+ bool huge;
/* array of page tables, one for each directory entry */
struct amdgpu_vm_pt *entries;
- unsigned last_entry_used;
};
#define AMDGPU_VM_FAULT(pasid, addr) (((u64)(pasid) << 48) | (addr))
@@ -175,13 +189,11 @@ struct amdgpu_vm {
spinlock_t freed_lock;
/* Scheduler entity for page table updates */
- struct amd_sched_entity entity;
+ struct drm_sched_entity entity;
- /* client id and PASID (TODO: replace client_id with PASID) */
- u64 client_id;
unsigned int pasid;
/* dedicated to vm */
- struct amdgpu_vm_id *reserved_vmid[AMDGPU_MAX_VMHUBS];
+ struct amdgpu_vmid *reserved_vmid[AMDGPU_MAX_VMHUBS];
/* Flag to indicate if VM tables are updated by CPU or GPU (SDMA) */
bool use_cpu_for_update;
@@ -196,37 +208,9 @@ struct amdgpu_vm {
unsigned int fault_credit;
};
-struct amdgpu_vm_id {
- struct list_head list;
- struct amdgpu_sync active;
- struct dma_fence *last_flush;
- atomic64_t owner;
-
- uint64_t pd_gpu_addr;
- /* last flushed PD/PT update */
- struct dma_fence *flushed_updates;
-
- uint32_t current_gpu_reset_count;
-
- uint32_t gds_base;
- uint32_t gds_size;
- uint32_t gws_base;
- uint32_t gws_size;
- uint32_t oa_base;
- uint32_t oa_size;
-};
-
-struct amdgpu_vm_id_manager {
- struct mutex lock;
- unsigned num_ids;
- struct list_head ids_lru;
- struct amdgpu_vm_id ids[AMDGPU_NUM_VM];
- atomic_t reserved_vmid_num;
-};
-
struct amdgpu_vm_manager {
/* Handling of VMIDs */
- struct amdgpu_vm_id_manager id_mgr[AMDGPU_MAX_VMHUBS];
+ struct amdgpu_vmid_mgr id_mgr[AMDGPU_MAX_VMHUBS];
/* Handling of VM fences */
u64 fence_context;
@@ -236,6 +220,7 @@ struct amdgpu_vm_manager {
uint32_t num_level;
uint32_t block_size;
uint32_t fragment_size;
+ enum amdgpu_vm_level root_level;
/* vram base address for page table entry */
u64 vram_base_offset;
/* vm pte handling */
@@ -243,8 +228,6 @@ struct amdgpu_vm_manager {
struct amdgpu_ring *vm_pte_rings[AMDGPU_MAX_RINGS];
unsigned vm_pte_num_rings;
atomic_t vm_pte_next_ring;
- /* client id counter */
- atomic64_t client_counter;
/* partial resident texture handling */
spinlock_t prt_lock;
@@ -263,8 +246,6 @@ struct amdgpu_vm_manager {
spinlock_t pasid_lock;
};
-int amdgpu_vm_alloc_pasid(unsigned int bits);
-void amdgpu_vm_free_pasid(unsigned int pasid);
void amdgpu_vm_manager_init(struct amdgpu_device *adev);
void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
@@ -282,13 +263,7 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
uint64_t saddr, uint64_t size);
-int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
- struct amdgpu_sync *sync, struct dma_fence *fence,
- struct amdgpu_job *job);
int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync);
-void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vmhub,
- unsigned vmid);
-void amdgpu_vm_reset_all_ids(struct amdgpu_device *adev);
int amdgpu_vm_update_directories(struct amdgpu_device *adev,
struct amdgpu_vm *vm);
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
index f11c0aacf19f..a0943aa8d1d3 100644
--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
@@ -891,12 +891,12 @@ static void ci_dpm_powergate_uvd(void *handle, bool gate)
if (gate) {
/* stop the UVD block */
- amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
- AMD_PG_STATE_GATE);
+ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+ AMD_PG_STATE_GATE);
ci_update_uvd_dpm(adev, gate);
} else {
- amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
- AMD_PG_STATE_UNGATE);
+ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+ AMD_PG_STATE_UNGATE);
ci_update_uvd_dpm(adev, gate);
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index 8ba056a2a5da..8e59e65efd44 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -755,74 +755,74 @@ static void cik_init_golden_registers(struct amdgpu_device *adev)
switch (adev->asic_type) {
case CHIP_BONAIRE:
- amdgpu_program_register_sequence(adev,
- bonaire_mgcg_cgcg_init,
- ARRAY_SIZE(bonaire_mgcg_cgcg_init));
- amdgpu_program_register_sequence(adev,
- bonaire_golden_registers,
- ARRAY_SIZE(bonaire_golden_registers));
- amdgpu_program_register_sequence(adev,
- bonaire_golden_common_registers,
- ARRAY_SIZE(bonaire_golden_common_registers));
- amdgpu_program_register_sequence(adev,
- bonaire_golden_spm_registers,
- ARRAY_SIZE(bonaire_golden_spm_registers));
+ amdgpu_device_program_register_sequence(adev,
+ bonaire_mgcg_cgcg_init,
+ ARRAY_SIZE(bonaire_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ bonaire_golden_registers,
+ ARRAY_SIZE(bonaire_golden_registers));
+ amdgpu_device_program_register_sequence(adev,
+ bonaire_golden_common_registers,
+ ARRAY_SIZE(bonaire_golden_common_registers));
+ amdgpu_device_program_register_sequence(adev,
+ bonaire_golden_spm_registers,
+ ARRAY_SIZE(bonaire_golden_spm_registers));
break;
case CHIP_KABINI:
- amdgpu_program_register_sequence(adev,
- kalindi_mgcg_cgcg_init,
- ARRAY_SIZE(kalindi_mgcg_cgcg_init));
- amdgpu_program_register_sequence(adev,
- kalindi_golden_registers,
- ARRAY_SIZE(kalindi_golden_registers));
- amdgpu_program_register_sequence(adev,
- kalindi_golden_common_registers,
- ARRAY_SIZE(kalindi_golden_common_registers));
- amdgpu_program_register_sequence(adev,
- kalindi_golden_spm_registers,
- ARRAY_SIZE(kalindi_golden_spm_registers));
+ amdgpu_device_program_register_sequence(adev,
+ kalindi_mgcg_cgcg_init,
+ ARRAY_SIZE(kalindi_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ kalindi_golden_registers,
+ ARRAY_SIZE(kalindi_golden_registers));
+ amdgpu_device_program_register_sequence(adev,
+ kalindi_golden_common_registers,
+ ARRAY_SIZE(kalindi_golden_common_registers));
+ amdgpu_device_program_register_sequence(adev,
+ kalindi_golden_spm_registers,
+ ARRAY_SIZE(kalindi_golden_spm_registers));
break;
case CHIP_MULLINS:
- amdgpu_program_register_sequence(adev,
- kalindi_mgcg_cgcg_init,
- ARRAY_SIZE(kalindi_mgcg_cgcg_init));
- amdgpu_program_register_sequence(adev,
- godavari_golden_registers,
- ARRAY_SIZE(godavari_golden_registers));
- amdgpu_program_register_sequence(adev,
- kalindi_golden_common_registers,
- ARRAY_SIZE(kalindi_golden_common_registers));
- amdgpu_program_register_sequence(adev,
- kalindi_golden_spm_registers,
- ARRAY_SIZE(kalindi_golden_spm_registers));
+ amdgpu_device_program_register_sequence(adev,
+ kalindi_mgcg_cgcg_init,
+ ARRAY_SIZE(kalindi_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ godavari_golden_registers,
+ ARRAY_SIZE(godavari_golden_registers));
+ amdgpu_device_program_register_sequence(adev,
+ kalindi_golden_common_registers,
+ ARRAY_SIZE(kalindi_golden_common_registers));
+ amdgpu_device_program_register_sequence(adev,
+ kalindi_golden_spm_registers,
+ ARRAY_SIZE(kalindi_golden_spm_registers));
break;
case CHIP_KAVERI:
- amdgpu_program_register_sequence(adev,
- spectre_mgcg_cgcg_init,
- ARRAY_SIZE(spectre_mgcg_cgcg_init));
- amdgpu_program_register_sequence(adev,
- spectre_golden_registers,
- ARRAY_SIZE(spectre_golden_registers));
- amdgpu_program_register_sequence(adev,
- spectre_golden_common_registers,
- ARRAY_SIZE(spectre_golden_common_registers));
- amdgpu_program_register_sequence(adev,
- spectre_golden_spm_registers,
- ARRAY_SIZE(spectre_golden_spm_registers));
+ amdgpu_device_program_register_sequence(adev,
+ spectre_mgcg_cgcg_init,
+ ARRAY_SIZE(spectre_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ spectre_golden_registers,
+ ARRAY_SIZE(spectre_golden_registers));
+ amdgpu_device_program_register_sequence(adev,
+ spectre_golden_common_registers,
+ ARRAY_SIZE(spectre_golden_common_registers));
+ amdgpu_device_program_register_sequence(adev,
+ spectre_golden_spm_registers,
+ ARRAY_SIZE(spectre_golden_spm_registers));
break;
case CHIP_HAWAII:
- amdgpu_program_register_sequence(adev,
- hawaii_mgcg_cgcg_init,
- ARRAY_SIZE(hawaii_mgcg_cgcg_init));
- amdgpu_program_register_sequence(adev,
- hawaii_golden_registers,
- ARRAY_SIZE(hawaii_golden_registers));
- amdgpu_program_register_sequence(adev,
- hawaii_golden_common_registers,
- ARRAY_SIZE(hawaii_golden_common_registers));
- amdgpu_program_register_sequence(adev,
- hawaii_golden_spm_registers,
- ARRAY_SIZE(hawaii_golden_spm_registers));
+ amdgpu_device_program_register_sequence(adev,
+ hawaii_mgcg_cgcg_init,
+ ARRAY_SIZE(hawaii_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ hawaii_golden_registers,
+ ARRAY_SIZE(hawaii_golden_registers));
+ amdgpu_device_program_register_sequence(adev,
+ hawaii_golden_common_registers,
+ ARRAY_SIZE(hawaii_golden_common_registers));
+ amdgpu_device_program_register_sequence(adev,
+ hawaii_golden_spm_registers,
+ ARRAY_SIZE(hawaii_golden_spm_registers));
break;
default:
break;
@@ -1246,7 +1246,7 @@ static int cik_gpu_pci_config_reset(struct amdgpu_device *adev)
/* disable BM */
pci_clear_master(adev->pdev);
/* reset */
- amdgpu_pci_config_reset(adev);
+ amdgpu_device_pci_config_reset(adev);
udelay(100);
@@ -1866,7 +1866,7 @@ static int cik_common_early_init(void *handle)
adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
- amdgpu_get_pcie_info(adev);
+ amdgpu_device_get_pcie_info(adev);
return 0;
}
@@ -1974,77 +1974,77 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
switch (adev->asic_type) {
case CHIP_BONAIRE:
- amdgpu_ip_block_add(adev, &cik_common_ip_block);
- amdgpu_ip_block_add(adev, &gmc_v7_0_ip_block);
- amdgpu_ip_block_add(adev, &cik_ih_ip_block);
- amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ amdgpu_device_ip_block_add(adev, &cik_common_ip_block);
+ amdgpu_device_ip_block_add(adev, &gmc_v7_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &cik_ih_ip_block);
+ amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block);
if (adev->enable_virtual_display)
- amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
else if (amdgpu_device_has_dc_support(adev))
- amdgpu_ip_block_add(adev, &dm_ip_block);
+ amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
else
- amdgpu_ip_block_add(adev, &dce_v8_2_ip_block);
- amdgpu_ip_block_add(adev, &gfx_v7_2_ip_block);
- amdgpu_ip_block_add(adev, &cik_sdma_ip_block);
- amdgpu_ip_block_add(adev, &uvd_v4_2_ip_block);
- amdgpu_ip_block_add(adev, &vce_v2_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &dce_v8_2_ip_block);
+ amdgpu_device_ip_block_add(adev, &gfx_v7_2_ip_block);
+ amdgpu_device_ip_block_add(adev, &cik_sdma_ip_block);
+ amdgpu_device_ip_block_add(adev, &uvd_v4_2_ip_block);
+ amdgpu_device_ip_block_add(adev, &vce_v2_0_ip_block);
break;
case CHIP_HAWAII:
- amdgpu_ip_block_add(adev, &cik_common_ip_block);
- amdgpu_ip_block_add(adev, &gmc_v7_0_ip_block);
- amdgpu_ip_block_add(adev, &cik_ih_ip_block);
- amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ amdgpu_device_ip_block_add(adev, &cik_common_ip_block);
+ amdgpu_device_ip_block_add(adev, &gmc_v7_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &cik_ih_ip_block);
+ amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block);
if (adev->enable_virtual_display)
- amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
else if (amdgpu_device_has_dc_support(adev))
- amdgpu_ip_block_add(adev, &dm_ip_block);
+ amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
else
- amdgpu_ip_block_add(adev, &dce_v8_5_ip_block);
- amdgpu_ip_block_add(adev, &gfx_v7_3_ip_block);
- amdgpu_ip_block_add(adev, &cik_sdma_ip_block);
- amdgpu_ip_block_add(adev, &uvd_v4_2_ip_block);
- amdgpu_ip_block_add(adev, &vce_v2_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &dce_v8_5_ip_block);
+ amdgpu_device_ip_block_add(adev, &gfx_v7_3_ip_block);
+ amdgpu_device_ip_block_add(adev, &cik_sdma_ip_block);
+ amdgpu_device_ip_block_add(adev, &uvd_v4_2_ip_block);
+ amdgpu_device_ip_block_add(adev, &vce_v2_0_ip_block);
break;
case CHIP_KAVERI:
- amdgpu_ip_block_add(adev, &cik_common_ip_block);
- amdgpu_ip_block_add(adev, &gmc_v7_0_ip_block);
- amdgpu_ip_block_add(adev, &cik_ih_ip_block);
- amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ amdgpu_device_ip_block_add(adev, &cik_common_ip_block);
+ amdgpu_device_ip_block_add(adev, &gmc_v7_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &cik_ih_ip_block);
+ amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block);
if (adev->enable_virtual_display)
- amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
else if (amdgpu_device_has_dc_support(adev))
- amdgpu_ip_block_add(adev, &dm_ip_block);
+ amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
else
- amdgpu_ip_block_add(adev, &dce_v8_1_ip_block);
- amdgpu_ip_block_add(adev, &gfx_v7_1_ip_block);
- amdgpu_ip_block_add(adev, &cik_sdma_ip_block);
- amdgpu_ip_block_add(adev, &uvd_v4_2_ip_block);
- amdgpu_ip_block_add(adev, &vce_v2_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &dce_v8_1_ip_block);
+ amdgpu_device_ip_block_add(adev, &gfx_v7_1_ip_block);
+ amdgpu_device_ip_block_add(adev, &cik_sdma_ip_block);
+ amdgpu_device_ip_block_add(adev, &uvd_v4_2_ip_block);
+ amdgpu_device_ip_block_add(adev, &vce_v2_0_ip_block);
break;
case CHIP_KABINI:
case CHIP_MULLINS:
- amdgpu_ip_block_add(adev, &cik_common_ip_block);
- amdgpu_ip_block_add(adev, &gmc_v7_0_ip_block);
- amdgpu_ip_block_add(adev, &cik_ih_ip_block);
- amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ amdgpu_device_ip_block_add(adev, &cik_common_ip_block);
+ amdgpu_device_ip_block_add(adev, &gmc_v7_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &cik_ih_ip_block);
+ amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block);
if (adev->enable_virtual_display)
- amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
else if (amdgpu_device_has_dc_support(adev))
- amdgpu_ip_block_add(adev, &dm_ip_block);
+ amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
else
- amdgpu_ip_block_add(adev, &dce_v8_3_ip_block);
- amdgpu_ip_block_add(adev, &gfx_v7_2_ip_block);
- amdgpu_ip_block_add(adev, &cik_sdma_ip_block);
- amdgpu_ip_block_add(adev, &uvd_v4_2_ip_block);
- amdgpu_ip_block_add(adev, &vce_v2_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &dce_v8_3_ip_block);
+ amdgpu_device_ip_block_add(adev, &gfx_v7_2_ip_block);
+ amdgpu_device_ip_block_add(adev, &cik_sdma_ip_block);
+ amdgpu_device_ip_block_add(adev, &uvd_v4_2_ip_block);
+ amdgpu_device_ip_block_add(adev, &vce_v2_0_ip_block);
break;
default:
/* FIXME: not supported yet */
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_ih.c b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
index a870b354e3f7..d5a05c19708f 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
@@ -280,7 +280,7 @@ static void cik_ih_decode_iv(struct amdgpu_device *adev,
entry->src_id = dw[0] & 0xff;
entry->src_data[0] = dw[1] & 0xfffffff;
entry->ring_id = dw[2] & 0xff;
- entry->vm_id = (dw[2] >> 8) & 0xff;
+ entry->vmid = (dw[2] >> 8) & 0xff;
entry->pas_id = (dw[2] >> 16) & 0xffff;
/* wptr/rptr are in bytes! */
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index ed26dcbc4f79..6e8278e689b1 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -221,9 +221,9 @@ static void cik_sdma_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
*/
static void cik_sdma_ring_emit_ib(struct amdgpu_ring *ring,
struct amdgpu_ib *ib,
- unsigned vm_id, bool ctx_switch)
+ unsigned vmid, bool ctx_switch)
{
- u32 extra_bits = vm_id & 0xf;
+ u32 extra_bits = vmid & 0xf;
/* IB packet must end on a 8 DW boundary */
cik_sdma_ring_insert_nop(ring, (12 - (lower_32_bits(ring->wptr) & 7)) % 8);
@@ -626,7 +626,7 @@ static int cik_sdma_ring_test_ring(struct amdgpu_ring *ring)
u32 tmp;
u64 gpu_addr;
- r = amdgpu_wb_get(adev, &index);
+ r = amdgpu_device_wb_get(adev, &index);
if (r) {
dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
return r;
@@ -639,7 +639,7 @@ static int cik_sdma_ring_test_ring(struct amdgpu_ring *ring)
r = amdgpu_ring_alloc(ring, 5);
if (r) {
DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
- amdgpu_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
@@ -663,7 +663,7 @@ static int cik_sdma_ring_test_ring(struct amdgpu_ring *ring)
ring->idx, tmp);
r = -EINVAL;
}
- amdgpu_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -686,7 +686,7 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
u64 gpu_addr;
long r;
- r = amdgpu_wb_get(adev, &index);
+ r = amdgpu_device_wb_get(adev, &index);
if (r) {
dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
return r;
@@ -735,7 +735,7 @@ err1:
amdgpu_ib_free(adev, &ib, NULL);
dma_fence_put(f);
err0:
- amdgpu_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -880,23 +880,23 @@ static void cik_sdma_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
* using sDMA (CIK).
*/
static void cik_sdma_ring_emit_vm_flush(struct amdgpu_ring *ring,
- unsigned vm_id, uint64_t pd_addr)
+ unsigned vmid, uint64_t pd_addr)
{
u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(0) |
SDMA_POLL_REG_MEM_EXTRA_FUNC(0)); /* always */
amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
- if (vm_id < 8) {
- amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id));
+ if (vmid < 8) {
+ amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid));
} else {
- amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8));
+ amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8));
}
amdgpu_ring_write(ring, pd_addr >> 12);
/* flush TLB */
amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
- amdgpu_ring_write(ring, 1 << vm_id);
+ amdgpu_ring_write(ring, 1 << vmid);
amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
diff --git a/drivers/gpu/drm/amd/amdgpu/cikd.h b/drivers/gpu/drm/amd/amdgpu/cikd.h
index 6a9e38a3d2a0..cee6e8a3ad9c 100644
--- a/drivers/gpu/drm/amd/amdgpu/cikd.h
+++ b/drivers/gpu/drm/amd/amdgpu/cikd.h
@@ -562,7 +562,7 @@
#define PRIVATE_BASE(x) ((x) << 0) /* scratch */
#define SHARED_BASE(x) ((x) << 16) /* LDS */
-#define KFD_CIK_SDMA_QUEUE_OFFSET 0x200
+#define KFD_CIK_SDMA_QUEUE_OFFSET (mmSDMA0_RLC1_RB_CNTL - mmSDMA0_RLC0_RB_CNTL)
/* valid for both DEFAULT_MTYPE and APE1_MTYPE */
enum {
diff --git a/drivers/gpu/drm/amd/amdgpu/clearstate_gfx9.h b/drivers/gpu/drm/amd/amdgpu/clearstate_gfx9.h
index 003a131bad47..567a904804bc 100644
--- a/drivers/gpu/drm/amd/amdgpu/clearstate_gfx9.h
+++ b/drivers/gpu/drm/amd/amdgpu/clearstate_gfx9.h
@@ -48,7 +48,7 @@ static const unsigned int gfx9_SECT_CONTEXT_def_1[] =
0x00000000, // DB_STENCIL_WRITE_BASE
0x00000000, // DB_STENCIL_WRITE_BASE_HI
0x00000000, // DB_DFSM_CONTROL
- 0x00000000, // DB_RENDER_FILTER
+ 0, // HOLE
0x00000000, // DB_Z_INFO2
0x00000000, // DB_STENCIL_INFO2
0, // HOLE
@@ -259,8 +259,8 @@ static const unsigned int gfx9_SECT_CONTEXT_def_2[] =
0x00000000, // PA_SC_RIGHT_VERT_GRID
0x00000000, // PA_SC_LEFT_VERT_GRID
0x00000000, // PA_SC_HORIZ_GRID
- 0x00000000, // PA_SC_FOV_WINDOW_LR
- 0x00000000, // PA_SC_FOV_WINDOW_TB
+ 0, // HOLE
+ 0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
@@ -701,7 +701,7 @@ static const unsigned int gfx9_SECT_CONTEXT_def_7[] =
{
0x00000000, // VGT_GS_MAX_PRIMS_PER_SUBGROUP
0x00000000, // VGT_DRAW_PAYLOAD_CNTL
- 0x00000000, // VGT_INDEX_PAYLOAD_CNTL
+ 0, // HOLE
0x00000000, // VGT_INSTANCE_STEP_RATE_0
0x00000000, // VGT_INSTANCE_STEP_RATE_1
0, // HOLE
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.c b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
index fa61d649bb44..f576e9cbbc61 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
@@ -259,7 +259,7 @@ static void cz_ih_decode_iv(struct amdgpu_device *adev,
entry->src_id = dw[0] & 0xff;
entry->src_data[0] = dw[1] & 0xfffffff;
entry->ring_id = dw[2] & 0xff;
- entry->vm_id = (dw[2] >> 8) & 0xff;
+ entry->vmid = (dw[2] >> 8) & 0xff;
entry->pas_id = (dw[2] >> 16) & 0xffff;
/* wptr/rptr are in bytes! */
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index a397111c2ced..f34bc68aadfb 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -145,20 +145,20 @@ static void dce_v10_0_init_golden_registers(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
case CHIP_FIJI:
- amdgpu_program_register_sequence(adev,
- fiji_mgcg_cgcg_init,
- ARRAY_SIZE(fiji_mgcg_cgcg_init));
- amdgpu_program_register_sequence(adev,
- golden_settings_fiji_a10,
- ARRAY_SIZE(golden_settings_fiji_a10));
+ amdgpu_device_program_register_sequence(adev,
+ fiji_mgcg_cgcg_init,
+ ARRAY_SIZE(fiji_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ golden_settings_fiji_a10,
+ ARRAY_SIZE(golden_settings_fiji_a10));
break;
case CHIP_TONGA:
- amdgpu_program_register_sequence(adev,
- tonga_mgcg_cgcg_init,
- ARRAY_SIZE(tonga_mgcg_cgcg_init));
- amdgpu_program_register_sequence(adev,
- golden_settings_tonga_a11,
- ARRAY_SIZE(golden_settings_tonga_a11));
+ amdgpu_device_program_register_sequence(adev,
+ tonga_mgcg_cgcg_init,
+ ARRAY_SIZE(tonga_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ golden_settings_tonga_a11,
+ ARRAY_SIZE(golden_settings_tonga_a11));
break;
default:
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 67e670989e81..26378bd6aba4 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -154,28 +154,28 @@ static void dce_v11_0_init_golden_registers(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
case CHIP_CARRIZO:
- amdgpu_program_register_sequence(adev,
- cz_mgcg_cgcg_init,
- ARRAY_SIZE(cz_mgcg_cgcg_init));
- amdgpu_program_register_sequence(adev,
- cz_golden_settings_a11,
- ARRAY_SIZE(cz_golden_settings_a11));
+ amdgpu_device_program_register_sequence(adev,
+ cz_mgcg_cgcg_init,
+ ARRAY_SIZE(cz_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ cz_golden_settings_a11,
+ ARRAY_SIZE(cz_golden_settings_a11));
break;
case CHIP_STONEY:
- amdgpu_program_register_sequence(adev,
- stoney_golden_settings_a11,
- ARRAY_SIZE(stoney_golden_settings_a11));
+ amdgpu_device_program_register_sequence(adev,
+ stoney_golden_settings_a11,
+ ARRAY_SIZE(stoney_golden_settings_a11));
break;
case CHIP_POLARIS11:
case CHIP_POLARIS12:
- amdgpu_program_register_sequence(adev,
- polaris11_golden_settings_a11,
- ARRAY_SIZE(polaris11_golden_settings_a11));
+ amdgpu_device_program_register_sequence(adev,
+ polaris11_golden_settings_a11,
+ ARRAY_SIZE(polaris11_golden_settings_a11));
break;
case CHIP_POLARIS10:
- amdgpu_program_register_sequence(adev,
- polaris10_golden_settings_a11,
- ARRAY_SIZE(polaris10_golden_settings_a11));
+ amdgpu_device_program_register_sequence(adev,
+ polaris10_golden_settings_a11,
+ ARRAY_SIZE(polaris10_golden_settings_a11));
break;
default:
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
index edef17d93527..9870d83b68c1 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
@@ -1874,7 +1874,7 @@ static void gfx_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
static void gfx_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
struct amdgpu_ib *ib,
- unsigned vm_id, bool ctx_switch)
+ unsigned vmid, bool ctx_switch)
{
u32 header, control = 0;
@@ -1889,7 +1889,7 @@ static void gfx_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
else
header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
- control |= ib->length_dw | (vm_id << 24);
+ control |= ib->length_dw | (vmid << 24);
amdgpu_ring_write(ring, header);
amdgpu_ring_write(ring,
@@ -2354,7 +2354,7 @@ static void gfx_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
}
static void gfx_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
- unsigned vm_id, uint64_t pd_addr)
+ unsigned vmid, uint64_t pd_addr)
{
int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
@@ -2362,10 +2362,10 @@ static void gfx_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
WRITE_DATA_DST_SEL(0)));
- if (vm_id < 8) {
- amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id ));
+ if (vmid < 8) {
+ amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid ));
} else {
- amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (vm_id - 8)));
+ amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (vmid - 8)));
}
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, pd_addr >> 12);
@@ -2376,7 +2376,7 @@ static void gfx_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
WRITE_DATA_DST_SEL(0)));
amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
amdgpu_ring_write(ring, 0);
- amdgpu_ring_write(ring, 1 << vm_id);
+ amdgpu_ring_write(ring, 1 << vmid);
/* wait for the invalidate to complete */
amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 83d94c23aa78..a066c5eda135 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -48,6 +48,8 @@
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
+#define NUM_SIMD_PER_CU 0x4 /* missing from the gfx_7 IP headers */
+
#define GFX7_NUM_GFX_RINGS 1
#define GFX7_MEC_HPD_SIZE 2048
@@ -2252,7 +2254,7 @@ static void gfx_v7_0_ring_emit_fence_compute(struct amdgpu_ring *ring,
*/
static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
struct amdgpu_ib *ib,
- unsigned vm_id, bool ctx_switch)
+ unsigned vmid, bool ctx_switch)
{
u32 header, control = 0;
@@ -2267,7 +2269,7 @@ static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
else
header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
- control |= ib->length_dw | (vm_id << 24);
+ control |= ib->length_dw | (vmid << 24);
amdgpu_ring_write(ring, header);
amdgpu_ring_write(ring,
@@ -2281,9 +2283,9 @@ static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
static void gfx_v7_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
struct amdgpu_ib *ib,
- unsigned vm_id, bool ctx_switch)
+ unsigned vmid, bool ctx_switch)
{
- u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vm_id << 24);
+ u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
amdgpu_ring_write(ring,
@@ -3237,19 +3239,19 @@ static void gfx_v7_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
* using the CP (CIK).
*/
static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
- unsigned vm_id, uint64_t pd_addr)
+ unsigned vmid, uint64_t pd_addr)
{
int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
WRITE_DATA_DST_SEL(0)));
- if (vm_id < 8) {
+ if (vmid < 8) {
amdgpu_ring_write(ring,
- (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id));
+ (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid));
} else {
amdgpu_ring_write(ring,
- (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8));
+ (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8));
}
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, pd_addr >> 12);
@@ -3260,7 +3262,7 @@ static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
WRITE_DATA_DST_SEL(0)));
amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
amdgpu_ring_write(ring, 0);
- amdgpu_ring_write(ring, 1 << vm_id);
+ amdgpu_ring_write(ring, 1 << vmid);
/* wait for the invalidate to complete */
amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
@@ -5277,6 +5279,11 @@ static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev)
cu_info->number = active_cu_number;
cu_info->ao_cu_mask = ao_cu_mask;
+ cu_info->simd_per_cu = NUM_SIMD_PER_CU;
+ cu_info->max_waves_per_simd = 10;
+ cu_info->max_scratch_slots_per_cu = 32;
+ cu_info->wave_front_size = 64;
+ cu_info->lds_size = 64;
}
const struct amdgpu_ip_block_version gfx_v7_0_ip_block =
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index d02493cf9175..4e694ae9f308 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -679,55 +679,55 @@ static void gfx_v8_0_init_golden_registers(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
case CHIP_TOPAZ:
- amdgpu_program_register_sequence(adev,
- iceland_mgcg_cgcg_init,
- ARRAY_SIZE(iceland_mgcg_cgcg_init));
- amdgpu_program_register_sequence(adev,
- golden_settings_iceland_a11,
- ARRAY_SIZE(golden_settings_iceland_a11));
- amdgpu_program_register_sequence(adev,
- iceland_golden_common_all,
- ARRAY_SIZE(iceland_golden_common_all));
+ amdgpu_device_program_register_sequence(adev,
+ iceland_mgcg_cgcg_init,
+ ARRAY_SIZE(iceland_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ golden_settings_iceland_a11,
+ ARRAY_SIZE(golden_settings_iceland_a11));
+ amdgpu_device_program_register_sequence(adev,
+ iceland_golden_common_all,
+ ARRAY_SIZE(iceland_golden_common_all));
break;
case CHIP_FIJI:
- amdgpu_program_register_sequence(adev,
- fiji_mgcg_cgcg_init,
- ARRAY_SIZE(fiji_mgcg_cgcg_init));
- amdgpu_program_register_sequence(adev,
- golden_settings_fiji_a10,
- ARRAY_SIZE(golden_settings_fiji_a10));
- amdgpu_program_register_sequence(adev,
- fiji_golden_common_all,
- ARRAY_SIZE(fiji_golden_common_all));
+ amdgpu_device_program_register_sequence(adev,
+ fiji_mgcg_cgcg_init,
+ ARRAY_SIZE(fiji_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ golden_settings_fiji_a10,
+ ARRAY_SIZE(golden_settings_fiji_a10));
+ amdgpu_device_program_register_sequence(adev,
+ fiji_golden_common_all,
+ ARRAY_SIZE(fiji_golden_common_all));
break;
case CHIP_TONGA:
- amdgpu_program_register_sequence(adev,
- tonga_mgcg_cgcg_init,
- ARRAY_SIZE(tonga_mgcg_cgcg_init));
- amdgpu_program_register_sequence(adev,
- golden_settings_tonga_a11,
- ARRAY_SIZE(golden_settings_tonga_a11));
- amdgpu_program_register_sequence(adev,
- tonga_golden_common_all,
- ARRAY_SIZE(tonga_golden_common_all));
+ amdgpu_device_program_register_sequence(adev,
+ tonga_mgcg_cgcg_init,
+ ARRAY_SIZE(tonga_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ golden_settings_tonga_a11,
+ ARRAY_SIZE(golden_settings_tonga_a11));
+ amdgpu_device_program_register_sequence(adev,
+ tonga_golden_common_all,
+ ARRAY_SIZE(tonga_golden_common_all));
break;
case CHIP_POLARIS11:
case CHIP_POLARIS12:
- amdgpu_program_register_sequence(adev,
- golden_settings_polaris11_a11,
- ARRAY_SIZE(golden_settings_polaris11_a11));
- amdgpu_program_register_sequence(adev,
- polaris11_golden_common_all,
- ARRAY_SIZE(polaris11_golden_common_all));
+ amdgpu_device_program_register_sequence(adev,
+ golden_settings_polaris11_a11,
+ ARRAY_SIZE(golden_settings_polaris11_a11));
+ amdgpu_device_program_register_sequence(adev,
+ polaris11_golden_common_all,
+ ARRAY_SIZE(polaris11_golden_common_all));
break;
case CHIP_POLARIS10:
- amdgpu_program_register_sequence(adev,
- golden_settings_polaris10_a11,
- ARRAY_SIZE(golden_settings_polaris10_a11));
- amdgpu_program_register_sequence(adev,
- polaris10_golden_common_all,
- ARRAY_SIZE(polaris10_golden_common_all));
+ amdgpu_device_program_register_sequence(adev,
+ golden_settings_polaris10_a11,
+ ARRAY_SIZE(golden_settings_polaris10_a11));
+ amdgpu_device_program_register_sequence(adev,
+ polaris10_golden_common_all,
+ ARRAY_SIZE(polaris10_golden_common_all));
WREG32_SMC(ixCG_ACLK_CNTL, 0x0000001C);
if (adev->pdev->revision == 0xc7 &&
((adev->pdev->subsystem_device == 0xb37 && adev->pdev->subsystem_vendor == 0x1002) ||
@@ -738,26 +738,26 @@ static void gfx_v8_0_init_golden_registers(struct amdgpu_device *adev)
}
break;
case CHIP_CARRIZO:
- amdgpu_program_register_sequence(adev,
- cz_mgcg_cgcg_init,
- ARRAY_SIZE(cz_mgcg_cgcg_init));
- amdgpu_program_register_sequence(adev,
- cz_golden_settings_a11,
- ARRAY_SIZE(cz_golden_settings_a11));
- amdgpu_program_register_sequence(adev,
- cz_golden_common_all,
- ARRAY_SIZE(cz_golden_common_all));
+ amdgpu_device_program_register_sequence(adev,
+ cz_mgcg_cgcg_init,
+ ARRAY_SIZE(cz_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ cz_golden_settings_a11,
+ ARRAY_SIZE(cz_golden_settings_a11));
+ amdgpu_device_program_register_sequence(adev,
+ cz_golden_common_all,
+ ARRAY_SIZE(cz_golden_common_all));
break;
case CHIP_STONEY:
- amdgpu_program_register_sequence(adev,
- stoney_mgcg_cgcg_init,
- ARRAY_SIZE(stoney_mgcg_cgcg_init));
- amdgpu_program_register_sequence(adev,
- stoney_golden_settings_a11,
- ARRAY_SIZE(stoney_golden_settings_a11));
- amdgpu_program_register_sequence(adev,
- stoney_golden_common_all,
- ARRAY_SIZE(stoney_golden_common_all));
+ amdgpu_device_program_register_sequence(adev,
+ stoney_mgcg_cgcg_init,
+ ARRAY_SIZE(stoney_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ stoney_golden_settings_a11,
+ ARRAY_SIZE(stoney_golden_settings_a11));
+ amdgpu_device_program_register_sequence(adev,
+ stoney_golden_common_all,
+ ARRAY_SIZE(stoney_golden_common_all));
break;
default:
break;
@@ -5062,8 +5062,9 @@ static int gfx_v8_0_hw_fini(void *handle)
gfx_v8_0_cp_enable(adev, false);
gfx_v8_0_rlc_stop(adev);
- amdgpu_set_powergating_state(adev,
- AMD_IP_BLOCK_TYPE_GFX, AMD_PG_STATE_UNGATE);
+ amdgpu_device_ip_set_powergating_state(adev,
+ AMD_IP_BLOCK_TYPE_GFX,
+ AMD_PG_STATE_UNGATE);
return 0;
}
@@ -5480,8 +5481,9 @@ static int gfx_v8_0_late_init(void *handle)
if (r)
return r;
- amdgpu_set_powergating_state(adev,
- AMD_IP_BLOCK_TYPE_GFX, AMD_PG_STATE_GATE);
+ amdgpu_device_ip_set_powergating_state(adev,
+ AMD_IP_BLOCK_TYPE_GFX,
+ AMD_PG_STATE_GATE);
return 0;
}
@@ -5492,10 +5494,10 @@ static void gfx_v8_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *ade
if ((adev->asic_type == CHIP_POLARIS11) ||
(adev->asic_type == CHIP_POLARIS12))
/* Send msg to SMU via Powerplay */
- amdgpu_set_powergating_state(adev,
- AMD_IP_BLOCK_TYPE_SMC,
- enable ?
- AMD_PG_STATE_GATE : AMD_PG_STATE_UNGATE);
+ amdgpu_device_ip_set_powergating_state(adev,
+ AMD_IP_BLOCK_TYPE_SMC,
+ enable ?
+ AMD_PG_STATE_GATE : AMD_PG_STATE_UNGATE);
WREG32_FIELD(RLC_PG_CNTL, STATIC_PER_CU_PG_ENABLE, enable ? 1 : 0);
}
@@ -6243,7 +6245,7 @@ static void gfx_v8_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
struct amdgpu_ib *ib,
- unsigned vm_id, bool ctx_switch)
+ unsigned vmid, bool ctx_switch)
{
u32 header, control = 0;
@@ -6252,7 +6254,7 @@ static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
else
header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
- control |= ib->length_dw | (vm_id << 24);
+ control |= ib->length_dw | (vmid << 24);
if (amdgpu_sriov_vf(ring->adev) && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
control |= INDIRECT_BUFFER_PRE_ENB(1);
@@ -6273,9 +6275,9 @@ static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
static void gfx_v8_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
struct amdgpu_ib *ib,
- unsigned vm_id, bool ctx_switch)
+ unsigned vmid, bool ctx_switch)
{
- u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vm_id << 24);
+ u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
amdgpu_ring_write(ring,
@@ -6326,7 +6328,7 @@ static void gfx_v8_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
}
static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
- unsigned vm_id, uint64_t pd_addr)
+ unsigned vmid, uint64_t pd_addr)
{
int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
@@ -6334,12 +6336,12 @@ static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
WRITE_DATA_DST_SEL(0)) |
WR_CONFIRM);
- if (vm_id < 8) {
+ if (vmid < 8) {
amdgpu_ring_write(ring,
- (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id));
+ (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid));
} else {
amdgpu_ring_write(ring,
- (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8));
+ (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8));
}
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, pd_addr >> 12);
@@ -6351,7 +6353,7 @@ static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
WRITE_DATA_DST_SEL(0)));
amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
amdgpu_ring_write(ring, 0);
- amdgpu_ring_write(ring, 1 << vm_id);
+ amdgpu_ring_write(ring, 1 << vmid);
/* wait for the invalidate to complete */
amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
@@ -6472,10 +6474,10 @@ static void gfx_v8_0_hqd_set_priority(struct amdgpu_device *adev,
mutex_unlock(&adev->srbm_mutex);
}
static void gfx_v8_0_ring_set_priority_compute(struct amdgpu_ring *ring,
- enum amd_sched_priority priority)
+ enum drm_sched_priority priority)
{
struct amdgpu_device *adev = ring->adev;
- bool acquire = priority == AMD_SCHED_PRIORITY_HIGH_HW;
+ bool acquire = priority == DRM_SCHED_PRIORITY_HIGH_HW;
if (ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE)
return;
@@ -7114,6 +7116,11 @@ static void gfx_v8_0_get_cu_info(struct amdgpu_device *adev)
cu_info->number = active_cu_number;
cu_info->ao_cu_mask = ao_cu_mask;
+ cu_info->simd_per_cu = NUM_SIMD_PER_CU;
+ cu_info->max_waves_per_simd = 10;
+ cu_info->max_scratch_slots_per_cu = 32;
+ cu_info->wave_front_size = 64;
+ cu_info->lds_size = 64;
}
const struct amdgpu_ip_block_version gfx_v8_0_ip_block =
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 6c5289ae67be..55670dbacace 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -28,7 +28,6 @@
#include "soc15.h"
#include "soc15d.h"
-#include "soc15ip.h"
#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"
#include "vega10_enum.h"
@@ -65,152 +64,84 @@ MODULE_FIRMWARE("amdgpu/raven_mec.bin");
MODULE_FIRMWARE("amdgpu/raven_mec2.bin");
MODULE_FIRMWARE("amdgpu/raven_rlc.bin");
-static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] =
-{
- { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_BASE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID0),
- SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID0) },
- { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID1_BASE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_VMID1_SIZE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID1),
- SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID1) },
- { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID2_BASE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_VMID2_SIZE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID2),
- SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID2) },
- { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID3_BASE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_VMID3_SIZE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID3),
- SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID3) },
- { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID4_BASE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_VMID4_SIZE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID4),
- SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID4) },
- { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID5_BASE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_VMID5_SIZE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID5),
- SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID5) },
- { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID6_BASE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_VMID6_SIZE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID6),
- SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID6) },
- { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID7_BASE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_VMID7_SIZE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID7),
- SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID7) },
- { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID8_BASE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_VMID8_SIZE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID8),
- SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID8) },
- { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID9_BASE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_VMID9_SIZE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID9),
- SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID9) },
- { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID10_BASE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_VMID10_SIZE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID10),
- SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID10) },
- { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID11_BASE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_VMID11_SIZE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID11),
- SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID11) },
- { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID12_BASE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_VMID12_SIZE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID12),
- SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID12)},
- { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID13_BASE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_VMID13_SIZE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID13),
- SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID13) },
- { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID14_BASE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_VMID14_SIZE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID14),
- SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID14) },
- { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID15_BASE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_VMID15_SIZE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID15),
- SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID15) }
+static const struct soc15_reg_golden golden_settings_gc_9_0[] =
+{
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x08000000, 0x08000080),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0x08000000, 0x08000080),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x08000000, 0x08000080),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000420),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x08000000, 0x08000080),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0x08000000, 0x08000080),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0x08000000, 0x08000080),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0x08000000, 0x08000080),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0x08000000, 0x08000080),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0x08000000, 0x08000080),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSH_MEM_CONFIG, 0x00001000, 0x00001000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0x0000000f, 0x01000107),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQC_CONFIG, 0x03000000, 0x020a2000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x4a2c0e68),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0xb5d3f197),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080)
};
-static const u32 golden_settings_gc_9_0[] =
-{
- SOC15_REG_OFFSET(GC, 0, mmCPC_UTCL1_CNTL), 0x08000000, 0x08000080,
- SOC15_REG_OFFSET(GC, 0, mmCPF_UTCL1_CNTL), 0x08000000, 0x08000080,
- SOC15_REG_OFFSET(GC, 0, mmCPG_UTCL1_CNTL), 0x08000000, 0x08000080,
- SOC15_REG_OFFSET(GC, 0, mmDB_DEBUG2), 0xf00fffff, 0x00000420,
- SOC15_REG_OFFSET(GC, 0, mmGB_GPU_ID), 0x0000000f, 0x00000000,
- SOC15_REG_OFFSET(GC, 0, mmIA_UTCL1_CNTL), 0x08000000, 0x08000080,
- SOC15_REG_OFFSET(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3), 0x00000003, 0x82400024,
- SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE), 0x3fffffff, 0x00000001,
- SOC15_REG_OFFSET(GC, 0, mmPA_SC_LINE_STIPPLE_STATE), 0x0000ff0f, 0x00000000,
- SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UTCL1_CNTL_0), 0x08000000, 0x08000080,
- SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UTCL1_CNTL_1), 0x08000000, 0x08000080,
- SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UTCL1_CNTL_2), 0x08000000, 0x08000080,
- SOC15_REG_OFFSET(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL), 0x08000000, 0x08000080,
- SOC15_REG_OFFSET(GC, 0, mmRLC_SPM_UTCL1_CNTL), 0x08000000, 0x08000080,
- SOC15_REG_OFFSET(GC, 0, mmSH_MEM_CONFIG), 0x00001000, 0x00001000,
- SOC15_REG_OFFSET(GC, 0, mmSPI_CONFIG_CNTL_1), 0x0000000f, 0x01000107,
- SOC15_REG_OFFSET(GC, 0, mmSQC_CONFIG), 0x03000000, 0x020a2000,
- SOC15_REG_OFFSET(GC, 0, mmTA_CNTL_AUX), 0xfffffeef, 0x010b0000,
- SOC15_REG_OFFSET(GC, 0, mmTCP_CHAN_STEER_HI), 0xffffffff, 0x4a2c0e68,
- SOC15_REG_OFFSET(GC, 0, mmTCP_CHAN_STEER_LO), 0xffffffff, 0xb5d3f197,
- SOC15_REG_OFFSET(GC, 0, mmVGT_CACHE_INVALIDATION), 0x3fff3af3, 0x19200000,
- SOC15_REG_OFFSET(GC, 0, mmVGT_GS_MAX_WAVE_ID), 0x00000fff, 0x000003ff,
- SOC15_REG_OFFSET(GC, 0, mmWD_UTCL1_CNTL), 0x08000000, 0x08000080
-};
-
-static const u32 golden_settings_gc_9_0_vg10[] =
+static const struct soc15_reg_golden golden_settings_gc_9_0_vg10[] =
{
- SOC15_REG_OFFSET(GC, 0, mmCB_HW_CONTROL), 0x0000f000, 0x00012107,
- SOC15_REG_OFFSET(GC, 0, mmCB_HW_CONTROL_3), 0x30000000, 0x10000000,
- SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG), 0xffff77ff, 0x2a114042,
- SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG_READ), 0xffff77ff, 0x2a114042,
- SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE_1), 0x00008000, 0x00048000,
- SOC15_REG_OFFSET(GC, 0, mmRMI_UTCL1_CNTL2), 0x00030000, 0x00020000,
- SOC15_REG_OFFSET(GC, 0, mmTD_CNTL), 0x00001800, 0x00000800
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0x0000f000, 0x00012107),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x2a114042),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x2a114042),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0x00008000, 0x00048000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00020000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x00001800, 0x00000800)
};
-static const u32 golden_settings_gc_9_1[] =
-{
- SOC15_REG_OFFSET(GC, 0, mmCB_HW_CONTROL), 0xfffdf3cf, 0x00014104,
- SOC15_REG_OFFSET(GC, 0, mmCPC_UTCL1_CNTL), 0x08000000, 0x08000080,
- SOC15_REG_OFFSET(GC, 0, mmCPF_UTCL1_CNTL), 0x08000000, 0x08000080,
- SOC15_REG_OFFSET(GC, 0, mmCPG_UTCL1_CNTL), 0x08000000, 0x08000080,
- SOC15_REG_OFFSET(GC, 0, mmDB_DEBUG2), 0xf00fffff, 0x00000420,
- SOC15_REG_OFFSET(GC, 0, mmGB_GPU_ID), 0x0000000f, 0x00000000,
- SOC15_REG_OFFSET(GC, 0, mmIA_UTCL1_CNTL), 0x08000000, 0x08000080,
- SOC15_REG_OFFSET(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3), 0x00000003, 0x82400024,
- SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE), 0x3fffffff, 0x00000001,
- SOC15_REG_OFFSET(GC, 0, mmPA_SC_LINE_STIPPLE_STATE), 0x0000ff0f, 0x00000000,
- SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UTCL1_CNTL_0), 0x08000000, 0x08000080,
- SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UTCL1_CNTL_1), 0x08000000, 0x08000080,
- SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UTCL1_CNTL_2), 0x08000000, 0x08000080,
- SOC15_REG_OFFSET(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL), 0x08000000, 0x08000080,
- SOC15_REG_OFFSET(GC, 0, mmRLC_SPM_UTCL1_CNTL), 0x08000000, 0x08000080,
- SOC15_REG_OFFSET(GC, 0, mmTA_CNTL_AUX), 0xfffffeef, 0x010b0000,
- SOC15_REG_OFFSET(GC, 0, mmTCP_CHAN_STEER_HI), 0xffffffff, 0x00000000,
- SOC15_REG_OFFSET(GC, 0, mmTCP_CHAN_STEER_LO), 0xffffffff, 0x00003120,
- SOC15_REG_OFFSET(GC, 0, mmVGT_CACHE_INVALIDATION), 0x3fff3af3, 0x19200000,
- SOC15_REG_OFFSET(GC, 0, mmVGT_GS_MAX_WAVE_ID), 0x00000fff, 0x000000ff,
- SOC15_REG_OFFSET(GC, 0, mmWD_UTCL1_CNTL), 0x08000000, 0x08000080
+static const struct soc15_reg_golden golden_settings_gc_9_1[] =
+{
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x08000000, 0x08000080),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0x08000000, 0x08000080),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x08000000, 0x08000080),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000420),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x08000000, 0x08000080),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0x08000000, 0x08000080),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0x08000000, 0x08000080),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0x08000000, 0x08000080),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0x08000000, 0x08000080),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0x08000000, 0x08000080),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00003120),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000000ff),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080)
};
-static const u32 golden_settings_gc_9_1_rv1[] =
+static const struct soc15_reg_golden golden_settings_gc_9_1_rv1[] =
{
- SOC15_REG_OFFSET(GC, 0, mmCB_HW_CONTROL_3), 0x30000000, 0x10000000,
- SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG), 0xffff77ff, 0x24000042,
- SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG_READ), 0xffff77ff, 0x24000042,
- SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE_1), 0xffffffff, 0x04048000,
- SOC15_REG_OFFSET(GC, 0, mmPA_SC_MODE_CNTL_1), 0x06000000, 0x06000000,
- SOC15_REG_OFFSET(GC, 0, mmRMI_UTCL1_CNTL2), 0x00030000, 0x00020000,
- SOC15_REG_OFFSET(GC, 0, mmTD_CNTL), 0x01bd9f33, 0x00000800
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x24000042),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x24000042),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04048000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_MODE_CNTL_1, 0x06000000, 0x06000000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00020000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x00000800)
};
-static const u32 golden_settings_gc_9_x_common[] =
+static const struct soc15_reg_golden golden_settings_gc_9_x_common[] =
{
- SOC15_REG_OFFSET(GC, 0, mmGRBM_CAM_INDEX), 0xffffffff, 0x00000000,
- SOC15_REG_OFFSET(GC, 0, mmGRBM_CAM_DATA), 0xffffffff, 0x2544c382
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_INDEX, 0xffffffff, 0x00000000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_DATA, 0xffffffff, 0x2544c382)
};
#define VEGA10_GB_ADDR_CONFIG_GOLDEN 0x2a114042
@@ -230,18 +161,18 @@ static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
case CHIP_VEGA10:
- amdgpu_program_register_sequence(adev,
+ soc15_program_register_sequence(adev,
golden_settings_gc_9_0,
ARRAY_SIZE(golden_settings_gc_9_0));
- amdgpu_program_register_sequence(adev,
+ soc15_program_register_sequence(adev,
golden_settings_gc_9_0_vg10,
ARRAY_SIZE(golden_settings_gc_9_0_vg10));
break;
case CHIP_RAVEN:
- amdgpu_program_register_sequence(adev,
+ soc15_program_register_sequence(adev,
golden_settings_gc_9_1,
ARRAY_SIZE(golden_settings_gc_9_1));
- amdgpu_program_register_sequence(adev,
+ soc15_program_register_sequence(adev,
golden_settings_gc_9_1_rv1,
ARRAY_SIZE(golden_settings_gc_9_1_rv1));
break;
@@ -249,7 +180,7 @@ static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
break;
}
- amdgpu_program_register_sequence(adev, golden_settings_gc_9_x_common,
+ soc15_program_register_sequence(adev, golden_settings_gc_9_x_common,
(const u32)ARRAY_SIZE(golden_settings_gc_9_x_common));
}
@@ -1137,7 +1068,7 @@ static int gfx_v9_0_ngg_init(struct amdgpu_device *adev)
adev->gfx.ngg.gds_reserve_size = ALIGN(5 * 4, 0x40);
adev->gds.mem.total_size -= adev->gfx.ngg.gds_reserve_size;
adev->gds.mem.gfx_partition_size -= adev->gfx.ngg.gds_reserve_size;
- adev->gfx.ngg.gds_reserve_addr = amdgpu_gds_reg_offset[0].mem_base;
+ adev->gfx.ngg.gds_reserve_addr = SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_BASE);
adev->gfx.ngg.gds_reserve_addr += adev->gds.mem.gfx_partition_size;
/* Primitive Buffer */
@@ -1243,7 +1174,7 @@ static int gfx_v9_0_ngg_en(struct amdgpu_device *adev)
}
gfx_v9_0_write_data_to_reg(ring, 0, false,
- amdgpu_gds_reg_offset[0].mem_size,
+ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE),
(adev->gds.mem.total_size +
adev->gfx.ngg.gds_reserve_size) >>
AMDGPU_GDS_SHIFT);
@@ -1259,7 +1190,7 @@ static int gfx_v9_0_ngg_en(struct amdgpu_device *adev)
gfx_v9_0_write_data_to_reg(ring, 0, false,
- amdgpu_gds_reg_offset[0].mem_size, 0);
+ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE), 0);
amdgpu_ring_commit(ring);
@@ -1598,11 +1529,18 @@ static void gfx_v9_0_gpu_init(struct amdgpu_device *adev)
for (i = 0; i < 16; i++) {
soc15_grbm_select(adev, 0, 0, 0, i);
/* CP and shaders */
- tmp = 0;
- tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, ALIGNMENT_MODE,
- SH_MEM_ALIGNMENT_MODE_UNALIGNED);
- WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, tmp);
- WREG32_SOC15(GC, 0, mmSH_MEM_BASES, 0);
+ if (i == 0) {
+ tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
+ SH_MEM_ALIGNMENT_MODE_UNALIGNED);
+ WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, tmp);
+ WREG32_SOC15(GC, 0, mmSH_MEM_BASES, 0);
+ } else {
+ tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
+ SH_MEM_ALIGNMENT_MODE_UNALIGNED);
+ WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, tmp);
+ tmp = adev->mc.shared_aperture_start >> 48;
+ WREG32_SOC15(GC, 0, mmSH_MEM_BASES, tmp);
+ }
}
soc15_grbm_select(adev, 0, 0, 0, 0);
@@ -2474,7 +2412,7 @@ static int gfx_v9_0_kiq_kcq_enable(struct amdgpu_device *adev)
PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
PACKET3_MAP_QUEUES_ME((ring->me == 1 ? 0 : 1)) |
PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /*queue_type: normal compute queue */
- PACKET3_MAP_QUEUES_ALLOC_FORMAT(1) | /* alloc format: all_on_one_pipe */
+ PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */
PACKET3_MAP_QUEUES_ENGINE_SEL(0) | /* engine_sel: compute */
PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */
amdgpu_ring_write(kiq_ring, PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
@@ -3146,6 +3084,8 @@ static void gfx_v9_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
uint32_t gws_base, uint32_t gws_size,
uint32_t oa_base, uint32_t oa_size)
{
+ struct amdgpu_device *adev = ring->adev;
+
gds_base = gds_base >> AMDGPU_GDS_SHIFT;
gds_size = gds_size >> AMDGPU_GDS_SHIFT;
@@ -3157,22 +3097,22 @@ static void gfx_v9_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
/* GDS Base */
gfx_v9_0_write_data_to_reg(ring, 0, false,
- amdgpu_gds_reg_offset[vmid].mem_base,
+ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_BASE) + 2 * vmid,
gds_base);
/* GDS Size */
gfx_v9_0_write_data_to_reg(ring, 0, false,
- amdgpu_gds_reg_offset[vmid].mem_size,
+ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE) + 2 * vmid,
gds_size);
/* GWS */
gfx_v9_0_write_data_to_reg(ring, 0, false,
- amdgpu_gds_reg_offset[vmid].gws,
+ SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID0) + vmid,
gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);
/* OA */
gfx_v9_0_write_data_to_reg(ring, 0, false,
- amdgpu_gds_reg_offset[vmid].oa,
+ SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID0) + vmid,
(1 << (oa_size + oa_base)) - (1 << oa_base));
}
@@ -3617,13 +3557,9 @@ static void gfx_v9_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
+ struct amdgpu_device *adev = ring->adev;
u32 ref_and_mask, reg_mem_engine;
- const struct nbio_hdp_flush_reg *nbio_hf_reg;
-
- if (ring->adev->flags & AMD_IS_APU)
- nbio_hf_reg = &nbio_v7_0_hdp_flush_reg;
- else
- nbio_hf_reg = &nbio_v6_1_hdp_flush_reg;
+ const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio_funcs->hdp_flush_reg;
if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
switch (ring->me) {
@@ -3643,20 +3579,22 @@ static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
}
gfx_v9_0_wait_reg_mem(ring, reg_mem_engine, 0, 1,
- nbio_hf_reg->hdp_flush_req_offset,
- nbio_hf_reg->hdp_flush_done_offset,
+ adev->nbio_funcs->get_hdp_flush_req_offset(adev),
+ adev->nbio_funcs->get_hdp_flush_done_offset(adev),
ref_and_mask, ref_and_mask, 0x20);
}
static void gfx_v9_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
{
+ struct amdgpu_device *adev = ring->adev;
+
gfx_v9_0_write_data_to_reg(ring, 0, true,
SOC15_REG_OFFSET(HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
}
static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
struct amdgpu_ib *ib,
- unsigned vm_id, bool ctx_switch)
+ unsigned vmid, bool ctx_switch)
{
u32 header, control = 0;
@@ -3665,7 +3603,7 @@ static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
else
header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
- control |= ib->length_dw | (vm_id << 24);
+ control |= ib->length_dw | (vmid << 24);
if (amdgpu_sriov_vf(ring->adev) && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
control |= INDIRECT_BUFFER_PRE_ENB(1);
@@ -3687,9 +3625,9 @@ BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
static void gfx_v9_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
struct amdgpu_ib *ib,
- unsigned vm_id, bool ctx_switch)
+ unsigned vmid, bool ctx_switch)
{
- u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vm_id << 24);
+ u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
@@ -3745,22 +3683,23 @@ static void gfx_v9_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
}
static void gfx_v9_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
- unsigned vm_id, uint64_t pd_addr)
+ unsigned vmid, uint64_t pd_addr)
{
struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
- uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
+ uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vmid);
+ uint64_t flags = AMDGPU_PTE_VALID;
unsigned eng = ring->vm_inv_eng;
- pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr);
- pd_addr |= AMDGPU_PTE_VALID;
+ amdgpu_gart_get_vm_pde(ring->adev, -1, &pd_addr, &flags);
+ pd_addr |= flags;
gfx_v9_0_write_data_to_reg(ring, usepfp, true,
- hub->ctx0_ptb_addr_lo32 + (2 * vm_id),
+ hub->ctx0_ptb_addr_lo32 + (2 * vmid),
lower_32_bits(pd_addr));
gfx_v9_0_write_data_to_reg(ring, usepfp, true,
- hub->ctx0_ptb_addr_hi32 + (2 * vm_id),
+ hub->ctx0_ptb_addr_hi32 + (2 * vmid),
upper_32_bits(pd_addr));
gfx_v9_0_write_data_to_reg(ring, usepfp, true,
@@ -3768,7 +3707,7 @@ static void gfx_v9_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
/* wait for the invalidate to complete */
gfx_v9_0_wait_reg_mem(ring, 0, 0, 0, hub->vm_inv_eng0_ack +
- eng, 0, 1 << vm_id, 1 << vm_id, 0x20);
+ eng, 0, 1 << vmid, 1 << vmid, 0x20);
/* compute doesn't have PFP */
if (usepfp) {
@@ -3811,6 +3750,8 @@ static void gfx_v9_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
static void gfx_v9_0_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr,
u64 seq, unsigned int flags)
{
+ struct amdgpu_device *adev = ring->adev;
+
/* we only allocate 32bit for each seq wb address */
BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
index f1effadfbaa6..56f5fe4e2fee 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
@@ -23,7 +23,6 @@
#include "amdgpu.h"
#include "gfxhub_v1_0.h"
-#include "soc15ip.h"
#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"
#include "gc/gc_9_0_default.h"
@@ -144,8 +143,15 @@ static void gfxhub_v1_0_init_cache_regs(struct amdgpu_device *adev)
WREG32_SOC15(GC, 0, mmVM_L2_CNTL2, tmp);
tmp = mmVM_L2_CNTL3_DEFAULT;
- tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 9);
- tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
+ if (adev->mc.translate_further) {
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 12);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
+ L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
+ } else {
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 9);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
+ L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
+ }
WREG32_SOC15(GC, 0, mmVM_L2_CNTL3, tmp);
tmp = mmVM_L2_CNTL4_DEFAULT;
@@ -183,31 +189,40 @@ static void gfxhub_v1_0_disable_identity_aperture(struct amdgpu_device *adev)
static void gfxhub_v1_0_setup_vmid_config(struct amdgpu_device *adev)
{
- int i;
+ unsigned num_level, block_size;
uint32_t tmp;
+ int i;
+
+ num_level = adev->vm_manager.num_level;
+ block_size = adev->vm_manager.block_size;
+ if (adev->mc.translate_further)
+ num_level -= 1;
+ else
+ block_size -= 9;
for (i = 0; i <= 14; i++) {
tmp = RREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT1_CNTL, i);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH,
- adev->vm_manager.num_level);
+ num_level);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
- RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
- DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT,
+ 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
- PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
- VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
- READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
- WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
- EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
- PAGE_TABLE_BLOCK_SIZE,
- adev->vm_manager.block_size - 9);
+ PAGE_TABLE_BLOCK_SIZE,
+ block_size);
/* Send no-retry XNACK on fault to suppress VM fault storm. */
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
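Note: this gfxhub hunk (and the matching mmhub_v1_0 hunk later in the diff) applies one small computation before writing VM_CONTEXT1_CNTL: when translate_further is set, the programmed page-table depth drops by one and the block size is used as-is; otherwise the depth is kept and nine is subtracted from the block size. As a reading aid only, here is a minimal standalone sketch of that adjustment; the struct, field names and example values are simplified stand-ins, not the driver's real definitions.

	/*
	 * Illustrative-only sketch of the depth/block-size adjustment the
	 * hub setup code performs.  Simplified stand-in types, not amdgpu's.
	 */
	#include <stdbool.h>
	#include <stdio.h>

	struct vm_cfg {
		unsigned num_level;     /* levels reported by the VM manager */
		unsigned block_size;    /* VM block size as tracked by the driver */
		bool translate_further;
	};

	static void hub_depth_and_block(const struct vm_cfg *cfg,
					unsigned *depth, unsigned *block)
	{
		*depth = cfg->num_level;
		*block = cfg->block_size;
		if (cfg->translate_further)
			*depth -= 1;    /* one level fewer is programmed */
		else
			*block -= 9;    /* otherwise block_size - 9 is programmed */
	}

	int main(void)
	{
		/* hypothetical example values, purely for illustration */
		struct vm_cfg cfg = { .num_level = 2, .block_size = 9,
				      .translate_further = true };
		unsigned depth, block;

		hub_depth_and_block(&cfg, &depth, &block);
		printf("PAGE_TABLE_DEPTH=%u PAGE_TABLE_BLOCK_SIZE=%u\n",
		       depth, block);
		return 0;
	}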
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index 468281f10e8d..8e28270d1ea9 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -222,8 +222,8 @@ static void gmc_v6_0_vram_gtt_location(struct amdgpu_device *adev,
u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
base <<= 24;
- amdgpu_vram_location(adev, &adev->mc, base);
- amdgpu_gart_location(adev, mc);
+ amdgpu_device_vram_location(adev, &adev->mc, base);
+ amdgpu_device_gart_location(adev, mc);
}
static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
@@ -395,10 +395,10 @@ static uint64_t gmc_v6_0_get_vm_pte_flags(struct amdgpu_device *adev,
return pte_flag;
}
-static uint64_t gmc_v6_0_get_vm_pde(struct amdgpu_device *adev, uint64_t addr)
+static void gmc_v6_0_get_vm_pde(struct amdgpu_device *adev, int level,
+ uint64_t *addr, uint64_t *flags)
{
- BUG_ON(addr & 0xFFFFFF0000000FFFULL);
- return addr;
+ BUG_ON(*addr & 0xFFFFFF0000000FFFULL);
}
static void gmc_v6_0_set_fault_enable_default(struct amdgpu_device *adev,
@@ -956,7 +956,7 @@ static int gmc_v6_0_resume(void *handle)
if (r)
return r;
- amdgpu_vm_reset_all_ids(adev);
+ amdgpu_vmid_reset_all(adev);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 68a85051f4b7..86e9d682c59e 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -67,12 +67,12 @@ static void gmc_v7_0_init_golden_registers(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
case CHIP_TOPAZ:
- amdgpu_program_register_sequence(adev,
- iceland_mgcg_cgcg_init,
- ARRAY_SIZE(iceland_mgcg_cgcg_init));
- amdgpu_program_register_sequence(adev,
- golden_settings_iceland_a11,
- ARRAY_SIZE(golden_settings_iceland_a11));
+ amdgpu_device_program_register_sequence(adev,
+ iceland_mgcg_cgcg_init,
+ ARRAY_SIZE(iceland_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ golden_settings_iceland_a11,
+ ARRAY_SIZE(golden_settings_iceland_a11));
break;
default:
break;
@@ -240,8 +240,8 @@ static void gmc_v7_0_vram_gtt_location(struct amdgpu_device *adev,
u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
base <<= 24;
- amdgpu_vram_location(adev, &adev->mc, base);
- amdgpu_gart_location(adev, mc);
+ amdgpu_device_vram_location(adev, &adev->mc, base);
+ amdgpu_device_gart_location(adev, mc);
}
/**
@@ -480,10 +480,10 @@ static uint64_t gmc_v7_0_get_vm_pte_flags(struct amdgpu_device *adev,
return pte_flag;
}
-static uint64_t gmc_v7_0_get_vm_pde(struct amdgpu_device *adev, uint64_t addr)
+static void gmc_v7_0_get_vm_pde(struct amdgpu_device *adev, int level,
+ uint64_t *addr, uint64_t *flags)
{
- BUG_ON(addr & 0xFFFFFF0000000FFFULL);
- return addr;
+ BUG_ON(*addr & 0xFFFFFF0000000FFFULL);
}
/**
@@ -1107,7 +1107,7 @@ static int gmc_v7_0_resume(void *handle)
if (r)
return r;
- amdgpu_vm_reset_all_ids(adev);
+ amdgpu_vmid_reset_all(adev);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 46ec97e70e5c..9a813d834f1a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -120,44 +120,44 @@ static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
case CHIP_FIJI:
- amdgpu_program_register_sequence(adev,
- fiji_mgcg_cgcg_init,
- ARRAY_SIZE(fiji_mgcg_cgcg_init));
- amdgpu_program_register_sequence(adev,
- golden_settings_fiji_a10,
- ARRAY_SIZE(golden_settings_fiji_a10));
+ amdgpu_device_program_register_sequence(adev,
+ fiji_mgcg_cgcg_init,
+ ARRAY_SIZE(fiji_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ golden_settings_fiji_a10,
+ ARRAY_SIZE(golden_settings_fiji_a10));
break;
case CHIP_TONGA:
- amdgpu_program_register_sequence(adev,
- tonga_mgcg_cgcg_init,
- ARRAY_SIZE(tonga_mgcg_cgcg_init));
- amdgpu_program_register_sequence(adev,
- golden_settings_tonga_a11,
- ARRAY_SIZE(golden_settings_tonga_a11));
+ amdgpu_device_program_register_sequence(adev,
+ tonga_mgcg_cgcg_init,
+ ARRAY_SIZE(tonga_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ golden_settings_tonga_a11,
+ ARRAY_SIZE(golden_settings_tonga_a11));
break;
case CHIP_POLARIS11:
case CHIP_POLARIS12:
- amdgpu_program_register_sequence(adev,
- golden_settings_polaris11_a11,
- ARRAY_SIZE(golden_settings_polaris11_a11));
+ amdgpu_device_program_register_sequence(adev,
+ golden_settings_polaris11_a11,
+ ARRAY_SIZE(golden_settings_polaris11_a11));
break;
case CHIP_POLARIS10:
- amdgpu_program_register_sequence(adev,
- golden_settings_polaris10_a11,
- ARRAY_SIZE(golden_settings_polaris10_a11));
+ amdgpu_device_program_register_sequence(adev,
+ golden_settings_polaris10_a11,
+ ARRAY_SIZE(golden_settings_polaris10_a11));
break;
case CHIP_CARRIZO:
- amdgpu_program_register_sequence(adev,
- cz_mgcg_cgcg_init,
- ARRAY_SIZE(cz_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ cz_mgcg_cgcg_init,
+ ARRAY_SIZE(cz_mgcg_cgcg_init));
break;
case CHIP_STONEY:
- amdgpu_program_register_sequence(adev,
- stoney_mgcg_cgcg_init,
- ARRAY_SIZE(stoney_mgcg_cgcg_init));
- amdgpu_program_register_sequence(adev,
- golden_settings_stoney_common,
- ARRAY_SIZE(golden_settings_stoney_common));
+ amdgpu_device_program_register_sequence(adev,
+ stoney_mgcg_cgcg_init,
+ ARRAY_SIZE(stoney_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ golden_settings_stoney_common,
+ ARRAY_SIZE(golden_settings_stoney_common));
break;
default:
break;
@@ -405,8 +405,8 @@ static void gmc_v8_0_vram_gtt_location(struct amdgpu_device *adev,
base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
base <<= 24;
- amdgpu_vram_location(adev, &adev->mc, base);
- amdgpu_gart_location(adev, mc);
+ amdgpu_device_vram_location(adev, &adev->mc, base);
+ amdgpu_device_gart_location(adev, mc);
}
/**
@@ -677,10 +677,10 @@ static uint64_t gmc_v8_0_get_vm_pte_flags(struct amdgpu_device *adev,
return pte_flag;
}
-static uint64_t gmc_v8_0_get_vm_pde(struct amdgpu_device *adev, uint64_t addr)
+static void gmc_v8_0_get_vm_pde(struct amdgpu_device *adev, int level,
+ uint64_t *addr, uint64_t *flags)
{
- BUG_ON(addr & 0xFFFFFF0000000FFFULL);
- return addr;
+ BUG_ON(*addr & 0xFFFFFF0000000FFFULL);
}
/**
@@ -1212,7 +1212,7 @@ static int gmc_v8_0_resume(void *handle)
if (r)
return r;
- amdgpu_vm_reset_all_ids(adev);
+ amdgpu_vmid_reset_all(adev);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index cc972153d401..eb8b1bb66389 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -25,7 +25,6 @@
#include "gmc_v9_0.h"
#include "amdgpu_atomfirmware.h"
-#include "soc15ip.h"
#include "hdp/hdp_4_0_offset.h"
#include "hdp/hdp_4_0_sh_mask.h"
#include "gc/gc_9_0_sh_mask.h"
@@ -35,11 +34,10 @@
#include "mmhub/mmhub_1_0_offset.h"
#include "athub/athub_1_0_offset.h"
+#include "soc15.h"
#include "soc15_common.h"
#include "umc/umc_6_0_sh_mask.h"
-#include "nbio_v6_1.h"
-#include "nbio_v7_0.h"
#include "gfxhub_v1_0.h"
#include "mmhub_v1_0.h"
@@ -74,16 +72,16 @@ static const u32 golden_settings_vega10_hdp[] =
0xf6e, 0x0fffffff, 0x00000000,
};
-static const u32 golden_settings_mmhub_1_0_0[] =
+static const struct soc15_reg_golden golden_settings_mmhub_1_0_0[] =
{
- SOC15_REG_OFFSET(MMHUB, 0, mmDAGB1_WRCLI2), 0x00000007, 0xfe5fe0fa,
- SOC15_REG_OFFSET(MMHUB, 0, mmMMEA1_DRAM_WR_CLI2GRP_MAP0), 0x00000030, 0x55555565
+ SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmDAGB1_WRCLI2, 0x00000007, 0xfe5fe0fa),
+ SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmMMEA1_DRAM_WR_CLI2GRP_MAP0, 0x00000030, 0x55555565)
};
-static const u32 golden_settings_athub_1_0_0[] =
+static const struct soc15_reg_golden golden_settings_athub_1_0_0[] =
{
- SOC15_REG_OFFSET(ATHUB, 0, mmRPB_ARB_CNTL), 0x0000ff00, 0x00000800,
- SOC15_REG_OFFSET(ATHUB, 0, mmRPB_ARB_CNTL2), 0x00ff00ff, 0x00080008
+ SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL, 0x0000ff00, 0x00000800),
+ SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL2, 0x00ff00ff, 0x00080008)
};
/* Ecc related register addresses, (BASE + reg offset) */
@@ -250,7 +248,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
- struct amdgpu_vmhub *hub = &adev->vmhub[entry->vm_id_src];
+ struct amdgpu_vmhub *hub = &adev->vmhub[entry->vmid_src];
uint32_t status = 0;
u64 addr;
@@ -264,9 +262,9 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
if (printk_ratelimit()) {
dev_err(adev->dev,
- "[%s] VMC page fault (src_id:%u ring:%u vm_id:%u pas_id:%u)\n",
- entry->vm_id_src ? "mmhub" : "gfxhub",
- entry->src_id, entry->ring_id, entry->vm_id,
+ "[%s] VMC page fault (src_id:%u ring:%u vmid:%u pas_id:%u)\n",
+ entry->vmid_src ? "mmhub" : "gfxhub",
+ entry->src_id, entry->ring_id, entry->vmid,
entry->pas_id);
dev_err(adev->dev, " at page 0x%016llx from %d\n",
addr, entry->client_id);
@@ -290,13 +288,13 @@ static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
adev->mc.vm_fault.funcs = &gmc_v9_0_irq_funcs;
}
-static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vm_id)
+static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid)
{
u32 req = 0;
- /* invalidate using legacy mode on vm_id*/
+ /* invalidate using legacy mode on vmid */
req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
- PER_VMID_INVALIDATE_REQ, 1 << vm_id);
+ PER_VMID_INVALIDATE_REQ, 1 << vmid);
req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, 0);
req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
@@ -332,10 +330,7 @@ static void gmc_v9_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
unsigned i, j;
/* flush hdp cache */
- if (adev->flags & AMD_IS_APU)
- nbio_v7_0_hdp_flush(adev);
- else
- nbio_v6_1_hdp_flush(adev);
+ adev->nbio_funcs->hdp_flush(adev);
spin_lock(&adev->mc.invalidate_lock);
@@ -474,11 +469,28 @@ static uint64_t gmc_v9_0_get_vm_pte_flags(struct amdgpu_device *adev,
return pte_flag;
}
-static u64 gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, u64 addr)
+static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
+ uint64_t *addr, uint64_t *flags)
{
- addr = adev->vm_manager.vram_base_offset + addr - adev->mc.vram_start;
- BUG_ON(addr & 0xFFFF00000000003FULL);
- return addr;
+ if (!(*flags & AMDGPU_PDE_PTE))
+ *addr = adev->vm_manager.vram_base_offset + *addr -
+ adev->mc.vram_start;
+ BUG_ON(*addr & 0xFFFF00000000003FULL);
+
+ if (!adev->mc.translate_further)
+ return;
+
+ if (level == AMDGPU_VM_PDB1) {
+ /* Set the block fragment size */
+ if (!(*flags & AMDGPU_PDE_PTE))
+ *flags |= AMDGPU_PDE_BFS(0x9);
+
+ } else if (level == AMDGPU_VM_PDB0) {
+ if (*flags & AMDGPU_PDE_PTE)
+ *flags &= ~AMDGPU_PDE_PTE;
+ else
+ *flags |= AMDGPU_PTE_TF;
+ }
}
static const struct amdgpu_gart_funcs gmc_v9_0_gart_funcs = {
@@ -502,6 +514,14 @@ static int gmc_v9_0_early_init(void *handle)
gmc_v9_0_set_gart_funcs(adev);
gmc_v9_0_set_irq_funcs(adev);
+ adev->mc.shared_aperture_start = 0x2000000000000000ULL;
+ adev->mc.shared_aperture_end =
+ adev->mc.shared_aperture_start + (4ULL << 30) - 1;
+ adev->mc.private_aperture_start =
+ adev->mc.shared_aperture_end + 1;
+ adev->mc.private_aperture_end =
+ adev->mc.private_aperture_start + (4ULL << 30) - 1;
+
return 0;
}
@@ -633,8 +653,8 @@ static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
u64 base = 0;
if (!amdgpu_sriov_vf(adev))
base = mmhub_v1_0_get_fb_location(adev);
- amdgpu_vram_location(adev, &adev->mc, base);
- amdgpu_gart_location(adev, mc);
+ amdgpu_device_vram_location(adev, &adev->mc, base);
+ amdgpu_device_gart_location(adev, mc);
/* base offset of vram pages */
if (adev->flags & AMD_IS_APU)
adev->vm_manager.vram_base_offset = gfxhub_v1_0_get_mc_fb_offset(adev);
@@ -700,8 +720,7 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
/* size in MB on si */
adev->mc.mc_vram_size =
- ((adev->flags & AMD_IS_APU) ? nbio_v7_0_get_memsize(adev) :
- nbio_v6_1_get_memsize(adev)) * 1024ULL * 1024ULL;
+ adev->nbio_funcs->get_memsize(adev) * 1024ULL * 1024ULL;
adev->mc.real_vram_size = adev->mc.mc_vram_size;
if (!(adev->flags & AMD_IS_APU)) {
@@ -769,11 +788,14 @@ static int gmc_v9_0_sw_init(void *handle)
switch (adev->asic_type) {
case CHIP_RAVEN:
adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
- if (adev->rev_id == 0x0 || adev->rev_id == 0x1)
+ if (adev->rev_id == 0x0 || adev->rev_id == 0x1) {
amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
- else
- /* vm_size is 64GB for legacy 2-level page support */
- amdgpu_vm_adjust_size(adev, 64, 9, 1, 48);
+ } else {
+ /* vm_size is 128TB + 512GB for legacy 3-level page support */
+ amdgpu_vm_adjust_size(adev, 128 * 1024 + 512, 9, 2, 48);
+ adev->mc.translate_further =
+ adev->vm_manager.num_level > 1;
+ }
break;
case CHIP_VEGA10:
/* XXX Don't know how to get VRAM type yet. */
@@ -883,17 +905,18 @@ static int gmc_v9_0_sw_fini(void *handle)
static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
{
+
switch (adev->asic_type) {
case CHIP_VEGA10:
- amdgpu_program_register_sequence(adev,
+ soc15_program_register_sequence(adev,
golden_settings_mmhub_1_0_0,
ARRAY_SIZE(golden_settings_mmhub_1_0_0));
- amdgpu_program_register_sequence(adev,
+ soc15_program_register_sequence(adev,
golden_settings_athub_1_0_0,
ARRAY_SIZE(golden_settings_athub_1_0_0));
break;
case CHIP_RAVEN:
- amdgpu_program_register_sequence(adev,
+ soc15_program_register_sequence(adev,
golden_settings_athub_1_0_0,
ARRAY_SIZE(golden_settings_athub_1_0_0));
break;
@@ -913,9 +936,9 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
bool value;
u32 tmp;
- amdgpu_program_register_sequence(adev,
- golden_settings_vega10_hdp,
- ARRAY_SIZE(golden_settings_vega10_hdp));
+ amdgpu_device_program_register_sequence(adev,
+ golden_settings_vega10_hdp,
+ ARRAY_SIZE(golden_settings_vega10_hdp));
if (adev->gart.robj == NULL) {
dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
@@ -948,10 +971,7 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp);
/* After HDP is initialized, flush HDP.*/
- if (adev->flags & AMD_IS_APU)
- nbio_v7_0_hdp_flush(adev);
- else
- nbio_v6_1_hdp_flush(adev);
+ adev->nbio_funcs->hdp_flush(adev);
if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
value = false;
@@ -1036,7 +1056,7 @@ static int gmc_v9_0_resume(void *handle)
if (r)
return r;
- amdgpu_vm_reset_all_ids(adev);
+ amdgpu_vmid_reset_all(adev);
return 0;
}
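Note: across the gmc_v6/v7/v8/v9 hunks, get_vm_pde changes from returning an adjusted address into a level-aware callback that updates the address and a flags word through pointers; the gfx_v9_0 flush hunk earlier shows the caller pre-seeding AMDGPU_PTE_VALID and OR-ing the flags back into the PDE. Below is a self-contained sketch of that caller-side contract only; the flag value, the no-op hook and the example address are placeholders, not the real amdgpu definitions.

	/*
	 * Caller-side shape of the reworked get_vm_pde contract, reduced to a
	 * standalone sketch.  PTE_VALID and demo_get_vm_pde are stand-ins,
	 * not the real AMDGPU_PTE_VALID or a real GMC implementation.
	 */
	#include <stdint.h>
	#include <stdio.h>

	#define PTE_VALID (1ULL << 0)   /* stand-in for AMDGPU_PTE_VALID */

	struct dev;                     /* opaque device handle */

	/* new-style hook: level-aware, updates address and flags in place */
	static void demo_get_vm_pde(struct dev *d, int level,
				    uint64_t *addr, uint64_t *flags)
	{
		(void)d; (void)level;
		(void)addr; (void)flags;  /* gmc_v6/7/8 leave both untouched */
	}

	int main(void)
	{
		uint64_t pd_addr = 0x123456000ULL;  /* hypothetical directory address */
		uint64_t flags = PTE_VALID;         /* caller pre-seeds VALID ... */

		demo_get_vm_pde(NULL, -1, &pd_addr, &flags);
		pd_addr |= flags;                   /* ... then folds the flags back in */
		printf("PDE value: 0x%llx\n", (unsigned long long)pd_addr);
		return 0;
	}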
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
index bd592cb39f37..c4e4be3dd31d 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
@@ -259,7 +259,7 @@ static void iceland_ih_decode_iv(struct amdgpu_device *adev,
entry->src_id = dw[0] & 0xff;
entry->src_data[0] = dw[1] & 0xfffffff;
entry->ring_id = dw[2] & 0xff;
- entry->vm_id = (dw[2] >> 8) & 0xff;
+ entry->vmid = (dw[2] >> 8) & 0xff;
entry->pas_id = (dw[2] >> 16) & 0xffff;
/* wptr/rptr are in bytes! */
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
index f33d1ffdb20b..d9e9e52a0def 100644
--- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
@@ -1682,8 +1682,8 @@ static void kv_dpm_powergate_uvd(void *handle, bool gate)
if (gate) {
/* stop the UVD block */
- ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
- AMD_PG_STATE_GATE);
+ ret = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+ AMD_PG_STATE_GATE);
kv_update_uvd_dpm(adev, gate);
if (pi->caps_uvd_pg)
/* power off the UVD block */
@@ -1695,8 +1695,8 @@ static void kv_dpm_powergate_uvd(void *handle, bool gate)
/* re-init the UVD block */
kv_update_uvd_dpm(adev, gate);
- ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
- AMD_PG_STATE_UNGATE);
+ ret = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+ AMD_PG_STATE_UNGATE);
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
index bd160d8700e0..ffd5b7ee49c4 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
@@ -23,7 +23,6 @@
#include "amdgpu.h"
#include "mmhub_v1_0.h"
-#include "soc15ip.h"
#include "mmhub/mmhub_1_0_offset.h"
#include "mmhub/mmhub_1_0_sh_mask.h"
#include "mmhub/mmhub_1_0_default.h"
@@ -156,10 +155,15 @@ static void mmhub_v1_0_init_cache_regs(struct amdgpu_device *adev)
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL2, tmp);
- tmp = mmVM_L2_CNTL3_DEFAULT;
- tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 9);
- tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
- WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL3, tmp);
+ if (adev->mc.translate_further) {
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 12);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
+ L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
+ } else {
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 9);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
+ L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
+ }
tmp = mmVM_L2_CNTL4_DEFAULT;
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 0);
@@ -197,32 +201,40 @@ static void mmhub_v1_0_disable_identity_aperture(struct amdgpu_device *adev)
static void mmhub_v1_0_setup_vmid_config(struct amdgpu_device *adev)
{
- int i;
+ unsigned num_level, block_size;
uint32_t tmp;
+ int i;
+
+ num_level = adev->vm_manager.num_level;
+ block_size = adev->vm_manager.block_size;
+ if (adev->mc.translate_further)
+ num_level -= 1;
+ else
+ block_size -= 9;
for (i = 0; i <= 14; i++) {
tmp = RREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_CNTL, i);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH,
+ num_level);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
- ENABLE_CONTEXT, 1);
- tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
- PAGE_TABLE_DEPTH, adev->vm_manager.num_level);
- tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
- RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
- DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT,
+ 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
- PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
- VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
- READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
- WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
- EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
- PAGE_TABLE_BLOCK_SIZE,
- adev->vm_manager.block_size - 9);
+ PAGE_TABLE_BLOCK_SIZE,
+ block_size);
/* Send no-retry XNACK on fault to suppress VM fault storm. */
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
index ad9054e3903c..271452d3999a 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
@@ -22,7 +22,6 @@
*/
#include "amdgpu.h"
-#include "soc15ip.h"
#include "nbio/nbio_6_1_offset.h"
#include "nbio/nbio_6_1_sh_mask.h"
#include "gc/gc_9_0_offset.h"
@@ -254,7 +253,7 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
}
/* Trigger recovery due to world switch failure */
- amdgpu_gpu_recover(adev, NULL);
+ amdgpu_device_gpu_recover(adev, NULL, false);
}
static int xgpu_ai_set_mailbox_rcv_irq(struct amdgpu_device *adev,
@@ -278,7 +277,7 @@ static int xgpu_ai_mailbox_rcv_irq(struct amdgpu_device *adev,
int r;
/* trigger gpu-reset by hypervisor only if TDR disabled */
- if (amdgpu_lockup_timeout == 0) {
+ if (!amdgpu_gpu_recovery) {
/* see what event we get */
r = xgpu_ai_mailbox_rcv_msg(adev, IDH_FLR_NOTIFICATION);
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
index df52824c0cd4..9fc1c37344ce 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
@@ -279,32 +279,32 @@ void xgpu_vi_init_golden_registers(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
case CHIP_FIJI:
- amdgpu_program_register_sequence(adev,
- xgpu_fiji_mgcg_cgcg_init,
- ARRAY_SIZE(
- xgpu_fiji_mgcg_cgcg_init));
- amdgpu_program_register_sequence(adev,
- xgpu_fiji_golden_settings_a10,
- ARRAY_SIZE(
- xgpu_fiji_golden_settings_a10));
- amdgpu_program_register_sequence(adev,
- xgpu_fiji_golden_common_all,
- ARRAY_SIZE(
- xgpu_fiji_golden_common_all));
+ amdgpu_device_program_register_sequence(adev,
+ xgpu_fiji_mgcg_cgcg_init,
+ ARRAY_SIZE(
+ xgpu_fiji_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ xgpu_fiji_golden_settings_a10,
+ ARRAY_SIZE(
+ xgpu_fiji_golden_settings_a10));
+ amdgpu_device_program_register_sequence(adev,
+ xgpu_fiji_golden_common_all,
+ ARRAY_SIZE(
+ xgpu_fiji_golden_common_all));
break;
case CHIP_TONGA:
- amdgpu_program_register_sequence(adev,
- xgpu_tonga_mgcg_cgcg_init,
- ARRAY_SIZE(
- xgpu_tonga_mgcg_cgcg_init));
- amdgpu_program_register_sequence(adev,
- xgpu_tonga_golden_settings_a11,
- ARRAY_SIZE(
- xgpu_tonga_golden_settings_a11));
- amdgpu_program_register_sequence(adev,
- xgpu_tonga_golden_common_all,
- ARRAY_SIZE(
- xgpu_tonga_golden_common_all));
+ amdgpu_device_program_register_sequence(adev,
+ xgpu_tonga_mgcg_cgcg_init,
+ ARRAY_SIZE(
+ xgpu_tonga_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ xgpu_tonga_golden_settings_a11,
+ ARRAY_SIZE(
+ xgpu_tonga_golden_settings_a11));
+ amdgpu_device_program_register_sequence(adev,
+ xgpu_tonga_golden_common_all,
+ ARRAY_SIZE(
+ xgpu_tonga_golden_common_all));
break;
default:
BUG_ON("Doesn't support chip type.\n");
@@ -521,7 +521,7 @@ static void xgpu_vi_mailbox_flr_work(struct work_struct *work)
}
/* Trigger recovery due to world switch failure */
- amdgpu_gpu_recover(adev, NULL);
+ amdgpu_device_gpu_recover(adev, NULL, false);
}
static int xgpu_vi_set_mailbox_rcv_irq(struct amdgpu_device *adev,
@@ -545,7 +545,7 @@ static int xgpu_vi_mailbox_rcv_irq(struct amdgpu_device *adev,
int r;
/* trigger gpu-reset by hypervisor only if TDR disabled */
- if (amdgpu_lockup_timeout == 0) {
+ if (!amdgpu_gpu_recovery) {
/* see what event we get */
r = xgpu_vi_mailbox_rcv_msg(adev, IDH_FLR_NOTIFICATION);
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
index 76db711097c7..d4da663d5eb0 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
@@ -24,7 +24,6 @@
#include "amdgpu_atombios.h"
#include "nbio_v6_1.h"
-#include "soc15ip.h"
#include "nbio/nbio_6_1_default.h"
#include "nbio/nbio_6_1_offset.h"
#include "nbio/nbio_6_1_sh_mask.h"
@@ -34,7 +33,7 @@
#define smnPCIE_CNTL2 0x11180070
#define smnPCIE_CONFIG_CNTL 0x11180044
-u32 nbio_v6_1_get_rev_id(struct amdgpu_device *adev)
+static u32 nbio_v6_1_get_rev_id(struct amdgpu_device *adev)
{
u32 tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0);
@@ -44,19 +43,7 @@ u32 nbio_v6_1_get_rev_id(struct amdgpu_device *adev)
return tmp;
}
-u32 nbio_v6_1_get_atombios_scratch_regs(struct amdgpu_device *adev,
- uint32_t idx)
-{
- return RREG32_SOC15_OFFSET(NBIO, 0, mmBIOS_SCRATCH_0, idx);
-}
-
-void nbio_v6_1_set_atombios_scratch_regs(struct amdgpu_device *adev,
- uint32_t idx, uint32_t val)
-{
- WREG32_SOC15_OFFSET(NBIO, 0, mmBIOS_SCRATCH_0, idx, val);
-}
-
-void nbio_v6_1_mc_access_enable(struct amdgpu_device *adev, bool enable)
+static void nbio_v6_1_mc_access_enable(struct amdgpu_device *adev, bool enable)
{
if (enable)
WREG32_SOC15(NBIO, 0, mmBIF_FB_EN,
@@ -66,26 +53,23 @@ void nbio_v6_1_mc_access_enable(struct amdgpu_device *adev, bool enable)
WREG32_SOC15(NBIO, 0, mmBIF_FB_EN, 0);
}
-void nbio_v6_1_hdp_flush(struct amdgpu_device *adev)
+static void nbio_v6_1_hdp_flush(struct amdgpu_device *adev)
{
WREG32_SOC15_NO_KIQ(NBIO, 0, mmBIF_BX_PF0_HDP_MEM_COHERENCY_FLUSH_CNTL, 0);
}
-u32 nbio_v6_1_get_memsize(struct amdgpu_device *adev)
+static u32 nbio_v6_1_get_memsize(struct amdgpu_device *adev)
{
return RREG32_SOC15(NBIO, 0, mmRCC_PF_0_0_RCC_CONFIG_MEMSIZE);
}
-static const u32 nbio_sdma_doorbell_range_reg[] =
-{
- SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA0_DOORBELL_RANGE),
- SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA1_DOORBELL_RANGE)
-};
-
-void nbio_v6_1_sdma_doorbell_range(struct amdgpu_device *adev, int instance,
+static void nbio_v6_1_sdma_doorbell_range(struct amdgpu_device *adev, int instance,
bool use_doorbell, int doorbell_index)
{
- u32 doorbell_range = RREG32(nbio_sdma_doorbell_range_reg[instance]);
+ u32 reg = instance == 0 ? SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA0_DOORBELL_RANGE) :
+ SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA1_DOORBELL_RANGE);
+
+ u32 doorbell_range = RREG32(reg);
if (use_doorbell) {
doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, OFFSET, doorbell_index);
@@ -93,17 +77,18 @@ void nbio_v6_1_sdma_doorbell_range(struct amdgpu_device *adev, int instance,
} else
doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, 0);
- WREG32(nbio_sdma_doorbell_range_reg[instance], doorbell_range);
+ WREG32(reg, doorbell_range);
+
}
-void nbio_v6_1_enable_doorbell_aperture(struct amdgpu_device *adev,
- bool enable)
+static void nbio_v6_1_enable_doorbell_aperture(struct amdgpu_device *adev,
+ bool enable)
{
WREG32_FIELD15(NBIO, 0, RCC_PF_0_0_RCC_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, enable ? 1 : 0);
}
-void nbio_v6_1_enable_doorbell_selfring_aperture(struct amdgpu_device *adev,
- bool enable)
+static void nbio_v6_1_enable_doorbell_selfring_aperture(struct amdgpu_device *adev,
+ bool enable)
{
u32 tmp = 0;
@@ -122,8 +107,8 @@ void nbio_v6_1_enable_doorbell_selfring_aperture(struct amdgpu_device *adev,
}
-void nbio_v6_1_ih_doorbell_range(struct amdgpu_device *adev,
- bool use_doorbell, int doorbell_index)
+static void nbio_v6_1_ih_doorbell_range(struct amdgpu_device *adev,
+ bool use_doorbell, int doorbell_index)
{
u32 ih_doorbell_range = RREG32_SOC15(NBIO, 0 , mmBIF_IH_DOORBELL_RANGE);
@@ -136,7 +121,7 @@ void nbio_v6_1_ih_doorbell_range(struct amdgpu_device *adev,
WREG32_SOC15(NBIO, 0, mmBIF_IH_DOORBELL_RANGE, ih_doorbell_range);
}
-void nbio_v6_1_ih_control(struct amdgpu_device *adev)
+static void nbio_v6_1_ih_control(struct amdgpu_device *adev)
{
u32 interrupt_cntl;
@@ -152,8 +137,8 @@ void nbio_v6_1_ih_control(struct amdgpu_device *adev)
WREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL, interrupt_cntl);
}
-void nbio_v6_1_update_medium_grain_clock_gating(struct amdgpu_device *adev,
- bool enable)
+static void nbio_v6_1_update_medium_grain_clock_gating(struct amdgpu_device *adev,
+ bool enable)
{
uint32_t def, data;
@@ -180,8 +165,8 @@ void nbio_v6_1_update_medium_grain_clock_gating(struct amdgpu_device *adev,
WREG32_PCIE(smnCPM_CONTROL, data);
}
-void nbio_v6_1_update_medium_grain_light_sleep(struct amdgpu_device *adev,
- bool enable)
+static void nbio_v6_1_update_medium_grain_light_sleep(struct amdgpu_device *adev,
+ bool enable)
{
uint32_t def, data;
@@ -200,7 +185,8 @@ void nbio_v6_1_update_medium_grain_light_sleep(struct amdgpu_device *adev,
WREG32_PCIE(smnPCIE_CNTL2, data);
}
-void nbio_v6_1_get_clockgating_state(struct amdgpu_device *adev, u32 *flags)
+static void nbio_v6_1_get_clockgating_state(struct amdgpu_device *adev,
+ u32 *flags)
{
int data;
@@ -215,9 +201,27 @@ void nbio_v6_1_get_clockgating_state(struct amdgpu_device *adev, u32 *flags)
*flags |= AMD_CG_SUPPORT_BIF_LS;
}
-const struct nbio_hdp_flush_reg nbio_v6_1_hdp_flush_reg = {
- .hdp_flush_req_offset = SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_GPU_HDP_FLUSH_REQ),
- .hdp_flush_done_offset = SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_GPU_HDP_FLUSH_DONE),
+static u32 nbio_v6_1_get_hdp_flush_req_offset(struct amdgpu_device *adev)
+{
+ return SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_GPU_HDP_FLUSH_REQ);
+}
+
+static u32 nbio_v6_1_get_hdp_flush_done_offset(struct amdgpu_device *adev)
+{
+ return SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_GPU_HDP_FLUSH_DONE);
+}
+
+static u32 nbio_v6_1_get_pcie_index_offset(struct amdgpu_device *adev)
+{
+ return SOC15_REG_OFFSET(NBIO, 0, mmPCIE_INDEX);
+}
+
+static u32 nbio_v6_1_get_pcie_data_offset(struct amdgpu_device *adev)
+{
+ return SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA);
+}
+
+static const struct nbio_hdp_flush_reg nbio_v6_1_hdp_flush_reg = {
.ref_and_mask_cp0 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP0_MASK,
.ref_and_mask_cp1 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP1_MASK,
.ref_and_mask_cp2 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP2_MASK,
@@ -232,12 +236,7 @@ const struct nbio_hdp_flush_reg nbio_v6_1_hdp_flush_reg = {
.ref_and_mask_sdma1 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__SDMA1_MASK
};
-const struct nbio_pcie_index_data nbio_v6_1_pcie_index_data = {
- .index_offset = SOC15_REG_OFFSET(NBIO, 0, mmPCIE_INDEX),
- .data_offset = SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA),
-};
-
-void nbio_v6_1_detect_hw_virt(struct amdgpu_device *adev)
+static void nbio_v6_1_detect_hw_virt(struct amdgpu_device *adev)
{
uint32_t reg;
@@ -254,7 +253,7 @@ void nbio_v6_1_detect_hw_virt(struct amdgpu_device *adev)
}
}
-void nbio_v6_1_init_registers(struct amdgpu_device *adev)
+static void nbio_v6_1_init_registers(struct amdgpu_device *adev)
{
uint32_t def, data;
@@ -265,3 +264,25 @@ void nbio_v6_1_init_registers(struct amdgpu_device *adev)
if (def != data)
WREG32_PCIE(smnPCIE_CONFIG_CNTL, data);
}
+
+const struct amdgpu_nbio_funcs nbio_v6_1_funcs = {
+ .hdp_flush_reg = &nbio_v6_1_hdp_flush_reg,
+ .get_hdp_flush_req_offset = nbio_v6_1_get_hdp_flush_req_offset,
+ .get_hdp_flush_done_offset = nbio_v6_1_get_hdp_flush_done_offset,
+ .get_pcie_index_offset = nbio_v6_1_get_pcie_index_offset,
+ .get_pcie_data_offset = nbio_v6_1_get_pcie_data_offset,
+ .get_rev_id = nbio_v6_1_get_rev_id,
+ .mc_access_enable = nbio_v6_1_mc_access_enable,
+ .hdp_flush = nbio_v6_1_hdp_flush,
+ .get_memsize = nbio_v6_1_get_memsize,
+ .sdma_doorbell_range = nbio_v6_1_sdma_doorbell_range,
+ .enable_doorbell_aperture = nbio_v6_1_enable_doorbell_aperture,
+ .enable_doorbell_selfring_aperture = nbio_v6_1_enable_doorbell_selfring_aperture,
+ .ih_doorbell_range = nbio_v6_1_ih_doorbell_range,
+ .update_medium_grain_clock_gating = nbio_v6_1_update_medium_grain_clock_gating,
+ .update_medium_grain_light_sleep = nbio_v6_1_update_medium_grain_light_sleep,
+ .get_clockgating_state = nbio_v6_1_get_clockgating_state,
+ .ih_control = nbio_v6_1_ih_control,
+ .init_registers = nbio_v6_1_init_registers,
+ .detect_hw_virt = nbio_v6_1_detect_hw_virt,
+};
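Note: this nbio_v6_1 rework (and the nbio_v7_0 rework below) folds the exported per-version functions into one const amdgpu_nbio_funcs table, so call sites such as the gmc_v9_0 hunks above dispatch through adev->nbio_funcs instead of branching on APU versus dGPU. The following toy, self-contained sketch shows only the dispatch pattern; types, names and return values are made up for illustration and are not the driver's real structures or accessors.

	/*
	 * Dispatch-table pattern behind the NBIO rework, shown with toy back
	 * ends.  Everything here is a simplified stand-in for illustration.
	 */
	#include <stdbool.h>
	#include <stdio.h>

	struct dev;                                  /* opaque device handle */

	struct nbio_funcs {
		void (*hdp_flush)(struct dev *d);
		unsigned (*get_memsize)(struct dev *d);  /* size in MB */
	};

	static void v6_1_hdp_flush(struct dev *d) { (void)d; puts("v6.1 HDP flush"); }
	static unsigned v6_1_get_memsize(struct dev *d) { (void)d; return 16384; }

	static void v7_0_hdp_flush(struct dev *d) { (void)d; puts("v7.0 HDP flush"); }
	static unsigned v7_0_get_memsize(struct dev *d) { (void)d; return 4096; }

	static const struct nbio_funcs nbio_v6_1 = { v6_1_hdp_flush, v6_1_get_memsize };
	static const struct nbio_funcs nbio_v7_0 = { v7_0_hdp_flush, v7_0_get_memsize };

	int main(void)
	{
		bool is_apu = true;                 /* decided once at early init */
		const struct nbio_funcs *funcs = is_apu ? &nbio_v7_0 : &nbio_v6_1;

		/* call sites no longer branch per ASIC; they just dispatch */
		funcs->hdp_flush(NULL);
		printf("VRAM: %u MB\n", funcs->get_memsize(NULL));
		return 0;
	}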
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h
index 14ca8d45a46c..0743a6f016f3 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h
@@ -26,30 +26,6 @@
#include "soc15_common.h"
-extern const struct nbio_hdp_flush_reg nbio_v6_1_hdp_flush_reg;
-extern const struct nbio_pcie_index_data nbio_v6_1_pcie_index_data;
-int nbio_v6_1_init(struct amdgpu_device *adev);
-u32 nbio_v6_1_get_atombios_scratch_regs(struct amdgpu_device *adev,
- uint32_t idx);
-void nbio_v6_1_set_atombios_scratch_regs(struct amdgpu_device *adev,
- uint32_t idx, uint32_t val);
-void nbio_v6_1_mc_access_enable(struct amdgpu_device *adev, bool enable);
-void nbio_v6_1_hdp_flush(struct amdgpu_device *adev);
-u32 nbio_v6_1_get_memsize(struct amdgpu_device *adev);
-void nbio_v6_1_sdma_doorbell_range(struct amdgpu_device *adev, int instance,
- bool use_doorbell, int doorbell_index);
-void nbio_v6_1_enable_doorbell_aperture(struct amdgpu_device *adev,
- bool enable);
-void nbio_v6_1_enable_doorbell_selfring_aperture(struct amdgpu_device *adev,
- bool enable);
-void nbio_v6_1_ih_doorbell_range(struct amdgpu_device *adev,
- bool use_doorbell, int doorbell_index);
-void nbio_v6_1_ih_control(struct amdgpu_device *adev);
-u32 nbio_v6_1_get_rev_id(struct amdgpu_device *adev);
-void nbio_v6_1_update_medium_grain_clock_gating(struct amdgpu_device *adev, bool enable);
-void nbio_v6_1_update_medium_grain_light_sleep(struct amdgpu_device *adev, bool enable);
-void nbio_v6_1_get_clockgating_state(struct amdgpu_device *adev, u32 *flags);
-void nbio_v6_1_detect_hw_virt(struct amdgpu_device *adev);
-void nbio_v6_1_init_registers(struct amdgpu_device *adev);
+extern const struct amdgpu_nbio_funcs nbio_v6_1_funcs;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
index 1fb77174e02c..17a9131a4598 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
@@ -24,7 +24,6 @@
#include "amdgpu_atombios.h"
#include "nbio_v7_0.h"
-#include "soc15ip.h"
#include "nbio/nbio_7_0_default.h"
#include "nbio/nbio_7_0_offset.h"
#include "nbio/nbio_7_0_sh_mask.h"
@@ -32,7 +31,10 @@
#define smnNBIF_MGCG_CTRL_LCLK 0x1013a05c
-u32 nbio_v7_0_get_rev_id(struct amdgpu_device *adev)
+#define smnCPM_CONTROL 0x11180460
+#define smnPCIE_CNTL2 0x11180070
+
+static u32 nbio_v7_0_get_rev_id(struct amdgpu_device *adev)
{
u32 tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0);
@@ -42,19 +44,7 @@ u32 nbio_v7_0_get_rev_id(struct amdgpu_device *adev)
return tmp;
}
-u32 nbio_v7_0_get_atombios_scratch_regs(struct amdgpu_device *adev,
- uint32_t idx)
-{
- return RREG32_SOC15_OFFSET(NBIO, 0, mmBIOS_SCRATCH_0, idx);
-}
-
-void nbio_v7_0_set_atombios_scratch_regs(struct amdgpu_device *adev,
- uint32_t idx, uint32_t val)
-{
- WREG32_SOC15_OFFSET(NBIO, 0, mmBIOS_SCRATCH_0, idx, val);
-}
-
-void nbio_v7_0_mc_access_enable(struct amdgpu_device *adev, bool enable)
+static void nbio_v7_0_mc_access_enable(struct amdgpu_device *adev, bool enable)
{
if (enable)
WREG32_SOC15(NBIO, 0, mmBIF_FB_EN,
@@ -63,26 +53,23 @@ void nbio_v7_0_mc_access_enable(struct amdgpu_device *adev, bool enable)
WREG32_SOC15(NBIO, 0, mmBIF_FB_EN, 0);
}
-void nbio_v7_0_hdp_flush(struct amdgpu_device *adev)
+static void nbio_v7_0_hdp_flush(struct amdgpu_device *adev)
{
WREG32_SOC15_NO_KIQ(NBIO, 0, mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0);
}
-u32 nbio_v7_0_get_memsize(struct amdgpu_device *adev)
+static u32 nbio_v7_0_get_memsize(struct amdgpu_device *adev)
{
return RREG32_SOC15(NBIO, 0, mmRCC_CONFIG_MEMSIZE);
}
-static const u32 nbio_sdma_doorbell_range_reg[] =
+static void nbio_v7_0_sdma_doorbell_range(struct amdgpu_device *adev, int instance,
+ bool use_doorbell, int doorbell_index)
{
- SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA0_DOORBELL_RANGE),
- SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA1_DOORBELL_RANGE)
-};
+ u32 reg = instance == 0 ? SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA0_DOORBELL_RANGE) :
+ SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA1_DOORBELL_RANGE);
-void nbio_v7_0_sdma_doorbell_range(struct amdgpu_device *adev, int instance,
- bool use_doorbell, int doorbell_index)
-{
- u32 doorbell_range = RREG32(nbio_sdma_doorbell_range_reg[instance]);
+ u32 doorbell_range = RREG32(reg);
if (use_doorbell) {
doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, OFFSET, doorbell_index);
@@ -90,17 +77,23 @@ void nbio_v7_0_sdma_doorbell_range(struct amdgpu_device *adev, int instance,
} else
doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, 0);
- WREG32(nbio_sdma_doorbell_range_reg[instance], doorbell_range);
+ WREG32(reg, doorbell_range);
}
-void nbio_v7_0_enable_doorbell_aperture(struct amdgpu_device *adev,
- bool enable)
+static void nbio_v7_0_enable_doorbell_aperture(struct amdgpu_device *adev,
+ bool enable)
{
WREG32_FIELD15(NBIO, 0, RCC_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, enable ? 1 : 0);
}
-void nbio_v7_0_ih_doorbell_range(struct amdgpu_device *adev,
- bool use_doorbell, int doorbell_index)
+static void nbio_v7_0_enable_doorbell_selfring_aperture(struct amdgpu_device *adev,
+ bool enable)
+{
+
+}
+
+static void nbio_v7_0_ih_doorbell_range(struct amdgpu_device *adev,
+ bool use_doorbell, int doorbell_index)
{
u32 ih_doorbell_range = RREG32_SOC15(NBIO, 0 , mmBIF_IH_DOORBELL_RANGE);
@@ -130,8 +123,8 @@ static void nbio_7_0_write_syshub_ind_mmr(struct amdgpu_device *adev, uint32_t o
WREG32_SOC15(NBIO, 0, mmSYSHUB_DATA, data);
}
-void nbio_v7_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
- bool enable)
+static void nbio_v7_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
+ bool enable)
{
uint32_t def, data;
@@ -169,7 +162,43 @@ void nbio_v7_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
nbio_7_0_write_syshub_ind_mmr(adev, ixSYSHUB_MMREG_IND_SYSHUB_MGCG_CTRL_SHUBCLK, data);
}
-void nbio_v7_0_ih_control(struct amdgpu_device *adev)
+static void nbio_v7_0_update_medium_grain_light_sleep(struct amdgpu_device *adev,
+ bool enable)
+{
+ uint32_t def, data;
+
+ def = data = RREG32_PCIE(smnPCIE_CNTL2);
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS)) {
+ data |= (PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
+ PCIE_CNTL2__MST_MEM_LS_EN_MASK |
+ PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);
+ } else {
+ data &= ~(PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
+ PCIE_CNTL2__MST_MEM_LS_EN_MASK |
+ PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);
+ }
+
+ if (def != data)
+ WREG32_PCIE(smnPCIE_CNTL2, data);
+}
+
+static void nbio_v7_0_get_clockgating_state(struct amdgpu_device *adev,
+ u32 *flags)
+{
+ int data;
+
+ /* AMD_CG_SUPPORT_BIF_MGCG */
+ data = RREG32_PCIE(smnCPM_CONTROL);
+ if (data & CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK)
+ *flags |= AMD_CG_SUPPORT_BIF_MGCG;
+
+ /* AMD_CG_SUPPORT_BIF_LS */
+ data = RREG32_PCIE(smnPCIE_CNTL2);
+ if (data & PCIE_CNTL2__SLV_MEM_LS_EN_MASK)
+ *flags |= AMD_CG_SUPPORT_BIF_LS;
+}
+
+static void nbio_v7_0_ih_control(struct amdgpu_device *adev)
{
u32 interrupt_cntl;
@@ -185,9 +214,27 @@ void nbio_v7_0_ih_control(struct amdgpu_device *adev)
WREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL, interrupt_cntl);
}
+static u32 nbio_v7_0_get_hdp_flush_req_offset(struct amdgpu_device *adev)
+{
+ return SOC15_REG_OFFSET(NBIO, 0, mmGPU_HDP_FLUSH_REQ);
+}
+
+static u32 nbio_v7_0_get_hdp_flush_done_offset(struct amdgpu_device *adev)
+{
+ return SOC15_REG_OFFSET(NBIO, 0, mmGPU_HDP_FLUSH_DONE);
+}
+
+static u32 nbio_v7_0_get_pcie_index_offset(struct amdgpu_device *adev)
+{
+ return SOC15_REG_OFFSET(NBIO, 0, mmPCIE_INDEX2);
+}
+
+static u32 nbio_v7_0_get_pcie_data_offset(struct amdgpu_device *adev)
+{
+ return SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA2);
+}
+
const struct nbio_hdp_flush_reg nbio_v7_0_hdp_flush_reg = {
- .hdp_flush_req_offset = SOC15_REG_OFFSET(NBIO, 0, mmGPU_HDP_FLUSH_REQ),
- .hdp_flush_done_offset = SOC15_REG_OFFSET(NBIO, 0, mmGPU_HDP_FLUSH_DONE),
.ref_and_mask_cp0 = GPU_HDP_FLUSH_DONE__CP0_MASK,
.ref_and_mask_cp1 = GPU_HDP_FLUSH_DONE__CP1_MASK,
.ref_and_mask_cp2 = GPU_HDP_FLUSH_DONE__CP2_MASK,
@@ -202,7 +249,35 @@ const struct nbio_hdp_flush_reg nbio_v7_0_hdp_flush_reg = {
.ref_and_mask_sdma1 = GPU_HDP_FLUSH_DONE__SDMA1_MASK,
};
-const struct nbio_pcie_index_data nbio_v7_0_pcie_index_data = {
- .index_offset = SOC15_REG_OFFSET(NBIO, 0, mmPCIE_INDEX2),
- .data_offset = SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA2)
+static void nbio_v7_0_detect_hw_virt(struct amdgpu_device *adev)
+{
+ if (is_virtual_machine()) /* passthrough mode excludes sriov mode */
+ adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
+}
+
+static void nbio_v7_0_init_registers(struct amdgpu_device *adev)
+{
+
+}
+
+const struct amdgpu_nbio_funcs nbio_v7_0_funcs = {
+ .hdp_flush_reg = &nbio_v7_0_hdp_flush_reg,
+ .get_hdp_flush_req_offset = nbio_v7_0_get_hdp_flush_req_offset,
+ .get_hdp_flush_done_offset = nbio_v7_0_get_hdp_flush_done_offset,
+ .get_pcie_index_offset = nbio_v7_0_get_pcie_index_offset,
+ .get_pcie_data_offset = nbio_v7_0_get_pcie_data_offset,
+ .get_rev_id = nbio_v7_0_get_rev_id,
+ .mc_access_enable = nbio_v7_0_mc_access_enable,
+ .hdp_flush = nbio_v7_0_hdp_flush,
+ .get_memsize = nbio_v7_0_get_memsize,
+ .sdma_doorbell_range = nbio_v7_0_sdma_doorbell_range,
+ .enable_doorbell_aperture = nbio_v7_0_enable_doorbell_aperture,
+ .enable_doorbell_selfring_aperture = nbio_v7_0_enable_doorbell_selfring_aperture,
+ .ih_doorbell_range = nbio_v7_0_ih_doorbell_range,
+ .update_medium_grain_clock_gating = nbio_v7_0_update_medium_grain_clock_gating,
+ .update_medium_grain_light_sleep = nbio_v7_0_update_medium_grain_light_sleep,
+ .get_clockgating_state = nbio_v7_0_get_clockgating_state,
+ .ih_control = nbio_v7_0_ih_control,
+ .init_registers = nbio_v7_0_init_registers,
+ .detect_hw_virt = nbio_v7_0_detect_hw_virt,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.h b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.h
index df8fa90f40d7..508d549c5029 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.h
@@ -26,24 +26,6 @@
#include "soc15_common.h"
-extern const struct nbio_hdp_flush_reg nbio_v7_0_hdp_flush_reg;
-extern const struct nbio_pcie_index_data nbio_v7_0_pcie_index_data;
-int nbio_v7_0_init(struct amdgpu_device *adev);
-u32 nbio_v7_0_get_atombios_scratch_regs(struct amdgpu_device *adev,
- uint32_t idx);
-void nbio_v7_0_set_atombios_scratch_regs(struct amdgpu_device *adev,
- uint32_t idx, uint32_t val);
-void nbio_v7_0_mc_access_enable(struct amdgpu_device *adev, bool enable);
-void nbio_v7_0_hdp_flush(struct amdgpu_device *adev);
-u32 nbio_v7_0_get_memsize(struct amdgpu_device *adev);
-void nbio_v7_0_sdma_doorbell_range(struct amdgpu_device *adev, int instance,
- bool use_doorbell, int doorbell_index);
-void nbio_v7_0_enable_doorbell_aperture(struct amdgpu_device *adev,
- bool enable);
-void nbio_v7_0_ih_doorbell_range(struct amdgpu_device *adev,
- bool use_doorbell, int doorbell_index);
-void nbio_v7_0_ih_control(struct amdgpu_device *adev);
-u32 nbio_v7_0_get_rev_id(struct amdgpu_device *adev);
-void nbio_v7_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
- bool enable);
+extern const struct amdgpu_nbio_funcs nbio_v7_0_funcs;
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
index 78fe3f2917a0..5a9fe24697f9 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
@@ -30,7 +30,6 @@
#include "soc15_common.h"
#include "psp_v10_0.h"
-#include "soc15ip.h"
#include "mp/mp_10_0_offset.h"
#include "gc/gc_9_1_offset.h"
#include "sdma0/sdma0_4_1_offset.h"
@@ -298,9 +297,10 @@ int psp_v10_0_cmd_submit(struct psp_context *psp,
}
static int
-psp_v10_0_sram_map(unsigned int *sram_offset, unsigned int *sram_addr_reg_offset,
- unsigned int *sram_data_reg_offset,
- enum AMDGPU_UCODE_ID ucode_id)
+psp_v10_0_sram_map(struct amdgpu_device *adev,
+ unsigned int *sram_offset, unsigned int *sram_addr_reg_offset,
+ unsigned int *sram_data_reg_offset,
+ enum AMDGPU_UCODE_ID ucode_id)
{
int ret = 0;
@@ -395,7 +395,7 @@ bool psp_v10_0_compare_sram_data(struct psp_context *psp,
uint32_t *ucode_mem = NULL;
struct amdgpu_device *adev = psp->adev;
- err = psp_v10_0_sram_map(&fw_sram_reg_val, &fw_sram_addr_reg_offset,
+ err = psp_v10_0_sram_map(adev, &fw_sram_reg_val, &fw_sram_addr_reg_offset,
&fw_sram_data_reg_offset, ucode_type);
if (err)
return false;
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
index e75a23d858ef..19bd1934e63d 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
@@ -31,7 +31,6 @@
#include "soc15_common.h"
#include "psp_v3_1.h"
-#include "soc15ip.h"
#include "mp/mp_9_0_offset.h"
#include "mp/mp_9_0_sh_mask.h"
#include "gc/gc_9_0_offset.h"
@@ -410,9 +409,10 @@ int psp_v3_1_cmd_submit(struct psp_context *psp,
}
static int
-psp_v3_1_sram_map(unsigned int *sram_offset, unsigned int *sram_addr_reg_offset,
- unsigned int *sram_data_reg_offset,
- enum AMDGPU_UCODE_ID ucode_id)
+psp_v3_1_sram_map(struct amdgpu_device *adev,
+ unsigned int *sram_offset, unsigned int *sram_addr_reg_offset,
+ unsigned int *sram_data_reg_offset,
+ enum AMDGPU_UCODE_ID ucode_id)
{
int ret = 0;
@@ -507,7 +507,7 @@ bool psp_v3_1_compare_sram_data(struct psp_context *psp,
uint32_t *ucode_mem = NULL;
struct amdgpu_device *adev = psp->adev;
- err = psp_v3_1_sram_map(&fw_sram_reg_val, &fw_sram_addr_reg_offset,
+ err = psp_v3_1_sram_map(adev, &fw_sram_reg_val, &fw_sram_addr_reg_offset,
&fw_sram_data_reg_offset, ucode_type);
if (err)
return false;
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index 121e628e7cdb..d4787ad4d346 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -93,12 +93,12 @@ static void sdma_v2_4_init_golden_registers(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
case CHIP_TOPAZ:
- amdgpu_program_register_sequence(adev,
- iceland_mgcg_cgcg_init,
- ARRAY_SIZE(iceland_mgcg_cgcg_init));
- amdgpu_program_register_sequence(adev,
- golden_settings_iceland_a11,
- ARRAY_SIZE(golden_settings_iceland_a11));
+ amdgpu_device_program_register_sequence(adev,
+ iceland_mgcg_cgcg_init,
+ ARRAY_SIZE(iceland_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ golden_settings_iceland_a11,
+ ARRAY_SIZE(golden_settings_iceland_a11));
break;
default:
break;
@@ -246,15 +246,13 @@ static void sdma_v2_4_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
*/
static void sdma_v2_4_ring_emit_ib(struct amdgpu_ring *ring,
struct amdgpu_ib *ib,
- unsigned vm_id, bool ctx_switch)
+ unsigned vmid, bool ctx_switch)
{
- u32 vmid = vm_id & 0xf;
-
/* IB packet must end on an 8 DW boundary */
sdma_v2_4_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8);
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
- SDMA_PKT_INDIRECT_HEADER_VMID(vmid));
+ SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf));
/* base must be 32 byte aligned */
amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0);
amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
@@ -600,7 +598,7 @@ static int sdma_v2_4_ring_test_ring(struct amdgpu_ring *ring)
u32 tmp;
u64 gpu_addr;
- r = amdgpu_wb_get(adev, &index);
+ r = amdgpu_device_wb_get(adev, &index);
if (r) {
dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
return r;
@@ -613,7 +611,7 @@ static int sdma_v2_4_ring_test_ring(struct amdgpu_ring *ring)
r = amdgpu_ring_alloc(ring, 5);
if (r) {
DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
- amdgpu_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -639,7 +637,7 @@ static int sdma_v2_4_ring_test_ring(struct amdgpu_ring *ring)
ring->idx, tmp);
r = -EINVAL;
}
- amdgpu_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -662,7 +660,7 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout)
u64 gpu_addr;
long r;
- r = amdgpu_wb_get(adev, &index);
+ r = amdgpu_device_wb_get(adev, &index);
if (r) {
dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
return r;
@@ -715,7 +713,7 @@ err1:
amdgpu_ib_free(adev, &ib, NULL);
dma_fence_put(f);
err0:
- amdgpu_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -861,14 +859,14 @@ static void sdma_v2_4_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
* using sDMA (VI).
*/
static void sdma_v2_4_ring_emit_vm_flush(struct amdgpu_ring *ring,
- unsigned vm_id, uint64_t pd_addr)
+ unsigned vmid, uint64_t pd_addr)
{
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
- if (vm_id < 8) {
- amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id));
+ if (vmid < 8) {
+ amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid));
} else {
- amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8));
+ amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8));
}
amdgpu_ring_write(ring, pd_addr >> 12);
@@ -876,7 +874,7 @@ static void sdma_v2_4_ring_emit_vm_flush(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
- amdgpu_ring_write(ring, 1 << vm_id);
+ amdgpu_ring_write(ring, 1 << vmid);
/* wait for flush */
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index c8c93f9dac21..521978c40537 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -192,47 +192,47 @@ static void sdma_v3_0_init_golden_registers(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
case CHIP_FIJI:
- amdgpu_program_register_sequence(adev,
- fiji_mgcg_cgcg_init,
- ARRAY_SIZE(fiji_mgcg_cgcg_init));
- amdgpu_program_register_sequence(adev,
- golden_settings_fiji_a10,
- ARRAY_SIZE(golden_settings_fiji_a10));
+ amdgpu_device_program_register_sequence(adev,
+ fiji_mgcg_cgcg_init,
+ ARRAY_SIZE(fiji_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ golden_settings_fiji_a10,
+ ARRAY_SIZE(golden_settings_fiji_a10));
break;
case CHIP_TONGA:
- amdgpu_program_register_sequence(adev,
- tonga_mgcg_cgcg_init,
- ARRAY_SIZE(tonga_mgcg_cgcg_init));
- amdgpu_program_register_sequence(adev,
- golden_settings_tonga_a11,
- ARRAY_SIZE(golden_settings_tonga_a11));
+ amdgpu_device_program_register_sequence(adev,
+ tonga_mgcg_cgcg_init,
+ ARRAY_SIZE(tonga_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ golden_settings_tonga_a11,
+ ARRAY_SIZE(golden_settings_tonga_a11));
break;
case CHIP_POLARIS11:
case CHIP_POLARIS12:
- amdgpu_program_register_sequence(adev,
- golden_settings_polaris11_a11,
- ARRAY_SIZE(golden_settings_polaris11_a11));
+ amdgpu_device_program_register_sequence(adev,
+ golden_settings_polaris11_a11,
+ ARRAY_SIZE(golden_settings_polaris11_a11));
break;
case CHIP_POLARIS10:
- amdgpu_program_register_sequence(adev,
- golden_settings_polaris10_a11,
- ARRAY_SIZE(golden_settings_polaris10_a11));
+ amdgpu_device_program_register_sequence(adev,
+ golden_settings_polaris10_a11,
+ ARRAY_SIZE(golden_settings_polaris10_a11));
break;
case CHIP_CARRIZO:
- amdgpu_program_register_sequence(adev,
- cz_mgcg_cgcg_init,
- ARRAY_SIZE(cz_mgcg_cgcg_init));
- amdgpu_program_register_sequence(adev,
- cz_golden_settings_a11,
- ARRAY_SIZE(cz_golden_settings_a11));
+ amdgpu_device_program_register_sequence(adev,
+ cz_mgcg_cgcg_init,
+ ARRAY_SIZE(cz_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ cz_golden_settings_a11,
+ ARRAY_SIZE(cz_golden_settings_a11));
break;
case CHIP_STONEY:
- amdgpu_program_register_sequence(adev,
- stoney_mgcg_cgcg_init,
- ARRAY_SIZE(stoney_mgcg_cgcg_init));
- amdgpu_program_register_sequence(adev,
- stoney_golden_settings_a11,
- ARRAY_SIZE(stoney_golden_settings_a11));
+ amdgpu_device_program_register_sequence(adev,
+ stoney_mgcg_cgcg_init,
+ ARRAY_SIZE(stoney_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ stoney_golden_settings_a11,
+ ARRAY_SIZE(stoney_golden_settings_a11));
break;
default:
break;
@@ -355,7 +355,7 @@ static uint64_t sdma_v3_0_ring_get_wptr(struct amdgpu_ring *ring)
struct amdgpu_device *adev = ring->adev;
u32 wptr;
- if (ring->use_doorbell) {
+ if (ring->use_doorbell || ring->use_pollmem) {
/* XXX check if swapping is necessary on BE */
wptr = ring->adev->wb.wb[ring->wptr_offs] >> 2;
} else {
@@ -380,10 +380,13 @@ static void sdma_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
if (ring->use_doorbell) {
u32 *wb = (u32 *)&adev->wb.wb[ring->wptr_offs];
-
/* XXX check if swapping is necessary on BE */
WRITE_ONCE(*wb, (lower_32_bits(ring->wptr) << 2));
WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr) << 2);
+ } else if (ring->use_pollmem) {
+ u32 *wb = (u32 *)&adev->wb.wb[ring->wptr_offs];
+
+ WRITE_ONCE(*wb, (lower_32_bits(ring->wptr) << 2));
} else {
int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;
@@ -414,15 +417,13 @@ static void sdma_v3_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
*/
static void sdma_v3_0_ring_emit_ib(struct amdgpu_ring *ring,
struct amdgpu_ib *ib,
- unsigned vm_id, bool ctx_switch)
+ unsigned vmid, bool ctx_switch)
{
- u32 vmid = vm_id & 0xf;
-
/* IB packet must end on an 8 DW boundary */
sdma_v3_0_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8);
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
- SDMA_PKT_INDIRECT_HEADER_VMID(vmid));
+ SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf));
/* base must be 32 byte aligned */
amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0);
amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
@@ -718,10 +719,14 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev)
WREG32(mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI + sdma_offsets[i],
upper_32_bits(wptr_gpu_addr));
wptr_poll_cntl = RREG32(mmSDMA0_GFX_RB_WPTR_POLL_CNTL + sdma_offsets[i]);
- if (amdgpu_sriov_vf(adev))
- wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, SDMA0_GFX_RB_WPTR_POLL_CNTL, F32_POLL_ENABLE, 1);
+ if (ring->use_pollmem)
+ wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
+ SDMA0_GFX_RB_WPTR_POLL_CNTL,
+ ENABLE, 1);
else
- wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, SDMA0_GFX_RB_WPTR_POLL_CNTL, F32_POLL_ENABLE, 0);
+ wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
+ SDMA0_GFX_RB_WPTR_POLL_CNTL,
+ ENABLE, 0);
WREG32(mmSDMA0_GFX_RB_WPTR_POLL_CNTL + sdma_offsets[i], wptr_poll_cntl);
/* enable DMA RB */
@@ -860,7 +865,7 @@ static int sdma_v3_0_ring_test_ring(struct amdgpu_ring *ring)
u32 tmp;
u64 gpu_addr;
- r = amdgpu_wb_get(adev, &index);
+ r = amdgpu_device_wb_get(adev, &index);
if (r) {
dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
return r;
@@ -873,7 +878,7 @@ static int sdma_v3_0_ring_test_ring(struct amdgpu_ring *ring)
r = amdgpu_ring_alloc(ring, 5);
if (r) {
DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
- amdgpu_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -899,7 +904,7 @@ static int sdma_v3_0_ring_test_ring(struct amdgpu_ring *ring)
ring->idx, tmp);
r = -EINVAL;
}
- amdgpu_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -922,7 +927,7 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
u64 gpu_addr;
long r;
- r = amdgpu_wb_get(adev, &index);
+ r = amdgpu_device_wb_get(adev, &index);
if (r) {
dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
return r;
@@ -974,7 +979,7 @@ err1:
amdgpu_ib_free(adev, &ib, NULL);
dma_fence_put(f);
err0:
- amdgpu_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -1120,14 +1125,14 @@ static void sdma_v3_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
* using sDMA (VI).
*/
static void sdma_v3_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
- unsigned vm_id, uint64_t pd_addr)
+ unsigned vmid, uint64_t pd_addr)
{
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
- if (vm_id < 8) {
- amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id));
+ if (vmid < 8) {
+ amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid));
} else {
- amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8));
+ amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8));
}
amdgpu_ring_write(ring, pd_addr >> 12);
@@ -1135,7 +1140,7 @@ static void sdma_v3_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
- amdgpu_ring_write(ring, 1 << vm_id);
+ amdgpu_ring_write(ring, 1 << vmid);
/* wait for flush */
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
@@ -1203,9 +1208,13 @@ static int sdma_v3_0_sw_init(void *handle)
for (i = 0; i < adev->sdma.num_instances; i++) {
ring = &adev->sdma.instance[i].ring;
ring->ring_obj = NULL;
- ring->use_doorbell = true;
- ring->doorbell_index = (i == 0) ?
- AMDGPU_DOORBELL_sDMA_ENGINE0 : AMDGPU_DOORBELL_sDMA_ENGINE1;
+ if (!amdgpu_sriov_vf(adev)) {
+ ring->use_doorbell = true;
+ ring->doorbell_index = (i == 0) ?
+ AMDGPU_DOORBELL_sDMA_ENGINE0 : AMDGPU_DOORBELL_sDMA_ENGINE1;
+ } else {
+ ring->use_pollmem = true;
+ }
sprintf(ring->name, "sdma%d", i);
r = amdgpu_ring_init(adev, ring, 1024,
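The sw_init hunk above picks the write-pointer path per ring: doorbells on bare metal, polled memory under SR-IOV. A standalone sketch of that selection, using simplified stand-in types:

#include <stdbool.h>
#include <stdio.h>

struct sketch_ring {
	bool use_doorbell;
	bool use_pollmem;
	int  doorbell_index;
};

static void setup_wptr_path(struct sketch_ring *ring, bool is_sriov_vf,
			    int doorbell_index)
{
	if (!is_sriov_vf) {
		ring->use_doorbell = true;
		ring->doorbell_index = doorbell_index;
	} else {
		ring->use_pollmem = true;
	}
}

int main(void)
{
	struct sketch_ring r = { 0 };

	setup_wptr_path(&r, true, 0xe0);	/* pretend SR-IOV VF */
	printf("doorbell=%d pollmem=%d\n", r.use_doorbell, r.use_pollmem);
	return 0;
}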
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 4c55f21e37a8..e92fb372bc99 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -27,7 +27,6 @@
#include "amdgpu_ucode.h"
#include "amdgpu_trace.h"
-#include "soc15ip.h"
#include "sdma0/sdma0_4_0_offset.h"
#include "sdma0/sdma0_4_0_sh_mask.h"
#include "sdma1/sdma1_4_0_offset.h"
@@ -53,95 +52,83 @@ static void sdma_v4_0_set_buffer_funcs(struct amdgpu_device *adev);
static void sdma_v4_0_set_vm_pte_funcs(struct amdgpu_device *adev);
static void sdma_v4_0_set_irq_funcs(struct amdgpu_device *adev);
-static const u32 golden_settings_sdma_4[] = {
- SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CHICKEN_BITS), 0xfe931f07, 0x02831d07,
- SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CLK_CTRL), 0xff000ff0, 0x3f000100,
- SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GFX_IB_CNTL), 0x800f0100, 0x00000100,
- SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL), 0xfffffff7, 0x00403000,
- SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_PAGE_IB_CNTL), 0x800f0100, 0x00000100,
- SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL), 0x0000fff0, 0x00403000,
- SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_POWER_CNTL), 0x003ff006, 0x0003c000,
- SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_RLC0_IB_CNTL), 0x800f0100, 0x00000100,
- SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL), 0x0000fff0, 0x00403000,
- SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_RLC1_IB_CNTL), 0x800f0100, 0x00000100,
- SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL), 0x0000fff0, 0x00403000,
- SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_UTCL1_PAGE), 0x000003ff, 0x000003c0,
- SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_CHICKEN_BITS), 0xfe931f07, 0x02831f07,
- SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_CLK_CTRL), 0xffffffff, 0x3f000100,
- SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_GFX_IB_CNTL), 0x800f0100, 0x00000100,
- SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_GFX_RB_WPTR_POLL_CNTL), 0x0000fff0, 0x00403000,
- SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_PAGE_IB_CNTL), 0x800f0100, 0x00000100,
- SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_PAGE_RB_WPTR_POLL_CNTL), 0x0000fff0, 0x00403000,
- SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_POWER_CNTL), 0x003ff000, 0x0003c000,
- SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_RLC0_IB_CNTL), 0x800f0100, 0x00000100,
- SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_RLC0_RB_WPTR_POLL_CNTL), 0x0000fff0, 0x00403000,
- SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_RLC1_IB_CNTL), 0x800f0100, 0x00000100,
- SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_RLC1_RB_WPTR_POLL_CNTL), 0x0000fff0, 0x00403000,
- SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_UTCL1_PAGE), 0x000003ff, 0x000003c0
+static const struct soc15_reg_golden golden_settings_sdma_4[] = {
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CLK_CTRL, 0xff000ff0, 0x3f000100),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GFX_IB_CNTL, 0x800f0100, 0x00000100),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_PAGE_IB_CNTL, 0x800f0100, 0x00000100),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_POWER_CNTL, 0x003ff006, 0x0003c000),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_IB_CNTL, 0x800f0100, 0x00000100),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_IB_CNTL, 0x800f0100, 0x00000100),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CLK_CTRL, 0xffffffff, 0x3f000100),
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_IB_CNTL, 0x800f0100, 0x00000100),
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_PAGE_IB_CNTL, 0x800f0100, 0x00000100),
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_PAGE_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_POWER_CNTL, 0x003ff000, 0x0003c000),
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC0_IB_CNTL, 0x800f0100, 0x00000100),
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC0_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC1_IB_CNTL, 0x800f0100, 0x00000100),
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC1_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_PAGE, 0x000003ff, 0x000003c0)
};
-static const u32 golden_settings_sdma_vg10[] = {
- SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG), 0x0018773f, 0x00104002,
- SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ), 0x0018773f, 0x00104002,
- SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG), 0x0018773f, 0x00104002,
- SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ), 0x0018773f, 0x00104002
+static const struct soc15_reg_golden golden_settings_sdma_vg10[] = {
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00104002),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002),
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0018773f, 0x00104002),
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002)
};
-static const u32 golden_settings_sdma_4_1[] =
+static const struct soc15_reg_golden golden_settings_sdma_4_1[] =
{
- SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CHICKEN_BITS), 0xfe931f07, 0x02831d07,
- SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CLK_CTRL), 0xffffffff, 0x3f000100,
- SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GFX_IB_CNTL), 0x800f0111, 0x00000100,
- SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL), 0xfffffff7, 0x00403000,
- SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_POWER_CNTL), 0xfc3fffff, 0x40000051,
- SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_RLC0_IB_CNTL), 0x800f0111, 0x00000100,
- SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL), 0xfffffff7, 0x00403000,
- SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_RLC1_IB_CNTL), 0x800f0111, 0x00000100,
- SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL), 0xfffffff7, 0x00403000,
- SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_UTCL1_PAGE), 0x000003ff, 0x000003c0
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CLK_CTRL, 0xffffffff, 0x3f000100),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_POWER_CNTL, 0xfc3fffff, 0x40000051),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0)
};
-static const u32 golden_settings_sdma_rv1[] =
+static const struct soc15_reg_golden golden_settings_sdma_rv1[] =
{
- SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG), 0x0018773f, 0x00000002,
- SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ), 0x0018773f, 0x00000002
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00000002),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00000002)
};
-static u32 sdma_v4_0_get_reg_offset(u32 instance, u32 internal_offset)
+static u32 sdma_v4_0_get_reg_offset(struct amdgpu_device *adev,
+ u32 instance, u32 offset)
{
- u32 base = 0;
-
- switch (instance) {
- case 0:
- base = SDMA0_BASE.instance[0].segment[0];
- break;
- case 1:
- base = SDMA1_BASE.instance[0].segment[0];
- break;
- default:
- BUG();
- break;
- }
-
- return base + internal_offset;
+ return (instance == 0 ? (adev->reg_offset[SDMA0_HWIP][0][0] + offset) :
+ (adev->reg_offset[SDMA1_HWIP][0][0] + offset));
}
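With the hard-coded BASE structs gone, the offset is resolved through the per-device adev->reg_offset table instead of compile-time constants. A tiny sketch of that lookup, with invented base values where the real ones are filled in by vega10_reg_base_init():

#include <stdint.h>
#include <stdio.h>

/* Fake per-instance bases standing in for adev->reg_offset[SDMA*_HWIP]. */
static const uint32_t sdma_base[2] = { 0x1260, 0x1860 };

static uint32_t get_reg_offset(uint32_t instance, uint32_t offset)
{
	return sdma_base[instance ? 1 : 0] + offset;
}

int main(void)
{
	printf("instance 1, offset 0x80 -> 0x%x\n",
	       (unsigned)get_reg_offset(1, 0x80));
	return 0;
}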
static void sdma_v4_0_init_golden_registers(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
case CHIP_VEGA10:
- amdgpu_program_register_sequence(adev,
+ soc15_program_register_sequence(adev,
golden_settings_sdma_4,
ARRAY_SIZE(golden_settings_sdma_4));
- amdgpu_program_register_sequence(adev,
+ soc15_program_register_sequence(adev,
golden_settings_sdma_vg10,
ARRAY_SIZE(golden_settings_sdma_vg10));
break;
case CHIP_RAVEN:
- amdgpu_program_register_sequence(adev,
+ soc15_program_register_sequence(adev,
golden_settings_sdma_4_1,
ARRAY_SIZE(golden_settings_sdma_4_1));
- amdgpu_program_register_sequence(adev,
+ soc15_program_register_sequence(adev,
golden_settings_sdma_rv1,
ARRAY_SIZE(golden_settings_sdma_rv1));
break;
@@ -265,8 +252,8 @@ static uint64_t sdma_v4_0_ring_get_wptr(struct amdgpu_ring *ring)
int me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;
wptr = &local_wptr;
- lowbit = RREG32(sdma_v4_0_get_reg_offset(me, mmSDMA0_GFX_RB_WPTR)) >> 2;
- highbit = RREG32(sdma_v4_0_get_reg_offset(me, mmSDMA0_GFX_RB_WPTR_HI)) >> 2;
+ lowbit = RREG32(sdma_v4_0_get_reg_offset(adev, me, mmSDMA0_GFX_RB_WPTR)) >> 2;
+ highbit = RREG32(sdma_v4_0_get_reg_offset(adev, me, mmSDMA0_GFX_RB_WPTR_HI)) >> 2;
DRM_DEBUG("wptr [%i]high== 0x%08x low==0x%08x\n",
me, highbit, lowbit);
@@ -315,8 +302,8 @@ static void sdma_v4_0_ring_set_wptr(struct amdgpu_ring *ring)
lower_32_bits(ring->wptr << 2),
me,
upper_32_bits(ring->wptr << 2));
- WREG32(sdma_v4_0_get_reg_offset(me, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr << 2));
- WREG32(sdma_v4_0_get_reg_offset(me, mmSDMA0_GFX_RB_WPTR_HI), upper_32_bits(ring->wptr << 2));
+ WREG32(sdma_v4_0_get_reg_offset(adev, me, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr << 2));
+ WREG32(sdma_v4_0_get_reg_offset(adev, me, mmSDMA0_GFX_RB_WPTR_HI), upper_32_bits(ring->wptr << 2));
}
}
@@ -343,15 +330,13 @@ static void sdma_v4_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
*/
static void sdma_v4_0_ring_emit_ib(struct amdgpu_ring *ring,
struct amdgpu_ib *ib,
- unsigned vm_id, bool ctx_switch)
+ unsigned vmid, bool ctx_switch)
{
- u32 vmid = vm_id & 0xf;
-
/* IB packet must end on an 8 DW boundary */
sdma_v4_0_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8);
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
- SDMA_PKT_INDIRECT_HEADER_VMID(vmid));
+ SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf));
/* base must be 32 byte aligned */
amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0);
amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
@@ -370,13 +355,9 @@ static void sdma_v4_0_ring_emit_ib(struct amdgpu_ring *ring,
*/
static void sdma_v4_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
+ struct amdgpu_device *adev = ring->adev;
u32 ref_and_mask = 0;
- const struct nbio_hdp_flush_reg *nbio_hf_reg;
-
- if (ring->adev->flags & AMD_IS_APU)
- nbio_hf_reg = &nbio_v7_0_hdp_flush_reg;
- else
- nbio_hf_reg = &nbio_v6_1_hdp_flush_reg;
+ const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio_funcs->hdp_flush_reg;
if (ring == &ring->adev->sdma.instance[0].ring)
ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0;
@@ -386,8 +367,8 @@ static void sdma_v4_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
- amdgpu_ring_write(ring, nbio_hf_reg->hdp_flush_done_offset << 2);
- amdgpu_ring_write(ring, nbio_hf_reg->hdp_flush_req_offset << 2);
+ amdgpu_ring_write(ring, (adev->nbio_funcs->get_hdp_flush_done_offset(adev)) << 2);
+ amdgpu_ring_write(ring, (adev->nbio_funcs->get_hdp_flush_req_offset(adev)) << 2);
amdgpu_ring_write(ring, ref_and_mask); /* reference */
amdgpu_ring_write(ring, ref_and_mask); /* mask */
amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
@@ -396,6 +377,8 @@ static void sdma_v4_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
static void sdma_v4_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
{
+ struct amdgpu_device *adev = ring->adev;
+
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
amdgpu_ring_write(ring, SOC15_REG_OFFSET(HDP, 0, mmHDP_READ_CACHE_INVALIDATE));
@@ -460,12 +443,12 @@ static void sdma_v4_0_gfx_stop(struct amdgpu_device *adev)
amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);
for (i = 0; i < adev->sdma.num_instances; i++) {
- rb_cntl = RREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_CNTL));
+ rb_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
- WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
- ib_cntl = RREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_IB_CNTL));
+ WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
+ ib_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL));
ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
- WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
+ WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
}
sdma0->ready = false;
@@ -522,18 +505,18 @@ static void sdma_v4_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
}
for (i = 0; i < adev->sdma.num_instances; i++) {
- f32_cntl = RREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_CNTL));
+ f32_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_CNTL));
f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
AUTO_CTXSW_ENABLE, enable ? 1 : 0);
if (enable && amdgpu_sdma_phase_quantum) {
- WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_PHASE0_QUANTUM),
+ WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_PHASE0_QUANTUM),
phase_quantum);
- WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_PHASE1_QUANTUM),
+ WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_PHASE1_QUANTUM),
phase_quantum);
- WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_PHASE2_QUANTUM),
+ WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_PHASE2_QUANTUM),
phase_quantum);
}
- WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_CNTL), f32_cntl);
+ WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_CNTL), f32_cntl);
}
}
@@ -557,9 +540,9 @@ static void sdma_v4_0_enable(struct amdgpu_device *adev, bool enable)
}
for (i = 0; i < adev->sdma.num_instances; i++) {
- f32_cntl = RREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_F32_CNTL));
+ f32_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, enable ? 0 : 1);
- WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_F32_CNTL), f32_cntl);
+ WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), f32_cntl);
}
}
@@ -587,48 +570,48 @@ static int sdma_v4_0_gfx_resume(struct amdgpu_device *adev)
ring = &adev->sdma.instance[i].ring;
wb_offset = (ring->rptr_offs * 4);
- WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0);
+ WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0);
/* Set ring buffer size in dwords */
rb_bufsz = order_base_2(ring->ring_size / 4);
- rb_cntl = RREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_CNTL));
+ rb_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz);
#ifdef __BIG_ENDIAN
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1);
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL,
RPTR_WRITEBACK_SWAP_ENABLE, 1);
#endif
- WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
+ WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
/* Initialize the ring buffer's read and write pointers */
- WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_RPTR), 0);
- WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_RPTR_HI), 0);
- WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_WPTR), 0);
- WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_WPTR_HI), 0);
+ WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR), 0);
+ WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_HI), 0);
+ WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), 0);
+ WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), 0);
/* set the wb address whether it's enabled or not */
- WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_RPTR_ADDR_HI),
+ WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_HI),
upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
- WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_RPTR_ADDR_LO),
+ WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_LO),
lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC);
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);
- WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_BASE), ring->gpu_addr >> 8);
- WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_BASE_HI), ring->gpu_addr >> 40);
+ WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE), ring->gpu_addr >> 8);
+ WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE_HI), ring->gpu_addr >> 40);
ring->wptr = 0;
/* before programming wptr to a smaller value, set minor_ptr_update first */
- WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 1);
+ WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 1);
if (!amdgpu_sriov_vf(adev)) { /* only bare-metal uses register writes for wptr */
- WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr) << 2);
- WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_WPTR_HI), upper_32_bits(ring->wptr) << 2);
+ WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr) << 2);
+ WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), upper_32_bits(ring->wptr) << 2);
}
- doorbell = RREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_DOORBELL));
- doorbell_offset = RREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_DOORBELL_OFFSET));
+ doorbell = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL));
+ doorbell_offset = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET));
if (ring->use_doorbell) {
doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 1);
@@ -637,55 +620,53 @@ static int sdma_v4_0_gfx_resume(struct amdgpu_device *adev)
} else {
doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 0);
}
- WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_DOORBELL), doorbell);
- WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_DOORBELL_OFFSET), doorbell_offset);
- if (adev->flags & AMD_IS_APU)
- nbio_v7_0_sdma_doorbell_range(adev, i, ring->use_doorbell, ring->doorbell_index);
- else
- nbio_v6_1_sdma_doorbell_range(adev, i, ring->use_doorbell, ring->doorbell_index);
+ WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL), doorbell);
+ WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET), doorbell_offset);
+ adev->nbio_funcs->sdma_doorbell_range(adev, i, ring->use_doorbell,
+ ring->doorbell_index);
if (amdgpu_sriov_vf(adev))
sdma_v4_0_ring_set_wptr(ring);
/* set minor_ptr_update to 0 after wptr is programmed */
- WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 0);
+ WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 0);
/* set utc l1 enable flag always to 1 */
- temp = RREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_CNTL));
+ temp = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_CNTL));
temp = REG_SET_FIELD(temp, SDMA0_CNTL, UTC_L1_ENABLE, 1);
- WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_CNTL), temp);
+ WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_CNTL), temp);
if (!amdgpu_sriov_vf(adev)) {
/* unhalt engine */
- temp = RREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_F32_CNTL));
+ temp = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0);
- WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_F32_CNTL), temp);
+ WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), temp);
}
/* setup the wptr shadow polling */
wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
- WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO),
+ WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO),
lower_32_bits(wptr_gpu_addr));
- WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI),
+ WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI),
upper_32_bits(wptr_gpu_addr));
- wptr_poll_cntl = RREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL));
+ wptr_poll_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL));
if (amdgpu_sriov_vf(adev))
wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, SDMA0_GFX_RB_WPTR_POLL_CNTL, F32_POLL_ENABLE, 1);
else
wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, SDMA0_GFX_RB_WPTR_POLL_CNTL, F32_POLL_ENABLE, 0);
- WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL), wptr_poll_cntl);
+ WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL), wptr_poll_cntl);
/* enable DMA RB */
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1);
- WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
+ WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
- ib_cntl = RREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_IB_CNTL));
+ ib_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL));
ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1);
#ifdef __BIG_ENDIAN
ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1);
#endif
/* enable DMA IBs */
- WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
+ WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
ring->ready = true;
@@ -816,12 +797,12 @@ static int sdma_v4_0_load_microcode(struct amdgpu_device *adev)
(adev->sdma.instance[i].fw->data +
le32_to_cpu(hdr->header.ucode_array_offset_bytes));
- WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_UCODE_ADDR), 0);
+ WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_UCODE_ADDR), 0);
for (j = 0; j < fw_size; j++)
- WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_UCODE_DATA), le32_to_cpup(fw_data++));
+ WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_UCODE_DATA), le32_to_cpup(fw_data++));
- WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_UCODE_ADDR), adev->sdma.instance[i].fw_version);
+ WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_UCODE_ADDR), adev->sdma.instance[i].fw_version);
}
return 0;
@@ -886,7 +867,7 @@ static int sdma_v4_0_ring_test_ring(struct amdgpu_ring *ring)
u32 tmp;
u64 gpu_addr;
- r = amdgpu_wb_get(adev, &index);
+ r = amdgpu_device_wb_get(adev, &index);
if (r) {
dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
return r;
@@ -899,7 +880,7 @@ static int sdma_v4_0_ring_test_ring(struct amdgpu_ring *ring)
r = amdgpu_ring_alloc(ring, 5);
if (r) {
DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
- amdgpu_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -925,7 +906,7 @@ static int sdma_v4_0_ring_test_ring(struct amdgpu_ring *ring)
ring->idx, tmp);
r = -EINVAL;
}
- amdgpu_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -948,7 +929,7 @@ static int sdma_v4_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
u32 tmp = 0;
u64 gpu_addr;
- r = amdgpu_wb_get(adev, &index);
+ r = amdgpu_device_wb_get(adev, &index);
if (r) {
dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
return r;
@@ -1000,7 +981,7 @@ err1:
amdgpu_ib_free(adev, &ib, NULL);
dma_fence_put(f);
err0:
- amdgpu_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -1152,23 +1133,24 @@ static void sdma_v4_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
* using sDMA (VEGA10).
*/
static void sdma_v4_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
- unsigned vm_id, uint64_t pd_addr)
+ unsigned vmid, uint64_t pd_addr)
{
struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
- uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
+ uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vmid);
+ uint64_t flags = AMDGPU_PTE_VALID;
unsigned eng = ring->vm_inv_eng;
- pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr);
- pd_addr |= AMDGPU_PTE_VALID;
+ amdgpu_gart_get_vm_pde(ring->adev, -1, &pd_addr, &flags);
+ pd_addr |= flags;
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
- amdgpu_ring_write(ring, hub->ctx0_ptb_addr_lo32 + vm_id * 2);
+ amdgpu_ring_write(ring, hub->ctx0_ptb_addr_lo32 + vmid * 2);
amdgpu_ring_write(ring, lower_32_bits(pd_addr));
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
- amdgpu_ring_write(ring, hub->ctx0_ptb_addr_hi32 + vm_id * 2);
+ amdgpu_ring_write(ring, hub->ctx0_ptb_addr_hi32 + vmid * 2);
amdgpu_ring_write(ring, upper_32_bits(pd_addr));
/* flush TLB */
@@ -1183,8 +1165,8 @@ static void sdma_v4_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* equal */
amdgpu_ring_write(ring, (hub->vm_inv_eng0_ack + eng) << 2);
amdgpu_ring_write(ring, 0);
- amdgpu_ring_write(ring, 1 << vm_id); /* reference */
- amdgpu_ring_write(ring, 1 << vm_id); /* mask */
+ amdgpu_ring_write(ring, 1 << vmid); /* reference */
+ amdgpu_ring_write(ring, 1 << vmid); /* mask */
amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10));
}
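The trailing POLL_REGMEM packet waits until the invalidate-ack register reports this VMID's bit, using the same value as both reference and mask with the "equal" compare function. A sketch of that predicate on simulated register contents:

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

static bool vm_invalidate_acked(uint32_t ack_reg, unsigned vmid)
{
	uint32_t ref  = 1u << vmid;
	uint32_t mask = 1u << vmid;

	return (ack_reg & mask) == ref;		/* FUNC(3): equal */
}

int main(void)
{
	printf("%d\n", vm_invalidate_acked(0x00000100, 8));	/* 1: bit 8 set */
	printf("%d\n", vm_invalidate_acked(0x00000000, 8));	/* 0: still pending */
	return 0;
}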
@@ -1317,7 +1299,7 @@ static bool sdma_v4_0_is_idle(void *handle)
u32 i;
for (i = 0; i < adev->sdma.num_instances; i++) {
- u32 tmp = RREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_STATUS_REG));
+ u32 tmp = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_STATUS_REG));
if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK))
return false;
@@ -1333,8 +1315,8 @@ static int sdma_v4_0_wait_for_idle(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
for (i = 0; i < adev->usec_timeout; i++) {
- sdma0 = RREG32(sdma_v4_0_get_reg_offset(0, mmSDMA0_STATUS_REG));
- sdma1 = RREG32(sdma_v4_0_get_reg_offset(1, mmSDMA0_STATUS_REG));
+ sdma0 = RREG32(sdma_v4_0_get_reg_offset(adev, 0, mmSDMA0_STATUS_REG));
+ sdma1 = RREG32(sdma_v4_0_get_reg_offset(adev, 1, mmSDMA0_STATUS_REG));
if (sdma0 & sdma1 & SDMA0_STATUS_REG__IDLE_MASK)
return 0;
@@ -1358,8 +1340,8 @@ static int sdma_v4_0_set_trap_irq_state(struct amdgpu_device *adev,
u32 sdma_cntl;
u32 reg_offset = (type == AMDGPU_SDMA_IRQ_TRAP0) ?
- sdma_v4_0_get_reg_offset(0, mmSDMA0_CNTL) :
- sdma_v4_0_get_reg_offset(1, mmSDMA0_CNTL);
+ sdma_v4_0_get_reg_offset(adev, 0, mmSDMA0_CNTL) :
+ sdma_v4_0_get_reg_offset(adev, 1, mmSDMA0_CNTL);
sdma_cntl = RREG32(reg_offset);
sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE,
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
index 49eef3090f08..543101d5a5ed 100644
--- a/drivers/gpu/drm/amd/amdgpu/si.c
+++ b/drivers/gpu/drm/amd/amdgpu/si.c
@@ -1390,65 +1390,65 @@ static void si_init_golden_registers(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
case CHIP_TAHITI:
- amdgpu_program_register_sequence(adev,
- tahiti_golden_registers,
- ARRAY_SIZE(tahiti_golden_registers));
- amdgpu_program_register_sequence(adev,
- tahiti_golden_rlc_registers,
- ARRAY_SIZE(tahiti_golden_rlc_registers));
- amdgpu_program_register_sequence(adev,
- tahiti_mgcg_cgcg_init,
- ARRAY_SIZE(tahiti_mgcg_cgcg_init));
- amdgpu_program_register_sequence(adev,
- tahiti_golden_registers2,
- ARRAY_SIZE(tahiti_golden_registers2));
+ amdgpu_device_program_register_sequence(adev,
+ tahiti_golden_registers,
+ ARRAY_SIZE(tahiti_golden_registers));
+ amdgpu_device_program_register_sequence(adev,
+ tahiti_golden_rlc_registers,
+ ARRAY_SIZE(tahiti_golden_rlc_registers));
+ amdgpu_device_program_register_sequence(adev,
+ tahiti_mgcg_cgcg_init,
+ ARRAY_SIZE(tahiti_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ tahiti_golden_registers2,
+ ARRAY_SIZE(tahiti_golden_registers2));
break;
case CHIP_PITCAIRN:
- amdgpu_program_register_sequence(adev,
- pitcairn_golden_registers,
- ARRAY_SIZE(pitcairn_golden_registers));
- amdgpu_program_register_sequence(adev,
- pitcairn_golden_rlc_registers,
- ARRAY_SIZE(pitcairn_golden_rlc_registers));
- amdgpu_program_register_sequence(adev,
- pitcairn_mgcg_cgcg_init,
- ARRAY_SIZE(pitcairn_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ pitcairn_golden_registers,
+ ARRAY_SIZE(pitcairn_golden_registers));
+ amdgpu_device_program_register_sequence(adev,
+ pitcairn_golden_rlc_registers,
+ ARRAY_SIZE(pitcairn_golden_rlc_registers));
+ amdgpu_device_program_register_sequence(adev,
+ pitcairn_mgcg_cgcg_init,
+ ARRAY_SIZE(pitcairn_mgcg_cgcg_init));
break;
case CHIP_VERDE:
- amdgpu_program_register_sequence(adev,
- verde_golden_registers,
- ARRAY_SIZE(verde_golden_registers));
- amdgpu_program_register_sequence(adev,
- verde_golden_rlc_registers,
- ARRAY_SIZE(verde_golden_rlc_registers));
- amdgpu_program_register_sequence(adev,
- verde_mgcg_cgcg_init,
- ARRAY_SIZE(verde_mgcg_cgcg_init));
- amdgpu_program_register_sequence(adev,
- verde_pg_init,
- ARRAY_SIZE(verde_pg_init));
+ amdgpu_device_program_register_sequence(adev,
+ verde_golden_registers,
+ ARRAY_SIZE(verde_golden_registers));
+ amdgpu_device_program_register_sequence(adev,
+ verde_golden_rlc_registers,
+ ARRAY_SIZE(verde_golden_rlc_registers));
+ amdgpu_device_program_register_sequence(adev,
+ verde_mgcg_cgcg_init,
+ ARRAY_SIZE(verde_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ verde_pg_init,
+ ARRAY_SIZE(verde_pg_init));
break;
case CHIP_OLAND:
- amdgpu_program_register_sequence(adev,
- oland_golden_registers,
- ARRAY_SIZE(oland_golden_registers));
- amdgpu_program_register_sequence(adev,
- oland_golden_rlc_registers,
- ARRAY_SIZE(oland_golden_rlc_registers));
- amdgpu_program_register_sequence(adev,
- oland_mgcg_cgcg_init,
- ARRAY_SIZE(oland_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ oland_golden_registers,
+ ARRAY_SIZE(oland_golden_registers));
+ amdgpu_device_program_register_sequence(adev,
+ oland_golden_rlc_registers,
+ ARRAY_SIZE(oland_golden_rlc_registers));
+ amdgpu_device_program_register_sequence(adev,
+ oland_mgcg_cgcg_init,
+ ARRAY_SIZE(oland_mgcg_cgcg_init));
break;
case CHIP_HAINAN:
- amdgpu_program_register_sequence(adev,
- hainan_golden_registers,
- ARRAY_SIZE(hainan_golden_registers));
- amdgpu_program_register_sequence(adev,
- hainan_golden_registers2,
- ARRAY_SIZE(hainan_golden_registers2));
- amdgpu_program_register_sequence(adev,
- hainan_mgcg_cgcg_init,
- ARRAY_SIZE(hainan_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ hainan_golden_registers,
+ ARRAY_SIZE(hainan_golden_registers));
+ amdgpu_device_program_register_sequence(adev,
+ hainan_golden_registers2,
+ ARRAY_SIZE(hainan_golden_registers2));
+ amdgpu_device_program_register_sequence(adev,
+ hainan_mgcg_cgcg_init,
+ ARRAY_SIZE(hainan_mgcg_cgcg_init));
break;
@@ -1959,42 +1959,42 @@ int si_set_ip_blocks(struct amdgpu_device *adev)
case CHIP_VERDE:
case CHIP_TAHITI:
case CHIP_PITCAIRN:
- amdgpu_ip_block_add(adev, &si_common_ip_block);
- amdgpu_ip_block_add(adev, &gmc_v6_0_ip_block);
- amdgpu_ip_block_add(adev, &si_ih_ip_block);
- amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ amdgpu_device_ip_block_add(adev, &si_common_ip_block);
+ amdgpu_device_ip_block_add(adev, &gmc_v6_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &si_ih_ip_block);
+ amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block);
if (adev->enable_virtual_display)
- amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
else
- amdgpu_ip_block_add(adev, &dce_v6_0_ip_block);
- amdgpu_ip_block_add(adev, &gfx_v6_0_ip_block);
- amdgpu_ip_block_add(adev, &si_dma_ip_block);
- /* amdgpu_ip_block_add(adev, &uvd_v3_1_ip_block); */
- /* amdgpu_ip_block_add(adev, &vce_v1_0_ip_block); */
+ amdgpu_device_ip_block_add(adev, &dce_v6_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &gfx_v6_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &si_dma_ip_block);
+ /* amdgpu_device_ip_block_add(adev, &uvd_v3_1_ip_block); */
+ /* amdgpu_device_ip_block_add(adev, &vce_v1_0_ip_block); */
break;
case CHIP_OLAND:
- amdgpu_ip_block_add(adev, &si_common_ip_block);
- amdgpu_ip_block_add(adev, &gmc_v6_0_ip_block);
- amdgpu_ip_block_add(adev, &si_ih_ip_block);
- amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ amdgpu_device_ip_block_add(adev, &si_common_ip_block);
+ amdgpu_device_ip_block_add(adev, &gmc_v6_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &si_ih_ip_block);
+ amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block);
if (adev->enable_virtual_display)
- amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
else
- amdgpu_ip_block_add(adev, &dce_v6_4_ip_block);
- amdgpu_ip_block_add(adev, &gfx_v6_0_ip_block);
- amdgpu_ip_block_add(adev, &si_dma_ip_block);
- /* amdgpu_ip_block_add(adev, &uvd_v3_1_ip_block); */
- /* amdgpu_ip_block_add(adev, &vce_v1_0_ip_block); */
+ amdgpu_device_ip_block_add(adev, &dce_v6_4_ip_block);
+ amdgpu_device_ip_block_add(adev, &gfx_v6_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &si_dma_ip_block);
+ /* amdgpu_device_ip_block_add(adev, &uvd_v3_1_ip_block); */
+ /* amdgpu_device_ip_block_add(adev, &vce_v1_0_ip_block); */
break;
case CHIP_HAINAN:
- amdgpu_ip_block_add(adev, &si_common_ip_block);
- amdgpu_ip_block_add(adev, &gmc_v6_0_ip_block);
- amdgpu_ip_block_add(adev, &si_ih_ip_block);
- amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ amdgpu_device_ip_block_add(adev, &si_common_ip_block);
+ amdgpu_device_ip_block_add(adev, &gmc_v6_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &si_ih_ip_block);
+ amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block);
if (adev->enable_virtual_display)
- amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
- amdgpu_ip_block_add(adev, &gfx_v6_0_ip_block);
- amdgpu_ip_block_add(adev, &si_dma_ip_block);
+ amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
+ amdgpu_device_ip_block_add(adev, &gfx_v6_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &si_dma_ip_block);
break;
default:
BUG();
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c
index ee469a906cd3..9a29c1399091 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dma.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c
@@ -61,14 +61,14 @@ static void si_dma_ring_set_wptr(struct amdgpu_ring *ring)
static void si_dma_ring_emit_ib(struct amdgpu_ring *ring,
struct amdgpu_ib *ib,
- unsigned vm_id, bool ctx_switch)
+ unsigned vmid, bool ctx_switch)
{
/* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
* Pad as necessary with NOPs.
*/
while ((lower_32_bits(ring->wptr) & 7) != 5)
amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
- amdgpu_ring_write(ring, DMA_IB_PACKET(DMA_PACKET_INDIRECT_BUFFER, vm_id, 0));
+ amdgpu_ring_write(ring, DMA_IB_PACKET(DMA_PACKET_INDIRECT_BUFFER, vmid, 0));
amdgpu_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
amdgpu_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
@@ -221,7 +221,7 @@ static int si_dma_ring_test_ring(struct amdgpu_ring *ring)
u32 tmp;
u64 gpu_addr;
- r = amdgpu_wb_get(adev, &index);
+ r = amdgpu_device_wb_get(adev, &index);
if (r) {
dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
return r;
@@ -234,7 +234,7 @@ static int si_dma_ring_test_ring(struct amdgpu_ring *ring)
r = amdgpu_ring_alloc(ring, 4);
if (r) {
DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
- amdgpu_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -258,7 +258,7 @@ static int si_dma_ring_test_ring(struct amdgpu_ring *ring)
ring->idx, tmp);
r = -EINVAL;
}
- amdgpu_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -281,7 +281,7 @@ static int si_dma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
u64 gpu_addr;
long r;
- r = amdgpu_wb_get(adev, &index);
+ r = amdgpu_device_wb_get(adev, &index);
if (r) {
dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
return r;
@@ -328,7 +328,7 @@ err1:
amdgpu_ib_free(adev, &ib, NULL);
dma_fence_put(f);
err0:
- amdgpu_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -473,25 +473,25 @@ static void si_dma_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
* using sDMA (SI).
*/
static void si_dma_ring_emit_vm_flush(struct amdgpu_ring *ring,
- unsigned vm_id, uint64_t pd_addr)
+ unsigned vmid, uint64_t pd_addr)
{
amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
- if (vm_id < 8)
- amdgpu_ring_write(ring, (0xf << 16) | (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id));
+ if (vmid < 8)
+ amdgpu_ring_write(ring, (0xf << 16) | (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid));
else
- amdgpu_ring_write(ring, (0xf << 16) | (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (vm_id - 8)));
+ amdgpu_ring_write(ring, (0xf << 16) | (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (vmid - 8)));
amdgpu_ring_write(ring, pd_addr >> 12);
/* bits 0-7 are the VM contexts0-7 */
amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
amdgpu_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST));
- amdgpu_ring_write(ring, 1 << vm_id);
+ amdgpu_ring_write(ring, 1 << vmid);
/* wait for invalidate to complete */
amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_POLL_REG_MEM, 0, 0, 0, 0));
amdgpu_ring_write(ring, VM_INVALIDATE_REQUEST);
amdgpu_ring_write(ring, 0xff << 16); /* retry */
- amdgpu_ring_write(ring, 1 << vm_id); /* mask */
+ amdgpu_ring_write(ring, 1 << vmid); /* mask */
amdgpu_ring_write(ring, 0); /* value */
amdgpu_ring_write(ring, (0 << 28) | 0x20); /* func(always) | poll interval */
}
diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.c b/drivers/gpu/drm/amd/amdgpu/si_ih.c
index d2c6b80309c8..60dad63098a2 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_ih.c
@@ -146,7 +146,7 @@ static void si_ih_decode_iv(struct amdgpu_device *adev,
entry->src_id = dw[0] & 0xff;
entry->src_data[0] = dw[1] & 0xfffffff;
entry->ring_id = dw[2] & 0xff;
- entry->vm_id = (dw[2] >> 8) & 0xff;
+ entry->vmid = (dw[2] >> 8) & 0xff;
adev->irq.ih.rptr += 16;
}
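The IV unpacking itself is unchanged apart from the vmid field name. A standalone sketch that applies the same masks and shifts to a fabricated 16-byte ring entry:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t dw[4] = { 0x0000002a, 0x01234567, 0x00000503, 0 };

	unsigned src_id   = dw[0] & 0xff;		/* 42 */
	unsigned src_data = dw[1] & 0xfffffff;		/* 0x1234567 */
	unsigned ring_id  = dw[2] & 0xff;		/* 3 */
	unsigned vmid     = (dw[2] >> 8) & 0xff;	/* 5 */

	printf("src_id=%u src_data=0x%07x ring_id=%u vmid=%u\n",
	       src_id, src_data, ring_id, vmid);
	return 0;
}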
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index f134ca0c093c..8f2cff7b7e0c 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -34,7 +34,6 @@
#include "atom.h"
#include "amd_pcie.h"
-#include "soc15ip.h"
#include "uvd/uvd_7_0_offset.h"
#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"
@@ -101,15 +100,8 @@ static u32 soc15_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
unsigned long flags, address, data;
u32 r;
- const struct nbio_pcie_index_data *nbio_pcie_id;
-
- if (adev->flags & AMD_IS_APU)
- nbio_pcie_id = &nbio_v7_0_pcie_index_data;
- else
- nbio_pcie_id = &nbio_v6_1_pcie_index_data;
-
- address = nbio_pcie_id->index_offset;
- data = nbio_pcie_id->data_offset;
+ address = adev->nbio_funcs->get_pcie_index_offset(adev);
+ data = adev->nbio_funcs->get_pcie_data_offset(adev);
spin_lock_irqsave(&adev->pcie_idx_lock, flags);
WREG32(address, reg);
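These helpers wrap the usual index/data indirection: select the target offset through an index register, then access the value through a data register, all under pcie_idx_lock. A sketch of that indirection with plain variables standing in for the two registers; the read-back of the index register is an assumption about how the write is posted:

#include <stdint.h>
#include <stdio.h>

static uint32_t pcie_index;		/* stands in for the index register */
static uint32_t pcie_space[16];		/* pretend PCIE register file */

static void     wreg(uint32_t *r, uint32_t v) { *r = v; }
static uint32_t rreg(const uint32_t *r)       { return *r; }

static uint32_t pcie_rreg(uint32_t reg)
{
	wreg(&pcie_index, reg);			/* select the register */
	(void)rreg(&pcie_index);		/* read back to post the write */
	return rreg(&pcie_space[pcie_index]);	/* fetch through the data port */
}

int main(void)
{
	pcie_space[3] = 0xcafe;
	printf("0x%x\n", (unsigned)pcie_rreg(3));
	return 0;
}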
@@ -122,15 +114,9 @@ static u32 soc15_pcie_rreg(struct amdgpu_device *adev, u32 reg)
static void soc15_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
unsigned long flags, address, data;
- const struct nbio_pcie_index_data *nbio_pcie_id;
- if (adev->flags & AMD_IS_APU)
- nbio_pcie_id = &nbio_v7_0_pcie_index_data;
- else
- nbio_pcie_id = &nbio_v6_1_pcie_index_data;
-
- address = nbio_pcie_id->index_offset;
- data = nbio_pcie_id->data_offset;
+ address = adev->nbio_funcs->get_pcie_index_offset(adev);
+ data = adev->nbio_funcs->get_pcie_data_offset(adev);
spin_lock_irqsave(&adev->pcie_idx_lock, flags);
WREG32(address, reg);
@@ -242,41 +228,9 @@ static void soc15_se_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
static u32 soc15_get_config_memsize(struct amdgpu_device *adev)
{
- if (adev->flags & AMD_IS_APU)
- return nbio_v7_0_get_memsize(adev);
- else
- return nbio_v6_1_get_memsize(adev);
+ return adev->nbio_funcs->get_memsize(adev);
}
-static const u32 vega10_golden_init[] =
-{
-};
-
-static const u32 raven_golden_init[] =
-{
-};
-
-static void soc15_init_golden_registers(struct amdgpu_device *adev)
-{
- /* Some of the registers might be dependent on GRBM_GFX_INDEX */
- mutex_lock(&adev->grbm_idx_mutex);
-
- switch (adev->asic_type) {
- case CHIP_VEGA10:
- amdgpu_program_register_sequence(adev,
- vega10_golden_init,
- ARRAY_SIZE(vega10_golden_init));
- break;
- case CHIP_RAVEN:
- amdgpu_program_register_sequence(adev,
- raven_golden_init,
- ARRAY_SIZE(raven_golden_init));
- break;
- default:
- break;
- }
- mutex_unlock(&adev->grbm_idx_mutex);
-}
static u32 soc15_get_xclk(struct amdgpu_device *adev)
{
return adev->clock.spll.reference_freq;
@@ -332,25 +286,34 @@ static bool soc15_read_bios_from_rom(struct amdgpu_device *adev,
return true;
}
-static struct amdgpu_allowed_register_entry soc15_allowed_read_registers[] = {
- { SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS)},
- { SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS2)},
- { SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS_SE0)},
- { SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS_SE1)},
- { SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS_SE2)},
- { SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS_SE3)},
- { SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_STATUS_REG)},
- { SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_STATUS_REG)},
- { SOC15_REG_OFFSET(GC, 0, mmCP_STAT)},
- { SOC15_REG_OFFSET(GC, 0, mmCP_STALLED_STAT1)},
- { SOC15_REG_OFFSET(GC, 0, mmCP_STALLED_STAT2)},
- { SOC15_REG_OFFSET(GC, 0, mmCP_STALLED_STAT3)},
- { SOC15_REG_OFFSET(GC, 0, mmCP_CPF_BUSY_STAT)},
- { SOC15_REG_OFFSET(GC, 0, mmCP_CPF_STALLED_STAT1)},
- { SOC15_REG_OFFSET(GC, 0, mmCP_CPF_STATUS)},
- { SOC15_REG_OFFSET(GC, 0, mmCP_CPC_STALLED_STAT1)},
- { SOC15_REG_OFFSET(GC, 0, mmCP_CPC_STATUS)},
- { SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG)},
+struct soc15_allowed_register_entry {
+ uint32_t hwip;
+ uint32_t inst;
+ uint32_t seg;
+ uint32_t reg_offset;
+ bool grbm_indexed;
+};
+
+
+static struct soc15_allowed_register_entry soc15_allowed_read_registers[] = {
+ { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS)},
+ { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS2)},
+ { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE0)},
+ { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE1)},
+ { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE2)},
+ { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE3)},
+ { SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_STATUS_REG)},
+ { SOC15_REG_ENTRY(SDMA1, 0, mmSDMA1_STATUS_REG)},
+ { SOC15_REG_ENTRY(GC, 0, mmCP_STAT)},
+ { SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT1)},
+ { SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT2)},
+ { SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT3)},
+ { SOC15_REG_ENTRY(GC, 0, mmCP_CPF_BUSY_STAT)},
+ { SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STALLED_STAT1)},
+ { SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STATUS)},
+ { SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1)},
+ { SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS)},
+ { SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG)},
};
static uint32_t soc15_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
@@ -377,12 +340,9 @@ static uint32_t soc15_get_register_value(struct amdgpu_device *adev,
if (indexed) {
return soc15_read_indexed_register(adev, se_num, sh_num, reg_offset);
} else {
- switch (reg_offset) {
- case SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG):
+ if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG))
return adev->gfx.config.gb_addr_config;
- default:
- return RREG32(reg_offset);
- }
+ return RREG32(reg_offset);
}
}
@@ -390,10 +350,13 @@ static int soc15_read_register(struct amdgpu_device *adev, u32 se_num,
u32 sh_num, u32 reg_offset, u32 *value)
{
uint32_t i;
+ struct soc15_allowed_register_entry *en;
*value = 0;
for (i = 0; i < ARRAY_SIZE(soc15_allowed_read_registers); i++) {
- if (reg_offset != soc15_allowed_read_registers[i].reg_offset)
+ en = &soc15_allowed_read_registers[i];
+ if (reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
+ + en->reg_offset))
continue;
*value = soc15_get_register_value(adev,
@@ -404,6 +367,43 @@ static int soc15_read_register(struct amdgpu_device *adev, u32 se_num,
return -EINVAL;
}
+
+/**
+ * soc15_program_register_sequence - program an array of registers.
+ *
+ * @adev: amdgpu_device pointer
+ * @regs: pointer to the register array
+ * @array_size: size of the register array
+ *
+ * Programs an array of registers with AND and OR masks.
+ * This is a helper for setting golden registers.
+ */
+
+void soc15_program_register_sequence(struct amdgpu_device *adev,
+ const struct soc15_reg_golden *regs,
+ const u32 array_size)
+{
+ const struct soc15_reg_golden *entry;
+ u32 tmp, reg;
+ int i;
+
+ for (i = 0; i < array_size; ++i) {
+ entry = &regs[i];
+ reg = adev->reg_offset[entry->hwip][entry->instance][entry->segment] + entry->reg;
+
+ if (entry->and_mask == 0xffffffff) {
+ tmp = entry->or_mask;
+ } else {
+ tmp = RREG32(reg);
+ tmp &= ~(entry->and_mask);
+ tmp |= entry->or_mask;
+ }
+ WREG32(reg, tmp);
+ }
+
+}
+
+
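Applied to one entry, the loop above does a masked read-modify-write. A worked standalone example, using the CLK_CTRL masks from the SDMA tables earlier in this patch and an invented current register value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t current  = 0xfe931f07;		/* pretend RREG32() result */
	uint32_t and_mask = 0xff000ff0;		/* bits to clear */
	uint32_t or_mask  = 0x3f000100;		/* bits to set */

	uint32_t tmp = (current & ~and_mask) | or_mask;

	printf("programmed value = 0x%08x\n", (unsigned)tmp);	/* 0x3f931107 */
	return 0;
}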
static int soc15_asic_reset(struct amdgpu_device *adev)
{
u32 i;
@@ -428,9 +428,8 @@ static int soc15_asic_reset(struct amdgpu_device *adev)
/* wait for asic to come out of reset */
for (i = 0; i < adev->usec_timeout; i++) {
- u32 memsize = (adev->flags & AMD_IS_APU) ?
- nbio_v7_0_get_memsize(adev) :
- nbio_v6_1_get_memsize(adev);
+ u32 memsize = adev->nbio_funcs->get_memsize(adev);
+
if (memsize != 0xffffffff)
break;
udelay(1);
@@ -495,14 +494,10 @@ static void soc15_program_aspm(struct amdgpu_device *adev)
}
static void soc15_enable_doorbell_aperture(struct amdgpu_device *adev,
- bool enable)
+ bool enable)
{
- if (adev->flags & AMD_IS_APU) {
- nbio_v7_0_enable_doorbell_aperture(adev, enable);
- } else {
- nbio_v6_1_enable_doorbell_aperture(adev, enable);
- nbio_v6_1_enable_doorbell_selfring_aperture(adev, enable);
- }
+ adev->nbio_funcs->enable_doorbell_aperture(adev, enable);
+ adev->nbio_funcs->enable_doorbell_selfring_aperture(adev, enable);
}
static const struct amdgpu_ip_block_version vega10_common_ip_block =
@@ -516,50 +511,65 @@ static const struct amdgpu_ip_block_version vega10_common_ip_block =
int soc15_set_ip_blocks(struct amdgpu_device *adev)
{
- nbio_v6_1_detect_hw_virt(adev);
+ /* Set IP register base before any HW register access */
+ switch (adev->asic_type) {
+ case CHIP_VEGA10:
+ case CHIP_RAVEN:
+ vega10_reg_base_init(adev);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (adev->flags & AMD_IS_APU)
+ adev->nbio_funcs = &nbio_v7_0_funcs;
+ else
+ adev->nbio_funcs = &nbio_v6_1_funcs;
+
+ adev->nbio_funcs->detect_hw_virt(adev);
if (amdgpu_sriov_vf(adev))
adev->virt.ops = &xgpu_ai_virt_ops;
switch (adev->asic_type) {
case CHIP_VEGA10:
- amdgpu_ip_block_add(adev, &vega10_common_ip_block);
- amdgpu_ip_block_add(adev, &gmc_v9_0_ip_block);
- amdgpu_ip_block_add(adev, &vega10_ih_ip_block);
+ amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
+ amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
if (amdgpu_fw_load_type == 2 || amdgpu_fw_load_type == -1)
- amdgpu_ip_block_add(adev, &psp_v3_1_ip_block);
+ amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
if (!amdgpu_sriov_vf(adev))
- amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block);
if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
- amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
else if (amdgpu_device_has_dc_support(adev))
- amdgpu_ip_block_add(adev, &dm_ip_block);
+ amdgpu_device_ip_block_add(adev, &dm_ip_block);
#else
# warning "Enable CONFIG_DRM_AMD_DC for display support on SOC15."
#endif
- amdgpu_ip_block_add(adev, &gfx_v9_0_ip_block);
- amdgpu_ip_block_add(adev, &sdma_v4_0_ip_block);
- amdgpu_ip_block_add(adev, &uvd_v7_0_ip_block);
- amdgpu_ip_block_add(adev, &vce_v4_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block);
break;
case CHIP_RAVEN:
- amdgpu_ip_block_add(adev, &vega10_common_ip_block);
- amdgpu_ip_block_add(adev, &gmc_v9_0_ip_block);
- amdgpu_ip_block_add(adev, &vega10_ih_ip_block);
- amdgpu_ip_block_add(adev, &psp_v10_0_ip_block);
- amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
+ amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
+ amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block);
if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
- amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
else if (amdgpu_device_has_dc_support(adev))
- amdgpu_ip_block_add(adev, &dm_ip_block);
+ amdgpu_device_ip_block_add(adev, &dm_ip_block);
#else
# warning "Enable CONFIG_DRM_AMD_DC for display support on SOC15."
#endif
- amdgpu_ip_block_add(adev, &gfx_v9_0_ip_block);
- amdgpu_ip_block_add(adev, &sdma_v4_0_ip_block);
- amdgpu_ip_block_add(adev, &vcn_v1_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block);
break;
default:
return -EINVAL;
@@ -570,10 +580,7 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
static uint32_t soc15_get_rev_id(struct amdgpu_device *adev)
{
- if (adev->flags & AMD_IS_APU)
- return nbio_v7_0_get_rev_id(adev);
- else
- return nbio_v6_1_get_rev_id(adev);
+ return adev->nbio_funcs->get_rev_id(adev);
}
static const struct amdgpu_asic_funcs soc15_asic_funcs =
@@ -609,8 +616,8 @@ static int soc15_common_early_init(void *handle)
adev->asic_funcs = &soc15_asic_funcs;
- if (amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP) &&
- (amdgpu_ip_block_mask & (1 << AMD_IP_BLOCK_TYPE_PSP)))
+ if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP) &&
+ (amdgpu_ip_block_mask & (1 << AMD_IP_BLOCK_TYPE_PSP)))
psp_enabled = true;
adev->rev_id = soc15_get_rev_id(adev);
@@ -675,7 +682,7 @@ static int soc15_common_early_init(void *handle)
adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
- amdgpu_get_pcie_info(adev);
+ amdgpu_device_get_pcie_info(adev);
return 0;
}
@@ -709,15 +716,12 @@ static int soc15_common_hw_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- /* move the golden regs per IP block */
- soc15_init_golden_registers(adev);
/* enable pcie gen2/3 link */
soc15_pcie_gen3_enable(adev);
/* enable aspm */
soc15_program_aspm(adev);
/* setup nbio registers */
- if (!(adev->flags & AMD_IS_APU))
- nbio_v6_1_init_registers(adev);
+ adev->nbio_funcs->init_registers(adev);
/* enable the doorbell aperture */
soc15_enable_doorbell_aperture(adev, true);
@@ -878,9 +882,9 @@ static int soc15_common_set_clockgating_state(void *handle,
switch (adev->asic_type) {
case CHIP_VEGA10:
- nbio_v6_1_update_medium_grain_clock_gating(adev,
+ adev->nbio_funcs->update_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE ? true : false);
- nbio_v6_1_update_medium_grain_light_sleep(adev,
+ adev->nbio_funcs->update_medium_grain_light_sleep(adev,
state == AMD_CG_STATE_GATE ? true : false);
soc15_update_hdp_light_sleep(adev,
state == AMD_CG_STATE_GATE ? true : false);
@@ -894,9 +898,9 @@ static int soc15_common_set_clockgating_state(void *handle,
state == AMD_CG_STATE_GATE ? true : false);
break;
case CHIP_RAVEN:
- nbio_v7_0_update_medium_grain_clock_gating(adev,
+ adev->nbio_funcs->update_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE ? true : false);
- nbio_v6_1_update_medium_grain_light_sleep(adev,
+ adev->nbio_funcs->update_medium_grain_light_sleep(adev,
state == AMD_CG_STATE_GATE ? true : false);
soc15_update_hdp_light_sleep(adev,
state == AMD_CG_STATE_GATE ? true : false);
@@ -921,7 +925,7 @@ static void soc15_common_get_clockgating_state(void *handle, u32 *flags)
if (amdgpu_sriov_vf(adev))
*flags = 0;
- nbio_v6_1_get_clockgating_state(adev, flags);
+ adev->nbio_funcs->get_clockgating_state(adev, flags);
/* AMD_CG_SUPPORT_HDP_LS */
data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.h b/drivers/gpu/drm/amd/amdgpu/soc15.h
index acb3cdb119f2..26b3feac5d06 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.h
@@ -29,8 +29,28 @@
extern const struct amd_ip_funcs soc15_common_ip_funcs;
+struct soc15_reg_golden {
+ u32 hwip;
+ u32 instance;
+ u32 segment;
+ u32 reg;
+ u32 and_mask;
+ u32 or_mask;
+};
+
+#define SOC15_REG_ENTRY(ip, inst, reg) ip##_HWIP, inst, reg##_BASE_IDX, reg
+
+#define SOC15_REG_GOLDEN_VALUE(ip, inst, reg, and_mask, or_mask) \
+ { ip##_HWIP, inst, reg##_BASE_IDX, reg, and_mask, or_mask }
+
void soc15_grbm_select(struct amdgpu_device *adev,
u32 me, u32 pipe, u32 queue, u32 vmid);
int soc15_set_ip_blocks(struct amdgpu_device *adev);
+void soc15_program_register_sequence(struct amdgpu_device *adev,
+ const struct soc15_reg_golden *registers,
+ const u32 array_size);
+
+int vega10_reg_base_init(struct amdgpu_device *adev);
+
#endif
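SOC15_REG_GOLDEN_VALUE() is just an aggregate initializer for the struct declared above. A sketch of what one table entry boils down to; the hwip, segment and register numbers are invented, only the masks are taken from the tables earlier in this patch:

#include <stdio.h>

/* Mirrors struct soc15_reg_golden from the hunk above. */
struct sketch_reg_golden {
	unsigned hwip, instance, segment, reg, and_mask, or_mask;
};

/* Roughly what SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CLK_CTRL,
 * 0xff000ff0, 0x3f000100) expands to, with invented numeric values. */
static const struct sketch_reg_golden example = {
	1, 0, 0, 0x1b, 0xff000ff0, 0x3f000100
};

int main(void)
{
	printf("reg 0x%x: and 0x%08x, or 0x%08x\n",
	       example.reg, example.and_mask, example.or_mask);
	return 0;
}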
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15_common.h b/drivers/gpu/drm/amd/amdgpu/soc15_common.h
index 7a8e4e28abb2..def865067edd 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15_common.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15_common.h
@@ -24,72 +24,28 @@
#ifndef __SOC15_COMMON_H__
#define __SOC15_COMMON_H__
-struct nbio_hdp_flush_reg {
- u32 hdp_flush_req_offset;
- u32 hdp_flush_done_offset;
- u32 ref_and_mask_cp0;
- u32 ref_and_mask_cp1;
- u32 ref_and_mask_cp2;
- u32 ref_and_mask_cp3;
- u32 ref_and_mask_cp4;
- u32 ref_and_mask_cp5;
- u32 ref_and_mask_cp6;
- u32 ref_and_mask_cp7;
- u32 ref_and_mask_cp8;
- u32 ref_and_mask_cp9;
- u32 ref_and_mask_sdma0;
- u32 ref_and_mask_sdma1;
-};
-
-struct nbio_pcie_index_data {
- u32 index_offset;
- u32 data_offset;
-};
-
/* Register Access Macros */
-#define SOC15_REG_OFFSET(ip, inst, reg) (0 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG0 + reg : \
- (1 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG1 + reg : \
- (2 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG2 + reg : \
- (3 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG3 + reg : \
- (ip##_BASE__INST##inst##_SEG4 + reg)))))
+#define SOC15_REG_OFFSET(ip, inst, reg) (adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg)
#define WREG32_FIELD15(ip, idx, reg, field, val) \
- WREG32(SOC15_REG_OFFSET(ip, idx, mm##reg), (RREG32(SOC15_REG_OFFSET(ip, idx, mm##reg)) & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field))
+ WREG32(adev->reg_offset[ip##_HWIP][idx][mm##reg##_BASE_IDX] + mm##reg, \
+ (RREG32(adev->reg_offset[ip##_HWIP][idx][mm##reg##_BASE_IDX] + mm##reg) \
+ & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field))
#define RREG32_SOC15(ip, inst, reg) \
- RREG32( (0 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG0 + reg : \
- (1 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG1 + reg : \
- (2 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG2 + reg : \
- (3 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG3 + reg : \
- (ip##_BASE__INST##inst##_SEG4 + reg))))))
+ RREG32(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg)
#define RREG32_SOC15_OFFSET(ip, inst, reg, offset) \
- RREG32( (0 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG0 + reg : \
- (1 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG1 + reg : \
- (2 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG2 + reg : \
- (3 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG3 + reg : \
- (ip##_BASE__INST##inst##_SEG4 + reg))))) + offset)
+ RREG32((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + offset)
#define WREG32_SOC15(ip, inst, reg, value) \
- WREG32( (0 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG0 + reg : \
- (1 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG1 + reg : \
- (2 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG2 + reg : \
- (3 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG3 + reg : \
- (ip##_BASE__INST##inst##_SEG4 + reg))))), value)
+ WREG32((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg), value)
#define WREG32_SOC15_NO_KIQ(ip, inst, reg, value) \
- WREG32_NO_KIQ( (0 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG0 + reg : \
- (1 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG1 + reg : \
- (2 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG2 + reg : \
- (3 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG3 + reg : \
- (ip##_BASE__INST##inst##_SEG4 + reg))))), value)
+ WREG32_NO_KIQ((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg), value)
#define WREG32_SOC15_OFFSET(ip, inst, reg, offset, value) \
- WREG32( (0 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG0 + reg : \
- (1 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG1 + reg : \
- (2 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG2 + reg : \
- (3 == reg##_BASE_IDX ? ip##_BASE__INST##inst##_SEG3 + reg : \
- (ip##_BASE__INST##inst##_SEG4 + reg))))) + offset, value)
+ WREG32((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + offset, value)
#endif
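
The macros above now resolve register addresses through a per-device table instead of the old compile-time nested ternaries, so one driver binary can serve ASICs whose IP base addresses differ. A hedged sketch of the lookup shape follows; the array dimensions and field names are assumptions.

/* Sketch of the runtime lookup behind SOC15_REG_OFFSET-style macros.
 * Dimensions and names are illustrative assumptions. */
#define SKETCH_HWIP_MAX  32
#define SKETCH_INST_MAX   8

struct soc15_dev_sketch {
	/* each entry points at an array of segment base addresses,
	 * filled once at init by a per-ASIC reg-base init routine */
	const unsigned int *reg_offset[SKETCH_HWIP_MAX][SKETCH_INST_MAX];
};

static unsigned int soc15_offset_sketch(const struct soc15_dev_sketch *dev,
					unsigned int hwip, unsigned int inst,
					unsigned int base_idx, unsigned int reg)
{
	/* segment base plus register offset, both known only at run time */
	return dev->reg_offset[hwip][inst][base_idx] + reg;
}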
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
index aa4e320e31f8..5995ffc183de 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
@@ -270,7 +270,7 @@ static void tonga_ih_decode_iv(struct amdgpu_device *adev,
entry->src_id = dw[0] & 0xff;
entry->src_data[0] = dw[1] & 0xfffffff;
entry->ring_id = dw[2] & 0xff;
- entry->vm_id = (dw[2] >> 8) & 0xff;
+ entry->vmid = (dw[2] >> 8) & 0xff;
entry->pas_id = (dw[2] >> 16) & 0xffff;
/* wptr/rptr are in bytes! */
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index b13ae34be1c2..8ab10c220910 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -541,7 +541,7 @@ static int uvd_v4_2_ring_test_ring(struct amdgpu_ring *ring)
*/
static void uvd_v4_2_ring_emit_ib(struct amdgpu_ring *ring,
struct amdgpu_ib *ib,
- unsigned vm_id, bool ctx_switch)
+ unsigned vmid, bool ctx_switch)
{
amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_BASE, 0));
amdgpu_ring_write(ring, ib->gpu_addr);
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index a4b0f1d842b7..c1fe30cdba32 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -556,7 +556,7 @@ static int uvd_v5_0_ring_test_ring(struct amdgpu_ring *ring)
*/
static void uvd_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
struct amdgpu_ib *ib,
- unsigned vm_id, bool ctx_switch)
+ unsigned vmid, bool ctx_switch)
{
amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index 0e8b887cf03e..59271055a30e 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -412,10 +412,10 @@ static int uvd_v6_0_sw_init(void *handle)
return r;
if (uvd_v6_0_enc_support(adev)) {
- struct amd_sched_rq *rq;
+ struct drm_sched_rq *rq;
ring = &adev->uvd.ring_enc[0];
- rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
- r = amd_sched_entity_init(&ring->sched, &adev->uvd.entity_enc,
+ rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
+ r = drm_sched_entity_init(&ring->sched, &adev->uvd.entity_enc,
rq, amdgpu_sched_jobs, NULL);
if (r) {
DRM_ERROR("Failed setting up UVD ENC run queue.\n");
@@ -456,7 +456,7 @@ static int uvd_v6_0_sw_fini(void *handle)
return r;
if (uvd_v6_0_enc_support(adev)) {
- amd_sched_entity_fini(&adev->uvd.ring_enc[0].sched, &adev->uvd.entity_enc);
+ drm_sched_entity_fini(&adev->uvd.ring_enc[0].sched, &adev->uvd.entity_enc);
for (i = 0; i < adev->uvd.num_enc_rings; ++i)
amdgpu_ring_fini(&adev->uvd.ring_enc[i]);
@@ -1028,10 +1028,10 @@ static int uvd_v6_0_ring_test_ring(struct amdgpu_ring *ring)
*/
static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
struct amdgpu_ib *ib,
- unsigned vm_id, bool ctx_switch)
+ unsigned vmid, bool ctx_switch)
{
amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_VMID, 0));
- amdgpu_ring_write(ring, vm_id);
+ amdgpu_ring_write(ring, vmid);
amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
@@ -1050,24 +1050,24 @@ static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
* Write enc ring commands to execute the indirect buffer
*/
static void uvd_v6_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
- struct amdgpu_ib *ib, unsigned int vm_id, bool ctx_switch)
+ struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch)
{
amdgpu_ring_write(ring, HEVC_ENC_CMD_IB_VM);
- amdgpu_ring_write(ring, vm_id);
+ amdgpu_ring_write(ring, vmid);
amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
amdgpu_ring_write(ring, ib->length_dw);
}
static void uvd_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
- unsigned vm_id, uint64_t pd_addr)
+ unsigned vmid, uint64_t pd_addr)
{
uint32_t reg;
- if (vm_id < 8)
- reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id;
+ if (vmid < 8)
+ reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid;
else
- reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8;
+ reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8;
amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
amdgpu_ring_write(ring, reg << 2);
@@ -1079,7 +1079,7 @@ static void uvd_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
- amdgpu_ring_write(ring, 1 << vm_id);
+ amdgpu_ring_write(ring, 1 << vmid);
amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
amdgpu_ring_write(ring, 0x8);
@@ -1088,7 +1088,7 @@ static void uvd_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8, 0));
- amdgpu_ring_write(ring, 1 << vm_id); /* mask */
+ amdgpu_ring_write(ring, 1 << vmid); /* mask */
amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
amdgpu_ring_write(ring, 0xC);
}
@@ -1127,14 +1127,14 @@ static void uvd_v6_0_enc_ring_insert_end(struct amdgpu_ring *ring)
}
static void uvd_v6_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
- unsigned int vm_id, uint64_t pd_addr)
+ unsigned int vmid, uint64_t pd_addr)
{
amdgpu_ring_write(ring, HEVC_ENC_CMD_UPDATE_PTB);
- amdgpu_ring_write(ring, vm_id);
+ amdgpu_ring_write(ring, vmid);
amdgpu_ring_write(ring, pd_addr >> 12);
amdgpu_ring_write(ring, HEVC_ENC_CMD_FLUSH_TLB);
- amdgpu_ring_write(ring, vm_id);
+ amdgpu_ring_write(ring, vmid);
}
static bool uvd_v6_0_is_idle(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index 660fa41dc877..6b95f4f344b5 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -29,7 +29,6 @@
#include "soc15_common.h"
#include "mmsch_v1_0.h"
-#include "soc15ip.h"
#include "uvd/uvd_7_0_offset.h"
#include "uvd/uvd_7_0_sh_mask.h"
#include "vce/vce_4_0_offset.h"
@@ -385,7 +384,7 @@ static int uvd_v7_0_early_init(void *handle)
static int uvd_v7_0_sw_init(void *handle)
{
struct amdgpu_ring *ring;
- struct amd_sched_rq *rq;
+ struct drm_sched_rq *rq;
int i, r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -416,8 +415,8 @@ static int uvd_v7_0_sw_init(void *handle)
}
ring = &adev->uvd.ring_enc[0];
- rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
- r = amd_sched_entity_init(&ring->sched, &adev->uvd.entity_enc,
+ rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
+ r = drm_sched_entity_init(&ring->sched, &adev->uvd.entity_enc,
rq, amdgpu_sched_jobs, NULL);
if (r) {
DRM_ERROR("Failed setting up UVD ENC run queue.\n");
@@ -472,7 +471,7 @@ static int uvd_v7_0_sw_fini(void *handle)
if (r)
return r;
- amd_sched_entity_fini(&adev->uvd.ring_enc[0].sched, &adev->uvd.entity_enc);
+ drm_sched_entity_fini(&adev->uvd.ring_enc[0].sched, &adev->uvd.entity_enc);
for (i = 0; i < adev->uvd.num_enc_rings; ++i)
amdgpu_ring_fini(&adev->uvd.ring_enc[i]);
@@ -1086,6 +1085,8 @@ static void uvd_v7_0_stop(struct amdgpu_device *adev)
static void uvd_v7_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
unsigned flags)
{
+ struct amdgpu_device *adev = ring->adev;
+
WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
amdgpu_ring_write(ring,
@@ -1123,6 +1124,7 @@ static void uvd_v7_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq
static void uvd_v7_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
u64 seq, unsigned flags)
{
+
WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
amdgpu_ring_write(ring, HEVC_ENC_CMD_FENCE);
@@ -1141,6 +1143,8 @@ static void uvd_v7_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
*/
static void uvd_v7_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
+ struct amdgpu_device *adev = ring->adev;
+
amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(NBIF, 0,
mmHDP_MEM_COHERENCY_FLUSH_CNTL), 0));
amdgpu_ring_write(ring, 0);
@@ -1155,6 +1159,8 @@ static void uvd_v7_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
*/
static void uvd_v7_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
{
+ struct amdgpu_device *adev = ring->adev;
+
amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 0));
amdgpu_ring_write(ring, 1);
}
@@ -1212,11 +1218,13 @@ static int uvd_v7_0_ring_test_ring(struct amdgpu_ring *ring)
*/
static void uvd_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
struct amdgpu_ib *ib,
- unsigned vm_id, bool ctx_switch)
+ unsigned vmid, bool ctx_switch)
{
+ struct amdgpu_device *adev = ring->adev;
+
amdgpu_ring_write(ring,
PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_VMID), 0));
- amdgpu_ring_write(ring, vm_id);
+ amdgpu_ring_write(ring, vmid);
amdgpu_ring_write(ring,
PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_64BIT_BAR_LOW), 0));
@@ -1238,10 +1246,10 @@ static void uvd_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
* Write enc ring commands to execute the indirect buffer
*/
static void uvd_v7_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
- struct amdgpu_ib *ib, unsigned int vm_id, bool ctx_switch)
+ struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch)
{
amdgpu_ring_write(ring, HEVC_ENC_CMD_IB_VM);
- amdgpu_ring_write(ring, vm_id);
+ amdgpu_ring_write(ring, vmid);
amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
amdgpu_ring_write(ring, ib->length_dw);
@@ -1250,6 +1258,8 @@ static void uvd_v7_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
static void uvd_v7_0_vm_reg_write(struct amdgpu_ring *ring,
uint32_t data0, uint32_t data1)
{
+ struct amdgpu_device *adev = ring->adev;
+
amdgpu_ring_write(ring,
PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
amdgpu_ring_write(ring, data0);
@@ -1264,6 +1274,8 @@ static void uvd_v7_0_vm_reg_write(struct amdgpu_ring *ring,
static void uvd_v7_0_vm_reg_wait(struct amdgpu_ring *ring,
uint32_t data0, uint32_t data1, uint32_t mask)
{
+ struct amdgpu_device *adev = ring->adev;
+
amdgpu_ring_write(ring,
PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
amdgpu_ring_write(ring, data0);
@@ -1279,25 +1291,26 @@ static void uvd_v7_0_vm_reg_wait(struct amdgpu_ring *ring,
}
static void uvd_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
- unsigned vm_id, uint64_t pd_addr)
+ unsigned vmid, uint64_t pd_addr)
{
struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
- uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
- uint32_t data0, data1, mask;
+ uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vmid);
+ uint64_t flags = AMDGPU_PTE_VALID;
unsigned eng = ring->vm_inv_eng;
+ uint32_t data0, data1, mask;
- pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr);
- pd_addr |= AMDGPU_PTE_VALID;
+ amdgpu_gart_get_vm_pde(ring->adev, -1, &pd_addr, &flags);
+ pd_addr |= flags;
- data0 = (hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2;
+ data0 = (hub->ctx0_ptb_addr_hi32 + vmid * 2) << 2;
data1 = upper_32_bits(pd_addr);
uvd_v7_0_vm_reg_write(ring, data0, data1);
- data0 = (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2;
+ data0 = (hub->ctx0_ptb_addr_lo32 + vmid * 2) << 2;
data1 = lower_32_bits(pd_addr);
uvd_v7_0_vm_reg_write(ring, data0, data1);
- data0 = (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2;
+ data0 = (hub->ctx0_ptb_addr_lo32 + vmid * 2) << 2;
data1 = lower_32_bits(pd_addr);
mask = 0xffffffff;
uvd_v7_0_vm_reg_wait(ring, data0, data1, mask);
@@ -1309,36 +1322,47 @@ static void uvd_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
/* wait for flush */
data0 = (hub->vm_inv_eng0_ack + eng) << 2;
- data1 = 1 << vm_id;
- mask = 1 << vm_id;
+ data1 = 1 << vmid;
+ mask = 1 << vmid;
uvd_v7_0_vm_reg_wait(ring, data0, data1, mask);
}
+static void uvd_v7_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
+{
+ int i;
+ struct amdgpu_device *adev = ring->adev;
+
+ for (i = 0; i < count; i++)
+ amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP), 0));
+
+}
+
static void uvd_v7_0_enc_ring_insert_end(struct amdgpu_ring *ring)
{
amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
}
static void uvd_v7_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
- unsigned int vm_id, uint64_t pd_addr)
+ unsigned int vmid, uint64_t pd_addr)
{
struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
- uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
+ uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vmid);
+ uint64_t flags = AMDGPU_PTE_VALID;
unsigned eng = ring->vm_inv_eng;
- pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr);
- pd_addr |= AMDGPU_PTE_VALID;
+ amdgpu_gart_get_vm_pde(ring->adev, -1, &pd_addr, &flags);
+ pd_addr |= flags;
amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WRITE);
- amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2);
+ amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_hi32 + vmid * 2) << 2);
amdgpu_ring_write(ring, upper_32_bits(pd_addr));
amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WRITE);
- amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2);
+ amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_lo32 + vmid * 2) << 2);
amdgpu_ring_write(ring, lower_32_bits(pd_addr));
amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WAIT);
- amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2);
+ amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_lo32 + vmid * 2) << 2);
amdgpu_ring_write(ring, 0xffffffff);
amdgpu_ring_write(ring, lower_32_bits(pd_addr));
@@ -1350,8 +1374,8 @@ static void uvd_v7_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
/* wait for flush */
amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WAIT);
amdgpu_ring_write(ring, (hub->vm_inv_eng0_ack + eng) << 2);
- amdgpu_ring_write(ring, 1 << vm_id);
- amdgpu_ring_write(ring, 1 << vm_id);
+ amdgpu_ring_write(ring, 1 << vmid);
+ amdgpu_ring_write(ring, 1 << vmid);
}
#if 0
@@ -1681,7 +1705,7 @@ const struct amd_ip_funcs uvd_v7_0_ip_funcs = {
static const struct amdgpu_ring_funcs uvd_v7_0_ring_vm_funcs = {
.type = AMDGPU_RING_TYPE_UVD,
.align_mask = 0xf,
- .nop = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP), 0),
+ .nop = PACKET0(0x81ff, 0),
.support_64bit_ptrs = false,
.vmhub = AMDGPU_MMHUB,
.get_rptr = uvd_v7_0_ring_get_rptr,
@@ -1700,7 +1724,7 @@ static const struct amdgpu_ring_funcs uvd_v7_0_ring_vm_funcs = {
.emit_hdp_invalidate = uvd_v7_0_ring_emit_hdp_invalidate,
.test_ring = uvd_v7_0_ring_test_ring,
.test_ib = amdgpu_uvd_ring_test_ib,
- .insert_nop = amdgpu_ring_insert_nop,
+ .insert_nop = uvd_v7_0_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
.begin_use = amdgpu_uvd_ring_begin_use,
.end_use = amdgpu_uvd_ring_end_use,
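
One consequence of the runtime offset lookup is visible above: the static .nop initializer can no longer use SOC15_REG_OFFSET (it now needs adev), so the ring keeps a hard-coded NOP offset while a per-ring insert_nop callback resolves mmUVD_NO_OP at run time. A small sketch of that split, with illustrative names only:

/* Sketch of the constraint: static initializers need compile-time
 * constants, while the new offset lookup is a runtime computation. */
struct ring_funcs_sketch {
	unsigned int nop;                                     /* must be constant */
	void (*insert_nop)(void *ring, unsigned int count);   /* may look up offsets */
};

static void insert_nop_sketch(void *ring, unsigned int count)
{
	/* a real callback resolves the NOP register via the runtime
	 * reg_offset table and writes 'count' NOP packets to the ring */
	(void)ring;
	(void)count;
}

static const struct ring_funcs_sketch funcs_sketch = {
	.nop = 0x81ff,                 /* constant fallback, as in the patch */
	.insert_nop = insert_nop_sketch,
};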
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index cf81065e3c5a..a5355eb689f1 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -834,24 +834,24 @@ out:
}
static void vce_v3_0_ring_emit_ib(struct amdgpu_ring *ring,
- struct amdgpu_ib *ib, unsigned int vm_id, bool ctx_switch)
+ struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch)
{
amdgpu_ring_write(ring, VCE_CMD_IB_VM);
- amdgpu_ring_write(ring, vm_id);
+ amdgpu_ring_write(ring, vmid);
amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
amdgpu_ring_write(ring, ib->length_dw);
}
static void vce_v3_0_emit_vm_flush(struct amdgpu_ring *ring,
- unsigned int vm_id, uint64_t pd_addr)
+ unsigned int vmid, uint64_t pd_addr)
{
amdgpu_ring_write(ring, VCE_CMD_UPDATE_PTB);
- amdgpu_ring_write(ring, vm_id);
+ amdgpu_ring_write(ring, vmid);
amdgpu_ring_write(ring, pd_addr >> 12);
amdgpu_ring_write(ring, VCE_CMD_FLUSH_TLB);
- amdgpu_ring_write(ring, vm_id);
+ amdgpu_ring_write(ring, vmid);
amdgpu_ring_write(ring, VCE_CMD_END);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
index f2f713650074..7cf2eef68cf2 100755
--- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
@@ -32,7 +32,6 @@
#include "soc15_common.h"
#include "mmsch_v1_0.h"
-#include "soc15ip.h"
#include "vce/vce_4_0_offset.h"
#include "vce/vce_4_0_default.h"
#include "vce/vce_4_0_sh_mask.h"
@@ -424,7 +423,7 @@ static int vce_v4_0_sw_init(void *handle)
if (r)
return r;
- size = (VCE_V4_0_STACK_SIZE + VCE_V4_0_DATA_SIZE) * 2;
+ size = VCE_V4_0_STACK_SIZE + VCE_V4_0_DATA_SIZE;
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
size += VCE_V4_0_FW_SIZE;
@@ -939,10 +938,10 @@ static int vce_v4_0_set_powergating_state(void *handle,
#endif
static void vce_v4_0_ring_emit_ib(struct amdgpu_ring *ring,
- struct amdgpu_ib *ib, unsigned int vm_id, bool ctx_switch)
+ struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch)
{
amdgpu_ring_write(ring, VCE_CMD_IB_VM);
- amdgpu_ring_write(ring, vm_id);
+ amdgpu_ring_write(ring, vmid);
amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
amdgpu_ring_write(ring, ib->length_dw);
@@ -966,25 +965,26 @@ static void vce_v4_0_ring_insert_end(struct amdgpu_ring *ring)
}
static void vce_v4_0_emit_vm_flush(struct amdgpu_ring *ring,
- unsigned int vm_id, uint64_t pd_addr)
+ unsigned int vmid, uint64_t pd_addr)
{
struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
- uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
+ uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vmid);
+ uint64_t flags = AMDGPU_PTE_VALID;
unsigned eng = ring->vm_inv_eng;
- pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr);
- pd_addr |= AMDGPU_PTE_VALID;
+ amdgpu_gart_get_vm_pde(ring->adev, -1, &pd_addr, &flags);
+ pd_addr |= flags;
amdgpu_ring_write(ring, VCE_CMD_REG_WRITE);
- amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2);
+ amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_hi32 + vmid * 2) << 2);
amdgpu_ring_write(ring, upper_32_bits(pd_addr));
amdgpu_ring_write(ring, VCE_CMD_REG_WRITE);
- amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2);
+ amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_lo32 + vmid * 2) << 2);
amdgpu_ring_write(ring, lower_32_bits(pd_addr));
amdgpu_ring_write(ring, VCE_CMD_REG_WAIT);
- amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2);
+ amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_lo32 + vmid * 2) << 2);
amdgpu_ring_write(ring, 0xffffffff);
amdgpu_ring_write(ring, lower_32_bits(pd_addr));
@@ -996,8 +996,8 @@ static void vce_v4_0_emit_vm_flush(struct amdgpu_ring *ring,
/* wait for flush */
amdgpu_ring_write(ring, VCE_CMD_REG_WAIT);
amdgpu_ring_write(ring, (hub->vm_inv_eng0_ack + eng) << 2);
- amdgpu_ring_write(ring, 1 << vm_id);
- amdgpu_ring_write(ring, 1 << vm_id);
+ amdgpu_ring_write(ring, 1 << vmid);
+ amdgpu_ring_write(ring, 1 << vmid);
}
static int vce_v4_0_set_interrupt_state(struct amdgpu_device *adev,
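
The emit_vm_flush paths above all follow the same shape: program the hub's per-VMID page-table base, confirm the write, request an invalidation on the ring's invalidation engine, then wait for the per-VMID ack bit. A conceptual sketch follows; the helper names and parameters are assumptions, and the real code emits these steps as ring packets.

/* Conceptual sketch of the VM-flush sequence the rings emit via
 * register-write / register-wait packets.  Illustrative only. */
static void vm_flush_sketch(unsigned int vmid, unsigned long long pd_addr,
			    unsigned int ptb_lo, unsigned int ptb_hi,
			    unsigned int inv_req, unsigned int inv_ack,
			    unsigned int req,
			    void (*reg_write)(unsigned int, unsigned int),
			    void (*reg_wait)(unsigned int, unsigned int, unsigned int))
{
	/* 1. point the per-VMID context registers at the new page directory */
	reg_write(ptb_hi + vmid * 2, (unsigned int)(pd_addr >> 32));
	reg_write(ptb_lo + vmid * 2, (unsigned int)pd_addr);

	/* 2. wait until the low word reads back as written */
	reg_wait(ptb_lo + vmid * 2, (unsigned int)pd_addr, 0xffffffff);

	/* 3. kick the invalidation engine, then poll its per-VMID ack bit */
	reg_write(inv_req, req);
	reg_wait(inv_ack, 1u << vmid, 1u << vmid);
}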
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index e4673f792545..b99e15c43e45 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -28,7 +28,6 @@
#include "soc15d.h"
#include "soc15_common.h"
-#include "soc15ip.h"
#include "vcn/vcn_1_0_offset.h"
#include "vcn/vcn_1_0_sh_mask.h"
#include "hdp/hdp_4_0_offset.h"
@@ -744,6 +743,8 @@ static void vcn_v1_0_dec_ring_set_wptr(struct amdgpu_ring *ring)
*/
static void vcn_v1_0_dec_ring_insert_start(struct amdgpu_ring *ring)
{
+ struct amdgpu_device *adev = ring->adev;
+
amdgpu_ring_write(ring,
PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
amdgpu_ring_write(ring, 0);
@@ -761,6 +762,8 @@ static void vcn_v1_0_dec_ring_insert_start(struct amdgpu_ring *ring)
*/
static void vcn_v1_0_dec_ring_insert_end(struct amdgpu_ring *ring)
{
+ struct amdgpu_device *adev = ring->adev;
+
amdgpu_ring_write(ring,
PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
amdgpu_ring_write(ring, VCN_DEC_CMD_PACKET_END << 1);
@@ -777,6 +780,8 @@ static void vcn_v1_0_dec_ring_insert_end(struct amdgpu_ring *ring)
static void vcn_v1_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
unsigned flags)
{
+ struct amdgpu_device *adev = ring->adev;
+
WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
amdgpu_ring_write(ring,
@@ -812,6 +817,8 @@ static void vcn_v1_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64
*/
static void vcn_v1_0_dec_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
{
+ struct amdgpu_device *adev = ring->adev;
+
amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 0));
amdgpu_ring_write(ring, 1);
}
@@ -826,11 +833,13 @@ static void vcn_v1_0_dec_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
*/
static void vcn_v1_0_dec_ring_emit_ib(struct amdgpu_ring *ring,
struct amdgpu_ib *ib,
- unsigned vm_id, bool ctx_switch)
+ unsigned vmid, bool ctx_switch)
{
+ struct amdgpu_device *adev = ring->adev;
+
amdgpu_ring_write(ring,
PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_VMID), 0));
- amdgpu_ring_write(ring, vm_id);
+ amdgpu_ring_write(ring, vmid);
amdgpu_ring_write(ring,
PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_64BIT_BAR_LOW), 0));
@@ -846,6 +855,8 @@ static void vcn_v1_0_dec_ring_emit_ib(struct amdgpu_ring *ring,
static void vcn_v1_0_dec_vm_reg_write(struct amdgpu_ring *ring,
uint32_t data0, uint32_t data1)
{
+ struct amdgpu_device *adev = ring->adev;
+
amdgpu_ring_write(ring,
PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
amdgpu_ring_write(ring, data0);
@@ -860,6 +871,8 @@ static void vcn_v1_0_dec_vm_reg_write(struct amdgpu_ring *ring,
static void vcn_v1_0_dec_vm_reg_wait(struct amdgpu_ring *ring,
uint32_t data0, uint32_t data1, uint32_t mask)
{
+ struct amdgpu_device *adev = ring->adev;
+
amdgpu_ring_write(ring,
PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
amdgpu_ring_write(ring, data0);
@@ -875,25 +888,26 @@ static void vcn_v1_0_dec_vm_reg_wait(struct amdgpu_ring *ring,
}
static void vcn_v1_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
- unsigned vm_id, uint64_t pd_addr)
+ unsigned vmid, uint64_t pd_addr)
{
struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
- uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
- uint32_t data0, data1, mask;
+ uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vmid);
+ uint64_t flags = AMDGPU_PTE_VALID;
unsigned eng = ring->vm_inv_eng;
+ uint32_t data0, data1, mask;
- pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr);
- pd_addr |= AMDGPU_PTE_VALID;
+ amdgpu_gart_get_vm_pde(ring->adev, -1, &pd_addr, &flags);
+ pd_addr |= flags;
- data0 = (hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2;
+ data0 = (hub->ctx0_ptb_addr_hi32 + vmid * 2) << 2;
data1 = upper_32_bits(pd_addr);
vcn_v1_0_dec_vm_reg_write(ring, data0, data1);
- data0 = (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2;
+ data0 = (hub->ctx0_ptb_addr_lo32 + vmid * 2) << 2;
data1 = lower_32_bits(pd_addr);
vcn_v1_0_dec_vm_reg_write(ring, data0, data1);
- data0 = (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2;
+ data0 = (hub->ctx0_ptb_addr_lo32 + vmid * 2) << 2;
data1 = lower_32_bits(pd_addr);
mask = 0xffffffff;
vcn_v1_0_dec_vm_reg_wait(ring, data0, data1, mask);
@@ -905,8 +919,8 @@ static void vcn_v1_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
/* wait for flush */
data0 = (hub->vm_inv_eng0_ack + eng) << 2;
- data1 = 1 << vm_id;
- mask = 1 << vm_id;
+ data1 = 1 << vmid;
+ mask = 1 << vmid;
vcn_v1_0_dec_vm_reg_wait(ring, data0, data1, mask);
}
@@ -997,38 +1011,39 @@ static void vcn_v1_0_enc_ring_insert_end(struct amdgpu_ring *ring)
* Write enc ring commands to execute the indirect buffer
*/
static void vcn_v1_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
- struct amdgpu_ib *ib, unsigned int vm_id, bool ctx_switch)
+ struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch)
{
amdgpu_ring_write(ring, VCN_ENC_CMD_IB);
- amdgpu_ring_write(ring, vm_id);
+ amdgpu_ring_write(ring, vmid);
amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
amdgpu_ring_write(ring, ib->length_dw);
}
static void vcn_v1_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
- unsigned int vm_id, uint64_t pd_addr)
+ unsigned int vmid, uint64_t pd_addr)
{
struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
- uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
+ uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vmid);
+ uint64_t flags = AMDGPU_PTE_VALID;
unsigned eng = ring->vm_inv_eng;
- pd_addr = amdgpu_gart_get_vm_pde(ring->adev, pd_addr);
- pd_addr |= AMDGPU_PTE_VALID;
+ amdgpu_gart_get_vm_pde(ring->adev, -1, &pd_addr, &flags);
+ pd_addr |= flags;
amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WRITE);
amdgpu_ring_write(ring,
- (hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2);
+ (hub->ctx0_ptb_addr_hi32 + vmid * 2) << 2);
amdgpu_ring_write(ring, upper_32_bits(pd_addr));
amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WRITE);
amdgpu_ring_write(ring,
- (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2);
+ (hub->ctx0_ptb_addr_lo32 + vmid * 2) << 2);
amdgpu_ring_write(ring, lower_32_bits(pd_addr));
amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WAIT);
amdgpu_ring_write(ring,
- (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2);
+ (hub->ctx0_ptb_addr_lo32 + vmid * 2) << 2);
amdgpu_ring_write(ring, 0xffffffff);
amdgpu_ring_write(ring, lower_32_bits(pd_addr));
@@ -1040,8 +1055,8 @@ static void vcn_v1_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
/* wait for flush */
amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WAIT);
amdgpu_ring_write(ring, (hub->vm_inv_eng0_ack + eng) << 2);
- amdgpu_ring_write(ring, 1 << vm_id);
- amdgpu_ring_write(ring, 1 << vm_id);
+ amdgpu_ring_write(ring, 1 << vmid);
+ amdgpu_ring_write(ring, 1 << vmid);
}
static int vcn_v1_0_set_interrupt_state(struct amdgpu_device *adev,
@@ -1077,6 +1092,17 @@ static int vcn_v1_0_process_interrupt(struct amdgpu_device *adev,
return 0;
}
+static void vcn_v1_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
+{
+ int i;
+ struct amdgpu_device *adev = ring->adev;
+
+ for (i = 0; i < count; i++)
+ amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP), 0));
+
+}
+
+
static const struct amd_ip_funcs vcn_v1_0_ip_funcs = {
.name = "vcn_v1_0",
.early_init = vcn_v1_0_early_init,
@@ -1100,7 +1126,7 @@ static const struct amd_ip_funcs vcn_v1_0_ip_funcs = {
static const struct amdgpu_ring_funcs vcn_v1_0_dec_ring_vm_funcs = {
.type = AMDGPU_RING_TYPE_VCN_DEC,
.align_mask = 0xf,
- .nop = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP), 0),
+ .nop = PACKET0(0x81ff, 0),
.support_64bit_ptrs = false,
.vmhub = AMDGPU_MMHUB,
.get_rptr = vcn_v1_0_dec_ring_get_rptr,
@@ -1118,7 +1144,7 @@ static const struct amdgpu_ring_funcs vcn_v1_0_dec_ring_vm_funcs = {
.emit_hdp_invalidate = vcn_v1_0_dec_ring_emit_hdp_invalidate,
.test_ring = amdgpu_vcn_dec_ring_test_ring,
.test_ib = amdgpu_vcn_dec_ring_test_ib,
- .insert_nop = amdgpu_ring_insert_nop,
+ .insert_nop = vcn_v1_0_ring_insert_nop,
.insert_start = vcn_v1_0_dec_ring_insert_start,
.insert_end = vcn_v1_0_dec_ring_insert_end,
.pad_ib = amdgpu_ring_generic_pad_ib,
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
index ca778cd4e6e8..b69ceafb7888 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
@@ -25,8 +25,6 @@
#include "amdgpu_ih.h"
#include "soc15.h"
-
-#include "soc15ip.h"
#include "oss/osssys_4_0_offset.h"
#include "oss/osssys_4_0_sh_mask.h"
@@ -97,10 +95,7 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
/* disable irqs */
vega10_ih_disable_interrupts(adev);
- if (adev->flags & AMD_IS_APU)
- nbio_v7_0_ih_control(adev);
- else
- nbio_v6_1_ih_control(adev);
+ adev->nbio_funcs->ih_control(adev);
ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL);
/* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer*/
@@ -151,10 +146,8 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
ENABLE, 0);
}
WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR, ih_doorbell_rtpr);
- if (adev->flags & AMD_IS_APU)
- nbio_v7_0_ih_doorbell_range(adev, adev->irq.ih.use_doorbell, adev->irq.ih.doorbell_index);
- else
- nbio_v6_1_ih_doorbell_range(adev, adev->irq.ih.use_doorbell, adev->irq.ih.doorbell_index);
+ adev->nbio_funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell,
+ adev->irq.ih.doorbell_index);
tmp = RREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL);
tmp = REG_SET_FIELD(tmp, IH_STORM_CLIENT_LIST_CNTL,
@@ -334,8 +327,8 @@ static void vega10_ih_decode_iv(struct amdgpu_device *adev,
entry->client_id = dw[0] & 0xff;
entry->src_id = (dw[0] >> 8) & 0xff;
entry->ring_id = (dw[0] >> 16) & 0xff;
- entry->vm_id = (dw[0] >> 24) & 0xf;
- entry->vm_id_src = (dw[0] >> 31);
+ entry->vmid = (dw[0] >> 24) & 0xf;
+ entry->vmid_src = (dw[0] >> 31);
entry->timestamp = dw[1] | ((u64)(dw[2] & 0xffff) << 32);
entry->timestamp_src = dw[2] >> 31;
entry->pas_id = dw[3] & 0xffff;
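
The bit layout decoded above can be summarized in a tiny helper; the sketch below only restates the shifts and masks from the code for readability, with illustrative struct and function names.

/* Sketch: unpacking the first IV ring dword, mirroring the decode above. */
struct iv_dw0_sketch {
	unsigned int client_id, src_id, ring_id, vmid, vmid_src;
};

static void decode_iv_dw0_sketch(unsigned int dw0, struct iv_dw0_sketch *out)
{
	out->client_id = dw0 & 0xff;
	out->src_id    = (dw0 >> 8) & 0xff;
	out->ring_id   = (dw0 >> 16) & 0xff;
	out->vmid      = (dw0 >> 24) & 0xf;
	out->vmid_src  = dw0 >> 31;
}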
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c b/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c
new file mode 100644
index 000000000000..b7bdd04793d6
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "soc15.h"
+
+#include "soc15_common.h"
+#include "soc15ip.h"
+
+int vega10_reg_base_init(struct amdgpu_device *adev)
+{
+ /* HW has more IP blocks, only initialize the blocks used by our driver */
+ uint32_t i;
+ for (i = 0 ; i < MAX_INSTANCE ; ++i) {
+ adev->reg_offset[GC_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
+ adev->reg_offset[HDP_HWIP][i] = (uint32_t *)(&(HDP_BASE.instance[i]));
+ adev->reg_offset[MMHUB_HWIP][i] = (uint32_t *)(&(MMHUB_BASE.instance[i]));
+ adev->reg_offset[ATHUB_HWIP][i] = (uint32_t *)(&(ATHUB_BASE.instance[i]));
+ adev->reg_offset[NBIO_HWIP][i] = (uint32_t *)(&(NBIO_BASE.instance[i]));
+ adev->reg_offset[MP0_HWIP][i] = (uint32_t *)(&(MP0_BASE.instance[i]));
+ adev->reg_offset[UVD_HWIP][i] = (uint32_t *)(&(UVD_BASE.instance[i]));
+ adev->reg_offset[VCE_HWIP][i] = (uint32_t *)(&(VCE_BASE.instance[i]));
+ adev->reg_offset[VCN_HWIP][i] = (uint32_t *)(&(VCN_BASE.instance[i]));
+ adev->reg_offset[DF_HWIP][i] = (uint32_t *)(&(DF_BASE.instance[i]));
+ adev->reg_offset[DCE_HWIP][i] = (uint32_t *)(&(DCE_BASE.instance[i]));
+ adev->reg_offset[OSSSYS_HWIP][i] = (uint32_t *)(&(OSSSYS_BASE.instance[i]));
+ adev->reg_offset[SDMA0_HWIP][i] = (uint32_t *)(&(SDMA0_BASE.instance[i]));
+ adev->reg_offset[SDMA1_HWIP][i] = (uint32_t *)(&(SDMA1_BASE.instance[i]));
+ adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i]));
+ adev->reg_offset[PWR_HWIP][i] = (uint32_t *)(&(PWR_BASE.instance[i]));
+ adev->reg_offset[NBIF_HWIP][i] = (uint32_t *)(&(NBIF_BASE.instance[i]));
+
+ }
+ return 0;
+}
+
+
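
vega10_reg_base_init() above is the producer side of the runtime lookup: each HW IP's per-instance slot in adev->reg_offset is pointed at that IP's segment-base array. A minimal sketch of the fill pattern, using assumed types and names:

/* Sketch of the init-time fill that makes runtime offset lookups work.
 * Illustrative types and names only. */
#define SKETCH_SEGMENTS 5

struct ip_base_sketch {
	unsigned int segment[SKETCH_SEGMENTS];  /* base of each register segment */
};

static void reg_base_fill_sketch(const unsigned int **inst_offsets,
				 const struct ip_base_sketch *bases,
				 unsigned int num_instances)
{
	unsigned int i;

	/* point every instance of this IP block at its segment-base array so
	 * later lookups can add the BASE_IDX segment index plus the offset */
	for (i = 0; i < num_instances; i++)
		inst_offsets[i] = bases[i].segment;
}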
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index bb8ca9489546..da2b99c2d95f 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -282,29 +282,29 @@ static void vi_init_golden_registers(struct amdgpu_device *adev)
switch (adev->asic_type) {
case CHIP_TOPAZ:
- amdgpu_program_register_sequence(adev,
- iceland_mgcg_cgcg_init,
- ARRAY_SIZE(iceland_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ iceland_mgcg_cgcg_init,
+ ARRAY_SIZE(iceland_mgcg_cgcg_init));
break;
case CHIP_FIJI:
- amdgpu_program_register_sequence(adev,
- fiji_mgcg_cgcg_init,
- ARRAY_SIZE(fiji_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ fiji_mgcg_cgcg_init,
+ ARRAY_SIZE(fiji_mgcg_cgcg_init));
break;
case CHIP_TONGA:
- amdgpu_program_register_sequence(adev,
- tonga_mgcg_cgcg_init,
- ARRAY_SIZE(tonga_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ tonga_mgcg_cgcg_init,
+ ARRAY_SIZE(tonga_mgcg_cgcg_init));
break;
case CHIP_CARRIZO:
- amdgpu_program_register_sequence(adev,
- cz_mgcg_cgcg_init,
- ARRAY_SIZE(cz_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ cz_mgcg_cgcg_init,
+ ARRAY_SIZE(cz_mgcg_cgcg_init));
break;
case CHIP_STONEY:
- amdgpu_program_register_sequence(adev,
- stoney_mgcg_cgcg_init,
- ARRAY_SIZE(stoney_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ stoney_mgcg_cgcg_init,
+ ARRAY_SIZE(stoney_mgcg_cgcg_init));
break;
case CHIP_POLARIS11:
case CHIP_POLARIS10:
@@ -449,14 +449,18 @@ static bool vi_read_bios_from_rom(struct amdgpu_device *adev,
static void vi_detect_hw_virtualization(struct amdgpu_device *adev)
{
- uint32_t reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);
- /* bit0: 0 means pf and 1 means vf */
- /* bit31: 0 means disable IOV and 1 means enable */
- if (reg & 1)
- adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;
-
- if (reg & 0x80000000)
- adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;
+ uint32_t reg = 0;
+
+ if (adev->asic_type == CHIP_TONGA ||
+ adev->asic_type == CHIP_FIJI) {
+ reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);
+ /* bit0: 0 means pf and 1 means vf */
+ if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, FUNC_IDENTIFIER))
+ adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;
+ /* bit31: 0 means disable IOV and 1 means enable */
+ if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, IOV_ENABLE))
+ adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;
+ }
if (reg == 0) {
		if (is_virtual_machine()) /* passthrough mode excludes sr-iov mode */
@@ -667,7 +671,7 @@ static int vi_gpu_pci_config_reset(struct amdgpu_device *adev)
/* disable BM */
pci_clear_master(adev->pdev);
/* reset */
- amdgpu_pci_config_reset(adev);
+ amdgpu_device_pci_config_reset(adev);
udelay(100);
@@ -891,8 +895,8 @@ static int vi_common_early_init(void *handle)
adev->asic_funcs = &vi_asic_funcs;
- if (amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_SMC) &&
- (amdgpu_ip_block_mask & (1 << AMD_IP_BLOCK_TYPE_SMC)))
+ if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_SMC) &&
+ (amdgpu_ip_block_mask & (1 << AMD_IP_BLOCK_TYPE_SMC)))
smc_enabled = true;
adev->rev_id = vi_get_rev_id(adev);
@@ -1074,7 +1078,7 @@ static int vi_common_early_init(void *handle)
/* vi use smc load by default */
adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
- amdgpu_get_pcie_info(adev);
+ amdgpu_device_get_pcie_info(adev);
return 0;
}
@@ -1487,115 +1491,115 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
switch (adev->asic_type) {
case CHIP_TOPAZ:
/* topaz has no DCE, UVD, VCE */
- amdgpu_ip_block_add(adev, &vi_common_ip_block);
- amdgpu_ip_block_add(adev, &gmc_v7_4_ip_block);
- amdgpu_ip_block_add(adev, &iceland_ih_ip_block);
- amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
+ amdgpu_device_ip_block_add(adev, &gmc_v7_4_ip_block);
+ amdgpu_device_ip_block_add(adev, &iceland_ih_ip_block);
+ amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block);
if (adev->enable_virtual_display)
- amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
- amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
- amdgpu_ip_block_add(adev, &sdma_v2_4_ip_block);
+ amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
+ amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &sdma_v2_4_ip_block);
break;
case CHIP_FIJI:
- amdgpu_ip_block_add(adev, &vi_common_ip_block);
- amdgpu_ip_block_add(adev, &gmc_v8_5_ip_block);
- amdgpu_ip_block_add(adev, &tonga_ih_ip_block);
- amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
+ amdgpu_device_ip_block_add(adev, &gmc_v8_5_ip_block);
+ amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
+ amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block);
if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
- amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
else if (amdgpu_device_has_dc_support(adev))
- amdgpu_ip_block_add(adev, &dm_ip_block);
+ amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
else
- amdgpu_ip_block_add(adev, &dce_v10_1_ip_block);
- amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
- amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &dce_v10_1_ip_block);
+ amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
if (!amdgpu_sriov_vf(adev)) {
- amdgpu_ip_block_add(adev, &uvd_v6_0_ip_block);
- amdgpu_ip_block_add(adev, &vce_v3_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &uvd_v6_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &vce_v3_0_ip_block);
}
break;
case CHIP_TONGA:
- amdgpu_ip_block_add(adev, &vi_common_ip_block);
- amdgpu_ip_block_add(adev, &gmc_v8_0_ip_block);
- amdgpu_ip_block_add(adev, &tonga_ih_ip_block);
- amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
+ amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
+ amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block);
if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
- amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
else if (amdgpu_device_has_dc_support(adev))
- amdgpu_ip_block_add(adev, &dm_ip_block);
+ amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
else
- amdgpu_ip_block_add(adev, &dce_v10_0_ip_block);
- amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
- amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &dce_v10_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
if (!amdgpu_sriov_vf(adev)) {
- amdgpu_ip_block_add(adev, &uvd_v5_0_ip_block);
- amdgpu_ip_block_add(adev, &vce_v3_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &uvd_v5_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &vce_v3_0_ip_block);
}
break;
case CHIP_POLARIS11:
case CHIP_POLARIS10:
case CHIP_POLARIS12:
- amdgpu_ip_block_add(adev, &vi_common_ip_block);
- amdgpu_ip_block_add(adev, &gmc_v8_1_ip_block);
- amdgpu_ip_block_add(adev, &tonga_ih_ip_block);
- amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
+ amdgpu_device_ip_block_add(adev, &gmc_v8_1_ip_block);
+ amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
+ amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block);
if (adev->enable_virtual_display)
- amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
else if (amdgpu_device_has_dc_support(adev))
- amdgpu_ip_block_add(adev, &dm_ip_block);
+ amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
else
- amdgpu_ip_block_add(adev, &dce_v11_2_ip_block);
- amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
- amdgpu_ip_block_add(adev, &sdma_v3_1_ip_block);
- amdgpu_ip_block_add(adev, &uvd_v6_3_ip_block);
- amdgpu_ip_block_add(adev, &vce_v3_4_ip_block);
+ amdgpu_device_ip_block_add(adev, &dce_v11_2_ip_block);
+ amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &sdma_v3_1_ip_block);
+ amdgpu_device_ip_block_add(adev, &uvd_v6_3_ip_block);
+ amdgpu_device_ip_block_add(adev, &vce_v3_4_ip_block);
break;
case CHIP_CARRIZO:
- amdgpu_ip_block_add(adev, &vi_common_ip_block);
- amdgpu_ip_block_add(adev, &gmc_v8_0_ip_block);
- amdgpu_ip_block_add(adev, &cz_ih_ip_block);
- amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
+ amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &cz_ih_ip_block);
+ amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block);
if (adev->enable_virtual_display)
- amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
else if (amdgpu_device_has_dc_support(adev))
- amdgpu_ip_block_add(adev, &dm_ip_block);
+ amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
else
- amdgpu_ip_block_add(adev, &dce_v11_0_ip_block);
- amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
- amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block);
- amdgpu_ip_block_add(adev, &uvd_v6_0_ip_block);
- amdgpu_ip_block_add(adev, &vce_v3_1_ip_block);
+ amdgpu_device_ip_block_add(adev, &dce_v11_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &uvd_v6_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &vce_v3_1_ip_block);
#if defined(CONFIG_DRM_AMD_ACP)
- amdgpu_ip_block_add(adev, &acp_ip_block);
+ amdgpu_device_ip_block_add(adev, &acp_ip_block);
#endif
break;
case CHIP_STONEY:
- amdgpu_ip_block_add(adev, &vi_common_ip_block);
- amdgpu_ip_block_add(adev, &gmc_v8_0_ip_block);
- amdgpu_ip_block_add(adev, &cz_ih_ip_block);
- amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
+ amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &cz_ih_ip_block);
+ amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block);
if (adev->enable_virtual_display)
- amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
else if (amdgpu_device_has_dc_support(adev))
- amdgpu_ip_block_add(adev, &dm_ip_block);
+ amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
else
- amdgpu_ip_block_add(adev, &dce_v11_0_ip_block);
- amdgpu_ip_block_add(adev, &gfx_v8_1_ip_block);
- amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block);
- amdgpu_ip_block_add(adev, &uvd_v6_2_ip_block);
- amdgpu_ip_block_add(adev, &vce_v3_4_ip_block);
+ amdgpu_device_ip_block_add(adev, &dce_v11_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &gfx_v8_1_ip_block);
+ amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &uvd_v6_2_ip_block);
+ amdgpu_device_ip_block_add(adev, &vce_v3_4_ip_block);
#if defined(CONFIG_DRM_AMD_ACP)
- amdgpu_ip_block_add(adev, &acp_ip_block);
+ amdgpu_device_ip_block_add(adev, &acp_ip_block);
#endif
break;
default:
diff --git a/drivers/gpu/drm/amd/amdgpu/vid.h b/drivers/gpu/drm/amd/amdgpu/vid.h
index dbf3703cbd1b..19ddd2312e00 100644
--- a/drivers/gpu/drm/amd/amdgpu/vid.h
+++ b/drivers/gpu/drm/amd/amdgpu/vid.h
@@ -27,6 +27,8 @@
#define SDMA1_REGISTER_OFFSET 0x200 /* not a register */
#define SDMA_MAX_INSTANCE 2
+#define KFD_VI_SDMA_QUEUE_OFFSET 0x80 /* not a register */
+
/* crtc instance offsets */
#define CRTC0_REGISTER_OFFSET (0x1b9c - 0x1b9c)
#define CRTC1_REGISTER_OFFSET (0x1d9c - 0x1b9c)
diff --git a/drivers/gpu/drm/amd/amdkfd/Makefile b/drivers/gpu/drm/amd/amdkfd/Makefile
index 7bb0bc0ca3d6..a317e76ffb5e 100644
--- a/drivers/gpu/drm/amd/amdkfd/Makefile
+++ b/drivers/gpu/drm/amd/amdkfd/Makefile
@@ -1,4 +1,24 @@
-# SPDX-License-Identifier: GPL-2.0
+#
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
#
# Makefile for Heterogeneous System Architecture support for AMD GPU devices
#
@@ -15,6 +35,8 @@ amdkfd-y := kfd_module.o kfd_device.o kfd_chardev.o kfd_topology.o \
kfd_process_queue_manager.o kfd_device_queue_manager.o \
kfd_device_queue_manager_cik.o kfd_device_queue_manager_vi.o \
kfd_interrupt.o kfd_events.o cik_event_interrupt.o \
- kfd_dbgdev.o kfd_dbgmgr.o
+ kfd_dbgdev.o kfd_dbgmgr.o kfd_crat.o
+
+amdkfd-$(CONFIG_DEBUG_FS) += kfd_debugfs.o
obj-$(CONFIG_HSA_AMD) += amdkfd.o
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm
new file mode 100644
index 000000000000..997a383dcb8b
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm
@@ -0,0 +1,1384 @@
+/*
+ * Copyright 2015-2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#if 0
+HW (VI) source code for CWSR trap handler
+#Version 18 + multiple trap handler
+
+// this performance-optimal version was originally from Seven Xu at SRDC
+
+// Revision #18 --...
+/* Rev History
+** #1. Branch from gc dv. //gfxip/gfx8/main/src/test/suites/block/cs/sr/cs_trap_handler.sp3#1,#50, #51, #52-53(Skip, Already Fixed by PV), #54-56(merged),#57-58(merged, skipped-already fixed by PV)
+** #4. SR Memory Layout:
+** 1. VGPR-SGPR-HWREG-{LDS}
+** 2. tba_hi.bits.26 - reconfigured as the first wave in tg bits, for defer Save LDS for a threadgroup.. performance concern..
+** #5. Update: 1. Accurate g8sr_ts_save_d timestamp
+** #6. Update: 1. Fix s_barrier usage; 2. VGPR s/r using swizzle buffer?(NoNeed, already matched the swizzle pattern, more investigation)
+** #7. Update: 1. don't barrier if noLDS
+** #8. Branch: 1. Branch to ver#0, which is very similar to gc dv version
+** 2. Fix SQ issue by s_sleep 2
+** #9. Update: 1. Fix scc restore failed issue, restore wave_status at last
+** 2. optimize s_buffer save by burst 16sgprs...
+** #10. Update 1. Optimize restore sgpr by burst 16 sgprs.
+** #11. Update 1. Add 2 more timestamp for debug version
+** #12. Update 1. Add VGPR SR using DWx4, some case improve and some case drop performance
+** #13. Integ 1. Always use MUBUF for PV trap shader...
+** #14. Update 1. s_buffer_store soft clause...
+** #15. Update 1. PERF - scalar write with glc:0/mtype0 to allow L2 combine; a large perf improvement.
+** #16. Update 1. PERF - UNROLL LDS_DMA got a 2500-cycle save in IP tree
+** #17. Update 1. FUNC - LDS_DMA has issues while ATC, replace with ds_read/buffer_store for save part[TODO restore part]
+** 2. PERF - Save LDS before save VGPR to cover LDS save long latency...
+** #18. Update 1. FUNC - Implicitly restore STATUS.VCCZ, which is not writable by s_setreg_b32
+** 2. FUNC - Handle non-CWSR traps
+*/
+
+var G8SR_WDMEM_HWREG_OFFSET = 0
+var G8SR_WDMEM_SGPR_OFFSET = 128 // in bytes
+
+// Keep the definition the same as the app shader. These 2 timestamps are part of the app shader... They should come before any Save and after Restore.
+
+var G8SR_DEBUG_TIMESTAMP = 0
+var G8SR_DEBUG_TS_SAVE_D_OFFSET = 40*4 // ts_save_d timestamp offset relative to SGPR_SR_memory_offset
+var s_g8sr_ts_save_s = s[34:35] // save start
+var s_g8sr_ts_sq_save_msg = s[36:37] // The save shader send SAVEWAVE msg to spi
+var s_g8sr_ts_spi_wrexec = s[38:39] // the SPI write the sr address to SQ
+var s_g8sr_ts_save_d = s[40:41] // save end
+var s_g8sr_ts_restore_s = s[42:43] // restore start
+var s_g8sr_ts_restore_d = s[44:45] // restore end
+
+var G8SR_VGPR_SR_IN_DWX4 = 0
+var G8SR_SAVE_BUF_RSRC_WORD1_STRIDE_DWx4 = 0x00100000 // DWx4 stride is 4*4Bytes
+var G8SR_RESTORE_BUF_RSRC_WORD1_STRIDE_DWx4 = G8SR_SAVE_BUF_RSRC_WORD1_STRIDE_DWx4
+
+
+/*************************************************************************/
+/* control on how to run the shader */
+/*************************************************************************/
+//any hack that needs to be made to run this code in EMU (either because various EMU code are not ready or no compute save & restore in EMU run)
+var EMU_RUN_HACK = 0
+var EMU_RUN_HACK_RESTORE_NORMAL = 0
+var EMU_RUN_HACK_SAVE_NORMAL_EXIT = 0
+var EMU_RUN_HACK_SAVE_SINGLE_WAVE = 0
+var EMU_RUN_HACK_SAVE_FIRST_TIME = 0 //for interrupted restore in which the first save is through EMU_RUN_HACK
+var EMU_RUN_HACK_SAVE_FIRST_TIME_TBA_LO = 0 //for interrupted restore in which the first save is through EMU_RUN_HACK
+var EMU_RUN_HACK_SAVE_FIRST_TIME_TBA_HI = 0 //for interrupted restore in which the first save is through EMU_RUN_HACK
+var SAVE_LDS = 1
+var WG_BASE_ADDR_LO = 0x9000a000
+var WG_BASE_ADDR_HI = 0x0
+var WAVE_SPACE = 0x5000 //memory size that each wave occupies in workgroup state mem
+var CTX_SAVE_CONTROL = 0x0
+var CTX_RESTORE_CONTROL = CTX_SAVE_CONTROL
+var SIM_RUN_HACK = 0 //any hack that needs to be made to run this code in SIM (either because various RTL code are not ready or no compute save & restore in RTL run)
+var SGPR_SAVE_USE_SQC = 1 //use SQC D$ to do the write
+var USE_MTBUF_INSTEAD_OF_MUBUF = 0 //because TC EMU currently asserts on 0 of // overload DFMT field to carry 4 more bits of stride for MUBUF opcodes
+var SWIZZLE_EN = 0 //whether we use swizzled buffer addressing
+
+/**************************************************************************/
+/* variables */
+/**************************************************************************/
+var SQ_WAVE_STATUS_INST_ATC_SHIFT = 23
+var SQ_WAVE_STATUS_INST_ATC_MASK = 0x00800000
+var SQ_WAVE_STATUS_SPI_PRIO_MASK = 0x00000006
+
+var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT = 12
+var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE = 9
+var SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT = 8
+var SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SIZE = 6
+var SQ_WAVE_GPR_ALLOC_SGPR_SIZE_SHIFT = 24
+var SQ_WAVE_GPR_ALLOC_SGPR_SIZE_SIZE = 3 //FIXME sq.blk still has 4 bits at this time while SQ programming guide has 3 bits
+
+var SQ_WAVE_TRAPSTS_SAVECTX_MASK = 0x400
+var SQ_WAVE_TRAPSTS_EXCE_MASK = 0x1FF // Exception mask
+var SQ_WAVE_TRAPSTS_SAVECTX_SHIFT = 10
+var SQ_WAVE_TRAPSTS_MEM_VIOL_MASK = 0x100
+var SQ_WAVE_TRAPSTS_MEM_VIOL_SHIFT = 8
+var SQ_WAVE_TRAPSTS_PRE_SAVECTX_MASK = 0x3FF
+var SQ_WAVE_TRAPSTS_PRE_SAVECTX_SHIFT = 0x0
+var SQ_WAVE_TRAPSTS_PRE_SAVECTX_SIZE = 10
+var SQ_WAVE_TRAPSTS_POST_SAVECTX_MASK = 0xFFFFF800
+var SQ_WAVE_TRAPSTS_POST_SAVECTX_SHIFT = 11
+var SQ_WAVE_TRAPSTS_POST_SAVECTX_SIZE = 21
+
+var SQ_WAVE_IB_STS_RCNT_SHIFT = 16 //FIXME
+var SQ_WAVE_IB_STS_RCNT_SIZE = 4 //FIXME
+var SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT = 15 //FIXME
+var SQ_WAVE_IB_STS_FIRST_REPLAY_SIZE = 1 //FIXME
+var SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK_NEG = 0x00007FFF //FIXME
+
+var SQ_BUF_RSRC_WORD1_ATC_SHIFT = 24
+var SQ_BUF_RSRC_WORD3_MTYPE_SHIFT = 27
+
+
+/* Save */
+var S_SAVE_BUF_RSRC_WORD1_STRIDE = 0x00040000 //stride is 4 bytes
+var S_SAVE_BUF_RSRC_WORD3_MISC = 0x00807FAC //SQ_SEL_X/Y/Z/W, BUF_NUM_FORMAT_FLOAT, (0 for MUBUF stride[17:14] when ADD_TID_ENABLE and BUF_DATA_FORMAT_32 for MTBUF), ADD_TID_ENABLE
+
+var S_SAVE_SPI_INIT_ATC_MASK = 0x08000000 //bit[27]: ATC bit
+var S_SAVE_SPI_INIT_ATC_SHIFT = 27
+var S_SAVE_SPI_INIT_MTYPE_MASK = 0x70000000 //bit[30:28]: Mtype
+var S_SAVE_SPI_INIT_MTYPE_SHIFT = 28
+var S_SAVE_SPI_INIT_FIRST_WAVE_MASK = 0x04000000 //bit[26]: FirstWaveInTG
+var S_SAVE_SPI_INIT_FIRST_WAVE_SHIFT = 26
+
+var S_SAVE_PC_HI_RCNT_SHIFT = 28 //FIXME check with Brian to ensure all fields other than PC[47:0] can be used
+var S_SAVE_PC_HI_RCNT_MASK = 0xF0000000 //FIXME
+var S_SAVE_PC_HI_FIRST_REPLAY_SHIFT = 27 //FIXME
+var S_SAVE_PC_HI_FIRST_REPLAY_MASK = 0x08000000 //FIXME
+
+var s_save_spi_init_lo = exec_lo
+var s_save_spi_init_hi = exec_hi
+
+ //tba_lo and tba_hi need to be saved/restored
+var s_save_pc_lo = ttmp0 //{TTMP1, TTMP0} = {3??h0,pc_rewind[3:0], HT[0],trapID[7:0], PC[47:0]}
+var s_save_pc_hi = ttmp1
+var s_save_exec_lo = ttmp2
+var s_save_exec_hi = ttmp3
+var s_save_status = ttmp4
+var s_save_trapsts = ttmp5 //not really used until the end of the SAVE routine
+var s_save_xnack_mask_lo = ttmp6
+var s_save_xnack_mask_hi = ttmp7
+var s_save_buf_rsrc0 = ttmp8
+var s_save_buf_rsrc1 = ttmp9
+var s_save_buf_rsrc2 = ttmp10
+var s_save_buf_rsrc3 = ttmp11
+
+var s_save_mem_offset = tma_lo
+var s_save_alloc_size = s_save_trapsts //conflict
+var s_save_tmp = s_save_buf_rsrc2 //shared with s_save_buf_rsrc2 (conflict: should not use mem access with s_save_tmp at the same time)
+var s_save_m0 = tma_hi
+
+/* Restore */
+var S_RESTORE_BUF_RSRC_WORD1_STRIDE = S_SAVE_BUF_RSRC_WORD1_STRIDE
+var S_RESTORE_BUF_RSRC_WORD3_MISC = S_SAVE_BUF_RSRC_WORD3_MISC
+
+var S_RESTORE_SPI_INIT_ATC_MASK = 0x08000000 //bit[27]: ATC bit
+var S_RESTORE_SPI_INIT_ATC_SHIFT = 27
+var S_RESTORE_SPI_INIT_MTYPE_MASK = 0x70000000 //bit[30:28]: Mtype
+var S_RESTORE_SPI_INIT_MTYPE_SHIFT = 28
+var S_RESTORE_SPI_INIT_FIRST_WAVE_MASK = 0x04000000 //bit[26]: FirstWaveInTG
+var S_RESTORE_SPI_INIT_FIRST_WAVE_SHIFT = 26
+
+var S_RESTORE_PC_HI_RCNT_SHIFT = S_SAVE_PC_HI_RCNT_SHIFT
+var S_RESTORE_PC_HI_RCNT_MASK = S_SAVE_PC_HI_RCNT_MASK
+var S_RESTORE_PC_HI_FIRST_REPLAY_SHIFT = S_SAVE_PC_HI_FIRST_REPLAY_SHIFT
+var S_RESTORE_PC_HI_FIRST_REPLAY_MASK = S_SAVE_PC_HI_FIRST_REPLAY_MASK
+
+var s_restore_spi_init_lo = exec_lo
+var s_restore_spi_init_hi = exec_hi
+
+var s_restore_mem_offset = ttmp2
+var s_restore_alloc_size = ttmp3
+var s_restore_tmp = ttmp6 //tba_lo/hi need to be restored
+var s_restore_mem_offset_save = s_restore_tmp //no conflict
+
+var s_restore_m0 = s_restore_alloc_size //no conflict
+
+var s_restore_mode = ttmp7
+
+var s_restore_pc_lo = ttmp0
+var s_restore_pc_hi = ttmp1
+var s_restore_exec_lo = tma_lo //no conflict
+var s_restore_exec_hi = tma_hi //no conflict
+var s_restore_status = ttmp4
+var s_restore_trapsts = ttmp5
+var s_restore_xnack_mask_lo = xnack_mask_lo
+var s_restore_xnack_mask_hi = xnack_mask_hi
+var s_restore_buf_rsrc0 = ttmp8
+var s_restore_buf_rsrc1 = ttmp9
+var s_restore_buf_rsrc2 = ttmp10
+var s_restore_buf_rsrc3 = ttmp11
+
+/**************************************************************************/
+/* trap handler entry points */
+/**************************************************************************/
+/* Shader Main*/
+
+shader main
+ asic(VI)
+ type(CS)
+
+
+ if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_RESTORE_NORMAL)) //hack to use trap_id for determining save/restore
+ //FIXME VCCZ un-init assertion s_getreg_b32 s_save_status, hwreg(HW_REG_STATUS) //save STATUS since we will change SCC
+ s_and_b32 s_save_tmp, s_save_pc_hi, 0xffff0000 //change SCC
+ s_cmp_eq_u32 s_save_tmp, 0x007e0000 //Save: trap_id = 0x7e. Restore: trap_id = 0x7f.
+ s_cbranch_scc0 L_JUMP_TO_RESTORE //do not need to recover STATUS here since we are going to RESTORE
+ //FIXME s_setreg_b32 hwreg(HW_REG_STATUS), s_save_status //need to recover STATUS since we are going to SAVE
+ s_branch L_SKIP_RESTORE //NOT restore, SAVE actually
+ else
+ s_branch L_SKIP_RESTORE //NOT restore. might be a regular trap or save
+ end
+
+L_JUMP_TO_RESTORE:
+ s_branch L_RESTORE //restore
+
+L_SKIP_RESTORE:
+
+ s_getreg_b32 s_save_status, hwreg(HW_REG_STATUS) //save STATUS since we will change SCC
+ s_andn2_b32 s_save_status, s_save_status, SQ_WAVE_STATUS_SPI_PRIO_MASK //clear the SPI priority bits in the saved STATUS
+ s_getreg_b32 s_save_trapsts, hwreg(HW_REG_TRAPSTS)
+ s_and_b32 s_save_trapsts, s_save_trapsts, SQ_WAVE_TRAPSTS_SAVECTX_MASK //check whether this is for save
+ s_cbranch_scc1 L_SAVE //this is the operation for save
+
+ // ********* Handle non-CWSR traps *******************
+if (!EMU_RUN_HACK)
+ /* read tba and tma for next level trap handler, ttmp4 is used as s_save_status */
+ s_load_dwordx4 [ttmp8,ttmp9,ttmp10, ttmp11], [tma_lo,tma_hi], 0
+ s_waitcnt lgkmcnt(0)
+ s_or_b32 ttmp7, ttmp8, ttmp9
+ s_cbranch_scc0 L_NO_NEXT_TRAP //next-level trap handler has not been set
+ s_setreg_b32 hwreg(HW_REG_STATUS), s_save_status //restore HW status(SCC)
+ s_setpc_b64 [ttmp8,ttmp9] //jump to next level trap handler
+
+L_NO_NEXT_TRAP:
+ s_getreg_b32 s_save_trapsts, hwreg(HW_REG_TRAPSTS)
+ s_and_b32 s_save_trapsts, s_save_trapsts, SQ_WAVE_TRAPSTS_EXCE_MASK // Check whether it is an exception
+ s_cbranch_scc1 L_EXCP_CASE // Exception, jump back to the shader program directly.
+ s_add_u32 ttmp0, ttmp0, 4 // S_TRAP case, add 4 to ttmp0
+ s_addc_u32 ttmp1, ttmp1, 0
+L_EXCP_CASE:
+ s_and_b32 ttmp1, ttmp1, 0xFFFF
+ s_setreg_b32 hwreg(HW_REG_STATUS), s_save_status //restore HW status(SCC)
+ s_rfe_b64 [ttmp0, ttmp1]
+end
+ // ********* End handling of non-CWSR traps *******************
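For orientation, the entry code above only inspects two SQ registers: STATUS (saved so SCC can be restored) and TRAPSTS, whose SAVECTX bit selects the CWSR save path while the low exception bits distinguish a real exception from an s_trap. The standalone C sketch below decodes a raw TRAPSTS value with the masks defined at the top of this file (SAVECTX is bit 10 per SQ_WAVE_TRAPSTS_SAVECTX_SHIFT); it is an illustration only, not driver code.

#include <stdint.h>
#include <stdio.h>

#define SQ_WAVE_TRAPSTS_EXCE_MASK      0x1FF   /* exception bits [8:0]       */
#define SQ_WAVE_TRAPSTS_MEM_VIOL_MASK  0x100   /* memory violation (bit 8)   */
#define SQ_WAVE_TRAPSTS_SAVECTX_SHIFT  10      /* save-context request bit   */

static void decode_trapsts(uint32_t trapsts)
{
	printf("savectx  : %s\n", (trapsts & (1u << SQ_WAVE_TRAPSTS_SAVECTX_SHIFT)) ? "yes" : "no");
	printf("exception: %s\n", (trapsts & SQ_WAVE_TRAPSTS_EXCE_MASK) ? "yes" : "no");
	printf("mem_viol : %s\n", (trapsts & SQ_WAVE_TRAPSTS_MEM_VIOL_MASK) ? "yes" : "no");
}

int main(void)
{
	decode_trapsts(0x400);	/* SPI requested a context save: the handler takes L_SAVE  */
	decode_trapsts(0x100);	/* memory violation: the save path rewinds the PC by 8     */
	return 0;
}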
+
+/**************************************************************************/
+/* save routine */
+/**************************************************************************/
+
+L_SAVE:
+
+if G8SR_DEBUG_TIMESTAMP
+ s_memrealtime s_g8sr_ts_save_s
+ s_waitcnt lgkmcnt(0) //FIXME, will cause xnack??
+end
+
+ //check whether there is mem_viol
+ s_getreg_b32 s_save_trapsts, hwreg(HW_REG_TRAPSTS)
+ s_and_b32 s_save_trapsts, s_save_trapsts, SQ_WAVE_TRAPSTS_MEM_VIOL_MASK
+ s_cbranch_scc0 L_NO_PC_REWIND
+
+ //if so, rewind the PC, assuming the GDS operation got NACKed
+ s_mov_b32 s_save_tmp, 0 //clear mem_viol bit
+ s_setreg_b32 hwreg(HW_REG_TRAPSTS, SQ_WAVE_TRAPSTS_MEM_VIOL_SHIFT, 1), s_save_tmp //clear mem_viol bit
+ s_and_b32 s_save_pc_hi, s_save_pc_hi, 0x0000ffff //pc[47:32]
+ s_sub_u32 s_save_pc_lo, s_save_pc_lo, 8 //pc[31:0]-8
+ s_subb_u32 s_save_pc_hi, s_save_pc_hi, 0x0 // -scc
+
+L_NO_PC_REWIND:
+ s_mov_b32 s_save_tmp, 0 //clear saveCtx bit
+ s_setreg_b32 hwreg(HW_REG_TRAPSTS, SQ_WAVE_TRAPSTS_SAVECTX_SHIFT, 1), s_save_tmp //clear saveCtx bit
+
+ s_mov_b32 s_save_xnack_mask_lo, xnack_mask_lo //save XNACK_MASK
+ s_mov_b32 s_save_xnack_mask_hi, xnack_mask_hi //XNACK_MASK must be saved before any memory operation
+ s_getreg_b32 s_save_tmp, hwreg(HW_REG_IB_STS, SQ_WAVE_IB_STS_RCNT_SHIFT, SQ_WAVE_IB_STS_RCNT_SIZE) //save RCNT
+ s_lshl_b32 s_save_tmp, s_save_tmp, S_SAVE_PC_HI_RCNT_SHIFT
+ s_or_b32 s_save_pc_hi, s_save_pc_hi, s_save_tmp
+ s_getreg_b32 s_save_tmp, hwreg(HW_REG_IB_STS, SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT, SQ_WAVE_IB_STS_FIRST_REPLAY_SIZE) //save FIRST_REPLAY
+ s_lshl_b32 s_save_tmp, s_save_tmp, S_SAVE_PC_HI_FIRST_REPLAY_SHIFT
+ s_or_b32 s_save_pc_hi, s_save_pc_hi, s_save_tmp
+ s_getreg_b32 s_save_tmp, hwreg(HW_REG_IB_STS) //clear RCNT and FIRST_REPLAY in IB_STS
+ s_and_b32 s_save_tmp, s_save_tmp, SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK_NEG
+
+ s_setreg_b32 hwreg(HW_REG_IB_STS), s_save_tmp
+
+ /* inform SPI of readiness and wait for SPI's go signal */
+ s_mov_b32 s_save_exec_lo, exec_lo //save EXEC and use EXEC for the go signal from SPI
+ s_mov_b32 s_save_exec_hi, exec_hi
+ s_mov_b64 exec, 0x0 //clear EXEC to get ready to receive
+
+if G8SR_DEBUG_TIMESTAMP
+ s_memrealtime s_g8sr_ts_sq_save_msg
+ s_waitcnt lgkmcnt(0)
+end
+
+ if (EMU_RUN_HACK)
+
+ else
+ s_sendmsg sendmsg(MSG_SAVEWAVE) //send SPI a message and wait for SPI's write to EXEC
+ end
+
+ L_SLEEP:
+ s_sleep 0x2 // sleep 1 (64clk) is not enough for 8 waves per SIMD and will hang the SQ: the 7th/8th wave cannot win arbitration to execute instructions while the other waves sit in this sleep loop waiting for wrexec!=0
+
+ if (EMU_RUN_HACK)
+
+ else
+ s_cbranch_execz L_SLEEP
+ end
+
+if G8SR_DEBUG_TIMESTAMP
+ s_memrealtime s_g8sr_ts_spi_wrexec
+ s_waitcnt lgkmcnt(0)
+end
+
+ /* setup Resource Constants */
+ if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_SAVE_SINGLE_WAVE))
+ //calculate wd_addr using absolute thread id
+ v_readlane_b32 s_save_tmp, v9, 0
+ s_lshr_b32 s_save_tmp, s_save_tmp, 6
+ s_mul_i32 s_save_tmp, s_save_tmp, WAVE_SPACE
+ s_add_i32 s_save_spi_init_lo, s_save_tmp, WG_BASE_ADDR_LO
+ s_mov_b32 s_save_spi_init_hi, WG_BASE_ADDR_HI
+ s_and_b32 s_save_spi_init_hi, s_save_spi_init_hi, CTX_SAVE_CONTROL
+ else
+ end
+ if ((EMU_RUN_HACK) && (EMU_RUN_HACK_SAVE_SINGLE_WAVE))
+ s_add_i32 s_save_spi_init_lo, s_save_tmp, WG_BASE_ADDR_LO
+ s_mov_b32 s_save_spi_init_hi, WG_BASE_ADDR_HI
+ s_and_b32 s_save_spi_init_hi, s_save_spi_init_hi, CTX_SAVE_CONTROL
+ else
+ end
+
+
+ s_mov_b32 s_save_buf_rsrc0, s_save_spi_init_lo //base_addr_lo
+ s_and_b32 s_save_buf_rsrc1, s_save_spi_init_hi, 0x0000FFFF //base_addr_hi
+ s_or_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, S_SAVE_BUF_RSRC_WORD1_STRIDE
+ s_mov_b32 s_save_buf_rsrc2, 0 //NUM_RECORDS initial value = 0 (in bytes) although not necessarily initialized
+ s_mov_b32 s_save_buf_rsrc3, S_SAVE_BUF_RSRC_WORD3_MISC
+ s_and_b32 s_save_tmp, s_save_spi_init_hi, S_SAVE_SPI_INIT_ATC_MASK
+ s_lshr_b32 s_save_tmp, s_save_tmp, (S_SAVE_SPI_INIT_ATC_SHIFT-SQ_BUF_RSRC_WORD1_ATC_SHIFT) //get ATC bit into position
+ s_or_b32 s_save_buf_rsrc3, s_save_buf_rsrc3, s_save_tmp //or ATC
+ s_and_b32 s_save_tmp, s_save_spi_init_hi, S_SAVE_SPI_INIT_MTYPE_MASK
+ s_lshr_b32 s_save_tmp, s_save_tmp, (S_SAVE_SPI_INIT_MTYPE_SHIFT-SQ_BUF_RSRC_WORD3_MTYPE_SHIFT) //get MTYPE bits into position
+ s_or_b32 s_save_buf_rsrc3, s_save_buf_rsrc3, s_save_tmp //or MTYPE
+
+ //FIXME right now s_save_m0/s_save_mem_offset use tma_lo/tma_hi (might need to save them before using them?)
+ s_mov_b32 s_save_m0, m0 //save M0
+
+ /* global mem offset */
+ s_mov_b32 s_save_mem_offset, 0x0 //mem offset initial value = 0
+
+
+
+
+ /* save HW registers */
+ //////////////////////////////
+
+ L_SAVE_HWREG:
+ // HWREG SR memory offset : size(VGPR)+size(SGPR)
+ get_vgpr_size_bytes(s_save_mem_offset)
+ get_sgpr_size_bytes(s_save_tmp)
+ s_add_u32 s_save_mem_offset, s_save_mem_offset, s_save_tmp
+
+
+ s_mov_b32 s_save_buf_rsrc2, 0x4 //NUM_RECORDS in bytes
+ if (SWIZZLE_EN)
+ s_add_u32 s_save_buf_rsrc2, s_save_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking?
+ else
+ s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+ end
+
+
+ write_hwreg_to_mem(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset) //M0
+
+ if ((EMU_RUN_HACK) && (EMU_RUN_HACK_SAVE_FIRST_TIME))
+ s_add_u32 s_save_pc_lo, s_save_pc_lo, 4 //pc[31:0]+4
+ s_addc_u32 s_save_pc_hi, s_save_pc_hi, 0x0 //carry bit over
+ s_mov_b32 tba_lo, EMU_RUN_HACK_SAVE_FIRST_TIME_TBA_LO
+ s_mov_b32 tba_hi, EMU_RUN_HACK_SAVE_FIRST_TIME_TBA_HI
+ end
+
+ write_hwreg_to_mem(s_save_pc_lo, s_save_buf_rsrc0, s_save_mem_offset) //PC
+ write_hwreg_to_mem(s_save_pc_hi, s_save_buf_rsrc0, s_save_mem_offset)
+ write_hwreg_to_mem(s_save_exec_lo, s_save_buf_rsrc0, s_save_mem_offset) //EXEC
+ write_hwreg_to_mem(s_save_exec_hi, s_save_buf_rsrc0, s_save_mem_offset)
+ write_hwreg_to_mem(s_save_status, s_save_buf_rsrc0, s_save_mem_offset) //STATUS
+
+ //s_save_trapsts conflicts with s_save_alloc_size
+ s_getreg_b32 s_save_trapsts, hwreg(HW_REG_TRAPSTS)
+ write_hwreg_to_mem(s_save_trapsts, s_save_buf_rsrc0, s_save_mem_offset) //TRAPSTS
+
+ write_hwreg_to_mem(s_save_xnack_mask_lo, s_save_buf_rsrc0, s_save_mem_offset) //XNACK_MASK_LO
+ write_hwreg_to_mem(s_save_xnack_mask_hi, s_save_buf_rsrc0, s_save_mem_offset) //XNACK_MASK_HI
+
+ //using s_save_tmp here would introduce a conflict between s_save_tmp and s_save_buf_rsrc2
+ s_getreg_b32 s_save_m0, hwreg(HW_REG_MODE) //MODE
+ write_hwreg_to_mem(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset)
+ write_hwreg_to_mem(tba_lo, s_save_buf_rsrc0, s_save_mem_offset) //TBA_LO
+ write_hwreg_to_mem(tba_hi, s_save_buf_rsrc0, s_save_mem_offset) //TBA_HI
+
+
+
+ /* the first wave in the threadgroup */
+ // save the first_wave bit in the unused bit 26 of s_save_exec_hi
+ s_and_b32 s_save_tmp, s_save_spi_init_hi, S_SAVE_SPI_INIT_FIRST_WAVE_MASK // extract first wave bit
+ //s_or_b32 tba_hi, s_save_tmp, tba_hi // save first wave bit to tba_hi.bits[26]
+ s_mov_b32 s_save_exec_hi, 0x0
+ s_or_b32 s_save_exec_hi, s_save_tmp, s_save_exec_hi // save first wave bit to s_save_exec_hi.bits[26]
+
+
+ /* save SGPRs */
+ // Save SGPRs before the LDS save, so that s0 to s4 can be used during the LDS save...
+ //////////////////////////////
+
+ // SGPR SR memory offset : size(VGPR)
+ get_vgpr_size_bytes(s_save_mem_offset)
+ // TODO, change RSRC word to rearrange memory layout for SGPRS
+
+ s_getreg_b32 s_save_alloc_size, hwreg(HW_REG_GPR_ALLOC,SQ_WAVE_GPR_ALLOC_SGPR_SIZE_SHIFT,SQ_WAVE_GPR_ALLOC_SGPR_SIZE_SIZE) //sgpr_size
+ s_add_u32 s_save_alloc_size, s_save_alloc_size, 1
+ s_lshl_b32 s_save_alloc_size, s_save_alloc_size, 4 //Number of SGPRs = (sgpr_size + 1) * 16 (non-zero value)
+
+ if (SGPR_SAVE_USE_SQC)
+ s_lshl_b32 s_save_buf_rsrc2, s_save_alloc_size, 2 //NUM_RECORDS in bytes
+ else
+ s_lshl_b32 s_save_buf_rsrc2, s_save_alloc_size, 8 //NUM_RECORDS in bytes (64 threads)
+ end
+
+ if (SWIZZLE_EN)
+ s_add_u32 s_save_buf_rsrc2, s_save_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking?
+ else
+ s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+ end
+
+
+ // backup s_save_buf_rsrc0,1 to s_save_xnack_mask_lo/hi, since the write_16sgpr_to_mem function will change rsrc0
+ //s_mov_b64 s_save_pc_lo, s_save_buf_rsrc0
+ s_mov_b64 s_save_xnack_mask_lo, s_save_buf_rsrc0
+ s_add_u32 s_save_buf_rsrc0, s_save_buf_rsrc0, s_save_mem_offset
+ s_addc_u32 s_save_buf_rsrc1, s_save_buf_rsrc1, 0
+
+ s_mov_b32 m0, 0x0 //SGPR initial index value =0
+ L_SAVE_SGPR_LOOP:
+ // SGPR is allocated in 16 SGPR granularity
+ s_movrels_b64 s0, s0 //s0 = s[0+m0], s1 = s[1+m0]
+ s_movrels_b64 s2, s2 //s2 = s[2+m0], s3 = s[3+m0]
+ s_movrels_b64 s4, s4 //s4 = s[4+m0], s5 = s[5+m0]
+ s_movrels_b64 s6, s6 //s6 = s[6+m0], s7 = s[7+m0]
+ s_movrels_b64 s8, s8 //s8 = s[8+m0], s9 = s[9+m0]
+ s_movrels_b64 s10, s10 //s10 = s[10+m0], s11 = s[11+m0]
+ s_movrels_b64 s12, s12 //s12 = s[12+m0], s13 = s[13+m0]
+ s_movrels_b64 s14, s14 //s14 = s[14+m0], s15 = s[15+m0]
+
+ write_16sgpr_to_mem(s0, s_save_buf_rsrc0, s_save_mem_offset) //PV: the best performance should be using s_buffer_store_dwordx4
+ s_add_u32 m0, m0, 16 //next sgpr index
+ s_cmp_lt_u32 m0, s_save_alloc_size //scc = (m0 < s_save_alloc_size) ? 1 : 0
+ s_cbranch_scc1 L_SAVE_SGPR_LOOP //SGPR save is complete?
+ // restore s_save_buf_rsrc0,1
+ //s_mov_b64 s_save_buf_rsrc0, s_save_pc_lo
+ s_mov_b64 s_save_buf_rsrc0, s_save_xnack_mask_lo
+
+
+
+
+ /* save the first 4 VGPRs, so the LDS save can use them */
+ // each wave will alloc 4 vgprs at least...
+ /////////////////////////////////////////////////////////////////////////////////////
+
+ s_mov_b32 s_save_mem_offset, 0
+ s_mov_b32 exec_lo, 0xFFFFFFFF //need every thread from now on
+ s_mov_b32 exec_hi, 0xFFFFFFFF
+
+ if (SWIZZLE_EN)
+ s_add_u32 s_save_buf_rsrc2, s_save_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking?
+ else
+ s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+ end
+
+
+ // VGPR Allocated in 4-GPR granularity
+
+if G8SR_VGPR_SR_IN_DWX4
+ // the const stride for DWx4 is 4*4 bytes
+ s_and_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, 0x0000FFFF // reset const stride to 0
+ s_or_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, G8SR_SAVE_BUF_RSRC_WORD1_STRIDE_DWx4 // const stride to 4*4 bytes
+
+ buffer_store_dwordx4 v0, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1
+
+ s_and_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, 0x0000FFFF // reset const stride to 0
+ s_or_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, S_SAVE_BUF_RSRC_WORD1_STRIDE // reset const stride to 4 bytes
+else
+ buffer_store_dword v0, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1
+ buffer_store_dword v1, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256
+ buffer_store_dword v2, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256*2
+ buffer_store_dword v3, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256*3
+end
+
+
+
+ /* save LDS */
+ //////////////////////////////
+
+ L_SAVE_LDS:
+
+ // Change EXEC to all threads...
+ s_mov_b32 exec_lo, 0xFFFFFFFF //need every thread from now on
+ s_mov_b32 exec_hi, 0xFFFFFFFF
+
+ s_getreg_b32 s_save_alloc_size, hwreg(HW_REG_LDS_ALLOC,SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT,SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE) //lds_size
+ s_and_b32 s_save_alloc_size, s_save_alloc_size, 0xFFFFFFFF //lds_size is zero?
+ s_cbranch_scc0 L_SAVE_LDS_DONE //no lds used? jump to L_SAVE_LDS_DONE
+
+ s_barrier //LDS is used? wait for other waves in the same TG
+ //s_and_b32 s_save_tmp, tba_hi, S_SAVE_SPI_INIT_FIRST_WAVE_MASK //exec is still used here
+ s_and_b32 s_save_tmp, s_save_exec_hi, S_SAVE_SPI_INIT_FIRST_WAVE_MASK //exec is still used here
+ s_cbranch_scc0 L_SAVE_LDS_DONE
+
+ // first wave does the LDS save;
+
+ s_lshl_b32 s_save_alloc_size, s_save_alloc_size, 6 //LDS size in dwords = lds_size * 64dw
+ s_lshl_b32 s_save_alloc_size, s_save_alloc_size, 2 //LDS size in bytes
+ s_mov_b32 s_save_buf_rsrc2, s_save_alloc_size //NUM_RECORDS in bytes
+
+ // LDS at offset: size(VGPR)+SIZE(SGPR)+SIZE(HWREG)
+ //
+ get_vgpr_size_bytes(s_save_mem_offset)
+ get_sgpr_size_bytes(s_save_tmp)
+ s_add_u32 s_save_mem_offset, s_save_mem_offset, s_save_tmp
+ s_add_u32 s_save_mem_offset, s_save_mem_offset, get_hwreg_size_bytes()
+
+
+ if (SWIZZLE_EN)
+ s_add_u32 s_save_buf_rsrc2, s_save_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking?
+ else
+ s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+ end
+
+ s_mov_b32 m0, 0x0 //lds_offset initial value = 0
+
+
+var LDS_DMA_ENABLE = 0
+var UNROLL = 0
+if UNROLL==0 && LDS_DMA_ENABLE==1
+ s_mov_b32 s3, 256*2
+ s_nop 0
+ s_nop 0
+ s_nop 0
+ L_SAVE_LDS_LOOP:
+ //TODO: it looks like the 2-instruction buffer_store/load clause for save/restore will hurt performance???
+ if (SAVE_LDS) //SPI always allocates LDS space in 128DW granularity
+ buffer_store_lds_dword s_save_buf_rsrc0, s_save_mem_offset lds:1 // first 64DW
+ buffer_store_lds_dword s_save_buf_rsrc0, s_save_mem_offset lds:1 offset:256 // second 64DW
+ end
+
+ s_add_u32 m0, m0, s3 //every buffer_store_lds does 256 bytes
+ s_add_u32 s_save_mem_offset, s_save_mem_offset, s3 //mem offset increased by 256 bytes
+ s_cmp_lt_u32 m0, s_save_alloc_size //scc=(m0 < s_save_alloc_size) ? 1 : 0
+ s_cbranch_scc1 L_SAVE_LDS_LOOP //LDS save is complete?
+
+elsif LDS_DMA_ENABLE==1 && UNROLL==1 // UNROLLed; causes icache misses
+ // store from the highest LDS address to the lowest
+ s_mov_b32 s3, 256*2
+ s_sub_u32 m0, s_save_alloc_size, s3
+ s_add_u32 s_save_mem_offset, s_save_mem_offset, m0
+ s_lshr_b32 s_save_alloc_size, s_save_alloc_size, 9 // how many 128-DW (512-byte) chunks...
+ s_sub_u32 s_save_alloc_size, 128, s_save_alloc_size // store from the highest addr to the lowest
+ s_mul_i32 s_save_alloc_size, s_save_alloc_size, 6*4 // PC offset increment, each LDS save block costs 6*4 bytes of instructions
+ s_add_u32 s_save_alloc_size, s_save_alloc_size, 3*4 // 3*4 covers the s_add, s_addc and s_setpc instructions below
+ s_nop 0
+ s_nop 0
+ s_nop 0 //pad 3 dw to let LDS_DMA align with 64Bytes
+ s_getpc_b64 s[0:1] // reuse s[0:1], since s[0:1] already saved
+ s_add_u32 s0, s0,s_save_alloc_size
+ s_addc_u32 s1, s1, 0
+ s_setpc_b64 s[0:1]
+
+
+ for var i =0; i< 128; i++
+ // be careful to keep this a 64-byte aligned address, which could improve performance...
+ buffer_store_lds_dword s_save_buf_rsrc0, s_save_mem_offset lds:1 offset:0 // first 64DW
+ buffer_store_lds_dword s_save_buf_rsrc0, s_save_mem_offset lds:1 offset:256 // second 64DW
+
+ if i!=127
+ s_sub_u32 m0, m0, s3 // use an sgpr to shrink the 2-DW instruction to a 1-DW instruction, i.e. pack more LDS_DMA instructions into one cache line
+ s_sub_u32 s_save_mem_offset, s_save_mem_offset, s3
+ end
+ end
+
+else // BUFFER_STORE
+ v_mbcnt_lo_u32_b32 v2, 0xffffffff, 0x0
+ v_mbcnt_hi_u32_b32 v3, 0xffffffff, v2 // tid
+ v_mul_i32_i24 v2, v3, 8 // tid*8
+ v_mov_b32 v3, 256*2
+ s_mov_b32 m0, 0x10000
+ s_mov_b32 s0, s_save_buf_rsrc3
+ s_and_b32 s_save_buf_rsrc3, s_save_buf_rsrc3, 0xFF7FFFFF // disable add_tid
+ s_or_b32 s_save_buf_rsrc3, s_save_buf_rsrc3, 0x58000 //DFMT
+
+L_SAVE_LDS_LOOP_VECTOR:
+ ds_read_b64 v[0:1], v2 //x =LDS[a], byte address
+ s_waitcnt lgkmcnt(0)
+ buffer_store_dwordx2 v[0:1], v2, s_save_buf_rsrc0, s_save_mem_offset offen:1 glc:1 slc:1
+// s_waitcnt vmcnt(0)
+ v_add_u32 v2, vcc[0:1], v2, v3
+ v_cmp_lt_u32 vcc[0:1], v2, s_save_alloc_size
+ s_cbranch_vccnz L_SAVE_LDS_LOOP_VECTOR
+
+ // restore rsrc3
+ s_mov_b32 s_save_buf_rsrc3, s0
+
+end
+
+L_SAVE_LDS_DONE:
+
+
+ /* save VGPRs - save the remaining VGPRs */
+ //////////////////////////////////////////////////////////////////////////////////////
+ L_SAVE_VGPR:
+ // VGPR SR memory offset: 0
+ // TODO rearrange the RSRC words to use swizzle for VGPR save...
+
+ s_mov_b32 s_save_mem_offset, (0+256*4) // for the remaining VGPRs
+ s_mov_b32 exec_lo, 0xFFFFFFFF //need every thread from now on
+ s_mov_b32 exec_hi, 0xFFFFFFFF
+
+ s_getreg_b32 s_save_alloc_size, hwreg(HW_REG_GPR_ALLOC,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SIZE) //vgpr_size
+ s_add_u32 s_save_alloc_size, s_save_alloc_size, 1
+ s_lshl_b32 s_save_alloc_size, s_save_alloc_size, 2 //Number of VGPRs = (vgpr_size + 1) * 4 (non-zero value) //FIXME for GFX, zero is possible
+ s_lshl_b32 s_save_buf_rsrc2, s_save_alloc_size, 8 //NUM_RECORDS in bytes (64 threads*4)
+ if (SWIZZLE_EN)
+ s_add_u32 s_save_buf_rsrc2, s_save_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking?
+ else
+ s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+ end
+
+
+ // VGPR Allocated in 4-GPR granularity
+
+if G8SR_VGPR_SR_IN_DWX4
+ // the const stride for DWx4 is 4*4 bytes
+ s_and_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, 0x0000FFFF // reset const stride to 0
+ s_or_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, G8SR_SAVE_BUF_RSRC_WORD1_STRIDE_DWx4 // const stride to 4*4 bytes
+
+ s_mov_b32 m0, 4 // skip first 4 VGPRs
+ s_cmp_lt_u32 m0, s_save_alloc_size
+ s_cbranch_scc0 L_SAVE_VGPR_LOOP_END // no more vgprs
+
+ s_set_gpr_idx_on m0, 0x1 // This will change M0
+ s_add_u32 s_save_alloc_size, s_save_alloc_size, 0x1000 // because above inst change m0
+L_SAVE_VGPR_LOOP:
+ v_mov_b32 v0, v0 // v0 = v[0+m0]
+ v_mov_b32 v1, v1
+ v_mov_b32 v2, v2
+ v_mov_b32 v3, v3
+
+
+ buffer_store_dwordx4 v0, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1
+ s_add_u32 m0, m0, 4
+ s_add_u32 s_save_mem_offset, s_save_mem_offset, 256*4
+ s_cmp_lt_u32 m0, s_save_alloc_size
+ s_cbranch_scc1 L_SAVE_VGPR_LOOP //VGPR save is complete?
+ s_set_gpr_idx_off
+L_SAVE_VGPR_LOOP_END:
+
+ s_and_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, 0x0000FFFF // reset const stride to 0
+ s_or_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, S_SAVE_BUF_RSRC_WORD1_STRIDE // reset const stride to 4 bytes
+else
+ // VGPR store using dw burst
+ s_mov_b32 m0, 0x4 //VGPR initial index value = 4 (the first 4 VGPRs are already saved)
+ s_cmp_lt_u32 m0, s_save_alloc_size
+ s_cbranch_scc0 L_SAVE_VGPR_END
+
+
+ s_set_gpr_idx_on m0, 0x1 //M0[7:0] = M0[7:0] and M0[15:12] = 0x1
+ s_add_u32 s_save_alloc_size, s_save_alloc_size, 0x1000 //add 0x1000 since we compare m0 against it later
+
+ L_SAVE_VGPR_LOOP:
+ v_mov_b32 v0, v0 //v0 = v[0+m0]
+ v_mov_b32 v1, v1 //v1 = v[1+m0]
+ v_mov_b32 v2, v2 //v2 = v[2+m0]
+ v_mov_b32 v3, v3 //v3 = v[3+m0]
+
+ if(USE_MTBUF_INSTEAD_OF_MUBUF)
+ tbuffer_store_format_x v0, v0, s_save_buf_rsrc0, s_save_mem_offset format:BUF_NUM_FORMAT_FLOAT format: BUF_DATA_FORMAT_32 slc:1 glc:1
+ else
+ buffer_store_dword v0, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1
+ buffer_store_dword v1, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256
+ buffer_store_dword v2, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256*2
+ buffer_store_dword v3, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256*3
+ end
+
+ s_add_u32 m0, m0, 4 //next vgpr index
+ s_add_u32 s_save_mem_offset, s_save_mem_offset, 256*4 //every buffer_store_dword does 256 bytes
+ s_cmp_lt_u32 m0, s_save_alloc_size //scc = (m0 < s_save_alloc_size) ? 1 : 0
+ s_cbranch_scc1 L_SAVE_VGPR_LOOP //VGPR save is complete?
+ s_set_gpr_idx_off
+end
+
+L_SAVE_VGPR_END:
+
+
+
+
+
+
+ /* S_PGM_END_SAVED */ //FIXME graphics ONLY
+ if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_SAVE_NORMAL_EXIT))
+ s_and_b32 s_save_pc_hi, s_save_pc_hi, 0x0000ffff //pc[47:32]
+ s_add_u32 s_save_pc_lo, s_save_pc_lo, 4 //pc[31:0]+4
+ s_addc_u32 s_save_pc_hi, s_save_pc_hi, 0x0 //carry bit over
+ s_rfe_b64 s_save_pc_lo //Return to the main shader program
+ else
+ end
+
+// Save Done timestamp
+if G8SR_DEBUG_TIMESTAMP
+ s_memrealtime s_g8sr_ts_save_d
+ // SGPR SR memory offset : size(VGPR)
+ get_vgpr_size_bytes(s_save_mem_offset)
+ s_add_u32 s_save_mem_offset, s_save_mem_offset, G8SR_DEBUG_TS_SAVE_D_OFFSET
+ s_waitcnt lgkmcnt(0) //FIXME, will cause xnack??
+ // Need reset rsrc2??
+ s_mov_b32 m0, s_save_mem_offset
+ s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+ s_buffer_store_dwordx2 s_g8sr_ts_save_d, s_save_buf_rsrc0, m0 glc:1
+end
+
+
+ s_branch L_END_PGM
+
+
+
+/**************************************************************************/
+/* restore routine */
+/**************************************************************************/
+
+L_RESTORE:
+ /* Setup Resource Constants */
+ if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_RESTORE_NORMAL))
+ //calculate wd_addr using absolute thread id
+ v_readlane_b32 s_restore_tmp, v9, 0
+ s_lshr_b32 s_restore_tmp, s_restore_tmp, 6
+ s_mul_i32 s_restore_tmp, s_restore_tmp, WAVE_SPACE
+ s_add_i32 s_restore_spi_init_lo, s_restore_tmp, WG_BASE_ADDR_LO
+ s_mov_b32 s_restore_spi_init_hi, WG_BASE_ADDR_HI
+ s_and_b32 s_restore_spi_init_hi, s_restore_spi_init_hi, CTX_RESTORE_CONTROL
+ else
+ end
+
+if G8SR_DEBUG_TIMESTAMP
+ s_memrealtime s_g8sr_ts_restore_s
+ s_waitcnt lgkmcnt(0) //FIXME, will cause xnack??
+ // tma_lo/hi are sgpr 110, 111, which are not used in the 112-SGPR-allocated case...
+ s_mov_b32 s_restore_pc_lo, s_g8sr_ts_restore_s[0]
+ s_mov_b32 s_restore_pc_hi, s_g8sr_ts_restore_s[1] //backup the timestamp to ttmp0/1, since exec will be restored at the end..
+end
+
+
+
+ s_mov_b32 s_restore_buf_rsrc0, s_restore_spi_init_lo //base_addr_lo
+ s_and_b32 s_restore_buf_rsrc1, s_restore_spi_init_hi, 0x0000FFFF //base_addr_hi
+ s_or_b32 s_restore_buf_rsrc1, s_restore_buf_rsrc1, S_RESTORE_BUF_RSRC_WORD1_STRIDE
+ s_mov_b32 s_restore_buf_rsrc2, 0 //NUM_RECORDS initial value = 0 (in bytes)
+ s_mov_b32 s_restore_buf_rsrc3, S_RESTORE_BUF_RSRC_WORD3_MISC
+ s_and_b32 s_restore_tmp, s_restore_spi_init_hi, S_RESTORE_SPI_INIT_ATC_MASK
+ s_lshr_b32 s_restore_tmp, s_restore_tmp, (S_RESTORE_SPI_INIT_ATC_SHIFT-SQ_BUF_RSRC_WORD1_ATC_SHIFT) //get ATC bit into position
+ s_or_b32 s_restore_buf_rsrc3, s_restore_buf_rsrc3, s_restore_tmp //or ATC
+ s_and_b32 s_restore_tmp, s_restore_spi_init_hi, S_RESTORE_SPI_INIT_MTYPE_MASK
+ s_lshr_b32 s_restore_tmp, s_restore_tmp, (S_RESTORE_SPI_INIT_MTYPE_SHIFT-SQ_BUF_RSRC_WORD3_MTYPE_SHIFT) //get MTYPE bits into position
+ s_or_b32 s_restore_buf_rsrc3, s_restore_buf_rsrc3, s_restore_tmp //or MTYPE
+
+ /* global mem offset */
+// s_mov_b32 s_restore_mem_offset, 0x0 //mem offset initial value = 0
+
+ /* the first wave in the threadgroup */
+ s_and_b32 s_restore_tmp, s_restore_spi_init_hi, S_RESTORE_SPI_INIT_FIRST_WAVE_MASK
+ s_cbranch_scc0 L_RESTORE_VGPR
+
+ /* restore LDS */
+ //////////////////////////////
+ L_RESTORE_LDS:
+
+ s_mov_b32 exec_lo, 0xFFFFFFFF //need every thread from now on //be consistent with SAVE although can be moved ahead
+ s_mov_b32 exec_hi, 0xFFFFFFFF
+
+ s_getreg_b32 s_restore_alloc_size, hwreg(HW_REG_LDS_ALLOC,SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT,SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE) //lds_size
+ s_and_b32 s_restore_alloc_size, s_restore_alloc_size, 0xFFFFFFFF //lds_size is zero?
+ s_cbranch_scc0 L_RESTORE_VGPR //no lds used? jump to L_RESTORE_VGPR
+ s_lshl_b32 s_restore_alloc_size, s_restore_alloc_size, 6 //LDS size in dwords = lds_size * 64dw
+ s_lshl_b32 s_restore_alloc_size, s_restore_alloc_size, 2 //LDS size in bytes
+ s_mov_b32 s_restore_buf_rsrc2, s_restore_alloc_size //NUM_RECORDS in bytes
+
+ // LDS at offset: size(VGPR)+SIZE(SGPR)+SIZE(HWREG)
+ //
+ get_vgpr_size_bytes(s_restore_mem_offset)
+ get_sgpr_size_bytes(s_restore_tmp)
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, s_restore_tmp
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, get_hwreg_size_bytes() //FIXME, Check if offset overflow???
+
+
+ if (SWIZZLE_EN)
+ s_add_u32 s_restore_buf_rsrc2, s_restore_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking?
+ else
+ s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+ end
+ s_mov_b32 m0, 0x0 //lds_offset initial value = 0
+
+ L_RESTORE_LDS_LOOP:
+ if (SAVE_LDS)
+ buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset lds:1 // first 64DW
+ buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset lds:1 offset:256 // second 64DW
+ end
+ s_add_u32 m0, m0, 256*2 // 128 DW
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 256*2 //mem offset increased by 128DW
+ s_cmp_lt_u32 m0, s_restore_alloc_size //scc=(m0 < s_restore_alloc_size) ? 1 : 0
+ s_cbranch_scc1 L_RESTORE_LDS_LOOP //LDS restore is complete?
+
+
+ /* restore VGPRs */
+ //////////////////////////////
+ L_RESTORE_VGPR:
+ // VGPR SR memory offset : 0
+ s_mov_b32 s_restore_mem_offset, 0x0
+ s_mov_b32 exec_lo, 0xFFFFFFFF //need every thread from now on //be consistent with SAVE although can be moved ahead
+ s_mov_b32 exec_hi, 0xFFFFFFFF
+
+ s_getreg_b32 s_restore_alloc_size, hwreg(HW_REG_GPR_ALLOC,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SIZE) //vpgr_size
+ s_add_u32 s_restore_alloc_size, s_restore_alloc_size, 1
+ s_lshl_b32 s_restore_alloc_size, s_restore_alloc_size, 2 //Number of VGPRs = (vgpr_size + 1) * 4 (non-zero value)
+ s_lshl_b32 s_restore_buf_rsrc2, s_restore_alloc_size, 8 //NUM_RECORDS in bytes (64 threads*4)
+ if (SWIZZLE_EN)
+ s_add_u32 s_restore_buf_rsrc2, s_restore_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking?
+ else
+ s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+ end
+
+if G8SR_VGPR_SR_IN_DWX4
+ get_vgpr_size_bytes(s_restore_mem_offset)
+ s_sub_u32 s_restore_mem_offset, s_restore_mem_offset, 256*4
+
+ // the const stride for DWx4 is 4*4 bytes
+ s_and_b32 s_restore_buf_rsrc1, s_restore_buf_rsrc1, 0x0000FFFF // reset const stride to 0
+ s_or_b32 s_restore_buf_rsrc1, s_restore_buf_rsrc1, G8SR_RESTORE_BUF_RSRC_WORD1_STRIDE_DWx4 // const stride to 4*4 bytes
+
+ s_mov_b32 m0, s_restore_alloc_size
+ s_set_gpr_idx_on m0, 0x8 // Note.. This will change m0
+
+L_RESTORE_VGPR_LOOP:
+ buffer_load_dwordx4 v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1
+ s_waitcnt vmcnt(0)
+ s_sub_u32 m0, m0, 4
+ v_mov_b32 v0, v0 // v[0+m0] = v0
+ v_mov_b32 v1, v1
+ v_mov_b32 v2, v2
+ v_mov_b32 v3, v3
+ s_sub_u32 s_restore_mem_offset, s_restore_mem_offset, 256*4
+ s_cmp_eq_u32 m0, 0x8000
+ s_cbranch_scc0 L_RESTORE_VGPR_LOOP
+ s_set_gpr_idx_off
+
+ s_and_b32 s_restore_buf_rsrc1, s_restore_buf_rsrc1, 0x0000FFFF // reset const stride to 0
+ s_or_b32 s_restore_buf_rsrc1, s_restore_buf_rsrc1, S_RESTORE_BUF_RSRC_WORD1_STRIDE // reset const stride to 4 bytes
+
+else
+ // VGPR load using dw burst
+ s_mov_b32 s_restore_mem_offset_save, s_restore_mem_offset // restore starts with v4; v0..v3 are restored last
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 256*4
+ s_mov_b32 m0, 4 //VGPR initial index value = 4
+ s_set_gpr_idx_on m0, 0x8 //M0[7:0] = M0[7:0] and M0[15:12] = 0x8
+ s_add_u32 s_restore_alloc_size, s_restore_alloc_size, 0x8000 //add 0x8000 since we compare m0 against it later
+
+ L_RESTORE_VGPR_LOOP:
+ if(USE_MTBUF_INSTEAD_OF_MUBUF)
+ tbuffer_load_format_x v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset format:BUF_NUM_FORMAT_FLOAT format: BUF_DATA_FORMAT_32 slc:1 glc:1
+ else
+ buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1
+ buffer_load_dword v1, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1 offset:256
+ buffer_load_dword v2, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1 offset:256*2
+ buffer_load_dword v3, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1 offset:256*3
+ end
+ s_waitcnt vmcnt(0) //ensure data ready
+ v_mov_b32 v0, v0 //v[0+m0] = v0
+ v_mov_b32 v1, v1
+ v_mov_b32 v2, v2
+ v_mov_b32 v3, v3
+ s_add_u32 m0, m0, 4 //next vgpr index
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 256*4 //every buffer_load_dword does 256 bytes
+ s_cmp_lt_u32 m0, s_restore_alloc_size //scc = (m0 < s_restore_alloc_size) ? 1 : 0
+ s_cbranch_scc1 L_RESTORE_VGPR_LOOP //VGPR restore (except v0) is complete?
+ s_set_gpr_idx_off
+ /* VGPR restore on v0 */
+ if(USE_MTBUF_INSTEAD_OF_MUBUF)
+ tbuffer_load_format_x v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save format:BUF_NUM_FORMAT_FLOAT format: BUF_DATA_FORMAT_32 slc:1 glc:1
+ else
+ buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1
+ buffer_load_dword v1, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 offset:256
+ buffer_load_dword v2, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 offset:256*2
+ buffer_load_dword v3, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 offset:256*3
+ end
+
+end
+
+ /* restore SGPRs */
+ //////////////////////////////
+
+ // SGPR SR memory offset : size(VGPR)
+ get_vgpr_size_bytes(s_restore_mem_offset)
+ get_sgpr_size_bytes(s_restore_tmp)
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, s_restore_tmp
+ s_sub_u32 s_restore_mem_offset, s_restore_mem_offset, 16*4 // restore SGPRs from S[n] down to S[0], in groups of 16 sgprs
+ // TODO, change RSRC word to rearrange memory layout for SGPRS
+
+ s_getreg_b32 s_restore_alloc_size, hwreg(HW_REG_GPR_ALLOC,SQ_WAVE_GPR_ALLOC_SGPR_SIZE_SHIFT,SQ_WAVE_GPR_ALLOC_SGPR_SIZE_SIZE) //sgpr_size
+ s_add_u32 s_restore_alloc_size, s_restore_alloc_size, 1
+ s_lshl_b32 s_restore_alloc_size, s_restore_alloc_size, 4 //Number of SGPRs = (sgpr_size + 1) * 16 (non-zero value)
+
+ if (SGPR_SAVE_USE_SQC)
+ s_lshl_b32 s_restore_buf_rsrc2, s_restore_alloc_size, 2 //NUM_RECORDS in bytes
+ else
+ s_lshl_b32 s_restore_buf_rsrc2, s_restore_alloc_size, 8 //NUM_RECORDS in bytes (64 threads)
+ end
+ if (SWIZZLE_EN)
+ s_add_u32 s_restore_buf_rsrc2, s_restore_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking?
+ else
+ s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+ end
+
+ /* If 112 SGPRs are allocated, 4 sgprs are not used: TBA(108,109), TMA(110,111).
+ However, it is safe to restore these 4 SGPRs anyway, since TBA,TMA will later be restored by HWREG
+ */
+ s_mov_b32 m0, s_restore_alloc_size
+
+ L_RESTORE_SGPR_LOOP:
+ read_16sgpr_from_mem(s0, s_restore_buf_rsrc0, s_restore_mem_offset) //PV: further performance improvement can be made
+ s_waitcnt lgkmcnt(0) //ensure data ready
+
+ s_sub_u32 m0, m0, 16 // Restore from S[n] to S[0]
+
+ s_movreld_b64 s0, s0 //s[0+m0] = s0
+ s_movreld_b64 s2, s2
+ s_movreld_b64 s4, s4
+ s_movreld_b64 s6, s6
+ s_movreld_b64 s8, s8
+ s_movreld_b64 s10, s10
+ s_movreld_b64 s12, s12
+ s_movreld_b64 s14, s14
+
+ s_cmp_eq_u32 m0, 0 //scc = (m0 == 0) ? 1 : 0
+ s_cbranch_scc0 L_RESTORE_SGPR_LOOP //SGPR restore is complete?
+
+ /* restore HW registers */
+ //////////////////////////////
+ L_RESTORE_HWREG:
+
+
+if G8SR_DEBUG_TIMESTAMP
+ s_mov_b32 s_g8sr_ts_restore_s[0], s_restore_pc_lo
+ s_mov_b32 s_g8sr_ts_restore_s[1], s_restore_pc_hi
+end
+
+ // HWREG SR memory offset : size(VGPR)+size(SGPR)
+ get_vgpr_size_bytes(s_restore_mem_offset)
+ get_sgpr_size_bytes(s_restore_tmp)
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, s_restore_tmp
+
+
+ s_mov_b32 s_restore_buf_rsrc2, 0x4 //NUM_RECORDS in bytes
+ if (SWIZZLE_EN)
+ s_add_u32 s_restore_buf_rsrc2, s_restore_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking?
+ else
+ s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+ end
+
+ read_hwreg_from_mem(s_restore_m0, s_restore_buf_rsrc0, s_restore_mem_offset) //M0
+ read_hwreg_from_mem(s_restore_pc_lo, s_restore_buf_rsrc0, s_restore_mem_offset) //PC
+ read_hwreg_from_mem(s_restore_pc_hi, s_restore_buf_rsrc0, s_restore_mem_offset)
+ read_hwreg_from_mem(s_restore_exec_lo, s_restore_buf_rsrc0, s_restore_mem_offset) //EXEC
+ read_hwreg_from_mem(s_restore_exec_hi, s_restore_buf_rsrc0, s_restore_mem_offset)
+ read_hwreg_from_mem(s_restore_status, s_restore_buf_rsrc0, s_restore_mem_offset) //STATUS
+ read_hwreg_from_mem(s_restore_trapsts, s_restore_buf_rsrc0, s_restore_mem_offset) //TRAPSTS
+ read_hwreg_from_mem(xnack_mask_lo, s_restore_buf_rsrc0, s_restore_mem_offset) //XNACK_MASK_LO
+ read_hwreg_from_mem(xnack_mask_hi, s_restore_buf_rsrc0, s_restore_mem_offset) //XNACK_MASK_HI
+ read_hwreg_from_mem(s_restore_mode, s_restore_buf_rsrc0, s_restore_mem_offset) //MODE
+ read_hwreg_from_mem(tba_lo, s_restore_buf_rsrc0, s_restore_mem_offset) //TBA_LO
+ read_hwreg_from_mem(tba_hi, s_restore_buf_rsrc0, s_restore_mem_offset) //TBA_HI
+
+ s_waitcnt lgkmcnt(0) //from now on, it is safe to restore STATUS and IB_STS
+
+ s_and_b32 s_restore_pc_hi, s_restore_pc_hi, 0x0000ffff //pc[47:32] //Do it here in order not to affect STATUS
+
+ //for normal save & restore, the saved PC points to the next inst to execute, no adjustment needs to be made, otherwise:
+ if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_RESTORE_NORMAL))
+ s_add_u32 s_restore_pc_lo, s_restore_pc_lo, 8 //pc[31:0]+8 //two back-to-back s_trap are used (first for save and second for restore)
+ s_addc_u32 s_restore_pc_hi, s_restore_pc_hi, 0x0 //carry bit over
+ end
+ if ((EMU_RUN_HACK) && (EMU_RUN_HACK_RESTORE_NORMAL))
+ s_add_u32 s_restore_pc_lo, s_restore_pc_lo, 4 //pc[31:0]+4 // save is hacked through s_trap but restore is normal
+ s_addc_u32 s_restore_pc_hi, s_restore_pc_hi, 0x0 //carry bit over
+ end
+
+ s_mov_b32 m0, s_restore_m0
+ s_mov_b32 exec_lo, s_restore_exec_lo
+ s_mov_b32 exec_hi, s_restore_exec_hi
+
+ s_and_b32 s_restore_m0, SQ_WAVE_TRAPSTS_PRE_SAVECTX_MASK, s_restore_trapsts
+ s_setreg_b32 hwreg(HW_REG_TRAPSTS, SQ_WAVE_TRAPSTS_PRE_SAVECTX_SHIFT, SQ_WAVE_TRAPSTS_PRE_SAVECTX_SIZE), s_restore_m0
+ s_and_b32 s_restore_m0, SQ_WAVE_TRAPSTS_POST_SAVECTX_MASK, s_restore_trapsts
+ s_lshr_b32 s_restore_m0, s_restore_m0, SQ_WAVE_TRAPSTS_POST_SAVECTX_SHIFT
+ s_setreg_b32 hwreg(HW_REG_TRAPSTS, SQ_WAVE_TRAPSTS_POST_SAVECTX_SHIFT, SQ_WAVE_TRAPSTS_POST_SAVECTX_SIZE), s_restore_m0
+ //s_setreg_b32 hwreg(HW_REG_TRAPSTS), s_restore_trapsts //don't overwrite SAVECTX bit as it may be set through external SAVECTX during restore
+ s_setreg_b32 hwreg(HW_REG_MODE), s_restore_mode
+ //reuse s_restore_m0 as a temp register
+ s_and_b32 s_restore_m0, s_restore_pc_hi, S_SAVE_PC_HI_RCNT_MASK
+ s_lshr_b32 s_restore_m0, s_restore_m0, S_SAVE_PC_HI_RCNT_SHIFT
+ s_lshl_b32 s_restore_m0, s_restore_m0, SQ_WAVE_IB_STS_RCNT_SHIFT
+ s_mov_b32 s_restore_tmp, 0x0 //IB_STS is zero
+ s_or_b32 s_restore_tmp, s_restore_tmp, s_restore_m0
+ s_and_b32 s_restore_m0, s_restore_pc_hi, S_SAVE_PC_HI_FIRST_REPLAY_MASK
+ s_lshr_b32 s_restore_m0, s_restore_m0, S_SAVE_PC_HI_FIRST_REPLAY_SHIFT
+ s_lshl_b32 s_restore_m0, s_restore_m0, SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT
+ s_or_b32 s_restore_tmp, s_restore_tmp, s_restore_m0
+ s_and_b32 s_restore_m0, s_restore_status, SQ_WAVE_STATUS_INST_ATC_MASK
+ s_lshr_b32 s_restore_m0, s_restore_m0, SQ_WAVE_STATUS_INST_ATC_SHIFT
+ s_setreg_b32 hwreg(HW_REG_IB_STS), s_restore_tmp
+
+ s_and_b64 exec, exec, exec // Restore STATUS.EXECZ, not writable by s_setreg_b32
+ s_and_b64 vcc, vcc, vcc // Restore STATUS.VCCZ, not writable by s_setreg_b32
+ s_setreg_b32 hwreg(HW_REG_STATUS), s_restore_status // SCC is included, which is changed by previous salu
+
+ s_barrier //barrier to ensure the readiness of LDS before access attempts from any other wave in the same TG //FIXME not performance-optimal at this time
+
+if G8SR_DEBUG_TIMESTAMP
+ s_memrealtime s_g8sr_ts_restore_d
+ s_waitcnt lgkmcnt(0)
+end
+
+// s_rfe_b64 s_restore_pc_lo //Return to the main shader program and resume execution
+ s_rfe_restore_b64 s_restore_pc_lo, s_restore_m0 // s_restore_m0[0] is used to set STATUS.inst_atc
+
+
+/**************************************************************************/
+/* the END */
+/**************************************************************************/
+L_END_PGM:
+ s_endpgm
+
+end
+
+
+/**************************************************************************/
+/* the helper functions */
+/**************************************************************************/
+
+//Only for save hwreg to mem
+function write_hwreg_to_mem(s, s_rsrc, s_mem_offset)
+ s_mov_b32 exec_lo, m0 //assuming exec_lo is not needed anymore from this point on
+ s_mov_b32 m0, s_mem_offset
+ s_buffer_store_dword s, s_rsrc, m0 glc:1
+ s_add_u32 s_mem_offset, s_mem_offset, 4
+ s_mov_b32 m0, exec_lo
+end
+
+
+// HWREGs are saved before SGPRs, so all HWREG registers can be used here.
+function write_16sgpr_to_mem(s, s_rsrc, s_mem_offset)
+
+ s_buffer_store_dwordx4 s[0], s_rsrc, 0 glc:1
+ s_buffer_store_dwordx4 s[4], s_rsrc, 16 glc:1
+ s_buffer_store_dwordx4 s[8], s_rsrc, 32 glc:1
+ s_buffer_store_dwordx4 s[12], s_rsrc, 48 glc:1
+ s_add_u32 s_rsrc[0], s_rsrc[0], 4*16
+ s_addc_u32 s_rsrc[1], s_rsrc[1], 0x0 // +scc
+end
+
+
+function read_hwreg_from_mem(s, s_rsrc, s_mem_offset)
+ s_buffer_load_dword s, s_rsrc, s_mem_offset glc:1
+ s_add_u32 s_mem_offset, s_mem_offset, 4
+end
+
+function read_16sgpr_from_mem(s, s_rsrc, s_mem_offset)
+ s_buffer_load_dwordx16 s, s_rsrc, s_mem_offset glc:1
+ s_sub_u32 s_mem_offset, s_mem_offset, 4*16
+end
+
+
+
+function get_lds_size_bytes(s_lds_size_byte)
+ // SQ LDS granularity is 64DW, while PGM_RSRC2.lds_size is in granularity 128DW
+ s_getreg_b32 s_lds_size_byte, hwreg(HW_REG_LDS_ALLOC, SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT, SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE) // lds_size
+ s_lshl_b32 s_lds_size_byte, s_lds_size_byte, 8 //LDS size in bytes = lds_size * 64 DW * 4 bytes // granularity 64DW
+end
+
+function get_vgpr_size_bytes(s_vgpr_size_byte)
+ s_getreg_b32 s_vgpr_size_byte, hwreg(HW_REG_GPR_ALLOC,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SIZE) //vgpr_size
+ s_add_u32 s_vgpr_size_byte, s_vgpr_size_byte, 1
+ s_lshl_b32 s_vgpr_size_byte, s_vgpr_size_byte, (2+8) //VGPR size in bytes = (vgpr_size + 1) * 4 VGPRs * 64 threads * 4 bytes (non-zero value) //FIXME for GFX, zero is possible
+end
+
+function get_sgpr_size_bytes(s_sgpr_size_byte)
+ s_getreg_b32 s_sgpr_size_byte, hwreg(HW_REG_GPR_ALLOC,SQ_WAVE_GPR_ALLOC_SGPR_SIZE_SHIFT,SQ_WAVE_GPR_ALLOC_SGPR_SIZE_SIZE) //sgpr_size
+ s_add_u32 s_sgpr_size_byte, s_sgpr_size_byte, 1
+ s_lshl_b32 s_sgpr_size_byte, s_sgpr_size_byte, 6 //SGPR size in bytes = (sgpr_size + 1) * 16 SGPRs * 4 bytes (non-zero value)
+end
+
+function get_hwreg_size_bytes
+ return 128 //HWREG size 128 bytes
+end
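Taken together, the helpers above define the layout of the per-wave save area that the save and restore routines walk: VGPRs first, then SGPRs, then 128 bytes of HW registers, then (for the first wave in the threadgroup) LDS. Below is a hedged C sketch of the same arithmetic, using the raw HW_REG_GPR_ALLOC field values that the assembly reads with s_getreg_b32; the struct and function names are illustrative, not taken from the driver.

#include <stdint.h>
#include <stdio.h>

struct cwsr_area_layout {
	uint32_t vgpr_offset;	/* VGPRs are stored first                      */
	uint32_t sgpr_offset;	/* then SGPRs                                  */
	uint32_t hwreg_offset;	/* then 128 bytes of HW registers              */
	uint32_t lds_offset;	/* then LDS, saved by the first wave in the TG */
};

static void compute_layout(struct cwsr_area_layout *l,
			   uint32_t vgpr_size, uint32_t sgpr_size)
{
	uint32_t vgpr_bytes  = (vgpr_size + 1) << (2 + 8);	/* (n+1)*4 VGPRs * 64 lanes * 4 bytes */
	uint32_t sgpr_bytes  = (sgpr_size + 1) << 6;		/* (n+1)*16 SGPRs * 4 bytes           */
	uint32_t hwreg_bytes = 128;				/* get_hwreg_size_bytes()             */

	l->vgpr_offset  = 0;
	l->sgpr_offset  = vgpr_bytes;
	l->hwreg_offset = vgpr_bytes + sgpr_bytes;
	l->lds_offset   = vgpr_bytes + sgpr_bytes + hwreg_bytes;
}

int main(void)
{
	struct cwsr_area_layout l;

	compute_layout(&l, 15, 6);	/* e.g. 64 VGPRs and 112 SGPRs allocated */
	printf("sgpr@%u hwreg@%u lds@%u\n", l.sgpr_offset, l.hwreg_offset, l.lds_offset);
	return 0;
}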
+
+
+#endif
+
+static const uint32_t cwsr_trap_gfx8_hex[] = {
+ 0xbf820001, 0xbf820123,
+ 0xb8f4f802, 0x89748674,
+ 0xb8f5f803, 0x8675ff75,
+ 0x00000400, 0xbf850011,
+ 0xc00a1e37, 0x00000000,
+ 0xbf8c007f, 0x87777978,
+ 0xbf840002, 0xb974f802,
+ 0xbe801d78, 0xb8f5f803,
+ 0x8675ff75, 0x000001ff,
+ 0xbf850002, 0x80708470,
+ 0x82718071, 0x8671ff71,
+ 0x0000ffff, 0xb974f802,
+ 0xbe801f70, 0xb8f5f803,
+ 0x8675ff75, 0x00000100,
+ 0xbf840006, 0xbefa0080,
+ 0xb97a0203, 0x8671ff71,
+ 0x0000ffff, 0x80f08870,
+ 0x82f18071, 0xbefa0080,
+ 0xb97a0283, 0xbef60068,
+ 0xbef70069, 0xb8fa1c07,
+ 0x8e7a9c7a, 0x87717a71,
+ 0xb8fa03c7, 0x8e7a9b7a,
+ 0x87717a71, 0xb8faf807,
+ 0x867aff7a, 0x00007fff,
+ 0xb97af807, 0xbef2007e,
+ 0xbef3007f, 0xbefe0180,
+ 0xbf900004, 0xbf8e0002,
+ 0xbf88fffe, 0xbef8007e,
+ 0x8679ff7f, 0x0000ffff,
+ 0x8779ff79, 0x00040000,
+ 0xbefa0080, 0xbefb00ff,
+ 0x00807fac, 0x867aff7f,
+ 0x08000000, 0x8f7a837a,
+ 0x877b7a7b, 0x867aff7f,
+ 0x70000000, 0x8f7a817a,
+ 0x877b7a7b, 0xbeef007c,
+ 0xbeee0080, 0xb8ee2a05,
+ 0x806e816e, 0x8e6e8a6e,
+ 0xb8fa1605, 0x807a817a,
+ 0x8e7a867a, 0x806e7a6e,
+ 0xbefa0084, 0xbefa00ff,
+ 0x01000000, 0xbefe007c,
+ 0xbefc006e, 0xc0611bfc,
+ 0x0000007c, 0x806e846e,
+ 0xbefc007e, 0xbefe007c,
+ 0xbefc006e, 0xc0611c3c,
+ 0x0000007c, 0x806e846e,
+ 0xbefc007e, 0xbefe007c,
+ 0xbefc006e, 0xc0611c7c,
+ 0x0000007c, 0x806e846e,
+ 0xbefc007e, 0xbefe007c,
+ 0xbefc006e, 0xc0611cbc,
+ 0x0000007c, 0x806e846e,
+ 0xbefc007e, 0xbefe007c,
+ 0xbefc006e, 0xc0611cfc,
+ 0x0000007c, 0x806e846e,
+ 0xbefc007e, 0xbefe007c,
+ 0xbefc006e, 0xc0611d3c,
+ 0x0000007c, 0x806e846e,
+ 0xbefc007e, 0xb8f5f803,
+ 0xbefe007c, 0xbefc006e,
+ 0xc0611d7c, 0x0000007c,
+ 0x806e846e, 0xbefc007e,
+ 0xbefe007c, 0xbefc006e,
+ 0xc0611dbc, 0x0000007c,
+ 0x806e846e, 0xbefc007e,
+ 0xbefe007c, 0xbefc006e,
+ 0xc0611dfc, 0x0000007c,
+ 0x806e846e, 0xbefc007e,
+ 0xb8eff801, 0xbefe007c,
+ 0xbefc006e, 0xc0611bfc,
+ 0x0000007c, 0x806e846e,
+ 0xbefc007e, 0xbefe007c,
+ 0xbefc006e, 0xc0611b3c,
+ 0x0000007c, 0x806e846e,
+ 0xbefc007e, 0xbefe007c,
+ 0xbefc006e, 0xc0611b7c,
+ 0x0000007c, 0x806e846e,
+ 0xbefc007e, 0x867aff7f,
+ 0x04000000, 0xbef30080,
+ 0x8773737a, 0xb8ee2a05,
+ 0x806e816e, 0x8e6e8a6e,
+ 0xb8f51605, 0x80758175,
+ 0x8e758475, 0x8e7a8275,
+ 0xbefa00ff, 0x01000000,
+ 0xbef60178, 0x80786e78,
+ 0x82798079, 0xbefc0080,
+ 0xbe802b00, 0xbe822b02,
+ 0xbe842b04, 0xbe862b06,
+ 0xbe882b08, 0xbe8a2b0a,
+ 0xbe8c2b0c, 0xbe8e2b0e,
+ 0xc06b003c, 0x00000000,
+ 0xc06b013c, 0x00000010,
+ 0xc06b023c, 0x00000020,
+ 0xc06b033c, 0x00000030,
+ 0x8078c078, 0x82798079,
+ 0x807c907c, 0xbf0a757c,
+ 0xbf85ffeb, 0xbef80176,
+ 0xbeee0080, 0xbefe00c1,
+ 0xbeff00c1, 0xbefa00ff,
+ 0x01000000, 0xe0724000,
+ 0x6e1e0000, 0xe0724100,
+ 0x6e1e0100, 0xe0724200,
+ 0x6e1e0200, 0xe0724300,
+ 0x6e1e0300, 0xbefe00c1,
+ 0xbeff00c1, 0xb8f54306,
+ 0x8675c175, 0xbf84002c,
+ 0xbf8a0000, 0x867aff73,
+ 0x04000000, 0xbf840028,
+ 0x8e758675, 0x8e758275,
+ 0xbefa0075, 0xb8ee2a05,
+ 0x806e816e, 0x8e6e8a6e,
+ 0xb8fa1605, 0x807a817a,
+ 0x8e7a867a, 0x806e7a6e,
+ 0x806eff6e, 0x00000080,
+ 0xbefa00ff, 0x01000000,
+ 0xbefc0080, 0xd28c0002,
+ 0x000100c1, 0xd28d0003,
+ 0x000204c1, 0xd1060002,
+ 0x00011103, 0x7e0602ff,
+ 0x00000200, 0xbefc00ff,
+ 0x00010000, 0xbe80007b,
+ 0x867bff7b, 0xff7fffff,
+ 0x877bff7b, 0x00058000,
+ 0xd8ec0000, 0x00000002,
+ 0xbf8c007f, 0xe0765000,
+ 0x6e1e0002, 0x32040702,
+ 0xd0c9006a, 0x0000eb02,
+ 0xbf87fff7, 0xbefb0000,
+ 0xbeee00ff, 0x00000400,
+ 0xbefe00c1, 0xbeff00c1,
+ 0xb8f52a05, 0x80758175,
+ 0x8e758275, 0x8e7a8875,
+ 0xbefa00ff, 0x01000000,
+ 0xbefc0084, 0xbf0a757c,
+ 0xbf840015, 0xbf11017c,
+ 0x8075ff75, 0x00001000,
+ 0x7e000300, 0x7e020301,
+ 0x7e040302, 0x7e060303,
+ 0xe0724000, 0x6e1e0000,
+ 0xe0724100, 0x6e1e0100,
+ 0xe0724200, 0x6e1e0200,
+ 0xe0724300, 0x6e1e0300,
+ 0x807c847c, 0x806eff6e,
+ 0x00000400, 0xbf0a757c,
+ 0xbf85ffef, 0xbf9c0000,
+ 0xbf8200ca, 0xbef8007e,
+ 0x8679ff7f, 0x0000ffff,
+ 0x8779ff79, 0x00040000,
+ 0xbefa0080, 0xbefb00ff,
+ 0x00807fac, 0x8676ff7f,
+ 0x08000000, 0x8f768376,
+ 0x877b767b, 0x8676ff7f,
+ 0x70000000, 0x8f768176,
+ 0x877b767b, 0x8676ff7f,
+ 0x04000000, 0xbf84001e,
+ 0xbefe00c1, 0xbeff00c1,
+ 0xb8f34306, 0x8673c173,
+ 0xbf840019, 0x8e738673,
+ 0x8e738273, 0xbefa0073,
+ 0xb8f22a05, 0x80728172,
+ 0x8e728a72, 0xb8f61605,
+ 0x80768176, 0x8e768676,
+ 0x80727672, 0x8072ff72,
+ 0x00000080, 0xbefa00ff,
+ 0x01000000, 0xbefc0080,
+ 0xe0510000, 0x721e0000,
+ 0xe0510100, 0x721e0000,
+ 0x807cff7c, 0x00000200,
+ 0x8072ff72, 0x00000200,
+ 0xbf0a737c, 0xbf85fff6,
+ 0xbef20080, 0xbefe00c1,
+ 0xbeff00c1, 0xb8f32a05,
+ 0x80738173, 0x8e738273,
+ 0x8e7a8873, 0xbefa00ff,
+ 0x01000000, 0xbef60072,
+ 0x8072ff72, 0x00000400,
+ 0xbefc0084, 0xbf11087c,
+ 0x8073ff73, 0x00008000,
+ 0xe0524000, 0x721e0000,
+ 0xe0524100, 0x721e0100,
+ 0xe0524200, 0x721e0200,
+ 0xe0524300, 0x721e0300,
+ 0xbf8c0f70, 0x7e000300,
+ 0x7e020301, 0x7e040302,
+ 0x7e060303, 0x807c847c,
+ 0x8072ff72, 0x00000400,
+ 0xbf0a737c, 0xbf85ffee,
+ 0xbf9c0000, 0xe0524000,
+ 0x761e0000, 0xe0524100,
+ 0x761e0100, 0xe0524200,
+ 0x761e0200, 0xe0524300,
+ 0x761e0300, 0xb8f22a05,
+ 0x80728172, 0x8e728a72,
+ 0xb8f61605, 0x80768176,
+ 0x8e768676, 0x80727672,
+ 0x80f2c072, 0xb8f31605,
+ 0x80738173, 0x8e738473,
+ 0x8e7a8273, 0xbefa00ff,
+ 0x01000000, 0xbefc0073,
+ 0xc031003c, 0x00000072,
+ 0x80f2c072, 0xbf8c007f,
+ 0x80fc907c, 0xbe802d00,
+ 0xbe822d02, 0xbe842d04,
+ 0xbe862d06, 0xbe882d08,
+ 0xbe8a2d0a, 0xbe8c2d0c,
+ 0xbe8e2d0e, 0xbf06807c,
+ 0xbf84fff1, 0xb8f22a05,
+ 0x80728172, 0x8e728a72,
+ 0xb8f61605, 0x80768176,
+ 0x8e768676, 0x80727672,
+ 0xbefa0084, 0xbefa00ff,
+ 0x01000000, 0xc0211cfc,
+ 0x00000072, 0x80728472,
+ 0xc0211c3c, 0x00000072,
+ 0x80728472, 0xc0211c7c,
+ 0x00000072, 0x80728472,
+ 0xc0211bbc, 0x00000072,
+ 0x80728472, 0xc0211bfc,
+ 0x00000072, 0x80728472,
+ 0xc0211d3c, 0x00000072,
+ 0x80728472, 0xc0211d7c,
+ 0x00000072, 0x80728472,
+ 0xc0211a3c, 0x00000072,
+ 0x80728472, 0xc0211a7c,
+ 0x00000072, 0x80728472,
+ 0xc0211dfc, 0x00000072,
+ 0x80728472, 0xc0211b3c,
+ 0x00000072, 0x80728472,
+ 0xc0211b7c, 0x00000072,
+ 0x80728472, 0xbf8c007f,
+ 0x8671ff71, 0x0000ffff,
+ 0xbefc0073, 0xbefe006e,
+ 0xbeff006f, 0x867375ff,
+ 0x000003ff, 0xb9734803,
+ 0x867375ff, 0xfffff800,
+ 0x8f738b73, 0xb973a2c3,
+ 0xb977f801, 0x8673ff71,
+ 0xf0000000, 0x8f739c73,
+ 0x8e739073, 0xbef60080,
+ 0x87767376, 0x8673ff71,
+ 0x08000000, 0x8f739b73,
+ 0x8e738f73, 0x87767376,
+ 0x8673ff74, 0x00800000,
+ 0x8f739773, 0xb976f807,
+ 0x86fe7e7e, 0x86ea6a6a,
+ 0xb974f802, 0xbf8a0000,
+ 0x95807370, 0xbf810000,
+};
+
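The array above is simply the assembled form of the trap-handler source listed before it; a consumer in the driver only needs its address and byte size. A minimal hedged sketch (the helper name is hypothetical and not part of this patch; size_t comes from <stddef.h>):

/* Hypothetical helper: expose the assembled CWSR trap handler and its size in bytes. */
static void get_cwsr_gfx8_isa(const uint32_t **isa, size_t *size_in_bytes)
{
	*isa = cwsr_trap_gfx8_hex;
	*size_in_bytes = sizeof(cwsr_trap_gfx8_hex);
}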
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 505d39156acd..62c3d9cd6ef1 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -117,7 +117,7 @@ static int kfd_open(struct inode *inode, struct file *filep)
return -EPERM;
}
- process = kfd_create_process(current);
+ process = kfd_create_process(filep);
if (IS_ERR(process))
return PTR_ERR(process);
@@ -206,6 +206,7 @@ static int set_queue_properties_from_user(struct queue_properties *q_properties,
q_properties->ctx_save_restore_area_address =
args->ctx_save_restore_address;
q_properties->ctx_save_restore_area_size = args->ctx_save_restore_size;
+ q_properties->ctl_stack_size = args->ctl_stack_size;
if (args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE ||
args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE_AQL)
q_properties->type = KFD_QUEUE_TYPE_COMPUTE;
@@ -431,6 +432,38 @@ out:
return err;
}
+static int kfd_ioctl_set_trap_handler(struct file *filep,
+ struct kfd_process *p, void *data)
+{
+ struct kfd_ioctl_set_trap_handler_args *args = data;
+ struct kfd_dev *dev;
+ int err = 0;
+ struct kfd_process_device *pdd;
+
+ dev = kfd_device_by_id(args->gpu_id);
+ if (dev == NULL)
+ return -EINVAL;
+
+ mutex_lock(&p->mutex);
+
+ pdd = kfd_bind_process_to_device(dev, p);
+ if (IS_ERR(pdd)) {
+ err = -ESRCH;
+ goto out;
+ }
+
+ if (dev->dqm->ops.set_trap_handler(dev->dqm,
+ &pdd->qpd,
+ args->tba_addr,
+ args->tma_addr))
+ err = -EINVAL;
+
+out:
+ mutex_unlock(&p->mutex);
+
+ return err;
+}
+
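From user space, the new ioctl would be exercised roughly as follows. This is a hedged sketch: the args structure and the AMDKFD_IOC_SET_TRAP_HANDLER number come from the uapi header (include/uapi/linux/kfd_ioctl.h), which is not part of this hunk, and the fields shown (gpu_id, tba_addr, tma_addr) are inferred from the handler above.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kfd_ioctl.h>	/* uapi header providing the args struct and ioctl number */

/* Install a second-level trap handler: TBA points at the handler code,
 * TMA at its memory area, both already mapped by the process on this GPU.
 */
static int set_trap_handler(int kfd_fd, uint32_t gpu_id,
			    uint64_t tba_addr, uint64_t tma_addr)
{
	struct kfd_ioctl_set_trap_handler_args args = {
		.gpu_id   = gpu_id,
		.tba_addr = tba_addr,
		.tma_addr = tma_addr,
	};

	return ioctl(kfd_fd, AMDKFD_IOC_SET_TRAP_HANDLER, &args);
}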
static int kfd_ioctl_dbg_register(struct file *filep,
struct kfd_process *p, void *data)
{
@@ -493,7 +526,7 @@ static int kfd_ioctl_dbg_unregister(struct file *filep,
long status;
dev = kfd_device_by_id(args->gpu_id);
- if (!dev)
+ if (!dev || !dev->dbgmgr)
return -EINVAL;
if (dev->device_info->asic_family == CHIP_CARRIZO) {
@@ -979,7 +1012,10 @@ static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = {
kfd_ioctl_set_scratch_backing_va, 0),
AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_TILE_CONFIG,
- kfd_ioctl_get_tile_config, 0)
+ kfd_ioctl_get_tile_config, 0),
+
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_TRAP_HANDLER,
+ kfd_ioctl_set_trap_handler, 0),
};
#define AMDKFD_CORE_IOCTL_COUNT ARRAY_SIZE(amdkfd_ioctls)
@@ -1088,6 +1124,10 @@ static int kfd_mmap(struct file *filp, struct vm_area_struct *vma)
KFD_MMAP_EVENTS_MASK) {
vma->vm_pgoff = vma->vm_pgoff ^ KFD_MMAP_EVENTS_MASK;
return kfd_event_mmap(process, vma);
+ } else if ((vma->vm_pgoff & KFD_MMAP_RESERVED_MEM_MASK) ==
+ KFD_MMAP_RESERVED_MEM_MASK) {
+ vma->vm_pgoff = vma->vm_pgoff ^ KFD_MMAP_RESERVED_MEM_MASK;
+ return kfd_reserved_mem_mmap(process, vma);
}
return -EFAULT;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
new file mode 100644
index 000000000000..2bc2816767a7
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
@@ -0,0 +1,1267 @@
+/*
+ * Copyright 2015-2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/pci.h>
+#include <linux/acpi.h>
+#include <linux/amd-iommu.h>
+#include "kfd_crat.h"
+#include "kfd_priv.h"
+#include "kfd_topology.h"
+
+/* GPU Processor ID base for dGPUs for which VCRAT needs to be created.
+ * GPU processor IDs are expressed with Bit[31]=1.
+ * The base is set to 0x8000_0000 + 0x1000 to avoid collision with GPU IDs
+ * used in the CRAT.
+ */
+static uint32_t gpu_processor_id_low = 0x80001000;
+
+/* Return the next available gpu_processor_id and increment it for the next GPU
+ * @total_cu_count - Total CUs present in the GPU including ones
+ * masked off
+ */
+static inline unsigned int get_and_inc_gpu_processor_id(
+ unsigned int total_cu_count)
+{
+ int current_id = gpu_processor_id_low;
+
+ gpu_processor_id_low += total_cu_count;
+ return current_id;
+}
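With this scheme, each GPU claims a contiguous block of processor IDs sized by its total CU count, so a first GPU reporting 64 CUs gets base 0x80001000 and the next GPU starts at 0x80001040. A standalone illustration (not driver code):

#include <stdint.h>
#include <stdio.h>

static uint32_t next_id = 0x80001000;	/* mirrors gpu_processor_id_low above */

static uint32_t take_id_range(unsigned int total_cu_count)
{
	uint32_t base = next_id;

	next_id += total_cu_count;
	return base;
}

int main(void)
{
	printf("GPU0 base: 0x%x\n", take_id_range(64));	/* 0x80001000 */
	printf("GPU1 base: 0x%x\n", take_id_range(36));	/* 0x80001040 */
	return 0;
}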
+
+/* Static table to describe GPU Cache information */
+struct kfd_gpu_cache_info {
+ uint32_t cache_size;
+ uint32_t cache_level;
+ uint32_t flags;
+ /* Indicates how many Compute Units share this cache
+ * Value = 1 indicates the cache is not shared
+ */
+ uint32_t num_cu_shared;
+};
+
+static struct kfd_gpu_cache_info kaveri_cache_info[] = {
+ {
+ /* TCP L1 Cache per CU */
+ .cache_size = 16,
+ .cache_level = 1,
+ .flags = (CRAT_CACHE_FLAGS_ENABLED |
+ CRAT_CACHE_FLAGS_DATA_CACHE |
+ CRAT_CACHE_FLAGS_SIMD_CACHE),
+ .num_cu_shared = 1,
+
+ },
+ {
+ /* Scalar L1 Instruction Cache (in SQC module) per bank */
+ .cache_size = 16,
+ .cache_level = 1,
+ .flags = (CRAT_CACHE_FLAGS_ENABLED |
+ CRAT_CACHE_FLAGS_INST_CACHE |
+ CRAT_CACHE_FLAGS_SIMD_CACHE),
+ .num_cu_shared = 2,
+ },
+ {
+ /* Scalar L1 Data Cache (in SQC module) per bank */
+ .cache_size = 8,
+ .cache_level = 1,
+ .flags = (CRAT_CACHE_FLAGS_ENABLED |
+ CRAT_CACHE_FLAGS_DATA_CACHE |
+ CRAT_CACHE_FLAGS_SIMD_CACHE),
+ .num_cu_shared = 2,
+ },
+
+ /* TODO: Add L2 Cache information */
+};
+
+
+static struct kfd_gpu_cache_info carrizo_cache_info[] = {
+ {
+ /* TCP L1 Cache per CU */
+ .cache_size = 16,
+ .cache_level = 1,
+ .flags = (CRAT_CACHE_FLAGS_ENABLED |
+ CRAT_CACHE_FLAGS_DATA_CACHE |
+ CRAT_CACHE_FLAGS_SIMD_CACHE),
+ .num_cu_shared = 1,
+ },
+ {
+ /* Scalar L1 Instruction Cache (in SQC module) per bank */
+ .cache_size = 8,
+ .cache_level = 1,
+ .flags = (CRAT_CACHE_FLAGS_ENABLED |
+ CRAT_CACHE_FLAGS_INST_CACHE |
+ CRAT_CACHE_FLAGS_SIMD_CACHE),
+ .num_cu_shared = 4,
+ },
+ {
+ /* Scalar L1 Data Cache (in SQC module) per bank. */
+ .cache_size = 4,
+ .cache_level = 1,
+ .flags = (CRAT_CACHE_FLAGS_ENABLED |
+ CRAT_CACHE_FLAGS_DATA_CACHE |
+ CRAT_CACHE_FLAGS_SIMD_CACHE),
+ .num_cu_shared = 4,
+ },
+
+ /* TODO: Add L2 Cache information */
+};
+
+/* NOTE: In the future, if more information is added to struct kfd_gpu_cache_info,
+ * the following ASICs may need a separate table.
+ */
+#define hawaii_cache_info kaveri_cache_info
+#define tonga_cache_info carrizo_cache_info
+#define fiji_cache_info carrizo_cache_info
+#define polaris10_cache_info carrizo_cache_info
+#define polaris11_cache_info carrizo_cache_info
+
+static void kfd_populated_cu_info_cpu(struct kfd_topology_device *dev,
+ struct crat_subtype_computeunit *cu)
+{
+ dev->node_props.cpu_cores_count = cu->num_cpu_cores;
+ dev->node_props.cpu_core_id_base = cu->processor_id_low;
+ if (cu->hsa_capability & CRAT_CU_FLAGS_IOMMU_PRESENT)
+ dev->node_props.capability |= HSA_CAP_ATS_PRESENT;
+
+ pr_debug("CU CPU: cores=%d id_base=%d\n", cu->num_cpu_cores,
+ cu->processor_id_low);
+}
+
+static void kfd_populated_cu_info_gpu(struct kfd_topology_device *dev,
+ struct crat_subtype_computeunit *cu)
+{
+ dev->node_props.simd_id_base = cu->processor_id_low;
+ dev->node_props.simd_count = cu->num_simd_cores;
+ dev->node_props.lds_size_in_kb = cu->lds_size_in_kb;
+ dev->node_props.max_waves_per_simd = cu->max_waves_simd;
+ dev->node_props.wave_front_size = cu->wave_front_size;
+ dev->node_props.array_count = cu->array_count;
+ dev->node_props.cu_per_simd_array = cu->num_cu_per_array;
+ dev->node_props.simd_per_cu = cu->num_simd_per_cu;
+ dev->node_props.max_slots_scratch_cu = cu->max_slots_scatch_cu;
+ if (cu->hsa_capability & CRAT_CU_FLAGS_HOT_PLUGGABLE)
+ dev->node_props.capability |= HSA_CAP_HOT_PLUGGABLE;
+ pr_debug("CU GPU: id_base=%d\n", cu->processor_id_low);
+}
+
+/* kfd_parse_subtype_cu - parse compute unit subtypes and attach them to the
+ * correct topology device present in the device_list
+ */
+static int kfd_parse_subtype_cu(struct crat_subtype_computeunit *cu,
+ struct list_head *device_list)
+{
+ struct kfd_topology_device *dev;
+
+ pr_debug("Found CU entry in CRAT table with proximity_domain=%d caps=%x\n",
+ cu->proximity_domain, cu->hsa_capability);
+ list_for_each_entry(dev, device_list, list) {
+ if (cu->proximity_domain == dev->proximity_domain) {
+ if (cu->flags & CRAT_CU_FLAGS_CPU_PRESENT)
+ kfd_populated_cu_info_cpu(dev, cu);
+
+ if (cu->flags & CRAT_CU_FLAGS_GPU_PRESENT)
+ kfd_populated_cu_info_gpu(dev, cu);
+ break;
+ }
+ }
+
+ return 0;
+}
+
+/* kfd_parse_subtype_mem - parse memory subtypes and attach them to the
+ * correct topology device present in the device_list
+ */
+static int kfd_parse_subtype_mem(struct crat_subtype_memory *mem,
+ struct list_head *device_list)
+{
+ struct kfd_mem_properties *props;
+ struct kfd_topology_device *dev;
+
+ pr_debug("Found memory entry in CRAT table with proximity_domain=%d\n",
+ mem->proximity_domain);
+ list_for_each_entry(dev, device_list, list) {
+ if (mem->proximity_domain == dev->proximity_domain) {
+ props = kfd_alloc_struct(props);
+ if (!props)
+ return -ENOMEM;
+
+ /* We're on GPU node */
+ if (dev->node_props.cpu_cores_count == 0) {
+ /* APU */
+ if (mem->visibility_type == 0)
+ props->heap_type =
+ HSA_MEM_HEAP_TYPE_FB_PRIVATE;
+ /* dGPU */
+ else
+ props->heap_type = mem->visibility_type;
+ } else
+ props->heap_type = HSA_MEM_HEAP_TYPE_SYSTEM;
+
+ if (mem->flags & CRAT_MEM_FLAGS_HOT_PLUGGABLE)
+ props->flags |= HSA_MEM_FLAGS_HOT_PLUGGABLE;
+ if (mem->flags & CRAT_MEM_FLAGS_NON_VOLATILE)
+ props->flags |= HSA_MEM_FLAGS_NON_VOLATILE;
+
+ props->size_in_bytes =
+ ((uint64_t)mem->length_high << 32) +
+ mem->length_low;
+ props->width = mem->width;
+
+ dev->node_props.mem_banks_count++;
+ list_add_tail(&props->list, &dev->mem_props);
+
+ break;
+ }
+ }
+
+ return 0;
+}
+
+/* kfd_parse_subtype_cache - Parse a cache subtype and attach it to the
+ * matching topology device in device_list
+ */
+static int kfd_parse_subtype_cache(struct crat_subtype_cache *cache,
+ struct list_head *device_list)
+{
+ struct kfd_cache_properties *props;
+ struct kfd_topology_device *dev;
+ uint32_t id;
+ uint32_t total_num_of_cu;
+
+ id = cache->processor_id_low;
+
+ pr_debug("Found cache entry in CRAT table with processor_id=%d\n", id);
+ list_for_each_entry(dev, device_list, list) {
+ total_num_of_cu = (dev->node_props.array_count *
+ dev->node_props.cu_per_simd_array);
+
+ /* Cache information in CRAT doesn't carry a proximity_domain,
+ * since a cache is associated with a CPU core or a GPU Compute
+ * Unit. So map the cache using the CPU core ID or the SIMD
+ * (GPU) ID.
+ * TODO: This works because Compute Units are currently parsed
+ * before caches. Remove this ordering dependency in the future.
+ */
+ if ((id >= dev->node_props.cpu_core_id_base &&
+ id <= dev->node_props.cpu_core_id_base +
+ dev->node_props.cpu_cores_count) ||
+ (id >= dev->node_props.simd_id_base &&
+ id < dev->node_props.simd_id_base +
+ total_num_of_cu)) {
+ props = kfd_alloc_struct(props);
+ if (!props)
+ return -ENOMEM;
+
+ props->processor_id_low = id;
+ props->cache_level = cache->cache_level;
+ props->cache_size = cache->cache_size;
+ props->cacheline_size = cache->cache_line_size;
+ props->cachelines_per_tag = cache->lines_per_tag;
+ props->cache_assoc = cache->associativity;
+ props->cache_latency = cache->cache_latency;
+ memcpy(props->sibling_map, cache->sibling_map,
+ sizeof(props->sibling_map));
+
+ if (cache->flags & CRAT_CACHE_FLAGS_DATA_CACHE)
+ props->cache_type |= HSA_CACHE_TYPE_DATA;
+ if (cache->flags & CRAT_CACHE_FLAGS_INST_CACHE)
+ props->cache_type |= HSA_CACHE_TYPE_INSTRUCTION;
+ if (cache->flags & CRAT_CACHE_FLAGS_CPU_CACHE)
+ props->cache_type |= HSA_CACHE_TYPE_CPU;
+ if (cache->flags & CRAT_CACHE_FLAGS_SIMD_CACHE)
+ props->cache_type |= HSA_CACHE_TYPE_HSACU;
+
+ dev->cache_count++;
+ dev->node_props.caches_count++;
+ list_add_tail(&props->list, &dev->cache_props);
+
+ break;
+ }
+ }
+
+ return 0;
+}
+
+/* kfd_parse_subtype_iolink - Parse an IO link subtype and attach it to the
+ * matching topology device in device_list
+ */
+static int kfd_parse_subtype_iolink(struct crat_subtype_iolink *iolink,
+ struct list_head *device_list)
+{
+ struct kfd_iolink_properties *props = NULL, *props2;
+ struct kfd_topology_device *dev, *cpu_dev;
+ uint32_t id_from;
+ uint32_t id_to;
+
+ id_from = iolink->proximity_domain_from;
+ id_to = iolink->proximity_domain_to;
+
+ pr_debug("Found IO link entry in CRAT table with id_from=%d\n",
+ id_from);
+ list_for_each_entry(dev, device_list, list) {
+ if (id_from == dev->proximity_domain) {
+ props = kfd_alloc_struct(props);
+ if (!props)
+ return -ENOMEM;
+
+ props->node_from = id_from;
+ props->node_to = id_to;
+ props->ver_maj = iolink->version_major;
+ props->ver_min = iolink->version_minor;
+ props->iolink_type = iolink->io_interface_type;
+
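+ /* Use a fixed weight for PCIe links; otherwise derive the
+ * weight from the NUMA distance between the two proximity
+ * domains.
+ */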
+ if (props->iolink_type == CRAT_IOLINK_TYPE_PCIEXPRESS)
+ props->weight = 20;
+ else
+ props->weight = node_distance(id_from, id_to);
+
+ props->min_latency = iolink->minimum_latency;
+ props->max_latency = iolink->maximum_latency;
+ props->min_bandwidth = iolink->minimum_bandwidth_mbs;
+ props->max_bandwidth = iolink->maximum_bandwidth_mbs;
+ props->rec_transfer_size =
+ iolink->recommended_transfer_size;
+
+ dev->io_link_count++;
+ dev->node_props.io_links_count++;
+ list_add_tail(&props->list, &dev->io_link_props);
+ break;
+ }
+ }
+
+ /* CPU topology is created before GPUs are detected, so CPU->GPU
+ * links are not built at that time. If a PCIe type is discovered, it
+ * means a GPU was detected and we are adding the GPU->CPU link to the
+ * topology. At the same time, also add the corresponding CPU->GPU
+ * link.
+ */
+ if (props && props->iolink_type == CRAT_IOLINK_TYPE_PCIEXPRESS) {
+ cpu_dev = kfd_topology_device_by_proximity_domain(id_to);
+ if (!cpu_dev)
+ return -ENODEV;
+ /* same everything but the other direction */
+ props2 = kmemdup(props, sizeof(*props2), GFP_KERNEL);
+ if (!props2)
+ return -ENOMEM;
+ props2->node_from = id_to;
+ props2->node_to = id_from;
+ props2->kobj = NULL;
+ cpu_dev->io_link_count++;
+ cpu_dev->node_props.io_links_count++;
+ list_add_tail(&props2->list, &cpu_dev->io_link_props);
+ }
+
+ return 0;
+}
+
+/* kfd_parse_subtype - Parse a subtype entry and attach it to the matching
+ * topology device in device_list
+ * @sub_type_hdr - subtype section of crat_image
+ * @device_list - list of topology devices present in this crat_image
+ */
+static int kfd_parse_subtype(struct crat_subtype_generic *sub_type_hdr,
+ struct list_head *device_list)
+{
+ struct crat_subtype_computeunit *cu;
+ struct crat_subtype_memory *mem;
+ struct crat_subtype_cache *cache;
+ struct crat_subtype_iolink *iolink;
+ int ret = 0;
+
+ switch (sub_type_hdr->type) {
+ case CRAT_SUBTYPE_COMPUTEUNIT_AFFINITY:
+ cu = (struct crat_subtype_computeunit *)sub_type_hdr;
+ ret = kfd_parse_subtype_cu(cu, device_list);
+ break;
+ case CRAT_SUBTYPE_MEMORY_AFFINITY:
+ mem = (struct crat_subtype_memory *)sub_type_hdr;
+ ret = kfd_parse_subtype_mem(mem, device_list);
+ break;
+ case CRAT_SUBTYPE_CACHE_AFFINITY:
+ cache = (struct crat_subtype_cache *)sub_type_hdr;
+ ret = kfd_parse_subtype_cache(cache, device_list);
+ break;
+ case CRAT_SUBTYPE_TLB_AFFINITY:
+ /*
+ * For now, nothing to do here
+ */
+ pr_debug("Found TLB entry in CRAT table (not processing)\n");
+ break;
+ case CRAT_SUBTYPE_CCOMPUTE_AFFINITY:
+ /*
+ * For now, nothing to do here
+ */
+ pr_debug("Found CCOMPUTE entry in CRAT table (not processing)\n");
+ break;
+ case CRAT_SUBTYPE_IOLINK_AFFINITY:
+ iolink = (struct crat_subtype_iolink *)sub_type_hdr;
+ ret = kfd_parse_subtype_iolink(iolink, device_list);
+ break;
+ default:
+ pr_warn("Unknown subtype %d in CRAT\n",
+ sub_type_hdr->type);
+ }
+
+ return ret;
+}
+
+/* kfd_parse_crat_table - parse CRAT table. For each node present in CRAT
+ * create a kfd_topology_device and add in to device_list. Also parse
+ * CRAT subtypes and attach it to appropriate kfd_topology_device
+ * @crat_image - input image containing CRAT
+ * @device_list - [OUT] list of kfd_topology_device generated after
+ * parsing crat_image
+ * @proximity_domain - Proximity domain of the first device in the table
+ *
+ * Return - 0 if successful else -ve value
+ */
+int kfd_parse_crat_table(void *crat_image, struct list_head *device_list,
+ uint32_t proximity_domain)
+{
+ struct kfd_topology_device *top_dev = NULL;
+ struct crat_subtype_generic *sub_type_hdr;
+ uint16_t node_id;
+ int ret = 0;
+ struct crat_header *crat_table = (struct crat_header *)crat_image;
+ uint16_t num_nodes;
+ uint32_t image_len;
+
+ if (!crat_image)
+ return -EINVAL;
+
+ if (!list_empty(device_list)) {
+ pr_warn("Error device list should be empty\n");
+ return -EINVAL;
+ }
+
+ num_nodes = crat_table->num_domains;
+ image_len = crat_table->length;
+
+ pr_info("Parsing CRAT table with %d nodes\n", num_nodes);
+
+ for (node_id = 0; node_id < num_nodes; node_id++) {
+ top_dev = kfd_create_topology_device(device_list);
+ if (!top_dev)
+ break;
+ top_dev->proximity_domain = proximity_domain++;
+ }
+
+ if (!top_dev) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
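+ /* Record the OEM identification from the CRAT header on the last
+ * created topology device.
+ */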
+ memcpy(top_dev->oem_id, crat_table->oem_id, CRAT_OEMID_LENGTH);
+ memcpy(top_dev->oem_table_id, crat_table->oem_table_id,
+ CRAT_OEMTABLEID_LENGTH);
+ top_dev->oem_revision = crat_table->oem_revision;
+
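+ /* Walk every enabled subtype entry in the CRAT image and attach it to
+ * the matching topology device.
+ */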
+ sub_type_hdr = (struct crat_subtype_generic *)(crat_table+1);
+ while ((char *)sub_type_hdr + sizeof(struct crat_subtype_generic) <
+ ((char *)crat_image) + image_len) {
+ if (sub_type_hdr->flags & CRAT_SUBTYPE_FLAGS_ENABLED) {
+ ret = kfd_parse_subtype(sub_type_hdr, device_list);
+ if (ret)
+ break;
+ }
+
+ sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
+ sub_type_hdr->length);
+ }
+
+err:
+ if (ret)
+ kfd_release_topology_device_list(device_list);
+
+ return ret;
+}
+
+/* Helper function. See kfd_fill_gpu_cache_info for parameter description */
+static int fill_in_pcache(struct crat_subtype_cache *pcache,
+ struct kfd_gpu_cache_info *pcache_info,
+ struct kfd_cu_info *cu_info,
+ int mem_available,
+ int cu_bitmask,
+ int cache_type, unsigned int cu_processor_id,
+ int cu_block)
+{
+ unsigned int cu_sibling_map_mask;
+ int first_active_cu;
+
+ /* First check if enough memory is available */
+ if (sizeof(struct crat_subtype_cache) > mem_available)
+ return -ENOMEM;
+
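+ /* Build the mask of CUs that share this cache instance, relative to
+ * the current CU block.
+ */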
+ cu_sibling_map_mask = cu_bitmask;
+ cu_sibling_map_mask >>= cu_block;
+ cu_sibling_map_mask &=
+ ((1 << pcache_info[cache_type].num_cu_shared) - 1);
+ first_active_cu = ffs(cu_sibling_map_mask);
+
+ /* A CU could be inactive. For a shared cache, find the first active
+ * CU; for a non-shared cache, check whether the single CU is active.
+ * If no CU is active, skip this entry.
+ */
+ if (first_active_cu) {
+ memset(pcache, 0, sizeof(struct crat_subtype_cache));
+ pcache->type = CRAT_SUBTYPE_CACHE_AFFINITY;
+ pcache->length = sizeof(struct crat_subtype_cache);
+ pcache->flags = pcache_info[cache_type].flags;
+ pcache->processor_id_low = cu_processor_id
+ + (first_active_cu - 1);
+ pcache->cache_level = pcache_info[cache_type].cache_level;
+ pcache->cache_size = pcache_info[cache_type].cache_size;
+
+ /* Sibling map is w.r.t processor_id_low, so shift out
+ * inactive CU
+ */
+ cu_sibling_map_mask =
+ cu_sibling_map_mask >> (first_active_cu - 1);
+
+ pcache->sibling_map[0] = (uint8_t)(cu_sibling_map_mask & 0xFF);
+ pcache->sibling_map[1] =
+ (uint8_t)((cu_sibling_map_mask >> 8) & 0xFF);
+ pcache->sibling_map[2] =
+ (uint8_t)((cu_sibling_map_mask >> 16) & 0xFF);
+ pcache->sibling_map[3] =
+ (uint8_t)((cu_sibling_map_mask >> 24) & 0xFF);
+ return 0;
+ }
+ return 1;
+}
+
+/* kfd_fill_gpu_cache_info - Fill GPU cache info using kfd_gpu_cache_info
+ * tables
+ *
+ * @kdev - [IN] GPU device
+ * @gpu_processor_id - [IN] GPU processor ID to which these caches
+ * associate
+ * @available_size - [IN] Amount of memory available in pcache
+ * @cu_info - [IN] Compute Unit info obtained from KGD
+ * @pcache - [OUT] memory into which cache data is to be filled in.
+ * @size_filled - [OUT] amount of data used up in pcache.
+ * @num_of_entries - [OUT] number of caches added
+ */
+static int kfd_fill_gpu_cache_info(struct kfd_dev *kdev,
+ int gpu_processor_id,
+ int available_size,
+ struct kfd_cu_info *cu_info,
+ struct crat_subtype_cache *pcache,
+ int *size_filled,
+ int *num_of_entries)
+{
+ struct kfd_gpu_cache_info *pcache_info;
+ int num_of_cache_types = 0;
+ int i, j, k;
+ int ct = 0;
+ int mem_available = available_size;
+ unsigned int cu_processor_id;
+ int ret;
+
+ switch (kdev->device_info->asic_family) {
+ case CHIP_KAVERI:
+ pcache_info = kaveri_cache_info;
+ num_of_cache_types = ARRAY_SIZE(kaveri_cache_info);
+ break;
+ case CHIP_HAWAII:
+ pcache_info = hawaii_cache_info;
+ num_of_cache_types = ARRAY_SIZE(hawaii_cache_info);
+ break;
+ case CHIP_CARRIZO:
+ pcache_info = carrizo_cache_info;
+ num_of_cache_types = ARRAY_SIZE(carrizo_cache_info);
+ break;
+ case CHIP_TONGA:
+ pcache_info = tonga_cache_info;
+ num_of_cache_types = ARRAY_SIZE(tonga_cache_info);
+ break;
+ case CHIP_FIJI:
+ pcache_info = fiji_cache_info;
+ num_of_cache_types = ARRAY_SIZE(fiji_cache_info);
+ break;
+ case CHIP_POLARIS10:
+ pcache_info = polaris10_cache_info;
+ num_of_cache_types = ARRAY_SIZE(polaris10_cache_info);
+ break;
+ case CHIP_POLARIS11:
+ pcache_info = polaris11_cache_info;
+ num_of_cache_types = ARRAY_SIZE(polaris11_cache_info);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ *size_filled = 0;
+ *num_of_entries = 0;
+
+ /* For each cache type listed in the kfd_gpu_cache_info table, go
+ * through all available Compute Units. If num_cu_shared is 1, the
+ * [i,j,k] loop visits every CU; otherwise it visits only one CU per
+ * group of CUs that share the cache.
+ */
+
+ for (ct = 0; ct < num_of_cache_types; ct++) {
+ cu_processor_id = gpu_processor_id;
+ for (i = 0; i < cu_info->num_shader_engines; i++) {
+ for (j = 0; j < cu_info->num_shader_arrays_per_engine;
+ j++) {
+ for (k = 0; k < cu_info->num_cu_per_sh;
+ k += pcache_info[ct].num_cu_shared) {
+
+ ret = fill_in_pcache(pcache,
+ pcache_info,
+ cu_info,
+ mem_available,
+ cu_info->cu_bitmap[i][j],
+ ct,
+ cu_processor_id,
+ k);
+
+ if (ret < 0)
+ break;
+
+ if (!ret) {
+ pcache++;
+ (*num_of_entries)++;
+ mem_available -=
+ sizeof(*pcache);
+ (*size_filled) +=
+ sizeof(*pcache);
+ }
+
+ /* Move to next CU block */
+ cu_processor_id +=
+ pcache_info[ct].num_cu_shared;
+ }
+ }
+ }
+ }
+
+ pr_debug("Added [%d] GPU cache entries\n", *num_of_entries);
+
+ return 0;
+}
+
+/*
+ * kfd_create_crat_image_acpi - Allocates memory for CRAT image and
+ * copies CRAT from ACPI (if available).
+ * NOTE: Call kfd_destroy_crat_image to free CRAT image memory
+ *
+ * @crat_image: CRAT read from ACPI. If no CRAT in ACPI then
+ * crat_image will be NULL
+ * @size: [OUT] size of crat_image
+ *
+ * Return 0 if successful else return error code
+ */
+int kfd_create_crat_image_acpi(void **crat_image, size_t *size)
+{
+ struct acpi_table_header *crat_table;
+ acpi_status status;
+ void *pcrat_image;
+
+ if (!crat_image)
+ return -EINVAL;
+
+ *crat_image = NULL;
+
+ /* Fetch the CRAT table from ACPI */
+ status = acpi_get_table(CRAT_SIGNATURE, 0, &crat_table);
+ if (status == AE_NOT_FOUND) {
+ pr_warn("CRAT table not found\n");
+ return -ENODATA;
+ } else if (ACPI_FAILURE(status)) {
+ const char *err = acpi_format_exception(status);
+
+ pr_err("CRAT table error: %s\n", err);
+ return -EINVAL;
+ }
+
+ if (ignore_crat) {
+ pr_info("CRAT table disabled by module option\n");
+ return -ENODATA;
+ }
+
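+ /* Duplicate the table so the caller owns an independent copy */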
+ pcrat_image = kmalloc(crat_table->length, GFP_KERNEL);
+ if (!pcrat_image)
+ return -ENOMEM;
+
+ memcpy(pcrat_image, crat_table, crat_table->length);
+
+ *crat_image = pcrat_image;
+ *size = crat_table->length;
+
+ return 0;
+}
+
+/* Memory required to create a Virtual CRAT.
+ * Since there is no easy way to predict the amount of memory required, the
+ * following amounts are allocated for the CPU and GPU Virtual CRAT. They are
+ * expected to cover all known conditions, and additional checks in the code
+ * ensure we never write beyond the allocated size.
+ */
+#define VCRAT_SIZE_FOR_CPU (2 * PAGE_SIZE)
+#define VCRAT_SIZE_FOR_GPU (3 * PAGE_SIZE)
+
+/* kfd_fill_cu_for_cpu - Fill in compute unit info for the given CPU NUMA node
+ *
+ * @numa_node_id: CPU NUMA node id
+ * @avail_size: Remaining available size in the CRAT image
+ * @proximity_domain: proximity domain to report for this node
+ * @sub_type_hdr: Memory into which compute info will be filled in
+ *
+ * Return 0 if successful else return -ve value
+ */
+static int kfd_fill_cu_for_cpu(int numa_node_id, int *avail_size,
+ int proximity_domain,
+ struct crat_subtype_computeunit *sub_type_hdr)
+{
+ const struct cpumask *cpumask;
+
+ *avail_size -= sizeof(struct crat_subtype_computeunit);
+ if (*avail_size < 0)
+ return -ENOMEM;
+
+ memset(sub_type_hdr, 0, sizeof(struct crat_subtype_computeunit));
+
+ /* Fill in subtype header data */
+ sub_type_hdr->type = CRAT_SUBTYPE_COMPUTEUNIT_AFFINITY;
+ sub_type_hdr->length = sizeof(struct crat_subtype_computeunit);
+ sub_type_hdr->flags = CRAT_SUBTYPE_FLAGS_ENABLED;
+
+ cpumask = cpumask_of_node(numa_node_id);
+
+ /* Fill in CU data */
+ sub_type_hdr->flags |= CRAT_CU_FLAGS_CPU_PRESENT;
+ sub_type_hdr->proximity_domain = proximity_domain;
+ sub_type_hdr->processor_id_low = kfd_numa_node_to_apic_id(numa_node_id);
+ if (sub_type_hdr->processor_id_low == -1)
+ return -EINVAL;
+
+ sub_type_hdr->num_cpu_cores = cpumask_weight(cpumask);
+
+ return 0;
+}
+
+/* kfd_fill_mem_info_for_cpu - Fill in memory info for the given CPU NUMA node
+ *
+ * @numa_node_id: CPU NUMA node id
+ * @avail_size: Remaining available size in the CRAT image
+ * @proximity_domain: proximity domain to report for this node
+ * @sub_type_hdr: Memory into which memory info will be filled in
+ *
+ * Return 0 if successful else return -ve value
+ */
+static int kfd_fill_mem_info_for_cpu(int numa_node_id, int *avail_size,
+ int proximity_domain,
+ struct crat_subtype_memory *sub_type_hdr)
+{
+ uint64_t mem_in_bytes = 0;
+ pg_data_t *pgdat;
+ int zone_type;
+
+ *avail_size -= sizeof(struct crat_subtype_memory);
+ if (*avail_size < 0)
+ return -ENOMEM;
+
+ memset(sub_type_hdr, 0, sizeof(struct crat_subtype_memory));
+
+ /* Fill in subtype header data */
+ sub_type_hdr->type = CRAT_SUBTYPE_MEMORY_AFFINITY;
+ sub_type_hdr->length = sizeof(struct crat_subtype_memory);
+ sub_type_hdr->flags = CRAT_SUBTYPE_FLAGS_ENABLED;
+
+ /* Fill in Memory Subunit data */
+
+ /* Unlike si_meminfo, si_meminfo_node is not exported, so the
+ * following lines are duplicated from the si_meminfo_node()
+ * function.
+ */
+ pgdat = NODE_DATA(numa_node_id);
+ for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++)
+ mem_in_bytes += pgdat->node_zones[zone_type].managed_pages;
+ mem_in_bytes <<= PAGE_SHIFT;
+
+ sub_type_hdr->length_low = lower_32_bits(mem_in_bytes);
+ sub_type_hdr->length_high = upper_32_bits(mem_in_bytes);
+ sub_type_hdr->proximity_domain = proximity_domain;
+
+ return 0;
+}
+
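+/* kfd_fill_iolink_info_for_cpu - Fill in IO link info for the given CPU NUMA
+ * node. One IO link subtype is created for every other online NUMA node.
+ *
+ * @numa_node_id: CPU NUMA node id
+ * @avail_size: Remaining available size in the CRAT image
+ * @num_entries: [OUT] number of IO link entries created
+ * @sub_type_hdr: Memory into which IO link info will be filled in
+ *
+ * Return 0 if successful else return -ve value
+ */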
+static int kfd_fill_iolink_info_for_cpu(int numa_node_id, int *avail_size,
+ uint32_t *num_entries,
+ struct crat_subtype_iolink *sub_type_hdr)
+{
+ int nid;
+ struct cpuinfo_x86 *c = &cpu_data(0);
+ uint8_t link_type;
+
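+ /* Pick the CPU interconnect type based on the boot CPU vendor:
+ * HyperTransport for AMD CPUs, QPI otherwise.
+ */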
+ if (c->x86_vendor == X86_VENDOR_AMD)
+ link_type = CRAT_IOLINK_TYPE_HYPERTRANSPORT;
+ else
+ link_type = CRAT_IOLINK_TYPE_QPI_1_1;
+
+ *num_entries = 0;
+
+ /* Create IO links from this node to other CPU nodes */
+ for_each_online_node(nid) {
+ if (nid == numa_node_id) /* node itself */
+ continue;
+
+ *avail_size -= sizeof(struct crat_subtype_iolink);
+ if (*avail_size < 0)
+ return -ENOMEM;
+
+ memset(sub_type_hdr, 0, sizeof(struct crat_subtype_iolink));
+
+ /* Fill in subtype header data */
+ sub_type_hdr->type = CRAT_SUBTYPE_IOLINK_AFFINITY;
+ sub_type_hdr->length = sizeof(struct crat_subtype_iolink);
+ sub_type_hdr->flags = CRAT_SUBTYPE_FLAGS_ENABLED;
+
+ /* Fill in IO link data */
+ sub_type_hdr->proximity_domain_from = numa_node_id;
+ sub_type_hdr->proximity_domain_to = nid;
+ sub_type_hdr->io_interface_type = link_type;
+
+ (*num_entries)++;
+ sub_type_hdr++;
+ }
+
+ return 0;
+}
+
+/* kfd_create_vcrat_image_cpu - Create Virtual CRAT for CPU
+ *
+ * @pcrat_image: Fill in VCRAT for CPU
+ * @size: [IN] allocated size of crat_image.
+ * [OUT] actual size of data filled in crat_image
+ */
+static int kfd_create_vcrat_image_cpu(void *pcrat_image, size_t *size)
+{
+ struct crat_header *crat_table = (struct crat_header *)pcrat_image;
+ struct acpi_table_header *acpi_table;
+ acpi_status status;
+ struct crat_subtype_generic *sub_type_hdr;
+ int avail_size = *size;
+ int numa_node_id;
+ uint32_t entries = 0;
+ int ret = 0;
+
+ if (!pcrat_image || avail_size < VCRAT_SIZE_FOR_CPU)
+ return -EINVAL;
+
+ /* Fill in CRAT Header.
+ * Modify length and total_entries as subunits are added.
+ */
+ avail_size -= sizeof(struct crat_header);
+ if (avail_size < 0)
+ return -ENOMEM;
+
+ memset(crat_table, 0, sizeof(struct crat_header));
+ memcpy(&crat_table->signature, CRAT_SIGNATURE,
+ sizeof(crat_table->signature));
+ crat_table->length = sizeof(struct crat_header);
+
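+ /* Borrow OEM identification for the virtual CRAT from the ACPI DSDT,
+ * if one is present.
+ */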
+ status = acpi_get_table("DSDT", 0, &acpi_table);
+ if (status == AE_NOT_FOUND)
+ pr_warn("DSDT table not found for OEM information\n");
+ else {
+ crat_table->oem_revision = acpi_table->revision;
+ memcpy(crat_table->oem_id, acpi_table->oem_id,
+ CRAT_OEMID_LENGTH);
+ memcpy(crat_table->oem_table_id, acpi_table->oem_table_id,
+ CRAT_OEMTABLEID_LENGTH);
+ }
+ crat_table->total_entries = 0;
+ crat_table->num_domains = 0;
+
+ sub_type_hdr = (struct crat_subtype_generic *)(crat_table+1);
+
+ for_each_online_node(numa_node_id) {
+ if (kfd_numa_node_to_apic_id(numa_node_id) == -1)
+ continue;
+
+ /* Fill in Subtype: Compute Unit */
+ ret = kfd_fill_cu_for_cpu(numa_node_id, &avail_size,
+ crat_table->num_domains,
+ (struct crat_subtype_computeunit *)sub_type_hdr);
+ if (ret < 0)
+ return ret;
+ crat_table->length += sub_type_hdr->length;
+ crat_table->total_entries++;
+
+ sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
+ sub_type_hdr->length);
+
+ /* Fill in Subtype: Memory */
+ ret = kfd_fill_mem_info_for_cpu(numa_node_id, &avail_size,
+ crat_table->num_domains,
+ (struct crat_subtype_memory *)sub_type_hdr);
+ if (ret < 0)
+ return ret;
+ crat_table->length += sub_type_hdr->length;
+ crat_table->total_entries++;
+
+ sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
+ sub_type_hdr->length);
+
+ /* Fill in Subtype: IO Link */
+ ret = kfd_fill_iolink_info_for_cpu(numa_node_id, &avail_size,
+ &entries,
+ (struct crat_subtype_iolink *)sub_type_hdr);
+ if (ret < 0)
+ return ret;
+ crat_table->length += (sub_type_hdr->length * entries);
+ crat_table->total_entries += entries;
+
+ sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
+ sub_type_hdr->length * entries);
+
+ crat_table->num_domains++;
+ }
+
+ /* TODO: Add cache Subtype for CPU.
+ * Currently, CPU cache information is available in function
+ * detect_cache_attributes(cpu) defined in the file
+ * ./arch/x86/kernel/cpu/intel_cacheinfo.c. That function is not
+ * exported, so the code would have to be duplicated here to obtain
+ * the same information.
+ */
+
+ *size = crat_table->length;
+ pr_info("Virtual CRAT table created for CPU\n");
+
+ return 0;
+}
+
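+/* kfd_fill_gpu_memory_affinity - Fill in a memory affinity subtype for the
+ * GPU's local memory
+ *
+ * @avail_size: Remaining available size in the CRAT image
+ * @kdev: GPU device
+ * @type: heap/visibility type to report for this memory
+ * @size: size of the memory range in bytes
+ * @sub_type_hdr: Memory into which memory info will be filled in
+ * @proximity_domain: proximity domain of the GPU node
+ * @local_mem_info: local memory info obtained from KGD
+ *
+ * Return 0 if successful else return -ve value
+ */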
+static int kfd_fill_gpu_memory_affinity(int *avail_size,
+ struct kfd_dev *kdev, uint8_t type, uint64_t size,
+ struct crat_subtype_memory *sub_type_hdr,
+ uint32_t proximity_domain,
+ const struct kfd_local_mem_info *local_mem_info)
+{
+ *avail_size -= sizeof(struct crat_subtype_memory);
+ if (*avail_size < 0)
+ return -ENOMEM;
+
+ memset((void *)sub_type_hdr, 0, sizeof(struct crat_subtype_memory));
+ sub_type_hdr->type = CRAT_SUBTYPE_MEMORY_AFFINITY;
+ sub_type_hdr->length = sizeof(struct crat_subtype_memory);
+ sub_type_hdr->flags |= CRAT_SUBTYPE_FLAGS_ENABLED;
+
+ sub_type_hdr->proximity_domain = proximity_domain;
+
+ pr_debug("Fill gpu memory affinity - type 0x%x size 0x%llx\n",
+ type, size);
+
+ sub_type_hdr->length_low = lower_32_bits(size);
+ sub_type_hdr->length_high = upper_32_bits(size);
+
+ sub_type_hdr->width = local_mem_info->vram_width;
+ sub_type_hdr->visibility_type = type;
+
+ return 0;
+}
+
+/* kfd_fill_gpu_direct_io_link - Fill in direct io link from GPU
+ * to its NUMA node
+ * @avail_size: Remaining available size in the CRAT image
+ * @kdev - [IN] GPU device
+ * @sub_type_hdr: Memory into which io link info will be filled in
+ * @proximity_domain - proximity domain of the GPU node
+ *
+ * Return 0 if successful else return -ve value
+ */
+static int kfd_fill_gpu_direct_io_link(int *avail_size,
+ struct kfd_dev *kdev,
+ struct crat_subtype_iolink *sub_type_hdr,
+ uint32_t proximity_domain)
+{
+ *avail_size -= sizeof(struct crat_subtype_iolink);
+ if (*avail_size < 0)
+ return -ENOMEM;
+
+ memset((void *)sub_type_hdr, 0, sizeof(struct crat_subtype_iolink));
+
+ /* Fill in subtype header data */
+ sub_type_hdr->type = CRAT_SUBTYPE_IOLINK_AFFINITY;
+ sub_type_hdr->length = sizeof(struct crat_subtype_iolink);
+ sub_type_hdr->flags |= CRAT_SUBTYPE_FLAGS_ENABLED;
+
+ /* Fill in IOLINK subtype.
+ * TODO: Fill-in other fields of iolink subtype
+ */
+ sub_type_hdr->io_interface_type = CRAT_IOLINK_TYPE_PCIEXPRESS;
+ sub_type_hdr->proximity_domain_from = proximity_domain;
+#ifdef CONFIG_NUMA
+ if (kdev->pdev->dev.numa_node == NUMA_NO_NODE)
+ sub_type_hdr->proximity_domain_to = 0;
+ else
+ sub_type_hdr->proximity_domain_to = kdev->pdev->dev.numa_node;
+#else
+ sub_type_hdr->proximity_domain_to = 0;
+#endif
+ return 0;
+}
+
+/* kfd_create_vcrat_image_gpu - Create Virtual CRAT for a GPU
+ *
+ * @pcrat_image: Memory into which the GPU VCRAT is filled in
+ * @size: [IN] allocated size of crat_image.
+ *        [OUT] actual size of data filled in crat_image
+ * @kdev: GPU device for which the VCRAT is created
+ * @proximity_domain: proximity domain of the GPU node
+ */
+static int kfd_create_vcrat_image_gpu(void *pcrat_image,
+ size_t *size, struct kfd_dev *kdev,
+ uint32_t proximity_domain)
+{
+ struct crat_header *crat_table = (struct crat_header *)pcrat_image;
+ struct crat_subtype_generic *sub_type_hdr;
+ struct crat_subtype_computeunit *cu;
+ struct kfd_cu_info cu_info;
+ struct amd_iommu_device_info iommu_info;
+ int avail_size = *size;
+ uint32_t total_num_of_cu;
+ int num_of_cache_entries = 0;
+ int cache_mem_filled = 0;
+ int ret = 0;
+ const u32 required_iommu_flags = AMD_IOMMU_DEVICE_FLAG_ATS_SUP |
+ AMD_IOMMU_DEVICE_FLAG_PRI_SUP |
+ AMD_IOMMU_DEVICE_FLAG_PASID_SUP;
+ struct kfd_local_mem_info local_mem_info;
+
+ if (!pcrat_image || avail_size < VCRAT_SIZE_FOR_GPU)
+ return -EINVAL;
+
+ /* Fill the CRAT Header.
+ * Modify length and total_entries as subunits are added.
+ */
+ avail_size -= sizeof(struct crat_header);
+ if (avail_size < 0)
+ return -ENOMEM;
+
+ memset(crat_table, 0, sizeof(struct crat_header));
+
+ memcpy(&crat_table->signature, CRAT_SIGNATURE,
+ sizeof(crat_table->signature));
+ /* Change length as we add more subtypes */
+ crat_table->length = sizeof(struct crat_header);
+ crat_table->num_domains = 1;
+ crat_table->total_entries = 0;
+
+ /* Fill in Subtype: Compute Unit
+ * First fill in the sub type header and then sub type data
+ */
+ avail_size -= sizeof(struct crat_subtype_computeunit);
+ if (avail_size < 0)
+ return -ENOMEM;
+
+ sub_type_hdr = (struct crat_subtype_generic *)(crat_table + 1);
+ memset(sub_type_hdr, 0, sizeof(struct crat_subtype_computeunit));
+
+ sub_type_hdr->type = CRAT_SUBTYPE_COMPUTEUNIT_AFFINITY;
+ sub_type_hdr->length = sizeof(struct crat_subtype_computeunit);
+ sub_type_hdr->flags = CRAT_SUBTYPE_FLAGS_ENABLED;
+
+ /* Fill CU subtype data */
+ cu = (struct crat_subtype_computeunit *)sub_type_hdr;
+ cu->flags |= CRAT_CU_FLAGS_GPU_PRESENT;
+ cu->proximity_domain = proximity_domain;
+
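+ /* Query the CU configuration from KGD and translate it into the CRAT
+ * compute unit fields.
+ */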
+ kdev->kfd2kgd->get_cu_info(kdev->kgd, &cu_info);
+ cu->num_simd_per_cu = cu_info.simd_per_cu;
+ cu->num_simd_cores = cu_info.simd_per_cu * cu_info.cu_active_number;
+ cu->max_waves_simd = cu_info.max_waves_per_simd;
+
+ cu->wave_front_size = cu_info.wave_front_size;
+ cu->array_count = cu_info.num_shader_arrays_per_engine *
+ cu_info.num_shader_engines;
+ total_num_of_cu = (cu->array_count * cu_info.num_cu_per_sh);
+ cu->processor_id_low = get_and_inc_gpu_processor_id(total_num_of_cu);
+ cu->num_cu_per_array = cu_info.num_cu_per_sh;
+ cu->max_slots_scatch_cu = cu_info.max_scratch_slots_per_cu;
+ cu->num_banks = cu_info.num_shader_engines;
+ cu->lds_size_in_kb = cu_info.lds_size;
+
+ cu->hsa_capability = 0;
+
+ /* Check if this node supports IOMMU. During parsing this flag will
+ * translate to HSA_CAP_ATS_PRESENT
+ */
+ iommu_info.flags = 0;
+ if (amd_iommu_device_info(kdev->pdev, &iommu_info) == 0) {
+ if ((iommu_info.flags & required_iommu_flags) ==
+ required_iommu_flags)
+ cu->hsa_capability |= CRAT_CU_FLAGS_IOMMU_PRESENT;
+ }
+
+ crat_table->length += sub_type_hdr->length;
+ crat_table->total_entries++;
+
+ /* Fill in Subtype: Memory. Only on systems with large BAR (no
+ * private FB), report memory as public. On other systems
+ * report the total FB size (public+private) as a single
+ * private heap.
+ */
+ kdev->kfd2kgd->get_local_mem_info(kdev->kgd, &local_mem_info);
+ sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
+ sub_type_hdr->length);
+
+ if (local_mem_info.local_mem_size_private == 0)
+ ret = kfd_fill_gpu_memory_affinity(&avail_size,
+ kdev, HSA_MEM_HEAP_TYPE_FB_PUBLIC,
+ local_mem_info.local_mem_size_public,
+ (struct crat_subtype_memory *)sub_type_hdr,
+ proximity_domain,
+ &local_mem_info);
+ else
+ ret = kfd_fill_gpu_memory_affinity(&avail_size,
+ kdev, HSA_MEM_HEAP_TYPE_FB_PRIVATE,
+ local_mem_info.local_mem_size_public +
+ local_mem_info.local_mem_size_private,
+ (struct crat_subtype_memory *)sub_type_hdr,
+ proximity_domain,
+ &local_mem_info);
+ if (ret < 0)
+ return ret;
+
+ crat_table->length += sizeof(struct crat_subtype_memory);
+ crat_table->total_entries++;
+
+ /* TODO: Fill in cache information. This information is NOT readily
+ * available in KGD
+ */
+ sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
+ sub_type_hdr->length);
+ ret = kfd_fill_gpu_cache_info(kdev, cu->processor_id_low,
+ avail_size,
+ &cu_info,
+ (struct crat_subtype_cache *)sub_type_hdr,
+ &cache_mem_filled,
+ &num_of_cache_entries);
+
+ if (ret < 0)
+ return ret;
+
+ crat_table->length += cache_mem_filled;
+ crat_table->total_entries += num_of_cache_entries;
+ avail_size -= cache_mem_filled;
+
+ /* Fill in Subtype: IO_LINKS
+ * Only the direct link is added here, i.e. the link from the GPU to
+ * its NUMA node. Indirect links are added by userspace.
+ */
+ sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
+ cache_mem_filled);
+ ret = kfd_fill_gpu_direct_io_link(&avail_size, kdev,
+ (struct crat_subtype_iolink *)sub_type_hdr, proximity_domain);
+
+ if (ret < 0)
+ return ret;
+
+ crat_table->length += sub_type_hdr->length;
+ crat_table->total_entries++;
+
+ *size = crat_table->length;
+ pr_info("Virtual CRAT table created for GPU\n");
+
+ return ret;
+}
+
+/* kfd_create_crat_image_virtual - Allocates memory for CRAT image and
+ * creates a Virtual CRAT (VCRAT) image
+ *
+ * NOTE: Call kfd_destroy_crat_image to free CRAT image memory
+ *
+ * @crat_image: VCRAT image created because ACPI does not have a
+ * CRAT for this device
+ * @size: [OUT] size of virtual crat_image
+ * @flags: COMPUTE_UNIT_CPU - Create VCRAT for CPU device
+ * COMPUTE_UNIT_GPU - Create VCRAT for GPU
+ * (COMPUTE_UNIT_CPU | COMPUTE_UNIT_GPU) - Create VCRAT for APU
+ * -- this option is not currently implemented.
+ * The assumption is that all AMD APUs will have CRAT
+ * @kdev: Valid kfd_device required if flags contain COMPUTE_UNIT_GPU
+ * @proximity_domain: Proximity domain of the GPU node (used only for
+ *                    COMPUTE_UNIT_GPU)
+ *
+ * Return 0 if successful else return -ve value
+ */
+int kfd_create_crat_image_virtual(void **crat_image, size_t *size,
+ int flags, struct kfd_dev *kdev,
+ uint32_t proximity_domain)
+{
+ void *pcrat_image = NULL;
+ int ret = 0;
+
+ if (!crat_image)
+ return -EINVAL;
+
+ *crat_image = NULL;
+
+ /* Allocate VCRAT_SIZE_FOR_CPU bytes for a CPU virtual CRAT image and
+ * VCRAT_SIZE_FOR_GPU bytes for a GPU virtual CRAT image. This should
+ * cover all current conditions. A check ensures we never write beyond
+ * the allocated size.
+ */
+ switch (flags) {
+ case COMPUTE_UNIT_CPU:
+ pcrat_image = kmalloc(VCRAT_SIZE_FOR_CPU, GFP_KERNEL);
+ if (!pcrat_image)
+ return -ENOMEM;
+ *size = VCRAT_SIZE_FOR_CPU;
+ ret = kfd_create_vcrat_image_cpu(pcrat_image, size);
+ break;
+ case COMPUTE_UNIT_GPU:
+ if (!kdev)
+ return -EINVAL;
+ pcrat_image = kmalloc(VCRAT_SIZE_FOR_GPU, GFP_KERNEL);
+ if (!pcrat_image)
+ return -ENOMEM;
+ *size = VCRAT_SIZE_FOR_GPU;
+ ret = kfd_create_vcrat_image_gpu(pcrat_image, size, kdev,
+ proximity_domain);
+ break;
+ case (COMPUTE_UNIT_CPU | COMPUTE_UNIT_GPU):
+ /* TODO: */
+ ret = -EINVAL;
+ pr_err("VCRAT not implemented for APU\n");
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ if (!ret)
+ *crat_image = pcrat_image;
+ else
+ kfree(pcrat_image);
+
+ return ret;
+}
+
+
+/* kfd_destroy_crat_image
+ *
+ * @crat_image: [IN] - crat_image from kfd_create_crat_image_xxx(..)
+ *
+ */
+void kfd_destroy_crat_image(void *crat_image)
+{
+ kfree(crat_image);
+}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.h b/drivers/gpu/drm/amd/amdkfd/kfd_crat.h
index a374fa3d3ee6..b5cd182b9edd 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.h
@@ -44,6 +44,10 @@
#define CRAT_OEMID_64BIT_MASK ((1ULL << (CRAT_OEMID_LENGTH * 8)) - 1)
+/* Compute Unit flags */
+#define COMPUTE_UNIT_CPU (1 << 0) /* Create Virtual CRAT for CPU */
+#define COMPUTE_UNIT_GPU (1 << 1) /* Create Virtual CRAT for GPU */
+
struct crat_header {
uint32_t signature;
uint32_t length;
@@ -105,7 +109,7 @@ struct crat_subtype_computeunit {
uint8_t wave_front_size;
uint8_t num_banks;
uint16_t micro_engine_id;
- uint8_t num_arrays;
+ uint8_t array_count;
uint8_t num_cu_per_array;
uint8_t num_simd_per_cu;
uint8_t max_slots_scatch_cu;
@@ -127,13 +131,14 @@ struct crat_subtype_memory {
uint8_t length;
uint16_t reserved;
uint32_t flags;
- uint32_t promixity_domain;
+ uint32_t proximity_domain;
uint32_t base_addr_low;
uint32_t base_addr_high;
uint32_t length_low;
uint32_t length_high;
uint32_t width;
- uint8_t reserved2[CRAT_MEMORY_RESERVED_LENGTH];
+ uint8_t visibility_type; /* for virtual (dGPU) CRAT */
+ uint8_t reserved2[CRAT_MEMORY_RESERVED_LENGTH - 1];
};
/*
@@ -222,9 +227,12 @@ struct crat_subtype_ccompute {
/*
* HSA IO Link Affinity structure and definitions
*/
-#define CRAT_IOLINK_FLAGS_ENABLED 0x00000001
-#define CRAT_IOLINK_FLAGS_COHERENCY 0x00000002
-#define CRAT_IOLINK_FLAGS_RESERVED 0xfffffffc
+#define CRAT_IOLINK_FLAGS_ENABLED (1 << 0)
+#define CRAT_IOLINK_FLAGS_NON_COHERENT (1 << 1)
+#define CRAT_IOLINK_FLAGS_NO_ATOMICS_32_BIT (1 << 2)
+#define CRAT_IOLINK_FLAGS_NO_ATOMICS_64_BIT (1 << 3)
+#define CRAT_IOLINK_FLAGS_NO_PEER_TO_PEER_DMA (1 << 4)
+#define CRAT_IOLINK_FLAGS_RESERVED_MASK 0xffffffe0
/*
* IO interface types
@@ -232,10 +240,18 @@ struct crat_subtype_ccompute {
#define CRAT_IOLINK_TYPE_UNDEFINED 0
#define CRAT_IOLINK_TYPE_HYPERTRANSPORT 1
#define CRAT_IOLINK_TYPE_PCIEXPRESS 2
-#define CRAT_IOLINK_TYPE_OTHER 3
+#define CRAT_IOLINK_TYPE_AMBA 3
+#define CRAT_IOLINK_TYPE_MIPI 4
+#define CRAT_IOLINK_TYPE_QPI_1_1 5
+#define CRAT_IOLINK_TYPE_RESERVED1 6
+#define CRAT_IOLINK_TYPE_RESERVED2 7
+#define CRAT_IOLINK_TYPE_RAPID_IO 8
+#define CRAT_IOLINK_TYPE_INFINIBAND 9
+#define CRAT_IOLINK_TYPE_RESERVED3 10
+#define CRAT_IOLINK_TYPE_OTHER 11
#define CRAT_IOLINK_TYPE_MAX 255
-#define CRAT_IOLINK_RESERVED_LENGTH 24
+#define CRAT_IOLINK_RESERVED_LENGTH 24
struct crat_subtype_iolink {
uint8_t type;
@@ -291,4 +307,14 @@ struct cdit_header {
#pragma pack()
+struct kfd_dev;
+
+int kfd_create_crat_image_acpi(void **crat_image, size_t *size);
+void kfd_destroy_crat_image(void *crat_image);
+int kfd_parse_crat_table(void *crat_image, struct list_head *device_list,
+ uint32_t proximity_domain);
+int kfd_create_crat_image_virtual(void **crat_image, size_t *size,
+ int flags, struct kfd_dev *kdev,
+ uint32_t proximity_domain);
+
#endif /* KFD_CRAT_H_INCLUDED */
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
index c407f6bd9956..afb26f205d29 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
@@ -95,7 +95,7 @@ static int dbgdev_diq_submit_ib(struct kfd_dbgdev *dbgdev,
ib_packet->bitfields3.ib_base_hi = largep->u.high_part;
ib_packet->control = (1 << 23) | (1 << 31) |
- ((size_in_bytes / sizeof(uint32_t)) & 0xfffff);
+ ((size_in_bytes / 4) & 0xfffff);
ib_packet->bitfields5.pasid = pasid;
@@ -126,8 +126,7 @@ static int dbgdev_diq_submit_ib(struct kfd_dbgdev *dbgdev,
rm_packet->header.opcode = IT_RELEASE_MEM;
rm_packet->header.type = PM4_TYPE_3;
- rm_packet->header.count = sizeof(struct pm4__release_mem) /
- sizeof(unsigned int) - 2;
+ rm_packet->header.count = sizeof(struct pm4__release_mem) / 4 - 2;
rm_packet->bitfields2.event_type = CACHE_FLUSH_AND_INV_TS_EVENT;
rm_packet->bitfields2.event_index =
@@ -652,8 +651,7 @@ static int dbgdev_wave_control_diq(struct kfd_dbgdev *dbgdev,
packets_vec[0].header.opcode = IT_SET_UCONFIG_REG;
packets_vec[0].header.type = PM4_TYPE_3;
packets_vec[0].bitfields2.reg_offset =
- GRBM_GFX_INDEX / (sizeof(uint32_t)) -
- USERCONFIG_REG_BASE;
+ GRBM_GFX_INDEX / 4 - USERCONFIG_REG_BASE;
packets_vec[0].bitfields2.insert_vmid = 0;
packets_vec[0].reg_data[0] = reg_gfx_index.u32All;
@@ -661,8 +659,7 @@ static int dbgdev_wave_control_diq(struct kfd_dbgdev *dbgdev,
packets_vec[1].header.count = 1;
packets_vec[1].header.opcode = IT_SET_CONFIG_REG;
packets_vec[1].header.type = PM4_TYPE_3;
- packets_vec[1].bitfields2.reg_offset = SQ_CMD / (sizeof(uint32_t)) -
- AMD_CONFIG_REG_BASE;
+ packets_vec[1].bitfields2.reg_offset = SQ_CMD / 4 - AMD_CONFIG_REG_BASE;
packets_vec[1].bitfields2.vmid_shift = SQ_CMD_VMID_OFFSET;
packets_vec[1].bitfields2.insert_vmid = 1;
@@ -678,8 +675,7 @@ static int dbgdev_wave_control_diq(struct kfd_dbgdev *dbgdev,
packets_vec[2].ordinal1 = packets_vec[0].ordinal1;
packets_vec[2].bitfields2.reg_offset =
- GRBM_GFX_INDEX / (sizeof(uint32_t)) -
- USERCONFIG_REG_BASE;
+ GRBM_GFX_INDEX / 4 - USERCONFIG_REG_BASE;
packets_vec[2].bitfields2.insert_vmid = 0;
packets_vec[2].reg_data[0] = reg_gfx_index.u32All;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c b/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
new file mode 100644
index 000000000000..4bd6ebfaf425
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
@@ -0,0 +1,75 @@
+/*
+ * Copyright 2016-2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/debugfs.h>
+#include "kfd_priv.h"
+
+static struct dentry *debugfs_root;
+
+static int kfd_debugfs_open(struct inode *inode, struct file *file)
+{
+ int (*show)(struct seq_file *, void *) = inode->i_private;
+
+ return single_open(file, show, NULL);
+}
+
+static const struct file_operations kfd_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = kfd_debugfs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+void kfd_debugfs_init(void)
+{
+ struct dentry *ent;
+
+ debugfs_root = debugfs_create_dir("kfd", NULL);
+ if (!debugfs_root || debugfs_root == ERR_PTR(-ENODEV)) {
+ pr_warn("Failed to create kfd debugfs dir\n");
+ return;
+ }
+
+ ent = debugfs_create_file("mqds", S_IFREG | 0444, debugfs_root,
+ kfd_debugfs_mqds_by_process,
+ &kfd_debugfs_fops);
+ if (!ent)
+ pr_warn("Failed to create mqds in kfd debugfs\n");
+
+ ent = debugfs_create_file("hqds", S_IFREG | 0444, debugfs_root,
+ kfd_debugfs_hqds_by_device,
+ &kfd_debugfs_fops);
+ if (!ent)
+ pr_warn("Failed to create hqds in kfd debugfs\n");
+
+ ent = debugfs_create_file("rls", S_IFREG | 0444, debugfs_root,
+ kfd_debugfs_rls_by_device,
+ &kfd_debugfs_fops);
+ if (!ent)
+ pr_warn("Failed to create rls in kfd debugfs\n");
+}
+
+void kfd_debugfs_fini(void)
+{
+ debugfs_remove_recursive(debugfs_root);
+}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 621a3b53a038..a8fa33a08de3 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -27,6 +27,7 @@
#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
#include "kfd_pm4_headers_vi.h"
+#include "cwsr_trap_handler_gfx8.asm"
#define MQD_SIZE_ALIGNED 768
@@ -38,7 +39,8 @@ static const struct kfd_device_info kaveri_device_info = {
.ih_ring_entry_size = 4 * sizeof(uint32_t),
.event_interrupt_class = &event_interrupt_class_cik,
.num_of_watch_points = 4,
- .mqd_size_aligned = MQD_SIZE_ALIGNED
+ .mqd_size_aligned = MQD_SIZE_ALIGNED,
+ .supports_cwsr = false,
};
static const struct kfd_device_info carrizo_device_info = {
@@ -49,7 +51,8 @@ static const struct kfd_device_info carrizo_device_info = {
.ih_ring_entry_size = 4 * sizeof(uint32_t),
.event_interrupt_class = &event_interrupt_class_cik,
.num_of_watch_points = 4,
- .mqd_size_aligned = MQD_SIZE_ALIGNED
+ .mqd_size_aligned = MQD_SIZE_ALIGNED,
+ .supports_cwsr = true,
};
struct kfd_deviceid {
@@ -212,6 +215,17 @@ static int iommu_invalid_ppr_cb(struct pci_dev *pdev, int pasid,
return AMD_IOMMU_INV_PRI_RSP_INVALID;
}
+static void kfd_cwsr_init(struct kfd_dev *kfd)
+{
+ if (cwsr_enable && kfd->device_info->supports_cwsr) {
+ BUILD_BUG_ON(sizeof(cwsr_trap_gfx8_hex) > PAGE_SIZE);
+
+ kfd->cwsr_isa = cwsr_trap_gfx8_hex;
+ kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx8_hex);
+ kfd->cwsr_enabled = true;
+ }
+}
+
bool kgd2kfd_device_init(struct kfd_dev *kfd,
const struct kgd2kfd_shared_resources *gpu_resources)
{
@@ -224,6 +238,17 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
kfd->vm_info.vmid_num_kfd = kfd->vm_info.last_vmid_kfd
- kfd->vm_info.first_vmid_kfd + 1;
+ /* Verify the module parameter for the maximum number of mapped processes */
+ if ((hws_max_conc_proc < 0)
+ || (hws_max_conc_proc > kfd->vm_info.vmid_num_kfd)) {
+ dev_err(kfd_device,
+ "hws_max_conc_proc %d must be between 0 and %d, use %d instead\n",
+ hws_max_conc_proc, kfd->vm_info.vmid_num_kfd,
+ kfd->vm_info.vmid_num_kfd);
+ kfd->max_proc_per_quantum = kfd->vm_info.vmid_num_kfd;
+ } else
+ kfd->max_proc_per_quantum = hws_max_conc_proc;
+
/* calculate max size of mqds needed for queues */
size = max_num_of_queues_per_device *
kfd->device_info->mqd_size_aligned;
@@ -286,6 +311,8 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
goto device_iommu_pasid_error;
}
+ kfd_cwsr_init(kfd);
+
if (kfd_resume(kfd))
goto kfd_resume_error;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index e202921c150e..d0693fd8cbf8 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -149,8 +149,7 @@ static void deallocate_vmid(struct device_queue_manager *dqm,
static int create_queue_nocpsch(struct device_queue_manager *dqm,
struct queue *q,
- struct qcm_process_device *qpd,
- int *allocated_vmid)
+ struct qcm_process_device *qpd)
{
int retval;
@@ -170,9 +169,11 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
if (retval)
goto out_unlock;
}
- *allocated_vmid = qpd->vmid;
q->properties.vmid = qpd->vmid;
+ q->properties.tba_addr = qpd->tba_addr;
+ q->properties.tma_addr = qpd->tma_addr;
+
if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE)
retval = create_compute_queue_nocpsch(dqm, q, qpd);
else if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
@@ -181,10 +182,8 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
retval = -EINVAL;
if (retval) {
- if (list_empty(&qpd->queues_list)) {
+ if (list_empty(&qpd->queues_list))
deallocate_vmid(dqm, qpd, q);
- *allocated_vmid = 0;
- }
goto out_unlock;
}
@@ -809,16 +808,13 @@ static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
}
static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
- struct qcm_process_device *qpd, int *allocate_vmid)
+ struct qcm_process_device *qpd)
{
int retval;
struct mqd_manager *mqd;
retval = 0;
- if (allocate_vmid)
- *allocate_vmid = 0;
-
mutex_lock(&dqm->lock);
if (dqm->total_queue_count >= max_num_of_queues_per_device) {
@@ -846,6 +842,9 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
}
dqm->asic_ops.init_sdma_vm(dqm, q, qpd);
+
+ q->properties.tba_addr = qpd->tba_addr;
+ q->properties.tma_addr = qpd->tma_addr;
retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
&q->gart_mqd_addr, &q->properties);
if (retval)
@@ -1110,6 +1109,26 @@ out:
return retval;
}
+static int set_trap_handler(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd,
+ uint64_t tba_addr,
+ uint64_t tma_addr)
+{
+ uint64_t *tma;
+
+ if (dqm->dev->cwsr_enabled) {
+ /* Jump from CWSR trap handler to user trap */
+ tma = (uint64_t *)(qpd->cwsr_kaddr + KFD_CWSR_TMA_OFFSET);
+ tma[0] = tba_addr;
+ tma[1] = tma_addr;
+ } else {
+ qpd->tba_addr = tba_addr;
+ qpd->tma_addr = tma_addr;
+ }
+
+ return 0;
+}
+
static int process_termination_nocpsch(struct device_queue_manager *dqm,
struct qcm_process_device *qpd)
{
@@ -1241,6 +1260,7 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
dqm->ops.create_kernel_queue = create_kernel_queue_cpsch;
dqm->ops.destroy_kernel_queue = destroy_kernel_queue_cpsch;
dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
+ dqm->ops.set_trap_handler = set_trap_handler;
dqm->ops.process_termination = process_termination_cpsch;
break;
case KFD_SCHED_POLICY_NO_HWS:
@@ -1256,6 +1276,7 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
dqm->ops.initialize = initialize_nocpsch;
dqm->ops.uninitialize = uninitialize;
dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
+ dqm->ops.set_trap_handler = set_trap_handler;
dqm->ops.process_termination = process_termination_nocpsch;
break;
default:
@@ -1290,3 +1311,74 @@ void device_queue_manager_uninit(struct device_queue_manager *dqm)
dqm->ops.uninitialize(dqm);
kfree(dqm);
}
+
+#if defined(CONFIG_DEBUG_FS)
+
+static void seq_reg_dump(struct seq_file *m,
+ uint32_t (*dump)[2], uint32_t n_regs)
+{
+ uint32_t i, count;
+
+ for (i = 0, count = 0; i < n_regs; i++) {
+ if (count == 0 ||
+ dump[i-1][0] + sizeof(uint32_t) != dump[i][0]) {
+ seq_printf(m, "%s %08x: %08x",
+ i ? "\n" : "",
+ dump[i][0], dump[i][1]);
+ count = 7;
+ } else {
+ seq_printf(m, " %08x", dump[i][1]);
+ count--;
+ }
+ }
+
+ seq_puts(m, "\n");
+}
+
+int dqm_debugfs_hqds(struct seq_file *m, void *data)
+{
+ struct device_queue_manager *dqm = data;
+ uint32_t (*dump)[2], n_regs;
+ int pipe, queue;
+ int r = 0;
+
+ for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) {
+ int pipe_offset = pipe * get_queues_per_pipe(dqm);
+
+ for (queue = 0; queue < get_queues_per_pipe(dqm); queue++) {
+ if (!test_bit(pipe_offset + queue,
+ dqm->dev->shared_resources.queue_bitmap))
+ continue;
+
+ r = dqm->dev->kfd2kgd->hqd_dump(
+ dqm->dev->kgd, pipe, queue, &dump, &n_regs);
+ if (r)
+ break;
+
+ seq_printf(m, " CP Pipe %d, Queue %d\n",
+ pipe, queue);
+ seq_reg_dump(m, dump, n_regs);
+
+ kfree(dump);
+ }
+ }
+
+ for (pipe = 0; pipe < CIK_SDMA_ENGINE_NUM; pipe++) {
+ for (queue = 0; queue < CIK_SDMA_QUEUES_PER_ENGINE; queue++) {
+ r = dqm->dev->kfd2kgd->hqd_sdma_dump(
+ dqm->dev->kgd, pipe, queue, &dump, &n_regs);
+ if (r)
+ break;
+
+ seq_printf(m, " SDMA Engine %d, RLC %d\n",
+ pipe, queue);
+ seq_reg_dump(m, dump, n_regs);
+
+ kfree(dump);
+ }
+ }
+
+ return r;
+}
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
index 5b77cb69f732..c61b693bfa8c 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
@@ -84,8 +84,7 @@ struct device_process_node {
struct device_queue_manager_ops {
int (*create_queue)(struct device_queue_manager *dqm,
struct queue *q,
- struct qcm_process_device *qpd,
- int *allocate_vmid);
+ struct qcm_process_device *qpd);
int (*destroy_queue)(struct device_queue_manager *dqm,
struct qcm_process_device *qpd,
@@ -123,6 +122,11 @@ struct device_queue_manager_ops {
void __user *alternate_aperture_base,
uint64_t alternate_aperture_size);
+ int (*set_trap_handler)(struct device_queue_manager *dqm,
+ struct qcm_process_device *qpd,
+ uint64_t tba_addr,
+ uint64_t tma_addr);
+
int (*process_termination)(struct device_queue_manager *dqm,
struct qcm_process_device *qpd);
};
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
index feb76c235b1a..ebb4da14e3df 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
@@ -116,8 +116,7 @@ int kfd_doorbell_init(struct kfd_dev *kfd)
pr_debug("doorbell aperture size == 0x%08lX\n",
kfd->shared_resources.doorbell_aperture_size);
- pr_debug("doorbell kernel address == 0x%08lX\n",
- (uintptr_t)kfd->doorbell_kernel_ptr);
+ pr_debug("doorbell kernel address == %p\n", kfd->doorbell_kernel_ptr);
return 0;
}
@@ -194,8 +193,8 @@ u32 __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
pr_debug("Get kernel queue doorbell\n"
" doorbell offset == 0x%08X\n"
- " kernel address == 0x%08lX\n",
- *doorbell_off, (uintptr_t)(kfd->doorbell_kernel_ptr + inx));
+ " kernel address == %p\n",
+ *doorbell_off, (kfd->doorbell_kernel_ptr + inx));
return kfd->doorbell_kernel_ptr + inx;
}
@@ -215,7 +214,7 @@ inline void write_kernel_doorbell(u32 __iomem *db, u32 value)
{
if (db) {
writel(value, db);
- pr_debug("Writing %d to doorbell address 0x%p\n", value, db);
+ pr_debug("Writing %d to doorbell address %p\n", value, db);
}
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
index cb92d4b72400..93aae5c1e78b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
@@ -441,7 +441,7 @@ void kfd_signal_event_interrupt(unsigned int pasid, uint32_t partial_id,
/*
* Because we are called from arbitrary context (workqueue) as opposed
* to process context, kfd_process could attempt to exit while we are
- * running so the lookup function returns a locked process.
+ * running so the lookup function increments the process ref count.
*/
struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
@@ -493,7 +493,7 @@ void kfd_signal_event_interrupt(unsigned int pasid, uint32_t partial_id,
}
mutex_unlock(&p->event_mutex);
- mutex_unlock(&p->mutex);
+ kfd_unref_process(p);
}
static struct kfd_event_waiter *alloc_event_waiters(uint32_t num_events)
@@ -847,7 +847,7 @@ void kfd_signal_iommu_event(struct kfd_dev *dev, unsigned int pasid,
/*
* Because we are called from arbitrary context (workqueue) as opposed
* to process context, kfd_process could attempt to exit while we are
- * running so the lookup function returns a locked process.
+ * running so the lookup function increments the process ref count.
*/
struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
struct mm_struct *mm;
@@ -860,7 +860,7 @@ void kfd_signal_iommu_event(struct kfd_dev *dev, unsigned int pasid,
*/
mm = get_task_mm(p->lead_thread);
if (!mm) {
- mutex_unlock(&p->mutex);
+ kfd_unref_process(p);
return; /* Process is exiting */
}
@@ -903,7 +903,7 @@ void kfd_signal_iommu_event(struct kfd_dev *dev, unsigned int pasid,
&memory_exception_data);
mutex_unlock(&p->event_mutex);
- mutex_unlock(&p->mutex);
+ kfd_unref_process(p);
}
void kfd_signal_hw_exception_event(unsigned int pasid)
@@ -911,7 +911,7 @@ void kfd_signal_hw_exception_event(unsigned int pasid)
/*
* Because we are called from arbitrary context (workqueue) as opposed
* to process context, kfd_process could attempt to exit while we are
- * running so the lookup function returns a locked process.
+ * running so the lookup function increments the process ref count.
*/
struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
@@ -924,5 +924,5 @@ void kfd_signal_hw_exception_event(unsigned int pasid)
lookup_events_by_type_and_signal(p, KFD_EVENT_TYPE_HW_EXCEPTION, NULL);
mutex_unlock(&p->event_mutex);
- mutex_unlock(&p->mutex);
+ kfd_unref_process(p);
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
index c59384bbbc5f..7377513050e6 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
@@ -300,9 +300,14 @@ int kfd_init_apertures(struct kfd_process *process)
struct kfd_process_device *pdd;
/*Iterating over all devices*/
- while ((dev = kfd_topology_enum_kfd_devices(id)) != NULL &&
+ while (kfd_topology_enum_kfd_devices(id, &dev) == 0 &&
id < NUM_OF_SUPPORTED_GPUS) {
+ if (!dev) {
+ id++; /* Skip non GPU devices */
+ continue;
+ }
+
pdd = kfd_create_process_device_data(dev, process);
if (!pdd) {
pr_err("Failed to create process device data\n");
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
index 8b0c0645d7c0..5dc6567d4a13 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
@@ -218,7 +218,7 @@ static int acquire_packet_buffer(struct kernel_queue *kq,
rptr = *kq->rptr_kernel;
wptr = *kq->wptr_kernel;
queue_address = (unsigned int *)kq->pq_kernel_addr;
- queue_size_dwords = kq->queue->properties.queue_size / sizeof(uint32_t);
+ queue_size_dwords = kq->queue->properties.queue_size / 4;
pr_debug("rptr: %d\n", rptr);
pr_debug("wptr: %d\n", wptr);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_module.c b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
index f744caeaee04..3ac72bed4f31 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_module.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
@@ -50,6 +50,15 @@ module_param(sched_policy, int, 0444);
MODULE_PARM_DESC(sched_policy,
"Scheduling policy (0 = HWS (Default), 1 = HWS without over-subscription, 2 = Non-HWS (Used for debugging only)");
+int hws_max_conc_proc = 8;
+module_param(hws_max_conc_proc, int, 0444);
+MODULE_PARM_DESC(hws_max_conc_proc,
+ "Max # processes HWS can execute concurrently when sched_policy=0 (0 = no concurrency, #VMIDs for KFD = Maximum(default))");
+
+int cwsr_enable = 1;
+module_param(cwsr_enable, int, 0444);
+MODULE_PARM_DESC(cwsr_enable, "CWSR enable (0 = Off, 1 = On (Default))");
+
int max_num_of_queues_per_device = KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT;
module_param(max_num_of_queues_per_device, int, 0444);
MODULE_PARM_DESC(max_num_of_queues_per_device,
@@ -60,6 +69,11 @@ module_param(send_sigterm, int, 0444);
MODULE_PARM_DESC(send_sigterm,
"Send sigterm to HSA process on unhandled exception (0 = disable, 1 = enable)");
+int ignore_crat;
+module_param(ignore_crat, int, 0444);
+MODULE_PARM_DESC(ignore_crat,
+ "Ignore CRAT table during KFD initialization (0 = use CRAT (default), 1 = ignore CRAT)");
+
static int amdkfd_init_completed;
int kgd2kfd_init(unsigned int interface_version,
@@ -114,6 +128,8 @@ static int __init kfd_module_init(void)
kfd_process_create_wq();
+ kfd_debugfs_init();
+
amdkfd_init_completed = 1;
dev_info(kfd_device, "Initialized module\n");
@@ -130,6 +146,7 @@ static void __exit kfd_module_exit(void)
{
amdkfd_init_completed = 0;
+ kfd_debugfs_fini();
kfd_process_destroy_wq();
kfd_topology_shutdown();
kfd_chardev_exit();
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
index 1f3a6ba7eed2..8972bcfbf701 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
@@ -85,6 +85,10 @@ struct mqd_manager {
uint64_t queue_address, uint32_t pipe_id,
uint32_t queue_id);
+#if defined(CONFIG_DEBUG_FS)
+ int (*debugfs_show_mqd)(struct seq_file *m, void *data);
+#endif
+
struct mutex mqd_mutex;
struct kfd_dev *dev;
};
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
index 4728fad3fd74..f8ef4a051e08 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
@@ -36,6 +36,11 @@ static inline struct cik_mqd *get_mqd(void *mqd)
return (struct cik_mqd *)mqd;
}
+static inline struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd)
+{
+ return (struct cik_sdma_rlc_registers *)mqd;
+}
+
static int init_mqd(struct mqd_manager *mm, void **mqd,
struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr,
struct queue_properties *q)
@@ -149,7 +154,7 @@ static int load_mqd(struct mqd_manager *mm, void *mqd, uint32_t pipe_id,
{
/* AQL write pointer counts in 64B packets, PM4/CP counts in dwords. */
uint32_t wptr_shift = (p->format == KFD_QUEUE_FORMAT_AQL ? 4 : 0);
- uint32_t wptr_mask = (uint32_t)((p->queue_size / sizeof(uint32_t)) - 1);
+ uint32_t wptr_mask = (uint32_t)((p->queue_size / 4) - 1);
return mm->dev->kfd2kgd->hqd_load(mm->dev->kgd, mqd, pipe_id, queue_id,
(uint32_t __user *)p->write_ptr,
@@ -160,7 +165,9 @@ static int load_mqd_sdma(struct mqd_manager *mm, void *mqd,
uint32_t pipe_id, uint32_t queue_id,
struct queue_properties *p, struct mm_struct *mms)
{
- return mm->dev->kfd2kgd->hqd_sdma_load(mm->dev->kgd, mqd);
+ return mm->dev->kfd2kgd->hqd_sdma_load(mm->dev->kgd, mqd,
+ (uint32_t __user *)p->write_ptr,
+ mms);
}
static int update_mqd(struct mqd_manager *mm, void *mqd,
@@ -176,8 +183,7 @@ static int update_mqd(struct mqd_manager *mm, void *mqd,
* Calculating queue size which is log base 2 of actual queue size -1
* dwords and another -1 for ffs
*/
- m->cp_hqd_pq_control |= ffs(q->queue_size / sizeof(unsigned int))
- - 1 - 1;
+ m->cp_hqd_pq_control |= order_base_2(q->queue_size / 4) - 1;
m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
m->cp_hqd_pq_base_hi = upper_32_bits((uint64_t)q->queue_address >> 8);
m->cp_hqd_pq_rptr_report_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
@@ -202,7 +208,7 @@ static int update_mqd_sdma(struct mqd_manager *mm, void *mqd,
struct cik_sdma_rlc_registers *m;
m = get_sdma_mqd(mqd);
- m->sdma_rlc_rb_cntl = (ffs(q->queue_size / sizeof(unsigned int)) - 1)
+ m->sdma_rlc_rb_cntl = order_base_2(q->queue_size / 4)
<< SDMA0_RLC0_RB_CNTL__RB_SIZE__SHIFT |
q->vmid << SDMA0_RLC0_RB_CNTL__RB_VMID__SHIFT |
1 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT |
@@ -343,8 +349,7 @@ static int update_mqd_hiq(struct mqd_manager *mm, void *mqd,
* Calculating queue size which is log base 2 of actual queue
* size -1 dwords
*/
- m->cp_hqd_pq_control |= ffs(q->queue_size / sizeof(unsigned int))
- - 1 - 1;
+ m->cp_hqd_pq_control |= order_base_2(q->queue_size / 4) - 1;
m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
m->cp_hqd_pq_base_hi = upper_32_bits((uint64_t)q->queue_address >> 8);
m->cp_hqd_pq_rptr_report_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
@@ -360,15 +365,25 @@ static int update_mqd_hiq(struct mqd_manager *mm, void *mqd,
return 0;
}
-struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd)
-{
- struct cik_sdma_rlc_registers *m;
+#if defined(CONFIG_DEBUG_FS)
- m = (struct cik_sdma_rlc_registers *)mqd;
+static int debugfs_show_mqd(struct seq_file *m, void *data)
+{
+ seq_hex_dump(m, " ", DUMP_PREFIX_OFFSET, 32, 4,
+ data, sizeof(struct cik_mqd), false);
+ return 0;
+}
- return m;
+static int debugfs_show_mqd_sdma(struct seq_file *m, void *data)
+{
+ seq_hex_dump(m, " ", DUMP_PREFIX_OFFSET, 32, 4,
+ data, sizeof(struct cik_sdma_rlc_registers), false);
+ return 0;
}
+#endif
+
+
struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type,
struct kfd_dev *dev)
{
@@ -392,6 +407,9 @@ struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type,
mqd->update_mqd = update_mqd;
mqd->destroy_mqd = destroy_mqd;
mqd->is_occupied = is_occupied;
+#if defined(CONFIG_DEBUG_FS)
+ mqd->debugfs_show_mqd = debugfs_show_mqd;
+#endif
break;
case KFD_MQD_TYPE_HIQ:
mqd->init_mqd = init_mqd_hiq;
@@ -400,6 +418,9 @@ struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type,
mqd->update_mqd = update_mqd_hiq;
mqd->destroy_mqd = destroy_mqd;
mqd->is_occupied = is_occupied;
+#if defined(CONFIG_DEBUG_FS)
+ mqd->debugfs_show_mqd = debugfs_show_mqd;
+#endif
break;
case KFD_MQD_TYPE_SDMA:
mqd->init_mqd = init_mqd_sdma;
@@ -408,6 +429,9 @@ struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type,
mqd->update_mqd = update_mqd_sdma;
mqd->destroy_mqd = destroy_mqd_sdma;
mqd->is_occupied = is_occupied_sdma;
+#if defined(CONFIG_DEBUG_FS)
+ mqd->debugfs_show_mqd = debugfs_show_mqd_sdma;
+#endif
break;
default:
kfree(mqd);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
index 4ea854f9007b..971aec0637dc 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
@@ -30,7 +30,7 @@
#include "vi_structs.h"
#include "gca/gfx_8_0_sh_mask.h"
#include "gca/gfx_8_0_enum.h"
-
+#include "oss/oss_3_0_sh_mask.h"
#define CP_MQD_CONTROL__PRIV_STATE__SHIFT 0x8
static inline struct vi_mqd *get_mqd(void *mqd)
@@ -38,6 +38,11 @@ static inline struct vi_mqd *get_mqd(void *mqd)
return (struct vi_mqd *)mqd;
}
+static inline struct vi_sdma_mqd *get_sdma_mqd(void *mqd)
+{
+ return (struct vi_sdma_mqd *)mqd;
+}
+
static int init_mqd(struct mqd_manager *mm, void **mqd,
struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr,
struct queue_properties *q)
@@ -84,6 +89,28 @@ static int init_mqd(struct mqd_manager *mm, void **mqd,
if (q->format == KFD_QUEUE_FORMAT_AQL)
m->cp_hqd_iq_rptr = 1;
+ if (q->tba_addr) {
+ m->compute_tba_lo = lower_32_bits(q->tba_addr >> 8);
+ m->compute_tba_hi = upper_32_bits(q->tba_addr >> 8);
+ m->compute_tma_lo = lower_32_bits(q->tma_addr >> 8);
+ m->compute_tma_hi = upper_32_bits(q->tma_addr >> 8);
+ m->compute_pgm_rsrc2 |=
+ (1 << COMPUTE_PGM_RSRC2__TRAP_PRESENT__SHIFT);
+ }
+
+ if (mm->dev->cwsr_enabled && q->ctx_save_restore_area_address) {
+ m->cp_hqd_persistent_state |=
+ (1 << CP_HQD_PERSISTENT_STATE__QSWITCH_MODE__SHIFT);
+ m->cp_hqd_ctx_save_base_addr_lo =
+ lower_32_bits(q->ctx_save_restore_area_address);
+ m->cp_hqd_ctx_save_base_addr_hi =
+ upper_32_bits(q->ctx_save_restore_area_address);
+ m->cp_hqd_ctx_save_size = q->ctx_save_restore_area_size;
+ m->cp_hqd_cntl_stack_size = q->ctl_stack_size;
+ m->cp_hqd_cntl_stack_offset = q->ctl_stack_size;
+ m->cp_hqd_wg_state_offset = q->ctl_stack_size;
+ }
+
*mqd = m;
if (gart_addr)
*gart_addr = addr;
@@ -98,7 +125,7 @@ static int load_mqd(struct mqd_manager *mm, void *mqd,
{
/* AQL write pointer counts in 64B packets, PM4/CP counts in dwords. */
uint32_t wptr_shift = (p->format == KFD_QUEUE_FORMAT_AQL ? 4 : 0);
- uint32_t wptr_mask = (uint32_t)((p->queue_size / sizeof(uint32_t)) - 1);
+ uint32_t wptr_mask = (uint32_t)((p->queue_size / 4) - 1);
return mm->dev->kfd2kgd->hqd_load(mm->dev->kgd, mqd, pipe_id, queue_id,
(uint32_t __user *)p->write_ptr,
@@ -116,8 +143,7 @@ static int __update_mqd(struct mqd_manager *mm, void *mqd,
m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT |
atc_bit << CP_HQD_PQ_CONTROL__PQ_ATC__SHIFT |
mtype << CP_HQD_PQ_CONTROL__MTYPE__SHIFT;
- m->cp_hqd_pq_control |=
- ffs(q->queue_size / sizeof(unsigned int)) - 1 - 1;
+ m->cp_hqd_pq_control |= order_base_2(q->queue_size / 4) - 1;
pr_debug("cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control);
m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
@@ -147,7 +173,7 @@ static int __update_mqd(struct mqd_manager *mm, void *mqd,
* is safe, giving a maximum field value of 0xA.
*/
m->cp_hqd_eop_control |= min(0xA,
- ffs(q->eop_ring_buffer_size / sizeof(unsigned int)) - 1 - 1);
+ order_base_2(q->eop_ring_buffer_size / 4) - 1);
m->cp_hqd_eop_base_addr_lo =
lower_32_bits(q->eop_ring_buffer_address >> 8);
m->cp_hqd_eop_base_addr_hi =
@@ -163,6 +189,11 @@ static int __update_mqd(struct mqd_manager *mm, void *mqd,
2 << CP_HQD_PQ_CONTROL__SLOT_BASED_WPTR__SHIFT;
}
+ if (mm->dev->cwsr_enabled && q->ctx_save_restore_area_address)
+ m->cp_hqd_ctx_save_control =
+ atc_bit << CP_HQD_CTX_SAVE_CONTROL__ATC__SHIFT |
+ mtype << CP_HQD_CTX_SAVE_CONTROL__MTYPE__SHIFT;
+
q->is_active = (q->queue_size > 0 &&
q->queue_address != 0 &&
q->queue_percent > 0);
@@ -234,6 +265,117 @@ static int update_mqd_hiq(struct mqd_manager *mm, void *mqd,
return retval;
}
+static int init_mqd_sdma(struct mqd_manager *mm, void **mqd,
+ struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr,
+ struct queue_properties *q)
+{
+ int retval;
+ struct vi_sdma_mqd *m;
+
+
+ retval = kfd_gtt_sa_allocate(mm->dev,
+ sizeof(struct vi_sdma_mqd),
+ mqd_mem_obj);
+
+ if (retval != 0)
+ return -ENOMEM;
+
+ m = (struct vi_sdma_mqd *) (*mqd_mem_obj)->cpu_ptr;
+
+ memset(m, 0, sizeof(struct vi_sdma_mqd));
+
+ *mqd = m;
+ if (gart_addr != NULL)
+ *gart_addr = (*mqd_mem_obj)->gpu_addr;
+
+ retval = mm->update_mqd(mm, m, q);
+
+ return retval;
+}
+
+static void uninit_mqd_sdma(struct mqd_manager *mm, void *mqd,
+ struct kfd_mem_obj *mqd_mem_obj)
+{
+ kfd_gtt_sa_free(mm->dev, mqd_mem_obj);
+}
+
+static int load_mqd_sdma(struct mqd_manager *mm, void *mqd,
+ uint32_t pipe_id, uint32_t queue_id,
+ struct queue_properties *p, struct mm_struct *mms)
+{
+ return mm->dev->kfd2kgd->hqd_sdma_load(mm->dev->kgd, mqd,
+ (uint32_t __user *)p->write_ptr,
+ mms);
+}
+
+static int update_mqd_sdma(struct mqd_manager *mm, void *mqd,
+ struct queue_properties *q)
+{
+ struct vi_sdma_mqd *m;
+
+ m = get_sdma_mqd(mqd);
+ m->sdmax_rlcx_rb_cntl = order_base_2(q->queue_size / 4)
+ << SDMA0_RLC0_RB_CNTL__RB_SIZE__SHIFT |
+ q->vmid << SDMA0_RLC0_RB_CNTL__RB_VMID__SHIFT |
+ 1 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT |
+ 6 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT;
+
+ m->sdmax_rlcx_rb_base = lower_32_bits(q->queue_address >> 8);
+ m->sdmax_rlcx_rb_base_hi = upper_32_bits(q->queue_address >> 8);
+ m->sdmax_rlcx_rb_rptr_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
+ m->sdmax_rlcx_rb_rptr_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
+ m->sdmax_rlcx_doorbell =
+ q->doorbell_off << SDMA0_RLC0_DOORBELL__OFFSET__SHIFT;
+
+ m->sdmax_rlcx_virtual_addr = q->sdma_vm_addr;
+
+ m->sdma_engine_id = q->sdma_engine_id;
+ m->sdma_queue_id = q->sdma_queue_id;
+
+ q->is_active = (q->queue_size > 0 &&
+ q->queue_address != 0 &&
+ q->queue_percent > 0);
+
+ return 0;
+}
+
+/*
+ * preempt type here is ignored because there is only one way
+ * to preempt an SDMA queue
+ */
+static int destroy_mqd_sdma(struct mqd_manager *mm, void *mqd,
+ enum kfd_preempt_type type,
+ unsigned int timeout, uint32_t pipe_id,
+ uint32_t queue_id)
+{
+ return mm->dev->kfd2kgd->hqd_sdma_destroy(mm->dev->kgd, mqd, timeout);
+}
+
+static bool is_occupied_sdma(struct mqd_manager *mm, void *mqd,
+ uint64_t queue_address, uint32_t pipe_id,
+ uint32_t queue_id)
+{
+ return mm->dev->kfd2kgd->hqd_sdma_is_occupied(mm->dev->kgd, mqd);
+}
+
+#if defined(CONFIG_DEBUG_FS)
+
+static int debugfs_show_mqd(struct seq_file *m, void *data)
+{
+ seq_hex_dump(m, " ", DUMP_PREFIX_OFFSET, 32, 4,
+ data, sizeof(struct vi_mqd), false);
+ return 0;
+}
+
+static int debugfs_show_mqd_sdma(struct seq_file *m, void *data)
+{
+ seq_hex_dump(m, " ", DUMP_PREFIX_OFFSET, 32, 4,
+ data, sizeof(struct vi_sdma_mqd), false);
+ return 0;
+}
+
+#endif
+
struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type,
struct kfd_dev *dev)
{
@@ -257,6 +399,9 @@ struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type,
mqd->update_mqd = update_mqd;
mqd->destroy_mqd = destroy_mqd;
mqd->is_occupied = is_occupied;
+#if defined(CONFIG_DEBUG_FS)
+ mqd->debugfs_show_mqd = debugfs_show_mqd;
+#endif
break;
case KFD_MQD_TYPE_HIQ:
mqd->init_mqd = init_mqd_hiq;
@@ -265,8 +410,20 @@ struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type,
mqd->update_mqd = update_mqd_hiq;
mqd->destroy_mqd = destroy_mqd;
mqd->is_occupied = is_occupied;
+#if defined(CONFIG_DEBUG_FS)
+ mqd->debugfs_show_mqd = debugfs_show_mqd;
+#endif
break;
case KFD_MQD_TYPE_SDMA:
+ mqd->init_mqd = init_mqd_sdma;
+ mqd->uninit_mqd = uninit_mqd_sdma;
+ mqd->load_mqd = load_mqd_sdma;
+ mqd->update_mqd = update_mqd_sdma;
+ mqd->destroy_mqd = destroy_mqd_sdma;
+ mqd->is_occupied = is_occupied_sdma;
+#if defined(CONFIG_DEBUG_FS)
+ mqd->debugfs_show_mqd = debugfs_show_mqd_sdma;
+#endif
break;
default:
kfree(mqd);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
index 16da8ad02d8b..0ecbd1f9b606 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
@@ -45,7 +45,7 @@ static unsigned int build_pm4_header(unsigned int opcode, size_t packet_size)
header.u32All = 0;
header.opcode = opcode;
- header.count = packet_size/sizeof(uint32_t) - 2;
+ header.count = packet_size / 4 - 2;
header.type = PM4_TYPE_3;
return header.u32All;
@@ -55,15 +55,27 @@ static void pm_calc_rlib_size(struct packet_manager *pm,
unsigned int *rlib_size,
bool *over_subscription)
{
- unsigned int process_count, queue_count;
+ unsigned int process_count, queue_count, compute_queue_count;
unsigned int map_queue_size;
+ unsigned int max_proc_per_quantum = 1;
+ struct kfd_dev *dev = pm->dqm->dev;
process_count = pm->dqm->processes_count;
queue_count = pm->dqm->queue_count;
+ compute_queue_count = queue_count - pm->dqm->sdma_queue_count;
- /* check if there is over subscription*/
+ /* Check if there is over-subscription.
+ * Note: the arbitration between the number of VMIDs and
+ * hws_max_conc_proc has been done in
+ * kgd2kfd_device_init().
+ */
*over_subscription = false;
- if ((process_count > 1) || queue_count > get_queues_num(pm->dqm)) {
+
+ if (dev->max_proc_per_quantum > 1)
+ max_proc_per_quantum = dev->max_proc_per_quantum;
+
+ if ((process_count > max_proc_per_quantum) ||
+ compute_queue_count > get_queues_num(pm->dqm)) {
*over_subscription = true;
pr_debug("Over subscribed runlist\n");
}
@@ -116,10 +128,24 @@ static int pm_create_runlist(struct packet_manager *pm, uint32_t *buffer,
uint64_t ib, size_t ib_size_in_dwords, bool chain)
{
struct pm4_mes_runlist *packet;
+ int concurrent_proc_cnt = 0;
+ struct kfd_dev *kfd = pm->dqm->dev;
if (WARN_ON(!ib))
return -EFAULT;
+ /* Determine the number of processes to map together to HW:
+ * it cannot exceed the number of VMIDs available to the
+ * scheduler, and it is the smaller of the number of processes
+ * in the runlist and the kfd module parameter
+ * hws_max_conc_proc.
+ * Note: the arbitration between the number of VMIDs and
+ * hws_max_conc_proc has been done in
+ * kgd2kfd_device_init().
+ */
+ concurrent_proc_cnt = min(pm->dqm->processes_count,
+ kfd->max_proc_per_quantum);
+
packet = (struct pm4_mes_runlist *)buffer;
memset(buffer, 0, sizeof(struct pm4_mes_runlist));
@@ -130,6 +156,7 @@ static int pm_create_runlist(struct packet_manager *pm, uint32_t *buffer,
packet->bitfields4.chain = chain ? 1 : 0;
packet->bitfields4.offload_polling = 0;
packet->bitfields4.valid = 1;
+ packet->bitfields4.process_cnt = concurrent_proc_cnt;
packet->ordinal2 = lower_32_bits(ib);
packet->bitfields3.ib_base_hi = upper_32_bits(ib);
@@ -251,6 +278,7 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
return retval;
*rl_size_bytes = alloc_size_bytes;
+ pm->ib_size_bytes = alloc_size_bytes;
pr_debug("Building runlist ib process count: %d queues count %d\n",
pm->dqm->processes_count, pm->dqm->queue_count);
@@ -564,3 +592,26 @@ void pm_release_ib(struct packet_manager *pm)
}
mutex_unlock(&pm->lock);
}
+
+#if defined(CONFIG_DEBUG_FS)
+
+int pm_debugfs_runlist(struct seq_file *m, void *data)
+{
+ struct packet_manager *pm = data;
+
+ mutex_lock(&pm->lock);
+
+ if (!pm->allocated) {
+ seq_puts(m, " No active runlist\n");
+ goto out;
+ }
+
+ seq_hex_dump(m, " ", DUMP_PREFIX_OFFSET, 32, 4,
+ pm->ib_buffer_obj->cpu_ptr, pm->ib_size_bytes, false);
+
+out:
+ mutex_unlock(&pm->lock);
+ return 0;
+}
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c b/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
index d6a796144269..15fff4420e53 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
@@ -59,7 +59,7 @@ unsigned int kfd_pasid_alloc(void)
struct kfd_dev *dev = NULL;
unsigned int i = 0;
- while ((dev = kfd_topology_enum_kfd_devices(i)) != NULL) {
+ while ((kfd_topology_enum_kfd_devices(i, &dev)) == 0) {
if (dev && dev->kfd2kgd) {
kfd2kgd = dev->kfd2kgd;
break;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 9e4134c5b481..6a48d29ada47 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -33,6 +33,8 @@
#include <linux/kfd_ioctl.h>
#include <linux/idr.h>
#include <linux/kfifo.h>
+#include <linux/seq_file.h>
+#include <linux/kref.h>
#include <kgd_kfd_interface.h>
#include "amd_shared.h"
@@ -41,6 +43,7 @@
#define KFD_MMAP_DOORBELL_MASK 0x8000000000000
#define KFD_MMAP_EVENTS_MASK 0x4000000000000
+#define KFD_MMAP_RESERVED_MEM_MASK 0x2000000000000
/*
* When working with cp scheduler we should assign the HIQ manually or via
@@ -63,6 +66,15 @@
#define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS 1024
/*
+ * Size of the per-process TBA+TMA buffer: 2 pages
+ *
+ * The first page is the TBA used for the CWSR ISA code. The second
+ * page is used as TMA for daisy chaining a user-mode trap handler.
+ */
+#define KFD_CWSR_TBA_TMA_SIZE (PAGE_SIZE * 2)
+#define KFD_CWSR_TMA_OFFSET PAGE_SIZE
+
+/*
* Kernel module parameter to specify maximum number of supported queues per
* device
*/
@@ -79,11 +91,25 @@ extern int max_num_of_queues_per_device;
extern int sched_policy;
/*
+ * Kernel module parameter to specify the maximum number of
+ * processes per HW scheduler
+ */
+extern int hws_max_conc_proc;
+
+extern int cwsr_enable;
+
+/*
* Kernel module parameter to specify whether to send sigterm to HSA process on
* unhandled exception
*/
extern int send_sigterm;
+/*
+ * Ignore CRAT table during KFD initialization, can be used to work around
+ * broken CRAT tables on some AMD systems
+ */
+extern int ignore_crat;
+
/**
* enum kfd_sched_policy
*
@@ -131,6 +157,7 @@ struct kfd_device_info {
size_t ih_ring_entry_size;
uint8_t num_of_watch_points;
uint16_t mqd_size_aligned;
+ bool supports_cwsr;
};
struct kfd_mem_obj {
@@ -200,6 +227,14 @@ struct kfd_dev {
/* Debug manager */
struct kfd_dbgmgr *dbgmgr;
+
+ /* Maximum process number mapped to HW scheduler */
+ unsigned int max_proc_per_quantum;
+
+ /* CWSR */
+ bool cwsr_enabled;
+ const void *cwsr_isa;
+ unsigned int cwsr_isa_size;
};
/* KGD2KFD callbacks */
@@ -332,6 +367,9 @@ struct queue_properties {
uint32_t eop_ring_buffer_size;
uint64_t ctx_save_restore_area_address;
uint32_t ctx_save_restore_area_size;
+ uint32_t ctl_stack_size;
+ uint64_t tba_addr;
+ uint64_t tma_addr;
};
/**
@@ -439,6 +477,11 @@ struct qcm_process_device {
uint32_t num_gws;
uint32_t num_oac;
uint32_t sh_hidden_private_base;
+
+ /* CWSR memory */
+ void *cwsr_kaddr;
+ uint64_t tba_addr;
+ uint64_t tma_addr;
};
@@ -501,6 +544,9 @@ struct kfd_process {
*/
void *mm;
+ struct kref ref;
+ struct work_struct release_work;
+
struct mutex mutex;
/*
@@ -563,9 +609,10 @@ struct amdkfd_ioctl_desc {
void kfd_process_create_wq(void);
void kfd_process_destroy_wq(void);
-struct kfd_process *kfd_create_process(const struct task_struct *);
+struct kfd_process *kfd_create_process(struct file *filep);
struct kfd_process *kfd_get_process(const struct task_struct *);
struct kfd_process *kfd_lookup_process_by_pasid(unsigned int pasid);
+void kfd_unref_process(struct kfd_process *p);
struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
struct kfd_process *p);
@@ -577,6 +624,9 @@ struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev,
struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
struct kfd_process *p);
+int kfd_reserved_mem_mmap(struct kfd_process *process,
+ struct vm_area_struct *vma);
+
/* Process device data iterator */
struct kfd_process_device *kfd_get_first_process_device_data(
struct kfd_process *p);
@@ -624,9 +674,12 @@ int kfd_topology_init(void);
void kfd_topology_shutdown(void);
int kfd_topology_add_device(struct kfd_dev *gpu);
int kfd_topology_remove_device(struct kfd_dev *gpu);
+struct kfd_topology_device *kfd_topology_device_by_proximity_domain(
+ uint32_t proximity_domain);
struct kfd_dev *kfd_device_by_id(uint32_t gpu_id);
struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev);
-struct kfd_dev *kfd_topology_enum_kfd_devices(uint8_t idx);
+int kfd_topology_enum_kfd_devices(uint8_t idx, struct kfd_dev **kdev);
+int kfd_numa_node_to_apic_id(int numa_node_id);
/* Interrupts */
int kfd_interrupt_init(struct kfd_dev *dev);
@@ -643,8 +696,6 @@ int kgd2kfd_resume(struct kfd_dev *kfd);
int kfd_init_apertures(struct kfd_process *process);
/* Queue Context Management */
-struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd);
-
int init_queue(struct queue **q, const struct queue_properties *properties);
void uninit_queue(struct queue *q);
void print_queue_properties(struct queue_properties *q);
@@ -699,6 +750,7 @@ struct packet_manager {
struct mutex lock;
bool allocated;
struct kfd_mem_obj *ib_buffer_obj;
+ unsigned int ib_size_bytes;
};
int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm);
@@ -745,4 +797,23 @@ int kfd_event_destroy(struct kfd_process *p, uint32_t event_id);
int dbgdev_wave_reset_wavefronts(struct kfd_dev *dev, struct kfd_process *p);
+/* Debugfs */
+#if defined(CONFIG_DEBUG_FS)
+
+void kfd_debugfs_init(void);
+void kfd_debugfs_fini(void);
+int kfd_debugfs_mqds_by_process(struct seq_file *m, void *data);
+int pqm_debugfs_mqds(struct seq_file *m, void *data);
+int kfd_debugfs_hqds_by_device(struct seq_file *m, void *data);
+int dqm_debugfs_hqds(struct seq_file *m, void *data);
+int kfd_debugfs_rls_by_device(struct seq_file *m, void *data);
+int pm_debugfs_runlist(struct seq_file *m, void *data);
+
+#else
+
+static inline void kfd_debugfs_init(void) {}
+static inline void kfd_debugfs_fini(void) {}
+
+#endif
+
#endif
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 1f5ccd28bd41..a22fb0710f15 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -24,10 +24,12 @@
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
+#include <linux/sched/task.h>
#include <linux/slab.h>
#include <linux/amd-iommu.h>
#include <linux/notifier.h>
#include <linux/compat.h>
+#include <linux/mman.h>
struct mm_struct;
@@ -46,13 +48,12 @@ DEFINE_STATIC_SRCU(kfd_processes_srcu);
static struct workqueue_struct *kfd_process_wq;
-struct kfd_process_release_work {
- struct work_struct kfd_work;
- struct kfd_process *p;
-};
-
static struct kfd_process *find_process(const struct task_struct *thread);
-static struct kfd_process *create_process(const struct task_struct *thread);
+static void kfd_process_ref_release(struct kref *ref);
+static struct kfd_process *create_process(const struct task_struct *thread,
+ struct file *filep);
+static int kfd_process_init_cwsr(struct kfd_process *p, struct file *filep);
+
void kfd_process_create_wq(void)
{
@@ -68,9 +69,10 @@ void kfd_process_destroy_wq(void)
}
}
-struct kfd_process *kfd_create_process(const struct task_struct *thread)
+struct kfd_process *kfd_create_process(struct file *filep)
{
struct kfd_process *process;
+ struct task_struct *thread = current;
if (!thread->mm)
return ERR_PTR(-EINVAL);
@@ -79,9 +81,6 @@ struct kfd_process *kfd_create_process(const struct task_struct *thread)
if (thread->group_leader->mm != thread->mm)
return ERR_PTR(-EINVAL);
- /* Take mmap_sem because we call __mmu_notifier_register inside */
- down_write(&thread->mm->mmap_sem);
-
/*
* take kfd processes mutex before starting of process creation
* so there won't be a case where two threads of the same process
@@ -93,14 +92,11 @@ struct kfd_process *kfd_create_process(const struct task_struct *thread)
process = find_process(thread);
if (process)
pr_debug("Process already found\n");
-
- if (!process)
- process = create_process(thread);
+ else
+ process = create_process(thread, filep);
mutex_unlock(&kfd_processes_mutex);
- up_write(&thread->mm->mmap_sem);
-
return process;
}
@@ -144,63 +140,75 @@ static struct kfd_process *find_process(const struct task_struct *thread)
return p;
}
-static void kfd_process_wq_release(struct work_struct *work)
+void kfd_unref_process(struct kfd_process *p)
+{
+ kref_put(&p->ref, kfd_process_ref_release);
+}
+
+static void kfd_process_destroy_pdds(struct kfd_process *p)
{
- struct kfd_process_release_work *my_work;
struct kfd_process_device *pdd, *temp;
- struct kfd_process *p;
- my_work = (struct kfd_process_release_work *) work;
+ list_for_each_entry_safe(pdd, temp, &p->per_device_data,
+ per_device_list) {
+ pr_debug("Releasing pdd (topology id %d) for process (pasid %d)\n",
+ pdd->dev->id, p->pasid);
- p = my_work->p;
+ list_del(&pdd->per_device_list);
- pr_debug("Releasing process (pasid %d) in workqueue\n",
- p->pasid);
+ if (pdd->qpd.cwsr_kaddr)
+ free_pages((unsigned long)pdd->qpd.cwsr_kaddr,
+ get_order(KFD_CWSR_TBA_TMA_SIZE));
- mutex_lock(&p->mutex);
+ kfree(pdd);
+ }
+}
- list_for_each_entry_safe(pdd, temp, &p->per_device_data,
- per_device_list) {
- pr_debug("Releasing pdd (topology id %d) for process (pasid %d) in workqueue\n",
- pdd->dev->id, p->pasid);
+/* No process locking is needed in this function, because the process
+ * is not findable any more. We must assume that no other thread is
+ * using it any more, otherwise we couldn't safely free the process
+ * structure in the end.
+ */
+static void kfd_process_wq_release(struct work_struct *work)
+{
+ struct kfd_process *p = container_of(work, struct kfd_process,
+ release_work);
+ struct kfd_process_device *pdd;
+
+ pr_debug("Releasing process (pasid %d) in workqueue\n", p->pasid);
+ list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
if (pdd->bound == PDD_BOUND)
amd_iommu_unbind_pasid(pdd->dev->pdev, p->pasid);
-
- list_del(&pdd->per_device_list);
- kfree(pdd);
}
+ kfd_process_destroy_pdds(p);
+
kfd_event_free_process(p);
kfd_pasid_free(p->pasid);
kfd_free_process_doorbells(p);
- mutex_unlock(&p->mutex);
-
mutex_destroy(&p->mutex);
- kfree(p);
+ put_task_struct(p->lead_thread);
- kfree(work);
+ kfree(p);
}
-static void kfd_process_destroy_delayed(struct rcu_head *rcu)
+static void kfd_process_ref_release(struct kref *ref)
{
- struct kfd_process_release_work *work;
- struct kfd_process *p;
+ struct kfd_process *p = container_of(ref, struct kfd_process, ref);
- p = container_of(rcu, struct kfd_process, rcu);
-
- mmdrop(p->mm);
+ INIT_WORK(&p->release_work, kfd_process_wq_release);
+ queue_work(kfd_process_wq, &p->release_work);
+}
- work = kmalloc(sizeof(struct kfd_process_release_work), GFP_ATOMIC);
+static void kfd_process_destroy_delayed(struct rcu_head *rcu)
+{
+ struct kfd_process *p = container_of(rcu, struct kfd_process, rcu);
- if (work) {
- INIT_WORK((struct work_struct *) work, kfd_process_wq_release);
- work->p = p;
- queue_work(kfd_process_wq, (struct work_struct *) work);
- }
+ kfd_unref_process(p);
}
static void kfd_process_notifier_release(struct mmu_notifier *mn,
@@ -244,15 +252,12 @@ static void kfd_process_notifier_release(struct mmu_notifier *mn,
kfd_process_dequeue_from_all_devices(p);
pqm_uninit(&p->pqm);
+ /* Indicate to other users that MM is no longer valid */
+ p->mm = NULL;
+
mutex_unlock(&p->mutex);
- /*
- * Because we drop mm_count inside kfd_process_destroy_delayed
- * and because the mmu_notifier_unregister function also drop
- * mm_count we need to take an extra count here.
- */
- mmgrab(p->mm);
- mmu_notifier_unregister_no_release(&p->mmu_notifier, p->mm);
+ mmu_notifier_unregister_no_release(&p->mmu_notifier, mm);
mmu_notifier_call_srcu(&p->rcu, &kfd_process_destroy_delayed);
}
@@ -260,7 +265,44 @@ static const struct mmu_notifier_ops kfd_process_mmu_notifier_ops = {
.release = kfd_process_notifier_release,
};
-static struct kfd_process *create_process(const struct task_struct *thread)
+static int kfd_process_init_cwsr(struct kfd_process *p, struct file *filep)
+{
+ unsigned long offset;
+ struct kfd_process_device *pdd = NULL;
+ struct kfd_dev *dev = NULL;
+ struct qcm_process_device *qpd = NULL;
+
+ list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
+ dev = pdd->dev;
+ qpd = &pdd->qpd;
+ if (!dev->cwsr_enabled || qpd->cwsr_kaddr)
+ continue;
+ offset = (dev->id | KFD_MMAP_RESERVED_MEM_MASK) << PAGE_SHIFT;
+ qpd->tba_addr = (int64_t)vm_mmap(filep, 0,
+ KFD_CWSR_TBA_TMA_SIZE, PROT_READ | PROT_EXEC,
+ MAP_SHARED, offset);
+
+ if (IS_ERR_VALUE(qpd->tba_addr)) {
+ int err = qpd->tba_addr;
+
+ pr_err("Failure to set tba address. error %d.\n", err);
+ qpd->tba_addr = 0;
+ qpd->cwsr_kaddr = NULL;
+ return err;
+ }
+
+ memcpy(qpd->cwsr_kaddr, dev->cwsr_isa, dev->cwsr_isa_size);
+
+ qpd->tma_addr = qpd->tba_addr + KFD_CWSR_TMA_OFFSET;
+ pr_debug("set tba :0x%llx, tma:0x%llx, cwsr_kaddr:%p for pqm.\n",
+ qpd->tba_addr, qpd->tma_addr, qpd->cwsr_kaddr);
+ }
+
+ return 0;
+}
+
+static struct kfd_process *create_process(const struct task_struct *thread,
+ struct file *filep)
{
struct kfd_process *process;
int err = -ENOMEM;
@@ -277,13 +319,15 @@ static struct kfd_process *create_process(const struct task_struct *thread)
if (kfd_alloc_process_doorbells(process) < 0)
goto err_alloc_doorbells;
+ kref_init(&process->ref);
+
mutex_init(&process->mutex);
process->mm = thread->mm;
/* register notifier */
process->mmu_notifier.ops = &kfd_process_mmu_notifier_ops;
- err = __mmu_notifier_register(&process->mmu_notifier, process->mm);
+ err = mmu_notifier_register(&process->mmu_notifier, process->mm);
if (err)
goto err_mmu_notifier;
@@ -291,6 +335,7 @@ static struct kfd_process *create_process(const struct task_struct *thread)
(uintptr_t)process->mm);
process->lead_thread = thread->group_leader;
+ get_task_struct(process->lead_thread);
INIT_LIST_HEAD(&process->per_device_data);
@@ -306,8 +351,14 @@ static struct kfd_process *create_process(const struct task_struct *thread)
if (err != 0)
goto err_init_apertures;
+ err = kfd_process_init_cwsr(process, filep);
+ if (err)
+ goto err_init_cwsr;
+
return process;
+err_init_cwsr:
+ kfd_process_destroy_pdds(process);
err_init_apertures:
pqm_uninit(&process->pqm);
err_process_pqm_init:
@@ -343,16 +394,18 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
struct kfd_process_device *pdd = NULL;
pdd = kzalloc(sizeof(*pdd), GFP_KERNEL);
- if (pdd != NULL) {
- pdd->dev = dev;
- INIT_LIST_HEAD(&pdd->qpd.queues_list);
- INIT_LIST_HEAD(&pdd->qpd.priv_queue_list);
- pdd->qpd.dqm = dev->dqm;
- pdd->process = p;
- pdd->bound = PDD_UNBOUND;
- pdd->already_dequeued = false;
- list_add(&pdd->per_device_list, &p->per_device_data);
- }
+ if (!pdd)
+ return NULL;
+
+ pdd->dev = dev;
+ INIT_LIST_HEAD(&pdd->qpd.queues_list);
+ INIT_LIST_HEAD(&pdd->qpd.priv_queue_list);
+ pdd->qpd.dqm = dev->dqm;
+ pdd->qpd.pqm = &p->pqm;
+ pdd->process = p;
+ pdd->bound = PDD_UNBOUND;
+ pdd->already_dequeued = false;
+ list_add(&pdd->per_device_list, &p->per_device_data);
return pdd;
}
@@ -483,6 +536,8 @@ void kfd_process_iommu_unbind_callback(struct kfd_dev *dev, unsigned int pasid)
mutex_unlock(kfd_get_dbgmgr_mutex());
+ mutex_lock(&p->mutex);
+
pdd = kfd_get_process_device_data(dev, p);
if (pdd)
/* For GPU relying on IOMMU, we need to dequeue here
@@ -491,6 +546,8 @@ void kfd_process_iommu_unbind_callback(struct kfd_dev *dev, unsigned int pasid)
kfd_process_dequeue_from_device(pdd);
mutex_unlock(&p->mutex);
+
+ kfd_unref_process(p);
}
struct kfd_process_device *kfd_get_first_process_device_data(
@@ -515,22 +572,86 @@ bool kfd_has_process_device_data(struct kfd_process *p)
return !(list_empty(&p->per_device_data));
}
-/* This returns with process->mutex locked. */
+/* This increments the process->ref counter. */
struct kfd_process *kfd_lookup_process_by_pasid(unsigned int pasid)
{
- struct kfd_process *p;
+ struct kfd_process *p, *ret_p = NULL;
unsigned int temp;
int idx = srcu_read_lock(&kfd_processes_srcu);
hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
if (p->pasid == pasid) {
- mutex_lock(&p->mutex);
+ kref_get(&p->ref);
+ ret_p = p;
break;
}
}
srcu_read_unlock(&kfd_processes_srcu, idx);
- return p;
+ return ret_p;
}
+
+int kfd_reserved_mem_mmap(struct kfd_process *process,
+ struct vm_area_struct *vma)
+{
+ struct kfd_dev *dev = kfd_device_by_id(vma->vm_pgoff);
+ struct kfd_process_device *pdd;
+ struct qcm_process_device *qpd;
+
+ if (!dev)
+ return -EINVAL;
+ if ((vma->vm_end - vma->vm_start) != KFD_CWSR_TBA_TMA_SIZE) {
+ pr_err("Incorrect CWSR mapping size.\n");
+ return -EINVAL;
+ }
+
+ pdd = kfd_get_process_device_data(dev, process);
+ if (!pdd)
+ return -EINVAL;
+ qpd = &pdd->qpd;
+
+ qpd->cwsr_kaddr = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
+ get_order(KFD_CWSR_TBA_TMA_SIZE));
+ if (!qpd->cwsr_kaddr) {
+ pr_err("Error allocating per process CWSR buffer.\n");
+ return -ENOMEM;
+ }
+
+ vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND
+ | VM_NORESERVE | VM_DONTDUMP | VM_PFNMAP;
+ /* Mapping pages to user process */
+ return remap_pfn_range(vma, vma->vm_start,
+ PFN_DOWN(__pa(qpd->cwsr_kaddr)),
+ KFD_CWSR_TBA_TMA_SIZE, vma->vm_page_prot);
+}
+
+#if defined(CONFIG_DEBUG_FS)
+
+int kfd_debugfs_mqds_by_process(struct seq_file *m, void *data)
+{
+ struct kfd_process *p;
+ unsigned int temp;
+ int r = 0;
+
+ int idx = srcu_read_lock(&kfd_processes_srcu);
+
+ hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
+ seq_printf(m, "Process %d PASID %d:\n",
+ p->lead_thread->tgid, p->pasid);
+
+ mutex_lock(&p->mutex);
+ r = pqm_debugfs_mqds(m, &p->pqm);
+ mutex_unlock(&p->mutex);
+
+ if (r)
+ break;
+ }
+
+ srcu_read_unlock(&kfd_processes_srcu, idx);
+
+ return r;
+}
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index a3f1e62c60ba..876380632668 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -178,10 +178,8 @@ int pqm_create_queue(struct process_queue_manager *pqm,
return retval;
if (list_empty(&pdd->qpd.queues_list) &&
- list_empty(&pdd->qpd.priv_queue_list)) {
- pdd->qpd.pqm = pqm;
+ list_empty(&pdd->qpd.priv_queue_list))
dev->dqm->ops.register_process(dev->dqm, &pdd->qpd);
- }
pqn = kzalloc(sizeof(*pqn), GFP_KERNEL);
if (!pqn) {
@@ -203,8 +201,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
goto err_create_queue;
pqn->q = q;
pqn->kq = NULL;
- retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd,
- &q->properties.vmid);
+ retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd);
pr_debug("DQM returned %d for create_queue\n", retval);
print_queue(q);
break;
@@ -224,8 +221,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
goto err_create_queue;
pqn->q = q;
pqn->kq = NULL;
- retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd,
- &q->properties.vmid);
+ retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd);
pr_debug("DQM returned %d for create_queue\n", retval);
print_queue(q);
break;
@@ -315,6 +311,10 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
if (pqn->q) {
dqm = pqn->q->device->dqm;
retval = dqm->ops.destroy_queue(dqm, &pdd->qpd, pqn->q);
+ if (retval) {
+ pr_debug("Destroy queue failed, returned %d\n", retval);
+ goto err_destroy_queue;
+ }
uninit_queue(pqn->q);
}
@@ -326,6 +326,7 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
list_empty(&pdd->qpd.priv_queue_list))
dqm->ops.unregister_process(dqm, &pdd->qpd);
+err_destroy_queue:
return retval;
}
@@ -367,4 +368,67 @@ struct kernel_queue *pqm_get_kernel_queue(
return NULL;
}
+#if defined(CONFIG_DEBUG_FS)
+
+int pqm_debugfs_mqds(struct seq_file *m, void *data)
+{
+ struct process_queue_manager *pqm = data;
+ struct process_queue_node *pqn;
+ struct queue *q;
+ enum KFD_MQD_TYPE mqd_type;
+ struct mqd_manager *mqd_manager;
+ int r = 0;
+
+ list_for_each_entry(pqn, &pqm->queues, process_queue_list) {
+ if (pqn->q) {
+ q = pqn->q;
+ switch (q->properties.type) {
+ case KFD_QUEUE_TYPE_SDMA:
+ seq_printf(m, " SDMA queue on device %x\n",
+ q->device->id);
+ mqd_type = KFD_MQD_TYPE_SDMA;
+ break;
+ case KFD_QUEUE_TYPE_COMPUTE:
+ seq_printf(m, " Compute queue on device %x\n",
+ q->device->id);
+ mqd_type = KFD_MQD_TYPE_CP;
+ break;
+ default:
+ seq_printf(m,
+ " Bad user queue type %d on device %x\n",
+ q->properties.type, q->device->id);
+ continue;
+ }
+ mqd_manager = q->device->dqm->ops.get_mqd_manager(
+ q->device->dqm, mqd_type);
+ } else if (pqn->kq) {
+ q = pqn->kq->queue;
+ mqd_manager = pqn->kq->mqd;
+ switch (q->properties.type) {
+ case KFD_QUEUE_TYPE_DIQ:
+ seq_printf(m, " DIQ on device %x\n",
+ pqn->kq->dev->id);
+ mqd_type = KFD_MQD_TYPE_HIQ;
+ break;
+ default:
+ seq_printf(m,
+ " Bad kernel queue type %d on device %x\n",
+ q->properties.type,
+ pqn->kq->dev->id);
+ continue;
+ }
+ } else {
+ seq_printf(m,
+ " Weird: Queue node with neither kernel nor user queue\n");
+ continue;
+ }
+
+ r = mqd_manager->debugfs_show_mqd(m, q->mqd);
+ if (r != 0)
+ break;
+ }
+
+ return r;
+}
+#endif
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index 19ce59028d6b..c6a76090a725 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -28,27 +28,32 @@
#include <linux/hash.h>
#include <linux/cpufreq.h>
#include <linux/log2.h>
+#include <linux/dmi.h>
+#include <linux/atomic.h>
#include "kfd_priv.h"
#include "kfd_crat.h"
#include "kfd_topology.h"
+#include "kfd_device_queue_manager.h"
+/* topology_device_list - Master list of all topology devices */
static struct list_head topology_device_list;
-static int topology_crat_parsed;
static struct kfd_system_properties sys_props;
static DECLARE_RWSEM(topology_lock);
+static atomic_t topology_crat_proximity_domain;
-struct kfd_dev *kfd_device_by_id(uint32_t gpu_id)
+struct kfd_topology_device *kfd_topology_device_by_proximity_domain(
+ uint32_t proximity_domain)
{
struct kfd_topology_device *top_dev;
- struct kfd_dev *device = NULL;
+ struct kfd_topology_device *device = NULL;
down_read(&topology_lock);
list_for_each_entry(top_dev, &topology_device_list, list)
- if (top_dev->gpu_id == gpu_id) {
- device = top_dev->gpu;
+ if (top_dev->proximity_domain == proximity_domain) {
+ device = top_dev;
break;
}
@@ -57,7 +62,7 @@ struct kfd_dev *kfd_device_by_id(uint32_t gpu_id)
return device;
}
-struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev)
+struct kfd_dev *kfd_device_by_id(uint32_t gpu_id)
{
struct kfd_topology_device *top_dev;
struct kfd_dev *device = NULL;
@@ -65,7 +70,7 @@ struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev)
down_read(&topology_lock);
list_for_each_entry(top_dev, &topology_device_list, list)
- if (top_dev->gpu->pdev == pdev) {
+ if (top_dev->gpu_id == gpu_id) {
device = top_dev->gpu;
break;
}
@@ -75,282 +80,31 @@ struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev)
return device;
}
-static int kfd_topology_get_crat_acpi(void *crat_image, size_t *size)
-{
- struct acpi_table_header *crat_table;
- acpi_status status;
-
- if (!size)
- return -EINVAL;
-
- /*
- * Fetch the CRAT table from ACPI
- */
- status = acpi_get_table(CRAT_SIGNATURE, 0, &crat_table);
- if (status == AE_NOT_FOUND) {
- pr_warn("CRAT table not found\n");
- return -ENODATA;
- } else if (ACPI_FAILURE(status)) {
- const char *err = acpi_format_exception(status);
-
- pr_err("CRAT table error: %s\n", err);
- return -EINVAL;
- }
-
- if (*size >= crat_table->length && crat_image != NULL)
- memcpy(crat_image, crat_table, crat_table->length);
-
- *size = crat_table->length;
-
- return 0;
-}
-
-static void kfd_populated_cu_info_cpu(struct kfd_topology_device *dev,
- struct crat_subtype_computeunit *cu)
-{
- dev->node_props.cpu_cores_count = cu->num_cpu_cores;
- dev->node_props.cpu_core_id_base = cu->processor_id_low;
- if (cu->hsa_capability & CRAT_CU_FLAGS_IOMMU_PRESENT)
- dev->node_props.capability |= HSA_CAP_ATS_PRESENT;
-
- pr_info("CU CPU: cores=%d id_base=%d\n", cu->num_cpu_cores,
- cu->processor_id_low);
-}
-
-static void kfd_populated_cu_info_gpu(struct kfd_topology_device *dev,
- struct crat_subtype_computeunit *cu)
-{
- dev->node_props.simd_id_base = cu->processor_id_low;
- dev->node_props.simd_count = cu->num_simd_cores;
- dev->node_props.lds_size_in_kb = cu->lds_size_in_kb;
- dev->node_props.max_waves_per_simd = cu->max_waves_simd;
- dev->node_props.wave_front_size = cu->wave_front_size;
- dev->node_props.mem_banks_count = cu->num_banks;
- dev->node_props.array_count = cu->num_arrays;
- dev->node_props.cu_per_simd_array = cu->num_cu_per_array;
- dev->node_props.simd_per_cu = cu->num_simd_per_cu;
- dev->node_props.max_slots_scratch_cu = cu->max_slots_scatch_cu;
- if (cu->hsa_capability & CRAT_CU_FLAGS_HOT_PLUGGABLE)
- dev->node_props.capability |= HSA_CAP_HOT_PLUGGABLE;
- pr_info("CU GPU: simds=%d id_base=%d\n", cu->num_simd_cores,
- cu->processor_id_low);
-}
-
-/* kfd_parse_subtype_cu is called when the topology mutex is already acquired */
-static int kfd_parse_subtype_cu(struct crat_subtype_computeunit *cu)
-{
- struct kfd_topology_device *dev;
- int i = 0;
-
- pr_info("Found CU entry in CRAT table with proximity_domain=%d caps=%x\n",
- cu->proximity_domain, cu->hsa_capability);
- list_for_each_entry(dev, &topology_device_list, list) {
- if (cu->proximity_domain == i) {
- if (cu->flags & CRAT_CU_FLAGS_CPU_PRESENT)
- kfd_populated_cu_info_cpu(dev, cu);
-
- if (cu->flags & CRAT_CU_FLAGS_GPU_PRESENT)
- kfd_populated_cu_info_gpu(dev, cu);
- break;
- }
- i++;
- }
-
- return 0;
-}
-
-/*
- * kfd_parse_subtype_mem is called when the topology mutex is
- * already acquired
- */
-static int kfd_parse_subtype_mem(struct crat_subtype_memory *mem)
-{
- struct kfd_mem_properties *props;
- struct kfd_topology_device *dev;
- int i = 0;
-
- pr_info("Found memory entry in CRAT table with proximity_domain=%d\n",
- mem->promixity_domain);
- list_for_each_entry(dev, &topology_device_list, list) {
- if (mem->promixity_domain == i) {
- props = kfd_alloc_struct(props);
- if (props == NULL)
- return -ENOMEM;
-
- if (dev->node_props.cpu_cores_count == 0)
- props->heap_type = HSA_MEM_HEAP_TYPE_FB_PRIVATE;
- else
- props->heap_type = HSA_MEM_HEAP_TYPE_SYSTEM;
-
- if (mem->flags & CRAT_MEM_FLAGS_HOT_PLUGGABLE)
- props->flags |= HSA_MEM_FLAGS_HOT_PLUGGABLE;
- if (mem->flags & CRAT_MEM_FLAGS_NON_VOLATILE)
- props->flags |= HSA_MEM_FLAGS_NON_VOLATILE;
-
- props->size_in_bytes =
- ((uint64_t)mem->length_high << 32) +
- mem->length_low;
- props->width = mem->width;
-
- dev->mem_bank_count++;
- list_add_tail(&props->list, &dev->mem_props);
-
- break;
- }
- i++;
- }
-
- return 0;
-}
-
-/*
- * kfd_parse_subtype_cache is called when the topology mutex
- * is already acquired
- */
-static int kfd_parse_subtype_cache(struct crat_subtype_cache *cache)
-{
- struct kfd_cache_properties *props;
- struct kfd_topology_device *dev;
- uint32_t id;
-
- id = cache->processor_id_low;
-
- pr_info("Found cache entry in CRAT table with processor_id=%d\n", id);
- list_for_each_entry(dev, &topology_device_list, list)
- if (id == dev->node_props.cpu_core_id_base ||
- id == dev->node_props.simd_id_base) {
- props = kfd_alloc_struct(props);
- if (props == NULL)
- return -ENOMEM;
-
- props->processor_id_low = id;
- props->cache_level = cache->cache_level;
- props->cache_size = cache->cache_size;
- props->cacheline_size = cache->cache_line_size;
- props->cachelines_per_tag = cache->lines_per_tag;
- props->cache_assoc = cache->associativity;
- props->cache_latency = cache->cache_latency;
-
- if (cache->flags & CRAT_CACHE_FLAGS_DATA_CACHE)
- props->cache_type |= HSA_CACHE_TYPE_DATA;
- if (cache->flags & CRAT_CACHE_FLAGS_INST_CACHE)
- props->cache_type |= HSA_CACHE_TYPE_INSTRUCTION;
- if (cache->flags & CRAT_CACHE_FLAGS_CPU_CACHE)
- props->cache_type |= HSA_CACHE_TYPE_CPU;
- if (cache->flags & CRAT_CACHE_FLAGS_SIMD_CACHE)
- props->cache_type |= HSA_CACHE_TYPE_HSACU;
-
- dev->cache_count++;
- dev->node_props.caches_count++;
- list_add_tail(&props->list, &dev->cache_props);
-
- break;
- }
-
- return 0;
-}
-
-/*
- * kfd_parse_subtype_iolink is called when the topology mutex
- * is already acquired
- */
-static int kfd_parse_subtype_iolink(struct crat_subtype_iolink *iolink)
+struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev)
{
- struct kfd_iolink_properties *props;
- struct kfd_topology_device *dev;
- uint32_t i = 0;
- uint32_t id_from;
- uint32_t id_to;
-
- id_from = iolink->proximity_domain_from;
- id_to = iolink->proximity_domain_to;
+ struct kfd_topology_device *top_dev;
+ struct kfd_dev *device = NULL;
- pr_info("Found IO link entry in CRAT table with id_from=%d\n", id_from);
- list_for_each_entry(dev, &topology_device_list, list) {
- if (id_from == i) {
- props = kfd_alloc_struct(props);
- if (props == NULL)
- return -ENOMEM;
-
- props->node_from = id_from;
- props->node_to = id_to;
- props->ver_maj = iolink->version_major;
- props->ver_min = iolink->version_minor;
-
- /*
- * weight factor (derived from CDIR), currently always 1
- */
- props->weight = 1;
-
- props->min_latency = iolink->minimum_latency;
- props->max_latency = iolink->maximum_latency;
- props->min_bandwidth = iolink->minimum_bandwidth_mbs;
- props->max_bandwidth = iolink->maximum_bandwidth_mbs;
- props->rec_transfer_size =
- iolink->recommended_transfer_size;
-
- dev->io_link_count++;
- dev->node_props.io_links_count++;
- list_add_tail(&props->list, &dev->io_link_props);
+ down_read(&topology_lock);
+ list_for_each_entry(top_dev, &topology_device_list, list)
+ if (top_dev->gpu->pdev == pdev) {
+ device = top_dev->gpu;
break;
}
- i++;
- }
- return 0;
-}
-
-static int kfd_parse_subtype(struct crat_subtype_generic *sub_type_hdr)
-{
- struct crat_subtype_computeunit *cu;
- struct crat_subtype_memory *mem;
- struct crat_subtype_cache *cache;
- struct crat_subtype_iolink *iolink;
- int ret = 0;
-
- switch (sub_type_hdr->type) {
- case CRAT_SUBTYPE_COMPUTEUNIT_AFFINITY:
- cu = (struct crat_subtype_computeunit *)sub_type_hdr;
- ret = kfd_parse_subtype_cu(cu);
- break;
- case CRAT_SUBTYPE_MEMORY_AFFINITY:
- mem = (struct crat_subtype_memory *)sub_type_hdr;
- ret = kfd_parse_subtype_mem(mem);
- break;
- case CRAT_SUBTYPE_CACHE_AFFINITY:
- cache = (struct crat_subtype_cache *)sub_type_hdr;
- ret = kfd_parse_subtype_cache(cache);
- break;
- case CRAT_SUBTYPE_TLB_AFFINITY:
- /*
- * For now, nothing to do here
- */
- pr_info("Found TLB entry in CRAT table (not processing)\n");
- break;
- case CRAT_SUBTYPE_CCOMPUTE_AFFINITY:
- /*
- * For now, nothing to do here
- */
- pr_info("Found CCOMPUTE entry in CRAT table (not processing)\n");
- break;
- case CRAT_SUBTYPE_IOLINK_AFFINITY:
- iolink = (struct crat_subtype_iolink *)sub_type_hdr;
- ret = kfd_parse_subtype_iolink(iolink);
- break;
- default:
- pr_warn("Unknown subtype (%d) in CRAT\n",
- sub_type_hdr->type);
- }
+ up_read(&topology_lock);
- return ret;
+ return device;
}
+/* Called with write topology_lock acquired */
static void kfd_release_topology_device(struct kfd_topology_device *dev)
{
struct kfd_mem_properties *mem;
struct kfd_cache_properties *cache;
struct kfd_iolink_properties *iolink;
+ struct kfd_perf_properties *perf;
list_del(&dev->list);
@@ -375,25 +129,35 @@ static void kfd_release_topology_device(struct kfd_topology_device *dev)
kfree(iolink);
}
- kfree(dev);
+ while (dev->perf_props.next != &dev->perf_props) {
+ perf = container_of(dev->perf_props.next,
+ struct kfd_perf_properties, list);
+ list_del(&perf->list);
+ kfree(perf);
+ }
- sys_props.num_devices--;
+ kfree(dev);
}
-static void kfd_release_live_view(void)
+void kfd_release_topology_device_list(struct list_head *device_list)
{
struct kfd_topology_device *dev;
- while (topology_device_list.next != &topology_device_list) {
- dev = container_of(topology_device_list.next,
- struct kfd_topology_device, list);
+ while (!list_empty(device_list)) {
+ dev = list_first_entry(device_list,
+ struct kfd_topology_device, list);
kfd_release_topology_device(dev);
+ }
}
+static void kfd_release_live_view(void)
+{
+ kfd_release_topology_device_list(&topology_device_list);
memset(&sys_props, 0, sizeof(sys_props));
}
-static struct kfd_topology_device *kfd_create_topology_device(void)
+struct kfd_topology_device *kfd_create_topology_device(
+ struct list_head *device_list)
{
struct kfd_topology_device *dev;
@@ -406,65 +170,13 @@ static struct kfd_topology_device *kfd_create_topology_device(void)
INIT_LIST_HEAD(&dev->mem_props);
INIT_LIST_HEAD(&dev->cache_props);
INIT_LIST_HEAD(&dev->io_link_props);
+ INIT_LIST_HEAD(&dev->perf_props);
- list_add_tail(&dev->list, &topology_device_list);
- sys_props.num_devices++;
+ list_add_tail(&dev->list, device_list);
return dev;
}
-static int kfd_parse_crat_table(void *crat_image)
-{
- struct kfd_topology_device *top_dev;
- struct crat_subtype_generic *sub_type_hdr;
- uint16_t node_id;
- int ret;
- struct crat_header *crat_table = (struct crat_header *)crat_image;
- uint16_t num_nodes;
- uint32_t image_len;
-
- if (!crat_image)
- return -EINVAL;
-
- num_nodes = crat_table->num_domains;
- image_len = crat_table->length;
-
- pr_info("Parsing CRAT table with %d nodes\n", num_nodes);
-
- for (node_id = 0; node_id < num_nodes; node_id++) {
- top_dev = kfd_create_topology_device();
- if (!top_dev) {
- kfd_release_live_view();
- return -ENOMEM;
- }
- }
-
- sys_props.platform_id =
- (*((uint64_t *)crat_table->oem_id)) & CRAT_OEMID_64BIT_MASK;
- sys_props.platform_oem = *((uint64_t *)crat_table->oem_table_id);
- sys_props.platform_rev = crat_table->revision;
-
- sub_type_hdr = (struct crat_subtype_generic *)(crat_table+1);
- while ((char *)sub_type_hdr + sizeof(struct crat_subtype_generic) <
- ((char *)crat_image) + image_len) {
- if (sub_type_hdr->flags & CRAT_SUBTYPE_FLAGS_ENABLED) {
- ret = kfd_parse_subtype(sub_type_hdr);
- if (ret != 0) {
- kfd_release_live_view();
- return ret;
- }
- }
-
- sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
- sub_type_hdr->length);
- }
-
- sys_props.generation_count++;
- topology_crat_parsed = 1;
-
- return 0;
-}
-
#define sysfs_show_gen_prop(buffer, fmt, ...) \
snprintf(buffer, PAGE_SIZE, "%s"fmt, buffer, __VA_ARGS__)
@@ -501,11 +213,17 @@ static ssize_t sysprops_show(struct kobject *kobj, struct attribute *attr,
return ret;
}
+static void kfd_topology_kobj_release(struct kobject *kobj)
+{
+ kfree(kobj);
+}
+
static const struct sysfs_ops sysprops_ops = {
.show = sysprops_show,
};
static struct kobj_type sysprops_type = {
+ .release = kfd_topology_kobj_release,
.sysfs_ops = &sysprops_ops,
};
@@ -541,6 +259,7 @@ static const struct sysfs_ops iolink_ops = {
};
static struct kobj_type iolink_type = {
+ .release = kfd_topology_kobj_release,
.sysfs_ops = &iolink_ops,
};
@@ -568,6 +287,7 @@ static const struct sysfs_ops mem_ops = {
};
static struct kobj_type mem_type = {
+ .release = kfd_topology_kobj_release,
.sysfs_ops = &mem_ops,
};
@@ -575,7 +295,7 @@ static ssize_t kfd_cache_show(struct kobject *kobj, struct attribute *attr,
char *buffer)
{
ssize_t ret;
- uint32_t i;
+ uint32_t i, j;
struct kfd_cache_properties *cache;
/* Making sure that the buffer is an empty string */
@@ -593,12 +313,18 @@ static ssize_t kfd_cache_show(struct kobject *kobj, struct attribute *attr,
sysfs_show_32bit_prop(buffer, "latency", cache->cache_latency);
sysfs_show_32bit_prop(buffer, "type", cache->cache_type);
snprintf(buffer, PAGE_SIZE, "%ssibling_map ", buffer);
- for (i = 0; i < KFD_TOPOLOGY_CPU_SIBLINGS; i++)
- ret = snprintf(buffer, PAGE_SIZE, "%s%d%s",
- buffer, cache->sibling_map[i],
- (i == KFD_TOPOLOGY_CPU_SIBLINGS-1) ?
- "\n" : ",");
-
+ for (i = 0; i < CRAT_SIBLINGMAP_SIZE; i++)
+ for (j = 0; j < sizeof(cache->sibling_map[0])*8; j++) {
+ /* Check each bit */
+ if (cache->sibling_map[i] & (1 << j))
+ ret = snprintf(buffer, PAGE_SIZE,
+ "%s%d%s", buffer, 1, ",");
+ else
+ ret = snprintf(buffer, PAGE_SIZE,
+ "%s%d%s", buffer, 0, ",");
+ }
+ /* Replace the last "," with end of line */
+ *(buffer + strlen(buffer) - 1) = 0xA;
return ret;
}
@@ -607,9 +333,43 @@ static const struct sysfs_ops cache_ops = {
};
static struct kobj_type cache_type = {
+ .release = kfd_topology_kobj_release,
.sysfs_ops = &cache_ops,
};
+/****** Sysfs of Performance Counters ******/
+
+struct kfd_perf_attr {
+ struct kobj_attribute attr;
+ uint32_t data;
+};
+
+static ssize_t perf_show(struct kobject *kobj, struct kobj_attribute *attrs,
+ char *buf)
+{
+ struct kfd_perf_attr *attr;
+
+ buf[0] = 0;
+ attr = container_of(attrs, struct kfd_perf_attr, attr);
+ if (!attr->data) /* invalid data for PMC */
+ return 0;
+ else
+ return sysfs_show_32bit_val(buf, attr->data);
+}
+
+#define KFD_PERF_DESC(_name, _data) \
+{ \
+ .attr = __ATTR(_name, 0444, perf_show, NULL), \
+ .data = _data, \
+}
+
+static struct kfd_perf_attr perf_attr_iommu[] = {
+ KFD_PERF_DESC(max_concurrent, 0),
+ KFD_PERF_DESC(num_counters, 0),
+ KFD_PERF_DESC(counter_ids, 0),
+};
+/****************************************/
+
static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
char *buffer)
{
@@ -646,18 +406,8 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
dev->node_props.cpu_cores_count);
sysfs_show_32bit_prop(buffer, "simd_count",
dev->node_props.simd_count);
-
- if (dev->mem_bank_count < dev->node_props.mem_banks_count) {
- pr_info_once("mem_banks_count truncated from %d to %d\n",
- dev->node_props.mem_banks_count,
- dev->mem_bank_count);
- sysfs_show_32bit_prop(buffer, "mem_banks_count",
- dev->mem_bank_count);
- } else {
- sysfs_show_32bit_prop(buffer, "mem_banks_count",
- dev->node_props.mem_banks_count);
- }
-
+ sysfs_show_32bit_prop(buffer, "mem_banks_count",
+ dev->node_props.mem_banks_count);
sysfs_show_32bit_prop(buffer, "caches_count",
dev->node_props.caches_count);
sysfs_show_32bit_prop(buffer, "io_links_count",
@@ -705,9 +455,12 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
HSA_CAP_WATCH_POINTS_TOTALBITS_MASK);
}
+ if (dev->gpu->device_info->asic_family == CHIP_TONGA)
+ dev->node_props.capability |=
+ HSA_CAP_AQL_QUEUE_DOUBLE_MAP;
+
sysfs_show_32bit_prop(buffer, "max_engine_clk_fcompute",
- dev->gpu->kfd2kgd->get_max_engine_clock_in_mhz(
- dev->gpu->kgd));
+ dev->node_props.max_engine_clk_fcompute);
sysfs_show_64bit_prop(buffer, "local_mem_size",
(unsigned long long int) 0);
@@ -729,6 +482,7 @@ static const struct sysfs_ops node_ops = {
};
static struct kobj_type node_type = {
+ .release = kfd_topology_kobj_release,
.sysfs_ops = &node_ops,
};
@@ -744,6 +498,7 @@ static void kfd_remove_sysfs_node_entry(struct kfd_topology_device *dev)
struct kfd_iolink_properties *iolink;
struct kfd_cache_properties *cache;
struct kfd_mem_properties *mem;
+ struct kfd_perf_properties *perf;
if (dev->kobj_iolink) {
list_for_each_entry(iolink, &dev->io_link_props, list)
@@ -780,6 +535,16 @@ static void kfd_remove_sysfs_node_entry(struct kfd_topology_device *dev)
dev->kobj_mem = NULL;
}
+ if (dev->kobj_perf) {
+ list_for_each_entry(perf, &dev->perf_props, list) {
+ kfree(perf->attr_group);
+ perf->attr_group = NULL;
+ }
+ kobject_del(dev->kobj_perf);
+ kobject_put(dev->kobj_perf);
+ dev->kobj_perf = NULL;
+ }
+
if (dev->kobj_node) {
sysfs_remove_file(dev->kobj_node, &dev->attr_gpuid);
sysfs_remove_file(dev->kobj_node, &dev->attr_name);
@@ -796,8 +561,10 @@ static int kfd_build_sysfs_node_entry(struct kfd_topology_device *dev,
struct kfd_iolink_properties *iolink;
struct kfd_cache_properties *cache;
struct kfd_mem_properties *mem;
+ struct kfd_perf_properties *perf;
int ret;
- uint32_t i;
+ uint32_t i, num_attrs;
+ struct attribute **attrs;
if (WARN_ON(dev->kobj_node))
return -EEXIST;
@@ -826,6 +593,10 @@ static int kfd_build_sysfs_node_entry(struct kfd_topology_device *dev,
if (!dev->kobj_iolink)
return -ENOMEM;
+ dev->kobj_perf = kobject_create_and_add("perf", dev->kobj_node);
+ if (!dev->kobj_perf)
+ return -ENOMEM;
+
/*
* Creating sysfs files for node properties
*/
@@ -903,11 +674,38 @@ static int kfd_build_sysfs_node_entry(struct kfd_topology_device *dev,
if (ret < 0)
return ret;
i++;
-}
+ }
+
+ /* All hardware blocks have the same number of attributes. */
+ num_attrs = sizeof(perf_attr_iommu)/sizeof(struct kfd_perf_attr);
+ list_for_each_entry(perf, &dev->perf_props, list) {
+ perf->attr_group = kzalloc(sizeof(struct kfd_perf_attr)
+ * num_attrs + sizeof(struct attribute_group),
+ GFP_KERNEL);
+ if (!perf->attr_group)
+ return -ENOMEM;
+
+ attrs = (struct attribute **)(perf->attr_group + 1);
+ if (!strcmp(perf->block_name, "iommu")) {
+ /* Information about the IOMMU's num_counters and counter_ids
+ * is exposed under /sys/bus/event_source/devices/amd_iommu, so
+ * we don't duplicate it here.
+ */
+ perf_attr_iommu[0].data = perf->max_concurrent;
+ for (i = 0; i < num_attrs; i++)
+ attrs[i] = &perf_attr_iommu[i].attr.attr;
+ }
+ perf->attr_group->name = perf->block_name;
+ perf->attr_group->attrs = attrs;
+ ret = sysfs_create_group(dev->kobj_perf, perf->attr_group);
+ if (ret < 0)
+ return ret;
+ }
return 0;
}
+/* Called with write topology lock acquired */
static int kfd_build_sysfs_node_tree(void)
{
struct kfd_topology_device *dev;
@@ -924,6 +722,7 @@ static int kfd_build_sysfs_node_tree(void)
return 0;
}
+/* Called with write topology lock acquired */
static void kfd_remove_sysfs_node_tree(void)
{
struct kfd_topology_device *dev;
@@ -995,75 +794,246 @@ static void kfd_topology_release_sysfs(void)
}
}
+/* Called with write topology_lock acquired */
+static void kfd_topology_update_device_list(struct list_head *temp_list,
+ struct list_head *master_list)
+{
+ while (!list_empty(temp_list)) {
+ list_move_tail(temp_list->next, master_list);
+ sys_props.num_devices++;
+ }
+}
+
+static void kfd_debug_print_topology(void)
+{
+ struct kfd_topology_device *dev;
+
+ down_read(&topology_lock);
+
+ dev = list_last_entry(&topology_device_list,
+ struct kfd_topology_device, list);
+ if (dev) {
+ if (dev->node_props.cpu_cores_count &&
+ dev->node_props.simd_count) {
+ pr_info("Topology: Add APU node [0x%0x:0x%0x]\n",
+ dev->node_props.device_id,
+ dev->node_props.vendor_id);
+ } else if (dev->node_props.cpu_cores_count)
+ pr_info("Topology: Add CPU node\n");
+ else if (dev->node_props.simd_count)
+ pr_info("Topology: Add dGPU node [0x%0x:0x%0x]\n",
+ dev->node_props.device_id,
+ dev->node_props.vendor_id);
+ }
+ up_read(&topology_lock);
+}
+
+/* Helper function for initializing platform_xx members of
+ * kfd_system_properties. Uses OEM info from the last CPU/APU node.
+ */
+static void kfd_update_system_properties(void)
+{
+ struct kfd_topology_device *dev;
+
+ down_read(&topology_lock);
+ dev = list_last_entry(&topology_device_list,
+ struct kfd_topology_device, list);
+ if (dev) {
+ sys_props.platform_id =
+ (*((uint64_t *)dev->oem_id)) & CRAT_OEMID_64BIT_MASK;
+ sys_props.platform_oem = *((uint64_t *)dev->oem_table_id);
+ sys_props.platform_rev = dev->oem_revision;
+ }
+ up_read(&topology_lock);
+}
+
+static void find_system_memory(const struct dmi_header *dm,
+ void *private)
+{
+ struct kfd_mem_properties *mem;
+ u16 mem_width, mem_clock;
+ struct kfd_topology_device *kdev =
+ (struct kfd_topology_device *)private;
+ const u8 *dmi_data = (const u8 *)(dm + 1);
+
+ if (dm->type == DMI_ENTRY_MEM_DEVICE && dm->length >= 0x15) {
+ mem_width = (u16)(*(const u16 *)(dmi_data + 0x6));
+ mem_clock = (u16)(*(const u16 *)(dmi_data + 0x11));
+ list_for_each_entry(mem, &kdev->mem_props, list) {
+ if (mem_width != 0xFFFF && mem_width != 0)
+ mem->width = mem_width;
+ if (mem_clock != 0)
+ mem->mem_clk_max = mem_clock;
+ }
+ }
+}
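Editor's note on the magic offsets above, based on a best-effort reading of SMBIOS Type 17 (Memory Device); treat the field names as assumptions rather than something stated in the patch:

	/*
	 * dmi_data points just past the 4-byte struct dmi_header, so:
	 *   dmi_data + 0x06  ->  structure offset 0x0A, "Data Width" in bits
	 *   dmi_data + 0x11  ->  structure offset 0x15, "Speed" in MT/s
	 * which is why 0xFFFF ("unknown") and 0 are filtered out before use.
	 */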
+
+/*
+ * Performance counter information is not part of the CRAT, but we would like
+ * to expose it in sysfs under the topology directory so the Thunk can read it.
+ * This function is called before the sysfs tree is updated.
+ */
+static int kfd_add_perf_to_topology(struct kfd_topology_device *kdev)
+{
+ struct kfd_perf_properties *props;
+
+ if (amd_iommu_pc_supported()) {
+ props = kfd_alloc_struct(props);
+ if (!props)
+ return -ENOMEM;
+ strcpy(props->block_name, "iommu");
+ props->max_concurrent = amd_iommu_pc_get_max_banks(0) *
+ amd_iommu_pc_get_max_counters(0); /* assume one iommu */
+ list_add_tail(&props->list, &kdev->perf_props);
+ }
+
+ return 0;
+}
+
+/* kfd_add_non_crat_information - Add information that is not currently
+ * defined in CRAT but is necessary for KFD topology
+ * @kdev - topology device to which the additional info is added
+ */
+static void kfd_add_non_crat_information(struct kfd_topology_device *kdev)
+{
+ /* Check if CPU only node. */
+ if (!kdev->gpu) {
+ /* Add system memory information */
+ dmi_walk(find_system_memory, kdev);
+ }
+ /* TODO: For GPU node, rearrange code from kfd_topology_add_device */
+}
+
+/* kfd_is_acpi_crat_invalid - CRAT from ACPI is valid only for AMD APU devices.
+ * Ignore CRAT for all other devices. An AMD APU is identified when both
+ * CPU and GPU cores are present.
+ * @device_list - topology device list created by parsing the ACPI CRAT table.
+ * @return - TRUE if invalid, FALSE if valid.
+ */
+static bool kfd_is_acpi_crat_invalid(struct list_head *device_list)
+{
+ struct kfd_topology_device *dev;
+
+ list_for_each_entry(dev, device_list, list) {
+ if (dev->node_props.cpu_cores_count &&
+ dev->node_props.simd_count)
+ return false;
+ }
+ pr_info("Ignoring ACPI CRAT on non-APU system\n");
+ return true;
+}
+
int kfd_topology_init(void)
{
void *crat_image = NULL;
size_t image_size = 0;
int ret;
-
- /*
- * Initialize the head for the topology device list
+ struct list_head temp_topology_device_list;
+ int cpu_only_node = 0;
+ struct kfd_topology_device *kdev;
+ int proximity_domain;
+
+ /* topology_device_list - Master list of all topology devices
+ * temp_topology_device_list - temporary list created while parsing CRAT
+ * or VCRAT. Once parsing is complete, the contents of the list are moved
+ * to topology_device_list
*/
+
+ /* Initialize the heads of both lists */
INIT_LIST_HEAD(&topology_device_list);
+ INIT_LIST_HEAD(&temp_topology_device_list);
init_rwsem(&topology_lock);
- topology_crat_parsed = 0;
memset(&sys_props, 0, sizeof(sys_props));
+ /* Proximity domains in ACPI CRAT tables start counting at
+ * 0. The same should be true for virtual CRAT tables created
+ * at this stage. GPUs added later in kfd_topology_add_device
+ * use a counter.
+ */
+ proximity_domain = 0;
+
/*
- * Get the CRAT image from the ACPI
+ * Get the CRAT image from ACPI. If ACPI doesn't provide one, or
+ * the ACPI CRAT is invalid, create a virtual CRAT.
+ * NOTE: The current implementation expects all AMD APUs to have a
+ * CRAT. If no CRAT is available, the node is assumed to be CPU-only.
*/
- ret = kfd_topology_get_crat_acpi(crat_image, &image_size);
- if (ret == 0 && image_size > 0) {
- pr_info("Found CRAT image with size=%zd\n", image_size);
- crat_image = kmalloc(image_size, GFP_KERNEL);
- if (!crat_image) {
- ret = -ENOMEM;
- pr_err("No memory for allocating CRAT image\n");
- goto err;
+ ret = kfd_create_crat_image_acpi(&crat_image, &image_size);
+ if (!ret) {
+ ret = kfd_parse_crat_table(crat_image,
+ &temp_topology_device_list,
+ proximity_domain);
+ if (ret ||
+ kfd_is_acpi_crat_invalid(&temp_topology_device_list)) {
+ kfd_release_topology_device_list(
+ &temp_topology_device_list);
+ kfd_destroy_crat_image(crat_image);
+ crat_image = NULL;
}
- ret = kfd_topology_get_crat_acpi(crat_image, &image_size);
-
- if (ret == 0) {
- down_write(&topology_lock);
- ret = kfd_parse_crat_table(crat_image);
- if (ret == 0)
- ret = kfd_topology_update_sysfs();
- up_write(&topology_lock);
- } else {
- pr_err("Couldn't get CRAT table size from ACPI\n");
+ }
+
+ if (!crat_image) {
+ ret = kfd_create_crat_image_virtual(&crat_image, &image_size,
+ COMPUTE_UNIT_CPU, NULL,
+ proximity_domain);
+ cpu_only_node = 1;
+ if (ret) {
+ pr_err("Error creating VCRAT table for CPU\n");
+ return ret;
}
- kfree(crat_image);
- } else if (ret == -ENODATA) {
- ret = 0;
- } else {
- pr_err("Couldn't get CRAT table size from ACPI\n");
+
+ ret = kfd_parse_crat_table(crat_image,
+ &temp_topology_device_list,
+ proximity_domain);
+ if (ret) {
+ pr_err("Error parsing VCRAT table for CPU\n");
+ goto err;
+ }
+ }
+
+ kdev = list_first_entry(&temp_topology_device_list,
+ struct kfd_topology_device, list);
+ kfd_add_perf_to_topology(kdev);
+
+ down_write(&topology_lock);
+ kfd_topology_update_device_list(&temp_topology_device_list,
+ &topology_device_list);
+ atomic_set(&topology_crat_proximity_domain, sys_props.num_devices-1);
+ ret = kfd_topology_update_sysfs();
+ up_write(&topology_lock);
+
+ if (!ret) {
+ sys_props.generation_count++;
+ kfd_update_system_properties();
+ kfd_debug_print_topology();
+ pr_info("Finished initializing topology\n");
+ } else
+ pr_err("Failed to update topology in sysfs ret=%d\n", ret);
+
+ /* For nodes with GPU, this information gets added
+ * when GPU is detected (kfd_topology_add_device).
+ */
+ if (cpu_only_node) {
+ /* Add additional information to CPU only node created above */
+ down_write(&topology_lock);
+ kdev = list_first_entry(&topology_device_list,
+ struct kfd_topology_device, list);
+ up_write(&topology_lock);
+ kfd_add_non_crat_information(kdev);
}
err:
- pr_info("Finished initializing topology ret=%d\n", ret);
+ kfd_destroy_crat_image(crat_image);
return ret;
}
void kfd_topology_shutdown(void)
{
+ down_write(&topology_lock);
kfd_topology_release_sysfs();
kfd_release_live_view();
-}
-
-static void kfd_debug_print_topology(void)
-{
- struct kfd_topology_device *dev;
- uint32_t i = 0;
-
- pr_info("DEBUG PRINT OF TOPOLOGY:");
- list_for_each_entry(dev, &topology_device_list, list) {
- pr_info("Node: %d\n", i);
- pr_info("\tGPU assigned: %s\n", (dev->gpu ? "yes" : "no"));
- pr_info("\tCPU count: %d\n", dev->node_props.cpu_cores_count);
- pr_info("\tSIMD count: %d", dev->node_props.simd_count);
- i++;
- }
+ up_write(&topology_lock);
}
static uint32_t kfd_generate_gpu_id(struct kfd_dev *gpu)
@@ -1072,11 +1042,15 @@ static uint32_t kfd_generate_gpu_id(struct kfd_dev *gpu)
uint32_t buf[7];
uint64_t local_mem_size;
int i;
+ struct kfd_local_mem_info local_mem_info;
if (!gpu)
return 0;
- local_mem_size = gpu->kfd2kgd->get_vmem_size(gpu->kgd);
+ gpu->kfd2kgd->get_local_mem_info(gpu->kgd, &local_mem_info);
+
+ local_mem_size = local_mem_info.local_mem_size_private +
+ local_mem_info.local_mem_size_public;
buf[0] = gpu->pdev->devfn;
buf[1] = gpu->pdev->subsystem_vendor;
@@ -1091,19 +1065,26 @@ static uint32_t kfd_generate_gpu_id(struct kfd_dev *gpu)
return hashout;
}
-
+/* kfd_assign_gpu - Attach @gpu to the correct kfd topology device. If
+ * the GPU device is not already present in the topology device
+ * list then return NULL. This means a new topology device has to
+ * be created for this GPU.
+ * TODO: Rather than assigning @gpu to the first topology device without
+ * a gpu attached, it would be better to have a more stringent check.
+ */
static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu)
{
struct kfd_topology_device *dev;
struct kfd_topology_device *out_dev = NULL;
+ down_write(&topology_lock);
list_for_each_entry(dev, &topology_device_list, list)
if (!dev->gpu && (dev->node_props.simd_count > 0)) {
dev->gpu = gpu;
out_dev = dev;
break;
}
-
+ up_write(&topology_lock);
return out_dev;
}
@@ -1115,84 +1096,196 @@ static void kfd_notify_gpu_change(uint32_t gpu_id, int arrival)
*/
}
+/* kfd_fill_mem_clk_max_info - Since CRAT doesn't have memory clock info,
+ * patch this after CRAT parsing.
+ */
+static void kfd_fill_mem_clk_max_info(struct kfd_topology_device *dev)
+{
+ struct kfd_mem_properties *mem;
+ struct kfd_local_mem_info local_mem_info;
+
+ if (!dev)
+ return;
+
+ /* Currently, the amdgpu driver (amdgpu_mc) deals only with GPUs that
+ * have a single bank of VRAM local memory.
+ * For dGPUs - VCRAT reports only one bank of local memory.
+ * For APUs - if the CRAT from ACPI reports more than one bank, then
+ * all the banks report the same mem_clk_max information.
+ */
+ dev->gpu->kfd2kgd->get_local_mem_info(dev->gpu->kgd,
+ &local_mem_info);
+
+ list_for_each_entry(mem, &dev->mem_props, list)
+ mem->mem_clk_max = local_mem_info.mem_clk_max;
+}
+
+static void kfd_fill_iolink_non_crat_info(struct kfd_topology_device *dev)
+{
+ struct kfd_iolink_properties *link;
+
+ if (!dev || !dev->gpu)
+ return;
+
+ /* The GPU only creates direct links, so apply the flags setting to all of them */
+ if (dev->gpu->device_info->asic_family == CHIP_HAWAII)
+ list_for_each_entry(link, &dev->io_link_props, list)
+ link->flags = CRAT_IOLINK_FLAGS_ENABLED |
+ CRAT_IOLINK_FLAGS_NO_ATOMICS_32_BIT |
+ CRAT_IOLINK_FLAGS_NO_ATOMICS_64_BIT;
+}
+
int kfd_topology_add_device(struct kfd_dev *gpu)
{
uint32_t gpu_id;
struct kfd_topology_device *dev;
- int res;
+ struct kfd_cu_info cu_info;
+ int res = 0;
+ struct list_head temp_topology_device_list;
+ void *crat_image = NULL;
+ size_t image_size = 0;
+ int proximity_domain;
+
+ INIT_LIST_HEAD(&temp_topology_device_list);
gpu_id = kfd_generate_gpu_id(gpu);
pr_debug("Adding new GPU (ID: 0x%x) to topology\n", gpu_id);
- down_write(&topology_lock);
- /*
- * Try to assign the GPU to existing topology device (generated from
- * CRAT table
+ proximity_domain = atomic_inc_return(&topology_crat_proximity_domain);
+
+ /* Check to see if this gpu device exists in the topology_device_list.
+ * If so, assign the gpu to that device;
+ * otherwise create a virtual CRAT for this gpu device, then parse that
+ * CRAT to create a new topology device. Once created, assign the gpu to
+ * that topology device.
*/
dev = kfd_assign_gpu(gpu);
if (!dev) {
- pr_info("GPU was not found in the current topology. Extending.\n");
- kfd_debug_print_topology();
- dev = kfd_create_topology_device();
- if (!dev) {
- res = -ENOMEM;
+ res = kfd_create_crat_image_virtual(&crat_image, &image_size,
+ COMPUTE_UNIT_GPU, gpu,
+ proximity_domain);
+ if (res) {
+ pr_err("Error creating VCRAT for GPU (ID: 0x%x)\n",
+ gpu_id);
+ return res;
+ }
+ res = kfd_parse_crat_table(crat_image,
+ &temp_topology_device_list,
+ proximity_domain);
+ if (res) {
+ pr_err("Error parsing VCRAT for GPU (ID: 0x%x)\n",
+ gpu_id);
goto err;
}
- dev->gpu = gpu;
- /*
- * TODO: Make a call to retrieve topology information from the
- * GPU vBIOS
- */
+ down_write(&topology_lock);
+ kfd_topology_update_device_list(&temp_topology_device_list,
+ &topology_device_list);
/* Update the SYSFS tree, since we added another topology
* device
*/
- if (kfd_topology_update_sysfs() < 0)
- kfd_topology_release_sysfs();
-
+ res = kfd_topology_update_sysfs();
+ up_write(&topology_lock);
+
+ if (!res)
+ sys_props.generation_count++;
+ else
+ pr_err("Failed to update GPU (ID: 0x%x) to sysfs topology. res=%d\n",
+ gpu_id, res);
+ dev = kfd_assign_gpu(gpu);
+ if (WARN_ON(!dev)) {
+ res = -ENODEV;
+ goto err;
+ }
}
dev->gpu_id = gpu_id;
gpu->id = gpu_id;
+
+ /* TODO: Move the following lines to function
+ * kfd_add_non_crat_information
+ */
+
+ /* Fill in additional information that is not available in CRAT but
+ * needed for the topology
+ */
+
+ dev->gpu->kfd2kgd->get_cu_info(dev->gpu->kgd, &cu_info);
+ dev->node_props.simd_arrays_per_engine =
+ cu_info.num_shader_arrays_per_engine;
+
dev->node_props.vendor_id = gpu->pdev->vendor;
dev->node_props.device_id = gpu->pdev->device;
- dev->node_props.location_id = (gpu->pdev->bus->number << 24) +
- (gpu->pdev->devfn & 0xffffff);
- /*
- * TODO: Retrieve max engine clock values from KGD
- */
+ dev->node_props.location_id = PCI_DEVID(gpu->pdev->bus->number,
+ gpu->pdev->devfn);
+ dev->node_props.max_engine_clk_fcompute =
+ dev->gpu->kfd2kgd->get_max_engine_clock_in_mhz(dev->gpu->kgd);
+ dev->node_props.max_engine_clk_ccompute =
+ cpufreq_quick_get_max(0) / 1000;
+
+ kfd_fill_mem_clk_max_info(dev);
+ kfd_fill_iolink_non_crat_info(dev);
+
+ switch (dev->gpu->device_info->asic_family) {
+ case CHIP_KAVERI:
+ case CHIP_HAWAII:
+ case CHIP_TONGA:
+ dev->node_props.capability |= ((HSA_CAP_DOORBELL_TYPE_PRE_1_0 <<
+ HSA_CAP_DOORBELL_TYPE_TOTALBITS_SHIFT) &
+ HSA_CAP_DOORBELL_TYPE_TOTALBITS_MASK);
+ break;
+ case CHIP_CARRIZO:
+ case CHIP_FIJI:
+ case CHIP_POLARIS10:
+ case CHIP_POLARIS11:
+ pr_debug("Adding doorbell packet type capability\n");
+ dev->node_props.capability |= ((HSA_CAP_DOORBELL_TYPE_1_0 <<
+ HSA_CAP_DOORBELL_TYPE_TOTALBITS_SHIFT) &
+ HSA_CAP_DOORBELL_TYPE_TOTALBITS_MASK);
+ break;
+ default:
+ WARN(1, "Unexpected ASIC family %u",
+ dev->gpu->device_info->asic_family);
+ }
+ /* Fix errors in CZ CRAT.
+ * simd_count: Carrizo CRAT reports wrong simd_count, probably
+ * because it doesn't consider masked out CUs
+ * max_waves_per_simd: Carrizo reports wrong max_waves_per_simd
+ * capability flag: Carrizo CRAT doesn't report IOMMU flags
+ */
if (dev->gpu->device_info->asic_family == CHIP_CARRIZO) {
- dev->node_props.capability |= HSA_CAP_DOORBELL_PACKET_TYPE;
- pr_info("Adding doorbell packet type capability\n");
+ dev->node_props.simd_count =
+ cu_info.simd_per_cu * cu_info.cu_active_number;
+ dev->node_props.max_waves_per_simd = 10;
+ dev->node_props.capability |= HSA_CAP_ATS_PRESENT;
}
- res = 0;
-
-err:
- up_write(&topology_lock);
+ kfd_debug_print_topology();
- if (res == 0)
+ if (!res)
kfd_notify_gpu_change(gpu_id, 1);
-
+err:
+ kfd_destroy_crat_image(crat_image);
return res;
}
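A quick worked check of the PCI_DEVID() packing used above (values invented for illustration):

	/* bus 0x03, slot 0, function 0 */
	u16 loc = PCI_DEVID(0x03, PCI_DEVFN(0, 0));	/* == 0x0300: bus in bits 15:8, devfn in bits 7:0 */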
int kfd_topology_remove_device(struct kfd_dev *gpu)
{
- struct kfd_topology_device *dev;
+ struct kfd_topology_device *dev, *tmp;
uint32_t gpu_id;
int res = -ENODEV;
down_write(&topology_lock);
- list_for_each_entry(dev, &topology_device_list, list)
+ list_for_each_entry_safe(dev, tmp, &topology_device_list, list)
if (dev->gpu == gpu) {
gpu_id = dev->gpu_id;
kfd_remove_sysfs_node_entry(dev);
kfd_release_topology_device(dev);
+ sys_props.num_devices--;
res = 0;
if (kfd_topology_update_sysfs() < 0)
kfd_topology_release_sysfs();
@@ -1201,28 +1294,32 @@ int kfd_topology_remove_device(struct kfd_dev *gpu)
up_write(&topology_lock);
- if (res == 0)
+ if (!res)
kfd_notify_gpu_change(gpu_id, 0);
return res;
}
-/*
- * When idx is out of bounds, the function will return NULL
+/* kfd_topology_enum_kfd_devices - Enumerate through all devices in KFD
+ * topology. If a GPU device is found at @idx, a valid kfd_dev pointer is
+ * returned through @kdev.
+ * Return - 0: On success (@kdev will be NULL for non-GPU nodes)
+ * -1: If end of list
*/
-struct kfd_dev *kfd_topology_enum_kfd_devices(uint8_t idx)
+int kfd_topology_enum_kfd_devices(uint8_t idx, struct kfd_dev **kdev)
{
struct kfd_topology_device *top_dev;
- struct kfd_dev *device = NULL;
uint8_t device_idx = 0;
+ *kdev = NULL;
down_read(&topology_lock);
list_for_each_entry(top_dev, &topology_device_list, list) {
if (device_idx == idx) {
- device = top_dev->gpu;
- break;
+ *kdev = top_dev->gpu;
+ up_read(&topology_lock);
+ return 0;
}
device_idx++;
@@ -1230,6 +1327,88 @@ struct kfd_dev *kfd_topology_enum_kfd_devices(uint8_t idx)
up_read(&topology_lock);
- return device;
+ return -1;
+
+}
+
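With the new return contract, a caller walks indices until -1 is returned; a hedged sketch (handle_gpu() is a made-up helper, not a KFD function):

	struct kfd_dev *kdev;
	uint8_t idx = 0;

	while (kfd_topology_enum_kfd_devices(idx++, &kdev) == 0) {
		if (!kdev)
			continue;	/* CPU-only node, nothing to do */
		handle_gpu(kdev);	/* hypothetical per-GPU work */
	}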
+static int kfd_cpumask_to_apic_id(const struct cpumask *cpumask)
+{
+ const struct cpuinfo_x86 *cpuinfo;
+ int first_cpu_of_numa_node;
+
+ if (!cpumask || cpumask == cpu_none_mask)
+ return -1;
+ first_cpu_of_numa_node = cpumask_first(cpumask);
+ if (first_cpu_of_numa_node >= nr_cpu_ids)
+ return -1;
+ cpuinfo = &cpu_data(first_cpu_of_numa_node);
+ return cpuinfo->apicid;
}
+
+/* kfd_numa_node_to_apic_id - Returns the APIC ID of the first logical processor
+ * of the given NUMA node (numa_node_id)
+ * Return -1 on failure
+ */
+int kfd_numa_node_to_apic_id(int numa_node_id)
+{
+ if (numa_node_id == -1) {
+ pr_warn("Invalid NUMA Node. Use online CPU mask\n");
+ return kfd_cpumask_to_apic_id(cpu_online_mask);
+ }
+ return kfd_cpumask_to_apic_id(cpumask_of_node(numa_node_id));
+}
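Hedged usage sketch for the helper above; dev_to_node() may return -1 for an unknown node, in which case the helper falls back to the online-CPU mask (the surrounding variables are assumed, not from the patch):

	int apic_id = kfd_numa_node_to_apic_id(dev_to_node(&gpu->pdev->dev));

	if (apic_id < 0)
		pr_debug("No APIC ID found for the GPU's NUMA node\n");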
+
+#if defined(CONFIG_DEBUG_FS)
+
+int kfd_debugfs_hqds_by_device(struct seq_file *m, void *data)
+{
+ struct kfd_topology_device *dev;
+ unsigned int i = 0;
+ int r = 0;
+
+ down_read(&topology_lock);
+
+ list_for_each_entry(dev, &topology_device_list, list) {
+ if (!dev->gpu) {
+ i++;
+ continue;
+ }
+
+ seq_printf(m, "Node %u, gpu_id %x:\n", i++, dev->gpu->id);
+ r = dqm_debugfs_hqds(m, dev->gpu->dqm);
+ if (r)
+ break;
+ }
+
+ up_read(&topology_lock);
+
+ return r;
+}
+
+int kfd_debugfs_rls_by_device(struct seq_file *m, void *data)
+{
+ struct kfd_topology_device *dev;
+ unsigned int i = 0;
+ int r = 0;
+
+ down_read(&topology_lock);
+
+ list_for_each_entry(dev, &topology_device_list, list) {
+ if (!dev->gpu) {
+ i++;
+ continue;
+ }
+
+ seq_printf(m, "Node %u, gpu_id %x:\n", i++, dev->gpu->id);
+ r = pm_debugfs_runlist(m, &dev->gpu->dqm->packets);
+ if (r)
+ break;
+ }
+
+ up_read(&topology_lock);
+
+ return r;
+}
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
index c3ddb9b95ff8..53fca1f45401 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
@@ -39,8 +39,13 @@
#define HSA_CAP_WATCH_POINTS_SUPPORTED 0x00000080
#define HSA_CAP_WATCH_POINTS_TOTALBITS_MASK 0x00000f00
#define HSA_CAP_WATCH_POINTS_TOTALBITS_SHIFT 8
-#define HSA_CAP_RESERVED 0xfffff000
-#define HSA_CAP_DOORBELL_PACKET_TYPE 0x00001000
+#define HSA_CAP_DOORBELL_TYPE_TOTALBITS_MASK 0x00003000
+#define HSA_CAP_DOORBELL_TYPE_TOTALBITS_SHIFT 12
+#define HSA_CAP_RESERVED 0xffffc000
+
+#define HSA_CAP_DOORBELL_TYPE_PRE_1_0 0x0
+#define HSA_CAP_DOORBELL_TYPE_1_0 0x1
+#define HSA_CAP_AQL_QUEUE_DOUBLE_MAP 0x00004000
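Illustrative only: with the new mask/shift, the doorbell type occupies bits 13:12 of the capability word; this mirrors the switch added in kfd_topology_add_device above.

	uint32_t cap = 0, doorbell_type;

	cap |= (HSA_CAP_DOORBELL_TYPE_1_0 << HSA_CAP_DOORBELL_TYPE_TOTALBITS_SHIFT) &
		HSA_CAP_DOORBELL_TYPE_TOTALBITS_MASK;
	doorbell_type = (cap & HSA_CAP_DOORBELL_TYPE_TOTALBITS_MASK) >>
		HSA_CAP_DOORBELL_TYPE_TOTALBITS_SHIFT;	/* == HSA_CAP_DOORBELL_TYPE_1_0 */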
struct kfd_node_properties {
uint32_t cpu_cores_count;
@@ -91,8 +96,6 @@ struct kfd_mem_properties {
struct attribute attr;
};
-#define KFD_TOPOLOGY_CPU_SIBLINGS 256
-
#define HSA_CACHE_TYPE_DATA 0x00000001
#define HSA_CACHE_TYPE_INSTRUCTION 0x00000002
#define HSA_CACHE_TYPE_CPU 0x00000004
@@ -109,7 +112,7 @@ struct kfd_cache_properties {
uint32_t cache_assoc;
uint32_t cache_latency;
uint32_t cache_type;
- uint8_t sibling_map[KFD_TOPOLOGY_CPU_SIBLINGS];
+ uint8_t sibling_map[CRAT_SIBLINGMAP_SIZE];
struct kobject *kobj;
struct attribute attr;
};
@@ -132,24 +135,36 @@ struct kfd_iolink_properties {
struct attribute attr;
};
+struct kfd_perf_properties {
+ struct list_head list;
+ char block_name[16];
+ uint32_t max_concurrent;
+ struct attribute_group *attr_group;
+};
+
struct kfd_topology_device {
struct list_head list;
uint32_t gpu_id;
+ uint32_t proximity_domain;
struct kfd_node_properties node_props;
- uint32_t mem_bank_count;
struct list_head mem_props;
uint32_t cache_count;
struct list_head cache_props;
uint32_t io_link_count;
struct list_head io_link_props;
+ struct list_head perf_props;
struct kfd_dev *gpu;
struct kobject *kobj_node;
struct kobject *kobj_mem;
struct kobject *kobj_cache;
struct kobject *kobj_iolink;
+ struct kobject *kobj_perf;
struct attribute attr_gpuid;
struct attribute attr_name;
struct attribute attr_props;
+ uint8_t oem_id[CRAT_OEMID_LENGTH];
+ uint8_t oem_table_id[CRAT_OEMTABLEID_LENGTH];
+ uint32_t oem_revision;
};
struct kfd_system_properties {
@@ -164,6 +179,12 @@ struct kfd_system_properties {
struct attribute attr_props;
};
+struct kfd_topology_device *kfd_create_topology_device(
+ struct list_head *device_list);
+void kfd_release_topology_device_list(struct list_head *device_list);
+extern bool amd_iommu_pc_supported(void);
+extern u8 amd_iommu_pc_get_max_banks(u16 devid);
+extern u8 amd_iommu_pc_get_max_counters(u16 devid);
#endif /* __KFD_TOPOLOGY_H__ */
diff --git a/drivers/gpu/drm/amd/display/Makefile b/drivers/gpu/drm/amd/display/Makefile
index 8ba37dd9cf7f..c27c81cdeed3 100644
--- a/drivers/gpu/drm/amd/display/Makefile
+++ b/drivers/gpu/drm/amd/display/Makefile
@@ -1,4 +1,25 @@
#
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
# Makefile for the DAL (Display Abstract Layer), which is a sub-component
# of the AMDGPU drm driver.
# It provides the HW control for display related functionalities.
diff --git a/drivers/gpu/drm/amd/display/TODO b/drivers/gpu/drm/amd/display/TODO
index 46464678f2b3..357d59648401 100644
--- a/drivers/gpu/drm/amd/display/TODO
+++ b/drivers/gpu/drm/amd/display/TODO
@@ -105,3 +105,6 @@ useless with filtering output. dynamic debug printing might be an option.
20. Use kernel i2c device to program HDMI retimer. Some boards have an HDMI
retimer that we need to program to pass PHY compliance. Currently that's
bypassing the i2c device and goes directly to HW. This should be changed.
+
+21. Remove vector.c from dc/basics. It's used in DDC code which can probably
+be simplified enough to no longer need a vector implementation.
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
index 4699e47aa76b..2b72009844f8 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
@@ -1,4 +1,25 @@
#
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
# Makefile for the 'dm' sub-component of DAL.
# It provides the control and status of dm blocks.
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index ccbf10e3bbb6..1ce4c98385e3 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -431,9 +431,9 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
adev->dm.dc = dc_create(&init_data);
if (adev->dm.dc) {
- DRM_INFO("Display Core initialized!\n");
+ DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
} else {
- DRM_INFO("Display Core failed to initialize!\n");
+ DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
goto error;
}
@@ -2351,7 +2351,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
const struct dm_connector_state *dm_state)
{
struct drm_display_mode *preferred_mode = NULL;
- const struct drm_connector *drm_connector;
+ struct drm_connector *drm_connector;
struct dc_stream_state *stream = NULL;
struct drm_display_mode mode = *drm_mode;
bool native_mode_found = false;
@@ -2370,11 +2370,13 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
if (!aconnector->dc_sink) {
/*
- * Exclude MST from creating fake_sink
- * TODO: need to enable MST into fake_sink feature
+ * Create the dc_sink when necessary for MST;
+ * don't apply fake_sink to MST.
*/
- if (aconnector->mst_port)
- goto stream_create_fail;
+ if (aconnector->mst_port) {
+ dm_dp_mst_dc_sink_create(drm_connector);
+ goto mst_dc_sink_create_done;
+ }
if (create_fake_sink(aconnector))
goto stream_create_fail;
@@ -2425,6 +2427,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
stream_create_fail:
dm_state_null:
drm_connector_null:
+mst_dc_sink_create_done:
return stream;
}
@@ -2725,8 +2728,7 @@ static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
};
struct edid *edid;
- if (!aconnector->base.edid_blob_ptr ||
- !aconnector->base.edid_blob_ptr->data) {
+ if (!aconnector->base.edid_blob_ptr) {
DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
aconnector->base.name);
@@ -4514,18 +4516,15 @@ static int dm_update_crtcs_state(struct dc *dc,
__func__, acrtc->base.base.id);
break;
}
- }
- if (enable && dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
- dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
-
- new_crtc_state->mode_changed = false;
-
- DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
- new_crtc_state->mode_changed);
+ if (dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
+ dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
+ new_crtc_state->mode_changed = false;
+ DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
+ new_crtc_state->mode_changed);
+ }
}
-
if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
goto next_crtc;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 8a1e4f5dbd64..2faa77a7eeda 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -189,6 +189,8 @@ struct amdgpu_dm_connector {
struct mutex hpd_lock;
bool fake_enable;
+
+ bool mst_connected;
};
#define to_amdgpu_dm_connector(x) container_of(x, struct amdgpu_dm_connector, base)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 707928b88448..f3d87f418d2e 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -180,6 +180,42 @@ static int dm_connector_update_modes(struct drm_connector *connector,
return drm_add_edid_modes(connector, edid);
}
+void dm_dp_mst_dc_sink_create(struct drm_connector *connector)
+{
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+ struct edid *edid;
+ struct dc_sink *dc_sink;
+ struct dc_sink_init_data init_params = {
+ .link = aconnector->dc_link,
+ .sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST };
+
+ edid = drm_dp_mst_get_edid(connector, &aconnector->mst_port->mst_mgr, aconnector->port);
+
+ if (!edid) {
+ drm_mode_connector_update_edid_property(
+ &aconnector->base,
+ NULL);
+ return;
+ }
+
+ aconnector->edid = edid;
+
+ dc_sink = dc_link_add_remote_sink(
+ aconnector->dc_link,
+ (uint8_t *)aconnector->edid,
+ (aconnector->edid->extensions + 1) * EDID_LENGTH,
+ &init_params);
+
+ dc_sink->priv = aconnector;
+ aconnector->dc_sink = dc_sink;
+
+ amdgpu_dm_add_sink_to_freesync_module(
+ connector, aconnector->edid);
+
+ drm_mode_connector_update_edid_property(
+ &aconnector->base, aconnector->edid);
+}
+
static int dm_dp_mst_get_modes(struct drm_connector *connector)
{
struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
@@ -306,6 +342,7 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
drm_mode_connector_set_path_property(connector, pathprop);
drm_connector_list_iter_end(&conn_iter);
+ aconnector->mst_connected = true;
return &aconnector->base;
}
}
@@ -358,6 +395,8 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
*/
amdgpu_dm_connector_funcs_reset(connector);
+ aconnector->mst_connected = true;
+
DRM_INFO("DM_MST: added connector: %p [id: %d] [master: %p]\n",
aconnector, connector->base.id, aconnector->mst_port);
@@ -389,6 +428,8 @@ static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
drm_mode_connector_update_edid_property(
&aconnector->base,
NULL);
+
+ aconnector->mst_connected = false;
}
static void dm_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
@@ -399,10 +440,18 @@ static void dm_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
drm_kms_helper_hotplug_event(dev);
}
+static void dm_dp_mst_link_status_reset(struct drm_connector *connector)
+{
+ mutex_lock(&connector->dev->mode_config.mutex);
+ drm_mode_connector_set_link_status_property(connector, DRM_MODE_LINK_STATUS_BAD);
+ mutex_unlock(&connector->dev->mode_config.mutex);
+}
+
static void dm_dp_mst_register_connector(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
if (adev->mode_info.rfbdev)
drm_fb_helper_add_one_connector(&adev->mode_info.rfbdev->helper, connector);
@@ -411,6 +460,8 @@ static void dm_dp_mst_register_connector(struct drm_connector *connector)
drm_connector_register(connector);
+ if (aconnector->mst_connected)
+ dm_dp_mst_link_status_reset(connector);
}
static const struct drm_dp_mst_topology_cbs dm_mst_cbs = {
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
index 2da851b40042..8cf51da26657 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
@@ -31,5 +31,6 @@ struct amdgpu_dm_connector;
void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
struct amdgpu_dm_connector *aconnector);
+void dm_dp_mst_dc_sink_create(struct drm_connector *connector);
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/Makefile b/drivers/gpu/drm/amd/display/dc/Makefile
index 4f83e3011743..aed538a4d1ba 100644
--- a/drivers/gpu/drm/amd/display/dc/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/Makefile
@@ -1,4 +1,25 @@
#
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
# Makefile for Display Core (dc) component.
#
diff --git a/drivers/gpu/drm/amd/display/dc/basics/Makefile b/drivers/gpu/drm/amd/display/dc/basics/Makefile
index 43c5ccdeeb72..bca33bd9a0d2 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/basics/Makefile
@@ -1,9 +1,30 @@
#
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
# Makefile for the 'utils' sub-component of DAL.
# It provides the general basic services required by other DAL
# subcomponents.
-BASICS = conversion.o fixpt31_32.o fixpt32_32.o grph_object_id.o \
+BASICS = conversion.o fixpt31_32.o fixpt32_32.o \
logger.o log_helpers.o vector.o
AMD_DAL_BASICS = $(addprefix $(AMDDALPATH)/dc/basics/,$(BASICS))
diff --git a/drivers/gpu/drm/amd/display/dc/basics/conversion.c b/drivers/gpu/drm/amd/display/dc/basics/conversion.c
index 23c9a0ec0181..310964915a83 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/conversion.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/conversion.c
@@ -46,7 +46,7 @@ uint16_t fixed_point_to_int_frac(
arg));
if (d <= (uint16_t)(1 << integer_bits) - (1 / (uint16_t)divisor))
- numerator = (uint16_t)dal_fixed31_32_floor(
+ numerator = (uint16_t)dal_fixed31_32_round(
dal_fixed31_32_mul_int(
arg,
divisor));
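Worked example of why the switch from floor to round matters here, assuming divisor is 1 << fractional_bits (an assumption drawn from the surrounding code, not stated in the hunk):

	/*
	 * With 2 fractional bits (divisor = 4) and arg = 0.7:
	 *   floor(0.7 * 4) = 2  ->  represents 0.50  (error 0.20)
	 *   round(0.7 * 4) = 3  ->  represents 0.75  (error 0.05)
	 */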
diff --git a/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c b/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
index 26936892c6f5..011a97f82fb6 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
@@ -554,6 +554,22 @@ static inline uint32_t ux_dy(
return result | fractional_part;
}
+static inline uint32_t clamp_ux_dy(
+ int64_t value,
+ uint32_t integer_bits,
+ uint32_t fractional_bits,
+ uint32_t min_clamp)
+{
+ uint32_t truncated_val = ux_dy(value, integer_bits, fractional_bits);
+
+ if (value >= (1LL << (integer_bits + FIXED31_32_BITS_PER_FRACTIONAL_PART)))
+ return (1 << (integer_bits + fractional_bits)) - 1;
+ else if (truncated_val > min_clamp)
+ return truncated_val;
+ else
+ return min_clamp;
+}
+
uint32_t dal_fixed31_32_u2d19(
struct fixed31_32 arg)
{
@@ -565,3 +581,15 @@ uint32_t dal_fixed31_32_u0d19(
{
return ux_dy(arg.value, 0, 19);
}
+
+uint32_t dal_fixed31_32_clamp_u0d14(
+ struct fixed31_32 arg)
+{
+ return clamp_ux_dy(arg.value, 0, 14, 1);
+}
+
+uint32_t dal_fixed31_32_clamp_u0d10(
+ struct fixed31_32 arg)
+{
+ return clamp_ux_dy(arg.value, 0, 10, 1);
+}
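Hedged usage sketch for the new clamped converters; dal_fixed31_32_from_fraction() is assumed to be part of this file's existing API, and the value names are invented:

	struct fixed31_32 coeff = dal_fixed31_32_from_fraction(1, 3);
	uint32_t reg_val = dal_fixed31_32_clamp_u0d14(coeff);	/* 1 <= reg_val <= 0x3FFF */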
diff --git a/drivers/gpu/drm/amd/display/dc/bios/Makefile b/drivers/gpu/drm/amd/display/dc/bios/Makefile
index 6ec815dce9cc..239e86bbec5a 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/bios/Makefile
@@ -1,4 +1,25 @@
#
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
# Makefile for the 'bios' sub-component of DAL.
# It provides the parsing and executing controls for atom bios image.
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
index 86e6438c5cf3..c00e405b63e8 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
@@ -190,6 +190,7 @@ static struct graphics_object_id bios_parser_get_connector_id(
struct bios_parser *bp = BP_FROM_DCB(dcb);
struct graphics_object_id object_id = dal_graphics_object_id_init(
0, ENUM_ID_UNKNOWN, OBJECT_TYPE_UNKNOWN);
+ uint16_t id;
uint32_t connector_table_offset = bp->object_info_tbl_offset
+ le16_to_cpu(bp->object_info_tbl.v1_1->usConnectorObjectTableOffset);
@@ -197,12 +198,19 @@ static struct graphics_object_id bios_parser_get_connector_id(
ATOM_OBJECT_TABLE *tbl =
GET_IMAGE(ATOM_OBJECT_TABLE, connector_table_offset);
- if (tbl && tbl->ucNumberOfObjects > i) {
- const uint16_t id = le16_to_cpu(tbl->asObjects[i].usObjectID);
+ if (!tbl) {
+ dm_error("Can't get connector table from atom bios.\n");
+ return object_id;
+ }
- object_id = object_id_from_bios_object_id(id);
+ if (tbl->ucNumberOfObjects <= i) {
+ dm_error("Can't find connector id %d in connector table of size %d.\n",
+ i, tbl->ucNumberOfObjects);
+ return object_id;
}
+ id = le16_to_cpu(tbl->asObjects[i].usObjectID);
+ object_id = object_id_from_bios_object_id(id);
return object_id;
}
@@ -2254,6 +2262,52 @@ static enum bp_result get_gpio_i2c_info(struct bios_parser *bp,
return BP_RESULT_OK;
}
+static bool dal_graphics_object_id_is_valid(struct graphics_object_id id)
+{
+ bool rc = true;
+
+ switch (id.type) {
+ case OBJECT_TYPE_UNKNOWN:
+ rc = false;
+ break;
+ case OBJECT_TYPE_GPU:
+ case OBJECT_TYPE_ENGINE:
+ /* do NOT check for id.id == 0 */
+ if (id.enum_id == ENUM_ID_UNKNOWN)
+ rc = false;
+ break;
+ default:
+ if (id.id == 0 || id.enum_id == ENUM_ID_UNKNOWN)
+ rc = false;
+ break;
+ }
+
+ return rc;
+}
+
+static bool dal_graphics_object_id_is_equal(
+ struct graphics_object_id id1,
+ struct graphics_object_id id2)
+{
+ if (false == dal_graphics_object_id_is_valid(id1)) {
+ dm_output_to_console(
+ "%s: Warning: comparing invalid object 'id1'!\n", __func__);
+ return false;
+ }
+
+ if (false == dal_graphics_object_id_is_valid(id2)) {
+ dm_output_to_console(
+ "%s: Warning: comparing invalid object 'id2'!\n", __func__);
+ return false;
+ }
+
+ if (id1.id == id2.id && id1.enum_id == id2.enum_id
+ && id1.type == id2.type)
+ return true;
+
+ return false;
+}
+
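Hypothetical caller of the comparison helper above (the connector/enum identifiers are assumed from grph_object_id.h; candidate_id and use_connector() are invented for illustration):

	struct graphics_object_id want = dal_graphics_object_id_init(
			CONNECTOR_ID_HDMI_TYPE_A, ENUM_ID_1, OBJECT_TYPE_CONNECTOR);

	if (dal_graphics_object_id_is_equal(want, candidate_id))
		/* both ids are valid and match on id, enum_id and type */
		use_connector(candidate_id);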
static ATOM_OBJECT *get_bios_object(struct bios_parser *bp,
struct graphics_object_id id)
{
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table.c b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
index 3f7b2dabc2b0..1aefed8cf98b 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
@@ -387,6 +387,7 @@ static void init_transmitter_control(struct bios_parser *bp)
bp->cmd_tbl.transmitter_control = transmitter_control_v1_6;
break;
default:
+ dm_error("Don't have transmitter_control for v%d\n", crev);
bp->cmd_tbl.transmitter_control = NULL;
break;
}
@@ -910,6 +911,8 @@ static void init_set_pixel_clock(struct bios_parser *bp)
bp->cmd_tbl.set_pixel_clock = set_pixel_clock_v7;
break;
default:
+ dm_error("Don't have set_pixel_clock for v%d\n",
+ BIOS_CMD_TABLE_PARA_REVISION(SetPixelClock));
bp->cmd_tbl.set_pixel_clock = NULL;
break;
}
@@ -1227,6 +1230,8 @@ static void init_enable_spread_spectrum_on_ppll(struct bios_parser *bp)
enable_spread_spectrum_on_ppll_v3;
break;
default:
+ dm_error("Don't have enable_spread_spectrum_on_ppll for v%d\n",
+ BIOS_CMD_TABLE_PARA_REVISION(EnableSpreadSpectrumOnPPLL));
bp->cmd_tbl.enable_spread_spectrum_on_ppll = NULL;
break;
}
@@ -1422,6 +1427,8 @@ static void init_adjust_display_pll(struct bios_parser *bp)
bp->cmd_tbl.adjust_display_pll = adjust_display_pll_v3;
break;
default:
+ dm_error("Don't have adjust_display_pll for v%d\n",
+ BIOS_CMD_TABLE_PARA_REVISION(AdjustDisplayPll));
bp->cmd_tbl.adjust_display_pll = NULL;
break;
}
@@ -1695,6 +1702,8 @@ static void init_set_crtc_timing(struct bios_parser *bp)
set_crtc_using_dtd_timing_v3;
break;
default:
+ dm_error("Don't have set_crtc_timing for dtd v%d\n",
+ dtd_version);
bp->cmd_tbl.set_crtc_timing = NULL;
break;
}
@@ -1704,6 +1713,8 @@ static void init_set_crtc_timing(struct bios_parser *bp)
bp->cmd_tbl.set_crtc_timing = set_crtc_timing_v1;
break;
default:
+ dm_error("Don't have set_crtc_timing for v%d\n",
+ BIOS_CMD_TABLE_PARA_REVISION(SetCRTC_Timing));
bp->cmd_tbl.set_crtc_timing = NULL;
break;
}
@@ -1890,6 +1901,8 @@ static void init_select_crtc_source(struct bios_parser *bp)
bp->cmd_tbl.select_crtc_source = select_crtc_source_v3;
break;
default:
+ dm_error("Don't select_crtc_source enable_crtc for v%d\n",
+ BIOS_CMD_TABLE_PARA_REVISION(SelectCRTC_Source));
bp->cmd_tbl.select_crtc_source = NULL;
break;
}
@@ -1997,6 +2010,8 @@ static void init_enable_crtc(struct bios_parser *bp)
bp->cmd_tbl.enable_crtc = enable_crtc_v1;
break;
default:
+ dm_error("Don't have enable_crtc for v%d\n",
+ BIOS_CMD_TABLE_PARA_REVISION(EnableCRTC));
bp->cmd_tbl.enable_crtc = NULL;
break;
}
@@ -2103,6 +2118,8 @@ static void init_program_clock(struct bios_parser *bp)
bp->cmd_tbl.program_clock = program_clock_v6;
break;
default:
+ dm_error("Don't have program_clock for v%d\n",
+ BIOS_CMD_TABLE_PARA_REVISION(SetPixelClock));
bp->cmd_tbl.program_clock = NULL;
break;
}
@@ -2324,6 +2341,8 @@ static void init_enable_disp_power_gating(
enable_disp_power_gating_v2_1;
break;
default:
+ dm_error("Don't enable_disp_power_gating enable_crtc for v%d\n",
+ BIOS_CMD_TABLE_PARA_REVISION(EnableDispPowerGating));
bp->cmd_tbl.enable_disp_power_gating = NULL;
break;
}
@@ -2371,6 +2390,8 @@ static void init_set_dce_clock(struct bios_parser *bp)
bp->cmd_tbl.set_dce_clock = set_dce_clock_v2_1;
break;
default:
+ dm_error("Don't have set_dce_clock for v%d\n",
+ BIOS_CMD_TABLE_PARA_REVISION(SetDCEClock));
bp->cmd_tbl.set_dce_clock = NULL;
break;
}
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
index ba68693758a7..946db12388d6 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
@@ -118,6 +118,7 @@ static void init_dig_encoder_control(struct bios_parser *bp)
bp->cmd_tbl.dig_encoder_control = encoder_control_digx_v1_5;
break;
default:
+ dm_error("Don't have dig_encoder_control for v%d\n", version);
bp->cmd_tbl.dig_encoder_control = NULL;
break;
}
@@ -205,6 +206,7 @@ static void init_transmitter_control(struct bios_parser *bp)
bp->cmd_tbl.transmitter_control = transmitter_control_v1_6;
break;
default:
+ dm_error("Don't have transmitter_control for v%d\n", crev);
bp->cmd_tbl.transmitter_control = NULL;
break;
}
@@ -268,6 +270,8 @@ static void init_set_pixel_clock(struct bios_parser *bp)
bp->cmd_tbl.set_pixel_clock = set_pixel_clock_v7;
break;
default:
+ dm_error("Don't have set_pixel_clock for v%d\n",
+ BIOS_CMD_TABLE_PARA_REVISION(setpixelclock));
bp->cmd_tbl.set_pixel_clock = NULL;
break;
}
@@ -379,6 +383,7 @@ static void init_set_crtc_timing(struct bios_parser *bp)
set_crtc_using_dtd_timing_v3;
break;
default:
+ dm_error("Don't have set_crtc_timing for v%d\n", dtd_version);
bp->cmd_tbl.set_crtc_timing = NULL;
break;
}
@@ -498,6 +503,8 @@ static void init_select_crtc_source(struct bios_parser *bp)
bp->cmd_tbl.select_crtc_source = select_crtc_source_v3;
break;
default:
+ dm_error("Don't select_crtc_source enable_crtc for v%d\n",
+ BIOS_CMD_TABLE_PARA_REVISION(selectcrtc_source));
bp->cmd_tbl.select_crtc_source = NULL;
break;
}
@@ -565,6 +572,8 @@ static void init_enable_crtc(struct bios_parser *bp)
bp->cmd_tbl.enable_crtc = enable_crtc_v1;
break;
default:
+ dm_error("Don't have enable_crtc for v%d\n",
+ BIOS_CMD_TABLE_PARA_REVISION(enablecrtc));
bp->cmd_tbl.enable_crtc = NULL;
break;
}
@@ -661,6 +670,8 @@ static void init_enable_disp_power_gating(
enable_disp_power_gating_v2_1;
break;
default:
+ dm_error("Don't enable_disp_power_gating enable_crtc for v%d\n",
+ BIOS_CMD_TABLE_PARA_REVISION(enabledisppowergating));
bp->cmd_tbl.enable_disp_power_gating = NULL;
break;
}
@@ -710,6 +721,8 @@ static void init_set_dce_clock(struct bios_parser *bp)
bp->cmd_tbl.set_dce_clock = set_dce_clock_v2_1;
break;
default:
+ dm_error("Don't have set_dce_clock for v%d\n",
+ BIOS_CMD_TABLE_PARA_REVISION(setdceclock));
bp->cmd_tbl.set_dce_clock = NULL;
break;
}
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/Makefile b/drivers/gpu/drm/amd/display/dc/calcs/Makefile
index 41ef35995b02..7959e382ed28 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/calcs/Makefile
@@ -1,4 +1,25 @@
#
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
# Makefile for the 'calcs' sub-component of DAL.
# It calculates Bandwidth and Watermarks values for HW programming
#
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
index 6347712db834..2e11fac2a63d 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
@@ -29,6 +29,15 @@
#include "core_types.h"
#include "dal_asic_id.h"
+/*
+ * NOTE:
+ * This file is gcc-parseable HW gospel, coming straight from HW engineers.
+ *
+ * It doesn't adhere to Linux kernel style and sometimes will do things in odd
+ * ways. Unless there is something clearly wrong with it the code should
+ * remain as-is as it provides us with a guarantee from HW that it is correct.
+ */
+
/*******************************************************************************
* Private Functions
******************************************************************************/
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.c
index 626f9cf8aad2..5e2ea12fbb73 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.c
@@ -27,6 +27,15 @@
#include "dcn_calc_auto.h"
#include "dcn_calc_math.h"
+/*
+ * NOTE:
+ * This file is gcc-parseable HW gospel, coming straight from HW engineers.
+ *
+ * It doesn't adhere to Linux kernel style and sometimes will do things in odd
+ * ways. Unless there is something clearly wrong with it the code should
+ * remain as-is as it provides us with a guarantee from HW that it is correct.
+ */
+
/*REVISION#250*/
void scaler_settings_calculation(struct dcn_bw_internal_vars *v)
{
@@ -773,11 +782,11 @@ void mode_support_and_system_configuration(struct dcn_bw_internal_vars *v)
v->dst_y_after_scaler = 0.0;
}
v->time_calc = 24.0 / v->projected_dcfclk_deep_sleep;
- v->v_update_offset[k] =dcn_bw_ceil2(v->htotal[k] / 4.0, 1.0);
+ v->v_update_offset[k][j] = dcn_bw_ceil2(v->htotal[k] / 4.0, 1.0);
v->total_repeater_delay = v->max_inter_dcn_tile_repeaters * (2.0 / (v->required_dispclk[i][j] / (j + 1)) + 3.0 / v->required_dispclk[i][j]);
- v->v_update_width[k] = (14.0 / v->projected_dcfclk_deep_sleep + 12.0 / (v->required_dispclk[i][j] / (j + 1)) + v->total_repeater_delay) * v->pixel_clock[k];
- v->v_ready_offset[k] =dcn_bw_max2(150.0 / (v->required_dispclk[i][j] / (j + 1)), v->total_repeater_delay + 20.0 / v->projected_dcfclk_deep_sleep + 10.0 / (v->required_dispclk[i][j] / (j + 1))) * v->pixel_clock[k];
- v->time_setup = (v->v_update_offset[k] + v->v_update_width[k] + v->v_ready_offset[k]) / v->pixel_clock[k];
+ v->v_update_width[k][j] = (14.0 / v->projected_dcfclk_deep_sleep + 12.0 / (v->required_dispclk[i][j] / (j + 1)) + v->total_repeater_delay) * v->pixel_clock[k];
+ v->v_ready_offset[k][j] = dcn_bw_max2(150.0 / (v->required_dispclk[i][j] / (j + 1)), v->total_repeater_delay + 20.0 / v->projected_dcfclk_deep_sleep + 10.0 / (v->required_dispclk[i][j] / (j + 1))) * v->pixel_clock[k];
+ v->time_setup = (v->v_update_offset[k][j] + v->v_update_width[k][j] + v->v_ready_offset[k][j]) / v->pixel_clock[k];
v->extra_latency = v->urgent_round_trip_and_out_of_order_latency_per_state[i] + (v->total_number_of_active_dpp[i][j] * v->pixel_chunk_size_in_kbyte + v->total_number_of_dcc_active_dpp[i][j] * v->meta_chunk_size) * 1024.0 / v->return_bw_per_state[i];
if (v->pte_enable == dcn_bw_yes) {
v->extra_latency = v->extra_latency + v->total_number_of_active_dpp[i][j] * v->pte_chunk_size * 1024.0 / v->return_bw_per_state[i];
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_math.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_math.c
index b6abe0f3bb15..7600a4a4abc7 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_math.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_math.c
@@ -25,37 +25,44 @@
#include "dcn_calc_math.h"
+#define isNaN(number) ((number) != (number))
+
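Editor's aside on the macro above: it relies on the IEEE-754 rule that NaN is unordered and therefore compares unequal even to itself.

	/*
	 * isNaN(0.0f / 0.0f)  -> true  (NaN != NaN)
	 * isNaN(1.5f)         -> false (ordinary values compare equal to themselves)
	 */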
+/*
+ * NOTE:
+ * This file is gcc-parseable HW gospel, coming straight from HW engineers.
+ *
+ * It doesn't adhere to Linux kernel style and sometimes will do things in odd
+ * ways. Unless there is something clearly wrong with it the code should
+ * remain as-is as it provides us with a guarantee from HW that it is correct.
+ */
+
float dcn_bw_mod(const float arg1, const float arg2)
{
- if (arg1 != arg1)
+ if (isNaN(arg1))
return arg2;
- if (arg2 != arg2)
+ if (isNaN(arg2))
return arg1;
return arg1 - arg1 * ((int) (arg1 / arg2));
}
float dcn_bw_min2(const float arg1, const float arg2)
{
- if (arg1 != arg1)
+ if (isNaN(arg1))
return arg2;
- if (arg2 != arg2)
+ if (isNaN(arg2))
return arg1;
return arg1 < arg2 ? arg1 : arg2;
}
unsigned int dcn_bw_max(const unsigned int arg1, const unsigned int arg2)
{
- if (arg1 != arg1)
- return arg2;
- if (arg2 != arg2)
- return arg1;
return arg1 > arg2 ? arg1 : arg2;
}
float dcn_bw_max2(const float arg1, const float arg2)
{
- if (arg1 != arg1)
+ if (isNaN(arg1))
return arg2;
- if (arg2 != arg2)
+ if (isNaN(arg2))
return arg1;
return arg1 > arg2 ? arg1 : arg2;
}
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
index a4fbca34bcdf..331891c2c71a 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
@@ -33,6 +33,15 @@
#include "dcn10/dcn10_resource.h"
#include "dcn_calc_math.h"
+/*
+ * NOTE:
+ * This file is gcc-parseable HW gospel, coming straight from HW engineers.
+ *
+ * It doesn't adhere to Linux kernel style and sometimes will do things in odd
+ * ways. Unless there is something clearly wrong with it the code should
+ * remain as-is as it provides us with a guarantee from HW that it is correct.
+ */
+
/* Defaults from spreadsheet rev#247 */
const struct dcn_soc_bounding_box dcn10_soc_defaults = {
/* latencies */
@@ -878,6 +887,17 @@ bool dcn_validate_bandwidth(
+ pipe->bottom_pipe->plane_res.scl_data.recout.width;
}
+ if (pipe->plane_state->rotation % 2 == 0) {
+ ASSERT(pipe->plane_res.scl_data.ratios.horz.value != dal_fixed31_32_one.value
+ || v->scaler_rec_out_width[input_idx] == v->viewport_width[input_idx]);
+ ASSERT(pipe->plane_res.scl_data.ratios.vert.value != dal_fixed31_32_one.value
+ || v->scaler_recout_height[input_idx] == v->viewport_height[input_idx]);
+ } else {
+ ASSERT(pipe->plane_res.scl_data.ratios.horz.value != dal_fixed31_32_one.value
+ || v->scaler_recout_height[input_idx] == v->viewport_width[input_idx]);
+ ASSERT(pipe->plane_res.scl_data.ratios.vert.value != dal_fixed31_32_one.value
+ || v->scaler_rec_out_width[input_idx] == v->viewport_height[input_idx]);
+ }
v->dcc_enable[input_idx] = pipe->plane_state->dcc.enable ? dcn_bw_yes : dcn_bw_no;
v->source_pixel_format[input_idx] = tl_pixel_format_to_bw_defs(
pipe->plane_state->format);
@@ -888,6 +908,15 @@ bool dcn_validate_bandwidth(
v->override_vta_ps[input_idx] = pipe->plane_res.scl_data.taps.v_taps;
v->override_hta_pschroma[input_idx] = pipe->plane_res.scl_data.taps.h_taps_c;
v->override_vta_pschroma[input_idx] = pipe->plane_res.scl_data.taps.v_taps_c;
+ /*
+ * Spreadsheet doesn't handle taps_c is one properly,
+ * need to force Chroma to always be scaled to pass
+ * bandwidth validation.
+ */
+ if (v->override_hta_pschroma[input_idx] == 1)
+ v->override_hta_pschroma[input_idx] = 2;
+ if (v->override_vta_pschroma[input_idx] == 1)
+ v->override_vta_pschroma[input_idx] = 2;
v->source_scan[input_idx] = (pipe->plane_state->rotation % 2) ? dcn_bw_vert : dcn_bw_hor;
}
if (v->is_line_buffer_bpp_fixed == dcn_bw_yes)
@@ -985,9 +1014,9 @@ bool dcn_validate_bandwidth(
if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state)
continue;
- pipe->pipe_dlg_param.vupdate_width = v->v_update_width[input_idx];
- pipe->pipe_dlg_param.vupdate_offset = v->v_update_offset[input_idx];
- pipe->pipe_dlg_param.vready_offset = v->v_ready_offset[input_idx];
+ pipe->pipe_dlg_param.vupdate_width = v->v_update_width[input_idx][v->dpp_per_plane[input_idx] == 2 ? 1 : 0];
+ pipe->pipe_dlg_param.vupdate_offset = v->v_update_offset[input_idx][v->dpp_per_plane[input_idx] == 2 ? 1 : 0];
+ pipe->pipe_dlg_param.vready_offset = v->v_ready_offset[input_idx][v->dpp_per_plane[input_idx] == 2 ? 1 : 0];
pipe->pipe_dlg_param.vstartup_start = v->v_startup[input_idx];
pipe->pipe_dlg_param.htotal = pipe->stream->timing.h_total;
@@ -1026,9 +1055,9 @@ bool dcn_validate_bandwidth(
TIMING_3D_FORMAT_SIDE_BY_SIDE))) {
if (hsplit_pipe && hsplit_pipe->plane_state == pipe->plane_state) {
/* update previously split pipe */
- hsplit_pipe->pipe_dlg_param.vupdate_width = v->v_update_width[input_idx];
- hsplit_pipe->pipe_dlg_param.vupdate_offset = v->v_update_offset[input_idx];
- hsplit_pipe->pipe_dlg_param.vready_offset = v->v_ready_offset[input_idx];
+ hsplit_pipe->pipe_dlg_param.vupdate_width = v->v_update_width[input_idx][v->dpp_per_plane[input_idx] == 2 ? 1 : 0];
+ hsplit_pipe->pipe_dlg_param.vupdate_offset = v->v_update_offset[input_idx][v->dpp_per_plane[input_idx] == 2 ? 1 : 0];
+ hsplit_pipe->pipe_dlg_param.vready_offset = v->v_ready_offset[input_idx][v->dpp_per_plane[input_idx] == 2 ? 1 : 0];
hsplit_pipe->pipe_dlg_param.vstartup_start = v->v_startup[input_idx];
hsplit_pipe->pipe_dlg_param.htotal = pipe->stream->timing.h_total;
@@ -1556,35 +1585,6 @@ void dcn_bw_sync_calcs_and_dml(struct dc *dc)
dc->dcn_ip->can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one,
dc->dcn_ip->bug_forcing_luma_and_chroma_request_to_same_size_fixed,
dc->dcn_ip->dcfclk_cstate_latency);
- dc->dml.soc.vmin.socclk_mhz = dc->dcn_soc->socclk;
- dc->dml.soc.vmid.socclk_mhz = dc->dcn_soc->socclk;
- dc->dml.soc.vnom.socclk_mhz = dc->dcn_soc->socclk;
- dc->dml.soc.vmax.socclk_mhz = dc->dcn_soc->socclk;
-
- dc->dml.soc.vmin.dcfclk_mhz = dc->dcn_soc->dcfclkv_min0p65;
- dc->dml.soc.vmid.dcfclk_mhz = dc->dcn_soc->dcfclkv_mid0p72;
- dc->dml.soc.vnom.dcfclk_mhz = dc->dcn_soc->dcfclkv_nom0p8;
- dc->dml.soc.vmax.dcfclk_mhz = dc->dcn_soc->dcfclkv_max0p9;
-
- dc->dml.soc.vmin.dispclk_mhz = dc->dcn_soc->max_dispclk_vmin0p65;
- dc->dml.soc.vmid.dispclk_mhz = dc->dcn_soc->max_dispclk_vmid0p72;
- dc->dml.soc.vnom.dispclk_mhz = dc->dcn_soc->max_dispclk_vnom0p8;
- dc->dml.soc.vmax.dispclk_mhz = dc->dcn_soc->max_dispclk_vmax0p9;
-
- dc->dml.soc.vmin.dppclk_mhz = dc->dcn_soc->max_dppclk_vmin0p65;
- dc->dml.soc.vmid.dppclk_mhz = dc->dcn_soc->max_dppclk_vmid0p72;
- dc->dml.soc.vnom.dppclk_mhz = dc->dcn_soc->max_dppclk_vnom0p8;
- dc->dml.soc.vmax.dppclk_mhz = dc->dcn_soc->max_dppclk_vmax0p9;
-
- dc->dml.soc.vmin.phyclk_mhz = dc->dcn_soc->phyclkv_min0p65;
- dc->dml.soc.vmid.phyclk_mhz = dc->dcn_soc->phyclkv_mid0p72;
- dc->dml.soc.vnom.phyclk_mhz = dc->dcn_soc->phyclkv_nom0p8;
- dc->dml.soc.vmax.phyclk_mhz = dc->dcn_soc->phyclkv_max0p9;
-
- dc->dml.soc.vmin.dram_bw_per_chan_gbps = dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65;
- dc->dml.soc.vmid.dram_bw_per_chan_gbps = dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72;
- dc->dml.soc.vnom.dram_bw_per_chan_gbps = dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8;
- dc->dml.soc.vmax.dram_bw_per_chan_gbps = dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9;
dc->dml.soc.sr_exit_time_us = dc->dcn_soc->sr_exit_time;
dc->dml.soc.sr_enter_plus_exit_time_us = dc->dcn_soc->sr_enter_plus_exit_time;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index d1488d5ee028..35e84ed031de 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -283,19 +283,17 @@ static bool construct(struct dc *dc,
const struct dc_init_data *init_params)
{
struct dal_logger *logger;
- struct dc_context *dc_ctx = kzalloc(sizeof(*dc_ctx), GFP_KERNEL);
- struct bw_calcs_dceip *dc_dceip = kzalloc(sizeof(*dc_dceip),
- GFP_KERNEL);
- struct bw_calcs_vbios *dc_vbios = kzalloc(sizeof(*dc_vbios),
- GFP_KERNEL);
+ struct dc_context *dc_ctx;
+ struct bw_calcs_dceip *dc_dceip;
+ struct bw_calcs_vbios *dc_vbios;
#ifdef CONFIG_DRM_AMD_DC_DCN1_0
- struct dcn_soc_bounding_box *dcn_soc = kzalloc(sizeof(*dcn_soc),
- GFP_KERNEL);
- struct dcn_ip_params *dcn_ip = kzalloc(sizeof(*dcn_ip), GFP_KERNEL);
+ struct dcn_soc_bounding_box *dcn_soc;
+ struct dcn_ip_params *dcn_ip;
#endif
enum dce_version dc_version = DCE_VERSION_UNKNOWN;
+ dc_dceip = kzalloc(sizeof(*dc_dceip), GFP_KERNEL);
if (!dc_dceip) {
dm_error("%s: failed to create dceip\n", __func__);
goto fail;
@@ -303,6 +301,7 @@ static bool construct(struct dc *dc,
dc->bw_dceip = dc_dceip;
+ dc_vbios = kzalloc(sizeof(*dc_vbios), GFP_KERNEL);
if (!dc_vbios) {
dm_error("%s: failed to create vbios\n", __func__);
goto fail;
@@ -310,6 +309,7 @@ static bool construct(struct dc *dc,
dc->bw_vbios = dc_vbios;
#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+ dcn_soc = kzalloc(sizeof(*dcn_soc), GFP_KERNEL);
if (!dcn_soc) {
dm_error("%s: failed to create dcn_soc\n", __func__);
goto fail;
@@ -317,6 +317,7 @@ static bool construct(struct dc *dc,
dc->dcn_soc = dcn_soc;
+ dcn_ip = kzalloc(sizeof(*dcn_ip), GFP_KERNEL);
if (!dcn_ip) {
dm_error("%s: failed to create dcn_ip\n", __func__);
goto fail;
@@ -325,11 +326,18 @@ static bool construct(struct dc *dc,
dc->dcn_ip = dcn_ip;
#endif
+ dc_ctx = kzalloc(sizeof(*dc_ctx), GFP_KERNEL);
if (!dc_ctx) {
dm_error("%s: failed to create ctx\n", __func__);
goto fail;
}
+ dc_ctx->cgs_device = init_params->cgs_device;
+ dc_ctx->driver_context = init_params->driver;
+ dc_ctx->dc = dc;
+ dc_ctx->asic_id = init_params->asic_id;
+ dc->ctx = dc_ctx;
+
dc->current_state = dc_create_state();
if (!dc->current_state) {
@@ -337,11 +345,6 @@ static bool construct(struct dc *dc,
goto fail;
}
- dc_ctx->cgs_device = init_params->cgs_device;
- dc_ctx->driver_context = init_params->driver;
- dc_ctx->dc = dc;
- dc_ctx->asic_id = init_params->asic_id;
-
/* Create logger */
logger = dal_logger_create(dc_ctx, init_params->log_mask);
@@ -351,11 +354,10 @@ static bool construct(struct dc *dc,
goto fail;
}
dc_ctx->logger = logger;
- dc->ctx = dc_ctx;
- dc->ctx->dce_environment = init_params->dce_environment;
+ dc_ctx->dce_environment = init_params->dce_environment;
dc_version = resource_parse_asic_id(init_params->asic_id);
- dc->ctx->dce_version = dc_version;
+ dc_ctx->dce_version = dc_version;
#if defined(CONFIG_DRM_AMD_DC_FBC)
dc->ctx->fbc_gpu_addr = init_params->fbc_gpu_addr;
@@ -578,7 +580,7 @@ static void program_timing_sync(
for (j = 0; j < group_size; j++) {
struct pipe_ctx *temp;
- if (!pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg)) {
+ if (pipe_set[j]->stream_res.tg->funcs->is_blanked && !pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg)) {
if (j == 0)
break;
@@ -591,7 +593,7 @@ static void program_timing_sync(
/* remove any other unblanked pipes as they have already been synced */
for (j = j + 1; j < group_size; j++) {
- if (!pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg)) {
+ if (pipe_set[j]->stream_res.tg->funcs->is_blanked && !pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg)) {
group_size--;
pipe_set[j] = pipe_set[group_size];
j--;
@@ -786,6 +788,8 @@ bool dc_post_update_surfaces_to_stream(struct dc *dc)
dc->hwss.disable_plane(dc, &context->res_ctx.pipe_ctx[i]);
}
+ dc->optimized_required = false;
+
/* 3rd param should be true, temp w/a for RV*/
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
dc->hwss.set_bandwidth(dc, context, dc->ctx->dce_version < DCN_VERSION_1_0);
@@ -981,6 +985,11 @@ static enum surface_update_type get_plane_info_update_type(const struct dc_surfa
if (u->plane_info->per_pixel_alpha != u->surface->per_pixel_alpha)
update_flags->bits.per_pixel_alpha_change = 1;
+ if (u->plane_info->dcc.enable != u->surface->dcc.enable
+ || u->plane_info->dcc.grph.independent_64b_blks != u->surface->dcc.grph.independent_64b_blks
+ || u->plane_info->dcc.grph.meta_pitch != u->surface->dcc.grph.meta_pitch)
+ update_flags->bits.dcc_change = 1;
+
if (pixel_format_to_bpp(u->plane_info->format) !=
pixel_format_to_bpp(u->surface->format))
/* different bytes per element will require full bandwidth
@@ -1178,12 +1187,6 @@ static void commit_planes_for_stream(struct dc *dc,
if (update_type == UPDATE_TYPE_FULL) {
dc->hwss.set_bandwidth(dc, context, false);
context_clock_trace(dc, context);
-
- for (j = 0; j < dc->res_pool->pipe_count; j++) {
- struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
-
- dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe_ctx);
- }
}
if (surface_count == 0) {
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
index 2e509382935f..1babac07bcc9 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
@@ -1,4 +1,26 @@
/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+/*
* dc_debug.c
*
* Created on: Nov 3, 2016
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
index 71993d5983bf..ebc96b720083 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
@@ -28,6 +28,8 @@
#include "timing_generator.h"
#include "hw_sequencer.h"
+#define NUM_ELEMENTS(a) (sizeof(a) / sizeof((a)[0]))
+
/* used as index in array of black_color_format */
enum black_color_format {
BLACK_COLOR_FORMAT_RGB_FULLRANGE = 0,
@@ -38,6 +40,15 @@ enum black_color_format {
BLACK_COLOR_FORMAT_DEBUG,
};
+enum dc_color_space_type {
+ COLOR_SPACE_RGB_TYPE,
+ COLOR_SPACE_RGB_LIMITED_TYPE,
+ COLOR_SPACE_YCBCR601_TYPE,
+ COLOR_SPACE_YCBCR709_TYPE,
+ COLOR_SPACE_YCBCR601_LIMITED_TYPE,
+ COLOR_SPACE_YCBCR709_LIMITED_TYPE
+};
+
static const struct tg_color black_color_format[] = {
/* BlackColorFormat_RGB_FullRange */
{0, 0, 0},
@@ -53,6 +64,140 @@ static const struct tg_color black_color_format[] = {
{0xff, 0xff, 0},
};
+struct out_csc_color_matrix_type {
+ enum dc_color_space_type color_space_type;
+ uint16_t regval[12];
+};
+
+static const struct out_csc_color_matrix_type output_csc_matrix[] = {
+ { COLOR_SPACE_RGB_TYPE,
+ { 0x2000, 0, 0, 0, 0, 0x2000, 0, 0, 0, 0, 0x2000, 0} },
+ { COLOR_SPACE_RGB_LIMITED_TYPE,
+ { 0x1B67, 0, 0, 0x201, 0, 0x1B67, 0, 0x201, 0, 0, 0x1B67, 0x201} },
+ { COLOR_SPACE_YCBCR601_TYPE,
+ { 0xE04, 0xF444, 0xFDB9, 0x1004, 0x831, 0x1016, 0x320, 0x201, 0xFB45,
+ 0xF6B7, 0xE04, 0x1004} },
+ { COLOR_SPACE_YCBCR709_TYPE,
+ { 0xE04, 0xF345, 0xFEB7, 0x1004, 0x5D3, 0x1399, 0x1FA,
+ 0x201, 0xFCCA, 0xF533, 0xE04, 0x1004} },
+
+ /* TODO: correct values below */
+ { COLOR_SPACE_YCBCR601_LIMITED_TYPE,
+ { 0xE00, 0xF447, 0xFDB9, 0x1000, 0x991,
+ 0x12C9, 0x3A6, 0x200, 0xFB47, 0xF6B9, 0xE00, 0x1000} },
+ { COLOR_SPACE_YCBCR709_LIMITED_TYPE,
+ { 0xE00, 0xF349, 0xFEB7, 0x1000, 0x6CE, 0x16E3,
+ 0x24F, 0x200, 0xFCCB, 0xF535, 0xE00, 0x1000} },
+};
+
+static bool is_rgb_type(
+ enum dc_color_space color_space)
+{
+ bool ret = false;
+
+ if (color_space == COLOR_SPACE_SRGB ||
+ color_space == COLOR_SPACE_XR_RGB ||
+ color_space == COLOR_SPACE_MSREF_SCRGB ||
+ color_space == COLOR_SPACE_2020_RGB_FULLRANGE ||
+ color_space == COLOR_SPACE_ADOBERGB ||
+ color_space == COLOR_SPACE_DCIP3 ||
+ color_space == COLOR_SPACE_DOLBYVISION)
+ ret = true;
+ return ret;
+}
+
+static bool is_rgb_limited_type(
+ enum dc_color_space color_space)
+{
+ bool ret = false;
+
+ if (color_space == COLOR_SPACE_SRGB_LIMITED ||
+ color_space == COLOR_SPACE_2020_RGB_LIMITEDRANGE)
+ ret = true;
+ return ret;
+}
+
+static bool is_ycbcr601_type(
+ enum dc_color_space color_space)
+{
+ bool ret = false;
+
+ if (color_space == COLOR_SPACE_YCBCR601 ||
+ color_space == COLOR_SPACE_XV_YCC_601)
+ ret = true;
+ return ret;
+}
+
+static bool is_ycbcr601_limited_type(
+ enum dc_color_space color_space)
+{
+ bool ret = false;
+
+ if (color_space == COLOR_SPACE_YCBCR601_LIMITED)
+ ret = true;
+ return ret;
+}
+
+static bool is_ycbcr709_type(
+ enum dc_color_space color_space)
+{
+ bool ret = false;
+
+ if (color_space == COLOR_SPACE_YCBCR709 ||
+ color_space == COLOR_SPACE_XV_YCC_709)
+ ret = true;
+ return ret;
+}
+
+static bool is_ycbcr709_limited_type(
+ enum dc_color_space color_space)
+{
+ bool ret = false;
+
+ if (color_space == COLOR_SPACE_YCBCR709_LIMITED)
+ ret = true;
+ return ret;
+}
+enum dc_color_space_type get_color_space_type(enum dc_color_space color_space)
+{
+ enum dc_color_space_type type = COLOR_SPACE_RGB_TYPE;
+
+ if (is_rgb_type(color_space))
+ type = COLOR_SPACE_RGB_TYPE;
+ else if (is_rgb_limited_type(color_space))
+ type = COLOR_SPACE_RGB_LIMITED_TYPE;
+ else if (is_ycbcr601_type(color_space))
+ type = COLOR_SPACE_YCBCR601_TYPE;
+ else if (is_ycbcr709_type(color_space))
+ type = COLOR_SPACE_YCBCR709_TYPE;
+ else if (is_ycbcr601_limited_type(color_space))
+ type = COLOR_SPACE_YCBCR601_LIMITED_TYPE;
+ else if (is_ycbcr709_limited_type(color_space))
+ type = COLOR_SPACE_YCBCR709_LIMITED_TYPE;
+
+ return type;
+}
+
+const uint16_t *find_color_matrix(enum dc_color_space color_space,
+ uint32_t *array_size)
+{
+ int i;
+ enum dc_color_space_type type;
+ const uint16_t *val = NULL;
+ int arr_size = NUM_ELEMENTS(output_csc_matrix);
+
+ type = get_color_space_type(color_space);
+ for (i = 0; i < arr_size; i++)
+ if (output_csc_matrix[i].color_space_type == type) {
+ val = output_csc_matrix[i].regval;
+ *array_size = 12;
+ break;
+ }
+
+ return val;
+}
+
+
void color_space_to_black_color(
const struct dc *dc,
enum dc_color_space colorspace,
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 7b0e43c0685c..a37428271573 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -938,8 +938,9 @@ static bool construct(
link->link_id = bios->funcs->get_connector_id(bios, init_params->connector_index);
if (link->link_id.type != OBJECT_TYPE_CONNECTOR) {
- dm_error("%s: Invalid Connector ObjectID from Adapter Service for connector index:%d!\n",
- __func__, init_params->connector_index);
+ dm_error("%s: Invalid Connector ObjectID from Adapter Service for connector index:%d! type %d expected %d\n",
+ __func__, init_params->connector_index,
+ link->link_id.type, OBJECT_TYPE_CONNECTOR);
goto create_fail;
}
@@ -1271,6 +1272,24 @@ static enum dc_status enable_link_dp(
return status;
}
+static enum dc_status enable_link_edp(
+ struct dc_state *state,
+ struct pipe_ctx *pipe_ctx)
+{
+ enum dc_status status;
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct dc_link *link = stream->sink->link;
+
+ link->dc->hwss.edp_power_control(link, true);
+ link->dc->hwss.edp_wait_for_hpd_ready(link, true);
+
+ status = enable_link_dp(state, pipe_ctx);
+
+ link->dc->hwss.edp_backlight_control(link, true);
+
+ return status;
+}
+
static enum dc_status enable_link_dp_mst(
struct dc_state *state,
struct pipe_ctx *pipe_ctx)
@@ -1746,9 +1765,11 @@ static enum dc_status enable_link(
enum dc_status status = DC_ERROR_UNEXPECTED;
switch (pipe_ctx->stream->signal) {
case SIGNAL_TYPE_DISPLAY_PORT:
- case SIGNAL_TYPE_EDP:
status = enable_link_dp(state, pipe_ctx);
break;
+ case SIGNAL_TYPE_EDP:
+ status = enable_link_edp(state, pipe_ctx);
+ break;
case SIGNAL_TYPE_DISPLAY_PORT_MST:
status = enable_link_dp_mst(state, pipe_ctx);
msleep(200);
@@ -1801,7 +1822,7 @@ static void disable_link(struct dc_link *link, enum signal_type signal)
link->link_enc->funcs->disable_output(link->link_enc, signal);
}
-bool dp_active_dongle_validate_timing(
+static bool dp_active_dongle_validate_timing(
const struct dc_crtc_timing *timing,
const struct dc_dongle_caps *dongle_caps)
{
@@ -1833,6 +1854,8 @@ bool dp_active_dongle_validate_timing(
/* Check Color Depth and Pixel Clock */
if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
required_pix_clk /= 2;
+ else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422)
+ required_pix_clk = required_pix_clk * 2 / 3;
switch (timing->display_color_depth) {
case COLOR_DEPTH_666:
@@ -1907,12 +1930,18 @@ bool dc_link_set_backlight_level(const struct dc_link *link, uint32_t level,
{
struct dc *core_dc = link->ctx->dc;
struct abm *abm = core_dc->res_pool->abm;
+ struct dmcu *dmcu = core_dc->res_pool->dmcu;
unsigned int controller_id = 0;
+ bool use_smooth_brightness = true;
int i;
- if ((abm == NULL) || (abm->funcs->set_backlight_level == NULL))
+ if ((dmcu == NULL) ||
+ (abm == NULL) ||
+ (abm->funcs->set_backlight_level == NULL))
return false;
+ use_smooth_brightness = dmcu->funcs->is_dmcu_initialized(dmcu);
+
dm_logger_write(link->ctx->logger, LOG_BACKLIGHT,
"New Backlight level: %d (0x%X)\n", level, level);
@@ -1935,7 +1964,8 @@ bool dc_link_set_backlight_level(const struct dc_link *link, uint32_t level,
abm,
level,
frame_ramp,
- controller_id);
+ controller_id,
+ use_smooth_brightness);
}
return true;
@@ -1952,144 +1982,6 @@ bool dc_link_set_psr_enable(const struct dc_link *link, bool enable, bool wait)
return true;
}
-bool dc_link_get_psr_state(const struct dc_link *link, uint32_t *psr_state)
-{
- struct dc *core_dc = link->ctx->dc;
- struct dmcu *dmcu = core_dc->res_pool->dmcu;
-
- if (dmcu != NULL && link->psr_enabled)
- dmcu->funcs->get_psr_state(dmcu, psr_state);
-
- return true;
-}
-
-bool dc_link_setup_psr(struct dc_link *link,
- const struct dc_stream_state *stream, struct psr_config *psr_config,
- struct psr_context *psr_context)
-{
- struct dc *core_dc = link->ctx->dc;
- struct dmcu *dmcu = core_dc->res_pool->dmcu;
- int i;
-
- psr_context->controllerId = CONTROLLER_ID_UNDEFINED;
-
- if (link != NULL &&
- dmcu != NULL) {
- /* updateSinkPsrDpcdConfig*/
- union dpcd_psr_configuration psr_configuration;
-
- memset(&psr_configuration, 0, sizeof(psr_configuration));
-
- psr_configuration.bits.ENABLE = 1;
- psr_configuration.bits.CRC_VERIFICATION = 1;
- psr_configuration.bits.FRAME_CAPTURE_INDICATION =
- psr_config->psr_frame_capture_indication_req;
-
- /* Check for PSR v2*/
- if (psr_config->psr_version == 0x2) {
- /* For PSR v2 selective update.
- * Indicates whether sink should start capturing
- * immediately following active scan line,
- * or starting with the 2nd active scan line.
- */
- psr_configuration.bits.LINE_CAPTURE_INDICATION = 0;
- /*For PSR v2, determines whether Sink should generate
- * IRQ_HPD when CRC mismatch is detected.
- */
- psr_configuration.bits.IRQ_HPD_WITH_CRC_ERROR = 1;
- }
-
- dm_helpers_dp_write_dpcd(
- link->ctx,
- link,
- 368,
- &psr_configuration.raw,
- sizeof(psr_configuration.raw));
-
- psr_context->channel = link->ddc->ddc_pin->hw_info.ddc_channel;
- psr_context->transmitterId = link->link_enc->transmitter;
- psr_context->engineId = link->link_enc->preferred_engine;
-
- for (i = 0; i < MAX_PIPES; i++) {
- if (core_dc->current_state->res_ctx.pipe_ctx[i].stream
- == stream) {
- /* dmcu -1 for all controller id values,
- * therefore +1 here
- */
- psr_context->controllerId =
- core_dc->current_state->res_ctx.
- pipe_ctx[i].stream_res.tg->inst + 1;
- break;
- }
- }
-
- /* Hardcoded for now. Can be Pcie or Uniphy (or Unknown)*/
- psr_context->phyType = PHY_TYPE_UNIPHY;
- /*PhyId is associated with the transmitter id*/
- psr_context->smuPhyId = link->link_enc->transmitter;
-
- psr_context->crtcTimingVerticalTotal = stream->timing.v_total;
- psr_context->vsyncRateHz = div64_u64(div64_u64((stream->
- timing.pix_clk_khz * 1000),
- stream->timing.v_total),
- stream->timing.h_total);
-
- psr_context->psrSupportedDisplayConfig = true;
- psr_context->psrExitLinkTrainingRequired =
- psr_config->psr_exit_link_training_required;
- psr_context->sdpTransmitLineNumDeadline =
- psr_config->psr_sdp_transmit_line_num_deadline;
- psr_context->psrFrameCaptureIndicationReq =
- psr_config->psr_frame_capture_indication_req;
-
- psr_context->skipPsrWaitForPllLock = 0; /* only = 1 in KV */
-
- psr_context->numberOfControllers =
- link->dc->res_pool->res_cap->num_timing_generator;
-
- psr_context->rfb_update_auto_en = true;
-
- /* 2 frames before enter PSR. */
- psr_context->timehyst_frames = 2;
- /* half a frame
- * (units in 100 lines, i.e. a value of 1 represents 100 lines)
- */
- psr_context->hyst_lines = stream->timing.v_total / 2 / 100;
- psr_context->aux_repeats = 10;
-
- psr_context->psr_level.u32all = 0;
-
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
- /*skip power down the single pipe since it blocks the cstate*/
- if (ASIC_REV_IS_RAVEN(link->ctx->asic_id.hw_internal_rev))
- psr_context->psr_level.bits.SKIP_CRTC_DISABLE = true;
-#endif
-
- /* SMU will perform additional powerdown sequence.
- * For unsupported ASICs, set psr_level flag to skip PSR
- * static screen notification to SMU.
- * (Always set for DAL2, did not check ASIC)
- */
- psr_context->psr_level.bits.SKIP_SMU_NOTIFICATION = 1;
-
- /* Complete PSR entry before aborting to prevent intermittent
- * freezes on certain eDPs
- */
- psr_context->psr_level.bits.DISABLE_PSR_ENTRY_ABORT = 1;
-
- /* Controls additional delay after remote frame capture before
- * continuing power down, default = 0
- */
- psr_context->frame_delay = 0;
-
- link->psr_enabled = true;
- dmcu->funcs->setup_psr(dmcu, link, psr_context);
- return true;
- } else
- return false;
-
-}
-
const struct dc_link_status *dc_link_get_status(const struct dc_link *link)
{
return &link->link_status;
@@ -2418,6 +2310,9 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx, int option)
if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
deallocate_mst_payload(pipe_ctx);
+ if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP)
+ core_dc->hwss.edp_backlight_control(pipe_ctx->stream->sink->link, false);
+
core_dc->hwss.disable_stream(pipe_ctx, option);
disable_link(pipe_ctx->stream->sink->link, pipe_ctx->stream->signal);
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 00528b214a9f..61e8c3e02d16 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -1470,6 +1470,12 @@ void decide_link_settings(struct dc_stream_state *stream,
return;
}
+ /* eDP uses the link cap setting */
+ if (stream->sink->sink_signal == SIGNAL_TYPE_EDP) {
+ *link_setting = link->verified_link_cap;
+ return;
+ }
+
/* search for the minimum link setting that:
* 1. is supported according to the link training result
* 2. could support the b/w requested by the timing
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
index f2902569be2e..2096f2a179f2 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
@@ -88,15 +88,7 @@ void dp_enable_link_phy(
}
if (dc_is_dp_sst_signal(signal)) {
- if (signal == SIGNAL_TYPE_EDP) {
- link->dc->hwss.edp_power_control(link, true);
- link_enc->funcs->enable_dp_output(
- link_enc,
- link_settings,
- clock_source);
- link->dc->hwss.edp_backlight_control(link, true);
- } else
- link_enc->funcs->enable_dp_output(
+ link_enc->funcs->enable_dp_output(
link_enc,
link_settings,
clock_source);
@@ -138,7 +130,6 @@ void dp_disable_link_phy(struct dc_link *link, enum signal_type signal)
dp_receiver_power_ctrl(link, false);
if (signal == SIGNAL_TYPE_EDP) {
- link->dc->hwss.edp_backlight_control(link, false);
edp_receiver_ready_T9(link);
link->link_enc->funcs->disable_output(link->link_enc, signal);
link->dc->hwss.edp_power_control(link, false);
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 9c5e879f18b3..95b8dd0e53c6 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -1,5 +1,5 @@
/*
-* Copyright 2012-15 Advanced Micro Devices, Inc.
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -498,26 +498,15 @@ static void calculate_viewport(struct pipe_ctx *pipe_ctx)
data->viewport_c.height = (data->viewport.height + vpc_div - 1) / vpc_div;
/* Handle hsplit */
- if (pri_split || sec_split) {
- /* HMirror XOR Secondary_pipe XOR Rotation_180 */
- bool right_view = (sec_split != plane_state->horizontal_mirror) !=
- (plane_state->rotation == ROTATION_ANGLE_180);
-
- if (plane_state->rotation == ROTATION_ANGLE_90
- || plane_state->rotation == ROTATION_ANGLE_270)
- /* Secondary_pipe XOR Rotation_270 */
- right_view = (plane_state->rotation == ROTATION_ANGLE_270) != sec_split;
-
- if (right_view) {
- data->viewport.x += data->viewport.width / 2;
- data->viewport_c.x += data->viewport_c.width / 2;
- /* Ceil offset pipe */
- data->viewport.width = (data->viewport.width + 1) / 2;
- data->viewport_c.width = (data->viewport_c.width + 1) / 2;
- } else {
- data->viewport.width /= 2;
- data->viewport_c.width /= 2;
- }
+ if (sec_split) {
+ data->viewport.x += data->viewport.width / 2;
+ data->viewport_c.x += data->viewport_c.width / 2;
+ /* Ceil offset pipe */
+ data->viewport.width = (data->viewport.width + 1) / 2;
+ data->viewport_c.width = (data->viewport_c.width + 1) / 2;
+ } else if (pri_split) {
+ data->viewport.width /= 2;
+ data->viewport_c.width /= 2;
}
if (plane_state->rotation == ROTATION_ANGLE_90 ||
@@ -534,6 +523,11 @@ static void calculate_recout(struct pipe_ctx *pipe_ctx, struct view *recout_skip
struct rect surf_src = plane_state->src_rect;
struct rect surf_clip = plane_state->clip_rect;
int recout_full_x, recout_full_y;
+ bool pri_split = pipe_ctx->bottom_pipe &&
+ pipe_ctx->bottom_pipe->plane_state == pipe_ctx->plane_state;
+ bool sec_split = pipe_ctx->top_pipe &&
+ pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state;
+ bool top_bottom_split = stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM;
if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 ||
pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270)
@@ -568,33 +562,43 @@ static void calculate_recout(struct pipe_ctx *pipe_ctx, struct view *recout_skip
- pipe_ctx->plane_res.scl_data.recout.y;
/* Handle h & vsplit */
- if (pipe_ctx->top_pipe && pipe_ctx->top_pipe->plane_state ==
- pipe_ctx->plane_state) {
- if (stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM) {
- pipe_ctx->plane_res.scl_data.recout.y += pipe_ctx->plane_res.scl_data.recout.height / 2;
- /* Floor primary pipe, ceil 2ndary pipe */
- pipe_ctx->plane_res.scl_data.recout.height = (pipe_ctx->plane_res.scl_data.recout.height + 1) / 2;
+ if (sec_split && top_bottom_split) {
+ pipe_ctx->plane_res.scl_data.recout.y +=
+ pipe_ctx->plane_res.scl_data.recout.height / 2;
+ /* Floor primary pipe, ceil 2ndary pipe */
+ pipe_ctx->plane_res.scl_data.recout.height =
+ (pipe_ctx->plane_res.scl_data.recout.height + 1) / 2;
+ } else if (pri_split && top_bottom_split)
+ pipe_ctx->plane_res.scl_data.recout.height /= 2;
+ else if (pri_split || sec_split) {
+ /* HMirror XOR Secondary_pipe XOR Rotation_180 */
+ bool right_view = (sec_split != plane_state->horizontal_mirror) !=
+ (plane_state->rotation == ROTATION_ANGLE_180);
+
+ if (plane_state->rotation == ROTATION_ANGLE_90
+ || plane_state->rotation == ROTATION_ANGLE_270)
+ /* Secondary_pipe XOR Rotation_270 */
+ right_view = (plane_state->rotation == ROTATION_ANGLE_270) != sec_split;
+
+ if (right_view) {
+ pipe_ctx->plane_res.scl_data.recout.x +=
+ pipe_ctx->plane_res.scl_data.recout.width / 2;
+ /* Ceil offset pipe */
+ pipe_ctx->plane_res.scl_data.recout.width =
+ (pipe_ctx->plane_res.scl_data.recout.width + 1) / 2;
} else {
- pipe_ctx->plane_res.scl_data.recout.x += pipe_ctx->plane_res.scl_data.recout.width / 2;
- pipe_ctx->plane_res.scl_data.recout.width = (pipe_ctx->plane_res.scl_data.recout.width + 1) / 2;
- }
- } else if (pipe_ctx->bottom_pipe &&
- pipe_ctx->bottom_pipe->plane_state == pipe_ctx->plane_state) {
- if (stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM)
- pipe_ctx->plane_res.scl_data.recout.height /= 2;
- else
pipe_ctx->plane_res.scl_data.recout.width /= 2;
+ }
}
-
/* Unclipped recout offset = stream dst offset + ((surf dst offset - stream surf_src offset)
* * 1/ stream scaling ratio) - (surf surf_src offset * 1/ full scl
* ratio)
*/
- recout_full_x = stream->dst.x + (plane_state->dst_rect.x - stream->src.x)
+ recout_full_x = stream->dst.x + (plane_state->dst_rect.x - stream->src.x)
* stream->dst.width / stream->src.width -
surf_src.x * plane_state->dst_rect.width / surf_src.width
* stream->dst.width / stream->src.width;
- recout_full_y = stream->dst.y + (plane_state->dst_rect.y - stream->src.y)
+ recout_full_y = stream->dst.y + (plane_state->dst_rect.y - stream->src.y)
* stream->dst.height / stream->src.height -
surf_src.y * plane_state->dst_rect.height / surf_src.height
* stream->dst.height / stream->src.height;
@@ -650,7 +654,20 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct view *r
struct rect src = pipe_ctx->plane_state->src_rect;
int vpc_div = (data->format == PIXEL_FORMAT_420BPP8
|| data->format == PIXEL_FORMAT_420BPP10) ? 2 : 1;
+ bool flip_vert_scan_dir = false, flip_horz_scan_dir = false;
+ /*
+ * Need to calculate the scan direction for viewport to make adjustments
+ */
+ if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_180) {
+ flip_vert_scan_dir = true;
+ flip_horz_scan_dir = true;
+ } else if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90)
+ flip_vert_scan_dir = true;
+ else if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270)
+ flip_horz_scan_dir = true;
+ if (pipe_ctx->plane_state->horizontal_mirror)
+ flip_horz_scan_dir = !flip_horz_scan_dir;
if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 ||
pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270) {
@@ -715,7 +732,7 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct view *r
}
/* Adjust for non-0 viewport offset */
- if (data->viewport.x) {
+ if (data->viewport.x && !flip_horz_scan_dir) {
int int_part;
data->inits.h = dal_fixed31_32_add(data->inits.h, dal_fixed31_32_mul_int(
@@ -736,7 +753,7 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct view *r
data->inits.h = dal_fixed31_32_add_int(data->inits.h, int_part);
}
- if (data->viewport_c.x) {
+ if (data->viewport_c.x && !flip_horz_scan_dir) {
int int_part;
data->inits.h_c = dal_fixed31_32_add(data->inits.h_c, dal_fixed31_32_mul_int(
@@ -757,7 +774,7 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct view *r
data->inits.h_c = dal_fixed31_32_add_int(data->inits.h_c, int_part);
}
- if (data->viewport.y) {
+ if (data->viewport.y && !flip_vert_scan_dir) {
int int_part;
data->inits.v = dal_fixed31_32_add(data->inits.v, dal_fixed31_32_mul_int(
@@ -778,7 +795,7 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct view *r
data->inits.v = dal_fixed31_32_add_int(data->inits.v, int_part);
}
- if (data->viewport_c.y) {
+ if (data->viewport_c.y && !flip_vert_scan_dir) {
int int_part;
data->inits.v_c = dal_fixed31_32_add(data->inits.v_c, dal_fixed31_32_mul_int(
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index 375fb457e223..261811e0c094 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -226,7 +226,7 @@ bool dc_stream_set_cursor_attributes(
if (pipe_ctx->plane_res.dpp != NULL &&
pipe_ctx->plane_res.dpp->funcs->set_cursor_attributes != NULL)
pipe_ctx->plane_res.dpp->funcs->set_cursor_attributes(
- pipe_ctx->plane_res.dpp, attributes);
+ pipe_ctx->plane_res.dpp, attributes->color_format);
}
stream->cursor_attributes = *attributes;
@@ -301,6 +301,8 @@ bool dc_stream_set_cursor_position(
}
+ stream->cursor_position = *position;
+
return true;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index c99ed85ba9a2..e2e3c9df79ea 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -38,7 +38,7 @@
#include "inc/compressor.h"
#include "dml/display_mode_lib.h"
-#define DC_VER "3.1.20"
+#define DC_VER "3.1.27"
#define MAX_SURFACES 3
#define MAX_STREAMS 6
@@ -250,6 +250,8 @@ struct dc {
*/
struct dm_pp_display_configuration prev_display_config;
+ bool optimized_required;
+
/* FBC compressor */
#if defined(CONFIG_DRM_AMD_DC_FBC)
struct compressor *fbc_compressor;
@@ -340,7 +342,7 @@ struct dc_hdr_static_metadata {
enum dc_transfer_func_type {
TF_TYPE_PREDEFINED,
TF_TYPE_DISTRIBUTED_POINTS,
- TF_TYPE_BYPASS
+ TF_TYPE_BYPASS,
};
struct dc_transfer_func_distributed_points {
@@ -359,6 +361,7 @@ enum dc_transfer_func_predefined {
TRANSFER_FUNCTION_BT709,
TRANSFER_FUNCTION_PQ,
TRANSFER_FUNCTION_LINEAR,
+ TRANSFER_FUNCTION_UNITY,
};
struct dc_transfer_func {
@@ -385,6 +388,7 @@ union surface_update_flags {
struct {
/* Medium updates */
+ uint32_t dcc_change:1;
uint32_t color_space_change:1;
uint32_t input_tf_change:1;
uint32_t horizontal_mirror_change:1;
@@ -436,6 +440,7 @@ struct dc_plane_state {
enum dc_rotation_angle rotation;
enum plane_stereo_format stereo_format;
+ bool is_tiling_rotated;
bool per_pixel_alpha;
bool visible;
bool flip_immediate;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_helper.c b/drivers/gpu/drm/amd/display/dc/dc_helper.c
index c584252669fd..48e1fcf53d43 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_helper.c
@@ -1,4 +1,26 @@
/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+/*
* dc_helper.c
*
* Created on: Aug 30, 2016
diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
index 587c0bb3d4ac..03029f72dc3f 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
@@ -579,8 +579,6 @@ enum dc_timing_standard {
TIMING_STANDARD_MAX
};
-
-
enum dc_color_depth {
COLOR_DEPTH_UNDEFINED,
COLOR_DEPTH_666,
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index fed0e5ea9625..01c60f11b2bd 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -86,6 +86,7 @@ struct dc_stream_state {
struct dc_stream_status status;
struct dc_cursor_attributes cursor_attributes;
+ struct dc_cursor_position cursor_position;
/* from stream struct */
struct kref refcount;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index 9291a60126ad..9faddfae241d 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -218,6 +218,7 @@ struct dc_edid_caps {
bool lte_340mcsc_scramble;
bool edid_hdmi;
+ bool hdr_supported;
};
struct view {
diff --git a/drivers/gpu/drm/amd/display/dc/dce/Makefile b/drivers/gpu/drm/amd/display/dc/dce/Makefile
index 8abec0bed379..11401fd8e535 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce/Makefile
@@ -1,4 +1,25 @@
#
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
# Makefile for common 'dce' logic
# HW object file under this folder follow similar pattern for HW programming
# - register offset and/or shift + mask stored in the dec_hw struct
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
index 3fe8e697483f..b48190f54907 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
@@ -385,21 +385,12 @@ static bool dce_abm_init_backlight(struct abm *abm)
return true;
}
-static bool is_dmcu_initialized(struct abm *abm)
-{
- struct dce_abm *abm_dce = TO_DCE_ABM(abm);
- unsigned int dmcu_uc_reset;
-
- REG_GET(DMCU_STATUS, UC_IN_RESET, &dmcu_uc_reset);
-
- return !dmcu_uc_reset;
-}
-
static bool dce_abm_set_backlight_level(
struct abm *abm,
unsigned int backlight_level,
unsigned int frame_ramp,
- unsigned int controller_id)
+ unsigned int controller_id,
+ bool use_smooth_brightness)
{
struct dce_abm *abm_dce = TO_DCE_ABM(abm);
@@ -408,7 +399,7 @@ static bool dce_abm_set_backlight_level(
backlight_level, backlight_level);
/* If DMCU is in reset state, DMCU is uninitialized */
- if (is_dmcu_initialized(abm))
+ if (use_smooth_brightness)
dmcu_set_backlight_level(abm_dce,
backlight_level,
frame_ramp,
@@ -425,8 +416,7 @@ static const struct abm_funcs dce_funcs = {
.init_backlight = dce_abm_init_backlight,
.set_backlight_level = dce_abm_set_backlight_level,
.get_current_backlight_8_bit = dce_abm_get_current_backlight_8_bit,
- .set_abm_immediate_disable = dce_abm_immediate_disable,
- .is_dmcu_initialized = is_dmcu_initialized
+ .set_abm_immediate_disable = dce_abm_immediate_disable
};
static void dce_abm_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h
index 59e909ec88f2..ff9436966041 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h
@@ -37,8 +37,7 @@
SR(LVTMA_PWRSEQ_REF_DIV), \
SR(MASTER_COMM_CNTL_REG), \
SR(MASTER_COMM_CMD_REG), \
- SR(MASTER_COMM_DATA_REG1), \
- SR(DMCU_STATUS)
+ SR(MASTER_COMM_DATA_REG1)
#define ABM_DCE110_COMMON_REG_LIST() \
ABM_COMMON_REG_LIST_DCE_BASE(), \
@@ -84,8 +83,7 @@
ABM_SF(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, mask_sh), \
ABM_SF(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0, mask_sh), \
ABM_SF(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE1, mask_sh), \
- ABM_SF(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE2, mask_sh), \
- ABM_SF(DMCU_STATUS, UC_IN_RESET, mask_sh)
+ ABM_SF(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE2, mask_sh)
#define ABM_MASK_SH_LIST_DCE110(mask_sh) \
ABM_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh), \
@@ -174,7 +172,6 @@
type MASTER_COMM_CMD_REG_BYTE2; \
type BL_PWM_REF_DIV; \
type BL_PWM_EN; \
- type UC_IN_RESET; \
type BL_PWM_GRP1_IGNORE_MASTER_LOCK_EN; \
type BL_PWM_GRP1_REG_LOCK; \
type BL_PWM_GRP1_REG_UPDATE_PENDING
@@ -206,7 +203,6 @@ struct dce_abm_registers {
uint32_t MASTER_COMM_CMD_REG;
uint32_t MASTER_COMM_DATA_REG1;
uint32_t BIOS_SCRATCH_2;
- uint32_t DMCU_STATUS;
uint32_t BL_PWM_GRP1_REG_LOCK;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
index 9031d22285ea..9e98a5f39a6d 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
@@ -29,7 +29,6 @@
#include "fixed32_32.h"
#include "bios_parser_interface.h"
#include "dc.h"
-#include "dce_abm.h"
#include "dmcu.h"
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
#include "dcn_calcs.h"
@@ -384,7 +383,6 @@ static int dce112_set_clock(
struct bp_set_dce_clock_parameters dce_clk_params;
struct dc_bios *bp = clk->ctx->dc_bios;
struct dc *core_dc = clk->ctx->dc;
- struct abm *abm = core_dc->res_pool->abm;
struct dmcu *dmcu = core_dc->res_pool->dmcu;
int actual_clock = requested_clk_khz;
/* Prepare to program display clock*/
@@ -417,7 +415,7 @@ static int dce112_set_clock(
bp->funcs->set_dce_clock(bp, &dce_clk_params);
- if (abm->funcs->is_dmcu_initialized(abm) && clk_dce->dfs_bypass_disp_clk != actual_clock)
+ if (clk_dce->dfs_bypass_disp_clk != actual_clock)
dmcu->funcs->set_psr_wait_loop(dmcu,
actual_clock / 1000 / 7);
clk_dce->dfs_bypass_disp_clk = actual_clock;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
index a6de99db0444..f663adb33584 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
@@ -263,15 +263,35 @@ static void dce_dmcu_setup_psr(struct dmcu *dmcu,
REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
}
+static bool dce_is_dmcu_initialized(struct dmcu *dmcu)
+{
+ struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu);
+ unsigned int dmcu_uc_reset;
+
+ /* read whether the DMCU microcontroller is held in reset */
+ REG_GET(DMCU_STATUS, UC_IN_RESET, &dmcu_uc_reset);
+
+ /* DMCU is not running */
+ if (dmcu_uc_reset)
+ return false;
+
+ return true;
+}
+
static void dce_psr_wait_loop(
struct dmcu *dmcu,
unsigned int wait_loop_number)
{
struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu);
union dce_dmcu_psr_config_data_wait_loop_reg1 masterCmdData1;
+
if (dmcu->cached_wait_loop_number == wait_loop_number)
return;
+ /* DMCU is not running */
+ if (!dce_is_dmcu_initialized(dmcu))
+ return;
+
/* waitDMCUReadyForCmd */
REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, 1, 10000);
@@ -691,6 +711,14 @@ static void dcn10_get_psr_wait_loop(
return;
}
+static bool dcn10_is_dmcu_initialized(struct dmcu *dmcu)
+{
+ /* microcontroller is not running */
+ if (dmcu->dmcu_state != DMCU_RUNNING)
+ return false;
+ return true;
+}
+
#endif
static const struct dmcu_funcs dce_funcs = {
@@ -700,7 +728,8 @@ static const struct dmcu_funcs dce_funcs = {
.setup_psr = dce_dmcu_setup_psr,
.get_psr_state = dce_get_dmcu_psr_state,
.set_psr_wait_loop = dce_psr_wait_loop,
- .get_psr_wait_loop = dce_get_psr_wait_loop
+ .get_psr_wait_loop = dce_get_psr_wait_loop,
+ .is_dmcu_initialized = dce_is_dmcu_initialized
};
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
@@ -711,7 +740,8 @@ static const struct dmcu_funcs dcn10_funcs = {
.setup_psr = dcn10_dmcu_setup_psr,
.get_psr_state = dcn10_get_dmcu_psr_state,
.set_psr_wait_loop = dcn10_psr_wait_loop,
- .get_psr_wait_loop = dcn10_get_psr_wait_loop
+ .get_psr_wait_loop = dcn10_get_psr_wait_loop,
+ .is_dmcu_initialized = dcn10_is_dmcu_initialized
};
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h
index 4c25e2dd28f8..1d4546f23135 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h
@@ -62,6 +62,8 @@
DMCU_ENABLE, mask_sh), \
DMCU_SF(DMCU_STATUS, \
UC_IN_STOP_MODE, mask_sh), \
+ DMCU_SF(DMCU_STATUS, \
+ UC_IN_RESET, mask_sh), \
DMCU_SF(DMCU_RAM_ACCESS_CTRL, \
IRAM_HOST_ACCESS_EN, mask_sh), \
DMCU_SF(DMCU_RAM_ACCESS_CTRL, \
@@ -98,6 +100,7 @@
type IRAM_RD_ADDR_AUTO_INC; \
type DMCU_ENABLE; \
type UC_IN_STOP_MODE; \
+ type UC_IN_RESET; \
type MASTER_COMM_CMD_REG_BYTE0; \
type MASTER_COMM_INTERRUPT; \
type DPHY_RX_FAST_TRAINING_CAPABLE; \
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
index 3b0db253ac22..b73db9e78437 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
@@ -582,7 +582,8 @@ struct dce_hwseq_registers {
type DOMAIN7_PGFSM_PWR_STATUS; \
type DCFCLK_GATE_DIS; \
type DCHUBBUB_GLOBAL_TIMER_REFDIV; \
- type DENTIST_DPPCLK_WDIVIDER;
+ type DENTIST_DPPCLK_WDIVIDER; \
+ type DENTIST_DISPCLK_WDIVIDER;
struct dce_hwseq_shift {
HWSEQ_REG_FIELD_LIST(uint8_t)
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
index bad70c6b3aad..a266e3f5e75f 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
@@ -1072,21 +1072,6 @@ void dce110_link_encoder_disable_output(
/* disable encoder */
if (dc_is_dp_signal(signal))
link_encoder_disable(enc110);
-
- /*
- * TODO: Power control cause regression, we should implement
- * it properly, for now just comment it.
- */
-// if (enc110->base.connector.id == CONNECTOR_ID_EDP) {
-// /* power down eDP panel */
-// link_encoder_edp_wait_for_hpd_ready(
-// enc,
-// enc->connector,
-// false);
-//
-// link_encoder_edp_power_control(
-// enc, false);
-// }
}
void dce110_link_encoder_dp_set_lane_settings(
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/Makefile b/drivers/gpu/drm/amd/display/dc/dce100/Makefile
index ea40870624b3..a822d4e2a169 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce100/Makefile
@@ -1,4 +1,25 @@
#
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
# Makefile for the 'controller' sub-component of DAL.
# It provides the control and status of HW CRTC block.
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
index 90911258bdb3..3ea43e2a9450 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
@@ -1,5 +1,5 @@
/*
-* Copyright 2012-15 Advanced Micro Devices, Inc.
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.h b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.h
index de8fdf438f9b..2f366d66635d 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.h
@@ -1,4 +1,27 @@
/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ */
+/*
* dce100_resource.h
*
* Created on: 2016-01-20
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/Makefile b/drivers/gpu/drm/amd/display/dc/dce110/Makefile
index 98d956e2f218..d564c0eb8b04 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce110/Makefile
@@ -1,4 +1,25 @@
#
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
# Makefile for the 'controller' sub-component of DAL.
# It provides the control and status of HW CRTC block.
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index e650bdcd9423..86cdd7b4811f 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -354,8 +354,8 @@ static bool convert_to_custom_float(struct pwl_result_data *rgb_resulted,
return false;
}
- if (!convert_to_custom_float_format(arr_points[2].slope, &fmt,
- &arr_points[2].custom_float_slope)) {
+ if (!convert_to_custom_float_format(arr_points[1].slope, &fmt,
+ &arr_points[1].custom_float_slope)) {
BREAK_TO_DEBUGGER();
return false;
}
@@ -870,8 +870,6 @@ void hwss_edp_power_control(
"%s: Skipping Panel Power action: %s\n",
__func__, (power_up ? "On":"Off"));
}
-
- hwss_edp_wait_for_hpd_ready(link, true);
}
/*todo: cloned in stream enc, fix*/
@@ -972,11 +970,9 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx, int option)
}
/* blank at encoder level */
- if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
- if (pipe_ctx->stream->sink->link->connector_signal == SIGNAL_TYPE_EDP)
- hwss_edp_backlight_control(link, false);
+ if (dc_is_dp_signal(pipe_ctx->stream->signal))
pipe_ctx->stream_res.stream_enc->funcs->dp_blank(pipe_ctx->stream_res.stream_enc);
- }
+
link->link_enc->funcs->connect_dig_be_to_fe(
link->link_enc,
pipe_ctx->stream_res.stream_enc->id,
@@ -988,15 +984,12 @@ void dce110_unblank_stream(struct pipe_ctx *pipe_ctx,
struct dc_link_settings *link_settings)
{
struct encoder_unblank_param params = { { 0 } };
- struct dc_link *link = pipe_ctx->stream->sink->link;
/* only 3 items below are used by unblank */
params.pixel_clk_khz =
pipe_ctx->stream->timing.pix_clk_khz;
params.link_settings.link_rate = link_settings->link_rate;
pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(pipe_ctx->stream_res.stream_enc, &params);
- if (link->connector_signal == SIGNAL_TYPE_EDP)
- hwss_edp_backlight_control(link, true);
}
@@ -1342,10 +1335,8 @@ static void power_down_encoders(struct dc *dc)
if (!dc->links[i]->wa_flags.dp_keep_receiver_powered)
dp_receiver_power_ctrl(dc->links[i], false);
- if (connector_id == CONNECTOR_ID_EDP) {
+ if (connector_id == CONNECTOR_ID_EDP)
signal = SIGNAL_TYPE_EDP;
- hwss_edp_backlight_control(dc->links[i], false);
- }
}
dc->links[i]->link_enc->funcs->disable_output(
@@ -1698,60 +1689,54 @@ static void apply_min_clocks(
/*
* Check if FBC can be enabled
*/
-static enum dc_status validate_fbc(struct dc *dc,
- struct dc_state *context)
+static bool should_enable_fbc(struct dc *dc,
+ struct dc_state *context)
{
- struct pipe_ctx *pipe_ctx =
- &context->res_ctx.pipe_ctx[0];
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[0];
ASSERT(dc->fbc_compressor);
/* FBC memory should be allocated */
if (!dc->ctx->fbc_gpu_addr)
- return DC_ERROR_UNEXPECTED;
+ return false;
/* Only supports single display */
if (context->stream_count != 1)
- return DC_ERROR_UNEXPECTED;
+ return false;
/* Only supports eDP */
if (pipe_ctx->stream->sink->link->connector_signal != SIGNAL_TYPE_EDP)
- return DC_ERROR_UNEXPECTED;
+ return false;
/* PSR should not be enabled */
if (pipe_ctx->stream->sink->link->psr_enabled)
- return DC_ERROR_UNEXPECTED;
+ return false;
/* Nothing to compress */
if (!pipe_ctx->plane_state)
- return DC_ERROR_UNEXPECTED;
+ return false;
/* Only for non-linear tiling */
if (pipe_ctx->plane_state->tiling_info.gfx8.array_mode == DC_ARRAY_LINEAR_GENERAL)
- return DC_ERROR_UNEXPECTED;
+ return false;
- return DC_OK;
+ return true;
}
/*
* Enable FBC
*/
-static enum dc_status enable_fbc(struct dc *dc,
- struct dc_state *context)
+static void enable_fbc(struct dc *dc,
+ struct dc_state *context)
{
- enum dc_status status = validate_fbc(dc, context);
-
- if (status == DC_OK) {
+ if (should_enable_fbc(dc, context)) {
/* Program GRPH COMPRESSED ADDRESS and PITCH */
struct compr_addr_and_pitch_params params = {0, 0, 0};
struct compressor *compr = dc->fbc_compressor;
- struct pipe_ctx *pipe_ctx =
- &context->res_ctx.pipe_ctx[0];
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[0];
- params.source_view_width =
- pipe_ctx->stream->timing.h_addressable;
- params.source_view_height =
- pipe_ctx->stream->timing.v_addressable;
+ params.source_view_width = pipe_ctx->stream->timing.h_addressable;
+ params.source_view_height = pipe_ctx->stream->timing.v_addressable;
compr->compr_surface_address.quad_part = dc->ctx->fbc_gpu_addr;
@@ -1760,7 +1745,6 @@ static enum dc_status enable_fbc(struct dc *dc,
compr->funcs->enable_fbc(compr, &params);
}
- return status;
}
#endif
@@ -2026,8 +2010,7 @@ enum dc_status dce110_apply_ctx_to_hw(
if (pipe_ctx->stream == pipe_ctx_old->stream)
continue;
- if (pipe_ctx->stream && pipe_ctx_old->stream
- && !pipe_need_reprogram(pipe_ctx_old, pipe_ctx))
+ if (pipe_ctx_old->stream && !pipe_need_reprogram(pipe_ctx_old, pipe_ctx))
continue;
if (pipe_ctx->top_pipe)
@@ -2063,9 +2046,6 @@ enum dc_status dce110_apply_ctx_to_hw(
context,
dc);
- if (dc->hwss.enable_plane)
- dc->hwss.enable_plane(dc, pipe_ctx, context);
-
if (DC_OK != status)
return status;
}
@@ -2095,16 +2075,8 @@ static void set_default_colors(struct pipe_ctx *pipe_ctx)
struct default_adjustment default_adjust = { 0 };
default_adjust.force_hw_default = false;
- if (pipe_ctx->plane_state == NULL)
- default_adjust.in_color_space = COLOR_SPACE_SRGB;
- else
- default_adjust.in_color_space =
- pipe_ctx->plane_state->color_space;
- if (pipe_ctx->stream == NULL)
- default_adjust.out_color_space = COLOR_SPACE_SRGB;
- else
- default_adjust.out_color_space =
- pipe_ctx->stream->output_color_space;
+ default_adjust.in_color_space = pipe_ctx->plane_state->color_space;
+ default_adjust.out_color_space = pipe_ctx->stream->output_color_space;
default_adjust.csc_adjust_type = GRAPHICS_CSC_ADJUST_TYPE_SW;
default_adjust.surface_pixel_format = pipe_ctx->plane_res.scl_data.format;
@@ -2872,13 +2844,12 @@ static void dce110_apply_ctx_for_surface(
continue;
/* Need to allocate mem before program front end for Fiji */
- if (pipe_ctx->plane_res.mi != NULL)
- pipe_ctx->plane_res.mi->funcs->allocate_mem_input(
- pipe_ctx->plane_res.mi,
- pipe_ctx->stream->timing.h_total,
- pipe_ctx->stream->timing.v_total,
- pipe_ctx->stream->timing.pix_clk_khz,
- context->stream_count);
+ pipe_ctx->plane_res.mi->funcs->allocate_mem_input(
+ pipe_ctx->plane_res.mi,
+ pipe_ctx->stream->timing.h_total,
+ pipe_ctx->stream->timing.v_total,
+ pipe_ctx->stream->timing.pix_clk_khz,
+ context->stream_count);
dce110_program_front_end_for_pipe(dc, pipe_ctx);
@@ -2985,6 +2956,7 @@ static const struct hw_sequencer_funcs dce110_funcs = {
.pplib_apply_display_requirements = pplib_apply_display_requirements,
.edp_backlight_control = hwss_edp_backlight_control,
.edp_power_control = hwss_edp_power_control,
+ .edp_wait_for_hpd_ready = hwss_edp_wait_for_hpd_ready,
};
void dce110_hw_sequencer_construct(struct dc *dc)
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
index 2dd6ac637572..fc637647f643 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
@@ -77,5 +77,9 @@ void hwss_edp_backlight_control(
struct dc_link *link,
bool enable);
+void hwss_edp_wait_for_hpd_ready(
+ struct dc_link *link,
+ bool power_up);
+
#endif /* __DC_HWSS_DCE110_H__ */
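
A minimal sketch of how the new hook could be used; only the hwss entry and the declaration above come from this patch, the placement in the eDP power-up path is an assumption:

	/* hypothetical call site: after panel power is applied (edp_power_control),
	 * wait for HPD to report the panel ready before starting link training
	 */
	dc->hwss.edp_wait_for_hpd_ready(link, true);
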
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
index 5228ee78f7e6..7c4779578fb7 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
@@ -1,5 +1,5 @@
/*
-* Copyright 2012-15 Advanced Micro Devices, Inc.
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c
index 07d9303d5477..59b4cd329715 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c
@@ -1,3 +1,26 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
#include "dm_services.h"
/* include DCE11 register header files */
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/Makefile b/drivers/gpu/drm/amd/display/dc/dce112/Makefile
index 265ac4310d85..8e090446d511 100644
--- a/drivers/gpu/drm/amd/display/dc/dce112/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce112/Makefile
@@ -1,4 +1,25 @@
#
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
# Makefile for the 'controller' sub-component of DAL.
# It provides the control and status of HW CRTC block.
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/Makefile b/drivers/gpu/drm/amd/display/dc/dce120/Makefile
index 1779b963525c..37db1f8d45ea 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce120/Makefile
@@ -1,4 +1,25 @@
#
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
# Makefile for the 'controller' sub-component of DAL.
# It provides the control and status of HW CRTC block.
@@ -8,4 +29,4 @@ dce120_hw_sequencer.o
AMD_DAL_DCE120 = $(addprefix $(AMDDALPATH)/dc/dce120/,$(DCE120))
-AMD_DISPLAY_FILES += $(AMD_DAL_DCE120) \ No newline at end of file
+AMD_DISPLAY_FILES += $(AMD_DAL_DCE120)
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/Makefile b/drivers/gpu/drm/amd/display/dc/dce80/Makefile
index c1105895e5fa..bc388aa4b2f5 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce80/Makefile
@@ -1,4 +1,25 @@
#
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
# Makefile for the 'controller' sub-component of DAL.
# It provides the control and status of HW CRTC block.
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
index a6ca1f97f748..5469bdfe19f3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
@@ -1,8 +1,29 @@
#
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
# Makefile for DCN.
DCN10 = dcn10_resource.o dcn10_ipp.o dcn10_hw_sequencer.o \
- dcn10_dpp.o dcn10_opp.o dcn10_timing_generator.o \
+ dcn10_dpp.o dcn10_opp.o dcn10_optc.o \
dcn10_hubp.o dcn10_mpc.o \
dcn10_dpp_dscl.o dcn10_dpp_cm.o dcn10_cm_common.o \
dcn10_hubbub.o
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
index 7f579cb19f4b..53ba3600ee6a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
@@ -22,11 +22,12 @@
* Authors: AMD
*
*/
-
+#include "dc.h"
#include "reg_helper.h"
#include "dcn10_dpp.h"
#include "dcn10_cm_common.h"
+#include "custom_float.h"
#define REG(reg) reg
@@ -121,3 +122,294 @@ void cm_helper_program_xfer_func(
}
}
+
+
+
+bool cm_helper_convert_to_custom_float(
+ struct pwl_result_data *rgb_resulted,
+ struct curve_points *arr_points,
+ uint32_t hw_points_num,
+ bool fixpoint)
+{
+ struct custom_float_format fmt;
+
+ struct pwl_result_data *rgb = rgb_resulted;
+
+ uint32_t i = 0;
+
+ fmt.exponenta_bits = 6;
+ fmt.mantissa_bits = 12;
+ fmt.sign = false;
+
+ if (!convert_to_custom_float_format(arr_points[0].x, &fmt,
+ &arr_points[0].custom_float_x)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!convert_to_custom_float_format(arr_points[0].offset, &fmt,
+ &arr_points[0].custom_float_offset)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!convert_to_custom_float_format(arr_points[0].slope, &fmt,
+ &arr_points[0].custom_float_slope)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ fmt.mantissa_bits = 10;
+ fmt.sign = false;
+
+ if (!convert_to_custom_float_format(arr_points[1].x, &fmt,
+ &arr_points[1].custom_float_x)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (fixpoint == true)
+ arr_points[1].custom_float_y = dal_fixed31_32_clamp_u0d14(arr_points[1].y);
+ else if (!convert_to_custom_float_format(arr_points[1].y, &fmt,
+ &arr_points[1].custom_float_y)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!convert_to_custom_float_format(arr_points[1].slope, &fmt,
+ &arr_points[1].custom_float_slope)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (hw_points_num == 0 || rgb_resulted == NULL || fixpoint == true)
+ return true;
+
+ fmt.mantissa_bits = 12;
+ fmt.sign = true;
+
+ while (i != hw_points_num) {
+ if (!convert_to_custom_float_format(rgb->red, &fmt,
+ &rgb->red_reg)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!convert_to_custom_float_format(rgb->green, &fmt,
+ &rgb->green_reg)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!convert_to_custom_float_format(rgb->blue, &fmt,
+ &rgb->blue_reg)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!convert_to_custom_float_format(rgb->delta_red, &fmt,
+ &rgb->delta_red_reg)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!convert_to_custom_float_format(rgb->delta_green, &fmt,
+ &rgb->delta_green_reg)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!convert_to_custom_float_format(rgb->delta_blue, &fmt,
+ &rgb->delta_blue_reg)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ ++rgb;
+ ++i;
+ }
+
+ return true;
+}
+
+
+#define MAX_REGIONS_NUMBER 34
+#define MAX_LOW_POINT 25
+#define NUMBER_SEGMENTS 32
+
+bool cm_helper_translate_curve_to_hw_format(
+ const struct dc_transfer_func *output_tf,
+ struct pwl_params *lut_params, bool fixpoint)
+{
+ struct curve_points *arr_points;
+ struct pwl_result_data *rgb_resulted;
+ struct pwl_result_data *rgb;
+ struct pwl_result_data *rgb_plus_1;
+ struct fixed31_32 y_r;
+ struct fixed31_32 y_g;
+ struct fixed31_32 y_b;
+ struct fixed31_32 y1_min;
+ struct fixed31_32 y3_max;
+
+ int32_t segment_start, segment_end;
+ int32_t i;
+ uint32_t j, k, seg_distr[MAX_REGIONS_NUMBER], increment, start_index, hw_points;
+
+ if (output_tf == NULL || lut_params == NULL || output_tf->type == TF_TYPE_BYPASS)
+ return false;
+
+ PERF_TRACE();
+
+ arr_points = lut_params->arr_points;
+ rgb_resulted = lut_params->rgb_resulted;
+ hw_points = 0;
+
+ memset(lut_params, 0, sizeof(struct pwl_params));
+ memset(seg_distr, 0, sizeof(seg_distr));
+
+ if (output_tf->tf == TRANSFER_FUNCTION_PQ) {
+ /* 32 segments
+ * segments are from 2^-25 to 2^7
+ */
+ for (i = 0; i < 32 ; i++)
+ seg_distr[i] = 3;
+
+ segment_start = -25;
+ segment_end = 7;
+ } else {
+ /* 10 segments
+ * segments span 2^-10 to 2^0
+ * fewer than 256 points are used, as an optimization
+ */
+ seg_distr[0] = 3;
+ seg_distr[1] = 4;
+ seg_distr[2] = 4;
+ seg_distr[3] = 4;
+ seg_distr[4] = 4;
+ seg_distr[5] = 4;
+ seg_distr[6] = 4;
+ seg_distr[7] = 4;
+ seg_distr[8] = 5;
+ seg_distr[9] = 5;
+
+ segment_start = -10;
+ segment_end = 0;
+ }
+
+ for (i = segment_end - segment_start; i < MAX_REGIONS_NUMBER ; i++)
+ seg_distr[i] = -1;
+
+ for (k = 0; k < MAX_REGIONS_NUMBER; k++) {
+ if (seg_distr[k] != -1)
+ hw_points += (1 << seg_distr[k]);
+ }
+
+ j = 0;
+ for (k = 0; k < (segment_end - segment_start); k++) {
+ increment = NUMBER_SEGMENTS / (1 << seg_distr[k]);
+ start_index = (segment_start + k + MAX_LOW_POINT) * NUMBER_SEGMENTS;
+ for (i = start_index; i < start_index + NUMBER_SEGMENTS; i += increment) {
+ if (j == hw_points - 1)
+ break;
+ rgb_resulted[j].red = output_tf->tf_pts.red[i];
+ rgb_resulted[j].green = output_tf->tf_pts.green[i];
+ rgb_resulted[j].blue = output_tf->tf_pts.blue[i];
+ j++;
+ }
+ }
+
+ /* last point */
+ start_index = (segment_end + MAX_LOW_POINT) * NUMBER_SEGMENTS;
+ rgb_resulted[hw_points - 1].red = output_tf->tf_pts.red[start_index];
+ rgb_resulted[hw_points - 1].green = output_tf->tf_pts.green[start_index];
+ rgb_resulted[hw_points - 1].blue = output_tf->tf_pts.blue[start_index];
+
+ arr_points[0].x = dal_fixed31_32_pow(dal_fixed31_32_from_int(2),
+ dal_fixed31_32_from_int(segment_start));
+ arr_points[1].x = dal_fixed31_32_pow(dal_fixed31_32_from_int(2),
+ dal_fixed31_32_from_int(segment_end));
+
+ y_r = rgb_resulted[0].red;
+ y_g = rgb_resulted[0].green;
+ y_b = rgb_resulted[0].blue;
+
+ y1_min = dal_fixed31_32_min(y_r, dal_fixed31_32_min(y_g, y_b));
+
+ arr_points[0].y = y1_min;
+ arr_points[0].slope = dal_fixed31_32_div(arr_points[0].y, arr_points[0].x);
+ y_r = rgb_resulted[hw_points - 1].red;
+ y_g = rgb_resulted[hw_points - 1].green;
+ y_b = rgb_resulted[hw_points - 1].blue;
+
+ /* see comment above, m_arrPoints[1].y should be the Y value for the
+ * region end (m_numOfHwPoints), not the last HW point (m_numOfHwPoints - 1)
+ */
+ y3_max = dal_fixed31_32_max(y_r, dal_fixed31_32_max(y_g, y_b));
+
+ arr_points[1].y = y3_max;
+
+ arr_points[1].slope = dal_fixed31_32_zero;
+
+ if (output_tf->tf == TRANSFER_FUNCTION_PQ) {
+ /* for PQ, we want to have a straight line from the last HW X point,
+ * and the slope to be such that we hit 1.0 at 10000 nits.
+ */
+ const struct fixed31_32 end_value =
+ dal_fixed31_32_from_int(125);
+
+ arr_points[1].slope = dal_fixed31_32_div(
+ dal_fixed31_32_sub(dal_fixed31_32_one, arr_points[1].y),
+ dal_fixed31_32_sub(end_value, arr_points[1].x));
+ }
+
+ lut_params->hw_points_num = hw_points;
+
+ i = 1;
+ for (k = 0; k < MAX_REGIONS_NUMBER && i < MAX_REGIONS_NUMBER; k++) {
+ if (seg_distr[k] != -1) {
+ lut_params->arr_curve_points[k].segments_num =
+ seg_distr[k];
+ lut_params->arr_curve_points[i].offset =
+ lut_params->arr_curve_points[k].offset + (1 << seg_distr[k]);
+ }
+ i++;
+ }
+
+ if (seg_distr[k] != -1)
+ lut_params->arr_curve_points[k].segments_num = seg_distr[k];
+
+ rgb = rgb_resulted;
+ rgb_plus_1 = rgb_resulted + 1;
+
+ i = 1;
+ while (i != hw_points + 1) {
+ if (dal_fixed31_32_lt(rgb_plus_1->red, rgb->red))
+ rgb_plus_1->red = rgb->red;
+ if (dal_fixed31_32_lt(rgb_plus_1->green, rgb->green))
+ rgb_plus_1->green = rgb->green;
+ if (dal_fixed31_32_lt(rgb_plus_1->blue, rgb->blue))
+ rgb_plus_1->blue = rgb->blue;
+
+ rgb->delta_red = dal_fixed31_32_sub(rgb_plus_1->red, rgb->red);
+ rgb->delta_green = dal_fixed31_32_sub(rgb_plus_1->green, rgb->green);
+ rgb->delta_blue = dal_fixed31_32_sub(rgb_plus_1->blue, rgb->blue);
+
+ if (fixpoint == true) {
+ rgb->delta_red_reg = dal_fixed31_32_clamp_u0d10(rgb->delta_red);
+ rgb->delta_green_reg = dal_fixed31_32_clamp_u0d10(rgb->delta_green);
+ rgb->delta_blue_reg = dal_fixed31_32_clamp_u0d10(rgb->delta_blue);
+ rgb->red_reg = dal_fixed31_32_clamp_u0d14(rgb->red);
+ rgb->green_reg = dal_fixed31_32_clamp_u0d14(rgb->green);
+ rgb->blue_reg = dal_fixed31_32_clamp_u0d14(rgb->blue);
+ }
+
+ ++rgb_plus_1;
+ ++rgb;
+ ++i;
+ }
+ cm_helper_convert_to_custom_float(rgb_resulted,
+ lut_params->arr_points,
+ hw_points, fixpoint);
+
+ return true;
+}
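
For reference, the two seg_distr layouts in cm_helper_translate_curve_to_hw_format above lead to the following hw_points totals (hw_points is the sum of 2^seg_distr[k] over the populated regions, as computed in the loop over MAX_REGIONS_NUMBER):

	/* worked example, follows directly from the code above:
	 * PQ:      32 regions x 2^3 points each            = 256 points
	 * default: 2^3 + 7 x 2^4 + 2 x 2^5 = 8 + 112 + 64  = 184 points
	 */
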
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h
index 64836dcf21f2..64e476b83bcb 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h
@@ -96,4 +96,14 @@ void cm_helper_program_xfer_func(
const struct pwl_params *params,
const struct xfer_func_reg *reg);
+bool cm_helper_convert_to_custom_float(
+ struct pwl_result_data *rgb_resulted,
+ struct curve_points *arr_points,
+ uint32_t hw_points_num,
+ bool fixpoint);
+
+bool cm_helper_translate_curve_to_hw_format(
+ const struct dc_transfer_func *output_tf,
+ struct pwl_params *lut_params, bool fixpoint);
+
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
index 8df3945370cf..f2a08b156cf0 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
@@ -159,11 +159,10 @@ bool dpp_get_optimal_number_of_taps(
scl_data->taps.h_taps = 1;
if (IDENTITY_RATIO(scl_data->ratios.vert))
scl_data->taps.v_taps = 1;
- /*
- * Spreadsheet doesn't handle taps_c is one properly,
- * need to force Chroma to always be scaled to pass
- * bandwidth validation.
- */
+ if (IDENTITY_RATIO(scl_data->ratios.horz_c))
+ scl_data->taps.h_taps_c = 1;
+ if (IDENTITY_RATIO(scl_data->ratios.vert_c))
+ scl_data->taps.v_taps_c = 1;
}
return true;
@@ -386,10 +385,9 @@ void dpp1_cnv_setup (
void dpp1_set_cursor_attributes(
struct dpp *dpp_base,
- const struct dc_cursor_attributes *attr)
+ enum dc_cursor_color_format color_format)
{
struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
- enum dc_cursor_color_format color_format = attr->color_format;
REG_UPDATE_2(CURSOR0_CONTROL,
CUR0_MODE, color_format,
@@ -402,13 +400,6 @@ void dpp1_set_cursor_attributes(
REG_UPDATE(CURSOR0_COLOR1,
CUR0_COLOR1, 0xFFFFFFFF);
}
-
- /* TODO: Fixed vs float */
-
- REG_UPDATE_3(FORMAT_CONTROL,
- CNVC_BYPASS, 0,
- FORMAT_CONTROL__ALPHA_EN, 1,
- FORMAT_EXPANSION_MODE, 0);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
index ad71fb50f8a5..f56ee4d08d89 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
@@ -730,8 +730,9 @@
type CM_BLNDGAM_RAMA_EXP_REGION33_NUM_SEGMENTS; \
type CM_BLNDGAM_LUT_WRITE_EN_MASK; \
type CM_BLNDGAM_LUT_WRITE_SEL; \
+ type CM_BLNDGAM_CONFIG_STATUS; \
type CM_BLNDGAM_LUT_INDEX; \
- type CM_BLNDGAM_LUT_DATA; \
+ type BLNDGAM_MEM_PWR_FORCE; \
type CM_3DLUT_MODE; \
type CM_3DLUT_SIZE; \
type CM_3DLUT_INDEX; \
@@ -905,6 +906,7 @@
type CM_SHAPER_RAMA_EXP_REGION33_LUT_OFFSET; \
type CM_SHAPER_RAMA_EXP_REGION33_NUM_SEGMENTS; \
type CM_SHAPER_LUT_WRITE_EN_MASK; \
+ type CM_SHAPER_CONFIG_STATUS; \
type CM_SHAPER_LUT_WRITE_SEL; \
type CM_SHAPER_LUT_INDEX; \
type CM_SHAPER_LUT_DATA; \
@@ -1005,258 +1007,255 @@
type CM_BYPASS; \
type FORMAT_CONTROL__ALPHA_EN; \
type CUR0_COLOR0; \
- type CUR0_COLOR1
-
-
+ type CUR0_COLOR1;
struct dcn_dpp_shift {
- TF_REG_FIELD_LIST(uint8_t);
+ TF_REG_FIELD_LIST(uint8_t)
};
struct dcn_dpp_mask {
- TF_REG_FIELD_LIST(uint32_t);
+ TF_REG_FIELD_LIST(uint32_t)
};
-
-
+#define DPP_COMMON_REG_VARIABLE_LIST \
+ uint32_t DSCL_EXT_OVERSCAN_LEFT_RIGHT; \
+ uint32_t DSCL_EXT_OVERSCAN_TOP_BOTTOM; \
+ uint32_t OTG_H_BLANK; \
+ uint32_t OTG_V_BLANK; \
+ uint32_t SCL_MODE; \
+ uint32_t LB_DATA_FORMAT; \
+ uint32_t LB_MEMORY_CTRL; \
+ uint32_t DSCL_AUTOCAL; \
+ uint32_t SCL_BLACK_OFFSET; \
+ uint32_t SCL_TAP_CONTROL; \
+ uint32_t SCL_COEF_RAM_TAP_SELECT; \
+ uint32_t SCL_COEF_RAM_TAP_DATA; \
+ uint32_t DSCL_2TAP_CONTROL; \
+ uint32_t MPC_SIZE; \
+ uint32_t SCL_HORZ_FILTER_SCALE_RATIO; \
+ uint32_t SCL_VERT_FILTER_SCALE_RATIO; \
+ uint32_t SCL_HORZ_FILTER_SCALE_RATIO_C; \
+ uint32_t SCL_VERT_FILTER_SCALE_RATIO_C; \
+ uint32_t SCL_HORZ_FILTER_INIT; \
+ uint32_t SCL_HORZ_FILTER_INIT_C; \
+ uint32_t SCL_VERT_FILTER_INIT; \
+ uint32_t SCL_VERT_FILTER_INIT_BOT; \
+ uint32_t SCL_VERT_FILTER_INIT_C; \
+ uint32_t SCL_VERT_FILTER_INIT_BOT_C; \
+ uint32_t RECOUT_START; \
+ uint32_t RECOUT_SIZE; \
+ uint32_t CM_GAMUT_REMAP_CONTROL; \
+ uint32_t CM_GAMUT_REMAP_C11_C12; \
+ uint32_t CM_GAMUT_REMAP_C33_C34; \
+ uint32_t CM_COMA_C11_C12; \
+ uint32_t CM_COMA_C33_C34; \
+ uint32_t CM_COMB_C11_C12; \
+ uint32_t CM_COMB_C33_C34; \
+ uint32_t CM_OCSC_CONTROL; \
+ uint32_t CM_OCSC_C11_C12; \
+ uint32_t CM_OCSC_C33_C34; \
+ uint32_t CM_MEM_PWR_CTRL; \
+ uint32_t CM_RGAM_LUT_DATA; \
+ uint32_t CM_RGAM_LUT_WRITE_EN_MASK; \
+ uint32_t CM_RGAM_LUT_INDEX; \
+ uint32_t CM_RGAM_RAMB_START_CNTL_B; \
+ uint32_t CM_RGAM_RAMB_START_CNTL_G; \
+ uint32_t CM_RGAM_RAMB_START_CNTL_R; \
+ uint32_t CM_RGAM_RAMB_SLOPE_CNTL_B; \
+ uint32_t CM_RGAM_RAMB_SLOPE_CNTL_G; \
+ uint32_t CM_RGAM_RAMB_SLOPE_CNTL_R; \
+ uint32_t CM_RGAM_RAMB_END_CNTL1_B; \
+ uint32_t CM_RGAM_RAMB_END_CNTL2_B; \
+ uint32_t CM_RGAM_RAMB_END_CNTL1_G; \
+ uint32_t CM_RGAM_RAMB_END_CNTL2_G; \
+ uint32_t CM_RGAM_RAMB_END_CNTL1_R; \
+ uint32_t CM_RGAM_RAMB_END_CNTL2_R; \
+ uint32_t CM_RGAM_RAMB_REGION_0_1; \
+ uint32_t CM_RGAM_RAMB_REGION_32_33; \
+ uint32_t CM_RGAM_RAMA_START_CNTL_B; \
+ uint32_t CM_RGAM_RAMA_START_CNTL_G; \
+ uint32_t CM_RGAM_RAMA_START_CNTL_R; \
+ uint32_t CM_RGAM_RAMA_SLOPE_CNTL_B; \
+ uint32_t CM_RGAM_RAMA_SLOPE_CNTL_G; \
+ uint32_t CM_RGAM_RAMA_SLOPE_CNTL_R; \
+ uint32_t CM_RGAM_RAMA_END_CNTL1_B; \
+ uint32_t CM_RGAM_RAMA_END_CNTL2_B; \
+ uint32_t CM_RGAM_RAMA_END_CNTL1_G; \
+ uint32_t CM_RGAM_RAMA_END_CNTL2_G; \
+ uint32_t CM_RGAM_RAMA_END_CNTL1_R; \
+ uint32_t CM_RGAM_RAMA_END_CNTL2_R; \
+ uint32_t CM_RGAM_RAMA_REGION_0_1; \
+ uint32_t CM_RGAM_RAMA_REGION_32_33; \
+ uint32_t CM_RGAM_CONTROL; \
+ uint32_t CM_CMOUT_CONTROL; \
+ uint32_t CM_BLNDGAM_LUT_WRITE_EN_MASK; \
+ uint32_t CM_BLNDGAM_CONTROL; \
+ uint32_t CM_BLNDGAM_RAMB_START_CNTL_B; \
+ uint32_t CM_BLNDGAM_RAMB_START_CNTL_G; \
+ uint32_t CM_BLNDGAM_RAMB_START_CNTL_R; \
+ uint32_t CM_BLNDGAM_RAMB_SLOPE_CNTL_B; \
+ uint32_t CM_BLNDGAM_RAMB_SLOPE_CNTL_G; \
+ uint32_t CM_BLNDGAM_RAMB_SLOPE_CNTL_R; \
+ uint32_t CM_BLNDGAM_RAMB_END_CNTL1_B; \
+ uint32_t CM_BLNDGAM_RAMB_END_CNTL2_B; \
+ uint32_t CM_BLNDGAM_RAMB_END_CNTL1_G; \
+ uint32_t CM_BLNDGAM_RAMB_END_CNTL2_G; \
+ uint32_t CM_BLNDGAM_RAMB_END_CNTL1_R; \
+ uint32_t CM_BLNDGAM_RAMB_END_CNTL2_R; \
+ uint32_t CM_BLNDGAM_RAMB_REGION_0_1; \
+ uint32_t CM_BLNDGAM_RAMB_REGION_2_3; \
+ uint32_t CM_BLNDGAM_RAMB_REGION_4_5; \
+ uint32_t CM_BLNDGAM_RAMB_REGION_6_7; \
+ uint32_t CM_BLNDGAM_RAMB_REGION_8_9; \
+ uint32_t CM_BLNDGAM_RAMB_REGION_10_11; \
+ uint32_t CM_BLNDGAM_RAMB_REGION_12_13; \
+ uint32_t CM_BLNDGAM_RAMB_REGION_14_15; \
+ uint32_t CM_BLNDGAM_RAMB_REGION_16_17; \
+ uint32_t CM_BLNDGAM_RAMB_REGION_18_19; \
+ uint32_t CM_BLNDGAM_RAMB_REGION_20_21; \
+ uint32_t CM_BLNDGAM_RAMB_REGION_22_23; \
+ uint32_t CM_BLNDGAM_RAMB_REGION_24_25; \
+ uint32_t CM_BLNDGAM_RAMB_REGION_26_27; \
+ uint32_t CM_BLNDGAM_RAMB_REGION_28_29; \
+ uint32_t CM_BLNDGAM_RAMB_REGION_30_31; \
+ uint32_t CM_BLNDGAM_RAMB_REGION_32_33; \
+ uint32_t CM_BLNDGAM_RAMA_START_CNTL_B; \
+ uint32_t CM_BLNDGAM_RAMA_START_CNTL_G; \
+ uint32_t CM_BLNDGAM_RAMA_START_CNTL_R; \
+ uint32_t CM_BLNDGAM_RAMA_SLOPE_CNTL_B; \
+ uint32_t CM_BLNDGAM_RAMA_SLOPE_CNTL_G; \
+ uint32_t CM_BLNDGAM_RAMA_SLOPE_CNTL_R; \
+ uint32_t CM_BLNDGAM_RAMA_END_CNTL1_B; \
+ uint32_t CM_BLNDGAM_RAMA_END_CNTL2_B; \
+ uint32_t CM_BLNDGAM_RAMA_END_CNTL1_G; \
+ uint32_t CM_BLNDGAM_RAMA_END_CNTL2_G; \
+ uint32_t CM_BLNDGAM_RAMA_END_CNTL1_R; \
+ uint32_t CM_BLNDGAM_RAMA_END_CNTL2_R; \
+ uint32_t CM_BLNDGAM_RAMA_REGION_0_1; \
+ uint32_t CM_BLNDGAM_RAMA_REGION_2_3; \
+ uint32_t CM_BLNDGAM_RAMA_REGION_4_5; \
+ uint32_t CM_BLNDGAM_RAMA_REGION_6_7; \
+ uint32_t CM_BLNDGAM_RAMA_REGION_8_9; \
+ uint32_t CM_BLNDGAM_RAMA_REGION_10_11; \
+ uint32_t CM_BLNDGAM_RAMA_REGION_12_13; \
+ uint32_t CM_BLNDGAM_RAMA_REGION_14_15; \
+ uint32_t CM_BLNDGAM_RAMA_REGION_16_17; \
+ uint32_t CM_BLNDGAM_RAMA_REGION_18_19; \
+ uint32_t CM_BLNDGAM_RAMA_REGION_20_21; \
+ uint32_t CM_BLNDGAM_RAMA_REGION_22_23; \
+ uint32_t CM_BLNDGAM_RAMA_REGION_24_25; \
+ uint32_t CM_BLNDGAM_RAMA_REGION_26_27; \
+ uint32_t CM_BLNDGAM_RAMA_REGION_28_29; \
+ uint32_t CM_BLNDGAM_RAMA_REGION_30_31; \
+ uint32_t CM_BLNDGAM_RAMA_REGION_32_33; \
+ uint32_t CM_BLNDGAM_LUT_INDEX; \
+ uint32_t CM_3DLUT_MODE; \
+ uint32_t CM_3DLUT_INDEX; \
+ uint32_t CM_3DLUT_DATA; \
+ uint32_t CM_3DLUT_DATA_30BIT; \
+ uint32_t CM_3DLUT_READ_WRITE_CONTROL; \
+ uint32_t CM_SHAPER_LUT_WRITE_EN_MASK; \
+ uint32_t CM_SHAPER_CONTROL; \
+ uint32_t CM_SHAPER_RAMB_START_CNTL_B; \
+ uint32_t CM_SHAPER_RAMB_START_CNTL_G; \
+ uint32_t CM_SHAPER_RAMB_START_CNTL_R; \
+ uint32_t CM_SHAPER_RAMB_END_CNTL_B; \
+ uint32_t CM_SHAPER_RAMB_END_CNTL_G; \
+ uint32_t CM_SHAPER_RAMB_END_CNTL_R; \
+ uint32_t CM_SHAPER_RAMB_REGION_0_1; \
+ uint32_t CM_SHAPER_RAMB_REGION_2_3; \
+ uint32_t CM_SHAPER_RAMB_REGION_4_5; \
+ uint32_t CM_SHAPER_RAMB_REGION_6_7; \
+ uint32_t CM_SHAPER_RAMB_REGION_8_9; \
+ uint32_t CM_SHAPER_RAMB_REGION_10_11; \
+ uint32_t CM_SHAPER_RAMB_REGION_12_13; \
+ uint32_t CM_SHAPER_RAMB_REGION_14_15; \
+ uint32_t CM_SHAPER_RAMB_REGION_16_17; \
+ uint32_t CM_SHAPER_RAMB_REGION_18_19; \
+ uint32_t CM_SHAPER_RAMB_REGION_20_21; \
+ uint32_t CM_SHAPER_RAMB_REGION_22_23; \
+ uint32_t CM_SHAPER_RAMB_REGION_24_25; \
+ uint32_t CM_SHAPER_RAMB_REGION_26_27; \
+ uint32_t CM_SHAPER_RAMB_REGION_28_29; \
+ uint32_t CM_SHAPER_RAMB_REGION_30_31; \
+ uint32_t CM_SHAPER_RAMB_REGION_32_33; \
+ uint32_t CM_SHAPER_RAMA_START_CNTL_B; \
+ uint32_t CM_SHAPER_RAMA_START_CNTL_G; \
+ uint32_t CM_SHAPER_RAMA_START_CNTL_R; \
+ uint32_t CM_SHAPER_RAMA_END_CNTL_B; \
+ uint32_t CM_SHAPER_RAMA_END_CNTL_G; \
+ uint32_t CM_SHAPER_RAMA_END_CNTL_R; \
+ uint32_t CM_SHAPER_RAMA_REGION_0_1; \
+ uint32_t CM_SHAPER_RAMA_REGION_2_3; \
+ uint32_t CM_SHAPER_RAMA_REGION_4_5; \
+ uint32_t CM_SHAPER_RAMA_REGION_6_7; \
+ uint32_t CM_SHAPER_RAMA_REGION_8_9; \
+ uint32_t CM_SHAPER_RAMA_REGION_10_11; \
+ uint32_t CM_SHAPER_RAMA_REGION_12_13; \
+ uint32_t CM_SHAPER_RAMA_REGION_14_15; \
+ uint32_t CM_SHAPER_RAMA_REGION_16_17; \
+ uint32_t CM_SHAPER_RAMA_REGION_18_19; \
+ uint32_t CM_SHAPER_RAMA_REGION_20_21; \
+ uint32_t CM_SHAPER_RAMA_REGION_22_23; \
+ uint32_t CM_SHAPER_RAMA_REGION_24_25; \
+ uint32_t CM_SHAPER_RAMA_REGION_26_27; \
+ uint32_t CM_SHAPER_RAMA_REGION_28_29; \
+ uint32_t CM_SHAPER_RAMA_REGION_30_31; \
+ uint32_t CM_SHAPER_RAMA_REGION_32_33; \
+ uint32_t CM_SHAPER_LUT_INDEX; \
+ uint32_t CM_SHAPER_LUT_DATA; \
+ uint32_t CM_ICSC_CONTROL; \
+ uint32_t CM_ICSC_C11_C12; \
+ uint32_t CM_ICSC_C33_C34; \
+ uint32_t CM_BNS_VALUES_R; \
+ uint32_t CM_BNS_VALUES_G; \
+ uint32_t CM_BNS_VALUES_B; \
+ uint32_t CM_DGAM_RAMB_START_CNTL_B; \
+ uint32_t CM_DGAM_RAMB_START_CNTL_G; \
+ uint32_t CM_DGAM_RAMB_START_CNTL_R; \
+ uint32_t CM_DGAM_RAMB_SLOPE_CNTL_B; \
+ uint32_t CM_DGAM_RAMB_SLOPE_CNTL_G; \
+ uint32_t CM_DGAM_RAMB_SLOPE_CNTL_R; \
+ uint32_t CM_DGAM_RAMB_END_CNTL1_B; \
+ uint32_t CM_DGAM_RAMB_END_CNTL2_B; \
+ uint32_t CM_DGAM_RAMB_END_CNTL1_G; \
+ uint32_t CM_DGAM_RAMB_END_CNTL2_G; \
+ uint32_t CM_DGAM_RAMB_END_CNTL1_R; \
+ uint32_t CM_DGAM_RAMB_END_CNTL2_R; \
+ uint32_t CM_DGAM_RAMB_REGION_0_1; \
+ uint32_t CM_DGAM_RAMB_REGION_14_15; \
+ uint32_t CM_DGAM_RAMA_START_CNTL_B; \
+ uint32_t CM_DGAM_RAMA_START_CNTL_G; \
+ uint32_t CM_DGAM_RAMA_START_CNTL_R; \
+ uint32_t CM_DGAM_RAMA_SLOPE_CNTL_B; \
+ uint32_t CM_DGAM_RAMA_SLOPE_CNTL_G; \
+ uint32_t CM_DGAM_RAMA_SLOPE_CNTL_R; \
+ uint32_t CM_DGAM_RAMA_END_CNTL1_B; \
+ uint32_t CM_DGAM_RAMA_END_CNTL2_B; \
+ uint32_t CM_DGAM_RAMA_END_CNTL1_G; \
+ uint32_t CM_DGAM_RAMA_END_CNTL2_G; \
+ uint32_t CM_DGAM_RAMA_END_CNTL1_R; \
+ uint32_t CM_DGAM_RAMA_END_CNTL2_R; \
+ uint32_t CM_DGAM_RAMA_REGION_0_1; \
+ uint32_t CM_DGAM_RAMA_REGION_14_15; \
+ uint32_t CM_DGAM_LUT_WRITE_EN_MASK; \
+ uint32_t CM_DGAM_LUT_INDEX; \
+ uint32_t CM_DGAM_LUT_DATA; \
+ uint32_t CM_CONTROL; \
+ uint32_t CM_DGAM_CONTROL; \
+ uint32_t CM_IGAM_CONTROL; \
+ uint32_t CM_IGAM_LUT_RW_CONTROL; \
+ uint32_t CM_IGAM_LUT_RW_INDEX; \
+ uint32_t CM_IGAM_LUT_SEQ_COLOR; \
+ uint32_t FORMAT_CONTROL; \
+ uint32_t CNVC_SURFACE_PIXEL_FORMAT; \
+ uint32_t CURSOR_CONTROL; \
+ uint32_t CURSOR0_CONTROL; \
+ uint32_t CURSOR0_COLOR0; \
+ uint32_t CURSOR0_COLOR1;
struct dcn_dpp_registers {
- uint32_t DSCL_EXT_OVERSCAN_LEFT_RIGHT;
- uint32_t DSCL_EXT_OVERSCAN_TOP_BOTTOM;
- uint32_t OTG_H_BLANK;
- uint32_t OTG_V_BLANK;
- uint32_t SCL_MODE;
- uint32_t LB_DATA_FORMAT;
- uint32_t LB_MEMORY_CTRL;
- uint32_t DSCL_AUTOCAL;
- uint32_t SCL_BLACK_OFFSET;
- uint32_t SCL_TAP_CONTROL;
- uint32_t SCL_COEF_RAM_TAP_SELECT;
- uint32_t SCL_COEF_RAM_TAP_DATA;
- uint32_t DSCL_2TAP_CONTROL;
- uint32_t MPC_SIZE;
- uint32_t SCL_HORZ_FILTER_SCALE_RATIO;
- uint32_t SCL_VERT_FILTER_SCALE_RATIO;
- uint32_t SCL_HORZ_FILTER_SCALE_RATIO_C;
- uint32_t SCL_VERT_FILTER_SCALE_RATIO_C;
- uint32_t SCL_HORZ_FILTER_INIT;
- uint32_t SCL_HORZ_FILTER_INIT_C;
- uint32_t SCL_VERT_FILTER_INIT;
- uint32_t SCL_VERT_FILTER_INIT_BOT;
- uint32_t SCL_VERT_FILTER_INIT_C;
- uint32_t SCL_VERT_FILTER_INIT_BOT_C;
- uint32_t RECOUT_START;
- uint32_t RECOUT_SIZE;
- uint32_t CM_GAMUT_REMAP_CONTROL;
- uint32_t CM_GAMUT_REMAP_C11_C12;
- uint32_t CM_GAMUT_REMAP_C33_C34;
- uint32_t CM_COMA_C11_C12;
- uint32_t CM_COMA_C33_C34;
- uint32_t CM_COMB_C11_C12;
- uint32_t CM_COMB_C33_C34;
- uint32_t CM_OCSC_CONTROL;
- uint32_t CM_OCSC_C11_C12;
- uint32_t CM_OCSC_C33_C34;
- uint32_t CM_MEM_PWR_CTRL;
- uint32_t CM_RGAM_LUT_DATA;
- uint32_t CM_RGAM_LUT_WRITE_EN_MASK;
- uint32_t CM_RGAM_LUT_INDEX;
- uint32_t CM_RGAM_RAMB_START_CNTL_B;
- uint32_t CM_RGAM_RAMB_START_CNTL_G;
- uint32_t CM_RGAM_RAMB_START_CNTL_R;
- uint32_t CM_RGAM_RAMB_SLOPE_CNTL_B;
- uint32_t CM_RGAM_RAMB_SLOPE_CNTL_G;
- uint32_t CM_RGAM_RAMB_SLOPE_CNTL_R;
- uint32_t CM_RGAM_RAMB_END_CNTL1_B;
- uint32_t CM_RGAM_RAMB_END_CNTL2_B;
- uint32_t CM_RGAM_RAMB_END_CNTL1_G;
- uint32_t CM_RGAM_RAMB_END_CNTL2_G;
- uint32_t CM_RGAM_RAMB_END_CNTL1_R;
- uint32_t CM_RGAM_RAMB_END_CNTL2_R;
- uint32_t CM_RGAM_RAMB_REGION_0_1;
- uint32_t CM_RGAM_RAMB_REGION_32_33;
- uint32_t CM_RGAM_RAMA_START_CNTL_B;
- uint32_t CM_RGAM_RAMA_START_CNTL_G;
- uint32_t CM_RGAM_RAMA_START_CNTL_R;
- uint32_t CM_RGAM_RAMA_SLOPE_CNTL_B;
- uint32_t CM_RGAM_RAMA_SLOPE_CNTL_G;
- uint32_t CM_RGAM_RAMA_SLOPE_CNTL_R;
- uint32_t CM_RGAM_RAMA_END_CNTL1_B;
- uint32_t CM_RGAM_RAMA_END_CNTL2_B;
- uint32_t CM_RGAM_RAMA_END_CNTL1_G;
- uint32_t CM_RGAM_RAMA_END_CNTL2_G;
- uint32_t CM_RGAM_RAMA_END_CNTL1_R;
- uint32_t CM_RGAM_RAMA_END_CNTL2_R;
- uint32_t CM_RGAM_RAMA_REGION_0_1;
- uint32_t CM_RGAM_RAMA_REGION_32_33;
- uint32_t CM_RGAM_CONTROL;
- uint32_t CM_CMOUT_CONTROL;
- uint32_t CM_BLNDGAM_LUT_WRITE_EN_MASK;
- uint32_t CM_BLNDGAM_CONTROL;
- uint32_t CM_BLNDGAM_RAMB_START_CNTL_B;
- uint32_t CM_BLNDGAM_RAMB_START_CNTL_G;
- uint32_t CM_BLNDGAM_RAMB_START_CNTL_R;
- uint32_t CM_BLNDGAM_RAMB_SLOPE_CNTL_B;
- uint32_t CM_BLNDGAM_RAMB_SLOPE_CNTL_G;
- uint32_t CM_BLNDGAM_RAMB_SLOPE_CNTL_R;
- uint32_t CM_BLNDGAM_RAMB_END_CNTL1_B;
- uint32_t CM_BLNDGAM_RAMB_END_CNTL2_B;
- uint32_t CM_BLNDGAM_RAMB_END_CNTL1_G;
- uint32_t CM_BLNDGAM_RAMB_END_CNTL2_G;
- uint32_t CM_BLNDGAM_RAMB_END_CNTL1_R;
- uint32_t CM_BLNDGAM_RAMB_END_CNTL2_R;
- uint32_t CM_BLNDGAM_RAMB_REGION_0_1;
- uint32_t CM_BLNDGAM_RAMB_REGION_2_3;
- uint32_t CM_BLNDGAM_RAMB_REGION_4_5;
- uint32_t CM_BLNDGAM_RAMB_REGION_6_7;
- uint32_t CM_BLNDGAM_RAMB_REGION_8_9;
- uint32_t CM_BLNDGAM_RAMB_REGION_10_11;
- uint32_t CM_BLNDGAM_RAMB_REGION_12_13;
- uint32_t CM_BLNDGAM_RAMB_REGION_14_15;
- uint32_t CM_BLNDGAM_RAMB_REGION_16_17;
- uint32_t CM_BLNDGAM_RAMB_REGION_18_19;
- uint32_t CM_BLNDGAM_RAMB_REGION_20_21;
- uint32_t CM_BLNDGAM_RAMB_REGION_22_23;
- uint32_t CM_BLNDGAM_RAMB_REGION_24_25;
- uint32_t CM_BLNDGAM_RAMB_REGION_26_27;
- uint32_t CM_BLNDGAM_RAMB_REGION_28_29;
- uint32_t CM_BLNDGAM_RAMB_REGION_30_31;
- uint32_t CM_BLNDGAM_RAMB_REGION_32_33;
- uint32_t CM_BLNDGAM_RAMA_START_CNTL_B;
- uint32_t CM_BLNDGAM_RAMA_START_CNTL_G;
- uint32_t CM_BLNDGAM_RAMA_START_CNTL_R;
- uint32_t CM_BLNDGAM_RAMA_SLOPE_CNTL_B;
- uint32_t CM_BLNDGAM_RAMA_SLOPE_CNTL_G;
- uint32_t CM_BLNDGAM_RAMA_SLOPE_CNTL_R;
- uint32_t CM_BLNDGAM_RAMA_END_CNTL1_B;
- uint32_t CM_BLNDGAM_RAMA_END_CNTL2_B;
- uint32_t CM_BLNDGAM_RAMA_END_CNTL1_G;
- uint32_t CM_BLNDGAM_RAMA_END_CNTL2_G;
- uint32_t CM_BLNDGAM_RAMA_END_CNTL1_R;
- uint32_t CM_BLNDGAM_RAMA_END_CNTL2_R;
- uint32_t CM_BLNDGAM_RAMA_REGION_0_1;
- uint32_t CM_BLNDGAM_RAMA_REGION_2_3;
- uint32_t CM_BLNDGAM_RAMA_REGION_4_5;
- uint32_t CM_BLNDGAM_RAMA_REGION_6_7;
- uint32_t CM_BLNDGAM_RAMA_REGION_8_9;
- uint32_t CM_BLNDGAM_RAMA_REGION_10_11;
- uint32_t CM_BLNDGAM_RAMA_REGION_12_13;
- uint32_t CM_BLNDGAM_RAMA_REGION_14_15;
- uint32_t CM_BLNDGAM_RAMA_REGION_16_17;
- uint32_t CM_BLNDGAM_RAMA_REGION_18_19;
- uint32_t CM_BLNDGAM_RAMA_REGION_20_21;
- uint32_t CM_BLNDGAM_RAMA_REGION_22_23;
- uint32_t CM_BLNDGAM_RAMA_REGION_24_25;
- uint32_t CM_BLNDGAM_RAMA_REGION_26_27;
- uint32_t CM_BLNDGAM_RAMA_REGION_28_29;
- uint32_t CM_BLNDGAM_RAMA_REGION_30_31;
- uint32_t CM_BLNDGAM_RAMA_REGION_32_33;
- uint32_t CM_BLNDGAM_LUT_INDEX;
- uint32_t CM_BLNDGAM_LUT_DATA;
- uint32_t CM_3DLUT_MODE;
- uint32_t CM_3DLUT_INDEX;
- uint32_t CM_3DLUT_DATA;
- uint32_t CM_3DLUT_DATA_30BIT;
- uint32_t CM_3DLUT_READ_WRITE_CONTROL;
- uint32_t CM_SHAPER_LUT_WRITE_EN_MASK;
- uint32_t CM_SHAPER_CONTROL;
- uint32_t CM_SHAPER_RAMB_START_CNTL_B;
- uint32_t CM_SHAPER_RAMB_START_CNTL_G;
- uint32_t CM_SHAPER_RAMB_START_CNTL_R;
- uint32_t CM_SHAPER_RAMB_END_CNTL_B;
- uint32_t CM_SHAPER_RAMB_END_CNTL_G;
- uint32_t CM_SHAPER_RAMB_END_CNTL_R;
- uint32_t CM_SHAPER_RAMB_REGION_0_1;
- uint32_t CM_SHAPER_RAMB_REGION_2_3;
- uint32_t CM_SHAPER_RAMB_REGION_4_5;
- uint32_t CM_SHAPER_RAMB_REGION_6_7;
- uint32_t CM_SHAPER_RAMB_REGION_8_9;
- uint32_t CM_SHAPER_RAMB_REGION_10_11;
- uint32_t CM_SHAPER_RAMB_REGION_12_13;
- uint32_t CM_SHAPER_RAMB_REGION_14_15;
- uint32_t CM_SHAPER_RAMB_REGION_16_17;
- uint32_t CM_SHAPER_RAMB_REGION_18_19;
- uint32_t CM_SHAPER_RAMB_REGION_20_21;
- uint32_t CM_SHAPER_RAMB_REGION_22_23;
- uint32_t CM_SHAPER_RAMB_REGION_24_25;
- uint32_t CM_SHAPER_RAMB_REGION_26_27;
- uint32_t CM_SHAPER_RAMB_REGION_28_29;
- uint32_t CM_SHAPER_RAMB_REGION_30_31;
- uint32_t CM_SHAPER_RAMB_REGION_32_33;
- uint32_t CM_SHAPER_RAMA_START_CNTL_B;
- uint32_t CM_SHAPER_RAMA_START_CNTL_G;
- uint32_t CM_SHAPER_RAMA_START_CNTL_R;
- uint32_t CM_SHAPER_RAMA_END_CNTL_B;
- uint32_t CM_SHAPER_RAMA_END_CNTL_G;
- uint32_t CM_SHAPER_RAMA_END_CNTL_R;
- uint32_t CM_SHAPER_RAMA_REGION_0_1;
- uint32_t CM_SHAPER_RAMA_REGION_2_3;
- uint32_t CM_SHAPER_RAMA_REGION_4_5;
- uint32_t CM_SHAPER_RAMA_REGION_6_7;
- uint32_t CM_SHAPER_RAMA_REGION_8_9;
- uint32_t CM_SHAPER_RAMA_REGION_10_11;
- uint32_t CM_SHAPER_RAMA_REGION_12_13;
- uint32_t CM_SHAPER_RAMA_REGION_14_15;
- uint32_t CM_SHAPER_RAMA_REGION_16_17;
- uint32_t CM_SHAPER_RAMA_REGION_18_19;
- uint32_t CM_SHAPER_RAMA_REGION_20_21;
- uint32_t CM_SHAPER_RAMA_REGION_22_23;
- uint32_t CM_SHAPER_RAMA_REGION_24_25;
- uint32_t CM_SHAPER_RAMA_REGION_26_27;
- uint32_t CM_SHAPER_RAMA_REGION_28_29;
- uint32_t CM_SHAPER_RAMA_REGION_30_31;
- uint32_t CM_SHAPER_RAMA_REGION_32_33;
- uint32_t CM_SHAPER_LUT_INDEX;
- uint32_t CM_SHAPER_LUT_DATA;
- uint32_t CM_ICSC_CONTROL;
- uint32_t CM_ICSC_C11_C12;
- uint32_t CM_ICSC_C33_C34;
- uint32_t CM_BNS_VALUES_R;
- uint32_t CM_BNS_VALUES_G;
- uint32_t CM_BNS_VALUES_B;
- uint32_t CM_DGAM_RAMB_START_CNTL_B;
- uint32_t CM_DGAM_RAMB_START_CNTL_G;
- uint32_t CM_DGAM_RAMB_START_CNTL_R;
- uint32_t CM_DGAM_RAMB_SLOPE_CNTL_B;
- uint32_t CM_DGAM_RAMB_SLOPE_CNTL_G;
- uint32_t CM_DGAM_RAMB_SLOPE_CNTL_R;
- uint32_t CM_DGAM_RAMB_END_CNTL1_B;
- uint32_t CM_DGAM_RAMB_END_CNTL2_B;
- uint32_t CM_DGAM_RAMB_END_CNTL1_G;
- uint32_t CM_DGAM_RAMB_END_CNTL2_G;
- uint32_t CM_DGAM_RAMB_END_CNTL1_R;
- uint32_t CM_DGAM_RAMB_END_CNTL2_R;
- uint32_t CM_DGAM_RAMB_REGION_0_1;
- uint32_t CM_DGAM_RAMB_REGION_14_15;
- uint32_t CM_DGAM_RAMA_START_CNTL_B;
- uint32_t CM_DGAM_RAMA_START_CNTL_G;
- uint32_t CM_DGAM_RAMA_START_CNTL_R;
- uint32_t CM_DGAM_RAMA_SLOPE_CNTL_B;
- uint32_t CM_DGAM_RAMA_SLOPE_CNTL_G;
- uint32_t CM_DGAM_RAMA_SLOPE_CNTL_R;
- uint32_t CM_DGAM_RAMA_END_CNTL1_B;
- uint32_t CM_DGAM_RAMA_END_CNTL2_B;
- uint32_t CM_DGAM_RAMA_END_CNTL1_G;
- uint32_t CM_DGAM_RAMA_END_CNTL2_G;
- uint32_t CM_DGAM_RAMA_END_CNTL1_R;
- uint32_t CM_DGAM_RAMA_END_CNTL2_R;
- uint32_t CM_DGAM_RAMA_REGION_0_1;
- uint32_t CM_DGAM_RAMA_REGION_14_15;
- uint32_t CM_DGAM_LUT_WRITE_EN_MASK;
- uint32_t CM_DGAM_LUT_INDEX;
- uint32_t CM_DGAM_LUT_DATA;
- uint32_t CM_CONTROL;
- uint32_t CM_DGAM_CONTROL;
- uint32_t CM_IGAM_CONTROL;
- uint32_t CM_IGAM_LUT_RW_CONTROL;
- uint32_t CM_IGAM_LUT_RW_INDEX;
- uint32_t CM_IGAM_LUT_SEQ_COLOR;
- uint32_t FORMAT_CONTROL;
- uint32_t CNVC_SURFACE_PIXEL_FORMAT;
- uint32_t CURSOR_CONTROL;
- uint32_t CURSOR0_CONTROL;
- uint32_t CURSOR0_COLOR0;
- uint32_t CURSOR0_COLOR1;
+ DPP_COMMON_REG_VARIABLE_LIST
};
struct dcn10_dpp {
@@ -1284,6 +1283,10 @@ enum dcn10_input_csc_select {
INPUT_CSC_SELECT_COMA
};
+void dpp1_set_cursor_attributes(
+ struct dpp *dpp_base,
+ enum dc_cursor_color_format color_format);
+
bool dpp1_dscl_is_lb_conf_valid(
int ceil_vratio,
int num_partitions,
@@ -1371,7 +1374,7 @@ void dpp1_cm_program_regamma_lutb_settings(
const struct pwl_params *params);
void dpp1_cm_set_output_csc_adjustment(
struct dpp *dpp_base,
- const struct out_csc_color_matrix *tbl_entry);
+ const uint16_t *regval);
void dpp1_cm_set_output_csc_default(
struct dpp *dpp_base,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
index 4c90043e7b8c..a5b099023652 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
@@ -49,6 +49,8 @@
#define FN(reg_name, field_name) \
dpp->tf_shift->field_name, dpp->tf_mask->field_name
+#define NUM_ELEMENTS(a) (sizeof(a) / sizeof((a)[0]))
+
struct dcn10_input_csc_matrix {
enum dc_color_space color_space;
uint16_t regval[12];
@@ -223,18 +225,18 @@ void dpp1_cm_set_gamut_remap(
static void dpp1_cm_program_color_matrix(
struct dcn10_dpp *dpp,
- const struct out_csc_color_matrix *tbl_entry)
+ const uint16_t *regval)
{
uint32_t mode;
struct color_matrices_reg gam_regs;
REG_GET(CM_OCSC_CONTROL, CM_OCSC_MODE, &mode);
- if (tbl_entry == NULL) {
+ if (regval == NULL) {
BREAK_TO_DEBUGGER();
return;
}
-
+ mode = 4;
gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_OCSC_C11;
gam_regs.masks.csc_c11 = dpp->tf_mask->CM_OCSC_C11;
gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_OCSC_C12;
@@ -247,7 +249,7 @@ static void dpp1_cm_program_color_matrix(
cm_helper_program_color_matrices(
dpp->base.ctx,
- tbl_entry->regval,
+ regval,
&gam_regs);
} else {
@@ -257,7 +259,7 @@ static void dpp1_cm_program_color_matrix(
cm_helper_program_color_matrices(
dpp->base.ctx,
- tbl_entry->regval,
+ regval,
&gam_regs);
}
}
@@ -266,24 +268,18 @@ void dpp1_cm_set_output_csc_default(
struct dpp *dpp_base,
enum dc_color_space colorspace)
{
-
struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
- struct out_csc_color_matrix tbl_entry;
- int i, j;
- int arr_size = sizeof(output_csc_matrix) / sizeof(struct output_csc_matrix);
+ const uint16_t *regval = NULL;
+ int arr_size;
uint32_t ocsc_mode = 4;
- tbl_entry.color_space = colorspace;
-
- for (i = 0; i < arr_size; i++)
- if (output_csc_matrix[i].color_space == colorspace) {
- for (j = 0; j < 12; j++)
- tbl_entry.regval[j] = output_csc_matrix[i].regval[j];
- break;
- }
-
+ regval = find_color_matrix(colorspace, &arr_size);
+ if (regval == NULL) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+ dpp1_cm_program_color_matrix(dpp, regval);
REG_SET(CM_OCSC_CONTROL, 0, CM_OCSC_MODE, ocsc_mode);
- dpp1_cm_program_color_matrix(dpp, &tbl_entry);
}
static void dpp1_cm_get_reg_field(
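
A plausible shape for the find_color_matrix() helper used in dpp1_cm_set_output_csc_default above, inferred from the lookup loop this patch removes; the real helper is added elsewhere in the series, and the array_size semantics and field types are assumptions:

	static const uint16_t *find_color_matrix(
			enum dc_color_space color_space, int *array_size)
	{
		int i;
		const uint16_t *val = NULL;

		for (i = 0; i < NUM_ELEMENTS(output_csc_matrix); i++) {
			if (output_csc_matrix[i].color_space == color_space) {
				val = output_csc_matrix[i].regval;
				break;
			}
		}
		*array_size = NUM_ELEMENTS(output_csc_matrix);
		return val;
	}
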
@@ -315,41 +311,12 @@ static void dpp1_cm_get_reg_field(
void dpp1_cm_set_output_csc_adjustment(
struct dpp *dpp_base,
- const struct out_csc_color_matrix *tbl_entry)
+ const uint16_t *regval)
{
struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
- //enum csc_color_mode config = CSC_COLOR_MODE_GRAPHICS_OUTPUT_CSC;
uint32_t ocsc_mode = 4;
-
- /**
- *if (tbl_entry != NULL) {
- * switch (tbl_entry->color_space) {
- * case COLOR_SPACE_SRGB:
- * case COLOR_SPACE_2020_RGB_FULLRANGE:
- * ocsc_mode = 0;
- * break;
- * case COLOR_SPACE_SRGB_LIMITED:
- * case COLOR_SPACE_2020_RGB_LIMITEDRANGE:
- * ocsc_mode = 1;
- * break;
- * case COLOR_SPACE_YCBCR601:
- * case COLOR_SPACE_YCBCR601_LIMITED:
- * ocsc_mode = 2;
- * break;
- * case COLOR_SPACE_YCBCR709:
- * case COLOR_SPACE_YCBCR709_LIMITED:
- * case COLOR_SPACE_2020_YCBCR:
- * ocsc_mode = 3;
- * break;
- * case COLOR_SPACE_UNKNOWN:
- * default:
- * break;
- * }
- *}
- */
-
+ dpp1_cm_program_color_matrix(dpp, regval);
REG_SET(CM_OCSC_CONTROL, 0, CM_OCSC_MODE, ocsc_mode);
- dpp1_cm_program_color_matrix(dpp, tbl_entry);
}
void dpp1_cm_power_on_regamma_lut(struct dpp *dpp_base,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
index 584e82cc5df3..585b33384002 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
@@ -48,9 +48,20 @@ void hubp1_set_blank(struct hubp *hubp, bool blank)
HUBP_TTU_DISABLE, blank_en);
if (blank) {
- REG_WAIT(DCHUBP_CNTL,
- HUBP_NO_OUTSTANDING_REQ, 1,
- 1, 200);
+ uint32_t reg_val = REG_READ(DCHUBP_CNTL);
+
+ if (reg_val) {
+ /* init sequence workaround: in case HUBP is
+ * power gated, this wait would time out.
+ *
+ * we just wrote a non-zero value to DCHUBP_CNTL;
+ * if it reads back as 0, HUBP is power gated
+ */
+ REG_WAIT(DCHUBP_CNTL,
+ HUBP_NO_OUTSTANDING_REQ, 1,
+ 1, 200);
+ }
+
hubp->mpcc_id = 0xf;
hubp->opp_id = 0xf;
}
@@ -96,10 +107,12 @@ static void hubp1_vready_workaround(struct hubp *hubp,
}
void hubp1_program_tiling(
- struct dcn10_hubp *hubp1,
+ struct hubp *hubp,
const union dc_tiling_info *info,
const enum surface_pixel_format pixel_format)
{
+ struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
+
REG_UPDATE_6(DCSURF_ADDR_CONFIG,
NUM_PIPES, log_2(info->gfx9.num_pipes),
NUM_BANKS, log_2(info->gfx9.num_banks),
@@ -116,13 +129,14 @@ void hubp1_program_tiling(
}
void hubp1_program_size_and_rotation(
- struct dcn10_hubp *hubp1,
+ struct hubp *hubp,
enum dc_rotation_angle rotation,
enum surface_pixel_format format,
const union plane_size *plane_size,
struct dc_plane_dcc_param *dcc,
bool horizontal_mirror)
{
+ struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
uint32_t pitch, meta_pitch, pitch_c, meta_pitch_c, mirror;
/* Program data and meta surface pitch (calculation from addrlib)
@@ -178,9 +192,10 @@ void hubp1_program_size_and_rotation(
}
void hubp1_program_pixel_format(
- struct dcn10_hubp *hubp1,
+ struct hubp *hubp,
enum surface_pixel_format format)
{
+ struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
uint32_t red_bar = 3;
uint32_t blue_bar = 2;
@@ -424,13 +439,11 @@ void hubp1_program_surface_config(
struct dc_plane_dcc_param *dcc,
bool horizontal_mirror)
{
- struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
-
hubp1_dcc_control(hubp, dcc->enable, dcc->grph.independent_64b_blks);
- hubp1_program_tiling(hubp1, tiling_info, format);
+ hubp1_program_tiling(hubp, tiling_info, format);
hubp1_program_size_and_rotation(
- hubp1, rotation, format, plane_size, dcc, horizontal_mirror);
- hubp1_program_pixel_format(hubp1, format);
+ hubp, rotation, format, plane_size, dcc, horizontal_mirror);
+ hubp1_program_pixel_format(hubp, format);
}
void hubp1_program_requestor(
@@ -765,42 +778,7 @@ void hubp1_read_state(struct dcn10_hubp *hubp1,
QoS_LEVEL_HIGH_WM, &s->qos_level_high_wm);
}
-enum cursor_pitch {
- CURSOR_PITCH_64_PIXELS = 0,
- CURSOR_PITCH_128_PIXELS,
- CURSOR_PITCH_256_PIXELS
-};
-
-enum cursor_lines_per_chunk {
- CURSOR_LINE_PER_CHUNK_2 = 1,
- CURSOR_LINE_PER_CHUNK_4,
- CURSOR_LINE_PER_CHUNK_8,
- CURSOR_LINE_PER_CHUNK_16
-};
-
-static bool ippn10_cursor_program_control(
- struct dcn10_hubp *hubp1,
- bool pixel_data_invert,
- enum dc_cursor_color_format color_format)
-{
- if (REG(CURSOR_SETTINS))
- REG_SET_2(CURSOR_SETTINS, 0,
- /* no shift of the cursor HDL schedule */
- CURSOR0_DST_Y_OFFSET, 0,
- /* used to shift the cursor chunk request deadline */
- CURSOR0_CHUNK_HDL_ADJUST, 3);
- else
- REG_SET_2(CURSOR_SETTINGS, 0,
- /* no shift of the cursor HDL schedule */
- CURSOR0_DST_Y_OFFSET, 0,
- /* used to shift the cursor chunk request deadline */
- CURSOR0_CHUNK_HDL_ADJUST, 3);
-
- return true;
-}
-
-static enum cursor_pitch ippn10_get_cursor_pitch(
- unsigned int pitch)
+enum cursor_pitch hubp1_get_cursor_pitch(unsigned int pitch)
{
enum cursor_pitch hw_pitch;
@@ -823,7 +801,7 @@ static enum cursor_pitch ippn10_get_cursor_pitch(
return hw_pitch;
}
-static enum cursor_lines_per_chunk ippn10_get_lines_per_chunk(
+static enum cursor_lines_per_chunk hubp1_get_lines_per_chunk(
unsigned int cur_width,
enum dc_cursor_color_format format)
{
@@ -849,8 +827,8 @@ void hubp1_cursor_set_attributes(
const struct dc_cursor_attributes *attr)
{
struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
- enum cursor_pitch hw_pitch = ippn10_get_cursor_pitch(attr->pitch);
- enum cursor_lines_per_chunk lpc = ippn10_get_lines_per_chunk(
+ enum cursor_pitch hw_pitch = hubp1_get_cursor_pitch(attr->pitch);
+ enum cursor_lines_per_chunk lpc = hubp1_get_lines_per_chunk(
attr->width, attr->color_format);
hubp->curs_attr = *attr;
@@ -863,13 +841,17 @@ void hubp1_cursor_set_attributes(
REG_UPDATE_2(CURSOR_SIZE,
CURSOR_WIDTH, attr->width,
CURSOR_HEIGHT, attr->height);
+
REG_UPDATE_3(CURSOR_CONTROL,
CURSOR_MODE, attr->color_format,
CURSOR_PITCH, hw_pitch,
CURSOR_LINES_PER_CHUNK, lpc);
- ippn10_cursor_program_control(hubp1,
- attr->attribute_flags.bits.INVERT_PIXEL_DATA,
- attr->color_format);
+
+ REG_SET_2(CURSOR_SETTINS, 0,
+ /* no shift of the cursor HDL schedule */
+ CURSOR0_DST_Y_OFFSET, 0,
+ /* used to shift the cursor chunk request deadline */
+ CURSOR0_CHUNK_HDL_ADJUST, 3);
}
void hubp1_cursor_set_position(
@@ -909,7 +891,8 @@ void hubp1_cursor_set_position(
cur_en = 0; /* not visible beyond left edge*/
if (cur_en && REG_READ(CURSOR_SURFACE_ADDRESS) == 0)
- hubp1_cursor_set_attributes(hubp, &hubp->curs_attr);
+ hubp->funcs->set_cursor_attributes(hubp, &hubp->curs_attr);
+
REG_UPDATE(CURSOR_CONTROL,
CURSOR_ENABLE, cur_en);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
index a7834dd50716..33e91d9c010f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
@@ -127,113 +127,110 @@
SRI(CURSOR_HOT_SPOT, CURSOR, id), \
SRI(CURSOR_DST_OFFSET, CURSOR, id)
-
-
-struct dcn_mi_registers {
- uint32_t DCHUBP_CNTL;
- uint32_t HUBPREQ_DEBUG_DB;
- uint32_t DCSURF_ADDR_CONFIG;
- uint32_t DCSURF_TILING_CONFIG;
- uint32_t DCSURF_SURFACE_PITCH;
- uint32_t DCSURF_SURFACE_PITCH_C;
- uint32_t DCSURF_SURFACE_CONFIG;
- uint32_t DCSURF_FLIP_CONTROL;
- uint32_t DCSURF_PRI_VIEWPORT_DIMENSION;
- uint32_t DCSURF_PRI_VIEWPORT_START;
- uint32_t DCSURF_SEC_VIEWPORT_DIMENSION;
- uint32_t DCSURF_SEC_VIEWPORT_START;
- uint32_t DCSURF_PRI_VIEWPORT_DIMENSION_C;
- uint32_t DCSURF_PRI_VIEWPORT_START_C;
- uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH;
- uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS;
- uint32_t DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH;
- uint32_t DCSURF_SECONDARY_SURFACE_ADDRESS;
- uint32_t DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH;
- uint32_t DCSURF_PRIMARY_META_SURFACE_ADDRESS;
- uint32_t DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH;
- uint32_t DCSURF_SECONDARY_META_SURFACE_ADDRESS;
- uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C;
- uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_C;
- uint32_t DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C;
- uint32_t DCSURF_PRIMARY_META_SURFACE_ADDRESS_C;
- uint32_t DCSURF_SURFACE_INUSE;
- uint32_t DCSURF_SURFACE_INUSE_HIGH;
- uint32_t DCSURF_SURFACE_INUSE_C;
- uint32_t DCSURF_SURFACE_INUSE_HIGH_C;
- uint32_t DCSURF_SURFACE_EARLIEST_INUSE;
- uint32_t DCSURF_SURFACE_EARLIEST_INUSE_HIGH;
- uint32_t DCSURF_SURFACE_EARLIEST_INUSE_C;
- uint32_t DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C;
- uint32_t DCSURF_SURFACE_CONTROL;
- uint32_t HUBPRET_CONTROL;
- uint32_t DCN_EXPANSION_MODE;
- uint32_t DCHUBP_REQ_SIZE_CONFIG;
- uint32_t DCHUBP_REQ_SIZE_CONFIG_C;
- uint32_t BLANK_OFFSET_0;
- uint32_t BLANK_OFFSET_1;
- uint32_t DST_DIMENSIONS;
- uint32_t DST_AFTER_SCALER;
- uint32_t PREFETCH_SETTINS;
- uint32_t PREFETCH_SETTINGS;
- uint32_t VBLANK_PARAMETERS_0;
- uint32_t REF_FREQ_TO_PIX_FREQ;
- uint32_t VBLANK_PARAMETERS_1;
- uint32_t VBLANK_PARAMETERS_3;
- uint32_t NOM_PARAMETERS_0;
- uint32_t NOM_PARAMETERS_1;
- uint32_t NOM_PARAMETERS_4;
- uint32_t NOM_PARAMETERS_5;
- uint32_t PER_LINE_DELIVERY_PRE;
- uint32_t PER_LINE_DELIVERY;
- uint32_t PREFETCH_SETTINS_C;
- uint32_t PREFETCH_SETTINGS_C;
- uint32_t VBLANK_PARAMETERS_2;
- uint32_t VBLANK_PARAMETERS_4;
- uint32_t NOM_PARAMETERS_2;
- uint32_t NOM_PARAMETERS_3;
- uint32_t NOM_PARAMETERS_6;
- uint32_t NOM_PARAMETERS_7;
- uint32_t DCN_TTU_QOS_WM;
- uint32_t DCN_GLOBAL_TTU_CNTL;
- uint32_t DCN_SURF0_TTU_CNTL0;
- uint32_t DCN_SURF0_TTU_CNTL1;
- uint32_t DCN_SURF1_TTU_CNTL0;
- uint32_t DCN_SURF1_TTU_CNTL1;
- uint32_t DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_MSB;
- uint32_t DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LSB;
- uint32_t DCN_VM_CONTEXT0_PAGE_TABLE_START_ADDR_MSB;
- uint32_t DCN_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LSB;
- uint32_t DCN_VM_CONTEXT0_PAGE_TABLE_END_ADDR_MSB;
- uint32_t DCN_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LSB;
- uint32_t DCN_VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR_MSB;
- uint32_t DCN_VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR_LSB;
- uint32_t DCN_VM_MX_L1_TLB_CNTL;
- uint32_t DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB;
- uint32_t DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB;
- uint32_t DCN_VM_SYSTEM_APERTURE_LOW_ADDR_MSB;
- uint32_t DCN_VM_SYSTEM_APERTURE_LOW_ADDR_LSB;
- uint32_t DCN_VM_SYSTEM_APERTURE_HIGH_ADDR_MSB;
- uint32_t DCN_VM_SYSTEM_APERTURE_HIGH_ADDR_LSB;
- uint32_t DCN_VM_SYSTEM_APERTURE_LOW_ADDR;
- uint32_t DCN_VM_SYSTEM_APERTURE_HIGH_ADDR;
- uint32_t DCHUBBUB_SDPIF_FB_BASE;
- uint32_t DCHUBBUB_SDPIF_FB_OFFSET;
- uint32_t DCN_VM_FB_LOCATION_TOP;
- uint32_t DCN_VM_FB_LOCATION_BASE;
- uint32_t DCN_VM_FB_OFFSET;
- uint32_t DCN_VM_AGP_BASE;
- uint32_t DCN_VM_AGP_BOT;
- uint32_t DCN_VM_AGP_TOP;
- uint32_t CURSOR_SETTINS;
- uint32_t CURSOR_SETTINGS;
- uint32_t CURSOR_SURFACE_ADDRESS_HIGH;
- uint32_t CURSOR_SURFACE_ADDRESS;
- uint32_t CURSOR_SIZE;
- uint32_t CURSOR_CONTROL;
- uint32_t CURSOR_POSITION;
- uint32_t CURSOR_HOT_SPOT;
- uint32_t CURSOR_DST_OFFSET;
-};
+#define HUBP_COMMON_REG_VARIABLE_LIST \
+ uint32_t DCHUBP_CNTL; \
+ uint32_t HUBPREQ_DEBUG_DB; \
+ uint32_t DCSURF_ADDR_CONFIG; \
+ uint32_t DCSURF_TILING_CONFIG; \
+ uint32_t DCSURF_SURFACE_PITCH; \
+ uint32_t DCSURF_SURFACE_PITCH_C; \
+ uint32_t DCSURF_SURFACE_CONFIG; \
+ uint32_t DCSURF_FLIP_CONTROL; \
+ uint32_t DCSURF_PRI_VIEWPORT_DIMENSION; \
+ uint32_t DCSURF_PRI_VIEWPORT_START; \
+ uint32_t DCSURF_SEC_VIEWPORT_DIMENSION; \
+ uint32_t DCSURF_SEC_VIEWPORT_START; \
+ uint32_t DCSURF_PRI_VIEWPORT_DIMENSION_C; \
+ uint32_t DCSURF_PRI_VIEWPORT_START_C; \
+ uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH; \
+ uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS; \
+ uint32_t DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH; \
+ uint32_t DCSURF_SECONDARY_SURFACE_ADDRESS; \
+ uint32_t DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH; \
+ uint32_t DCSURF_PRIMARY_META_SURFACE_ADDRESS; \
+ uint32_t DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH; \
+ uint32_t DCSURF_SECONDARY_META_SURFACE_ADDRESS; \
+ uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C; \
+ uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_C; \
+ uint32_t DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C; \
+ uint32_t DCSURF_PRIMARY_META_SURFACE_ADDRESS_C; \
+ uint32_t DCSURF_SURFACE_INUSE; \
+ uint32_t DCSURF_SURFACE_INUSE_HIGH; \
+ uint32_t DCSURF_SURFACE_INUSE_C; \
+ uint32_t DCSURF_SURFACE_INUSE_HIGH_C; \
+ uint32_t DCSURF_SURFACE_EARLIEST_INUSE; \
+ uint32_t DCSURF_SURFACE_EARLIEST_INUSE_HIGH; \
+ uint32_t DCSURF_SURFACE_EARLIEST_INUSE_C; \
+ uint32_t DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C; \
+ uint32_t DCSURF_SURFACE_CONTROL; \
+ uint32_t HUBPRET_CONTROL; \
+ uint32_t DCN_EXPANSION_MODE; \
+ uint32_t DCHUBP_REQ_SIZE_CONFIG; \
+ uint32_t DCHUBP_REQ_SIZE_CONFIG_C; \
+ uint32_t BLANK_OFFSET_0; \
+ uint32_t BLANK_OFFSET_1; \
+ uint32_t DST_DIMENSIONS; \
+ uint32_t DST_AFTER_SCALER; \
+ uint32_t PREFETCH_SETTINS; \
+ uint32_t PREFETCH_SETTINGS; \
+ uint32_t VBLANK_PARAMETERS_0; \
+ uint32_t REF_FREQ_TO_PIX_FREQ; \
+ uint32_t VBLANK_PARAMETERS_1; \
+ uint32_t VBLANK_PARAMETERS_3; \
+ uint32_t NOM_PARAMETERS_0; \
+ uint32_t NOM_PARAMETERS_1; \
+ uint32_t NOM_PARAMETERS_4; \
+ uint32_t NOM_PARAMETERS_5; \
+ uint32_t PER_LINE_DELIVERY_PRE; \
+ uint32_t PER_LINE_DELIVERY; \
+ uint32_t PREFETCH_SETTINS_C; \
+ uint32_t PREFETCH_SETTINGS_C; \
+ uint32_t VBLANK_PARAMETERS_2; \
+ uint32_t VBLANK_PARAMETERS_4; \
+ uint32_t NOM_PARAMETERS_2; \
+ uint32_t NOM_PARAMETERS_3; \
+ uint32_t NOM_PARAMETERS_6; \
+ uint32_t NOM_PARAMETERS_7; \
+ uint32_t DCN_TTU_QOS_WM; \
+ uint32_t DCN_GLOBAL_TTU_CNTL; \
+ uint32_t DCN_SURF0_TTU_CNTL0; \
+ uint32_t DCN_SURF0_TTU_CNTL1; \
+ uint32_t DCN_SURF1_TTU_CNTL0; \
+ uint32_t DCN_SURF1_TTU_CNTL1; \
+ uint32_t DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_MSB; \
+ uint32_t DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LSB; \
+ uint32_t DCN_VM_CONTEXT0_PAGE_TABLE_START_ADDR_MSB; \
+ uint32_t DCN_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LSB; \
+ uint32_t DCN_VM_CONTEXT0_PAGE_TABLE_END_ADDR_MSB; \
+ uint32_t DCN_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LSB; \
+ uint32_t DCN_VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR_MSB; \
+ uint32_t DCN_VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR_LSB; \
+ uint32_t DCN_VM_MX_L1_TLB_CNTL; \
+ uint32_t DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB; \
+ uint32_t DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB; \
+ uint32_t DCN_VM_SYSTEM_APERTURE_LOW_ADDR_MSB; \
+ uint32_t DCN_VM_SYSTEM_APERTURE_LOW_ADDR_LSB; \
+ uint32_t DCN_VM_SYSTEM_APERTURE_HIGH_ADDR_MSB; \
+ uint32_t DCN_VM_SYSTEM_APERTURE_HIGH_ADDR_LSB; \
+ uint32_t DCN_VM_SYSTEM_APERTURE_LOW_ADDR; \
+ uint32_t DCN_VM_SYSTEM_APERTURE_HIGH_ADDR; \
+ uint32_t DCHUBBUB_SDPIF_FB_BASE; \
+ uint32_t DCHUBBUB_SDPIF_FB_OFFSET; \
+ uint32_t DCN_VM_FB_LOCATION_TOP; \
+ uint32_t DCN_VM_FB_LOCATION_BASE; \
+ uint32_t DCN_VM_FB_OFFSET; \
+ uint32_t DCN_VM_AGP_BASE; \
+ uint32_t DCN_VM_AGP_BOT; \
+ uint32_t DCN_VM_AGP_TOP; \
+ uint32_t CURSOR_SETTINS; \
+ uint32_t CURSOR_SETTINGS; \
+ uint32_t CURSOR_SURFACE_ADDRESS_HIGH; \
+ uint32_t CURSOR_SURFACE_ADDRESS; \
+ uint32_t CURSOR_SIZE; \
+ uint32_t CURSOR_CONTROL; \
+ uint32_t CURSOR_POSITION; \
+ uint32_t CURSOR_HOT_SPOT; \
+ uint32_t CURSOR_DST_OFFSET
#define HUBP_SF(reg_name, field_name, post_fix)\
.field_name = reg_name ## __ ## field_name ## post_fix
@@ -397,7 +394,6 @@ struct dcn_mi_registers {
HUBP_SF(CURSOR0_CURSOR_HOT_SPOT, CURSOR_HOT_SPOT_Y, mask_sh), \
HUBP_SF(CURSOR0_CURSOR_DST_OFFSET, CURSOR_DST_X_OFFSET, mask_sh)
-
#define DCN_HUBP_REG_FIELD_LIST(type) \
type HUBP_BLANK_EN;\
type HUBP_TTU_DISABLE;\
@@ -577,6 +573,10 @@ struct dcn_mi_registers {
type CURSOR_DST_X_OFFSET; \
type OUTPUT_FP
+struct dcn_mi_registers {
+ HUBP_COMMON_REG_VARIABLE_LIST;
+};
+
struct dcn_mi_shift {
DCN_HUBP_REG_FIELD_LIST(uint8_t);
};
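The structs above follow the DC code's list-macro pattern: the register struct is built from HUBP_COMMON_REG_VARIABLE_LIST, while the shift struct instantiates DCN_HUBP_REG_FIELD_LIST with uint8_t (and a mask struct typically instantiates the same list with uint32_t). A minimal, self-contained sketch of that pattern follows; the DEMO_* names and the two fields are made up for illustration, not the real HUBP list.

    #include <stdint.h>

    /* Illustrative stand-in for DCN_HUBP_REG_FIELD_LIST(type). */
    #define DEMO_FIELD_LIST(type) \
            type HUBP_BLANK_EN;   \
            type HUBP_TTU_DISABLE

    struct demo_shift { DEMO_FIELD_LIST(uint8_t);  };  /* bit position of each field */
    struct demo_mask  { DEMO_FIELD_LIST(uint32_t); };  /* bit mask of each field     */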
@@ -611,11 +611,11 @@ void hubp1_program_requestor(
struct _vcs_dpi_display_rq_regs_st *rq_regs);
void hubp1_program_pixel_format(
- struct dcn10_hubp *hubp,
+ struct hubp *hubp,
enum surface_pixel_format format);
void hubp1_program_size_and_rotation(
- struct dcn10_hubp *hubp,
+ struct hubp *hubp,
enum dc_rotation_angle rotation,
enum surface_pixel_format format,
const union plane_size *plane_size,
@@ -623,7 +623,7 @@ void hubp1_program_size_and_rotation(
bool horizontal_mirror);
void hubp1_program_tiling(
- struct dcn10_hubp *hubp,
+ struct hubp *hubp,
const union dc_tiling_info *info,
const enum surface_pixel_format pixel_format);
@@ -681,4 +681,6 @@ struct dcn_hubp_state {
void hubp1_read_state(struct dcn10_hubp *hubp1,
struct dcn_hubp_state *s);
+enum cursor_pitch hubp1_get_cursor_pitch(unsigned int pitch);
+
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 8e2ddbc2129c..82572863acab 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -32,7 +32,7 @@
#include "dce/dce_hwseq.h"
#include "abm.h"
#include "dmcu.h"
-#include "dcn10/dcn10_timing_generator.h"
+#include "dcn10_optc.h"
#include "dcn10/dcn10_dpp.h"
#include "dcn10/dcn10_mpc.h"
#include "timing_generator.h"
@@ -43,6 +43,7 @@
#include "custom_float.h"
#include "dcn10_hubp.h"
#include "dcn10_hubbub.h"
+#include "dcn10_cm_common.h"
#define CTX \
hws->ctx
@@ -158,7 +159,7 @@ void dcn10_log_hw_state(struct dc *dc)
struct timing_generator *tg = pool->timing_generators[i];
struct dcn_otg_state s = {0};
- tgn10_read_otg_state(DCN10TG_FROM_TG(tg), &s);
+ optc1_read_otg_state(DCN10TG_FROM_TG(tg), &s);
//only print if OTG master is enabled
if ((s.otg_enabled & 1) == 0)
@@ -425,6 +426,34 @@ static void bios_golden_init(struct dc *dc)
}
}
+static void false_optc_underflow_wa(
+ struct dc *dc,
+ const struct dc_stream_state *stream,
+ struct timing_generator *tg)
+{
+ int i;
+ bool underflow;
+
+ if (!dc->hwseq->wa.false_optc_underflow)
+ return;
+
+ underflow = tg->funcs->is_optc_underflow_occurred(tg);
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (old_pipe_ctx->stream != stream)
+ continue;
+
+ dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, old_pipe_ctx);
+ }
+
+ tg->funcs->set_blank_data_double_buffer(tg, true);
+
+ if (tg->funcs->is_optc_underflow_occurred(tg) && !underflow)
+ tg->funcs->clear_optc_underflow(tg);
+}
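The workaround above follows a snapshot-then-clear idiom: the underflow status is sampled before the disruptive reprogramming, and the flag is cleared only if that reprogramming newly raised it, so a pre-existing (real) underflow stays visible. A standalone sketch of just that idiom, with stubbed hardware accessors (the demo_* names are assumptions, not driver functions):

    #include <stdbool.h>

    static bool demo_underflow_flag;  /* stands in for the OPTC underflow status bit */

    static bool demo_read_underflow(void)  { return demo_underflow_flag; }
    static void demo_clear_underflow(void) { demo_underflow_flag = false; }

    /* Reprogramming step that can falsely assert the flag. */
    static void demo_reprogram_blank(void) { demo_underflow_flag = true; }

    static void demo_false_underflow_wa(void)
    {
            bool was_set = demo_read_underflow();

            demo_reprogram_blank();

            /* clear only if the flag appeared as a side effect of the reprogram */
            if (demo_read_underflow() && !was_set)
                    demo_clear_underflow();
    }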
+
static enum dc_status dcn10_prog_pixclk_crtc_otg(
struct pipe_ctx *pipe_ctx,
struct dc_state *context,
@@ -433,9 +462,6 @@ static enum dc_status dcn10_prog_pixclk_crtc_otg(
struct dc_stream_state *stream = pipe_ctx->stream;
enum dc_color_space color_space;
struct tg_color black_color = {0};
- bool enableStereo = stream->timing.timing_3d_format == TIMING_3D_FORMAT_NONE ?
- false:true;
- bool rightEyePolarity = stream->timing.flags.RIGHT_EYE_3D_POLARITY;
/* by upper caller loop, pipe0 is parent pipe and be called first.
* back end is set up by for pipe0. Other children pipe share back end
@@ -470,11 +496,6 @@ static enum dc_status dcn10_prog_pixclk_crtc_otg(
&stream->timing,
true);
- pipe_ctx->stream_res.opp->funcs->opp_set_stereo_polarity(
- pipe_ctx->stream_res.opp,
- enableStereo,
- rightEyePolarity);
-
#if 0 /* move to after enable_crtc */
/* TODO: OPP FMT, ABM. etc. should be done here. */
/* or FPGA now. instance 0 only. TODO: move to opp.c */
@@ -489,12 +510,18 @@ static enum dc_status dcn10_prog_pixclk_crtc_otg(
/* program otg blank color */
color_space = stream->output_color_space;
color_space_to_black_color(dc, color_space, &black_color);
- pipe_ctx->stream_res.tg->funcs->set_blank_color(
- pipe_ctx->stream_res.tg,
- &black_color);
- pipe_ctx->stream_res.tg->funcs->set_blank(pipe_ctx->stream_res.tg, true);
- hwss_wait_for_blank_complete(pipe_ctx->stream_res.tg);
+ if (pipe_ctx->stream_res.tg->funcs->set_blank_color)
+ pipe_ctx->stream_res.tg->funcs->set_blank_color(
+ pipe_ctx->stream_res.tg,
+ &black_color);
+
+ if (pipe_ctx->stream_res.tg->funcs->is_blanked &&
+ !pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg)) {
+ pipe_ctx->stream_res.tg->funcs->set_blank(pipe_ctx->stream_res.tg, true);
+ hwss_wait_for_blank_complete(pipe_ctx->stream_res.tg);
+ false_optc_underflow_wa(dc, pipe_ctx->stream, pipe_ctx->stream_res.tg);
+ }
/* VTG is within DCHUB command block. DCFCLK is always on */
if (false == pipe_ctx->stream_res.tg->funcs->enable_crtc(pipe_ctx->stream_res.tg)) {
@@ -573,41 +600,34 @@ static void plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx)
int fe_idx = pipe_ctx->pipe_idx;
struct hubp *hubp = dc->res_pool->hubps[fe_idx];
struct mpc *mpc = dc->res_pool->mpc;
- int opp_id, z_idx;
- int mpcc_id = -1;
+ int opp_id;
+ struct mpc_tree *mpc_tree_params;
+ struct mpcc *mpcc_to_remove = NULL;
/* look at tree rather than mi here to know if we already reset */
for (opp_id = 0; opp_id < dc->res_pool->pipe_count; opp_id++) {
struct output_pixel_processor *opp = dc->res_pool->opps[opp_id];
- for (z_idx = 0; z_idx < opp->mpc_tree.num_pipes; z_idx++) {
- if (opp->mpc_tree.dpp[z_idx] == fe_idx) {
- mpcc_id = opp->mpc_tree.mpcc[z_idx];
- break;
- }
- }
- if (mpcc_id != -1)
+ mpc_tree_params = &(opp->mpc_tree_params);
+ mpcc_to_remove = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, fe_idx);
+ if (mpcc_to_remove != NULL)
break;
}
+
/*Already reset*/
if (opp_id == dc->res_pool->pipe_count)
return;
- mpc->funcs->remove(mpc, &(dc->res_pool->opps[opp_id]->mpc_tree),
- dc->res_pool->opps[opp_id]->inst, fe_idx);
+ mpc->funcs->remove_mpcc(mpc, mpc_tree_params, mpcc_to_remove);
+ dc->res_pool->opps[opp_id]->mpcc_disconnect_pending[fe_idx] = true;
+
+ dc->optimized_required = true;
if (hubp->funcs->hubp_disconnect)
hubp->funcs->hubp_disconnect(hubp);
if (dc->debug.sanity_checks)
dcn10_verify_allow_pstate_change_high(dc);
-
- pipe_ctx->stream = NULL;
- memset(&pipe_ctx->stream_res, 0, sizeof(pipe_ctx->stream_res));
- memset(&pipe_ctx->plane_res, 0, sizeof(pipe_ctx->plane_res));
- pipe_ctx->top_pipe = NULL;
- pipe_ctx->bottom_pipe = NULL;
- pipe_ctx->plane_state = NULL;
}
static void plane_atomic_power_down(struct dc *dc, int fe_idx)
@@ -636,29 +656,30 @@ static void plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
int fe_idx = pipe_ctx->pipe_idx;
struct dce_hwseq *hws = dc->hwseq;
struct hubp *hubp = dc->res_pool->hubps[fe_idx];
- struct mpc *mpc = dc->res_pool->mpc;
int opp_id = hubp->opp_id;
- struct output_pixel_processor *opp;
- if (opp_id != 0xf) {
- mpc->funcs->wait_for_idle(mpc, hubp->mpcc_id);
- opp = dc->res_pool->opps[hubp->opp_id];
- opp->mpcc_disconnect_pending[hubp->mpcc_id] = false;
- hubp->funcs->set_blank(hubp, true);
- }
+ dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe_ctx);
REG_UPDATE(HUBP_CLK_CNTL[fe_idx],
HUBP_CLOCK_ENABLE, 0);
REG_UPDATE(DPP_CONTROL[fe_idx],
DPP_CLOCK_ENABLE, 0);
- if (opp_id != 0xf && dc->res_pool->opps[opp_id]->mpc_tree.num_pipes == 0)
+ if (opp_id != 0xf && dc->res_pool->opps[opp_id]->mpc_tree_params.opp_list == NULL)
REG_UPDATE(OPP_PIPE_CONTROL[opp_id],
OPP_PIPE_CLOCK_EN, 0);
hubp->power_gated = true;
+ dc->optimized_required = false; /* We're powering off, no need to optimize */
plane_atomic_power_down(dc, fe_idx);
+
+ pipe_ctx->stream = NULL;
+ memset(&pipe_ctx->stream_res, 0, sizeof(pipe_ctx->stream_res));
+ memset(&pipe_ctx->plane_res, 0, sizeof(pipe_ctx->plane_res));
+ pipe_ctx->top_pipe = NULL;
+ pipe_ctx->bottom_pipe = NULL;
+ pipe_ctx->plane_state = NULL;
}
static void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx)
@@ -740,25 +761,27 @@ static void dcn10_init_hw(struct dc *dc)
}
}
+ /* Reset all MPCC muxes */
+ dc->res_pool->mpc->funcs->mpc_init(dc->res_pool->mpc);
+
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct timing_generator *tg = dc->res_pool->timing_generators[i];
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
- struct output_pixel_processor *opp = dc->res_pool->opps[i];
- struct mpc_tree_cfg *mpc_tree = &opp->mpc_tree;
struct hubp *hubp = dc->res_pool->hubps[i];
- mpc_tree->dpp[0] = i;
- mpc_tree->mpcc[0] = i;
- mpc_tree->num_pipes = 1;
-
pipe_ctx->stream_res.tg = tg;
pipe_ctx->pipe_idx = i;
pipe_ctx->plane_res.hubp = hubp;
hubp->mpcc_id = i;
- hubp->opp_id = dc->res_pool->mpc->funcs->get_opp_id(dc->res_pool->mpc, i);
+ hubp->opp_id = 0xf;
hubp->power_gated = false;
+ dc->res_pool->opps[i]->mpc_tree_params.opp_id = dc->res_pool->opps[i]->inst;
+ dc->res_pool->opps[i]->mpc_tree_params.opp_list = NULL;
+ dc->res_pool->opps[i]->mpcc_disconnect_pending[i] = true;
+ pipe_ctx->stream_res.opp = dc->res_pool->opps[i];
+
plane_atomic_disconnect(dc, pipe_ctx);
}
@@ -929,280 +952,10 @@ static bool dcn10_set_input_transfer_func(struct pipe_ctx *pipe_ctx,
return result;
}
-/*modify the method to handle rgb for arr_points*/
-static bool convert_to_custom_float(
- struct pwl_result_data *rgb_resulted,
- struct curve_points *arr_points,
- uint32_t hw_points_num)
-{
- struct custom_float_format fmt;
-
- struct pwl_result_data *rgb = rgb_resulted;
-
- uint32_t i = 0;
-
- fmt.exponenta_bits = 6;
- fmt.mantissa_bits = 12;
- fmt.sign = false;
-
- if (!convert_to_custom_float_format(arr_points[0].x, &fmt,
- &arr_points[0].custom_float_x)) {
- BREAK_TO_DEBUGGER();
- return false;
- }
-
- if (!convert_to_custom_float_format(arr_points[0].offset, &fmt,
- &arr_points[0].custom_float_offset)) {
- BREAK_TO_DEBUGGER();
- return false;
- }
-
- if (!convert_to_custom_float_format(arr_points[0].slope, &fmt,
- &arr_points[0].custom_float_slope)) {
- BREAK_TO_DEBUGGER();
- return false;
- }
-
- fmt.mantissa_bits = 10;
- fmt.sign = false;
-
- if (!convert_to_custom_float_format(arr_points[1].x, &fmt,
- &arr_points[1].custom_float_x)) {
- BREAK_TO_DEBUGGER();
- return false;
- }
-
- if (!convert_to_custom_float_format(arr_points[1].y, &fmt,
- &arr_points[1].custom_float_y)) {
- BREAK_TO_DEBUGGER();
- return false;
- }
-
- if (!convert_to_custom_float_format(arr_points[1].slope, &fmt,
- &arr_points[1].custom_float_slope)) {
- BREAK_TO_DEBUGGER();
- return false;
- }
- fmt.mantissa_bits = 12;
- fmt.sign = true;
- while (i != hw_points_num) {
- if (!convert_to_custom_float_format(rgb->red, &fmt,
- &rgb->red_reg)) {
- BREAK_TO_DEBUGGER();
- return false;
- }
-
- if (!convert_to_custom_float_format(rgb->green, &fmt,
- &rgb->green_reg)) {
- BREAK_TO_DEBUGGER();
- return false;
- }
- if (!convert_to_custom_float_format(rgb->blue, &fmt,
- &rgb->blue_reg)) {
- BREAK_TO_DEBUGGER();
- return false;
- }
- if (!convert_to_custom_float_format(rgb->delta_red, &fmt,
- &rgb->delta_red_reg)) {
- BREAK_TO_DEBUGGER();
- return false;
- }
-
- if (!convert_to_custom_float_format(rgb->delta_green, &fmt,
- &rgb->delta_green_reg)) {
- BREAK_TO_DEBUGGER();
- return false;
- }
-
- if (!convert_to_custom_float_format(rgb->delta_blue, &fmt,
- &rgb->delta_blue_reg)) {
- BREAK_TO_DEBUGGER();
- return false;
- }
-
- ++rgb;
- ++i;
- }
-
- return true;
-}
-#define MAX_REGIONS_NUMBER 34
-#define MAX_LOW_POINT 25
-#define NUMBER_SEGMENTS 32
-
-static bool
-dcn10_translate_regamma_to_hw_format(const struct dc_transfer_func *output_tf,
- struct pwl_params *regamma_params)
-{
- struct curve_points *arr_points;
- struct pwl_result_data *rgb_resulted;
- struct pwl_result_data *rgb;
- struct pwl_result_data *rgb_plus_1;
- struct fixed31_32 y_r;
- struct fixed31_32 y_g;
- struct fixed31_32 y_b;
- struct fixed31_32 y1_min;
- struct fixed31_32 y3_max;
-
- int32_t segment_start, segment_end;
- int32_t i;
- uint32_t j, k, seg_distr[MAX_REGIONS_NUMBER], increment, start_index, hw_points;
-
- if (output_tf == NULL || regamma_params == NULL || output_tf->type == TF_TYPE_BYPASS)
- return false;
-
- PERF_TRACE();
-
- arr_points = regamma_params->arr_points;
- rgb_resulted = regamma_params->rgb_resulted;
- hw_points = 0;
-
- memset(regamma_params, 0, sizeof(struct pwl_params));
- memset(seg_distr, 0, sizeof(seg_distr));
-
- if (output_tf->tf == TRANSFER_FUNCTION_PQ) {
- /* 32 segments
- * segments are from 2^-25 to 2^7
- */
- for (i = 0; i < 32 ; i++)
- seg_distr[i] = 3;
-
- segment_start = -25;
- segment_end = 7;
- } else {
- /* 10 segments
- * segment is from 2^-10 to 2^0
- * There are less than 256 points, for optimization
- */
- seg_distr[0] = 3;
- seg_distr[1] = 4;
- seg_distr[2] = 4;
- seg_distr[3] = 4;
- seg_distr[4] = 4;
- seg_distr[5] = 4;
- seg_distr[6] = 4;
- seg_distr[7] = 4;
- seg_distr[8] = 5;
- seg_distr[9] = 5;
-
- segment_start = -10;
- segment_end = 0;
- }
-
- for (i = segment_end - segment_start; i < MAX_REGIONS_NUMBER ; i++)
- seg_distr[i] = -1;
-
- for (k = 0; k < MAX_REGIONS_NUMBER; k++) {
- if (seg_distr[k] != -1)
- hw_points += (1 << seg_distr[k]);
- }
-
- j = 0;
- for (k = 0; k < (segment_end - segment_start); k++) {
- increment = NUMBER_SEGMENTS / (1 << seg_distr[k]);
- start_index = (segment_start + k + MAX_LOW_POINT) * NUMBER_SEGMENTS;
- for (i = start_index; i < start_index + NUMBER_SEGMENTS; i += increment) {
- if (j == hw_points - 1)
- break;
- rgb_resulted[j].red = output_tf->tf_pts.red[i];
- rgb_resulted[j].green = output_tf->tf_pts.green[i];
- rgb_resulted[j].blue = output_tf->tf_pts.blue[i];
- j++;
- }
- }
-
- /* last point */
- start_index = (segment_end + MAX_LOW_POINT) * NUMBER_SEGMENTS;
- rgb_resulted[hw_points - 1].red = output_tf->tf_pts.red[start_index];
- rgb_resulted[hw_points - 1].green = output_tf->tf_pts.green[start_index];
- rgb_resulted[hw_points - 1].blue = output_tf->tf_pts.blue[start_index];
-
- arr_points[0].x = dal_fixed31_32_pow(dal_fixed31_32_from_int(2),
- dal_fixed31_32_from_int(segment_start));
- arr_points[1].x = dal_fixed31_32_pow(dal_fixed31_32_from_int(2),
- dal_fixed31_32_from_int(segment_end));
-
- y_r = rgb_resulted[0].red;
- y_g = rgb_resulted[0].green;
- y_b = rgb_resulted[0].blue;
-
- y1_min = dal_fixed31_32_min(y_r, dal_fixed31_32_min(y_g, y_b));
-
- arr_points[0].y = y1_min;
- arr_points[0].slope = dal_fixed31_32_div(arr_points[0].y, arr_points[0].x);
- y_r = rgb_resulted[hw_points - 1].red;
- y_g = rgb_resulted[hw_points - 1].green;
- y_b = rgb_resulted[hw_points - 1].blue;
-
- /* see comment above, m_arrPoints[1].y should be the Y value for the
- * region end (m_numOfHwPoints), not last HW point(m_numOfHwPoints - 1)
- */
- y3_max = dal_fixed31_32_max(y_r, dal_fixed31_32_max(y_g, y_b));
-
- arr_points[1].y = y3_max;
-
- arr_points[1].slope = dal_fixed31_32_zero;
-
- if (output_tf->tf == TRANSFER_FUNCTION_PQ) {
- /* for PQ, we want to have a straight line from last HW X point,
- * and the slope to be such that we hit 1.0 at 10000 nits.
- */
- const struct fixed31_32 end_value =
- dal_fixed31_32_from_int(125);
-
- arr_points[1].slope = dal_fixed31_32_div(
- dal_fixed31_32_sub(dal_fixed31_32_one, arr_points[1].y),
- dal_fixed31_32_sub(end_value, arr_points[1].x));
- }
-
- regamma_params->hw_points_num = hw_points;
-
- i = 1;
- for (k = 0; k < MAX_REGIONS_NUMBER && i < MAX_REGIONS_NUMBER; k++) {
- if (seg_distr[k] != -1) {
- regamma_params->arr_curve_points[k].segments_num =
- seg_distr[k];
- regamma_params->arr_curve_points[i].offset =
- regamma_params->arr_curve_points[k].offset + (1 << seg_distr[k]);
- }
- i++;
- }
-
- if (seg_distr[k] != -1)
- regamma_params->arr_curve_points[k].segments_num = seg_distr[k];
-
- rgb = rgb_resulted;
- rgb_plus_1 = rgb_resulted + 1;
-
- i = 1;
-
- while (i != hw_points + 1) {
- if (dal_fixed31_32_lt(rgb_plus_1->red, rgb->red))
- rgb_plus_1->red = rgb->red;
- if (dal_fixed31_32_lt(rgb_plus_1->green, rgb->green))
- rgb_plus_1->green = rgb->green;
- if (dal_fixed31_32_lt(rgb_plus_1->blue, rgb->blue))
- rgb_plus_1->blue = rgb->blue;
-
- rgb->delta_red = dal_fixed31_32_sub(rgb_plus_1->red, rgb->red);
- rgb->delta_green = dal_fixed31_32_sub(rgb_plus_1->green, rgb->green);
- rgb->delta_blue = dal_fixed31_32_sub(rgb_plus_1->blue, rgb->blue);
-
- ++rgb_plus_1;
- ++rgb;
- ++i;
- }
-
- convert_to_custom_float(rgb_resulted, arr_points, hw_points);
-
- PERF_TRACE();
-
- return true;
-}
static bool
dcn10_set_output_transfer_func(struct pipe_ctx *pipe_ctx,
@@ -1223,9 +976,9 @@ dcn10_set_output_transfer_func(struct pipe_ctx *pipe_ctx,
/* dcn10_translate_regamma_to_hw_format takes 750us, only do it when full
* update.
*/
- else if (dcn10_translate_regamma_to_hw_format(
+ else if (cm_helper_translate_curve_to_hw_format(
stream->out_transfer_func,
- &dpp->regamma_params)) {
+ &dpp->regamma_params, false)) {
dpp->funcs->dpp_program_regamma_pwl(
dpp,
&dpp->regamma_params, OPP_REGAMMA_USER);
@@ -1579,7 +1332,6 @@ static void dcn10_enable_plane(
/* make sure OPP_PIPE_CLOCK_EN = 1 */
REG_UPDATE(OPP_PIPE_CONTROL[pipe_ctx->stream_res.tg->inst],
OPP_PIPE_CLOCK_EN, 1);
- /*TODO: REG_UPDATE(DENTIST_DISPCLK_CNTL, DENTIST_DPPCLK_WDIVIDER, 0x1f);*/
/* TODO: enable/disable in dm as per update type.
if (plane_state) {
@@ -1672,60 +1424,15 @@ static void program_csc_matrix(struct pipe_ctx *pipe_ctx,
enum dc_color_space colorspace,
uint16_t *matrix)
{
- int i;
- struct out_csc_color_matrix tbl_entry;
-
if (pipe_ctx->stream->csc_color_matrix.enable_adjustment == true) {
- enum dc_color_space color_space =
- pipe_ctx->stream->output_color_space;
-
- //uint16_t matrix[12];
- for (i = 0; i < 12; i++)
- tbl_entry.regval[i] = pipe_ctx->stream->csc_color_matrix.matrix[i];
-
- tbl_entry.color_space = color_space;
- //tbl_entry.regval = matrix;
-
if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment != NULL)
- pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, &tbl_entry);
+ pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix);
} else {
if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default != NULL)
pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default(pipe_ctx->plane_res.dpp, colorspace);
}
}
-static void set_mpc_output_csc(struct dc *dc,
- struct pipe_ctx *pipe_ctx,
- enum dc_color_space colorspace,
- uint16_t *matrix,
- int opp_id)
-{
- struct mpc *mpc = dc->res_pool->mpc;
- int i;
- struct out_csc_color_matrix tbl_entry;
- enum mpc_output_csc_mode ocsc_mode = MPC_OUTPUT_CSC_COEF_A;
-
-
- if (pipe_ctx->stream->csc_color_matrix.enable_adjustment == true) {
- //uint16_t matrix[12];
- for (i = 0; i < 12; i++)
- tbl_entry.regval[i] = matrix[i];
- tbl_entry.color_space = colorspace;
-
- if (mpc->funcs->set_output_csc != NULL)
- mpc->funcs->set_output_csc(mpc,
- opp_id,
- &tbl_entry,
- ocsc_mode);
- } else {
- if (mpc->funcs->set_ocsc_default != NULL)
- mpc->funcs->set_ocsc_default(mpc,
- opp_id,
- colorspace,
- ocsc_mode);
- }
-}
-
static void program_output_csc(struct dc *dc,
struct pipe_ctx *pipe_ctx,
enum dc_color_space colorspace,
@@ -1736,13 +1443,6 @@ static void program_output_csc(struct dc *dc,
program_csc_matrix(pipe_ctx,
colorspace,
matrix);
- else
- set_mpc_output_csc(dc,
- pipe_ctx,
- colorspace,
- matrix,
- opp_id);
-
}
static bool is_lower_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
@@ -1914,35 +1614,73 @@ static void update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state)
static void update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
- struct mpcc_cfg mpcc_cfg = {0};
struct hubp *hubp = pipe_ctx->plane_res.hubp;
- struct pipe_ctx *top_pipe;
- bool per_pixel_alpha =
- pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
+ struct mpcc_blnd_cfg blnd_cfg;
+ bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
+ int mpcc_id;
+ struct mpcc *new_mpcc;
+ struct mpc *mpc = dc->res_pool->mpc;
+ struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);
/* TODO: proper fix once fpga works */
- mpcc_cfg.dpp_id = hubp->inst;
- mpcc_cfg.opp_id = pipe_ctx->stream_res.opp->inst;
- mpcc_cfg.tree_cfg = &(pipe_ctx->stream_res.opp->mpc_tree);
- for (top_pipe = pipe_ctx->top_pipe; top_pipe; top_pipe = top_pipe->top_pipe)
- mpcc_cfg.z_index++;
if (dc->debug.surface_visual_confirm)
dcn10_get_surface_visual_confirm_color(
- pipe_ctx, &mpcc_cfg.black_color);
+ pipe_ctx, &blnd_cfg.black_color);
else
color_space_to_black_color(
dc, pipe_ctx->stream->output_color_space,
- &mpcc_cfg.black_color);
- mpcc_cfg.per_pixel_alpha = per_pixel_alpha;
+ &blnd_cfg.black_color);
+
+ if (per_pixel_alpha)
+ blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
+ else
+ blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
+
+ blnd_cfg.overlap_only = false;
+ blnd_cfg.global_alpha = 0xff;
+ blnd_cfg.global_gain = 0xff;
+
/* DCN1.0 has output CM before MPC which seems to screw with
* pre-multiplied alpha.
*/
- mpcc_cfg.pre_multiplied_alpha = is_rgb_cspace(
+ blnd_cfg.pre_multiplied_alpha = is_rgb_cspace(
pipe_ctx->stream->output_color_space)
&& per_pixel_alpha;
- hubp->mpcc_id = dc->res_pool->mpc->funcs->add(dc->res_pool->mpc, &mpcc_cfg);
- hubp->opp_id = mpcc_cfg.opp_id;
+
+ /*
+ * TODO: remove hack
+ * Note: currently there is a bug in init_hw such that
+ * on resume from hibernate, BIOS sets up MPCC0, and
+ * we do mpcc_remove but the mpcc cannot go to idle
+ * after remove. This causes us to pick mpcc1 here,
+ * which causes a pstate hang for a yet-unknown reason.
+ */
+ mpcc_id = hubp->inst;
+
+ /* check if this MPCC is already being used */
+ new_mpcc = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, mpcc_id);
+ /* remove MPCC if being used */
+ if (new_mpcc != NULL)
+ mpc->funcs->remove_mpcc(mpc, mpc_tree_params, new_mpcc);
+ else
+ if (dc->debug.sanity_checks)
+ mpc->funcs->assert_mpcc_idle_before_connect(
+ dc->res_pool->mpc, mpcc_id);
+
+ /* Call MPC to insert new plane */
+ new_mpcc = mpc->funcs->insert_plane(dc->res_pool->mpc,
+ mpc_tree_params,
+ &blnd_cfg,
+ NULL,
+ NULL,
+ hubp->inst,
+ mpcc_id);
+
+ ASSERT(new_mpcc != NULL);
+
+ hubp->opp_id = pipe_ctx->stream_res.opp->inst;
+ hubp->mpcc_id = mpcc_id;
}
static void update_scaler(struct pipe_ctx *pipe_ctx)
@@ -1971,7 +1709,7 @@ static void update_dchubp_dpp(
union plane_size size = plane_state->plane_size;
/* depends on DML calculation, DPP clock value may change dynamically */
- if (pipe_ctx->plane_state->update_flags.raw != 0) {
+ if (plane_state->update_flags.bits.full_update) {
enable_dppclk(
dc->hwseq,
pipe_ctx->pipe_idx,
@@ -2015,7 +1753,8 @@ static void update_dchubp_dpp(
}
if (plane_state->update_flags.bits.full_update ||
- plane_state->update_flags.bits.scaling_change) {
+ plane_state->update_flags.bits.scaling_change ||
+ plane_state->update_flags.bits.position_change) {
hubp->funcs->mem_program_viewport(
hubp,
&pipe_ctx->plane_res.scl_data.viewport,
@@ -2037,7 +1776,9 @@ static void update_dchubp_dpp(
plane_state->update_flags.bits.horizontal_mirror_change ||
plane_state->update_flags.bits.rotation_change ||
plane_state->update_flags.bits.swizzle_change ||
- plane_state->update_flags.bits.bpp_change) {
+ plane_state->update_flags.bits.dcc_change ||
+ plane_state->update_flags.bits.bpp_change ||
+ plane_state->update_flags.bits.scaling_change) {
hubp->funcs->hubp_program_surface_config(
hubp,
plane_state->format,
@@ -2062,6 +1803,7 @@ static void program_all_pipe_in_tree(
struct pipe_ctx *pipe_ctx,
struct dc_state *context)
{
+
if (pipe_ctx->top_pipe == NULL) {
pipe_ctx->stream_res.tg->dlg_otg_param.vready_offset = pipe_ctx->pipe_dlg_param.vready_offset;
@@ -2072,7 +1814,11 @@ static void program_all_pipe_in_tree(
pipe_ctx->stream_res.tg->funcs->program_global_sync(
pipe_ctx->stream_res.tg);
- pipe_ctx->stream_res.tg->funcs->set_blank(pipe_ctx->stream_res.tg, !is_pipe_tree_visible(pipe_ctx));
+
+ if (pipe_ctx->stream_res.tg->funcs->set_blank)
+ pipe_ctx->stream_res.tg->funcs->set_blank(
+ pipe_ctx->stream_res.tg,
+ !is_pipe_tree_visible(pipe_ctx));
}
if (pipe_ctx->plane_state != NULL) {
@@ -2179,6 +1925,7 @@ static void dcn10_apply_ctx_for_surface(
{
int i;
struct timing_generator *tg;
+ struct output_pixel_processor *opp;
bool removed_pipe[4] = { false };
unsigned int ref_clk_mhz = dc->res_pool->ref_clock_inKhz/1000;
bool program_water_mark = false;
@@ -2189,6 +1936,8 @@ static void dcn10_apply_ctx_for_surface(
if (!top_pipe_to_program)
return;
+ opp = top_pipe_to_program->stream_res.opp;
+
tg = top_pipe_to_program->stream_res.tg;
tg->funcs->lock(tg);
@@ -2196,7 +1945,8 @@ static void dcn10_apply_ctx_for_surface(
if (num_planes == 0) {
/* OTG blank before remove all front end */
- tg->funcs->set_blank(tg, true);
+ if (tg->funcs->set_blank)
+ tg->funcs->set_blank(tg, true);
}
/* Disconnect unused mpcc */
@@ -2236,24 +1986,14 @@ static void dcn10_apply_ctx_for_surface(
}
}
- if (num_planes > 0) {
+ if (num_planes > 0)
program_all_pipe_in_tree(dc, top_pipe_to_program, context);
- /* TODO: this is a hack w/a for switching from mpo to pipe split */
- if (stream->cursor_attributes.address.quad_part != 0) {
- struct dc_cursor_position position = { 0 };
-
- dc_stream_set_cursor_position(
- (struct dc_stream_state *)stream,
- &position);
- dc_stream_set_cursor_attributes(
- (struct dc_stream_state *)stream,
- &stream->cursor_attributes);
- }
- }
-
tg->funcs->unlock(tg);
+ if (num_planes == 0)
+ false_optc_underflow_wa(dc, stream, tg);
+
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *old_pipe_ctx =
&dc->current_state->res_ctx.pipe_ctx[i];
@@ -2264,7 +2004,7 @@ static void dcn10_apply_ctx_for_surface(
pipe_ctx->plane_state->update_flags.bits.full_update)
program_water_mark = true;
- if (removed_pipe[i] && num_planes == 0)
+ if (removed_pipe[i])
dcn10_disable_plane(dc, old_pipe_ctx);
}
@@ -2273,6 +2013,7 @@ static void dcn10_apply_ctx_for_surface(
/* pstate stuck check after watermark update */
dcn10_verify_allow_pstate_change_high(dc);
}
+
/* watermark is for all pipes */
hubbub1_program_watermarks(dc->res_pool->hubbub,
&context->bw.dcn.watermarks, ref_clk_mhz);
@@ -2502,10 +2243,10 @@ static void dcn10_setup_stereo(struct pipe_ctx *pipe_ctx, struct dc *dc)
dcn10_config_stereo_parameters(stream, &flags);
- pipe_ctx->stream_res.opp->funcs->opp_set_stereo_polarity(
+ pipe_ctx->stream_res.opp->funcs->opp_program_stereo(
pipe_ctx->stream_res.opp,
flags.PROGRAM_STEREO == 1 ? true:false,
- stream->timing.flags.RIGHT_EYE_3D_POLARITY == 1 ? true:false);
+ &stream->timing);
pipe_ctx->stream_res.tg->funcs->program_stereo(
pipe_ctx->stream_res.tg,
@@ -2619,7 +2360,8 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
.pplib_apply_display_requirements =
dcn10_pplib_apply_display_requirements,
.edp_backlight_control = hwss_edp_backlight_control,
- .edp_power_control = hwss_edp_power_control
+ .edp_power_control = hwss_edp_power_control,
+ .edp_wait_for_hpd_ready = hwss_edp_wait_for_hpd_ready,
};
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
index b016f4cbd45c..179890b1a8c4 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
@@ -25,8 +25,6 @@
#include "reg_helper.h"
#include "dcn10_mpc.h"
-#include "dc.h"
-#include "mem_input.h"
#define REG(reg)\
mpc10->mpc_regs->reg
@@ -38,17 +36,13 @@
#define FN(reg_name, field_name) \
mpc10->mpc_shift->field_name, mpc10->mpc_mask->field_name
-#define MODE_TOP_ONLY 1
-#define MODE_BLEND 3
-#define BLND_PP_ALPHA 0
-#define BLND_GLOBAL_ALPHA 2
-
-static void mpc10_set_bg_color(
- struct dcn10_mpc *mpc10,
+void mpc1_set_bg_color(struct mpc *mpc,
struct tg_color *bg_color,
- int id)
+ int mpcc_id)
{
+ struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
+
/* mpc color is 12 bit. tg_color is 10 bit */
/* todo: might want to use 16 bit to represent color and have each
* hw block translate to correct color depth.
@@ -57,15 +51,47 @@ static void mpc10_set_bg_color(
uint32_t bg_g_y = bg_color->color_g_y << 2;
uint32_t bg_b_cb = bg_color->color_b_cb << 2;
- REG_SET(MPCC_BG_R_CR[id], 0,
+ REG_SET(MPCC_BG_R_CR[mpcc_id], 0,
MPCC_BG_R_CR, bg_r_cr);
- REG_SET(MPCC_BG_G_Y[id], 0,
+ REG_SET(MPCC_BG_G_Y[mpcc_id], 0,
MPCC_BG_G_Y, bg_g_y);
- REG_SET(MPCC_BG_B_CB[id], 0,
+ REG_SET(MPCC_BG_B_CB[mpcc_id], 0,
MPCC_BG_B_CB, bg_b_cb);
}
-void mpc10_assert_idle_mpcc(struct mpc *mpc, int id)
+static void mpc1_update_blending(
+ struct mpc *mpc,
+ struct mpcc_blnd_cfg *blnd_cfg,
+ int mpcc_id)
+{
+ struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
+
+ REG_UPDATE_5(MPCC_CONTROL[mpcc_id],
+ MPCC_ALPHA_BLND_MODE, blnd_cfg->alpha_mode,
+ MPCC_ALPHA_MULTIPLIED_MODE, blnd_cfg->pre_multiplied_alpha,
+ MPCC_BLND_ACTIVE_OVERLAP_ONLY, blnd_cfg->overlap_only,
+ MPCC_GLOBAL_ALPHA, blnd_cfg->global_alpha,
+ MPCC_GLOBAL_GAIN, blnd_cfg->global_gain);
+
+ mpc1_set_bg_color(mpc, &blnd_cfg->black_color, mpcc_id);
+}
+
+void mpc1_update_stereo_mix(
+ struct mpc *mpc,
+ struct mpcc_sm_cfg *sm_cfg,
+ int mpcc_id)
+{
+ struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
+
+ REG_UPDATE_6(MPCC_SM_CONTROL[mpcc_id],
+ MPCC_SM_EN, sm_cfg->enable,
+ MPCC_SM_MODE, sm_cfg->sm_mode,
+ MPCC_SM_FRAME_ALT, sm_cfg->frame_alt,
+ MPCC_SM_FIELD_ALT, sm_cfg->field_alt,
+ MPCC_SM_FORCE_NEXT_FRAME_POL, sm_cfg->force_next_frame_porlarity,
+ MPCC_SM_FORCE_NEXT_TOP_POL, sm_cfg->force_next_field_polarity);
+}
+void mpc1_assert_idle_mpcc(struct mpc *mpc, int id)
{
struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
@@ -75,39 +101,52 @@ void mpc10_assert_idle_mpcc(struct mpc *mpc, int id)
1, 100000);
}
-static int mpc10_get_idle_mpcc_id(struct dcn10_mpc *mpc10)
+struct mpcc *mpc1_get_mpcc(struct mpc *mpc, int mpcc_id)
{
- int i;
- int last_free_mpcc_id = -1;
+ struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
- for (i = 0; i < mpc10->num_mpcc; i++) {
- uint32_t is_idle = 0;
+ ASSERT(mpcc_id < mpc10->num_mpcc);
+ return &(mpc->mpcc_array[mpcc_id]);
+}
- if (mpc10->mpcc_in_use_mask & 1 << i)
- continue;
+struct mpcc *mpc1_get_mpcc_for_dpp(struct mpc_tree *tree, int dpp_id)
+{
+ struct mpcc *tmp_mpcc = tree->opp_list;
- last_free_mpcc_id = i;
- REG_GET(MPCC_STATUS[i], MPCC_IDLE, &is_idle);
- if (is_idle)
- return i;
+ while (tmp_mpcc != NULL) {
+ if (tmp_mpcc->dpp_id == dpp_id)
+ return tmp_mpcc;
+ tmp_mpcc = tmp_mpcc->mpcc_bot;
}
+ return NULL;
+}
- /* This assert should never trigger, we have mpcc leak if it does */
- ASSERT(last_free_mpcc_id != -1);
-
- mpc10_assert_idle_mpcc(&mpc10->base, last_free_mpcc_id);
- return last_free_mpcc_id;
+bool mpc1_is_mpcc_idle(struct mpc *mpc, int mpcc_id)
+{
+ struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
+ unsigned int top_sel;
+ unsigned int opp_id;
+ unsigned int idle;
+
+ REG_GET(MPCC_TOP_SEL[mpcc_id], MPCC_TOP_SEL, &top_sel);
+ REG_GET(MPCC_OPP_ID[mpcc_id], MPCC_OPP_ID, &opp_id);
+ REG_GET(MPCC_STATUS[mpcc_id], MPCC_IDLE, &idle);
+ if (top_sel == 0xf && opp_id == 0xf && idle)
+ return true;
+ else
+ return false;
}
-static void mpc10_assert_mpcc_idle_before_connect(struct dcn10_mpc *mpc10, int id)
+void mpc1_assert_mpcc_idle_before_connect(struct mpc *mpc, int mpcc_id)
{
+ struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
unsigned int top_sel, mpc_busy, mpc_idle;
- REG_GET(MPCC_TOP_SEL[id],
+ REG_GET(MPCC_TOP_SEL[mpcc_id],
MPCC_TOP_SEL, &top_sel);
if (top_sel == 0xf) {
- REG_GET_2(MPCC_STATUS[id],
+ REG_GET_2(MPCC_STATUS[mpcc_id],
MPCC_BUSY, &mpc_busy,
MPCC_IDLE, &mpc_idle);
@@ -116,241 +155,269 @@ static void mpc10_assert_mpcc_idle_before_connect(struct dcn10_mpc *mpc10, int i
}
}
-void mpc10_mpcc_remove(
- struct mpc *mpc,
- struct mpc_tree_cfg *tree_cfg,
- int opp_id,
- int dpp_id)
-{
- struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
- int mpcc_id, z_idx;
-
- /* find z_idx for the dpp to be removed */
- for (z_idx = 0; z_idx < tree_cfg->num_pipes; z_idx++)
- if (tree_cfg->dpp[z_idx] == dpp_id)
- break;
-
- if (z_idx == tree_cfg->num_pipes) {
- /* In case of resume from S3/S4, remove mpcc from bios left over */
- REG_SET(MPCC_OPP_ID[dpp_id], 0,
- MPCC_OPP_ID, 0xf);
- REG_SET(MPCC_TOP_SEL[dpp_id], 0,
- MPCC_TOP_SEL, 0xf);
- REG_SET(MPCC_BOT_SEL[dpp_id], 0,
- MPCC_BOT_SEL, 0xf);
- return;
- }
-
- mpcc_id = tree_cfg->mpcc[z_idx];
-
- REG_SET(MPCC_OPP_ID[mpcc_id], 0,
- MPCC_OPP_ID, 0xf);
- REG_SET(MPCC_TOP_SEL[mpcc_id], 0,
- MPCC_TOP_SEL, 0xf);
- REG_SET(MPCC_BOT_SEL[mpcc_id], 0,
- MPCC_BOT_SEL, 0xf);
-
- if (z_idx > 0) {
- int top_mpcc_id = tree_cfg->mpcc[z_idx - 1];
-
- if (z_idx + 1 < tree_cfg->num_pipes)
- /* mpcc to be removed is in the middle of the tree */
- REG_SET(MPCC_BOT_SEL[top_mpcc_id], 0,
- MPCC_BOT_SEL, tree_cfg->mpcc[z_idx + 1]);
- else {
- /* mpcc to be removed is at the bottom of the tree */
- REG_SET(MPCC_BOT_SEL[top_mpcc_id], 0,
- MPCC_BOT_SEL, 0xf);
- REG_UPDATE(MPCC_CONTROL[top_mpcc_id],
- MPCC_MODE, MODE_TOP_ONLY);
- }
- } else if (tree_cfg->num_pipes > 1)
- /* mpcc to be removed is at the top of the tree */
- REG_SET(MUX[opp_id], 0,
- MPC_OUT_MUX, tree_cfg->mpcc[z_idx + 1]);
- else
- /* mpcc to be removed is the only one in the tree */
- REG_SET(MUX[opp_id], 0, MPC_OUT_MUX, 0xf);
-
- /* mark this mpcc as not in use */
- mpc10->mpcc_in_use_mask &= ~(1 << mpcc_id);
- tree_cfg->num_pipes--;
- for (; z_idx < tree_cfg->num_pipes; z_idx++) {
- tree_cfg->dpp[z_idx] = tree_cfg->dpp[z_idx + 1];
- tree_cfg->mpcc[z_idx] = tree_cfg->mpcc[z_idx + 1];
- }
- tree_cfg->dpp[tree_cfg->num_pipes] = 0xdeadbeef;
- tree_cfg->mpcc[tree_cfg->num_pipes] = 0xdeadbeef;
-}
-
-static void mpc10_add_to_tree_cfg(
+/*
+ * Insert a DPP into the MPC tree based on the specified blending position.
+ * Only used for planes that are part of the blending chain for an OPP output.
+ *
+ * Parameters:
+ * [in/out] mpc - MPC context.
+ * [in/out] tree - MPC tree structure that plane will be added to.
+ * [in] blnd_cfg - MPCC blending configuration for the new blending layer.
+ * [in] sm_cfg - MPCC stereo mix configuration for the new blending layer.
+ *        stereo mix must be disabled for the very bottom layer of the tree config.
+ * [in] insert_above_mpcc - Insert new plane above this MPCC. If NULL, insert as bottom plane.
+ * [in] dpp_id - DPP instance for the plane to be added.
+ * [in] mpcc_id - The MPCC physical instance to use for blending.
+ *
+ * Return: struct mpcc* - MPCC that was added.
+ */
+struct mpcc *mpc1_insert_plane(
struct mpc *mpc,
- struct mpcc_cfg *cfg,
+ struct mpc_tree *tree,
+ struct mpcc_blnd_cfg *blnd_cfg,
+ struct mpcc_sm_cfg *sm_cfg,
+ struct mpcc *insert_above_mpcc,
+ int dpp_id,
int mpcc_id)
{
struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
- int mpcc_mode = MODE_TOP_ONLY;
- int position = cfg->z_index;
- struct mpc_tree_cfg *tree_cfg = cfg->tree_cfg;
- int alpha_blnd_mode = cfg->per_pixel_alpha ?
- BLND_PP_ALPHA : BLND_GLOBAL_ALPHA;
- int z_idx;
+ struct mpcc *new_mpcc = NULL;
- REG_SET(MPCC_OPP_ID[mpcc_id], 0,
- MPCC_OPP_ID, cfg->opp_id);
+ /* sanity check parameters */
+ ASSERT(mpcc_id < mpc10->num_mpcc);
+ ASSERT(!(mpc10->mpcc_in_use_mask & 1 << mpcc_id));
- REG_SET(MPCC_TOP_SEL[mpcc_id], 0,
- MPCC_TOP_SEL, cfg->dpp_id);
+ if (insert_above_mpcc) {
+ /* check that insert_above_mpcc exists in tree->opp_list */
+ struct mpcc *temp_mpcc = tree->opp_list;
- if (position == 0) {
- /* idle dpp/mpcc is added to the top layer of tree */
+ while (temp_mpcc && temp_mpcc->mpcc_bot != insert_above_mpcc)
+ temp_mpcc = temp_mpcc->mpcc_bot;
+ if (temp_mpcc == NULL)
+ return NULL;
+ }
- if (tree_cfg->num_pipes > 0) {
- /* get instance of previous top mpcc */
- int prev_top_mpcc_id = tree_cfg->mpcc[0];
+ /* Get and update MPCC struct parameters */
+ new_mpcc = mpc1_get_mpcc(mpc, mpcc_id);
+ new_mpcc->dpp_id = dpp_id;
- REG_SET(MPCC_BOT_SEL[mpcc_id], 0,
- MPCC_BOT_SEL, prev_top_mpcc_id);
- mpcc_mode = MODE_BLEND;
+ /* program mux and MPCC_MODE */
+ if (insert_above_mpcc) {
+ new_mpcc->mpcc_bot = insert_above_mpcc;
+ REG_SET(MPCC_BOT_SEL[mpcc_id], 0, MPCC_BOT_SEL, insert_above_mpcc->mpcc_id);
+ REG_UPDATE(MPCC_CONTROL[mpcc_id], MPCC_MODE, MPCC_BLEND_MODE_TOP_BOT_BLENDING);
+ } else {
+ new_mpcc->mpcc_bot = NULL;
+ REG_SET(MPCC_BOT_SEL[mpcc_id], 0, MPCC_BOT_SEL, 0xf);
+ REG_UPDATE(MPCC_CONTROL[mpcc_id], MPCC_MODE, MPCC_BLEND_MODE_TOP_LAYER_PASSTHROUGH);
+ }
+ REG_SET(MPCC_TOP_SEL[mpcc_id], 0, MPCC_TOP_SEL, dpp_id);
+ REG_SET(MPCC_OPP_ID[mpcc_id], 0, MPCC_OPP_ID, tree->opp_id);
+
+ /* update mpc tree mux setting */
+ if (tree->opp_list == insert_above_mpcc) {
+ /* insert the topmost mpcc */
+ tree->opp_list = new_mpcc;
+ REG_UPDATE(MUX[tree->opp_id], MPC_OUT_MUX, mpcc_id);
+ } else {
+ /* find insert position */
+ struct mpcc *temp_mpcc = tree->opp_list;
+
+ while (temp_mpcc && temp_mpcc->mpcc_bot != insert_above_mpcc)
+ temp_mpcc = temp_mpcc->mpcc_bot;
+ if (temp_mpcc && temp_mpcc->mpcc_bot == insert_above_mpcc) {
+ REG_SET(MPCC_BOT_SEL[temp_mpcc->mpcc_id], 0, MPCC_BOT_SEL, mpcc_id);
+ temp_mpcc->mpcc_bot = new_mpcc;
+ if (!insert_above_mpcc)
+ REG_UPDATE(MPCC_CONTROL[temp_mpcc->mpcc_id],
+ MPCC_MODE, MPCC_BLEND_MODE_TOP_BOT_BLENDING);
}
+ }
- /* opp will get new output. from new added mpcc */
- REG_SET(MUX[cfg->opp_id], 0, MPC_OUT_MUX, mpcc_id);
-
- } else if (position == tree_cfg->num_pipes) {
- /* idle dpp/mpcc is added to the bottom layer of tree */
-
- /* get instance of previous bottom mpcc, set to middle layer */
- int prev_bot_mpcc_id = tree_cfg->mpcc[tree_cfg->num_pipes - 1];
-
- REG_SET(MPCC_BOT_SEL[prev_bot_mpcc_id], 0,
- MPCC_BOT_SEL, mpcc_id);
- REG_UPDATE(MPCC_CONTROL[prev_bot_mpcc_id],
- MPCC_MODE, MODE_BLEND);
-
- /* mpcc_id become new bottom mpcc*/
- REG_SET(MPCC_BOT_SEL[mpcc_id], 0,
- MPCC_BOT_SEL, 0xf);
+ /* update the blending configuration */
+ new_mpcc->blnd_cfg = *blnd_cfg;
+ mpc->funcs->update_blending(mpc, &new_mpcc->blnd_cfg, mpcc_id);
- } else {
- /* idle dpp/mpcc is added to middle of tree */
- int above_mpcc_id = tree_cfg->mpcc[position - 1];
- int below_mpcc_id = tree_cfg->mpcc[position];
-
- /* mpcc above new mpcc_id has new bottom mux*/
- REG_SET(MPCC_BOT_SEL[above_mpcc_id], 0,
- MPCC_BOT_SEL, mpcc_id);
- REG_UPDATE(MPCC_CONTROL[above_mpcc_id],
- MPCC_MODE, MODE_BLEND);
-
- /* mpcc_id bottom mux is from below mpcc*/
- REG_SET(MPCC_BOT_SEL[mpcc_id], 0,
- MPCC_BOT_SEL, below_mpcc_id);
- mpcc_mode = MODE_BLEND;
+ /* update the stereo mix settings, if provided */
+ if (sm_cfg != NULL) {
+ new_mpcc->sm_cfg = *sm_cfg;
+ mpc1_update_stereo_mix(mpc, sm_cfg, mpcc_id);
}
- REG_SET_4(MPCC_CONTROL[mpcc_id], 0xffffffff,
- MPCC_MODE, mpcc_mode,
- MPCC_ALPHA_BLND_MODE, alpha_blnd_mode,
- MPCC_ALPHA_MULTIPLIED_MODE, cfg->pre_multiplied_alpha,
- MPCC_BLND_ACTIVE_OVERLAP_ONLY, false);
+ /* mark this mpcc as in use */
+ mpc10->mpcc_in_use_mask |= 1 << mpcc_id;
- /* update mpc_tree_cfg with new mpcc */
- for (z_idx = tree_cfg->num_pipes; z_idx > position; z_idx--) {
- tree_cfg->dpp[z_idx] = tree_cfg->dpp[z_idx - 1];
- tree_cfg->mpcc[z_idx] = tree_cfg->mpcc[z_idx - 1];
- }
- tree_cfg->dpp[position] = cfg->dpp_id;
- tree_cfg->mpcc[position] = mpcc_id;
- tree_cfg->num_pipes++;
+ return new_mpcc;
}
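The rework above replaces the fixed dpp[]/mpcc[] arrays of mpc_tree_cfg with a singly linked list: tree->opp_list points at the topmost blending layer and each mpcc->mpcc_bot points at the layer below it, mirroring the MPCC_BOT_SEL mux chain in hardware. A self-contained sketch of that shape and of the lookup mpc1_get_mpcc_for_dpp performs (the demo_* types are simplified stand-ins, not the driver structs):

    #include <stddef.h>

    struct demo_mpcc {
            int dpp_id;
            int mpcc_id;
            struct demo_mpcc *mpcc_bot;   /* next layer down, NULL at the bottom */
    };

    struct demo_tree {
            int opp_id;
            struct demo_mpcc *opp_list;   /* topmost blending layer */
    };

    /* Walk top to bottom looking for the layer fed by a given DPP. */
    static struct demo_mpcc *demo_get_mpcc_for_dpp(struct demo_tree *tree, int dpp_id)
    {
            struct demo_mpcc *cur = tree->opp_list;

            while (cur != NULL) {
                    if (cur->dpp_id == dpp_id)
                            return cur;
                    cur = cur->mpcc_bot;
            }
            return NULL;
    }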
-int mpc10_mpcc_add(struct mpc *mpc, struct mpcc_cfg *cfg)
+/*
+ * Remove a specified MPCC from the MPC tree.
+ *
+ * Parameters:
+ * [in/out] mpc - MPC context.
+ * [in/out] tree - MPC tree structure that plane will be removed from.
+ * [in/out] mpcc - MPCC to be removed from tree.
+ *
+ * Return: void
+ */
+void mpc1_remove_mpcc(
+ struct mpc *mpc,
+ struct mpc_tree *tree,
+ struct mpcc *mpcc_to_remove)
{
struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
- int mpcc_id, z_idx;
-
- ASSERT(cfg->z_index < mpc10->num_mpcc);
-
- /* check in dpp already exists in mpc tree */
- for (z_idx = 0; z_idx < cfg->tree_cfg->num_pipes; z_idx++)
- if (cfg->tree_cfg->dpp[z_idx] == cfg->dpp_id)
- break;
- if (z_idx == cfg->tree_cfg->num_pipes) {
- ASSERT(cfg->z_index <= cfg->tree_cfg->num_pipes);
- mpcc_id = mpc10_get_idle_mpcc_id(mpc10);
-
- /*
- * TODO: remove hack
- * Note: currently there is a bug in init_hw such that
- * on resume from hibernate, BIOS sets up MPCC0, and
- * we do mpcc_remove but the mpcc cannot go to idle
- * after remove. This cause us to pick mpcc1 here,
- * which causes a pstate hang for yet unknown reason.
- */
- mpcc_id = cfg->dpp_id;
- /* end hack*/
-
- ASSERT(!(mpc10->mpcc_in_use_mask & 1 << mpcc_id));
-
- if (mpc->ctx->dc->debug.sanity_checks)
- mpc10_assert_mpcc_idle_before_connect(mpc10, mpcc_id);
+ bool found = false;
+ int mpcc_id = mpcc_to_remove->mpcc_id;
+
+ if (tree->opp_list == mpcc_to_remove) {
+ found = true;
+ /* remove MPCC from top of tree */
+ if (mpcc_to_remove->mpcc_bot) {
+ /* set the next MPCC in list to be the top MPCC */
+ tree->opp_list = mpcc_to_remove->mpcc_bot;
+ REG_UPDATE(MUX[tree->opp_id], MPC_OUT_MUX, tree->opp_list->mpcc_id);
+ } else {
+ /* there is no other MPCC in the list */
+ tree->opp_list = NULL;
+ REG_UPDATE(MUX[tree->opp_id], MPC_OUT_MUX, 0xf);
+ }
} else {
- ASSERT(cfg->z_index < cfg->tree_cfg->num_pipes);
- mpcc_id = cfg->tree_cfg->mpcc[z_idx];
- mpc10_mpcc_remove(mpc, cfg->tree_cfg, cfg->opp_id, cfg->dpp_id);
+ /* find the MPCC directly above mpcc_to_remove in the list */
+ struct mpcc *temp_mpcc = tree->opp_list;
+
+ while (temp_mpcc && temp_mpcc->mpcc_bot != mpcc_to_remove)
+ temp_mpcc = temp_mpcc->mpcc_bot;
+
+ if (temp_mpcc && temp_mpcc->mpcc_bot == mpcc_to_remove) {
+ found = true;
+ temp_mpcc->mpcc_bot = mpcc_to_remove->mpcc_bot;
+ if (mpcc_to_remove->mpcc_bot) {
+ /* remove MPCC in middle of list */
+ REG_SET(MPCC_BOT_SEL[temp_mpcc->mpcc_id], 0,
+ MPCC_BOT_SEL, mpcc_to_remove->mpcc_bot->mpcc_id);
+ } else {
+ /* remove MPCC from bottom of list */
+ REG_SET(MPCC_BOT_SEL[temp_mpcc->mpcc_id], 0,
+ MPCC_BOT_SEL, 0xf);
+ REG_UPDATE(MPCC_CONTROL[temp_mpcc->mpcc_id],
+ MPCC_MODE, MPCC_BLEND_MODE_TOP_LAYER_PASSTHROUGH);
+ }
+ }
}
- /* add dpp/mpcc pair to mpc_tree_cfg and update mpcc registers */
- mpc10_add_to_tree_cfg(mpc, cfg, mpcc_id);
+ if (found) {
+ /* turn off MPCC mux registers */
+ REG_SET(MPCC_TOP_SEL[mpcc_id], 0, MPCC_TOP_SEL, 0xf);
+ REG_SET(MPCC_BOT_SEL[mpcc_id], 0, MPCC_BOT_SEL, 0xf);
+ REG_SET(MPCC_OPP_ID[mpcc_id], 0, MPCC_OPP_ID, 0xf);
- /* set background color */
- mpc10_set_bg_color(mpc10, &cfg->black_color, mpcc_id);
-
- /* mark this mpcc as in use */
- mpc10->mpcc_in_use_mask |= 1 << mpcc_id;
+ /* mark this mpcc as not in use */
+ mpc10->mpcc_in_use_mask &= ~(1 << mpcc_id);
+ mpcc_to_remove->dpp_id = 0xf;
+ mpcc_to_remove->mpcc_bot = NULL;
+ } else {
+ /* In case of resume from S3/S4, remove the MPCC left over from the BIOS */
+ REG_SET(MPCC_TOP_SEL[mpcc_id], 0, MPCC_TOP_SEL, 0xf);
+ REG_SET(MPCC_BOT_SEL[mpcc_id], 0, MPCC_BOT_SEL, 0xf);
+ REG_SET(MPCC_OPP_ID[mpcc_id], 0, MPCC_OPP_ID, 0xf);
+ }
+}
- return mpcc_id;
+static void mpc1_init_mpcc(struct mpcc *mpcc, int mpcc_inst)
+{
+ mpcc->mpcc_id = mpcc_inst;
+ mpcc->dpp_id = 0xf;
+ mpcc->mpcc_bot = NULL;
+ mpcc->blnd_cfg.overlap_only = false;
+ mpcc->blnd_cfg.global_alpha = 0xff;
+ mpcc->blnd_cfg.global_gain = 0xff;
+ mpcc->sm_cfg.enable = false;
}
-void mpc10_update_blend_mode(
- struct mpc *mpc,
- struct mpcc_cfg *cfg)
+/*
+ * Reset the MPCC HW status by disconnecting all muxes.
+ *
+ * Parameters:
+ * [in/out] mpc - MPC context.
+ *
+ * Return: void
+ */
+void mpc1_mpc_init(struct mpc *mpc)
{
struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
- int mpcc_id, z_idx;
- int alpha_blnd_mode = cfg->per_pixel_alpha ?
- BLND_PP_ALPHA : BLND_GLOBAL_ALPHA;
+ int mpcc_id;
+ int opp_id;
- /* find z_idx for the dpp that requires blending mode update*/
- for (z_idx = 0; z_idx < cfg->tree_cfg->num_pipes; z_idx++)
- if (cfg->tree_cfg->dpp[z_idx] == cfg->dpp_id)
- break;
+ mpc10->mpcc_in_use_mask = 0;
+ for (mpcc_id = 0; mpcc_id < mpc10->num_mpcc; mpcc_id++) {
+ REG_SET(MPCC_TOP_SEL[mpcc_id], 0, MPCC_TOP_SEL, 0xf);
+ REG_SET(MPCC_BOT_SEL[mpcc_id], 0, MPCC_BOT_SEL, 0xf);
+ REG_SET(MPCC_OPP_ID[mpcc_id], 0, MPCC_OPP_ID, 0xf);
- ASSERT(z_idx < cfg->tree_cfg->num_pipes);
- mpcc_id = cfg->tree_cfg->mpcc[z_idx];
+ mpc1_init_mpcc(&(mpc->mpcc_array[mpcc_id]), mpcc_id);
+ }
- REG_UPDATE_2(MPCC_CONTROL[mpcc_id],
- MPCC_ALPHA_BLND_MODE, alpha_blnd_mode,
- MPCC_ALPHA_MULTIPLIED_MODE, cfg->pre_multiplied_alpha);
+ for (opp_id = 0; opp_id < MAX_OPP; opp_id++) {
+ if (REG(MUX[opp_id]))
+ REG_UPDATE(MUX[opp_id], MPC_OUT_MUX, 0xf);
+ }
}
-int mpc10_get_opp_id(struct mpc *mpc, int mpcc_id)
+void mpc1_init_mpcc_list_from_hw(
+ struct mpc *mpc,
+ struct mpc_tree *tree)
{
struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
- int opp_id = 0xF;
-
- REG_GET(MPCC_OPP_ID[mpcc_id], MPCC_OPP_ID, &opp_id);
-
- return opp_id;
+ unsigned int opp_id;
+ unsigned int top_sel;
+ unsigned int bot_sel;
+ unsigned int out_mux;
+ struct mpcc *mpcc;
+ int mpcc_id;
+ int bot_mpcc_id;
+
+ REG_GET(MUX[tree->opp_id], MPC_OUT_MUX, &out_mux);
+
+ if (out_mux != 0xf) {
+ for (mpcc_id = 0; mpcc_id < mpc10->num_mpcc; mpcc_id++) {
+ REG_GET(MPCC_OPP_ID[mpcc_id], MPCC_OPP_ID, &opp_id);
+ REG_GET(MPCC_TOP_SEL[mpcc_id], MPCC_TOP_SEL, &top_sel);
+ REG_GET(MPCC_BOT_SEL[mpcc_id], MPCC_BOT_SEL, &bot_sel);
+
+ if (bot_sel == mpcc_id)
+ bot_sel = 0xf;
+
+ if ((opp_id == tree->opp_id) && (top_sel != 0xf)) {
+ mpcc = mpc1_get_mpcc(mpc, mpcc_id);
+ mpcc->dpp_id = top_sel;
+ mpc10->mpcc_in_use_mask |= 1 << mpcc_id;
+
+ if (out_mux == mpcc_id)
+ tree->opp_list = mpcc;
+ if (bot_sel != 0xf && bot_sel < mpc10->num_mpcc) {
+ bot_mpcc_id = bot_sel;
+ REG_GET(MPCC_OPP_ID[bot_mpcc_id], MPCC_OPP_ID, &opp_id);
+ REG_GET(MPCC_TOP_SEL[bot_mpcc_id], MPCC_TOP_SEL, &top_sel);
+ if ((opp_id == tree->opp_id) && (top_sel != 0xf)) {
+ struct mpcc *mpcc_bottom = mpc1_get_mpcc(mpc, bot_mpcc_id);
+
+ mpcc->mpcc_bot = mpcc_bottom;
+ }
+ }
+ }
+ }
+ }
}
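mpc1_init_mpcc_list_from_hw keys entirely off tree->opp_id, so a caller must fill that in before invoking it. This patch does not show a call site; a hypothetical one, using only structures the patch does introduce, might look like:

    struct output_pixel_processor *opp = dc->res_pool->opps[i];
    struct mpc *mpc = dc->res_pool->mpc;

    opp->mpc_tree_params.opp_id = opp->inst;   /* the rebuild is scoped to this OPP */
    mpc->funcs->init_mpcc_list_from_hw(mpc, &opp->mpc_tree_params);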
const struct mpc_funcs dcn10_mpc_funcs = {
- .add = mpc10_mpcc_add,
- .remove = mpc10_mpcc_remove,
- .wait_for_idle = mpc10_assert_idle_mpcc,
- .update_blend_mode = mpc10_update_blend_mode,
- .get_opp_id = mpc10_get_opp_id,
+ .insert_plane = mpc1_insert_plane,
+ .remove_mpcc = mpc1_remove_mpcc,
+ .mpc_init = mpc1_mpc_init,
+ .get_mpcc_for_dpp = mpc1_get_mpcc_for_dpp,
+ .wait_for_idle = mpc1_assert_idle_mpcc,
+ .assert_mpcc_idle_before_connect = mpc1_assert_mpcc_idle_before_connect,
+ .init_mpcc_list_from_hw = mpc1_init_mpcc_list_from_hw,
+ .update_blending = mpc1_update_blending,
};
void dcn10_mpc_construct(struct dcn10_mpc *mpc10,
@@ -360,6 +427,8 @@ void dcn10_mpc_construct(struct dcn10_mpc *mpc10,
const struct dcn_mpc_mask *mpc_mask,
int num_mpcc)
{
+ int i;
+
mpc10->base.ctx = ctx;
mpc10->base.funcs = &dcn10_mpc_funcs;
@@ -370,5 +439,8 @@ void dcn10_mpc_construct(struct dcn10_mpc *mpc10,
mpc10->mpcc_in_use_mask = 0;
mpc10->num_mpcc = num_mpcc;
+
+ for (i = 0; i < MAX_MPCC; i++)
+ mpc1_init_mpcc(&mpc10->base.mpcc_array[i], i);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h
index e85e1f342266..267a2995ef6e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h
@@ -30,9 +30,6 @@
#define TO_DCN10_MPC(mpc_base) \
container_of(mpc_base, struct dcn10_mpc, base)
-#define MAX_MPCC 6
-#define MAX_OPP 6
-
#define MPC_COMMON_REG_LIST_DCN1_0(inst) \
SRII(MPCC_TOP_SEL, MPCC, inst),\
SRII(MPCC_BOT_SEL, MPCC, inst),\
@@ -42,7 +39,8 @@
SRII(MPCC_BG_G_Y, MPCC, inst),\
SRII(MPCC_BG_R_CR, MPCC, inst),\
SRII(MPCC_BG_B_CB, MPCC, inst),\
- SRII(MPCC_BG_B_CB, MPCC, inst)
+ SRII(MPCC_BG_B_CB, MPCC, inst),\
+ SRII(MPCC_SM_CONTROL, MPCC, inst)
#define MPC_OUT_MUX_COMMON_REG_LIST_DCN1_0(inst) \
SRII(MUX, MPC_OUT, inst)
@@ -56,6 +54,7 @@
uint32_t MPCC_BG_G_Y[MAX_MPCC]; \
uint32_t MPCC_BG_R_CR[MAX_MPCC]; \
uint32_t MPCC_BG_B_CB[MAX_MPCC]; \
+ uint32_t MPCC_SM_CONTROL[MAX_MPCC]; \
uint32_t MUX[MAX_OPP];
#define MPC_COMMON_MASK_SH_LIST_DCN1_0(mask_sh)\
@@ -65,12 +64,20 @@
SF(MPCC0_MPCC_CONTROL, MPCC_ALPHA_BLND_MODE, mask_sh),\
SF(MPCC0_MPCC_CONTROL, MPCC_ALPHA_MULTIPLIED_MODE, mask_sh),\
SF(MPCC0_MPCC_CONTROL, MPCC_BLND_ACTIVE_OVERLAP_ONLY, mask_sh),\
+ SF(MPCC0_MPCC_CONTROL, MPCC_GLOBAL_ALPHA, mask_sh),\
+ SF(MPCC0_MPCC_CONTROL, MPCC_GLOBAL_GAIN, mask_sh),\
SF(MPCC0_MPCC_STATUS, MPCC_IDLE, mask_sh),\
SF(MPCC0_MPCC_STATUS, MPCC_BUSY, mask_sh),\
SF(MPCC0_MPCC_OPP_ID, MPCC_OPP_ID, mask_sh),\
SF(MPCC0_MPCC_BG_G_Y, MPCC_BG_G_Y, mask_sh),\
SF(MPCC0_MPCC_BG_R_CR, MPCC_BG_R_CR, mask_sh),\
SF(MPCC0_MPCC_BG_B_CB, MPCC_BG_B_CB, mask_sh),\
+ SF(MPCC0_MPCC_SM_CONTROL, MPCC_SM_EN, mask_sh),\
+ SF(MPCC0_MPCC_SM_CONTROL, MPCC_SM_MODE, mask_sh),\
+ SF(MPCC0_MPCC_SM_CONTROL, MPCC_SM_FRAME_ALT, mask_sh),\
+ SF(MPCC0_MPCC_SM_CONTROL, MPCC_SM_FIELD_ALT, mask_sh),\
+ SF(MPCC0_MPCC_SM_CONTROL, MPCC_SM_FORCE_NEXT_FRAME_POL, mask_sh),\
+ SF(MPCC0_MPCC_SM_CONTROL, MPCC_SM_FORCE_NEXT_TOP_POL, mask_sh),\
SF(MPC_OUT0_MUX, MPC_OUT_MUX, mask_sh)
#define MPC_REG_FIELD_LIST(type) \
@@ -80,12 +87,20 @@
type MPCC_ALPHA_BLND_MODE;\
type MPCC_ALPHA_MULTIPLIED_MODE;\
type MPCC_BLND_ACTIVE_OVERLAP_ONLY;\
+ type MPCC_GLOBAL_ALPHA;\
+ type MPCC_GLOBAL_GAIN;\
type MPCC_IDLE;\
type MPCC_BUSY;\
type MPCC_OPP_ID;\
type MPCC_BG_G_Y;\
type MPCC_BG_R_CR;\
type MPCC_BG_B_CB;\
+ type MPCC_SM_EN;\
+ type MPCC_SM_MODE;\
+ type MPCC_SM_FRAME_ALT;\
+ type MPCC_SM_FIELD_ALT;\
+ type MPCC_SM_FORCE_NEXT_FRAME_POL;\
+ type MPCC_SM_FORCE_NEXT_TOP_POL;\
type MPC_OUT_MUX;
struct dcn_mpc_registers {
@@ -117,23 +132,55 @@ void dcn10_mpc_construct(struct dcn10_mpc *mpcc10,
const struct dcn_mpc_mask *mpc_mask,
int num_mpcc);
-int mpc10_mpcc_add(
- struct mpc *mpc,
- struct mpcc_cfg *cfg);
-
-void mpc10_mpcc_remove(
- struct mpc *mpc,
- struct mpc_tree_cfg *tree_cfg,
- int opp_id,
- int dpp_id);
-
-void mpc10_assert_idle_mpcc(
- struct mpc *mpc,
- int id);
-
-void mpc10_update_blend_mode(
- struct mpc *mpc,
- struct mpcc_cfg *cfg);
-int mpc10_get_opp_id(struct mpc *mpc, int mpcc_id);
+struct mpcc *mpc1_insert_plane(
+ struct mpc *mpc,
+ struct mpc_tree *tree,
+ struct mpcc_blnd_cfg *blnd_cfg,
+ struct mpcc_sm_cfg *sm_cfg,
+ struct mpcc *insert_above_mpcc,
+ int dpp_id,
+ int mpcc_id);
+
+void mpc1_remove_mpcc(
+ struct mpc *mpc,
+ struct mpc_tree *tree,
+ struct mpcc *mpcc);
+
+void mpc1_mpc_init(
+ struct mpc *mpc);
+
+void mpc1_assert_idle_mpcc(
+ struct mpc *mpc,
+ int id);
+
+void mpc1_set_bg_color(
+ struct mpc *mpc,
+ struct tg_color *bg_color,
+ int id);
+
+void mpc1_update_stereo_mix(
+ struct mpc *mpc,
+ struct mpcc_sm_cfg *sm_cfg,
+ int mpcc_id);
+
+bool mpc1_is_mpcc_idle(
+ struct mpc *mpc,
+ int mpcc_id);
+
+void mpc1_assert_mpcc_idle_before_connect(
+ struct mpc *mpc,
+ int mpcc_id);
+
+void mpc1_init_mpcc_list_from_hw(
+ struct mpc *mpc,
+ struct mpc_tree *tree);
+
+struct mpcc *mpc1_get_mpcc(
+ struct mpc *mpc,
+ int mpcc_id);
+
+struct mpcc *mpc1_get_mpcc_for_dpp(
+ struct mpc_tree *tree,
+ int dpp_id);
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
index 6d6f67b7d30e..f6ba0eef4489 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
@@ -296,13 +296,75 @@ void opp1_program_fmt(
return;
}
-void opp1_set_stereo_polarity(
- struct output_pixel_processor *opp,
- bool enable, bool rightEyePolarity)
+void opp1_program_stereo(
+ struct output_pixel_processor *opp,
+ bool enable,
+ const struct dc_crtc_timing *timing)
{
struct dcn10_opp *oppn10 = TO_DCN10_OPP(opp);
- REG_UPDATE(FMT_CONTROL, FMT_STEREOSYNC_OVERRIDE, enable);
+ uint32_t active_width = timing->h_addressable - timing->h_border_left - timing->h_border_right;
+ uint32_t space1_size = timing->v_total - timing->v_addressable;
+ /* TODO: confirm computation of space2_size */
+ uint32_t space2_size = timing->v_total - timing->v_addressable;
+
+ if (!enable) {
+ active_width = 0;
+ space1_size = 0;
+ space2_size = 0;
+ }
+
+ /* TODO: for which cases should FMT_STEREOSYNC_OVERRIDE be set? */
+ REG_UPDATE(FMT_CONTROL, FMT_STEREOSYNC_OVERRIDE, 0);
+
+ REG_UPDATE(OPPBUF_CONTROL, OPPBUF_ACTIVE_WIDTH, active_width);
+
+ /* Program the OPPBUF_3D_VACT_SPACE1_SIZE and OPPBUF_3D_VACT_SPACE2_SIZE registers.
+  * In 3D frame-packed (progressive) modes, the Vactive space sits only between the
+  * two frames, so only OPPBUF_3D_VACT_SPACE1_SIZE needs to be programmed.
+  * In 3D frame-alternate modes, the space separates the left/right frames (or
+  * top/bottom fields), so OPPBUF_3D_VACT_SPACE2_SIZE is programmed instead.
+ */
+ if (timing->timing_3d_format == TIMING_3D_FORMAT_FRAME_ALTERNATE)
+ REG_UPDATE(OPPBUF_3D_PARAMETERS_0, OPPBUF_3D_VACT_SPACE2_SIZE, space2_size);
+ else
+ REG_UPDATE(OPPBUF_3D_PARAMETERS_0, OPPBUF_3D_VACT_SPACE1_SIZE, space1_size);
+
+ /* TODO: Is programming of OPPBUF_DUMMY_DATA_R/G/B needed? */
+ /*
+ REG_UPDATE(OPPBUF_3D_PARAMETERS_0,
+ OPPBUF_DUMMY_DATA_R, data_r);
+ REG_UPDATE(OPPBUF_3D_PARAMETERS_1,
+ OPPBUF_DUMMY_DATA_G, data_g);
+ REG_UPDATE(OPPBUF_3D_PARAMETERS_1,
+ OPPBUF_DUMMY_DATA_B, _data_b);
+ */
+}
+
+void opp1_program_oppbuf(
+ struct output_pixel_processor *opp,
+ struct oppbuf_params *oppbuf)
+{
+ struct dcn10_opp *oppn10 = TO_DCN10_OPP(opp);
+
+ /* Program the oppbuf active width to be the frame width from mpc */
+ REG_UPDATE(OPPBUF_CONTROL, OPPBUF_ACTIVE_WIDTH, oppbuf->active_width);
+
+ /* Specifies the number of segments in multi-segment mode (DP-MSO operation)
+ * description "In 1/2/4 segment mode, specifies the horizontal active width in pixels of the display panel.
+ * In 4 segment split left/right mode, specifies the horizontal 1/2 active width in pixels of the display panel.
+ * Used to determine segment boundaries in multi-segment mode. Used to determine the width of the vertical active space in 3D frame packed modes.
+ * OPPBUF_ACTIVE_WIDTH must be integer divisible by the total number of segments."
+ */
+ REG_UPDATE(OPPBUF_CONTROL, OPPBUF_DISPLAY_SEGMENTATION, oppbuf->mso_segmentation);
+
+ /* description "Specifies the number of overlap pixels (1-8 overlapping pixels supported), used in multi-segment mode (DP-MSO operation)" */
+ REG_UPDATE(OPPBUF_CONTROL, OPPBUF_OVERLAP_PIXEL_NUM, oppbuf->mso_overlap_pixel_num);
+
+ /* description "Specifies the number of times a pixel is replicated (0-15 pixel replications supported).
+ * A value of 0 disables replication. The total number of times a pixel is output is OPPBUF_PIXEL_REPETITION + 1."
+ */
+ REG_UPDATE(OPPBUF_CONTROL, OPPBUF_PIXEL_REPETITION, oppbuf->pixel_repetition);
+
}
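The register description quoted above requires OPPBUF_ACTIVE_WIDTH to be an integer multiple of the number of segments. A standalone sanity check in that spirit; the demo struct is a simplified stand-in, and mso_segmentation is treated here as a plain segment count rather than whatever encoding the hardware field actually uses:

    #include <stdbool.h>
    #include <stdint.h>

    struct demo_oppbuf_params {
            uint32_t active_width;
            uint32_t mso_segmentation;  /* assumed: number of segments (1, 2 or 4) */
    };

    static bool demo_oppbuf_width_ok(const struct demo_oppbuf_params *p)
    {
            /* OPPBUF_ACTIVE_WIDTH must be integer divisible by the segment count */
            return p->mso_segmentation != 0 &&
                   (p->active_width % p->mso_segmentation) == 0;
    }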
/*****************************************/
@@ -319,7 +381,7 @@ static struct opp_funcs dcn10_opp_funcs = {
.opp_set_dyn_expansion = opp1_set_dyn_expansion,
.opp_program_fmt = opp1_program_fmt,
.opp_program_bit_depth_reduction = opp1_program_bit_depth_reduction,
- .opp_set_stereo_polarity = opp1_set_stereo_polarity,
+ .opp_program_stereo = opp1_program_stereo,
.opp_destroy = opp1_destroy
};
@@ -330,6 +392,7 @@ void dcn10_opp_construct(struct dcn10_opp *oppn10,
const struct dcn10_opp_shift *opp_shift,
const struct dcn10_opp_mask *opp_mask)
{
+
oppn10->base.ctx = ctx;
oppn10->base.inst = inst;
oppn10->base.funcs = &dcn10_opp_funcs;
@@ -338,4 +401,3 @@ void dcn10_opp_construct(struct dcn10_opp *oppn10,
oppn10->opp_shift = opp_shift;
oppn10->opp_mask = opp_mask;
}
-
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h
index f3c298ec37fb..bc5058af6266 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h
@@ -41,7 +41,10 @@
SRI(FMT_DITHER_RAND_B_SEED, FMT, id), \
SRI(FMT_CLAMP_CNTL, FMT, id), \
SRI(FMT_DYNAMIC_EXP_CNTL, FMT, id), \
- SRI(FMT_MAP420_MEMORY_CONTROL, FMT, id)
+ SRI(FMT_MAP420_MEMORY_CONTROL, FMT, id), \
+ SRI(OPPBUF_CONTROL, OPPBUF, id),\
+ SRI(OPPBUF_3D_PARAMETERS_0, OPPBUF, id), \
+ SRI(OPPBUF_3D_PARAMETERS_1, OPPBUF, id)
#define OPP_REG_LIST_DCN10(id) \
OPP_REG_LIST_DCN(id)
@@ -54,7 +57,11 @@
uint32_t FMT_DITHER_RAND_B_SEED; \
uint32_t FMT_CLAMP_CNTL; \
uint32_t FMT_DYNAMIC_EXP_CNTL; \
- uint32_t FMT_MAP420_MEMORY_CONTROL;
+ uint32_t FMT_MAP420_MEMORY_CONTROL; \
+ uint32_t OPPBUF_CONTROL; \
+ uint32_t OPPBUF_CONTROL1; \
+ uint32_t OPPBUF_3D_PARAMETERS_0; \
+ uint32_t OPPBUF_3D_PARAMETERS_1
#define OPP_MASK_SH_LIST_DCN(mask_sh) \
OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, mask_sh), \
@@ -78,10 +85,16 @@
OPP_SF(FMT0_FMT_CLAMP_CNTL, FMT_CLAMP_COLOR_FORMAT, mask_sh), \
OPP_SF(FMT0_FMT_DYNAMIC_EXP_CNTL, FMT_DYNAMIC_EXP_EN, mask_sh), \
OPP_SF(FMT0_FMT_DYNAMIC_EXP_CNTL, FMT_DYNAMIC_EXP_MODE, mask_sh), \
- OPP_SF(FMT0_FMT_MAP420_MEMORY_CONTROL, FMT_MAP420MEM_PWR_FORCE, mask_sh)
+ OPP_SF(FMT0_FMT_MAP420_MEMORY_CONTROL, FMT_MAP420MEM_PWR_FORCE, mask_sh), \
+ OPP_SF(OPPBUF0_OPPBUF_CONTROL, OPPBUF_ACTIVE_WIDTH, mask_sh),\
+ OPP_SF(OPPBUF0_OPPBUF_CONTROL, OPPBUF_PIXEL_REPETITION, mask_sh),\
+ OPP_SF(OPPBUF0_OPPBUF_3D_PARAMETERS_0, OPPBUF_3D_VACT_SPACE1_SIZE, mask_sh), \
+ OPP_SF(OPPBUF0_OPPBUF_3D_PARAMETERS_0, OPPBUF_3D_VACT_SPACE2_SIZE, mask_sh)
#define OPP_MASK_SH_LIST_DCN10(mask_sh) \
- OPP_MASK_SH_LIST_DCN(mask_sh)
+ OPP_MASK_SH_LIST_DCN(mask_sh), \
+ OPP_SF(OPPBUF0_OPPBUF_CONTROL, OPPBUF_DISPLAY_SEGMENTATION, mask_sh),\
+ OPP_SF(OPPBUF0_OPPBUF_CONTROL, OPPBUF_OVERLAP_PIXEL_NUM, mask_sh)
#define OPP_DCN10_REG_FIELD_LIST(type) \
type FMT_TRUNCATE_EN; \
@@ -105,18 +118,25 @@
type FMT_DYNAMIC_EXP_EN; \
type FMT_DYNAMIC_EXP_MODE; \
type FMT_MAP420MEM_PWR_FORCE; \
- type FMT_STEREOSYNC_OVERRIDE;
+ type FMT_STEREOSYNC_OVERRIDE; \
+ type OPPBUF_ACTIVE_WIDTH;\
+ type OPPBUF_PIXEL_REPETITION;\
+ type OPPBUF_DISPLAY_SEGMENTATION;\
+ type OPPBUF_OVERLAP_PIXEL_NUM;\
+ type OPPBUF_NUM_SEGMENT_PADDED_PIXELS; \
+ type OPPBUF_3D_VACT_SPACE1_SIZE; \
+ type OPPBUF_3D_VACT_SPACE2_SIZE
struct dcn10_opp_registers {
- OPP_COMMON_REG_VARIABLE_LIST
+ OPP_COMMON_REG_VARIABLE_LIST;
};
struct dcn10_opp_shift {
- OPP_DCN10_REG_FIELD_LIST(uint8_t)
+ OPP_DCN10_REG_FIELD_LIST(uint8_t);
};
struct dcn10_opp_mask {
- OPP_DCN10_REG_FIELD_LIST(uint32_t)
+ OPP_DCN10_REG_FIELD_LIST(uint32_t);
};
struct dcn10_opp {
@@ -151,9 +171,10 @@ void opp1_program_bit_depth_reduction(
struct output_pixel_processor *opp,
const struct bit_depth_reduction_params *params);
-void opp1_set_stereo_polarity(
- struct output_pixel_processor *opp,
- bool enable, bool rightEyePolarity);
+void opp1_program_stereo(
+ struct output_pixel_processor *opp,
+ bool enable,
+ const struct dc_crtc_timing *timing);
void opp1_destroy(struct output_pixel_processor **opp);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
index 73ff78f9cae1..4bf64d1b2c60 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_timing_generator.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
@@ -23,19 +23,20 @@
*
*/
+
#include "reg_helper.h"
-#include "dcn10_timing_generator.h"
+#include "dcn10_optc.h"
#include "dc.h"
#define REG(reg)\
- tgn10->tg_regs->reg
+ optc1->tg_regs->reg
#define CTX \
- tgn10->base.ctx
+ optc1->base.ctx
#undef FN
#define FN(reg_name, field_name) \
- tgn10->tg_shift->field_name, tgn10->tg_mask->field_name
+ optc1->tg_shift->field_name, optc1->tg_mask->field_name
#define STATIC_SCREEN_EVENT_MASK_RANGETIMING_DOUBLE_BUFFER_UPDATE_EN 0x100
@@ -45,8 +46,8 @@
* This is a workaround for a bug that has existed since R5xx and has not been
 * fixed: keep the front porch at a minimum of 2 for interlaced mode or 1 for progressive.
*/
-static void tgn10_apply_front_porch_workaround(
- struct timing_generator *tg,
+static void optc1_apply_front_porch_workaround(
+ struct timing_generator *optc,
struct dc_crtc_timing *timing)
{
if (timing->flags.INTERLACE == 1) {
@@ -58,30 +59,30 @@ static void tgn10_apply_front_porch_workaround(
}
}
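The rest of the workaround body falls outside this hunk; a minimal sketch of the clamp it describes, assuming dc_crtc_timing exposes a v_front_porch field, would be:

	if (timing->flags.INTERLACE == 1) {
		if (timing->v_front_porch < 2)
			timing->v_front_porch = 2;	/* interlaced: at least 2 lines */
	} else {
		if (timing->v_front_porch < 1)
			timing->v_front_porch = 1;	/* progressive: at least 1 line */
	}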
-static void tgn10_program_global_sync(
- struct timing_generator *tg)
+void optc1_program_global_sync(
+ struct timing_generator *optc)
{
- struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
- if (tg->dlg_otg_param.vstartup_start == 0) {
+ if (optc->dlg_otg_param.vstartup_start == 0) {
BREAK_TO_DEBUGGER();
return;
}
REG_SET(OTG_VSTARTUP_PARAM, 0,
- VSTARTUP_START, tg->dlg_otg_param.vstartup_start);
+ VSTARTUP_START, optc->dlg_otg_param.vstartup_start);
REG_SET_2(OTG_VUPDATE_PARAM, 0,
- VUPDATE_OFFSET, tg->dlg_otg_param.vupdate_offset,
- VUPDATE_WIDTH, tg->dlg_otg_param.vupdate_width);
+ VUPDATE_OFFSET, optc->dlg_otg_param.vupdate_offset,
+ VUPDATE_WIDTH, optc->dlg_otg_param.vupdate_width);
REG_SET(OTG_VREADY_PARAM, 0,
- VREADY_OFFSET, tg->dlg_otg_param.vready_offset);
+ VREADY_OFFSET, optc->dlg_otg_param.vready_offset);
}
-static void tgn10_disable_stereo(struct timing_generator *tg)
+static void optc1_disable_stereo(struct timing_generator *optc)
{
- struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
REG_SET(OTG_STEREO_CONTROL, 0,
OTG_STEREO_EN, 0);
@@ -90,11 +91,6 @@ static void tgn10_disable_stereo(struct timing_generator *tg)
OTG_3D_STRUCTURE_EN, 0,
OTG_3D_STRUCTURE_V_UPDATE_MODE, 0,
OTG_3D_STRUCTURE_STEREO_SEL_OVR, 0);
-
- REG_UPDATE(OPPBUF_CONTROL,
- OPPBUF_ACTIVE_WIDTH, 0);
- REG_UPDATE(OPPBUF_3D_PARAMETERS_0,
- OPPBUF_3D_VACT_SPACE1_SIZE, 0);
}
/**
@@ -102,8 +98,8 @@ static void tgn10_disable_stereo(struct timing_generator *tg)
* Program CRTC Timing Registers - OTG_H_*, OTG_V_*, Pixel repetition.
* Including SYNC. Call BIOS command table to program Timings.
*/
-static void tgn10_program_timing(
- struct timing_generator *tg,
+void optc1_program_timing(
+ struct timing_generator *optc,
const struct dc_crtc_timing *dc_crtc_timing,
bool use_vbios)
{
@@ -121,10 +117,10 @@ static void tgn10_program_timing(
uint32_t h_div_2;
int32_t vertical_line_start;
- struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
patched_crtc_timing = *dc_crtc_timing;
- tgn10_apply_front_porch_workaround(tg, &patched_crtc_timing);
+ optc1_apply_front_porch_workaround(optc, &patched_crtc_timing);
/* Load horizontal timing */
@@ -217,7 +213,7 @@ static void tgn10_program_timing(
	/* Use OTG_VERTICAL_INTERRUPT2 to replace the VUPDATE interrupt,
	 * program the reg for the interrupt position.
*/
- vertical_line_start = asic_blank_end - tg->dlg_otg_param.vstartup_start + 1;
+ vertical_line_start = asic_blank_end - optc->dlg_otg_param.vstartup_start + 1;
if (vertical_line_start < 0) {
ASSERT(0);
vertical_line_start = 0;
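As a quick worked example of the computation above (illustrative numbers): with asic_blank_end = 35 and dlg_otg_param.vstartup_start = 30, the interrupt is requested at line 35 - 30 + 1 = 6; a sufficiently large vstartup_start drives the result negative, which the ASSERT and clamp above catch.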
@@ -233,26 +229,25 @@ static void tgn10_program_timing(
OTG_V_SYNC_A_POL, v_sync_polarity);
v_init = asic_blank_start;
- if (tg->dlg_otg_param.signal == SIGNAL_TYPE_DISPLAY_PORT ||
- tg->dlg_otg_param.signal == SIGNAL_TYPE_DISPLAY_PORT_MST ||
- tg->dlg_otg_param.signal == SIGNAL_TYPE_EDP) {
+ if (optc->dlg_otg_param.signal == SIGNAL_TYPE_DISPLAY_PORT ||
+ optc->dlg_otg_param.signal == SIGNAL_TYPE_DISPLAY_PORT_MST ||
+ optc->dlg_otg_param.signal == SIGNAL_TYPE_EDP) {
start_point = 1;
if (patched_crtc_timing.flags.INTERLACE == 1)
field_num = 1;
}
v_fp2 = 0;
- if (tg->dlg_otg_param.vstartup_start > asic_blank_end)
- v_fp2 = tg->dlg_otg_param.vstartup_start > asic_blank_end;
+ if (optc->dlg_otg_param.vstartup_start > asic_blank_end)
+ v_fp2 = optc->dlg_otg_param.vstartup_start > asic_blank_end;
/* Interlace */
if (patched_crtc_timing.flags.INTERLACE == 1) {
REG_UPDATE(OTG_INTERLACE_CONTROL,
OTG_INTERLACE_ENABLE, 1);
v_init = v_init / 2;
- if ((tg->dlg_otg_param.vstartup_start/2)*2 > asic_blank_end)
+ if ((optc->dlg_otg_param.vstartup_start/2)*2 > asic_blank_end)
v_fp2 = v_fp2 / 2;
- }
- else
+ } else
REG_UPDATE(OTG_INTERLACE_CONTROL,
OTG_INTERLACE_ENABLE, 0);
@@ -270,13 +265,13 @@ static void tgn10_program_timing(
OTG_START_POINT_CNTL, start_point,
OTG_FIELD_NUMBER_CNTL, field_num);
- tgn10_program_global_sync(tg);
+ optc1_program_global_sync(optc);
/* TODO
* patched_crtc_timing.flags.HORZ_COUNT_BY_TWO == 1
* program_horz_count_by_2
* for DVI 30bpp mode, 0 otherwise
- * program_horz_count_by_2(tg, &patched_crtc_timing);
+ * program_horz_count_by_2(optc, &patched_crtc_timing);
*/
/* Enable stereo - only when we need to pack 3D frame. Other types
@@ -290,9 +285,9 @@ static void tgn10_program_timing(
}
-static void tgn10_set_blank_data_double_buffer(struct timing_generator *tg, bool enable)
+static void optc1_set_blank_data_double_buffer(struct timing_generator *optc, bool enable)
{
- struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
uint32_t blank_data_double_buffer_enable = enable ? 1 : 0;
@@ -304,9 +299,9 @@ static void tgn10_set_blank_data_double_buffer(struct timing_generator *tg, bool
* unblank_crtc
* Call ASIC Control Object to UnBlank CRTC.
*/
-static void tgn10_unblank_crtc(struct timing_generator *tg)
+static void optc1_unblank_crtc(struct timing_generator *optc)
{
- struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
uint32_t vertical_interrupt_enable = 0;
REG_GET(OTG_VERTICAL_INTERRUPT2_CONTROL,
@@ -316,7 +311,7 @@ static void tgn10_unblank_crtc(struct timing_generator *tg)
* this check will be removed.
*/
if (vertical_interrupt_enable)
- tgn10_set_blank_data_double_buffer(tg, true);
+ optc1_set_blank_data_double_buffer(optc, true);
REG_UPDATE_2(OTG_BLANK_CONTROL,
OTG_BLANK_DATA_EN, 0,
@@ -328,36 +323,29 @@ static void tgn10_unblank_crtc(struct timing_generator *tg)
* Call ASIC Control Object to Blank CRTC.
*/
-static void tgn10_blank_crtc(struct timing_generator *tg)
+static void optc1_blank_crtc(struct timing_generator *optc)
{
- struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
REG_UPDATE_2(OTG_BLANK_CONTROL,
OTG_BLANK_DATA_EN, 1,
OTG_BLANK_DE_MODE, 0);
- /* todo: why are we waiting for BLANK_DATA_EN? shouldn't we be waiting
- * for status?
- */
- REG_WAIT(OTG_BLANK_CONTROL,
- OTG_BLANK_DATA_EN, 1,
- 1, 100000);
-
- tgn10_set_blank_data_double_buffer(tg, false);
+ optc1_set_blank_data_double_buffer(optc, false);
}
-static void tgn10_set_blank(struct timing_generator *tg,
+void optc1_set_blank(struct timing_generator *optc,
bool enable_blanking)
{
if (enable_blanking)
- tgn10_blank_crtc(tg);
+ optc1_blank_crtc(optc);
else
- tgn10_unblank_crtc(tg);
+ optc1_unblank_crtc(optc);
}
-static bool tgn10_is_blanked(struct timing_generator *tg)
+bool optc1_is_blanked(struct timing_generator *optc)
{
- struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
uint32_t blank_en;
uint32_t blank_state;
@@ -368,9 +356,9 @@ static bool tgn10_is_blanked(struct timing_generator *tg)
return blank_en && blank_state;
}
-static void tgn10_enable_optc_clock(struct timing_generator *tg, bool enable)
+void optc1_enable_optc_clock(struct timing_generator *optc, bool enable)
{
- struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
if (enable) {
REG_UPDATE_2(OPTC_INPUT_CLOCK_CONTROL,
@@ -403,19 +391,19 @@ static void tgn10_enable_optc_clock(struct timing_generator *tg, bool enable)
* Enable CRTC
* Enable CRTC - call ASIC Control Object to enable Timing generator.
*/
-static bool tgn10_enable_crtc(struct timing_generator *tg)
+static bool optc1_enable_crtc(struct timing_generator *optc)
{
/* TODO FPGA wait for answer
* OTG_MASTER_UPDATE_MODE != CRTC_MASTER_UPDATE_MODE
* OTG_MASTER_UPDATE_LOCK != CRTC_MASTER_UPDATE_LOCK
*/
- struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
	/* opp instance for OTG. For DCN1.0, ODM is removed.
	 * OPP and OPTC should have a 1:1 mapping
*/
REG_UPDATE(OPTC_DATA_SOURCE_SELECT,
- OPTC_SRC_SEL, tg->inst);
+ OPTC_SRC_SEL, optc->inst);
/* VTG enable first is for HW workaround */
REG_UPDATE(CONTROL,
@@ -430,9 +418,9 @@ static bool tgn10_enable_crtc(struct timing_generator *tg)
}
/* disable_crtc - call ASIC Control Object to disable Timing generator. */
-static bool tgn10_disable_crtc(struct timing_generator *tg)
+bool optc1_disable_crtc(struct timing_generator *optc)
{
- struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
/* disable otg request until end of the first line
* in the vertical blank region
@@ -453,11 +441,11 @@ static bool tgn10_disable_crtc(struct timing_generator *tg)
}
-static void tgn10_program_blank_color(
- struct timing_generator *tg,
+void optc1_program_blank_color(
+ struct timing_generator *optc,
const struct tg_color *black_color)
{
- struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
REG_SET_3(OTG_BLACK_COLOR, 0,
OTG_BLACK_COLOR_B_CB, black_color->color_b_cb,
@@ -465,15 +453,15 @@ static void tgn10_program_blank_color(
OTG_BLACK_COLOR_R_CR, black_color->color_r_cr);
}
-static bool tgn10_validate_timing(
- struct timing_generator *tg,
+bool optc1_validate_timing(
+ struct timing_generator *optc,
const struct dc_crtc_timing *timing)
{
uint32_t interlace_factor;
uint32_t v_blank;
uint32_t h_blank;
uint32_t min_v_blank;
- struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
ASSERT(timing != NULL);
@@ -503,19 +491,19 @@ static bool tgn10_validate_timing(
* needs more than 8192 horizontal and
* more than 8192 vertical total pixels)
*/
- if (timing->h_total > tgn10->max_h_total ||
- timing->v_total > tgn10->max_v_total)
+ if (timing->h_total > optc1->max_h_total ||
+ timing->v_total > optc1->max_v_total)
return false;
- if (h_blank < tgn10->min_h_blank)
+ if (h_blank < optc1->min_h_blank)
return false;
- if (timing->h_sync_width < tgn10->min_h_sync_width ||
- timing->v_sync_width < tgn10->min_v_sync_width)
+ if (timing->h_sync_width < optc1->min_h_sync_width ||
+ timing->v_sync_width < optc1->min_v_sync_width)
return false;
- min_v_blank = timing->flags.INTERLACE?tgn10->min_v_blank_interlace:tgn10->min_v_blank;
+ min_v_blank = timing->flags.INTERLACE?optc1->min_v_blank_interlace:optc1->min_v_blank;
if (v_blank < min_v_blank)
return false;
@@ -532,15 +520,15 @@ static bool tgn10_validate_timing(
* holds the counter of frames.
*
* @param
- * struct timing_generator *tg - [in] timing generator which controls the
+ * struct timing_generator *optc - [in] timing generator which controls the
* desired CRTC
*
* @return
 * Counter of frames, which should equal the number of vblanks.
*/
-static uint32_t tgn10_get_vblank_counter(struct timing_generator *tg)
+uint32_t optc1_get_vblank_counter(struct timing_generator *optc)
{
- struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
uint32_t frame_count;
REG_GET(OTG_STATUS_FRAME_COUNT,
@@ -549,34 +537,34 @@ static uint32_t tgn10_get_vblank_counter(struct timing_generator *tg)
return frame_count;
}
-static void tgn10_lock(struct timing_generator *tg)
+void optc1_lock(struct timing_generator *optc)
{
- struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
REG_SET(OTG_GLOBAL_CONTROL0, 0,
- OTG_MASTER_UPDATE_LOCK_SEL, tg->inst);
+ OTG_MASTER_UPDATE_LOCK_SEL, optc->inst);
REG_SET(OTG_MASTER_UPDATE_LOCK, 0,
OTG_MASTER_UPDATE_LOCK, 1);
/* Should be fast, status does not update on maximus */
- if (tg->ctx->dce_environment != DCE_ENV_FPGA_MAXIMUS)
+ if (optc->ctx->dce_environment != DCE_ENV_FPGA_MAXIMUS)
REG_WAIT(OTG_MASTER_UPDATE_LOCK,
UPDATE_LOCK_STATUS, 1,
1, 10);
}
-static void tgn10_unlock(struct timing_generator *tg)
+void optc1_unlock(struct timing_generator *optc)
{
- struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
REG_SET(OTG_MASTER_UPDATE_LOCK, 0,
OTG_MASTER_UPDATE_LOCK, 0);
}
-static void tgn10_get_position(struct timing_generator *tg,
+void optc1_get_position(struct timing_generator *optc,
struct crtc_position *position)
{
- struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
REG_GET_2(OTG_STATUS_POSITION,
OTG_HORZ_COUNT, &position->horizontal_count,
@@ -586,12 +574,12 @@ static void tgn10_get_position(struct timing_generator *tg,
OTG_VERT_COUNT_NOM, &position->nominal_vcount);
}
-static bool tgn10_is_counter_moving(struct timing_generator *tg)
+bool optc1_is_counter_moving(struct timing_generator *optc)
{
struct crtc_position position1, position2;
- tg->funcs->get_position(tg, &position1);
- tg->funcs->get_position(tg, &position2);
+ optc->funcs->get_position(optc, &position1);
+ optc->funcs->get_position(optc, &position2);
if (position1.horizontal_count == position2.horizontal_count &&
position1.vertical_count == position2.vertical_count)
@@ -600,10 +588,10 @@ static bool tgn10_is_counter_moving(struct timing_generator *tg)
return true;
}
-static bool tgn10_did_triggered_reset_occur(
- struct timing_generator *tg)
+bool optc1_did_triggered_reset_occur(
+ struct timing_generator *optc)
{
- struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
uint32_t occurred_force, occurred_vsync;
REG_GET(OTG_FORCE_COUNT_NOW_CNTL,
@@ -615,9 +603,9 @@ static bool tgn10_did_triggered_reset_occur(
return occurred_vsync != 0 || occurred_force != 0;
}
-static void tgn10_disable_reset_trigger(struct timing_generator *tg)
+void optc1_disable_reset_trigger(struct timing_generator *optc)
{
- struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
REG_WRITE(OTG_TRIGA_CNTL, 0);
@@ -628,9 +616,9 @@ static void tgn10_disable_reset_trigger(struct timing_generator *tg)
OTG_FORCE_VSYNC_NEXT_LINE_CLEAR, 1);
}
-static void tgn10_enable_reset_trigger(struct timing_generator *tg, int source_tg_inst)
+void optc1_enable_reset_trigger(struct timing_generator *optc, int source_tg_inst)
{
- struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
uint32_t falling_edge;
REG_GET(OTG_V_SYNC_A_CNTL,
@@ -662,12 +650,12 @@ static void tgn10_enable_reset_trigger(struct timing_generator *tg, int source_t
OTG_FORCE_COUNT_NOW_MODE, 2);
}
-void tgn10_enable_crtc_reset(
- struct timing_generator *tg,
+void optc1_enable_crtc_reset(
+ struct timing_generator *optc,
int source_tg_inst,
struct crtc_trigger_info *crtc_tp)
{
- struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
uint32_t falling_edge = 0;
uint32_t rising_edge = 0;
@@ -707,10 +695,10 @@ void tgn10_enable_crtc_reset(
}
}
-static void tgn10_wait_for_state(struct timing_generator *tg,
+void optc1_wait_for_state(struct timing_generator *optc,
enum crtc_state state)
{
- struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
switch (state) {
case CRTC_STATE_VBLANK:
@@ -730,8 +718,8 @@ static void tgn10_wait_for_state(struct timing_generator *tg,
}
}
-static void tgn10_set_early_control(
- struct timing_generator *tg,
+void optc1_set_early_control(
+ struct timing_generator *optc,
uint32_t early_cntl)
{
/* asic design change, do not need this control
@@ -740,11 +728,11 @@ static void tgn10_set_early_control(
}
-static void tgn10_set_static_screen_control(
- struct timing_generator *tg,
+void optc1_set_static_screen_control(
+ struct timing_generator *optc,
uint32_t value)
{
- struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
/* Bit 8 is no longer applicable in RV for PSR case,
* set bit 8 to 0 if given
@@ -769,11 +757,11 @@ static void tgn10_set_static_screen_control(
*
*****************************************************************************
*/
-static void tgn10_set_drr(
- struct timing_generator *tg,
+void optc1_set_drr(
+ struct timing_generator *optc,
const struct drr_params *params)
{
- struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
if (params != NULL &&
params->vertical_total_max > 0 &&
@@ -806,15 +794,15 @@ static void tgn10_set_drr(
}
}
-static void tgn10_set_test_pattern(
- struct timing_generator *tg,
+static void optc1_set_test_pattern(
+ struct timing_generator *optc,
/* TODO: replace 'controller_dp_test_pattern' by 'test_pattern_mode'
* because this is not DP-specific (which is probably somewhere in DP
* encoder) */
enum controller_dp_test_pattern test_pattern,
enum dc_color_depth color_depth)
{
- struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
enum test_pattern_color_format bit_depth;
enum test_pattern_dyn_range dyn_range;
enum test_pattern_mode mode;
@@ -1065,35 +1053,30 @@ static void tgn10_set_test_pattern(
}
}
-static void tgn10_get_crtc_scanoutpos(
- struct timing_generator *tg,
+void optc1_get_crtc_scanoutpos(
+ struct timing_generator *optc,
uint32_t *v_blank_start,
uint32_t *v_blank_end,
uint32_t *h_position,
uint32_t *v_position)
{
- struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
struct crtc_position position;
REG_GET_2(OTG_V_BLANK_START_END,
OTG_V_BLANK_START, v_blank_start,
OTG_V_BLANK_END, v_blank_end);
- tgn10_get_position(tg, &position);
+ optc1_get_position(optc, &position);
*h_position = position.horizontal_count;
*v_position = position.vertical_count;
}
-
-
-static void tgn10_enable_stereo(struct timing_generator *tg,
+static void optc1_enable_stereo(struct timing_generator *optc,
const struct dc_crtc_timing *timing, struct crtc_stereo_flags *flags)
{
- struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
-
- uint32_t active_width = timing->h_addressable;
- uint32_t space1_size = timing->v_total - timing->v_addressable;
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
if (flags) {
uint32_t stereo_en;
@@ -1121,29 +1104,23 @@ static void tgn10_enable_stereo(struct timing_generator *tg,
OTG_3D_STRUCTURE_STEREO_SEL_OVR, flags->FRAME_PACKED);
}
-
- REG_UPDATE(OPPBUF_CONTROL,
- OPPBUF_ACTIVE_WIDTH, active_width);
-
- REG_UPDATE(OPPBUF_3D_PARAMETERS_0,
- OPPBUF_3D_VACT_SPACE1_SIZE, space1_size);
}
-static void tgn10_program_stereo(struct timing_generator *tg,
+void optc1_program_stereo(struct timing_generator *optc,
const struct dc_crtc_timing *timing, struct crtc_stereo_flags *flags)
{
if (flags->PROGRAM_STEREO)
- tgn10_enable_stereo(tg, timing, flags);
+ optc1_enable_stereo(optc, timing, flags);
else
- tgn10_disable_stereo(tg);
+ optc1_disable_stereo(optc);
}
-static bool tgn10_is_stereo_left_eye(struct timing_generator *tg)
+bool optc1_is_stereo_left_eye(struct timing_generator *optc)
{
bool ret = false;
uint32_t left_eye = 0;
- struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
REG_GET(OTG_STEREO_STATUS,
OTG_STEREO_CURRENT_EYE, &left_eye);
@@ -1155,7 +1132,7 @@ static bool tgn10_is_stereo_left_eye(struct timing_generator *tg)
return ret;
}
-void tgn10_read_otg_state(struct dcn10_timing_generator *tgn10,
+void optc1_read_otg_state(struct optc *optc1,
struct dcn_otg_state *s)
{
REG_GET(OTG_CONTROL,
@@ -1199,17 +1176,22 @@ void tgn10_read_otg_state(struct dcn10_timing_generator *tgn10,
OPTC_UNDERFLOW_OCCURRED_STATUS, &s->underflow_occurred_status);
}
-static void tgn10_tg_init(struct timing_generator *tg)
+static void optc1_clear_optc_underflow(struct timing_generator *optc)
{
- struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
- tgn10_set_blank_data_double_buffer(tg, true);
REG_UPDATE(OPTC_INPUT_GLOBAL_CONTROL, OPTC_UNDERFLOW_CLEAR, 1);
}
-static bool tgn10_is_tg_enabled(struct timing_generator *tg)
+static void optc1_tg_init(struct timing_generator *optc)
+{
+ optc1_set_blank_data_double_buffer(optc, true);
+ optc1_clear_optc_underflow(optc);
+}
+
+static bool optc1_is_tg_enabled(struct timing_generator *optc)
{
- struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
uint32_t otg_enabled = 0;
REG_GET(OTG_CONTROL, OTG_MASTER_EN, &otg_enabled);
@@ -1217,50 +1199,65 @@ static bool tgn10_is_tg_enabled(struct timing_generator *tg)
return (otg_enabled != 0);
}
+
+static bool optc1_is_optc_underflow_occurred(struct timing_generator *optc)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+ uint32_t underflow_occurred = 0;
+
+ REG_GET(OPTC_INPUT_GLOBAL_CONTROL,
+ OPTC_UNDERFLOW_OCCURRED_STATUS,
+ &underflow_occurred);
+
+ return (underflow_occurred == 1);
+}
+
static const struct timing_generator_funcs dcn10_tg_funcs = {
- .validate_timing = tgn10_validate_timing,
- .program_timing = tgn10_program_timing,
- .program_global_sync = tgn10_program_global_sync,
- .enable_crtc = tgn10_enable_crtc,
- .disable_crtc = tgn10_disable_crtc,
+ .validate_timing = optc1_validate_timing,
+ .program_timing = optc1_program_timing,
+ .program_global_sync = optc1_program_global_sync,
+ .enable_crtc = optc1_enable_crtc,
+ .disable_crtc = optc1_disable_crtc,
	/* used by enable_timing_synchronization. Not needed for FPGA */
- .is_counter_moving = tgn10_is_counter_moving,
- .get_position = tgn10_get_position,
- .get_frame_count = tgn10_get_vblank_counter,
- .get_scanoutpos = tgn10_get_crtc_scanoutpos,
- .set_early_control = tgn10_set_early_control,
+ .is_counter_moving = optc1_is_counter_moving,
+ .get_position = optc1_get_position,
+ .get_frame_count = optc1_get_vblank_counter,
+ .get_scanoutpos = optc1_get_crtc_scanoutpos,
+ .set_early_control = optc1_set_early_control,
	/* used by enable_timing_synchronization. Not needed for FPGA */
- .wait_for_state = tgn10_wait_for_state,
- .set_blank = tgn10_set_blank,
- .is_blanked = tgn10_is_blanked,
- .set_blank_color = tgn10_program_blank_color,
- .did_triggered_reset_occur = tgn10_did_triggered_reset_occur,
- .enable_reset_trigger = tgn10_enable_reset_trigger,
- .enable_crtc_reset = tgn10_enable_crtc_reset,
- .disable_reset_trigger = tgn10_disable_reset_trigger,
- .lock = tgn10_lock,
- .unlock = tgn10_unlock,
- .enable_optc_clock = tgn10_enable_optc_clock,
- .set_drr = tgn10_set_drr,
- .set_static_screen_control = tgn10_set_static_screen_control,
- .set_test_pattern = tgn10_set_test_pattern,
- .program_stereo = tgn10_program_stereo,
- .is_stereo_left_eye = tgn10_is_stereo_left_eye,
- .set_blank_data_double_buffer = tgn10_set_blank_data_double_buffer,
- .tg_init = tgn10_tg_init,
- .is_tg_enabled = tgn10_is_tg_enabled,
+ .wait_for_state = optc1_wait_for_state,
+ .set_blank = optc1_set_blank,
+ .is_blanked = optc1_is_blanked,
+ .set_blank_color = optc1_program_blank_color,
+ .did_triggered_reset_occur = optc1_did_triggered_reset_occur,
+ .enable_reset_trigger = optc1_enable_reset_trigger,
+ .enable_crtc_reset = optc1_enable_crtc_reset,
+ .disable_reset_trigger = optc1_disable_reset_trigger,
+ .lock = optc1_lock,
+ .unlock = optc1_unlock,
+ .enable_optc_clock = optc1_enable_optc_clock,
+ .set_drr = optc1_set_drr,
+ .set_static_screen_control = optc1_set_static_screen_control,
+ .set_test_pattern = optc1_set_test_pattern,
+ .program_stereo = optc1_program_stereo,
+ .is_stereo_left_eye = optc1_is_stereo_left_eye,
+ .set_blank_data_double_buffer = optc1_set_blank_data_double_buffer,
+ .tg_init = optc1_tg_init,
+ .is_tg_enabled = optc1_is_tg_enabled,
+ .is_optc_underflow_occurred = optc1_is_optc_underflow_occurred,
+ .clear_optc_underflow = optc1_clear_optc_underflow,
};
-void dcn10_timing_generator_init(struct dcn10_timing_generator *tgn10)
+void dcn10_timing_generator_init(struct optc *optc1)
{
- tgn10->base.funcs = &dcn10_tg_funcs;
+ optc1->base.funcs = &dcn10_tg_funcs;
- tgn10->max_h_total = tgn10->tg_mask->OTG_H_TOTAL + 1;
- tgn10->max_v_total = tgn10->tg_mask->OTG_V_TOTAL + 1;
+ optc1->max_h_total = optc1->tg_mask->OTG_H_TOTAL + 1;
+ optc1->max_v_total = optc1->tg_mask->OTG_V_TOTAL + 1;
- tgn10->min_h_blank = 32;
- tgn10->min_v_blank = 3;
- tgn10->min_v_blank_interlace = 5;
- tgn10->min_h_sync_width = 8;
- tgn10->min_v_sync_width = 1;
+ optc1->min_h_blank = 32;
+ optc1->min_v_blank = 3;
+ optc1->min_v_blank_interlace = 5;
+ optc1->min_h_sync_width = 8;
+ optc1->min_v_sync_width = 1;
}
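As a concrete reading of the initialization above: the timing limits are derived from the register field masks themselves, so if OTG_H_TOTAL were, say, a 15-bit field (mask 0x7FFF), max_h_total would come out as 0x8000 = 32768; OTG_V_TOTAL and max_v_total follow the same pattern.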
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_timing_generator.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
index bb1cbfdc3554..a3c7c2012f05 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
@@ -29,7 +29,7 @@
#include "timing_generator.h"
#define DCN10TG_FROM_TG(tg)\
- container_of(tg, struct dcn10_timing_generator, base)
+ container_of(tg, struct optc, base)
#define TG_COMMON_REG_LIST_DCN(inst) \
SRI(OTG_VSTARTUP_PARAM, OTG, inst),\
@@ -70,8 +70,6 @@
SRI(OPTC_INPUT_CLOCK_CONTROL, ODM, inst),\
SRI(OPTC_DATA_SOURCE_SELECT, ODM, inst),\
SRI(OPTC_INPUT_GLOBAL_CONTROL, ODM, inst),\
- SRI(OPPBUF_CONTROL, OPPBUF, inst),\
- SRI(OPPBUF_3D_PARAMETERS_0, OPPBUF, inst),\
SRI(CONTROL, VTG, inst),\
SRI(OTG_VERT_SYNC_CONTROL, OTG, inst),\
SRI(OTG_MASTER_UPDATE_MODE, OTG, inst),\
@@ -84,7 +82,7 @@
SRI(OTG_TEST_PATTERN_COLOR, OTG, inst)
-struct dcn_tg_registers {
+struct dcn_optc_registers {
uint32_t OTG_VERT_SYNC_CONTROL;
uint32_t OTG_MASTER_UPDATE_MODE;
uint32_t OTG_GSL_CONTROL;
@@ -129,9 +127,11 @@ struct dcn_tg_registers {
uint32_t OPTC_INPUT_CLOCK_CONTROL;
uint32_t OPTC_DATA_SOURCE_SELECT;
uint32_t OPTC_INPUT_GLOBAL_CONTROL;
- uint32_t OPPBUF_CONTROL;
- uint32_t OPPBUF_3D_PARAMETERS_0;
uint32_t CONTROL;
+ uint32_t OTG_GSL_WINDOW_X;
+ uint32_t OTG_GSL_WINDOW_Y;
+ uint32_t OTG_VUPDATE_KEEPOUT;
+ uint32_t OTG_DSC_START_POSITION;
};
#define TG_COMMON_MASK_SH_LIST_DCN(mask_sh)\
@@ -211,8 +211,6 @@ struct dcn_tg_registers {
SF(ODM0_OPTC_INPUT_CLOCK_CONTROL, OPTC_INPUT_CLK_GATE_DIS, mask_sh),\
SF(ODM0_OPTC_INPUT_GLOBAL_CONTROL, OPTC_UNDERFLOW_OCCURRED_STATUS, mask_sh),\
SF(ODM0_OPTC_INPUT_GLOBAL_CONTROL, OPTC_UNDERFLOW_CLEAR, mask_sh),\
- SF(OPPBUF0_OPPBUF_CONTROL, OPPBUF_ACTIVE_WIDTH, mask_sh),\
- SF(OPPBUF0_OPPBUF_3D_PARAMETERS_0, OPPBUF_3D_VACT_SPACE1_SIZE, mask_sh),\
SF(VTG0_CONTROL, VTG0_ENABLE, mask_sh),\
SF(VTG0_CONTROL, VTG0_FP2, mask_sh),\
SF(VTG0_CONTROL, VTG0_VCOUNT_INIT, mask_sh),\
@@ -332,8 +330,6 @@ struct dcn_tg_registers {
type OPTC_SEG0_SRC_SEL;\
type OPTC_UNDERFLOW_OCCURRED_STATUS;\
type OPTC_UNDERFLOW_CLEAR;\
- type OPPBUF_ACTIVE_WIDTH;\
- type OPPBUF_3D_VACT_SPACE1_SIZE;\
type VTG0_ENABLE;\
type VTG0_FP2;\
type VTG0_VCOUNT_INIT;\
@@ -346,22 +342,35 @@ struct dcn_tg_registers {
type OTG_GSL2_EN;\
type OTG_GSL_MASTER_EN;\
type OTG_GSL_FORCE_DELAY;\
- type OTG_GSL_CHECK_ALL_FIELDS;
+ type OTG_GSL_CHECK_ALL_FIELDS;\
+ type OTG_GSL_WINDOW_START_X;\
+ type OTG_GSL_WINDOW_END_X;\
+ type OTG_GSL_WINDOW_START_Y;\
+ type OTG_GSL_WINDOW_END_Y;\
+ type OTG_RANGE_TIMING_DBUF_UPDATE_MODE;\
+ type OTG_GSL_MASTER_MODE;\
+ type OTG_MASTER_UPDATE_LOCK_GSL_EN;\
+ type MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET;\
+ type MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET;\
+ type OTG_DSC_START_POSITION_X;\
+ type OTG_DSC_START_POSITION_LINE_NUM;\
+ type OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN;
-struct dcn_tg_shift {
+
+struct dcn_optc_shift {
TG_REG_FIELD_LIST(uint8_t)
};
-struct dcn_tg_mask {
+struct dcn_optc_mask {
TG_REG_FIELD_LIST(uint32_t)
};
-struct dcn10_timing_generator {
+struct optc {
struct timing_generator base;
- const struct dcn_tg_registers *tg_regs;
- const struct dcn_tg_shift *tg_shift;
- const struct dcn_tg_mask *tg_mask;
+ const struct dcn_optc_registers *tg_regs;
+ const struct dcn_optc_shift *tg_shift;
+ const struct dcn_optc_mask *tg_mask;
enum controller_id controller_id;
@@ -376,7 +385,7 @@ struct dcn10_timing_generator {
uint32_t min_v_blank_interlace;
};
-void dcn10_timing_generator_init(struct dcn10_timing_generator *tg);
+void dcn10_timing_generator_init(struct optc *optc);
struct dcn_otg_state {
uint32_t v_blank_start;
@@ -397,7 +406,77 @@ struct dcn_otg_state {
uint32_t otg_enabled;
};
-void tgn10_read_otg_state(struct dcn10_timing_generator *tgn10,
+void optc1_read_otg_state(struct optc *optc1,
struct dcn_otg_state *s);
+bool optc1_validate_timing(
+ struct timing_generator *optc,
+ const struct dc_crtc_timing *timing);
+
+void optc1_program_timing(
+ struct timing_generator *optc,
+ const struct dc_crtc_timing *dc_crtc_timing,
+ bool use_vbios);
+
+void optc1_program_global_sync(
+ struct timing_generator *optc);
+
+bool optc1_disable_crtc(struct timing_generator *optc);
+
+bool optc1_is_counter_moving(struct timing_generator *optc);
+
+void optc1_get_position(struct timing_generator *optc,
+ struct crtc_position *position);
+
+uint32_t optc1_get_vblank_counter(struct timing_generator *optc);
+
+void optc1_get_crtc_scanoutpos(
+ struct timing_generator *optc,
+ uint32_t *v_blank_start,
+ uint32_t *v_blank_end,
+ uint32_t *h_position,
+ uint32_t *v_position);
+
+void optc1_set_early_control(
+ struct timing_generator *optc,
+ uint32_t early_cntl);
+
+void optc1_wait_for_state(struct timing_generator *optc,
+ enum crtc_state state);
+
+void optc1_set_blank(struct timing_generator *optc,
+ bool enable_blanking);
+
+bool optc1_is_blanked(struct timing_generator *optc);
+
+void optc1_program_blank_color(
+ struct timing_generator *optc,
+ const struct tg_color *black_color);
+
+bool optc1_did_triggered_reset_occur(
+ struct timing_generator *optc);
+
+void optc1_enable_reset_trigger(struct timing_generator *optc, int source_tg_inst);
+
+void optc1_disable_reset_trigger(struct timing_generator *optc);
+
+void optc1_lock(struct timing_generator *optc);
+
+void optc1_unlock(struct timing_generator *optc);
+
+void optc1_enable_optc_clock(struct timing_generator *optc, bool enable);
+
+void optc1_set_drr(
+ struct timing_generator *optc,
+ const struct drr_params *params);
+
+void optc1_set_static_screen_control(
+ struct timing_generator *optc,
+ uint32_t value);
+
+void optc1_program_stereo(struct timing_generator *optc,
+ const struct dc_crtc_timing *timing, struct crtc_stereo_flags *flags);
+
+bool optc1_is_stereo_left_eye(struct timing_generator *optc);
+
#endif /* __DC_TIMING_GENERATOR_DCN10_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index 10cce51d31d2..44825e2c9ebb 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -34,7 +34,7 @@
#include "dcn10/dcn10_mpc.h"
#include "irq/dcn10/irq_service_dcn10.h"
#include "dcn10/dcn10_dpp.h"
-#include "dcn10/dcn10_timing_generator.h"
+#include "dcn10_optc.h"
#include "dcn10/dcn10_hw_sequencer.h"
#include "dce110/dce110_hw_sequencer.h"
#include "dcn10/dcn10_opp.h"
@@ -348,18 +348,18 @@ static const struct dcn_mpc_mask mpc_mask = {
#define tg_regs(id)\
[id] = {TG_COMMON_REG_LIST_DCN1_0(id)}
-static const struct dcn_tg_registers tg_regs[] = {
+static const struct dcn_optc_registers tg_regs[] = {
tg_regs(0),
tg_regs(1),
tg_regs(2),
tg_regs(3),
};
-static const struct dcn_tg_shift tg_shift = {
+static const struct dcn_optc_shift tg_shift = {
TG_COMMON_MASK_SH_LIST_DCN1_0(__SHIFT)
};
-static const struct dcn_tg_mask tg_mask = {
+static const struct dcn_optc_mask tg_mask = {
TG_COMMON_MASK_SH_LIST_DCN1_0(_MASK)
};
@@ -553,8 +553,8 @@ static struct timing_generator *dcn10_timing_generator_create(
struct dc_context *ctx,
uint32_t instance)
{
- struct dcn10_timing_generator *tgn10 =
- kzalloc(sizeof(struct dcn10_timing_generator), GFP_KERNEL);
+ struct optc *tgn10 =
+ kzalloc(sizeof(struct optc), GFP_KERNEL);
if (!tgn10)
return NULL;
@@ -678,6 +678,7 @@ static struct dce_hwseq *dcn10_hwseq_create(
hws->shifts = &hwseq_shift;
hws->masks = &hwseq_mask;
hws->wa.DEGVIDCN10_253 = true;
+ hws->wa.false_optc_underflow = true;
}
return hws;
}
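To show how the new false_optc_underflow workaround flag might be consumed together with the underflow hooks added to the timing generator funcs, here is an illustrative helper; the function name and call site are hypothetical and not part of this patch.

	static void apply_false_optc_underflow_wa(struct dce_hwseq *hws,
						  struct timing_generator *tg)
	{
		if (!hws->wa.false_optc_underflow)
			return;

		/* Clear an underflow status that was latched spuriously while
		 * the OTG was being reprogrammed.
		 */
		if (tg->funcs->is_tg_enabled(tg) &&
		    tg->funcs->is_optc_underflow_occurred(tg))
			tg->funcs->clear_optc_underflow(tg);
	}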
diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile
index 87bab8e8139f..3488af2b5786 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile
@@ -1,4 +1,25 @@
#
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
# Makefile for the 'utils' sub-component of DAL.
# It provides the general basic services required by other DAL
# subcomponents.
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
index 4c31fa54af39..c109b2c34c8f 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
@@ -35,35 +35,6 @@ static void set_soc_bounding_box(struct _vcs_dpi_soc_bounding_box_st *soc, enum
soc->writeback_latency_us = 12.0;
soc->ideal_dram_bw_after_urgent_percent = 80.0;
soc->max_request_size_bytes = 256;
-
- soc->vmin.dcfclk_mhz = 300.0;
- soc->vmin.dispclk_mhz = 608.0;
- soc->vmin.dppclk_mhz = 435.0;
- soc->vmin.dram_bw_per_chan_gbps = 12.8;
- soc->vmin.phyclk_mhz = 540.0;
- soc->vmin.socclk_mhz = 208.0;
-
- soc->vmid.dcfclk_mhz = 600.0;
- soc->vmid.dispclk_mhz = 661.0;
- soc->vmid.dppclk_mhz = 661.0;
- soc->vmid.dram_bw_per_chan_gbps = 12.8;
- soc->vmid.phyclk_mhz = 540.0;
- soc->vmid.socclk_mhz = 208.0;
-
- soc->vnom.dcfclk_mhz = 600.0;
- soc->vnom.dispclk_mhz = 661.0;
- soc->vnom.dppclk_mhz = 661.0;
- soc->vnom.dram_bw_per_chan_gbps = 38.4;
- soc->vnom.phyclk_mhz = 810;
- soc->vnom.socclk_mhz = 208.0;
-
- soc->vmax.dcfclk_mhz = 600.0;
- soc->vmax.dispclk_mhz = 1086.0;
- soc->vmax.dppclk_mhz = 661.0;
- soc->vmax.dram_bw_per_chan_gbps = 38.4;
- soc->vmax.phyclk_mhz = 810.0;
- soc->vmax.socclk_mhz = 208.0;
-
soc->downspread_percent = 0.5;
soc->dram_page_open_time_ns = 50.0;
soc->dram_rw_turnaround_time_ns = 17.5;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
index 2d9d6298f0d3..aeebd8bee628 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
@@ -79,10 +79,6 @@ struct _vcs_dpi_soc_bounding_box_st {
double writeback_latency_us;
double ideal_dram_bw_after_urgent_percent;
unsigned int max_request_size_bytes;
- struct _vcs_dpi_voltage_scaling_st vmin;
- struct _vcs_dpi_voltage_scaling_st vmid;
- struct _vcs_dpi_voltage_scaling_st vnom;
- struct _vcs_dpi_voltage_scaling_st vmax;
double downspread_percent;
double dram_page_open_time_ns;
double dram_rw_turnaround_time_ns;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
index 1f337ecfeab0..260e113fcc02 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
@@ -28,6 +28,15 @@
#include "dml_inline_defs.h"
+/*
+ * NOTE:
+ * This file is gcc-parseable HW gospel, coming straight from HW engineers.
+ *
+ * It doesn't adhere to Linux kernel style and sometimes will do things in odd
+ * ways. Unless there is something clearly wrong with it, the code should
+ * remain as-is, as it provides us with a guarantee from HW that it is correct.
+ */
+
#define BPP_INVALID 0
#define BPP_BLENDED_PIPE 0xffffffff
static const unsigned int NumberOfStates = DC__VOLTAGE_STATES;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_calc.c b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_calc.c
index 8ba962df42e6..325dd2b757d6 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_calc.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_calc.c
@@ -27,6 +27,15 @@
#include "display_mode_vba.h"
#include "display_rq_dlg_calc.h"
+/*
+ * NOTE:
+ * This file is gcc-parseable HW gospel, coming straight from HW engineers.
+ *
+ * It doesn't adhere to Linux kernel style and sometimes will do things in odd
+ * ways. Unless there is something clearly wrong with it, the code should
+ * remain as-is, as it provides us with a guarantee from HW that it is correct.
+ */
+
static void calculate_ttu_cursor(struct display_mode_lib *mode_lib,
double *refcyc_per_req_delivery_pre_cur,
double *refcyc_per_req_delivery_cur,
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
index 1e4b1e383401..c2037daa8e66 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
@@ -28,6 +28,15 @@
#include "dml_inline_defs.h"
+/*
+ * NOTE:
+ * This file is gcc-parseable HW gospel, coming straight from HW engineers.
+ *
+ * It doesn't adhere to Linux kernel style and sometimes will do things in odd
+ * ways. Unless there is something clearly wrong with it, the code should
+ * remain as-is, as it provides us with a guarantee from HW that it is correct.
+ */
+
static unsigned int get_bytes_per_element(enum source_format_class source_format, bool is_chroma)
{
unsigned int ret_val = 0;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/soc_bounding_box.c b/drivers/gpu/drm/amd/display/dc/dml/soc_bounding_box.c
index bc7d8c707221..324239c77958 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/soc_bounding_box.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/soc_bounding_box.c
@@ -27,6 +27,16 @@
#include "dc_features.h"
#include "dml_inline_defs.h"
+
+/*
+ * NOTE:
+ * This file is gcc-parseable HW gospel, coming straight from HW engineers.
+ *
+ * It doesn't adhere to Linux kernel style and sometimes will do things in odd
+ * ways. Unless there is something clearly wrong with it, the code should
+ * remain as-is, as it provides us with a guarantee from HW that it is correct.
+ */
+
void dml_socbb_set_latencies(soc_bounding_box_st *to_box, soc_bounding_box_st *from_box)
{
to_box->dram_clock_change_latency_us = from_box->dram_clock_change_latency_us;
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/Makefile b/drivers/gpu/drm/amd/display/dc/gpio/Makefile
index 70d01a9e9676..562ee189d780 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/gpio/Makefile
@@ -1,4 +1,25 @@
#
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
# Makefile for the 'gpio' sub-component of DAL.
# It provides the control and status of HW GPIO pins.
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/Makefile b/drivers/gpu/drm/amd/display/dc/i2caux/Makefile
index 55603400acd9..352885cb4d07 100644
--- a/drivers/gpu/drm/amd/display/dc/i2caux/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/Makefile
@@ -1,4 +1,25 @@
#
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
# Makefile for the 'i2c' sub-component of DAL.
# It provides the control and status of HW i2c engine of the adapter.
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index d680b565af6f..d6971054ec07 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -212,7 +212,6 @@ struct pipe_ctx {
struct _vcs_dpi_display_rq_regs_st rq_regs;
struct _vcs_dpi_display_pipe_dest_params_st pipe_dlg_param;
#endif
- struct dwbc *dwbc;
};
struct resource_context {
@@ -241,6 +240,7 @@ struct dce_bw_output {
struct dcn_bw_clocks {
int dispclk_khz;
+ int dppclk_khz;
bool dppclk_div;
int dcfclk_khz;
int dcfclk_deep_sleep_khz;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
index 1e231f6de732..132d18d4b293 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
@@ -349,10 +349,10 @@ struct dcn_bw_internal_vars {
float dst_x_after_scaler;
float dst_y_after_scaler;
float time_calc;
- float v_update_offset[number_of_planes_minus_one + 1];
+ float v_update_offset[number_of_planes_minus_one + 1][2];
float total_repeater_delay;
- float v_update_width[number_of_planes_minus_one + 1];
- float v_ready_offset[number_of_planes_minus_one + 1];
+ float v_update_width[number_of_planes_minus_one + 1][2];
+ float v_ready_offset[number_of_planes_minus_one + 1][2];
float time_setup;
float extra_latency;
float maximum_vstartup;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h b/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h
index 48217ecfabd4..a83a48494613 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h
@@ -50,9 +50,9 @@ struct abm_funcs {
bool (*set_backlight_level)(struct abm *abm,
unsigned int backlight_level,
unsigned int frame_ramp,
- unsigned int controller_id);
+ unsigned int controller_id,
+ bool use_smooth_brightness);
unsigned int (*get_current_backlight_8_bit)(struct abm *abm);
- bool (*is_dmcu_initialized)(struct abm *abm);
};
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h
index b59712b41b81..ce206355461b 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h
@@ -63,6 +63,7 @@ struct dmcu_funcs {
unsigned int wait_loop_number);
void (*get_psr_wait_loop)(struct dmcu *dmcu,
unsigned int *psr_wait_loop_number);
+ bool (*is_dmcu_initialized)(struct dmcu *dmcu);
};
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
index ccb4896975c2..25edbde6163e 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
@@ -68,7 +68,7 @@ struct dpp_funcs {
void (*dpp_set_csc_adjustment)(
struct dpp *dpp,
- const struct out_csc_color_matrix *tbl_entry);
+ const uint16_t *regval);
void (*dpp_power_on_regamma_lut)(
struct dpp *dpp,
@@ -122,7 +122,7 @@ struct dpp_funcs {
void (*set_cursor_attributes)(
struct dpp *dpp_base,
- const struct dc_cursor_attributes *attr);
+ enum dc_cursor_color_format color_format);
void (*set_cursor_position)(
struct dpp *dpp_base,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
index 49b12f602e79..b7c7e70022e4 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
@@ -28,6 +28,20 @@
#include "mem_input.h"
+
+enum cursor_pitch {
+ CURSOR_PITCH_64_PIXELS = 0,
+ CURSOR_PITCH_128_PIXELS,
+ CURSOR_PITCH_256_PIXELS
+};
+
+enum cursor_lines_per_chunk {
+ CURSOR_LINE_PER_CHUNK_2 = 1,
+ CURSOR_LINE_PER_CHUNK_4,
+ CURSOR_LINE_PER_CHUNK_8,
+ CURSOR_LINE_PER_CHUNK_16
+};
+
struct hubp {
struct hubp_funcs *funcs;
struct dc_context *ctx;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
index ddc56700109b..e3f0b4056318 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
@@ -126,36 +126,12 @@ struct default_adjustment {
bool force_hw_default;
};
-struct out_csc_color_matrix {
- enum dc_color_space color_space;
- uint16_t regval[12];
-};
-struct output_csc_matrix {
+struct out_csc_color_matrix {
enum dc_color_space color_space;
uint16_t regval[12];
};
-static const struct output_csc_matrix output_csc_matrix[] = {
- { COLOR_SPACE_SRGB,
- { 0x2000, 0, 0, 0, 0, 0x2000, 0, 0, 0, 0, 0x2000, 0} },
- { COLOR_SPACE_SRGB_LIMITED,
- { 0x1B67, 0, 0, 0x201, 0, 0x1B67, 0, 0x201, 0, 0, 0x1B67, 0x201} },
- { COLOR_SPACE_YCBCR601,
- { 0xE04, 0xF444, 0xFDB9, 0x1004, 0x831, 0x1016, 0x320, 0x201, 0xFB45,
- 0xF6B7, 0xE04, 0x1004} },
- { COLOR_SPACE_YCBCR709,
- { 0xE04, 0xF345, 0xFEB7, 0x1004, 0x5D3, 0x1399, 0x1FA,
- 0x201, 0xFCCA, 0xF533, 0xE04, 0x1004} },
-
- /* TODO: correct values below */
- { COLOR_SPACE_YCBCR601_LIMITED,
- { 0xE00, 0xF447, 0xFDB9, 0x1000, 0x991,
- 0x12C9, 0x3A6, 0x200, 0xFB47, 0xF6B9, 0xE00, 0x1000} },
- { COLOR_SPACE_YCBCR709_LIMITED,
- { 0xE00, 0xF349, 0xFEB7, 0x1000, 0x6CE, 0x16E3,
- 0x24F, 0x200, 0xFCCB, 0xF535, 0xE00, 0x1000} },
-};
enum opp_regamma {
OPP_REGAMMA_BYPASS = 0,
@@ -178,4 +154,41 @@ struct dc_bias_and_scale {
uint16_t bias_blue;
};
+enum test_pattern_dyn_range {
+ TEST_PATTERN_DYN_RANGE_VESA = 0,
+ TEST_PATTERN_DYN_RANGE_CEA
+};
+
+enum test_pattern_mode {
+ TEST_PATTERN_MODE_COLORSQUARES_RGB = 0,
+ TEST_PATTERN_MODE_COLORSQUARES_YCBCR601,
+ TEST_PATTERN_MODE_COLORSQUARES_YCBCR709,
+ TEST_PATTERN_MODE_VERTICALBARS,
+ TEST_PATTERN_MODE_HORIZONTALBARS,
+ TEST_PATTERN_MODE_SINGLERAMP_RGB,
+ TEST_PATTERN_MODE_DUALRAMP_RGB
+};
+
+enum test_pattern_color_format {
+ TEST_PATTERN_COLOR_FORMAT_BPC_6 = 0,
+ TEST_PATTERN_COLOR_FORMAT_BPC_8,
+ TEST_PATTERN_COLOR_FORMAT_BPC_10,
+ TEST_PATTERN_COLOR_FORMAT_BPC_12
+};
+
+enum controller_dp_test_pattern {
+ CONTROLLER_DP_TEST_PATTERN_D102 = 0,
+ CONTROLLER_DP_TEST_PATTERN_SYMBOLERROR,
+ CONTROLLER_DP_TEST_PATTERN_PRBS7,
+ CONTROLLER_DP_TEST_PATTERN_COLORSQUARES,
+ CONTROLLER_DP_TEST_PATTERN_VERTICALBARS,
+ CONTROLLER_DP_TEST_PATTERN_HORIZONTALBARS,
+ CONTROLLER_DP_TEST_PATTERN_COLORRAMP,
+ CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
+ CONTROLLER_DP_TEST_PATTERN_RESERVED_8,
+ CONTROLLER_DP_TEST_PATTERN_RESERVED_9,
+ CONTROLLER_DP_TEST_PATTERN_RESERVED_A,
+ CONTROLLER_DP_TEST_PATTERN_COLORSQUARES_CEA
+};
+
#endif /* __DAL_HW_SHARED_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
index 8a08f0a97f94..0fd329deacd8 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
@@ -1,4 +1,26 @@
/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+/*
* link_encoder.h
*
* Created on: Oct 6, 2015
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
index 72ea33526a5c..23a8d5e53a89 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
@@ -26,7 +26,10 @@
#define __DC_MPCC_H__
#include "dc_hw_types.h"
-#include "opp.h"
+#include "hw_shared.h"
+
+#define MAX_MPCC 6
+#define MAX_OPP 6
enum mpc_output_csc_mode {
MPC_OUTPUT_CSC_DISABLE = 0,
@@ -34,45 +37,151 @@ enum mpc_output_csc_mode {
MPC_OUTPUT_CSC_COEF_B
};
-struct mpcc_cfg {
- int dpp_id;
- int opp_id;
- struct mpc_tree_cfg *tree_cfg;
- unsigned int z_index;
- struct tg_color black_color;
- bool per_pixel_alpha;
- bool pre_multiplied_alpha;
+enum mpcc_blend_mode {
+ MPCC_BLEND_MODE_BYPASS,
+ MPCC_BLEND_MODE_TOP_LAYER_PASSTHROUGH,
+ MPCC_BLEND_MODE_TOP_LAYER_ONLY,
+ MPCC_BLEND_MODE_TOP_BOT_BLENDING
+};
+
+enum mpcc_alpha_blend_mode {
+ MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA,
+ MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN,
+ MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA
+};
+
+/*
+ * MPCC blending configuration
+ */
+struct mpcc_blnd_cfg {
+ struct tg_color black_color; /* background color */
+ enum mpcc_alpha_blend_mode alpha_mode; /* alpha blend mode */
+ bool pre_multiplied_alpha; /* alpha pre-multiplied mode flag */
+ int global_gain;
+ int global_alpha;
+ bool overlap_only;
+
+};
+
+struct mpcc_sm_cfg {
+ bool enable;
+	/* 0-single plane,2-row subsampling,4-column subsampling,6-checkerboard subsampling */
+ int sm_mode;
+ /* 0- disable frame alternate, 1- enable frame alternate */
+ bool frame_alt;
+ /* 0- disable field alternate, 1- enable field alternate */
+ bool field_alt;
+ /* 0-no force,2-force frame polarity from top,3-force frame polarity from bottom */
+ int force_next_frame_porlarity;
+ /* 0-no force,2-force field polarity from top,3-force field polarity from bottom */
+ int force_next_field_polarity;
+};
+
+/*
+ * MPCC connection and blending configuration for a single MPCC instance.
+ * This struct is used as a node in an MPC tree.
+ */
+struct mpcc {
+ int mpcc_id; /* MPCC physical instance */
+ int dpp_id; /* DPP input to this MPCC */
+ struct mpcc *mpcc_bot; /* pointer to bottom layer MPCC. NULL when not connected */
+ struct mpcc_blnd_cfg blnd_cfg; /* The blending configuration for this MPCC */
+ struct mpcc_sm_cfg sm_cfg; /* stereo mix setting for this MPCC */
+};
+
+/*
+ * MPC tree represents all MPCC connections for a pipe.
+ */
+struct mpc_tree {
+ int opp_id; /* The OPP instance that owns this MPC tree */
+ struct mpcc *opp_list; /* The top MPCC layer of the MPC tree that outputs to OPP endpoint */
};
struct mpc {
const struct mpc_funcs *funcs;
struct dc_context *ctx;
+
+ struct mpcc mpcc_array[MAX_MPCC];
};
struct mpc_funcs {
- int (*add)(struct mpc *mpc, struct mpcc_cfg *cfg);
+ /*
+ * Insert DPP into MPC tree based on specified blending position.
+	 * Only used for planes that are part of the blending chain for OPP output
+ *
+ * Parameters:
+ * [in/out] mpc - MPC context.
+ * [in/out] tree - MPC tree structure that plane will be added to.
+ * [in] blnd_cfg - MPCC blending configuration for the new blending layer.
+ * [in] sm_cfg - MPCC stereo mix configuration for the new blending layer.
+	 *   stereo mix must be disabled for the very bottom layer of the tree config.
+ * [in] insert_above_mpcc - Insert new plane above this MPCC. If NULL, insert as bottom plane.
+ * [in] dpp_id - DPP instance for the plane to be added.
+ * [in] mpcc_id - The MPCC physical instance to use for blending.
+ *
+ * Return: struct mpcc* - MPCC that was added.
+ */
+ struct mpcc* (*insert_plane)(
+ struct mpc *mpc,
+ struct mpc_tree *tree,
+ struct mpcc_blnd_cfg *blnd_cfg,
+ struct mpcc_sm_cfg *sm_cfg,
+ struct mpcc *insert_above_mpcc,
+ int dpp_id,
+ int mpcc_id);
- void (*remove)(struct mpc *mpc,
- struct mpc_tree_cfg *tree_cfg,
- int opp_id,
- int mpcc_inst);
+ /*
+ * Remove a specified MPCC from the MPC tree.
+ *
+ * Parameters:
+ * [in/out] mpc - MPC context.
+ * [in/out] tree - MPC tree structure that plane will be removed from.
+ * [in/out] mpcc - MPCC to be removed from tree.
+ *
+ * Return: void
+ */
+ void (*remove_mpcc)(
+ struct mpc *mpc,
+ struct mpc_tree *tree,
+ struct mpcc *mpcc);
- void (*wait_for_idle)(struct mpc *mpc, int id);
+ /*
+ * Reset the MPCC HW status by disconnecting all muxes.
+ *
+ * Parameters:
+ * [in/out] mpc - MPC context.
+ *
+ * Return: void
+ */
+ void (*mpc_init)(struct mpc *mpc);
- void (*update_blend_mode)(struct mpc *mpc, struct mpcc_cfg *cfg);
+ /*
+ * Update the blending configuration for a specified MPCC.
+ *
+ * Parameters:
+ * [in/out] mpc - MPC context.
+ * [in] blnd_cfg - MPCC blending configuration.
+ * [in] mpcc_id - The MPCC physical instance.
+ *
+ * Return: void
+ */
+ void (*update_blending)(
+ struct mpc *mpc,
+ struct mpcc_blnd_cfg *blnd_cfg,
+ int mpcc_id);
- int (*get_opp_id)(struct mpc *mpc, int mpcc_id);
+ struct mpcc* (*get_mpcc_for_dpp)(
+ struct mpc_tree *tree,
+ int dpp_id);
+
+ void (*wait_for_idle)(struct mpc *mpc, int id);
- void (*set_output_csc)(struct mpc *mpc,
- int opp_id,
- const struct out_csc_color_matrix *tbl_entry,
- enum mpc_output_csc_mode ocsc_mode);
+ void (*assert_mpcc_idle_before_connect)(struct mpc *mpc, int mpcc_id);
- void (*set_ocsc_default)(struct mpc *mpc,
- int opp_id,
- enum dc_color_space color_space,
- enum mpc_output_csc_mode ocsc_mode);
+ void (*init_mpcc_list_from_hw)(
+ struct mpc *mpc,
+ struct mpc_tree *tree);
};
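The reworked mpc_funcs interface replaces the old flat add/remove/update_blend_mode hooks with operations on an explicit MPC tree. A minimal caller sketch follows (hypothetical, not part of this patch: the helper name, the chosen alpha-blend enum value and the empty stereo-mix config are illustrative assumptions; only the hook signatures above come from the patch):

/*
 * Illustrative only: blend one DPP on top of an OPP's existing tree.
 * Assumes opp->mpc_tree_params has already been initialized (e.g. via
 * funcs->mpc_init and funcs->init_mpcc_list_from_hw).
 */
static struct mpcc *example_blend_plane_on_top(struct mpc *mpc,
		struct output_pixel_processor *opp,
		int dpp_id, int mpcc_id)
{
	struct mpc_tree *tree = &opp->mpc_tree_params;
	struct mpcc_sm_cfg sm_cfg = { .enable = false };	/* no stereo mix */
	struct mpcc_blnd_cfg blnd_cfg = {
		.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA,	/* assumed enum value */
		.pre_multiplied_alpha = false,
		.global_alpha = 0xff,
		.overlap_only = false,
	};

	/* Insert above the current top layer; if the tree is empty,
	 * tree->opp_list is NULL and the plane becomes the bottom layer. */
	return mpc->funcs->insert_plane(mpc, tree, &blnd_cfg, &sm_cfg,
					tree->opp_list, dpp_id, mpcc_id);
}

Tearing a plane back down is the inverse: look the node up with get_mpcc_for_dpp() and pass it to remove_mpcc().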
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
index 579d1059a3d4..ab8fb77f1ae5 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
@@ -29,6 +29,7 @@
#include "hw_shared.h"
#include "dc_hw_types.h"
#include "transform.h"
+#include "mpc.h"
struct fixed31_32;
@@ -204,7 +205,7 @@ struct output_pixel_processor {
struct dc_context *ctx;
uint32_t inst;
struct pwl_params regamma_params;
- struct mpc_tree_cfg mpc_tree;
+ struct mpc_tree mpc_tree_params;
bool mpcc_disconnect_pending[MAX_PIPES];
const struct opp_funcs *funcs;
};
@@ -248,6 +249,21 @@ enum ovl_csc_adjust_item {
OVERLAY_COLOR_TEMPERATURE
};
+enum oppbuf_display_segmentation {
+ OPPBUF_DISPLAY_SEGMENTATION_1_SEGMENT = 0,
+ OPPBUF_DISPLAY_SEGMENTATION_2_SEGMENT = 1,
+ OPPBUF_DISPLAY_SEGMENTATION_4_SEGMENT = 2,
+ OPPBUF_DISPLAY_SEGMENTATION_4_SEGMENT_SPLIT_LEFT = 3,
+ OPPBUF_DISPLAY_SEGMENTATION_4_SEGMENT_SPLIT_RIGHT = 4
+};
+
+struct oppbuf_params {
+ uint32_t active_width;
+ enum oppbuf_display_segmentation mso_segmentation;
+ uint32_t mso_overlap_pixel_num;
+ uint32_t pixel_repetition;
+};
+
struct opp_funcs {
@@ -276,26 +292,11 @@ struct opp_funcs {
void (*opp_destroy)(struct output_pixel_processor **opp);
- void (*opp_set_stereo_polarity)(
- struct output_pixel_processor *opp,
- bool enable,
- bool rightEyePolarity);
-
- void (*opp_set_test_pattern)(
- struct output_pixel_processor *opp,
- bool enable);
-
- void (*opp_dpg_blank_enable)(
- struct output_pixel_processor *opp,
- bool enable,
- const struct tg_color *color,
- int width,
- int height);
-
- void (*opp_convert_pti)(
+ void (*opp_program_stereo)(
struct output_pixel_processor *opp,
bool enable,
- bool polarity);
+ const struct dc_crtc_timing *timing);
+
};
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
index 3050afe8e8a9..b5db1692393c 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
@@ -1,4 +1,26 @@
/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+/*
* stream_encoder.h
*
*/
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
index 860259913d78..ec312f1a3e55 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
@@ -26,6 +26,8 @@
#ifndef __DAL_TIMING_GENERATOR_TYPES_H__
#define __DAL_TIMING_GENERATOR_TYPES_H__
+#include "hw_shared.h"
+
struct dc_bios;
/* Contains CRTC vertical/horizontal pixel counters */
@@ -40,6 +42,19 @@ struct dcp_gsl_params {
int gsl_master;
};
+struct gsl_params {
+ int gsl0_en;
+ int gsl1_en;
+ int gsl2_en;
+ int gsl_master_en;
+ int gsl_master_mode;
+ int master_update_lock_gsl_en;
+ int gsl_window_start_x;
+ int gsl_window_end_x;
+ int gsl_window_start_y;
+ int gsl_window_end_y;
+};
+
/* define the structure of Dynamic Refresh Mode */
struct drr_params {
uint32_t vertical_total_min;
@@ -50,43 +65,6 @@ struct drr_params {
#define LEFT_EYE_3D_PRIMARY_SURFACE 1
#define RIGHT_EYE_3D_PRIMARY_SURFACE 0
-enum test_pattern_dyn_range {
- TEST_PATTERN_DYN_RANGE_VESA = 0,
- TEST_PATTERN_DYN_RANGE_CEA
-};
-
-enum test_pattern_mode {
- TEST_PATTERN_MODE_COLORSQUARES_RGB = 0,
- TEST_PATTERN_MODE_COLORSQUARES_YCBCR601,
- TEST_PATTERN_MODE_COLORSQUARES_YCBCR709,
- TEST_PATTERN_MODE_VERTICALBARS,
- TEST_PATTERN_MODE_HORIZONTALBARS,
- TEST_PATTERN_MODE_SINGLERAMP_RGB,
- TEST_PATTERN_MODE_DUALRAMP_RGB
-};
-
-enum test_pattern_color_format {
- TEST_PATTERN_COLOR_FORMAT_BPC_6 = 0,
- TEST_PATTERN_COLOR_FORMAT_BPC_8,
- TEST_PATTERN_COLOR_FORMAT_BPC_10,
- TEST_PATTERN_COLOR_FORMAT_BPC_12
-};
-
-enum controller_dp_test_pattern {
- CONTROLLER_DP_TEST_PATTERN_D102 = 0,
- CONTROLLER_DP_TEST_PATTERN_SYMBOLERROR,
- CONTROLLER_DP_TEST_PATTERN_PRBS7,
- CONTROLLER_DP_TEST_PATTERN_COLORSQUARES,
- CONTROLLER_DP_TEST_PATTERN_VERTICALBARS,
- CONTROLLER_DP_TEST_PATTERN_HORIZONTALBARS,
- CONTROLLER_DP_TEST_PATTERN_COLORRAMP,
- CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
- CONTROLLER_DP_TEST_PATTERN_RESERVED_8,
- CONTROLLER_DP_TEST_PATTERN_RESERVED_9,
- CONTROLLER_DP_TEST_PATTERN_RESERVED_A,
- CONTROLLER_DP_TEST_PATTERN_COLORSQUARES_CEA
-};
-
enum crtc_state {
CRTC_STATE_VBLANK = 0,
CRTC_STATE_VACTIVE
@@ -100,6 +78,12 @@ struct _dlg_otg_param {
enum signal_type signal;
};
+struct vupdate_keepout_params {
+ int start_offset;
+ int end_offset;
+ int enable;
+};
+
struct crtc_stereo_flags {
uint8_t PROGRAM_STEREO : 1;
uint8_t PROGRAM_POLARITY : 1;
@@ -187,6 +171,8 @@ struct timing_generator_funcs {
void (*tg_init)(struct timing_generator *tg);
bool (*is_tg_enabled)(struct timing_generator *tg);
+ bool (*is_optc_underflow_occurred)(struct timing_generator *tg);
+ void (*clear_optc_underflow)(struct timing_generator *tg);
};
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
index 5dc4ecf618ff..4c0aa56f7bae 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
@@ -28,6 +28,7 @@
#include "dc_types.h"
#include "clock_source.h"
#include "inc/hw/timing_generator.h"
+#include "inc/hw/opp.h"
#include "inc/hw/link_encoder.h"
#include "core_status.h"
@@ -40,6 +41,7 @@ enum pipe_gating_control {
struct dce_hwseq_wa {
bool blnd_crtc_trigger;
bool DEGVIDCN10_253;
+ bool false_optc_underflow;
};
struct hwseq_wa_state {
@@ -137,10 +139,6 @@ struct hw_sequencer_funcs {
void (*disable_plane)(struct dc *dc, struct pipe_ctx *pipe_ctx);
- void (*enable_plane)(struct dc *dc,
- struct pipe_ctx *pipe,
- struct dc_state *context);
-
void (*update_info_frame)(struct pipe_ctx *pipe_ctx);
void (*enable_stream)(struct pipe_ctx *pipe_ctx);
@@ -198,6 +196,7 @@ struct hw_sequencer_funcs {
void (*edp_backlight_control)(
struct dc_link *link,
bool enable);
+ void (*edp_wait_for_hpd_ready)(struct dc_link *link, bool power_up);
};
@@ -209,4 +208,8 @@ void color_space_to_black_color(
bool hwss_wait_for_blank_complete(
struct timing_generator *tg);
+const uint16_t *find_color_matrix(
+ enum dc_color_space color_space,
+ uint32_t *array_size);
+
#endif /* __DC_HW_SEQUENCER_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/irq/Makefile b/drivers/gpu/drm/amd/display/dc/irq/Makefile
index c7e93f7223bd..498515aad4a5 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/irq/Makefile
@@ -1,4 +1,25 @@
#
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
# Makefile for the 'irq' sub-component of DAL.
# It provides the control and status of HW adapter resources,
# that are global for the ASIC and sharable between pipes.
diff --git a/drivers/gpu/drm/amd/display/dc/os_types.h b/drivers/gpu/drm/amd/display/dc/os_types.h
index a87c0329541f..1fcbc99e63b5 100644
--- a/drivers/gpu/drm/amd/display/dc/os_types.h
+++ b/drivers/gpu/drm/amd/display/dc/os_types.h
@@ -26,8 +26,6 @@
#ifndef _OS_TYPES_H_
#define _OS_TYPES_H_
-#if defined __KERNEL__
-
#include <asm/byteorder.h>
#include <linux/types.h>
#include <drm/drmP.h>
@@ -46,14 +44,12 @@
#undef WRITE
#undef FRAME_SIZE
-#define dm_output_to_console(fmt, ...) DRM_INFO(fmt, ##__VA_ARGS__)
+#define dm_output_to_console(fmt, ...) DRM_DEBUG_KMS(fmt, ##__VA_ARGS__)
#define dm_error(fmt, ...) DRM_ERROR(fmt, ##__VA_ARGS__)
-#define dm_debug(fmt, ...) DRM_DEBUG_KMS(fmt, ##__VA_ARGS__)
-
-#define dm_vlog(fmt, args) vprintk(fmt, args)
-
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#include <asm/fpu/api.h>
#endif
/*
@@ -89,8 +85,4 @@
BREAK_TO_DEBUGGER(); \
} while (0)
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
-#include <asm/fpu/api.h>
-#endif
-
#endif /* _OS_TYPES_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/virtual/Makefile b/drivers/gpu/drm/amd/display/dc/virtual/Makefile
index fc0b7318d9cc..07326d244d50 100644
--- a/drivers/gpu/drm/amd/display/dc/virtual/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/virtual/Makefile
@@ -1,4 +1,25 @@
#
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
# Makefile for the virtual sub-component of DAL.
# It provides the control and status of HW CRTC block.
diff --git a/drivers/gpu/drm/amd/display/include/fixed31_32.h b/drivers/gpu/drm/amd/display/include/fixed31_32.h
index 3248f699daf2..4badaedbaadd 100644
--- a/drivers/gpu/drm/amd/display/include/fixed31_32.h
+++ b/drivers/gpu/drm/amd/display/include/fixed31_32.h
@@ -463,4 +463,11 @@ uint32_t dal_fixed31_32_u2d19(
uint32_t dal_fixed31_32_u0d19(
struct fixed31_32 arg);
+
+uint32_t dal_fixed31_32_clamp_u0d14(
+ struct fixed31_32 arg);
+
+uint32_t dal_fixed31_32_clamp_u0d10(
+ struct fixed31_32 arg);
+
#endif
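The two new clamp helpers convert a 31.32 fixed-point value into an unsigned pure fraction with 14 or 10 bits. A rough sketch of the intended semantics, assuming the usual layout with 32 fractional bits in a 64-bit value (illustrative only, not this patch's implementation; rounding details may differ):

/* Illustrative only: clamp a 31.32 value to an unsigned 0.d fraction
 * with 'frac_bits' bits, i.e. to the range [0, 1). */
static inline uint32_t example_clamp_u0d(long long value_31_32, int frac_bits)
{
	const long long one = 1LL << 32;	/* 1.0 in 31.32 */

	if (value_31_32 < 0)
		value_31_32 = 0;
	else if (value_31_32 >= one)
		value_31_32 = one - 1;		/* saturate just below 1.0 */

	/* keep the most significant 'frac_bits' fractional bits */
	return (uint32_t)(value_31_32 >> (32 - frac_bits));
}

dal_fixed31_32_clamp_u0d14() would then correspond to frac_bits = 14 and dal_fixed31_32_clamp_u0d10() to frac_bits = 10.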
diff --git a/drivers/gpu/drm/amd/display/include/grph_object_id.h b/drivers/gpu/drm/amd/display/include/grph_object_id.h
index 03a7a9ca95ea..c4197432eb7c 100644
--- a/drivers/gpu/drm/amd/display/include/grph_object_id.h
+++ b/drivers/gpu/drm/amd/display/include/grph_object_id.h
@@ -233,10 +233,6 @@ static inline struct graphics_object_id dal_graphics_object_id_init(
return result;
}
-bool dal_graphics_object_id_is_equal(
- struct graphics_object_id id1,
- struct graphics_object_id id2);
-
/* Based on internal data members memory layout */
static inline uint32_t dal_graphics_object_id_to_uint(
struct graphics_object_id id)
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/Makefile b/drivers/gpu/drm/amd/display/modules/freesync/Makefile
index db8e0ff6d7a9..fb9a499780e8 100644
--- a/drivers/gpu/drm/amd/display/modules/freesync/Makefile
+++ b/drivers/gpu/drm/amd/display/modules/freesync/Makefile
@@ -1,4 +1,25 @@
#
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
# Makefile for the 'freesync' sub-module of DAL.
#
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_default.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_default.h
index 663d3af35baf..5bf84c6d0ec3 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_default.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_default.h
@@ -436,7 +436,6 @@
#define mmTA_CNTL_DEFAULT 0x8004d850
#define mmTA_CNTL_AUX_DEFAULT 0x00000000
#define mmTA_RESERVED_010C_DEFAULT 0x00000000
-#define mmTA_GRAD_ADJ_DEFAULT 0x40000040
#define mmTA_STATUS_DEFAULT 0x00000000
#define mmTA_SCRATCH_DEFAULT 0x00000000
@@ -1700,7 +1699,6 @@
#define mmDB_STENCIL_WRITE_BASE_DEFAULT 0x00000000
#define mmDB_STENCIL_WRITE_BASE_HI_DEFAULT 0x00000000
#define mmDB_DFSM_CONTROL_DEFAULT 0x00000000
-#define mmDB_RENDER_FILTER_DEFAULT 0x00000000
#define mmDB_Z_INFO2_DEFAULT 0x00000000
#define mmDB_STENCIL_INFO2_DEFAULT 0x00000000
#define mmTA_BC_BASE_ADDR_DEFAULT 0x00000000
@@ -1806,8 +1804,6 @@
#define mmPA_SC_RIGHT_VERT_GRID_DEFAULT 0x00000000
#define mmPA_SC_LEFT_VERT_GRID_DEFAULT 0x00000000
#define mmPA_SC_HORIZ_GRID_DEFAULT 0x00000000
-#define mmPA_SC_FOV_WINDOW_LR_DEFAULT 0x00000000
-#define mmPA_SC_FOV_WINDOW_TB_DEFAULT 0x00000000
#define mmVGT_MULTI_PRIM_IB_RESET_INDX_DEFAULT 0x00000000
#define mmCB_BLEND_RED_DEFAULT 0x00000000
#define mmCB_BLEND_GREEN_DEFAULT 0x00000000
@@ -2072,7 +2068,6 @@
#define mmVGT_EVENT_INITIATOR_DEFAULT 0x00000000
#define mmVGT_GS_MAX_PRIMS_PER_SUBGROUP_DEFAULT 0x00000000
#define mmVGT_DRAW_PAYLOAD_CNTL_DEFAULT 0x00000000
-#define mmVGT_INDEX_PAYLOAD_CNTL_DEFAULT 0x00000000
#define mmVGT_INSTANCE_STEP_RATE_0_DEFAULT 0x00000000
#define mmVGT_INSTANCE_STEP_RATE_1_DEFAULT 0x00000000
#define mmVGT_ESGS_RING_ITEMSIZE_DEFAULT 0x00000000
@@ -2490,7 +2485,6 @@
#define mmWD_INDEX_BUF_BASE_DEFAULT 0x00000000
#define mmWD_INDEX_BUF_BASE_HI_DEFAULT 0x00000000
#define mmIA_MULTI_VGT_PARAM_DEFAULT 0x006000ff
-#define mmVGT_OBJECT_ID_DEFAULT 0x00000000
#define mmVGT_INSTANCE_BASE_ID_DEFAULT 0x00000000
#define mmPA_SU_LINE_STIPPLE_VALUE_DEFAULT 0x00000000
#define mmPA_SC_LINE_STIPPLE_STATE_DEFAULT 0x00000000
@@ -2534,7 +2528,6 @@
#define mmSQC_WRITEBACK_DEFAULT 0x00000000
#define mmTA_CS_BC_BASE_ADDR_DEFAULT 0x00000000
#define mmTA_CS_BC_BASE_ADDR_HI_DEFAULT 0x00000000
-#define mmTA_GRAD_ADJ_UCONFIG_DEFAULT 0x40000040
#define mmDB_OCCLUSION_COUNT0_LOW_DEFAULT 0x00000000
#define mmDB_OCCLUSION_COUNT0_HI_DEFAULT 0x00000000
#define mmDB_OCCLUSION_COUNT1_LOW_DEFAULT 0x00000000
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_offset.h
index e6d6171aa8b9..4ce090db7ef7 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_offset.h
@@ -841,8 +841,6 @@
#define mmTA_CNTL_AUX_BASE_IDX 0
#define mmTA_RESERVED_010C 0x0543
#define mmTA_RESERVED_010C_BASE_IDX 0
-#define mmTA_GRAD_ADJ 0x0544
-#define mmTA_GRAD_ADJ_BASE_IDX 0
#define mmTA_STATUS 0x0548
#define mmTA_STATUS_BASE_IDX 0
#define mmTA_SCRATCH 0x0564
@@ -3330,8 +3328,6 @@
#define mmDB_STENCIL_WRITE_BASE_HI_BASE_IDX 1
#define mmDB_DFSM_CONTROL 0x0018
#define mmDB_DFSM_CONTROL_BASE_IDX 1
-#define mmDB_RENDER_FILTER 0x0019
-#define mmDB_RENDER_FILTER_BASE_IDX 1
#define mmDB_Z_INFO2 0x001a
#define mmDB_Z_INFO2_BASE_IDX 1
#define mmDB_STENCIL_INFO2 0x001b
@@ -3542,10 +3538,6 @@
#define mmPA_SC_LEFT_VERT_GRID_BASE_IDX 1
#define mmPA_SC_HORIZ_GRID 0x00ea
#define mmPA_SC_HORIZ_GRID_BASE_IDX 1
-#define mmPA_SC_FOV_WINDOW_LR 0x00eb
-#define mmPA_SC_FOV_WINDOW_LR_BASE_IDX 1
-#define mmPA_SC_FOV_WINDOW_TB 0x00ec
-#define mmPA_SC_FOV_WINDOW_TB_BASE_IDX 1
#define mmVGT_MULTI_PRIM_IB_RESET_INDX 0x0103
#define mmVGT_MULTI_PRIM_IB_RESET_INDX_BASE_IDX 1
#define mmCB_BLEND_RED 0x0105
@@ -4074,8 +4066,6 @@
#define mmVGT_GS_MAX_PRIMS_PER_SUBGROUP_BASE_IDX 1
#define mmVGT_DRAW_PAYLOAD_CNTL 0x02a6
#define mmVGT_DRAW_PAYLOAD_CNTL_BASE_IDX 1
-#define mmVGT_INDEX_PAYLOAD_CNTL 0x02a7
-#define mmVGT_INDEX_PAYLOAD_CNTL_BASE_IDX 1
#define mmVGT_INSTANCE_STEP_RATE_0 0x02a8
#define mmVGT_INSTANCE_STEP_RATE_0_BASE_IDX 1
#define mmVGT_INSTANCE_STEP_RATE_1 0x02a9
@@ -4908,8 +4898,6 @@
#define mmWD_INDEX_BUF_BASE_HI_BASE_IDX 1
#define mmIA_MULTI_VGT_PARAM 0x2258
#define mmIA_MULTI_VGT_PARAM_BASE_IDX 1
-#define mmVGT_OBJECT_ID 0x2259
-#define mmVGT_OBJECT_ID_BASE_IDX 1
#define mmVGT_INSTANCE_BASE_ID 0x225a
#define mmVGT_INSTANCE_BASE_ID_BASE_IDX 1
#define mmPA_SU_LINE_STIPPLE_VALUE 0x2280
@@ -4996,8 +4984,6 @@
#define mmTA_CS_BC_BASE_ADDR_BASE_IDX 1
#define mmTA_CS_BC_BASE_ADDR_HI 0x2381
#define mmTA_CS_BC_BASE_ADDR_HI_BASE_IDX 1
-#define mmTA_GRAD_ADJ_UCONFIG 0x2382
-#define mmTA_GRAD_ADJ_UCONFIG_BASE_IDX 1
#define mmDB_OCCLUSION_COUNT0_LOW 0x23c0
#define mmDB_OCCLUSION_COUNT0_LOW_BASE_IDX 1
#define mmDB_OCCLUSION_COUNT0_HI 0x23c1
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h
index 5c5e9b445432..2e1214be67a2 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h
@@ -4576,15 +4576,6 @@
//TA_RESERVED_010C
#define TA_RESERVED_010C__Unused__SHIFT 0x0
#define TA_RESERVED_010C__Unused_MASK 0xFFFFFFFFL
-//TA_GRAD_ADJ
-#define TA_GRAD_ADJ__GRAD_ADJ_0__SHIFT 0x0
-#define TA_GRAD_ADJ__GRAD_ADJ_1__SHIFT 0x8
-#define TA_GRAD_ADJ__GRAD_ADJ_2__SHIFT 0x10
-#define TA_GRAD_ADJ__GRAD_ADJ_3__SHIFT 0x18
-#define TA_GRAD_ADJ__GRAD_ADJ_0_MASK 0x000000FFL
-#define TA_GRAD_ADJ__GRAD_ADJ_1_MASK 0x0000FF00L
-#define TA_GRAD_ADJ__GRAD_ADJ_2_MASK 0x00FF0000L
-#define TA_GRAD_ADJ__GRAD_ADJ_3_MASK 0xFF000000L
//TA_STATUS
#define TA_STATUS__FG_PFIFO_EMPTYB__SHIFT 0xc
#define TA_STATUS__FG_LFIFO_EMPTYB__SHIFT 0xd
@@ -14459,9 +14450,6 @@
#define DB_DFSM_CONTROL__PUNCHOUT_MODE_MASK 0x00000003L
#define DB_DFSM_CONTROL__POPS_DRAIN_PS_ON_OVERLAP_MASK 0x00000004L
#define DB_DFSM_CONTROL__DISALLOW_OVERFLOW_MASK 0x00000008L
-//DB_RENDER_FILTER
-#define DB_RENDER_FILTER__PS_INVOKE_MASK__SHIFT 0x0
-#define DB_RENDER_FILTER__PS_INVOKE_MASK_MASK 0x0000FFFFL
//DB_Z_INFO2
#define DB_Z_INFO2__EPITCH__SHIFT 0x0
#define DB_Z_INFO2__EPITCH_MASK 0x0000FFFFL
@@ -14959,11 +14947,9 @@
#define PA_SC_TILE_STEERING_OVERRIDE__ENABLE__SHIFT 0x0
#define PA_SC_TILE_STEERING_OVERRIDE__NUM_SE__SHIFT 0x1
#define PA_SC_TILE_STEERING_OVERRIDE__NUM_RB_PER_SE__SHIFT 0x5
-#define PA_SC_TILE_STEERING_OVERRIDE__DISABLE_SRBSL_DB_OPTIMIZED_PACKING__SHIFT 0x8
#define PA_SC_TILE_STEERING_OVERRIDE__ENABLE_MASK 0x00000001L
#define PA_SC_TILE_STEERING_OVERRIDE__NUM_SE_MASK 0x00000006L
#define PA_SC_TILE_STEERING_OVERRIDE__NUM_RB_PER_SE_MASK 0x00000060L
-#define PA_SC_TILE_STEERING_OVERRIDE__DISABLE_SRBSL_DB_OPTIMIZED_PACKING_MASK 0x00000100L
//CP_PERFMON_CNTX_CNTL
#define CP_PERFMON_CNTX_CNTL__PERFMON_ENABLE__SHIFT 0x1f
#define CP_PERFMON_CNTX_CNTL__PERFMON_ENABLE_MASK 0x80000000L
@@ -15003,20 +14989,6 @@
#define PA_SC_HORIZ_GRID__TOP_HALF_MASK 0x0000FF00L
#define PA_SC_HORIZ_GRID__BOT_HALF_MASK 0x00FF0000L
#define PA_SC_HORIZ_GRID__BOT_QTR_MASK 0xFF000000L
-//PA_SC_FOV_WINDOW_LR
-#define PA_SC_FOV_WINDOW_LR__LEFT_EYE_FOV_LEFT__SHIFT 0x0
-#define PA_SC_FOV_WINDOW_LR__LEFT_EYE_FOV_RIGHT__SHIFT 0x8
-#define PA_SC_FOV_WINDOW_LR__RIGHT_EYE_FOV_LEFT__SHIFT 0x10
-#define PA_SC_FOV_WINDOW_LR__RIGHT_EYE_FOV_RIGHT__SHIFT 0x18
-#define PA_SC_FOV_WINDOW_LR__LEFT_EYE_FOV_LEFT_MASK 0x000000FFL
-#define PA_SC_FOV_WINDOW_LR__LEFT_EYE_FOV_RIGHT_MASK 0x0000FF00L
-#define PA_SC_FOV_WINDOW_LR__RIGHT_EYE_FOV_LEFT_MASK 0x00FF0000L
-#define PA_SC_FOV_WINDOW_LR__RIGHT_EYE_FOV_RIGHT_MASK 0xFF000000L
-//PA_SC_FOV_WINDOW_TB
-#define PA_SC_FOV_WINDOW_TB__FOV_TOP__SHIFT 0x0
-#define PA_SC_FOV_WINDOW_TB__FOV_BOT__SHIFT 0x8
-#define PA_SC_FOV_WINDOW_TB__FOV_TOP_MASK 0x000000FFL
-#define PA_SC_FOV_WINDOW_TB__FOV_BOT_MASK 0x0000FF00L
//VGT_MULTI_PRIM_IB_RESET_INDX
#define VGT_MULTI_PRIM_IB_RESET_INDX__RESET_INDX__SHIFT 0x0
#define VGT_MULTI_PRIM_IB_RESET_INDX__RESET_INDX_MASK 0xFFFFFFFFL
@@ -17010,13 +16982,11 @@
#define PA_SU_SMALL_PRIM_FILTER_CNTL__LINE_FILTER_DISABLE__SHIFT 0x2
#define PA_SU_SMALL_PRIM_FILTER_CNTL__POINT_FILTER_DISABLE__SHIFT 0x3
#define PA_SU_SMALL_PRIM_FILTER_CNTL__RECTANGLE_FILTER_DISABLE__SHIFT 0x4
-#define PA_SU_SMALL_PRIM_FILTER_CNTL__SRBSL_ENABLE__SHIFT 0x5
#define PA_SU_SMALL_PRIM_FILTER_CNTL__SMALL_PRIM_FILTER_ENABLE_MASK 0x00000001L
#define PA_SU_SMALL_PRIM_FILTER_CNTL__TRIANGLE_FILTER_DISABLE_MASK 0x00000002L
#define PA_SU_SMALL_PRIM_FILTER_CNTL__LINE_FILTER_DISABLE_MASK 0x00000004L
#define PA_SU_SMALL_PRIM_FILTER_CNTL__POINT_FILTER_DISABLE_MASK 0x00000008L
#define PA_SU_SMALL_PRIM_FILTER_CNTL__RECTANGLE_FILTER_DISABLE_MASK 0x00000010L
-#define PA_SU_SMALL_PRIM_FILTER_CNTL__SRBSL_ENABLE_MASK 0x00000020L
//PA_CL_OBJPRIM_ID_CNTL
#define PA_CL_OBJPRIM_ID_CNTL__OBJ_ID_SEL__SHIFT 0x0
#define PA_CL_OBJPRIM_ID_CNTL__ADD_PIPED_PRIM_ID__SHIFT 0x1
@@ -17345,9 +17315,6 @@
#define VGT_DRAW_PAYLOAD_CNTL__EN_REG_RT_INDEX_MASK 0x00000002L
#define VGT_DRAW_PAYLOAD_CNTL__EN_PIPELINE_PRIMID_MASK 0x00000004L
#define VGT_DRAW_PAYLOAD_CNTL__OBJECT_ID_INST_EN_MASK 0x00000008L
-//VGT_INDEX_PAYLOAD_CNTL
-#define VGT_INDEX_PAYLOAD_CNTL__COMPOUND_INDEX_EN__SHIFT 0x0
-#define VGT_INDEX_PAYLOAD_CNTL__COMPOUND_INDEX_EN_MASK 0x00000001L
//VGT_INSTANCE_STEP_RATE_0
#define VGT_INSTANCE_STEP_RATE_0__STEP_RATE__SHIFT 0x0
#define VGT_INSTANCE_STEP_RATE_0__STEP_RATE_MASK 0xFFFFFFFFL
@@ -19849,9 +19816,6 @@
#define IA_MULTI_VGT_PARAM__EN_INST_OPT_BASIC_MASK 0x00200000L
#define IA_MULTI_VGT_PARAM__EN_INST_OPT_ADV_MASK 0x00400000L
#define IA_MULTI_VGT_PARAM__HW_USE_ONLY_MASK 0x00800000L
-//VGT_OBJECT_ID
-#define VGT_OBJECT_ID__REG_OBJ_ID__SHIFT 0x0
-#define VGT_OBJECT_ID__REG_OBJ_ID_MASK 0xFFFFFFFFL
//VGT_INSTANCE_BASE_ID
#define VGT_INSTANCE_BASE_ID__INSTANCE_BASE_ID__SHIFT 0x0
#define VGT_INSTANCE_BASE_ID__INSTANCE_BASE_ID_MASK 0xFFFFFFFFL
@@ -20067,15 +20031,6 @@
//TA_CS_BC_BASE_ADDR_HI
#define TA_CS_BC_BASE_ADDR_HI__ADDRESS__SHIFT 0x0
#define TA_CS_BC_BASE_ADDR_HI__ADDRESS_MASK 0x000000FFL
-//TA_GRAD_ADJ_UCONFIG
-#define TA_GRAD_ADJ_UCONFIG__GRAD_ADJ_0__SHIFT 0x0
-#define TA_GRAD_ADJ_UCONFIG__GRAD_ADJ_1__SHIFT 0x8
-#define TA_GRAD_ADJ_UCONFIG__GRAD_ADJ_2__SHIFT 0x10
-#define TA_GRAD_ADJ_UCONFIG__GRAD_ADJ_3__SHIFT 0x18
-#define TA_GRAD_ADJ_UCONFIG__GRAD_ADJ_0_MASK 0x000000FFL
-#define TA_GRAD_ADJ_UCONFIG__GRAD_ADJ_1_MASK 0x0000FF00L
-#define TA_GRAD_ADJ_UCONFIG__GRAD_ADJ_2_MASK 0x00FF0000L
-#define TA_GRAD_ADJ_UCONFIG__GRAD_ADJ_3_MASK 0xFF000000L
//DB_OCCLUSION_COUNT0_LOW
#define DB_OCCLUSION_COUNT0_LOW__COUNT_LOW__SHIFT 0x0
#define DB_OCCLUSION_COUNT0_LOW__COUNT_LOW_MASK 0xFFFFFFFFL
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_1_offset.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_1_offset.h
index db7ef5ede0e5..030e0020902b 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_1_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_1_offset.h
@@ -815,8 +815,6 @@
#define mmTA_CNTL_AUX_BASE_IDX 0
#define mmTA_RESERVED_010C 0x0543
#define mmTA_RESERVED_010C_BASE_IDX 0
-#define mmTA_GRAD_ADJ 0x0544
-#define mmTA_GRAD_ADJ_BASE_IDX 0
#define mmTA_STATUS 0x0548
#define mmTA_STATUS_BASE_IDX 0
#define mmTA_SCRATCH 0x0564
@@ -3617,8 +3615,6 @@
#define mmDB_STENCIL_WRITE_BASE_HI_BASE_IDX 1
#define mmDB_DFSM_CONTROL 0x0018
#define mmDB_DFSM_CONTROL_BASE_IDX 1
-#define mmDB_RENDER_FILTER 0x0019
-#define mmDB_RENDER_FILTER_BASE_IDX 1
#define mmDB_Z_INFO2 0x001a
#define mmDB_Z_INFO2_BASE_IDX 1
#define mmDB_STENCIL_INFO2 0x001b
@@ -3829,10 +3825,6 @@
#define mmPA_SC_LEFT_VERT_GRID_BASE_IDX 1
#define mmPA_SC_HORIZ_GRID 0x00ea
#define mmPA_SC_HORIZ_GRID_BASE_IDX 1
-#define mmPA_SC_FOV_WINDOW_LR 0x00eb
-#define mmPA_SC_FOV_WINDOW_LR_BASE_IDX 1
-#define mmPA_SC_FOV_WINDOW_TB 0x00ec
-#define mmPA_SC_FOV_WINDOW_TB_BASE_IDX 1
#define mmVGT_MULTI_PRIM_IB_RESET_INDX 0x0103
#define mmVGT_MULTI_PRIM_IB_RESET_INDX_BASE_IDX 1
#define mmCB_BLEND_RED 0x0105
@@ -4361,8 +4353,6 @@
#define mmVGT_GS_MAX_PRIMS_PER_SUBGROUP_BASE_IDX 1
#define mmVGT_DRAW_PAYLOAD_CNTL 0x02a6
#define mmVGT_DRAW_PAYLOAD_CNTL_BASE_IDX 1
-#define mmVGT_INDEX_PAYLOAD_CNTL 0x02a7
-#define mmVGT_INDEX_PAYLOAD_CNTL_BASE_IDX 1
#define mmVGT_INSTANCE_STEP_RATE_0 0x02a8
#define mmVGT_INSTANCE_STEP_RATE_0_BASE_IDX 1
#define mmVGT_INSTANCE_STEP_RATE_1 0x02a9
@@ -5195,8 +5185,6 @@
#define mmWD_INDEX_BUF_BASE_HI_BASE_IDX 1
#define mmIA_MULTI_VGT_PARAM 0x2258
#define mmIA_MULTI_VGT_PARAM_BASE_IDX 1
-#define mmVGT_OBJECT_ID 0x2259
-#define mmVGT_OBJECT_ID_BASE_IDX 1
#define mmVGT_INSTANCE_BASE_ID 0x225a
#define mmVGT_INSTANCE_BASE_ID_BASE_IDX 1
#define mmPA_SU_LINE_STIPPLE_VALUE 0x2280
@@ -5283,8 +5271,6 @@
#define mmTA_CS_BC_BASE_ADDR_BASE_IDX 1
#define mmTA_CS_BC_BASE_ADDR_HI 0x2381
#define mmTA_CS_BC_BASE_ADDR_HI_BASE_IDX 1
-#define mmTA_GRAD_ADJ_UCONFIG 0x2382
-#define mmTA_GRAD_ADJ_UCONFIG_BASE_IDX 1
#define mmDB_OCCLUSION_COUNT0_LOW 0x23c0
#define mmDB_OCCLUSION_COUNT0_LOW_BASE_IDX 1
#define mmDB_OCCLUSION_COUNT0_HI 0x23c1
diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
index f516fd10e6ba..a6752bd0c871 100644
--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
@@ -46,6 +46,28 @@ enum kfd_preempt_type {
KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
};
+struct kfd_cu_info {
+ uint32_t num_shader_engines;
+ uint32_t num_shader_arrays_per_engine;
+ uint32_t num_cu_per_sh;
+ uint32_t cu_active_number;
+ uint32_t cu_ao_mask;
+ uint32_t simd_per_cu;
+ uint32_t max_waves_per_simd;
+ uint32_t wave_front_size;
+ uint32_t max_scratch_slots_per_cu;
+ uint32_t lds_size;
+ uint32_t cu_bitmap[4][4];
+};
+
+/* For getting GPU local memory information from KGD */
+struct kfd_local_mem_info {
+ uint64_t local_mem_size_private;
+ uint64_t local_mem_size_public;
+ uint32_t vram_width;
+ uint32_t mem_clk_max;
+};
+
enum kgd_memory_pool {
KGD_POOL_SYSTEM_CACHEABLE = 1,
KGD_POOL_SYSTEM_WRITECOMBINE = 2,
@@ -106,7 +128,7 @@ struct tile_config {
*
* @free_gtt_mem: Frees a buffer that was allocated on the gart aperture
*
- * @get_vmem_size: Retrieves (physical) size of VRAM
+ * @get_local_mem_info: Retrieves information about GPU local memory
*
* @get_gpu_clock_counter: Retrieves GPU clock counter
*
@@ -131,6 +153,12 @@ struct tile_config {
* @hqd_sdma_load: Loads the SDMA mqd structure to a H/W SDMA hqd slot.
 * Used only in non-HWS mode.
*
+ * @hqd_dump: Dumps CPC HQD registers to an array of address-value pairs.
+ * The array is allocated with kmalloc and must be freed with kfree by the caller.
+ *
+ * @hqd_sdma_dump: Dumps SDMA HQD registers to an array of address-value pairs.
+ * The array is allocated with kmalloc and must be freed with kfree by the caller.
+ *
 * @hqd_is_occupied: Checks if a hqd slot is occupied.
*
* @hqd_destroy: Destructs and preempts the queue assigned to that hqd slot.
@@ -147,6 +175,10 @@ struct tile_config {
*
* @get_tile_config: Returns GPU-specific tiling mode information
*
+ * @get_cu_info: Retrieves activated cu info
+ *
+ * @get_vram_usage: Returns current VRAM usage
+ *
* This structure contains function pointers to services that the kgd driver
* provides to amdkfd driver.
*
@@ -158,7 +190,8 @@ struct kfd2kgd_calls {
void (*free_gtt_mem)(struct kgd_dev *kgd, void *mem_obj);
- uint64_t (*get_vmem_size)(struct kgd_dev *kgd);
+ void (*get_local_mem_info)(struct kgd_dev *kgd,
+ struct kfd_local_mem_info *mem_info);
uint64_t (*get_gpu_clock_counter)(struct kgd_dev *kgd);
uint32_t (*get_max_engine_clock_in_mhz)(struct kgd_dev *kgd);
@@ -184,7 +217,16 @@ struct kfd2kgd_calls {
uint32_t wptr_shift, uint32_t wptr_mask,
struct mm_struct *mm);
- int (*hqd_sdma_load)(struct kgd_dev *kgd, void *mqd);
+ int (*hqd_sdma_load)(struct kgd_dev *kgd, void *mqd,
+ uint32_t __user *wptr, struct mm_struct *mm);
+
+ int (*hqd_dump)(struct kgd_dev *kgd,
+ uint32_t pipe_id, uint32_t queue_id,
+ uint32_t (**dump)[2], uint32_t *n_regs);
+
+ int (*hqd_sdma_dump)(struct kgd_dev *kgd,
+ uint32_t engine_id, uint32_t queue_id,
+ uint32_t (**dump)[2], uint32_t *n_regs);
bool (*hqd_is_occupied)(struct kgd_dev *kgd, uint64_t queue_address,
uint32_t pipe_id, uint32_t queue_id);
@@ -224,6 +266,10 @@ struct kfd2kgd_calls {
void (*set_scratch_backing_va)(struct kgd_dev *kgd,
uint64_t va, uint32_t vmid);
int (*get_tile_config)(struct kgd_dev *kgd, struct tile_config *config);
+
+ void (*get_cu_info)(struct kgd_dev *kgd,
+ struct kfd_cu_info *cu_info);
+ uint64_t (*get_vram_usage)(struct kgd_dev *kgd);
};
/**
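The new hqd_dump/hqd_sdma_dump hooks hand back a kmalloc'd array of {register, value} pairs that the caller must free. A hypothetical caller sketch (only the hook signature comes from this patch; the helper name and the printing loop are illustrative):

/*
 * Illustrative only: dump a CPC HQD slot through the kfd2kgd interface.
 * The callee allocates the pair array with kmalloc; kfree() it here.
 */
static int example_dump_hqd(const struct kfd2kgd_calls *f, struct kgd_dev *kgd,
			    uint32_t pipe_id, uint32_t queue_id)
{
	uint32_t (*dump)[2];	/* array of {register offset, value} pairs */
	uint32_t n_regs, i;
	int r;

	r = f->hqd_dump(kgd, pipe_id, queue_id, &dump, &n_regs);
	if (r)
		return r;

	for (i = 0; i < n_regs; i++)
		pr_debug("reg 0x%x = 0x%x\n", dump[i][0], dump[i][1]);

	kfree(dump);	/* ownership is with the caller */
	return 0;
}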
diff --git a/drivers/gpu/drm/amd/include/vi_structs.h b/drivers/gpu/drm/amd/include/vi_structs.h
index 20234820194b..717fbae1d362 100644
--- a/drivers/gpu/drm/amd/include/vi_structs.h
+++ b/drivers/gpu/drm/amd/include/vi_structs.h
@@ -153,6 +153,8 @@ struct vi_sdma_mqd {
uint32_t reserved_125;
uint32_t reserved_126;
uint32_t reserved_127;
+ uint32_t sdma_engine_id;
+ uint32_t sdma_queue_id;
};
struct vi_mqd {
diff --git a/drivers/gpu/drm/amd/lib/Makefile b/drivers/gpu/drm/amd/lib/Makefile
index 87cd7009e80f..690243001e1a 100644
--- a/drivers/gpu/drm/amd/lib/Makefile
+++ b/drivers/gpu/drm/amd/lib/Makefile
@@ -1,4 +1,25 @@
#
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
# Makefile for AMD library routines, which are used by AMD driver
# components.
#
diff --git a/drivers/gpu/drm/amd/powerplay/Makefile b/drivers/gpu/drm/amd/powerplay/Makefile
index 8c55c6e254d9..231785a9e24c 100644
--- a/drivers/gpu/drm/amd/powerplay/Makefile
+++ b/drivers/gpu/drm/amd/powerplay/Makefile
@@ -1,4 +1,24 @@
-# SPDX-License-Identifier: GPL-2.0
+#
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
subdir-ccflags-y += \
-I$(FULL_AMD_PATH)/powerplay/inc/ \
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index 9d3bdada79d5..fa9d1615a2cc 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -389,20 +389,12 @@ static int pp_dpm_force_performance_level(void *handle,
if (level == hwmgr->dpm_level)
return 0;
- if (hwmgr->hwmgr_func->force_dpm_level == NULL) {
- pr_info("%s was not implemented.\n", __func__);
- return 0;
- }
-
mutex_lock(&pp_handle->pp_lock);
pp_dpm_en_umd_pstate(hwmgr, &level);
hwmgr->request_dpm_level = level;
hwmgr_handle_task(pp_handle, AMD_PP_TASK_READJUST_POWER_STATE, NULL, NULL);
- ret = hwmgr->hwmgr_func->force_dpm_level(hwmgr, level);
- if (!ret)
- hwmgr->dpm_level = hwmgr->request_dpm_level;
-
mutex_unlock(&pp_handle->pp_lock);
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
index 824fb6fe54ae..a212c27f2e17 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
@@ -1,4 +1,24 @@
-# SPDX-License-Identifier: GPL-2.0
+#
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
#
# Makefile for the 'hw manager' sub-component of powerplay.
# It provides the hardware management services for the driver.
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
index ad1f6b57884b..b314d09d41af 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
@@ -728,9 +728,6 @@ static int cz_update_sclk_limit(struct pp_hwmgr *hwmgr)
if (clock < stable_pstate_sclk)
clock = stable_pstate_sclk;
- } else {
- if (clock < hwmgr->gfx_arbiter.sclk)
- clock = hwmgr->gfx_arbiter.sclk;
}
if (cz_hwmgr->sclk_dpm.soft_min_clk != clock) {
@@ -1085,14 +1082,8 @@ static int cz_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
uint32_t num_of_active_displays = 0;
struct cgs_display_info info = {0};
- cz_ps->evclk = hwmgr->vce_arbiter.evclk;
- cz_ps->ecclk = hwmgr->vce_arbiter.ecclk;
-
cz_ps->need_dfs_bypass = true;
- cz_hwmgr->video_start = (hwmgr->uvd_arbiter.vclk != 0 || hwmgr->uvd_arbiter.dclk != 0 ||
- hwmgr->vce_arbiter.evclk != 0 || hwmgr->vce_arbiter.ecclk != 0);
-
cz_hwmgr->battery_state = (PP_StateUILabel_Battery == prequest_ps->classification.ui_label);
clocks.memoryClock = hwmgr->display_config.min_mem_set_clock != 0 ?
@@ -1105,9 +1096,6 @@ static int cz_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
clocks.memoryClock = hwmgr->dyn_state.max_clock_voltage_on_ac.mclk;
- if (clocks.memoryClock < hwmgr->gfx_arbiter.mclk)
- clocks.memoryClock = hwmgr->gfx_arbiter.mclk;
-
force_high = (clocks.memoryClock > cz_hwmgr->sys_info.nbp_memory_clock[CZ_NUM_NBPMEMORYCLOCK - 1])
|| (num_of_active_displays >= 3);
@@ -1339,22 +1327,13 @@ int cz_dpm_update_vce_dpm(struct pp_hwmgr *hwmgr)
cz_hwmgr->vce_dpm.hard_min_clk,
PPSMC_MSG_SetEclkHardMin));
} else {
- /*Program HardMin based on the vce_arbiter.ecclk */
- if (hwmgr->vce_arbiter.ecclk == 0) {
- smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SetEclkHardMin, 0);
+
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetEclkHardMin, 0);
/* disable ECLK DPM 0. Otherwise VCE could hang if
* switching SCLK from DPM 0 to 6/7 */
- smum_send_msg_to_smc_with_parameter(hwmgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetEclkSoftMin, 1);
- } else {
- cz_hwmgr->vce_dpm.hard_min_clk = hwmgr->vce_arbiter.ecclk;
- smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SetEclkHardMin,
- cz_get_eclk_level(hwmgr,
- cz_hwmgr->vce_dpm.hard_min_clk,
- PPSMC_MSG_SetEclkHardMin));
- }
}
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
index 623cff90233d..2b0c53fe4c8d 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
@@ -112,26 +112,29 @@ int phm_force_dpm_levels(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level
PHM_FUNC_CHECK(hwmgr);
- if (hwmgr->hwmgr_func->force_dpm_level != NULL) {
+ if (hwmgr->hwmgr_func->force_dpm_level != NULL)
ret = hwmgr->hwmgr_func->force_dpm_level(hwmgr, level);
- if (ret)
- return ret;
-
- if (hwmgr->hwmgr_func->set_power_profile_state) {
- if (hwmgr->current_power_profile == AMD_PP_GFX_PROFILE)
- ret = hwmgr->hwmgr_func->set_power_profile_state(
- hwmgr,
- &hwmgr->gfx_power_profile);
- else if (hwmgr->current_power_profile == AMD_PP_COMPUTE_PROFILE)
- ret = hwmgr->hwmgr_func->set_power_profile_state(
- hwmgr,
- &hwmgr->compute_power_profile);
- }
- }
return ret;
}
+int phm_reset_power_profile_state(struct pp_hwmgr *hwmgr)
+{
+ int ret = 0;
+
+ if (hwmgr->hwmgr_func->set_power_profile_state) {
+ if (hwmgr->current_power_profile == AMD_PP_GFX_PROFILE)
+ ret = hwmgr->hwmgr_func->set_power_profile_state(
+ hwmgr,
+ &hwmgr->gfx_power_profile);
+ else if (hwmgr->current_power_profile == AMD_PP_COMPUTE_PROFILE)
+ ret = hwmgr->hwmgr_func->set_power_profile_state(
+ hwmgr,
+ &hwmgr->compute_power_profile);
+ }
+ return ret;
+}
+
int phm_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
struct pp_power_state *adjusted_ps,
const struct pp_power_state *current_ps)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
index ce59e0e67cb2..0229f774f7a9 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
@@ -149,6 +149,7 @@ int hwmgr_early_init(struct pp_instance *handle)
hwmgr->power_source = PP_PowerSource_AC;
hwmgr->pp_table_version = PP_TABLE_V1;
hwmgr->dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
+ hwmgr->request_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
hwmgr_init_default_caps(hwmgr);
hwmgr_set_user_specify_caps(hwmgr);
hwmgr->fan_ctrl_is_in_default_mode = true;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_overdriver.c b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_overdriver.c
index 67fae834bc67..8de384bf9a8f 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_overdriver.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_overdriver.c
@@ -1,4 +1,26 @@
-// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
#include "pp_overdriver.h"
#include <linux/errno.h>
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
index ffa44bbb218e..95ab772e0c3e 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
@@ -244,6 +244,10 @@ int psm_adjust_power_state_dynamic(struct pp_hwmgr *hwmgr, bool skip,
}
phm_notify_smc_display_config_after_ps_adjustment(hwmgr);
+ if (!phm_force_dpm_levels(hwmgr, hwmgr->request_dpm_level))
+ hwmgr->dpm_level = hwmgr->request_dpm_level;
+
+ phm_reset_power_profile_state(hwmgr);
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c
index 3e0b267c74a8..569073e3a5a1 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c
@@ -159,7 +159,6 @@ static int rv_construct_boot_state(struct pp_hwmgr *hwmgr)
static int rv_set_clock_limit(struct pp_hwmgr *hwmgr, const void *input)
{
- struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend);
struct PP_Clocks clocks = {0};
struct pp_display_clock_request clock_req;
@@ -170,39 +169,6 @@ static int rv_set_clock_limit(struct pp_hwmgr *hwmgr, const void *input)
PP_ASSERT_WITH_CODE(!rv_display_clock_voltage_request(hwmgr, &clock_req),
"Attempt to set DCF Clock Failed!", return -EINVAL);
- if (((hwmgr->uvd_arbiter.vclk_soft_min / 100) != rv_data->vclk_soft_min) ||
- ((hwmgr->uvd_arbiter.dclk_soft_min / 100) != rv_data->dclk_soft_min)) {
- rv_data->vclk_soft_min = hwmgr->uvd_arbiter.vclk_soft_min / 100;
- rv_data->dclk_soft_min = hwmgr->uvd_arbiter.dclk_soft_min / 100;
- smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SetSoftMinVcn,
- (rv_data->vclk_soft_min << 16) | rv_data->vclk_soft_min);
- }
-
- if((hwmgr->gfx_arbiter.sclk_hard_min != 0) &&
- ((hwmgr->gfx_arbiter.sclk_hard_min / 100) != rv_data->soc_actual_hard_min_freq)) {
- smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SetHardMinSocclkByFreq,
- hwmgr->gfx_arbiter.sclk_hard_min / 100);
- rv_read_arg_from_smc(hwmgr, &rv_data->soc_actual_hard_min_freq);
- }
-
- if ((hwmgr->gfx_arbiter.gfxclk != 0) &&
- (rv_data->gfx_actual_soft_min_freq != (hwmgr->gfx_arbiter.gfxclk))) {
- smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SetMinVideoGfxclkFreq,
- hwmgr->gfx_arbiter.gfxclk / 100);
- rv_read_arg_from_smc(hwmgr, &rv_data->gfx_actual_soft_min_freq);
- }
-
- if ((hwmgr->gfx_arbiter.fclk != 0) &&
- (rv_data->fabric_actual_soft_min_freq != (hwmgr->gfx_arbiter.fclk / 100))) {
- smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SetMinVideoFclkFreq,
- hwmgr->gfx_arbiter.fclk / 100);
- rv_read_arg_from_smc(hwmgr, &rv_data->fabric_actual_soft_min_freq);
- }
-
return 0;
}
@@ -518,17 +484,161 @@ static int rv_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
static int rv_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
enum amd_dpm_forced_level level)
{
+ if (hwmgr->smu_version < 0x1E3700) {
+ pr_info("smu firmware version too old, can not set dpm level\n");
+ return 0;
+ }
+
+ switch (level) {
+ case AMD_DPM_FORCED_LEVEL_HIGH:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetHardMinGfxClk,
+ RAVEN_UMD_PSTATE_PEAK_GFXCLK);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetHardMinFclkByFreq,
+ RAVEN_UMD_PSTATE_PEAK_FCLK);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetHardMinSocclkByFreq,
+ RAVEN_UMD_PSTATE_PEAK_SOCCLK);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetHardMinVcn,
+ RAVEN_UMD_PSTATE_VCE);
+
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetSoftMaxGfxClk,
+ RAVEN_UMD_PSTATE_PEAK_GFXCLK);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetSoftMaxFclkByFreq,
+ RAVEN_UMD_PSTATE_PEAK_FCLK);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetSoftMaxSocclkByFreq,
+ RAVEN_UMD_PSTATE_PEAK_SOCCLK);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetSoftMaxVcn,
+ RAVEN_UMD_PSTATE_VCE);
+ break;
+ case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetHardMinGfxClk,
+ RAVEN_UMD_PSTATE_MIN_GFXCLK);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetSoftMaxGfxClk,
+ RAVEN_UMD_PSTATE_MIN_GFXCLK);
+ break;
+ case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetHardMinFclkByFreq,
+ RAVEN_UMD_PSTATE_MIN_FCLK);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetSoftMaxFclkByFreq,
+ RAVEN_UMD_PSTATE_MIN_FCLK);
+ break;
+ case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetHardMinGfxClk,
+ RAVEN_UMD_PSTATE_GFXCLK);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetHardMinFclkByFreq,
+ RAVEN_UMD_PSTATE_FCLK);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetHardMinSocclkByFreq,
+ RAVEN_UMD_PSTATE_SOCCLK);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetHardMinVcn,
+ RAVEN_UMD_PSTATE_VCE);
+
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetSoftMaxGfxClk,
+ RAVEN_UMD_PSTATE_GFXCLK);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetSoftMaxFclkByFreq,
+ RAVEN_UMD_PSTATE_FCLK);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetSoftMaxSocclkByFreq,
+ RAVEN_UMD_PSTATE_SOCCLK);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetSoftMaxVcn,
+ RAVEN_UMD_PSTATE_VCE);
+ break;
+ case AMD_DPM_FORCED_LEVEL_AUTO:
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetHardMinGfxClk,
+ RAVEN_UMD_PSTATE_MIN_GFXCLK);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetHardMinFclkByFreq,
+ RAVEN_UMD_PSTATE_MIN_FCLK);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetHardMinSocclkByFreq,
+ RAVEN_UMD_PSTATE_MIN_SOCCLK);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetHardMinVcn,
+ RAVEN_UMD_PSTATE_MIN_VCE);
+
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetSoftMaxGfxClk,
+ RAVEN_UMD_PSTATE_PEAK_GFXCLK);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetSoftMaxFclkByFreq,
+ RAVEN_UMD_PSTATE_PEAK_FCLK);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetSoftMaxSocclkByFreq,
+ RAVEN_UMD_PSTATE_PEAK_SOCCLK);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetSoftMaxVcn,
+ RAVEN_UMD_PSTATE_VCE);
+ break;
+ case AMD_DPM_FORCED_LEVEL_LOW:
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetHardMinGfxClk,
+ RAVEN_UMD_PSTATE_MIN_GFXCLK);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetSoftMaxGfxClk,
+ RAVEN_UMD_PSTATE_MIN_GFXCLK);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetHardMinFclkByFreq,
+ RAVEN_UMD_PSTATE_MIN_FCLK);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetSoftMaxFclkByFreq,
+ RAVEN_UMD_PSTATE_MIN_FCLK);
+ break;
+ case AMD_DPM_FORCED_LEVEL_MANUAL:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
+ default:
+ break;
+ }
return 0;
}
static uint32_t rv_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
{
- return 0;
+ struct rv_hwmgr *data;
+
+ if (hwmgr == NULL)
+ return -EINVAL;
+
+ data = (struct rv_hwmgr *)(hwmgr->backend);
+
+ if (low)
+ return data->clock_vol_info.vdd_dep_on_fclk->entries[0].clk;
+ else
+ return data->clock_vol_info.vdd_dep_on_fclk->entries[
+ data->clock_vol_info.vdd_dep_on_fclk->count - 1].clk;
}
static uint32_t rv_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
{
- return 0;
+ struct rv_hwmgr *data;
+
+ if (hwmgr == NULL)
+ return -EINVAL;
+
+ data = (struct rv_hwmgr *)(hwmgr->backend);
+
+ if (low)
+ return data->gfx_min_freq_limit;
+ else
+ return data->gfx_max_freq_limit;
}
static int rv_dpm_patch_boot_state(struct pp_hwmgr *hwmgr,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.h
index 9dc503055394..c3bc311dc59f 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.h
@@ -304,4 +304,19 @@ struct pp_hwmgr;
int rv_init_function_pointers(struct pp_hwmgr *hwmgr);
+/* UMD PState Raven Msg Parameters in MHz */
+#define RAVEN_UMD_PSTATE_GFXCLK 700
+#define RAVEN_UMD_PSTATE_SOCCLK 626
+#define RAVEN_UMD_PSTATE_FCLK 933
+#define RAVEN_UMD_PSTATE_VCE 0x03C00320
+
+#define RAVEN_UMD_PSTATE_PEAK_GFXCLK 1100
+#define RAVEN_UMD_PSTATE_PEAK_SOCCLK 757
+#define RAVEN_UMD_PSTATE_PEAK_FCLK 1200
+
+#define RAVEN_UMD_PSTATE_MIN_GFXCLK 200
+#define RAVEN_UMD_PSTATE_MIN_FCLK 400
+#define RAVEN_UMD_PSTATE_MIN_SOCCLK 200
+#define RAVEN_UMD_PSTATE_MIN_VCE 0x0190012C
+
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index 8edb0c4c3876..40adc855c416 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -2722,9 +2722,6 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
}
}
- smu7_ps->vce_clks.evclk = hwmgr->vce_arbiter.evclk;
- smu7_ps->vce_clks.ecclk = hwmgr->vce_arbiter.ecclk;
-
cgs_get_active_displays_info(hwmgr->device, &info);
minimum_clocks.engineClock = hwmgr->display_config.min_core_set_clock;
@@ -2754,38 +2751,6 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
minimum_clocks.memoryClock = stable_pstate_mclk;
}
- if (minimum_clocks.engineClock < hwmgr->gfx_arbiter.sclk)
- minimum_clocks.engineClock = hwmgr->gfx_arbiter.sclk;
-
- if (minimum_clocks.memoryClock < hwmgr->gfx_arbiter.mclk)
- minimum_clocks.memoryClock = hwmgr->gfx_arbiter.mclk;
-
- smu7_ps->sclk_threshold = hwmgr->gfx_arbiter.sclk_threshold;
-
- if (0 != hwmgr->gfx_arbiter.sclk_over_drive) {
- PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.sclk_over_drive <=
- hwmgr->platform_descriptor.overdriveLimit.engineClock),
- "Overdrive sclk exceeds limit",
- hwmgr->gfx_arbiter.sclk_over_drive =
- hwmgr->platform_descriptor.overdriveLimit.engineClock);
-
- if (hwmgr->gfx_arbiter.sclk_over_drive >= hwmgr->gfx_arbiter.sclk)
- smu7_ps->performance_levels[1].engine_clock =
- hwmgr->gfx_arbiter.sclk_over_drive;
- }
-
- if (0 != hwmgr->gfx_arbiter.mclk_over_drive) {
- PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.mclk_over_drive <=
- hwmgr->platform_descriptor.overdriveLimit.memoryClock),
- "Overdrive mclk exceeds limit",
- hwmgr->gfx_arbiter.mclk_over_drive =
- hwmgr->platform_descriptor.overdriveLimit.memoryClock);
-
- if (hwmgr->gfx_arbiter.mclk_over_drive >= hwmgr->gfx_arbiter.mclk)
- smu7_ps->performance_levels[1].memory_clock =
- hwmgr->gfx_arbiter.mclk_over_drive;
- }
-
disable_mclk_switching_for_frame_lock = phm_cap_enabled(
hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index 07d256d136ad..2d55dabc77d4 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -426,9 +426,9 @@ static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
data->smu_features[GNLD_VR0HOT].supported = true;
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetSmuVersion);
- vega10_read_arg_from_smc(hwmgr, &(data->smu_version));
+ vega10_read_arg_from_smc(hwmgr, &(hwmgr->smu_version));
/* ACG firmware has major version 5 */
- if ((data->smu_version & 0xff000000) == 0x5000000)
+ if ((hwmgr->smu_version & 0xff000000) == 0x5000000)
data->smu_features[GNLD_ACG].supported = true;
if (data->registry_data.didt_support)
@@ -2879,8 +2879,8 @@ static int vega10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
"DPM is already running right , skipping re-enablement!",
return 0);
- if ((data->smu_version == 0x001c2c00) ||
- (data->smu_version == 0x001c2d00)) {
+ if ((hwmgr->smu_version == 0x001c2c00) ||
+ (hwmgr->smu_version == 0x001c2d00)) {
tmp_result = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_UpdatePkgPwrPidAlpha, 1);
PP_ASSERT_WITH_CODE(!tmp_result,
@@ -3124,9 +3124,6 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
}
}
- vega10_ps->vce_clks.evclk = hwmgr->vce_arbiter.evclk;
- vega10_ps->vce_clks.ecclk = hwmgr->vce_arbiter.ecclk;
-
cgs_get_active_displays_info(hwmgr->device, &info);
/* result = PHM_CheckVBlankTime(hwmgr, &vblankTooShort);*/
@@ -3165,38 +3162,6 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
minimum_clocks.memoryClock = stable_pstate_mclk;
}
- if (minimum_clocks.engineClock < hwmgr->gfx_arbiter.sclk)
- minimum_clocks.engineClock = hwmgr->gfx_arbiter.sclk;
-
- if (minimum_clocks.memoryClock < hwmgr->gfx_arbiter.mclk)
- minimum_clocks.memoryClock = hwmgr->gfx_arbiter.mclk;
-
- vega10_ps->sclk_threshold = hwmgr->gfx_arbiter.sclk_threshold;
-
- if (hwmgr->gfx_arbiter.sclk_over_drive) {
- PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.sclk_over_drive <=
- hwmgr->platform_descriptor.overdriveLimit.engineClock),
- "Overdrive sclk exceeds limit",
- hwmgr->gfx_arbiter.sclk_over_drive =
- hwmgr->platform_descriptor.overdriveLimit.engineClock);
-
- if (hwmgr->gfx_arbiter.sclk_over_drive >= hwmgr->gfx_arbiter.sclk)
- vega10_ps->performance_levels[1].gfx_clock =
- hwmgr->gfx_arbiter.sclk_over_drive;
- }
-
- if (hwmgr->gfx_arbiter.mclk_over_drive) {
- PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.mclk_over_drive <=
- hwmgr->platform_descriptor.overdriveLimit.memoryClock),
- "Overdrive mclk exceeds limit",
- hwmgr->gfx_arbiter.mclk_over_drive =
- hwmgr->platform_descriptor.overdriveLimit.memoryClock);
-
- if (hwmgr->gfx_arbiter.mclk_over_drive >= hwmgr->gfx_arbiter.mclk)
- vega10_ps->performance_levels[1].mem_clock =
- hwmgr->gfx_arbiter.mclk_over_drive;
- }
-
disable_mclk_switching_for_frame_lock = phm_cap_enabled(
hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
@@ -3819,10 +3784,7 @@ static int vega10_update_sclk_threshold(struct pp_hwmgr *hwmgr)
uint32_t low_sclk_interrupt_threshold = 0;
if (PP_CAP(PHM_PlatformCaps_SclkThrottleLowNotification) &&
- (hwmgr->gfx_arbiter.sclk_threshold !=
- data->low_sclk_interrupt_threshold)) {
- data->low_sclk_interrupt_threshold =
- hwmgr->gfx_arbiter.sclk_threshold;
+ (data->low_sclk_interrupt_threshold != 0)) {
low_sclk_interrupt_threshold =
data->low_sclk_interrupt_threshold;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h
index 8f7358cc3327..e8507ff8dbb3 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h
@@ -387,7 +387,6 @@ struct vega10_hwmgr {
struct vega10_smc_state_table smc_state_table;
uint32_t config_telemetry;
- uint32_t smu_version;
uint32_t acg_loop_state;
uint32_t mem_channels;
};
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
index 57a0467b7267..5716b937a6ad 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
@@ -437,5 +437,6 @@ extern int phm_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
extern int phm_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks);
extern int phm_disable_smc_firmware_ctf(struct pp_hwmgr *hwmgr);
+extern int phm_reset_power_profile_state(struct pp_hwmgr *hwmgr);
#endif /* _HARDWARE_MANAGER_H_ */
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
index 004a40e88bde..565fe0832f41 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -105,36 +105,6 @@ struct phm_set_power_state_input {
const struct pp_hw_power_state *pnew_state;
};
-struct phm_acp_arbiter {
- uint32_t acpclk;
-};
-
-struct phm_uvd_arbiter {
- uint32_t vclk;
- uint32_t dclk;
- uint32_t vclk_ceiling;
- uint32_t dclk_ceiling;
- uint32_t vclk_soft_min;
- uint32_t dclk_soft_min;
-};
-
-struct phm_vce_arbiter {
- uint32_t evclk;
- uint32_t ecclk;
-};
-
-struct phm_gfx_arbiter {
- uint32_t sclk;
- uint32_t sclk_hard_min;
- uint32_t mclk;
- uint32_t sclk_over_drive;
- uint32_t mclk_over_drive;
- uint32_t sclk_threshold;
- uint32_t num_cus;
- uint32_t gfxclk;
- uint32_t fclk;
-};
-
struct phm_clock_array {
uint32_t count;
uint32_t values[1];
@@ -722,6 +692,7 @@ enum PP_TABLE_VERSION {
struct pp_hwmgr {
uint32_t chip_family;
uint32_t chip_id;
+ uint32_t smu_version;
uint32_t pp_table_version;
void *device;
@@ -737,10 +708,6 @@ struct pp_hwmgr {
enum amd_dpm_forced_level dpm_level;
enum amd_dpm_forced_level saved_dpm_level;
enum amd_dpm_forced_level request_dpm_level;
- struct phm_gfx_arbiter gfx_arbiter;
- struct phm_acp_arbiter acp_arbiter;
- struct phm_uvd_arbiter uvd_arbiter;
- struct phm_vce_arbiter vce_arbiter;
uint32_t usec_timeout;
void *pptable;
struct phm_platform_descriptor platform_descriptor;
diff --git a/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h
index 2b3497135bbd..f15f4df9d0a9 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h
@@ -75,7 +75,12 @@
#define PPSMC_MSG_GetMinGfxclkFrequency 0x2C
#define PPSMC_MSG_GetMaxGfxclkFrequency 0x2D
#define PPSMC_MSG_SoftReset 0x2E
-#define PPSMC_Message_Count 0x2F
+#define PPSMC_MSG_SetSoftMaxGfxClk 0x30
+#define PPSMC_MSG_SetHardMinGfxClk 0x31
+#define PPSMC_MSG_SetSoftMaxSocclkByFreq 0x32
+#define PPSMC_MSG_SetSoftMaxFclkByFreq 0x33
+#define PPSMC_MSG_SetSoftMaxVcn 0x34
+#define PPSMC_Message_Count 0x35
typedef uint16_t PPSMC_Result;
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu72.h b/drivers/gpu/drm/amd/powerplay/inc/smu72.h
index 08cd70c75d8b..9ad1cefff79f 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu72.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu72.h
@@ -1,4 +1,26 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
#ifndef SMU72_H
#define SMU72_H
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu72_discrete.h b/drivers/gpu/drm/amd/powerplay/inc/smu72_discrete.h
index b2edbc0c3c4d..2aefbb85f620 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu72_discrete.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu72_discrete.h
@@ -1,4 +1,26 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
#ifndef SMU72_DISCRETE_H
#define SMU72_DISCRETE_H
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
index 30d3089d7dba..98e701e4f553 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
@@ -1,4 +1,24 @@
-# SPDX-License-Identifier: GPL-2.0
+#
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
#
# Makefile for the 'smu manager' sub-component of powerplay.
# It provides the smu management services for the driver.
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
index c36f00ef46f3..0b4a55660de4 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
@@ -2218,10 +2218,7 @@ static int ci_update_sclk_threshold(struct pp_hwmgr *hwmgr)
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SclkThrottleLowNotification)
- && (hwmgr->gfx_arbiter.sclk_threshold !=
- data->low_sclk_interrupt_threshold)) {
- data->low_sclk_interrupt_threshold =
- hwmgr->gfx_arbiter.sclk_threshold;
+ && (data->low_sclk_interrupt_threshold != 0)) {
low_sclk_interrupt_threshold =
data->low_sclk_interrupt_threshold;
@@ -2319,6 +2316,7 @@ static int ci_load_smc_ucode(struct pp_hwmgr *hwmgr)
cgs_get_firmware_info(hwmgr->device, CGS_UCODE_ID_SMU, &info);
hwmgr->is_kicker = info.is_kicker;
+ hwmgr->smu_version = info.version;
byte_count = info.image_size;
src = (uint8_t *)info.kptr;
start_addr = info.ucode_start_address;
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c
index 78ab0556e48f..4d3aff381bca 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c
@@ -709,6 +709,19 @@ static int cz_start_smu(struct pp_hwmgr *hwmgr)
{
int ret = 0;
uint32_t fw_to_check = 0;
+ struct cgs_firmware_info info = {0};
+ uint32_t index = SMN_MP1_SRAM_START_ADDR +
+ SMU8_FIRMWARE_HEADER_LOCATION +
+ offsetof(struct SMU8_Firmware_Header, Version);
+
+
+ if (hwmgr == NULL || hwmgr->device == NULL)
+ return -EINVAL;
+
+ cgs_write_register(hwmgr->device, mmMP0PUB_IND_INDEX, index);
+ hwmgr->smu_version = cgs_read_register(hwmgr->device, mmMP0PUB_IND_DATA);
+ info.version = hwmgr->smu_version >> 8;
+ cgs_get_firmware_info(hwmgr->device, CGS_UCODE_ID_SMU, &info);
fw_to_check = UCODE_ID_RLC_G_MASK |
UCODE_ID_SDMA0_MASK |
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
index f572beff197f..085d81c8b332 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
@@ -2385,10 +2385,7 @@ static int fiji_update_sclk_threshold(struct pp_hwmgr *hwmgr)
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SclkThrottleLowNotification)
- && (hwmgr->gfx_arbiter.sclk_threshold !=
- data->low_sclk_interrupt_threshold)) {
- data->low_sclk_interrupt_threshold =
- hwmgr->gfx_arbiter.sclk_threshold;
+ && (data->low_sclk_interrupt_threshold != 0)) {
low_sclk_interrupt_threshold =
data->low_sclk_interrupt_threshold;
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
index d62078681cae..125312691f75 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
@@ -204,7 +204,7 @@ static int iceland_smu_upload_firmware_image(struct pp_hwmgr *hwmgr)
pr_err("[ powerplay ] SMC address is beyond the SMC RAM area\n");
return -EINVAL;
}
-
+ hwmgr->smu_version = info.version;
/* wait for smc boot up */
PHM_WAIT_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND,
RCU_UC_EVENTS, boot_seq_done, 0);
@@ -2202,10 +2202,7 @@ static int iceland_update_sclk_threshold(struct pp_hwmgr *hwmgr)
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SclkThrottleLowNotification)
- && (hwmgr->gfx_arbiter.sclk_threshold !=
- data->low_sclk_interrupt_threshold)) {
- data->low_sclk_interrupt_threshold =
- hwmgr->gfx_arbiter.sclk_threshold;
+ && (data->low_sclk_interrupt_threshold != 0)) {
low_sclk_interrupt_threshold =
data->low_sclk_interrupt_threshold;
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
index bd6be7793ca7..cdb47657b567 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
@@ -2369,10 +2369,7 @@ static int polaris10_update_sclk_threshold(struct pp_hwmgr *hwmgr)
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SclkThrottleLowNotification)
- && (hwmgr->gfx_arbiter.sclk_threshold !=
- data->low_sclk_interrupt_threshold)) {
- data->low_sclk_interrupt_threshold =
- hwmgr->gfx_arbiter.sclk_threshold;
+ && (data->low_sclk_interrupt_threshold != 0)) {
low_sclk_interrupt_threshold =
data->low_sclk_interrupt_threshold;
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.c
index b98ade676d12..2d662b44af54 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.c
@@ -305,6 +305,14 @@ static int rv_smu_fini(struct pp_hwmgr *hwmgr)
static int rv_start_smu(struct pp_hwmgr *hwmgr)
{
+ struct cgs_firmware_info info = {0};
+
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetSmuVersion);
+ rv_read_arg_from_smc(hwmgr, &hwmgr->smu_version);
+ info.version = hwmgr->smu_version >> 8;
+
+ cgs_get_firmware_info(hwmgr->device, CGS_UCODE_ID_SMU, &info);
+
if (rv_verify_smc_interface(hwmgr))
return -EINVAL;
if (rv_smc_enable_sdma(hwmgr))
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
index 7f5359a97ef2..cb95e882b98f 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
@@ -535,7 +535,7 @@ int smu7_upload_smu_firmware_image(struct pp_hwmgr *hwmgr)
smu7_convert_fw_type_to_cgs(UCODE_ID_SMU_SK), &info);
hwmgr->is_kicker = info.is_kicker;
-
+ hwmgr->smu_version = info.version;
result = smu7_upload_smc_firmware_data(hwmgr, info.image_size, (uint32_t *)info.kptr, SMU7_SMC_SIZE);
return result;
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
index 81b8790c0d22..79e5c05571bc 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
@@ -2654,10 +2654,7 @@ static int tonga_update_sclk_threshold(struct pp_hwmgr *hwmgr)
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SclkThrottleLowNotification)
- && (hwmgr->gfx_arbiter.sclk_threshold !=
- data->low_sclk_interrupt_threshold)) {
- data->low_sclk_interrupt_threshold =
- hwmgr->gfx_arbiter.sclk_threshold;
+ && (data->low_sclk_interrupt_threshold != 0)) {
low_sclk_interrupt_threshold =
data->low_sclk_interrupt_threshold;
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
deleted file mode 100644
index eebe323c7159..000000000000
--- a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
+++ /dev/null
@@ -1,60 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#if !defined(_GPU_SCHED_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _GPU_SCHED_TRACE_H_
-
-#include <linux/stringify.h>
-#include <linux/types.h>
-#include <linux/tracepoint.h>
-
-#include <drm/drmP.h>
-
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM gpu_sched
-#define TRACE_INCLUDE_FILE gpu_sched_trace
-
-TRACE_EVENT(amd_sched_job,
- TP_PROTO(struct amd_sched_job *sched_job, struct amd_sched_entity *entity),
- TP_ARGS(sched_job, entity),
- TP_STRUCT__entry(
- __field(struct amd_sched_entity *, entity)
- __field(struct dma_fence *, fence)
- __field(const char *, name)
- __field(uint64_t, id)
- __field(u32, job_count)
- __field(int, hw_job_count)
- ),
-
- TP_fast_assign(
- __entry->entity = entity;
- __entry->id = sched_job->id;
- __entry->fence = &sched_job->s_fence->finished;
- __entry->name = sched_job->sched->name;
- __entry->job_count = spsc_queue_count(&entity->job_queue);
- __entry->hw_job_count = atomic_read(
- &sched_job->sched->hw_rq_count);
- ),
- TP_printk("entity=%p, id=%llu, fence=%p, ring=%s, job count:%u, hw job count:%d",
- __entry->entity, __entry->id,
- __entry->fence, __entry->name,
- __entry->job_count, __entry->hw_job_count)
-);
-
-TRACE_EVENT(amd_sched_process_job,
- TP_PROTO(struct amd_sched_fence *fence),
- TP_ARGS(fence),
- TP_STRUCT__entry(
- __field(struct dma_fence *, fence)
- ),
-
- TP_fast_assign(
- __entry->fence = &fence->finished;
- ),
- TP_printk("fence=%p signaled", __entry->fence)
-);
-
-#endif
-
-/* This part must be outside protection */
-#undef TRACE_INCLUDE_PATH
-#define TRACE_INCLUDE_PATH .
-#include <trace/define_trace.h>
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
deleted file mode 100644
index b590fcc2786a..000000000000
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+++ /dev/null
@@ -1,186 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef _GPU_SCHEDULER_H_
-#define _GPU_SCHEDULER_H_
-
-#include <linux/kfifo.h>
-#include <linux/dma-fence.h>
-#include "spsc_queue.h"
-
-struct amd_gpu_scheduler;
-struct amd_sched_rq;
-
-enum amd_sched_priority {
- AMD_SCHED_PRIORITY_MIN,
- AMD_SCHED_PRIORITY_LOW = AMD_SCHED_PRIORITY_MIN,
- AMD_SCHED_PRIORITY_NORMAL,
- AMD_SCHED_PRIORITY_HIGH_SW,
- AMD_SCHED_PRIORITY_HIGH_HW,
- AMD_SCHED_PRIORITY_KERNEL,
- AMD_SCHED_PRIORITY_MAX,
- AMD_SCHED_PRIORITY_INVALID = -1,
- AMD_SCHED_PRIORITY_UNSET = -2
-};
-
-
-/**
- * A scheduler entity is a wrapper around a job queue or a group
- * of other entities. Entities take turns emitting jobs from their
- * job queues to corresponding hardware ring based on scheduling
- * policy.
-*/
-struct amd_sched_entity {
- struct list_head list;
- struct amd_sched_rq *rq;
- spinlock_t rq_lock;
- struct amd_gpu_scheduler *sched;
-
- spinlock_t queue_lock;
- struct spsc_queue job_queue;
-
- atomic_t fence_seq;
- uint64_t fence_context;
-
- struct dma_fence *dependency;
- struct dma_fence_cb cb;
- atomic_t *guilty; /* points to ctx's guilty */
-};
-
-/**
- * Run queue is a set of entities scheduling command submissions for
- * one specific ring. It implements the scheduling policy that selects
- * the next entity to emit commands from.
-*/
-struct amd_sched_rq {
- spinlock_t lock;
- struct list_head entities;
- struct amd_sched_entity *current_entity;
-};
-
-struct amd_sched_fence {
- struct dma_fence scheduled;
- struct dma_fence finished;
- struct dma_fence_cb cb;
- struct dma_fence *parent;
- struct amd_gpu_scheduler *sched;
- spinlock_t lock;
- void *owner;
-};
-
-struct amd_sched_job {
- struct spsc_node queue_node;
- struct amd_gpu_scheduler *sched;
- struct amd_sched_fence *s_fence;
- struct dma_fence_cb finish_cb;
- struct work_struct finish_work;
- struct list_head node;
- struct delayed_work work_tdr;
- uint64_t id;
- atomic_t karma;
- enum amd_sched_priority s_priority;
-};
-
-extern const struct dma_fence_ops amd_sched_fence_ops_scheduled;
-extern const struct dma_fence_ops amd_sched_fence_ops_finished;
-static inline struct amd_sched_fence *to_amd_sched_fence(struct dma_fence *f)
-{
- if (f->ops == &amd_sched_fence_ops_scheduled)
- return container_of(f, struct amd_sched_fence, scheduled);
-
- if (f->ops == &amd_sched_fence_ops_finished)
- return container_of(f, struct amd_sched_fence, finished);
-
- return NULL;
-}
-
-static inline bool amd_sched_invalidate_job(struct amd_sched_job *s_job, int threshold)
-{
- return (s_job && atomic_inc_return(&s_job->karma) > threshold);
-}
-
-/**
- * Define the backend operations called by the scheduler,
- * these functions should be implemented in driver side
-*/
-struct amd_sched_backend_ops {
- struct dma_fence *(*dependency)(struct amd_sched_job *sched_job,
- struct amd_sched_entity *s_entity);
- struct dma_fence *(*run_job)(struct amd_sched_job *sched_job);
- void (*timedout_job)(struct amd_sched_job *sched_job);
- void (*free_job)(struct amd_sched_job *sched_job);
-};
-
-/**
- * One scheduler is implemented for each hardware ring
-*/
-struct amd_gpu_scheduler {
- const struct amd_sched_backend_ops *ops;
- uint32_t hw_submission_limit;
- long timeout;
- const char *name;
- struct amd_sched_rq sched_rq[AMD_SCHED_PRIORITY_MAX];
- wait_queue_head_t wake_up_worker;
- wait_queue_head_t job_scheduled;
- atomic_t hw_rq_count;
- atomic64_t job_id_count;
- struct task_struct *thread;
- struct list_head ring_mirror_list;
- spinlock_t job_list_lock;
- int hang_limit;
-};
-
-int amd_sched_init(struct amd_gpu_scheduler *sched,
- const struct amd_sched_backend_ops *ops,
- uint32_t hw_submission, unsigned hang_limit, long timeout, const char *name);
-void amd_sched_fini(struct amd_gpu_scheduler *sched);
-
-int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
- struct amd_sched_entity *entity,
- struct amd_sched_rq *rq,
- uint32_t jobs, atomic_t* guilty);
-void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
- struct amd_sched_entity *entity);
-void amd_sched_entity_push_job(struct amd_sched_job *sched_job,
- struct amd_sched_entity *entity);
-void amd_sched_entity_set_rq(struct amd_sched_entity *entity,
- struct amd_sched_rq *rq);
-
-int amd_sched_fence_slab_init(void);
-void amd_sched_fence_slab_fini(void);
-
-struct amd_sched_fence *amd_sched_fence_create(
- struct amd_sched_entity *s_entity, void *owner);
-void amd_sched_fence_scheduled(struct amd_sched_fence *fence);
-void amd_sched_fence_finished(struct amd_sched_fence *fence);
-int amd_sched_job_init(struct amd_sched_job *job,
- struct amd_gpu_scheduler *sched,
- struct amd_sched_entity *entity,
- void *owner);
-void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched, struct amd_sched_job *job);
-void amd_sched_job_recovery(struct amd_gpu_scheduler *sched);
-bool amd_sched_dependency_optimized(struct dma_fence* fence,
- struct amd_sched_entity *entity);
-void amd_sched_job_kickout(struct amd_sched_job *s_job);
-
-#endif
diff --git a/drivers/gpu/drm/amd/scheduler/spsc_queue.h b/drivers/gpu/drm/amd/scheduler/spsc_queue.h
deleted file mode 100644
index 5902f35ce759..000000000000
--- a/drivers/gpu/drm/amd/scheduler/spsc_queue.h
+++ /dev/null
@@ -1,121 +0,0 @@
-/*
- * Copyright 2017 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef AMD_SCHEDULER_SPSC_QUEUE_H_
-#define AMD_SCHEDULER_SPSC_QUEUE_H_
-
-#include <linux/atomic.h>
-
-/** SPSC lockless queue */
-
-struct spsc_node {
-
- /* Stores spsc_node* */
- struct spsc_node *next;
-};
-
-struct spsc_queue {
-
- struct spsc_node *head;
-
- /* atomic pointer to struct spsc_node* */
- atomic_long_t tail;
-
- atomic_t job_count;
-};
-
-static inline void spsc_queue_init(struct spsc_queue *queue)
-{
- queue->head = NULL;
- atomic_long_set(&queue->tail, (long)&queue->head);
- atomic_set(&queue->job_count, 0);
-}
-
-static inline struct spsc_node *spsc_queue_peek(struct spsc_queue *queue)
-{
- return queue->head;
-}
-
-static inline int spsc_queue_count(struct spsc_queue *queue)
-{
- return atomic_read(&queue->job_count);
-}
-
-static inline bool spsc_queue_push(struct spsc_queue *queue, struct spsc_node *node)
-{
- struct spsc_node **tail;
-
- node->next = NULL;
-
- preempt_disable();
-
- tail = (struct spsc_node **)atomic_long_xchg(&queue->tail, (long)&node->next);
- WRITE_ONCE(*tail, node);
- atomic_inc(&queue->job_count);
-
- /*
- * In case of first element verify new node will be visible to the consumer
- * thread when we ping the kernel thread that there is new work to do.
- */
- smp_wmb();
-
- preempt_enable();
-
- return tail == &queue->head;
-}
-
-
-static inline struct spsc_node *spsc_queue_pop(struct spsc_queue *queue)
-{
- struct spsc_node *next, *node;
-
- /* Verify reading from memory and not the cache */
- smp_rmb();
-
- node = READ_ONCE(queue->head);
-
- if (!node)
- return NULL;
-
- next = READ_ONCE(node->next);
- WRITE_ONCE(queue->head, next);
-
- if (unlikely(!next)) {
- /* slowpath for the last element in the queue */
-
- if (atomic_long_cmpxchg(&queue->tail,
- (long)&node->next, (long) &queue->head) != (long)&node->next) {
- /* Updating tail failed wait for new next to appear */
- do {
- smp_rmb();
- } while (unlikely(!(queue->head = READ_ONCE(node->next))));
- }
- }
-
- atomic_dec(&queue->job_count);
- return node;
-}
-
-
-
-#endif /* AMD_SCHEDULER_SPSC_QUEUE_H_ */
diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c
index e080e31a8513..3d82712d8002 100644
--- a/drivers/gpu/drm/arm/malidp_drv.c
+++ b/drivers/gpu/drm/arm/malidp_drv.c
@@ -13,7 +13,6 @@
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/component.h>
-#include <linux/console.h>
#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/of_reserved_mem.h>
@@ -24,6 +23,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
@@ -183,13 +183,6 @@ static int malidp_set_and_wait_config_valid(struct drm_device *drm)
return (ret > 0) ? 0 : -ETIMEDOUT;
}
-static void malidp_output_poll_changed(struct drm_device *drm)
-{
- struct malidp_drm *malidp = drm->dev_private;
-
- drm_fbdev_cma_hotplug_event(malidp->fbdev);
-}
-
static void malidp_atomic_commit_hw_done(struct drm_atomic_state *state)
{
struct drm_pending_vblank_event *event;
@@ -252,7 +245,7 @@ static const struct drm_mode_config_helper_funcs malidp_mode_config_helpers = {
static const struct drm_mode_config_funcs malidp_mode_config_funcs = {
.fb_create = drm_gem_fb_create,
- .output_poll_changed = malidp_output_poll_changed,
+ .output_poll_changed = drm_fb_helper_output_poll_changed,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
};
@@ -317,19 +310,12 @@ static int malidp_irq_init(struct platform_device *pdev)
return 0;
}
-static void malidp_lastclose(struct drm_device *drm)
-{
- struct malidp_drm *malidp = drm->dev_private;
-
- drm_fbdev_cma_restore_mode(malidp->fbdev);
-}
-
DEFINE_DRM_GEM_CMA_FOPS(fops);
static struct drm_driver malidp_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC |
DRIVER_PRIME,
- .lastclose = malidp_lastclose,
+ .lastclose = drm_fb_helper_lastclose,
.gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.dumb_create = drm_gem_cma_dumb_create,
@@ -623,14 +609,9 @@ static int malidp_bind(struct device *dev)
drm_mode_config_reset(drm);
- malidp->fbdev = drm_fbdev_cma_init(drm, 32,
- drm->mode_config.num_connector);
-
- if (IS_ERR(malidp->fbdev)) {
- ret = PTR_ERR(malidp->fbdev);
- malidp->fbdev = NULL;
+ ret = drm_fb_cma_fbdev_init(drm, 32, 0);
+ if (ret)
goto fbdev_fail;
- }
drm_kms_helper_poll_init(drm);
@@ -641,10 +622,7 @@ static int malidp_bind(struct device *dev)
return 0;
register_fail:
- if (malidp->fbdev) {
- drm_fbdev_cma_fini(malidp->fbdev);
- malidp->fbdev = NULL;
- }
+ drm_fb_cma_fbdev_fini(drm);
drm_kms_helper_poll_fini(drm);
fbdev_fail:
pm_runtime_get_sync(dev);
@@ -681,10 +659,7 @@ static void malidp_unbind(struct device *dev)
struct malidp_drm *malidp = drm->dev_private;
drm_dev_unregister(drm);
- if (malidp->fbdev) {
- drm_fbdev_cma_fini(malidp->fbdev);
- malidp->fbdev = NULL;
- }
+ drm_fb_cma_fbdev_fini(drm);
drm_kms_helper_poll_fini(drm);
pm_runtime_get_sync(dev);
malidp_se_irq_fini(drm);
diff --git a/drivers/gpu/drm/arm/malidp_drv.h b/drivers/gpu/drm/arm/malidp_drv.h
index 70ed6aeccf05..e0d12c9fc6b8 100644
--- a/drivers/gpu/drm/arm/malidp_drv.h
+++ b/drivers/gpu/drm/arm/malidp_drv.h
@@ -20,7 +20,6 @@
struct malidp_drm {
struct malidp_hw_device *dev;
- struct drm_fbdev_cma *fbdev;
struct drm_crtc crtc;
wait_queue_head_t wq;
atomic_t config_valid;
diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c
index 2e065facdce7..e2adfbef7d6b 100644
--- a/drivers/gpu/drm/armada/armada_crtc.c
+++ b/drivers/gpu/drm/armada/armada_crtc.c
@@ -13,6 +13,7 @@
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_plane_helper.h>
+#include <drm/drm_atomic_helper.h>
#include "armada_crtc.h"
#include "armada_drm.h"
#include "armada_fb.h"
@@ -20,13 +21,6 @@
#include "armada_hw.h"
#include "armada_trace.h"
-struct armada_frame_work {
- struct armada_plane_work work;
- struct drm_pending_vblank_event *event;
- struct armada_regs regs[4];
- struct drm_framebuffer *old_fb;
-};
-
enum csc_mode {
CSC_AUTO = 0,
CSC_YUV_CCIR601 = 1,
@@ -168,16 +162,23 @@ static void armada_drm_crtc_update(struct armada_crtc *dcrtc)
void armada_drm_plane_calc_addrs(u32 *addrs, struct drm_framebuffer *fb,
int x, int y)
{
+ const struct drm_format_info *format = fb->format;
+ unsigned int num_planes = format->num_planes;
u32 addr = drm_fb_obj(fb)->dev_addr;
- int num_planes = fb->format->num_planes;
int i;
if (num_planes > 3)
num_planes = 3;
- for (i = 0; i < num_planes; i++)
+ addrs[0] = addr + fb->offsets[0] + y * fb->pitches[0] +
+ x * format->cpp[0];
+
+ y /= format->vsub;
+ x /= format->hsub;
+
+ for (i = 1; i < num_planes; i++)
addrs[i] = addr + fb->offsets[i] + y * fb->pitches[i] +
- x * fb->format->cpp[i];
+ x * format->cpp[i];
for (; i < 3; i++)
addrs[i] = 0;
}
@@ -209,6 +210,38 @@ static unsigned armada_drm_crtc_calc_fb(struct drm_framebuffer *fb,
return i;
}
+static void armada_drm_plane_work_call(struct armada_crtc *dcrtc,
+ struct armada_plane_work *work,
+ void (*fn)(struct armada_crtc *, struct armada_plane_work *))
+{
+ struct armada_plane *dplane = drm_to_armada_plane(work->plane);
+ struct drm_pending_vblank_event *event;
+ struct drm_framebuffer *fb;
+
+ if (fn)
+ fn(dcrtc, work);
+ drm_crtc_vblank_put(&dcrtc->crtc);
+
+ event = work->event;
+ fb = work->old_fb;
+ if (event || fb) {
+ struct drm_device *dev = dcrtc->crtc.dev;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ if (event)
+ drm_crtc_send_vblank_event(&dcrtc->crtc, event);
+ if (fb)
+ __armada_drm_queue_unref_work(dev, fb);
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ }
+
+ if (work->need_kfree)
+ kfree(work);
+
+ wake_up(&dplane->frame_wait);
+}
+
static void armada_drm_plane_work_run(struct armada_crtc *dcrtc,
struct drm_plane *plane)
{
@@ -216,24 +249,19 @@ static void armada_drm_plane_work_run(struct armada_crtc *dcrtc,
struct armada_plane_work *work = xchg(&dplane->work, NULL);
/* Handle any pending frame work. */
- if (work) {
- work->fn(dcrtc, dplane, work);
- drm_crtc_vblank_put(&dcrtc->crtc);
- }
-
- wake_up(&dplane->frame_wait);
+ if (work)
+ armada_drm_plane_work_call(dcrtc, work, work->fn);
}
int armada_drm_plane_work_queue(struct armada_crtc *dcrtc,
- struct armada_plane *plane, struct armada_plane_work *work)
+ struct armada_plane_work *work)
{
+ struct armada_plane *plane = drm_to_armada_plane(work->plane);
int ret;
ret = drm_crtc_vblank_get(&dcrtc->crtc);
- if (ret) {
- DRM_ERROR("failed to acquire vblank counter\n");
+ if (ret)
return ret;
- }
ret = cmpxchg(&plane->work, NULL, work) ? -EBUSY : 0;
if (ret)
@@ -247,51 +275,60 @@ int armada_drm_plane_work_wait(struct armada_plane *plane, long timeout)
return wait_event_timeout(plane->frame_wait, !plane->work, timeout);
}
-struct armada_plane_work *armada_drm_plane_work_cancel(
- struct armada_crtc *dcrtc, struct armada_plane *plane)
+void armada_drm_plane_work_cancel(struct armada_crtc *dcrtc,
+ struct armada_plane *dplane)
{
- struct armada_plane_work *work = xchg(&plane->work, NULL);
+ struct armada_plane_work *work = xchg(&dplane->work, NULL);
if (work)
- drm_crtc_vblank_put(&dcrtc->crtc);
-
- return work;
+ armada_drm_plane_work_call(dcrtc, work, work->cancel);
}
-static int armada_drm_crtc_queue_frame_work(struct armada_crtc *dcrtc,
- struct armada_frame_work *work)
+static void armada_drm_crtc_complete_frame_work(struct armada_crtc *dcrtc,
+ struct armada_plane_work *work)
{
- struct armada_plane *plane = drm_to_armada_plane(dcrtc->crtc.primary);
+ unsigned long flags;
- return armada_drm_plane_work_queue(dcrtc, plane, &work->work);
+ spin_lock_irqsave(&dcrtc->irq_lock, flags);
+ armada_drm_crtc_update_regs(dcrtc, work->regs);
+ spin_unlock_irqrestore(&dcrtc->irq_lock, flags);
}
-static void armada_drm_crtc_complete_frame_work(struct armada_crtc *dcrtc,
- struct armada_plane *plane, struct armada_plane_work *work)
+static void armada_drm_crtc_complete_disable_work(struct armada_crtc *dcrtc,
+ struct armada_plane_work *work)
{
- struct armada_frame_work *fwork = container_of(work, struct armada_frame_work, work);
- struct drm_device *dev = dcrtc->crtc.dev;
unsigned long flags;
+ if (dcrtc->plane == work->plane)
+ dcrtc->plane = NULL;
+
spin_lock_irqsave(&dcrtc->irq_lock, flags);
- armada_drm_crtc_update_regs(dcrtc, fwork->regs);
+ armada_drm_crtc_update_regs(dcrtc, work->regs);
spin_unlock_irqrestore(&dcrtc->irq_lock, flags);
+}
- if (fwork->event) {
- spin_lock_irqsave(&dev->event_lock, flags);
- drm_crtc_send_vblank_event(&dcrtc->crtc, fwork->event);
- spin_unlock_irqrestore(&dev->event_lock, flags);
- }
+static struct armada_plane_work *
+armada_drm_crtc_alloc_plane_work(struct drm_plane *plane)
+{
+ struct armada_plane_work *work;
+ int i = 0;
+
+ work = kzalloc(sizeof(*work), GFP_KERNEL);
+ if (!work)
+ return NULL;
- /* Finally, queue the process-half of the cleanup. */
- __armada_drm_queue_unref_work(dcrtc->crtc.dev, fwork->old_fb);
- kfree(fwork);
+ work->plane = plane;
+ work->fn = armada_drm_crtc_complete_frame_work;
+ work->need_kfree = true;
+ armada_reg_queue_end(work->regs, i);
+
+ return work;
}
static void armada_drm_crtc_finish_fb(struct armada_crtc *dcrtc,
struct drm_framebuffer *fb, bool force)
{
- struct armada_frame_work *work;
+ struct armada_plane_work *work;
if (!fb)
return;
@@ -302,15 +339,11 @@ static void armada_drm_crtc_finish_fb(struct armada_crtc *dcrtc,
return;
}
- work = kmalloc(sizeof(*work), GFP_KERNEL);
+ work = armada_drm_crtc_alloc_plane_work(dcrtc->crtc.primary);
if (work) {
- int i = 0;
- work->work.fn = armada_drm_crtc_complete_frame_work;
- work->event = NULL;
work->old_fb = fb;
- armada_reg_queue_end(work->regs, i);
- if (armada_drm_crtc_queue_frame_work(dcrtc, work) == 0)
+ if (armada_drm_plane_work_queue(dcrtc, work) == 0)
return;
kfree(work);
@@ -373,8 +406,11 @@ static void armada_drm_crtc_prepare(struct drm_crtc *crtc)
* the new mode parameters.
*/
plane = dcrtc->plane;
- if (plane)
+ if (plane) {
drm_plane_force_disable(plane);
+ WARN_ON(!armada_drm_plane_work_wait(drm_to_armada_plane(plane),
+ HZ));
+ }
}
/* The mode_config.mutex will be held for this call */
@@ -440,11 +476,11 @@ static void armada_drm_crtc_irq(struct armada_crtc *dcrtc, u32 stat)
if (stat & VSYNC_IRQ)
drm_crtc_handle_vblank(&dcrtc->crtc);
- spin_lock(&dcrtc->irq_lock);
ovl_plane = dcrtc->plane;
if (ovl_plane)
armada_drm_plane_work_run(dcrtc, ovl_plane);
+ spin_lock(&dcrtc->irq_lock);
if (stat & GRA_FRAME_IRQ && dcrtc->interlaced) {
int i = stat & GRA_FRAME_IRQ0 ? 0 : 1;
uint32_t val;
@@ -536,18 +572,14 @@ static uint32_t armada_drm_crtc_calculate_csc(struct armada_crtc *dcrtc)
return val;
}
-static void armada_drm_primary_set(struct drm_crtc *crtc,
- struct drm_plane *plane, int x, int y)
+static void armada_drm_gra_plane_regs(struct armada_regs *regs,
+ struct drm_framebuffer *fb, struct armada_plane_state *state,
+ int x, int y, bool interlaced)
{
- struct armada_plane_state *state = &drm_to_armada_plane(plane)->state;
- struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
- struct armada_regs regs[8];
- bool interlaced = dcrtc->interlaced;
- unsigned i;
+ unsigned int i;
u32 ctrl0;
- i = armada_drm_crtc_calc_fb(plane->fb, x, y, regs, interlaced);
-
+ i = armada_drm_crtc_calc_fb(fb, x, y, regs, interlaced);
armada_reg_queue_set(regs, i, state->dst_yx, LCD_SPU_GRA_OVSA_HPXL_VLN);
armada_reg_queue_set(regs, i, state->src_hw, LCD_SPU_GRA_HPXL_VLN);
armada_reg_queue_set(regs, i, state->dst_hw, LCD_SPU_GZM_HPXL_VLN);
@@ -559,9 +591,21 @@ static void armada_drm_primary_set(struct drm_crtc *crtc,
armada_reg_queue_mod(regs, i, ctrl0, CFG_GRAFORMAT |
CFG_GRA_MOD(CFG_SWAPRB | CFG_SWAPUV |
CFG_SWAPYU | CFG_YUV2RGB) |
- CFG_PALETTE_ENA | CFG_GRA_FTOGGLE,
+ CFG_PALETTE_ENA | CFG_GRA_FTOGGLE |
+ CFG_GRA_HSMOOTH | CFG_GRA_ENA,
LCD_SPU_DMA_CTRL0);
armada_reg_queue_end(regs, i);
+}
+
+static void armada_drm_primary_set(struct drm_crtc *crtc,
+ struct drm_plane *plane, int x, int y)
+{
+ struct armada_plane_state *state = &drm_to_armada_plane(plane)->state;
+ struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+ struct armada_regs regs[8];
+ bool interlaced = dcrtc->interlaced;
+
+ armada_drm_gra_plane_regs(regs, plane->fb, state, x, y, interlaced);
armada_drm_crtc_update_regs(dcrtc, regs);
}
@@ -581,7 +625,7 @@ static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
interlaced = !!(adj->flags & DRM_MODE_FLAG_INTERLACE);
- val = CFG_GRA_ENA | CFG_GRA_HSMOOTH;
+ val = CFG_GRA_ENA;
val |= CFG_GRA_FMT(drm_fb_to_armada_fb(dcrtc->crtc.primary->fb)->fmt);
val |= CFG_GRA_MOD(drm_fb_to_armada_fb(dcrtc->crtc.primary->fb)->mod);
@@ -633,8 +677,6 @@ static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
/* Now compute the divider for real */
dcrtc->variant->compute_clock(dcrtc, adj, &sclk);
- /* Ensure graphic fifo is enabled */
- armada_reg_queue_mod(regs, i, 0, CFG_PDWN64x66, LCD_SPU_SRAM_PARA1);
armada_reg_queue_set(regs, i, sclk, LCD_CFG_SCLK_DIV);
if (interlaced ^ dcrtc->interlaced) {
@@ -647,6 +689,9 @@ static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
spin_lock_irqsave(&dcrtc->irq_lock, flags);
+ /* Ensure graphic fifo is enabled */
+ armada_reg_queue_mod(regs, i, 0, CFG_PDWN64x66, LCD_SPU_SRAM_PARA1);
+
/* Even interlaced/progressive frame */
dcrtc->v[1].spu_v_h_total = adj->crtc_vtotal << 16 |
adj->crtc_htotal;
@@ -729,47 +774,13 @@ static int armada_drm_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
return 0;
}
-void armada_drm_crtc_plane_disable(struct armada_crtc *dcrtc,
- struct drm_plane *plane)
-{
- u32 sram_para1, dma_ctrl0_mask;
-
- /*
- * Drop our reference on any framebuffer attached to this plane.
- * We don't need to NULL this out as drm_plane_force_disable(),
- * and __setplane_internal() will do so for an overlay plane, and
- * __drm_helper_disable_unused_functions() will do so for the
- * primary plane.
- */
- if (plane->fb)
- drm_framebuffer_put(plane->fb);
-
- /* Power down the Y/U/V FIFOs */
- sram_para1 = CFG_PDWN16x66 | CFG_PDWN32x66;
-
- /* Power down most RAMs and FIFOs if this is the primary plane */
- if (plane->type == DRM_PLANE_TYPE_PRIMARY) {
- sram_para1 |= CFG_PDWN256x32 | CFG_PDWN256x24 | CFG_PDWN256x8 |
- CFG_PDWN32x32 | CFG_PDWN64x66;
- dma_ctrl0_mask = CFG_GRA_ENA;
- } else {
- dma_ctrl0_mask = CFG_DMA_ENA;
- }
-
- spin_lock_irq(&dcrtc->irq_lock);
- armada_updatel(0, dma_ctrl0_mask, dcrtc->base + LCD_SPU_DMA_CTRL0);
- spin_unlock_irq(&dcrtc->irq_lock);
-
- armada_updatel(sram_para1, 0, dcrtc->base + LCD_SPU_SRAM_PARA1);
-}
-
/* The mode_config.mutex will be held for this call */
static void armada_drm_crtc_disable(struct drm_crtc *crtc)
{
- struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
-
armada_drm_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
- armada_drm_crtc_plane_disable(dcrtc, crtc->primary);
+
+ /* Disable our primary plane when we disable the CRTC. */
+ crtc->primary->funcs->disable_plane(crtc->primary, NULL);
}
static const struct drm_crtc_helper_funcs armada_crtc_helper_funcs = {
@@ -879,9 +890,11 @@ static int armada_drm_crtc_cursor_update(struct armada_crtc *dcrtc, bool reload)
return 0;
}
+ spin_lock_irq(&dcrtc->irq_lock);
para1 = readl_relaxed(dcrtc->base + LCD_SPU_SRAM_PARA1);
armada_updatel(CFG_CSB_256x32, CFG_CSB_256x32 | CFG_PDWN256x32,
dcrtc->base + LCD_SPU_SRAM_PARA1);
+ spin_unlock_irq(&dcrtc->irq_lock);
/*
* Initialize the transparency if the SRAM was powered down.
@@ -1021,7 +1034,7 @@ static int armada_drm_crtc_page_flip(struct drm_crtc *crtc,
struct drm_modeset_acquire_ctx *ctx)
{
struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
- struct armada_frame_work *work;
+ struct armada_plane_work *work;
unsigned i;
int ret;
@@ -1029,11 +1042,10 @@ static int armada_drm_crtc_page_flip(struct drm_crtc *crtc,
if (fb->format != crtc->primary->fb->format)
return -EINVAL;
- work = kmalloc(sizeof(*work), GFP_KERNEL);
+ work = armada_drm_crtc_alloc_plane_work(dcrtc->crtc.primary);
if (!work)
return -ENOMEM;
- work->work.fn = armada_drm_crtc_complete_frame_work;
work->event = event;
work->old_fb = dcrtc->crtc.primary->fb;
@@ -1047,7 +1059,7 @@ static int armada_drm_crtc_page_flip(struct drm_crtc *crtc,
*/
drm_framebuffer_get(fb);
- ret = armada_drm_crtc_queue_frame_work(dcrtc, work);
+ ret = armada_drm_plane_work_queue(dcrtc, work);
if (ret) {
/* Undo our reference above */
drm_framebuffer_put(fb);
@@ -1127,14 +1139,195 @@ static const struct drm_crtc_funcs armada_crtc_funcs = {
.disable_vblank = armada_drm_crtc_disable_vblank,
};
+static void armada_drm_primary_update_state(struct drm_plane_state *state,
+ struct armada_regs *regs)
+{
+ struct armada_plane *dplane = drm_to_armada_plane(state->plane);
+ struct armada_crtc *dcrtc = drm_to_armada_crtc(state->crtc);
+ struct armada_framebuffer *dfb = drm_fb_to_armada_fb(state->fb);
+ bool was_disabled;
+ unsigned int idx = 0;
+ u32 val;
+
+ val = CFG_GRA_FMT(dfb->fmt) | CFG_GRA_MOD(dfb->mod);
+ if (dfb->fmt > CFG_420)
+ val |= CFG_PALETTE_ENA;
+ if (state->visible)
+ val |= CFG_GRA_ENA;
+ if (drm_rect_width(&state->src) >> 16 != drm_rect_width(&state->dst))
+ val |= CFG_GRA_HSMOOTH;
+
+ was_disabled = !(dplane->state.ctrl0 & CFG_GRA_ENA);
+ if (was_disabled)
+ armada_reg_queue_mod(regs, idx,
+ 0, CFG_PDWN64x66, LCD_SPU_SRAM_PARA1);
+
+ dplane->state.ctrl0 = val;
+ dplane->state.src_hw = (drm_rect_height(&state->src) & 0xffff0000) |
+ drm_rect_width(&state->src) >> 16;
+ dplane->state.dst_hw = drm_rect_height(&state->dst) << 16 |
+ drm_rect_width(&state->dst);
+ dplane->state.dst_yx = state->dst.y1 << 16 | state->dst.x1;
+
+ armada_drm_gra_plane_regs(regs + idx, &dfb->fb, &dplane->state,
+ state->src.x1 >> 16, state->src.y1 >> 16,
+ dcrtc->interlaced);
+
+ dplane->state.vsync_update = !was_disabled;
+ dplane->state.changed = true;
+}
+
+static int armada_drm_primary_update(struct drm_plane *plane,
+ struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y, unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y, uint32_t src_w, uint32_t src_h,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ struct armada_plane *dplane = drm_to_armada_plane(plane);
+ struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+ struct armada_plane_work *work;
+ struct drm_plane_state state = {
+ .plane = plane,
+ .crtc = crtc,
+ .fb = fb,
+ .src_x = src_x,
+ .src_y = src_y,
+ .src_w = src_w,
+ .src_h = src_h,
+ .crtc_x = crtc_x,
+ .crtc_y = crtc_y,
+ .crtc_w = crtc_w,
+ .crtc_h = crtc_h,
+ .rotation = DRM_MODE_ROTATE_0,
+ };
+ const struct drm_rect clip = {
+ .x2 = crtc->mode.hdisplay,
+ .y2 = crtc->mode.vdisplay,
+ };
+ int ret;
+
+ ret = drm_atomic_helper_check_plane_state(&state, crtc->state, &clip, 0,
+ INT_MAX, true, false);
+ if (ret)
+ return ret;
+
+ work = &dplane->works[dplane->next_work];
+ work->fn = armada_drm_crtc_complete_frame_work;
+
+ if (plane->fb != fb) {
+ /*
+ * Take a reference on the new framebuffer - we want to
+ * hold on to it while the hardware is displaying it.
+ */
+ drm_framebuffer_reference(fb);
+
+ work->old_fb = plane->fb;
+ } else {
+ work->old_fb = NULL;
+ }
+
+ armada_drm_primary_update_state(&state, work->regs);
+
+ if (!dplane->state.changed)
+ return 0;
+
+ /* Wait for pending work to complete */
+ if (armada_drm_plane_work_wait(dplane, HZ / 10) == 0)
+ armada_drm_plane_work_cancel(dcrtc, dplane);
+
+ if (!dplane->state.vsync_update) {
+ work->fn(dcrtc, work);
+ if (work->old_fb)
+ drm_framebuffer_unreference(work->old_fb);
+ return 0;
+ }
+
+ /* Queue it for update on the next interrupt if we are enabled */
+ ret = armada_drm_plane_work_queue(dcrtc, work);
+ if (ret) {
+ work->fn(dcrtc, work);
+ if (work->old_fb)
+ drm_framebuffer_unreference(work->old_fb);
+ }
+
+ dplane->next_work = !dplane->next_work;
+
+ return 0;
+}
+
+int armada_drm_plane_disable(struct drm_plane *plane,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ struct armada_plane *dplane = drm_to_armada_plane(plane);
+ struct armada_crtc *dcrtc;
+ struct armada_plane_work *work;
+ unsigned int idx = 0;
+ u32 sram_para1, enable_mask;
+
+ if (!plane->crtc)
+ return 0;
+
+ /*
+ * Arrange to power down most RAMs and FIFOs if this is the primary
+ * plane, otherwise just the YUV FIFOs for the overlay plane.
+ */
+ if (plane->type == DRM_PLANE_TYPE_PRIMARY) {
+ sram_para1 = CFG_PDWN256x32 | CFG_PDWN256x24 | CFG_PDWN256x8 |
+ CFG_PDWN32x32 | CFG_PDWN64x66;
+ enable_mask = CFG_GRA_ENA;
+ } else {
+ sram_para1 = CFG_PDWN16x66 | CFG_PDWN32x66;
+ enable_mask = CFG_DMA_ENA;
+ }
+
+ dplane->state.ctrl0 &= ~enable_mask;
+
+ dcrtc = drm_to_armada_crtc(plane->crtc);
+
+ /*
+ * Try to disable the plane and drop our ref on the framebuffer
+ * at the next frame update. If we fail for any reason, disable
+ * the plane immediately.
+ */
+ work = &dplane->works[dplane->next_work];
+ work->fn = armada_drm_crtc_complete_disable_work;
+ work->cancel = armada_drm_crtc_complete_disable_work;
+ work->old_fb = plane->fb;
+
+ armada_reg_queue_mod(work->regs, idx,
+ 0, enable_mask, LCD_SPU_DMA_CTRL0);
+ armada_reg_queue_mod(work->regs, idx,
+ sram_para1, 0, LCD_SPU_SRAM_PARA1);
+ armada_reg_queue_end(work->regs, idx);
+
+ /* Wait for any preceding work to complete, but don't wedge */
+ if (WARN_ON(!armada_drm_plane_work_wait(dplane, HZ)))
+ armada_drm_plane_work_cancel(dcrtc, dplane);
+
+ if (armada_drm_plane_work_queue(dcrtc, work)) {
+ work->fn(dcrtc, work);
+ if (work->old_fb)
+ drm_framebuffer_unreference(work->old_fb);
+ }
+
+ dplane->next_work = !dplane->next_work;
+
+ return 0;
+}
+
static const struct drm_plane_funcs armada_primary_plane_funcs = {
- .update_plane = drm_primary_helper_update,
- .disable_plane = drm_primary_helper_disable,
+ .update_plane = armada_drm_primary_update,
+ .disable_plane = armada_drm_plane_disable,
.destroy = drm_primary_helper_destroy,
};
int armada_drm_plane_init(struct armada_plane *plane)
{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(plane->works); i++)
+ plane->works[i].plane = &plane->base;
+
init_waitqueue_head(&plane->frame_wait);
return 0;
@@ -1225,17 +1418,13 @@ static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
ret = devm_request_irq(dev, irq, armada_drm_irq, 0, "armada_drm_crtc",
dcrtc);
- if (ret < 0) {
- kfree(dcrtc);
- return ret;
- }
+ if (ret < 0)
+ goto err_crtc;
if (dcrtc->variant->init) {
ret = dcrtc->variant->init(dcrtc, dev);
- if (ret) {
- kfree(dcrtc);
- return ret;
- }
+ if (ret)
+ goto err_crtc;
}
/* Ensure AXI pipeline is enabled */
@@ -1246,13 +1435,15 @@ static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
dcrtc->crtc.port = port;
primary = kzalloc(sizeof(*primary), GFP_KERNEL);
- if (!primary)
- return -ENOMEM;
+ if (!primary) {
+ ret = -ENOMEM;
+ goto err_crtc;
+ }
ret = armada_drm_plane_init(primary);
if (ret) {
kfree(primary);
- return ret;
+ goto err_crtc;
}
ret = drm_universal_plane_init(drm, &primary->base, 0,
@@ -1263,7 +1454,7 @@ static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
DRM_PLANE_TYPE_PRIMARY, NULL);
if (ret) {
kfree(primary);
- return ret;
+ goto err_crtc;
}
ret = drm_crtc_init_with_planes(drm, &dcrtc->crtc, &primary->base, NULL,
@@ -1282,6 +1473,9 @@ static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
err_crtc_init:
primary->base.funcs->destroy(&primary->base);
+err_crtc:
+ kfree(dcrtc);
+
return ret;
}
diff --git a/drivers/gpu/drm/armada/armada_crtc.h b/drivers/gpu/drm/armada/armada_crtc.h
index bab11f483575..445829b8877a 100644
--- a/drivers/gpu/drm/armada/armada_crtc.h
+++ b/drivers/gpu/drm/armada/armada_crtc.h
@@ -36,21 +36,31 @@ struct armada_plane;
struct armada_variant;
struct armada_plane_work {
- void (*fn)(struct armada_crtc *,
- struct armada_plane *,
- struct armada_plane_work *);
+ void (*fn)(struct armada_crtc *, struct armada_plane_work *);
+ void (*cancel)(struct armada_crtc *, struct armada_plane_work *);
+ bool need_kfree;
+ struct drm_plane *plane;
+ struct drm_framebuffer *old_fb;
+ struct drm_pending_vblank_event *event;
+ struct armada_regs regs[14];
};
struct armada_plane_state {
+ u16 src_x;
+ u16 src_y;
u32 src_hw;
u32 dst_hw;
u32 dst_yx;
u32 ctrl0;
+ bool changed;
+ bool vsync_update;
};
struct armada_plane {
struct drm_plane base;
wait_queue_head_t frame_wait;
+ bool next_work;
+ struct armada_plane_work works[2];
struct armada_plane_work *work;
struct armada_plane_state state;
};
@@ -58,10 +68,10 @@ struct armada_plane {
int armada_drm_plane_init(struct armada_plane *plane);
int armada_drm_plane_work_queue(struct armada_crtc *dcrtc,
- struct armada_plane *plane, struct armada_plane_work *work);
+ struct armada_plane_work *work);
int armada_drm_plane_work_wait(struct armada_plane *plane, long timeout);
-struct armada_plane_work *armada_drm_plane_work_cancel(
- struct armada_crtc *dcrtc, struct armada_plane *plane);
+void armada_drm_plane_work_cancel(struct armada_crtc *dcrtc,
+ struct armada_plane *plane);
void armada_drm_plane_calc_addrs(u32 *addrs, struct drm_framebuffer *fb,
int x, int y);
@@ -104,8 +114,8 @@ struct armada_crtc {
void armada_drm_crtc_update_regs(struct armada_crtc *, struct armada_regs *);
-void armada_drm_crtc_plane_disable(struct armada_crtc *dcrtc,
- struct drm_plane *plane);
+int armada_drm_plane_disable(struct drm_plane *plane,
+ struct drm_modeset_acquire_ctx *ctx);
extern struct platform_driver armada_lcd_platform_driver;
diff --git a/drivers/gpu/drm/armada/armada_drm.h b/drivers/gpu/drm/armada/armada_drm.h
index b064879ecdbd..cc4c557c9f66 100644
--- a/drivers/gpu/drm/armada/armada_drm.h
+++ b/drivers/gpu/drm/armada/armada_drm.h
@@ -84,7 +84,6 @@ void armada_drm_queue_unref_work(struct drm_device *,
extern const struct drm_mode_config_funcs armada_drm_mode_config_funcs;
int armada_fbdev_init(struct drm_device *);
-void armada_fbdev_lastclose(struct drm_device *);
void armada_fbdev_fini(struct drm_device *);
int armada_overlay_plane_create(struct drm_device *, unsigned long);
diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c
index e857b88a9799..4b11b6b52f1d 100644
--- a/drivers/gpu/drm/armada/armada_drv.c
+++ b/drivers/gpu/drm/armada/armada_drv.c
@@ -10,6 +10,7 @@
#include <linux/module.h>
#include <linux/of_graph.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
#include <drm/drm_of.h>
#include "armada_crtc.h"
#include "armada_drm.h"
@@ -54,15 +55,10 @@ static struct drm_ioctl_desc armada_ioctls[] = {
DRM_IOCTL_DEF_DRV(ARMADA_GEM_PWRITE, armada_gem_pwrite_ioctl, 0),
};
-static void armada_drm_lastclose(struct drm_device *dev)
-{
- armada_fbdev_lastclose(dev);
-}
-
DEFINE_DRM_GEM_FOPS(armada_drm_fops);
static struct drm_driver armada_drm_driver = {
- .lastclose = armada_drm_lastclose,
+ .lastclose = drm_fb_helper_lastclose,
.gem_free_object_unlocked = armada_gem_free_object,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
diff --git a/drivers/gpu/drm/armada/armada_fb.c b/drivers/gpu/drm/armada/armada_fb.c
index a38d5a0892a9..ac92bce07ecd 100644
--- a/drivers/gpu/drm/armada/armada_fb.c
+++ b/drivers/gpu/drm/armada/armada_fb.c
@@ -154,16 +154,7 @@ static struct drm_framebuffer *armada_fb_create(struct drm_device *dev,
return ERR_PTR(ret);
}
-static void armada_output_poll_changed(struct drm_device *dev)
-{
- struct armada_private *priv = dev->dev_private;
- struct drm_fb_helper *fbh = priv->fbdev;
-
- if (fbh)
- drm_fb_helper_hotplug_event(fbh);
-}
-
const struct drm_mode_config_funcs armada_drm_mode_config_funcs = {
.fb_create = armada_fb_create,
- .output_poll_changed = armada_output_poll_changed,
+ .output_poll_changed = drm_fb_helper_output_poll_changed,
};
diff --git a/drivers/gpu/drm/armada/armada_fbdev.c b/drivers/gpu/drm/armada/armada_fbdev.c
index a2ce83f84800..2a59db0994b2 100644
--- a/drivers/gpu/drm/armada/armada_fbdev.c
+++ b/drivers/gpu/drm/armada/armada_fbdev.c
@@ -159,14 +159,6 @@ int armada_fbdev_init(struct drm_device *dev)
return ret;
}
-void armada_fbdev_lastclose(struct drm_device *dev)
-{
- struct armada_private *priv = dev->dev_private;
-
- if (priv->fbdev)
- drm_fb_helper_restore_fbdev_mode_unlocked(priv->fbdev);
-}
-
void armada_fbdev_fini(struct drm_device *dev)
{
struct armada_private *priv = dev->dev_private;
diff --git a/drivers/gpu/drm/armada/armada_overlay.c b/drivers/gpu/drm/armada/armada_overlay.c
index b411b608821a..77b55adaa2ac 100644
--- a/drivers/gpu/drm/armada/armada_overlay.c
+++ b/drivers/gpu/drm/armada/armada_overlay.c
@@ -7,7 +7,7 @@
* published by the Free Software Foundation.
*/
#include <drm/drmP.h>
-#include <drm/drm_plane_helper.h>
+#include <drm/drm_atomic_helper.h>
#include "armada_crtc.h"
#include "armada_drm.h"
#include "armada_fb.h"
@@ -32,11 +32,6 @@ struct armada_ovl_plane_properties {
struct armada_ovl_plane {
struct armada_plane base;
- struct drm_framebuffer *old_fb;
- struct {
- struct armada_plane_work work;
- struct armada_regs regs[13];
- } vbl;
struct armada_ovl_plane_properties prop;
};
#define drm_to_armada_ovl_plane(p) \
@@ -67,218 +62,204 @@ armada_ovl_update_attr(struct armada_ovl_plane_properties *prop,
spin_unlock_irq(&dcrtc->irq_lock);
}
-static void armada_ovl_retire_fb(struct armada_ovl_plane *dplane,
- struct drm_framebuffer *fb)
-{
- struct drm_framebuffer *old_fb;
-
- old_fb = xchg(&dplane->old_fb, fb);
-
- if (old_fb)
- armada_drm_queue_unref_work(dplane->base.base.dev, old_fb);
-}
-
/* === Plane support === */
static void armada_ovl_plane_work(struct armada_crtc *dcrtc,
- struct armada_plane *plane, struct armada_plane_work *work)
+ struct armada_plane_work *work)
{
- struct armada_ovl_plane *dplane = container_of(plane, struct armada_ovl_plane, base);
+ unsigned long flags;
- trace_armada_ovl_plane_work(&dcrtc->crtc, &plane->base);
+ trace_armada_ovl_plane_work(&dcrtc->crtc, work->plane);
- armada_drm_crtc_update_regs(dcrtc, dplane->vbl.regs);
- armada_ovl_retire_fb(dplane, NULL);
+ spin_lock_irqsave(&dcrtc->irq_lock, flags);
+ armada_drm_crtc_update_regs(dcrtc, work->regs);
+ spin_unlock_irqrestore(&dcrtc->irq_lock, flags);
}
-static int
-armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- int crtc_x, int crtc_y, unsigned crtc_w, unsigned crtc_h,
- uint32_t src_x, uint32_t src_y, uint32_t src_w, uint32_t src_h,
- struct drm_modeset_acquire_ctx *ctx)
+static void armada_ovl_plane_update_state(struct drm_plane_state *state,
+ struct armada_regs *regs)
{
- struct armada_ovl_plane *dplane = drm_to_armada_ovl_plane(plane);
- struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
- struct drm_rect src = {
- .x1 = src_x,
- .y1 = src_y,
- .x2 = src_x + src_w,
- .y2 = src_y + src_h,
- };
- struct drm_rect dest = {
- .x1 = crtc_x,
- .y1 = crtc_y,
- .x2 = crtc_x + crtc_w,
- .y2 = crtc_y + crtc_h,
- };
- const struct drm_rect clip = {
- .x2 = crtc->mode.hdisplay,
- .y2 = crtc->mode.vdisplay,
- };
- uint32_t val, ctrl0;
- unsigned idx = 0;
- bool visible;
- int ret;
-
- trace_armada_ovl_plane_update(plane, crtc, fb,
- crtc_x, crtc_y, crtc_w, crtc_h,
- src_x, src_y, src_w, src_h);
-
- ret = drm_plane_helper_check_update(plane, crtc, fb, &src, &dest, &clip,
- DRM_MODE_ROTATE_0,
- 0, INT_MAX, true, false, &visible);
- if (ret)
- return ret;
-
- ctrl0 = CFG_DMA_FMT(drm_fb_to_armada_fb(fb)->fmt) |
- CFG_DMA_MOD(drm_fb_to_armada_fb(fb)->mod) |
- CFG_CBSH_ENA | CFG_DMA_HSMOOTH | CFG_DMA_ENA;
-
- /* Does the position/size result in nothing to display? */
- if (!visible)
- ctrl0 &= ~CFG_DMA_ENA;
-
- if (!dcrtc->plane) {
- dcrtc->plane = plane;
- armada_ovl_update_attr(&dplane->prop, dcrtc);
- }
-
- /* FIXME: overlay on an interlaced display */
- /* Just updating the position/size? */
- if (plane->fb == fb && dplane->base.state.ctrl0 == ctrl0) {
- val = (drm_rect_height(&src) & 0xffff0000) |
- drm_rect_width(&src) >> 16;
- dplane->base.state.src_hw = val;
- writel_relaxed(val, dcrtc->base + LCD_SPU_DMA_HPXL_VLN);
-
- val = drm_rect_height(&dest) << 16 | drm_rect_width(&dest);
- dplane->base.state.dst_hw = val;
- writel_relaxed(val, dcrtc->base + LCD_SPU_DZM_HPXL_VLN);
-
- val = dest.y1 << 16 | dest.x1;
- dplane->base.state.dst_yx = val;
- writel_relaxed(val, dcrtc->base + LCD_SPU_DMA_OVSA_HPXL_VLN);
-
- return 0;
- } else if (~dplane->base.state.ctrl0 & ctrl0 & CFG_DMA_ENA) {
+ struct armada_ovl_plane *dplane = drm_to_armada_ovl_plane(state->plane);
+ struct armada_framebuffer *dfb = drm_fb_to_armada_fb(state->fb);
+ const struct drm_format_info *format;
+ unsigned int idx = 0;
+ bool fb_changed;
+ u32 val, ctrl0;
+ u16 src_x, src_y;
+
+ ctrl0 = CFG_DMA_FMT(dfb->fmt) | CFG_DMA_MOD(dfb->mod) | CFG_CBSH_ENA;
+ if (state->visible)
+ ctrl0 |= CFG_DMA_ENA;
+ if (drm_rect_width(&state->src) >> 16 != drm_rect_width(&state->dst))
+ ctrl0 |= CFG_DMA_HSMOOTH;
+
+ /*
+ * Shifting a YUV packed format image by one pixel causes the U/V
+ * planes to swap. Compensate for it by also toggling the UV swap.
+ */
+ format = dfb->fb.format;
+ if (format->num_planes == 1 && state->src.x1 >> 16 & (format->hsub - 1))
+ ctrl0 ^= CFG_DMA_MOD(CFG_SWAPUV);
+
+ if (~dplane->base.state.ctrl0 & ctrl0 & CFG_DMA_ENA) {
/* Power up the Y/U/V FIFOs on ENA 0->1 transitions */
- armada_updatel(0, CFG_PDWN16x66 | CFG_PDWN32x66,
- dcrtc->base + LCD_SPU_SRAM_PARA1);
+ armada_reg_queue_mod(regs, idx,
+ 0, CFG_PDWN16x66 | CFG_PDWN32x66,
+ LCD_SPU_SRAM_PARA1);
}
- if (armada_drm_plane_work_wait(&dplane->base, HZ / 25) == 0)
- armada_drm_plane_work_cancel(dcrtc, &dplane->base);
-
- if (plane->fb != fb) {
- u32 addrs[3], pixel_format;
- int num_planes, hsub;
-
- /*
- * Take a reference on the new framebuffer - we want to
- * hold on to it while the hardware is displaying it.
- */
- drm_framebuffer_get(fb);
-
- if (plane->fb)
- armada_ovl_retire_fb(dplane, plane->fb);
+ fb_changed = dplane->base.base.fb != &dfb->fb ||
+ dplane->base.state.src_x != state->src.x1 >> 16 ||
+ dplane->base.state.src_y != state->src.y1 >> 16;
- src_y = src.y1 >> 16;
- src_x = src.x1 >> 16;
+ dplane->base.state.vsync_update = fb_changed;
- armada_drm_plane_calc_addrs(addrs, fb, src_x, src_y);
+ /* FIXME: overlay on an interlaced display */
+ if (fb_changed) {
+ u32 addrs[3];
- pixel_format = fb->format->format;
- hsub = drm_format_horz_chroma_subsampling(pixel_format);
- num_planes = fb->format->num_planes;
+ dplane->base.state.src_y = src_y = state->src.y1 >> 16;
+ dplane->base.state.src_x = src_x = state->src.x1 >> 16;
- /*
- * Annoyingly, shifting a YUYV-format image by one pixel
- * causes the U/V planes to toggle. Toggle the UV swap.
- * (Unfortunately, this causes momentary colour flickering.)
- */
- if (src_x & (hsub - 1) && num_planes == 1)
- ctrl0 ^= CFG_DMA_MOD(CFG_SWAPUV);
+ armada_drm_plane_calc_addrs(addrs, &dfb->fb, src_x, src_y);
- armada_reg_queue_set(dplane->vbl.regs, idx, addrs[0],
+ armada_reg_queue_set(regs, idx, addrs[0],
LCD_SPU_DMA_START_ADDR_Y0);
- armada_reg_queue_set(dplane->vbl.regs, idx, addrs[1],
+ armada_reg_queue_set(regs, idx, addrs[1],
LCD_SPU_DMA_START_ADDR_U0);
- armada_reg_queue_set(dplane->vbl.regs, idx, addrs[2],
+ armada_reg_queue_set(regs, idx, addrs[2],
LCD_SPU_DMA_START_ADDR_V0);
- armada_reg_queue_set(dplane->vbl.regs, idx, addrs[0],
+ armada_reg_queue_set(regs, idx, addrs[0],
LCD_SPU_DMA_START_ADDR_Y1);
- armada_reg_queue_set(dplane->vbl.regs, idx, addrs[1],
+ armada_reg_queue_set(regs, idx, addrs[1],
LCD_SPU_DMA_START_ADDR_U1);
- armada_reg_queue_set(dplane->vbl.regs, idx, addrs[2],
+ armada_reg_queue_set(regs, idx, addrs[2],
LCD_SPU_DMA_START_ADDR_V1);
- val = fb->pitches[0] << 16 | fb->pitches[0];
- armada_reg_queue_set(dplane->vbl.regs, idx, val,
+ val = dfb->fb.pitches[0] << 16 | dfb->fb.pitches[0];
+ armada_reg_queue_set(regs, idx, val,
LCD_SPU_DMA_PITCH_YC);
- val = fb->pitches[1] << 16 | fb->pitches[2];
- armada_reg_queue_set(dplane->vbl.regs, idx, val,
+ val = dfb->fb.pitches[1] << 16 | dfb->fb.pitches[2];
+ armada_reg_queue_set(regs, idx, val,
LCD_SPU_DMA_PITCH_UV);
}
- val = (drm_rect_height(&src) & 0xffff0000) | drm_rect_width(&src) >> 16;
+ val = (drm_rect_height(&state->src) & 0xffff0000) |
+ drm_rect_width(&state->src) >> 16;
if (dplane->base.state.src_hw != val) {
dplane->base.state.src_hw = val;
- armada_reg_queue_set(dplane->vbl.regs, idx, val,
+ armada_reg_queue_set(regs, idx, val,
LCD_SPU_DMA_HPXL_VLN);
}
- val = drm_rect_height(&dest) << 16 | drm_rect_width(&dest);
+ val = drm_rect_height(&state->dst) << 16 | drm_rect_width(&state->dst);
if (dplane->base.state.dst_hw != val) {
dplane->base.state.dst_hw = val;
- armada_reg_queue_set(dplane->vbl.regs, idx, val,
+ armada_reg_queue_set(regs, idx, val,
LCD_SPU_DZM_HPXL_VLN);
}
- val = dest.y1 << 16 | dest.x1;
+ val = state->dst.y1 << 16 | state->dst.x1;
if (dplane->base.state.dst_yx != val) {
dplane->base.state.dst_yx = val;
- armada_reg_queue_set(dplane->vbl.regs, idx, val,
+ armada_reg_queue_set(regs, idx, val,
LCD_SPU_DMA_OVSA_HPXL_VLN);
}
if (dplane->base.state.ctrl0 != ctrl0) {
dplane->base.state.ctrl0 = ctrl0;
- armada_reg_queue_mod(dplane->vbl.regs, idx, ctrl0,
+ armada_reg_queue_mod(regs, idx, ctrl0,
CFG_CBSH_ENA | CFG_DMAFORMAT | CFG_DMA_FTOGGLE |
CFG_DMA_HSMOOTH | CFG_DMA_TSTMODE |
CFG_DMA_MOD(CFG_SWAPRB | CFG_SWAPUV | CFG_SWAPYU |
CFG_YUV2RGB) | CFG_DMA_ENA,
LCD_SPU_DMA_CTRL0);
+ dplane->base.state.vsync_update = true;
}
- if (idx) {
- armada_reg_queue_end(dplane->vbl.regs, idx);
- armada_drm_plane_work_queue(dcrtc, &dplane->base,
- &dplane->vbl.work);
- }
- return 0;
+
+ dplane->base.state.changed = idx != 0;
+
+ armada_reg_queue_end(regs, idx);
}
-static int armada_ovl_plane_disable(struct drm_plane *plane,
- struct drm_modeset_acquire_ctx *ctx)
+static int
+armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y, unsigned crtc_w, unsigned crtc_h,
+ uint32_t src_x, uint32_t src_y, uint32_t src_w, uint32_t src_h,
+ struct drm_modeset_acquire_ctx *ctx)
{
struct armada_ovl_plane *dplane = drm_to_armada_ovl_plane(plane);
- struct drm_framebuffer *fb;
- struct armada_crtc *dcrtc;
+ struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+ struct armada_plane_work *work;
+ struct drm_plane_state state = {
+ .plane = plane,
+ .crtc = crtc,
+ .fb = fb,
+ .src_x = src_x,
+ .src_y = src_y,
+ .src_w = src_w,
+ .src_h = src_h,
+ .crtc_x = crtc_x,
+ .crtc_y = crtc_y,
+ .crtc_w = crtc_w,
+ .crtc_h = crtc_h,
+ .rotation = DRM_MODE_ROTATE_0,
+ };
+ const struct drm_rect clip = {
+ .x2 = crtc->mode.hdisplay,
+ .y2 = crtc->mode.vdisplay,
+ };
+ int ret;
- if (!dplane->base.base.crtc)
+ trace_armada_ovl_plane_update(plane, crtc, fb,
+ crtc_x, crtc_y, crtc_w, crtc_h,
+ src_x, src_y, src_w, src_h);
+
+ ret = drm_atomic_helper_check_plane_state(&state, crtc->state, &clip, 0,
+ INT_MAX, true, false);
+ if (ret)
+ return ret;
+
+ work = &dplane->base.works[dplane->base.next_work];
+
+ if (plane->fb != fb) {
+ /*
+ * Take a reference on the new framebuffer - we want to
+ * hold on to it while the hardware is displaying it.
+ */
+ drm_framebuffer_reference(fb);
+
+ work->old_fb = plane->fb;
+ } else {
+ work->old_fb = NULL;
+ }
+
+ armada_ovl_plane_update_state(&state, work->regs);
+
+ if (!dplane->base.state.changed)
return 0;
- dcrtc = drm_to_armada_crtc(dplane->base.base.crtc);
+ /* Wait for pending work to complete */
+ if (armada_drm_plane_work_wait(&dplane->base, HZ / 25) == 0)
+ armada_drm_plane_work_cancel(dcrtc, &dplane->base);
+
+ /* Just updating the position/size? */
+ if (!dplane->base.state.vsync_update) {
+ armada_ovl_plane_work(dcrtc, work);
+ return 0;
+ }
- armada_drm_plane_work_cancel(dcrtc, &dplane->base);
- armada_drm_crtc_plane_disable(dcrtc, plane);
+ if (!dcrtc->plane) {
+ dcrtc->plane = plane;
+ armada_ovl_update_attr(&dplane->prop, dcrtc);
+ }
- dcrtc->plane = NULL;
- dplane->base.state.ctrl0 = 0;
+ /* Queue it for update on the next interrupt if we are enabled */
+ ret = armada_drm_plane_work_queue(dcrtc, work);
+ if (ret)
+ DRM_ERROR("failed to queue plane work: %d\n", ret);
- fb = xchg(&dplane->old_fb, NULL);
- if (fb)
- drm_framebuffer_put(fb);
+ dplane->base.next_work = !dplane->base.next_work;
return 0;
}
@@ -362,7 +343,7 @@ static int armada_ovl_plane_set_property(struct drm_plane *plane,
static const struct drm_plane_funcs armada_ovl_plane_funcs = {
.update_plane = armada_ovl_plane_update,
- .disable_plane = armada_ovl_plane_disable,
+ .disable_plane = armada_drm_plane_disable,
.destroy = armada_ovl_plane_destroy,
.set_property = armada_ovl_plane_set_property,
};
@@ -454,7 +435,8 @@ int armada_overlay_plane_create(struct drm_device *dev, unsigned long crtcs)
return ret;
}
- dplane->vbl.work.fn = armada_ovl_plane_work;
+ dplane->base.works[0].fn = armada_ovl_plane_work;
+ dplane->base.works[1].fn = armada_ovl_plane_work;
ret = drm_universal_plane_init(dev, &dplane->base.base, crtcs,
&armada_ovl_plane_funcs,
diff --git a/drivers/gpu/drm/armada/armada_trace.h b/drivers/gpu/drm/armada/armada_trace.h
index 8dbfea7a00fe..f03a56bda596 100644
--- a/drivers/gpu/drm/armada/armada_trace.h
+++ b/drivers/gpu/drm/armada/armada_trace.h
@@ -34,14 +34,34 @@ TRACE_EVENT(armada_ovl_plane_update,
__field(struct drm_plane *, plane)
__field(struct drm_crtc *, crtc)
__field(struct drm_framebuffer *, fb)
+ __field(int, crtc_x)
+ __field(int, crtc_y)
+ __field(unsigned int, crtc_w)
+ __field(unsigned int, crtc_h)
+ __field(u32, src_x)
+ __field(u32, src_y)
+ __field(u32, src_w)
+ __field(u32, src_h)
),
TP_fast_assign(
__entry->plane = plane;
__entry->crtc = crtc;
__entry->fb = fb;
+ __entry->crtc_x = crtc_x;
+ __entry->crtc_y = crtc_y;
+ __entry->crtc_w = crtc_w;
+ __entry->crtc_h = crtc_h;
+ __entry->src_x = src_x;
+ __entry->src_y = src_y;
+ __entry->src_w = src_w;
+ __entry->src_h = src_h;
),
- TP_printk("plane %p crtc %p fb %p",
- __entry->plane, __entry->crtc, __entry->fb)
+ TP_printk("plane %p crtc %p fb %p crtc @ (%d,%d, %ux%u) src @ (%u,%u, %ux%u)",
+ __entry->plane, __entry->crtc, __entry->fb,
+ __entry->crtc_x, __entry->crtc_y,
+ __entry->crtc_w, __entry->crtc_h,
+ __entry->src_x >> 16, __entry->src_y >> 16,
+ __entry->src_w >> 16, __entry->src_h >> 16)
);
TRACE_EVENT(armada_ovl_plane_work,
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c
index 28da7c2b7ed9..7b784d91e258 100644
--- a/drivers/gpu/drm/ast/ast_ttm.c
+++ b/drivers/gpu/drm/ast/ast_ttm.c
@@ -216,9 +216,10 @@ static struct ttm_tt *ast_ttm_tt_create(struct ttm_bo_device *bdev,
return tt;
}
-static int ast_ttm_tt_populate(struct ttm_tt *ttm)
+static int ast_ttm_tt_populate(struct ttm_tt *ttm,
+ struct ttm_operation_ctx *ctx)
{
- return ttm_pool_populate(ttm);
+ return ttm_pool_populate(ttm, ctx);
}
static void ast_ttm_tt_unpopulate(struct ttm_tt *ttm)
@@ -237,7 +238,6 @@ struct ttm_bo_driver ast_bo_driver = {
.verify_access = ast_bo_verify_access,
.io_mem_reserve = &ast_ttm_io_mem_reserve,
.io_mem_free = &ast_ttm_io_mem_free,
- .io_mem_pfn = ttm_bo_default_io_mem_pfn,
};
int ast_mm_init(struct ast_private *ast)
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
index c6e8061ffcfc..c1ea5c36b006 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
@@ -461,13 +461,6 @@ static struct drm_framebuffer *atmel_hlcdc_fb_create(struct drm_device *dev,
return drm_gem_fb_create(dev, file_priv, mode_cmd);
}
-static void atmel_hlcdc_fb_output_poll_changed(struct drm_device *dev)
-{
- struct atmel_hlcdc_dc *dc = dev->dev_private;
-
- drm_fbdev_cma_hotplug_event(dc->fbdev);
-}
-
struct atmel_hlcdc_dc_commit {
struct work_struct work;
struct drm_device *dev;
@@ -563,7 +556,7 @@ error:
static const struct drm_mode_config_funcs mode_config_funcs = {
.fb_create = atmel_hlcdc_fb_create,
- .output_poll_changed = atmel_hlcdc_fb_output_poll_changed,
+ .output_poll_changed = drm_fb_helper_output_poll_changed,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = atmel_hlcdc_dc_atomic_commit,
};
@@ -665,10 +658,7 @@ static int atmel_hlcdc_dc_load(struct drm_device *dev)
platform_set_drvdata(pdev, dev);
- dc->fbdev = drm_fbdev_cma_init(dev, 24,
- dev->mode_config.num_connector);
- if (IS_ERR(dc->fbdev))
- dc->fbdev = NULL;
+ drm_fb_cma_fbdev_init(dev, 24, 0);
drm_kms_helper_poll_init(dev);
@@ -688,8 +678,7 @@ static void atmel_hlcdc_dc_unload(struct drm_device *dev)
{
struct atmel_hlcdc_dc *dc = dev->dev_private;
- if (dc->fbdev)
- drm_fbdev_cma_fini(dc->fbdev);
+ drm_fb_cma_fbdev_fini(dev);
flush_workqueue(dc->wq);
drm_kms_helper_poll_fini(dev);
drm_mode_config_cleanup(dev);
@@ -705,13 +694,6 @@ static void atmel_hlcdc_dc_unload(struct drm_device *dev)
destroy_workqueue(dc->wq);
}
-static void atmel_hlcdc_dc_lastclose(struct drm_device *dev)
-{
- struct atmel_hlcdc_dc *dc = dev->dev_private;
-
- drm_fbdev_cma_restore_mode(dc->fbdev);
-}
-
static int atmel_hlcdc_dc_irq_postinstall(struct drm_device *dev)
{
struct atmel_hlcdc_dc *dc = dev->dev_private;
@@ -744,7 +726,7 @@ static struct drm_driver atmel_hlcdc_dc_driver = {
.driver_features = DRIVER_HAVE_IRQ | DRIVER_GEM |
DRIVER_MODESET | DRIVER_PRIME |
DRIVER_ATOMIC,
- .lastclose = atmel_hlcdc_dc_lastclose,
+ .lastclose = drm_fb_helper_lastclose,
.irq_handler = atmel_hlcdc_dc_irq_handler,
.irq_preinstall = atmel_hlcdc_dc_irq_uninstall,
.irq_postinstall = atmel_hlcdc_dc_irq_postinstall,
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h
index 6833ee253cfa..ab32d5b268d2 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h
@@ -32,6 +32,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
@@ -374,7 +375,6 @@ struct atmel_hlcdc_dc {
const struct atmel_hlcdc_dc_desc *desc;
struct dma_pool *dscrpool;
struct atmel_hlcdc *hlcdc;
- struct drm_fbdev_cma *fbdev;
struct drm_crtc *crtc;
struct atmel_hlcdc_layer *layers[ATMEL_HLCDC_MAX_LAYERS];
struct workqueue_struct *wq;
diff --git a/drivers/gpu/drm/bochs/bochs_mm.c b/drivers/gpu/drm/bochs/bochs_mm.c
index 8250b5e612d2..704e879711e4 100644
--- a/drivers/gpu/drm/bochs/bochs_mm.c
+++ b/drivers/gpu/drm/bochs/bochs_mm.c
@@ -205,7 +205,6 @@ struct ttm_bo_driver bochs_bo_driver = {
.verify_access = bochs_bo_verify_access,
.io_mem_reserve = &bochs_ttm_io_mem_reserve,
.io_mem_free = &bochs_ttm_io_mem_free,
- .io_mem_pfn = ttm_bo_default_io_mem_pfn,
};
int bochs_mm_init(struct bochs_device *bochs)
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
index 5dd3f1cd074a..a8905049b9da 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
@@ -946,7 +946,9 @@ static int analogix_dp_get_modes(struct drm_connector *connector)
return 0;
}
+ pm_runtime_get_sync(dp->dev);
edid = drm_get_edid(connector, &dp->aux.ddc);
+ pm_runtime_put(dp->dev);
if (edid) {
drm_mode_connector_update_edid_property(&dp->connector,
edid);
diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c
index 2a5b54d3a03a..a8e31ea07382 100644
--- a/drivers/gpu/drm/cirrus/cirrus_ttm.c
+++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c
@@ -216,9 +216,10 @@ static struct ttm_tt *cirrus_ttm_tt_create(struct ttm_bo_device *bdev,
return tt;
}
-static int cirrus_ttm_tt_populate(struct ttm_tt *ttm)
+static int cirrus_ttm_tt_populate(struct ttm_tt *ttm,
+ struct ttm_operation_ctx *ctx)
{
- return ttm_pool_populate(ttm);
+ return ttm_pool_populate(ttm, ctx);
}
static void cirrus_ttm_tt_unpopulate(struct ttm_tt *ttm)
@@ -237,7 +238,6 @@ struct ttm_bo_driver cirrus_bo_driver = {
.verify_access = cirrus_bo_verify_access,
.io_mem_reserve = &cirrus_ttm_io_mem_reserve,
.io_mem_free = &cirrus_ttm_io_mem_free,
- .io_mem_pfn = ttm_bo_default_io_mem_pfn,
};
int cirrus_mm_init(struct cirrus_device *cirrus)
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 37445d50816a..b76d49218cf1 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -50,7 +50,8 @@ EXPORT_SYMBOL(__drm_crtc_commit_free);
* @state: atomic state
*
* Free all the memory allocated by drm_atomic_state_init.
- * This is useful for drivers that subclass the atomic state.
+ * This should only be used by drivers which are still subclassing
+ * &drm_atomic_state and haven't switched to &drm_private_state yet.
*/
void drm_atomic_state_default_release(struct drm_atomic_state *state)
{
@@ -67,7 +68,8 @@ EXPORT_SYMBOL(drm_atomic_state_default_release);
* @state: atomic state
*
* Default implementation for filling in a new atomic state.
- * This is useful for drivers that subclass the atomic state.
+ * This should only be used by drivers which are still subclassing
+ * &drm_atomic_state and haven't switched to &drm_private_state yet.
*/
int
drm_atomic_state_init(struct drm_device *dev, struct drm_atomic_state *state)
@@ -132,7 +134,8 @@ EXPORT_SYMBOL(drm_atomic_state_alloc);
* @state: atomic state
*
* Default implementation for clearing atomic state.
- * This is useful for drivers that subclass the atomic state.
+ * This should only be used by drivers which are still subclassing
+ * &drm_atomic_state and haven't switched to &drm_private_state yet.
*/
void drm_atomic_state_default_clear(struct drm_atomic_state *state)
{
@@ -947,6 +950,42 @@ static void drm_atomic_plane_print_state(struct drm_printer *p,
}
/**
+ * DOC: handling driver private state
+ *
+ * Very often the DRM objects exposed to userspace in the atomic modeset api
+ * (&drm_connector, &drm_crtc and &drm_plane) do not map neatly to the
+ * underlying hardware. Especially for any kind of shared resources (e.g. shared
+ * clocks, scaler units, bandwidth and fifo limits shared among a group of
+ * planes or CRTCs, and so on) it makes sense to model these as independent
+ * objects. Drivers then need to do similar state tracking and commit ordering for
+ * such private (since not exposed to userspace) objects as the atomic core and
+ * helpers already provide for connectors, planes and CRTCs.
+ *
+ * To make this easier on drivers the atomic core provides some support to track
+ * driver private state objects using struct &drm_private_obj, with the
+ * associated state struct &drm_private_state.
+ *
+ * Similar to userspace-exposed objects, private state structures can be
+ * acquired by calling drm_atomic_get_private_obj_state(). Since this function
+ * does not take care of locking, drivers should wrap it for each type of
+ * private state object they have with the required call to drm_modeset_lock()
+ * for the corresponding &drm_modeset_lock.
+ *
+ * All private state structures contained in a &drm_atomic_state update can be
+ * iterated using for_each_oldnew_private_obj_in_state(),
+ * for_each_new_private_obj_in_state() and for_each_old_private_obj_in_state().
+ * Drivers are recommended to wrap these for each type of driver private state
+ * object they have, filtering on &drm_private_obj.funcs using for_each_if(), at
+ * least if they want to iterate over all objects of a given type.
+ *
+ * An earlier way to handle driver private state was by subclassing struct
+ * &drm_atomic_state. But since that encourages non-standard ways to implement
+ * the check/commit split atomic requires (by using e.g. "check and rollback or
+ * commit instead" of "duplicate state, check, then either commit or release
+ * duplicated state") it is deprecated in favour of using &drm_private_state.
+ */
+
+/**
* drm_atomic_private_obj_init - initialize private object
* @obj: private object
* @state: initial private object state
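The DOC comment above describes the wrapping pattern without showing it; the following sketch, using purely hypothetical foo_* names, illustrates how a driver might subclass &drm_private_state and wrap drm_atomic_get_private_obj_state() with its own &drm_modeset_lock.

/* Hypothetical driver-private state, a sketch only. */
struct foo_shared_state {
	struct drm_private_state base;
	unsigned int bandwidth;
};

static struct foo_shared_state *
foo_get_shared_state(struct drm_atomic_state *state, struct foo_device *foo)
{
	struct drm_private_state *priv;
	int ret;

	/* Serialise access to the private object with the driver's lock. */
	ret = drm_modeset_lock(&foo->shared_lock, state->acquire_ctx);
	if (ret)
		return ERR_PTR(ret);

	priv = drm_atomic_get_private_obj_state(state, &foo->shared_obj);
	if (IS_ERR(priv))
		return ERR_CAST(priv);

	return container_of(priv, struct foo_shared_state, base);
}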
diff --git a/drivers/gpu/drm/drm_blend.c b/drivers/gpu/drm/drm_blend.c
index 2e5e089dd912..4c62dff14893 100644
--- a/drivers/gpu/drm/drm_blend.c
+++ b/drivers/gpu/drm/drm_blend.c
@@ -214,9 +214,11 @@ EXPORT_SYMBOL(drm_rotation_simplify);
* This function initializes generic mutable zpos property and enables support
* for it in drm core. Drivers can then attach this property to planes to enable
* support for configurable planes arrangement during blending operation.
- * Once mutable zpos property has been enabled, the DRM core will automatically
- * calculate &drm_plane_state.normalized_zpos values. Usually min should be set
- * to 0 and max to maximal number of planes for given crtc - 1.
+ * Drivers that attach a mutable zpos property to any plane should call the
+ * drm_atomic_normalize_zpos() helper during their implementation of
+ * &drm_mode_config_funcs.atomic_check(), which will update the normalized zpos
+ * values and store them in &drm_plane_state.normalized_zpos. Usually min
+ * should be set to 0 and max to maximal number of planes for given crtc - 1.
*
* If zpos of some planes cannot be changed (like fixed background or
* cursor/topmost planes), driver should adjust min/max values and assign those
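To make the reworked zpos documentation concrete, a driver attaching a mutable zpos property would call drm_atomic_normalize_zpos() from its &drm_mode_config_funcs.atomic_check hook; this is only an illustrative sketch with hypothetical foo_* names, delegating the remaining checks to drm_atomic_helper_check().

static int foo_atomic_check(struct drm_device *dev,
			    struct drm_atomic_state *state)
{
	int ret;

	/* Recompute drm_plane_state.normalized_zpos for the affected CRTCs. */
	ret = drm_atomic_normalize_zpos(dev, state);
	if (ret)
		return ret;

	return drm_atomic_helper_check(dev, state);
}

static const struct drm_mode_config_funcs foo_mode_config_funcs = {
	.fb_create = drm_gem_fb_create,
	.atomic_check = foo_atomic_check,
	.atomic_commit = drm_atomic_helper_commit,
};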
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index 624edeb5c50d..e6a21e69059c 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -153,6 +153,25 @@ static void drm_connector_free(struct kref *kref)
connector->funcs->destroy(connector);
}
+void drm_connector_free_work_fn(struct work_struct *work)
+{
+ struct drm_connector *connector, *n;
+ struct drm_device *dev =
+ container_of(work, struct drm_device, mode_config.connector_free_work);
+ struct drm_mode_config *config = &dev->mode_config;
+ unsigned long flags;
+ struct llist_node *freed;
+
+ spin_lock_irqsave(&config->connector_list_lock, flags);
+ freed = llist_del_all(&config->connector_free_list);
+ spin_unlock_irqrestore(&config->connector_list_lock, flags);
+
+ llist_for_each_entry_safe(connector, n, freed, free_node) {
+ drm_mode_object_unregister(dev, &connector->base);
+ connector->funcs->destroy(connector);
+ }
+}
+
/**
* drm_connector_init - Init a preallocated connector
* @dev: DRM device
@@ -532,6 +551,25 @@ void drm_connector_list_iter_begin(struct drm_device *dev,
}
EXPORT_SYMBOL(drm_connector_list_iter_begin);
+/*
+ * Extra-safe connector put function that works in any context. Should only be
+ * used from the connector_iter functions, where we never really expect to
+ * actually release the connector when dropping our final reference.
+ */
+static void
+__drm_connector_put_safe(struct drm_connector *conn)
+{
+ struct drm_mode_config *config = &conn->dev->mode_config;
+
+ lockdep_assert_held(&config->connector_list_lock);
+
+ if (!refcount_dec_and_test(&conn->base.refcount.refcount))
+ return;
+
+ llist_add(&conn->free_node, &config->connector_free_list);
+ schedule_work(&config->connector_free_work);
+}
+
/**
* drm_connector_list_iter_next - return next connector
* @iter: connector_list iterator
@@ -561,10 +599,10 @@ drm_connector_list_iter_next(struct drm_connector_list_iter *iter)
/* loop until it's not a zombie connector */
} while (!kref_get_unless_zero(&iter->conn->base.refcount));
- spin_unlock_irqrestore(&config->connector_list_lock, flags);
if (old_conn)
- drm_connector_put(old_conn);
+ __drm_connector_put_safe(old_conn);
+ spin_unlock_irqrestore(&config->connector_list_lock, flags);
return iter->conn;
}
@@ -581,9 +619,15 @@ EXPORT_SYMBOL(drm_connector_list_iter_next);
*/
void drm_connector_list_iter_end(struct drm_connector_list_iter *iter)
{
+ struct drm_mode_config *config = &iter->dev->mode_config;
+ unsigned long flags;
+
iter->dev = NULL;
- if (iter->conn)
- drm_connector_put(iter->conn);
+ if (iter->conn) {
+ spin_lock_irqsave(&config->connector_list_lock, flags);
+ __drm_connector_put_safe(iter->conn);
+ spin_unlock_irqrestore(&config->connector_list_lock, flags);
+ }
lock_release(&connector_list_iter_dep_map, 0, _RET_IP_);
}
EXPORT_SYMBOL(drm_connector_list_iter_end);
@@ -1229,6 +1273,19 @@ int drm_mode_connector_update_edid_property(struct drm_connector *connector,
if (edid)
size = EDID_LENGTH * (1 + edid->extensions);
+ /* Set the display info, using edid if available, otherwise
+ * resetting the values to defaults. This duplicates the work
+ * done in drm_add_edid_modes, but that function is not
+ * consistently called before this one in all drivers and the
+ * computation is cheap enough that it seems better to
+ * duplicate it rather than attempt to ensure some arbitrary
+ * ordering of calls.
+ */
+ if (edid)
+ drm_add_display_info(connector, edid);
+ else
+ drm_reset_display_info(connector);
+
drm_object_property_set_value(&connector->base,
dev->mode_config.non_desktop_property,
connector->display_info.non_desktop);
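With the comment added above, drivers no longer have to worry about the relative order of drm_mode_connector_update_edid_property() and drm_add_edid_modes(); a typical get_modes() implementation (hypothetical foo_* names, assuming an i2c adapter in foo->ddc) simply does:

static int foo_connector_get_modes(struct drm_connector *connector)
{
	struct foo_device *foo = connector_to_foo(connector);
	struct edid *edid;
	int count;

	edid = drm_get_edid(connector, foo->ddc);
	/* Updates both the EDID property and connector->display_info. */
	drm_mode_connector_update_edid_property(connector, edid);
	if (!edid)
		return 0;

	count = drm_add_edid_modes(connector, edid);
	kfree(edid);

	return count;
}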
diff --git a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h
index 9ebb8841778c..af00f42ba269 100644
--- a/drivers/gpu/drm/drm_crtc_internal.h
+++ b/drivers/gpu/drm/drm_crtc_internal.h
@@ -142,6 +142,7 @@ int drm_mode_connector_set_obj_prop(struct drm_mode_object *obj,
uint64_t value);
int drm_connector_create_standard_properties(struct drm_device *dev);
const char *drm_get_connector_force_name(enum drm_connector_force force);
+void drm_connector_free_work_fn(struct work_struct *work);
/* IOCTL */
int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 524eace3d460..ddd537914575 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -1731,7 +1731,7 @@ EXPORT_SYMBOL(drm_edid_duplicate);
*
* Returns true if @vendor is in @edid, false otherwise
*/
-static bool edid_vendor(struct edid *edid, const char *vendor)
+static bool edid_vendor(const struct edid *edid, const char *vendor)
{
char edid_vendor[3];
@@ -1749,7 +1749,7 @@ static bool edid_vendor(struct edid *edid, const char *vendor)
*
* This tells subsequent routines what fixes they need to apply.
*/
-static u32 edid_get_quirks(struct edid *edid)
+static u32 edid_get_quirks(const struct edid *edid)
{
const struct edid_quirk *quirk;
int i;
@@ -2813,7 +2813,7 @@ add_detailed_modes(struct drm_connector *connector, struct edid *edid,
/*
* Search EDID for CEA extension block.
*/
-static u8 *drm_find_edid_extension(struct edid *edid, int ext_id)
+static u8 *drm_find_edid_extension(const struct edid *edid, int ext_id)
{
u8 *edid_ext = NULL;
int i;
@@ -2835,12 +2835,12 @@ static u8 *drm_find_edid_extension(struct edid *edid, int ext_id)
return edid_ext;
}
-static u8 *drm_find_cea_extension(struct edid *edid)
+static u8 *drm_find_cea_extension(const struct edid *edid)
{
return drm_find_edid_extension(edid, CEA_EXT);
}
-static u8 *drm_find_displayid_extension(struct edid *edid)
+static u8 *drm_find_displayid_extension(const struct edid *edid)
{
return drm_find_edid_extension(edid, DISPLAYID_EXT);
}
@@ -4378,7 +4378,7 @@ drm_parse_hdmi_vsdb_video(struct drm_connector *connector, const u8 *db)
}
static void drm_parse_cea_ext(struct drm_connector *connector,
- struct edid *edid)
+ const struct edid *edid)
{
struct drm_display_info *info = &connector->display_info;
const u8 *edid_ext;
@@ -4412,11 +4412,34 @@ static void drm_parse_cea_ext(struct drm_connector *connector,
}
}
-static void drm_add_display_info(struct drm_connector *connector,
- struct edid *edid, u32 quirks)
+/* The connector has no EDID information, so there are no EDID quirks to
+ * compute. Reset all of the values which would have been set from the EDID.
+ */
+void
+drm_reset_display_info(struct drm_connector *connector)
{
struct drm_display_info *info = &connector->display_info;
+ info->width_mm = 0;
+ info->height_mm = 0;
+
+ info->bpc = 0;
+ info->color_formats = 0;
+ info->cea_rev = 0;
+ info->max_tmds_clock = 0;
+ info->dvi_dual = false;
+ info->has_hdmi_infoframe = false;
+
+ info->non_desktop = 0;
+}
+EXPORT_SYMBOL_GPL(drm_reset_display_info);
+
+u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edid)
+{
+ struct drm_display_info *info = &connector->display_info;
+
+ u32 quirks = edid_get_quirks(edid);
+
info->width_mm = edid->width_cm * 10;
info->height_mm = edid->height_cm * 10;
@@ -4430,11 +4453,13 @@ static void drm_add_display_info(struct drm_connector *connector,
info->non_desktop = !!(quirks & EDID_QUIRK_NON_DESKTOP);
+ DRM_DEBUG_KMS("non_desktop set to %d\n", info->non_desktop);
+
if (edid->revision < 3)
- return;
+ return quirks;
if (!(edid->input & DRM_EDID_INPUT_DIGITAL))
- return;
+ return quirks;
drm_parse_cea_ext(connector, edid);
@@ -4454,7 +4479,7 @@ static void drm_add_display_info(struct drm_connector *connector,
/* Only defined for 1.4 with digital displays */
if (edid->revision < 4)
- return;
+ return quirks;
switch (edid->input & DRM_EDID_DIGITAL_DEPTH_MASK) {
case DRM_EDID_DIGITAL_DEPTH_6:
@@ -4489,7 +4514,9 @@ static void drm_add_display_info(struct drm_connector *connector,
info->color_formats |= DRM_COLOR_FORMAT_YCRCB444;
if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB422)
info->color_formats |= DRM_COLOR_FORMAT_YCRCB422;
+ return quirks;
}
+EXPORT_SYMBOL_GPL(drm_add_display_info);
static int validate_displayid(u8 *displayid, int length, int idx)
{
@@ -4645,8 +4672,6 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
return 0;
}
- quirks = edid_get_quirks(edid);
-
drm_edid_to_eld(connector, edid);
/*
@@ -4654,7 +4679,7 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
* To avoid multiple parsing of same block, lets parse that map
* from sink info, before parsing CEA modes.
*/
- drm_add_display_info(connector, edid, quirks);
+ quirks = drm_add_display_info(connector, edid);
/*
* EDID spec says modes should be preferred in this order:
@@ -4846,6 +4871,11 @@ EXPORT_SYMBOL(drm_hdmi_avi_infoframe_from_display_mode);
* @mode: DRM display mode
* @rgb_quant_range: RGB quantization range (Q)
* @rgb_quant_range_selectable: Sink support selectable RGB quantization range (QS)
+ * @is_hdmi2_sink: HDMI 2.0 sink, which has different default recommendations
+ *
+ * Note that @is_hdmi2_sink can be derived from the &drm_scdc.supported flag
+ * stored in &drm_hdmi_info.scdc of &drm_display_info.hdmi, which in turn can
+ * be found in &drm_connector.display_info.
*/
void
drm_hdmi_avi_infoframe_quant_range(struct hdmi_avi_infoframe *frame,
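A short illustration of the @is_hdmi2_sink note above (sketch only): the flag can be read straight from the parsed display info before building the infoframe.

	/* Derive the value expected for @is_hdmi2_sink from the parsed EDID. */
	bool is_hdmi2_sink = connector->display_info.hdmi.scdc.supported;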
diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c
index 35b56dfba929..186d00adfb5f 100644
--- a/drivers/gpu/drm/drm_fb_cma_helper.c
+++ b/drivers/gpu/drm/drm_fb_cma_helper.c
@@ -23,6 +23,7 @@
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_print.h>
#include <linux/module.h>
#define DEFAULT_FBDEFIO_DELAY_MS 50
@@ -42,7 +43,7 @@ struct drm_fbdev_cma {
* callback function to create a cma backed framebuffer.
*
* An fbdev framebuffer backed by cma is also available by calling
- * drm_fbdev_cma_init(). drm_fbdev_cma_fini() tears it down.
+ * drm_fb_cma_fbdev_init(). drm_fb_cma_fbdev_fini() tears it down.
* If the &drm_framebuffer_funcs.dirty callback is set, fb_deferred_io will be
* set up automatically. &drm_framebuffer_funcs.dirty is called by
* drm_fb_helper_deferred_io() in process context (&struct delayed_work).
@@ -68,7 +69,7 @@ struct drm_fbdev_cma {
*
* Initialize::
*
- * fbdev = drm_fbdev_cma_init_with_funcs(dev, 16,
+ * fbdev = drm_fb_cma_fbdev_init_with_funcs(dev, 16,
* dev->mode_config.num_crtc,
* dev->mode_config.num_connector,
* &driver_fb_funcs);
@@ -256,7 +257,7 @@ drm_fbdev_cma_create(struct drm_fb_helper *helper,
fbi->screen_size = size;
fbi->fix.smem_len = size;
- if (fbdev_cma->fb_funcs->dirty) {
+ if (fb->funcs->dirty) {
ret = drm_fbdev_cma_defio_init(fbi, obj);
if (ret)
goto err_cma_destroy;
@@ -278,6 +279,118 @@ static const struct drm_fb_helper_funcs drm_fb_cma_helper_funcs = {
};
/**
+ * drm_fb_cma_fbdev_init_with_funcs() - Allocate and initialize fbdev emulation
+ * @dev: DRM device
+ * @preferred_bpp: Preferred bits per pixel for the device.
+ * @dev->mode_config.preferred_depth is used if this is zero.
+ * @max_conn_count: Maximum number of connectors.
+ * @dev->mode_config.num_connector is used if this is zero.
+ * @funcs: Framebuffer functions, in particular a custom dirty() callback.
+ * Can be NULL.
+ *
+ * Returns:
+ * Zero on success or negative error code on failure.
+ */
+int drm_fb_cma_fbdev_init_with_funcs(struct drm_device *dev,
+ unsigned int preferred_bpp, unsigned int max_conn_count,
+ const struct drm_framebuffer_funcs *funcs)
+{
+ struct drm_fbdev_cma *fbdev_cma;
+ struct drm_fb_helper *fb_helper;
+ int ret;
+
+ if (!preferred_bpp)
+ preferred_bpp = dev->mode_config.preferred_depth;
+ if (!preferred_bpp)
+ preferred_bpp = 32;
+
+ if (!max_conn_count)
+ max_conn_count = dev->mode_config.num_connector;
+
+ fbdev_cma = kzalloc(sizeof(*fbdev_cma), GFP_KERNEL);
+ if (!fbdev_cma)
+ return -ENOMEM;
+
+ fbdev_cma->fb_funcs = funcs;
+ fb_helper = &fbdev_cma->fb_helper;
+
+ drm_fb_helper_prepare(dev, fb_helper, &drm_fb_cma_helper_funcs);
+
+ ret = drm_fb_helper_init(dev, fb_helper, max_conn_count);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev->dev, "Failed to initialize fbdev helper.\n");
+ goto err_free;
+ }
+
+ ret = drm_fb_helper_single_add_all_connectors(fb_helper);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev->dev, "Failed to add connectors.\n");
+ goto err_drm_fb_helper_fini;
+ }
+
+ ret = drm_fb_helper_initial_config(fb_helper, preferred_bpp);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev->dev, "Failed to set fbdev configuration.\n");
+ goto err_drm_fb_helper_fini;
+ }
+
+ return 0;
+
+err_drm_fb_helper_fini:
+ drm_fb_helper_fini(fb_helper);
+err_free:
+ kfree(fbdev_cma);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(drm_fb_cma_fbdev_init_with_funcs);
+
+/**
+ * drm_fb_cma_fbdev_init() - Allocate and initialize fbdev emulation
+ * @dev: DRM device
+ * @preferred_bpp: Preferred bits per pixel for the device.
+ * @dev->mode_config.preferred_depth is used if this is zero.
+ * @max_conn_count: Maximum number of connectors.
+ * @dev->mode_config.num_connector is used if this is zero.
+ *
+ * Returns:
+ * Zero on success or negative error code on failure.
+ */
+int drm_fb_cma_fbdev_init(struct drm_device *dev, unsigned int preferred_bpp,
+ unsigned int max_conn_count)
+{
+ return drm_fb_cma_fbdev_init_with_funcs(dev, preferred_bpp,
+ max_conn_count, NULL);
+}
+EXPORT_SYMBOL_GPL(drm_fb_cma_fbdev_init);
+
+/**
+ * drm_fb_cma_fbdev_fini() - Teardown fbdev emulation
+ * @dev: DRM device
+ */
+void drm_fb_cma_fbdev_fini(struct drm_device *dev)
+{
+ struct drm_fb_helper *fb_helper = dev->fb_helper;
+
+ if (!fb_helper)
+ return;
+
+ /* Unregister if it hasn't been done already */
+ if (fb_helper->fbdev && fb_helper->fbdev->dev)
+ drm_fb_helper_unregister_fbi(fb_helper);
+
+ if (fb_helper->fbdev)
+ drm_fbdev_cma_defio_fini(fb_helper->fbdev);
+
+ if (fb_helper->fb)
+ drm_framebuffer_remove(fb_helper->fb);
+
+ drm_fb_helper_fini(fb_helper);
+ kfree(to_fbdev_cma(fb_helper));
+}
+EXPORT_SYMBOL_GPL(drm_fb_cma_fbdev_fini);
+
+/**
* drm_fbdev_cma_init_with_funcs() - Allocate and initialize a drm_fbdev_cma struct
* @dev: DRM device
* @preferred_bpp: Preferred bits per pixel for the device
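The atmel-hlcdc hunks earlier in this patch are the reference user of these new helpers; in general a CMA-backed driver without a custom dirty() callback would wire them up roughly as below (hypothetical foo_* names).

static int foo_load(struct drm_device *dev)
{
	int ret;

	/* ... mode config, CRTCs, planes and connectors set up here ... */

	ret = drm_fb_cma_fbdev_init(dev, 32, 0);
	if (ret)
		DRM_DEV_ERROR(dev->dev, "Failed to init fbdev emulation: %d\n", ret);

	drm_kms_helper_poll_init(dev);

	return 0;
}

static void foo_unload(struct drm_device *dev)
{
	drm_kms_helper_poll_fini(dev);
	drm_fb_cma_fbdev_fini(dev);
}

Together with .lastclose = drm_fb_helper_lastclose and .output_poll_changed = drm_fb_helper_output_poll_changed this removes the driver's private fbdev pointer entirely, as the atmel-hlcdc conversion shows.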
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 04a3a5ce370a..035784ddd133 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -66,19 +66,23 @@ static DEFINE_MUTEX(kernel_fb_helper_lock);
* helper functions used by many drivers to implement the kernel mode setting
* interfaces.
*
- * Initialization is done as a four-step process with drm_fb_helper_prepare(),
- * drm_fb_helper_init(), drm_fb_helper_single_add_all_connectors() and
- * drm_fb_helper_initial_config(). Drivers with fancier requirements than the
- * default behaviour can override the third step with their own code.
- * Teardown is done with drm_fb_helper_fini() after the fbdev device is
- * unregisters using drm_fb_helper_unregister_fbi().
- *
- * At runtime drivers should restore the fbdev console by calling
- * drm_fb_helper_restore_fbdev_mode_unlocked() from their &drm_driver.lastclose
- * callback. They should also notify the fb helper code from updates to the
- * output configuration by calling drm_fb_helper_hotplug_event(). For easier
- * integration with the output polling code in drm_crtc_helper.c the modeset
- * code provides a &drm_mode_config_funcs.output_poll_changed callback.
+ * Setup fbdev emulation by calling drm_fb_helper_fbdev_setup() and tear it
+ * down by calling drm_fb_helper_fbdev_teardown().
+ *
+ * Drivers that need to handle connector hotplugging (e.g. dp mst) can't use
+ * the setup helper and will need to do the whole four-step setup process with
+ * drm_fb_helper_prepare(), drm_fb_helper_init(),
+ * drm_fb_helper_single_add_all_connectors(), enable hotplugging and
+ * drm_fb_helper_initial_config() to avoid a possible race window.
+ *
+ * At runtime drivers should restore the fbdev console by using
+ * drm_fb_helper_lastclose() as their &drm_driver.lastclose callback.
+ * They should also notify the fb helper code from updates to the output
+ * configuration by using drm_fb_helper_output_poll_changed() as their
+ * &drm_mode_config_funcs.output_poll_changed callback.
+ *
+ * For suspend/resume consider using drm_mode_config_helper_suspend() and
+ * drm_mode_config_helper_resume() which takes care of fbdev as well.
*
* All other functions exported by the fb helper library can be used to
* implement the fbdev driver interface by the driver.
@@ -103,7 +107,8 @@ static DEFINE_MUTEX(kernel_fb_helper_lock);
* always run in process context since the fb_*() function could be running in
* atomic context. If drm_fb_helper_deferred_io() is used as the deferred_io
* callback it will also schedule dirty_work with the damage collected from the
- * mmap page writes.
+ * mmap page writes. Drivers can use drm_fb_helper_defio_init() to set up
+ * deferred I/O (coupled with drm_fb_helper_fbdev_teardown()).
*/
#define drm_fb_helper_for_each_connector(fbh, i__) \
@@ -1025,6 +1030,49 @@ void drm_fb_helper_deferred_io(struct fb_info *info,
EXPORT_SYMBOL(drm_fb_helper_deferred_io);
/**
+ * drm_fb_helper_defio_init - fbdev deferred I/O initialization
+ * @fb_helper: driver-allocated fbdev helper
+ *
+ * This function allocates a &fb_deferred_io, sets its deferred_io callback to
+ * drm_fb_helper_deferred_io(), sets the delay to 50ms and calls
+ * fb_deferred_io_init().
+ * It should be called from the &drm_fb_helper_funcs->fb_probe callback.
+ * drm_fb_helper_fbdev_teardown() cleans up deferred I/O.
+ *
+ * NOTE: A copy of &fb_ops is made and assigned to &info->fbops. This is done
+ * because fb_deferred_io_cleanup() clears &fbops->fb_mmap and would thereby
+ * affect other instances of that &fb_ops.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ */
+int drm_fb_helper_defio_init(struct drm_fb_helper *fb_helper)
+{
+ struct fb_info *info = fb_helper->fbdev;
+ struct fb_deferred_io *fbdefio;
+ struct fb_ops *fbops;
+
+ fbdefio = kzalloc(sizeof(*fbdefio), GFP_KERNEL);
+ fbops = kzalloc(sizeof(*fbops), GFP_KERNEL);
+ if (!fbdefio || !fbops) {
+ kfree(fbdefio);
+ kfree(fbops);
+ return -ENOMEM;
+ }
+
+ info->fbdefio = fbdefio;
+ fbdefio->delay = msecs_to_jiffies(50);
+ fbdefio->deferred_io = drm_fb_helper_deferred_io;
+
+ *fbops = *info->fbops;
+ info->fbops = fbops;
+
+ fb_deferred_io_init(info);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_fb_helper_defio_init);
+
+/**
* drm_fb_helper_sys_read - wrapper around fb_sys_read
* @info: fb_info struct pointer
* @buf: userspace buffer to read from framebuffer memory
@@ -1848,6 +1896,7 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
if (ret < 0)
return ret;
+ strcpy(fb_helper->fb->comm, "[fbcon]");
return 0;
}
@@ -2730,6 +2779,120 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
EXPORT_SYMBOL(drm_fb_helper_hotplug_event);
/**
+ * drm_fb_helper_fbdev_setup() - Setup fbdev emulation
+ * @dev: DRM device
+ * @fb_helper: fbdev helper structure to set up
+ * @funcs: fbdev helper functions
+ * @preferred_bpp: Preferred bits per pixel for the device.
+ * @dev->mode_config.preferred_depth is used if this is zero.
+ * @max_conn_count: Maximum number of connectors.
+ * @dev->mode_config.num_connector is used if this is zero.
+ *
+ * This function sets up fbdev emulation and registers fbdev for access by
+ * userspace. If all connectors are disconnected, setup is deferred to the next
+ * time drm_fb_helper_hotplug_event() is called.
+ * The caller must provide a &drm_fb_helper_funcs->fb_probe callback
+ * function.
+ *
+ * See also: drm_fb_helper_initial_config()
+ *
+ * Returns:
+ * Zero on success or negative error code on failure.
+ */
+int drm_fb_helper_fbdev_setup(struct drm_device *dev,
+ struct drm_fb_helper *fb_helper,
+ const struct drm_fb_helper_funcs *funcs,
+ unsigned int preferred_bpp,
+ unsigned int max_conn_count)
+{
+ int ret;
+
+ if (!preferred_bpp)
+ preferred_bpp = dev->mode_config.preferred_depth;
+ if (!preferred_bpp)
+ preferred_bpp = 32;
+
+ if (!max_conn_count)
+ max_conn_count = dev->mode_config.num_connector;
+ if (!max_conn_count) {
+ DRM_DEV_ERROR(dev->dev, "No connectors\n");
+ return -EINVAL;
+ }
+
+ drm_fb_helper_prepare(dev, fb_helper, funcs);
+
+ ret = drm_fb_helper_init(dev, fb_helper, max_conn_count);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev->dev, "Failed to initialize fbdev helper\n");
+ return ret;
+ }
+
+ ret = drm_fb_helper_single_add_all_connectors(fb_helper);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev->dev, "Failed to add connectors\n");
+ goto err_drm_fb_helper_fini;
+ }
+
+ if (!drm_drv_uses_atomic_modeset(dev))
+ drm_helper_disable_unused_functions(dev);
+
+ ret = drm_fb_helper_initial_config(fb_helper, preferred_bpp);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev->dev, "Failed to set fbdev configuration\n");
+ goto err_drm_fb_helper_fini;
+ }
+
+ return 0;
+
+err_drm_fb_helper_fini:
+ drm_fb_helper_fini(fb_helper);
+
+ return ret;
+}
+EXPORT_SYMBOL(drm_fb_helper_fbdev_setup);
+
+/**
+ * drm_fb_helper_fbdev_teardown - Tear down fbdev emulation
+ * @dev: DRM device
+ *
+ * This function unregisters fbdev if not already done and cleans up the
+ * associated resources including the &drm_framebuffer.
+ * The driver is responsible for freeing the &drm_fb_helper structure which is
+ * stored in &drm_device->fb_helper. Do note that this pointer has been cleared
+ * when this function returns.
+ *
+ * In order to support device removal/unplug while file handles are still open,
+ * drm_fb_helper_unregister_fbi() should be called on device removal and
+ * drm_fb_helper_fbdev_teardown() in the &drm_driver->release callback when
+ * file handles are closed.
+ */
+void drm_fb_helper_fbdev_teardown(struct drm_device *dev)
+{
+ struct drm_fb_helper *fb_helper = dev->fb_helper;
+ struct fb_ops *fbops = NULL;
+
+ if (!fb_helper)
+ return;
+
+ /* Unregister if it hasn't been done already */
+ if (fb_helper->fbdev && fb_helper->fbdev->dev)
+ drm_fb_helper_unregister_fbi(fb_helper);
+
+ if (fb_helper->fbdev && fb_helper->fbdev->fbdefio) {
+ fb_deferred_io_cleanup(fb_helper->fbdev);
+ kfree(fb_helper->fbdev->fbdefio);
+ fbops = fb_helper->fbdev->fbops;
+ }
+
+ drm_fb_helper_fini(fb_helper);
+ kfree(fbops);
+
+ if (fb_helper->fb)
+ drm_framebuffer_remove(fb_helper->fb);
+}
+EXPORT_SYMBOL(drm_fb_helper_fbdev_teardown);
+
+/**
* drm_fb_helper_lastclose - DRM driver lastclose helper for fbdev emulation
* @dev: DRM device
*
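For drivers that are not CMA based, the one-call equivalent is drm_fb_helper_fbdev_setup(); the sketch below (hypothetical foo_* names, assuming a driver-provided &drm_fb_helper_funcs.fb_probe in foo_fb_helper_funcs) pairs it with drm_fb_helper_fbdev_teardown().

static int foo_fbdev_setup(struct foo_device *foo)
{
	/* 0 picks the preferred-depth and connector-count defaults. */
	return drm_fb_helper_fbdev_setup(foo->drm, &foo->fb_helper,
					 &foo_fb_helper_funcs, 0, 0);
}

static void foo_fbdev_teardown(struct foo_device *foo)
{
	/* Also cleans up deferred I/O set up by drm_fb_helper_defio_init(). */
	drm_fb_helper_fbdev_teardown(foo->drm);
}

The driver's &drm_driver.lastclose and &drm_mode_config_funcs.output_poll_changed can then simply point at drm_fb_helper_lastclose() and drm_fb_helper_output_poll_changed(), as the stock callbacks mentioned in the updated overview.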
diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c
index d63d4c2ac4c8..5a13ff29f4f0 100644
--- a/drivers/gpu/drm/drm_framebuffer.c
+++ b/drivers/gpu/drm/drm_framebuffer.c
@@ -664,6 +664,7 @@ int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb,
INIT_LIST_HEAD(&fb->filp_head);
fb->funcs = funcs;
+ strcpy(fb->comm, current->comm);
ret = __drm_mode_object_add(dev, &fb->base, DRM_MODE_OBJECT_FB,
false, drm_framebuffer_free);
@@ -978,6 +979,7 @@ void drm_framebuffer_print_info(struct drm_printer *p, unsigned int indent,
struct drm_format_name_buf format_name;
unsigned int i;
+ drm_printf_indent(p, indent, "allocated by = %s\n", fb->comm);
drm_printf_indent(p, indent, "refcount=%u\n",
drm_framebuffer_read_refcount(fb));
drm_printf_indent(p, indent, "format=%s\n",
diff --git a/drivers/gpu/drm/drm_gem_framebuffer_helper.c b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
index aa8cb9bfa499..4d682a6e8bcb 100644
--- a/drivers/gpu/drm/drm_gem_framebuffer_helper.c
+++ b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
@@ -272,7 +272,8 @@ EXPORT_SYMBOL_GPL(drm_gem_fb_prepare_fb);
* @sizes: fbdev size description
* @pitch_align: Optional pitch alignment
* @obj: GEM object backing the framebuffer
- * @funcs: vtable to be used for the new framebuffer object
+ * @funcs: Optional vtable to be used for the new framebuffer object when the
+ * dirty callback is needed.
*
* This function creates a framebuffer from a &drm_fb_helper_surface_size
* description for use in the &drm_fb_helper_funcs.fb_probe callback.
@@ -300,6 +301,9 @@ drm_gem_fbdev_fb_create(struct drm_device *dev,
if (obj->size < mode_cmd.pitches[0] * mode_cmd.height)
return ERR_PTR(-EINVAL);
+ if (!funcs)
+ funcs = &drm_gem_fb_funcs;
+
return drm_gem_fb_alloc(dev, &mode_cmd, &obj, 1, funcs);
}
EXPORT_SYMBOL(drm_gem_fbdev_fb_create);
diff --git a/drivers/gpu/drm/drm_lease.c b/drivers/gpu/drm/drm_lease.c
index d1eb56a1eff4..59849f02e2ad 100644
--- a/drivers/gpu/drm/drm_lease.c
+++ b/drivers/gpu/drm/drm_lease.c
@@ -254,10 +254,10 @@ static struct drm_master *drm_lease_create(struct drm_master *lessor, struct idr
return lessee;
out_lessee:
- drm_master_put(&lessee);
-
mutex_unlock(&dev->mode_config.idr_mutex);
+ drm_master_put(&lessee);
+
return ERR_PTR(error);
}
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index eb86bc3f753b..186c4e90cc1c 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -575,21 +575,23 @@ EXPORT_SYMBOL(drm_mm_remove_node);
*/
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
+ struct drm_mm *mm = old->mm;
+
DRM_MM_BUG_ON(!old->allocated);
*new = *old;
list_replace(&old->node_list, &new->node_list);
- rb_replace_node(&old->rb, &new->rb, &old->mm->interval_tree.rb_root);
+ rb_replace_node_cached(&old->rb, &new->rb, &mm->interval_tree);
if (drm_mm_hole_follows(old)) {
list_replace(&old->hole_stack, &new->hole_stack);
rb_replace_node(&old->rb_hole_size,
&new->rb_hole_size,
- &old->mm->holes_size);
+ &mm->holes_size);
rb_replace_node(&old->rb_hole_addr,
&new->rb_hole_addr,
- &old->mm->holes_addr);
+ &mm->holes_addr);
}
old->allocated = false;
diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c
index cda8bfab6d3b..e5c653357024 100644
--- a/drivers/gpu/drm/drm_mode_config.c
+++ b/drivers/gpu/drm/drm_mode_config.c
@@ -382,6 +382,9 @@ void drm_mode_config_init(struct drm_device *dev)
ida_init(&dev->mode_config.connector_ida);
spin_lock_init(&dev->mode_config.connector_list_lock);
+ init_llist_head(&dev->mode_config.connector_free_list);
+ INIT_WORK(&dev->mode_config.connector_free_work, drm_connector_free_work_fn);
+
drm_mode_create_standard_properties(dev);
/* Just to be sure */
@@ -431,6 +434,8 @@ void drm_mode_config_cleanup(struct drm_device *dev)
drm_connector_put(connector);
}
drm_connector_list_iter_end(&conn_iter);
+ /* connector_iter drops references in a work item. */
+ flush_work(&dev->mode_config.connector_free_work);
if (WARN_ON(!list_empty(&dev->mode_config.connector_list))) {
drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter)
@@ -467,6 +472,9 @@ void drm_mode_config_cleanup(struct drm_device *dev)
*/
WARN_ON(!list_empty(&dev->mode_config.fb_list));
list_for_each_entry_safe(fb, fbt, &dev->mode_config.fb_list, head) {
+ struct drm_printer p = drm_debug_printer("[leaked fb]");
+ drm_printf(&p, "framebuffer[%u]:\n", fb->base.id);
+ drm_framebuffer_print_info(&p, 1, fb);
drm_framebuffer_free(&fb->base.refcount);
}
diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
index 901a4e9a87a3..1f2af707ce03 100644
--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
+++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
@@ -9,6 +9,7 @@
*/
#include <linux/dmi.h>
+#include <linux/module.h>
#include <drm/drm_connector.h>
#ifdef CONFIG_DMI
@@ -172,3 +173,5 @@ int drm_get_panel_orientation_quirk(int width, int height)
EXPORT_SYMBOL(drm_get_panel_orientation_quirk);
#endif
+
+MODULE_LICENSE("Dual MIT/GPL");
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index 9b733c510cbf..131695915acd 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -29,9 +29,9 @@
/**
* DOC: Overview
*
- * DRM synchronisation objects (syncobj) are a persistent objects,
- * that contain an optional fence. The fence can be updated with a new
- * fence, or be NULL.
+ * DRM synchronisation objects (syncobj, see struct &drm_syncobj) are
+ * persistent objects that contain an optional fence. The fence can be updated
+ * with a new fence, or be NULL.
*
* syncobjs can be waited upon, in which case the wait completes once the
* underlying fence is signalled.
@@ -61,7 +61,8 @@
* @file_private: drm file private pointer
* @handle: sync object handle to lookup.
*
- * Returns a reference to the syncobj pointed to by handle or NULL.
+ * Returns a reference to the syncobj pointed to by handle or NULL. The
+ * reference must be released by calling drm_syncobj_put().
*/
struct drm_syncobj *drm_syncobj_find(struct drm_file *file_private,
u32 handle)
@@ -229,6 +230,19 @@ static int drm_syncobj_assign_null_handle(struct drm_syncobj *syncobj)
return 0;
}
+/**
+ * drm_syncobj_find_fence - lookup and reference the fence in a sync object
+ * @file_private: drm file private pointer
+ * @handle: sync object handle to lookup.
+ * @fence: out parameter for the fence
+ *
+ * This is just a convenience function that combines drm_syncobj_find() and
+ * drm_syncobj_fence_get().
+ *
+ * Returns 0 on success or a negative error value on failure. On success @fence
+ * contains a reference to the fence, which must be released by calling
+ * dma_fence_put().
+ */
int drm_syncobj_find_fence(struct drm_file *file_private,
u32 handle,
struct dma_fence **fence)
@@ -269,6 +283,12 @@ EXPORT_SYMBOL(drm_syncobj_free);
* @out_syncobj: returned syncobj
* @flags: DRM_SYNCOBJ_* flags
* @fence: if non-NULL, the syncobj will represent this fence
+ *
+ * This is the first function to create a sync object. After creating, drivers
+ * probably want to make it available to userspace, either through
+ * drm_syncobj_get_handle() or drm_syncobj_get_fd().
+ *
+ * Returns 0 on success or a negative error value on failure.
*/
int drm_syncobj_create(struct drm_syncobj **out_syncobj, uint32_t flags,
struct dma_fence *fence)
@@ -302,6 +322,14 @@ EXPORT_SYMBOL(drm_syncobj_create);
/**
* drm_syncobj_get_handle - get a handle from a syncobj
+ * @file_private: drm file private pointer
+ * @syncobj: Sync object to export
+ * @handle: out parameter with the new handle
+ *
+ * Exports a sync object created with drm_syncobj_create() as a handle on
+ * @file_private to userspace.
+ *
+ * Returns 0 on success or a negative error value on failure.
*/
int drm_syncobj_get_handle(struct drm_file *file_private,
struct drm_syncobj *syncobj, u32 *handle)
@@ -388,6 +416,15 @@ static int drm_syncobj_alloc_file(struct drm_syncobj *syncobj)
return 0;
}
+/**
+ * drm_syncobj_get_fd - get a file descriptor from a syncobj
+ * @syncobj: Sync object to export
+ * @p_fd: out parameter with the new file descriptor
+ *
+ * Exports a sync object created with drm_syncobj_create() as a file descriptor.
+ *
+ * Returns 0 on success or a negative error value on failure.
+ */
int drm_syncobj_get_fd(struct drm_syncobj *syncobj, int *p_fd)
{
int ret;
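Taken together, the kernel-doc added in this hunk describes the full lifecycle; a condensed sketch (not lifted from any ioctl handler, with file_private assumed to be the caller's &drm_file) would be:

	struct drm_syncobj *syncobj;
	struct dma_fence *fence;
	u32 handle;
	int ret;

	/* Create a syncobj and expose it to userspace as a handle. */
	ret = drm_syncobj_create(&syncobj, 0, NULL);
	if (ret)
		return ret;

	ret = drm_syncobj_get_handle(file_private, syncobj, &handle);
	drm_syncobj_put(syncobj);	/* the handle keeps its own reference */
	if (ret)
		return ret;

	/* Later: grab a reference to whatever fence the syncobj holds. */
	ret = drm_syncobj_find_fence(file_private, handle, &fence);
	if (ret)
		return ret;
	/* ... wait on or otherwise use the fence ... */
	dma_fence_put(fence);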
diff --git a/drivers/gpu/drm/etnaviv/Kconfig b/drivers/gpu/drm/etnaviv/Kconfig
index a29b8f59eb15..3f58b4077767 100644
--- a/drivers/gpu/drm/etnaviv/Kconfig
+++ b/drivers/gpu/drm/etnaviv/Kconfig
@@ -6,6 +6,7 @@ config DRM_ETNAVIV
depends on MMU
select SHMEM
select SYNC_FILE
+ select THERMAL if DRM_ETNAVIV_THERMAL
select TMPFS
select WANT_DEV_COREDUMP
select CMA if HAVE_DMA_CONTIGUOUS
@@ -13,6 +14,14 @@ config DRM_ETNAVIV
help
DRM driver for Vivante GPUs.
+config DRM_ETNAVIV_THERMAL
+ bool "enable ETNAVIV thermal throttling"
+ depends on DRM_ETNAVIV
+ default y
+ help
+ Compile in support for thermal throttling.
+ Say Y unless you want to risk burning your SoC.
+
config DRM_ETNAVIV_REGISTER_LOGGING
bool "enable ETNAVIV register logging"
depends on DRM_ETNAVIV
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
index 9e7098e3207f..99ad2f073c6e 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
@@ -100,6 +100,8 @@ static void etnaviv_cmd_select_pipe(struct etnaviv_gpu *gpu,
{
u32 flush = 0;
+ lockdep_assert_held(&gpu->lock);
+
/*
* This assumes that if we're switching to 2D, we're switching
* away from 3D, and vice versa. Hence, if we're switching to
@@ -164,7 +166,9 @@ static u32 etnaviv_buffer_reserve(struct etnaviv_gpu *gpu,
u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu)
{
- struct etnaviv_cmdbuf *buffer = gpu->buffer;
+ struct etnaviv_cmdbuf *buffer = &gpu->buffer;
+
+ lockdep_assert_held(&gpu->lock);
/* initialize buffer */
buffer->user_size = 0;
@@ -178,7 +182,9 @@ u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu)
u16 etnaviv_buffer_config_mmuv2(struct etnaviv_gpu *gpu, u32 mtlb_addr, u32 safe_addr)
{
- struct etnaviv_cmdbuf *buffer = gpu->buffer;
+ struct etnaviv_cmdbuf *buffer = &gpu->buffer;
+
+ lockdep_assert_held(&gpu->lock);
buffer->user_size = 0;
@@ -211,10 +217,12 @@ u16 etnaviv_buffer_config_mmuv2(struct etnaviv_gpu *gpu, u32 mtlb_addr, u32 safe
void etnaviv_buffer_end(struct etnaviv_gpu *gpu)
{
- struct etnaviv_cmdbuf *buffer = gpu->buffer;
+ struct etnaviv_cmdbuf *buffer = &gpu->buffer;
unsigned int waitlink_offset = buffer->user_size - 16;
u32 link_target, flush = 0;
+ lockdep_assert_held(&gpu->lock);
+
if (gpu->exec_state == ETNA_PIPE_2D)
flush = VIVS_GL_FLUSH_CACHE_PE2D;
else if (gpu->exec_state == ETNA_PIPE_3D)
@@ -253,10 +261,12 @@ void etnaviv_buffer_end(struct etnaviv_gpu *gpu)
/* Append a 'sync point' to the ring buffer. */
void etnaviv_sync_point_queue(struct etnaviv_gpu *gpu, unsigned int event)
{
- struct etnaviv_cmdbuf *buffer = gpu->buffer;
+ struct etnaviv_cmdbuf *buffer = &gpu->buffer;
unsigned int waitlink_offset = buffer->user_size - 16;
u32 dwords, target;
+ lockdep_assert_held(&gpu->lock);
+
/*
* We need at most 3 dwords in the return target:
* 1 event + 1 end + 1 wait + 1 link.
@@ -287,13 +297,16 @@ void etnaviv_sync_point_queue(struct etnaviv_gpu *gpu, unsigned int event)
}
/* Append a command buffer to the ring buffer. */
-void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
- struct etnaviv_cmdbuf *cmdbuf)
+void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
+ unsigned int event, struct etnaviv_cmdbuf *cmdbuf)
{
- struct etnaviv_cmdbuf *buffer = gpu->buffer;
+ struct etnaviv_cmdbuf *buffer = &gpu->buffer;
unsigned int waitlink_offset = buffer->user_size - 16;
u32 return_target, return_dwords;
u32 link_target, link_dwords;
+ bool switch_context = gpu->exec_state != exec_state;
+
+ lockdep_assert_held(&gpu->lock);
if (drm_debug & DRM_UT_DRIVER)
etnaviv_buffer_dump(gpu, buffer, 0, 0x50);
@@ -306,7 +319,7 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
* need to append a mmu flush load state, followed by a new
* link to this buffer - a total of four additional words.
*/
- if (gpu->mmu->need_flush || gpu->switch_context) {
+ if (gpu->mmu->need_flush || switch_context) {
u32 target, extra_dwords;
/* link command */
@@ -321,7 +334,7 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
}
/* pipe switch commands */
- if (gpu->switch_context)
+ if (switch_context)
extra_dwords += 4;
target = etnaviv_buffer_reserve(gpu, buffer, extra_dwords);
@@ -349,10 +362,9 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
gpu->mmu->need_flush = false;
}
- if (gpu->switch_context) {
- etnaviv_cmd_select_pipe(gpu, buffer, cmdbuf->exec_state);
- gpu->exec_state = cmdbuf->exec_state;
- gpu->switch_context = false;
+ if (switch_context) {
+ etnaviv_cmd_select_pipe(gpu, buffer, exec_state);
+ gpu->exec_state = exec_state;
}
/* And the link to the submitted buffer */
@@ -421,4 +433,6 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
if (drm_debug & DRM_UT_DRIVER)
etnaviv_buffer_dump(gpu, buffer, 0, 0x50);
+
+ gpu->lastctx = cmdbuf->ctx;
}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_cmd_parser.c b/drivers/gpu/drm/etnaviv/etnaviv_cmd_parser.c
index 6e3bbcf24160..68e6d3772ad8 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_cmd_parser.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_cmd_parser.c
@@ -78,6 +78,7 @@ static const struct {
ST(0x17c0, 8),
ST(0x17e0, 8),
ST(0x2400, 14 * 16),
+ ST(0x3824, 1),
ST(0x10800, 32 * 16),
ST(0x14600, 16),
ST(0x14800, 8 * 8),
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c b/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c
index 66ac79558bbd..3746827f45eb 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c
@@ -86,26 +86,11 @@ void etnaviv_cmdbuf_suballoc_destroy(struct etnaviv_cmdbuf_suballoc *suballoc)
kfree(suballoc);
}
-struct etnaviv_cmdbuf *
-etnaviv_cmdbuf_new(struct etnaviv_cmdbuf_suballoc *suballoc, u32 size,
- size_t nr_bos, size_t nr_pmrs)
+int etnaviv_cmdbuf_init(struct etnaviv_cmdbuf_suballoc *suballoc,
+ struct etnaviv_cmdbuf *cmdbuf, u32 size)
{
- struct etnaviv_cmdbuf *cmdbuf;
- struct etnaviv_perfmon_request *pmrs;
- size_t sz = size_vstruct(nr_bos, sizeof(cmdbuf->bo_map[0]),
- sizeof(*cmdbuf));
int granule_offs, order, ret;
- cmdbuf = kzalloc(sz, GFP_KERNEL);
- if (!cmdbuf)
- return NULL;
-
- sz = sizeof(*pmrs) * nr_pmrs;
- pmrs = kzalloc(sz, GFP_KERNEL);
- if (!pmrs)
- goto out_free_cmdbuf;
-
- cmdbuf->pmrs = pmrs;
cmdbuf->suballoc = suballoc;
cmdbuf->size = size;
@@ -123,7 +108,7 @@ retry:
if (!ret) {
dev_err(suballoc->gpu->dev,
"Timeout waiting for cmdbuf space\n");
- return NULL;
+ return -ETIMEDOUT;
}
goto retry;
}
@@ -131,11 +116,7 @@ retry:
cmdbuf->suballoc_offset = granule_offs * SUBALLOC_GRANULE;
cmdbuf->vaddr = suballoc->vaddr + cmdbuf->suballoc_offset;
- return cmdbuf;
-
-out_free_cmdbuf:
- kfree(cmdbuf);
- return NULL;
+ return 0;
}
void etnaviv_cmdbuf_free(struct etnaviv_cmdbuf *cmdbuf)
@@ -151,8 +132,6 @@ void etnaviv_cmdbuf_free(struct etnaviv_cmdbuf *cmdbuf)
suballoc->free_space = 1;
mutex_unlock(&suballoc->lock);
wake_up_all(&suballoc->free_event);
- kfree(cmdbuf->pmrs);
- kfree(cmdbuf);
}
u32 etnaviv_cmdbuf_get_va(struct etnaviv_cmdbuf *buf)
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h b/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h
index b6348b9f2a9d..ddc3f7ea169c 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h
@@ -33,27 +33,15 @@ struct etnaviv_cmdbuf {
void *vaddr;
u32 size;
u32 user_size;
- /* fence after which this buffer is to be disposed */
- struct dma_fence *fence;
- /* target exec state */
- u32 exec_state;
- /* per GPU in-flight list */
- struct list_head node;
- /* perfmon requests */
- unsigned int nr_pmrs;
- struct etnaviv_perfmon_request *pmrs;
- /* BOs attached to this command buffer */
- unsigned int nr_bos;
- struct etnaviv_vram_mapping *bo_map[0];
};
struct etnaviv_cmdbuf_suballoc *
etnaviv_cmdbuf_suballoc_new(struct etnaviv_gpu * gpu);
void etnaviv_cmdbuf_suballoc_destroy(struct etnaviv_cmdbuf_suballoc *suballoc);
-struct etnaviv_cmdbuf *
-etnaviv_cmdbuf_new(struct etnaviv_cmdbuf_suballoc *suballoc, u32 size,
- size_t nr_bos, size_t nr_pmrs);
+
+int etnaviv_cmdbuf_init(struct etnaviv_cmdbuf_suballoc *suballoc,
+ struct etnaviv_cmdbuf *cmdbuf, u32 size);
void etnaviv_cmdbuf_free(struct etnaviv_cmdbuf *cmdbuf);
u32 etnaviv_cmdbuf_get_va(struct etnaviv_cmdbuf *buf);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index 491eddf9b150..6faf4042db23 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -172,7 +172,7 @@ static int etnaviv_mmu_show(struct etnaviv_gpu *gpu, struct seq_file *m)
static void etnaviv_buffer_dump(struct etnaviv_gpu *gpu, struct seq_file *m)
{
- struct etnaviv_cmdbuf *buf = gpu->buffer;
+ struct etnaviv_cmdbuf *buf = &gpu->buffer;
u32 size = buf->size;
u32 *ptr = buf->vaddr;
u32 i;
@@ -459,9 +459,6 @@ static int etnaviv_ioctl_pm_query_dom(struct drm_device *dev, void *data,
struct drm_etnaviv_pm_domain *args = data;
struct etnaviv_gpu *gpu;
- /* reject as long as the feature isn't stable */
- return -EINVAL;
-
if (args->pipe >= ETNA_MAX_PIPES)
return -EINVAL;
@@ -479,9 +476,6 @@ static int etnaviv_ioctl_pm_query_sig(struct drm_device *dev, void *data,
struct drm_etnaviv_pm_signal *args = data;
struct etnaviv_gpu *gpu;
- /* reject as long as the feature isn't stable */
- return -EINVAL;
-
if (args->pipe >= ETNA_MAX_PIPES)
return -EINVAL;
@@ -556,7 +550,7 @@ static struct drm_driver etnaviv_drm_driver = {
.desc = "etnaviv DRM",
.date = "20151214",
.major = 1,
- .minor = 1,
+ .minor = 2,
};
/*
@@ -580,12 +574,6 @@ static int etnaviv_bind(struct device *dev)
}
drm->dev_private = priv;
- priv->wq = alloc_ordered_workqueue("etnaviv", 0);
- if (!priv->wq) {
- ret = -ENOMEM;
- goto out_wq;
- }
-
mutex_init(&priv->gem_lock);
INIT_LIST_HEAD(&priv->gem_list);
priv->num_gpus = 0;
@@ -607,9 +595,6 @@ static int etnaviv_bind(struct device *dev)
out_register:
component_unbind_all(dev, drm);
out_bind:
- flush_workqueue(priv->wq);
- destroy_workqueue(priv->wq);
-out_wq:
kfree(priv);
out_unref:
drm_dev_unref(drm);
@@ -624,9 +609,6 @@ static void etnaviv_unbind(struct device *dev)
drm_dev_unregister(drm);
- flush_workqueue(priv->wq);
- destroy_workqueue(priv->wq);
-
component_unbind_all(dev, drm);
drm->dev_private = NULL;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.h b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
index d249acb6da08..a54f0b758a5c 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
@@ -56,18 +56,8 @@ struct etnaviv_drm_private {
/* list of GEM objects: */
struct mutex gem_lock;
struct list_head gem_list;
-
- struct workqueue_struct *wq;
};
-static inline void etnaviv_queue_work(struct drm_device *dev,
- struct work_struct *w)
-{
- struct etnaviv_drm_private *priv = dev->dev_private;
-
- queue_work(priv->wq, w);
-}
-
int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
struct drm_file *file);
@@ -97,8 +87,8 @@ u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu);
u16 etnaviv_buffer_config_mmuv2(struct etnaviv_gpu *gpu, u32 mtlb_addr, u32 safe_addr);
void etnaviv_buffer_end(struct etnaviv_gpu *gpu);
void etnaviv_sync_point_queue(struct etnaviv_gpu *gpu, unsigned int event);
-void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
- struct etnaviv_cmdbuf *cmdbuf);
+void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
+ unsigned int event, struct etnaviv_cmdbuf *cmdbuf);
void etnaviv_validate_init(void);
bool etnaviv_cmd_validate_one(struct etnaviv_gpu *gpu,
u32 *stream, unsigned int size,
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_dump.c b/drivers/gpu/drm/etnaviv/etnaviv_dump.c
index 2d955d7d7b6d..6d0909c589d1 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_dump.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_dump.c
@@ -120,7 +120,7 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
struct core_dump_iterator iter;
struct etnaviv_vram_mapping *vram;
struct etnaviv_gem_object *obj;
- struct etnaviv_cmdbuf *cmd;
+ struct etnaviv_gem_submit *submit;
unsigned int n_obj, n_bomap_pages;
size_t file_size, mmu_size;
__le64 *bomap, *bomap_start;
@@ -132,11 +132,11 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
n_bomap_pages = 0;
file_size = ARRAY_SIZE(etnaviv_dump_registers) *
sizeof(struct etnaviv_dump_registers) +
- mmu_size + gpu->buffer->size;
+ mmu_size + gpu->buffer.size;
/* Add in the active command buffers */
- list_for_each_entry(cmd, &gpu->active_cmd_list, node) {
- file_size += cmd->size;
+ list_for_each_entry(submit, &gpu->active_submit_list, node) {
+ file_size += submit->cmdbuf.size;
n_obj++;
}
@@ -176,13 +176,14 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
etnaviv_core_dump_registers(&iter, gpu);
etnaviv_core_dump_mmu(&iter, gpu, mmu_size);
- etnaviv_core_dump_mem(&iter, ETDUMP_BUF_RING, gpu->buffer->vaddr,
- gpu->buffer->size,
- etnaviv_cmdbuf_get_va(gpu->buffer));
-
- list_for_each_entry(cmd, &gpu->active_cmd_list, node)
- etnaviv_core_dump_mem(&iter, ETDUMP_BUF_CMD, cmd->vaddr,
- cmd->size, etnaviv_cmdbuf_get_va(cmd));
+ etnaviv_core_dump_mem(&iter, ETDUMP_BUF_RING, gpu->buffer.vaddr,
+ gpu->buffer.size,
+ etnaviv_cmdbuf_get_va(&gpu->buffer));
+
+ list_for_each_entry(submit, &gpu->active_submit_list, node)
+ etnaviv_core_dump_mem(&iter, ETDUMP_BUF_CMD,
+ submit->cmdbuf.vaddr, submit->cmdbuf.size,
+ etnaviv_cmdbuf_get_va(&submit->cmdbuf));
/* Reserve space for the bomap */
if (n_bomap_pages) {
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index daee3f1196df..fcc969fa0e69 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -24,6 +24,9 @@
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"
+static struct lock_class_key etnaviv_shm_lock_class;
+static struct lock_class_key etnaviv_userptr_lock_class;
+
static void etnaviv_gem_scatter_map(struct etnaviv_gem_object *etnaviv_obj)
{
struct drm_device *dev = etnaviv_obj->base.dev;
@@ -583,7 +586,7 @@ void etnaviv_gem_free_object(struct drm_gem_object *obj)
kfree(etnaviv_obj);
}
-int etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj)
+void etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj)
{
struct etnaviv_drm_private *priv = dev->dev_private;
struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
@@ -591,8 +594,6 @@ int etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj)
mutex_lock(&priv->gem_lock);
list_add_tail(&etnaviv_obj->gem_node, &priv->gem_list);
mutex_unlock(&priv->gem_lock);
-
- return 0;
}
static int etnaviv_gem_new_impl(struct drm_device *dev, u32 size, u32 flags,
@@ -640,8 +641,9 @@ static int etnaviv_gem_new_impl(struct drm_device *dev, u32 size, u32 flags,
return 0;
}
-static struct drm_gem_object *__etnaviv_gem_new(struct drm_device *dev,
- u32 size, u32 flags)
+/* convenience method to construct a GEM buffer object, and userspace handle */
+int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
+ u32 size, u32 flags, u32 *handle)
{
struct drm_gem_object *obj = NULL;
int ret;
@@ -653,6 +655,8 @@ static struct drm_gem_object *__etnaviv_gem_new(struct drm_device *dev,
if (ret)
goto fail;
+ lockdep_set_class(&to_etnaviv_bo(obj)->lock, &etnaviv_shm_lock_class);
+
ret = drm_gem_object_init(dev, obj, size);
if (ret == 0) {
struct address_space *mapping;
@@ -660,7 +664,7 @@ static struct drm_gem_object *__etnaviv_gem_new(struct drm_device *dev,
/*
* Our buffers are kept pinned, so allocating them
* from the MOVABLE zone is a really bad idea, and
- * conflicts with CMA. See coments above new_inode()
+ * conflicts with CMA. See comments above new_inode()
* why this is required _and_ expected if you're
* going to pin these pages.
*/
@@ -672,33 +676,12 @@ static struct drm_gem_object *__etnaviv_gem_new(struct drm_device *dev,
if (ret)
goto fail;
- return obj;
-
-fail:
- drm_gem_object_put_unlocked(obj);
- return ERR_PTR(ret);
-}
-
-/* convenience method to construct a GEM buffer object, and userspace handle */
-int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
- u32 size, u32 flags, u32 *handle)
-{
- struct drm_gem_object *obj;
- int ret;
-
- obj = __etnaviv_gem_new(dev, size, flags);
- if (IS_ERR(obj))
- return PTR_ERR(obj);
-
- ret = etnaviv_gem_obj_add(dev, obj);
- if (ret < 0) {
- drm_gem_object_put_unlocked(obj);
- return ret;
- }
+ etnaviv_gem_obj_add(dev, obj);
ret = drm_gem_handle_create(file, obj, handle);
/* drop reference from allocate - handle holds it now */
+fail:
drm_gem_object_put_unlocked(obj);
return ret;
@@ -722,139 +705,41 @@ int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags,
return 0;
}
-struct get_pages_work {
- struct work_struct work;
- struct mm_struct *mm;
- struct task_struct *task;
- struct etnaviv_gem_object *etnaviv_obj;
-};
-
-static struct page **etnaviv_gem_userptr_do_get_pages(
- struct etnaviv_gem_object *etnaviv_obj, struct mm_struct *mm, struct task_struct *task)
-{
- int ret = 0, pinned, npages = etnaviv_obj->base.size >> PAGE_SHIFT;
- struct page **pvec;
- uintptr_t ptr;
- unsigned int flags = 0;
-
- pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
- if (!pvec)
- return ERR_PTR(-ENOMEM);
-
- if (!etnaviv_obj->userptr.ro)
- flags |= FOLL_WRITE;
-
- pinned = 0;
- ptr = etnaviv_obj->userptr.ptr;
-
- down_read(&mm->mmap_sem);
- while (pinned < npages) {
- ret = get_user_pages_remote(task, mm, ptr, npages - pinned,
- flags, pvec + pinned, NULL, NULL);
- if (ret < 0)
- break;
-
- ptr += ret * PAGE_SIZE;
- pinned += ret;
- }
- up_read(&mm->mmap_sem);
-
- if (ret < 0) {
- release_pages(pvec, pinned);
- kvfree(pvec);
- return ERR_PTR(ret);
- }
-
- return pvec;
-}
-
-static void __etnaviv_gem_userptr_get_pages(struct work_struct *_work)
-{
- struct get_pages_work *work = container_of(_work, typeof(*work), work);
- struct etnaviv_gem_object *etnaviv_obj = work->etnaviv_obj;
- struct page **pvec;
-
- pvec = etnaviv_gem_userptr_do_get_pages(etnaviv_obj, work->mm, work->task);
-
- mutex_lock(&etnaviv_obj->lock);
- if (IS_ERR(pvec)) {
- etnaviv_obj->userptr.work = ERR_CAST(pvec);
- } else {
- etnaviv_obj->userptr.work = NULL;
- etnaviv_obj->pages = pvec;
- }
-
- mutex_unlock(&etnaviv_obj->lock);
- drm_gem_object_put_unlocked(&etnaviv_obj->base);
-
- mmput(work->mm);
- put_task_struct(work->task);
- kfree(work);
-}
-
static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
struct page **pvec = NULL;
- struct get_pages_work *work;
- struct mm_struct *mm;
- int ret, pinned, npages = etnaviv_obj->base.size >> PAGE_SHIFT;
-
- if (etnaviv_obj->userptr.work) {
- if (IS_ERR(etnaviv_obj->userptr.work)) {
- ret = PTR_ERR(etnaviv_obj->userptr.work);
- etnaviv_obj->userptr.work = NULL;
- } else {
- ret = -EAGAIN;
- }
- return ret;
- }
+ struct etnaviv_gem_userptr *userptr = &etnaviv_obj->userptr;
+ int ret, pinned = 0, npages = etnaviv_obj->base.size >> PAGE_SHIFT;
- mm = get_task_mm(etnaviv_obj->userptr.task);
- pinned = 0;
- if (mm == current->mm) {
- pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
- if (!pvec) {
- mmput(mm);
- return -ENOMEM;
- }
-
- pinned = __get_user_pages_fast(etnaviv_obj->userptr.ptr, npages,
- !etnaviv_obj->userptr.ro, pvec);
- if (pinned < 0) {
- kvfree(pvec);
- mmput(mm);
- return pinned;
- }
-
- if (pinned == npages) {
- etnaviv_obj->pages = pvec;
- mmput(mm);
- return 0;
- }
- }
+ might_lock_read(&current->mm->mmap_sem);
- release_pages(pvec, pinned);
- kvfree(pvec);
+ if (userptr->mm != current->mm)
+ return -EPERM;
- work = kmalloc(sizeof(*work), GFP_KERNEL);
- if (!work) {
- mmput(mm);
+ pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
+ if (!pvec)
return -ENOMEM;
- }
- get_task_struct(current);
- drm_gem_object_get(&etnaviv_obj->base);
+ do {
+ unsigned num_pages = npages - pinned;
+ uint64_t ptr = userptr->ptr + pinned * PAGE_SIZE;
+ struct page **pages = pvec + pinned;
- work->mm = mm;
- work->task = current;
- work->etnaviv_obj = etnaviv_obj;
+ ret = get_user_pages_fast(ptr, num_pages,
+ !userptr->ro ? FOLL_WRITE : 0, pages);
+ if (ret < 0) {
+ release_pages(pvec, pinned);
+ kvfree(pvec);
+ return ret;
+ }
+
+ pinned += ret;
- etnaviv_obj->userptr.work = &work->work;
- INIT_WORK(&work->work, __etnaviv_gem_userptr_get_pages);
+ } while (pinned < npages);
- etnaviv_queue_work(etnaviv_obj->base.dev, &work->work);
+ etnaviv_obj->pages = pvec;
- return -EAGAIN;
+ return 0;
}
static void etnaviv_gem_userptr_release(struct etnaviv_gem_object *etnaviv_obj)
@@ -870,7 +755,6 @@ static void etnaviv_gem_userptr_release(struct etnaviv_gem_object *etnaviv_obj)
release_pages(etnaviv_obj->pages, npages);
kvfree(etnaviv_obj->pages);
}
- put_task_struct(etnaviv_obj->userptr.task);
}
static int etnaviv_gem_userptr_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
@@ -897,17 +781,16 @@ int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
if (ret)
return ret;
+ lockdep_set_class(&etnaviv_obj->lock, &etnaviv_userptr_lock_class);
+
etnaviv_obj->userptr.ptr = ptr;
- etnaviv_obj->userptr.task = current;
+ etnaviv_obj->userptr.mm = current->mm;
etnaviv_obj->userptr.ro = !(flags & ETNA_USERPTR_WRITE);
- get_task_struct(current);
- ret = etnaviv_gem_obj_add(dev, &etnaviv_obj->base);
- if (ret)
- goto unreference;
+ etnaviv_gem_obj_add(dev, &etnaviv_obj->base);
ret = drm_gem_handle_create(file, &etnaviv_obj->base, handle);
-unreference:
+
/* drop reference from allocate - handle holds it now */
drm_gem_object_put_unlocked(&etnaviv_obj->base);
return ret;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.h b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
index e437fba1209d..be72a9833f2b 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
@@ -18,6 +18,7 @@
#define __ETNAVIV_GEM_H__
#include <linux/reservation.h>
+#include "etnaviv_cmdbuf.h"
#include "etnaviv_drv.h"
struct dma_fence;
@@ -26,8 +27,7 @@ struct etnaviv_gem_object;
struct etnaviv_gem_userptr {
uintptr_t ptr;
- struct task_struct *task;
- struct work_struct *work;
+ struct mm_struct *mm;
bool ro;
};
@@ -98,26 +98,32 @@ struct etnaviv_gem_submit_bo {
/* Created per submit-ioctl, to track bo's and cmdstream bufs, etc,
* associated with the cmdstream submission for synchronization (and
- * make it easier to unwind when things go wrong, etc). This only
- * lasts for the duration of the submit-ioctl.
+ * make it easier to unwind when things go wrong, etc).
*/
struct etnaviv_gem_submit {
- struct drm_device *dev;
+ struct kref refcount;
struct etnaviv_gpu *gpu;
- struct ww_acquire_ctx ticket;
- struct dma_fence *fence;
+ struct dma_fence *out_fence, *in_fence;
+ struct list_head node; /* GPU active submit list */
+ struct etnaviv_cmdbuf cmdbuf;
+ bool runtime_resumed;
+ u32 exec_state;
u32 flags;
+ unsigned int nr_pmrs;
+ struct etnaviv_perfmon_request *pmrs;
unsigned int nr_bos;
struct etnaviv_gem_submit_bo bos[0];
/* No new members here, the previous one is variable-length! */
};
+void etnaviv_submit_put(struct etnaviv_gem_submit * submit);
+
int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
struct timespec *timeout);
int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags,
struct reservation_object *robj, const struct etnaviv_gem_ops *ops,
struct etnaviv_gem_object **res);
-int etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj);
+void etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj);
struct page **etnaviv_gem_get_pages(struct etnaviv_gem_object *obj);
void etnaviv_gem_put_pages(struct etnaviv_gem_object *obj);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
index ae884723e9b1..5704305d41e6 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
@@ -19,6 +19,7 @@
#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
+static struct lock_class_key etnaviv_prime_lock_class;
struct sg_table *etnaviv_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
@@ -125,6 +126,8 @@ struct drm_gem_object *etnaviv_gem_prime_import_sg_table(struct drm_device *dev,
if (ret < 0)
return ERR_PTR(ret);
+ lockdep_set_class(&etnaviv_obj->lock, &etnaviv_prime_lock_class);
+
npages = size / PAGE_SIZE;
etnaviv_obj->sgt = sgt;
@@ -139,9 +142,7 @@ struct drm_gem_object *etnaviv_gem_prime_import_sg_table(struct drm_device *dev,
if (ret)
goto fail;
- ret = etnaviv_gem_obj_add(dev, &etnaviv_obj->base);
- if (ret)
- goto fail;
+ etnaviv_gem_obj_add(dev, &etnaviv_obj->base);
return &etnaviv_obj->base;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index ff911541a190..1f8202bca061 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -33,22 +33,25 @@
#define BO_PINNED 0x2000
static struct etnaviv_gem_submit *submit_create(struct drm_device *dev,
- struct etnaviv_gpu *gpu, size_t nr)
+ struct etnaviv_gpu *gpu, size_t nr_bos, size_t nr_pmrs)
{
struct etnaviv_gem_submit *submit;
- size_t sz = size_vstruct(nr, sizeof(submit->bos[0]), sizeof(*submit));
+ size_t sz = size_vstruct(nr_bos, sizeof(submit->bos[0]), sizeof(*submit));
- submit = kmalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
- if (submit) {
- submit->dev = dev;
- submit->gpu = gpu;
+ submit = kzalloc(sz, GFP_KERNEL);
+ if (!submit)
+ return NULL;
- /* initially, until copy_from_user() and bo lookup succeeds: */
- submit->nr_bos = 0;
- submit->fence = NULL;
-
- ww_acquire_init(&submit->ticket, &reservation_ww_class);
+ submit->pmrs = kcalloc(nr_pmrs, sizeof(struct etnaviv_perfmon_request),
+ GFP_KERNEL);
+ if (!submit->pmrs) {
+ kfree(submit);
+ return NULL;
}
+ submit->nr_pmrs = nr_pmrs;
+
+ submit->gpu = gpu;
+ kref_init(&submit->refcount);
return submit;
}
@@ -111,7 +114,8 @@ static void submit_unlock_object(struct etnaviv_gem_submit *submit, int i)
}
}
-static int submit_lock_objects(struct etnaviv_gem_submit *submit)
+static int submit_lock_objects(struct etnaviv_gem_submit *submit,
+ struct ww_acquire_ctx *ticket)
{
int contended, slow_locked = -1, i, ret = 0;
@@ -126,7 +130,7 @@ retry:
if (!(submit->bos[i].flags & BO_LOCKED)) {
ret = ww_mutex_lock_interruptible(&etnaviv_obj->resv->lock,
- &submit->ticket);
+ ticket);
if (ret == -EALREADY)
DRM_ERROR("BO at index %u already on submit list\n",
i);
@@ -136,7 +140,7 @@ retry:
}
}
- ww_acquire_done(&submit->ticket);
+ ww_acquire_done(ticket);
return 0;
@@ -154,7 +158,7 @@ fail:
/* we lost out in a seqno race, lock and retry.. */
ret = ww_mutex_lock_slow_interruptible(&etnaviv_obj->resv->lock,
- &submit->ticket);
+ ticket);
if (!ret) {
submit->bos[contended].flags |= BO_LOCKED;
slow_locked = contended;
@@ -181,19 +185,33 @@ static int submit_fence_sync(const struct etnaviv_gem_submit *submit)
break;
}
+ if (submit->flags & ETNA_SUBMIT_FENCE_FD_IN) {
+ /*
+ * Wait if the fence is from a foreign context, or if the fence
+ * array contains any fence from a foreign context.
+ */
+ if (!dma_fence_match_context(submit->in_fence, context))
+ ret = dma_fence_wait(submit->in_fence, true);
+ }
+
return ret;
}
-static void submit_unpin_objects(struct etnaviv_gem_submit *submit)
+static void submit_attach_object_fences(struct etnaviv_gem_submit *submit)
{
int i;
for (i = 0; i < submit->nr_bos; i++) {
- if (submit->bos[i].flags & BO_PINNED)
- etnaviv_gem_mapping_unreference(submit->bos[i].mapping);
+ struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
+
+ if (submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE)
+ reservation_object_add_excl_fence(etnaviv_obj->resv,
+ submit->out_fence);
+ else
+ reservation_object_add_shared_fence(etnaviv_obj->resv,
+ submit->out_fence);
- submit->bos[i].mapping = NULL;
- submit->bos[i].flags &= ~BO_PINNED;
+ submit_unlock_object(submit, i);
}
}
@@ -211,6 +229,7 @@ static int submit_pin_objects(struct etnaviv_gem_submit *submit)
ret = PTR_ERR(mapping);
break;
}
+ atomic_inc(&etnaviv_obj->gpu_active);
submit->bos[i].flags |= BO_PINNED;
submit->bos[i].mapping = mapping;
@@ -285,13 +304,11 @@ static int submit_reloc(struct etnaviv_gem_submit *submit, void *stream,
}
static int submit_perfmon_validate(struct etnaviv_gem_submit *submit,
- struct etnaviv_cmdbuf *cmdbuf,
- const struct drm_etnaviv_gem_submit_pmr *pmrs,
- u32 nr_pms)
+ u32 exec_state, const struct drm_etnaviv_gem_submit_pmr *pmrs)
{
u32 i;
- for (i = 0; i < nr_pms; i++) {
+ for (i = 0; i < submit->nr_pmrs; i++) {
const struct drm_etnaviv_gem_submit_pmr *r = pmrs + i;
struct etnaviv_gem_submit_bo *bo;
int ret;
@@ -316,39 +333,65 @@ static int submit_perfmon_validate(struct etnaviv_gem_submit *submit,
return -EINVAL;
}
- if (etnaviv_pm_req_validate(r, cmdbuf->exec_state)) {
+ if (etnaviv_pm_req_validate(r, exec_state)) {
DRM_ERROR("perfmon request: domain or signal not valid");
return -EINVAL;
}
- cmdbuf->pmrs[i].flags = r->flags;
- cmdbuf->pmrs[i].domain = r->domain;
- cmdbuf->pmrs[i].signal = r->signal;
- cmdbuf->pmrs[i].sequence = r->sequence;
- cmdbuf->pmrs[i].offset = r->read_offset;
- cmdbuf->pmrs[i].bo_vma = etnaviv_gem_vmap(&bo->obj->base);
+ submit->pmrs[i].flags = r->flags;
+ submit->pmrs[i].domain = r->domain;
+ submit->pmrs[i].signal = r->signal;
+ submit->pmrs[i].sequence = r->sequence;
+ submit->pmrs[i].offset = r->read_offset;
+ submit->pmrs[i].bo_vma = etnaviv_gem_vmap(&bo->obj->base);
}
return 0;
}
-static void submit_cleanup(struct etnaviv_gem_submit *submit)
+static void submit_cleanup(struct kref *kref)
{
+ struct etnaviv_gem_submit *submit =
+ container_of(kref, struct etnaviv_gem_submit, refcount);
unsigned i;
+ if (submit->runtime_resumed)
+ pm_runtime_put_autosuspend(submit->gpu->dev);
+
+ if (submit->cmdbuf.suballoc)
+ etnaviv_cmdbuf_free(&submit->cmdbuf);
+
for (i = 0; i < submit->nr_bos; i++) {
struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
+ /* unpin all objects */
+ if (submit->bos[i].flags & BO_PINNED) {
+ etnaviv_gem_mapping_unreference(submit->bos[i].mapping);
+ atomic_dec(&etnaviv_obj->gpu_active);
+ submit->bos[i].mapping = NULL;
+ submit->bos[i].flags &= ~BO_PINNED;
+ }
+
+ /* if the GPU submit failed, objects might still be locked */
submit_unlock_object(submit, i);
drm_gem_object_put_unlocked(&etnaviv_obj->base);
}
- ww_acquire_fini(&submit->ticket);
- if (submit->fence)
- dma_fence_put(submit->fence);
+ wake_up_all(&submit->gpu->fence_event);
+
+ if (submit->in_fence)
+ dma_fence_put(submit->in_fence);
+ if (submit->out_fence)
+ dma_fence_put(submit->out_fence);
+ kfree(submit->pmrs);
kfree(submit);
}
+void etnaviv_submit_put(struct etnaviv_gem_submit *submit)
+{
+ kref_put(&submit->refcount, submit_cleanup);
+}
+
int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
struct drm_file *file)
{
@@ -358,10 +401,9 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
struct drm_etnaviv_gem_submit_pmr *pmrs;
struct drm_etnaviv_gem_submit_bo *bos;
struct etnaviv_gem_submit *submit;
- struct etnaviv_cmdbuf *cmdbuf;
struct etnaviv_gpu *gpu;
- struct dma_fence *in_fence = NULL;
struct sync_file *sync_file = NULL;
+ struct ww_acquire_ctx ticket;
int out_fence_fd = -1;
void *stream;
int ret;
@@ -399,17 +441,11 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
relocs = kvmalloc_array(args->nr_relocs, sizeof(*relocs), GFP_KERNEL);
pmrs = kvmalloc_array(args->nr_pmrs, sizeof(*pmrs), GFP_KERNEL);
stream = kvmalloc_array(1, args->stream_size, GFP_KERNEL);
- cmdbuf = etnaviv_cmdbuf_new(gpu->cmdbuf_suballoc,
- ALIGN(args->stream_size, 8) + 8,
- args->nr_bos, args->nr_pmrs);
- if (!bos || !relocs || !pmrs || !stream || !cmdbuf) {
+ if (!bos || !relocs || !pmrs || !stream) {
ret = -ENOMEM;
goto err_submit_cmds;
}
- cmdbuf->exec_state = args->exec_state;
- cmdbuf->ctx = file->driver_priv;
-
ret = copy_from_user(bos, u64_to_user_ptr(args->bos),
args->nr_bos * sizeof(*bos));
if (ret) {
@@ -430,7 +466,6 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
ret = -EFAULT;
goto err_submit_cmds;
}
- cmdbuf->nr_pmrs = args->nr_pmrs;
ret = copy_from_user(stream, u64_to_user_ptr(args->stream),
args->stream_size);
@@ -447,19 +482,28 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
}
}
- submit = submit_create(dev, gpu, args->nr_bos);
+ ww_acquire_init(&ticket, &reservation_ww_class);
+
+ submit = submit_create(dev, gpu, args->nr_bos, args->nr_pmrs);
if (!submit) {
ret = -ENOMEM;
- goto err_submit_cmds;
+ goto err_submit_ww_acquire;
}
+ ret = etnaviv_cmdbuf_init(gpu->cmdbuf_suballoc, &submit->cmdbuf,
+ ALIGN(args->stream_size, 8) + 8);
+ if (ret)
+ goto err_submit_objects;
+
+ submit->cmdbuf.ctx = file->driver_priv;
+ submit->exec_state = args->exec_state;
submit->flags = args->flags;
ret = submit_lookup_objects(submit, file, bos, args->nr_bos);
if (ret)
goto err_submit_objects;
- ret = submit_lock_objects(submit);
+ ret = submit_lock_objects(submit, &ticket);
if (ret)
goto err_submit_objects;
@@ -470,21 +514,11 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
}
if (args->flags & ETNA_SUBMIT_FENCE_FD_IN) {
- in_fence = sync_file_get_fence(args->fence_fd);
- if (!in_fence) {
+ submit->in_fence = sync_file_get_fence(args->fence_fd);
+ if (!submit->in_fence) {
ret = -EINVAL;
goto err_submit_objects;
}
-
- /*
- * Wait if the fence is from a foreign context, or if the fence
- * array contains any fence from a foreign context.
- */
- if (!dma_fence_match_context(in_fence, gpu->fence_context)) {
- ret = dma_fence_wait(in_fence, true);
- if (ret)
- goto err_submit_objects;
- }
}
ret = submit_fence_sync(submit);
@@ -493,25 +527,25 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
ret = submit_pin_objects(submit);
if (ret)
- goto out;
+ goto err_submit_objects;
ret = submit_reloc(submit, stream, args->stream_size / 4,
relocs, args->nr_relocs);
if (ret)
- goto out;
+ goto err_submit_objects;
- ret = submit_perfmon_validate(submit, cmdbuf, pmrs, args->nr_pmrs);
+ ret = submit_perfmon_validate(submit, args->exec_state, pmrs);
if (ret)
- goto out;
+ goto err_submit_objects;
- memcpy(cmdbuf->vaddr, stream, args->stream_size);
- cmdbuf->user_size = ALIGN(args->stream_size, 8);
+ memcpy(submit->cmdbuf.vaddr, stream, args->stream_size);
+ submit->cmdbuf.user_size = ALIGN(args->stream_size, 8);
- ret = etnaviv_gpu_submit(gpu, submit, cmdbuf);
+ ret = etnaviv_gpu_submit(gpu, submit);
if (ret)
- goto out;
+ goto err_submit_objects;
- cmdbuf = NULL;
+ submit_attach_object_fences(submit);
if (args->flags & ETNA_SUBMIT_FENCE_FD_OUT) {
/*
@@ -520,39 +554,26 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
* fence to the sync file here, eliminating the ENOMEM
* possibility at this stage.
*/
- sync_file = sync_file_create(submit->fence);
+ sync_file = sync_file_create(submit->out_fence);
if (!sync_file) {
ret = -ENOMEM;
- goto out;
+ goto err_submit_objects;
}
fd_install(out_fence_fd, sync_file->file);
}
args->fence_fd = out_fence_fd;
- args->fence = submit->fence->seqno;
-
-out:
- submit_unpin_objects(submit);
-
- /*
- * If we're returning -EAGAIN, it may be due to the userptr code
- * wanting to run its workqueue outside of any locks. Flush our
- * workqueue to ensure that it is run in a timely manner.
- */
- if (ret == -EAGAIN)
- flush_workqueue(priv->wq);
+ args->fence = submit->out_fence->seqno;
err_submit_objects:
- if (in_fence)
- dma_fence_put(in_fence);
- submit_cleanup(submit);
+ etnaviv_submit_put(submit);
+
+err_submit_ww_acquire:
+ ww_acquire_fini(&ticket);
err_submit_cmds:
if (ret && (out_fence_fd >= 0))
put_unused_fd(out_fence_fd);
- /* if we still own the cmdbuf */
- if (cmdbuf)
- etnaviv_cmdbuf_free(cmdbuf);
if (stream)
kvfree(stream);
if (bos)
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index e19cbe05da2a..21d0d22f1168 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -644,7 +644,7 @@ static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu)
prefetch = etnaviv_buffer_init(gpu);
gpu_write(gpu, VIVS_HI_INTR_ENBL, ~0U);
- etnaviv_gpu_start_fe(gpu, etnaviv_cmdbuf_get_va(gpu->buffer),
+ etnaviv_gpu_start_fe(gpu, etnaviv_cmdbuf_get_va(&gpu->buffer),
prefetch);
}
@@ -717,15 +717,15 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
}
/* Create buffer: */
- gpu->buffer = etnaviv_cmdbuf_new(gpu->cmdbuf_suballoc, PAGE_SIZE, 0, 0);
- if (!gpu->buffer) {
- ret = -ENOMEM;
+ ret = etnaviv_cmdbuf_init(gpu->cmdbuf_suballoc, &gpu->buffer,
+ PAGE_SIZE);
+ if (ret) {
dev_err(gpu->dev, "could not create command buffer\n");
goto destroy_iommu;
}
if (gpu->mmu->version == ETNAVIV_IOMMU_V1 &&
- etnaviv_cmdbuf_get_va(gpu->buffer) > 0x80000000) {
+ etnaviv_cmdbuf_get_va(&gpu->buffer) > 0x80000000) {
ret = -EINVAL;
dev_err(gpu->dev,
"command buffer outside valid memory window\n");
@@ -751,8 +751,7 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
return 0;
free_buffer:
- etnaviv_cmdbuf_free(gpu->buffer);
- gpu->buffer = NULL;
+ etnaviv_cmdbuf_free(&gpu->buffer);
destroy_iommu:
etnaviv_iommu_destroy(gpu->mmu);
gpu->mmu = NULL;
@@ -958,7 +957,7 @@ static void recover_worker(struct work_struct *work)
pm_runtime_put_autosuspend(gpu->dev);
/* Retire the buffer objects in a work */
- etnaviv_queue_work(gpu->drm, &gpu->retire_work);
+ queue_work(gpu->wq, &gpu->retire_work);
}
static void hangcheck_timer_reset(struct etnaviv_gpu *gpu)
@@ -994,7 +993,7 @@ static void hangcheck_handler(struct timer_list *t)
dev_err(gpu->dev, " completed fence: %u\n", fence);
dev_err(gpu->dev, " active fence: %u\n",
gpu->active_fence);
- etnaviv_queue_work(gpu->drm, &gpu->recover_work);
+ queue_work(gpu->wq, &gpu->recover_work);
}
/* if still more pending work, reset the hangcheck timer: */
@@ -1201,42 +1200,23 @@ static void retire_worker(struct work_struct *work)
struct etnaviv_gpu *gpu = container_of(work, struct etnaviv_gpu,
retire_work);
u32 fence = gpu->completed_fence;
- struct etnaviv_cmdbuf *cmdbuf, *tmp;
- unsigned int i;
+ struct etnaviv_gem_submit *submit, *tmp;
+ LIST_HEAD(retire_list);
mutex_lock(&gpu->lock);
- list_for_each_entry_safe(cmdbuf, tmp, &gpu->active_cmd_list, node) {
- if (!dma_fence_is_signaled(cmdbuf->fence))
+ list_for_each_entry_safe(submit, tmp, &gpu->active_submit_list, node) {
+ if (!dma_fence_is_signaled(submit->out_fence))
break;
- list_del(&cmdbuf->node);
- dma_fence_put(cmdbuf->fence);
-
- for (i = 0; i < cmdbuf->nr_bos; i++) {
- struct etnaviv_vram_mapping *mapping = cmdbuf->bo_map[i];
- struct etnaviv_gem_object *etnaviv_obj = mapping->object;
-
- atomic_dec(&etnaviv_obj->gpu_active);
- /* drop the refcount taken in etnaviv_gpu_submit */
- etnaviv_gem_mapping_unreference(mapping);
- }
-
- etnaviv_cmdbuf_free(cmdbuf);
- /*
- * We need to balance the runtime PM count caused by
- * each submission. Upon submission, we increment
- * the runtime PM counter, and allocate one event.
- * So here, we put the runtime PM count for each
- * completed event.
- */
- pm_runtime_put_autosuspend(gpu->dev);
+ list_move(&submit->node, &retire_list);
}
gpu->retired_fence = fence;
mutex_unlock(&gpu->lock);
- wake_up_all(&gpu->fence_event);
+ list_for_each_entry_safe(submit, tmp, &retire_list, node)
+ etnaviv_submit_put(submit);
}
int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
@@ -1295,41 +1275,25 @@ int etnaviv_gpu_wait_obj_inactive(struct etnaviv_gpu *gpu,
ret = wait_event_interruptible_timeout(gpu->fence_event,
!is_active(etnaviv_obj),
remaining);
- if (ret > 0) {
- struct etnaviv_drm_private *priv = gpu->drm->dev_private;
-
- /* Synchronise with the retire worker */
- flush_workqueue(priv->wq);
+ if (ret > 0)
return 0;
- } else if (ret == -ERESTARTSYS) {
+ else if (ret == -ERESTARTSYS)
return -ERESTARTSYS;
- } else {
+ else
return -ETIMEDOUT;
- }
-}
-
-int etnaviv_gpu_pm_get_sync(struct etnaviv_gpu *gpu)
-{
- return pm_runtime_get_sync(gpu->dev);
-}
-
-void etnaviv_gpu_pm_put(struct etnaviv_gpu *gpu)
-{
- pm_runtime_mark_last_busy(gpu->dev);
- pm_runtime_put_autosuspend(gpu->dev);
}
static void sync_point_perfmon_sample(struct etnaviv_gpu *gpu,
struct etnaviv_event *event, unsigned int flags)
{
- const struct etnaviv_cmdbuf *cmdbuf = event->cmdbuf;
+ const struct etnaviv_gem_submit *submit = event->submit;
unsigned int i;
- for (i = 0; i < cmdbuf->nr_pmrs; i++) {
- const struct etnaviv_perfmon_request *pmr = cmdbuf->pmrs + i;
+ for (i = 0; i < submit->nr_pmrs; i++) {
+ const struct etnaviv_perfmon_request *pmr = submit->pmrs + i;
if (pmr->flags == flags)
- etnaviv_perfmon_process(gpu, pmr);
+ etnaviv_perfmon_process(gpu, pmr, submit->exec_state);
}
}
@@ -1354,14 +1318,14 @@ static void sync_point_perfmon_sample_pre(struct etnaviv_gpu *gpu,
static void sync_point_perfmon_sample_post(struct etnaviv_gpu *gpu,
struct etnaviv_event *event)
{
- const struct etnaviv_cmdbuf *cmdbuf = event->cmdbuf;
+ const struct etnaviv_gem_submit *submit = event->submit;
unsigned int i;
u32 val;
sync_point_perfmon_sample(gpu, event, ETNA_PM_PROCESS_POST);
- for (i = 0; i < cmdbuf->nr_pmrs; i++) {
- const struct etnaviv_perfmon_request *pmr = cmdbuf->pmrs + i;
+ for (i = 0; i < submit->nr_pmrs; i++) {
+ const struct etnaviv_perfmon_request *pmr = submit->pmrs + i;
*pmr->bo_vma = pmr->sequence;
}
@@ -1380,24 +1344,15 @@ static void sync_point_perfmon_sample_post(struct etnaviv_gpu *gpu,
/* add bo's to gpu's ring, and kick gpu: */
int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
- struct etnaviv_gem_submit *submit, struct etnaviv_cmdbuf *cmdbuf)
+ struct etnaviv_gem_submit *submit)
{
- struct dma_fence *fence;
unsigned int i, nr_events = 1, event[3];
int ret;
- ret = etnaviv_gpu_pm_get_sync(gpu);
+ ret = pm_runtime_get_sync(gpu->dev);
if (ret < 0)
return ret;
-
- /*
- * TODO
- *
- * - flush
- * - data endian
- * - prefetch
- *
- */
+ submit->runtime_resumed = true;
/*
* if there are performance monitor requests we need to have
@@ -1406,19 +1361,19 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
* - a sync point to re-configure gpu, process ETNA_PM_PROCESS_POST requests
* and update the sequence number for userspace.
*/
- if (cmdbuf->nr_pmrs)
+ if (submit->nr_pmrs)
nr_events = 3;
ret = event_alloc(gpu, nr_events, event);
if (ret) {
DRM_ERROR("no free events\n");
- goto out_pm_put;
+ return ret;
}
mutex_lock(&gpu->lock);
- fence = etnaviv_gpu_fence_alloc(gpu);
- if (!fence) {
+ submit->out_fence = etnaviv_gpu_fence_alloc(gpu);
+ if (!submit->out_fence) {
for (i = 0; i < nr_events; i++)
event_free(gpu, event[i]);
@@ -1426,80 +1381,51 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
goto out_unlock;
}
- gpu->event[event[0]].fence = fence;
- submit->fence = dma_fence_get(fence);
- gpu->active_fence = submit->fence->seqno;
+ gpu->active_fence = submit->out_fence->seqno;
- if (gpu->lastctx != cmdbuf->ctx) {
- gpu->mmu->need_flush = true;
- gpu->switch_context = true;
- gpu->lastctx = cmdbuf->ctx;
- }
-
- if (cmdbuf->nr_pmrs) {
+ if (submit->nr_pmrs) {
gpu->event[event[1]].sync_point = &sync_point_perfmon_sample_pre;
- gpu->event[event[1]].cmdbuf = cmdbuf;
+ kref_get(&submit->refcount);
+ gpu->event[event[1]].submit = submit;
etnaviv_sync_point_queue(gpu, event[1]);
}
- etnaviv_buffer_queue(gpu, event[0], cmdbuf);
+ kref_get(&submit->refcount);
+ gpu->event[event[0]].fence = submit->out_fence;
+ etnaviv_buffer_queue(gpu, submit->exec_state, event[0],
+ &submit->cmdbuf);
- if (cmdbuf->nr_pmrs) {
+ if (submit->nr_pmrs) {
gpu->event[event[2]].sync_point = &sync_point_perfmon_sample_post;
- gpu->event[event[2]].cmdbuf = cmdbuf;
+ kref_get(&submit->refcount);
+ gpu->event[event[2]].submit = submit;
etnaviv_sync_point_queue(gpu, event[2]);
}
- cmdbuf->fence = fence;
- list_add_tail(&cmdbuf->node, &gpu->active_cmd_list);
-
- /* We're committed to adding this command buffer, hold a PM reference */
- pm_runtime_get_noresume(gpu->dev);
-
- for (i = 0; i < submit->nr_bos; i++) {
- struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
+ list_add_tail(&submit->node, &gpu->active_submit_list);
- /* Each cmdbuf takes a refcount on the mapping */
- etnaviv_gem_mapping_reference(submit->bos[i].mapping);
- cmdbuf->bo_map[i] = submit->bos[i].mapping;
- atomic_inc(&etnaviv_obj->gpu_active);
-
- if (submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE)
- reservation_object_add_excl_fence(etnaviv_obj->resv,
- fence);
- else
- reservation_object_add_shared_fence(etnaviv_obj->resv,
- fence);
- }
- cmdbuf->nr_bos = submit->nr_bos;
hangcheck_timer_reset(gpu);
ret = 0;
out_unlock:
mutex_unlock(&gpu->lock);
-out_pm_put:
- etnaviv_gpu_pm_put(gpu);
-
return ret;
}
-static void etnaviv_process_sync_point(struct etnaviv_gpu *gpu,
- struct etnaviv_event *event)
-{
- u32 addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
-
- event->sync_point(gpu, event);
- etnaviv_gpu_start_fe(gpu, addr + 2, 2);
-}
-
static void sync_point_worker(struct work_struct *work)
{
struct etnaviv_gpu *gpu = container_of(work, struct etnaviv_gpu,
sync_point_work);
+ struct etnaviv_event *event = &gpu->event[gpu->sync_point_event];
+ u32 addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
- etnaviv_process_sync_point(gpu, &gpu->event[gpu->sync_point_event]);
+ event->sync_point(gpu, event);
+ etnaviv_submit_put(event->submit);
event_free(gpu, gpu->sync_point_event);
+
+ /* restart FE last to avoid GPU and IRQ racing against this worker */
+ etnaviv_gpu_start_fe(gpu, addr + 2, 2);
}
/*
@@ -1550,7 +1476,7 @@ static irqreturn_t irq_handler(int irq, void *data)
if (gpu->event[event].sync_point) {
gpu->sync_point_event = event;
- etnaviv_queue_work(gpu->drm, &gpu->sync_point_work);
+ queue_work(gpu->wq, &gpu->sync_point_work);
}
fence = gpu->event[event].fence;
@@ -1576,7 +1502,7 @@ static irqreturn_t irq_handler(int irq, void *data)
}
/* Retire the buffer objects in a work */
- etnaviv_queue_work(gpu->drm, &gpu->retire_work);
+ queue_work(gpu->wq, &gpu->retire_work);
ret = IRQ_HANDLED;
}
@@ -1653,9 +1579,11 @@ int etnaviv_gpu_wait_idle(struct etnaviv_gpu *gpu, unsigned int timeout_ms)
static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
{
- if (gpu->buffer) {
+ if (gpu->buffer.suballoc) {
/* Replace the last WAIT with END */
+ mutex_lock(&gpu->lock);
etnaviv_buffer_end(gpu);
+ mutex_unlock(&gpu->lock);
/*
* We know that only the FE is busy here, this should
@@ -1680,7 +1608,7 @@ static int etnaviv_gpu_hw_resume(struct etnaviv_gpu *gpu)
etnaviv_gpu_update_clock(gpu);
etnaviv_gpu_hw_init(gpu);
- gpu->switch_context = true;
+ gpu->lastctx = NULL;
gpu->exec_state = -1;
mutex_unlock(&gpu->lock);
@@ -1738,20 +1666,29 @@ static int etnaviv_gpu_bind(struct device *dev, struct device *master,
struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
int ret;
- if (IS_ENABLED(CONFIG_THERMAL)) {
+ if (IS_ENABLED(CONFIG_DRM_ETNAVIV_THERMAL)) {
gpu->cooling = thermal_of_cooling_device_register(dev->of_node,
(char *)dev_name(dev), gpu, &cooling_ops);
if (IS_ERR(gpu->cooling))
return PTR_ERR(gpu->cooling);
}
+ gpu->wq = alloc_ordered_workqueue(dev_name(dev), 0);
+ if (!gpu->wq) {
+ if (IS_ENABLED(CONFIG_DRM_ETNAVIV_THERMAL))
+ thermal_cooling_device_unregister(gpu->cooling);
+ return -ENOMEM;
+ }
+
#ifdef CONFIG_PM
ret = pm_runtime_get_sync(gpu->dev);
#else
ret = etnaviv_gpu_clk_enable(gpu);
#endif
if (ret < 0) {
- thermal_cooling_device_unregister(gpu->cooling);
+ destroy_workqueue(gpu->wq);
+ if (IS_ENABLED(CONFIG_DRM_ETNAVIV_THERMAL))
+ thermal_cooling_device_unregister(gpu->cooling);
return ret;
}
@@ -1759,7 +1696,7 @@ static int etnaviv_gpu_bind(struct device *dev, struct device *master,
gpu->fence_context = dma_fence_context_alloc(1);
spin_lock_init(&gpu->fence_spinlock);
- INIT_LIST_HEAD(&gpu->active_cmd_list);
+ INIT_LIST_HEAD(&gpu->active_submit_list);
INIT_WORK(&gpu->retire_work, retire_worker);
INIT_WORK(&gpu->sync_point_work, sync_point_worker);
INIT_WORK(&gpu->recover_work, recover_worker);
@@ -1784,6 +1721,9 @@ static void etnaviv_gpu_unbind(struct device *dev, struct device *master,
hangcheck_disable(gpu);
+ flush_workqueue(gpu->wq);
+ destroy_workqueue(gpu->wq);
+
#ifdef CONFIG_PM
pm_runtime_get_sync(gpu->dev);
pm_runtime_put_sync_suspend(gpu->dev);
@@ -1791,10 +1731,8 @@ static void etnaviv_gpu_unbind(struct device *dev, struct device *master,
etnaviv_gpu_hw_suspend(gpu);
#endif
- if (gpu->buffer) {
- etnaviv_cmdbuf_free(gpu->buffer);
- gpu->buffer = NULL;
- }
+ if (gpu->buffer.suballoc)
+ etnaviv_cmdbuf_free(&gpu->buffer);
if (gpu->cmdbuf_suballoc) {
etnaviv_cmdbuf_suballoc_destroy(gpu->cmdbuf_suballoc);
@@ -1808,7 +1746,8 @@ static void etnaviv_gpu_unbind(struct device *dev, struct device *master,
gpu->drm = NULL;
- thermal_cooling_device_unregister(gpu->cooling);
+ if (IS_ENABLED(CONFIG_DRM_ETNAVIV_THERMAL))
+ thermal_cooling_device_unregister(gpu->cooling);
gpu->cooling = NULL;
}
@@ -1931,7 +1870,7 @@ static int etnaviv_gpu_rpm_resume(struct device *dev)
return ret;
/* Re-initialise the basic hardware state */
- if (gpu->drm && gpu->buffer) {
+ if (gpu->drm && gpu->buffer.suballoc) {
ret = etnaviv_gpu_hw_resume(gpu);
if (ret) {
etnaviv_gpu_clk_disable(gpu);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
index 4f10f147297a..7623905210dc 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
@@ -20,6 +20,7 @@
#include <linux/clk.h>
#include <linux/regulator/consumer.h>
+#include "etnaviv_cmdbuf.h"
#include "etnaviv_drv.h"
struct etnaviv_gem_submit;
@@ -89,7 +90,7 @@ struct etnaviv_chip_identity {
struct etnaviv_event {
struct dma_fence *fence;
- struct etnaviv_cmdbuf *cmdbuf;
+ struct etnaviv_gem_submit *submit;
void (*sync_point)(struct etnaviv_gpu *gpu, struct etnaviv_event *event);
};
@@ -106,10 +107,10 @@ struct etnaviv_gpu {
struct mutex lock;
struct etnaviv_chip_identity identity;
struct etnaviv_file_private *lastctx;
- bool switch_context;
+ struct workqueue_struct *wq;
/* 'ring'-buffer: */
- struct etnaviv_cmdbuf *buffer;
+ struct etnaviv_cmdbuf buffer;
int exec_state;
/* bus base address of memory */
@@ -122,7 +123,7 @@ struct etnaviv_gpu {
spinlock_t event_spinlock;
/* list of currently in-flight command buffers */
- struct list_head active_cmd_list;
+ struct list_head active_submit_list;
u32 idle_mask;
@@ -202,7 +203,7 @@ int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
int etnaviv_gpu_wait_obj_inactive(struct etnaviv_gpu *gpu,
struct etnaviv_gem_object *etnaviv_obj, struct timespec *timeout);
int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
- struct etnaviv_gem_submit *submit, struct etnaviv_cmdbuf *cmdbuf);
+ struct etnaviv_gem_submit *submit);
int etnaviv_gpu_pm_get_sync(struct etnaviv_gpu *gpu);
void etnaviv_gpu_pm_put(struct etnaviv_gpu *gpu);
int etnaviv_gpu_wait_idle(struct etnaviv_gpu *gpu, unsigned int timeout_ms);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
index 14e24ac6573f..7a8c94731748 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
@@ -70,9 +70,8 @@ static int __etnaviv_iommu_init(struct etnaviv_iommuv1_domain *etnaviv_domain)
return -ENOMEM;
}
- for (i = 0; i < PT_ENTRIES; i++)
- etnaviv_domain->pgtable_cpu[i] =
- etnaviv_domain->base.bad_page_dma;
+ memset32(etnaviv_domain->pgtable_cpu, etnaviv_domain->base.bad_page_dma,
+ PT_ENTRIES);
return 0;
}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
index fc60fc8ddbf0..1e956e266aa3 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
@@ -229,7 +229,7 @@ void etnaviv_iommuv2_restore(struct etnaviv_gpu *gpu)
prefetch = etnaviv_buffer_config_mmuv2(gpu,
(u32)etnaviv_domain->mtlb_dma,
(u32)etnaviv_domain->base.bad_page_dma);
- etnaviv_gpu_start_fe(gpu, (u32)etnaviv_cmdbuf_get_pa(gpu->buffer),
+ etnaviv_gpu_start_fe(gpu, (u32)etnaviv_cmdbuf_get_pa(&gpu->buffer),
prefetch);
etnaviv_gpu_wait_idle(gpu, 100);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
index 35074b944778..d113fe06e6b5 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
@@ -263,18 +263,16 @@ int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
if (iova < 0x80000000 - sg_dma_len(sgt->sgl)) {
mapping->iova = iova;
list_add_tail(&mapping->mmu_node, &mmu->mappings);
- mutex_unlock(&mmu->lock);
- return 0;
+ ret = 0;
+ goto unlock;
}
}
node = &mapping->vram_node;
ret = etnaviv_iommu_find_iova(mmu, node, etnaviv_obj->base.size);
- if (ret < 0) {
- mutex_unlock(&mmu->lock);
- return ret;
- }
+ if (ret < 0)
+ goto unlock;
mmu->last_iova = node->start + etnaviv_obj->base.size;
mapping->iova = node->start;
@@ -283,12 +281,12 @@ int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
if (ret < 0) {
drm_mm_remove_node(node);
- mutex_unlock(&mmu->lock);
- return ret;
+ goto unlock;
}
list_add_tail(&mapping->mmu_node, &mmu->mappings);
mmu->need_flush = true;
+unlock:
mutex_unlock(&mmu->lock);
return ret;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c b/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c
index 768f5aafdd18..26dddfc41aac 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c
@@ -479,9 +479,9 @@ int etnaviv_pm_req_validate(const struct drm_etnaviv_gem_submit_pmr *r,
}
void etnaviv_perfmon_process(struct etnaviv_gpu *gpu,
- const struct etnaviv_perfmon_request *pmr)
+ const struct etnaviv_perfmon_request *pmr, u32 exec_state)
{
- const struct etnaviv_pm_domain_meta *meta = &doms_meta[gpu->exec_state];
+ const struct etnaviv_pm_domain_meta *meta = &doms_meta[exec_state];
const struct etnaviv_pm_domain *dom;
const struct etnaviv_pm_signal *sig;
u32 *bo = pmr->bo_vma;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_perfmon.h b/drivers/gpu/drm/etnaviv/etnaviv_perfmon.h
index 35dce194cb00..c1653c64ab6b 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_perfmon.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_perfmon.h
@@ -44,6 +44,6 @@ int etnaviv_pm_req_validate(const struct drm_etnaviv_gem_submit_pmr *r,
u32 exec_state);
void etnaviv_perfmon_process(struct etnaviv_gpu *gpu,
- const struct etnaviv_perfmon_request *pmr);
+ const struct etnaviv_perfmon_request *pmr, u32 exec_state);
#endif /* __ETNAVIV_PERFMON_H__ */
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 5a7c9d8abd6b..735ce47688f9 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -95,26 +95,21 @@ config DRM_EXYNOS_G2D
help
Choose this option if you want to use Exynos G2D for DRM.
-config DRM_EXYNOS_IPP
- bool "Image Post Processor"
- help
- Choose this option if you want to use IPP feature for DRM.
-
config DRM_EXYNOS_FIMC
bool "FIMC"
- depends on DRM_EXYNOS_IPP && MFD_SYSCON
+ depends on BROKEN && MFD_SYSCON
help
Choose this option if you want to use Exynos FIMC for DRM.
config DRM_EXYNOS_ROTATOR
bool "Rotator"
- depends on DRM_EXYNOS_IPP
+ depends on BROKEN
help
Choose this option if you want to use Exynos Rotator for DRM.
config DRM_EXYNOS_GSC
bool "GScaler"
- depends on DRM_EXYNOS_IPP && ARCH_EXYNOS5 && VIDEO_SAMSUNG_EXYNOS_GSC=n
+ depends on BROKEN && ARCH_EXYNOS5 && VIDEO_SAMSUNG_EXYNOS_GSC=n
help
Choose this option if you want to use Exynos GSC for DRM.
diff --git a/drivers/gpu/drm/exynos/Makefile b/drivers/gpu/drm/exynos/Makefile
index bdf4212dde7b..a51c5459bb13 100644
--- a/drivers/gpu/drm/exynos/Makefile
+++ b/drivers/gpu/drm/exynos/Makefile
@@ -18,7 +18,6 @@ exynosdrm-$(CONFIG_DRM_EXYNOS_MIXER) += exynos_mixer.o
exynosdrm-$(CONFIG_DRM_EXYNOS_HDMI) += exynos_hdmi.o
exynosdrm-$(CONFIG_DRM_EXYNOS_VIDI) += exynos_drm_vidi.o
exynosdrm-$(CONFIG_DRM_EXYNOS_G2D) += exynos_drm_g2d.o
-exynosdrm-$(CONFIG_DRM_EXYNOS_IPP) += exynos_drm_ipp.o
exynosdrm-$(CONFIG_DRM_EXYNOS_FIMC) += exynos_drm_fimc.o
exynosdrm-$(CONFIG_DRM_EXYNOS_ROTATOR) += exynos_drm_rotator.o
exynosdrm-$(CONFIG_DRM_EXYNOS_GSC) += exynos_drm_gsc.o
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
index 6be5b53c3b27..1c330f2a7a5d 100644
--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
@@ -21,13 +21,12 @@
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
-#include <video/exynos5433_decon.h>
-
#include "exynos_drm_drv.h"
#include "exynos_drm_crtc.h"
#include "exynos_drm_fb.h"
#include "exynos_drm_plane.h"
#include "exynos_drm_iommu.h"
+#include "regs-decon5433.h"
#define DSD_CFG_MUX 0x1004
#define DSD_CFG_MUX_TE_UNMASK_GLOBAL BIT(13)
@@ -744,11 +743,6 @@ static int exynos5433_decon_probe(struct platform_device *pdev)
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(dev, "cannot find IO resource\n");
- return -ENXIO;
- }
-
ctx->addr = devm_ioremap_resource(dev, res);
if (IS_ERR(ctx->addr)) {
dev_err(dev, "ioremap failed\n");
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
index 615efcf7782a..3931d5e33fe0 100644
--- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
@@ -25,13 +25,13 @@
#include <video/of_display_timing.h>
#include <video/of_videomode.h>
-#include <video/exynos7_decon.h>
#include "exynos_drm_crtc.h"
#include "exynos_drm_plane.h"
#include "exynos_drm_drv.h"
#include "exynos_drm_fb.h"
#include "exynos_drm_iommu.h"
+#include "regs-decon7.h"
/*
* DECON stands for Display and Enhancement controller.
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 82b72425a42f..a518e9c6d6cc 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -16,6 +16,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
#include <linux/component.h>
@@ -28,7 +29,6 @@
#include "exynos_drm_plane.h"
#include "exynos_drm_vidi.h"
#include "exynos_drm_g2d.h"
-#include "exynos_drm_ipp.h"
#include "exynos_drm_iommu.h"
#define DRIVER_NAME "exynos"
@@ -37,8 +37,6 @@
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
-static struct device *exynos_drm_get_dma_device(void);
-
int exynos_atomic_check(struct drm_device *dev,
struct drm_atomic_state *state)
{
@@ -89,11 +87,6 @@ static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file)
file->driver_priv = NULL;
}
-static void exynos_drm_lastclose(struct drm_device *dev)
-{
- exynos_drm_fbdev_restore_mode(dev);
-}
-
static const struct vm_operations_struct exynos_drm_gem_vm_ops = {
.fault = exynos_drm_gem_fault,
.open = drm_gem_vm_open,
@@ -115,14 +108,6 @@ static const struct drm_ioctl_desc exynos_ioctls[] = {
DRM_AUTH | DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(EXYNOS_G2D_EXEC, exynos_g2d_exec_ioctl,
DRM_AUTH | DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(EXYNOS_IPP_GET_PROPERTY, exynos_drm_ipp_get_property,
- DRM_AUTH | DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(EXYNOS_IPP_SET_PROPERTY, exynos_drm_ipp_set_property,
- DRM_AUTH | DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(EXYNOS_IPP_QUEUE_BUF, exynos_drm_ipp_queue_buf,
- DRM_AUTH | DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(EXYNOS_IPP_CMD_CTRL, exynos_drm_ipp_cmd_ctrl,
- DRM_AUTH | DRM_RENDER_ALLOW),
};
static const struct file_operations exynos_drm_driver_fops = {
@@ -140,7 +125,7 @@ static struct drm_driver exynos_drm_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME
| DRIVER_ATOMIC | DRIVER_RENDER,
.open = exynos_drm_open,
- .lastclose = exynos_drm_lastclose,
+ .lastclose = drm_fb_helper_lastclose,
.postclose = exynos_drm_postclose,
.gem_free_object_unlocked = exynos_drm_gem_free_object,
.gem_vm_ops = &exynos_drm_gem_vm_ops,
@@ -148,7 +133,7 @@ static struct drm_driver exynos_drm_driver = {
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_export = drm_gem_prime_export,
- .gem_prime_import = drm_gem_prime_import,
+ .gem_prime_import = exynos_drm_gem_prime_import,
.gem_prime_get_sg_table = exynos_drm_gem_prime_get_sg_table,
.gem_prime_import_sg_table = exynos_drm_gem_prime_import_sg_table,
.gem_prime_vmap = exynos_drm_gem_prime_vmap,
@@ -263,9 +248,6 @@ static struct exynos_drm_driver_info exynos_drm_drivers[] = {
}, {
DRV_PTR(gsc_driver, CONFIG_DRM_EXYNOS_GSC),
}, {
- DRV_PTR(ipp_driver, CONFIG_DRM_EXYNOS_IPP),
- DRM_VIRTUAL_DEVICE
- }, {
&exynos_drm_platform_driver,
DRM_VIRTUAL_DEVICE
}
@@ -301,6 +283,27 @@ static struct component_match *exynos_drm_match_add(struct device *dev)
return match ?: ERR_PTR(-ENODEV);
}
+static struct device *exynos_drm_get_dma_device(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(exynos_drm_drivers); ++i) {
+ struct exynos_drm_driver_info *info = &exynos_drm_drivers[i];
+ struct device *dev;
+
+ if (!info->driver || !(info->flags & DRM_DMA_DEVICE))
+ continue;
+
+ while ((dev = bus_find_device(&platform_bus_type, NULL,
+ &info->driver->driver,
+ (void *)platform_bus_type.match))) {
+ put_device(dev);
+ return dev;
+ }
+ }
+ return NULL;
+}
+
static int exynos_drm_bind(struct device *dev)
{
struct exynos_drm_private *private;
@@ -469,27 +472,6 @@ static struct platform_driver exynos_drm_platform_driver = {
},
};
-static struct device *exynos_drm_get_dma_device(void)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(exynos_drm_drivers); ++i) {
- struct exynos_drm_driver_info *info = &exynos_drm_drivers[i];
- struct device *dev;
-
- if (!info->driver || !(info->flags & DRM_DMA_DEVICE))
- continue;
-
- while ((dev = bus_find_device(&platform_bus_type, NULL,
- &info->driver->driver,
- (void *)platform_bus_type.match))) {
- put_device(dev);
- return dev;
- }
- }
- return NULL;
-}
-
static void exynos_drm_unregister_devices(void)
{
int i;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index c6847fa708fa..df2262f70d91 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -188,17 +188,11 @@ struct exynos_drm_g2d_private {
struct drm_exynos_file_private {
struct exynos_drm_g2d_private *g2d_priv;
- struct device *ipp_dev;
};
/*
* Exynos drm private structure.
*
- * @da_start: start address to device address space.
- * with iommu, device address space starts from this address
- * otherwise default one.
- * @da_space_size: size of device address space.
- * if 0 then default value is used for it.
* @pending: the crtcs that have pending updates to finish
* @lock: protect access to @pending
* @wait: wait an atomic commit to finish
@@ -296,6 +290,5 @@ extern struct platform_driver g2d_driver;
extern struct platform_driver fimc_driver;
extern struct platform_driver rotator_driver;
extern struct platform_driver gsc_driver;
-extern struct platform_driver ipp_driver;
extern struct platform_driver mic_driver;
#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index 8208df56a88f..0faaf829f5bf 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -205,7 +205,7 @@ static struct drm_mode_config_helper_funcs exynos_drm_mode_config_helpers = {
static const struct drm_mode_config_funcs exynos_drm_mode_config_funcs = {
.fb_create = exynos_user_fb_create,
- .output_poll_changed = exynos_drm_output_poll_changed,
+ .output_poll_changed = drm_fb_helper_output_poll_changed,
.atomic_check = exynos_atomic_check,
.atomic_commit = drm_atomic_helper_commit,
};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index dfb66ecf417b..132dd52d0ac7 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -270,24 +270,6 @@ void exynos_drm_fbdev_fini(struct drm_device *dev)
private->fb_helper = NULL;
}
-void exynos_drm_fbdev_restore_mode(struct drm_device *dev)
-{
- struct exynos_drm_private *private = dev->dev_private;
-
- if (!private || !private->fb_helper)
- return;
-
- drm_fb_helper_restore_fbdev_mode_unlocked(private->fb_helper);
-}
-
-void exynos_drm_output_poll_changed(struct drm_device *dev)
-{
- struct exynos_drm_private *private = dev->dev_private;
- struct drm_fb_helper *fb_helper = private->fb_helper;
-
- drm_fb_helper_hotplug_event(fb_helper);
-}
-
void exynos_drm_fbdev_suspend(struct drm_device *dev)
{
struct exynos_drm_private *private = dev->dev_private;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.h b/drivers/gpu/drm/exynos/exynos_drm_fbdev.h
index 645d1bb7f665..b33847223a85 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.h
@@ -19,8 +19,6 @@
int exynos_drm_fbdev_init(struct drm_device *dev);
void exynos_drm_fbdev_fini(struct drm_device *dev);
-void exynos_drm_fbdev_restore_mode(struct drm_device *dev);
-void exynos_drm_output_poll_changed(struct drm_device *dev);
void exynos_drm_fbdev_suspend(struct drm_device *drm);
void exynos_drm_fbdev_resume(struct drm_device *drm);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 077de014d610..11cc01b47bc0 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -247,6 +247,15 @@ struct exynos_drm_gem *exynos_drm_gem_create(struct drm_device *dev,
if (IS_ERR(exynos_gem))
return exynos_gem;
+ if (!is_drm_iommu_supported(dev) && (flags & EXYNOS_BO_NONCONTIG)) {
+ /*
+ * when no IOMMU is available, all allocated buffers are
+ * contiguous anyway, so drop EXYNOS_BO_NONCONTIG flag
+ */
+ flags &= ~EXYNOS_BO_NONCONTIG;
+ DRM_WARN("Non-contiguous allocation is not supported without IOMMU, falling back to contiguous buffer\n");
+ }
+
/* set memory type and cache attribute from user side. */
exynos_gem->flags = flags;
@@ -506,6 +515,12 @@ int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
}
/* low-level interface prime helpers */
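+/*
+ * Import a dma-buf via drm_gem_prime_import_dev() against the DMA-capable
+ * device returned by to_dma_dev(), rather than the virtual exynos DRM device,
+ * so the attachment can actually be mapped for DMA.
+ */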
+struct drm_gem_object *exynos_drm_gem_prime_import(struct drm_device *dev,
+ struct dma_buf *dma_buf)
+{
+ return drm_gem_prime_import_dev(dev, dma_buf, to_dma_dev(dev));
+}
+
struct sg_table *exynos_drm_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index e86d1a9518c3..5a4c7de80f65 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -117,6 +117,8 @@ int exynos_drm_gem_fault(struct vm_fault *vmf);
int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
/* low-level interface prime helpers */
+struct drm_gem_object *exynos_drm_gem_prime_import(struct drm_device *dev,
+ struct dma_buf *dma_buf);
struct sg_table *exynos_drm_gem_prime_get_sg_table(struct drm_gem_object *obj);
struct drm_gem_object *
exynos_drm_gem_prime_import_sg_table(struct drm_device *dev,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
deleted file mode 100644
index 3edda18cc2d2..000000000000
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c
+++ /dev/null
@@ -1,1806 +0,0 @@
-/*
- * Copyright (C) 2012 Samsung Electronics Co.Ltd
- * Authors:
- * Eunchul Kim <chulspro.kim@samsung.com>
- * Jinyoung Jeon <jy0.jeon@samsung.com>
- * Sangmin Lee <lsmin.lee@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- */
-#include <linux/kernel.h>
-#include <linux/platform_device.h>
-#include <linux/types.h>
-#include <linux/clk.h>
-#include <linux/pm_runtime.h>
-
-#include <drm/drmP.h>
-#include <drm/exynos_drm.h>
-#include "exynos_drm_drv.h"
-#include "exynos_drm_gem.h"
-#include "exynos_drm_ipp.h"
-#include "exynos_drm_iommu.h"
-
-/*
- * IPP stands for Image Post Processing and
- * supports image scaler/rotator and input/output DMA operations.
- * using FIMC, GSC, Rotator, so on.
- * IPP is integration device driver of same attribute h/w
- */
-
-/*
- * TODO
- * 1. expand command control id.
- * 2. integrate property and config.
- * 3. removed send_event id check routine.
- * 4. compare send_event id if needed.
- * 5. free subdrv_remove notifier callback list if needed.
- * 6. need to check subdrv_open about multi-open.
- * 7. need to power_on implement power and sysmmu ctrl.
- */
-
-#define get_ipp_context(dev) platform_get_drvdata(to_platform_device(dev))
-#define ipp_is_m2m_cmd(c) (c == IPP_CMD_M2M)
-
-/*
- * A structure of event.
- *
- * @base: base of event.
- * @event: ipp event.
- */
-struct drm_exynos_ipp_send_event {
- struct drm_pending_event base;
- struct drm_exynos_ipp_event event;
-};
-
-/*
- * A structure of memory node.
- *
- * @list: list head to memory queue information.
- * @ops_id: id of operations.
- * @prop_id: id of property.
- * @buf_id: id of buffer.
- * @buf_info: gem objects and dma address, size.
- * @filp: a pointer to drm_file.
- */
-struct drm_exynos_ipp_mem_node {
- struct list_head list;
- enum drm_exynos_ops_id ops_id;
- u32 prop_id;
- u32 buf_id;
- struct drm_exynos_ipp_buf_info buf_info;
-};
-
-/*
- * A structure of ipp context.
- *
- * @subdrv: prepare initialization using subdrv.
- * @ipp_lock: lock for synchronization of access to ipp_idr.
- * @prop_lock: lock for synchronization of access to prop_idr.
- * @ipp_idr: ipp driver idr.
- * @prop_idr: property idr.
- * @event_workq: event work queue.
- * @cmd_workq: command work queue.
- */
-struct ipp_context {
- struct exynos_drm_subdrv subdrv;
- struct mutex ipp_lock;
- struct mutex prop_lock;
- struct idr ipp_idr;
- struct idr prop_idr;
- struct workqueue_struct *event_workq;
- struct workqueue_struct *cmd_workq;
-};
-
-static LIST_HEAD(exynos_drm_ippdrv_list);
-static DEFINE_MUTEX(exynos_drm_ippdrv_lock);
-static BLOCKING_NOTIFIER_HEAD(exynos_drm_ippnb_list);
-
-int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv)
-{
- mutex_lock(&exynos_drm_ippdrv_lock);
- list_add_tail(&ippdrv->drv_list, &exynos_drm_ippdrv_list);
- mutex_unlock(&exynos_drm_ippdrv_lock);
-
- return 0;
-}
-
-int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv)
-{
- mutex_lock(&exynos_drm_ippdrv_lock);
- list_del(&ippdrv->drv_list);
- mutex_unlock(&exynos_drm_ippdrv_lock);
-
- return 0;
-}
-
-static int ipp_create_id(struct idr *id_idr, struct mutex *lock, void *obj)
-{
- int ret;
-
- mutex_lock(lock);
- ret = idr_alloc(id_idr, obj, 1, 0, GFP_KERNEL);
- mutex_unlock(lock);
-
- return ret;
-}
-
-static void ipp_remove_id(struct idr *id_idr, struct mutex *lock, u32 id)
-{
- mutex_lock(lock);
- idr_remove(id_idr, id);
- mutex_unlock(lock);
-}
-
-static void *ipp_find_obj(struct idr *id_idr, struct mutex *lock, u32 id)
-{
- void *obj;
-
- mutex_lock(lock);
- obj = idr_find(id_idr, id);
- mutex_unlock(lock);
-
- return obj;
-}
-
-static int ipp_check_driver(struct exynos_drm_ippdrv *ippdrv,
- struct drm_exynos_ipp_property *property)
-{
- if (ippdrv->dedicated || (!ipp_is_m2m_cmd(property->cmd) &&
- !pm_runtime_suspended(ippdrv->dev)))
- return -EBUSY;
-
- if (ippdrv->check_property &&
- ippdrv->check_property(ippdrv->dev, property))
- return -EINVAL;
-
- return 0;
-}
-
-static struct exynos_drm_ippdrv *ipp_find_driver(struct ipp_context *ctx,
- struct drm_exynos_ipp_property *property)
-{
- struct exynos_drm_ippdrv *ippdrv;
- u32 ipp_id = property->ipp_id;
- int ret;
-
- if (ipp_id) {
- ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock, ipp_id);
- if (!ippdrv) {
- DRM_DEBUG("ipp%d driver not found\n", ipp_id);
- return ERR_PTR(-ENODEV);
- }
-
- ret = ipp_check_driver(ippdrv, property);
- if (ret < 0) {
- DRM_DEBUG("ipp%d driver check error %d\n", ipp_id, ret);
- return ERR_PTR(ret);
- }
-
- return ippdrv;
- } else {
- list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
- ret = ipp_check_driver(ippdrv, property);
- if (ret == 0)
- return ippdrv;
- }
-
- DRM_DEBUG("cannot find driver suitable for given property.\n");
- }
-
- return ERR_PTR(-ENODEV);
-}
-
-static struct exynos_drm_ippdrv *ipp_find_drv_by_handle(u32 prop_id)
-{
- struct exynos_drm_ippdrv *ippdrv;
- struct drm_exynos_ipp_cmd_node *c_node;
- int count = 0;
-
- DRM_DEBUG_KMS("prop_id[%d]\n", prop_id);
-
- /*
- * This case is search ipp driver by prop_id handle.
- * sometimes, ipp subsystem find driver by prop_id.
- * e.g PAUSE state, queue buf, command control.
- */
- list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
- DRM_DEBUG_KMS("count[%d]ippdrv[%pK]\n", count++, ippdrv);
-
- mutex_lock(&ippdrv->cmd_lock);
- list_for_each_entry(c_node, &ippdrv->cmd_list, list) {
- if (c_node->property.prop_id == prop_id) {
- mutex_unlock(&ippdrv->cmd_lock);
- return ippdrv;
- }
- }
- mutex_unlock(&ippdrv->cmd_lock);
- }
-
- return ERR_PTR(-ENODEV);
-}
-
-int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data,
- struct drm_file *file)
-{
- struct drm_exynos_file_private *file_priv = file->driver_priv;
- struct device *dev = file_priv->ipp_dev;
- struct ipp_context *ctx = get_ipp_context(dev);
- struct drm_exynos_ipp_prop_list *prop_list = data;
- struct exynos_drm_ippdrv *ippdrv;
- int count = 0;
-
- if (!ctx) {
- DRM_ERROR("invalid context.\n");
- return -EINVAL;
- }
-
- if (!prop_list) {
- DRM_ERROR("invalid property parameter.\n");
- return -EINVAL;
- }
-
- DRM_DEBUG_KMS("ipp_id[%d]\n", prop_list->ipp_id);
-
- if (!prop_list->ipp_id) {
- list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list)
- count++;
-
- /*
- * Supports ippdrv list count for user application.
- * First step user application getting ippdrv count.
- * and second step getting ippdrv capability using ipp_id.
- */
- prop_list->count = count;
- } else {
- /*
- * Getting ippdrv capability by ipp_id.
- * some device not supported wb, output interface.
- * so, user application detect correct ipp driver
- * using this ioctl.
- */
- ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock,
- prop_list->ipp_id);
- if (!ippdrv) {
- DRM_ERROR("not found ipp%d driver.\n",
- prop_list->ipp_id);
- return -ENODEV;
- }
-
- *prop_list = ippdrv->prop_list;
- }
-
- return 0;
-}
-
-static void ipp_print_property(struct drm_exynos_ipp_property *property,
- int idx)
-{
- struct drm_exynos_ipp_config *config = &property->config[idx];
- struct drm_exynos_pos *pos = &config->pos;
- struct drm_exynos_sz *sz = &config->sz;
-
- DRM_DEBUG_KMS("prop_id[%d]ops[%s]fmt[0x%x]\n",
- property->prop_id, idx ? "dst" : "src", config->fmt);
-
- DRM_DEBUG_KMS("pos[%d %d %d %d]sz[%d %d]f[%d]r[%d]\n",
- pos->x, pos->y, pos->w, pos->h,
- sz->hsize, sz->vsize, config->flip, config->degree);
-}
-
-static struct drm_exynos_ipp_cmd_work *ipp_create_cmd_work(void)
-{
- struct drm_exynos_ipp_cmd_work *cmd_work;
-
- cmd_work = kzalloc(sizeof(*cmd_work), GFP_KERNEL);
- if (!cmd_work)
- return ERR_PTR(-ENOMEM);
-
- INIT_WORK((struct work_struct *)cmd_work, ipp_sched_cmd);
-
- return cmd_work;
-}
-
-static struct drm_exynos_ipp_event_work *ipp_create_event_work(void)
-{
- struct drm_exynos_ipp_event_work *event_work;
-
- event_work = kzalloc(sizeof(*event_work), GFP_KERNEL);
- if (!event_work)
- return ERR_PTR(-ENOMEM);
-
- INIT_WORK(&event_work->work, ipp_sched_event);
-
- return event_work;
-}
-
-int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
- struct drm_file *file)
-{
- struct drm_exynos_file_private *file_priv = file->driver_priv;
- struct device *dev = file_priv->ipp_dev;
- struct ipp_context *ctx = get_ipp_context(dev);
- struct drm_exynos_ipp_property *property = data;
- struct exynos_drm_ippdrv *ippdrv;
- struct drm_exynos_ipp_cmd_node *c_node;
- u32 prop_id;
- int ret, i;
-
- if (!ctx) {
- DRM_ERROR("invalid context.\n");
- return -EINVAL;
- }
-
- if (!property) {
- DRM_ERROR("invalid property parameter.\n");
- return -EINVAL;
- }
-
- prop_id = property->prop_id;
-
- /*
- * This is log print for user application property.
- * user application set various property.
- */
- for_each_ipp_ops(i)
- ipp_print_property(property, i);
-
- /*
- * In case prop_id is not zero try to set existing property.
- */
- if (prop_id) {
- c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock, prop_id);
-
- if (!c_node || c_node->filp != file) {
- DRM_DEBUG_KMS("prop_id[%d] not found\n", prop_id);
- return -EINVAL;
- }
-
- if (c_node->state != IPP_STATE_STOP) {
- DRM_DEBUG_KMS("prop_id[%d] not stopped\n", prop_id);
- return -EINVAL;
- }
-
- c_node->property = *property;
-
- return 0;
- }
-
- /* find ipp driver using ipp id */
- ippdrv = ipp_find_driver(ctx, property);
- if (IS_ERR(ippdrv)) {
- DRM_ERROR("failed to get ipp driver.\n");
- return -EINVAL;
- }
-
- /* allocate command node */
- c_node = kzalloc(sizeof(*c_node), GFP_KERNEL);
- if (!c_node)
- return -ENOMEM;
-
- ret = ipp_create_id(&ctx->prop_idr, &ctx->prop_lock, c_node);
- if (ret < 0) {
- DRM_ERROR("failed to create id.\n");
- goto err_clear;
- }
- property->prop_id = ret;
-
- DRM_DEBUG_KMS("created prop_id[%d]cmd[%d]ippdrv[%pK]\n",
- property->prop_id, property->cmd, ippdrv);
-
- /* stored property information and ippdrv in private data */
- c_node->property = *property;
- c_node->state = IPP_STATE_IDLE;
- c_node->filp = file;
-
- c_node->start_work = ipp_create_cmd_work();
- if (IS_ERR(c_node->start_work)) {
- DRM_ERROR("failed to create start work.\n");
- ret = PTR_ERR(c_node->start_work);
- goto err_remove_id;
- }
-
- c_node->stop_work = ipp_create_cmd_work();
- if (IS_ERR(c_node->stop_work)) {
- DRM_ERROR("failed to create stop work.\n");
- ret = PTR_ERR(c_node->stop_work);
- goto err_free_start;
- }
-
- c_node->event_work = ipp_create_event_work();
- if (IS_ERR(c_node->event_work)) {
- DRM_ERROR("failed to create event work.\n");
- ret = PTR_ERR(c_node->event_work);
- goto err_free_stop;
- }
-
- mutex_init(&c_node->lock);
- mutex_init(&c_node->mem_lock);
- mutex_init(&c_node->event_lock);
-
- init_completion(&c_node->start_complete);
- init_completion(&c_node->stop_complete);
-
- for_each_ipp_ops(i)
- INIT_LIST_HEAD(&c_node->mem_list[i]);
-
- INIT_LIST_HEAD(&c_node->event_list);
- mutex_lock(&ippdrv->cmd_lock);
- list_add_tail(&c_node->list, &ippdrv->cmd_list);
- mutex_unlock(&ippdrv->cmd_lock);
-
- /* make dedicated state without m2m */
- if (!ipp_is_m2m_cmd(property->cmd))
- ippdrv->dedicated = true;
-
- return 0;
-
-err_free_stop:
- kfree(c_node->stop_work);
-err_free_start:
- kfree(c_node->start_work);
-err_remove_id:
- ipp_remove_id(&ctx->prop_idr, &ctx->prop_lock, property->prop_id);
-err_clear:
- kfree(c_node);
- return ret;
-}
-
-static int ipp_validate_mem_node(struct drm_device *drm_dev,
- struct drm_exynos_ipp_mem_node *m_node,
- struct drm_exynos_ipp_cmd_node *c_node)
-{
- struct drm_exynos_ipp_config *ipp_cfg;
- unsigned int num_plane;
- unsigned long size, buf_size = 0, plane_size, img_size = 0;
- unsigned int bpp, width, height;
- int i;
-
- ipp_cfg = &c_node->property.config[m_node->ops_id];
- num_plane = drm_format_num_planes(ipp_cfg->fmt);
-
- /**
- * This is a rather simplified validation of a memory node.
- * It basically verifies provided gem object handles
- * and the buffer sizes with respect to current configuration.
- * This is not the best that can be done
- * but it seems more than enough
- */
- for (i = 0; i < num_plane; ++i) {
- width = ipp_cfg->sz.hsize;
- height = ipp_cfg->sz.vsize;
- bpp = drm_format_plane_cpp(ipp_cfg->fmt, i);
-
- /*
- * The result of drm_format_plane_cpp() for chroma planes must
- * be used with drm_format_xxxx_chroma_subsampling() for
- * correct result.
- */
- if (i > 0) {
- width /= drm_format_horz_chroma_subsampling(
- ipp_cfg->fmt);
- height /= drm_format_vert_chroma_subsampling(
- ipp_cfg->fmt);
- }
- plane_size = width * height * bpp;
- img_size += plane_size;
-
- if (m_node->buf_info.handles[i]) {
- size = exynos_drm_gem_get_size(drm_dev,
- m_node->buf_info.handles[i],
- c_node->filp);
- if (plane_size > size) {
- DRM_ERROR(
- "buffer %d is smaller than required\n",
- i);
- return -EINVAL;
- }
-
- buf_size += size;
- }
- }
-
- if (buf_size < img_size) {
- DRM_ERROR("size of buffers(%lu) is smaller than image(%lu)\n",
- buf_size, img_size);
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int ipp_put_mem_node(struct drm_device *drm_dev,
- struct drm_exynos_ipp_cmd_node *c_node,
- struct drm_exynos_ipp_mem_node *m_node)
-{
- int i;
-
- DRM_DEBUG_KMS("node[%pK]\n", m_node);
-
- if (!m_node) {
- DRM_ERROR("invalid dequeue node.\n");
- return -EFAULT;
- }
-
- DRM_DEBUG_KMS("ops_id[%d]\n", m_node->ops_id);
-
- /* put gem buffer */
- for_each_ipp_planar(i) {
- unsigned long handle = m_node->buf_info.handles[i];
- if (handle)
- exynos_drm_gem_put_dma_addr(drm_dev, handle,
- c_node->filp);
- }
-
- list_del(&m_node->list);
- kfree(m_node);
-
- return 0;
-}
-
-static struct drm_exynos_ipp_mem_node
- *ipp_get_mem_node(struct drm_device *drm_dev,
- struct drm_exynos_ipp_cmd_node *c_node,
- struct drm_exynos_ipp_queue_buf *qbuf)
-{
- struct drm_exynos_ipp_mem_node *m_node;
- struct drm_exynos_ipp_buf_info *buf_info;
- int i;
-
- m_node = kzalloc(sizeof(*m_node), GFP_KERNEL);
- if (!m_node)
- return ERR_PTR(-ENOMEM);
-
- buf_info = &m_node->buf_info;
-
- /* operations, buffer id */
- m_node->ops_id = qbuf->ops_id;
- m_node->prop_id = qbuf->prop_id;
- m_node->buf_id = qbuf->buf_id;
- INIT_LIST_HEAD(&m_node->list);
-
- DRM_DEBUG_KMS("m_node[%pK]ops_id[%d]\n", m_node, qbuf->ops_id);
- DRM_DEBUG_KMS("prop_id[%d]buf_id[%d]\n", qbuf->prop_id, m_node->buf_id);
-
- for_each_ipp_planar(i) {
- DRM_DEBUG_KMS("i[%d]handle[0x%x]\n", i, qbuf->handle[i]);
-
- /* get dma address by handle */
- if (qbuf->handle[i]) {
- dma_addr_t *addr;
-
- addr = exynos_drm_gem_get_dma_addr(drm_dev,
- qbuf->handle[i], c_node->filp);
- if (IS_ERR(addr)) {
- DRM_ERROR("failed to get addr.\n");
- ipp_put_mem_node(drm_dev, c_node, m_node);
- return ERR_PTR(-EFAULT);
- }
-
- buf_info->handles[i] = qbuf->handle[i];
- buf_info->base[i] = *addr;
- DRM_DEBUG_KMS("i[%d]base[%pad]hd[0x%lx]\n", i,
- &buf_info->base[i], buf_info->handles[i]);
- }
- }
-
- mutex_lock(&c_node->mem_lock);
- if (ipp_validate_mem_node(drm_dev, m_node, c_node)) {
- ipp_put_mem_node(drm_dev, c_node, m_node);
- mutex_unlock(&c_node->mem_lock);
- return ERR_PTR(-EFAULT);
- }
- list_add_tail(&m_node->list, &c_node->mem_list[qbuf->ops_id]);
- mutex_unlock(&c_node->mem_lock);
-
- return m_node;
-}
-
-static void ipp_clean_mem_nodes(struct drm_device *drm_dev,
- struct drm_exynos_ipp_cmd_node *c_node, int ops)
-{
- struct drm_exynos_ipp_mem_node *m_node, *tm_node;
- struct list_head *head = &c_node->mem_list[ops];
-
- mutex_lock(&c_node->mem_lock);
-
- list_for_each_entry_safe(m_node, tm_node, head, list) {
- int ret;
-
- ret = ipp_put_mem_node(drm_dev, c_node, m_node);
- if (ret)
- DRM_ERROR("failed to put m_node.\n");
- }
-
- mutex_unlock(&c_node->mem_lock);
-}
-
-static int ipp_get_event(struct drm_device *drm_dev,
- struct drm_exynos_ipp_cmd_node *c_node,
- struct drm_exynos_ipp_queue_buf *qbuf)
-{
- struct drm_exynos_ipp_send_event *e;
- int ret;
-
- DRM_DEBUG_KMS("ops_id[%d]buf_id[%d]\n", qbuf->ops_id, qbuf->buf_id);
-
- e = kzalloc(sizeof(*e), GFP_KERNEL);
- if (!e)
- return -ENOMEM;
-
- /* make event */
- e->event.base.type = DRM_EXYNOS_IPP_EVENT;
- e->event.base.length = sizeof(e->event);
- e->event.user_data = qbuf->user_data;
- e->event.prop_id = qbuf->prop_id;
- e->event.buf_id[EXYNOS_DRM_OPS_DST] = qbuf->buf_id;
-
- ret = drm_event_reserve_init(drm_dev, c_node->filp, &e->base, &e->event.base);
- if (ret) {
- kfree(e);
- return ret;
- }
-
- mutex_lock(&c_node->event_lock);
- list_add_tail(&e->base.link, &c_node->event_list);
- mutex_unlock(&c_node->event_lock);
-
- return 0;
-}
-
-static void ipp_put_event(struct drm_exynos_ipp_cmd_node *c_node,
- struct drm_exynos_ipp_queue_buf *qbuf)
-{
- struct drm_exynos_ipp_send_event *e, *te;
- int count = 0;
-
- mutex_lock(&c_node->event_lock);
- list_for_each_entry_safe(e, te, &c_node->event_list, base.link) {
- DRM_DEBUG_KMS("count[%d]e[%pK]\n", count++, e);
-
- /*
- * qbuf == NULL condition means all event deletion.
- * stop operations want to delete all event list.
- * another case delete only same buf id.
- */
- if (!qbuf) {
- /* delete list */
- list_del(&e->base.link);
- kfree(e);
- }
-
- /* compare buffer id */
- if (qbuf && (qbuf->buf_id ==
- e->event.buf_id[EXYNOS_DRM_OPS_DST])) {
- /* delete list */
- list_del(&e->base.link);
- kfree(e);
- goto out_unlock;
- }
- }
-
-out_unlock:
- mutex_unlock(&c_node->event_lock);
- return;
-}
-
-static void ipp_clean_cmd_node(struct ipp_context *ctx,
- struct drm_exynos_ipp_cmd_node *c_node)
-{
- int i;
-
- /* cancel works */
- cancel_work_sync(&c_node->start_work->work);
- cancel_work_sync(&c_node->stop_work->work);
- cancel_work_sync(&c_node->event_work->work);
-
- /* put event */
- ipp_put_event(c_node, NULL);
-
- for_each_ipp_ops(i)
- ipp_clean_mem_nodes(ctx->subdrv.drm_dev, c_node, i);
-
- /* delete list */
- list_del(&c_node->list);
-
- ipp_remove_id(&ctx->prop_idr, &ctx->prop_lock,
- c_node->property.prop_id);
-
- /* destroy mutex */
- mutex_destroy(&c_node->lock);
- mutex_destroy(&c_node->mem_lock);
- mutex_destroy(&c_node->event_lock);
-
- /* free command node */
- kfree(c_node->start_work);
- kfree(c_node->stop_work);
- kfree(c_node->event_work);
- kfree(c_node);
-}
-
-static bool ipp_check_mem_list(struct drm_exynos_ipp_cmd_node *c_node)
-{
- switch (c_node->property.cmd) {
- case IPP_CMD_WB:
- return !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_DST]);
- case IPP_CMD_OUTPUT:
- return !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_SRC]);
- case IPP_CMD_M2M:
- default:
- return !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_SRC]) &&
- !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_DST]);
- }
-}
-
-static struct drm_exynos_ipp_mem_node
- *ipp_find_mem_node(struct drm_exynos_ipp_cmd_node *c_node,
- struct drm_exynos_ipp_queue_buf *qbuf)
-{
- struct drm_exynos_ipp_mem_node *m_node;
- struct list_head *head;
- int count = 0;
-
- DRM_DEBUG_KMS("buf_id[%d]\n", qbuf->buf_id);
-
- /* source/destination memory list */
- head = &c_node->mem_list[qbuf->ops_id];
-
- /* find memory node from memory list */
- list_for_each_entry(m_node, head, list) {
- DRM_DEBUG_KMS("count[%d]m_node[%pK]\n", count++, m_node);
-
- /* compare buffer id */
- if (m_node->buf_id == qbuf->buf_id)
- return m_node;
- }
-
- return NULL;
-}
-
-static int ipp_set_mem_node(struct exynos_drm_ippdrv *ippdrv,
- struct drm_exynos_ipp_cmd_node *c_node,
- struct drm_exynos_ipp_mem_node *m_node)
-{
- struct exynos_drm_ipp_ops *ops = NULL;
- int ret = 0;
-
- DRM_DEBUG_KMS("node[%pK]\n", m_node);
-
- if (!m_node) {
- DRM_ERROR("invalid queue node.\n");
- return -EFAULT;
- }
-
- DRM_DEBUG_KMS("ops_id[%d]\n", m_node->ops_id);
-
- /* get operations callback */
- ops = ippdrv->ops[m_node->ops_id];
- if (!ops) {
- DRM_ERROR("not support ops.\n");
- return -EFAULT;
- }
-
- /* set address and enable irq */
- if (ops->set_addr) {
- ret = ops->set_addr(ippdrv->dev, &m_node->buf_info,
- m_node->buf_id, IPP_BUF_ENQUEUE);
- if (ret) {
- DRM_ERROR("failed to set addr.\n");
- return ret;
- }
- }
-
- return ret;
-}
-
-static void ipp_handle_cmd_work(struct device *dev,
- struct exynos_drm_ippdrv *ippdrv,
- struct drm_exynos_ipp_cmd_work *cmd_work,
- struct drm_exynos_ipp_cmd_node *c_node)
-{
- struct ipp_context *ctx = get_ipp_context(dev);
-
- cmd_work->ippdrv = ippdrv;
- cmd_work->c_node = c_node;
- queue_work(ctx->cmd_workq, &cmd_work->work);
-}
-
-static int ipp_queue_buf_with_run(struct device *dev,
- struct drm_exynos_ipp_cmd_node *c_node,
- struct drm_exynos_ipp_mem_node *m_node,
- struct drm_exynos_ipp_queue_buf *qbuf)
-{
- struct exynos_drm_ippdrv *ippdrv;
- struct drm_exynos_ipp_property *property;
- struct exynos_drm_ipp_ops *ops;
- int ret;
-
- ippdrv = ipp_find_drv_by_handle(qbuf->prop_id);
- if (IS_ERR(ippdrv)) {
- DRM_ERROR("failed to get ipp driver.\n");
- return -EFAULT;
- }
-
- ops = ippdrv->ops[qbuf->ops_id];
- if (!ops) {
- DRM_ERROR("failed to get ops.\n");
- return -EFAULT;
- }
-
- property = &c_node->property;
-
- if (c_node->state != IPP_STATE_START) {
- DRM_DEBUG_KMS("bypass for invalid state.\n");
- return 0;
- }
-
- mutex_lock(&c_node->mem_lock);
- if (!ipp_check_mem_list(c_node)) {
- mutex_unlock(&c_node->mem_lock);
- DRM_DEBUG_KMS("empty memory.\n");
- return 0;
- }
-
- /*
- * If set destination buffer and enabled clock,
- * then m2m operations need start operations at queue_buf
- */
- if (ipp_is_m2m_cmd(property->cmd)) {
- struct drm_exynos_ipp_cmd_work *cmd_work = c_node->start_work;
-
- cmd_work->ctrl = IPP_CTRL_PLAY;
- ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
- } else {
- ret = ipp_set_mem_node(ippdrv, c_node, m_node);
- if (ret) {
- mutex_unlock(&c_node->mem_lock);
- DRM_ERROR("failed to set m node.\n");
- return ret;
- }
- }
- mutex_unlock(&c_node->mem_lock);
-
- return 0;
-}
-
-static void ipp_clean_queue_buf(struct drm_device *drm_dev,
- struct drm_exynos_ipp_cmd_node *c_node,
- struct drm_exynos_ipp_queue_buf *qbuf)
-{
- struct drm_exynos_ipp_mem_node *m_node, *tm_node;
-
- /* delete list */
- mutex_lock(&c_node->mem_lock);
- list_for_each_entry_safe(m_node, tm_node,
- &c_node->mem_list[qbuf->ops_id], list) {
- if (m_node->buf_id == qbuf->buf_id &&
- m_node->ops_id == qbuf->ops_id)
- ipp_put_mem_node(drm_dev, c_node, m_node);
- }
- mutex_unlock(&c_node->mem_lock);
-}
-
-int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
- struct drm_file *file)
-{
- struct drm_exynos_file_private *file_priv = file->driver_priv;
- struct device *dev = file_priv->ipp_dev;
- struct ipp_context *ctx = get_ipp_context(dev);
- struct drm_exynos_ipp_queue_buf *qbuf = data;
- struct drm_exynos_ipp_cmd_node *c_node;
- struct drm_exynos_ipp_mem_node *m_node;
- int ret;
-
- if (!qbuf) {
- DRM_ERROR("invalid buf parameter.\n");
- return -EINVAL;
- }
-
- if (qbuf->ops_id >= EXYNOS_DRM_OPS_MAX) {
- DRM_ERROR("invalid ops parameter.\n");
- return -EINVAL;
- }
-
- DRM_DEBUG_KMS("prop_id[%d]ops_id[%s]buf_id[%d]buf_type[%d]\n",
- qbuf->prop_id, qbuf->ops_id ? "dst" : "src",
- qbuf->buf_id, qbuf->buf_type);
-
- /* find command node */
- c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
- qbuf->prop_id);
- if (!c_node || c_node->filp != file) {
- DRM_ERROR("failed to get command node.\n");
- return -ENODEV;
- }
-
- /* buffer control */
- switch (qbuf->buf_type) {
- case IPP_BUF_ENQUEUE:
- /* get memory node */
- m_node = ipp_get_mem_node(drm_dev, c_node, qbuf);
- if (IS_ERR(m_node)) {
- DRM_ERROR("failed to get m_node.\n");
- return PTR_ERR(m_node);
- }
-
- /*
- * first step get event for destination buffer.
- * and second step when M2M case run with destination buffer
- * if needed.
- */
- if (qbuf->ops_id == EXYNOS_DRM_OPS_DST) {
- /* get event for destination buffer */
- ret = ipp_get_event(drm_dev, c_node, qbuf);
- if (ret) {
- DRM_ERROR("failed to get event.\n");
- goto err_clean_node;
- }
-
- /*
- * M2M case run play control for streaming feature.
- * other case set address and waiting.
- */
- ret = ipp_queue_buf_with_run(dev, c_node, m_node, qbuf);
- if (ret) {
- DRM_ERROR("failed to run command.\n");
- goto err_clean_node;
- }
- }
- break;
- case IPP_BUF_DEQUEUE:
- mutex_lock(&c_node->lock);
-
- /* put event for destination buffer */
- if (qbuf->ops_id == EXYNOS_DRM_OPS_DST)
- ipp_put_event(c_node, qbuf);
-
- ipp_clean_queue_buf(drm_dev, c_node, qbuf);
-
- mutex_unlock(&c_node->lock);
- break;
- default:
- DRM_ERROR("invalid buffer control.\n");
- return -EINVAL;
- }
-
- return 0;
-
-err_clean_node:
- DRM_ERROR("clean memory nodes.\n");
-
- ipp_clean_queue_buf(drm_dev, c_node, qbuf);
- return ret;
-}
-
-static bool exynos_drm_ipp_check_valid(struct device *dev,
- enum drm_exynos_ipp_ctrl ctrl, enum drm_exynos_ipp_state state)
-{
- if (ctrl != IPP_CTRL_PLAY) {
- if (pm_runtime_suspended(dev)) {
- DRM_ERROR("pm:runtime_suspended.\n");
- goto err_status;
- }
- }
-
- switch (ctrl) {
- case IPP_CTRL_PLAY:
- if (state != IPP_STATE_IDLE)
- goto err_status;
- break;
- case IPP_CTRL_STOP:
- if (state == IPP_STATE_STOP)
- goto err_status;
- break;
- case IPP_CTRL_PAUSE:
- if (state != IPP_STATE_START)
- goto err_status;
- break;
- case IPP_CTRL_RESUME:
- if (state != IPP_STATE_STOP)
- goto err_status;
- break;
- default:
- DRM_ERROR("invalid state.\n");
- goto err_status;
- }
-
- return true;
-
-err_status:
- DRM_ERROR("invalid status:ctrl[%d]state[%d]\n", ctrl, state);
- return false;
-}
-
-int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data,
- struct drm_file *file)
-{
- struct drm_exynos_file_private *file_priv = file->driver_priv;
- struct exynos_drm_ippdrv *ippdrv = NULL;
- struct device *dev = file_priv->ipp_dev;
- struct ipp_context *ctx = get_ipp_context(dev);
- struct drm_exynos_ipp_cmd_ctrl *cmd_ctrl = data;
- struct drm_exynos_ipp_cmd_work *cmd_work;
- struct drm_exynos_ipp_cmd_node *c_node;
-
- if (!ctx) {
- DRM_ERROR("invalid context.\n");
- return -EINVAL;
- }
-
- if (!cmd_ctrl) {
- DRM_ERROR("invalid control parameter.\n");
- return -EINVAL;
- }
-
- DRM_DEBUG_KMS("ctrl[%d]prop_id[%d]\n",
- cmd_ctrl->ctrl, cmd_ctrl->prop_id);
-
- ippdrv = ipp_find_drv_by_handle(cmd_ctrl->prop_id);
- if (IS_ERR(ippdrv)) {
- DRM_ERROR("failed to get ipp driver.\n");
- return PTR_ERR(ippdrv);
- }
-
- c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
- cmd_ctrl->prop_id);
- if (!c_node || c_node->filp != file) {
- DRM_ERROR("invalid command node list.\n");
- return -ENODEV;
- }
-
- if (!exynos_drm_ipp_check_valid(ippdrv->dev, cmd_ctrl->ctrl,
- c_node->state)) {
- DRM_ERROR("invalid state.\n");
- return -EINVAL;
- }
-
- switch (cmd_ctrl->ctrl) {
- case IPP_CTRL_PLAY:
- if (pm_runtime_suspended(ippdrv->dev))
- pm_runtime_get_sync(ippdrv->dev);
-
- c_node->state = IPP_STATE_START;
-
- cmd_work = c_node->start_work;
- cmd_work->ctrl = cmd_ctrl->ctrl;
- ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
- break;
- case IPP_CTRL_STOP:
- cmd_work = c_node->stop_work;
- cmd_work->ctrl = cmd_ctrl->ctrl;
- ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
-
- if (!wait_for_completion_timeout(&c_node->stop_complete,
- msecs_to_jiffies(300))) {
- DRM_ERROR("timeout stop:prop_id[%d]\n",
- c_node->property.prop_id);
- }
-
- c_node->state = IPP_STATE_STOP;
- ippdrv->dedicated = false;
- mutex_lock(&ippdrv->cmd_lock);
- ipp_clean_cmd_node(ctx, c_node);
-
- if (list_empty(&ippdrv->cmd_list))
- pm_runtime_put_sync(ippdrv->dev);
- mutex_unlock(&ippdrv->cmd_lock);
- break;
- case IPP_CTRL_PAUSE:
- cmd_work = c_node->stop_work;
- cmd_work->ctrl = cmd_ctrl->ctrl;
- ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
-
- if (!wait_for_completion_timeout(&c_node->stop_complete,
- msecs_to_jiffies(200))) {
- DRM_ERROR("timeout stop:prop_id[%d]\n",
- c_node->property.prop_id);
- }
-
- c_node->state = IPP_STATE_STOP;
- break;
- case IPP_CTRL_RESUME:
- c_node->state = IPP_STATE_START;
- cmd_work = c_node->start_work;
- cmd_work->ctrl = cmd_ctrl->ctrl;
- ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
- break;
- default:
- DRM_ERROR("could not support this state currently.\n");
- return -EINVAL;
- }
-
- DRM_DEBUG_KMS("done ctrl[%d]prop_id[%d]\n",
- cmd_ctrl->ctrl, cmd_ctrl->prop_id);
-
- return 0;
-}
-
-int exynos_drm_ippnb_register(struct notifier_block *nb)
-{
- return blocking_notifier_chain_register(
- &exynos_drm_ippnb_list, nb);
-}
-
-int exynos_drm_ippnb_unregister(struct notifier_block *nb)
-{
- return blocking_notifier_chain_unregister(
- &exynos_drm_ippnb_list, nb);
-}
-
-int exynos_drm_ippnb_send_event(unsigned long val, void *v)
-{
- return blocking_notifier_call_chain(
- &exynos_drm_ippnb_list, val, v);
-}
-
-static int ipp_set_property(struct exynos_drm_ippdrv *ippdrv,
- struct drm_exynos_ipp_property *property)
-{
- struct exynos_drm_ipp_ops *ops = NULL;
- bool swap = false;
- int ret, i;
-
- if (!property) {
- DRM_ERROR("invalid property parameter.\n");
- return -EINVAL;
- }
-
- DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id);
-
- /* reset h/w block */
- if (ippdrv->reset &&
- ippdrv->reset(ippdrv->dev)) {
- return -EINVAL;
- }
-
- /* set source,destination operations */
- for_each_ipp_ops(i) {
- struct drm_exynos_ipp_config *config =
- &property->config[i];
-
- ops = ippdrv->ops[i];
- if (!ops || !config) {
- DRM_ERROR("not support ops and config.\n");
- return -EINVAL;
- }
-
- /* set format */
- if (ops->set_fmt) {
- ret = ops->set_fmt(ippdrv->dev, config->fmt);
- if (ret)
- return ret;
- }
-
- /* set transform for rotation, flip */
- if (ops->set_transf) {
- ret = ops->set_transf(ippdrv->dev, config->degree,
- config->flip, &swap);
- if (ret)
- return ret;
- }
-
- /* set size */
- if (ops->set_size) {
- ret = ops->set_size(ippdrv->dev, swap, &config->pos,
- &config->sz);
- if (ret)
- return ret;
- }
- }
-
- return 0;
-}
-
-static int ipp_start_property(struct exynos_drm_ippdrv *ippdrv,
- struct drm_exynos_ipp_cmd_node *c_node)
-{
- struct drm_exynos_ipp_mem_node *m_node;
- struct drm_exynos_ipp_property *property = &c_node->property;
- struct list_head *head;
- int ret, i;
-
- DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id);
-
- /* store command info in ippdrv */
- ippdrv->c_node = c_node;
-
- mutex_lock(&c_node->mem_lock);
- if (!ipp_check_mem_list(c_node)) {
- DRM_DEBUG_KMS("empty memory.\n");
- ret = -ENOMEM;
- goto err_unlock;
- }
-
- /* set current property in ippdrv */
- ret = ipp_set_property(ippdrv, property);
- if (ret) {
- DRM_ERROR("failed to set property.\n");
- ippdrv->c_node = NULL;
- goto err_unlock;
- }
-
- /* check command */
- switch (property->cmd) {
- case IPP_CMD_M2M:
- for_each_ipp_ops(i) {
- /* source/destination memory list */
- head = &c_node->mem_list[i];
-
- m_node = list_first_entry(head,
- struct drm_exynos_ipp_mem_node, list);
-
- DRM_DEBUG_KMS("m_node[%pK]\n", m_node);
-
- ret = ipp_set_mem_node(ippdrv, c_node, m_node);
- if (ret) {
- DRM_ERROR("failed to set m node.\n");
- goto err_unlock;
- }
- }
- break;
- case IPP_CMD_WB:
- /* destination memory list */
- head = &c_node->mem_list[EXYNOS_DRM_OPS_DST];
-
- list_for_each_entry(m_node, head, list) {
- ret = ipp_set_mem_node(ippdrv, c_node, m_node);
- if (ret) {
- DRM_ERROR("failed to set m node.\n");
- goto err_unlock;
- }
- }
- break;
- case IPP_CMD_OUTPUT:
- /* source memory list */
- head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];
-
- list_for_each_entry(m_node, head, list) {
- ret = ipp_set_mem_node(ippdrv, c_node, m_node);
- if (ret) {
- DRM_ERROR("failed to set m node.\n");
- goto err_unlock;
- }
- }
- break;
- default:
- DRM_ERROR("invalid operations.\n");
- ret = -EINVAL;
- goto err_unlock;
- }
- mutex_unlock(&c_node->mem_lock);
-
- DRM_DEBUG_KMS("cmd[%d]\n", property->cmd);
-
- /* start operations */
- if (ippdrv->start) {
- ret = ippdrv->start(ippdrv->dev, property->cmd);
- if (ret) {
- DRM_ERROR("failed to start ops.\n");
- ippdrv->c_node = NULL;
- return ret;
- }
- }
-
- return 0;
-
-err_unlock:
- mutex_unlock(&c_node->mem_lock);
- ippdrv->c_node = NULL;
- return ret;
-}
-
-static int ipp_stop_property(struct drm_device *drm_dev,
- struct exynos_drm_ippdrv *ippdrv,
- struct drm_exynos_ipp_cmd_node *c_node)
-{
- struct drm_exynos_ipp_property *property = &c_node->property;
- int i;
-
- DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id);
-
- /* stop operations */
- if (ippdrv->stop)
- ippdrv->stop(ippdrv->dev, property->cmd);
-
- /* check command */
- switch (property->cmd) {
- case IPP_CMD_M2M:
- for_each_ipp_ops(i)
- ipp_clean_mem_nodes(drm_dev, c_node, i);
- break;
- case IPP_CMD_WB:
- ipp_clean_mem_nodes(drm_dev, c_node, EXYNOS_DRM_OPS_DST);
- break;
- case IPP_CMD_OUTPUT:
- ipp_clean_mem_nodes(drm_dev, c_node, EXYNOS_DRM_OPS_SRC);
- break;
- default:
- DRM_ERROR("invalid operations.\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-void ipp_sched_cmd(struct work_struct *work)
-{
- struct drm_exynos_ipp_cmd_work *cmd_work =
- container_of(work, struct drm_exynos_ipp_cmd_work, work);
- struct exynos_drm_ippdrv *ippdrv;
- struct drm_exynos_ipp_cmd_node *c_node;
- struct drm_exynos_ipp_property *property;
- int ret;
-
- ippdrv = cmd_work->ippdrv;
- if (!ippdrv) {
- DRM_ERROR("invalid ippdrv list.\n");
- return;
- }
-
- c_node = cmd_work->c_node;
- if (!c_node) {
- DRM_ERROR("invalid command node list.\n");
- return;
- }
-
- mutex_lock(&c_node->lock);
-
- property = &c_node->property;
-
- switch (cmd_work->ctrl) {
- case IPP_CTRL_PLAY:
- case IPP_CTRL_RESUME:
- ret = ipp_start_property(ippdrv, c_node);
- if (ret) {
- DRM_ERROR("failed to start property:prop_id[%d]\n",
- c_node->property.prop_id);
- goto err_unlock;
- }
-
- /*
- * M2M case supports wait_completion of transfer.
- * because M2M case supports single unit operation
- * with multiple queue.
- * M2M need to wait completion of data transfer.
- */
- if (ipp_is_m2m_cmd(property->cmd)) {
- if (!wait_for_completion_timeout
- (&c_node->start_complete, msecs_to_jiffies(200))) {
- DRM_ERROR("timeout event:prop_id[%d]\n",
- c_node->property.prop_id);
- goto err_unlock;
- }
- }
- break;
- case IPP_CTRL_STOP:
- case IPP_CTRL_PAUSE:
- ret = ipp_stop_property(ippdrv->drm_dev, ippdrv,
- c_node);
- if (ret) {
- DRM_ERROR("failed to stop property.\n");
- goto err_unlock;
- }
-
- complete(&c_node->stop_complete);
- break;
- default:
- DRM_ERROR("unknown control type\n");
- break;
- }
-
- DRM_DEBUG_KMS("ctrl[%d] done.\n", cmd_work->ctrl);
-
-err_unlock:
- mutex_unlock(&c_node->lock);
-}
-
-static int ipp_send_event(struct exynos_drm_ippdrv *ippdrv,
- struct drm_exynos_ipp_cmd_node *c_node, int *buf_id)
-{
- struct drm_device *drm_dev = ippdrv->drm_dev;
- struct drm_exynos_ipp_property *property = &c_node->property;
- struct drm_exynos_ipp_mem_node *m_node;
- struct drm_exynos_ipp_queue_buf qbuf;
- struct drm_exynos_ipp_send_event *e;
- struct list_head *head;
- struct timeval now;
- u32 tbuf_id[EXYNOS_DRM_OPS_MAX] = {0, };
- int ret, i;
-
- for_each_ipp_ops(i)
- DRM_DEBUG_KMS("%s buf_id[%d]\n", i ? "dst" : "src", buf_id[i]);
-
- if (!drm_dev) {
- DRM_ERROR("failed to get drm_dev.\n");
- return -EINVAL;
- }
-
- if (!property) {
- DRM_ERROR("failed to get property.\n");
- return -EINVAL;
- }
-
- mutex_lock(&c_node->event_lock);
- if (list_empty(&c_node->event_list)) {
- DRM_DEBUG_KMS("event list is empty.\n");
- ret = 0;
- goto err_event_unlock;
- }
-
- mutex_lock(&c_node->mem_lock);
- if (!ipp_check_mem_list(c_node)) {
- DRM_DEBUG_KMS("empty memory.\n");
- ret = 0;
- goto err_mem_unlock;
- }
-
- /* check command */
- switch (property->cmd) {
- case IPP_CMD_M2M:
- for_each_ipp_ops(i) {
- /* source/destination memory list */
- head = &c_node->mem_list[i];
-
- m_node = list_first_entry(head,
- struct drm_exynos_ipp_mem_node, list);
-
- tbuf_id[i] = m_node->buf_id;
- DRM_DEBUG_KMS("%s buf_id[%d]\n",
- i ? "dst" : "src", tbuf_id[i]);
-
- ret = ipp_put_mem_node(drm_dev, c_node, m_node);
- if (ret)
- DRM_ERROR("failed to put m_node.\n");
- }
- break;
- case IPP_CMD_WB:
- /* clear buf for finding */
- memset(&qbuf, 0x0, sizeof(qbuf));
- qbuf.ops_id = EXYNOS_DRM_OPS_DST;
- qbuf.buf_id = buf_id[EXYNOS_DRM_OPS_DST];
-
- /* get memory node entry */
- m_node = ipp_find_mem_node(c_node, &qbuf);
- if (!m_node) {
- DRM_ERROR("empty memory node.\n");
- ret = -ENOMEM;
- goto err_mem_unlock;
- }
-
- tbuf_id[EXYNOS_DRM_OPS_DST] = m_node->buf_id;
-
- ret = ipp_put_mem_node(drm_dev, c_node, m_node);
- if (ret)
- DRM_ERROR("failed to put m_node.\n");
- break;
- case IPP_CMD_OUTPUT:
- /* source memory list */
- head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];
-
- m_node = list_first_entry(head,
- struct drm_exynos_ipp_mem_node, list);
-
- tbuf_id[EXYNOS_DRM_OPS_SRC] = m_node->buf_id;
-
- ret = ipp_put_mem_node(drm_dev, c_node, m_node);
- if (ret)
- DRM_ERROR("failed to put m_node.\n");
- break;
- default:
- DRM_ERROR("invalid operations.\n");
- ret = -EINVAL;
- goto err_mem_unlock;
- }
- mutex_unlock(&c_node->mem_lock);
-
- if (tbuf_id[EXYNOS_DRM_OPS_DST] != buf_id[EXYNOS_DRM_OPS_DST])
- DRM_ERROR("failed to match buf_id[%d %d]prop_id[%d]\n",
- tbuf_id[1], buf_id[1], property->prop_id);
-
- /*
- * command node have event list of destination buffer
- * If destination buffer enqueue to mem list,
- * then we make event and link to event list tail.
- * so, we get first event for first enqueued buffer.
- */
- e = list_first_entry(&c_node->event_list,
- struct drm_exynos_ipp_send_event, base.link);
-
- do_gettimeofday(&now);
- DRM_DEBUG_KMS("tv_sec[%ld]tv_usec[%ld]\n", now.tv_sec, now.tv_usec);
- e->event.tv_sec = now.tv_sec;
- e->event.tv_usec = now.tv_usec;
- e->event.prop_id = property->prop_id;
-
- /* set buffer id about source destination */
- for_each_ipp_ops(i)
- e->event.buf_id[i] = tbuf_id[i];
-
- drm_send_event(drm_dev, &e->base);
- mutex_unlock(&c_node->event_lock);
-
- DRM_DEBUG_KMS("done cmd[%d]prop_id[%d]buf_id[%d]\n",
- property->cmd, property->prop_id, tbuf_id[EXYNOS_DRM_OPS_DST]);
-
- return 0;
-
-err_mem_unlock:
- mutex_unlock(&c_node->mem_lock);
-err_event_unlock:
- mutex_unlock(&c_node->event_lock);
- return ret;
-}
-
-void ipp_sched_event(struct work_struct *work)
-{
- struct drm_exynos_ipp_event_work *event_work =
- container_of(work, struct drm_exynos_ipp_event_work, work);
- struct exynos_drm_ippdrv *ippdrv;
- struct drm_exynos_ipp_cmd_node *c_node;
- int ret;
-
- if (!event_work) {
- DRM_ERROR("failed to get event_work.\n");
- return;
- }
-
- DRM_DEBUG_KMS("buf_id[%d]\n", event_work->buf_id[EXYNOS_DRM_OPS_DST]);
-
- ippdrv = event_work->ippdrv;
- if (!ippdrv) {
- DRM_ERROR("failed to get ipp driver.\n");
- return;
- }
-
- c_node = ippdrv->c_node;
- if (!c_node) {
- DRM_ERROR("failed to get command node.\n");
- return;
- }
-
- /*
- * IPP supports command thread, event thread synchronization.
- * If IPP close immediately from user land, then IPP make
- * synchronization with command thread, so make complete event.
- * or going out operations.
- */
- if (c_node->state != IPP_STATE_START) {
- DRM_DEBUG_KMS("bypass state[%d]prop_id[%d]\n",
- c_node->state, c_node->property.prop_id);
- goto err_completion;
- }
-
- ret = ipp_send_event(ippdrv, c_node, event_work->buf_id);
- if (ret) {
- DRM_ERROR("failed to send event.\n");
- goto err_completion;
- }
-
-err_completion:
- if (ipp_is_m2m_cmd(c_node->property.cmd))
- complete(&c_node->start_complete);
-}
-
-static int ipp_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
-{
- struct ipp_context *ctx = get_ipp_context(dev);
- struct exynos_drm_ippdrv *ippdrv;
- int ret, count = 0;
-
- /* get ipp driver entry */
- list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
- ippdrv->drm_dev = drm_dev;
-
- ret = ipp_create_id(&ctx->ipp_idr, &ctx->ipp_lock, ippdrv);
- if (ret < 0) {
- DRM_ERROR("failed to create id.\n");
- goto err;
- }
- ippdrv->prop_list.ipp_id = ret;
-
- DRM_DEBUG_KMS("count[%d]ippdrv[%pK]ipp_id[%d]\n",
- count++, ippdrv, ret);
-
- /* store parent device for node */
- ippdrv->parent_dev = dev;
-
- /* store event work queue and handler */
- ippdrv->event_workq = ctx->event_workq;
- ippdrv->sched_event = ipp_sched_event;
- INIT_LIST_HEAD(&ippdrv->cmd_list);
- mutex_init(&ippdrv->cmd_lock);
-
- ret = drm_iommu_attach_device(drm_dev, ippdrv->dev);
- if (ret) {
- DRM_ERROR("failed to activate iommu\n");
- goto err;
- }
- }
-
- return 0;
-
-err:
- /* get ipp driver entry */
- list_for_each_entry_continue_reverse(ippdrv, &exynos_drm_ippdrv_list,
- drv_list) {
- drm_iommu_detach_device(drm_dev, ippdrv->dev);
-
- ipp_remove_id(&ctx->ipp_idr, &ctx->ipp_lock,
- ippdrv->prop_list.ipp_id);
- }
-
- return ret;
-}
-
-static void ipp_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
-{
- struct exynos_drm_ippdrv *ippdrv, *t;
- struct ipp_context *ctx = get_ipp_context(dev);
-
- /* get ipp driver entry */
- list_for_each_entry_safe(ippdrv, t, &exynos_drm_ippdrv_list, drv_list) {
- drm_iommu_detach_device(drm_dev, ippdrv->dev);
-
- ipp_remove_id(&ctx->ipp_idr, &ctx->ipp_lock,
- ippdrv->prop_list.ipp_id);
-
- ippdrv->drm_dev = NULL;
- exynos_drm_ippdrv_unregister(ippdrv);
- }
-}
-
-static int ipp_subdrv_open(struct drm_device *drm_dev, struct device *dev,
- struct drm_file *file)
-{
- struct drm_exynos_file_private *file_priv = file->driver_priv;
-
- file_priv->ipp_dev = dev;
-
- DRM_DEBUG_KMS("done priv[%pK]\n", dev);
-
- return 0;
-}
-
-static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev,
- struct drm_file *file)
-{
- struct exynos_drm_ippdrv *ippdrv = NULL;
- struct ipp_context *ctx = get_ipp_context(dev);
- struct drm_exynos_ipp_cmd_node *c_node, *tc_node;
- int count = 0;
-
- list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
- mutex_lock(&ippdrv->cmd_lock);
- list_for_each_entry_safe(c_node, tc_node,
- &ippdrv->cmd_list, list) {
- DRM_DEBUG_KMS("count[%d]ippdrv[%pK]\n",
- count++, ippdrv);
-
- if (c_node->filp == file) {
- /*
- * userland goto unnormal state. process killed.
- * and close the file.
- * so, IPP didn't called stop cmd ctrl.
- * so, we are make stop operation in this state.
- */
- if (c_node->state == IPP_STATE_START) {
- ipp_stop_property(drm_dev, ippdrv,
- c_node);
- c_node->state = IPP_STATE_STOP;
- }
-
- ippdrv->dedicated = false;
- ipp_clean_cmd_node(ctx, c_node);
- if (list_empty(&ippdrv->cmd_list))
- pm_runtime_put_sync(ippdrv->dev);
- }
- }
- mutex_unlock(&ippdrv->cmd_lock);
- }
-
- return;
-}
-
-static int ipp_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct ipp_context *ctx;
- struct exynos_drm_subdrv *subdrv;
- int ret;
-
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
-
- mutex_init(&ctx->ipp_lock);
- mutex_init(&ctx->prop_lock);
-
- idr_init(&ctx->ipp_idr);
- idr_init(&ctx->prop_idr);
-
- /*
- * create single thread for ipp event
- * IPP supports event thread for IPP drivers.
- * IPP driver send event_work to this thread.
- * and IPP event thread send event to user process.
- */
- ctx->event_workq = create_singlethread_workqueue("ipp_event");
- if (!ctx->event_workq) {
- dev_err(dev, "failed to create event workqueue\n");
- return -EINVAL;
- }
-
- /*
- * create single thread for ipp command
- * IPP supports command thread for user process.
- * user process make command node using set property ioctl.
- * and make start_work and send this work to command thread.
- * and then this command thread start property.
- */
- ctx->cmd_workq = create_singlethread_workqueue("ipp_cmd");
- if (!ctx->cmd_workq) {
- dev_err(dev, "failed to create cmd workqueue\n");
- ret = -EINVAL;
- goto err_event_workq;
- }
-
- /* set sub driver informations */
- subdrv = &ctx->subdrv;
- subdrv->dev = dev;
- subdrv->probe = ipp_subdrv_probe;
- subdrv->remove = ipp_subdrv_remove;
- subdrv->open = ipp_subdrv_open;
- subdrv->close = ipp_subdrv_close;
-
- platform_set_drvdata(pdev, ctx);
-
- ret = exynos_drm_subdrv_register(subdrv);
- if (ret < 0) {
- DRM_ERROR("failed to register drm ipp device.\n");
- goto err_cmd_workq;
- }
-
- dev_info(dev, "drm ipp registered successfully.\n");
-
- return 0;
-
-err_cmd_workq:
- destroy_workqueue(ctx->cmd_workq);
-err_event_workq:
- destroy_workqueue(ctx->event_workq);
- return ret;
-}
-
-static int ipp_remove(struct platform_device *pdev)
-{
- struct ipp_context *ctx = platform_get_drvdata(pdev);
-
- /* unregister sub driver */
- exynos_drm_subdrv_unregister(&ctx->subdrv);
-
- /* remove,destroy ipp idr */
- idr_destroy(&ctx->ipp_idr);
- idr_destroy(&ctx->prop_idr);
-
- mutex_destroy(&ctx->ipp_lock);
- mutex_destroy(&ctx->prop_lock);
-
- /* destroy command, event work queue */
- destroy_workqueue(ctx->cmd_workq);
- destroy_workqueue(ctx->event_workq);
-
- return 0;
-}
-
-struct platform_driver ipp_driver = {
- .probe = ipp_probe,
- .remove = ipp_remove,
- .driver = {
- .name = "exynos-drm-ipp",
- .owner = THIS_MODULE,
- },
-};
-
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.h b/drivers/gpu/drm/exynos/exynos_drm_ipp.h
deleted file mode 100644
index 2a61547a39d0..000000000000
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.h
+++ /dev/null
@@ -1,252 +0,0 @@
-/*
- * Copyright (c) 2012 Samsung Electronics Co., Ltd.
- *
- * Authors:
- * Eunchul Kim <chulspro.kim@samsung.com>
- * Jinyoung Jeon <jy0.jeon@samsung.com>
- * Sangmin Lee <lsmin.lee@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#ifndef _EXYNOS_DRM_IPP_H_
-#define _EXYNOS_DRM_IPP_H_
-
-#define for_each_ipp_ops(pos) \
- for (pos = 0; pos < EXYNOS_DRM_OPS_MAX; pos++)
-#define for_each_ipp_planar(pos) \
- for (pos = 0; pos < EXYNOS_DRM_PLANAR_MAX; pos++)
-
-#define IPP_GET_LCD_WIDTH _IOR('F', 302, int)
-#define IPP_GET_LCD_HEIGHT _IOR('F', 303, int)
-#define IPP_SET_WRITEBACK _IOW('F', 304, u32)
-
-/* definition of state */
-enum drm_exynos_ipp_state {
- IPP_STATE_IDLE,
- IPP_STATE_START,
- IPP_STATE_STOP,
-};
-
-/*
- * A structure of command work information.
- * @work: work structure.
- * @ippdrv: current work ippdrv.
- * @c_node: command node information.
- * @ctrl: command control.
- */
-struct drm_exynos_ipp_cmd_work {
- struct work_struct work;
- struct exynos_drm_ippdrv *ippdrv;
- struct drm_exynos_ipp_cmd_node *c_node;
- enum drm_exynos_ipp_ctrl ctrl;
-};
-
-/*
- * A structure of command node.
- *
- * @list: list head to command queue information.
- * @event_list: list head of event.
- * @mem_list: list head to source,destination memory queue information.
- * @lock: lock for synchronization of access to ioctl.
- * @mem_lock: lock for synchronization of access to memory nodes.
- * @event_lock: lock for synchronization of access to scheduled event.
- * @start_complete: completion of start of command.
- * @stop_complete: completion of stop of command.
- * @property: property information.
- * @start_work: start command work structure.
- * @stop_work: stop command work structure.
- * @event_work: event work structure.
- * @state: state of command node.
- * @filp: associated file pointer.
- */
-struct drm_exynos_ipp_cmd_node {
- struct list_head list;
- struct list_head event_list;
- struct list_head mem_list[EXYNOS_DRM_OPS_MAX];
- struct mutex lock;
- struct mutex mem_lock;
- struct mutex event_lock;
- struct completion start_complete;
- struct completion stop_complete;
- struct drm_exynos_ipp_property property;
- struct drm_exynos_ipp_cmd_work *start_work;
- struct drm_exynos_ipp_cmd_work *stop_work;
- struct drm_exynos_ipp_event_work *event_work;
- enum drm_exynos_ipp_state state;
- struct drm_file *filp;
-};
-
-/*
- * A structure of buffer information.
- *
- * @handles: Y, Cb, Cr each gem object handle.
- * @base: Y, Cb, Cr each planar address.
- */
-struct drm_exynos_ipp_buf_info {
- unsigned long handles[EXYNOS_DRM_PLANAR_MAX];
- dma_addr_t base[EXYNOS_DRM_PLANAR_MAX];
-};
-
-/*
- * A structure of wb setting information.
- *
- * @enable: enable flag for wb.
- * @refresh: HZ of the refresh rate.
- */
-struct drm_exynos_ipp_set_wb {
- __u32 enable;
- __u32 refresh;
-};
-
-/*
- * A structure of event work information.
- *
- * @work: work structure.
- * @ippdrv: current work ippdrv.
- * @buf_id: id of src, dst buffer.
- */
-struct drm_exynos_ipp_event_work {
- struct work_struct work;
- struct exynos_drm_ippdrv *ippdrv;
- u32 buf_id[EXYNOS_DRM_OPS_MAX];
-};
-
-/*
- * A structure of source,destination operations.
- *
- * @set_fmt: set format of image.
- * @set_transf: set transform(rotations, flip).
- * @set_size: set size of region.
- * @set_addr: set address for dma.
- */
-struct exynos_drm_ipp_ops {
- int (*set_fmt)(struct device *dev, u32 fmt);
- int (*set_transf)(struct device *dev,
- enum drm_exynos_degree degree,
- enum drm_exynos_flip flip, bool *swap);
- int (*set_size)(struct device *dev, int swap,
- struct drm_exynos_pos *pos, struct drm_exynos_sz *sz);
- int (*set_addr)(struct device *dev,
- struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id,
- enum drm_exynos_ipp_buf_type buf_type);
-};
-
-/*
- * A structure of ipp driver.
- *
- * @drv_list: list head for registed sub driver information.
- * @parent_dev: parent device information.
- * @dev: platform device.
- * @drm_dev: drm device.
- * @dedicated: dedicated ipp device.
- * @ops: source, destination operations.
- * @event_workq: event work queue.
- * @c_node: current command information.
- * @cmd_list: list head for command information.
- * @cmd_lock: lock for synchronization of access to cmd_list.
- * @prop_list: property informations of current ipp driver.
- * @check_property: check property about format, size, buffer.
- * @reset: reset ipp block.
- * @start: ipp each device start.
- * @stop: ipp each device stop.
- * @sched_event: work schedule handler.
- */
-struct exynos_drm_ippdrv {
- struct list_head drv_list;
- struct device *parent_dev;
- struct device *dev;
- struct drm_device *drm_dev;
- bool dedicated;
- struct exynos_drm_ipp_ops *ops[EXYNOS_DRM_OPS_MAX];
- struct workqueue_struct *event_workq;
- struct drm_exynos_ipp_cmd_node *c_node;
- struct list_head cmd_list;
- struct mutex cmd_lock;
- struct drm_exynos_ipp_prop_list prop_list;
-
- int (*check_property)(struct device *dev,
- struct drm_exynos_ipp_property *property);
- int (*reset)(struct device *dev);
- int (*start)(struct device *dev, enum drm_exynos_ipp_cmd cmd);
- void (*stop)(struct device *dev, enum drm_exynos_ipp_cmd cmd);
- void (*sched_event)(struct work_struct *work);
-};
-
-#ifdef CONFIG_DRM_EXYNOS_IPP
-extern int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv);
-extern int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv);
-extern int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data,
- struct drm_file *file);
-extern int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
- struct drm_file *file);
-extern int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
- struct drm_file *file);
-extern int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data,
- struct drm_file *file);
-extern int exynos_drm_ippnb_register(struct notifier_block *nb);
-extern int exynos_drm_ippnb_unregister(struct notifier_block *nb);
-extern int exynos_drm_ippnb_send_event(unsigned long val, void *v);
-extern void ipp_sched_cmd(struct work_struct *work);
-extern void ipp_sched_event(struct work_struct *work);
-
-#else
-static inline int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv)
-{
- return -ENODEV;
-}
-
-static inline int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv)
-{
- return -ENODEV;
-}
-
-static inline int exynos_drm_ipp_get_property(struct drm_device *drm_dev,
- void *data,
- struct drm_file *file_priv)
-{
- return -ENOTTY;
-}
-
-static inline int exynos_drm_ipp_set_property(struct drm_device *drm_dev,
- void *data,
- struct drm_file *file_priv)
-{
- return -ENOTTY;
-}
-
-static inline int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev,
- void *data,
- struct drm_file *file)
-{
- return -ENOTTY;
-}
-
-static inline int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev,
- void *data,
- struct drm_file *file)
-{
- return -ENOTTY;
-}
-
-static inline int exynos_drm_ippnb_register(struct notifier_block *nb)
-{
- return -ENODEV;
-}
-
-static inline int exynos_drm_ippnb_unregister(struct notifier_block *nb)
-{
- return -ENODEV;
-}
-
-static inline int exynos_drm_ippnb_send_event(unsigned long val, void *v)
-{
- return -ENOTTY;
-}
-#endif
-
-#endif /* _EXYNOS_DRM_IPP_H_ */
-
diff --git a/drivers/gpu/drm/exynos/regs-decon5433.h b/drivers/gpu/drm/exynos/regs-decon5433.h
new file mode 100644
index 000000000000..19ad9e47945e
--- /dev/null
+++ b/drivers/gpu/drm/exynos/regs-decon5433.h
@@ -0,0 +1,209 @@
+/*
+ * Copyright (C) 2014 Samsung Electronics Co.Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef EXYNOS_REGS_DECON5433_H
+#define EXYNOS_REGS_DECON5433_H
+
+/* Exynos543X DECON */
+#define DECON_VIDCON0 0x0000
+#define DECON_VIDOUTCON0 0x0010
+#define DECON_WINCONx(n) (0x0020 + ((n) * 4))
+#define DECON_VIDOSDxH(n) (0x0080 + ((n) * 4))
+#define DECON_SHADOWCON 0x00A0
+#define DECON_VIDOSDxA(n) (0x00B0 + ((n) * 0x20))
+#define DECON_VIDOSDxB(n) (0x00B4 + ((n) * 0x20))
+#define DECON_VIDOSDxC(n) (0x00B8 + ((n) * 0x20))
+#define DECON_VIDOSDxD(n) (0x00BC + ((n) * 0x20))
+#define DECON_VIDOSDxE(n) (0x00C0 + ((n) * 0x20))
+#define DECON_VIDW0xADD0B0(n) (0x0150 + ((n) * 0x10))
+#define DECON_VIDW0xADD0B1(n) (0x0154 + ((n) * 0x10))
+#define DECON_VIDW0xADD0B2(n) (0x0158 + ((n) * 0x10))
+#define DECON_VIDW0xADD1B0(n) (0x01A0 + ((n) * 0x10))
+#define DECON_VIDW0xADD1B1(n) (0x01A4 + ((n) * 0x10))
+#define DECON_VIDW0xADD1B2(n) (0x01A8 + ((n) * 0x10))
+#define DECON_VIDW0xADD2(n) (0x0200 + ((n) * 4))
+#define DECON_LOCALxSIZE(n) (0x0214 + ((n) * 4))
+#define DECON_VIDINTCON0 0x0220
+#define DECON_VIDINTCON1 0x0224
+#define DECON_WxKEYCON0(n) (0x0230 + ((n - 1) * 8))
+#define DECON_WxKEYCON1(n) (0x0234 + ((n - 1) * 8))
+#define DECON_WxKEYALPHA(n) (0x0250 + ((n - 1) * 4))
+#define DECON_WINxMAP(n) (0x0270 + ((n) * 4))
+#define DECON_QOSLUT07_00 0x02C0
+#define DECON_QOSLUT15_08 0x02C4
+#define DECON_QOSCTRL 0x02C8
+#define DECON_BLENDERQx(n) (0x0300 + ((n - 1) * 4))
+#define DECON_BLENDCON 0x0310
+#define DECON_OPE_VIDW0xADD0(n) (0x0400 + ((n) * 4))
+#define DECON_OPE_VIDW0xADD1(n) (0x0414 + ((n) * 4))
+#define DECON_FRAMEFIFO_REG7 0x051C
+#define DECON_FRAMEFIFO_REG8 0x0520
+#define DECON_FRAMEFIFO_STATUS 0x0524
+#define DECON_CMU 0x1404
+#define DECON_UPDATE 0x1410
+#define DECON_CRFMID 0x1414
+#define DECON_UPDATE_SCHEME 0x1438
+#define DECON_VIDCON1 0x2000
+#define DECON_VIDCON2 0x2004
+#define DECON_VIDCON3 0x2008
+#define DECON_VIDCON4 0x200C
+#define DECON_VIDTCON2 0x2028
+#define DECON_FRAME_SIZE 0x2038
+#define DECON_LINECNT_OP_THRESHOLD 0x203C
+#define DECON_TRIGCON 0x2040
+#define DECON_TRIGSKIP 0x2050
+#define DECON_CRCRDATA 0x20B0
+#define DECON_CRCCTRL 0x20B4
+
+/* Exynos5430 DECON */
+#define DECON_VIDTCON0 0x2020
+#define DECON_VIDTCON1 0x2024
+
+/* Exynos5433 DECON */
+#define DECON_VIDTCON00 0x2010
+#define DECON_VIDTCON01 0x2014
+#define DECON_VIDTCON10 0x2018
+#define DECON_VIDTCON11 0x201C
+
+/* Exynos543X DECON Internal */
+#define DECON_W013DSTREOCON 0x0320
+#define DECON_W233DSTREOCON 0x0324
+#define DECON_FRAMEFIFO_REG0 0x0500
+#define DECON_ENHANCER_CTRL 0x2100
+
+/* Exynos543X DECON TV */
+#define DECON_VCLKCON0 0x0014
+#define DECON_VIDINTCON2 0x0228
+#define DECON_VIDINTCON3 0x022C
+
+/* VIDCON0 */
+#define VIDCON0_SWRESET (1 << 28)
+#define VIDCON0_CLKVALUP (1 << 14)
+#define VIDCON0_VLCKFREE (1 << 5)
+#define VIDCON0_STOP_STATUS (1 << 2)
+#define VIDCON0_ENVID (1 << 1)
+#define VIDCON0_ENVID_F (1 << 0)
+
+/* VIDOUTCON0 */
+#define VIDOUT_INTERLACE_FIELD_F (1 << 29)
+#define VIDOUT_INTERLACE_EN_F (1 << 28)
+#define VIDOUT_LCD_ON (1 << 24)
+#define VIDOUT_IF_F_MASK (0x3 << 20)
+#define VIDOUT_RGB_IF (0x0 << 20)
+#define VIDOUT_COMMAND_IF (0x2 << 20)
+
+/* WINCONx */
+#define WINCONx_HAWSWP_F (1 << 16)
+#define WINCONx_WSWP_F (1 << 15)
+#define WINCONx_BURSTLEN_MASK (0x3 << 10)
+#define WINCONx_BURSTLEN_16WORD (0x0 << 10)
+#define WINCONx_BURSTLEN_8WORD (0x1 << 10)
+#define WINCONx_BURSTLEN_4WORD (0x2 << 10)
+#define WINCONx_BLD_PIX_F (1 << 6)
+#define WINCONx_BPPMODE_MASK (0xf << 2)
+#define WINCONx_BPPMODE_16BPP_565 (0x5 << 2)
+#define WINCONx_BPPMODE_16BPP_A1555 (0x6 << 2)
+#define WINCONx_BPPMODE_16BPP_I1555 (0x7 << 2)
+#define WINCONx_BPPMODE_24BPP_888 (0xb << 2)
+#define WINCONx_BPPMODE_24BPP_A1887 (0xc << 2)
+#define WINCONx_BPPMODE_25BPP_A1888 (0xd << 2)
+#define WINCONx_BPPMODE_32BPP_A8888 (0xd << 2)
+#define WINCONx_BPPMODE_16BPP_A4444 (0xe << 2)
+#define WINCONx_ALPHA_SEL_F (1 << 1)
+#define WINCONx_ENWIN_F (1 << 0)
+
+/* SHADOWCON */
+#define SHADOWCON_PROTECT_MASK GENMASK(14, 10)
+#define SHADOWCON_Wx_PROTECT(n) (1 << (10 + (n)))
+
+/* VIDOSDxD */
+#define VIDOSD_Wx_ALPHA_R_F(n) (((n) & 0xff) << 16)
+#define VIDOSD_Wx_ALPHA_G_F(n) (((n) & 0xff) << 8)
+#define VIDOSD_Wx_ALPHA_B_F(n) (((n) & 0xff) << 0)
+
+/* VIDINTCON0 */
+#define VIDINTCON0_FRAMEDONE (1 << 17)
+#define VIDINTCON0_FRAMESEL_BP (0 << 15)
+#define VIDINTCON0_FRAMESEL_VS (1 << 15)
+#define VIDINTCON0_FRAMESEL_AC (2 << 15)
+#define VIDINTCON0_FRAMESEL_FP (3 << 15)
+#define VIDINTCON0_INTFRMEN (1 << 12)
+#define VIDINTCON0_INTEN (1 << 0)
+
+/* VIDINTCON1 */
+#define VIDINTCON1_INTFRMDONEPEND (1 << 2)
+#define VIDINTCON1_INTFRMPEND (1 << 1)
+#define VIDINTCON1_INTFIFOPEND (1 << 0)
+
+/* DECON_CMU */
+#define CMU_CLKGAGE_MODE_SFR_F (1 << 1)
+#define CMU_CLKGAGE_MODE_MEM_F (1 << 0)
+
+/* DECON_UPDATE */
+#define STANDALONE_UPDATE_F (1 << 0)
+
+/* DECON_VIDCON1 */
+#define VIDCON1_LINECNT_MASK (0x0fff << 16)
+#define VIDCON1_I80_ACTIVE (1 << 15)
+#define VIDCON1_VSTATUS_MASK (0x3 << 13)
+#define VIDCON1_VSTATUS_VS (0 << 13)
+#define VIDCON1_VSTATUS_BP (1 << 13)
+#define VIDCON1_VSTATUS_AC (2 << 13)
+#define VIDCON1_VSTATUS_FP (3 << 13)
+#define VIDCON1_VCLK_MASK (0x3 << 9)
+#define VIDCON1_VCLK_RUN_VDEN_DISABLE (0x3 << 9)
+#define VIDCON1_VCLK_HOLD (0x0 << 9)
+#define VIDCON1_VCLK_RUN (0x1 << 9)
+
+
+/* DECON_VIDTCON00 */
+#define VIDTCON00_VBPD_F(x) (((x) & 0xfff) << 16)
+#define VIDTCON00_VFPD_F(x) ((x) & 0xfff)
+
+/* DECON_VIDTCON01 */
+#define VIDTCON01_VSPW_F(x) (((x) & 0xfff) << 16)
+
+/* DECON_VIDTCON10 */
+#define VIDTCON10_HBPD_F(x) (((x) & 0xfff) << 16)
+#define VIDTCON10_HFPD_F(x) ((x) & 0xfff)
+
+/* DECON_VIDTCON11 */
+#define VIDTCON11_HSPW_F(x) (((x) & 0xfff) << 16)
+
+/* DECON_VIDTCON2 */
+#define VIDTCON2_LINEVAL(x) (((x) & 0xfff) << 16)
+#define VIDTCON2_HOZVAL(x) ((x) & 0xfff)
+
+/* TRIGCON */
+#define TRIGCON_TRIGEN_PER_F (1 << 31)
+#define TRIGCON_TRIGEN_F (1 << 30)
+#define TRIGCON_TE_AUTO_MASK (1 << 29)
+#define TRIGCON_WB_SWTRIGCMD (1 << 28)
+#define TRIGCON_SWTRIGCMD_W4BUF (1 << 26)
+#define TRIGCON_TRIGMODE_W4BUF (1 << 25)
+#define TRIGCON_SWTRIGCMD_W3BUF (1 << 21)
+#define TRIGCON_TRIGMODE_W3BUF (1 << 20)
+#define TRIGCON_SWTRIGCMD_W2BUF (1 << 16)
+#define TRIGCON_TRIGMODE_W2BUF (1 << 15)
+#define TRIGCON_SWTRIGCMD_W1BUF (1 << 11)
+#define TRIGCON_TRIGMODE_W1BUF (1 << 10)
+#define TRIGCON_SWTRIGCMD_W0BUF (1 << 6)
+#define TRIGCON_TRIGMODE_W0BUF (1 << 5)
+#define TRIGCON_HWTRIGMASK (1 << 4)
+#define TRIGCON_HWTRIGEN (1 << 3)
+#define TRIGCON_HWTRIG_INV (1 << 2)
+#define TRIGCON_SWTRIGCMD (1 << 1)
+#define TRIGCON_SWTRIGEN (1 << 0)
+
+/* DECON_CRCCTRL */
+#define CRCCTRL_CRCCLKEN (0x1 << 2)
+#define CRCCTRL_CRCSTART_F (0x1 << 1)
+#define CRCCTRL_CRCEN (0x1 << 0)
+#define CRCCTRL_MASK (0x7)
+
+#endif /* EXYNOS_REGS_DECON5433_H */
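A minimal usage sketch for the window macros defined above (not part of the patch): the mapped register base `regs`, the window index and the plain writel() sequencing are illustrative assumptions; only the DECON_*/VIDOSD_*/SHADOWCON_*/STANDALONE_UPDATE_F names come from this header.

	/* Illustrative only: set full alpha on window 1 under shadow protection. */
	void __iomem *regs = decon_ctx_addr;	/* hypothetical mapped DECON base */
	unsigned int win = 1;

	writel(SHADOWCON_Wx_PROTECT(win), regs + DECON_SHADOWCON);
	writel(VIDOSD_Wx_ALPHA_R_F(0xff) | VIDOSD_Wx_ALPHA_G_F(0xff) |
	       VIDOSD_Wx_ALPHA_B_F(0xff), regs + DECON_VIDOSDxD(win));
	writel(0, regs + DECON_SHADOWCON);			/* drop protection */
	writel(STANDALONE_UPDATE_F, regs + DECON_UPDATE);	/* latch shadow registers */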
diff --git a/drivers/gpu/drm/exynos/regs-decon7.h b/drivers/gpu/drm/exynos/regs-decon7.h
new file mode 100644
index 000000000000..5df7765d2397
--- /dev/null
+++ b/drivers/gpu/drm/exynos/regs-decon7.h
@@ -0,0 +1,353 @@
+/*
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd.
+ * Author: Ajay Kumar <ajaykumar.rs@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef EXYNOS_REGS_DECON7_H
+#define EXYNOS_REGS_DECON7_H
+
+/* VIDCON0 */
+#define VIDCON0 0x00
+
+#define VIDCON0_SWRESET (1 << 28)
+#define VIDCON0_DECON_STOP_STATUS (1 << 2)
+#define VIDCON0_ENVID (1 << 1)
+#define VIDCON0_ENVID_F (1 << 0)
+
+/* VIDOUTCON0 */
+#define VIDOUTCON0 0x4
+
+#define VIDOUTCON0_DUAL_MASK (0x3 << 24)
+#define VIDOUTCON0_DUAL_ON (0x3 << 24)
+#define VIDOUTCON0_DISP_IF_1_ON (0x2 << 24)
+#define VIDOUTCON0_DISP_IF_0_ON (0x1 << 24)
+#define VIDOUTCON0_DUAL_OFF (0x0 << 24)
+#define VIDOUTCON0_IF_SHIFT 23
+#define VIDOUTCON0_IF_MASK (0x1 << 23)
+#define VIDOUTCON0_RGBIF (0x0 << 23)
+#define VIDOUTCON0_I80IF (0x1 << 23)
+
+/* VIDCON3 */
+#define VIDCON3 0x8
+
+/* VIDCON4 */
+#define VIDCON4 0xC
+#define VIDCON4_FIFOCNT_START_EN (1 << 0)
+
+/* VCLKCON0 */
+#define VCLKCON0 0x10
+#define VCLKCON0_CLKVALUP (1 << 8)
+#define VCLKCON0_VCLKFREE (1 << 0)
+
+/* VCLKCON */
+#define VCLKCON1 0x14
+#define VCLKCON1_CLKVAL_NUM_VCLK(val) (((val) & 0xff) << 0)
+#define VCLKCON2 0x18
+
+/* SHADOWCON */
+#define SHADOWCON 0x30
+
+#define SHADOWCON_WINx_PROTECT(_win) (1 << (10 + (_win)))
+
+/* WINCONx */
+#define WINCON(_win) (0x50 + ((_win) * 4))
+
+#define WINCONx_BUFSTATUS (0x3 << 30)
+#define WINCONx_BUFSEL_MASK (0x3 << 28)
+#define WINCONx_BUFSEL_SHIFT 28
+#define WINCONx_TRIPLE_BUF_MODE (0x1 << 18)
+#define WINCONx_DOUBLE_BUF_MODE (0x0 << 18)
+#define WINCONx_BURSTLEN_16WORD (0x0 << 11)
+#define WINCONx_BURSTLEN_8WORD (0x1 << 11)
+#define WINCONx_BURSTLEN_MASK (0x1 << 11)
+#define WINCONx_BURSTLEN_SHIFT 11
+#define WINCONx_BLD_PLANE (0 << 8)
+#define WINCONx_BLD_PIX (1 << 8)
+#define WINCONx_ALPHA_MUL (1 << 7)
+
+#define WINCONx_BPPMODE_MASK (0xf << 2)
+#define WINCONx_BPPMODE_SHIFT 2
+#define WINCONx_BPPMODE_16BPP_565 (0x8 << 2)
+#define WINCONx_BPPMODE_24BPP_BGRx (0x7 << 2)
+#define WINCONx_BPPMODE_24BPP_RGBx (0x6 << 2)
+#define WINCONx_BPPMODE_24BPP_xBGR (0x5 << 2)
+#define WINCONx_BPPMODE_24BPP_xRGB (0x4 << 2)
+#define WINCONx_BPPMODE_32BPP_BGRA (0x3 << 2)
+#define WINCONx_BPPMODE_32BPP_RGBA (0x2 << 2)
+#define WINCONx_BPPMODE_32BPP_ABGR (0x1 << 2)
+#define WINCONx_BPPMODE_32BPP_ARGB (0x0 << 2)
+#define WINCONx_ALPHA_SEL (1 << 1)
+#define WINCONx_ENWIN (1 << 0)
+
+#define WINCON1_ALPHA_MUL_F (1 << 7)
+#define WINCON2_ALPHA_MUL_F (1 << 7)
+#define WINCON3_ALPHA_MUL_F (1 << 7)
+#define WINCON4_ALPHA_MUL_F (1 << 7)
+
+/* VIDOSDxH: The height for the OSD image (READ ONLY) */
+#define VIDOSD_H(_x) (0x80 + ((_x) * 4))
+
+/* Frame buffer start addresses: VIDWxxADD0n */
+#define VIDW_BUF_START(_win) (0x80 + ((_win) * 0x10))
+#define VIDW_BUF_START1(_win) (0x84 + ((_win) * 0x10))
+#define VIDW_BUF_START2(_win) (0x88 + ((_win) * 0x10))
+
+#define VIDW_WHOLE_X(_win) (0x0130 + ((_win) * 8))
+#define VIDW_WHOLE_Y(_win) (0x0134 + ((_win) * 8))
+#define VIDW_OFFSET_X(_win) (0x0170 + ((_win) * 8))
+#define VIDW_OFFSET_Y(_win) (0x0174 + ((_win) * 8))
+#define VIDW_BLKOFFSET(_win) (0x01B0 + ((_win) * 4))
+#define VIDW_BLKSIZE(_win) (0x0200 + ((_win) * 4))
+
+/* Interrupt controls register */
+#define VIDINTCON2 0x228
+
+#define VIDINTCON1_INTEXTRA1_EN (1 << 1)
+#define VIDINTCON1_INTEXTRA0_EN (1 << 0)
+
+/* Interrupt controls and status register */
+#define VIDINTCON3 0x22C
+
+#define VIDINTCON1_INTEXTRA1_PEND (1 << 1)
+#define VIDINTCON1_INTEXTRA0_PEND (1 << 0)
+
+/* VIDOSDxA ~ VIDOSDxE */
+#define VIDOSD_BASE 0x230
+
+#define OSD_STRIDE 0x20
+
+#define VIDOSD_A(_win) (VIDOSD_BASE + \
+ ((_win) * OSD_STRIDE) + 0x00)
+#define VIDOSD_B(_win) (VIDOSD_BASE + \
+ ((_win) * OSD_STRIDE) + 0x04)
+#define VIDOSD_C(_win) (VIDOSD_BASE + \
+ ((_win) * OSD_STRIDE) + 0x08)
+#define VIDOSD_D(_win) (VIDOSD_BASE + \
+ ((_win) * OSD_STRIDE) + 0x0C)
+#define VIDOSD_E(_win) (VIDOSD_BASE + \
+ ((_win) * OSD_STRIDE) + 0x10)
+
+#define VIDOSDxA_TOPLEFT_X_MASK (0x1fff << 13)
+#define VIDOSDxA_TOPLEFT_X_SHIFT 13
+#define VIDOSDxA_TOPLEFT_X_LIMIT 0x1fff
+#define VIDOSDxA_TOPLEFT_X(_x) (((_x) & 0x1fff) << 13)
+
+#define VIDOSDxA_TOPLEFT_Y_MASK (0x1fff << 0)
+#define VIDOSDxA_TOPLEFT_Y_SHIFT 0
+#define VIDOSDxA_TOPLEFT_Y_LIMIT 0x1fff
+#define VIDOSDxA_TOPLEFT_Y(_x) (((_x) & 0x1fff) << 0)
+
+#define VIDOSDxB_BOTRIGHT_X_MASK (0x1fff << 13)
+#define VIDOSDxB_BOTRIGHT_X_SHIFT 13
+#define VIDOSDxB_BOTRIGHT_X_LIMIT 0x1fff
+#define VIDOSDxB_BOTRIGHT_X(_x) (((_x) & 0x1fff) << 13)
+
+#define VIDOSDxB_BOTRIGHT_Y_MASK (0x1fff << 0)
+#define VIDOSDxB_BOTRIGHT_Y_SHIFT 0
+#define VIDOSDxB_BOTRIGHT_Y_LIMIT 0x1fff
+#define VIDOSDxB_BOTRIGHT_Y(_x) (((_x) & 0x1fff) << 0)
+
+#define VIDOSDxC_ALPHA0_R_F(_x) (((_x) & 0xFF) << 16)
+#define VIDOSDxC_ALPHA0_G_F(_x) (((_x) & 0xFF) << 8)
+#define VIDOSDxC_ALPHA0_B_F(_x) (((_x) & 0xFF) << 0)
+
+#define VIDOSDxD_ALPHA1_R_F(_x) (((_x) & 0xFF) << 16)
+#define VIDOSDxD_ALPHA1_G_F(_x) (((_x) & 0xFF) << 8)
+#define VIDOSDxD_ALPHA1_B_F(_x) (((_x) & 0xFF) << 0)
+
+/* Window MAP (Color map) */
+#define WINxMAP(_win) (0x340 + ((_win) * 4))
+
+#define WINxMAP_MAP (1 << 24)
+#define WINxMAP_MAP_COLOUR_MASK (0xffffff << 0)
+#define WINxMAP_MAP_COLOUR_SHIFT 0
+#define WINxMAP_MAP_COLOUR_LIMIT 0xffffff
+#define WINxMAP_MAP_COLOUR(_x) ((_x) << 0)
+
+/* Window colour-key control registers */
+#define WKEYCON 0x370
+
+#define WKEYCON0 0x00
+#define WKEYCON1 0x04
+#define WxKEYCON0_KEYBL_EN (1 << 26)
+#define WxKEYCON0_KEYEN_F (1 << 25)
+#define WxKEYCON0_DIRCON (1 << 24)
+#define WxKEYCON0_COMPKEY_MASK (0xffffff << 0)
+#define WxKEYCON0_COMPKEY_SHIFT 0
+#define WxKEYCON0_COMPKEY_LIMIT 0xffffff
+#define WxKEYCON0_COMPKEY(_x) ((_x) << 0)
+#define WxKEYCON1_COLVAL_MASK (0xffffff << 0)
+#define WxKEYCON1_COLVAL_SHIFT 0
+#define WxKEYCON1_COLVAL_LIMIT 0xffffff
+#define WxKEYCON1_COLVAL(_x) ((_x) << 0)
+
+/* color key control register for hardware window 1 ~ 4. */
+#define WKEYCON0_BASE(x) ((WKEYCON + WKEYCON0) + ((x - 1) * 8))
+/* color key value register for hardware window 1 ~ 4. */
+#define WKEYCON1_BASE(x) ((WKEYCON + WKEYCON1) + ((x - 1) * 8))
+
+/* Window KEY Alpha value */
+#define WxKEYALPHA(_win) (0x3A0 + (((_win) - 1) * 0x4))
+
+#define Wx_KEYALPHA_R_F_SHIFT 16
+#define Wx_KEYALPHA_G_F_SHIFT 8
+#define Wx_KEYALPHA_B_F_SHIFT 0
+
+/* Blending equation */
+#define BLENDE(_win) (0x03C0 + ((_win) * 4))
+#define BLENDE_COEF_ZERO 0x0
+#define BLENDE_COEF_ONE 0x1
+#define BLENDE_COEF_ALPHA_A 0x2
+#define BLENDE_COEF_ONE_MINUS_ALPHA_A 0x3
+#define BLENDE_COEF_ALPHA_B 0x4
+#define BLENDE_COEF_ONE_MINUS_ALPHA_B 0x5
+#define BLENDE_COEF_ALPHA0 0x6
+#define BLENDE_COEF_A 0xA
+#define BLENDE_COEF_ONE_MINUS_A 0xB
+#define BLENDE_COEF_B 0xC
+#define BLENDE_COEF_ONE_MINUS_B 0xD
+#define BLENDE_Q_FUNC(_v) ((_v) << 18)
+#define BLENDE_P_FUNC(_v) ((_v) << 12)
+#define BLENDE_B_FUNC(_v) ((_v) << 6)
+#define BLENDE_A_FUNC(_v) ((_v) << 0)
+
+/* Blending equation control */
+#define BLENDCON 0x3D8
+#define BLENDCON_NEW_MASK (1 << 0)
+#define BLENDCON_NEW_8BIT_ALPHA_VALUE (1 << 0)
+#define BLENDCON_NEW_4BIT_ALPHA_VALUE (0 << 0)
+
+/* Interrupt control register */
+#define VIDINTCON0 0x500
+
+#define VIDINTCON0_WAKEUP_MASK (0x3f << 26)
+#define VIDINTCON0_INTEXTRAEN (1 << 21)
+
+#define VIDINTCON0_FRAMESEL0_SHIFT 15
+#define VIDINTCON0_FRAMESEL0_MASK (0x3 << 15)
+#define VIDINTCON0_FRAMESEL0_BACKPORCH (0x0 << 15)
+#define VIDINTCON0_FRAMESEL0_VSYNC (0x1 << 15)
+#define VIDINTCON0_FRAMESEL0_ACTIVE (0x2 << 15)
+#define VIDINTCON0_FRAMESEL0_FRONTPORCH (0x3 << 15)
+
+#define VIDINTCON0_INT_FRAME (1 << 11)
+
+#define VIDINTCON0_FIFOLEVEL_MASK (0x7 << 3)
+#define VIDINTCON0_FIFOLEVEL_SHIFT 3
+#define VIDINTCON0_FIFOLEVEL_EMPTY (0x0 << 3)
+#define VIDINTCON0_FIFOLEVEL_TO25PC (0x1 << 3)
+#define VIDINTCON0_FIFOLEVEL_TO50PC (0x2 << 3)
+#define VIDINTCON0_FIFOLEVEL_FULL (0x4 << 3)
+
+#define VIDINTCON0_FIFOSEL_MAIN_EN (1 << 1)
+#define VIDINTCON0_INT_FIFO (1 << 1)
+
+#define VIDINTCON0_INT_ENABLE (1 << 0)
+
+/* Interrupt controls and status register */
+#define VIDINTCON1 0x504
+
+#define VIDINTCON1_INT_EXTRA (1 << 3)
+#define VIDINTCON1_INT_I80 (1 << 2)
+#define VIDINTCON1_INT_FRAME (1 << 1)
+#define VIDINTCON1_INT_FIFO (1 << 0)
+
+/* VIDCON1 */
+#define VIDCON1(_x) (0x0600 + ((_x) * 0x50))
+#define VIDCON1_LINECNT_GET(_v) (((_v) >> 17) & 0x1fff)
+#define VIDCON1_VCLK_MASK (0x3 << 9)
+#define VIDCON1_VCLK_HOLD (0x0 << 9)
+#define VIDCON1_VCLK_RUN (0x1 << 9)
+#define VIDCON1_VCLK_RUN_VDEN_DISABLE (0x3 << 9)
+#define VIDCON1_RGB_ORDER_O_MASK (0x7 << 4)
+#define VIDCON1_RGB_ORDER_O_RGB (0x0 << 4)
+#define VIDCON1_RGB_ORDER_O_GBR (0x1 << 4)
+#define VIDCON1_RGB_ORDER_O_BRG (0x2 << 4)
+#define VIDCON1_RGB_ORDER_O_BGR (0x4 << 4)
+#define VIDCON1_RGB_ORDER_O_RBG (0x5 << 4)
+#define VIDCON1_RGB_ORDER_O_GRB (0x6 << 4)
+
+/* VIDTCON0 */
+#define VIDTCON0 0x610
+
+#define VIDTCON0_VBPD_MASK (0xffff << 16)
+#define VIDTCON0_VBPD_SHIFT 16
+#define VIDTCON0_VBPD_LIMIT 0xffff
+#define VIDTCON0_VBPD(_x) ((_x) << 16)
+
+#define VIDTCON0_VFPD_MASK (0xffff << 0)
+#define VIDTCON0_VFPD_SHIFT 0
+#define VIDTCON0_VFPD_LIMIT 0xffff
+#define VIDTCON0_VFPD(_x) ((_x) << 0)
+
+/* VIDTCON1 */
+#define VIDTCON1 0x614
+
+#define VIDTCON1_VSPW_MASK (0xffff << 16)
+#define VIDTCON1_VSPW_SHIFT 16
+#define VIDTCON1_VSPW_LIMIT 0xffff
+#define VIDTCON1_VSPW(_x) ((_x) << 16)
+
+/* VIDTCON2 */
+#define VIDTCON2 0x618
+
+#define VIDTCON2_HBPD_MASK (0xffff << 16)
+#define VIDTCON2_HBPD_SHIFT 16
+#define VIDTCON2_HBPD_LIMIT 0xffff
+#define VIDTCON2_HBPD(_x) ((_x) << 16)
+
+#define VIDTCON2_HFPD_MASK (0xffff << 0)
+#define VIDTCON2_HFPD_SHIFT 0
+#define VIDTCON2_HFPD_LIMIT 0xffff
+#define VIDTCON2_HFPD(_x) ((_x) << 0)
+
+/* VIDTCON3 */
+#define VIDTCON3 0x61C
+
+#define VIDTCON3_HSPW_MASK (0xffff << 16)
+#define VIDTCON3_HSPW_SHIFT 16
+#define VIDTCON3_HSPW_LIMIT 0xffff
+#define VIDTCON3_HSPW(_x) ((_x) << 16)
+
+/* VIDTCON4 */
+#define VIDTCON4 0x620
+
+#define VIDTCON4_LINEVAL_MASK (0xfff << 16)
+#define VIDTCON4_LINEVAL_SHIFT 16
+#define VIDTCON4_LINEVAL_LIMIT 0xfff
+#define VIDTCON4_LINEVAL(_x) (((_x) & 0xfff) << 16)
+
+#define VIDTCON4_HOZVAL_MASK (0xfff << 0)
+#define VIDTCON4_HOZVAL_SHIFT 0
+#define VIDTCON4_HOZVAL_LIMIT 0xfff
+#define VIDTCON4_HOZVAL(_x) (((_x) & 0xfff) << 0)
+
+/* LINECNT OP THRESHOLD */
+#define LINECNT_OP_THRESHOLD 0x630
+
+/* CRCCTRL */
+#define CRCCTRL 0x6C8
+#define CRCCTRL_CRCCLKEN (0x1 << 2)
+#define CRCCTRL_CRCSTART_F (0x1 << 1)
+#define CRCCTRL_CRCEN (0x1 << 0)
+
+/* DECON_CMU */
+#define DECON_CMU 0x704
+
+#define DECON_CMU_ALL_CLKGATE_ENABLE 0x3
+#define DECON_CMU_SE_CLKGATE_ENABLE (0x1 << 2)
+#define DECON_CMU_SFR_CLKGATE_ENABLE (0x1 << 1)
+#define DECON_CMU_MEM_CLKGATE_ENABLE (0x1 << 0)
+
+/* DECON_UPDATE */
+#define DECON_UPDATE 0x710
+
+#define DECON_UPDATE_SLAVE_SYNC (1 << 4)
+#define DECON_UPDATE_STANDALONE_F (1 << 0)
+
+#endif /* EXYNOS_REGS_DECON7_H */
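For orientation, a hedged sketch of how the vertical timing macros above are meant to be filled from a struct drm_display_mode; `mode` and `regs` are assumptions for illustration, only the VIDTCON* names come from this header.

	/* Illustrative only: pack DECON7 vertical porch and sync widths. */
	u32 vbpd = mode->vtotal - mode->vsync_end;	/* back porch  */
	u32 vfpd = mode->vsync_start - mode->vdisplay;	/* front porch */
	u32 vspw = mode->vsync_end - mode->vsync_start;	/* sync width  */

	writel(VIDTCON0_VBPD(vbpd) | VIDTCON0_VFPD(vfpd), regs + VIDTCON0);
	writel(VIDTCON1_VSPW(vspw), regs + VIDTCON1);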
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index 2570c7f647a6..cb0a2ae916e0 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -576,13 +576,6 @@ static void psb_fbdev_fini(struct drm_device *dev)
dev_priv->fbdev = NULL;
}
-static void psbfb_output_poll_changed(struct drm_device *dev)
-{
- struct drm_psb_private *dev_priv = dev->dev_private;
- struct psb_fbdev *fbdev = (struct psb_fbdev *)dev_priv->fbdev;
- drm_fb_helper_hotplug_event(&fbdev->psb_fb_helper);
-}
-
/**
 * psb_user_framebuffer_create_handle - add handle to a framebuffer
* @fb: framebuffer
@@ -623,7 +616,7 @@ static void psb_user_framebuffer_destroy(struct drm_framebuffer *fb)
static const struct drm_mode_config_funcs psb_mode_funcs = {
.fb_create = psb_user_framebuffer_create,
- .output_poll_changed = psbfb_output_poll_changed,
+ .output_poll_changed = drm_fb_helper_output_poll_changed,
};
static void psb_setup_outputs(struct drm_device *dev)
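The open-coded psbfb_output_poll_changed() is dropped in favour of the core helper; as a rough sketch of why that is equivalent (shape assumed here, not quoted from the DRM core), the generic callback forwards the hotplug event through the fb_helper pointer the core already tracks:

	void drm_fb_helper_output_poll_changed(struct drm_device *dev)
	{
		drm_fb_helper_hotplug_event(dev->fb_helper);
	}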
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index 8f5cc1f471cd..38d09d4b3ed5 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -107,19 +107,6 @@ MODULE_DEVICE_TABLE(pci, pciidlist);
static const struct drm_ioctl_desc psb_ioctls[] = {
};
-static void psb_driver_lastclose(struct drm_device *dev)
-{
- int ret;
- struct drm_psb_private *dev_priv = dev->dev_private;
- struct psb_fbdev *fbdev = dev_priv->fbdev;
-
- ret = drm_fb_helper_restore_fbdev_mode_unlocked(&fbdev->psb_fb_helper);
- if (ret)
- DRM_DEBUG("failed to restore crtc mode\n");
-
- return;
-}
-
static int psb_do_init(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
@@ -479,7 +466,7 @@ static struct drm_driver driver = {
DRIVER_MODESET | DRIVER_GEM,
.load = psb_driver_load,
.unload = psb_driver_unload,
- .lastclose = psb_driver_lastclose,
+ .lastclose = drm_fb_helper_lastclose,
.num_ioctls = ARRAY_SIZE(psb_ioctls),
.irq_preinstall = psb_irq_preinstall,
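psb_driver_lastclose() goes the same way; a hedged sketch of the generic lastclose helper it now delegates to (shape assumed, not quoted from the DRM core):

	void drm_fb_helper_lastclose(struct drm_device *dev)
	{
		drm_fb_helper_restore_fbdev_mode_unlocked(dev->fb_helper);
	}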
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
index ab4ee5953615..8516e005643f 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
@@ -223,9 +223,10 @@ static struct ttm_tt *hibmc_ttm_tt_create(struct ttm_bo_device *bdev,
return tt;
}
-static int hibmc_ttm_tt_populate(struct ttm_tt *ttm)
+static int hibmc_ttm_tt_populate(struct ttm_tt *ttm,
+ struct ttm_operation_ctx *ctx)
{
- return ttm_pool_populate(ttm);
+ return ttm_pool_populate(ttm, ctx);
}
static void hibmc_ttm_tt_unpopulate(struct ttm_tt *ttm)
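The same TTM interface change recurs across the drivers in this series: tt_populate callbacks now take a struct ttm_operation_ctx and thread it into the pool allocator. A hedged sketch of the resulting callback shape (the driver name is a placeholder):

	static int foo_ttm_tt_populate(struct ttm_tt *ttm,
				       struct ttm_operation_ctx *ctx)
	{
		/* ctx carries the interruptible/no_wait_gpu policy into the pool */
		return ttm_pool_populate(ttm, ctx);
	}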
diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug
index fa36491495b1..108d21f34777 100644
--- a/drivers/gpu/drm/i915/Kconfig.debug
+++ b/drivers/gpu/drm/i915/Kconfig.debug
@@ -29,7 +29,6 @@ config DRM_I915_DEBUG
select SW_SYNC # signaling validation framework (igt/syncobj*)
select DRM_I915_SW_FENCE_DEBUG_OBJECTS
select DRM_I915_SELFTEST
- select DRM_I915_TRACE_GEM
default n
help
Choose this option to turn on extra driver debugging that may affect
@@ -53,6 +52,7 @@ config DRM_I915_DEBUG_GEM
config DRM_I915_TRACE_GEM
bool "Insert extra ftrace output from the GEM internals"
+ depends on DRM_I915_DEBUG_GEM
select TRACING
default n
help
diff --git a/drivers/gpu/drm/i915/gvt/Makefile b/drivers/gpu/drm/i915/gvt/Makefile
index 18e1c172e792..347116faa558 100644
--- a/drivers/gpu/drm/i915/gvt/Makefile
+++ b/drivers/gpu/drm/i915/gvt/Makefile
@@ -2,7 +2,8 @@
GVT_DIR := gvt
GVT_SOURCE := gvt.o aperture_gm.o handlers.o vgpu.o trace_points.o firmware.o \
interrupt.o gtt.o cfg_space.o opregion.o mmio.o display.o edid.o \
- execlist.o scheduler.o sched_policy.o render.o cmd_parser.o debugfs.o
+ execlist.o scheduler.o sched_policy.o mmio_context.o cmd_parser.o debugfs.o \
+ fb_decoder.o dmabuf.o
ccflags-y += -I$(src) -I$(src)/$(GVT_DIR)
i915-y += $(addprefix $(GVT_DIR)/, $(GVT_SOURCE))
diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c
index 4ce2e6bd0680..97bfc00d2a82 100644
--- a/drivers/gpu/drm/i915/gvt/cfg_space.c
+++ b/drivers/gpu/drm/i915/gvt/cfg_space.c
@@ -335,7 +335,8 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
case INTEL_GVT_PCI_OPREGION:
if (WARN_ON(!IS_ALIGNED(offset, 4)))
return -EINVAL;
- ret = intel_vgpu_init_opregion(vgpu, *(u32 *)p_data);
+ ret = intel_vgpu_opregion_base_write_handler(vgpu,
+ *(u32 *)p_data);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 18c45734c7a2..edec15d19538 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -825,6 +825,21 @@ static int force_nonpriv_reg_handler(struct parser_exec_state *s,
return 0;
}
+static inline bool is_mocs_mmio(unsigned int offset)
+{
+ return ((offset >= 0xc800) && (offset <= 0xcff8)) ||
+ ((offset >= 0xb020) && (offset <= 0xb0a0));
+}
+
+static int mocs_cmd_reg_handler(struct parser_exec_state *s,
+ unsigned int offset, unsigned int index)
+{
+ if (!is_mocs_mmio(offset))
+ return -EINVAL;
+ vgpu_vreg(s->vgpu, offset) = cmd_val(s, index + 1);
+ return 0;
+}
+
static int cmd_reg_handler(struct parser_exec_state *s,
unsigned int offset, unsigned int index, char *cmd)
{
@@ -848,6 +863,10 @@ static int cmd_reg_handler(struct parser_exec_state *s,
return 0;
}
+ if (is_mocs_mmio(offset) &&
+ mocs_cmd_reg_handler(s, offset, index))
+ return -EINVAL;
+
if (is_force_nonpriv_mmio(offset) &&
force_nonpriv_reg_handler(s, offset, index))
return -EPERM;
@@ -1220,13 +1239,13 @@ static int gen8_check_mi_display_flip(struct parser_exec_state *s,
return 0;
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
- stride = vgpu_vreg(s->vgpu, info->stride_reg) & GENMASK(9, 0);
- tile = (vgpu_vreg(s->vgpu, info->ctrl_reg) &
+ stride = vgpu_vreg_t(s->vgpu, info->stride_reg) & GENMASK(9, 0);
+ tile = (vgpu_vreg_t(s->vgpu, info->ctrl_reg) &
GENMASK(12, 10)) >> 10;
} else {
- stride = (vgpu_vreg(s->vgpu, info->stride_reg) &
+ stride = (vgpu_vreg_t(s->vgpu, info->stride_reg) &
GENMASK(15, 6)) >> 6;
- tile = (vgpu_vreg(s->vgpu, info->ctrl_reg) & (1 << 10)) >> 10;
+ tile = (vgpu_vreg_t(s->vgpu, info->ctrl_reg) & (1 << 10)) >> 10;
}
if (stride != info->stride_val)
@@ -1245,21 +1264,21 @@ static int gen8_update_plane_mmio_from_mi_display_flip(
struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
struct intel_vgpu *vgpu = s->vgpu;
- set_mask_bits(&vgpu_vreg(vgpu, info->surf_reg), GENMASK(31, 12),
+ set_mask_bits(&vgpu_vreg_t(vgpu, info->surf_reg), GENMASK(31, 12),
info->surf_val << 12);
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
- set_mask_bits(&vgpu_vreg(vgpu, info->stride_reg), GENMASK(9, 0),
+ set_mask_bits(&vgpu_vreg_t(vgpu, info->stride_reg), GENMASK(9, 0),
info->stride_val);
- set_mask_bits(&vgpu_vreg(vgpu, info->ctrl_reg), GENMASK(12, 10),
+ set_mask_bits(&vgpu_vreg_t(vgpu, info->ctrl_reg), GENMASK(12, 10),
info->tile_val << 10);
} else {
- set_mask_bits(&vgpu_vreg(vgpu, info->stride_reg), GENMASK(15, 6),
+ set_mask_bits(&vgpu_vreg_t(vgpu, info->stride_reg), GENMASK(15, 6),
info->stride_val << 6);
- set_mask_bits(&vgpu_vreg(vgpu, info->ctrl_reg), GENMASK(10, 10),
+ set_mask_bits(&vgpu_vreg_t(vgpu, info->ctrl_reg), GENMASK(10, 10),
info->tile_val << 10);
}
- vgpu_vreg(vgpu, PIPE_FRMCOUNT_G4X(info->pipe))++;
+ vgpu_vreg_t(vgpu, PIPE_FRMCOUNT_G4X(info->pipe))++;
intel_vgpu_trigger_virtual_event(vgpu, info->event);
return 0;
}
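Most of the mechanical churn in this and the following GVT files is the switch from vgpu_vreg() to vgpu_vreg_t() wherever the argument is an i915_reg_t handle rather than a raw offset. A hedged sketch of the accessor pair this conversion assumes in gvt.h (exact definitions may differ):

	/* raw-offset accessor, unchanged */
	#define vgpu_vreg(vgpu, offset) \
		(*(u32 *)(vgpu->mmio.vreg + (offset)))

	/* typed accessor taking an i915_reg_t */
	#define vgpu_vreg_t(vgpu, reg) \
		vgpu_vreg(vgpu, i915_mmio_reg_offset(reg))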
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index 355120865efd..dd96ffc878ac 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -59,7 +59,7 @@ static int edp_pipe_is_enabled(struct intel_vgpu *vgpu)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
- if (!(vgpu_vreg(vgpu, PIPECONF(_PIPE_EDP)) & PIPECONF_ENABLE))
+ if (!(vgpu_vreg_t(vgpu, PIPECONF(_PIPE_EDP)) & PIPECONF_ENABLE))
return 0;
if (!(vgpu_vreg(vgpu, _TRANS_DDI_FUNC_CTL_EDP) & TRANS_DDI_FUNC_ENABLE))
@@ -67,14 +67,14 @@ static int edp_pipe_is_enabled(struct intel_vgpu *vgpu)
return 1;
}
-static int pipe_is_enabled(struct intel_vgpu *vgpu, int pipe)
+int pipe_is_enabled(struct intel_vgpu *vgpu, int pipe)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
if (WARN_ON(pipe < PIPE_A || pipe >= I915_MAX_PIPES))
return -EINVAL;
- if (vgpu_vreg(vgpu, PIPECONF(pipe)) & PIPECONF_ENABLE)
+ if (vgpu_vreg_t(vgpu, PIPECONF(pipe)) & PIPECONF_ENABLE)
return 1;
if (edp_pipe_is_enabled(vgpu) &&
@@ -169,103 +169,105 @@ static u8 dpcd_fix_data[DPCD_HEADER_SIZE] = {
static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
- vgpu_vreg(vgpu, SDEISR) &= ~(SDE_PORTB_HOTPLUG_CPT |
+ vgpu_vreg_t(vgpu, SDEISR) &= ~(SDE_PORTB_HOTPLUG_CPT |
SDE_PORTC_HOTPLUG_CPT |
SDE_PORTD_HOTPLUG_CPT);
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
- vgpu_vreg(vgpu, SDEISR) &= ~(SDE_PORTA_HOTPLUG_SPT |
+ vgpu_vreg_t(vgpu, SDEISR) &= ~(SDE_PORTA_HOTPLUG_SPT |
SDE_PORTE_HOTPLUG_SPT);
- vgpu_vreg(vgpu, SKL_FUSE_STATUS) |=
+ vgpu_vreg_t(vgpu, SKL_FUSE_STATUS) |=
SKL_FUSE_DOWNLOAD_STATUS |
SKL_FUSE_PG_DIST_STATUS(SKL_PG0) |
SKL_FUSE_PG_DIST_STATUS(SKL_PG1) |
SKL_FUSE_PG_DIST_STATUS(SKL_PG2);
- vgpu_vreg(vgpu, LCPLL1_CTL) |=
+ vgpu_vreg_t(vgpu, LCPLL1_CTL) |=
LCPLL_PLL_ENABLE |
LCPLL_PLL_LOCK;
- vgpu_vreg(vgpu, LCPLL2_CTL) |= LCPLL_PLL_ENABLE;
+ vgpu_vreg_t(vgpu, LCPLL2_CTL) |= LCPLL_PLL_ENABLE;
}
if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) {
- vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIB_DETECTED;
- vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &=
+ vgpu_vreg_t(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIB_DETECTED;
+ vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &=
~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
TRANS_DDI_PORT_MASK);
- vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
+ vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
(PORT_B << TRANS_DDI_PORT_SHIFT) |
TRANS_DDI_FUNC_ENABLE);
if (IS_BROADWELL(dev_priv)) {
- vgpu_vreg(vgpu, PORT_CLK_SEL(PORT_B)) &=
+ vgpu_vreg_t(vgpu, PORT_CLK_SEL(PORT_B)) &=
~PORT_CLK_SEL_MASK;
- vgpu_vreg(vgpu, PORT_CLK_SEL(PORT_B)) |=
+ vgpu_vreg_t(vgpu, PORT_CLK_SEL(PORT_B)) |=
PORT_CLK_SEL_LCPLL_810;
}
- vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_B)) |= DDI_BUF_CTL_ENABLE;
- vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_B)) &= ~DDI_BUF_IS_IDLE;
- vgpu_vreg(vgpu, SDEISR) |= SDE_PORTB_HOTPLUG_CPT;
+ vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_B)) |= DDI_BUF_CTL_ENABLE;
+ vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_B)) &= ~DDI_BUF_IS_IDLE;
+ vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTB_HOTPLUG_CPT;
}
if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) {
- vgpu_vreg(vgpu, SDEISR) |= SDE_PORTC_HOTPLUG_CPT;
- vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &=
+ vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTC_HOTPLUG_CPT;
+ vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &=
~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
TRANS_DDI_PORT_MASK);
- vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
+ vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
(PORT_C << TRANS_DDI_PORT_SHIFT) |
TRANS_DDI_FUNC_ENABLE);
if (IS_BROADWELL(dev_priv)) {
- vgpu_vreg(vgpu, PORT_CLK_SEL(PORT_C)) &=
+ vgpu_vreg_t(vgpu, PORT_CLK_SEL(PORT_C)) &=
~PORT_CLK_SEL_MASK;
- vgpu_vreg(vgpu, PORT_CLK_SEL(PORT_C)) |=
+ vgpu_vreg_t(vgpu, PORT_CLK_SEL(PORT_C)) |=
PORT_CLK_SEL_LCPLL_810;
}
- vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_C)) |= DDI_BUF_CTL_ENABLE;
- vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_C)) &= ~DDI_BUF_IS_IDLE;
- vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIC_DETECTED;
+ vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_C)) |= DDI_BUF_CTL_ENABLE;
+ vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_C)) &= ~DDI_BUF_IS_IDLE;
+ vgpu_vreg_t(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIC_DETECTED;
}
if (intel_vgpu_has_monitor_on_port(vgpu, PORT_D)) {
- vgpu_vreg(vgpu, SDEISR) |= SDE_PORTD_HOTPLUG_CPT;
- vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &=
+ vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTD_HOTPLUG_CPT;
+ vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &=
~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
TRANS_DDI_PORT_MASK);
- vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
+ vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
(PORT_D << TRANS_DDI_PORT_SHIFT) |
TRANS_DDI_FUNC_ENABLE);
if (IS_BROADWELL(dev_priv)) {
- vgpu_vreg(vgpu, PORT_CLK_SEL(PORT_D)) &=
+ vgpu_vreg_t(vgpu, PORT_CLK_SEL(PORT_D)) &=
~PORT_CLK_SEL_MASK;
- vgpu_vreg(vgpu, PORT_CLK_SEL(PORT_D)) |=
+ vgpu_vreg_t(vgpu, PORT_CLK_SEL(PORT_D)) |=
PORT_CLK_SEL_LCPLL_810;
}
- vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_D)) |= DDI_BUF_CTL_ENABLE;
- vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_D)) &= ~DDI_BUF_IS_IDLE;
- vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDID_DETECTED;
+ vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_D)) |= DDI_BUF_CTL_ENABLE;
+ vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_D)) &= ~DDI_BUF_IS_IDLE;
+ vgpu_vreg_t(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDID_DETECTED;
}
if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
intel_vgpu_has_monitor_on_port(vgpu, PORT_E)) {
- vgpu_vreg(vgpu, SDEISR) |= SDE_PORTE_HOTPLUG_SPT;
+ vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTE_HOTPLUG_SPT;
}
if (intel_vgpu_has_monitor_on_port(vgpu, PORT_A)) {
if (IS_BROADWELL(dev_priv))
- vgpu_vreg(vgpu, GEN8_DE_PORT_ISR) |=
+ vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
GEN8_PORT_DP_A_HOTPLUG;
else
- vgpu_vreg(vgpu, SDEISR) |= SDE_PORTA_HOTPLUG_SPT;
+ vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTA_HOTPLUG_SPT;
- vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_A)) |= DDI_INIT_DISPLAY_DETECTED;
+ vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_A)) |= DDI_INIT_DISPLAY_DETECTED;
}
/* Clear host CRT status, so the guest can't detect this host CRT. */
if (IS_BROADWELL(dev_priv))
- vgpu_vreg(vgpu, PCH_ADPA) &= ~ADPA_CRT_HOTPLUG_MONITOR_MASK;
+ vgpu_vreg_t(vgpu, PCH_ADPA) &= ~ADPA_CRT_HOTPLUG_MONITOR_MASK;
+
+ vgpu_vreg_t(vgpu, PIPECONF(PIPE_A)) |= PIPECONF_ENABLE;
}
static void clean_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num)
@@ -282,7 +284,6 @@ static void clean_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num)
static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num,
int type, unsigned int resolution)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num);
if (WARN_ON(resolution >= GVT_EDID_NUM))
@@ -308,7 +309,7 @@ static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num,
port->type = type;
emulate_monitor_status_change(vgpu);
- vgpu_vreg(vgpu, PIPECONF(PIPE_A)) |= PIPECONF_ENABLE;
+
return 0;
}
@@ -368,12 +369,12 @@ static void emulate_vblank_on_pipe(struct intel_vgpu *vgpu, int pipe)
if (!pipe_is_enabled(vgpu, pipe))
continue;
- vgpu_vreg(vgpu, PIPE_FLIPCOUNT_G4X(pipe))++;
+ vgpu_vreg_t(vgpu, PIPE_FLIPCOUNT_G4X(pipe))++;
intel_vgpu_trigger_virtual_event(vgpu, event);
}
if (pipe_is_enabled(vgpu, pipe)) {
- vgpu_vreg(vgpu, PIPE_FRMCOUNT_G4X(pipe))++;
+ vgpu_vreg_t(vgpu, PIPE_FRMCOUNT_G4X(pipe))++;
intel_vgpu_trigger_virtual_event(vgpu, vblank_event[pipe]);
}
}
diff --git a/drivers/gpu/drm/i915/gvt/display.h b/drivers/gpu/drm/i915/gvt/display.h
index d73de22102e2..b46b86892d58 100644
--- a/drivers/gpu/drm/i915/gvt/display.h
+++ b/drivers/gpu/drm/i915/gvt/display.h
@@ -179,4 +179,6 @@ int intel_vgpu_init_display(struct intel_vgpu *vgpu, u64 resolution);
void intel_vgpu_reset_display(struct intel_vgpu *vgpu);
void intel_vgpu_clean_display(struct intel_vgpu *vgpu);
+int pipe_is_enabled(struct intel_vgpu *vgpu, int pipe);
+
#endif
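pipe_is_enabled() loses its static qualifier because the framebuffer decoder added later in this series needs it; a hedged sketch of the expected consumer (the function name is assumed from the decoder below):

	/* e.g. in fb_decoder.c: find the pipe the decoded plane belongs to */
	static int get_active_pipe(struct intel_vgpu *vgpu)
	{
		int i;

		for (i = 0; i < I915_MAX_PIPES; i++)
			if (pipe_is_enabled(vgpu, i))
				break;

		return i;
	}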
diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c
new file mode 100644
index 000000000000..2ab584f97dfb
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/dmabuf.c
@@ -0,0 +1,537 @@
+/*
+ * Copyright 2017 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Zhiyuan Lv <zhiyuan.lv@intel.com>
+ *
+ * Contributors:
+ * Xiaoguang Chen
+ * Tina Zhang <tina.zhang@intel.com>
+ */
+
+#include <linux/dma-buf.h>
+#include <drm/drmP.h>
+#include <linux/vfio.h>
+
+#include "i915_drv.h"
+#include "gvt.h"
+
+#define GEN8_DECODE_PTE(pte) (pte & GENMASK_ULL(63, 12))
+
+static int vgpu_gem_get_pages(
+ struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+ struct sg_table *st;
+ struct scatterlist *sg;
+ int i, ret;
+ gen8_pte_t __iomem *gtt_entries;
+ struct intel_vgpu_fb_info *fb_info;
+
+ fb_info = (struct intel_vgpu_fb_info *)obj->gvt_info;
+ if (WARN_ON(!fb_info))
+ return -ENODEV;
+
+ st = kmalloc(sizeof(*st), GFP_KERNEL);
+ if (unlikely(!st))
+ return -ENOMEM;
+
+ ret = sg_alloc_table(st, fb_info->size, GFP_KERNEL);
+ if (ret) {
+ kfree(st);
+ return ret;
+ }
+ gtt_entries = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm +
+ (fb_info->start >> PAGE_SHIFT);
+ for_each_sg(st->sgl, sg, fb_info->size, i) {
+ sg->offset = 0;
+ sg->length = PAGE_SIZE;
+ sg_dma_address(sg) =
+ GEN8_DECODE_PTE(readq(&gtt_entries[i]));
+ sg_dma_len(sg) = PAGE_SIZE;
+ }
+
+ __i915_gem_object_set_pages(obj, st, PAGE_SIZE);
+
+ return 0;
+}
+
+static void vgpu_gem_put_pages(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
+{
+ sg_free_table(pages);
+ kfree(pages);
+}
+
+static void dmabuf_gem_object_free(struct kref *kref)
+{
+ struct intel_vgpu_dmabuf_obj *obj =
+ container_of(kref, struct intel_vgpu_dmabuf_obj, kref);
+ struct intel_vgpu *vgpu = obj->vgpu;
+ struct list_head *pos;
+ struct intel_vgpu_dmabuf_obj *dmabuf_obj;
+
+ if (vgpu && vgpu->active && !list_empty(&vgpu->dmabuf_obj_list_head)) {
+ list_for_each(pos, &vgpu->dmabuf_obj_list_head) {
+ dmabuf_obj = container_of(pos,
+ struct intel_vgpu_dmabuf_obj, list);
+ if (dmabuf_obj == obj) {
+ intel_gvt_hypervisor_put_vfio_device(vgpu);
+ idr_remove(&vgpu->object_idr,
+ dmabuf_obj->dmabuf_id);
+ kfree(dmabuf_obj->info);
+ kfree(dmabuf_obj);
+ list_del(pos);
+ break;
+ }
+ }
+ } else {
+ /* Free the orphan dmabuf_objs here */
+ kfree(obj->info);
+ kfree(obj);
+ }
+}
+
+
+static inline void dmabuf_obj_get(struct intel_vgpu_dmabuf_obj *obj)
+{
+ kref_get(&obj->kref);
+}
+
+static inline void dmabuf_obj_put(struct intel_vgpu_dmabuf_obj *obj)
+{
+ kref_put(&obj->kref, dmabuf_gem_object_free);
+}
+
+static void vgpu_gem_release(struct drm_i915_gem_object *gem_obj)
+{
+
+ struct intel_vgpu_fb_info *fb_info = gem_obj->gvt_info;
+ struct intel_vgpu_dmabuf_obj *obj = fb_info->obj;
+ struct intel_vgpu *vgpu = obj->vgpu;
+
+ if (vgpu) {
+ mutex_lock(&vgpu->dmabuf_lock);
+ gem_obj->base.dma_buf = NULL;
+ dmabuf_obj_put(obj);
+ mutex_unlock(&vgpu->dmabuf_lock);
+ } else {
+ /* vgpu is NULL, as it has been removed already */
+ gem_obj->base.dma_buf = NULL;
+ dmabuf_obj_put(obj);
+ }
+}
+
+static const struct drm_i915_gem_object_ops intel_vgpu_gem_ops = {
+ .flags = I915_GEM_OBJECT_IS_PROXY,
+ .get_pages = vgpu_gem_get_pages,
+ .put_pages = vgpu_gem_put_pages,
+ .release = vgpu_gem_release,
+};
+
+static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,
+ struct intel_vgpu_fb_info *info)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_gem_object *obj;
+
+ obj = i915_gem_object_alloc(dev_priv);
+ if (obj == NULL)
+ return NULL;
+
+ drm_gem_private_object_init(dev, &obj->base,
+ info->size << PAGE_SHIFT);
+ i915_gem_object_init(obj, &intel_vgpu_gem_ops);
+
+ obj->base.read_domains = I915_GEM_DOMAIN_GTT;
+ obj->base.write_domain = 0;
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+ unsigned int tiling_mode = 0;
+ unsigned int stride = 0;
+
+ switch (info->drm_format_mod << 10) {
+ case PLANE_CTL_TILED_LINEAR:
+ tiling_mode = I915_TILING_NONE;
+ break;
+ case PLANE_CTL_TILED_X:
+ tiling_mode = I915_TILING_X;
+ stride = info->stride;
+ break;
+ case PLANE_CTL_TILED_Y:
+ tiling_mode = I915_TILING_Y;
+ stride = info->stride;
+ break;
+ default:
+ gvt_dbg_core("not supported tiling mode\n");
+ }
+ obj->tiling_and_stride = tiling_mode | stride;
+ } else {
+ obj->tiling_and_stride = info->drm_format_mod ?
+ I915_TILING_X : 0;
+ }
+
+ return obj;
+}
+
+static int vgpu_get_plane_info(struct drm_device *dev,
+ struct intel_vgpu *vgpu,
+ struct intel_vgpu_fb_info *info,
+ int plane_id)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_vgpu_primary_plane_format p;
+ struct intel_vgpu_cursor_plane_format c;
+ int ret;
+
+ if (plane_id == DRM_PLANE_TYPE_PRIMARY) {
+ ret = intel_vgpu_decode_primary_plane(vgpu, &p);
+ if (ret)
+ return ret;
+ info->start = p.base;
+ info->start_gpa = p.base_gpa;
+ info->width = p.width;
+ info->height = p.height;
+ info->stride = p.stride;
+ info->drm_format = p.drm_format;
+ info->drm_format_mod = p.tiled;
+ info->size = (((p.stride * p.height * p.bpp) / 8) +
+ (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+ } else if (plane_id == DRM_PLANE_TYPE_CURSOR) {
+ ret = intel_vgpu_decode_cursor_plane(vgpu, &c);
+ if (ret)
+ return ret;
+ info->start = c.base;
+ info->start_gpa = c.base_gpa;
+ info->width = c.width;
+ info->height = c.height;
+ info->stride = c.width * (c.bpp / 8);
+ info->drm_format = c.drm_format;
+ info->drm_format_mod = 0;
+ info->x_pos = c.x_pos;
+ info->y_pos = c.y_pos;
+
+ /* An invalid cursor hotspot value is delivered to the host
+ * until we find a way to get the cursor hotspot info of the
+ * guest OS.
+ */
+ info->x_hot = UINT_MAX;
+ info->y_hot = UINT_MAX;
+ info->size = (((info->stride * c.height * c.bpp) / 8)
+ + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+ } else {
+ gvt_vgpu_err("invalid plane id:%d\n", plane_id);
+ return -EINVAL;
+ }
+
+ if (info->size == 0) {
+ gvt_vgpu_err("fb size is zero\n");
+ return -EINVAL;
+ }
+
+ if (info->start & (PAGE_SIZE - 1)) {
+ gvt_vgpu_err("Not aligned fb address:0x%llx\n", info->start);
+ return -EFAULT;
+ }
+ if (((info->start >> PAGE_SHIFT) + info->size) >
+ ggtt_total_entries(&dev_priv->ggtt)) {
+ gvt_vgpu_err("Invalid GTT offset or size\n");
+ return -EFAULT;
+ }
+
+ if (!intel_gvt_ggtt_validate_range(vgpu, info->start, info->size)) {
+ gvt_vgpu_err("invalid gma addr\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static struct intel_vgpu_dmabuf_obj *
+pick_dmabuf_by_info(struct intel_vgpu *vgpu,
+ struct intel_vgpu_fb_info *latest_info)
+{
+ struct list_head *pos;
+ struct intel_vgpu_fb_info *fb_info;
+ struct intel_vgpu_dmabuf_obj *dmabuf_obj = NULL;
+ struct intel_vgpu_dmabuf_obj *ret = NULL;
+
+ list_for_each(pos, &vgpu->dmabuf_obj_list_head) {
+ dmabuf_obj = container_of(pos, struct intel_vgpu_dmabuf_obj,
+ list);
+ if ((dmabuf_obj == NULL) ||
+ (dmabuf_obj->info == NULL))
+ continue;
+
+ fb_info = (struct intel_vgpu_fb_info *)dmabuf_obj->info;
+ if ((fb_info->start == latest_info->start) &&
+ (fb_info->start_gpa == latest_info->start_gpa) &&
+ (fb_info->size == latest_info->size) &&
+ (fb_info->drm_format_mod == latest_info->drm_format_mod) &&
+ (fb_info->drm_format == latest_info->drm_format) &&
+ (fb_info->width == latest_info->width) &&
+ (fb_info->height == latest_info->height)) {
+ ret = dmabuf_obj;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static struct intel_vgpu_dmabuf_obj *
+pick_dmabuf_by_num(struct intel_vgpu *vgpu, u32 id)
+{
+ struct list_head *pos;
+ struct intel_vgpu_dmabuf_obj *dmabuf_obj = NULL;
+ struct intel_vgpu_dmabuf_obj *ret = NULL;
+
+ list_for_each(pos, &vgpu->dmabuf_obj_list_head) {
+ dmabuf_obj = container_of(pos, struct intel_vgpu_dmabuf_obj,
+ list);
+ if (!dmabuf_obj)
+ continue;
+
+ if (dmabuf_obj->dmabuf_id == id) {
+ ret = dmabuf_obj;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static void update_fb_info(struct vfio_device_gfx_plane_info *gvt_dmabuf,
+ struct intel_vgpu_fb_info *fb_info)
+{
+ gvt_dmabuf->drm_format = fb_info->drm_format;
+ gvt_dmabuf->width = fb_info->width;
+ gvt_dmabuf->height = fb_info->height;
+ gvt_dmabuf->stride = fb_info->stride;
+ gvt_dmabuf->size = fb_info->size;
+ gvt_dmabuf->x_pos = fb_info->x_pos;
+ gvt_dmabuf->y_pos = fb_info->y_pos;
+ gvt_dmabuf->x_hot = fb_info->x_hot;
+ gvt_dmabuf->y_hot = fb_info->y_hot;
+}
+
+int intel_vgpu_query_plane(struct intel_vgpu *vgpu, void *args)
+{
+ struct drm_device *dev = &vgpu->gvt->dev_priv->drm;
+ struct vfio_device_gfx_plane_info *gfx_plane_info = args;
+ struct intel_vgpu_dmabuf_obj *dmabuf_obj;
+ struct intel_vgpu_fb_info fb_info;
+ int ret = 0;
+
+ if (gfx_plane_info->flags == (VFIO_GFX_PLANE_TYPE_DMABUF |
+ VFIO_GFX_PLANE_TYPE_PROBE))
+ return ret;
+ else if ((gfx_plane_info->flags & ~VFIO_GFX_PLANE_TYPE_DMABUF) ||
+ (!gfx_plane_info->flags))
+ return -EINVAL;
+
+ ret = vgpu_get_plane_info(dev, vgpu, &fb_info,
+ gfx_plane_info->drm_plane_type);
+ if (ret != 0)
+ goto out;
+
+ mutex_lock(&vgpu->dmabuf_lock);
+ /* If one already exists, pick up the exposed dmabuf_obj */
+ dmabuf_obj = pick_dmabuf_by_info(vgpu, &fb_info);
+ if (dmabuf_obj) {
+ update_fb_info(gfx_plane_info, &fb_info);
+ gfx_plane_info->dmabuf_id = dmabuf_obj->dmabuf_id;
+
+ /* This buffer may be released between query_plane ioctl and
+ * get_dmabuf ioctl. Add the refcount to make sure it won't
+ * be released between the two ioctls.
+ */
+ if (!dmabuf_obj->initref) {
+ dmabuf_obj->initref = true;
+ dmabuf_obj_get(dmabuf_obj);
+ }
+ ret = 0;
+ gvt_dbg_dpy("vgpu%d: re-use dmabuf_obj ref %d, id %d\n",
+ vgpu->id, kref_read(&dmabuf_obj->kref),
+ gfx_plane_info->dmabuf_id);
+ mutex_unlock(&vgpu->dmabuf_lock);
+ goto out;
+ }
+
+ mutex_unlock(&vgpu->dmabuf_lock);
+
+ /* Need to allocate a new one */
+ dmabuf_obj = kmalloc(sizeof(struct intel_vgpu_dmabuf_obj), GFP_KERNEL);
+ if (unlikely(!dmabuf_obj)) {
+ gvt_vgpu_err("alloc dmabuf_obj failed\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ dmabuf_obj->info = kmalloc(sizeof(struct intel_vgpu_fb_info),
+ GFP_KERNEL);
+ if (unlikely(!dmabuf_obj->info)) {
+ gvt_vgpu_err("allocate intel vgpu fb info failed\n");
+ ret = -ENOMEM;
+ goto out_free_dmabuf;
+ }
+ memcpy(dmabuf_obj->info, &fb_info, sizeof(struct intel_vgpu_fb_info));
+
+ ((struct intel_vgpu_fb_info *)dmabuf_obj->info)->obj = dmabuf_obj;
+
+ dmabuf_obj->vgpu = vgpu;
+
+ ret = idr_alloc(&vgpu->object_idr, dmabuf_obj, 1, 0, GFP_NOWAIT);
+ if (ret < 0)
+ goto out_free_info;
+ gfx_plane_info->dmabuf_id = ret;
+ dmabuf_obj->dmabuf_id = ret;
+
+ dmabuf_obj->initref = true;
+
+ kref_init(&dmabuf_obj->kref);
+
+ mutex_lock(&vgpu->dmabuf_lock);
+ if (intel_gvt_hypervisor_get_vfio_device(vgpu)) {
+ gvt_vgpu_err("get vfio device failed\n");
+ mutex_unlock(&vgpu->dmabuf_lock);
+ goto out_free_info;
+ }
+ mutex_unlock(&vgpu->dmabuf_lock);
+
+ update_fb_info(gfx_plane_info, &fb_info);
+
+ INIT_LIST_HEAD(&dmabuf_obj->list);
+ mutex_lock(&vgpu->dmabuf_lock);
+ list_add_tail(&dmabuf_obj->list, &vgpu->dmabuf_obj_list_head);
+ mutex_unlock(&vgpu->dmabuf_lock);
+
+ gvt_dbg_dpy("vgpu%d: %s new dmabuf_obj ref %d, id %d\n", vgpu->id,
+ __func__, kref_read(&dmabuf_obj->kref), ret);
+
+ return 0;
+
+out_free_info:
+ kfree(dmabuf_obj->info);
+out_free_dmabuf:
+ kfree(dmabuf_obj);
+out:
+ /* ENODEV means plane isn't ready, which might be a normal case. */
+ return (ret == -ENODEV) ? 0 : ret;
+}
+
+/* To associate an exposed dmabuf with the dmabuf_obj */
+int intel_vgpu_get_dmabuf(struct intel_vgpu *vgpu, unsigned int dmabuf_id)
+{
+ struct drm_device *dev = &vgpu->gvt->dev_priv->drm;
+ struct intel_vgpu_dmabuf_obj *dmabuf_obj;
+ struct drm_i915_gem_object *obj;
+ struct dma_buf *dmabuf;
+ int dmabuf_fd;
+ int ret = 0;
+
+ mutex_lock(&vgpu->dmabuf_lock);
+
+ dmabuf_obj = pick_dmabuf_by_num(vgpu, dmabuf_id);
+ if (dmabuf_obj == NULL) {
+ gvt_vgpu_err("invalid dmabuf id:%d\n", dmabuf_id);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ obj = vgpu_create_gem(dev, dmabuf_obj->info);
+ if (obj == NULL) {
+ gvt_vgpu_err("create gvt gem obj failed:%d\n", vgpu->id);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ obj->gvt_info = dmabuf_obj->info;
+
+ dmabuf = i915_gem_prime_export(dev, &obj->base, DRM_CLOEXEC | DRM_RDWR);
+ if (IS_ERR(dmabuf)) {
+ gvt_vgpu_err("export dma-buf failed\n");
+ ret = PTR_ERR(dmabuf);
+ goto out_free_gem;
+ }
+ obj->base.dma_buf = dmabuf;
+
+ i915_gem_object_put(obj);
+
+ ret = dma_buf_fd(dmabuf, DRM_CLOEXEC | DRM_RDWR);
+ if (ret < 0) {
+ gvt_vgpu_err("create dma-buf fd failed ret:%d\n", ret);
+ goto out_free_dmabuf;
+ }
+ dmabuf_fd = ret;
+
+ dmabuf_obj_get(dmabuf_obj);
+
+ if (dmabuf_obj->initref) {
+ dmabuf_obj->initref = false;
+ dmabuf_obj_put(dmabuf_obj);
+ }
+
+ mutex_unlock(&vgpu->dmabuf_lock);
+
+ gvt_dbg_dpy("vgpu%d: dmabuf:%d, dmabuf ref %d, fd:%d\n"
+ " file count: %ld, GEM ref: %d\n",
+ vgpu->id, dmabuf_obj->dmabuf_id,
+ kref_read(&dmabuf_obj->kref),
+ dmabuf_fd,
+ file_count(dmabuf->file),
+ kref_read(&obj->base.refcount));
+
+ return dmabuf_fd;
+
+out_free_dmabuf:
+ dma_buf_put(dmabuf);
+out_free_gem:
+ i915_gem_object_put(obj);
+out:
+ mutex_unlock(&vgpu->dmabuf_lock);
+ return ret;
+}
+
+void intel_vgpu_dmabuf_cleanup(struct intel_vgpu *vgpu)
+{
+ struct list_head *pos, *n;
+ struct intel_vgpu_dmabuf_obj *dmabuf_obj;
+
+ mutex_lock(&vgpu->dmabuf_lock);
+ list_for_each_safe(pos, n, &vgpu->dmabuf_obj_list_head) {
+ dmabuf_obj = container_of(pos, struct intel_vgpu_dmabuf_obj,
+ list);
+ dmabuf_obj->vgpu = NULL;
+
+ idr_remove(&vgpu->object_idr, dmabuf_obj->dmabuf_id);
+ intel_gvt_hypervisor_put_vfio_device(vgpu);
+ list_del(pos);
+
+ /* dmabuf_obj might be freed in dmabuf_obj_put */
+ if (dmabuf_obj->initref) {
+ dmabuf_obj->initref = false;
+ dmabuf_obj_put(dmabuf_obj);
+ }
+
+ }
+ mutex_unlock(&vgpu->dmabuf_lock);
+}
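Both entry points are reached from userspace through the VFIO mdev device fd; a hedged sketch of the expected call sequence, assuming <linux/vfio.h>, <sys/ioctl.h> and a previously opened device_fd (error handling trimmed; the ioctl names and plane-info layout come from the VFIO dma-buf interface this code implements, the rest is illustrative):

	struct vfio_device_gfx_plane_info plane = {
		.argsz = sizeof(plane),
		.flags = VFIO_GFX_PLANE_TYPE_DMABUF,
		.drm_plane_type = DRM_PLANE_TYPE_PRIMARY,
	};
	int dmabuf_fd;

	/* intel_vgpu_query_plane() fills in format/size and a dmabuf_id */
	if (ioctl(device_fd, VFIO_DEVICE_QUERY_GFX_PLANE, &plane) < 0)
		return -errno;

	/* intel_vgpu_get_dmabuf() exports the proxy GEM object as an fd */
	dmabuf_fd = ioctl(device_fd, VFIO_DEVICE_GET_GFX_DMABUF, &plane.dmabuf_id);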
diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.h b/drivers/gpu/drm/i915/gvt/dmabuf.h
new file mode 100644
index 000000000000..5f8f03fb1d1b
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/dmabuf.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Zhiyuan Lv <zhiyuan.lv@intel.com>
+ *
+ * Contributors:
+ * Xiaoguang Chen
+ * Tina Zhang <tina.zhang@intel.com>
+ */
+
+#ifndef _GVT_DMABUF_H_
+#define _GVT_DMABUF_H_
+#include <linux/vfio.h>
+
+struct intel_vgpu_fb_info {
+ __u64 start;
+ __u64 start_gpa;
+ __u64 drm_format_mod;
+ __u32 drm_format; /* drm format of plane */
+ __u32 width; /* width of plane */
+ __u32 height; /* height of plane */
+ __u32 stride; /* stride of plane */
+ __u32 size; /* size of plane in bytes, align on page */
+ __u32 x_pos; /* horizontal position of cursor plane */
+ __u32 y_pos; /* vertical position of cursor plane */
+ __u32 x_hot; /* horizontal position of cursor hotspot */
+ __u32 y_hot; /* vertical position of cursor hotspot */
+ struct intel_vgpu_dmabuf_obj *obj;
+};
+
+/**
+ * struct intel_vgpu_dmabuf_obj - Intel vGPU device buffer object
+ */
+struct intel_vgpu_dmabuf_obj {
+ struct intel_vgpu *vgpu;
+ struct intel_vgpu_fb_info *info;
+ __u32 dmabuf_id;
+ struct kref kref;
+ bool initref;
+ struct list_head list;
+};
+
+int intel_vgpu_query_plane(struct intel_vgpu *vgpu, void *args);
+int intel_vgpu_get_dmabuf(struct intel_vgpu *vgpu, unsigned int dmabuf_id);
+void intel_vgpu_dmabuf_cleanup(struct intel_vgpu *vgpu);
+
+#endif
diff --git a/drivers/gpu/drm/i915/gvt/edid.c b/drivers/gpu/drm/i915/gvt/edid.c
index 42cd09ec63fa..f61337632969 100644
--- a/drivers/gpu/drm/i915/gvt/edid.c
+++ b/drivers/gpu/drm/i915/gvt/edid.c
@@ -95,9 +95,9 @@ static inline int get_port_from_gmbus0(u32 gmbus0)
static void reset_gmbus_controller(struct intel_vgpu *vgpu)
{
- vgpu_vreg(vgpu, PCH_GMBUS2) = GMBUS_HW_RDY;
+ vgpu_vreg_t(vgpu, PCH_GMBUS2) = GMBUS_HW_RDY;
if (!vgpu->display.i2c_edid.edid_available)
- vgpu_vreg(vgpu, PCH_GMBUS2) |= GMBUS_SATOER;
+ vgpu_vreg_t(vgpu, PCH_GMBUS2) |= GMBUS_SATOER;
vgpu->display.i2c_edid.gmbus.phase = GMBUS_IDLE_PHASE;
}
@@ -123,16 +123,16 @@ static int gmbus0_mmio_write(struct intel_vgpu *vgpu,
vgpu->display.i2c_edid.state = I2C_GMBUS;
vgpu->display.i2c_edid.gmbus.phase = GMBUS_IDLE_PHASE;
- vgpu_vreg(vgpu, PCH_GMBUS2) &= ~GMBUS_ACTIVE;
- vgpu_vreg(vgpu, PCH_GMBUS2) |= GMBUS_HW_RDY | GMBUS_HW_WAIT_PHASE;
+ vgpu_vreg_t(vgpu, PCH_GMBUS2) &= ~GMBUS_ACTIVE;
+ vgpu_vreg_t(vgpu, PCH_GMBUS2) |= GMBUS_HW_RDY | GMBUS_HW_WAIT_PHASE;
if (intel_vgpu_has_monitor_on_port(vgpu, port) &&
!intel_vgpu_port_is_dp(vgpu, port)) {
vgpu->display.i2c_edid.port = port;
vgpu->display.i2c_edid.edid_available = true;
- vgpu_vreg(vgpu, PCH_GMBUS2) &= ~GMBUS_SATOER;
+ vgpu_vreg_t(vgpu, PCH_GMBUS2) &= ~GMBUS_SATOER;
} else
- vgpu_vreg(vgpu, PCH_GMBUS2) |= GMBUS_SATOER;
+ vgpu_vreg_t(vgpu, PCH_GMBUS2) |= GMBUS_SATOER;
return 0;
}
@@ -159,8 +159,8 @@ static int gmbus1_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
* 2) HW_RDY bit asserted
*/
if (wvalue & GMBUS_SW_CLR_INT) {
- vgpu_vreg(vgpu, PCH_GMBUS2) &= ~GMBUS_INT;
- vgpu_vreg(vgpu, PCH_GMBUS2) |= GMBUS_HW_RDY;
+ vgpu_vreg_t(vgpu, PCH_GMBUS2) &= ~GMBUS_INT;
+ vgpu_vreg_t(vgpu, PCH_GMBUS2) |= GMBUS_HW_RDY;
}
/* For virtualization, we suppose that HW is always ready,
@@ -208,7 +208,7 @@ static int gmbus1_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
* visible in gmbus interface)
*/
i2c_edid->gmbus.phase = GMBUS_IDLE_PHASE;
- vgpu_vreg(vgpu, PCH_GMBUS2) &= ~GMBUS_ACTIVE;
+ vgpu_vreg_t(vgpu, PCH_GMBUS2) &= ~GMBUS_ACTIVE;
}
break;
case NIDX_NS_W:
@@ -220,7 +220,7 @@ static int gmbus1_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
* START (-->INDEX) -->DATA
*/
i2c_edid->gmbus.phase = GMBUS_DATA_PHASE;
- vgpu_vreg(vgpu, PCH_GMBUS2) |= GMBUS_ACTIVE;
+ vgpu_vreg_t(vgpu, PCH_GMBUS2) |= GMBUS_ACTIVE;
break;
default:
gvt_vgpu_err("Unknown/reserved GMBUS cycle detected!\n");
@@ -256,7 +256,7 @@ static int gmbus3_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
u32 reg_data = 0;
/* Data can only be received if previous settings are correct */
- if (vgpu_vreg(vgpu, PCH_GMBUS1) & GMBUS_SLAVE_READ) {
+ if (vgpu_vreg_t(vgpu, PCH_GMBUS1) & GMBUS_SLAVE_READ) {
if (byte_left <= 0) {
memcpy(p_data, &vgpu_vreg(vgpu, offset), bytes);
return 0;
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index c9fa0fb488d3..769c1c24ae75 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -458,7 +458,7 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id,
gvt_dbg_el("workload %p emulate schedule_in %d\n", workload,
emulate_schedule_in);
- queue_workload(workload);
+ intel_vgpu_queue_workload(workload);
return 0;
}
@@ -528,7 +528,7 @@ static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id)
vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw;
}
-void clean_execlist(struct intel_vgpu *vgpu)
+static void clean_execlist(struct intel_vgpu *vgpu)
{
enum intel_engine_id i;
struct intel_engine_cs *engine;
@@ -542,7 +542,7 @@ void clean_execlist(struct intel_vgpu *vgpu)
}
}
-void reset_execlist(struct intel_vgpu *vgpu,
+static void reset_execlist(struct intel_vgpu *vgpu,
unsigned long engine_mask)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
@@ -553,7 +553,7 @@ void reset_execlist(struct intel_vgpu *vgpu,
init_vgpu_execlist(vgpu, engine->id);
}
-int init_execlist(struct intel_vgpu *vgpu)
+static int init_execlist(struct intel_vgpu *vgpu)
{
reset_execlist(vgpu, ALL_ENGINES);
return 0;
diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.c b/drivers/gpu/drm/i915/gvt/fb_decoder.c
new file mode 100644
index 000000000000..6b50fe78dc1b
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/fb_decoder.c
@@ -0,0 +1,514 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Kevin Tian <kevin.tian@intel.com>
+ *
+ * Contributors:
+ * Bing Niu <bing.niu@intel.com>
+ * Xu Han <xu.han@intel.com>
+ * Ping Gao <ping.a.gao@intel.com>
+ * Xiaoguang Chen <xiaoguang.chen@intel.com>
+ * Yang Liu <yang2.liu@intel.com>
+ * Tina Zhang <tina.zhang@intel.com>
+ *
+ */
+
+#include <uapi/drm/drm_fourcc.h>
+#include "i915_drv.h"
+#include "gvt.h"
+
+#define PRIMARY_FORMAT_NUM 16
+struct pixel_format {
+ int drm_format; /* Pixel format in DRM definition */
+ int bpp; /* Bits per pixel, 0 indicates invalid */
+ char *desc; /* The description */
+};
+
+static struct pixel_format bdw_pixel_formats[] = {
+ {DRM_FORMAT_C8, 8, "8-bit Indexed"},
+ {DRM_FORMAT_RGB565, 16, "16-bit BGRX (5:6:5 MSB-R:G:B)"},
+ {DRM_FORMAT_XRGB8888, 32, "32-bit BGRX (8:8:8:8 MSB-X:R:G:B)"},
+ {DRM_FORMAT_XBGR2101010, 32, "32-bit RGBX (2:10:10:10 MSB-X:B:G:R)"},
+
+ {DRM_FORMAT_XRGB2101010, 32, "32-bit BGRX (2:10:10:10 MSB-X:R:G:B)"},
+ {DRM_FORMAT_XBGR8888, 32, "32-bit RGBX (8:8:8:8 MSB-X:B:G:R)"},
+
+	/* unsupported formats have bpp set to 0 */
+ {0, 0, NULL},
+};
+
+static struct pixel_format skl_pixel_formats[] = {
+ {DRM_FORMAT_YUYV, 16, "16-bit packed YUYV (8:8:8:8 MSB-V:Y2:U:Y1)"},
+ {DRM_FORMAT_UYVY, 16, "16-bit packed UYVY (8:8:8:8 MSB-Y2:V:Y1:U)"},
+ {DRM_FORMAT_YVYU, 16, "16-bit packed YVYU (8:8:8:8 MSB-U:Y2:V:Y1)"},
+ {DRM_FORMAT_VYUY, 16, "16-bit packed VYUY (8:8:8:8 MSB-Y2:U:Y1:V)"},
+
+ {DRM_FORMAT_C8, 8, "8-bit Indexed"},
+ {DRM_FORMAT_RGB565, 16, "16-bit BGRX (5:6:5 MSB-R:G:B)"},
+ {DRM_FORMAT_ABGR8888, 32, "32-bit RGBA (8:8:8:8 MSB-A:B:G:R)"},
+ {DRM_FORMAT_XBGR8888, 32, "32-bit RGBX (8:8:8:8 MSB-X:B:G:R)"},
+
+ {DRM_FORMAT_ARGB8888, 32, "32-bit BGRA (8:8:8:8 MSB-A:R:G:B)"},
+ {DRM_FORMAT_XRGB8888, 32, "32-bit BGRX (8:8:8:8 MSB-X:R:G:B)"},
+ {DRM_FORMAT_XBGR2101010, 32, "32-bit RGBX (2:10:10:10 MSB-X:B:G:R)"},
+ {DRM_FORMAT_XRGB2101010, 32, "32-bit BGRX (2:10:10:10 MSB-X:R:G:B)"},
+
+	/* unsupported formats have bpp set to 0 */
+ {0, 0, NULL},
+};
+
+static int bdw_format_to_drm(int format)
+{
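+	/* Index 6 is the terminating entry of bdw_pixel_formats with
+	 * bpp == 0; any unhandled hardware format falls through to it and
+	 * is rejected by the caller. skl_format_to_drm() and
+	 * cursor_mode_to_drm() below use the same convention.
+	 */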
+ int bdw_pixel_formats_index = 6;
+
+ switch (format) {
+ case DISPPLANE_8BPP:
+ bdw_pixel_formats_index = 0;
+ break;
+ case DISPPLANE_BGRX565:
+ bdw_pixel_formats_index = 1;
+ break;
+ case DISPPLANE_BGRX888:
+ bdw_pixel_formats_index = 2;
+ break;
+ case DISPPLANE_RGBX101010:
+ bdw_pixel_formats_index = 3;
+ break;
+ case DISPPLANE_BGRX101010:
+ bdw_pixel_formats_index = 4;
+ break;
+ case DISPPLANE_RGBX888:
+ bdw_pixel_formats_index = 5;
+ break;
+
+ default:
+ break;
+ }
+
+ return bdw_pixel_formats_index;
+}
+
+static int skl_format_to_drm(int format, bool rgb_order, bool alpha,
+ int yuv_order)
+{
+ int skl_pixel_formats_index = 12;
+
+ switch (format) {
+ case PLANE_CTL_FORMAT_INDEXED:
+ skl_pixel_formats_index = 4;
+ break;
+ case PLANE_CTL_FORMAT_RGB_565:
+ skl_pixel_formats_index = 5;
+ break;
+ case PLANE_CTL_FORMAT_XRGB_8888:
+ if (rgb_order)
+ skl_pixel_formats_index = alpha ? 6 : 7;
+ else
+ skl_pixel_formats_index = alpha ? 8 : 9;
+ break;
+ case PLANE_CTL_FORMAT_XRGB_2101010:
+ skl_pixel_formats_index = rgb_order ? 10 : 11;
+ break;
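+	/* yuv_order is the masked PLANE_CTL YUV byte-order field; shifting
+	 * it down by 16 turns it into an index into the four packed-YUV
+	 * entries at the start of skl_pixel_formats, and anything outside
+	 * 0-3 is rejected.
+	 */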
+ case PLANE_CTL_FORMAT_YUV422:
+ skl_pixel_formats_index = yuv_order >> 16;
+ if (skl_pixel_formats_index > 3)
+ return -EINVAL;
+ break;
+
+ default:
+ break;
+ }
+
+ return skl_pixel_formats_index;
+}
+
+static u32 intel_vgpu_get_stride(struct intel_vgpu *vgpu, int pipe,
+ u32 tiled, int stride_mask, int bpp)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+
+ u32 stride_reg = vgpu_vreg_t(vgpu, DSPSTRIDE(pipe)) & stride_mask;
+ u32 stride = stride_reg;
+
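+	/* On SKL/KBL the stride register is programmed in tile units
+	 * (64-byte units for linear surfaces), so scale it to bytes with
+	 * the per-tiling multipliers below; on earlier platforms the
+	 * masked register value is already the stride in bytes.
+	 */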
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+ switch (tiled) {
+ case PLANE_CTL_TILED_LINEAR:
+ stride = stride_reg * 64;
+ break;
+ case PLANE_CTL_TILED_X:
+ stride = stride_reg * 512;
+ break;
+ case PLANE_CTL_TILED_Y:
+ stride = stride_reg * 128;
+ break;
+ case PLANE_CTL_TILED_YF:
+ if (bpp == 8)
+ stride = stride_reg * 64;
+ else if (bpp == 16 || bpp == 32 || bpp == 64)
+ stride = stride_reg * 128;
+ else
+ gvt_dbg_core("skl: unsupported bpp:%d\n", bpp);
+ break;
+ default:
+ gvt_dbg_core("skl: unsupported tile format:%x\n",
+ tiled);
+ }
+ }
+
+ return stride;
+}
+
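+/* Return the first enabled pipe, or I915_MAX_PIPES when no pipe is
+ * enabled; callers treat the latter as "no active display" (-ENODEV).
+ */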
+static int get_active_pipe(struct intel_vgpu *vgpu)
+{
+ int i;
+
+ for (i = 0; i < I915_MAX_PIPES; i++)
+ if (pipe_is_enabled(vgpu, i))
+ break;
+
+ return i;
+}
+
+/**
+ * intel_vgpu_decode_primary_plane - Decode primary plane
+ * @vgpu: input vgpu
+ * @plane: primary plane to save decoded info
+ * This function is called to decode the primary plane of the active pipe
+ *
+ * Returns:
+ * 0 on success, non-zero if failed.
+ */
+int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
+ struct intel_vgpu_primary_plane_format *plane)
+{
+ u32 val, fmt;
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ int pipe;
+
+ pipe = get_active_pipe(vgpu);
+ if (pipe >= I915_MAX_PIPES)
+ return -ENODEV;
+
+ val = vgpu_vreg_t(vgpu, DSPCNTR(pipe));
+ plane->enabled = !!(val & DISPLAY_PLANE_ENABLE);
+ if (!plane->enabled)
+ return -ENODEV;
+
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+ plane->tiled = (val & PLANE_CTL_TILED_MASK) >>
+ _PLANE_CTL_TILED_SHIFT;
+ fmt = skl_format_to_drm(
+ val & PLANE_CTL_FORMAT_MASK,
+ val & PLANE_CTL_ORDER_RGBX,
+ val & PLANE_CTL_ALPHA_MASK,
+ val & PLANE_CTL_YUV422_ORDER_MASK);
+
+ if (fmt >= ARRAY_SIZE(skl_pixel_formats)) {
+ gvt_vgpu_err("Out-of-bounds pixel format index\n");
+ return -EINVAL;
+ }
+
+ plane->bpp = skl_pixel_formats[fmt].bpp;
+ plane->drm_format = skl_pixel_formats[fmt].drm_format;
+ } else {
+ plane->tiled = !!(val & DISPPLANE_TILED);
+ fmt = bdw_format_to_drm(val & DISPPLANE_PIXFORMAT_MASK);
+ plane->bpp = bdw_pixel_formats[fmt].bpp;
+ plane->drm_format = bdw_pixel_formats[fmt].drm_format;
+ }
+
+ if (!plane->bpp) {
+ gvt_vgpu_err("Non-supported pixel format (0x%x)\n", fmt);
+ return -EINVAL;
+ }
+
+ plane->hw_format = fmt;
+
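+	/* DSPSURF gives the framebuffer base as a GGTT graphics memory
+	 * address; check that it lies inside this vGPU's GGTT range, then
+	 * translate it to a guest physical address through the vGPU's
+	 * GGTT mm for later access.
+	 */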
+ plane->base = vgpu_vreg_t(vgpu, DSPSURF(pipe)) & I915_GTT_PAGE_MASK;
+ if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) {
+ gvt_vgpu_err("invalid gma address: %lx\n",
+ (unsigned long)plane->base);
+ return -EINVAL;
+ }
+
+ plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base);
+ if (plane->base_gpa == INTEL_GVT_INVALID_ADDR) {
+ gvt_vgpu_err("invalid gma address: %lx\n",
+ (unsigned long)plane->base);
+ return -EINVAL;
+ }
+
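+	/* plane->tiled holds the tiling field already shifted down by
+	 * _PLANE_CTL_TILED_SHIFT (10); shift it back up so it matches the
+	 * PLANE_CTL_TILED_* register values tested in
+	 * intel_vgpu_get_stride().
+	 */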
+ plane->stride = intel_vgpu_get_stride(vgpu, pipe, (plane->tiled << 10),
+ (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) ?
+ (_PRI_PLANE_STRIDE_MASK >> 6) :
+ _PRI_PLANE_STRIDE_MASK, plane->bpp);
+
+ plane->width = (vgpu_vreg_t(vgpu, PIPESRC(pipe)) & _PIPE_H_SRCSZ_MASK) >>
+ _PIPE_H_SRCSZ_SHIFT;
+ plane->width += 1;
+ plane->height = (vgpu_vreg_t(vgpu, PIPESRC(pipe)) &
+ _PIPE_V_SRCSZ_MASK) >> _PIPE_V_SRCSZ_SHIFT;
+ plane->height += 1; /* raw height is one minus the real value */
+
+ val = vgpu_vreg_t(vgpu, DSPTILEOFF(pipe));
+ plane->x_offset = (val & _PRI_PLANE_X_OFF_MASK) >>
+ _PRI_PLANE_X_OFF_SHIFT;
+ plane->y_offset = (val & _PRI_PLANE_Y_OFF_MASK) >>
+ _PRI_PLANE_Y_OFF_SHIFT;
+
+ return 0;
+}
+
+#define CURSOR_FORMAT_NUM (1 << 6)
+struct cursor_mode_format {
+ int drm_format; /* Pixel format in DRM definition */
+ u8 bpp; /* Bits per pixel; 0 indicates invalid */
+ u32 width; /* In pixel */
+ u32 height; /* In lines */
+ char *desc; /* The description */
+};
+
+static struct cursor_mode_format cursor_pixel_formats[] = {
+ {DRM_FORMAT_ARGB8888, 32, 128, 128, "128x128 32bpp ARGB"},
+ {DRM_FORMAT_ARGB8888, 32, 256, 256, "256x256 32bpp ARGB"},
+ {DRM_FORMAT_ARGB8888, 32, 64, 64, "64x64 32bpp ARGB"},
+ {DRM_FORMAT_ARGB8888, 32, 64, 64, "64x64 32bpp ARGB"},
+
+	/* unsupported formats have bpp set to 0 */
+ {0, 0, 0, 0, NULL},
+};
+
+static int cursor_mode_to_drm(int mode)
+{
+ int cursor_pixel_formats_index = 4;
+
+ switch (mode) {
+ case CURSOR_MODE_128_ARGB_AX:
+ cursor_pixel_formats_index = 0;
+ break;
+ case CURSOR_MODE_256_ARGB_AX:
+ cursor_pixel_formats_index = 1;
+ break;
+ case CURSOR_MODE_64_ARGB_AX:
+ cursor_pixel_formats_index = 2;
+ break;
+ case CURSOR_MODE_64_32B_AX:
+ cursor_pixel_formats_index = 3;
+ break;
+
+ default:
+ break;
+ }
+
+ return cursor_pixel_formats_index;
+}
+
+/**
+ * intel_vgpu_decode_cursor_plane - Decode cursor plane
+ * @vgpu: input vgpu
+ * @plane: cursor plane to save decoded info
+ * This function is called to decode the cursor plane of the active pipe
+ *
+ * Returns:
+ * 0 on success, non-zero if failed.
+ */
+int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu,
+ struct intel_vgpu_cursor_plane_format *plane)
+{
+ u32 val, mode, index;
+ u32 alpha_plane, alpha_force;
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ int pipe;
+
+ pipe = get_active_pipe(vgpu);
+ if (pipe >= I915_MAX_PIPES)
+ return -ENODEV;
+
+ val = vgpu_vreg_t(vgpu, CURCNTR(pipe));
+ mode = val & CURSOR_MODE;
+ plane->enabled = (mode != CURSOR_MODE_DISABLE);
+ if (!plane->enabled)
+ return -ENODEV;
+
+ index = cursor_mode_to_drm(mode);
+
+ if (!cursor_pixel_formats[index].bpp) {
+ gvt_vgpu_err("Non-supported cursor mode (0x%x)\n", mode);
+ return -EINVAL;
+ }
+ plane->mode = mode;
+ plane->bpp = cursor_pixel_formats[index].bpp;
+ plane->drm_format = cursor_pixel_formats[index].drm_format;
+ plane->width = cursor_pixel_formats[index].width;
+ plane->height = cursor_pixel_formats[index].height;
+
+ alpha_plane = (val & _CURSOR_ALPHA_PLANE_MASK) >>
+ _CURSOR_ALPHA_PLANE_SHIFT;
+ alpha_force = (val & _CURSOR_ALPHA_FORCE_MASK) >>
+ _CURSOR_ALPHA_FORCE_SHIFT;
+ if (alpha_plane || alpha_force)
+ gvt_dbg_core("alpha_plane=0x%x, alpha_force=0x%x\n",
+ alpha_plane, alpha_force);
+
+ plane->base = vgpu_vreg_t(vgpu, CURBASE(pipe)) & I915_GTT_PAGE_MASK;
+ if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) {
+ gvt_vgpu_err("invalid gma address: %lx\n",
+ (unsigned long)plane->base);
+ return -EINVAL;
+ }
+
+ plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base);
+ if (plane->base_gpa == INTEL_GVT_INVALID_ADDR) {
+ gvt_vgpu_err("invalid gma address: %lx\n",
+ (unsigned long)plane->base);
+ return -EINVAL;
+ }
+
+ val = vgpu_vreg_t(vgpu, CURPOS(pipe));
+ plane->x_pos = (val & _CURSOR_POS_X_MASK) >> _CURSOR_POS_X_SHIFT;
+ plane->x_sign = (val & _CURSOR_SIGN_X_MASK) >> _CURSOR_SIGN_X_SHIFT;
+ plane->y_pos = (val & _CURSOR_POS_Y_MASK) >> _CURSOR_POS_Y_SHIFT;
+ plane->y_sign = (val & _CURSOR_SIGN_Y_MASK) >> _CURSOR_SIGN_Y_SHIFT;
+
+ return 0;
+}
+
+#define SPRITE_FORMAT_NUM (1 << 3)
+
+static struct pixel_format sprite_pixel_formats[SPRITE_FORMAT_NUM] = {
+ [0x0] = {DRM_FORMAT_YUV422, 16, "YUV 16-bit 4:2:2 packed"},
+ [0x1] = {DRM_FORMAT_XRGB2101010, 32, "RGB 32-bit 2:10:10:10"},
+ [0x2] = {DRM_FORMAT_XRGB8888, 32, "RGB 32-bit 8:8:8:8"},
+ [0x4] = {DRM_FORMAT_AYUV, 32,
+ "YUV 32-bit 4:4:4 packed (8:8:8:8 MSB-X:Y:U:V)"},
+};
+
+/**
+ * intel_vgpu_decode_sprite_plane - Decode sprite plane
+ * @vgpu: input vgpu
+ * @plane: sprite plane to save decoded info
+ * This function is called to decode the sprite plane of the active pipe
+ *
+ * Returns:
+ * 0 on success, non-zero if failed.
+ */
+int intel_vgpu_decode_sprite_plane(struct intel_vgpu *vgpu,
+ struct intel_vgpu_sprite_plane_format *plane)
+{
+ u32 val, fmt;
+ u32 color_order, yuv_order;
+ int drm_format;
+ int pipe;
+
+ pipe = get_active_pipe(vgpu);
+ if (pipe >= I915_MAX_PIPES)
+ return -ENODEV;
+
+ val = vgpu_vreg_t(vgpu, SPRCTL(pipe));
+ plane->enabled = !!(val & SPRITE_ENABLE);
+ if (!plane->enabled)
+ return -ENODEV;
+
+ plane->tiled = !!(val & SPRITE_TILED);
+ color_order = !!(val & SPRITE_RGB_ORDER_RGBX);
+ yuv_order = (val & SPRITE_YUV_BYTE_ORDER_MASK) >>
+ _SPRITE_YUV_ORDER_SHIFT;
+
+ fmt = (val & SPRITE_PIXFORMAT_MASK) >> _SPRITE_FMT_SHIFT;
+ if (!sprite_pixel_formats[fmt].bpp) {
+ gvt_vgpu_err("Non-supported pixel format (0x%x)\n", fmt);
+ return -EINVAL;
+ }
+ plane->hw_format = fmt;
+ plane->bpp = sprite_pixel_formats[fmt].bpp;
+ drm_format = sprite_pixel_formats[fmt].drm_format;
+
+	/* RGB values in an RGBxxx buffer may be ordered RGB or BGR
+	 * depending on the state of the color_order field
+	 */
+ if (!color_order) {
+ if (drm_format == DRM_FORMAT_XRGB2101010)
+ drm_format = DRM_FORMAT_XBGR2101010;
+ else if (drm_format == DRM_FORMAT_XRGB8888)
+ drm_format = DRM_FORMAT_XBGR8888;
+ }
+
+ if (drm_format == DRM_FORMAT_YUV422) {
+ switch (yuv_order) {
+ case 0:
+ drm_format = DRM_FORMAT_YUYV;
+ break;
+ case 1:
+ drm_format = DRM_FORMAT_UYVY;
+ break;
+ case 2:
+ drm_format = DRM_FORMAT_YVYU;
+ break;
+ case 3:
+ drm_format = DRM_FORMAT_VYUY;
+ break;
+ default:
+ /* yuv_order has only 2 bits */
+ break;
+ }
+ }
+
+ plane->drm_format = drm_format;
+
+ plane->base = vgpu_vreg_t(vgpu, SPRSURF(pipe)) & I915_GTT_PAGE_MASK;
+ if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) {
+ gvt_vgpu_err("invalid gma address: %lx\n",
+ (unsigned long)plane->base);
+ return -EINVAL;
+ }
+
+ plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base);
+ if (plane->base_gpa == INTEL_GVT_INVALID_ADDR) {
+ gvt_vgpu_err("invalid gma address: %lx\n",
+ (unsigned long)plane->base);
+ return -EINVAL;
+ }
+
+ plane->stride = vgpu_vreg_t(vgpu, SPRSTRIDE(pipe)) &
+ _SPRITE_STRIDE_MASK;
+
+ val = vgpu_vreg_t(vgpu, SPRSIZE(pipe));
+ plane->height = (val & _SPRITE_SIZE_HEIGHT_MASK) >>
+ _SPRITE_SIZE_HEIGHT_SHIFT;
+ plane->width = (val & _SPRITE_SIZE_WIDTH_MASK) >>
+ _SPRITE_SIZE_WIDTH_SHIFT;
+ plane->height += 1; /* raw height is one minus the real value */
+ plane->width += 1; /* raw width is one minus the real value */
+
+ val = vgpu_vreg_t(vgpu, SPRPOS(pipe));
+ plane->x_pos = (val & _SPRITE_POS_X_MASK) >> _SPRITE_POS_X_SHIFT;
+ plane->y_pos = (val & _SPRITE_POS_Y_MASK) >> _SPRITE_POS_Y_SHIFT;
+
+ val = vgpu_vreg_t(vgpu, SPROFFSET(pipe));
+ plane->x_offset = (val & _SPRITE_OFFSET_START_X_MASK) >>
+ _SPRITE_OFFSET_START_X_SHIFT;
+ plane->y_offset = (val & _SPRITE_OFFSET_START_Y_MASK) >>
+ _SPRITE_OFFSET_START_Y_SHIFT;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.h b/drivers/gpu/drm/i915/gvt/fb_decoder.h
new file mode 100644
index 000000000000..cb055f3c81a2
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/fb_decoder.h
@@ -0,0 +1,169 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Kevin Tian <kevin.tian@intel.com>
+ *
+ * Contributors:
+ * Bing Niu <bing.niu@intel.com>
+ * Xu Han <xu.han@intel.com>
+ * Ping Gao <ping.a.gao@intel.com>
+ * Xiaoguang Chen <xiaoguang.chen@intel.com>
+ * Yang Liu <yang2.liu@intel.com>
+ * Tina Zhang <tina.zhang@intel.com>
+ *
+ */
+
+#ifndef _GVT_FB_DECODER_H_
+#define _GVT_FB_DECODER_H_
+
+#define _PLANE_CTL_FORMAT_SHIFT 24
+#define _PLANE_CTL_TILED_SHIFT 10
+#define _PIPE_V_SRCSZ_SHIFT 0
+#define _PIPE_V_SRCSZ_MASK (0xfff << _PIPE_V_SRCSZ_SHIFT)
+#define _PIPE_H_SRCSZ_SHIFT 16
+#define _PIPE_H_SRCSZ_MASK (0x1fff << _PIPE_H_SRCSZ_SHIFT)
+
+#define _PRI_PLANE_FMT_SHIFT 26
+#define _PRI_PLANE_STRIDE_MASK (0x3ff << 6)
+#define _PRI_PLANE_X_OFF_SHIFT 0
+#define _PRI_PLANE_X_OFF_MASK (0x1fff << _PRI_PLANE_X_OFF_SHIFT)
+#define _PRI_PLANE_Y_OFF_SHIFT 16
+#define _PRI_PLANE_Y_OFF_MASK (0xfff << _PRI_PLANE_Y_OFF_SHIFT)
+
+#define _CURSOR_MODE 0x3f
+#define _CURSOR_ALPHA_FORCE_SHIFT 8
+#define _CURSOR_ALPHA_FORCE_MASK (0x3 << _CURSOR_ALPHA_FORCE_SHIFT)
+#define _CURSOR_ALPHA_PLANE_SHIFT 10
+#define _CURSOR_ALPHA_PLANE_MASK (0x3 << _CURSOR_ALPHA_PLANE_SHIFT)
+#define _CURSOR_POS_X_SHIFT 0
+#define _CURSOR_POS_X_MASK (0x1fff << _CURSOR_POS_X_SHIFT)
+#define _CURSOR_SIGN_X_SHIFT 15
+#define _CURSOR_SIGN_X_MASK (1 << _CURSOR_SIGN_X_SHIFT)
+#define _CURSOR_POS_Y_SHIFT 16
+#define _CURSOR_POS_Y_MASK (0xfff << _CURSOR_POS_Y_SHIFT)
+#define _CURSOR_SIGN_Y_SHIFT 31
+#define _CURSOR_SIGN_Y_MASK (1 << _CURSOR_SIGN_Y_SHIFT)
+
+#define _SPRITE_FMT_SHIFT 25
+#define _SPRITE_COLOR_ORDER_SHIFT 20
+#define _SPRITE_YUV_ORDER_SHIFT 16
+#define _SPRITE_STRIDE_SHIFT 6
+#define _SPRITE_STRIDE_MASK (0x1ff << _SPRITE_STRIDE_SHIFT)
+#define _SPRITE_SIZE_WIDTH_SHIFT 0
+#define _SPRITE_SIZE_HEIGHT_SHIFT 16
+#define _SPRITE_SIZE_WIDTH_MASK (0x1fff << _SPRITE_SIZE_WIDTH_SHIFT)
+#define _SPRITE_SIZE_HEIGHT_MASK (0xfff << _SPRITE_SIZE_HEIGHT_SHIFT)
+#define _SPRITE_POS_X_SHIFT 0
+#define _SPRITE_POS_Y_SHIFT 16
+#define _SPRITE_POS_X_MASK (0x1fff << _SPRITE_POS_X_SHIFT)
+#define _SPRITE_POS_Y_MASK (0xfff << _SPRITE_POS_Y_SHIFT)
+#define _SPRITE_OFFSET_START_X_SHIFT 0
+#define _SPRITE_OFFSET_START_Y_SHIFT 16
+#define _SPRITE_OFFSET_START_X_MASK (0x1fff << _SPRITE_OFFSET_START_X_SHIFT)
+#define _SPRITE_OFFSET_START_Y_MASK (0xfff << _SPRITE_OFFSET_START_Y_SHIFT)
+
+enum GVT_FB_EVENT {
+ FB_MODE_SET_START = 1,
+ FB_MODE_SET_END,
+ FB_DISPLAY_FLIP,
+};
+
+enum DDI_PORT {
+ DDI_PORT_NONE = 0,
+ DDI_PORT_B = 1,
+ DDI_PORT_C = 2,
+ DDI_PORT_D = 3,
+ DDI_PORT_E = 4
+};
+
+struct intel_gvt;
+
+/* color space conversion and gamma correction are not included */
+struct intel_vgpu_primary_plane_format {
+ u8 enabled; /* plane is enabled */
+ u8 tiled; /* X-tiled */
+ u8 bpp; /* bits per pixel */
+ u32 hw_format; /* format field in the PRI_CTL register */
+ u32 drm_format; /* format in DRM definition */
+ u32 base; /* framebuffer base in graphics memory */
+ u64 base_gpa;
+ u32 x_offset; /* in pixels */
+ u32 y_offset; /* in lines */
+ u32 width; /* in pixels */
+ u32 height; /* in lines */
+ u32 stride; /* in bytes */
+};
+
+struct intel_vgpu_sprite_plane_format {
+ u8 enabled; /* plane is enabled */
+ u8 tiled; /* X-tiled */
+ u8 bpp; /* bits per pixel */
+ u32 hw_format; /* format field in the SPR_CTL register */
+ u32 drm_format; /* format in DRM definition */
+ u32 base; /* sprite base in graphics memory */
+ u64 base_gpa;
+ u32 x_pos; /* in pixels */
+ u32 y_pos; /* in lines */
+ u32 x_offset; /* in pixels */
+ u32 y_offset; /* in lines */
+ u32 width; /* in pixels */
+ u32 height; /* in lines */
+ u32 stride; /* in bytes */
+};
+
+struct intel_vgpu_cursor_plane_format {
+ u8 enabled;
+ u8 mode; /* cursor mode select */
+ u8 bpp; /* bits per pixel */
+ u32 drm_format; /* format in DRM definition */
+ u32 base; /* cursor base in graphics memory */
+ u64 base_gpa;
+ u32 x_pos; /* in pixels */
+ u32 y_pos; /* in lines */
+ u8 x_sign; /* X Position Sign */
+ u8 y_sign; /* Y Position Sign */
+ u32 width; /* in pixels */
+ u32 height; /* in lines */
+ u32 x_hot; /* in pixels */
+ u32 y_hot; /* in pixels */
+};
+
+struct intel_vgpu_pipe_format {
+ struct intel_vgpu_primary_plane_format primary;
+ struct intel_vgpu_sprite_plane_format sprite;
+ struct intel_vgpu_cursor_plane_format cursor;
+ enum DDI_PORT ddi_port; /* the DDI port that pipe is connected to */
+};
+
+struct intel_vgpu_fb_format {
+ struct intel_vgpu_pipe_format pipes[I915_MAX_PIPES];
+};
+
+int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
+ struct intel_vgpu_primary_plane_format *plane);
+int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu,
+ struct intel_vgpu_cursor_plane_format *plane);
+int intel_vgpu_decode_sprite_plane(struct intel_vgpu *vgpu,
+ struct intel_vgpu_sprite_plane_format *plane);
+
+#endif
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 71a0f2b87b3a..c4f752eeadcc 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -1968,6 +1968,39 @@ int intel_vgpu_emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
return ret;
}
+int intel_vgpu_write_protect_handler(struct intel_vgpu *vgpu, u64 pa,
+ void *p_data, unsigned int bytes)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+ int ret = 0;
+
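+	/* Nothing to do unless this vGPU tracks any guest pages. Otherwise
+	 * look the written page up by gfn: in failsafe mode just drop the
+	 * write protection, otherwise forward the write to the page's
+	 * handler and report any failure.
+	 */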
+ if (atomic_read(&vgpu->gtt.n_tracked_guest_page)) {
+ struct intel_vgpu_page_track *t;
+
+ mutex_lock(&gvt->lock);
+
+ t = intel_vgpu_find_tracked_page(vgpu, pa >> PAGE_SHIFT);
+ if (t) {
+ if (unlikely(vgpu->failsafe)) {
+				/* remove write protection to prevent future traps */
+ intel_vgpu_clean_page_track(vgpu, t);
+ } else {
+ ret = t->handler(t, pa, p_data, bytes);
+ if (ret) {
+ gvt_err("guest page write error %d, "
+ "gfn 0x%lx, pa 0x%llx, "
+ "var 0x%x, len %d\n",
+ ret, t->gfn, pa,
+ *(u32 *)p_data, bytes);
+ }
+ }
+ }
+ mutex_unlock(&gvt->lock);
+ }
+ return ret;
+}
+
+
static int alloc_scratch_pages(struct intel_vgpu *vgpu,
intel_gvt_gtt_type_t type)
{
@@ -2244,7 +2277,7 @@ struct intel_vgpu_mm *intel_vgpu_find_ppgtt_mm(struct intel_vgpu *vgpu,
int intel_vgpu_g2v_create_ppgtt_mm(struct intel_vgpu *vgpu,
int page_table_level)
{
- u64 *pdp = (u64 *)&vgpu_vreg64(vgpu, vgtif_reg(pdp[0]));
+ u64 *pdp = (u64 *)&vgpu_vreg64_t(vgpu, vgtif_reg(pdp[0]));
struct intel_vgpu_mm *mm;
if (WARN_ON((page_table_level != 4) && (page_table_level != 3)))
@@ -2279,7 +2312,7 @@ int intel_vgpu_g2v_create_ppgtt_mm(struct intel_vgpu *vgpu,
int intel_vgpu_g2v_destroy_ppgtt_mm(struct intel_vgpu *vgpu,
int page_table_level)
{
- u64 *pdp = (u64 *)&vgpu_vreg64(vgpu, vgtif_reg(pdp[0]));
+ u64 *pdp = (u64 *)&vgpu_vreg64_t(vgpu, vgtif_reg(pdp[0]));
struct intel_vgpu_mm *mm;
if (WARN_ON((page_table_level != 4) && (page_table_level != 3)))
diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h
index f98c1c19b4cb..4cc13b5934f1 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.h
+++ b/drivers/gpu/drm/i915/gvt/gtt.h
@@ -308,4 +308,7 @@ int intel_vgpu_emulate_gtt_mmio_read(struct intel_vgpu *vgpu,
int intel_vgpu_emulate_gtt_mmio_write(struct intel_vgpu *vgpu,
unsigned int off, void *p_data, unsigned int bytes);
+int intel_vgpu_write_protect_handler(struct intel_vgpu *vgpu, u64 pa,
+ void *p_data, unsigned int bytes);
+
#endif /* _GVT_GTT_H_ */
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
index 3a74a408a966..fac54f32d33f 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -181,6 +181,9 @@ static const struct intel_gvt_ops intel_gvt_ops = {
.vgpu_deactivate = intel_gvt_deactivate_vgpu,
.gvt_find_vgpu_type = intel_gvt_find_vgpu_type,
.get_gvt_attrs = intel_get_gvt_attrs,
+ .vgpu_query_plane = intel_vgpu_query_plane,
+ .vgpu_get_dmabuf = intel_vgpu_get_dmabuf,
+ .write_protect_handler = intel_vgpu_write_protect_handler,
};
/**
@@ -384,6 +387,8 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
if (ret)
goto out_clean_idr;
+ intel_gvt_init_engine_mmio_context(gvt);
+
ret = intel_gvt_load_firmware(gvt);
if (ret)
goto out_clean_mmio_info;
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 393066726993..7dc7a80213a8 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -44,8 +44,10 @@
#include "execlist.h"
#include "scheduler.h"
#include "sched_policy.h"
-#include "render.h"
+#include "mmio_context.h"
#include "cmd_parser.h"
+#include "fb_decoder.h"
+#include "dmabuf.h"
#define GVT_MAX_VGPU 8
@@ -123,7 +125,9 @@ struct intel_vgpu_irq {
};
struct intel_vgpu_opregion {
+ bool mapped;
void *va;
+ void *va_gopregion;
u32 gfn[INTEL_GVT_OPREGION_PAGES];
};
@@ -206,8 +210,16 @@ struct intel_vgpu {
struct kvm *kvm;
struct work_struct release_work;
atomic_t released;
+ struct vfio_device *vfio_device;
} vdev;
#endif
+
+ struct list_head dmabuf_obj_list_head;
+ struct mutex dmabuf_lock;
+ struct idr object_idr;
+
+ struct completion vblank_done;
+
};
/* validating GM healthy status*/
@@ -298,6 +310,8 @@ struct intel_gvt {
wait_queue_head_t service_thread_wq;
unsigned long service_request;
+ struct engine_mmio *engine_mmio_list;
+
struct dentry *debugfs_root;
};
@@ -336,7 +350,7 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt);
/* Aperture/GM space definitions for GVT device */
#define gvt_aperture_sz(gvt) (gvt->dev_priv->ggtt.mappable_end)
-#define gvt_aperture_pa_base(gvt) (gvt->dev_priv->ggtt.mappable_base)
+#define gvt_aperture_pa_base(gvt) (gvt->dev_priv->ggtt.gmadr.start)
#define gvt_ggtt_gm_sz(gvt) (gvt->dev_priv->ggtt.base.total)
#define gvt_ggtt_sz(gvt) \
@@ -398,23 +412,20 @@ void intel_vgpu_free_resource(struct intel_vgpu *vgpu);
void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
u32 fence, u64 value);
-/* Macros for easily accessing vGPU virtual/shadow register */
-#define vgpu_vreg(vgpu, reg) \
- (*(u32 *)(vgpu->mmio.vreg + INTEL_GVT_MMIO_OFFSET(reg)))
-#define vgpu_vreg8(vgpu, reg) \
- (*(u8 *)(vgpu->mmio.vreg + INTEL_GVT_MMIO_OFFSET(reg)))
-#define vgpu_vreg16(vgpu, reg) \
- (*(u16 *)(vgpu->mmio.vreg + INTEL_GVT_MMIO_OFFSET(reg)))
-#define vgpu_vreg64(vgpu, reg) \
- (*(u64 *)(vgpu->mmio.vreg + INTEL_GVT_MMIO_OFFSET(reg)))
-#define vgpu_sreg(vgpu, reg) \
- (*(u32 *)(vgpu->mmio.sreg + INTEL_GVT_MMIO_OFFSET(reg)))
-#define vgpu_sreg8(vgpu, reg) \
- (*(u8 *)(vgpu->mmio.sreg + INTEL_GVT_MMIO_OFFSET(reg)))
-#define vgpu_sreg16(vgpu, reg) \
- (*(u16 *)(vgpu->mmio.sreg + INTEL_GVT_MMIO_OFFSET(reg)))
-#define vgpu_sreg64(vgpu, reg) \
- (*(u64 *)(vgpu->mmio.sreg + INTEL_GVT_MMIO_OFFSET(reg)))
+/* Macros for easily accessing vGPU virtual/shadow register.
+ * Explicitly separate use for typed MMIO reg or real offset. */
+#define vgpu_vreg_t(vgpu, reg) \
+ (*(u32 *)(vgpu->mmio.vreg + i915_mmio_reg_offset(reg)))
+#define vgpu_vreg(vgpu, offset) \
+ (*(u32 *)(vgpu->mmio.vreg + (offset)))
+#define vgpu_vreg64_t(vgpu, reg) \
+ (*(u64 *)(vgpu->mmio.vreg + i915_mmio_reg_offset(reg)))
+#define vgpu_vreg64(vgpu, offset) \
+ (*(u64 *)(vgpu->mmio.vreg + (offset)))
+#define vgpu_sreg_t(vgpu, reg) \
+ (*(u32 *)(vgpu->mmio.sreg + i915_mmio_reg_offset(reg)))
+#define vgpu_sreg(vgpu, offset) \
+ (*(u32 *)(vgpu->mmio.sreg + (offset)))
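+/* The _t variants take a typed i915_reg_t (e.g. PCH_GMBUS2) and resolve the
+ * offset through i915_mmio_reg_offset(); the plain variants take a raw byte
+ * offset into the vGPU MMIO space.
+ */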
#define for_each_active_vgpu(gvt, vgpu, id) \
idr_for_each_entry((&(gvt)->vgpu_idr), (vgpu), (id)) \
@@ -505,7 +516,8 @@ static inline u64 intel_vgpu_get_bar_gpa(struct intel_vgpu *vgpu, int bar)
}
void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu);
-int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa);
+int intel_vgpu_init_opregion(struct intel_vgpu *vgpu);
+int intel_vgpu_opregion_base_write_handler(struct intel_vgpu *vgpu, u32 gpa);
int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci);
void populate_pvinfo_page(struct intel_vgpu *vgpu);
@@ -532,6 +544,10 @@ struct intel_gvt_ops {
const char *name);
bool (*get_gvt_attrs)(struct attribute ***type_attrs,
struct attribute_group ***intel_vgpu_type_groups);
+ int (*vgpu_query_plane)(struct intel_vgpu *vgpu, void *);
+ int (*vgpu_get_dmabuf)(struct intel_vgpu *vgpu, unsigned int);
+ int (*write_protect_handler)(struct intel_vgpu *, u64, void *,
+ unsigned int);
};
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 94fc04210bac..92d6468daeee 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -174,8 +174,10 @@ void enter_failsafe_mode(struct intel_vgpu *vgpu, int reason)
break;
case GVT_FAILSAFE_INSUFFICIENT_RESOURCE:
pr_err("Graphics resource is not enough for the guest\n");
+ break;
case GVT_FAILSAFE_GUEST_ERR:
pr_err("GVT Internal error for the guest\n");
+ break;
default:
break;
}
@@ -341,13 +343,13 @@ static int pch_pp_control_mmio_write(struct intel_vgpu *vgpu,
write_vreg(vgpu, offset, p_data, bytes);
if (vgpu_vreg(vgpu, offset) & PANEL_POWER_ON) {
- vgpu_vreg(vgpu, PCH_PP_STATUS) |= PP_ON;
- vgpu_vreg(vgpu, PCH_PP_STATUS) |= PP_SEQUENCE_STATE_ON_IDLE;
- vgpu_vreg(vgpu, PCH_PP_STATUS) &= ~PP_SEQUENCE_POWER_DOWN;
- vgpu_vreg(vgpu, PCH_PP_STATUS) &= ~PP_CYCLE_DELAY_ACTIVE;
+ vgpu_vreg_t(vgpu, PCH_PP_STATUS) |= PP_ON;
+ vgpu_vreg_t(vgpu, PCH_PP_STATUS) |= PP_SEQUENCE_STATE_ON_IDLE;
+ vgpu_vreg_t(vgpu, PCH_PP_STATUS) &= ~PP_SEQUENCE_POWER_DOWN;
+ vgpu_vreg_t(vgpu, PCH_PP_STATUS) &= ~PP_CYCLE_DELAY_ACTIVE;
} else
- vgpu_vreg(vgpu, PCH_PP_STATUS) &=
+ vgpu_vreg_t(vgpu, PCH_PP_STATUS) &=
~(PP_ON | PP_SEQUENCE_POWER_DOWN
| PP_CYCLE_DELAY_ACTIVE);
return 0;
@@ -501,7 +503,7 @@ static int ddi_buf_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
} else {
vgpu_vreg(vgpu, offset) |= DDI_BUF_IS_IDLE;
if (offset == i915_mmio_reg_offset(DDI_BUF_CTL(PORT_E)))
- vgpu_vreg(vgpu, DP_TP_STATUS(PORT_E))
+ vgpu_vreg_t(vgpu, DP_TP_STATUS(PORT_E))
&= ~DP_TP_STATUS_AUTOTRAIN_DONE;
}
return 0;
@@ -519,9 +521,9 @@ static int fdi_rx_iir_mmio_write(struct intel_vgpu *vgpu,
static int fdi_auto_training_started(struct intel_vgpu *vgpu)
{
- u32 ddi_buf_ctl = vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_E));
+ u32 ddi_buf_ctl = vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_E));
u32 rx_ctl = vgpu_vreg(vgpu, _FDI_RXA_CTL);
- u32 tx_ctl = vgpu_vreg(vgpu, DP_TP_CTL(PORT_E));
+ u32 tx_ctl = vgpu_vreg_t(vgpu, DP_TP_CTL(PORT_E));
if ((ddi_buf_ctl & DDI_BUF_CTL_ENABLE) &&
(rx_ctl & FDI_RX_ENABLE) &&
@@ -562,12 +564,12 @@ static int check_fdi_rx_train_status(struct intel_vgpu *vgpu,
fdi_tx_check_bits = FDI_TX_ENABLE | fdi_tx_train_bits;
/* If imr bit has been masked */
- if (vgpu_vreg(vgpu, fdi_rx_imr) & fdi_iir_check_bits)
+ if (vgpu_vreg_t(vgpu, fdi_rx_imr) & fdi_iir_check_bits)
return 0;
- if (((vgpu_vreg(vgpu, fdi_tx_ctl) & fdi_tx_check_bits)
+ if (((vgpu_vreg_t(vgpu, fdi_tx_ctl) & fdi_tx_check_bits)
== fdi_tx_check_bits)
- && ((vgpu_vreg(vgpu, fdi_rx_ctl) & fdi_rx_check_bits)
+ && ((vgpu_vreg_t(vgpu, fdi_rx_ctl) & fdi_rx_check_bits)
== fdi_rx_check_bits))
return 1;
else
@@ -624,17 +626,17 @@ static int update_fdi_rx_iir_status(struct intel_vgpu *vgpu,
if (ret < 0)
return ret;
if (ret)
- vgpu_vreg(vgpu, fdi_rx_iir) |= FDI_RX_BIT_LOCK;
+ vgpu_vreg_t(vgpu, fdi_rx_iir) |= FDI_RX_BIT_LOCK;
ret = check_fdi_rx_train_status(vgpu, index, FDI_LINK_TRAIN_PATTERN2);
if (ret < 0)
return ret;
if (ret)
- vgpu_vreg(vgpu, fdi_rx_iir) |= FDI_RX_SYMBOL_LOCK;
+ vgpu_vreg_t(vgpu, fdi_rx_iir) |= FDI_RX_SYMBOL_LOCK;
if (offset == _FDI_RXA_CTL)
if (fdi_auto_training_started(vgpu))
- vgpu_vreg(vgpu, DP_TP_STATUS(PORT_E)) |=
+ vgpu_vreg_t(vgpu, DP_TP_STATUS(PORT_E)) |=
DP_TP_STATUS_AUTOTRAIN_DONE;
return 0;
}
@@ -655,7 +657,7 @@ static int dp_tp_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
data = (vgpu_vreg(vgpu, offset) & GENMASK(10, 8)) >> 8;
if (data == 0x2) {
status_reg = DP_TP_STATUS(index);
- vgpu_vreg(vgpu, status_reg) |= (1 << 25);
+ vgpu_vreg_t(vgpu, status_reg) |= (1 << 25);
}
return 0;
}
@@ -719,7 +721,7 @@ static int pri_surf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
};
write_vreg(vgpu, offset, p_data, bytes);
- vgpu_vreg(vgpu, surflive_reg) = vgpu_vreg(vgpu, offset);
+ vgpu_vreg_t(vgpu, surflive_reg) = vgpu_vreg(vgpu, offset);
set_bit(flip_event[index], vgpu->irq.flip_done_event[index]);
return 0;
@@ -740,7 +742,7 @@ static int spr_surf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
};
write_vreg(vgpu, offset, p_data, bytes);
- vgpu_vreg(vgpu, surflive_reg) = vgpu_vreg(vgpu, offset);
+ vgpu_vreg_t(vgpu, surflive_reg) = vgpu_vreg(vgpu, offset);
set_bit(flip_event[index], vgpu->irq.flip_done_event[index]);
return 0;
@@ -1062,9 +1064,9 @@ static void write_virtual_sbi_register(struct intel_vgpu *vgpu,
static int sbi_data_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
- if (((vgpu_vreg(vgpu, SBI_CTL_STAT) & SBI_OPCODE_MASK) >>
+ if (((vgpu_vreg_t(vgpu, SBI_CTL_STAT) & SBI_OPCODE_MASK) >>
SBI_OPCODE_SHIFT) == SBI_CMD_CRRD) {
- unsigned int sbi_offset = (vgpu_vreg(vgpu, SBI_ADDR) &
+ unsigned int sbi_offset = (vgpu_vreg_t(vgpu, SBI_ADDR) &
SBI_ADDR_OFFSET_MASK) >> SBI_ADDR_OFFSET_SHIFT;
vgpu_vreg(vgpu, offset) = read_virtual_sbi_register(vgpu,
sbi_offset);
@@ -1089,13 +1091,13 @@ static int sbi_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
vgpu_vreg(vgpu, offset) = data;
- if (((vgpu_vreg(vgpu, SBI_CTL_STAT) & SBI_OPCODE_MASK) >>
+ if (((vgpu_vreg_t(vgpu, SBI_CTL_STAT) & SBI_OPCODE_MASK) >>
SBI_OPCODE_SHIFT) == SBI_CMD_CRWR) {
- unsigned int sbi_offset = (vgpu_vreg(vgpu, SBI_ADDR) &
+ unsigned int sbi_offset = (vgpu_vreg_t(vgpu, SBI_ADDR) &
SBI_ADDR_OFFSET_MASK) >> SBI_ADDR_OFFSET_SHIFT;
write_virtual_sbi_register(vgpu, sbi_offset,
- vgpu_vreg(vgpu, SBI_DATA));
+ vgpu_vreg_t(vgpu, SBI_DATA));
}
return 0;
}
@@ -1341,7 +1343,7 @@ static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset,
{
u32 value = *(u32 *)p_data;
u32 cmd = value & 0xff;
- u32 *data0 = &vgpu_vreg(vgpu, GEN6_PCODE_DATA);
+ u32 *data0 = &vgpu_vreg_t(vgpu, GEN6_PCODE_DATA);
switch (cmd) {
case GEN9_PCODE_READ_MEM_LATENCY:
@@ -1396,7 +1398,7 @@ static int hws_pga_write(struct intel_vgpu *vgpu, unsigned int offset,
* update the VM CSB status correctly. Here listed registers can
* support BDW, SKL or other platforms with same HWSP registers.
*/
- if (unlikely(ring_id < 0 || ring_id > I915_NUM_ENGINES)) {
+ if (unlikely(ring_id < 0 || ring_id >= I915_NUM_ENGINES)) {
gvt_vgpu_err("VM(%d) access unknown hardware status page register:0x%x\n",
vgpu->id, offset);
return -EINVAL;
@@ -1471,7 +1473,7 @@ static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
u32 data = *(u32 *)p_data;
int ret = 0;
- if (WARN_ON(ring_id < 0 || ring_id > I915_NUM_ENGINES - 1))
+ if (WARN_ON(ring_id < 0 || ring_id >= I915_NUM_ENGINES))
return -EINVAL;
execlist = &vgpu->submission.execlist[ring_id];
@@ -1584,7 +1586,7 @@ static int ring_reset_ctl_write(struct intel_vgpu *vgpu,
}
#define MMIO_F(reg, s, f, am, rm, d, r, w) do { \
- ret = new_mmio_info(gvt, INTEL_GVT_MMIO_OFFSET(reg), \
+ ret = new_mmio_info(gvt, i915_mmio_reg_offset(reg), \
f, s, am, rm, d, r, w); \
if (ret) \
return ret; \
@@ -1652,22 +1654,22 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_GM_RDR(BLT_HWS_PGA_GEN7, D_ALL, NULL, NULL);
MMIO_GM_RDR(VEBOX_HWS_PGA_GEN7, D_ALL, NULL, NULL);
-#define RING_REG(base) (base + 0x28)
+#define RING_REG(base) _MMIO((base) + 0x28)
MMIO_RING_DFH(RING_REG, D_ALL, F_CMD_ACCESS, NULL, NULL);
#undef RING_REG
-#define RING_REG(base) (base + 0x134)
+#define RING_REG(base) _MMIO((base) + 0x134)
MMIO_RING_DFH(RING_REG, D_ALL, F_CMD_ACCESS, NULL, NULL);
#undef RING_REG
-#define RING_REG(base) (base + 0x6c)
+#define RING_REG(base) _MMIO((base) + 0x6c)
MMIO_RING_DFH(RING_REG, D_ALL, 0, mmio_read_from_hw, NULL);
#undef RING_REG
MMIO_DH(GEN7_SC_INSTDONE, D_BDW_PLUS, mmio_read_from_hw, NULL);
- MMIO_GM_RDR(0x2148, D_ALL, NULL, NULL);
+ MMIO_GM_RDR(_MMIO(0x2148), D_ALL, NULL, NULL);
MMIO_GM_RDR(CCID, D_ALL, NULL, NULL);
- MMIO_GM_RDR(0x12198, D_ALL, NULL, NULL);
+ MMIO_GM_RDR(_MMIO(0x12198), D_ALL, NULL, NULL);
MMIO_D(GEN7_CXT_SIZE, D_ALL);
MMIO_RING_DFH(RING_TAIL, D_ALL, F_CMD_ACCESS, NULL, NULL);
@@ -1677,7 +1679,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_RING_GM_RDR(RING_START, D_ALL, NULL, NULL);
/* RING MODE */
-#define RING_REG(base) (base + 0x29c)
+#define RING_REG(base) _MMIO((base) + 0x29c)
MMIO_RING_DFH(RING_REG, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL,
ring_mode_mmio_write);
#undef RING_REG
@@ -1696,37 +1698,37 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
NULL, NULL);
MMIO_DFH(CACHE_MODE_1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(CACHE_MODE_0, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x2124, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x2124), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x20dc, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x20dc), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(_3D_CHICKEN3, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x2088, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x20e4, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x2470, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x2088), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x20e4), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x2470), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(GAM_ECOCHK, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(GEN7_COMMON_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
NULL, NULL);
MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
NULL, NULL);
- MMIO_DFH(0x9030, D_ALL, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x20a0, D_ALL, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x2420, D_ALL, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x2430, D_ALL, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x2434, D_ALL, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x2438, D_ALL, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x243c, D_ALL, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x7018, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x9030), D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x20a0), D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x2420), D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x2430), D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x2434), D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x2438), D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x243c), D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x7018), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(HALF_SLICE_CHICKEN3, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(GEN7_HALF_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
/* display */
- MMIO_F(0x60220, 0x20, 0, 0, 0, D_ALL, NULL, NULL);
- MMIO_D(0x602a0, D_ALL);
+ MMIO_F(_MMIO(0x60220), 0x20, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_D(_MMIO(0x602a0), D_ALL);
- MMIO_D(0x65050, D_ALL);
- MMIO_D(0x650b4, D_ALL);
+ MMIO_D(_MMIO(0x65050), D_ALL);
+ MMIO_D(_MMIO(0x650b4), D_ALL);
- MMIO_D(0xc4040, D_ALL);
+ MMIO_D(_MMIO(0xc4040), D_ALL);
MMIO_D(DERRMR, D_ALL);
MMIO_D(PIPEDSL(PIPE_A), D_ALL);
@@ -1766,14 +1768,14 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_D(CURBASE(PIPE_B), D_ALL);
MMIO_D(CURBASE(PIPE_C), D_ALL);
- MMIO_D(0x700ac, D_ALL);
- MMIO_D(0x710ac, D_ALL);
- MMIO_D(0x720ac, D_ALL);
+ MMIO_D(_MMIO(0x700ac), D_ALL);
+ MMIO_D(_MMIO(0x710ac), D_ALL);
+ MMIO_D(_MMIO(0x720ac), D_ALL);
- MMIO_D(0x70090, D_ALL);
- MMIO_D(0x70094, D_ALL);
- MMIO_D(0x70098, D_ALL);
- MMIO_D(0x7009c, D_ALL);
+ MMIO_D(_MMIO(0x70090), D_ALL);
+ MMIO_D(_MMIO(0x70094), D_ALL);
+ MMIO_D(_MMIO(0x70098), D_ALL);
+ MMIO_D(_MMIO(0x7009c), D_ALL);
MMIO_D(DSPCNTR(PIPE_A), D_ALL);
MMIO_D(DSPADDR(PIPE_A), D_ALL);
@@ -1949,24 +1951,24 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_D(BLC_PWM_PCH_CTL1, D_ALL);
MMIO_D(BLC_PWM_PCH_CTL2, D_ALL);
- MMIO_D(0x48268, D_ALL);
+ MMIO_D(_MMIO(0x48268), D_ALL);
MMIO_F(PCH_GMBUS0, 4 * 4, 0, 0, 0, D_ALL, gmbus_mmio_read,
gmbus_mmio_write);
MMIO_F(PCH_GPIOA, 6 * 4, F_UNALIGN, 0, 0, D_ALL, NULL, NULL);
- MMIO_F(0xe4f00, 0x28, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(_MMIO(0xe4f00), 0x28, 0, 0, 0, D_ALL, NULL, NULL);
- MMIO_F(_PCH_DPB_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_PRE_SKL, NULL,
+ MMIO_F(_MMIO(_PCH_DPB_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_PRE_SKL, NULL,
dp_aux_ch_ctl_mmio_write);
- MMIO_F(_PCH_DPC_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_PRE_SKL, NULL,
+ MMIO_F(_MMIO(_PCH_DPC_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_PRE_SKL, NULL,
dp_aux_ch_ctl_mmio_write);
- MMIO_F(_PCH_DPD_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_PRE_SKL, NULL,
+ MMIO_F(_MMIO(_PCH_DPD_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_PRE_SKL, NULL,
dp_aux_ch_ctl_mmio_write);
MMIO_DH(PCH_ADPA, D_PRE_SKL, NULL, pch_adpa_mmio_write);
- MMIO_DH(_PCH_TRANSACONF, D_ALL, NULL, transconf_mmio_write);
- MMIO_DH(_PCH_TRANSBCONF, D_ALL, NULL, transconf_mmio_write);
+ MMIO_DH(_MMIO(_PCH_TRANSACONF), D_ALL, NULL, transconf_mmio_write);
+ MMIO_DH(_MMIO(_PCH_TRANSBCONF), D_ALL, NULL, transconf_mmio_write);
MMIO_DH(FDI_RX_IIR(PIPE_A), D_ALL, NULL, fdi_rx_iir_mmio_write);
MMIO_DH(FDI_RX_IIR(PIPE_B), D_ALL, NULL, fdi_rx_iir_mmio_write);
@@ -1978,30 +1980,30 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_DH(FDI_RX_CTL(PIPE_B), D_ALL, NULL, update_fdi_rx_iir_status);
MMIO_DH(FDI_RX_CTL(PIPE_C), D_ALL, NULL, update_fdi_rx_iir_status);
- MMIO_D(_PCH_TRANS_HTOTAL_A, D_ALL);
- MMIO_D(_PCH_TRANS_HBLANK_A, D_ALL);
- MMIO_D(_PCH_TRANS_HSYNC_A, D_ALL);
- MMIO_D(_PCH_TRANS_VTOTAL_A, D_ALL);
- MMIO_D(_PCH_TRANS_VBLANK_A, D_ALL);
- MMIO_D(_PCH_TRANS_VSYNC_A, D_ALL);
- MMIO_D(_PCH_TRANS_VSYNCSHIFT_A, D_ALL);
-
- MMIO_D(_PCH_TRANS_HTOTAL_B, D_ALL);
- MMIO_D(_PCH_TRANS_HBLANK_B, D_ALL);
- MMIO_D(_PCH_TRANS_HSYNC_B, D_ALL);
- MMIO_D(_PCH_TRANS_VTOTAL_B, D_ALL);
- MMIO_D(_PCH_TRANS_VBLANK_B, D_ALL);
- MMIO_D(_PCH_TRANS_VSYNC_B, D_ALL);
- MMIO_D(_PCH_TRANS_VSYNCSHIFT_B, D_ALL);
-
- MMIO_D(_PCH_TRANSA_DATA_M1, D_ALL);
- MMIO_D(_PCH_TRANSA_DATA_N1, D_ALL);
- MMIO_D(_PCH_TRANSA_DATA_M2, D_ALL);
- MMIO_D(_PCH_TRANSA_DATA_N2, D_ALL);
- MMIO_D(_PCH_TRANSA_LINK_M1, D_ALL);
- MMIO_D(_PCH_TRANSA_LINK_N1, D_ALL);
- MMIO_D(_PCH_TRANSA_LINK_M2, D_ALL);
- MMIO_D(_PCH_TRANSA_LINK_N2, D_ALL);
+ MMIO_D(_MMIO(_PCH_TRANS_HTOTAL_A), D_ALL);
+ MMIO_D(_MMIO(_PCH_TRANS_HBLANK_A), D_ALL);
+ MMIO_D(_MMIO(_PCH_TRANS_HSYNC_A), D_ALL);
+ MMIO_D(_MMIO(_PCH_TRANS_VTOTAL_A), D_ALL);
+ MMIO_D(_MMIO(_PCH_TRANS_VBLANK_A), D_ALL);
+ MMIO_D(_MMIO(_PCH_TRANS_VSYNC_A), D_ALL);
+ MMIO_D(_MMIO(_PCH_TRANS_VSYNCSHIFT_A), D_ALL);
+
+ MMIO_D(_MMIO(_PCH_TRANS_HTOTAL_B), D_ALL);
+ MMIO_D(_MMIO(_PCH_TRANS_HBLANK_B), D_ALL);
+ MMIO_D(_MMIO(_PCH_TRANS_HSYNC_B), D_ALL);
+ MMIO_D(_MMIO(_PCH_TRANS_VTOTAL_B), D_ALL);
+ MMIO_D(_MMIO(_PCH_TRANS_VBLANK_B), D_ALL);
+ MMIO_D(_MMIO(_PCH_TRANS_VSYNC_B), D_ALL);
+ MMIO_D(_MMIO(_PCH_TRANS_VSYNCSHIFT_B), D_ALL);
+
+ MMIO_D(_MMIO(_PCH_TRANSA_DATA_M1), D_ALL);
+ MMIO_D(_MMIO(_PCH_TRANSA_DATA_N1), D_ALL);
+ MMIO_D(_MMIO(_PCH_TRANSA_DATA_M2), D_ALL);
+ MMIO_D(_MMIO(_PCH_TRANSA_DATA_N2), D_ALL);
+ MMIO_D(_MMIO(_PCH_TRANSA_LINK_M1), D_ALL);
+ MMIO_D(_MMIO(_PCH_TRANSA_LINK_N1), D_ALL);
+ MMIO_D(_MMIO(_PCH_TRANSA_LINK_M2), D_ALL);
+ MMIO_D(_MMIO(_PCH_TRANSA_LINK_N2), D_ALL);
MMIO_D(TRANS_DP_CTL(PIPE_A), D_ALL);
MMIO_D(TRANS_DP_CTL(PIPE_B), D_ALL);
@@ -2019,38 +2021,38 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_D(TVIDEO_DIP_DATA(PIPE_C), D_ALL);
MMIO_D(TVIDEO_DIP_GCP(PIPE_C), D_ALL);
- MMIO_D(_FDI_RXA_MISC, D_ALL);
- MMIO_D(_FDI_RXB_MISC, D_ALL);
- MMIO_D(_FDI_RXA_TUSIZE1, D_ALL);
- MMIO_D(_FDI_RXA_TUSIZE2, D_ALL);
- MMIO_D(_FDI_RXB_TUSIZE1, D_ALL);
- MMIO_D(_FDI_RXB_TUSIZE2, D_ALL);
+ MMIO_D(_MMIO(_FDI_RXA_MISC), D_ALL);
+ MMIO_D(_MMIO(_FDI_RXB_MISC), D_ALL);
+ MMIO_D(_MMIO(_FDI_RXA_TUSIZE1), D_ALL);
+ MMIO_D(_MMIO(_FDI_RXA_TUSIZE2), D_ALL);
+ MMIO_D(_MMIO(_FDI_RXB_TUSIZE1), D_ALL);
+ MMIO_D(_MMIO(_FDI_RXB_TUSIZE2), D_ALL);
MMIO_DH(PCH_PP_CONTROL, D_ALL, NULL, pch_pp_control_mmio_write);
MMIO_D(PCH_PP_DIVISOR, D_ALL);
MMIO_D(PCH_PP_STATUS, D_ALL);
MMIO_D(PCH_LVDS, D_ALL);
- MMIO_D(_PCH_DPLL_A, D_ALL);
- MMIO_D(_PCH_DPLL_B, D_ALL);
- MMIO_D(_PCH_FPA0, D_ALL);
- MMIO_D(_PCH_FPA1, D_ALL);
- MMIO_D(_PCH_FPB0, D_ALL);
- MMIO_D(_PCH_FPB1, D_ALL);
+ MMIO_D(_MMIO(_PCH_DPLL_A), D_ALL);
+ MMIO_D(_MMIO(_PCH_DPLL_B), D_ALL);
+ MMIO_D(_MMIO(_PCH_FPA0), D_ALL);
+ MMIO_D(_MMIO(_PCH_FPA1), D_ALL);
+ MMIO_D(_MMIO(_PCH_FPB0), D_ALL);
+ MMIO_D(_MMIO(_PCH_FPB1), D_ALL);
MMIO_D(PCH_DREF_CONTROL, D_ALL);
MMIO_D(PCH_RAWCLK_FREQ, D_ALL);
MMIO_D(PCH_DPLL_SEL, D_ALL);
- MMIO_D(0x61208, D_ALL);
- MMIO_D(0x6120c, D_ALL);
+ MMIO_D(_MMIO(0x61208), D_ALL);
+ MMIO_D(_MMIO(0x6120c), D_ALL);
MMIO_D(PCH_PP_ON_DELAYS, D_ALL);
MMIO_D(PCH_PP_OFF_DELAYS, D_ALL);
- MMIO_DH(0xe651c, D_ALL, dpy_reg_mmio_read, NULL);
- MMIO_DH(0xe661c, D_ALL, dpy_reg_mmio_read, NULL);
- MMIO_DH(0xe671c, D_ALL, dpy_reg_mmio_read, NULL);
- MMIO_DH(0xe681c, D_ALL, dpy_reg_mmio_read, NULL);
- MMIO_DH(0xe6c04, D_ALL, dpy_reg_mmio_read, NULL);
- MMIO_DH(0xe6e1c, D_ALL, dpy_reg_mmio_read, NULL);
+ MMIO_DH(_MMIO(0xe651c), D_ALL, dpy_reg_mmio_read, NULL);
+ MMIO_DH(_MMIO(0xe661c), D_ALL, dpy_reg_mmio_read, NULL);
+ MMIO_DH(_MMIO(0xe671c), D_ALL, dpy_reg_mmio_read, NULL);
+ MMIO_DH(_MMIO(0xe681c), D_ALL, dpy_reg_mmio_read, NULL);
+ MMIO_DH(_MMIO(0xe6c04), D_ALL, dpy_reg_mmio_read, NULL);
+ MMIO_DH(_MMIO(0xe6e1c), D_ALL, dpy_reg_mmio_read, NULL);
MMIO_RO(PCH_PORT_HOTPLUG, D_ALL, 0,
PORTA_HOTPLUG_STATUS_MASK
@@ -2072,11 +2074,11 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_D(SOUTH_CHICKEN1, D_ALL);
MMIO_DH(SOUTH_CHICKEN2, D_ALL, NULL, south_chicken2_mmio_write);
- MMIO_D(_TRANSA_CHICKEN1, D_ALL);
- MMIO_D(_TRANSB_CHICKEN1, D_ALL);
+ MMIO_D(_MMIO(_TRANSA_CHICKEN1), D_ALL);
+ MMIO_D(_MMIO(_TRANSB_CHICKEN1), D_ALL);
MMIO_D(SOUTH_DSPCLK_GATE_D, D_ALL);
- MMIO_D(_TRANSA_CHICKEN2, D_ALL);
- MMIO_D(_TRANSB_CHICKEN2, D_ALL);
+ MMIO_D(_MMIO(_TRANSA_CHICKEN2), D_ALL);
+ MMIO_D(_MMIO(_TRANSB_CHICKEN2), D_ALL);
MMIO_D(ILK_DPFC_CB_BASE, D_ALL);
MMIO_D(ILK_DPFC_CONTROL, D_ALL);
@@ -2142,24 +2144,24 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_D(PREC_PAL_DATA(PIPE_C), D_ALL);
MMIO_F(PREC_PAL_GC_MAX(PIPE_C, 0), 4 * 3, 0, 0, 0, D_ALL, NULL, NULL);
- MMIO_D(0x60110, D_ALL);
- MMIO_D(0x61110, D_ALL);
- MMIO_F(0x70400, 0x40, 0, 0, 0, D_ALL, NULL, NULL);
- MMIO_F(0x71400, 0x40, 0, 0, 0, D_ALL, NULL, NULL);
- MMIO_F(0x72400, 0x40, 0, 0, 0, D_ALL, NULL, NULL);
- MMIO_F(0x70440, 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
- MMIO_F(0x71440, 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
- MMIO_F(0x72440, 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
- MMIO_F(0x7044c, 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
- MMIO_F(0x7144c, 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
- MMIO_F(0x7244c, 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
+ MMIO_D(_MMIO(0x60110), D_ALL);
+ MMIO_D(_MMIO(0x61110), D_ALL);
+ MMIO_F(_MMIO(0x70400), 0x40, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(_MMIO(0x71400), 0x40, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(_MMIO(0x72400), 0x40, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(_MMIO(0x70440), 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
+ MMIO_F(_MMIO(0x71440), 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
+ MMIO_F(_MMIO(0x72440), 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
+ MMIO_F(_MMIO(0x7044c), 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
+ MMIO_F(_MMIO(0x7144c), 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
+ MMIO_F(_MMIO(0x7244c), 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
MMIO_D(PIPE_WM_LINETIME(PIPE_A), D_ALL);
MMIO_D(PIPE_WM_LINETIME(PIPE_B), D_ALL);
MMIO_D(PIPE_WM_LINETIME(PIPE_C), D_ALL);
MMIO_D(SPLL_CTL, D_ALL);
- MMIO_D(_WRPLL_CTL1, D_ALL);
- MMIO_D(_WRPLL_CTL2, D_ALL);
+ MMIO_D(_MMIO(_WRPLL_CTL1), D_ALL);
+ MMIO_D(_MMIO(_WRPLL_CTL2), D_ALL);
MMIO_D(PORT_CLK_SEL(PORT_A), D_ALL);
MMIO_D(PORT_CLK_SEL(PORT_B), D_ALL);
MMIO_D(PORT_CLK_SEL(PORT_C), D_ALL);
@@ -2170,15 +2172,15 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_D(TRANS_CLK_SEL(TRANSCODER_C), D_ALL);
MMIO_D(HSW_NDE_RSTWRN_OPT, D_ALL);
- MMIO_D(0x46508, D_ALL);
+ MMIO_D(_MMIO(0x46508), D_ALL);
- MMIO_D(0x49080, D_ALL);
- MMIO_D(0x49180, D_ALL);
- MMIO_D(0x49280, D_ALL);
+ MMIO_D(_MMIO(0x49080), D_ALL);
+ MMIO_D(_MMIO(0x49180), D_ALL);
+ MMIO_D(_MMIO(0x49280), D_ALL);
- MMIO_F(0x49090, 0x14, 0, 0, 0, D_ALL, NULL, NULL);
- MMIO_F(0x49190, 0x14, 0, 0, 0, D_ALL, NULL, NULL);
- MMIO_F(0x49290, 0x14, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(_MMIO(0x49090), 0x14, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(_MMIO(0x49190), 0x14, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(_MMIO(0x49290), 0x14, 0, 0, 0, D_ALL, NULL, NULL);
MMIO_D(GAMMA_MODE(PIPE_A), D_ALL);
MMIO_D(GAMMA_MODE(PIPE_B), D_ALL);
@@ -2198,7 +2200,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_DH(SBI_CTL_STAT, D_ALL, NULL, sbi_ctl_mmio_write);
MMIO_D(PIXCLK_GATE, D_ALL);
- MMIO_F(_DPA_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_ALL, NULL,
+ MMIO_F(_MMIO(_DPA_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_ALL, NULL,
dp_aux_ch_ctl_mmio_write);
MMIO_DH(DDI_BUF_CTL(PORT_A), D_ALL, NULL, ddi_buf_ctl_mmio_write);
@@ -2219,24 +2221,24 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_DH(DP_TP_STATUS(PORT_D), D_ALL, NULL, dp_tp_status_mmio_write);
MMIO_DH(DP_TP_STATUS(PORT_E), D_ALL, NULL, NULL);
- MMIO_F(_DDI_BUF_TRANS_A, 0x50, 0, 0, 0, D_ALL, NULL, NULL);
- MMIO_F(0x64e60, 0x50, 0, 0, 0, D_ALL, NULL, NULL);
- MMIO_F(0x64eC0, 0x50, 0, 0, 0, D_ALL, NULL, NULL);
- MMIO_F(0x64f20, 0x50, 0, 0, 0, D_ALL, NULL, NULL);
- MMIO_F(0x64f80, 0x50, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(_MMIO(_DDI_BUF_TRANS_A), 0x50, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(_MMIO(0x64e60), 0x50, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(_MMIO(0x64eC0), 0x50, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(_MMIO(0x64f20), 0x50, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(_MMIO(0x64f80), 0x50, 0, 0, 0, D_ALL, NULL, NULL);
MMIO_D(HSW_AUD_CFG(PIPE_A), D_ALL);
MMIO_D(HSW_AUD_PIN_ELD_CP_VLD, D_ALL);
- MMIO_DH(_TRANS_DDI_FUNC_CTL_A, D_ALL, NULL, NULL);
- MMIO_DH(_TRANS_DDI_FUNC_CTL_B, D_ALL, NULL, NULL);
- MMIO_DH(_TRANS_DDI_FUNC_CTL_C, D_ALL, NULL, NULL);
- MMIO_DH(_TRANS_DDI_FUNC_CTL_EDP, D_ALL, NULL, NULL);
+ MMIO_DH(_MMIO(_TRANS_DDI_FUNC_CTL_A), D_ALL, NULL, NULL);
+ MMIO_DH(_MMIO(_TRANS_DDI_FUNC_CTL_B), D_ALL, NULL, NULL);
+ MMIO_DH(_MMIO(_TRANS_DDI_FUNC_CTL_C), D_ALL, NULL, NULL);
+ MMIO_DH(_MMIO(_TRANS_DDI_FUNC_CTL_EDP), D_ALL, NULL, NULL);
- MMIO_D(_TRANSA_MSA_MISC, D_ALL);
- MMIO_D(_TRANSB_MSA_MISC, D_ALL);
- MMIO_D(_TRANSC_MSA_MISC, D_ALL);
- MMIO_D(_TRANS_EDP_MSA_MISC, D_ALL);
+ MMIO_D(_MMIO(_TRANSA_MSA_MISC), D_ALL);
+ MMIO_D(_MMIO(_TRANSB_MSA_MISC), D_ALL);
+ MMIO_D(_MMIO(_TRANSC_MSA_MISC), D_ALL);
+ MMIO_D(_MMIO(_TRANS_EDP_MSA_MISC), D_ALL);
MMIO_DH(FORCEWAKE, D_ALL, NULL, NULL);
MMIO_D(FORCEWAKE_ACK, D_ALL);
@@ -2302,101 +2304,101 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_D(GEN6_UCGCTL1, D_ALL);
MMIO_D(GEN6_UCGCTL2, D_ALL);
- MMIO_F(0x4f000, 0x90, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(_MMIO(0x4f000), 0x90, 0, 0, 0, D_ALL, NULL, NULL);
MMIO_D(GEN6_PCODE_DATA, D_ALL);
- MMIO_D(0x13812c, D_ALL);
+ MMIO_D(_MMIO(0x13812c), D_ALL);
MMIO_DH(GEN7_ERR_INT, D_ALL, NULL, NULL);
MMIO_D(HSW_EDRAM_CAP, D_ALL);
MMIO_D(HSW_IDICR, D_ALL);
MMIO_DH(GFX_FLSH_CNTL_GEN6, D_ALL, NULL, NULL);
- MMIO_D(0x3c, D_ALL);
- MMIO_D(0x860, D_ALL);
+ MMIO_D(_MMIO(0x3c), D_ALL);
+ MMIO_D(_MMIO(0x860), D_ALL);
MMIO_D(ECOSKPD, D_ALL);
- MMIO_D(0x121d0, D_ALL);
+ MMIO_D(_MMIO(0x121d0), D_ALL);
MMIO_D(GEN6_BLITTER_ECOSKPD, D_ALL);
- MMIO_D(0x41d0, D_ALL);
+ MMIO_D(_MMIO(0x41d0), D_ALL);
MMIO_D(GAC_ECO_BITS, D_ALL);
- MMIO_D(0x6200, D_ALL);
- MMIO_D(0x6204, D_ALL);
- MMIO_D(0x6208, D_ALL);
- MMIO_D(0x7118, D_ALL);
- MMIO_D(0x7180, D_ALL);
- MMIO_D(0x7408, D_ALL);
- MMIO_D(0x7c00, D_ALL);
+ MMIO_D(_MMIO(0x6200), D_ALL);
+ MMIO_D(_MMIO(0x6204), D_ALL);
+ MMIO_D(_MMIO(0x6208), D_ALL);
+ MMIO_D(_MMIO(0x7118), D_ALL);
+ MMIO_D(_MMIO(0x7180), D_ALL);
+ MMIO_D(_MMIO(0x7408), D_ALL);
+ MMIO_D(_MMIO(0x7c00), D_ALL);
MMIO_DH(GEN6_MBCTL, D_ALL, NULL, mbctl_write);
- MMIO_D(0x911c, D_ALL);
- MMIO_D(0x9120, D_ALL);
+ MMIO_D(_MMIO(0x911c), D_ALL);
+ MMIO_D(_MMIO(0x9120), D_ALL);
MMIO_DFH(GEN7_UCGCTL4, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_D(GAB_CTL, D_ALL);
- MMIO_D(0x48800, D_ALL);
- MMIO_D(0xce044, D_ALL);
- MMIO_D(0xe6500, D_ALL);
- MMIO_D(0xe6504, D_ALL);
- MMIO_D(0xe6600, D_ALL);
- MMIO_D(0xe6604, D_ALL);
- MMIO_D(0xe6700, D_ALL);
- MMIO_D(0xe6704, D_ALL);
- MMIO_D(0xe6800, D_ALL);
- MMIO_D(0xe6804, D_ALL);
+ MMIO_D(_MMIO(0x48800), D_ALL);
+ MMIO_D(_MMIO(0xce044), D_ALL);
+ MMIO_D(_MMIO(0xe6500), D_ALL);
+ MMIO_D(_MMIO(0xe6504), D_ALL);
+ MMIO_D(_MMIO(0xe6600), D_ALL);
+ MMIO_D(_MMIO(0xe6604), D_ALL);
+ MMIO_D(_MMIO(0xe6700), D_ALL);
+ MMIO_D(_MMIO(0xe6704), D_ALL);
+ MMIO_D(_MMIO(0xe6800), D_ALL);
+ MMIO_D(_MMIO(0xe6804), D_ALL);
MMIO_D(PCH_GMBUS4, D_ALL);
MMIO_D(PCH_GMBUS5, D_ALL);
- MMIO_D(0x902c, D_ALL);
- MMIO_D(0xec008, D_ALL);
- MMIO_D(0xec00c, D_ALL);
- MMIO_D(0xec008 + 0x18, D_ALL);
- MMIO_D(0xec00c + 0x18, D_ALL);
- MMIO_D(0xec008 + 0x18 * 2, D_ALL);
- MMIO_D(0xec00c + 0x18 * 2, D_ALL);
- MMIO_D(0xec008 + 0x18 * 3, D_ALL);
- MMIO_D(0xec00c + 0x18 * 3, D_ALL);
- MMIO_D(0xec408, D_ALL);
- MMIO_D(0xec40c, D_ALL);
- MMIO_D(0xec408 + 0x18, D_ALL);
- MMIO_D(0xec40c + 0x18, D_ALL);
- MMIO_D(0xec408 + 0x18 * 2, D_ALL);
- MMIO_D(0xec40c + 0x18 * 2, D_ALL);
- MMIO_D(0xec408 + 0x18 * 3, D_ALL);
- MMIO_D(0xec40c + 0x18 * 3, D_ALL);
- MMIO_D(0xfc810, D_ALL);
- MMIO_D(0xfc81c, D_ALL);
- MMIO_D(0xfc828, D_ALL);
- MMIO_D(0xfc834, D_ALL);
- MMIO_D(0xfcc00, D_ALL);
- MMIO_D(0xfcc0c, D_ALL);
- MMIO_D(0xfcc18, D_ALL);
- MMIO_D(0xfcc24, D_ALL);
- MMIO_D(0xfd000, D_ALL);
- MMIO_D(0xfd00c, D_ALL);
- MMIO_D(0xfd018, D_ALL);
- MMIO_D(0xfd024, D_ALL);
- MMIO_D(0xfd034, D_ALL);
+ MMIO_D(_MMIO(0x902c), D_ALL);
+ MMIO_D(_MMIO(0xec008), D_ALL);
+ MMIO_D(_MMIO(0xec00c), D_ALL);
+ MMIO_D(_MMIO(0xec008 + 0x18), D_ALL);
+ MMIO_D(_MMIO(0xec00c + 0x18), D_ALL);
+ MMIO_D(_MMIO(0xec008 + 0x18 * 2), D_ALL);
+ MMIO_D(_MMIO(0xec00c + 0x18 * 2), D_ALL);
+ MMIO_D(_MMIO(0xec008 + 0x18 * 3), D_ALL);
+ MMIO_D(_MMIO(0xec00c + 0x18 * 3), D_ALL);
+ MMIO_D(_MMIO(0xec408), D_ALL);
+ MMIO_D(_MMIO(0xec40c), D_ALL);
+ MMIO_D(_MMIO(0xec408 + 0x18), D_ALL);
+ MMIO_D(_MMIO(0xec40c + 0x18), D_ALL);
+ MMIO_D(_MMIO(0xec408 + 0x18 * 2), D_ALL);
+ MMIO_D(_MMIO(0xec40c + 0x18 * 2), D_ALL);
+ MMIO_D(_MMIO(0xec408 + 0x18 * 3), D_ALL);
+ MMIO_D(_MMIO(0xec40c + 0x18 * 3), D_ALL);
+ MMIO_D(_MMIO(0xfc810), D_ALL);
+ MMIO_D(_MMIO(0xfc81c), D_ALL);
+ MMIO_D(_MMIO(0xfc828), D_ALL);
+ MMIO_D(_MMIO(0xfc834), D_ALL);
+ MMIO_D(_MMIO(0xfcc00), D_ALL);
+ MMIO_D(_MMIO(0xfcc0c), D_ALL);
+ MMIO_D(_MMIO(0xfcc18), D_ALL);
+ MMIO_D(_MMIO(0xfcc24), D_ALL);
+ MMIO_D(_MMIO(0xfd000), D_ALL);
+ MMIO_D(_MMIO(0xfd00c), D_ALL);
+ MMIO_D(_MMIO(0xfd018), D_ALL);
+ MMIO_D(_MMIO(0xfd024), D_ALL);
+ MMIO_D(_MMIO(0xfd034), D_ALL);
MMIO_DH(FPGA_DBG, D_ALL, NULL, fpga_dbg_mmio_write);
- MMIO_D(0x2054, D_ALL);
- MMIO_D(0x12054, D_ALL);
- MMIO_D(0x22054, D_ALL);
- MMIO_D(0x1a054, D_ALL);
-
- MMIO_D(0x44070, D_ALL);
- MMIO_DFH(0x215c, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x2178, D_ALL, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x217c, D_ALL, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x12178, D_ALL, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x1217c, D_ALL, F_CMD_ACCESS, NULL, NULL);
-
- MMIO_F(0x2290, 8, F_CMD_ACCESS, 0, 0, D_BDW_PLUS, NULL, NULL);
- MMIO_D(0x2b00, D_BDW_PLUS);
- MMIO_D(0x2360, D_BDW_PLUS);
- MMIO_F(0x5200, 32, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
- MMIO_F(0x5240, 32, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
- MMIO_F(0x5280, 16, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
-
- MMIO_DFH(0x1c17c, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x1c178, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_D(_MMIO(0x2054), D_ALL);
+ MMIO_D(_MMIO(0x12054), D_ALL);
+ MMIO_D(_MMIO(0x22054), D_ALL);
+ MMIO_D(_MMIO(0x1a054), D_ALL);
+
+ MMIO_D(_MMIO(0x44070), D_ALL);
+ MMIO_DFH(_MMIO(0x215c), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x2178), D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x217c), D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x12178), D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x1217c), D_ALL, F_CMD_ACCESS, NULL, NULL);
+
+ MMIO_F(_MMIO(0x2290), 8, F_CMD_ACCESS, 0, 0, D_BDW_PLUS, NULL, NULL);
+ MMIO_D(_MMIO(0x2b00), D_BDW_PLUS);
+ MMIO_D(_MMIO(0x2360), D_BDW_PLUS);
+ MMIO_F(_MMIO(0x5200), 32, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(_MMIO(0x5240), 32, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(_MMIO(0x5280), 16, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
+
+ MMIO_DFH(_MMIO(0x1c17c), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x1c178), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(BCS_SWCTRL, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_F(HS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
@@ -2410,24 +2412,24 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_F(CL_PRIMITIVES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
MMIO_F(PS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
MMIO_F(PS_DEPTH_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
- MMIO_DH(0x4260, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
- MMIO_DH(0x4264, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
- MMIO_DH(0x4268, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
- MMIO_DH(0x426c, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
- MMIO_DH(0x4270, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
- MMIO_DFH(0x4094, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DH(_MMIO(0x4260), D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
+ MMIO_DH(_MMIO(0x4264), D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
+ MMIO_DH(_MMIO(0x4268), D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
+ MMIO_DH(_MMIO(0x426c), D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
+ MMIO_DH(_MMIO(0x4270), D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
+ MMIO_DFH(_MMIO(0x4094), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(ARB_MODE, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_RING_GM_RDR(RING_BBADDR, D_ALL, NULL, NULL);
- MMIO_DFH(0x2220, D_ALL, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x12220, D_ALL, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x22220, D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x2220), D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x12220), D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x22220), D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_RING_DFH(RING_SYNC_1, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_RING_DFH(RING_SYNC_0, D_ALL, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x22178, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x1a178, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x1a17c, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x2217c, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x22178), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x1a178), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x1a17c), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x2217c), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
return 0;
}
@@ -2501,40 +2503,40 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
MMIO_RING_DFH(RING_ACTHD_UDW, D_BDW_PLUS, F_CMD_ACCESS,
mmio_read_from_hw, NULL);
-#define RING_REG(base) (base + 0xd0)
+#define RING_REG(base) _MMIO((base) + 0xd0)
MMIO_RING_F(RING_REG, 4, F_RO, 0,
~_MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET), D_BDW_PLUS, NULL,
ring_reset_ctl_write);
#undef RING_REG
-#define RING_REG(base) (base + 0x230)
+#define RING_REG(base) _MMIO((base) + 0x230)
MMIO_RING_DFH(RING_REG, D_BDW_PLUS, 0, NULL, elsp_mmio_write);
#undef RING_REG
-#define RING_REG(base) (base + 0x234)
+#define RING_REG(base) _MMIO((base) + 0x234)
MMIO_RING_F(RING_REG, 8, F_RO | F_CMD_ACCESS, 0, ~0, D_BDW_PLUS,
NULL, NULL);
#undef RING_REG
-#define RING_REG(base) (base + 0x244)
+#define RING_REG(base) _MMIO((base) + 0x244)
MMIO_RING_DFH(RING_REG, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
#undef RING_REG
-#define RING_REG(base) (base + 0x370)
+#define RING_REG(base) _MMIO((base) + 0x370)
MMIO_RING_F(RING_REG, 48, F_RO, 0, ~0, D_BDW_PLUS, NULL, NULL);
#undef RING_REG
-#define RING_REG(base) (base + 0x3a0)
+#define RING_REG(base) _MMIO((base) + 0x3a0)
MMIO_RING_DFH(RING_REG, D_BDW_PLUS, F_MODE_MASK, NULL, NULL);
#undef RING_REG
MMIO_D(PIPEMISC(PIPE_A), D_BDW_PLUS);
MMIO_D(PIPEMISC(PIPE_B), D_BDW_PLUS);
MMIO_D(PIPEMISC(PIPE_C), D_BDW_PLUS);
- MMIO_D(0x1c1d0, D_BDW_PLUS);
+ MMIO_D(_MMIO(0x1c1d0), D_BDW_PLUS);
MMIO_D(GEN6_MBCUNIT_SNPCR, D_BDW_PLUS);
MMIO_D(GEN7_MISCCPCTL, D_BDW_PLUS);
- MMIO_D(0x1c054, D_BDW_PLUS);
+ MMIO_D(_MMIO(0x1c054), D_BDW_PLUS);
MMIO_DH(GEN6_PCODE_MAILBOX, D_BDW_PLUS, NULL, mailbox_write);
@@ -2543,7 +2545,7 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
MMIO_D(GAMTARBMODE, D_BDW_PLUS);
-#define RING_REG(base) (base + 0x270)
+#define RING_REG(base) _MMIO((base) + 0x270)
MMIO_RING_F(RING_REG, 32, 0, 0, 0, D_BDW_PLUS, NULL, NULL);
#undef RING_REG
@@ -2556,10 +2558,10 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
MMIO_D(CHICKEN_PIPESL_1(PIPE_C), D_BDW_PLUS);
MMIO_D(WM_MISC, D_BDW);
- MMIO_D(BDW_EDP_PSR_BASE, D_BDW);
+ MMIO_D(_MMIO(BDW_EDP_PSR_BASE), D_BDW);
- MMIO_D(0x66c00, D_BDW_PLUS);
- MMIO_D(0x66c04, D_BDW_PLUS);
+ MMIO_D(_MMIO(0x66c00), D_BDW_PLUS);
+ MMIO_D(_MMIO(0x66c04), D_BDW_PLUS);
MMIO_D(HSW_GTT_CACHE_EN, D_BDW_PLUS);
@@ -2567,54 +2569,54 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
MMIO_D(GEN8_EU_DISABLE1, D_BDW_PLUS);
MMIO_D(GEN8_EU_DISABLE2, D_BDW_PLUS);
- MMIO_D(0xfdc, D_BDW_PLUS);
+ MMIO_D(_MMIO(0xfdc), D_BDW_PLUS);
MMIO_DFH(GEN8_ROW_CHICKEN, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS,
NULL, NULL);
MMIO_DFH(GEN7_ROW_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS,
NULL, NULL);
MMIO_DFH(GEN8_UCGCTL6, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0xb1f0, D_BDW, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0xb1c0, D_BDW, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0xb1f0), D_BDW, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0xb1c0), D_BDW, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(GEN8_L3SQCREG4, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0xb100, D_BDW, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0xb10c, D_BDW, F_CMD_ACCESS, NULL, NULL);
- MMIO_D(0xb110, D_BDW);
+ MMIO_DFH(_MMIO(0xb100), D_BDW, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0xb10c), D_BDW, F_CMD_ACCESS, NULL, NULL);
+ MMIO_D(_MMIO(0xb110), D_BDW);
- MMIO_F(0x24d0, 48, F_CMD_ACCESS, 0, 0, D_BDW_PLUS,
+ MMIO_F(_MMIO(0x24d0), 48, F_CMD_ACCESS, 0, 0, D_BDW_PLUS,
NULL, force_nonpriv_write);
- MMIO_D(0x44484, D_BDW_PLUS);
- MMIO_D(0x4448c, D_BDW_PLUS);
+ MMIO_D(_MMIO(0x44484), D_BDW_PLUS);
+ MMIO_D(_MMIO(0x4448c), D_BDW_PLUS);
- MMIO_DFH(0x83a4, D_BDW, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x83a4), D_BDW, F_CMD_ACCESS, NULL, NULL);
MMIO_D(GEN8_L3_LRA_1_GPGPU, D_BDW_PLUS);
- MMIO_DFH(0x8430, D_BDW, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x8430), D_BDW, F_CMD_ACCESS, NULL, NULL);
- MMIO_D(0x110000, D_BDW_PLUS);
+ MMIO_D(_MMIO(0x110000), D_BDW_PLUS);
- MMIO_D(0x48400, D_BDW_PLUS);
+ MMIO_D(_MMIO(0x48400), D_BDW_PLUS);
- MMIO_D(0x6e570, D_BDW_PLUS);
- MMIO_D(0x65f10, D_BDW_PLUS);
+ MMIO_D(_MMIO(0x6e570), D_BDW_PLUS);
+ MMIO_D(_MMIO(0x65f10), D_BDW_PLUS);
- MMIO_DFH(0xe194, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0xe188, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0xe194), D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0xe188), D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(HALF_SLICE_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x2580, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
-
- MMIO_DFH(0x2248, D_BDW, F_CMD_ACCESS, NULL, NULL);
-
- MMIO_DFH(0xe220, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0xe230, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0xe240, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0xe260, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0xe270, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0xe280, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0xe2a0, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0xe2b0, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0xe2c0, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x2580), D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+
+ MMIO_DFH(_MMIO(0x2248), D_BDW, F_CMD_ACCESS, NULL, NULL);
+
+ MMIO_DFH(_MMIO(0xe220), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0xe230), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0xe240), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0xe260), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0xe270), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0xe280), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0xe2a0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0xe2b0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0xe2c0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
return 0;
}
@@ -2630,11 +2632,11 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_DH(FORCEWAKE_MEDIA_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write);
MMIO_DH(FORCEWAKE_ACK_MEDIA_GEN9, D_SKL_PLUS, NULL, NULL);
- MMIO_F(_DPB_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
+ MMIO_F(_MMIO(_DPB_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
dp_aux_ch_ctl_mmio_write);
- MMIO_F(_DPC_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
+ MMIO_F(_MMIO(_DPC_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
dp_aux_ch_ctl_mmio_write);
- MMIO_F(_DPD_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
+ MMIO_F(_MMIO(_DPD_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
dp_aux_ch_ctl_mmio_write);
/*
@@ -2645,26 +2647,26 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_DH(HSW_PWR_WELL_CTL_DRIVER(SKL_DISP_PW_MISC_IO), D_SKL_PLUS, NULL,
skl_power_well_ctl_write);
- MMIO_D(0xa210, D_SKL_PLUS);
+ MMIO_D(_MMIO(0xa210), D_SKL_PLUS);
MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
MMIO_DFH(GEN9_GAMT_ECO_REG_RW_IA, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DH(0x4ddc, D_SKL_PLUS, NULL, NULL);
- MMIO_DH(0x42080, D_SKL_PLUS, NULL, NULL);
- MMIO_D(0x45504, D_SKL_PLUS);
- MMIO_D(0x45520, D_SKL_PLUS);
- MMIO_D(0x46000, D_SKL_PLUS);
- MMIO_DH(0x46010, D_SKL | D_KBL, NULL, skl_lcpll_write);
- MMIO_DH(0x46014, D_SKL | D_KBL, NULL, skl_lcpll_write);
- MMIO_D(0x6C040, D_SKL | D_KBL);
- MMIO_D(0x6C048, D_SKL | D_KBL);
- MMIO_D(0x6C050, D_SKL | D_KBL);
- MMIO_D(0x6C044, D_SKL | D_KBL);
- MMIO_D(0x6C04C, D_SKL | D_KBL);
- MMIO_D(0x6C054, D_SKL | D_KBL);
- MMIO_D(0x6c058, D_SKL | D_KBL);
- MMIO_D(0x6c05c, D_SKL | D_KBL);
- MMIO_DH(0X6c060, D_SKL | D_KBL, dpll_status_read, NULL);
+ MMIO_DH(_MMIO(0x4ddc), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_MMIO(0x42080), D_SKL_PLUS, NULL, NULL);
+ MMIO_D(_MMIO(0x45504), D_SKL_PLUS);
+ MMIO_D(_MMIO(0x45520), D_SKL_PLUS);
+ MMIO_D(_MMIO(0x46000), D_SKL_PLUS);
+ MMIO_DH(_MMIO(0x46010), D_SKL | D_KBL, NULL, skl_lcpll_write);
+ MMIO_DH(_MMIO(0x46014), D_SKL | D_KBL, NULL, skl_lcpll_write);
+ MMIO_D(_MMIO(0x6C040), D_SKL | D_KBL);
+ MMIO_D(_MMIO(0x6C048), D_SKL | D_KBL);
+ MMIO_D(_MMIO(0x6C050), D_SKL | D_KBL);
+ MMIO_D(_MMIO(0x6C044), D_SKL | D_KBL);
+ MMIO_D(_MMIO(0x6C04C), D_SKL | D_KBL);
+ MMIO_D(_MMIO(0x6C054), D_SKL | D_KBL);
+ MMIO_D(_MMIO(0x6c058), D_SKL | D_KBL);
+ MMIO_D(_MMIO(0x6c05c), D_SKL | D_KBL);
+ MMIO_DH(_MMIO(0x6c060), D_SKL | D_KBL, dpll_status_read, NULL);
MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 0), D_SKL_PLUS, NULL, pf_write);
MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 1), D_SKL_PLUS, NULL, pf_write);
@@ -2753,105 +2755,105 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 2), D_SKL_PLUS, NULL, NULL);
MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 3), D_SKL_PLUS, NULL, NULL);
- MMIO_DH(_REG_701C0(PIPE_A, 1), D_SKL_PLUS, NULL, NULL);
- MMIO_DH(_REG_701C0(PIPE_A, 2), D_SKL_PLUS, NULL, NULL);
- MMIO_DH(_REG_701C0(PIPE_A, 3), D_SKL_PLUS, NULL, NULL);
- MMIO_DH(_REG_701C0(PIPE_A, 4), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_MMIO(_REG_701C0(PIPE_A, 1)), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_MMIO(_REG_701C0(PIPE_A, 2)), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_MMIO(_REG_701C0(PIPE_A, 3)), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_MMIO(_REG_701C0(PIPE_A, 4)), D_SKL_PLUS, NULL, NULL);
- MMIO_DH(_REG_701C0(PIPE_B, 1), D_SKL_PLUS, NULL, NULL);
- MMIO_DH(_REG_701C0(PIPE_B, 2), D_SKL_PLUS, NULL, NULL);
- MMIO_DH(_REG_701C0(PIPE_B, 3), D_SKL_PLUS, NULL, NULL);
- MMIO_DH(_REG_701C0(PIPE_B, 4), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_MMIO(_REG_701C0(PIPE_B, 1)), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_MMIO(_REG_701C0(PIPE_B, 2)), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_MMIO(_REG_701C0(PIPE_B, 3)), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_MMIO(_REG_701C0(PIPE_B, 4)), D_SKL_PLUS, NULL, NULL);
- MMIO_DH(_REG_701C0(PIPE_C, 1), D_SKL_PLUS, NULL, NULL);
- MMIO_DH(_REG_701C0(PIPE_C, 2), D_SKL_PLUS, NULL, NULL);
- MMIO_DH(_REG_701C0(PIPE_C, 3), D_SKL_PLUS, NULL, NULL);
- MMIO_DH(_REG_701C0(PIPE_C, 4), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_MMIO(_REG_701C0(PIPE_C, 1)), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_MMIO(_REG_701C0(PIPE_C, 2)), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_MMIO(_REG_701C0(PIPE_C, 3)), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_MMIO(_REG_701C0(PIPE_C, 4)), D_SKL_PLUS, NULL, NULL);
- MMIO_DH(_REG_701C4(PIPE_A, 1), D_SKL_PLUS, NULL, NULL);
- MMIO_DH(_REG_701C4(PIPE_A, 2), D_SKL_PLUS, NULL, NULL);
- MMIO_DH(_REG_701C4(PIPE_A, 3), D_SKL_PLUS, NULL, NULL);
- MMIO_DH(_REG_701C4(PIPE_A, 4), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_MMIO(_REG_701C4(PIPE_A, 1)), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_MMIO(_REG_701C4(PIPE_A, 2)), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_MMIO(_REG_701C4(PIPE_A, 3)), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_MMIO(_REG_701C4(PIPE_A, 4)), D_SKL_PLUS, NULL, NULL);
- MMIO_DH(_REG_701C4(PIPE_B, 1), D_SKL_PLUS, NULL, NULL);
- MMIO_DH(_REG_701C4(PIPE_B, 2), D_SKL_PLUS, NULL, NULL);
- MMIO_DH(_REG_701C4(PIPE_B, 3), D_SKL_PLUS, NULL, NULL);
- MMIO_DH(_REG_701C4(PIPE_B, 4), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_MMIO(_REG_701C4(PIPE_B, 1)), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_MMIO(_REG_701C4(PIPE_B, 2)), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_MMIO(_REG_701C4(PIPE_B, 3)), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_MMIO(_REG_701C4(PIPE_B, 4)), D_SKL_PLUS, NULL, NULL);
- MMIO_DH(_REG_701C4(PIPE_C, 1), D_SKL_PLUS, NULL, NULL);
- MMIO_DH(_REG_701C4(PIPE_C, 2), D_SKL_PLUS, NULL, NULL);
- MMIO_DH(_REG_701C4(PIPE_C, 3), D_SKL_PLUS, NULL, NULL);
- MMIO_DH(_REG_701C4(PIPE_C, 4), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_MMIO(_REG_701C4(PIPE_C, 1)), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_MMIO(_REG_701C4(PIPE_C, 2)), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_MMIO(_REG_701C4(PIPE_C, 3)), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_MMIO(_REG_701C4(PIPE_C, 4)), D_SKL_PLUS, NULL, NULL);
- MMIO_D(0x70380, D_SKL_PLUS);
- MMIO_D(0x71380, D_SKL_PLUS);
- MMIO_D(0x72380, D_SKL_PLUS);
- MMIO_D(0x7039c, D_SKL_PLUS);
+ MMIO_D(_MMIO(0x70380), D_SKL_PLUS);
+ MMIO_D(_MMIO(0x71380), D_SKL_PLUS);
+ MMIO_D(_MMIO(0x72380), D_SKL_PLUS);
+ MMIO_D(_MMIO(0x7039c), D_SKL_PLUS);
- MMIO_D(0x8f074, D_SKL | D_KBL);
- MMIO_D(0x8f004, D_SKL | D_KBL);
- MMIO_D(0x8f034, D_SKL | D_KBL);
+ MMIO_D(_MMIO(0x8f074), D_SKL | D_KBL);
+ MMIO_D(_MMIO(0x8f004), D_SKL | D_KBL);
+ MMIO_D(_MMIO(0x8f034), D_SKL | D_KBL);
- MMIO_D(0xb11c, D_SKL | D_KBL);
+ MMIO_D(_MMIO(0xb11c), D_SKL | D_KBL);
- MMIO_D(0x51000, D_SKL | D_KBL);
- MMIO_D(0x6c00c, D_SKL_PLUS);
+ MMIO_D(_MMIO(0x51000), D_SKL | D_KBL);
+ MMIO_D(_MMIO(0x6c00c), D_SKL_PLUS);
- MMIO_F(0xc800, 0x7f8, F_CMD_ACCESS, 0, 0, D_SKL | D_KBL, NULL, NULL);
- MMIO_F(0xb020, 0x80, F_CMD_ACCESS, 0, 0, D_SKL | D_KBL, NULL, NULL);
+ MMIO_F(_MMIO(0xc800), 0x7f8, F_CMD_ACCESS, 0, 0, D_SKL | D_KBL, NULL, NULL);
+ MMIO_F(_MMIO(0xb020), 0x80, F_CMD_ACCESS, 0, 0, D_SKL | D_KBL, NULL, NULL);
- MMIO_D(0xd08, D_SKL_PLUS);
- MMIO_DFH(0x20e0, D_SKL_PLUS, F_MODE_MASK, NULL, NULL);
- MMIO_DFH(0x20ec, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+ MMIO_D(_MMIO(0xd08), D_SKL_PLUS);
+ MMIO_DFH(_MMIO(0x20e0), D_SKL_PLUS, F_MODE_MASK, NULL, NULL);
+ MMIO_DFH(_MMIO(0x20ec), D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
/* TRTT */
- MMIO_DFH(0x4de0, D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x4de4, D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x4de8, D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x4dec, D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x4df0, D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x4df4, D_SKL | D_KBL, F_CMD_ACCESS, NULL, gen9_trtte_write);
- MMIO_DH(0x4dfc, D_SKL | D_KBL, NULL, gen9_trtt_chicken_write);
+ MMIO_DFH(_MMIO(0x4de0), D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x4de4), D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x4de8), D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x4dec), D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x4df0), D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_MMIO(0x4df4), D_SKL | D_KBL, F_CMD_ACCESS, NULL, gen9_trtte_write);
+ MMIO_DH(_MMIO(0x4dfc), D_SKL | D_KBL, NULL, gen9_trtt_chicken_write);
- MMIO_D(0x45008, D_SKL | D_KBL);
+ MMIO_D(_MMIO(0x45008), D_SKL | D_KBL);
- MMIO_D(0x46430, D_SKL | D_KBL);
+ MMIO_D(_MMIO(0x46430), D_SKL | D_KBL);
- MMIO_D(0x46520, D_SKL | D_KBL);
+ MMIO_D(_MMIO(0x46520), D_SKL | D_KBL);
- MMIO_D(0xc403c, D_SKL | D_KBL);
- MMIO_D(0xb004, D_SKL_PLUS);
+ MMIO_D(_MMIO(0xc403c), D_SKL | D_KBL);
+ MMIO_D(_MMIO(0xb004), D_SKL_PLUS);
MMIO_DH(DMA_CTRL, D_SKL_PLUS, NULL, dma_ctrl_write);
- MMIO_D(0x65900, D_SKL_PLUS);
- MMIO_D(0x1082c0, D_SKL | D_KBL);
- MMIO_D(0x4068, D_SKL | D_KBL);
- MMIO_D(0x67054, D_SKL | D_KBL);
- MMIO_D(0x6e560, D_SKL | D_KBL);
- MMIO_D(0x6e554, D_SKL | D_KBL);
- MMIO_D(0x2b20, D_SKL | D_KBL);
- MMIO_D(0x65f00, D_SKL | D_KBL);
- MMIO_D(0x65f08, D_SKL | D_KBL);
- MMIO_D(0x320f0, D_SKL | D_KBL);
-
- MMIO_D(0x70034, D_SKL_PLUS);
- MMIO_D(0x71034, D_SKL_PLUS);
- MMIO_D(0x72034, D_SKL_PLUS);
-
- MMIO_D(_PLANE_KEYVAL_1(PIPE_A), D_SKL_PLUS);
- MMIO_D(_PLANE_KEYVAL_1(PIPE_B), D_SKL_PLUS);
- MMIO_D(_PLANE_KEYVAL_1(PIPE_C), D_SKL_PLUS);
- MMIO_D(_PLANE_KEYMSK_1(PIPE_A), D_SKL_PLUS);
- MMIO_D(_PLANE_KEYMSK_1(PIPE_B), D_SKL_PLUS);
- MMIO_D(_PLANE_KEYMSK_1(PIPE_C), D_SKL_PLUS);
-
- MMIO_D(0x44500, D_SKL_PLUS);
+ MMIO_D(_MMIO(0x65900), D_SKL_PLUS);
+ MMIO_D(_MMIO(0x1082c0), D_SKL | D_KBL);
+ MMIO_D(_MMIO(0x4068), D_SKL | D_KBL);
+ MMIO_D(_MMIO(0x67054), D_SKL | D_KBL);
+ MMIO_D(_MMIO(0x6e560), D_SKL | D_KBL);
+ MMIO_D(_MMIO(0x6e554), D_SKL | D_KBL);
+ MMIO_D(_MMIO(0x2b20), D_SKL | D_KBL);
+ MMIO_D(_MMIO(0x65f00), D_SKL | D_KBL);
+ MMIO_D(_MMIO(0x65f08), D_SKL | D_KBL);
+ MMIO_D(_MMIO(0x320f0), D_SKL | D_KBL);
+
+ MMIO_D(_MMIO(0x70034), D_SKL_PLUS);
+ MMIO_D(_MMIO(0x71034), D_SKL_PLUS);
+ MMIO_D(_MMIO(0x72034), D_SKL_PLUS);
+
+ MMIO_D(_MMIO(_PLANE_KEYVAL_1(PIPE_A)), D_SKL_PLUS);
+ MMIO_D(_MMIO(_PLANE_KEYVAL_1(PIPE_B)), D_SKL_PLUS);
+ MMIO_D(_MMIO(_PLANE_KEYVAL_1(PIPE_C)), D_SKL_PLUS);
+ MMIO_D(_MMIO(_PLANE_KEYMSK_1(PIPE_A)), D_SKL_PLUS);
+ MMIO_D(_MMIO(_PLANE_KEYMSK_1(PIPE_B)), D_SKL_PLUS);
+ MMIO_D(_MMIO(_PLANE_KEYMSK_1(PIPE_C)), D_SKL_PLUS);
+
+ MMIO_D(_MMIO(0x44500), D_SKL_PLUS);
MMIO_DFH(GEN9_CSFE_CHICKEN1_RCS, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(GEN8_HDC_CHICKEN1, D_SKL | D_KBL, F_MODE_MASK | F_CMD_ACCESS,
NULL, NULL);
- MMIO_D(0x4ab8, D_KBL);
- MMIO_D(0x2248, D_SKL_PLUS | D_KBL);
+ MMIO_D(_MMIO(0x4ab8), D_KBL);
+ MMIO_D(_MMIO(0x2248), D_SKL_PLUS | D_KBL);
return 0;
}
@@ -2867,8 +2869,8 @@ static struct gvt_mmio_block *find_mmio_block(struct intel_gvt *gvt,
for (i = 0; i < num; i++, block++) {
if (!(device & block->device))
continue;
- if (offset >= INTEL_GVT_MMIO_OFFSET(block->offset) &&
- offset < INTEL_GVT_MMIO_OFFSET(block->offset) + block->size)
+ if (offset >= i915_mmio_reg_offset(block->offset) &&
+ offset < i915_mmio_reg_offset(block->offset) + block->size)
return block;
}
return NULL;
@@ -2980,8 +2982,8 @@ int intel_gvt_for_each_tracked_mmio(struct intel_gvt *gvt,
for (i = 0; i < gvt->mmio.num_mmio_block; i++, block++) {
for (j = 0; j < block->size; j += 4) {
ret = handler(gvt,
- INTEL_GVT_MMIO_OFFSET(block->offset) + j,
- data);
+ i915_mmio_reg_offset(block->offset) + j,
+ data);
if (ret)
return ret;
}
diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h
index df7f33abd393..a1bd82feb827 100644
--- a/drivers/gpu/drm/i915/gvt/hypercall.h
+++ b/drivers/gpu/drm/i915/gvt/hypercall.h
@@ -55,6 +55,9 @@ struct intel_gvt_mpt {
unsigned long mfn, unsigned int nr, bool map);
int (*set_trap_area)(unsigned long handle, u64 start, u64 end,
bool map);
+ int (*set_opregion)(void *vgpu);
+ int (*get_vfio_device)(void *vgpu);
+ void (*put_vfio_device)(void *vgpu);
};
extern struct intel_gvt_mpt xengt_mpt;
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 110f07e8bcfb..45bab5a6290b 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -53,11 +53,23 @@ static const struct intel_gvt_ops *intel_gvt_ops;
#define VFIO_PCI_INDEX_TO_OFFSET(index) ((u64)(index) << VFIO_PCI_OFFSET_SHIFT)
#define VFIO_PCI_OFFSET_MASK (((u64)(1) << VFIO_PCI_OFFSET_SHIFT) - 1)
+#define OPREGION_SIGNATURE "IntelGraphicsMem"
+
+struct vfio_region;
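+/* Access callbacks for a device-specific VFIO region of a vGPU. */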
+struct intel_vgpu_regops {
+ size_t (*rw)(struct intel_vgpu *vgpu, char *buf,
+ size_t count, loff_t *ppos, bool iswrite);
+ void (*release)(struct intel_vgpu *vgpu,
+ struct vfio_region *region);
+};
+
struct vfio_region {
u32 type;
u32 subtype;
size_t size;
u32 flags;
+ const struct intel_vgpu_regops *ops;
+ void *data;
};
struct kvmgt_pgfn {
@@ -316,6 +328,108 @@ static void kvmgt_protect_table_del(struct kvmgt_guest_info *info,
}
}
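+/* Read handler for the vGPU's emulated OpRegion VFIO region (read-only). */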
+static size_t intel_vgpu_reg_rw_opregion(struct intel_vgpu *vgpu, char *buf,
+ size_t count, loff_t *ppos, bool iswrite)
+{
+ unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) -
+ VFIO_PCI_NUM_REGIONS;
+ void *base = vgpu->vdev.region[i].data;
+ loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
+
+ if (pos >= vgpu->vdev.region[i].size || iswrite) {
+ gvt_vgpu_err("invalid op or offset for Intel vgpu OpRegion\n");
+ return -EINVAL;
+ }
+ count = min(count, (size_t)(vgpu->vdev.region[i].size - pos));
+ memcpy(buf, base + pos, count);
+
+ return count;
+}
+
+static void intel_vgpu_reg_release_opregion(struct intel_vgpu *vgpu,
+ struct vfio_region *region)
+{
+}
+
+static const struct intel_vgpu_regops intel_vgpu_regops_opregion = {
+ .rw = intel_vgpu_reg_rw_opregion,
+ .release = intel_vgpu_reg_release_opregion,
+};
+
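+/* Append a device-specific region to the vGPU's VFIO region table. */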
+static int intel_vgpu_register_reg(struct intel_vgpu *vgpu,
+ unsigned int type, unsigned int subtype,
+ const struct intel_vgpu_regops *ops,
+ size_t size, u32 flags, void *data)
+{
+ struct vfio_region *region;
+
+ region = krealloc(vgpu->vdev.region,
+ (vgpu->vdev.num_regions + 1) * sizeof(*region),
+ GFP_KERNEL);
+ if (!region)
+ return -ENOMEM;
+
+ vgpu->vdev.region = region;
+ vgpu->vdev.region[vgpu->vdev.num_regions].type = type;
+ vgpu->vdev.region[vgpu->vdev.num_regions].subtype = subtype;
+ vgpu->vdev.region[vgpu->vdev.num_regions].ops = ops;
+ vgpu->vdev.region[vgpu->vdev.num_regions].size = size;
+ vgpu->vdev.region[vgpu->vdev.num_regions].flags = flags;
+ vgpu->vdev.region[vgpu->vdev.num_regions].data = data;
+ vgpu->vdev.num_regions++;
+ return 0;
+}
+
+static int kvmgt_get_vfio_device(void *p_vgpu)
+{
+ struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
+
+ vgpu->vdev.vfio_device = vfio_device_get_from_dev(
+ mdev_dev(vgpu->vdev.mdev));
+ if (!vgpu->vdev.vfio_device) {
+ gvt_vgpu_err("failed to get vfio device\n");
+ return -ENODEV;
+ }
+ return 0;
+}
+
+static int kvmgt_set_opregion(void *p_vgpu)
+{
+ struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
+ void *base;
+ int ret;
+
+	/* Each vgpu has its own opregion, although VFIO would create another
+	 * one later. This one is used to expose the opregion to VFIO, while
+	 * the other one, created by VFIO later, is what the guest actually
+	 * uses.
+	 */
+ base = vgpu_opregion(vgpu)->va;
+ if (!base)
+ return -ENOMEM;
+
+ if (memcmp(base, OPREGION_SIGNATURE, 16)) {
+ memunmap(base);
+ return -EINVAL;
+ }
+
+ ret = intel_vgpu_register_reg(vgpu,
+ PCI_VENDOR_ID_INTEL | VFIO_REGION_TYPE_PCI_VENDOR_TYPE,
+ VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION,
+ &intel_vgpu_regops_opregion, OPREGION_SIZE,
+ VFIO_REGION_INFO_FLAG_READ, base);
+
+ return ret;
+}
+
+static void kvmgt_put_vfio_device(void *vgpu)
+{
+ if (WARN_ON(!((struct intel_vgpu *)vgpu)->vdev.vfio_device))
+ return;
+
+ vfio_device_put(((struct intel_vgpu *)vgpu)->vdev.vfio_device);
+}
+
static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
{
struct intel_vgpu *vgpu = NULL;
@@ -546,7 +660,7 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
int ret = -EINVAL;
- if (index >= VFIO_PCI_NUM_REGIONS) {
+ if (index >= VFIO_PCI_NUM_REGIONS + vgpu->vdev.num_regions) {
gvt_vgpu_err("invalid index: %u\n", index);
return -EINVAL;
}
@@ -574,8 +688,14 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
case VFIO_PCI_BAR5_REGION_INDEX:
case VFIO_PCI_VGA_REGION_INDEX:
case VFIO_PCI_ROM_REGION_INDEX:
+ break;
default:
- gvt_vgpu_err("unsupported region: %u\n", index);
+ if (index >= VFIO_PCI_NUM_REGIONS + vgpu->vdev.num_regions)
+ return -EINVAL;
+
+ index -= VFIO_PCI_NUM_REGIONS;
+ return vgpu->vdev.region[index].ops->rw(vgpu, buf, count,
+ ppos, is_write);
}
return ret == 0 ? count : ret;
@@ -838,7 +958,8 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
info.flags = VFIO_DEVICE_FLAGS_PCI;
info.flags |= VFIO_DEVICE_FLAGS_RESET;
- info.num_regions = VFIO_PCI_NUM_REGIONS;
+ info.num_regions = VFIO_PCI_NUM_REGIONS +
+ vgpu->vdev.num_regions;
info.num_irqs = VFIO_PCI_NUM_IRQS;
return copy_to_user((void __user *)arg, &info, minsz) ?
@@ -908,13 +1029,17 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
case VFIO_PCI_BAR3_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
info.size = 0;
-
info.flags = 0;
+
gvt_dbg_core("get region info bar:%d\n", info.index);
break;
case VFIO_PCI_ROM_REGION_INDEX:
case VFIO_PCI_VGA_REGION_INDEX:
+ info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
+ info.size = 0;
+ info.flags = 0;
+
gvt_dbg_core("get region info index:%d\n", info.index);
break;
default:
@@ -959,6 +1084,7 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
}
if (caps.size) {
+ info.flags |= VFIO_REGION_INFO_FLAG_CAPS;
if (info.argsz < sizeof(info) + caps.size) {
info.argsz = sizeof(info) + caps.size;
info.cap_offset = 0;
@@ -1045,6 +1171,33 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
} else if (cmd == VFIO_DEVICE_RESET) {
intel_gvt_ops->vgpu_reset(vgpu);
return 0;
+ } else if (cmd == VFIO_DEVICE_QUERY_GFX_PLANE) {
+ struct vfio_device_gfx_plane_info dmabuf;
+ int ret = 0;
+
+ minsz = offsetofend(struct vfio_device_gfx_plane_info,
+ dmabuf_id);
+ if (copy_from_user(&dmabuf, (void __user *)arg, minsz))
+ return -EFAULT;
+ if (dmabuf.argsz < minsz)
+ return -EINVAL;
+
+ ret = intel_gvt_ops->vgpu_query_plane(vgpu, &dmabuf);
+ if (ret != 0)
+ return ret;
+
+ return copy_to_user((void __user *)arg, &dmabuf, minsz) ?
+ -EFAULT : 0;
+ } else if (cmd == VFIO_DEVICE_GET_GFX_DMABUF) {
+ __u32 dmabuf_id;
+ __s32 dmabuf_fd;
+
+ if (get_user(dmabuf_id, (__u32 __user *)arg))
+ return -EFAULT;
+
+ dmabuf_fd = intel_gvt_ops->vgpu_get_dmabuf(vgpu, dmabuf_id);
+ return dmabuf_fd;
+
}
return 0;
@@ -1207,8 +1360,8 @@ static void kvmgt_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa,
struct kvmgt_guest_info, track_node);
if (kvmgt_gfn_is_write_protected(info, gpa_to_gfn(gpa)))
- intel_gvt_ops->emulate_mmio_write(info->vgpu, gpa,
- (void *)val, len);
+ intel_gvt_ops->write_protect_handler(info->vgpu, gpa,
+ (void *)val, len);
}
static void kvmgt_page_track_flush_slot(struct kvm *kvm,
@@ -1286,6 +1439,9 @@ static int kvmgt_guest_init(struct mdev_device *mdev)
kvmgt_protect_table_init(info);
gvt_cache_init(vgpu);
+ mutex_init(&vgpu->dmabuf_lock);
+ init_completion(&vgpu->vblank_done);
+
info->track_node.track_write = kvmgt_page_track_write;
info->track_node.track_flush_slot = kvmgt_page_track_flush_slot;
kvm_page_track_register_notifier(kvm, &info->track_node);
@@ -1426,6 +1582,9 @@ struct intel_gvt_mpt kvmgt_mpt = {
.read_gpa = kvmgt_read_gpa,
.write_gpa = kvmgt_write_gpa,
.gfn_to_mfn = kvmgt_gfn_to_pfn,
+ .set_opregion = kvmgt_set_opregion,
+ .get_vfio_device = kvmgt_get_vfio_device,
+ .put_vfio_device = kvmgt_put_vfio_device,
};
EXPORT_SYMBOL_GPL(kvmgt_mpt);
diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c
index 4ea0feb5f04d..562b5ad857a4 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.c
+++ b/drivers/gpu/drm/i915/gvt/mmio.c
@@ -117,25 +117,6 @@ static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, uint64_t pa,
else
memcpy(pt, p_data, bytes);
- } else if (atomic_read(&vgpu->gtt.n_tracked_guest_page)) {
- struct intel_vgpu_page_track *t;
-
- /* Since we enter the failsafe mode early during guest boot,
- * guest may not have chance to set up its ppgtt table, so
- * there should not be any wp pages for guest. Keep the wp
- * related code here in case we need to handle it in furture.
- */
- t = intel_vgpu_find_tracked_page(vgpu, pa >> PAGE_SHIFT);
- if (t) {
- /* remove write protection to prevent furture traps */
- intel_vgpu_clean_page_track(vgpu, t);
- if (read)
- intel_gvt_hypervisor_read_gpa(vgpu, pa,
- p_data, bytes);
- else
- intel_gvt_hypervisor_write_gpa(vgpu, pa,
- p_data, bytes);
- }
}
mutex_unlock(&gvt->lock);
}
@@ -157,7 +138,6 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
unsigned int offset = 0;
int ret = -EINVAL;
-
if (vgpu->failsafe) {
failsafe_emulate_mmio_rw(vgpu, pa, p_data, bytes, true);
return 0;
@@ -166,26 +146,7 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
if (vgpu_gpa_is_aperture(vgpu, pa)) {
ret = vgpu_aperture_rw(vgpu, pa, p_data, bytes, true);
- mutex_unlock(&gvt->lock);
- return ret;
- }
-
- if (atomic_read(&vgpu->gtt.n_tracked_guest_page)) {
- struct intel_vgpu_page_track *t;
-
- t = intel_vgpu_find_tracked_page(vgpu, pa >> PAGE_SHIFT);
- if (t) {
- ret = intel_gvt_hypervisor_read_gpa(vgpu, pa,
- p_data, bytes);
- if (ret) {
- gvt_vgpu_err("guest page read error %d, "
- "gfn 0x%lx, pa 0x%llx, var 0x%x, len %d\n",
- ret, t->gfn, pa, *(u32 *)p_data,
- bytes);
- }
- mutex_unlock(&gvt->lock);
- return ret;
- }
+ goto out;
}
offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
@@ -205,14 +166,12 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
p_data, bytes);
if (ret)
goto err;
- mutex_unlock(&gvt->lock);
- return ret;
+ goto out;
}
if (WARN_ON_ONCE(!reg_is_mmio(gvt, offset))) {
ret = intel_gvt_hypervisor_read_gpa(vgpu, pa, p_data, bytes);
- mutex_unlock(&gvt->lock);
- return ret;
+ goto out;
}
if (WARN_ON(!reg_is_mmio(gvt, offset + bytes - 1)))
@@ -228,11 +187,13 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
goto err;
intel_gvt_mmio_set_accessed(gvt, offset);
- mutex_unlock(&gvt->lock);
- return 0;
+ ret = 0;
+ goto out;
+
err:
gvt_vgpu_err("fail to emulate MMIO read %08x len %d\n",
offset, bytes);
+out:
mutex_unlock(&gvt->lock);
return ret;
}
@@ -263,26 +224,7 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
if (vgpu_gpa_is_aperture(vgpu, pa)) {
ret = vgpu_aperture_rw(vgpu, pa, p_data, bytes, false);
- mutex_unlock(&gvt->lock);
- return ret;
- }
-
- if (atomic_read(&vgpu->gtt.n_tracked_guest_page)) {
- struct intel_vgpu_page_track *t;
-
- t = intel_vgpu_find_tracked_page(vgpu, pa >> PAGE_SHIFT);
- if (t) {
- ret = t->handler(t, pa, p_data, bytes);
- if (ret) {
- gvt_err("guest page write error %d, "
- "gfn 0x%lx, pa 0x%llx, "
- "var 0x%x, len %d\n",
- ret, t->gfn, pa,
- *(u32 *)p_data, bytes);
- }
- mutex_unlock(&gvt->lock);
- return ret;
- }
+ goto out;
}
offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
@@ -302,14 +244,12 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
p_data, bytes);
if (ret)
goto err;
- mutex_unlock(&gvt->lock);
- return ret;
+ goto out;
}
if (WARN_ON_ONCE(!reg_is_mmio(gvt, offset))) {
ret = intel_gvt_hypervisor_write_gpa(vgpu, pa, p_data, bytes);
- mutex_unlock(&gvt->lock);
- return ret;
+ goto out;
}
ret = intel_vgpu_mmio_reg_rw(vgpu, offset, p_data, bytes, false);
@@ -317,11 +257,12 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
goto err;
intel_gvt_mmio_set_accessed(gvt, offset);
- mutex_unlock(&gvt->lock);
- return 0;
+ ret = 0;
+ goto out;
err:
gvt_vgpu_err("fail to emulate MMIO write %08x len %d\n", offset,
bytes);
+out:
mutex_unlock(&gvt->lock);
return ret;
}
@@ -342,10 +283,10 @@ void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr)
memcpy(vgpu->mmio.vreg, mmio, info->mmio_size);
memcpy(vgpu->mmio.sreg, mmio, info->mmio_size);
- vgpu_vreg(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0;
+ vgpu_vreg_t(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0;
/* set the bit 0:2(Core C-State ) to C0 */
- vgpu_vreg(vgpu, GEN6_GT_CORE_STATUS) = 0;
+ vgpu_vreg_t(vgpu, GEN6_GT_CORE_STATUS) = 0;
vgpu->mmio.disable_warn_untrack = false;
} else {
diff --git a/drivers/gpu/drm/i915/gvt/mmio.h b/drivers/gpu/drm/i915/gvt/mmio.h
index 62709ac351cd..71b620875943 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.h
+++ b/drivers/gpu/drm/i915/gvt/mmio.h
@@ -76,13 +76,6 @@ int intel_gvt_for_each_tracked_mmio(struct intel_gvt *gvt,
int (*handler)(struct intel_gvt *gvt, u32 offset, void *data),
void *data);
-
-#define INTEL_GVT_MMIO_OFFSET(reg) ({ \
- typeof(reg) __reg = reg; \
- u32 *offset = (u32 *)&__reg; \
- *offset; \
-})
-
int intel_vgpu_init_mmio(struct intel_vgpu *vgpu);
void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr);
void intel_vgpu_clean_mmio(struct intel_vgpu *vgpu);
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
new file mode 100644
index 000000000000..74834395dd89
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -0,0 +1,412 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Eddie Dong <eddie.dong@intel.com>
+ * Kevin Tian <kevin.tian@intel.com>
+ *
+ * Contributors:
+ * Zhi Wang <zhi.a.wang@intel.com>
+ * Changbin Du <changbin.du@intel.com>
+ * Zhenyu Wang <zhenyuw@linux.intel.com>
+ * Tina Zhang <tina.zhang@intel.com>
+ * Bing Niu <bing.niu@intel.com>
+ *
+ */
+
+#include "i915_drv.h"
+#include "gvt.h"
+#include "trace.h"
+
+/*
+ * Defined in Intel Open Source PRM.
+ * Ref: https://01.org/linuxgraphics/documentation/hardware-specification-prms
+ */
+#define TRVATTL3PTRDW(i) _MMIO(0x4de0 + (i)*4)
+#define TRNULLDETCT _MMIO(0x4de8)
+#define TRINVTILEDETCT _MMIO(0x4dec)
+#define TRVADR _MMIO(0x4df0)
+#define TRTTE _MMIO(0x4df4)
+#define RING_EXCC(base) _MMIO((base) + 0x28)
+#define RING_GFX_MODE(base) _MMIO((base) + 0x29c)
+#define VF_GUARDBAND _MMIO(0x83a4)
+
+/* Raw offset is appended to each line for convenience. */
+static struct engine_mmio gen8_engine_mmio_list[] __cacheline_aligned = {
+ {RCS, GFX_MODE_GEN7, 0xffff, false}, /* 0x229c */
+ {RCS, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */
+ {RCS, HWSTAM, 0x0, false}, /* 0x2098 */
+ {RCS, INSTPM, 0xffff, true}, /* 0x20c0 */
+ {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 0), 0, false}, /* 0x24d0 */
+ {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 1), 0, false}, /* 0x24d4 */
+ {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 2), 0, false}, /* 0x24d8 */
+ {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 3), 0, false}, /* 0x24dc */
+ {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 4), 0, false}, /* 0x24e0 */
+ {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 5), 0, false}, /* 0x24e4 */
+ {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 6), 0, false}, /* 0x24e8 */
+ {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 7), 0, false}, /* 0x24ec */
+ {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 8), 0, false}, /* 0x24f0 */
+ {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 9), 0, false}, /* 0x24f4 */
+ {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 10), 0, false}, /* 0x24f8 */
+ {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 11), 0, false}, /* 0x24fc */
+ {RCS, CACHE_MODE_1, 0xffff, true}, /* 0x7004 */
+ {RCS, GEN7_GT_MODE, 0xffff, true}, /* 0x7008 */
+ {RCS, CACHE_MODE_0_GEN7, 0xffff, true}, /* 0x7000 */
+ {RCS, GEN7_COMMON_SLICE_CHICKEN1, 0xffff, true}, /* 0x7010 */
+ {RCS, HDC_CHICKEN0, 0xffff, true}, /* 0x7300 */
+ {RCS, VF_GUARDBAND, 0xffff, true}, /* 0x83a4 */
+
+ {BCS, RING_GFX_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2229c */
+ {BCS, RING_MI_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2209c */
+ {BCS, RING_INSTPM(BLT_RING_BASE), 0xffff, false}, /* 0x220c0 */
+ {BCS, RING_HWSTAM(BLT_RING_BASE), 0x0, false}, /* 0x22098 */
+ {BCS, RING_EXCC(BLT_RING_BASE), 0x0, false}, /* 0x22028 */
+ { /* Terminated */ }
+};
+
+static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = {
+ {RCS, GFX_MODE_GEN7, 0xffff, false}, /* 0x229c */
+ {RCS, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */
+ {RCS, HWSTAM, 0x0, false}, /* 0x2098 */
+ {RCS, INSTPM, 0xffff, true}, /* 0x20c0 */
+ {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 0), 0, false}, /* 0x24d0 */
+ {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 1), 0, false}, /* 0x24d4 */
+ {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 2), 0, false}, /* 0x24d8 */
+ {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 3), 0, false}, /* 0x24dc */
+ {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 4), 0, false}, /* 0x24e0 */
+ {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 5), 0, false}, /* 0x24e4 */
+ {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 6), 0, false}, /* 0x24e8 */
+ {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 7), 0, false}, /* 0x24ec */
+ {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 8), 0, false}, /* 0x24f0 */
+ {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 9), 0, false}, /* 0x24f4 */
+ {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 10), 0, false}, /* 0x24f8 */
+ {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 11), 0, false}, /* 0x24fc */
+ {RCS, CACHE_MODE_1, 0xffff, true}, /* 0x7004 */
+ {RCS, GEN7_GT_MODE, 0xffff, true}, /* 0x7008 */
+ {RCS, CACHE_MODE_0_GEN7, 0xffff, true}, /* 0x7000 */
+ {RCS, GEN7_COMMON_SLICE_CHICKEN1, 0xffff, true}, /* 0x7010 */
+ {RCS, HDC_CHICKEN0, 0xffff, true}, /* 0x7300 */
+ {RCS, VF_GUARDBAND, 0xffff, true}, /* 0x83a4 */
+
+ {RCS, GEN8_PRIVATE_PAT_LO, 0, false}, /* 0x40e0 */
+ {RCS, GEN8_PRIVATE_PAT_HI, 0, false}, /* 0x40e4 */
+ {RCS, GEN8_CS_CHICKEN1, 0xffff, true}, /* 0x2580 */
+ {RCS, COMMON_SLICE_CHICKEN2, 0xffff, true}, /* 0x7014 */
+ {RCS, GEN9_CS_DEBUG_MODE1, 0xffff, false}, /* 0x20ec */
+ {RCS, GEN8_L3SQCREG4, 0, false}, /* 0xb118 */
+ {RCS, GEN7_HALF_SLICE_CHICKEN1, 0xffff, true}, /* 0xe100 */
+ {RCS, HALF_SLICE_CHICKEN2, 0xffff, true}, /* 0xe180 */
+ {RCS, HALF_SLICE_CHICKEN3, 0xffff, true}, /* 0xe184 */
+ {RCS, GEN9_HALF_SLICE_CHICKEN5, 0xffff, true}, /* 0xe188 */
+ {RCS, GEN9_HALF_SLICE_CHICKEN7, 0xffff, true}, /* 0xe194 */
+ {RCS, TRVATTL3PTRDW(0), 0, false}, /* 0x4de0 */
+ {RCS, TRVATTL3PTRDW(1), 0, false}, /* 0x4de4 */
+ {RCS, TRNULLDETCT, 0, false}, /* 0x4de8 */
+ {RCS, TRINVTILEDETCT, 0, false}, /* 0x4dec */
+ {RCS, TRVADR, 0, false}, /* 0x4df0 */
+ {RCS, TRTTE, 0, false}, /* 0x4df4 */
+
+ {BCS, RING_GFX_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2229c */
+ {BCS, RING_MI_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2209c */
+ {BCS, RING_INSTPM(BLT_RING_BASE), 0xffff, false}, /* 0x220c0 */
+ {BCS, RING_HWSTAM(BLT_RING_BASE), 0x0, false}, /* 0x22098 */
+ {BCS, RING_EXCC(BLT_RING_BASE), 0x0, false}, /* 0x22028 */
+
+ {VCS2, RING_EXCC(GEN8_BSD2_RING_BASE), 0xffff, false}, /* 0x1c028 */
+
+ {VECS, RING_EXCC(VEBOX_RING_BASE), 0xffff, false}, /* 0x1a028 */
+
+ {RCS, GEN8_HDC_CHICKEN1, 0xffff, true}, /* 0x7304 */
+ {RCS, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */
+ {RCS, GEN7_UCGCTL4, 0x0, false}, /* 0x940c */
+ {RCS, GAMT_CHKN_BIT_REG, 0x0, false}, /* 0x4ab8 */
+
+ {RCS, GEN9_GAMT_ECO_REG_RW_IA, 0x0, false}, /* 0x4ab0 */
+ {RCS, GEN9_CSFE_CHICKEN1_RCS, 0x0, false}, /* 0x20d4 */
+
+ {RCS, GEN8_GARBCNTL, 0x0, false}, /* 0xb004 */
+ {RCS, GEN7_FF_THREAD_MODE, 0x0, false}, /* 0x20a0 */
+ {RCS, FF_SLICE_CS_CHICKEN2, 0xffff, false}, /* 0x20e4 */
+ { /* Terminated */ }
+};
+
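+/*
+ * One-time snapshot of the host MOCS control registers and L3CC table,
+ * filled by load_render_mocs() and used by switch_mocs() whenever the
+ * host, rather than a vGPU, is the previous or next owner of an engine.
+ */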
+static struct {
+ bool initialized;
+ u32 control_table[I915_NUM_ENGINES][64];
+ u32 l3cc_table[32];
+} gen9_render_mocs;
+
+static void load_render_mocs(struct drm_i915_private *dev_priv)
+{
+ i915_reg_t offset;
+ u32 regs[] = {
+ [RCS] = 0xc800,
+ [VCS] = 0xc900,
+ [VCS2] = 0xca00,
+ [BCS] = 0xcc00,
+ [VECS] = 0xcb00,
+ };
+ int ring_id, i;
+
+ for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) {
+ offset.reg = regs[ring_id];
+ for (i = 0; i < 64; i++) {
+ gen9_render_mocs.control_table[ring_id][i] =
+ I915_READ_FW(offset);
+ offset.reg += 4;
+ }
+ }
+
+ offset.reg = 0xb020;
+ for (i = 0; i < 32; i++) {
+ gen9_render_mocs.l3cc_table[i] =
+ I915_READ_FW(offset);
+ offset.reg += 4;
+ }
+ gen9_render_mocs.initialized = true;
+}
+
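+/*
+ * Flush any TLB invalidation that is pending for this vGPU on the given
+ * engine before its workload runs.
+ */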
+static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct intel_vgpu_submission *s = &vgpu->submission;
+ enum forcewake_domains fw;
+ i915_reg_t reg;
+ u32 regs[] = {
+ [RCS] = 0x4260,
+ [VCS] = 0x4264,
+ [VCS2] = 0x4268,
+ [BCS] = 0x426c,
+ [VECS] = 0x4270,
+ };
+
+ if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
+ return;
+
+ if (!test_and_clear_bit(ring_id, (void *)s->tlb_handle_pending))
+ return;
+
+ reg = _MMIO(regs[ring_id]);
+
+ /* WaForceWakeRenderDuringMmioTLBInvalidate:skl
+ * we need to put a forcewake when invalidating RCS TLB caches,
+ * otherwise device can go to RC6 state and interrupt invalidation
+ * process
+ */
+ fw = intel_uncore_forcewake_for_reg(dev_priv, reg,
+ FW_REG_READ | FW_REG_WRITE);
+ if (ring_id == RCS && (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)))
+ fw |= FORCEWAKE_RENDER;
+
+ intel_uncore_forcewake_get(dev_priv, fw);
+
+ I915_WRITE_FW(reg, 0x1);
+
+ if (wait_for_atomic((I915_READ_FW(reg) == 0), 50))
+ gvt_vgpu_err("timeout in invalidate ring (%d) tlb\n", ring_id);
+ else
+ vgpu_vreg_t(vgpu, reg) = 0;
+
+ intel_uncore_forcewake_put(dev_priv, fw);
+
+ gvt_dbg_core("invalidate TLB for ring %d\n", ring_id);
+}
+
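+/*
+ * Switch the per-engine MOCS control registers, and for RCS the L3CC
+ * table, between the outgoing and incoming owners (vGPU vregs or the
+ * cached host values).
+ */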
+static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
+ int ring_id)
+{
+ struct drm_i915_private *dev_priv;
+ i915_reg_t offset, l3_offset;
+ u32 old_v, new_v;
+
+ u32 regs[] = {
+ [RCS] = 0xc800,
+ [VCS] = 0xc900,
+ [VCS2] = 0xca00,
+ [BCS] = 0xcc00,
+ [VECS] = 0xcb00,
+ };
+ int i;
+
+ dev_priv = pre ? pre->gvt->dev_priv : next->gvt->dev_priv;
+ if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
+ return;
+
+ if (!pre && !gen9_render_mocs.initialized)
+ load_render_mocs(dev_priv);
+
+ offset.reg = regs[ring_id];
+ for (i = 0; i < 64; i++) {
+ if (pre)
+ old_v = vgpu_vreg_t(pre, offset);
+ else
+ old_v = gen9_render_mocs.control_table[ring_id][i];
+ if (next)
+ new_v = vgpu_vreg_t(next, offset);
+ else
+ new_v = gen9_render_mocs.control_table[ring_id][i];
+
+ if (old_v != new_v)
+ I915_WRITE_FW(offset, new_v);
+
+ offset.reg += 4;
+ }
+
+ if (ring_id == RCS) {
+ l3_offset.reg = 0xb020;
+ for (i = 0; i < 32; i++) {
+ if (pre)
+ old_v = vgpu_vreg_t(pre, l3_offset);
+ else
+ old_v = gen9_render_mocs.l3cc_table[i];
+ if (next)
+ new_v = vgpu_vreg_t(next, l3_offset);
+ else
+ new_v = gen9_render_mocs.l3cc_table[i];
+
+ if (old_v != new_v)
+ I915_WRITE_FW(l3_offset, new_v);
+
+ l3_offset.reg += 4;
+ }
+ }
+}
+
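+/* Index of the CTX_CONTEXT_CONTROL register value in lrc_reg_state. */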
+#define CTX_CONTEXT_CONTROL_VAL 0x03
+
+/* Switch ring mmio values (context). */
+static void switch_mmio(struct intel_vgpu *pre,
+ struct intel_vgpu *next,
+ int ring_id)
+{
+ struct drm_i915_private *dev_priv;
+ struct intel_vgpu_submission *s;
+ u32 *reg_state, ctx_ctrl;
+ u32 inhibit_mask =
+ _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
+ struct engine_mmio *mmio;
+ u32 old_v, new_v;
+
+ dev_priv = pre ? pre->gvt->dev_priv : next->gvt->dev_priv;
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+ switch_mocs(pre, next, ring_id);
+
+ mmio = dev_priv->gvt->engine_mmio_list;
+ while (i915_mmio_reg_offset((mmio++)->reg)) {
+ if (mmio->ring_id != ring_id)
+ continue;
+		/* save the current hardware value for the outgoing owner */
+ if (pre) {
+ vgpu_vreg_t(pre, mmio->reg) = I915_READ_FW(mmio->reg);
+ if (mmio->mask)
+ vgpu_vreg_t(pre, mmio->reg) &=
+ ~(mmio->mask << 16);
+ old_v = vgpu_vreg_t(pre, mmio->reg);
+ } else
+ old_v = mmio->value = I915_READ_FW(mmio->reg);
+
+		/* restore the value of the incoming owner */
+ if (next) {
+ s = &next->submission;
+ reg_state =
+ s->shadow_ctx->engine[ring_id].lrc_reg_state;
+ ctx_ctrl = reg_state[CTX_CONTEXT_CONTROL_VAL];
+			/*
+			 * If it is an inhibit context, load the in_context
+			 * mmio into the HW by an mmio write. If it is not,
+			 * skip this mmio write.
+			 */
+ if (mmio->in_context &&
+ (ctx_ctrl & inhibit_mask) != inhibit_mask)
+ continue;
+
+ if (mmio->mask)
+ new_v = vgpu_vreg_t(next, mmio->reg) |
+ (mmio->mask << 16);
+ else
+ new_v = vgpu_vreg_t(next, mmio->reg);
+ } else {
+ if (mmio->in_context)
+ continue;
+ if (mmio->mask)
+ new_v = mmio->value | (mmio->mask << 16);
+ else
+ new_v = mmio->value;
+ }
+
+ I915_WRITE_FW(mmio->reg, new_v);
+
+ trace_render_mmio(pre ? pre->id : 0,
+ next ? next->id : 0,
+ "switch",
+ i915_mmio_reg_offset(mmio->reg),
+ old_v, new_v);
+ }
+
+ if (next)
+ handle_tlb_pending_event(next, ring_id);
+}
+
+/**
+ * intel_gvt_switch_mmio - switch mmio context of specific engine
+ * @pre: the last vGPU that owned the engine
+ * @next: the vGPU to switch to
+ * @ring_id: specify the engine
+ *
+ * If pre is null, the host owns the engine. If next is null, we are
+ * switching to a host workload.
+ */
+void intel_gvt_switch_mmio(struct intel_vgpu *pre,
+ struct intel_vgpu *next, int ring_id)
+{
+ struct drm_i915_private *dev_priv;
+
+ if (WARN_ON(!pre && !next))
+ return;
+
+ gvt_dbg_render("switch ring %d from %s to %s\n", ring_id,
+ pre ? "vGPU" : "host", next ? "vGPU" : "HOST");
+
+ dev_priv = pre ? pre->gvt->dev_priv : next->gvt->dev_priv;
+
+	/*
+	 * We are using the raw mmio access wrappers to improve the
+	 * performance of batch mmio read/write, so we need to
+	 * handle forcewake manually.
+	 */
+ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+ switch_mmio(pre, next, ring_id);
+ intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+}
+
+/**
+ * intel_gvt_init_engine_mmio_context - Initialize the engine mmio list
+ * @gvt: GVT device
+ *
+ */
+void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt)
+{
+ if (IS_SKYLAKE(gvt->dev_priv) || IS_KABYLAKE(gvt->dev_priv))
+ gvt->engine_mmio_list = gen9_engine_mmio_list;
+ else
+ gvt->engine_mmio_list = gen8_engine_mmio_list;
+}
diff --git a/drivers/gpu/drm/i915/gvt/render.h b/drivers/gpu/drm/i915/gvt/mmio_context.h
index 91db1d39d28f..ca2c6a745673 100644
--- a/drivers/gpu/drm/i915/gvt/render.h
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.h
@@ -36,8 +36,17 @@
#ifndef __GVT_RENDER_H__
#define __GVT_RENDER_H__
+struct engine_mmio {
+ int ring_id;
+ i915_reg_t reg;
+ u32 mask;
+ bool in_context;
+ u32 value;
+};
+
void intel_gvt_switch_mmio(struct intel_vgpu *pre,
struct intel_vgpu *next, int ring_id);
+void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt);
#endif
diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h
index c436e20ea59e..ca8005a6d5fa 100644
--- a/drivers/gpu/drm/i915/gvt/mpt.h
+++ b/drivers/gpu/drm/i915/gvt/mpt.h
@@ -294,4 +294,49 @@ static inline int intel_gvt_hypervisor_set_trap_area(
return intel_gvt_host.mpt->set_trap_area(vgpu->handle, start, end, map);
}
+/**
+ * intel_gvt_hypervisor_set_opregion - Set opregion for guest
+ * @vgpu: a vGPU
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+static inline int intel_gvt_hypervisor_set_opregion(struct intel_vgpu *vgpu)
+{
+ if (!intel_gvt_host.mpt->set_opregion)
+ return 0;
+
+ return intel_gvt_host.mpt->set_opregion(vgpu);
+}
+
+/**
+ * intel_gvt_hypervisor_get_vfio_device - increase vfio device ref count
+ * @vgpu: a vGPU
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+static inline int intel_gvt_hypervisor_get_vfio_device(struct intel_vgpu *vgpu)
+{
+ if (!intel_gvt_host.mpt->get_vfio_device)
+ return 0;
+
+ return intel_gvt_host.mpt->get_vfio_device(vgpu);
+}
+
+/**
+ * intel_gvt_hypervisor_put_vfio_device - decrease vfio device ref count
+ * @vgpu: a vGPU
+ */
+static inline void intel_gvt_hypervisor_put_vfio_device(struct intel_vgpu *vgpu)
+{
+ if (!intel_gvt_host.mpt->put_vfio_device)
+ return;
+
+ intel_gvt_host.mpt->put_vfio_device(vgpu);
+}
+
#endif /* _GVT_MPT_H_ */
diff --git a/drivers/gpu/drm/i915/gvt/opregion.c b/drivers/gpu/drm/i915/gvt/opregion.c
index 80720e59723a..8420d1fc3ddb 100644
--- a/drivers/gpu/drm/i915/gvt/opregion.c
+++ b/drivers/gpu/drm/i915/gvt/opregion.c
@@ -213,11 +213,20 @@ static void virt_vbt_generation(struct vbt *v)
v->driver_features.lvds_config = BDB_DRIVER_FEATURE_NO_LVDS;
}
-static int alloc_and_init_virt_opregion(struct intel_vgpu *vgpu)
+/**
+ * intel_vgpu_init_opregion - initialize the stuff used to emulate opregion
+ * @vgpu: a vGPU
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+int intel_vgpu_init_opregion(struct intel_vgpu *vgpu)
{
u8 *buf;
struct opregion_header *header;
struct vbt v;
+ const char opregion_signature[16] = OPREGION_SIGNATURE;
gvt_dbg_core("init vgpu%d opregion\n", vgpu->id);
vgpu_opregion(vgpu)->va = (void *)__get_free_pages(GFP_KERNEL |
@@ -231,8 +240,8 @@ static int alloc_and_init_virt_opregion(struct intel_vgpu *vgpu)
/* emulated opregion with VBT mailbox only */
buf = (u8 *)vgpu_opregion(vgpu)->va;
header = (struct opregion_header *)buf;
- memcpy(header->signature, OPREGION_SIGNATURE,
- sizeof(OPREGION_SIGNATURE));
+ memcpy(header->signature, opregion_signature,
+ sizeof(opregion_signature));
header->size = 0x8;
header->opregion_ver = 0x02000000;
header->mboxes = MBOX_VBT;
@@ -250,25 +259,6 @@ static int alloc_and_init_virt_opregion(struct intel_vgpu *vgpu)
return 0;
}
-static int init_vgpu_opregion(struct intel_vgpu *vgpu, u32 gpa)
-{
- int i, ret;
-
- if (WARN((vgpu_opregion(vgpu)->va),
- "vgpu%d: opregion has been initialized already.\n",
- vgpu->id))
- return -EINVAL;
-
- ret = alloc_and_init_virt_opregion(vgpu);
- if (ret < 0)
- return ret;
-
- for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++)
- vgpu_opregion(vgpu)->gfn[i] = (gpa >> PAGE_SHIFT) + i;
-
- return 0;
-}
-
static int map_vgpu_opregion(struct intel_vgpu *vgpu, bool map)
{
u64 mfn;
@@ -290,59 +280,91 @@ static int map_vgpu_opregion(struct intel_vgpu *vgpu, bool map)
return ret;
}
}
+
+ vgpu_opregion(vgpu)->mapped = map;
+
return 0;
}
/**
- * intel_vgpu_clean_opregion - clean the stuff used to emulate opregion
+ * intel_vgpu_opregion_base_write_handler - Opregion base register write handler
+ *
* @vgpu: a vGPU
+ * @gpa: guest physical address of opregion
*
+ * Returns:
+ * Zero on success, negative error code if failed.
*/
-void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu)
+int intel_vgpu_opregion_base_write_handler(struct intel_vgpu *vgpu, u32 gpa)
{
- gvt_dbg_core("vgpu%d: clean vgpu opregion\n", vgpu->id);
- if (!vgpu_opregion(vgpu)->va)
- return;
+ int i, ret = 0;
+ unsigned long pfn;
- if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_XEN) {
- map_vgpu_opregion(vgpu, false);
- free_pages((unsigned long)vgpu_opregion(vgpu)->va,
- get_order(INTEL_GVT_OPREGION_SIZE));
+ gvt_dbg_core("emulate opregion from kernel\n");
+
+ switch (intel_gvt_host.hypervisor_type) {
+ case INTEL_GVT_HYPERVISOR_KVM:
+ pfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, gpa >> PAGE_SHIFT);
+ vgpu_opregion(vgpu)->va_gopregion = memremap(pfn << PAGE_SHIFT,
+ INTEL_GVT_OPREGION_SIZE,
+ MEMREMAP_WB);
+ if (!vgpu_opregion(vgpu)->va_gopregion) {
+ gvt_vgpu_err("failed to map guest opregion\n");
+ ret = -EFAULT;
+ }
+ vgpu_opregion(vgpu)->mapped = true;
+ break;
+ case INTEL_GVT_HYPERVISOR_XEN:
+		/*
+		 * A Windows guest on XenGT will write this register twice:
+		 * once from the Xen hvmloader and once from the Windows
+		 * graphics driver.
+		 */
+ if (vgpu_opregion(vgpu)->mapped)
+ map_vgpu_opregion(vgpu, false);
+
+ for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++)
+ vgpu_opregion(vgpu)->gfn[i] = (gpa >> PAGE_SHIFT) + i;
- vgpu_opregion(vgpu)->va = NULL;
+ ret = map_vgpu_opregion(vgpu, true);
+ break;
+ default:
+ ret = -EINVAL;
+ gvt_vgpu_err("not supported hypervisor\n");
}
+
+ return ret;
}
/**
- * intel_vgpu_init_opregion - initialize the stuff used to emulate opregion
+ * intel_vgpu_clean_opregion - clean the stuff used to emulate opregion
* @vgpu: a vGPU
- * @gpa: guest physical address of opregion
*
- * Returns:
- * Zero on success, negative error code if failed.
*/
-int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa)
+void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu)
{
- int ret;
+ gvt_dbg_core("vgpu%d: clean vgpu opregion\n", vgpu->id);
- gvt_dbg_core("vgpu%d: init vgpu opregion\n", vgpu->id);
+ if (!vgpu_opregion(vgpu)->va)
+ return;
if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_XEN) {
- gvt_dbg_core("emulate opregion from kernel\n");
-
- ret = init_vgpu_opregion(vgpu, gpa);
- if (ret)
- return ret;
-
- ret = map_vgpu_opregion(vgpu, true);
- if (ret)
- return ret;
+ if (vgpu_opregion(vgpu)->mapped)
+ map_vgpu_opregion(vgpu, false);
+ } else if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_KVM) {
+ if (vgpu_opregion(vgpu)->mapped) {
+ memunmap(vgpu_opregion(vgpu)->va_gopregion);
+ vgpu_opregion(vgpu)->va_gopregion = NULL;
+ }
}
+ free_pages((unsigned long)vgpu_opregion(vgpu)->va,
+ get_order(INTEL_GVT_OPREGION_SIZE));
+
+ vgpu_opregion(vgpu)->va = NULL;
- return 0;
}
+
#define GVT_OPREGION_FUNC(scic) \
({ \
u32 __ret; \
@@ -461,8 +483,21 @@ int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci)
u32 *scic, *parm;
u32 func, subfunc;
- scic = vgpu_opregion(vgpu)->va + INTEL_GVT_OPREGION_SCIC;
- parm = vgpu_opregion(vgpu)->va + INTEL_GVT_OPREGION_PARM;
+ switch (intel_gvt_host.hypervisor_type) {
+ case INTEL_GVT_HYPERVISOR_XEN:
+ scic = vgpu_opregion(vgpu)->va + INTEL_GVT_OPREGION_SCIC;
+ parm = vgpu_opregion(vgpu)->va + INTEL_GVT_OPREGION_PARM;
+ break;
+ case INTEL_GVT_HYPERVISOR_KVM:
+ scic = vgpu_opregion(vgpu)->va_gopregion +
+ INTEL_GVT_OPREGION_SCIC;
+ parm = vgpu_opregion(vgpu)->va_gopregion +
+ INTEL_GVT_OPREGION_PARM;
+ break;
+ default:
+ gvt_vgpu_err("not supported hypervisor\n");
+ return -EINVAL;
+ }
if (!(swsci & SWSCI_SCI_SELECT)) {
gvt_vgpu_err("requesting SMI service\n");
diff --git a/drivers/gpu/drm/i915/gvt/render.c b/drivers/gpu/drm/i915/gvt/render.c
deleted file mode 100644
index dac12c25f349..000000000000
--- a/drivers/gpu/drm/i915/gvt/render.c
+++ /dev/null
@@ -1,406 +0,0 @@
-/*
- * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- * Authors:
- * Eddie Dong <eddie.dong@intel.com>
- * Kevin Tian <kevin.tian@intel.com>
- *
- * Contributors:
- * Zhi Wang <zhi.a.wang@intel.com>
- * Changbin Du <changbin.du@intel.com>
- * Zhenyu Wang <zhenyuw@linux.intel.com>
- * Tina Zhang <tina.zhang@intel.com>
- * Bing Niu <bing.niu@intel.com>
- *
- */
-
-#include "i915_drv.h"
-#include "gvt.h"
-#include "trace.h"
-
-struct render_mmio {
- int ring_id;
- i915_reg_t reg;
- u32 mask;
- bool in_context;
- u32 value;
-};
-
-static struct render_mmio gen8_render_mmio_list[] __cacheline_aligned = {
- {RCS, _MMIO(0x229c), 0xffff, false},
- {RCS, _MMIO(0x2248), 0x0, false},
- {RCS, _MMIO(0x2098), 0x0, false},
- {RCS, _MMIO(0x20c0), 0xffff, true},
- {RCS, _MMIO(0x24d0), 0, false},
- {RCS, _MMIO(0x24d4), 0, false},
- {RCS, _MMIO(0x24d8), 0, false},
- {RCS, _MMIO(0x24dc), 0, false},
- {RCS, _MMIO(0x24e0), 0, false},
- {RCS, _MMIO(0x24e4), 0, false},
- {RCS, _MMIO(0x24e8), 0, false},
- {RCS, _MMIO(0x24ec), 0, false},
- {RCS, _MMIO(0x24f0), 0, false},
- {RCS, _MMIO(0x24f4), 0, false},
- {RCS, _MMIO(0x24f8), 0, false},
- {RCS, _MMIO(0x24fc), 0, false},
- {RCS, _MMIO(0x7004), 0xffff, true},
- {RCS, _MMIO(0x7008), 0xffff, true},
- {RCS, _MMIO(0x7000), 0xffff, true},
- {RCS, _MMIO(0x7010), 0xffff, true},
- {RCS, _MMIO(0x7300), 0xffff, true},
- {RCS, _MMIO(0x83a4), 0xffff, true},
-
- {BCS, _MMIO(0x2229c), 0xffff, false},
- {BCS, _MMIO(0x2209c), 0xffff, false},
- {BCS, _MMIO(0x220c0), 0xffff, false},
- {BCS, _MMIO(0x22098), 0x0, false},
- {BCS, _MMIO(0x22028), 0x0, false},
-};
-
-static struct render_mmio gen9_render_mmio_list[] __cacheline_aligned = {
- {RCS, _MMIO(0x229c), 0xffff, false},
- {RCS, _MMIO(0x2248), 0x0, false},
- {RCS, _MMIO(0x2098), 0x0, false},
- {RCS, _MMIO(0x20c0), 0xffff, true},
- {RCS, _MMIO(0x24d0), 0, false},
- {RCS, _MMIO(0x24d4), 0, false},
- {RCS, _MMIO(0x24d8), 0, false},
- {RCS, _MMIO(0x24dc), 0, false},
- {RCS, _MMIO(0x24e0), 0, false},
- {RCS, _MMIO(0x24e4), 0, false},
- {RCS, _MMIO(0x24e8), 0, false},
- {RCS, _MMIO(0x24ec), 0, false},
- {RCS, _MMIO(0x24f0), 0, false},
- {RCS, _MMIO(0x24f4), 0, false},
- {RCS, _MMIO(0x24f8), 0, false},
- {RCS, _MMIO(0x24fc), 0, false},
- {RCS, _MMIO(0x7004), 0xffff, true},
- {RCS, _MMIO(0x7008), 0xffff, true},
- {RCS, _MMIO(0x7000), 0xffff, true},
- {RCS, _MMIO(0x7010), 0xffff, true},
- {RCS, _MMIO(0x7300), 0xffff, true},
- {RCS, _MMIO(0x83a4), 0xffff, true},
-
- {RCS, _MMIO(0x40e0), 0, false},
- {RCS, _MMIO(0x40e4), 0, false},
- {RCS, _MMIO(0x2580), 0xffff, true},
- {RCS, _MMIO(0x7014), 0xffff, true},
- {RCS, _MMIO(0x20ec), 0xffff, false},
- {RCS, _MMIO(0xb118), 0, false},
- {RCS, _MMIO(0xe100), 0xffff, true},
- {RCS, _MMIO(0xe180), 0xffff, true},
- {RCS, _MMIO(0xe184), 0xffff, true},
- {RCS, _MMIO(0xe188), 0xffff, true},
- {RCS, _MMIO(0xe194), 0xffff, true},
- {RCS, _MMIO(0x4de0), 0, false},
- {RCS, _MMIO(0x4de4), 0, false},
- {RCS, _MMIO(0x4de8), 0, false},
- {RCS, _MMIO(0x4dec), 0, false},
- {RCS, _MMIO(0x4df0), 0, false},
- {RCS, _MMIO(0x4df4), 0, false},
-
- {BCS, _MMIO(0x2229c), 0xffff, false},
- {BCS, _MMIO(0x2209c), 0xffff, false},
- {BCS, _MMIO(0x220c0), 0xffff, false},
- {BCS, _MMIO(0x22098), 0x0, false},
- {BCS, _MMIO(0x22028), 0x0, false},
-
- {VCS2, _MMIO(0x1c028), 0xffff, false},
-
- {VECS, _MMIO(0x1a028), 0xffff, false},
-
- {RCS, _MMIO(0x7304), 0xffff, true},
- {RCS, _MMIO(0x2248), 0x0, false},
- {RCS, _MMIO(0x940c), 0x0, false},
- {RCS, _MMIO(0x4ab8), 0x0, false},
-
- {RCS, _MMIO(0x4ab0), 0x0, false},
- {RCS, _MMIO(0x20d4), 0x0, false},
-
- {RCS, _MMIO(0xb004), 0x0, false},
- {RCS, _MMIO(0x20a0), 0x0, false},
- {RCS, _MMIO(0x20e4), 0xffff, false},
-};
-
-static u32 gen9_render_mocs[I915_NUM_ENGINES][64];
-static u32 gen9_render_mocs_L3[32];
-
-static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
-{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
- struct intel_vgpu_submission *s = &vgpu->submission;
- enum forcewake_domains fw;
- i915_reg_t reg;
- u32 regs[] = {
- [RCS] = 0x4260,
- [VCS] = 0x4264,
- [VCS2] = 0x4268,
- [BCS] = 0x426c,
- [VECS] = 0x4270,
- };
-
- if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
- return;
-
- if (!test_and_clear_bit(ring_id, (void *)s->tlb_handle_pending))
- return;
-
- reg = _MMIO(regs[ring_id]);
-
- /* WaForceWakeRenderDuringMmioTLBInvalidate:skl
- * we need to put a forcewake when invalidating RCS TLB caches,
- * otherwise device can go to RC6 state and interrupt invalidation
- * process
- */
- fw = intel_uncore_forcewake_for_reg(dev_priv, reg,
- FW_REG_READ | FW_REG_WRITE);
- if (ring_id == RCS && (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)))
- fw |= FORCEWAKE_RENDER;
-
- intel_uncore_forcewake_get(dev_priv, fw);
-
- I915_WRITE_FW(reg, 0x1);
-
- if (wait_for_atomic((I915_READ_FW(reg) == 0), 50))
- gvt_vgpu_err("timeout in invalidate ring (%d) tlb\n", ring_id);
- else
- vgpu_vreg(vgpu, regs[ring_id]) = 0;
-
- intel_uncore_forcewake_put(dev_priv, fw);
-
- gvt_dbg_core("invalidate TLB for ring %d\n", ring_id);
-}
-
-static void load_mocs(struct intel_vgpu *vgpu, int ring_id)
-{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
- i915_reg_t offset, l3_offset;
- u32 regs[] = {
- [RCS] = 0xc800,
- [VCS] = 0xc900,
- [VCS2] = 0xca00,
- [BCS] = 0xcc00,
- [VECS] = 0xcb00,
- };
- int i;
-
- if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
- return;
-
- offset.reg = regs[ring_id];
- for (i = 0; i < 64; i++) {
- gen9_render_mocs[ring_id][i] = I915_READ_FW(offset);
- I915_WRITE_FW(offset, vgpu_vreg(vgpu, offset));
- offset.reg += 4;
- }
-
- if (ring_id == RCS) {
- l3_offset.reg = 0xb020;
- for (i = 0; i < 32; i++) {
- gen9_render_mocs_L3[i] = I915_READ_FW(l3_offset);
- I915_WRITE_FW(l3_offset, vgpu_vreg(vgpu, l3_offset));
- l3_offset.reg += 4;
- }
- }
-}
-
-static void restore_mocs(struct intel_vgpu *vgpu, int ring_id)
-{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
- i915_reg_t offset, l3_offset;
- u32 regs[] = {
- [RCS] = 0xc800,
- [VCS] = 0xc900,
- [VCS2] = 0xca00,
- [BCS] = 0xcc00,
- [VECS] = 0xcb00,
- };
- int i;
-
- if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
- return;
-
- offset.reg = regs[ring_id];
- for (i = 0; i < 64; i++) {
- vgpu_vreg(vgpu, offset) = I915_READ_FW(offset);
- I915_WRITE_FW(offset, gen9_render_mocs[ring_id][i]);
- offset.reg += 4;
- }
-
- if (ring_id == RCS) {
- l3_offset.reg = 0xb020;
- for (i = 0; i < 32; i++) {
- vgpu_vreg(vgpu, l3_offset) = I915_READ_FW(l3_offset);
- I915_WRITE_FW(l3_offset, gen9_render_mocs_L3[i]);
- l3_offset.reg += 4;
- }
- }
-}
-
-#define CTX_CONTEXT_CONTROL_VAL 0x03
-
-/* Switch ring mmio values (context) from host to a vgpu. */
-static void switch_mmio_to_vgpu(struct intel_vgpu *vgpu, int ring_id)
-{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
- struct intel_vgpu_submission *s = &vgpu->submission;
- u32 *reg_state = s->shadow_ctx->engine[ring_id].lrc_reg_state;
- u32 ctx_ctrl = reg_state[CTX_CONTEXT_CONTROL_VAL];
- u32 inhibit_mask =
- _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
- i915_reg_t last_reg = _MMIO(0);
- struct render_mmio *mmio;
- u32 v;
- int i, array_size;
-
- if (IS_SKYLAKE(vgpu->gvt->dev_priv)
- || IS_KABYLAKE(vgpu->gvt->dev_priv)) {
- mmio = gen9_render_mmio_list;
- array_size = ARRAY_SIZE(gen9_render_mmio_list);
- load_mocs(vgpu, ring_id);
- } else {
- mmio = gen8_render_mmio_list;
- array_size = ARRAY_SIZE(gen8_render_mmio_list);
- }
-
- for (i = 0; i < array_size; i++, mmio++) {
- if (mmio->ring_id != ring_id)
- continue;
-
- mmio->value = I915_READ_FW(mmio->reg);
-
- /*
- * if it is an inhibit context, load in_context mmio
- * into HW by mmio write. If it is not, skip this mmio
- * write.
- */
- if (mmio->in_context &&
- (ctx_ctrl & inhibit_mask) != inhibit_mask)
- continue;
-
- if (mmio->mask)
- v = vgpu_vreg(vgpu, mmio->reg) | (mmio->mask << 16);
- else
- v = vgpu_vreg(vgpu, mmio->reg);
-
- I915_WRITE_FW(mmio->reg, v);
- last_reg = mmio->reg;
-
- trace_render_mmio(vgpu->id, "load",
- i915_mmio_reg_offset(mmio->reg),
- mmio->value, v);
- }
-
- /* Make sure the swiched MMIOs has taken effect. */
- if (likely(INTEL_GVT_MMIO_OFFSET(last_reg)))
- I915_READ_FW(last_reg);
-
- handle_tlb_pending_event(vgpu, ring_id);
-}
-
-/* Switch ring mmio values (context) from vgpu to host. */
-static void switch_mmio_to_host(struct intel_vgpu *vgpu, int ring_id)
-{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
- struct render_mmio *mmio;
- i915_reg_t last_reg = _MMIO(0);
- u32 v;
- int i, array_size;
-
- if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
- mmio = gen9_render_mmio_list;
- array_size = ARRAY_SIZE(gen9_render_mmio_list);
- restore_mocs(vgpu, ring_id);
- } else {
- mmio = gen8_render_mmio_list;
- array_size = ARRAY_SIZE(gen8_render_mmio_list);
- }
-
- for (i = 0; i < array_size; i++, mmio++) {
- if (mmio->ring_id != ring_id)
- continue;
-
- vgpu_vreg(vgpu, mmio->reg) = I915_READ_FW(mmio->reg);
-
- if (mmio->mask) {
- vgpu_vreg(vgpu, mmio->reg) &= ~(mmio->mask << 16);
- v = mmio->value | (mmio->mask << 16);
- } else
- v = mmio->value;
-
- if (mmio->in_context)
- continue;
-
- I915_WRITE_FW(mmio->reg, v);
- last_reg = mmio->reg;
-
- trace_render_mmio(vgpu->id, "restore",
- i915_mmio_reg_offset(mmio->reg),
- mmio->value, v);
- }
-
- /* Make sure the swiched MMIOs has taken effect. */
- if (likely(INTEL_GVT_MMIO_OFFSET(last_reg)))
- I915_READ_FW(last_reg);
-}
-
-/**
- * intel_gvt_switch_render_mmio - switch mmio context of specific engine
- * @pre: the last vGPU that own the engine
- * @next: the vGPU to switch to
- * @ring_id: specify the engine
- *
- * If pre is null indicates that host own the engine. If next is null
- * indicates that we are switching to host workload.
- */
-void intel_gvt_switch_mmio(struct intel_vgpu *pre,
- struct intel_vgpu *next, int ring_id)
-{
- struct drm_i915_private *dev_priv;
-
- if (WARN_ON(!pre && !next))
- return;
-
- gvt_dbg_render("switch ring %d from %s to %s\n", ring_id,
- pre ? "vGPU" : "host", next ? "vGPU" : "HOST");
-
- dev_priv = pre ? pre->gvt->dev_priv : next->gvt->dev_priv;
-
- /**
- * We are using raw mmio access wrapper to improve the
- * performace for batch mmio read/write, so we need
- * handle forcewake mannually.
- */
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
-
- /**
- * TODO: Optimize for vGPU to vGPU switch by merging
- * switch_mmio_to_host() and switch_mmio_to_vgpu().
- */
- if (pre)
- switch_mmio_to_host(pre, ring_id);
-
- if (next)
- switch_mmio_to_vgpu(next, ring_id);
-
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
-}
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
index 03532dfc0cd5..eea1a2f92099 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.c
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -372,6 +372,11 @@ void intel_vgpu_start_schedule(struct intel_vgpu *vgpu)
vgpu->gvt->scheduler.sched_ops->start_schedule(vgpu);
}
+void intel_gvt_kick_schedule(struct intel_gvt *gvt)
+{
+ intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EVENT_SCHED);
+}
+
void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
{
struct intel_gvt_workload_scheduler *scheduler =
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.h b/drivers/gpu/drm/i915/gvt/sched_policy.h
index ba00a5f7455f..7b59e3e88b8b 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.h
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.h
@@ -57,4 +57,6 @@ void intel_vgpu_start_schedule(struct intel_vgpu *vgpu);
void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu);
+void intel_gvt_kick_schedule(struct intel_gvt *gvt);
+
#endif
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index d6177a0baeec..0056638b0c16 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -189,10 +189,12 @@ static int shadow_context_status_change(struct notifier_block *nb,
atomic_set(&workload->shadow_ctx_active, 1);
break;
case INTEL_CONTEXT_SCHEDULE_OUT:
- case INTEL_CONTEXT_SCHEDULE_PREEMPTED:
save_ring_hw_state(workload->vgpu, ring_id);
atomic_set(&workload->shadow_ctx_active, 0);
break;
+ case INTEL_CONTEXT_SCHEDULE_PREEMPTED:
+ save_ring_hw_state(workload->vgpu, ring_id);
+ break;
default:
WARN_ON(1);
return NOTIFY_OK;
@@ -246,7 +248,7 @@ static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
return 0;
}
-void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
+static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
if (!wa_ctx->indirect_ctx.obj)
return;
@@ -1037,6 +1039,9 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
if (IS_ERR(s->shadow_ctx))
return PTR_ERR(s->shadow_ctx);
+ if (HAS_LOGICAL_RING_PREEMPTION(vgpu->gvt->dev_priv))
+ s->shadow_ctx->priority = INT_MAX;
+
bitmap_zero(s->shadow_ctx_desc_updated, I915_NUM_ENGINES);
s->workloads = kmem_cache_create("gvt-g_vgpu_workload",
@@ -1329,3 +1334,15 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
return workload;
}
+
+/**
+ * intel_vgpu_queue_workload - Queue a vGPU workload
+ * @workload: the workload to queue
+ */
+void intel_vgpu_queue_workload(struct intel_vgpu_workload *workload)
+{
+ list_add_tail(&workload->list,
+ workload_q_head(workload->vgpu, workload->ring_id));
+ intel_gvt_kick_schedule(workload->vgpu->gvt);
+ wake_up(&workload->vgpu->gvt->scheduler.waitq[workload->ring_id]);
+}
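A minimal sketch of how a caller might use the new helper; the function name example_submit() and its error handling are illustrative only — the real call site lives in the execlist submission path, which is not shown in this hunk.

	/* Illustrative only: hand a fully prepared workload to the GVT
	 * scheduler in one call; the helper enqueues it, kicks the
	 * scheduler and wakes the per-ring wait queue.
	 */
	static int example_submit(struct intel_vgpu_workload *workload)
	{
		if (IS_ERR_OR_NULL(workload))
			return workload ? PTR_ERR(workload) : -EINVAL;

		intel_vgpu_queue_workload(workload);
		return 0;
	}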
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
index e4a9f9acd4a9..3de77dfa7c59 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.h
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -125,12 +125,7 @@ struct intel_vgpu_shadow_bb {
#define workload_q_head(vgpu, ring_id) \
(&(vgpu->submission.workload_q_head[ring_id]))
-#define queue_workload(workload) do { \
- list_add_tail(&workload->list, \
- workload_q_head(workload->vgpu, workload->ring_id)); \
- wake_up(&workload->vgpu->gvt-> \
- scheduler.waitq[workload->ring_id]); \
-} while (0)
+void intel_vgpu_queue_workload(struct intel_vgpu_workload *workload);
int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt);
diff --git a/drivers/gpu/drm/i915/gvt/trace.h b/drivers/gpu/drm/i915/gvt/trace.h
index 8c150381d9a4..7a2511538f34 100644
--- a/drivers/gpu/drm/i915/gvt/trace.h
+++ b/drivers/gpu/drm/i915/gvt/trace.h
@@ -330,13 +330,14 @@ TRACE_EVENT(inject_msi,
);
TRACE_EVENT(render_mmio,
- TP_PROTO(int id, char *action, unsigned int reg,
+ TP_PROTO(int old_id, int new_id, char *action, unsigned int reg,
unsigned int old_val, unsigned int new_val),
- TP_ARGS(id, action, reg, new_val, old_val),
+ TP_ARGS(old_id, new_id, action, reg, new_val, old_val),
TP_STRUCT__entry(
- __field(int, id)
+ __field(int, old_id)
+ __field(int, new_id)
__array(char, buf, GVT_TEMP_STR_LEN)
__field(unsigned int, reg)
__field(unsigned int, old_val)
@@ -344,15 +345,17 @@ TRACE_EVENT(render_mmio,
),
TP_fast_assign(
- __entry->id = id;
+ __entry->old_id = old_id;
+ __entry->new_id = new_id;
snprintf(__entry->buf, GVT_TEMP_STR_LEN, "%s", action);
__entry->reg = reg;
__entry->old_val = old_val;
__entry->new_val = new_val;
),
- TP_printk("VM%u %s reg %x, old %08x new %08x\n",
- __entry->id, __entry->buf, __entry->reg,
+ TP_printk("VM%u -> VM%u %s reg %x, old %08x new %08x\n",
+ __entry->old_id, __entry->new_id,
+ __entry->buf, __entry->reg,
__entry->old_val, __entry->new_val)
);
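The tracepoint now records both ends of an MMIO context switch. A hedged sketch of what an updated call site could look like — pre, next, old_v and new_v are assumed local names, and the real call site is in the MMIO-switch code that replaces the deleted render.c, not shown here:

	/* Sketch only: report an MMIO value switched from the outgoing vGPU
	 * (or host, id 0) to the incoming one; old_v/new_v are assumed to
	 * hold the saved and newly written register values.
	 */
	trace_render_mmio(pre ? pre->id : 0, next ? next->id : 0,
			  "switch", i915_mmio_reg_offset(mmio->reg),
			  old_v, new_v);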
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index c6b82d1ba7de..4688619f6a1c 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -38,25 +38,25 @@
void populate_pvinfo_page(struct intel_vgpu *vgpu)
{
/* setup the ballooning information */
- vgpu_vreg64(vgpu, vgtif_reg(magic)) = VGT_MAGIC;
- vgpu_vreg(vgpu, vgtif_reg(version_major)) = 1;
- vgpu_vreg(vgpu, vgtif_reg(version_minor)) = 0;
- vgpu_vreg(vgpu, vgtif_reg(display_ready)) = 0;
- vgpu_vreg(vgpu, vgtif_reg(vgt_id)) = vgpu->id;
+ vgpu_vreg64_t(vgpu, vgtif_reg(magic)) = VGT_MAGIC;
+ vgpu_vreg_t(vgpu, vgtif_reg(version_major)) = 1;
+ vgpu_vreg_t(vgpu, vgtif_reg(version_minor)) = 0;
+ vgpu_vreg_t(vgpu, vgtif_reg(display_ready)) = 0;
+ vgpu_vreg_t(vgpu, vgtif_reg(vgt_id)) = vgpu->id;
- vgpu_vreg(vgpu, vgtif_reg(vgt_caps)) = VGT_CAPS_FULL_48BIT_PPGTT;
- vgpu_vreg(vgpu, vgtif_reg(vgt_caps)) |= VGT_CAPS_HWSP_EMULATION;
+ vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) = VGT_CAPS_FULL_48BIT_PPGTT;
+ vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) |= VGT_CAPS_HWSP_EMULATION;
- vgpu_vreg(vgpu, vgtif_reg(avail_rs.mappable_gmadr.base)) =
+ vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.mappable_gmadr.base)) =
vgpu_aperture_gmadr_base(vgpu);
- vgpu_vreg(vgpu, vgtif_reg(avail_rs.mappable_gmadr.size)) =
+ vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.mappable_gmadr.size)) =
vgpu_aperture_sz(vgpu);
- vgpu_vreg(vgpu, vgtif_reg(avail_rs.nonmappable_gmadr.base)) =
+ vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.nonmappable_gmadr.base)) =
vgpu_hidden_gmadr_base(vgpu);
- vgpu_vreg(vgpu, vgtif_reg(avail_rs.nonmappable_gmadr.size)) =
+ vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.nonmappable_gmadr.size)) =
vgpu_hidden_sz(vgpu);
- vgpu_vreg(vgpu, vgtif_reg(avail_rs.fence_num)) = vgpu_fence_sz(vgpu);
+ vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.fence_num)) = vgpu_fence_sz(vgpu);
gvt_dbg_core("Populate PVINFO PAGE for vGPU %d\n", vgpu->id);
gvt_dbg_core("aperture base [GMADR] 0x%llx size 0x%llx\n",
@@ -236,6 +236,7 @@ void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu)
}
intel_vgpu_stop_schedule(vgpu);
+ intel_vgpu_dmabuf_cleanup(vgpu);
mutex_unlock(&gvt->lock);
}
@@ -265,6 +266,7 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
intel_gvt_hypervisor_detach_vgpu(vgpu);
intel_vgpu_free_resource(vgpu);
intel_vgpu_clean_mmio(vgpu);
+ intel_vgpu_dmabuf_cleanup(vgpu);
vfree(vgpu);
intel_gvt_update_vgpu_types(gvt);
@@ -349,7 +351,8 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
vgpu->handle = param->handle;
vgpu->gvt = gvt;
vgpu->sched_ctl.weight = param->weight;
-
+ INIT_LIST_HEAD(&vgpu->dmabuf_obj_list_head);
+ idr_init(&vgpu->object_idr);
intel_vgpu_init_cfg_space(vgpu, param->primary);
ret = intel_vgpu_init_mmio(vgpu);
@@ -370,10 +373,14 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
if (ret)
goto out_detach_hypervisor_vgpu;
- ret = intel_vgpu_init_display(vgpu, param->resolution);
+ ret = intel_vgpu_init_opregion(vgpu);
if (ret)
goto out_clean_gtt;
+ ret = intel_vgpu_init_display(vgpu, param->resolution);
+ if (ret)
+ goto out_clean_opregion;
+
ret = intel_vgpu_setup_submission(vgpu);
if (ret)
goto out_clean_display;
@@ -386,6 +393,10 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
if (ret)
goto out_clean_sched_policy;
+ ret = intel_gvt_hypervisor_set_opregion(vgpu);
+ if (ret)
+ goto out_clean_sched_policy;
+
mutex_unlock(&gvt->lock);
return vgpu;
@@ -396,6 +407,8 @@ out_clean_submission:
intel_vgpu_clean_submission(vgpu);
out_clean_display:
intel_vgpu_clean_display(vgpu);
+out_clean_opregion:
+ intel_vgpu_clean_opregion(vgpu);
out_clean_gtt:
intel_vgpu_clean_gtt(vgpu);
out_detach_hypervisor_vgpu:
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 28294470ae31..e968aeae1d84 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -37,40 +37,21 @@ static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
return to_i915(node->minor->dev);
}
-static __always_inline void seq_print_param(struct seq_file *m,
- const char *name,
- const char *type,
- const void *x)
-{
- if (!__builtin_strcmp(type, "bool"))
- seq_printf(m, "i915.%s=%s\n", name, yesno(*(const bool *)x));
- else if (!__builtin_strcmp(type, "int"))
- seq_printf(m, "i915.%s=%d\n", name, *(const int *)x);
- else if (!__builtin_strcmp(type, "unsigned int"))
- seq_printf(m, "i915.%s=%u\n", name, *(const unsigned int *)x);
- else if (!__builtin_strcmp(type, "char *"))
- seq_printf(m, "i915.%s=%s\n", name, *(const char **)x);
- else
- BUILD_BUG();
-}
-
static int i915_capabilities(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
const struct intel_device_info *info = INTEL_INFO(dev_priv);
+ struct drm_printer p = drm_seq_file_printer(m);
seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
seq_printf(m, "platform: %s\n", intel_platform_name(info->platform));
seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));
-#define PRINT_FLAG(x) seq_printf(m, #x ": %s\n", yesno(info->x))
- DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG);
-#undef PRINT_FLAG
+ intel_device_info_dump_flags(info, &p);
+ intel_device_info_dump_runtime(info, &p);
kernel_param_lock(THIS_MODULE);
-#define PRINT_PARAM(T, x, ...) seq_print_param(m, #x, #T, &i915_modparams.x);
- I915_PARAMS_FOR_EACH(PRINT_PARAM);
-#undef PRINT_PARAM
+ i915_params_dump(&i915_modparams, &p);
kernel_param_unlock(THIS_MODULE);
return 0;
@@ -111,8 +92,8 @@ static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
u64 size = 0;
struct i915_vma *vma;
- list_for_each_entry(vma, &obj->vma_list, obj_link) {
- if (i915_vma_is_ggtt(vma) && drm_mm_node_allocated(&vma->node))
+ for_each_ggtt_vma(vma, obj) {
+ if (drm_mm_node_allocated(&vma->node))
size += vma->node.size;
}
@@ -522,8 +503,8 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
seq_printf(m, "%u display objects (globally pinned), %llu bytes\n",
dpy_count, dpy_size);
- seq_printf(m, "%llu [%llu] gtt total\n",
- ggtt->base.total, ggtt->mappable_end);
+ seq_printf(m, "%llu [%pa] gtt total\n",
+ ggtt->base.total, &ggtt->mappable_end);
seq_printf(m, "Supported page sizes: %s\n",
stringify_page_sizes(INTEL_INFO(dev_priv)->page_sizes,
buf, sizeof(buf)));
@@ -664,38 +645,6 @@ static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
return 0;
}
-static void i915_ring_seqno_info(struct seq_file *m,
- struct intel_engine_cs *engine)
-{
- struct intel_breadcrumbs *b = &engine->breadcrumbs;
- struct rb_node *rb;
-
- seq_printf(m, "Current sequence (%s): %x\n",
- engine->name, intel_engine_get_seqno(engine));
-
- spin_lock_irq(&b->rb_lock);
- for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
- struct intel_wait *w = rb_entry(rb, typeof(*w), node);
-
- seq_printf(m, "Waiting (%s): %s [%d] on %x\n",
- engine->name, w->tsk->comm, w->tsk->pid, w->seqno);
- }
- spin_unlock_irq(&b->rb_lock);
-}
-
-static int i915_gem_seqno_info(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- for_each_engine(engine, dev_priv, id)
- i915_ring_seqno_info(m, engine);
-
- return 0;
-}
-
-
static int i915_interrupt_info(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -896,13 +845,12 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
seq_printf(m, "Graphics Interrupt mask: %08x\n",
I915_READ(GTIMR));
}
- for_each_engine(engine, dev_priv, id) {
- if (INTEL_GEN(dev_priv) >= 6) {
+ if (INTEL_GEN(dev_priv) >= 6) {
+ for_each_engine(engine, dev_priv, id) {
seq_printf(m,
"Graphics Interrupt mask (%s): %08x\n",
engine->name, I915_READ_IMR(engine));
}
- i915_ring_seqno_info(m, engine);
}
intel_runtime_pm_put(dev_priv);
@@ -1634,20 +1582,23 @@ static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
static int i915_fbc_status(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct intel_fbc *fbc = &dev_priv->fbc;
- if (!HAS_FBC(dev_priv)) {
- seq_puts(m, "FBC unsupported on this chipset\n");
- return 0;
- }
+ if (!HAS_FBC(dev_priv))
+ return -ENODEV;
intel_runtime_pm_get(dev_priv);
- mutex_lock(&dev_priv->fbc.lock);
+ mutex_lock(&fbc->lock);
if (intel_fbc_is_active(dev_priv))
seq_puts(m, "FBC enabled\n");
else
- seq_printf(m, "FBC disabled: %s\n",
- dev_priv->fbc.no_fbc_reason);
+ seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);
+
+ if (fbc->work.scheduled)
+ seq_printf(m, "FBC worker scheduled on vblank %u, now %llu\n",
+ fbc->work.scheduled_vblank,
+ drm_crtc_vblank_count(&fbc->crtc->base));
if (intel_fbc_is_active(dev_priv)) {
u32 mask;
@@ -1667,7 +1618,7 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
seq_printf(m, "Compressing: %s\n", yesno(mask));
}
- mutex_unlock(&dev_priv->fbc.lock);
+ mutex_unlock(&fbc->lock);
intel_runtime_pm_put(dev_priv);
return 0;
@@ -1714,10 +1665,8 @@ static int i915_ips_status(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
- if (!HAS_IPS(dev_priv)) {
- seq_puts(m, "not supported\n");
- return 0;
- }
+ if (!HAS_IPS(dev_priv))
+ return -ENODEV;
intel_runtime_pm_get(dev_priv);
@@ -1803,10 +1752,8 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
int gpu_freq, ia_freq;
unsigned int max_gpu_freq, min_gpu_freq;
- if (!HAS_LLC(dev_priv)) {
- seq_puts(m, "unsupported on this chipset\n");
- return 0;
- }
+ if (!HAS_LLC(dev_priv))
+ return -ENODEV;
intel_runtime_pm_get(dev_priv);
@@ -2286,8 +2233,8 @@ static int i915_huc_load_status_info(struct seq_file *m, void *data)
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct drm_printer p;
- if (!HAS_HUC_UCODE(dev_priv))
- return 0;
+ if (!HAS_HUC(dev_priv))
+ return -ENODEV;
p = drm_seq_file_printer(m);
intel_uc_fw_dump(&dev_priv->huc.fw, &p);
@@ -2305,8 +2252,8 @@ static int i915_guc_load_status_info(struct seq_file *m, void *data)
struct drm_printer p;
u32 tmp, i;
- if (!HAS_GUC_UCODE(dev_priv))
- return 0;
+ if (!HAS_GUC(dev_priv))
+ return -ENODEV;
p = drm_seq_file_printer(m);
intel_uc_fw_dump(&dev_priv->guc.fw, &p);
@@ -2379,29 +2326,16 @@ static void i915_guc_client_info(struct seq_file *m,
seq_printf(m, "\tTotal: %llu\n", tot);
}
-static bool check_guc_submission(struct seq_file *m)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- const struct intel_guc *guc = &dev_priv->guc;
-
- if (!guc->execbuf_client) {
- seq_printf(m, "GuC submission %s\n",
- HAS_GUC_SCHED(dev_priv) ?
- "disabled" :
- "not supported");
- return false;
- }
-
- return true;
-}
-
static int i915_guc_info(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
const struct intel_guc *guc = &dev_priv->guc;
- if (!check_guc_submission(m))
- return 0;
+ if (!USES_GUC_SUBMISSION(dev_priv))
+ return -ENODEV;
+
+ GEM_BUG_ON(!guc->execbuf_client);
+ GEM_BUG_ON(!guc->preempt_client);
seq_printf(m, "Doorbell map:\n");
seq_printf(m, "\t%*pb\n", GUC_NUM_DOORBELLS, guc->doorbell_bitmap);
@@ -2428,8 +2362,8 @@ static int i915_guc_stage_pool(struct seq_file *m, void *data)
unsigned int tmp;
int index;
- if (!check_guc_submission(m))
- return 0;
+ if (!USES_GUC_SUBMISSION(dev_priv))
+ return -ENODEV;
for (index = 0; index < GUC_MAX_STAGE_DESCRIPTORS; index++, desc++) {
struct intel_engine_cs *engine;
@@ -2482,6 +2416,9 @@ static int i915_guc_log_dump(struct seq_file *m, void *data)
u32 *log;
int i = 0;
+ if (!HAS_GUC(dev_priv))
+ return -ENODEV;
+
if (dump_load_err)
obj = dev_priv->guc.load_err_log;
else if (dev_priv->guc.log.vma)
@@ -2513,6 +2450,9 @@ static int i915_guc_log_control_get(void *data, u64 *val)
{
struct drm_i915_private *dev_priv = data;
+ if (!HAS_GUC(dev_priv))
+ return -ENODEV;
+
if (!dev_priv->guc.log.vma)
return -EINVAL;
@@ -2526,6 +2466,9 @@ static int i915_guc_log_control_set(void *data, u64 val)
struct drm_i915_private *dev_priv = data;
int ret;
+ if (!HAS_GUC(dev_priv))
+ return -ENODEV;
+
if (!dev_priv->guc.log.vma)
return -EINVAL;
@@ -2576,10 +2519,8 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
enum pipe pipe;
bool enabled = false;
- if (!HAS_PSR(dev_priv)) {
- seq_puts(m, "PSR not supported\n");
- return 0;
- }
+ if (!HAS_PSR(dev_priv))
+ return -ENODEV;
intel_runtime_pm_get(dev_priv);
@@ -2818,10 +2759,8 @@ static int i915_dmc_info(struct seq_file *m, void *unused)
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct intel_csr *csr;
- if (!HAS_CSR(dev_priv)) {
- seq_puts(m, "not supported\n");
- return 0;
- }
+ if (!HAS_CSR(dev_priv))
+ return -ENODEV;
csr = &dev_priv->csr;
@@ -3213,7 +3152,7 @@ static int i915_engine_info(struct seq_file *m, void *unused)
p = drm_seq_file_printer(m);
for_each_engine(engine, dev_priv, id)
- intel_engine_dump(engine, &p);
+ intel_engine_dump(engine, &p, "%s\n", engine->name);
intel_runtime_pm_put(dev_priv);
@@ -3357,7 +3296,7 @@ static int i915_ddb_info(struct seq_file *m, void *unused)
int plane;
if (INTEL_GEN(dev_priv) < 9)
- return 0;
+ return -ENODEV;
drm_modeset_lock_all(dev);
@@ -4672,7 +4611,6 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_gem_objects", i915_gem_object_info, 0},
{"i915_gem_gtt", i915_gem_gtt_info, 0},
{"i915_gem_stolen", i915_gem_stolen_list_info },
- {"i915_gem_seqno", i915_gem_seqno_info, 0},
{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
{"i915_gem_interrupt", i915_interrupt_info, 0},
{"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 7faf20aff25a..6c8da9d20c33 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -617,10 +617,12 @@ static void i915_gem_fini(struct drm_i915_private *dev_priv)
mutex_lock(&dev_priv->drm.struct_mutex);
intel_uc_fini_hw(dev_priv);
+ intel_uc_fini(dev_priv);
i915_gem_cleanup_engines(dev_priv);
i915_gem_contexts_fini(dev_priv);
mutex_unlock(&dev_priv->drm.struct_mutex);
+ intel_uc_fini_wq(dev_priv);
i915_gem_cleanup_userptr(dev_priv);
i915_gem_drain_freed_objects(dev_priv);
@@ -726,7 +728,7 @@ static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
if (!ap)
return -ENOMEM;
- ap->ranges[0].base = ggtt->mappable_base;
+ ap->ranges[0].base = ggtt->gmadr.start;
ap->ranges[0].size = ggtt->mappable_end;
primary =
@@ -929,8 +931,6 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
intel_display_crc_init(dev_priv);
- intel_device_info_dump(dev_priv);
-
intel_detect_preproduction_hw(dev_priv);
return 0;
@@ -1082,7 +1082,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
if (i915_inject_load_failure())
return -ENODEV;
- intel_device_info_runtime_init(dev_priv);
+ intel_device_info_runtime_init(mkwrite_device_info(dev_priv));
intel_sanitize_options(dev_priv);
@@ -1292,6 +1292,21 @@ static void i915_driver_unregister(struct drm_i915_private *dev_priv)
i915_gem_shrinker_unregister(dev_priv);
}
+static void i915_welcome_messages(struct drm_i915_private *dev_priv)
+{
+ if (drm_debug & DRM_UT_DRIVER) {
+ struct drm_printer p = drm_debug_printer("i915 device info:");
+
+ intel_device_info_dump(&dev_priv->info, &p);
+ intel_device_info_dump_runtime(&dev_priv->info, &p);
+ }
+
+ if (IS_ENABLED(CONFIG_DRM_I915_DEBUG))
+ DRM_INFO("DRM_I915_DEBUG enabled\n");
+ if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+ DRM_INFO("DRM_I915_DEBUG_GEM enabled\n");
+}
+
/**
* i915_driver_load - setup chip and create an initial config
* @pdev: PCI device
@@ -1377,13 +1392,10 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
intel_init_ipc(dev_priv);
- if (IS_ENABLED(CONFIG_DRM_I915_DEBUG))
- DRM_INFO("DRM_I915_DEBUG enabled\n");
- if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
- DRM_INFO("DRM_I915_DEBUG_GEM enabled\n");
-
intel_runtime_pm_put(dev_priv);
+ i915_welcome_messages(dev_priv);
+
return 0;
out_cleanup_hw:
@@ -1897,13 +1909,16 @@ void i915_reset(struct drm_i915_private *i915, unsigned int flags)
disable_irq(i915->drm.irq);
ret = i915_gem_reset_prepare(i915);
if (ret) {
- DRM_ERROR("GPU recovery failed\n");
+ dev_err(i915->drm.dev, "GPU recovery failed\n");
intel_gpu_reset(i915, ALL_ENGINES);
- goto error;
+ goto taint;
}
if (!intel_has_gpu_reset(i915)) {
- DRM_DEBUG_DRIVER("GPU reset disabled\n");
+ if (i915_modparams.reset)
+ dev_err(i915->drm.dev, "GPU reset not supported\n");
+ else
+ DRM_DEBUG_DRIVER("GPU reset disabled\n");
goto error;
}
@@ -1916,12 +1931,9 @@ void i915_reset(struct drm_i915_private *i915, unsigned int flags)
}
if (ret) {
dev_err(i915->drm.dev, "Failed to reset chip\n");
- goto error;
+ goto taint;
}
- i915_gem_reset(i915);
- intel_overlay_reset(i915);
-
/* Ok, now get things going again... */
/*
@@ -1934,6 +1946,9 @@ void i915_reset(struct drm_i915_private *i915, unsigned int flags)
goto error;
}
+ i915_gem_reset(i915);
+ intel_overlay_reset(i915);
+
/*
* Next we need to restore the context, but we don't use those
* yet either...
@@ -1959,6 +1974,20 @@ wakeup:
wake_up_bit(&error->flags, I915_RESET_HANDOFF);
return;
+taint:
+ /*
+ * History tells us that if we cannot reset the GPU now, we
+ * never will. This then impacts everything that is run
+ * subsequently. On failing the reset, we mark the driver
+ * as wedged, preventing further execution on the GPU.
+ * We also want to go one step further and add a taint to the
+ * kernel so that any subsequent faults can be traced back to
+ * this failure. This is important for CI, where if the
+ * GPU/driver fails we would like to reboot and restart testing
+ * rather than continue on into oblivion. For everyone else,
+ * the system should still plod along, but they have been warned!
+ */
+ add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
error:
i915_gem_set_wedged(i915);
i915_gem_retire_requests(i915);
@@ -1992,19 +2021,19 @@ int i915_reset_engine(struct intel_engine_cs *engine, unsigned int flags)
GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &error->flags));
+ active_request = i915_gem_reset_prepare_engine(engine);
+ if (IS_ERR_OR_NULL(active_request)) {
+ /* Either the previous reset failed, or we pardon the reset. */
+ ret = PTR_ERR(active_request);
+ goto out;
+ }
+
if (!(flags & I915_RESET_QUIET)) {
dev_notice(engine->i915->drm.dev,
"Resetting %s after gpu hang\n", engine->name);
}
error->reset_engine_count[engine->id]++;
- active_request = i915_gem_reset_prepare_engine(engine);
- if (IS_ERR(active_request)) {
- DRM_DEBUG_DRIVER("Previous reset failed, promote to full reset\n");
- ret = PTR_ERR(active_request);
- goto out;
- }
-
if (!engine->i915->guc.execbuf_client)
ret = intel_gt_reset_engine(engine->i915, engine);
else
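The failed-reset path above now ends in a kernel taint before the device is wedged, so later faults can be traced back to this point. A hedged sketch of the same pattern in isolation — the function name is illustrative, not from this patch:

	/* Illustrative only: on an unrecoverable reset failure, taint the
	 * kernel first (useful for CI triage), then wedge the device and
	 * retire whatever requests are still outstanding.
	 */
	static void example_handle_reset_failure(struct drm_i915_private *i915)
	{
		add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
		i915_gem_set_wedged(i915);
		i915_gem_retire_requests(i915);
	}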
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 594fd14e66c5..caebd5825279 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -56,12 +56,15 @@
#include "i915_reg.h"
#include "i915_utils.h"
-#include "intel_uncore.h"
#include "intel_bios.h"
+#include "intel_device_info.h"
+#include "intel_display.h"
#include "intel_dpll_mgr.h"
-#include "intel_uc.h"
#include "intel_lrc.h"
+#include "intel_opregion.h"
#include "intel_ringbuffer.h"
+#include "intel_uncore.h"
+#include "intel_uc.h"
#include "i915_gem.h"
#include "i915_gem_context.h"
@@ -80,8 +83,8 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20171201"
-#define DRIVER_TIMESTAMP 1512176839
+#define DRIVER_DATE "20171222"
+#define DRIVER_TIMESTAMP 1513971710
/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
* WARN_ON()) for hw state sanity checks to check for unexpected conditions
@@ -243,173 +246,6 @@ static inline uint_fixed_16_16_t add_fixed16_u32(uint_fixed_16_16_t add1,
return clamp_u64_to_fixed16(interm_sum);
}
-static inline const char *yesno(bool v)
-{
- return v ? "yes" : "no";
-}
-
-static inline const char *onoff(bool v)
-{
- return v ? "on" : "off";
-}
-
-static inline const char *enableddisabled(bool v)
-{
- return v ? "enabled" : "disabled";
-}
-
-enum pipe {
- INVALID_PIPE = -1,
- PIPE_A = 0,
- PIPE_B,
- PIPE_C,
- _PIPE_EDP,
- I915_MAX_PIPES = _PIPE_EDP
-};
-#define pipe_name(p) ((p) + 'A')
-
-enum transcoder {
- TRANSCODER_A = 0,
- TRANSCODER_B,
- TRANSCODER_C,
- TRANSCODER_EDP,
- TRANSCODER_DSI_A,
- TRANSCODER_DSI_C,
- I915_MAX_TRANSCODERS
-};
-
-static inline const char *transcoder_name(enum transcoder transcoder)
-{
- switch (transcoder) {
- case TRANSCODER_A:
- return "A";
- case TRANSCODER_B:
- return "B";
- case TRANSCODER_C:
- return "C";
- case TRANSCODER_EDP:
- return "EDP";
- case TRANSCODER_DSI_A:
- return "DSI A";
- case TRANSCODER_DSI_C:
- return "DSI C";
- default:
- return "<invalid>";
- }
-}
-
-static inline bool transcoder_is_dsi(enum transcoder transcoder)
-{
- return transcoder == TRANSCODER_DSI_A || transcoder == TRANSCODER_DSI_C;
-}
-
-/*
- * Global legacy plane identifier. Valid only for primary/sprite
- * planes on pre-g4x, and only for primary planes on g4x-bdw.
- */
-enum i9xx_plane_id {
- PLANE_A,
- PLANE_B,
- PLANE_C,
-};
-#define plane_name(p) ((p) + 'A')
-
-#define sprite_name(p, s) ((p) * INTEL_INFO(dev_priv)->num_sprites[(p)] + (s) + 'A')
-
-/*
- * Per-pipe plane identifier.
- * I915_MAX_PLANES in the enum below is the maximum (across all platforms)
- * number of planes per CRTC. Not all platforms really have this many planes,
- * which means some arrays of size I915_MAX_PLANES may have unused entries
- * between the topmost sprite plane and the cursor plane.
- *
- * This is expected to be passed to various register macros
- * (eg. PLANE_CTL(), PS_PLANE_SEL(), etc.) so adjust with care.
- */
-enum plane_id {
- PLANE_PRIMARY,
- PLANE_SPRITE0,
- PLANE_SPRITE1,
- PLANE_SPRITE2,
- PLANE_CURSOR,
- I915_MAX_PLANES,
-};
-
-#define for_each_plane_id_on_crtc(__crtc, __p) \
- for ((__p) = PLANE_PRIMARY; (__p) < I915_MAX_PLANES; (__p)++) \
- for_each_if ((__crtc)->plane_ids_mask & BIT(__p))
-
-enum port {
- PORT_NONE = -1,
- PORT_A = 0,
- PORT_B,
- PORT_C,
- PORT_D,
- PORT_E,
- I915_MAX_PORTS
-};
-#define port_name(p) ((p) + 'A')
-
-#define I915_NUM_PHYS_VLV 2
-
-enum dpio_channel {
- DPIO_CH0,
- DPIO_CH1
-};
-
-enum dpio_phy {
- DPIO_PHY0,
- DPIO_PHY1,
- DPIO_PHY2,
-};
-
-enum intel_display_power_domain {
- POWER_DOMAIN_PIPE_A,
- POWER_DOMAIN_PIPE_B,
- POWER_DOMAIN_PIPE_C,
- POWER_DOMAIN_PIPE_A_PANEL_FITTER,
- POWER_DOMAIN_PIPE_B_PANEL_FITTER,
- POWER_DOMAIN_PIPE_C_PANEL_FITTER,
- POWER_DOMAIN_TRANSCODER_A,
- POWER_DOMAIN_TRANSCODER_B,
- POWER_DOMAIN_TRANSCODER_C,
- POWER_DOMAIN_TRANSCODER_EDP,
- POWER_DOMAIN_TRANSCODER_DSI_A,
- POWER_DOMAIN_TRANSCODER_DSI_C,
- POWER_DOMAIN_PORT_DDI_A_LANES,
- POWER_DOMAIN_PORT_DDI_B_LANES,
- POWER_DOMAIN_PORT_DDI_C_LANES,
- POWER_DOMAIN_PORT_DDI_D_LANES,
- POWER_DOMAIN_PORT_DDI_E_LANES,
- POWER_DOMAIN_PORT_DDI_A_IO,
- POWER_DOMAIN_PORT_DDI_B_IO,
- POWER_DOMAIN_PORT_DDI_C_IO,
- POWER_DOMAIN_PORT_DDI_D_IO,
- POWER_DOMAIN_PORT_DDI_E_IO,
- POWER_DOMAIN_PORT_DSI,
- POWER_DOMAIN_PORT_CRT,
- POWER_DOMAIN_PORT_OTHER,
- POWER_DOMAIN_VGA,
- POWER_DOMAIN_AUDIO,
- POWER_DOMAIN_PLLS,
- POWER_DOMAIN_AUX_A,
- POWER_DOMAIN_AUX_B,
- POWER_DOMAIN_AUX_C,
- POWER_DOMAIN_AUX_D,
- POWER_DOMAIN_GMBUS,
- POWER_DOMAIN_MODESET,
- POWER_DOMAIN_INIT,
-
- POWER_DOMAIN_NUM,
-};
-
-#define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
-#define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \
- ((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER)
-#define POWER_DOMAIN_TRANSCODER(tran) \
- ((tran) == TRANSCODER_EDP ? POWER_DOMAIN_TRANSCODER_EDP : \
- (tran) + POWER_DOMAIN_TRANSCODER_A)
-
enum hpd_pin {
HPD_NONE = 0,
HPD_TV = HPD_NONE, /* TV is known to be unreliable */
@@ -471,121 +307,6 @@ struct i915_hotplug {
I915_GEM_DOMAIN_INSTRUCTION | \
I915_GEM_DOMAIN_VERTEX)
-#define for_each_pipe(__dev_priv, __p) \
- for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++)
-#define for_each_pipe_masked(__dev_priv, __p, __mask) \
- for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++) \
- for_each_if ((__mask) & (1 << (__p)))
-#define for_each_universal_plane(__dev_priv, __pipe, __p) \
- for ((__p) = 0; \
- (__p) < INTEL_INFO(__dev_priv)->num_sprites[(__pipe)] + 1; \
- (__p)++)
-#define for_each_sprite(__dev_priv, __p, __s) \
- for ((__s) = 0; \
- (__s) < INTEL_INFO(__dev_priv)->num_sprites[(__p)]; \
- (__s)++)
-
-#define for_each_port_masked(__port, __ports_mask) \
- for ((__port) = PORT_A; (__port) < I915_MAX_PORTS; (__port)++) \
- for_each_if ((__ports_mask) & (1 << (__port)))
-
-#define for_each_crtc(dev, crtc) \
- list_for_each_entry(crtc, &(dev)->mode_config.crtc_list, head)
-
-#define for_each_intel_plane(dev, intel_plane) \
- list_for_each_entry(intel_plane, \
- &(dev)->mode_config.plane_list, \
- base.head)
-
-#define for_each_intel_plane_mask(dev, intel_plane, plane_mask) \
- list_for_each_entry(intel_plane, \
- &(dev)->mode_config.plane_list, \
- base.head) \
- for_each_if ((plane_mask) & \
- (1 << drm_plane_index(&intel_plane->base)))
-
-#define for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) \
- list_for_each_entry(intel_plane, \
- &(dev)->mode_config.plane_list, \
- base.head) \
- for_each_if ((intel_plane)->pipe == (intel_crtc)->pipe)
-
-#define for_each_intel_crtc(dev, intel_crtc) \
- list_for_each_entry(intel_crtc, \
- &(dev)->mode_config.crtc_list, \
- base.head)
-
-#define for_each_intel_crtc_mask(dev, intel_crtc, crtc_mask) \
- list_for_each_entry(intel_crtc, \
- &(dev)->mode_config.crtc_list, \
- base.head) \
- for_each_if ((crtc_mask) & (1 << drm_crtc_index(&intel_crtc->base)))
-
-#define for_each_intel_encoder(dev, intel_encoder) \
- list_for_each_entry(intel_encoder, \
- &(dev)->mode_config.encoder_list, \
- base.head)
-
-#define for_each_intel_connector_iter(intel_connector, iter) \
- while ((intel_connector = to_intel_connector(drm_connector_list_iter_next(iter))))
-
-#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
- list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
- for_each_if ((intel_encoder)->base.crtc == (__crtc))
-
-#define for_each_connector_on_encoder(dev, __encoder, intel_connector) \
- list_for_each_entry((intel_connector), &(dev)->mode_config.connector_list, base.head) \
- for_each_if ((intel_connector)->base.encoder == (__encoder))
-
-#define for_each_power_domain(domain, mask) \
- for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \
- for_each_if (BIT_ULL(domain) & (mask))
-
-#define for_each_power_well(__dev_priv, __power_well) \
- for ((__power_well) = (__dev_priv)->power_domains.power_wells; \
- (__power_well) - (__dev_priv)->power_domains.power_wells < \
- (__dev_priv)->power_domains.power_well_count; \
- (__power_well)++)
-
-#define for_each_power_well_rev(__dev_priv, __power_well) \
- for ((__power_well) = (__dev_priv)->power_domains.power_wells + \
- (__dev_priv)->power_domains.power_well_count - 1; \
- (__power_well) - (__dev_priv)->power_domains.power_wells >= 0; \
- (__power_well)--)
-
-#define for_each_power_domain_well(__dev_priv, __power_well, __domain_mask) \
- for_each_power_well(__dev_priv, __power_well) \
- for_each_if ((__power_well)->domains & (__domain_mask))
-
-#define for_each_power_domain_well_rev(__dev_priv, __power_well, __domain_mask) \
- for_each_power_well_rev(__dev_priv, __power_well) \
- for_each_if ((__power_well)->domains & (__domain_mask))
-
-#define for_each_new_intel_plane_in_state(__state, plane, new_plane_state, __i) \
- for ((__i) = 0; \
- (__i) < (__state)->base.dev->mode_config.num_total_plane && \
- ((plane) = to_intel_plane((__state)->base.planes[__i].ptr), \
- (new_plane_state) = to_intel_plane_state((__state)->base.planes[__i].new_state), 1); \
- (__i)++) \
- for_each_if (plane)
-
-#define for_each_new_intel_crtc_in_state(__state, crtc, new_crtc_state, __i) \
- for ((__i) = 0; \
- (__i) < (__state)->base.dev->mode_config.num_crtc && \
- ((crtc) = to_intel_crtc((__state)->base.crtcs[__i].ptr), \
- (new_crtc_state) = to_intel_crtc_state((__state)->base.crtcs[__i].new_state), 1); \
- (__i)++) \
- for_each_if (crtc)
-
-#define for_each_oldnew_intel_plane_in_state(__state, plane, old_plane_state, new_plane_state, __i) \
- for ((__i) = 0; \
- (__i) < (__state)->base.dev->mode_config.num_total_plane && \
- ((plane) = to_intel_plane((__state)->base.planes[__i].ptr), \
- (old_plane_state) = to_intel_plane_state((__state)->base.planes[__i].old_state), \
- (new_plane_state) = to_intel_plane_state((__state)->base.planes[__i].new_state), 1); \
- (__i)++) \
- for_each_if (plane)
-
struct drm_i915_private;
struct i915_mm_struct;
struct i915_mmu_object;
@@ -622,20 +343,6 @@ struct drm_i915_file_private {
atomic_t context_bans;
};
-/* Used by dp and fdi links */
-struct intel_link_m_n {
- uint32_t tu;
- uint32_t gmch_m;
- uint32_t gmch_n;
- uint32_t link_m;
- uint32_t link_n;
-};
-
-void intel_link_compute_m_n(int bpp, int nlanes,
- int pixel_clock, int link_clock,
- struct intel_link_m_n *m_n,
- bool reduce_m_n);
-
/* Interface history:
*
* 1.1: Original.
@@ -650,27 +357,6 @@ void intel_link_compute_m_n(int bpp, int nlanes,
#define DRIVER_MINOR 6
#define DRIVER_PATCHLEVEL 0
-struct opregion_header;
-struct opregion_acpi;
-struct opregion_swsci;
-struct opregion_asle;
-
-struct intel_opregion {
- struct opregion_header *header;
- struct opregion_acpi *acpi;
- struct opregion_swsci *swsci;
- u32 swsci_gbda_sub_functions;
- u32 swsci_sbcb_sub_functions;
- struct opregion_asle *asle;
- void *rvda;
- void *vbt_firmware;
- const void *vbt;
- u32 vbt_size;
- u32 *lid_state;
- struct work_struct asle_work;
-};
-#define OPREGION_SIZE (8*1024)
-
struct intel_overlay;
struct intel_overlay_error_state;
@@ -763,137 +449,6 @@ struct intel_csr {
uint32_t allowed_dc_mask;
};
-#define DEV_INFO_FOR_EACH_FLAG(func) \
- func(is_mobile); \
- func(is_lp); \
- func(is_alpha_support); \
- /* Keep has_* in alphabetical order */ \
- func(has_64bit_reloc); \
- func(has_aliasing_ppgtt); \
- func(has_csr); \
- func(has_ddi); \
- func(has_dp_mst); \
- func(has_reset_engine); \
- func(has_fbc); \
- func(has_fpga_dbg); \
- func(has_full_ppgtt); \
- func(has_full_48bit_ppgtt); \
- func(has_gmch_display); \
- func(has_guc); \
- func(has_guc_ct); \
- func(has_hotplug); \
- func(has_l3_dpf); \
- func(has_llc); \
- func(has_logical_ring_contexts); \
- func(has_logical_ring_preemption); \
- func(has_overlay); \
- func(has_pooled_eu); \
- func(has_psr); \
- func(has_rc6); \
- func(has_rc6p); \
- func(has_resource_streamer); \
- func(has_runtime_pm); \
- func(has_snoop); \
- func(unfenced_needs_alignment); \
- func(cursor_needs_physical); \
- func(hws_needs_physical); \
- func(overlay_needs_physical); \
- func(supports_tv); \
- func(has_ipc);
-
-struct sseu_dev_info {
- u8 slice_mask;
- u8 subslice_mask;
- u8 eu_total;
- u8 eu_per_subslice;
- u8 min_eu_in_pool;
- /* For each slice, which subslice(s) has(have) 7 EUs (bitfield)? */
- u8 subslice_7eu[3];
- u8 has_slice_pg:1;
- u8 has_subslice_pg:1;
- u8 has_eu_pg:1;
-};
-
-static inline unsigned int sseu_subslice_total(const struct sseu_dev_info *sseu)
-{
- return hweight8(sseu->slice_mask) * hweight8(sseu->subslice_mask);
-}
-
-/* Keep in gen based order, and chronological order within a gen */
-enum intel_platform {
- INTEL_PLATFORM_UNINITIALIZED = 0,
- INTEL_I830,
- INTEL_I845G,
- INTEL_I85X,
- INTEL_I865G,
- INTEL_I915G,
- INTEL_I915GM,
- INTEL_I945G,
- INTEL_I945GM,
- INTEL_G33,
- INTEL_PINEVIEW,
- INTEL_I965G,
- INTEL_I965GM,
- INTEL_G45,
- INTEL_GM45,
- INTEL_IRONLAKE,
- INTEL_SANDYBRIDGE,
- INTEL_IVYBRIDGE,
- INTEL_VALLEYVIEW,
- INTEL_HASWELL,
- INTEL_BROADWELL,
- INTEL_CHERRYVIEW,
- INTEL_SKYLAKE,
- INTEL_BROXTON,
- INTEL_KABYLAKE,
- INTEL_GEMINILAKE,
- INTEL_COFFEELAKE,
- INTEL_CANNONLAKE,
- INTEL_MAX_PLATFORMS
-};
-
-struct intel_device_info {
- u16 device_id;
- u16 gen_mask;
-
- u8 gen;
- u8 gt; /* GT number, 0 if undefined */
- u8 num_rings;
- u8 ring_mask; /* Rings supported by the HW */
-
- enum intel_platform platform;
- u32 platform_mask;
-
- u32 display_mmio_offset;
-
- u8 num_pipes;
- u8 num_sprites[I915_MAX_PIPES];
- u8 num_scalers[I915_MAX_PIPES];
-
- unsigned int page_sizes; /* page sizes supported by the HW */
-
-#define DEFINE_FLAG(name) u8 name:1
- DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG);
-#undef DEFINE_FLAG
- u16 ddb_size; /* in blocks */
-
- /* Register offsets for the various display pipes and transcoders */
- int pipe_offsets[I915_MAX_TRANSCODERS];
- int trans_offsets[I915_MAX_TRANSCODERS];
- int palette_offsets[I915_MAX_PIPES];
- int cursor_offsets[I915_MAX_PIPES];
-
- /* Slice/subslice/EU info */
- struct sseu_dev_info sseu;
-
- u32 cs_timestamp_frequency_khz;
-
- struct color_luts {
- u16 degamma_lut_size;
- u16 gamma_lut_size;
- } color;
-};
-
struct intel_display_error_state;
struct i915_gpu_state {
@@ -947,6 +502,7 @@ struct i915_gpu_state {
struct drm_i915_error_engine {
int engine_id;
/* Software tracked state */
+ bool idle;
bool waiting;
int num_waiters;
unsigned long hangcheck_timestamp;
@@ -1537,9 +1093,6 @@ struct i915_gem_mm {
*/
struct pagevec wc_stash;
- /** Usable portion of the GTT for GEM */
- dma_addr_t stolen_base; /* limited to low memory (32-bit) */
-
/**
* tmpfs instance used for shmem backed objects
*/
@@ -1588,6 +1141,8 @@ struct drm_i915_error_state_buf {
loff_t pos;
};
+#define I915_IDLE_ENGINES_TIMEOUT (200) /* in ms */
+
#define I915_RESET_TIMEOUT (10 * HZ) /* 10s */
#define I915_FENCE_TIMEOUT (10 * HZ) /* 10s */
@@ -2253,6 +1808,30 @@ struct drm_i915_private {
const struct intel_device_info info;
+ /**
+ * Data Stolen Memory - aka "i915 stolen memory" gives us the start and
+ * end of stolen which we can optionally use to create GEM objects
+ * backed by stolen memory. Note that stolen_usable_size tells us
+ * exactly how much of this we are actually allowed to use, given that
+ * some portion of it is in fact reserved for use by hardware functions.
+ */
+ struct resource dsm;
+ /**
+ * Reseved portion of Data Stolen Memory
+ */
+ struct resource dsm_reserved;
+
+ /*
+ * Stolen memory is segmented in hardware with different portions
+ * offlimits to certain functions.
+ *
+ * The drm_mm is initialised to the total accessible range, as found
+ * from the PCI config. On Broadwell+, this is further restricted to
+ * avoid the first page! The upper end of stolen memory is reserved for
+ * hardware functions and similarly removed from the accessible range.
+ */
+ resource_size_t stolen_usable_size; /* Total size minus reserved ranges */
+
void __iomem *regs;
struct intel_uncore uncore;
@@ -2381,6 +1960,9 @@ struct drm_i915_private {
*/
struct workqueue_struct *wq;
+ /* ordered wq for modesets */
+ struct workqueue_struct *modeset_wq;
+
/* Display functions */
struct drm_i915_display_funcs display;
@@ -3234,8 +2816,16 @@ intel_info(const struct drm_i915_private *dev_priv)
#define HAS_GUC_CT(dev_priv) ((dev_priv)->info.has_guc_ct)
#define HAS_GUC_UCODE(dev_priv) (HAS_GUC(dev_priv))
#define HAS_GUC_SCHED(dev_priv) (HAS_GUC(dev_priv))
+
+/* For now, anything with a GuC also has a HuC */
+#define HAS_HUC(dev_priv) (HAS_GUC(dev_priv))
#define HAS_HUC_UCODE(dev_priv) (HAS_GUC(dev_priv))
+/* Having a GuC is not the same as using a GuC */
+#define USES_GUC(dev_priv) intel_uc_is_using_guc()
+#define USES_GUC_SUBMISSION(dev_priv) intel_uc_is_using_guc_submission()
+#define USES_HUC(dev_priv) intel_uc_is_using_huc()
+
#define HAS_RESOURCE_STREAMER(dev_priv) ((dev_priv)->info.has_resource_streamer)
#define HAS_POOLED_EU(dev_priv) ((dev_priv)->info.has_pooled_eu)
@@ -3879,6 +3469,8 @@ int __must_check i915_gem_evict_for_node(struct i915_address_space *vm,
unsigned int flags);
int i915_gem_evict_vm(struct i915_address_space *vm);
+void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv);
+
/* belongs in i915_gem_gtt.h */
static inline void i915_gem_chipset_flush(struct drm_i915_private *dev_priv)
{
@@ -3900,12 +3492,13 @@ void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
int i915_gem_init_stolen(struct drm_i915_private *dev_priv);
void i915_gem_cleanup_stolen(struct drm_device *dev);
struct drm_i915_gem_object *
-i915_gem_object_create_stolen(struct drm_i915_private *dev_priv, u32 size);
+i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
+ resource_size_t size);
struct drm_i915_gem_object *
i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv,
- u32 stolen_offset,
- u32 gtt_offset,
- u32 size);
+ resource_size_t stolen_offset,
+ resource_size_t gtt_offset,
+ resource_size_t size);
/* i915_gem_internal.c */
struct drm_i915_gem_object *
@@ -4076,41 +3669,6 @@ bool intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv,
bool intel_bios_is_lspcon_present(struct drm_i915_private *dev_priv,
enum port port);
-
-/* intel_opregion.c */
-#ifdef CONFIG_ACPI
-extern int intel_opregion_setup(struct drm_i915_private *dev_priv);
-extern void intel_opregion_register(struct drm_i915_private *dev_priv);
-extern void intel_opregion_unregister(struct drm_i915_private *dev_priv);
-extern void intel_opregion_asle_intr(struct drm_i915_private *dev_priv);
-extern int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
- bool enable);
-extern int intel_opregion_notify_adapter(struct drm_i915_private *dev_priv,
- pci_power_t state);
-extern int intel_opregion_get_panel_type(struct drm_i915_private *dev_priv);
-#else
-static inline int intel_opregion_setup(struct drm_i915_private *dev) { return 0; }
-static inline void intel_opregion_register(struct drm_i915_private *dev_priv) { }
-static inline void intel_opregion_unregister(struct drm_i915_private *dev_priv) { }
-static inline void intel_opregion_asle_intr(struct drm_i915_private *dev_priv)
-{
-}
-static inline int
-intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, bool enable)
-{
- return 0;
-}
-static inline int
-intel_opregion_notify_adapter(struct drm_i915_private *dev, pci_power_t state)
-{
- return 0;
-}
-static inline int intel_opregion_get_panel_type(struct drm_i915_private *dev)
-{
- return -ENODEV;
-}
-#endif
-
/* intel_acpi.c */
#ifdef CONFIG_ACPI
extern void intel_register_dsm_handler(void);
@@ -4127,10 +3685,6 @@ mkwrite_device_info(struct drm_i915_private *dev_priv)
return (struct intel_device_info *)&dev_priv->info;
}
-const char *intel_platform_name(enum intel_platform platform);
-void intel_device_info_runtime_init(struct drm_i915_private *dev_priv);
-void intel_device_info_dump(struct drm_i915_private *dev_priv);
-
/* modesetting */
extern void intel_modeset_init_hw(struct drm_device *dev);
extern int intel_modeset_init(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index e083f242b8dc..ba9f67c256f4 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -330,17 +330,10 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
* must wait for all rendering to complete to the object (as unbinding
* must anyway), and retire the requests.
*/
- ret = i915_gem_object_wait(obj,
- I915_WAIT_INTERRUPTIBLE |
- I915_WAIT_LOCKED |
- I915_WAIT_ALL,
- MAX_SCHEDULE_TIMEOUT,
- NULL);
+ ret = i915_gem_object_set_to_cpu_domain(obj, false);
if (ret)
return ret;
- i915_gem_retire_requests(to_i915(obj->base.dev));
-
while ((vma = list_first_entry_or_null(&obj->vma_list,
struct i915_vma,
obj_link))) {
@@ -673,17 +666,13 @@ fb_write_origin(struct drm_i915_gem_object *obj, unsigned int domain)
obj->frontbuffer_ggtt_origin : ORIGIN_CPU);
}
-static void
-flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains)
+void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
-
- if (!(obj->base.write_domain & flush_domains))
- return;
-
- /* No actual flushing is required for the GTT write domain. Writes
- * to it "immediately" go to main memory as far as we know, so there's
- * no chipset flush. It also doesn't land in render cache.
+ /*
+ * No actual flushing is required for the GTT write domain for reads
+ * from the GTT domain. Writes to it "immediately" go to main memory
+ * as far as we know, so there's no chipset flush. It also doesn't
+ * land in the GPU render cache.
*
* However, we do have to enforce the order so that all writes through
* the GTT land before any writes to the device, such as updates to
@@ -694,22 +683,43 @@ flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains)
* timing. This issue has only been observed when switching quickly
* between GTT writes and CPU reads from inside the kernel on recent hw,
* and it appears to only affect discrete GTT blocks (i.e. on LLC
- * system agents we cannot reproduce this behaviour).
+ * system agents we cannot reproduce this behaviour, until Cannonlake
+ * that was!).
*/
+
wmb();
+ intel_runtime_pm_get(dev_priv);
+ spin_lock_irq(&dev_priv->uncore.lock);
+
+ POSTING_READ_FW(RING_HEAD(RENDER_RING_BASE));
+
+ spin_unlock_irq(&dev_priv->uncore.lock);
+ intel_runtime_pm_put(dev_priv);
+}
+
+static void
+flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains)
+{
+ struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+ struct i915_vma *vma;
+
+ if (!(obj->base.write_domain & flush_domains))
+ return;
+
switch (obj->base.write_domain) {
case I915_GEM_DOMAIN_GTT:
- if (!HAS_LLC(dev_priv)) {
- intel_runtime_pm_get(dev_priv);
- spin_lock_irq(&dev_priv->uncore.lock);
- POSTING_READ_FW(RING_HEAD(dev_priv->engine[RCS]->mmio_base));
- spin_unlock_irq(&dev_priv->uncore.lock);
- intel_runtime_pm_put(dev_priv);
- }
+ i915_gem_flush_ggtt_writes(dev_priv);
intel_fb_obj_flush(obj,
fb_write_origin(obj, I915_GEM_DOMAIN_GTT));
+
+ for_each_ggtt_vma(vma, obj) {
+ if (vma->iomap)
+ continue;
+
+ i915_vma_unset_ggtt_write(vma);
+ }
break;
case I915_GEM_DOMAIN_CPU:
@@ -1106,7 +1116,7 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
page_base += offset & PAGE_MASK;
}
- if (gtt_user_read(&ggtt->mappable, page_base, page_offset,
+ if (gtt_user_read(&ggtt->iomap, page_base, page_offset,
user_data, page_length)) {
ret = -EFAULT;
break;
@@ -1314,7 +1324,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
* If the object is non-shmem backed, we retry again with the
* path that handles page fault.
*/
- if (ggtt_write(&ggtt->mappable, page_base, page_offset,
+ if (ggtt_write(&ggtt->iomap, page_base, page_offset,
user_data, page_length)) {
ret = -EFAULT;
break;
@@ -1556,10 +1566,7 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
- list_for_each_entry(vma, &obj->vma_list, obj_link) {
- if (!i915_vma_is_ggtt(vma))
- break;
-
+ for_each_ggtt_vma(vma, obj) {
if (i915_vma_is_active(vma))
continue;
@@ -1960,9 +1967,9 @@ int i915_gem_fault(struct vm_fault *vmf)
/* Finally, remap it using the new GTT offset */
ret = remap_io_mapping(area,
area->vm_start + (vma->ggtt_view.partial.offset << PAGE_SHIFT),
- (ggtt->mappable_base + vma->node.start) >> PAGE_SHIFT,
+ (ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT,
min_t(u64, vma->size, area->vm_end - area->vm_start),
- &ggtt->mappable);
+ &ggtt->iomap);
if (ret)
goto err_fence;
@@ -1972,6 +1979,8 @@ int i915_gem_fault(struct vm_fault *vmf)
list_add(&obj->userfault_link, &dev_priv->mm.userfault_list);
GEM_BUG_ON(!obj->userfault_count);
+ i915_vma_set_ggtt_write(vma);
+
err_fence:
i915_vma_unpin_fence(vma);
err_unpin:
@@ -2036,12 +2045,8 @@ static void __i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
drm_vma_node_unmap(&obj->base.vma_node,
obj->base.dev->anon_inode->i_mapping);
- list_for_each_entry(vma, &obj->vma_list, obj_link) {
- if (!i915_vma_is_ggtt(vma))
- break;
-
+ for_each_ggtt_vma(vma, obj)
i915_vma_unset_userfault(vma);
- }
}
/**
@@ -2591,7 +2596,7 @@ static int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
}
err = obj->ops->get_pages(obj);
- GEM_BUG_ON(!err && IS_ERR_OR_NULL(obj->mm.pages));
+ GEM_BUG_ON(!err && !i915_gem_object_has_pages(obj));
return err;
}
@@ -3084,7 +3089,12 @@ i915_gem_reset_request(struct intel_engine_cs *engine,
void i915_gem_reset_engine(struct intel_engine_cs *engine,
struct drm_i915_gem_request *request)
{
- engine->irq_posted = 0;
+ /*
+ * Make sure this write is visible before we re-enable the interrupt
+ * handlers on another CPU, as tasklet_enable() resolves to just
+ * a compiler barrier which is insufficient for our purpose here.
+ */
+ smp_store_mb(engine->irq_posted, 0);
if (request)
request = i915_gem_reset_request(engine, request);
@@ -3114,6 +3124,25 @@ void i915_gem_reset(struct drm_i915_private *dev_priv)
ctx = fetch_and_zero(&engine->last_retired_context);
if (ctx)
engine->context_unpin(engine, ctx);
+
+ /*
+ * Ostensibly, we always want a context loaded for powersaving,
+ * so if the engine is idle after the reset, send a request
+ * to load our scratch kernel_context.
+ *
+ * More mysteriously, if we leave the engine idle after a reset,
+ * the next userspace batch may hang, with what appears to be
+ * an incoherent read by the CS (presumably stale TLB). An
+ * empty request appears sufficient to paper over the glitch.
+ */
+ if (list_empty(&engine->timeline->requests)) {
+ struct drm_i915_gem_request *rq;
+
+ rq = i915_gem_request_alloc(engine,
+ dev_priv->kernel_context);
+ if (!IS_ERR(rq))
+ __i915_add_request(rq, false);
+ }
}
i915_gem_restore_fences(dev_priv);
@@ -3328,7 +3357,7 @@ i915_gem_idle_work_handler(struct work_struct *work)
* Wait for last execlists context complete, but bail out in case a
* new request is submitted.
*/
- end = ktime_add_ms(ktime_get(), 200);
+ end = ktime_add_ms(ktime_get(), I915_IDLE_ENGINES_TIMEOUT);
do {
if (new_requests_since_last_retire(dev_priv))
return;
@@ -3381,6 +3410,9 @@ i915_gem_idle_work_handler(struct work_struct *work)
if (INTEL_GEN(dev_priv) >= 6)
gen6_rps_idle(dev_priv);
+
+ intel_display_power_put(dev_priv, POWER_DOMAIN_GT_IRQ);
+
intel_runtime_pm_put(dev_priv);
out_unlock:
mutex_unlock(&dev_priv->drm.struct_mutex);
@@ -3525,8 +3557,19 @@ static int wait_for_timeline(struct i915_gem_timeline *tl, unsigned int flags)
static int wait_for_engines(struct drm_i915_private *i915)
{
- if (wait_for(intel_engines_are_idle(i915), 50)) {
- DRM_ERROR("Failed to idle engines, declaring wedged!\n");
+ if (wait_for(intel_engines_are_idle(i915), I915_IDLE_ENGINES_TIMEOUT)) {
+ dev_err(i915->drm.dev,
+ "Failed to idle engines, declaring wedged!\n");
+ if (drm_debug & DRM_UT_DRIVER) {
+ struct drm_printer p = drm_debug_printer(__func__);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ for_each_engine(engine, i915, id)
+ intel_engine_dump(engine, &p,
+ "%s", engine->name);
+ }
+
i915_gem_set_wedged(i915);
return -EIO;
}
@@ -3552,9 +3595,7 @@ int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags)
if (ret)
return ret;
}
-
i915_gem_retire_requests(i915);
- GEM_BUG_ON(i915->gt.active_requests);
ret = wait_for_engines(i915);
} else {
@@ -3753,7 +3794,8 @@ restart:
return -EBUSY;
}
- if (i915_gem_valid_gtt_space(vma, cache_level))
+ if (!i915_vma_is_closed(vma) &&
+ i915_gem_valid_gtt_space(vma, cache_level))
continue;
ret = i915_vma_unbind(vma);
@@ -3806,7 +3848,7 @@ restart:
* dropped the fence as all snoopable access is
* supposed to be linear.
*/
- list_for_each_entry(vma, &obj->vma_list, obj_link) {
+ for_each_ggtt_vma(vma, obj) {
ret = i915_vma_put_fence(vma);
if (ret)
return ret;
@@ -4847,7 +4889,8 @@ void i915_gem_resume(struct drm_i915_private *i915)
i915_gem_restore_gtt_mappings(i915);
i915_gem_restore_fences(i915);
- /* As we didn't flush the kernel context before suspend, we cannot
+ /*
+ * As we didn't flush the kernel context before suspend, we cannot
* guarantee that the context image is complete. So let's just reset
* it and start again.
*/
@@ -4868,8 +4911,10 @@ out_unlock:
return;
err_wedged:
- DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
- i915_gem_set_wedged(i915);
+ if (!i915_terminally_wedged(&i915->gpu_error)) {
+ DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
+ i915_gem_set_wedged(i915);
+ }
goto out_unlock;
}
@@ -5142,6 +5187,10 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
if (ret)
return ret;
+ ret = intel_uc_init_wq(dev_priv);
+ if (ret)
+ return ret;
+
/* This is just a security blanket to placate dragons.
* On some systems, we very sporadically observe that the first TLBs
* used by the CS may be stale, despite us poking the TLB reset. If
@@ -5152,22 +5201,32 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
ret = i915_gem_init_ggtt(dev_priv);
- if (ret)
- goto out_unlock;
+ if (ret) {
+ GEM_BUG_ON(ret == -EIO);
+ goto err_unlock;
+ }
ret = i915_gem_contexts_init(dev_priv);
- if (ret)
- goto out_unlock;
+ if (ret) {
+ GEM_BUG_ON(ret == -EIO);
+ goto err_ggtt;
+ }
ret = intel_engines_init(dev_priv);
- if (ret)
- goto out_unlock;
+ if (ret) {
+ GEM_BUG_ON(ret == -EIO);
+ goto err_context;
+ }
intel_init_gt_powersave(dev_priv);
+ ret = intel_uc_init(dev_priv);
+ if (ret)
+ goto err_pm;
+
ret = i915_gem_init_hw(dev_priv);
if (ret)
- goto out_unlock;
+ goto err_uc_init;
/*
* Despite its name intel_init_clock_gating applies both display
@@ -5181,9 +5240,55 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
intel_init_clock_gating(dev_priv);
ret = __intel_engines_record_defaults(dev_priv);
-out_unlock:
+ if (ret)
+ goto err_init_hw;
+
+ if (i915_inject_load_failure()) {
+ ret = -ENODEV;
+ goto err_init_hw;
+ }
+
+ if (i915_inject_load_failure()) {
+ ret = -EIO;
+ goto err_init_hw;
+ }
+
+ intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+
+ return 0;
+
+ /*
+ * Unwinding is complicated by the fact that we want to handle -EIO to mean
+ * disabling GPU submission but keeping KMS alive. We want to mark the
+ * HW as irrevocably wedged, but keep enough state around that the
+ * driver doesn't explode during runtime.
+ */
+err_init_hw:
+ i915_gem_wait_for_idle(dev_priv, I915_WAIT_LOCKED);
+ i915_gem_contexts_lost(dev_priv);
+ intel_uc_fini_hw(dev_priv);
+err_uc_init:
+ intel_uc_fini(dev_priv);
+err_pm:
+ if (ret != -EIO) {
+ intel_cleanup_gt_powersave(dev_priv);
+ i915_gem_cleanup_engines(dev_priv);
+ }
+err_context:
+ if (ret != -EIO)
+ i915_gem_contexts_fini(dev_priv);
+err_ggtt:
+err_unlock:
+ intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+
+ if (ret != -EIO)
+ i915_gem_cleanup_userptr(dev_priv);
+
if (ret == -EIO) {
- /* Allow engine initialisation to fail by marking the GPU as
+ /*
+ * Allow engine initialisation to fail by marking the GPU as
* wedged. But we only want to do this where the GPU is angry,
* for all other failure, such as an allocation failure, bail.
*/
@@ -5193,9 +5298,8 @@ out_unlock:
}
ret = 0;
}
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
- mutex_unlock(&dev_priv->drm.struct_mutex);
+ i915_gem_drain_freed_objects(dev_priv);
return ret;
}
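
The reset path in the hunks above replaces a plain clear of engine->irq_posted with smp_store_mb(), so the store is followed by a full memory barrier before interrupt handling is re-enabled on other CPUs (tasklet_enable() alone only provides a compiler barrier). As a hedged illustration only, not part of the patch, the same store-then-full-fence ordering can be sketched in portable C11; the flag and function names here are invented for the example.

#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool irq_posted;

/* Mirrors smp_store_mb(engine->irq_posted, 0): store the value, then a
 * full fence so the cleared flag is globally visible before anything
 * that follows on this CPU. */
static void clear_irq_posted(void)
{
	atomic_store_explicit(&irq_posted, false, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);
}

/* A handler running on another CPU reads the flag; with only a compiler
 * barrier on the writer side it could still act on the stale value. */
static bool handler_sees_posted(void)
{
	return atomic_load_explicit(&irq_posted, memory_order_relaxed);
}
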
diff --git a/drivers/gpu/drm/i915/i915_gem_clflush.c b/drivers/gpu/drm/i915/i915_gem_clflush.c
index f663cd919795..b9b53ac14176 100644
--- a/drivers/gpu/drm/i915/i915_gem_clflush.c
+++ b/drivers/gpu/drm/i915/i915_gem_clflush.c
@@ -167,7 +167,7 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
i915_sw_fence_await_reservation(&clflush->wait,
obj->resv, NULL,
true, I915_FENCE_TIMEOUT,
- GFP_KERNEL);
+ I915_FENCE_GFP);
reservation_object_lock(obj->resv, NULL);
reservation_object_add_excl_fence(obj->resv, &clflush->dma);
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index ce3139e5ec4c..648e7536ff51 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -316,7 +316,7 @@ __create_hw_context(struct drm_i915_private *dev_priv,
* present or not in use we still need a small bias as ring wraparound
* at offset 0 sometimes hangs. No idea why.
*/
- if (HAS_GUC(dev_priv) && i915_modparams.enable_guc_loading)
+ if (USES_GUC(dev_priv))
ctx->ggtt_offset_bias = GUC_WOPCM_TOP;
else
ctx->ggtt_offset_bias = I915_GTT_PAGE_SIZE;
@@ -409,7 +409,7 @@ i915_gem_context_create_gvt(struct drm_device *dev)
i915_gem_context_set_closed(ctx); /* not user accessible */
i915_gem_context_clear_bannable(ctx);
i915_gem_context_set_force_single_submission(ctx);
- if (!i915_modparams.enable_guc_submission)
+ if (!USES_GUC_SUBMISSION(to_i915(dev)))
ctx->ring_size = 512 * PAGE_SIZE; /* Max ring buffer size */
GEM_BUG_ON(i915_gem_context_is_kernel(ctx));
@@ -617,7 +617,7 @@ int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
if (prev)
i915_sw_fence_await_sw_fence_gfp(&req->submit,
&prev->submit,
- GFP_KERNEL);
+ I915_FENCE_GFP);
}
/*
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 70ccd63cbf8e..4401068ff468 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1012,7 +1012,7 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
offset += page << PAGE_SHIFT;
}
- vaddr = (void __force *)io_mapping_map_atomic_wc(&ggtt->mappable,
+ vaddr = (void __force *)io_mapping_map_atomic_wc(&ggtt->iomap,
offset);
cache->page = page;
cache->vaddr = (unsigned long)vaddr;
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index f3c35e826321..c5f393870532 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2912,7 +2912,7 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
mutex_unlock(&dev_priv->drm.struct_mutex);
arch_phys_wc_del(ggtt->mtrr);
- io_mapping_fini(&ggtt->mappable);
+ io_mapping_fini(&ggtt->iomap);
}
static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
@@ -2949,50 +2949,6 @@ static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
return 0;
}
-static size_t gen6_get_stolen_size(u16 snb_gmch_ctl)
-{
- snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
- snb_gmch_ctl &= SNB_GMCH_GMS_MASK;
- return (size_t)snb_gmch_ctl << 25; /* 32 MB units */
-}
-
-static size_t gen8_get_stolen_size(u16 bdw_gmch_ctl)
-{
- bdw_gmch_ctl >>= BDW_GMCH_GMS_SHIFT;
- bdw_gmch_ctl &= BDW_GMCH_GMS_MASK;
- return (size_t)bdw_gmch_ctl << 25; /* 32 MB units */
-}
-
-static size_t chv_get_stolen_size(u16 gmch_ctrl)
-{
- gmch_ctrl >>= SNB_GMCH_GMS_SHIFT;
- gmch_ctrl &= SNB_GMCH_GMS_MASK;
-
- /*
- * 0x0 to 0x10: 32MB increments starting at 0MB
- * 0x11 to 0x16: 4MB increments starting at 8MB
- * 0x17 to 0x1d: 4MB increments start at 36MB
- */
- if (gmch_ctrl < 0x11)
- return (size_t)gmch_ctrl << 25;
- else if (gmch_ctrl < 0x17)
- return (size_t)(gmch_ctrl - 0x11 + 2) << 22;
- else
- return (size_t)(gmch_ctrl - 0x17 + 9) << 22;
-}
-
-static size_t gen9_get_stolen_size(u16 gen9_gmch_ctl)
-{
- gen9_gmch_ctl >>= BDW_GMCH_GMS_SHIFT;
- gen9_gmch_ctl &= BDW_GMCH_GMS_MASK;
-
- if (gen9_gmch_ctl < 0xf0)
- return (size_t)gen9_gmch_ctl << 25; /* 32 MB units */
- else
- /* 4MB increments starting at 0xf0 for 4MB */
- return (size_t)(gen9_gmch_ctl - 0xf0 + 1) << 22;
-}
-
static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
{
struct drm_i915_private *dev_priv = ggtt->base.i915;
@@ -3332,8 +3288,10 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
int err;
/* TODO: We're not aware of mappable constraints on gen8 yet */
- ggtt->mappable_base = pci_resource_start(pdev, 2);
- ggtt->mappable_end = pci_resource_len(pdev, 2);
+ ggtt->gmadr =
+ (struct resource) DEFINE_RES_MEM(pci_resource_start(pdev, 2),
+ pci_resource_len(pdev, 2));
+ ggtt->mappable_end = resource_size(&ggtt->gmadr);
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(39));
if (!err)
@@ -3344,13 +3302,10 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
if (INTEL_GEN(dev_priv) >= 9) {
- ggtt->stolen_size = gen9_get_stolen_size(snb_gmch_ctl);
size = gen8_get_total_gtt_size(snb_gmch_ctl);
} else if (IS_CHERRYVIEW(dev_priv)) {
- ggtt->stolen_size = chv_get_stolen_size(snb_gmch_ctl);
size = chv_get_total_gtt_size(snb_gmch_ctl);
} else {
- ggtt->stolen_size = gen8_get_stolen_size(snb_gmch_ctl);
size = gen8_get_total_gtt_size(snb_gmch_ctl);
}
@@ -3390,14 +3345,16 @@ static int gen6_gmch_probe(struct i915_ggtt *ggtt)
u16 snb_gmch_ctl;
int err;
- ggtt->mappable_base = pci_resource_start(pdev, 2);
- ggtt->mappable_end = pci_resource_len(pdev, 2);
+ ggtt->gmadr =
+ (struct resource) DEFINE_RES_MEM(pci_resource_start(pdev, 2),
+ pci_resource_len(pdev, 2));
+ ggtt->mappable_end = resource_size(&ggtt->gmadr);
/* 64/512MB is the current min/max we actually know of, but this is just
* a coarse sanity check.
*/
if (ggtt->mappable_end < (64<<20) || ggtt->mappable_end > (512<<20)) {
- DRM_ERROR("Unknown GMADR size (%llx)\n", ggtt->mappable_end);
+ DRM_ERROR("Unknown GMADR size (%pa)\n", &ggtt->mappable_end);
return -ENXIO;
}
@@ -3408,8 +3365,6 @@ static int gen6_gmch_probe(struct i915_ggtt *ggtt)
DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err);
pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
- ggtt->stolen_size = gen6_get_stolen_size(snb_gmch_ctl);
-
size = gen6_get_total_gtt_size(snb_gmch_ctl);
ggtt->base.total = (size / sizeof(gen6_pte_t)) << PAGE_SHIFT;
@@ -3446,6 +3401,7 @@ static void i915_gmch_remove(struct i915_address_space *vm)
static int i915_gmch_probe(struct i915_ggtt *ggtt)
{
struct drm_i915_private *dev_priv = ggtt->base.i915;
+ phys_addr_t gmadr_base;
int ret;
ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->drm.pdev, NULL);
@@ -3455,10 +3411,13 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt)
}
intel_gtt_get(&ggtt->base.total,
- &ggtt->stolen_size,
- &ggtt->mappable_base,
+ &gmadr_base,
&ggtt->mappable_end);
+ ggtt->gmadr =
+ (struct resource) DEFINE_RES_MEM(gmadr_base,
+ ggtt->mappable_end);
+
ggtt->do_idle_maps = needs_idle_maps(dev_priv);
ggtt->base.insert_page = i915_ggtt_insert_page;
ggtt->base.insert_entries = i915_ggtt_insert_entries;
@@ -3503,9 +3462,9 @@ int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv)
* currently don't have any bits spare to pass in this upper
* restriction!
*/
- if (HAS_GUC(dev_priv) && i915_modparams.enable_guc_loading) {
+ if (USES_GUC(dev_priv)) {
ggtt->base.total = min_t(u64, ggtt->base.total, GUC_GGTT_TOP);
- ggtt->mappable_end = min(ggtt->mappable_end, ggtt->base.total);
+ ggtt->mappable_end = min_t(u64, ggtt->mappable_end, ggtt->base.total);
}
if ((ggtt->base.total - 1) >> 32) {
@@ -3513,21 +3472,21 @@ int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv)
" of address space! Found %lldM!\n",
ggtt->base.total >> 20);
ggtt->base.total = 1ULL << 32;
- ggtt->mappable_end = min(ggtt->mappable_end, ggtt->base.total);
+ ggtt->mappable_end = min_t(u64, ggtt->mappable_end, ggtt->base.total);
}
if (ggtt->mappable_end > ggtt->base.total) {
DRM_ERROR("mappable aperture extends past end of GGTT,"
- " aperture=%llx, total=%llx\n",
- ggtt->mappable_end, ggtt->base.total);
+ " aperture=%pa, total=%llx\n",
+ &ggtt->mappable_end, ggtt->base.total);
ggtt->mappable_end = ggtt->base.total;
}
/* GMADR is the PCI mmio aperture into the global GTT. */
- DRM_INFO("Memory usable by graphics device = %lluM\n",
- ggtt->base.total >> 20);
- DRM_DEBUG_DRIVER("GMADR size = %lldM\n", ggtt->mappable_end >> 20);
- DRM_DEBUG_DRIVER("GTT stolen size = %uM\n", ggtt->stolen_size >> 20);
+ DRM_DEBUG_DRIVER("GGTT size = %lluM\n", ggtt->base.total >> 20);
+ DRM_DEBUG_DRIVER("GMADR size = %lluM\n", (u64)ggtt->mappable_end >> 20);
+ DRM_DEBUG_DRIVER("DSM size = %lluM\n",
+ (u64)resource_size(&intel_graphics_stolen_res) >> 20);
if (intel_vtd_active())
DRM_INFO("VT-d active for gfx access\n");
@@ -3556,14 +3515,14 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
ggtt->base.mm.color_adjust = i915_gtt_color_adjust;
mutex_unlock(&dev_priv->drm.struct_mutex);
- if (!io_mapping_init_wc(&dev_priv->ggtt.mappable,
- dev_priv->ggtt.mappable_base,
+ if (!io_mapping_init_wc(&dev_priv->ggtt.iomap,
+ dev_priv->ggtt.gmadr.start,
dev_priv->ggtt.mappable_end)) {
ret = -EIO;
goto out_gtt_cleanup;
}
- ggtt->mtrr = arch_phys_wc_add(ggtt->mappable_base, ggtt->mappable_end);
+ ggtt->mtrr = arch_phys_wc_add(ggtt->gmadr.start, ggtt->mappable_end);
/*
* Initialise stolen early so that we may reserve preallocated
@@ -3593,6 +3552,8 @@ void i915_ggtt_enable_guc(struct drm_i915_private *i915)
GEM_BUG_ON(i915->ggtt.invalidate != gen6_ggtt_invalidate);
i915->ggtt.invalidate = guc_ggtt_invalidate;
+
+ i915_ggtt_invalidate(i915);
}
void i915_ggtt_disable_guc(struct drm_i915_private *i915)
@@ -3601,6 +3562,8 @@ void i915_ggtt_disable_guc(struct drm_i915_private *i915)
GEM_BUG_ON(i915->ggtt.invalidate != guc_ggtt_invalidate);
i915->ggtt.invalidate = gen6_ggtt_invalidate;
+
+ i915_ggtt_invalidate(i915);
}
void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
@@ -3620,10 +3583,7 @@ void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
bool ggtt_bound = false;
struct i915_vma *vma;
- list_for_each_entry(vma, &obj->vma_list, obj_link) {
- if (vma->vm != &ggtt->base)
- continue;
-
+ for_each_ggtt_vma(vma, obj) {
if (!i915_vma_unbind(vma))
continue;
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 93211a96fdad..a42890d9af38 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -368,23 +368,10 @@ i915_vm_has_scratch_64K(struct i915_address_space *vm)
*/
struct i915_ggtt {
struct i915_address_space base;
- struct io_mapping mappable; /* Mapping to our CPU mappable region */
- phys_addr_t mappable_base; /* PA of our GMADR */
- u64 mappable_end; /* End offset that we can CPU map */
-
- /* Stolen memory is segmented in hardware with different portions
- * offlimits to certain functions.
- *
- * The drm_mm is initialised to the total accessible range, as found
- * from the PCI config. On Broadwell+, this is further restricted to
- * avoid the first page! The upper end of stolen memory is reserved for
- * hardware functions and similarly removed from the accessible range.
- */
- u32 stolen_size; /* Total size of stolen memory */
- u32 stolen_usable_size; /* Total size minus reserved ranges */
- u32 stolen_reserved_base;
- u32 stolen_reserved_size;
+ struct io_mapping iomap; /* Mapping to our CPU mappable region */
+ struct resource gmadr; /* GMADR resource */
+ resource_size_t mappable_end; /* End offset that we can CPU map */
/** "Graphics Stolen Memory" holds the global PTEs */
void __iomem *gsm;
diff --git a/drivers/gpu/drm/i915/i915_gem_internal.c b/drivers/gpu/drm/i915/i915_gem_internal.c
index ee83ec838ee7..a1d6956734f7 100644
--- a/drivers/gpu/drm/i915/i915_gem_internal.c
+++ b/drivers/gpu/drm/i915/i915_gem_internal.c
@@ -27,6 +27,7 @@
#include "i915_drv.h"
#define QUIET (__GFP_NORETRY | __GFP_NOWARN)
+#define MAYFAIL (__GFP_RETRY_MAYFAIL | __GFP_NOWARN)
/* convert swiotlb segment size into sensible units (pages)! */
#define IO_TLB_SEGPAGES (IO_TLB_SEGSIZE << IO_TLB_SHIFT >> PAGE_SHIFT)
@@ -95,7 +96,8 @@ create_st:
struct page *page;
do {
- page = alloc_pages(gfp | (order ? QUIET : 0), order);
+ page = alloc_pages(gfp | (order ? QUIET : MAYFAIL),
+ order);
if (page)
break;
if (!order--)
diff --git a/drivers/gpu/drm/i915/i915_gem_object.h b/drivers/gpu/drm/i915/i915_gem_object.h
index 19fb28c177d8..05e89e1c0a08 100644
--- a/drivers/gpu/drm/i915/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/i915_gem_object.h
@@ -261,6 +261,8 @@ struct drm_i915_gem_object {
} userptr;
unsigned long scratch;
+
+ void *gvt_info;
};
/** for phys allocated objects */
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index a90bdd26571f..d575109f7a7f 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -252,6 +252,20 @@ static void mark_busy(struct drm_i915_private *i915)
GEM_BUG_ON(!i915->gt.active_requests);
intel_runtime_pm_get_noresume(i915);
+
+ /*
+ * It seems that the DMC likes to transition between the DC states a lot
+ * when there are no connected displays (no active power domains) during
+ * command submission.
+ *
+ * This activity has a negative impact on the performance of the chip, with
+ * huge latencies observed in the interrupt handler and elsewhere.
+ *
+ * Work around it by grabbing a GT IRQ power domain whilst there is any
+ * GT activity, preventing any DC state transitions.
+ */
+ intel_display_power_get(i915, POWER_DOMAIN_GT_IRQ);
+
i915->gt.awake = true;
intel_enable_gt_powersave(i915);
@@ -465,6 +479,7 @@ void __i915_gem_request_submit(struct drm_i915_gem_request *request)
/* Transfer from per-context onto the global per-engine timeline */
timeline = engine->timeline;
GEM_BUG_ON(timeline == request->timeline);
+ GEM_BUG_ON(request->global_seqno);
seqno = timeline_get_seqno(timeline);
GEM_BUG_ON(!seqno);
@@ -511,6 +526,7 @@ void __i915_gem_request_unsubmit(struct drm_i915_gem_request *request)
/* Only unwind in reverse order, required so that the per-context list
* is kept in seqno/ring order.
*/
+ GEM_BUG_ON(!request->global_seqno);
GEM_BUG_ON(request->global_seqno != engine->timeline->seqno);
engine->timeline->seqno--;
@@ -663,10 +679,21 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
*
* Do not use kmem_cache_zalloc() here!
*/
- req = kmem_cache_alloc(dev_priv->requests, GFP_KERNEL);
- if (!req) {
- ret = -ENOMEM;
- goto err_unreserve;
+ req = kmem_cache_alloc(dev_priv->requests,
+ GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
+ if (unlikely(!req)) {
+ /* Ratelimit ourselves to prevent oom from malicious clients */
+ ret = i915_gem_wait_for_idle(dev_priv,
+ I915_WAIT_LOCKED |
+ I915_WAIT_INTERRUPTIBLE);
+ if (ret)
+ goto err_unreserve;
+
+ req = kmem_cache_alloc(dev_priv->requests, GFP_KERNEL);
+ if (!req) {
+ ret = -ENOMEM;
+ goto err_unreserve;
+ }
}
req->timeline = i915_gem_context_lookup_timeline(ctx, engine);
@@ -768,7 +795,7 @@ i915_gem_request_await_request(struct drm_i915_gem_request *to,
if (to->engine == from->engine) {
ret = i915_sw_fence_await_sw_fence_gfp(&to->submit,
&from->submit,
- GFP_KERNEL);
+ I915_FENCE_GFP);
return ret < 0 ? ret : 0;
}
@@ -796,7 +823,7 @@ i915_gem_request_await_request(struct drm_i915_gem_request *to,
await_dma_fence:
ret = i915_sw_fence_await_dma_fence(&to->submit,
&from->fence, 0,
- GFP_KERNEL);
+ I915_FENCE_GFP);
return ret < 0 ? ret : 0;
}
@@ -847,7 +874,7 @@ i915_gem_request_await_dma_fence(struct drm_i915_gem_request *req,
else
ret = i915_sw_fence_await_dma_fence(&req->submit, fence,
I915_FENCE_TIMEOUT,
- GFP_KERNEL);
+ I915_FENCE_GFP);
if (ret < 0)
return ret;
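
The request allocator above now asks the slab cache for memory with GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN, so the first attempt may fail quietly instead of invoking the OOM killer; only after retiring outstanding work does it retry with plain GFP_KERNEL. A minimal hedged sketch of that two-step pattern, with my_obj and my_reclaim() as invented stand-ins for the driver's request cache and its idle/retire step:

#include <linux/slab.h>
#include <linux/gfp.h>

struct my_obj {
	unsigned long payload;
};

/* Stand-in for the wait-for-idle/retire step; returns nonzero on error. */
static int my_reclaim(void)
{
	return 0;
}

static struct my_obj *my_alloc(struct kmem_cache *cache)
{
	struct my_obj *obj;

	/* Prefer failing this allocation over triggering the OOM killer. */
	obj = kmem_cache_alloc(cache,
			       GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
	if (likely(obj))
		return obj;

	/* Shed work we can reclaim ourselves, then make one ordinary attempt. */
	if (my_reclaim())
		return NULL;

	return kmem_cache_alloc(cache, GFP_KERNEL);
}
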
diff --git a/drivers/gpu/drm/i915/i915_gem_request.h b/drivers/gpu/drm/i915/i915_gem_request.h
index 26249f39de67..0d6d39f19506 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.h
+++ b/drivers/gpu/drm/i915/i915_gem_request.h
@@ -204,6 +204,8 @@ struct drm_i915_gem_request {
struct list_head client_link;
};
+#define I915_FENCE_GFP (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)
+
extern const struct dma_fence_ops i915_fence_ops;
static inline bool dma_fence_is_i915(const struct dma_fence *fence)
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 1877ae9a1d9b..d3f222fa6356 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -30,9 +30,6 @@
#include <drm/i915_drm.h>
#include "i915_drv.h"
-#define KB(x) ((x) * 1024)
-#define MB(x) (KB(x) * 1024)
-
/*
* The BIOS typically reserves some of the system's memory for the exclusive
* use of the integrated graphics. This memory is no longer available for
@@ -79,129 +76,26 @@ void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
mutex_unlock(&dev_priv->mm.stolen_lock);
}
-static dma_addr_t i915_stolen_to_dma(struct drm_i915_private *dev_priv)
+static int i915_adjust_stolen(struct drm_i915_private *dev_priv,
+ struct resource *dsm)
{
- struct pci_dev *pdev = dev_priv->drm.pdev;
struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct resource *r;
- dma_addr_t base;
-
- /* Almost universally we can find the Graphics Base of Stolen Memory
- * at register BSM (0x5c) in the igfx configuration space. On a few
- * (desktop) machines this is also mirrored in the bridge device at
- * different locations, or in the MCHBAR.
- *
- * On 865 we just check the TOUD register.
- *
- * On 830/845/85x the stolen memory base isn't available in any
- * register. We need to calculate it as TOM-TSEG_SIZE-stolen_size.
- *
- */
- base = 0;
- if (INTEL_GEN(dev_priv) >= 3) {
- u32 bsm;
-
- pci_read_config_dword(pdev, INTEL_BSM, &bsm);
-
- base = bsm & INTEL_BSM_MASK;
- } else if (IS_I865G(dev_priv)) {
- u32 tseg_size = 0;
- u16 toud = 0;
- u8 tmp;
-
- pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
- I845_ESMRAMC, &tmp);
-
- if (tmp & TSEG_ENABLE) {
- switch (tmp & I845_TSEG_SIZE_MASK) {
- case I845_TSEG_SIZE_512K:
- tseg_size = KB(512);
- break;
- case I845_TSEG_SIZE_1M:
- tseg_size = MB(1);
- break;
- }
- }
-
- pci_bus_read_config_word(pdev->bus, PCI_DEVFN(0, 0),
- I865_TOUD, &toud);
-
- base = (toud << 16) + tseg_size;
- } else if (IS_I85X(dev_priv)) {
- u32 tseg_size = 0;
- u32 tom;
- u8 tmp;
-
- pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
- I85X_ESMRAMC, &tmp);
-
- if (tmp & TSEG_ENABLE)
- tseg_size = MB(1);
-
- pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 1),
- I85X_DRB3, &tmp);
- tom = tmp * MB(32);
-
- base = tom - tseg_size - ggtt->stolen_size;
- } else if (IS_I845G(dev_priv)) {
- u32 tseg_size = 0;
- u32 tom;
- u8 tmp;
-
- pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
- I845_ESMRAMC, &tmp);
-
- if (tmp & TSEG_ENABLE) {
- switch (tmp & I845_TSEG_SIZE_MASK) {
- case I845_TSEG_SIZE_512K:
- tseg_size = KB(512);
- break;
- case I845_TSEG_SIZE_1M:
- tseg_size = MB(1);
- break;
- }
- }
-
- pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
- I830_DRB3, &tmp);
- tom = tmp * MB(32);
-
- base = tom - tseg_size - ggtt->stolen_size;
- } else if (IS_I830(dev_priv)) {
- u32 tseg_size = 0;
- u32 tom;
- u8 tmp;
-
- pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
- I830_ESMRAMC, &tmp);
- if (tmp & TSEG_ENABLE) {
- if (tmp & I830_TSEG_SIZE_1M)
- tseg_size = MB(1);
- else
- tseg_size = KB(512);
- }
-
- pci_bus_read_config_byte(pdev->bus, PCI_DEVFN(0, 0),
- I830_DRB3, &tmp);
- tom = tmp * MB(32);
-
- base = tom - tseg_size - ggtt->stolen_size;
- }
+ if (dsm->start == 0 || dsm->end <= dsm->start)
+ return -EINVAL;
- if (base == 0 || add_overflows(base, ggtt->stolen_size))
- return 0;
+ /*
+ * TODO: We have yet to encounter the case where the GTT wasn't at the
+ * end of stolen. With that assumption we could simplify this.
+ */
- /* make sure we don't clobber the GTT if it's within stolen memory */
+ /* Make sure we don't clobber the GTT if it's within stolen memory */
if (INTEL_GEN(dev_priv) <= 4 &&
!IS_G33(dev_priv) && !IS_PINEVIEW(dev_priv) && !IS_G4X(dev_priv)) {
- struct {
- dma_addr_t start, end;
- } stolen[2] = {
- { .start = base, .end = base + ggtt->stolen_size, },
- { .start = base, .end = base + ggtt->stolen_size, },
- };
- u64 ggtt_start, ggtt_end;
+ struct resource stolen[2] = {*dsm, *dsm};
+ struct resource ggtt_res;
+ resource_size_t ggtt_start;
ggtt_start = I915_READ(PGTBL_CTL);
if (IS_GEN4(dev_priv))
@@ -209,70 +103,64 @@ static dma_addr_t i915_stolen_to_dma(struct drm_i915_private *dev_priv)
(ggtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
else
ggtt_start &= PGTBL_ADDRESS_LO_MASK;
- ggtt_end = ggtt_start + ggtt_total_entries(ggtt) * 4;
-
- if (ggtt_start >= stolen[0].start && ggtt_start < stolen[0].end)
- stolen[0].end = ggtt_start;
- if (ggtt_end > stolen[1].start && ggtt_end <= stolen[1].end)
- stolen[1].start = ggtt_end;
-
- /* pick the larger of the two chunks */
- if (stolen[0].end - stolen[0].start >
- stolen[1].end - stolen[1].start) {
- base = stolen[0].start;
- ggtt->stolen_size = stolen[0].end - stolen[0].start;
- } else {
- base = stolen[1].start;
- ggtt->stolen_size = stolen[1].end - stolen[1].start;
- }
+
+ ggtt_res =
+ (struct resource) DEFINE_RES_MEM(ggtt_start,
+ ggtt_total_entries(ggtt) * 4);
+
+ if (ggtt_res.start >= stolen[0].start && ggtt_res.start < stolen[0].end)
+ stolen[0].end = ggtt_res.start;
+ if (ggtt_res.end > stolen[1].start && ggtt_res.end <= stolen[1].end)
+ stolen[1].start = ggtt_res.end;
+
+ /* Pick the larger of the two chunks */
+ if (resource_size(&stolen[0]) > resource_size(&stolen[1]))
+ *dsm = stolen[0];
+ else
+ *dsm = stolen[1];
if (stolen[0].start != stolen[1].start ||
stolen[0].end != stolen[1].end) {
- dma_addr_t end = base + ggtt->stolen_size - 1;
-
- DRM_DEBUG_KMS("GTT within stolen memory at 0x%llx-0x%llx\n",
- (unsigned long long)ggtt_start,
- (unsigned long long)ggtt_end - 1);
- DRM_DEBUG_KMS("Stolen memory adjusted to %pad - %pad\n",
- &base, &end);
+ DRM_DEBUG_KMS("GTT within stolen memory at %pR\n", &ggtt_res);
+ DRM_DEBUG_KMS("Stolen memory adjusted to %pR\n", dsm);
}
}
-
- /* Verify that nothing else uses this physical address. Stolen
+ /*
+ * Verify that nothing else uses this physical address. Stolen
* memory should be reserved by the BIOS and hidden from the
* kernel. So if the region is already marked as busy, something
* is seriously wrong.
*/
- r = devm_request_mem_region(dev_priv->drm.dev, base, ggtt->stolen_size,
+ r = devm_request_mem_region(dev_priv->drm.dev, dsm->start,
+ resource_size(dsm),
"Graphics Stolen Memory");
if (r == NULL) {
/*
* One more attempt but this time requesting region from
- * base + 1, as we have seen that this resolves the region
+ * start + 1, as we have seen that this resolves the region
* conflict with the PCI Bus.
* This is a BIOS w/a: Some BIOS wrap stolen in the root
* PCI bus, but have an off-by-one error. Hence retry the
* reservation starting from 1 instead of 0.
* There's also BIOS with off-by-one on the other end.
*/
- r = devm_request_mem_region(dev_priv->drm.dev, base + 1,
- ggtt->stolen_size - 2,
+ r = devm_request_mem_region(dev_priv->drm.dev, dsm->start + 1,
+ resource_size(dsm) - 2,
"Graphics Stolen Memory");
/*
* GEN3 firmware likes to smash pci bridges into the stolen
* range. Apparently this works.
*/
if (r == NULL && !IS_GEN3(dev_priv)) {
- dma_addr_t end = base + ggtt->stolen_size;
+ DRM_ERROR("conflict detected with stolen region: %pR\n",
+ dsm);
- DRM_ERROR("conflict detected with stolen region: [%pad - %pad]\n",
- &base, &end);
- base = 0;
+ return -EBUSY;
}
}
- return base;
+ return 0;
}
void i915_gem_cleanup_stolen(struct drm_device *dev)
@@ -286,13 +174,12 @@ void i915_gem_cleanup_stolen(struct drm_device *dev)
}
static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv,
- dma_addr_t *base, u32 *size)
+ resource_size_t *base, resource_size_t *size)
{
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
uint32_t reg_val = I915_READ(IS_GM45(dev_priv) ?
CTG_STOLEN_RESERVED :
ELK_STOLEN_RESERVED);
- dma_addr_t stolen_top = dev_priv->mm.stolen_base + ggtt->stolen_size;
+ resource_size_t stolen_top = dev_priv->dsm.end + 1;
if ((reg_val & G4X_STOLEN_RESERVED_ENABLE) == 0) {
*base = 0;
@@ -321,7 +208,7 @@ static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv,
}
static void gen6_get_stolen_reserved(struct drm_i915_private *dev_priv,
- dma_addr_t *base, u32 *size)
+ resource_size_t *base, resource_size_t *size)
{
uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
@@ -353,7 +240,7 @@ static void gen6_get_stolen_reserved(struct drm_i915_private *dev_priv,
}
static void gen7_get_stolen_reserved(struct drm_i915_private *dev_priv,
- dma_addr_t *base, u32 *size)
+ resource_size_t *base, resource_size_t *size)
{
uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
@@ -379,7 +266,7 @@ static void gen7_get_stolen_reserved(struct drm_i915_private *dev_priv,
}
static void chv_get_stolen_reserved(struct drm_i915_private *dev_priv,
- dma_addr_t *base, u32 *size)
+ resource_size_t *base, resource_size_t *size)
{
uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
@@ -411,11 +298,10 @@ static void chv_get_stolen_reserved(struct drm_i915_private *dev_priv,
}
static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv,
- dma_addr_t *base, u32 *size)
+ resource_size_t *base, resource_size_t *size)
{
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
- dma_addr_t stolen_top;
+ resource_size_t stolen_top;
if ((reg_val & GEN6_STOLEN_RESERVED_ENABLE) == 0) {
*base = 0;
@@ -423,7 +309,7 @@ static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv,
return;
}
- stolen_top = dev_priv->mm.stolen_base + ggtt->stolen_size;
+ stolen_top = dev_priv->dsm.end + 1;
*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;
@@ -439,10 +325,9 @@ static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv,
int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
{
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
- dma_addr_t reserved_base, stolen_top;
- u32 reserved_total, reserved_size;
- u32 stolen_usable_start;
+ resource_size_t reserved_base, stolen_top;
+ resource_size_t reserved_total, reserved_size;
+ resource_size_t stolen_usable_start;
mutex_init(&dev_priv->mm.stolen_lock);
@@ -456,14 +341,18 @@ int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
return 0;
}
- if (ggtt->stolen_size == 0)
+ if (resource_size(&intel_graphics_stolen_res) == 0)
return 0;
- dev_priv->mm.stolen_base = i915_stolen_to_dma(dev_priv);
- if (dev_priv->mm.stolen_base == 0)
+ dev_priv->dsm = intel_graphics_stolen_res;
+
+ if (i915_adjust_stolen(dev_priv, &dev_priv->dsm))
return 0;
- stolen_top = dev_priv->mm.stolen_base + ggtt->stolen_size;
+ GEM_BUG_ON(dev_priv->dsm.start == 0);
+ GEM_BUG_ON(dev_priv->dsm.end <= dev_priv->dsm.start);
+
+ stolen_top = dev_priv->dsm.end + 1;
reserved_base = 0;
reserved_size = 0;
@@ -504,50 +393,47 @@ int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
reserved_base = stolen_top;
}
- if (reserved_base < dev_priv->mm.stolen_base ||
- reserved_base + reserved_size > stolen_top) {
- dma_addr_t reserved_top = reserved_base + reserved_size;
- DRM_ERROR("Stolen reserved area [%pad - %pad] outside stolen memory [%pad - %pad]\n",
- &reserved_base, &reserved_top,
- &dev_priv->mm.stolen_base, &stolen_top);
+ dev_priv->dsm_reserved =
+ (struct resource) DEFINE_RES_MEM(reserved_base, reserved_size);
+
+ if (!resource_contains(&dev_priv->dsm, &dev_priv->dsm_reserved)) {
+ DRM_ERROR("Stolen reserved area %pR outside stolen memory %pR\n",
+ &dev_priv->dsm_reserved, &dev_priv->dsm);
return 0;
}
- ggtt->stolen_reserved_base = reserved_base;
- ggtt->stolen_reserved_size = reserved_size;
-
/* It is possible for the reserved area to end before the end of stolen
* memory, so just consider the start. */
reserved_total = stolen_top - reserved_base;
- DRM_DEBUG_KMS("Memory reserved for graphics device: %uK, usable: %uK\n",
- ggtt->stolen_size >> 10,
- (ggtt->stolen_size - reserved_total) >> 10);
+ DRM_DEBUG_KMS("Memory reserved for graphics device: %lluK, usable: %lluK\n",
+ (u64)resource_size(&dev_priv->dsm) >> 10,
+ ((u64)resource_size(&dev_priv->dsm) - reserved_total) >> 10);
stolen_usable_start = 0;
/* WaSkipStolenMemoryFirstPage:bdw+ */
if (INTEL_GEN(dev_priv) >= 8)
stolen_usable_start = 4096;
- ggtt->stolen_usable_size =
- ggtt->stolen_size - reserved_total - stolen_usable_start;
+ dev_priv->stolen_usable_size =
+ resource_size(&dev_priv->dsm) - reserved_total - stolen_usable_start;
/* Basic memrange allocator for stolen space. */
drm_mm_init(&dev_priv->mm.stolen, stolen_usable_start,
- ggtt->stolen_usable_size);
+ dev_priv->stolen_usable_size);
return 0;
}
static struct sg_table *
i915_pages_create_for_stolen(struct drm_device *dev,
- u32 offset, u32 size)
+ resource_size_t offset, resource_size_t size)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct sg_table *st;
struct scatterlist *sg;
- GEM_BUG_ON(range_overflows(offset, size, dev_priv->ggtt.stolen_size));
+ GEM_BUG_ON(range_overflows(offset, size, resource_size(&dev_priv->dsm)));
/* We hide that we have no struct page backing our stolen object
* by wrapping the contiguous physical allocation with a fake
@@ -567,7 +453,7 @@ i915_pages_create_for_stolen(struct drm_device *dev,
sg->offset = 0;
sg->length = size;
- sg_dma_address(sg) = (dma_addr_t)dev_priv->mm.stolen_base + offset;
+ sg_dma_address(sg) = (dma_addr_t)dev_priv->dsm.start + offset;
sg_dma_len(sg) = size;
return st;
@@ -645,7 +531,8 @@ cleanup:
}
struct drm_i915_gem_object *
-i915_gem_object_create_stolen(struct drm_i915_private *dev_priv, u32 size)
+i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
+ resource_size_t size)
{
struct drm_i915_gem_object *obj;
struct drm_mm_node *stolen;
@@ -678,9 +565,9 @@ i915_gem_object_create_stolen(struct drm_i915_private *dev_priv, u32 size)
struct drm_i915_gem_object *
i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv,
- u32 stolen_offset,
- u32 gtt_offset,
- u32 size)
+ resource_size_t stolen_offset,
+ resource_size_t gtt_offset,
+ resource_size_t size)
{
struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct drm_i915_gem_object *obj;
@@ -693,8 +580,8 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
lockdep_assert_held(&dev_priv->drm.struct_mutex);
- DRM_DEBUG_KMS("creating preallocated stolen object: stolen_offset=%x, gtt_offset=%x, size=%x\n",
- stolen_offset, gtt_offset, size);
+ DRM_DEBUG_KMS("creating preallocated stolen object: stolen_offset=%pa, gtt_offset=%pa, size=%pa\n",
+ &stolen_offset, &gtt_offset, &size);
/* KISS and expect everything to be page-aligned */
if (WARN_ON(size == 0) ||
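
With the hunks above, stolen memory is carried as struct resource (dev_priv->dsm and dev_priv->dsm_reserved) rather than separate base/size fields, which is what makes resource_size(), resource_contains() and the %pR/%pa printk specifiers usable here. A small hedged sketch of those helpers in isolation; the addresses and sizes are made up for illustration:

#include <linux/ioport.h>
#include <linux/printk.h>

static void stolen_resource_demo(void)
{
	/* Pretend stolen memory spans 64 MiB at 0x80000000. */
	struct resource dsm = DEFINE_RES_MEM(0x80000000, 64 << 20);
	/* A reserved chunk of 8 MiB at the top of stolen. */
	struct resource reserved = DEFINE_RES_MEM(0x80000000 + (56 << 20),
						  8 << 20);
	resource_size_t usable;

	if (!resource_contains(&dsm, &reserved)) {
		pr_err("reserved %pR outside stolen %pR\n", &reserved, &dsm);
		return;
	}

	usable = resource_size(&dsm) - resource_size(&reserved);
	/* %pa prints a resource_size_t/phys_addr_t passed by reference. */
	pr_info("stolen %pR, usable %pa bytes\n", &dsm, &usable);
}
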
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index b85d7ebd9bee..d9dc9df523b5 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -205,10 +205,7 @@ i915_gem_object_fence_prepare(struct drm_i915_gem_object *obj,
if (tiling_mode == I915_TILING_NONE)
return 0;
- list_for_each_entry(vma, &obj->vma_list, obj_link) {
- if (!i915_vma_is_ggtt(vma))
- break;
-
+ for_each_ggtt_vma(vma, obj) {
if (i915_vma_fence_prepare(vma, tiling_mode, stride))
continue;
@@ -285,10 +282,7 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
}
mutex_unlock(&obj->mm.lock);
- list_for_each_entry(vma, &obj->vma_list, obj_link) {
- if (!i915_vma_is_ggtt(vma))
- break;
-
+ for_each_ggtt_vma(vma, obj) {
vma->fence_size =
i915_gem_fence_size(i915, vma->size, tiling, stride);
vma->fence_alignment =
diff --git a/drivers/gpu/drm/i915/i915_gem_timeline.c b/drivers/gpu/drm/i915/i915_gem_timeline.c
index c01905d6450c..e9fd87604067 100644
--- a/drivers/gpu/drm/i915/i915_gem_timeline.c
+++ b/drivers/gpu/drm/i915/i915_gem_timeline.c
@@ -33,11 +33,8 @@ static void __intel_timeline_init(struct intel_timeline *tl,
{
tl->fence_context = context;
tl->common = parent;
-#ifdef CONFIG_DEBUG_SPINLOCK
- __raw_spin_lock_init(&tl->lock.rlock, lockname, lockclass);
-#else
spin_lock_init(&tl->lock);
-#endif
+ lockdep_set_class_and_name(&tl->lock, lockclass, lockname);
init_request_active(&tl->last_request, NULL);
INIT_LIST_HEAD(&tl->requests);
i915_syncmap_init(&tl->sync);
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 48418fb81066..944059322daa 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -416,6 +416,7 @@ static void error_print_engine(struct drm_i915_error_state_buf *m,
int n;
err_printf(m, "%s command stream:\n", engine_str(ee->engine_id));
+ err_printf(m, " IDLE?: %s\n", yesno(ee->idle));
err_printf(m, " START: 0x%08x\n", ee->start);
err_printf(m, " HEAD: 0x%08x [0x%08x]\n", ee->head, ee->rq_head);
err_printf(m, " TAIL: 0x%08x [0x%08x, 0x%08x]\n",
@@ -564,34 +565,17 @@ static void print_error_obj(struct drm_i915_error_state_buf *m,
static void err_print_capabilities(struct drm_i915_error_state_buf *m,
const struct intel_device_info *info)
{
-#define PRINT_FLAG(x) err_printf(m, #x ": %s\n", yesno(info->x))
- DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG);
-#undef PRINT_FLAG
-}
+ struct drm_printer p = i915_error_printer(m);
-static __always_inline void err_print_param(struct drm_i915_error_state_buf *m,
- const char *name,
- const char *type,
- const void *x)
-{
- if (!__builtin_strcmp(type, "bool"))
- err_printf(m, "i915.%s=%s\n", name, yesno(*(const bool *)x));
- else if (!__builtin_strcmp(type, "int"))
- err_printf(m, "i915.%s=%d\n", name, *(const int *)x);
- else if (!__builtin_strcmp(type, "unsigned int"))
- err_printf(m, "i915.%s=%u\n", name, *(const unsigned int *)x);
- else if (!__builtin_strcmp(type, "char *"))
- err_printf(m, "i915.%s=%s\n", name, *(const char **)x);
- else
- BUILD_BUG();
+ intel_device_info_dump_flags(info, &p);
}
static void err_print_params(struct drm_i915_error_state_buf *m,
- const struct i915_params *p)
+ const struct i915_params *params)
{
-#define PRINT(T, x, ...) err_print_param(m, #x, #T, &p->x);
- I915_PARAMS_FOR_EACH(PRINT);
-#undef PRINT
+ struct drm_printer p = i915_error_printer(m);
+
+ i915_params_dump(params, &p);
}
static void err_print_pciid(struct drm_i915_error_state_buf *m,
@@ -956,7 +940,7 @@ i915_error_object_create(struct drm_i915_private *i915,
ggtt->base.insert_page(&ggtt->base, dma, slot,
I915_CACHE_NONE, 0);
- s = io_mapping_map_atomic_wc(&ggtt->mappable, slot);
+ s = io_mapping_map_atomic_wc(&ggtt->iomap, slot);
ret = compress_page(&compress, (void __force *)s, dst);
io_mapping_unmap_atomic(s);
@@ -1256,6 +1240,7 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
ee->hws = I915_READ(mmio);
}
+ ee->idle = intel_engine_is_idle(engine);
ee->hangcheck_timestamp = engine->hangcheck.action_timestamp;
ee->hangcheck_action = engine->hangcheck.action;
ee->hangcheck_stalled = engine->hangcheck.stalled;
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 7cac07db89b9..3517c6548e2c 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1400,7 +1400,7 @@ gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift)
if (iir & (GT_RENDER_USER_INTERRUPT << test_shift)) {
notify_ring(engine);
- tasklet |= i915_modparams.enable_guc_submission;
+ tasklet |= USES_GUC_SUBMISSION(engine->i915);
}
if (tasklet)
diff --git a/drivers/gpu/drm/i915/i915_memcpy.c b/drivers/gpu/drm/i915/i915_memcpy.c
index 49a079494b68..79f8ec756362 100644
--- a/drivers/gpu/drm/i915/i915_memcpy.c
+++ b/drivers/gpu/drm/i915/i915_memcpy.c
@@ -96,6 +96,11 @@ bool i915_memcpy_from_wc(void *dst, const void *src, unsigned long len)
void i915_memcpy_init_early(struct drm_i915_private *dev_priv)
{
- if (static_cpu_has(X86_FEATURE_XMM4_1))
+ /*
+ * Some hypervisors (e.g. KVM) don't support emulation of VEX-prefixed
+ * instructions. So don't enable movntdqa in hypervisor guests.
+ */
+ if (static_cpu_has(X86_FEATURE_XMM4_1) &&
+ !boot_cpu_has(X86_FEATURE_HYPERVISOR))
static_branch_enable(&has_movntdqa);
}
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 7bc538687871..b5f3eb4fa8a3 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -22,6 +22,8 @@
* IN THE SOFTWARE.
*/
+#include <drm/drm_print.h>
+
#include "i915_params.h"
#include "i915_drv.h"
@@ -147,13 +149,10 @@ i915_param_named_unsafe(edp_vswing, int, 0400,
"(0=use value from vbt [default], 1=low power swing(200mV),"
"2=default swing(400mV))");
-i915_param_named_unsafe(enable_guc_loading, int, 0400,
- "Enable GuC firmware loading "
- "(-1=auto, 0=never [default], 1=if available, 2=required)");
-
-i915_param_named_unsafe(enable_guc_submission, int, 0400,
- "Enable GuC submission "
- "(-1=auto, 0=never [default], 1=if available, 2=required)");
+i915_param_named_unsafe(enable_guc, int, 0400,
+ "Enable GuC load for GuC submission and/or HuC load. "
+ "Required functionality can be selected using bitmask values. "
+ "(-1=auto, 0=disable [default], 1=GuC submission, 2=HuC load)");
i915_param_named(guc_log_level, int, 0400,
"GuC firmware logging level (-1:disabled (default), 0-3:enabled)");
@@ -175,3 +174,34 @@ i915_param_named(enable_dpcd_backlight, bool, 0600,
i915_param_named(enable_gvt, bool, 0400,
"Enable support for Intel GVT-g graphics virtualization host support(default:false)");
+
+static __always_inline void _print_param(struct drm_printer *p,
+ const char *name,
+ const char *type,
+ const void *x)
+{
+ if (!__builtin_strcmp(type, "bool"))
+ drm_printf(p, "i915.%s=%s\n", name, yesno(*(const bool *)x));
+ else if (!__builtin_strcmp(type, "int"))
+ drm_printf(p, "i915.%s=%d\n", name, *(const int *)x);
+ else if (!__builtin_strcmp(type, "unsigned int"))
+ drm_printf(p, "i915.%s=%u\n", name, *(const unsigned int *)x);
+ else if (!__builtin_strcmp(type, "char *"))
+ drm_printf(p, "i915.%s=%s\n", name, *(const char **)x);
+ else
+ BUILD_BUG();
+}
+
+/**
+ * i915_params_dump - dump i915 modparams
+ * @params: i915 modparams
+ * @p: the &drm_printer
+ *
+ * Pretty printer for i915 modparams.
+ */
+void i915_params_dump(const struct i915_params *params, struct drm_printer *p)
+{
+#define PRINT(T, x, ...) _print_param(p, #x, #T, &params->x);
+ I915_PARAMS_FOR_EACH(PRINT);
+#undef PRINT
+}
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
index c48c88bb95e8..c96360398072 100644
--- a/drivers/gpu/drm/i915/i915_params.h
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -25,8 +25,14 @@
#ifndef _I915_PARAMS_H_
#define _I915_PARAMS_H_
+#include <linux/bitops.h>
#include <linux/cache.h> /* for __read_mostly */
+struct drm_printer;
+
+#define ENABLE_GUC_SUBMISSION BIT(0)
+#define ENABLE_GUC_LOAD_HUC BIT(1)
+
#define I915_PARAMS_FOR_EACH(param) \
param(char *, vbt_firmware, NULL) \
param(int, modeset, -1) \
@@ -41,8 +47,7 @@
param(int, disable_power_well, -1) \
param(int, enable_ips, 1) \
param(int, invert_brightness, 0) \
- param(int, enable_guc_loading, 0) \
- param(int, enable_guc_submission, 0) \
+ param(int, enable_guc, 0) \
param(int, guc_log_level, -1) \
param(char *, guc_firmware_path, NULL) \
param(char *, huc_firmware_path, NULL) \
@@ -74,5 +79,7 @@ struct i915_params {
extern struct i915_params i915_modparams __read_mostly;
+void i915_params_dump(const struct i915_params *params, struct drm_printer *p);
+
#endif
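
The enable_guc_loading and enable_guc_submission parameters collapse into a single enable_guc bitmask, where bit 0 (ENABLE_GUC_SUBMISSION) requests GuC submission and bit 1 (ENABLE_GUC_LOAD_HUC) requests HuC loading. The USES_GUC*() predicates used in other files of this series are defined outside the hunks shown; as a hedged approximation only, decoding the mask amounts to simple bit tests like these (helper names invented):

/* Illustrative decode of the enable_guc bitmask; not the driver's
 * actual helpers, which live in headers outside this diff. */
static inline bool wants_guc_submission(int enable_guc)
{
	return enable_guc & ENABLE_GUC_SUBMISSION;
}

static inline bool wants_huc_load(int enable_guc)
{
	return enable_guc & ENABLE_GUC_LOAD_HUC;
}

static inline bool wants_guc_load(int enable_guc)
{
	/* Loading the GuC firmware is a prerequisite for either feature. */
	return wants_guc_submission(enable_guc) || wants_huc_load(enable_guc);
}
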
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index fa67d3dde20e..36d48422b475 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -633,6 +633,8 @@ static const struct pci_device_id pciidlist[] = {
INTEL_CFL_S_GT1_IDS(&intel_coffeelake_gt1_info),
INTEL_CFL_S_GT2_IDS(&intel_coffeelake_gt2_info),
INTEL_CFL_H_GT2_IDS(&intel_coffeelake_gt2_info),
+ INTEL_CFL_U_GT1_IDS(&intel_coffeelake_gt1_info),
+ INTEL_CFL_U_GT2_IDS(&intel_coffeelake_gt2_info),
INTEL_CFL_U_GT3_IDS(&intel_coffeelake_gt3_info),
INTEL_CNL_U_GT2_IDS(&intel_cannonlake_gt2_info),
INTEL_CNL_Y_GT2_IDS(&intel_cannonlake_gt2_info),
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 09bf043c1c2e..41285bec8fc0 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -3278,6 +3278,7 @@ enum i915_power_well_id {
# define AUDUNIT_CLOCK_GATE_DISABLE (1 << 26) /* 965 */
# define DPUNIT_A_CLOCK_GATE_DISABLE (1 << 25) /* 965 */
# define DPCUNIT_CLOCK_GATE_DISABLE (1 << 24) /* 965 */
+# define PNV_GMBUSUNIT_CLOCK_GATE_DISABLE (1 << 24) /* pnv */
# define TVRUNIT_CLOCK_GATE_DISABLE (1 << 23) /* 915-945 */
# define TVCUNIT_CLOCK_GATE_DISABLE (1 << 22) /* 915-945 */
# define TVFUNIT_CLOCK_GATE_DISABLE (1 << 21) /* 915-945 */
@@ -3858,6 +3859,9 @@ enum {
#define PWM2_GATING_DIS (1 << 14)
#define PWM1_GATING_DIS (1 << 13)
+#define GEN9_CLKGATE_DIS_4 _MMIO(0x4653C)
+#define BXT_GMBUS_GATING_DIS (1 << 14)
+
#define _CLKGATE_DIS_PSL_A 0x46520
#define _CLKGATE_DIS_PSL_B 0x46524
#define _CLKGATE_DIS_PSL_C 0x46528
@@ -3875,6 +3879,9 @@ enum {
#define SARBUNIT_CLKGATE_DIS (1 << 5)
#define RCCUNIT_CLKGATE_DIS (1 << 7)
+#define UNSLICE_UNIT_LEVEL_CLKGATE _MMIO(0x9434)
+#define VFUNIT_CLKGATE_DIS (1 << 20)
+
/*
* Display engine regs
*/
@@ -6329,6 +6336,7 @@ enum {
#define PLANE_CTL_TILED_X ( 1 << 10)
#define PLANE_CTL_TILED_Y ( 4 << 10)
#define PLANE_CTL_TILED_YF ( 5 << 10)
+#define PLANE_CTL_FLIP_HORIZONTAL ( 1 << 8)
#define PLANE_CTL_ALPHA_MASK (0x3 << 4) /* Pre-GLK */
#define PLANE_CTL_ALPHA_DISABLE ( 0 << 4)
#define PLANE_CTL_ALPHA_SW_PREMULTIPLY ( 2 << 4)
@@ -7552,6 +7560,7 @@ enum {
#define FDI_RX_CHICKEN(pipe) _MMIO_PIPE(pipe, _FDI_RXA_CHICKEN, _FDI_RXB_CHICKEN)
#define SOUTH_DSPCLK_GATE_D _MMIO(0xc2020)
+#define PCH_GMBUSUNIT_CLOCK_GATE_DISABLE (1<<31)
#define PCH_DPLUNIT_CLOCK_GATE_DISABLE (1<<30)
#define PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1<<29)
#define PCH_CPUNIT_CLOCK_GATE_DISABLE (1<<14)
@@ -8142,6 +8151,7 @@ enum {
#define PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE (1<<8)
#define STALL_DOP_GATING_DISABLE (1<<5)
#define THROTTLE_12_5 (7<<2)
+#define DISABLE_EARLY_EOT (1<<1)
#define GEN7_ROW_CHICKEN2 _MMIO(0xe4f4)
#define GEN7_ROW_CHICKEN2_GT2 _MMIO(0xf4f4)
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
index e8ca67a129d2..3669f5eeb91e 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence.c
@@ -303,6 +303,7 @@ static int __i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
int pending;
debug_fence_assert(fence);
+ might_sleep_if(gfpflags_allow_blocking(gfp));
if (i915_sw_fence_done(signaler))
return 0;
@@ -367,6 +368,7 @@ struct i915_sw_dma_fence_cb {
struct dma_fence *dma;
struct timer_list timer;
struct irq_work work;
+ struct rcu_head rcu;
};
static void timer_i915_sw_fence_wake(struct timer_list *t)
@@ -406,7 +408,7 @@ static void irq_i915_sw_fence_work(struct irq_work *wrk)
del_timer_sync(&cb->timer);
dma_fence_put(cb->dma);
- kfree(cb);
+ kfree_rcu(cb, rcu);
}
int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
@@ -418,6 +420,7 @@ int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
int ret;
debug_fence_assert(fence);
+ might_sleep_if(gfpflags_allow_blocking(gfp));
if (dma_fence_is_signaled(dma))
return 0;
@@ -464,6 +467,7 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
int ret = 0, pending;
debug_fence_assert(fence);
+ might_sleep_if(gfpflags_allow_blocking(gfp));
if (write) {
struct dma_fence **shared;
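
The i915_sw_dma_fence_cb gains a struct rcu_head so the irq-work path can free it with kfree_rcu() rather than kfree(), deferring the actual free until an RCU grace period has elapsed and concurrent readers can no longer be holding a reference to the callback. The idiom in isolation, as a hedged sketch with invented names:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct demo_cb {
	int payload;
	struct rcu_head rcu;	/* reused as storage for the deferred free */
};

static void demo_release(struct demo_cb *cb)
{
	/*
	 * kfree_rcu(ptr, field) queues the object and returns immediately;
	 * the memory goes back to the allocator only after all pre-existing
	 * RCU read-side critical sections have completed.
	 */
	kfree_rcu(cb, rcu);
}
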
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 4e76768ffa95..e1169c02eb2b 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -616,6 +616,7 @@ TRACE_EVENT(i915_gem_request_queue,
TP_STRUCT__entry(
__field(u32, dev)
+ __field(u32, hw_id)
__field(u32, ring)
__field(u32, ctx)
__field(u32, seqno)
@@ -624,15 +625,16 @@ TRACE_EVENT(i915_gem_request_queue,
TP_fast_assign(
__entry->dev = req->i915->drm.primary->index;
+ __entry->hw_id = req->ctx->hw_id;
__entry->ring = req->engine->id;
__entry->ctx = req->fence.context;
__entry->seqno = req->fence.seqno;
__entry->flags = flags;
),
- TP_printk("dev=%u, ring=%u, ctx=%u, seqno=%u, flags=0x%x",
- __entry->dev, __entry->ring, __entry->ctx, __entry->seqno,
- __entry->flags)
+ TP_printk("dev=%u, hw_id=%u, ring=%u, ctx=%u, seqno=%u, flags=0x%x",
+ __entry->dev, __entry->hw_id, __entry->ring, __entry->ctx,
+ __entry->seqno, __entry->flags)
);
DECLARE_EVENT_CLASS(i915_gem_request,
@@ -641,23 +643,25 @@ DECLARE_EVENT_CLASS(i915_gem_request,
TP_STRUCT__entry(
__field(u32, dev)
- __field(u32, ctx)
+ __field(u32, hw_id)
__field(u32, ring)
+ __field(u32, ctx)
__field(u32, seqno)
__field(u32, global)
),
TP_fast_assign(
__entry->dev = req->i915->drm.primary->index;
+ __entry->hw_id = req->ctx->hw_id;
__entry->ring = req->engine->id;
__entry->ctx = req->fence.context;
__entry->seqno = req->fence.seqno;
__entry->global = req->global_seqno;
),
- TP_printk("dev=%u, ring=%u, ctx=%u, seqno=%u, global=%u",
- __entry->dev, __entry->ring, __entry->ctx, __entry->seqno,
- __entry->global)
+ TP_printk("dev=%u, hw_id=%u, ring=%u, ctx=%u, seqno=%u, global=%u",
+ __entry->dev, __entry->hw_id, __entry->ring, __entry->ctx,
+ __entry->seqno, __entry->global)
);
DEFINE_EVENT(i915_gem_request, i915_gem_request_add,
@@ -683,15 +687,17 @@ DECLARE_EVENT_CLASS(i915_gem_request_hw,
TP_STRUCT__entry(
__field(u32, dev)
+ __field(u32, hw_id)
__field(u32, ring)
+ __field(u32, ctx)
__field(u32, seqno)
__field(u32, global_seqno)
- __field(u32, ctx)
__field(u32, port)
),
TP_fast_assign(
__entry->dev = req->i915->drm.primary->index;
+ __entry->hw_id = req->ctx->hw_id;
__entry->ring = req->engine->id;
__entry->ctx = req->fence.context;
__entry->seqno = req->fence.seqno;
@@ -699,10 +705,10 @@ DECLARE_EVENT_CLASS(i915_gem_request_hw,
__entry->port = port;
),
- TP_printk("dev=%u, ring=%u, ctx=%u, seqno=%u, global=%u, port=%u",
- __entry->dev, __entry->ring, __entry->ctx,
- __entry->seqno, __entry->global_seqno,
- __entry->port)
+ TP_printk("dev=%u, hw_id=%u, ring=%u, ctx=%u, seqno=%u, global=%u, port=%u",
+ __entry->dev, __entry->hw_id, __entry->ring,
+ __entry->ctx, __entry->seqno,
+ __entry->global_seqno, __entry->port)
);
DEFINE_EVENT(i915_gem_request_hw, i915_gem_request_in,
@@ -772,6 +778,7 @@ TRACE_EVENT(i915_gem_request_wait_begin,
TP_STRUCT__entry(
__field(u32, dev)
+ __field(u32, hw_id)
__field(u32, ring)
__field(u32, ctx)
__field(u32, seqno)
@@ -787,6 +794,7 @@ TRACE_EVENT(i915_gem_request_wait_begin,
*/
TP_fast_assign(
__entry->dev = req->i915->drm.primary->index;
+ __entry->hw_id = req->ctx->hw_id;
__entry->ring = req->engine->id;
__entry->ctx = req->fence.context;
__entry->seqno = req->fence.seqno;
@@ -794,10 +802,10 @@ TRACE_EVENT(i915_gem_request_wait_begin,
__entry->flags = flags;
),
- TP_printk("dev=%u, ring=%u, ctx=%u, seqno=%u, global=%u, blocking=%u, flags=0x%x",
- __entry->dev, __entry->ring, __entry->ctx, __entry->seqno,
- __entry->global, !!(__entry->flags & I915_WAIT_LOCKED),
- __entry->flags)
+ TP_printk("dev=%u, hw_id=%u, ring=%u, ctx=%u, seqno=%u, global=%u, blocking=%u, flags=0x%x",
+ __entry->dev, __entry->hw_id, __entry->ring, __entry->ctx,
+ __entry->seqno, __entry->global,
+ !!(__entry->flags & I915_WAIT_LOCKED), __entry->flags)
);
DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_end,
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
index 8d07764887ec..51dbfe5bb418 100644
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -140,4 +140,19 @@ static inline void drain_delayed_work(struct delayed_work *dw)
} while (delayed_work_pending(dw));
}
+static inline const char *yesno(bool v)
+{
+ return v ? "yes" : "no";
+}
+
+static inline const char *onoff(bool v)
+{
+ return v ? "on" : "off";
+}
+
+static inline const char *enableddisabled(bool v)
+{
+ return v ? "enabled" : "disabled";
+}
+
#endif /* !__I915_UTILS_H */
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index bf6d8d1eaabe..e0e7c48f45dc 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -142,6 +142,12 @@ vma_create(struct drm_i915_gem_object *obj,
i915_gem_object_get_stride(obj));
GEM_BUG_ON(!is_power_of_2(vma->fence_alignment));
+ /*
+ * We put the GGTT vma at the start of the vma-list, followed
+ * by the ppGGTT vma. This allows us to break early when
+ * iterating over only the GGTT vma for an object, see
+	 * for_each_ggtt_vma().
+ */
vma->flags |= I915_VMA_GGTT;
list_add(&vma->obj_link, &obj->vma_list);
} else {
@@ -305,7 +311,7 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
ptr = vma->iomap;
if (ptr == NULL) {
- ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->mappable,
+ ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap,
vma->node.start,
vma->node.size);
if (ptr == NULL) {
@@ -322,6 +328,7 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
if (err)
goto err_unpin;
+ i915_vma_set_ggtt_write(vma);
return ptr;
err_unpin:
@@ -330,12 +337,24 @@ err:
return IO_ERR_PTR(err);
}
+void i915_vma_flush_writes(struct i915_vma *vma)
+{
+ if (!i915_vma_has_ggtt_write(vma))
+ return;
+
+ i915_gem_flush_ggtt_writes(vma->vm->i915);
+
+ i915_vma_unset_ggtt_write(vma);
+}
+
void i915_vma_unpin_iomap(struct i915_vma *vma)
{
lockdep_assert_held(&vma->obj->base.dev->struct_mutex);
GEM_BUG_ON(vma->iomap == NULL);
+ i915_vma_flush_writes(vma);
+
i915_vma_unpin_fence(vma);
i915_vma_unpin(vma);
}
@@ -466,6 +485,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
u64 start, end;
int ret;
+ GEM_BUG_ON(i915_vma_is_closed(vma));
GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
@@ -678,7 +698,9 @@ static void i915_vma_destroy(struct i915_vma *vma)
GEM_BUG_ON(i915_gem_active_isset(&vma->last_read[i]));
GEM_BUG_ON(i915_gem_active_isset(&vma->last_fence));
+ list_del(&vma->obj_link);
list_del(&vma->vm_link);
+
if (!i915_vma_is_ggtt(vma))
i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm));
@@ -690,7 +712,6 @@ void i915_vma_close(struct i915_vma *vma)
GEM_BUG_ON(i915_vma_is_closed(vma));
vma->flags |= I915_VMA_CLOSED;
- list_del(&vma->obj_link);
rb_erase(&vma->obj_node, &vma->obj->vma_tree);
if (!i915_vma_is_active(vma) && !i915_vma_is_pinned(vma))
@@ -790,6 +811,15 @@ int i915_vma_unbind(struct i915_vma *vma)
GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
if (i915_vma_is_map_and_fenceable(vma)) {
+ /*
+ * Check that we have flushed all writes through the GGTT
+	 * before the unbind; otherwise, due to the non-strict nature of
+	 * those indirect writes, they may end up referencing the GGTT
+	 * PTE after the unbind.
+ */
+ i915_vma_flush_writes(vma);
+ GEM_BUG_ON(i915_vma_has_ggtt_write(vma));
+
/* release the fence reg _after_ flushing */
ret = i915_vma_put_fence(vma);
if (ret)
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index 1e2bc9b3c3ac..fd5b84904f7c 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -90,6 +90,7 @@ struct i915_vma {
#define I915_VMA_CLOSED BIT(10)
#define I915_VMA_USERFAULT_BIT 11
#define I915_VMA_USERFAULT BIT(I915_VMA_USERFAULT_BIT)
+#define I915_VMA_GGTT_WRITE BIT(12)
unsigned int active;
struct i915_gem_active last_read[I915_NUM_ENGINES];
@@ -138,6 +139,24 @@ static inline bool i915_vma_is_ggtt(const struct i915_vma *vma)
return vma->flags & I915_VMA_GGTT;
}
+static inline bool i915_vma_has_ggtt_write(const struct i915_vma *vma)
+{
+ return vma->flags & I915_VMA_GGTT_WRITE;
+}
+
+static inline void i915_vma_set_ggtt_write(struct i915_vma *vma)
+{
+ GEM_BUG_ON(!i915_vma_is_ggtt(vma));
+ vma->flags |= I915_VMA_GGTT_WRITE;
+}
+
+static inline void i915_vma_unset_ggtt_write(struct i915_vma *vma)
+{
+ vma->flags &= ~I915_VMA_GGTT_WRITE;
+}
+
+void i915_vma_flush_writes(struct i915_vma *vma);
+
static inline bool i915_vma_is_map_and_fenceable(const struct i915_vma *vma)
{
return vma->flags & I915_VMA_CAN_FENCE;
@@ -389,5 +408,19 @@ i915_vma_unpin_fence(struct i915_vma *vma)
__i915_vma_unpin_fence(vma);
}
-#endif
+#define for_each_until(cond) if (cond) break; else
+
+/**
+ * for_each_ggtt_vma - Iterate over the GGTT VMA belonging to an object.
+ * @V: the #i915_vma iterator
+ * @OBJ: the #drm_i915_gem_object
+ *
+ * GGTT VMAs are placed at the beginning of the object's vma_list, see
+ * vma_create(), so we can stop our walk as soon as we see a ppgtt VMA
+ * or, of course, when the list is exhausted.
+ */
+#define for_each_ggtt_vma(V, OBJ) \
+ list_for_each_entry(V, &(OBJ)->vma_list, obj_link) \
+ for_each_until(!i915_vma_is_ggtt(V))
+#endif
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
index 5ae2d276f7f3..58c624f982d9 100644
--- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -27,6 +27,12 @@
#include "i915_drv.h"
+#ifdef CONFIG_SMP
+#define task_asleep(tsk) ((tsk)->state & TASK_NORMAL && !(tsk)->on_cpu)
+#else
+#define task_asleep(tsk) ((tsk)->state & TASK_NORMAL)
+#endif
+
static unsigned int __intel_breadcrumbs_wakeup(struct intel_breadcrumbs *b)
{
struct intel_wait *wait;
@@ -36,8 +42,20 @@ static unsigned int __intel_breadcrumbs_wakeup(struct intel_breadcrumbs *b)
wait = b->irq_wait;
if (wait) {
+ /*
+ * N.B. Since task_asleep() and ttwu are not atomic, the
+ * waiter may actually go to sleep after the check, causing
+ * us to suppress a valid wakeup. We prefer to reduce the
+ * number of false positive missed_breadcrumb() warnings
+			 * at the expense of a few false negatives, as it is easy
+ * to trigger a false positive under heavy load. Enough
+ * signal should remain from genuine missed_breadcrumb()
+ * for us to detect in CI.
+ */
+ bool was_asleep = task_asleep(wait->tsk);
+
result = ENGINE_WAKEUP_WAITER;
- if (wake_up_process(wait->tsk))
+ if (wake_up_process(wait->tsk) && was_asleep)
result |= ENGINE_WAKEUP_ASLEEP;
}
@@ -64,20 +82,21 @@ static unsigned long wait_timeout(void)
static noinline void missed_breadcrumb(struct intel_engine_cs *engine)
{
- DRM_DEBUG_DRIVER("%s missed breadcrumb at %pS, irq posted? %s, current seqno=%x, last=%x\n",
- engine->name, __builtin_return_address(0),
- yesno(test_bit(ENGINE_IRQ_BREADCRUMB,
- &engine->irq_posted)),
- intel_engine_get_seqno(engine),
- intel_engine_last_submit(engine));
+ if (drm_debug & DRM_UT_DRIVER) {
+ struct drm_printer p = drm_debug_printer(__func__);
+
+ intel_engine_dump(engine, &p,
+ "%s missed breadcrumb at %pS\n",
+ engine->name, __builtin_return_address(0));
+ }
set_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings);
}
static void intel_breadcrumbs_hangcheck(struct timer_list *t)
{
- struct intel_engine_cs *engine = from_timer(engine, t,
- breadcrumbs.hangcheck);
+ struct intel_engine_cs *engine =
+ from_timer(engine, t, breadcrumbs.hangcheck);
struct intel_breadcrumbs *b = &engine->breadcrumbs;
if (!b->irq_armed)
@@ -103,7 +122,7 @@ static void intel_breadcrumbs_hangcheck(struct timer_list *t)
*/
if (intel_engine_wakeup(engine) & ENGINE_WAKEUP_ASLEEP) {
missed_breadcrumb(engine);
- mod_timer(&engine->breadcrumbs.fake_irq, jiffies + 1);
+ mod_timer(&b->fake_irq, jiffies + 1);
} else {
mod_timer(&b->hangcheck, wait_timeout());
}
@@ -213,32 +232,42 @@ void intel_engine_unpin_breadcrumbs_irq(struct intel_engine_cs *engine)
void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine)
{
struct intel_breadcrumbs *b = &engine->breadcrumbs;
- struct intel_wait *wait, *n, *first;
+ struct intel_wait *wait, *n;
if (!b->irq_armed)
- return;
+ goto wakeup_signaler;
- /* We only disarm the irq when we are idle (all requests completed),
+ /*
+ * We only disarm the irq when we are idle (all requests completed),
* so if the bottom-half remains asleep, it missed the request
* completion.
*/
+ if (intel_engine_wakeup(engine) & ENGINE_WAKEUP_ASLEEP)
+ missed_breadcrumb(engine);
spin_lock_irq(&b->rb_lock);
spin_lock(&b->irq_lock);
- first = fetch_and_zero(&b->irq_wait);
+ b->irq_wait = NULL;
if (b->irq_armed)
__intel_engine_disarm_breadcrumbs(engine);
spin_unlock(&b->irq_lock);
rbtree_postorder_for_each_entry_safe(wait, n, &b->waiters, node) {
RB_CLEAR_NODE(&wait->node);
- if (wake_up_process(wait->tsk) && wait == first)
- missed_breadcrumb(engine);
+ wake_up_process(wait->tsk);
}
b->waiters = RB_ROOT;
spin_unlock_irq(&b->rb_lock);
+
+ /*
+	 * The signaling thread may be asleep holding a reference to a request
+	 * that had its signaling cancelled prior to being preempted. We need
+ * to kick the signaler, just in case, to release any such reference.
+ */
+wakeup_signaler:
+ wake_up_process(b->signaler);
}
static bool use_fake_irq(const struct intel_breadcrumbs *b)
@@ -683,23 +712,15 @@ static int intel_breadcrumbs_signaler(void *arg)
}
if (unlikely(do_schedule)) {
- DEFINE_WAIT(exec);
-
if (kthread_should_park())
kthread_parkme();
- if (kthread_should_stop()) {
- GEM_BUG_ON(request);
+ if (unlikely(kthread_should_stop())) {
+ i915_gem_request_put(request);
break;
}
- if (request)
- add_wait_queue(&request->execute, &exec);
-
schedule();
-
- if (request)
- remove_wait_queue(&request->execute, &exec);
}
i915_gem_request_put(request);
} while (1);
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index 07e4f7bc4412..7fe4aac0facc 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -44,9 +44,9 @@
MODULE_FIRMWARE(I915_CSR_KBL);
#define KBL_CSR_VERSION_REQUIRED CSR_VERSION(1, 4)
-#define I915_CSR_SKL "i915/skl_dmc_ver1_26.bin"
+#define I915_CSR_SKL "i915/skl_dmc_ver1_27.bin"
MODULE_FIRMWARE(I915_CSR_SKL);
-#define SKL_CSR_VERSION_REQUIRED CSR_VERSION(1, 26)
+#define SKL_CSR_VERSION_REQUIRED CSR_VERSION(1, 27)
#define I915_CSR_BXT "i915/bxt_dmc_ver1_07.bin"
MODULE_FIRMWARE(I915_CSR_BXT);
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 369f780588fb..f51645a08dca 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -2095,6 +2095,8 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder,
if (WARN_ON(!pll))
return;
+ mutex_lock(&dev_priv->dpll_lock);
+
if (IS_CANNONLAKE(dev_priv)) {
/* Configure DPCLKA_CFGCR0 to map the DPLL to the DDI. */
val = I915_READ(DPCLKA_CFGCR0);
@@ -2115,7 +2117,7 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder,
val = I915_READ(DPLL_CTRL2);
val &= ~(DPLL_CTRL2_DDI_CLK_OFF(port) |
- DPLL_CTRL2_DDI_CLK_SEL_MASK(port));
+ DPLL_CTRL2_DDI_CLK_SEL_MASK(port));
val |= (DPLL_CTRL2_DDI_CLK_SEL(pll->id, port) |
DPLL_CTRL2_DDI_SEL_OVERRIDE(port));
@@ -2124,6 +2126,8 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder,
} else if (INTEL_INFO(dev_priv)->gen < 9) {
I915_WRITE(PORT_CLK_SEL(port), hsw_pll_to_ddi_pll_sel(pll));
}
+
+ mutex_unlock(&dev_priv->dpll_lock);
}
static void intel_ddi_clk_disable(struct intel_encoder *encoder)
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index 02f8bf101ccd..d28592e43512 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -22,6 +22,9 @@
*
*/
+#include <drm/drm_print.h>
+
+#include "intel_device_info.h"
#include "i915_drv.h"
#define PLATFORM_NAME(x) [INTEL_##x] = #x
@@ -67,21 +70,55 @@ const char *intel_platform_name(enum intel_platform platform)
return platform_names[platform];
}
-void intel_device_info_dump(struct drm_i915_private *dev_priv)
+void intel_device_info_dump_flags(const struct intel_device_info *info,
+ struct drm_printer *p)
{
- const struct intel_device_info *info = &dev_priv->info;
-
- DRM_DEBUG_DRIVER("i915 device info: platform=%s gen=%i pciid=0x%04x rev=0x%02x",
- intel_platform_name(info->platform),
- info->gen,
- dev_priv->drm.pdev->device,
- dev_priv->drm.pdev->revision);
-#define PRINT_FLAG(name) \
- DRM_DEBUG_DRIVER("i915 device info: " #name ": %s", yesno(info->name))
+#define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, yesno(info->name));
DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG);
#undef PRINT_FLAG
}
+static void sseu_dump(const struct sseu_dev_info *sseu, struct drm_printer *p)
+{
+ drm_printf(p, "slice mask: %04x\n", sseu->slice_mask);
+ drm_printf(p, "slice total: %u\n", hweight8(sseu->slice_mask));
+ drm_printf(p, "subslice total: %u\n", sseu_subslice_total(sseu));
+	drm_printf(p, "subslice mask: %04x\n", sseu->subslice_mask);
+ drm_printf(p, "subslice per slice: %u\n",
+ hweight8(sseu->subslice_mask));
+ drm_printf(p, "EU total: %u\n", sseu->eu_total);
+ drm_printf(p, "EU per subslice: %u\n", sseu->eu_per_subslice);
+ drm_printf(p, "has slice power gating: %s\n",
+ yesno(sseu->has_slice_pg));
+ drm_printf(p, "has subslice power gating: %s\n",
+ yesno(sseu->has_subslice_pg));
+ drm_printf(p, "has EU power gating: %s\n", yesno(sseu->has_eu_pg));
+}
+
+void intel_device_info_dump_runtime(const struct intel_device_info *info,
+ struct drm_printer *p)
+{
+ sseu_dump(&info->sseu, p);
+
+ drm_printf(p, "CS timestamp frequency: %u kHz\n",
+ info->cs_timestamp_frequency_khz);
+}
+
+void intel_device_info_dump(const struct intel_device_info *info,
+ struct drm_printer *p)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(info, struct drm_i915_private, info);
+
+ drm_printf(p, "pciid=0x%04x rev=0x%02x platform=%s gen=%i\n",
+ INTEL_DEVID(dev_priv),
+ INTEL_REVID(dev_priv),
+ intel_platform_name(info->platform),
+ info->gen);
+
+ intel_device_info_dump_flags(info, p);
+}
+
static void gen10_sseu_info_init(struct drm_i915_private *dev_priv)
{
struct sseu_dev_info *sseu = &mkwrite_device_info(dev_priv)->sseu;
@@ -403,24 +440,27 @@ static u32 read_timestamp_frequency(struct drm_i915_private *dev_priv)
freq = f24_mhz;
break;
}
- }
- /* Now figure out how the command stream's timestamp register
- * increments from this frequency (it might increment only
- * every few clock cycle).
- */
- freq >>= 3 - ((rpm_config_reg &
- GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK) >>
- GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT);
+ /* Now figure out how the command stream's timestamp
+ * register increments from this frequency (it might
+		 * increment only every few clock cycles).
+ */
+ freq >>= 3 - ((rpm_config_reg &
+ GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK) >>
+ GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT);
+ }
return freq;
}
- DRM_ERROR("Unknown gen, unable to compute command stream timestamp frequency\n");
+ MISSING_CASE("Unknown gen, unable to read command streamer timestamp frequency\n");
return 0;
}
-/*
+/**
+ * intel_device_info_runtime_init - initialize runtime info
+ * @info: intel device info struct
+ *
* Determine various intel_device_info fields at runtime.
*
* Use it when either:
@@ -433,9 +473,10 @@ static u32 read_timestamp_frequency(struct drm_i915_private *dev_priv)
* - after the PCH has been detected,
* - before the first usage of the fields it can tweak.
*/
-void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
+void intel_device_info_runtime_init(struct intel_device_info *info)
{
- struct intel_device_info *info = mkwrite_device_info(dev_priv);
+ struct drm_i915_private *dev_priv =
+ container_of(info, struct drm_i915_private, info);
enum pipe pipe;
if (INTEL_GEN(dev_priv) >= 10) {
@@ -543,22 +584,4 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
/* Initialize command stream timestamp frequency */
info->cs_timestamp_frequency_khz = read_timestamp_frequency(dev_priv);
-
- DRM_DEBUG_DRIVER("slice mask: %04x\n", info->sseu.slice_mask);
- DRM_DEBUG_DRIVER("slice total: %u\n", hweight8(info->sseu.slice_mask));
- DRM_DEBUG_DRIVER("subslice total: %u\n",
- sseu_subslice_total(&info->sseu));
- DRM_DEBUG_DRIVER("subslice mask %04x\n", info->sseu.subslice_mask);
- DRM_DEBUG_DRIVER("subslice per slice: %u\n",
- hweight8(info->sseu.subslice_mask));
- DRM_DEBUG_DRIVER("EU total: %u\n", info->sseu.eu_total);
- DRM_DEBUG_DRIVER("EU per subslice: %u\n", info->sseu.eu_per_subslice);
- DRM_DEBUG_DRIVER("has slice power gating: %s\n",
- info->sseu.has_slice_pg ? "y" : "n");
- DRM_DEBUG_DRIVER("has subslice power gating: %s\n",
- info->sseu.has_subslice_pg ? "y" : "n");
- DRM_DEBUG_DRIVER("has EU power gating: %s\n",
- info->sseu.has_eu_pg ? "y" : "n");
- DRM_DEBUG_DRIVER("CS timestamp frequency: %u kHz\n",
- info->cs_timestamp_frequency_khz);
}
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h
new file mode 100644
index 000000000000..49cb27bd04c1
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -0,0 +1,183 @@
+/*
+ * Copyright © 2014-2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _INTEL_DEVICE_INFO_H_
+#define _INTEL_DEVICE_INFO_H_
+
+#include "intel_display.h"
+
+struct drm_printer;
+struct drm_i915_private;
+
+/* Keep in gen based order, and chronological order within a gen */
+enum intel_platform {
+ INTEL_PLATFORM_UNINITIALIZED = 0,
+ /* gen2 */
+ INTEL_I830,
+ INTEL_I845G,
+ INTEL_I85X,
+ INTEL_I865G,
+ /* gen3 */
+ INTEL_I915G,
+ INTEL_I915GM,
+ INTEL_I945G,
+ INTEL_I945GM,
+ INTEL_G33,
+ INTEL_PINEVIEW,
+ /* gen4 */
+ INTEL_I965G,
+ INTEL_I965GM,
+ INTEL_G45,
+ INTEL_GM45,
+ /* gen5 */
+ INTEL_IRONLAKE,
+ /* gen6 */
+ INTEL_SANDYBRIDGE,
+ /* gen7 */
+ INTEL_IVYBRIDGE,
+ INTEL_VALLEYVIEW,
+ INTEL_HASWELL,
+ /* gen8 */
+ INTEL_BROADWELL,
+ INTEL_CHERRYVIEW,
+ /* gen9 */
+ INTEL_SKYLAKE,
+ INTEL_BROXTON,
+ INTEL_KABYLAKE,
+ INTEL_GEMINILAKE,
+ INTEL_COFFEELAKE,
+ /* gen10 */
+ INTEL_CANNONLAKE,
+ INTEL_MAX_PLATFORMS
+};
+
+#define DEV_INFO_FOR_EACH_FLAG(func) \
+ func(is_mobile); \
+ func(is_lp); \
+ func(is_alpha_support); \
+ /* Keep has_* in alphabetical order */ \
+ func(has_64bit_reloc); \
+ func(has_aliasing_ppgtt); \
+ func(has_csr); \
+ func(has_ddi); \
+ func(has_dp_mst); \
+ func(has_reset_engine); \
+ func(has_fbc); \
+ func(has_fpga_dbg); \
+ func(has_full_ppgtt); \
+ func(has_full_48bit_ppgtt); \
+ func(has_gmch_display); \
+ func(has_guc); \
+ func(has_guc_ct); \
+ func(has_hotplug); \
+ func(has_l3_dpf); \
+ func(has_llc); \
+ func(has_logical_ring_contexts); \
+ func(has_logical_ring_preemption); \
+ func(has_overlay); \
+ func(has_pooled_eu); \
+ func(has_psr); \
+ func(has_rc6); \
+ func(has_rc6p); \
+ func(has_resource_streamer); \
+ func(has_runtime_pm); \
+ func(has_snoop); \
+ func(unfenced_needs_alignment); \
+ func(cursor_needs_physical); \
+ func(hws_needs_physical); \
+ func(overlay_needs_physical); \
+ func(supports_tv); \
+ func(has_ipc);
+
+struct sseu_dev_info {
+ u8 slice_mask;
+ u8 subslice_mask;
+ u8 eu_total;
+ u8 eu_per_subslice;
+ u8 min_eu_in_pool;
+ /* For each slice, which subslice(s) has(have) 7 EUs (bitfield)? */
+ u8 subslice_7eu[3];
+ u8 has_slice_pg:1;
+ u8 has_subslice_pg:1;
+ u8 has_eu_pg:1;
+};
+
+struct intel_device_info {
+ u16 device_id;
+ u16 gen_mask;
+
+ u8 gen;
+ u8 gt; /* GT number, 0 if undefined */
+ u8 num_rings;
+ u8 ring_mask; /* Rings supported by the HW */
+
+ enum intel_platform platform;
+ u32 platform_mask;
+
+ u32 display_mmio_offset;
+
+ u8 num_pipes;
+ u8 num_sprites[I915_MAX_PIPES];
+ u8 num_scalers[I915_MAX_PIPES];
+
+ unsigned int page_sizes; /* page sizes supported by the HW */
+
+#define DEFINE_FLAG(name) u8 name:1
+ DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG);
+#undef DEFINE_FLAG
+ u16 ddb_size; /* in blocks */
+
+ /* Register offsets for the various display pipes and transcoders */
+ int pipe_offsets[I915_MAX_TRANSCODERS];
+ int trans_offsets[I915_MAX_TRANSCODERS];
+ int palette_offsets[I915_MAX_PIPES];
+ int cursor_offsets[I915_MAX_PIPES];
+
+ /* Slice/subslice/EU info */
+ struct sseu_dev_info sseu;
+
+ u32 cs_timestamp_frequency_khz;
+
+ struct color_luts {
+ u16 degamma_lut_size;
+ u16 gamma_lut_size;
+ } color;
+};
+
+static inline unsigned int sseu_subslice_total(const struct sseu_dev_info *sseu)
+{
+ return hweight8(sseu->slice_mask) * hweight8(sseu->subslice_mask);
+}
+
+const char *intel_platform_name(enum intel_platform platform);
+
+void intel_device_info_runtime_init(struct intel_device_info *info);
+void intel_device_info_dump(const struct intel_device_info *info,
+ struct drm_printer *p);
+void intel_device_info_dump_flags(const struct intel_device_info *info,
+ struct drm_printer *p);
+void intel_device_info_dump_runtime(const struct intel_device_info *info,
+ struct drm_printer *p);
+
+#endif
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 1f7e312d0d0d..0cd355978ab4 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2639,7 +2639,6 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct drm_i915_gem_object *obj = NULL;
struct drm_mode_fb_cmd2 mode_cmd = { 0 };
struct drm_framebuffer *fb = &plane_config->fb->base;
@@ -2655,7 +2654,7 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
/* If the FB is too big, just don't use it since fbdev is not very
* important and we should probably use that space with FBC or other
* features. */
- if (size_aligned * 2 > ggtt->stolen_usable_size)
+ if (size_aligned * 2 > dev_priv->stolen_usable_size)
return false;
mutex_lock(&dev->struct_mutex);
@@ -3074,6 +3073,12 @@ int skl_check_plane_surface(struct intel_plane_state *plane_state)
unsigned int rotation = plane_state->base.rotation;
int ret;
+ if (rotation & DRM_MODE_REFLECT_X &&
+ fb->modifier == DRM_FORMAT_MOD_LINEAR) {
+ DRM_DEBUG_KMS("horizontal flip is not supported with linear surface formats\n");
+ return -EINVAL;
+ }
+
if (!plane_state->base.visible)
return 0;
@@ -3454,9 +3459,9 @@ static u32 skl_plane_ctl_tiling(uint64_t fb_modifier)
return 0;
}
-static u32 skl_plane_ctl_rotation(unsigned int rotation)
+static u32 skl_plane_ctl_rotate(unsigned int rotate)
{
- switch (rotation) {
+ switch (rotate) {
case DRM_MODE_ROTATE_0:
break;
/*
@@ -3470,7 +3475,22 @@ static u32 skl_plane_ctl_rotation(unsigned int rotation)
case DRM_MODE_ROTATE_270:
return PLANE_CTL_ROTATE_90;
default:
- MISSING_CASE(rotation);
+ MISSING_CASE(rotate);
+ }
+
+ return 0;
+}
+
+static u32 cnl_plane_ctl_flip(unsigned int reflect)
+{
+ switch (reflect) {
+ case 0:
+ break;
+ case DRM_MODE_REFLECT_X:
+ return PLANE_CTL_FLIP_HORIZONTAL;
+ case DRM_MODE_REFLECT_Y:
+ default:
+ MISSING_CASE(reflect);
}
return 0;
@@ -3498,7 +3518,11 @@ u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
plane_ctl |= skl_plane_ctl_format(fb->format->format);
plane_ctl |= skl_plane_ctl_tiling(fb->modifier);
- plane_ctl |= skl_plane_ctl_rotation(rotation);
+ plane_ctl |= skl_plane_ctl_rotate(rotation & DRM_MODE_ROTATE_MASK);
+
+ if (INTEL_GEN(dev_priv) >= 10)
+ plane_ctl |= cnl_plane_ctl_flip(rotation &
+ DRM_MODE_REFLECT_MASK);
if (key->flags & I915_SET_COLORKEY_DESTINATION)
plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION;
@@ -9695,111 +9719,27 @@ err:
return ERR_PTR(ret);
}
-static u32
-intel_framebuffer_pitch_for_width(int width, int bpp)
-{
- u32 pitch = DIV_ROUND_UP(width * bpp, 8);
- return ALIGN(pitch, 64);
-}
-
-static u32
-intel_framebuffer_size_for_mode(const struct drm_display_mode *mode, int bpp)
-{
- u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
- return PAGE_ALIGN(pitch * mode->vdisplay);
-}
-
-static struct drm_framebuffer *
-intel_framebuffer_create_for_mode(struct drm_device *dev,
- const struct drm_display_mode *mode,
- int depth, int bpp)
-{
- struct drm_framebuffer *fb;
- struct drm_i915_gem_object *obj;
- struct drm_mode_fb_cmd2 mode_cmd = { 0 };
-
- obj = i915_gem_object_create(to_i915(dev),
- intel_framebuffer_size_for_mode(mode, bpp));
- if (IS_ERR(obj))
- return ERR_CAST(obj);
-
- mode_cmd.width = mode->hdisplay;
- mode_cmd.height = mode->vdisplay;
- mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width,
- bpp);
- mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
-
- fb = intel_framebuffer_create(obj, &mode_cmd);
- if (IS_ERR(fb))
- i915_gem_object_put(obj);
-
- return fb;
-}
-
-static struct drm_framebuffer *
-mode_fits_in_fbdev(struct drm_device *dev,
- const struct drm_display_mode *mode)
-{
-#ifdef CONFIG_DRM_FBDEV_EMULATION
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_i915_gem_object *obj;
- struct drm_framebuffer *fb;
-
- if (!dev_priv->fbdev)
- return NULL;
-
- if (!dev_priv->fbdev->fb)
- return NULL;
-
- obj = dev_priv->fbdev->fb->obj;
- BUG_ON(!obj);
-
- fb = &dev_priv->fbdev->fb->base;
- if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay,
- fb->format->cpp[0] * 8))
- return NULL;
-
- if (obj->base.size < mode->vdisplay * fb->pitches[0])
- return NULL;
-
- drm_framebuffer_get(fb);
- return fb;
-#else
- return NULL;
-#endif
-}
-
-static int intel_modeset_setup_plane_state(struct drm_atomic_state *state,
- struct drm_crtc *crtc,
- const struct drm_display_mode *mode,
- struct drm_framebuffer *fb,
- int x, int y)
+static int intel_modeset_disable_planes(struct drm_atomic_state *state,
+ struct drm_crtc *crtc)
{
+ struct drm_plane *plane;
struct drm_plane_state *plane_state;
- int hdisplay, vdisplay;
- int ret;
-
- plane_state = drm_atomic_get_plane_state(state, crtc->primary);
- if (IS_ERR(plane_state))
- return PTR_ERR(plane_state);
-
- if (mode)
- drm_mode_get_hv_timing(mode, &hdisplay, &vdisplay);
- else
- hdisplay = vdisplay = 0;
+ int ret, i;
- ret = drm_atomic_set_crtc_for_plane(plane_state, fb ? crtc : NULL);
+ ret = drm_atomic_add_affected_planes(state, crtc);
if (ret)
return ret;
- drm_atomic_set_fb_for_plane(plane_state, fb);
- plane_state->crtc_x = 0;
- plane_state->crtc_y = 0;
- plane_state->crtc_w = hdisplay;
- plane_state->crtc_h = vdisplay;
- plane_state->src_x = x << 16;
- plane_state->src_y = y << 16;
- plane_state->src_w = hdisplay << 16;
- plane_state->src_h = vdisplay << 16;
+
+ for_each_new_plane_in_state(state, plane, plane_state, i) {
+ if (plane_state->crtc != crtc)
+ continue;
+
+ ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
+ if (ret)
+ return ret;
+
+ drm_atomic_set_fb_for_plane(plane_state, NULL);
+ }
return 0;
}
@@ -9817,7 +9757,6 @@ int intel_get_load_detect_pipe(struct drm_connector *connector,
struct drm_crtc *crtc = NULL;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_framebuffer *fb;
struct drm_mode_config *config = &dev->mode_config;
struct drm_atomic_state *state = NULL, *restore_state = NULL;
struct drm_connector_state *connector_state;
@@ -9885,10 +9824,6 @@ int intel_get_load_detect_pipe(struct drm_connector *connector,
found:
intel_crtc = to_intel_crtc(crtc);
- ret = drm_modeset_lock(&crtc->primary->mutex, ctx);
- if (ret)
- goto fail;
-
state = drm_atomic_state_alloc(dev);
restore_state = drm_atomic_state_alloc(dev);
if (!state || !restore_state) {
@@ -9920,40 +9855,17 @@ found:
if (!mode)
mode = &load_detect_mode;
- /* We need a framebuffer large enough to accommodate all accesses
- * that the plane may generate whilst we perform load detection.
- * We can not rely on the fbcon either being present (we get called
- * during its initialisation to detect all boot displays, or it may
- * not even exist) or that it is large enough to satisfy the
- * requested mode.
- */
- fb = mode_fits_in_fbdev(dev, mode);
- if (fb == NULL) {
- DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
- fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
- } else
- DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
- if (IS_ERR(fb)) {
- DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
- ret = PTR_ERR(fb);
- goto fail;
- }
-
- ret = intel_modeset_setup_plane_state(state, crtc, mode, fb, 0, 0);
+ ret = drm_atomic_set_mode_for_crtc(&crtc_state->base, mode);
if (ret)
goto fail;
- drm_framebuffer_put(fb);
-
- ret = drm_atomic_set_mode_for_crtc(&crtc_state->base, mode);
+ ret = intel_modeset_disable_planes(state, crtc);
if (ret)
goto fail;
ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
if (!ret)
ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc));
- if (!ret)
- ret = PTR_ERR_OR_ZERO(drm_atomic_get_plane_state(restore_state, crtc->primary));
if (ret) {
DRM_DEBUG_KMS("Failed to create a copy of old state to restore: %i\n", ret);
goto fail;
@@ -10967,31 +10879,6 @@ fail:
return ret;
}
-static void
-intel_modeset_update_crtc_state(struct drm_atomic_state *state)
-{
- struct drm_crtc *crtc;
- struct drm_crtc_state *new_crtc_state;
- int i;
-
- /* Double check state. */
- for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
- to_intel_crtc(crtc)->config = to_intel_crtc_state(new_crtc_state);
-
- /*
- * Update legacy state to satisfy fbc code. This can
- * be removed when fbc uses the atomic state.
- */
- if (drm_atomic_get_existing_plane_state(state, crtc->primary)) {
- struct drm_plane_state *plane_state = crtc->primary->state;
-
- crtc->primary->fb = plane_state->fb;
- crtc->x = plane_state->src_x >> 16;
- crtc->y = plane_state->src_y >> 16;
- }
- }
-}
-
static bool intel_fuzzy_clock_check(int clock1, int clock2)
{
int diff;
@@ -12364,9 +12251,9 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
}
}
- /* Only after disabling all output pipelines that will be changed can we
- * update the the output configuration. */
- intel_modeset_update_crtc_state(state);
+ /* FIXME: Eventually get rid of our intel_crtc->config pointer */
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, i)
+ to_intel_crtc(crtc)->config = to_intel_crtc_state(new_crtc_state);
if (intel_state->modeset) {
drm_atomic_helper_update_legacy_modeset_state(state->dev, state);
@@ -12596,11 +12483,15 @@ static int intel_atomic_commit(struct drm_device *dev,
INIT_WORK(&state->commit_work, intel_atomic_commit_work);
i915_sw_fence_commit(&intel_state->commit_ready);
- if (nonblock)
+ if (nonblock && intel_state->modeset) {
+ queue_work(dev_priv->modeset_wq, &state->commit_work);
+ } else if (nonblock) {
queue_work(system_unbound_wq, &state->commit_work);
- else
+ } else {
+ if (intel_state->modeset)
+ flush_workqueue(dev_priv->modeset_wq);
intel_atomic_commit_tail(state);
-
+ }
return 0;
}
@@ -13265,7 +13156,7 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
primary->frontbuffer_bit = INTEL_FRONTBUFFER_PRIMARY(pipe);
primary->check_plane = intel_check_primary_plane;
- if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
+ if (INTEL_GEN(dev_priv) >= 10) {
intel_primary_formats = skl_primary_formats;
num_formats = ARRAY_SIZE(skl_primary_formats);
modifiers = skl_format_modifiers_ccs;
@@ -13327,7 +13218,12 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
if (ret)
goto fail;
- if (INTEL_GEN(dev_priv) >= 9) {
+ if (INTEL_GEN(dev_priv) >= 10) {
+ supported_rotations =
+ DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
+ DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270 |
+ DRM_MODE_REFLECT_X;
+ } else if (INTEL_GEN(dev_priv) >= 9) {
supported_rotations =
DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
@@ -14558,6 +14454,8 @@ int intel_modeset_init(struct drm_device *dev)
enum pipe pipe;
struct intel_crtc *crtc;
+ dev_priv->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
+
drm_mode_config_init(dev);
dev->mode_config.min_width = 0;
@@ -14621,7 +14519,7 @@ int intel_modeset_init(struct drm_device *dev)
dev->mode_config.cursor_height = MAX_CURSOR_HEIGHT;
}
- dev->mode_config.fb_base = ggtt->mappable_base;
+ dev->mode_config.fb_base = ggtt->gmadr.start;
DRM_DEBUG_KMS("%d display pipe%s available.\n",
INTEL_INFO(dev_priv)->num_pipes,
@@ -15362,6 +15260,8 @@ void intel_modeset_cleanup(struct drm_device *dev)
intel_cleanup_gt_powersave(dev_priv);
intel_teardown_gmbus(dev_priv);
+
+ destroy_workqueue(dev_priv->modeset_wq);
}
void intel_connector_attach_encoder(struct intel_connector *connector,
diff --git a/drivers/gpu/drm/i915/intel_display.h b/drivers/gpu/drm/i915/intel_display.h
new file mode 100644
index 000000000000..a0d2b6169361
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_display.h
@@ -0,0 +1,321 @@
+/*
+ * Copyright © 2006-2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _INTEL_DISPLAY_H_
+#define _INTEL_DISPLAY_H_
+
+enum pipe {
+ INVALID_PIPE = -1,
+
+ PIPE_A = 0,
+ PIPE_B,
+ PIPE_C,
+ _PIPE_EDP,
+
+ I915_MAX_PIPES = _PIPE_EDP
+};
+
+#define pipe_name(p) ((p) + 'A')
+
+enum transcoder {
+ TRANSCODER_A = 0,
+ TRANSCODER_B,
+ TRANSCODER_C,
+ TRANSCODER_EDP,
+ TRANSCODER_DSI_A,
+ TRANSCODER_DSI_C,
+
+ I915_MAX_TRANSCODERS
+};
+
+static inline const char *transcoder_name(enum transcoder transcoder)
+{
+ switch (transcoder) {
+ case TRANSCODER_A:
+ return "A";
+ case TRANSCODER_B:
+ return "B";
+ case TRANSCODER_C:
+ return "C";
+ case TRANSCODER_EDP:
+ return "EDP";
+ case TRANSCODER_DSI_A:
+ return "DSI A";
+ case TRANSCODER_DSI_C:
+ return "DSI C";
+ default:
+ return "<invalid>";
+ }
+}
+
+static inline bool transcoder_is_dsi(enum transcoder transcoder)
+{
+ return transcoder == TRANSCODER_DSI_A || transcoder == TRANSCODER_DSI_C;
+}
+
+/*
+ * Global legacy plane identifier. Valid only for primary/sprite
+ * planes on pre-g4x, and only for primary planes on g4x-bdw.
+ */
+enum i9xx_plane_id {
+ PLANE_A,
+ PLANE_B,
+ PLANE_C,
+};
+
+#define plane_name(p) ((p) + 'A')
+#define sprite_name(p, s) ((p) * INTEL_INFO(dev_priv)->num_sprites[(p)] + (s) + 'A')
+
+/*
+ * Per-pipe plane identifier.
+ * I915_MAX_PLANES in the enum below is the maximum (across all platforms)
+ * number of planes per CRTC. Not all platforms really have this many planes,
+ * which means some arrays of size I915_MAX_PLANES may have unused entries
+ * between the topmost sprite plane and the cursor plane.
+ *
+ * This is expected to be passed to various register macros
+ * (eg. PLANE_CTL(), PS_PLANE_SEL(), etc.) so adjust with care.
+ */
+enum plane_id {
+ PLANE_PRIMARY,
+ PLANE_SPRITE0,
+ PLANE_SPRITE1,
+ PLANE_SPRITE2,
+ PLANE_CURSOR,
+
+ I915_MAX_PLANES,
+};
+
+#define for_each_plane_id_on_crtc(__crtc, __p) \
+ for ((__p) = PLANE_PRIMARY; (__p) < I915_MAX_PLANES; (__p)++) \
+ for_each_if((__crtc)->plane_ids_mask & BIT(__p))
+
+enum port {
+ PORT_NONE = -1,
+
+ PORT_A = 0,
+ PORT_B,
+ PORT_C,
+ PORT_D,
+ PORT_E,
+
+ I915_MAX_PORTS
+};
+
+#define port_name(p) ((p) + 'A')
+
+enum dpio_channel {
+ DPIO_CH0,
+ DPIO_CH1
+};
+
+enum dpio_phy {
+ DPIO_PHY0,
+ DPIO_PHY1,
+ DPIO_PHY2,
+};
+
+#define I915_NUM_PHYS_VLV 2
+
+enum intel_display_power_domain {
+ POWER_DOMAIN_PIPE_A,
+ POWER_DOMAIN_PIPE_B,
+ POWER_DOMAIN_PIPE_C,
+ POWER_DOMAIN_PIPE_A_PANEL_FITTER,
+ POWER_DOMAIN_PIPE_B_PANEL_FITTER,
+ POWER_DOMAIN_PIPE_C_PANEL_FITTER,
+ POWER_DOMAIN_TRANSCODER_A,
+ POWER_DOMAIN_TRANSCODER_B,
+ POWER_DOMAIN_TRANSCODER_C,
+ POWER_DOMAIN_TRANSCODER_EDP,
+ POWER_DOMAIN_TRANSCODER_DSI_A,
+ POWER_DOMAIN_TRANSCODER_DSI_C,
+ POWER_DOMAIN_PORT_DDI_A_LANES,
+ POWER_DOMAIN_PORT_DDI_B_LANES,
+ POWER_DOMAIN_PORT_DDI_C_LANES,
+ POWER_DOMAIN_PORT_DDI_D_LANES,
+ POWER_DOMAIN_PORT_DDI_E_LANES,
+ POWER_DOMAIN_PORT_DDI_A_IO,
+ POWER_DOMAIN_PORT_DDI_B_IO,
+ POWER_DOMAIN_PORT_DDI_C_IO,
+ POWER_DOMAIN_PORT_DDI_D_IO,
+ POWER_DOMAIN_PORT_DDI_E_IO,
+ POWER_DOMAIN_PORT_DSI,
+ POWER_DOMAIN_PORT_CRT,
+ POWER_DOMAIN_PORT_OTHER,
+ POWER_DOMAIN_VGA,
+ POWER_DOMAIN_AUDIO,
+ POWER_DOMAIN_PLLS,
+ POWER_DOMAIN_AUX_A,
+ POWER_DOMAIN_AUX_B,
+ POWER_DOMAIN_AUX_C,
+ POWER_DOMAIN_AUX_D,
+ POWER_DOMAIN_GMBUS,
+ POWER_DOMAIN_MODESET,
+ POWER_DOMAIN_GT_IRQ,
+ POWER_DOMAIN_INIT,
+
+ POWER_DOMAIN_NUM,
+};
+
+#define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
+#define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \
+ ((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER)
+#define POWER_DOMAIN_TRANSCODER(tran) \
+ ((tran) == TRANSCODER_EDP ? POWER_DOMAIN_TRANSCODER_EDP : \
+ (tran) + POWER_DOMAIN_TRANSCODER_A)
+
+/* Used by dp and fdi links */
+struct intel_link_m_n {
+ u32 tu;
+ u32 gmch_m;
+ u32 gmch_n;
+ u32 link_m;
+ u32 link_n;
+};
+
+#define for_each_pipe(__dev_priv, __p) \
+ for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++)
+
+#define for_each_pipe_masked(__dev_priv, __p, __mask) \
+ for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++) \
+ for_each_if((__mask) & BIT(__p))
+
+#define for_each_universal_plane(__dev_priv, __pipe, __p) \
+ for ((__p) = 0; \
+ (__p) < INTEL_INFO(__dev_priv)->num_sprites[(__pipe)] + 1; \
+ (__p)++)
+
+#define for_each_sprite(__dev_priv, __p, __s) \
+ for ((__s) = 0; \
+ (__s) < INTEL_INFO(__dev_priv)->num_sprites[(__p)]; \
+ (__s)++)
+
+#define for_each_port_masked(__port, __ports_mask) \
+ for ((__port) = PORT_A; (__port) < I915_MAX_PORTS; (__port)++) \
+ for_each_if((__ports_mask) & BIT(__port))
+
+#define for_each_crtc(dev, crtc) \
+ list_for_each_entry(crtc, &(dev)->mode_config.crtc_list, head)
+
+#define for_each_intel_plane(dev, intel_plane) \
+ list_for_each_entry(intel_plane, \
+ &(dev)->mode_config.plane_list, \
+ base.head)
+
+#define for_each_intel_plane_mask(dev, intel_plane, plane_mask) \
+ list_for_each_entry(intel_plane, \
+ &(dev)->mode_config.plane_list, \
+ base.head) \
+ for_each_if((plane_mask) & \
+ BIT(drm_plane_index(&intel_plane->base)))
+
+#define for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) \
+ list_for_each_entry(intel_plane, \
+ &(dev)->mode_config.plane_list, \
+ base.head) \
+ for_each_if((intel_plane)->pipe == (intel_crtc)->pipe)
+
+#define for_each_intel_crtc(dev, intel_crtc) \
+ list_for_each_entry(intel_crtc, \
+ &(dev)->mode_config.crtc_list, \
+ base.head)
+
+#define for_each_intel_crtc_mask(dev, intel_crtc, crtc_mask) \
+ list_for_each_entry(intel_crtc, \
+ &(dev)->mode_config.crtc_list, \
+ base.head) \
+ for_each_if((crtc_mask) & BIT(drm_crtc_index(&intel_crtc->base)))
+
+#define for_each_intel_encoder(dev, intel_encoder) \
+ list_for_each_entry(intel_encoder, \
+ &(dev)->mode_config.encoder_list, \
+ base.head)
+
+#define for_each_intel_connector_iter(intel_connector, iter) \
+ while ((intel_connector = to_intel_connector(drm_connector_list_iter_next(iter))))
+
+#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
+ list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
+ for_each_if((intel_encoder)->base.crtc == (__crtc))
+
+#define for_each_connector_on_encoder(dev, __encoder, intel_connector) \
+ list_for_each_entry((intel_connector), &(dev)->mode_config.connector_list, base.head) \
+ for_each_if((intel_connector)->base.encoder == (__encoder))
+
+#define for_each_power_domain(domain, mask) \
+ for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \
+ for_each_if(BIT_ULL(domain) & (mask))
+
+#define for_each_power_well(__dev_priv, __power_well) \
+ for ((__power_well) = (__dev_priv)->power_domains.power_wells; \
+ (__power_well) - (__dev_priv)->power_domains.power_wells < \
+ (__dev_priv)->power_domains.power_well_count; \
+ (__power_well)++)
+
+#define for_each_power_well_rev(__dev_priv, __power_well) \
+ for ((__power_well) = (__dev_priv)->power_domains.power_wells + \
+ (__dev_priv)->power_domains.power_well_count - 1; \
+ (__power_well) - (__dev_priv)->power_domains.power_wells >= 0; \
+ (__power_well)--)
+
+#define for_each_power_domain_well(__dev_priv, __power_well, __domain_mask) \
+ for_each_power_well(__dev_priv, __power_well) \
+ for_each_if((__power_well)->domains & (__domain_mask))
+
+#define for_each_power_domain_well_rev(__dev_priv, __power_well, __domain_mask) \
+ for_each_power_well_rev(__dev_priv, __power_well) \
+ for_each_if((__power_well)->domains & (__domain_mask))
+
+#define for_each_new_intel_plane_in_state(__state, plane, new_plane_state, __i) \
+ for ((__i) = 0; \
+ (__i) < (__state)->base.dev->mode_config.num_total_plane && \
+ ((plane) = to_intel_plane((__state)->base.planes[__i].ptr), \
+ (new_plane_state) = to_intel_plane_state((__state)->base.planes[__i].new_state), 1); \
+ (__i)++) \
+ for_each_if(plane)
+
+#define for_each_new_intel_crtc_in_state(__state, crtc, new_crtc_state, __i) \
+ for ((__i) = 0; \
+ (__i) < (__state)->base.dev->mode_config.num_crtc && \
+ ((crtc) = to_intel_crtc((__state)->base.crtcs[__i].ptr), \
+ (new_crtc_state) = to_intel_crtc_state((__state)->base.crtcs[__i].new_state), 1); \
+ (__i)++) \
+ for_each_if(crtc)
+
+#define for_each_oldnew_intel_plane_in_state(__state, plane, old_plane_state, new_plane_state, __i) \
+ for ((__i) = 0; \
+ (__i) < (__state)->base.dev->mode_config.num_total_plane && \
+ ((plane) = to_intel_plane((__state)->base.planes[__i].ptr), \
+ (old_plane_state) = to_intel_plane_state((__state)->base.planes[__i].old_state), \
+ (new_plane_state) = to_intel_plane_state((__state)->base.planes[__i].new_state), 1); \
+ (__i)++) \
+ for_each_if(plane)
+
+void intel_link_compute_m_n(int bpp, int nlanes,
+ int pixel_clock, int link_clock,
+ struct intel_link_m_n *m_n,
+ bool reduce_m_n);
+
+#endif
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 86d4c85c8725..ebdcbcbacb3c 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -1272,6 +1272,9 @@ static int cnl_init_workarounds(struct intel_engine_cs *engine)
if (ret)
return ret;
+ /* WaDisableEarlyEOT:cnl */
+ WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, DISABLE_EARLY_EOT);
+
return 0;
}
@@ -1513,10 +1516,8 @@ bool intel_engines_are_idle(struct drm_i915_private *dev_priv)
struct intel_engine_cs *engine;
enum intel_engine_id id;
- if (READ_ONCE(dev_priv->gt.active_requests))
- return false;
-
- /* If the driver is wedged, HW state may be very inconsistent and
+ /*
+ * If the driver is wedged, HW state may be very inconsistent and
* report that it is still busy, even though we have stopped using it.
*/
if (i915_terminally_wedged(&dev_priv->gpu_error))
@@ -1596,7 +1597,7 @@ void intel_engines_park(struct drm_i915_private *i915)
dev_err(i915->drm.dev,
"%s is not idle before parking\n",
engine->name);
- intel_engine_dump(engine, &p);
+ intel_engine_dump(engine, &p, NULL);
}
if (engine->park)
@@ -1666,7 +1667,38 @@ static void print_request(struct drm_printer *m,
rq->timeline->common->name);
}
-void intel_engine_dump(struct intel_engine_cs *engine, struct drm_printer *m)
+static void hexdump(struct drm_printer *m, const void *buf, size_t len)
+{
+ const size_t rowsize = 8 * sizeof(u32);
+ const void *prev = NULL;
+ bool skip = false;
+ size_t pos;
+
+ for (pos = 0; pos < len; pos += rowsize) {
+ char line[128];
+
+ if (prev && !memcmp(prev, buf + pos, rowsize)) {
+ if (!skip) {
+ drm_printf(m, "*\n");
+ skip = true;
+ }
+ continue;
+ }
+
+ WARN_ON_ONCE(hex_dump_to_buffer(buf + pos, len - pos,
+ rowsize, sizeof(u32),
+ line, sizeof(line),
+ false) >= sizeof(line));
+ drm_printf(m, "%08zx %s\n", pos, line);
+
+ prev = buf + pos;
+ skip = false;
+ }
+}
+
+void intel_engine_dump(struct intel_engine_cs *engine,
+ struct drm_printer *m,
+ const char *header, ...)
{
struct intel_breadcrumbs * const b = &engine->breadcrumbs;
const struct intel_engine_execlists * const execlists = &engine->execlists;
@@ -1674,17 +1706,29 @@ void intel_engine_dump(struct intel_engine_cs *engine, struct drm_printer *m)
struct drm_i915_private *dev_priv = engine->i915;
struct drm_i915_gem_request *rq;
struct rb_node *rb;
+ char hdr[80];
u64 addr;
- drm_printf(m, "%s\n", engine->name);
+ if (header) {
+ va_list ap;
+
+ va_start(ap, header);
+ drm_vprintf(m, header, &ap);
+ va_end(ap);
+ }
+
+ if (i915_terminally_wedged(&engine->i915->gpu_error))
+ drm_printf(m, "*** WEDGED ***\n");
+
drm_printf(m, "\tcurrent seqno %x, last %x, hangcheck %x [%d ms], inflight %d\n",
intel_engine_get_seqno(engine),
intel_engine_last_submit(engine),
engine->hangcheck.seqno,
jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp),
engine->timeline->inflight_seqnos);
- drm_printf(m, "\tReset count: %d\n",
- i915_reset_engine_count(error, engine));
+ drm_printf(m, "\tReset count: %d (global %d)\n",
+ i915_reset_engine_count(error, engine),
+ i915_reset_count(error));
rcu_read_lock();
@@ -1745,6 +1789,24 @@ void intel_engine_dump(struct intel_engine_cs *engine, struct drm_printer *m)
addr = intel_engine_get_last_batch_head(engine);
drm_printf(m, "\tBBADDR: 0x%08x_%08x\n",
upper_32_bits(addr), lower_32_bits(addr));
+ if (INTEL_GEN(dev_priv) >= 8)
+ addr = I915_READ64_2x32(RING_DMA_FADD(engine->mmio_base),
+ RING_DMA_FADD_UDW(engine->mmio_base));
+ else if (INTEL_GEN(dev_priv) >= 4)
+ addr = I915_READ(RING_DMA_FADD(engine->mmio_base));
+ else
+ addr = I915_READ(DMA_FADD_I8XX);
+ drm_printf(m, "\tDMA_FADDR: 0x%08x_%08x\n",
+ upper_32_bits(addr), lower_32_bits(addr));
+ if (INTEL_GEN(dev_priv) >= 4) {
+ drm_printf(m, "\tIPEIR: 0x%08x\n",
+ I915_READ(RING_IPEIR(engine->mmio_base)));
+ drm_printf(m, "\tIPEHR: 0x%08x\n",
+ I915_READ(RING_IPEHR(engine->mmio_base)));
+ } else {
+ drm_printf(m, "\tIPEIR: 0x%08x\n", I915_READ(IPEIR));
+ drm_printf(m, "\tIPEHR: 0x%08x\n", I915_READ(IPEHR));
+ }
if (HAS_EXECLISTS(dev_priv)) {
const u32 *hws = &engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX];
@@ -1786,12 +1848,12 @@ void intel_engine_dump(struct intel_engine_cs *engine, struct drm_printer *m)
rq = port_unpack(&execlists->port[idx], &count);
if (rq) {
- drm_printf(m, "\t\tELSP[%d] count=%d, ",
- idx, count);
- print_request(m, rq, "rq: ");
+ snprintf(hdr, sizeof(hdr),
+ "\t\tELSP[%d] count=%d, rq: ",
+ idx, count);
+ print_request(m, rq, hdr);
} else {
- drm_printf(m, "\t\tELSP[%d] idle\n",
- idx);
+ drm_printf(m, "\t\tELSP[%d] idle\n", idx);
}
}
drm_printf(m, "\t\tHW active? 0x%x\n", execlists->active);
@@ -1826,8 +1888,21 @@ void intel_engine_dump(struct intel_engine_cs *engine, struct drm_printer *m)
}
spin_unlock_irq(&b->rb_lock);
+ if (INTEL_GEN(dev_priv) >= 6) {
+ drm_printf(m, "\tRING_IMR: %08x\n", I915_READ_IMR(engine));
+ }
+
+ drm_printf(m, "IRQ? 0x%lx (breadcrumbs? %s) (execlists? %s)\n",
+ engine->irq_posted,
+ yesno(test_bit(ENGINE_IRQ_BREADCRUMB,
+ &engine->irq_posted)),
+ yesno(test_bit(ENGINE_IRQ_EXECLIST,
+ &engine->irq_posted)));
+
+ drm_printf(m, "HWSP:\n");
+ hexdump(m, engine->status_page.page_addr, PAGE_SIZE);
+
drm_printf(m, "Idle? %s\n", yesno(intel_engine_is_idle(engine)));
- drm_printf(m, "\n");
}
static u8 user_class_map[] = {
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index 4aefc658a5cf..f88c1b5dae4c 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -531,7 +531,6 @@ static int find_compression_threshold(struct drm_i915_private *dev_priv,
int size,
int fb_cpp)
{
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
int compression_threshold = 1;
int ret;
u64 end;
@@ -541,7 +540,7 @@ static int find_compression_threshold(struct drm_i915_private *dev_priv,
* If we enable FBC using a CFB on that memory range we'll get FIFO
* underruns, even if that range is not reserved by the BIOS. */
if (IS_BROADWELL(dev_priv) || IS_GEN9_BC(dev_priv))
- end = ggtt->stolen_size - 8 * 1024 * 1024;
+ end = resource_size(&dev_priv->dsm) - 8 * 1024 * 1024;
else
end = U64_MAX;
@@ -615,10 +614,16 @@ static int intel_fbc_alloc_cfb(struct intel_crtc *crtc)
fbc->compressed_llb = compressed_llb;
+ GEM_BUG_ON(range_overflows_t(u64, dev_priv->dsm.start,
+ fbc->compressed_fb.start,
+ U32_MAX));
+ GEM_BUG_ON(range_overflows_t(u64, dev_priv->dsm.start,
+ fbc->compressed_llb->start,
+ U32_MAX));
I915_WRITE(FBC_CFB_BASE,
- dev_priv->mm.stolen_base + fbc->compressed_fb.start);
+ dev_priv->dsm.start + fbc->compressed_fb.start);
I915_WRITE(FBC_LL_BASE,
- dev_priv->mm.stolen_base + compressed_llb->start);
+ dev_priv->dsm.start + compressed_llb->start);
}
DRM_DEBUG_KMS("reserved %llu bytes of contiguous stolen space for FBC, threshold: %d\n",
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index ea96682568e8..da48af11eb6b 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -115,7 +115,6 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
struct drm_framebuffer *fb;
struct drm_device *dev = helper->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct drm_mode_fb_cmd2 mode_cmd = {};
struct drm_i915_gem_object *obj;
int size, ret;
@@ -139,7 +138,7 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
* important and we should probably use that space with FBC or other
* features. */
obj = NULL;
- if (size * 2 < ggtt->stolen_usable_size)
+ if (size * 2 < dev_priv->stolen_usable_size)
obj = i915_gem_object_create_stolen(dev_priv, size);
if (obj == NULL)
obj = i915_gem_object_create(dev_priv, size);
diff --git a/drivers/gpu/drm/i915/intel_guc.c b/drivers/gpu/drm/i915/intel_guc.c
index d08e760252d4..3c6bf5a34c3c 100644
--- a/drivers/gpu/drm/i915/intel_guc.c
+++ b/drivers/gpu/drm/i915/intel_guc.c
@@ -61,6 +61,7 @@ void intel_guc_init_send_regs(struct intel_guc *guc)
void intel_guc_init_early(struct intel_guc *guc)
{
+ intel_guc_fw_init_early(guc);
intel_guc_ct_init_early(&guc->ct);
mutex_init(&guc->send_mutex);
@@ -68,6 +69,114 @@ void intel_guc_init_early(struct intel_guc *guc)
guc->notify = gen8_guc_raise_irq;
}
+int intel_guc_init_wq(struct intel_guc *guc)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+
+ /*
+	 * The GuC log buffer flush work item has to do register access to
+	 * send the ack to GuC, and this work item, if not synced before
+	 * suspend, can potentially get executed after the GFX device is
+	 * suspended.
+	 * By marking the WQ as freezable, we don't have to bother with
+	 * flushing this work item from the suspend hooks; the pending
+	 * work item, if any, will either be executed before the suspend
+	 * or scheduled later on resume. This way the handling of the work
+	 * item can be kept the same between system suspend & rpm suspend.
+ */
+ guc->log.runtime.flush_wq = alloc_ordered_workqueue("i915-guc_log",
+ WQ_HIGHPRI | WQ_FREEZABLE);
+ if (!guc->log.runtime.flush_wq)
+ return -ENOMEM;
+
+ /*
+	 * Even though both sending a GuC action and adding a new work item
+	 * to the GuC workqueue are serialized (each with its own locking),
+	 * since we're using multiple engines, it's possible that we're going
+	 * to issue a preempt request with two (or more - one for each
+	 * engine) work items in the GuC queue. In this situation, GuC may
+	 * submit all of them, which will make us very confused.
+	 * Our preemption contexts may even already be complete before we
+	 * even had the chance to send the preempt action to GuC! Rather
+	 * than introducing yet another lock, we can just use an ordered
+	 * workqueue to make sure we're always sending a single preemption
+	 * request with a single work item.
+ */
+ if (HAS_LOGICAL_RING_PREEMPTION(dev_priv) &&
+ USES_GUC_SUBMISSION(dev_priv)) {
+ guc->preempt_wq = alloc_ordered_workqueue("i915-guc_preempt",
+ WQ_HIGHPRI);
+ if (!guc->preempt_wq) {
+ destroy_workqueue(guc->log.runtime.flush_wq);
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+void intel_guc_fini_wq(struct intel_guc *guc)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+
+ if (HAS_LOGICAL_RING_PREEMPTION(dev_priv) &&
+ USES_GUC_SUBMISSION(dev_priv))
+ destroy_workqueue(guc->preempt_wq);
+
+ destroy_workqueue(guc->log.runtime.flush_wq);
+}
+
+static int guc_shared_data_create(struct intel_guc *guc)
+{
+ struct i915_vma *vma;
+ void *vaddr;
+
+ vma = intel_guc_allocate_vma(guc, PAGE_SIZE);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
+ if (IS_ERR(vaddr)) {
+ i915_vma_unpin_and_release(&vma);
+ return PTR_ERR(vaddr);
+ }
+
+ guc->shared_data = vma;
+ guc->shared_data_vaddr = vaddr;
+
+ return 0;
+}
+
+static void guc_shared_data_destroy(struct intel_guc *guc)
+{
+ i915_gem_object_unpin_map(guc->shared_data->obj);
+ i915_vma_unpin_and_release(&guc->shared_data);
+}
+
+int intel_guc_init(struct intel_guc *guc)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ int ret;
+
+ ret = guc_shared_data_create(guc);
+ if (ret)
+ return ret;
+ GEM_BUG_ON(!guc->shared_data);
+
+ /* We need to notify the guc whenever we change the GGTT */
+ i915_ggtt_enable_guc(dev_priv);
+
+ return 0;
+}
+
+void intel_guc_fini(struct intel_guc *guc)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+
+ i915_ggtt_disable_guc(dev_priv);
+ guc_shared_data_destroy(guc);
+}
+
static u32 get_gt_type(struct drm_i915_private *dev_priv)
{
/* XXX: GT type based on PCI device ID? field seems unused by fw */
@@ -128,7 +237,7 @@ void intel_guc_init_params(struct intel_guc *guc)
}
/* If GuC submission is enabled, set up additional parameters here */
- if (i915_modparams.enable_guc_submission) {
+ if (USES_GUC_SUBMISSION(dev_priv)) {
u32 ads = guc_ggtt_offset(guc->ads_vma) >> PAGE_SHIFT;
u32 pgs = guc_ggtt_offset(dev_priv->guc.stage_desc_pool);
u32 ctx_in_16 = GUC_MAX_STAGE_DESCRIPTORS / 16;
diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h
index 59856726d2bc..52856a97477d 100644
--- a/drivers/gpu/drm/i915/intel_guc.h
+++ b/drivers/gpu/drm/i915/intel_guc.h
@@ -119,6 +119,10 @@ static inline u32 guc_ggtt_offset(struct i915_vma *vma)
void intel_guc_init_early(struct intel_guc *guc);
void intel_guc_init_send_regs(struct intel_guc *guc);
void intel_guc_init_params(struct intel_guc *guc);
+int intel_guc_init_wq(struct intel_guc *guc);
+void intel_guc_fini_wq(struct intel_guc *guc);
+int intel_guc_init(struct intel_guc *guc);
+void intel_guc_fini(struct intel_guc *guc);
int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len);
int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len);
int intel_guc_sample_forcewake(struct intel_guc *guc);
diff --git a/drivers/gpu/drm/i915/intel_guc_fw.c b/drivers/gpu/drm/i915/intel_guc_fw.c
index 89862fa8ab42..cbc51c960425 100644
--- a/drivers/gpu/drm/i915/intel_guc_fw.c
+++ b/drivers/gpu/drm/i915/intel_guc_fw.c
@@ -56,45 +56,54 @@ MODULE_FIRMWARE(I915_KBL_GUC_UCODE);
#define I915_GLK_GUC_UCODE GUC_FW_PATH(glk, GLK_FW_MAJOR, GLK_FW_MINOR)
-/**
- * intel_guc_fw_select() - selects GuC firmware for uploading
- *
- * @guc: intel_guc struct
- *
- * Return: zero when we know firmware, non-zero in other case
- */
-int intel_guc_fw_select(struct intel_guc *guc)
+static void guc_fw_select(struct intel_uc_fw *guc_fw)
{
+ struct intel_guc *guc = container_of(guc_fw, struct intel_guc, fw);
struct drm_i915_private *dev_priv = guc_to_i915(guc);
- intel_uc_fw_init(&guc->fw, INTEL_UC_FW_TYPE_GUC);
+ GEM_BUG_ON(guc_fw->type != INTEL_UC_FW_TYPE_GUC);
+
+ if (!HAS_GUC(dev_priv))
+ return;
if (i915_modparams.guc_firmware_path) {
- guc->fw.path = i915_modparams.guc_firmware_path;
- guc->fw.major_ver_wanted = 0;
- guc->fw.minor_ver_wanted = 0;
+ guc_fw->path = i915_modparams.guc_firmware_path;
+ guc_fw->major_ver_wanted = 0;
+ guc_fw->minor_ver_wanted = 0;
} else if (IS_SKYLAKE(dev_priv)) {
- guc->fw.path = I915_SKL_GUC_UCODE;
- guc->fw.major_ver_wanted = SKL_FW_MAJOR;
- guc->fw.minor_ver_wanted = SKL_FW_MINOR;
+ guc_fw->path = I915_SKL_GUC_UCODE;
+ guc_fw->major_ver_wanted = SKL_FW_MAJOR;
+ guc_fw->minor_ver_wanted = SKL_FW_MINOR;
} else if (IS_BROXTON(dev_priv)) {
- guc->fw.path = I915_BXT_GUC_UCODE;
- guc->fw.major_ver_wanted = BXT_FW_MAJOR;
- guc->fw.minor_ver_wanted = BXT_FW_MINOR;
+ guc_fw->path = I915_BXT_GUC_UCODE;
+ guc_fw->major_ver_wanted = BXT_FW_MAJOR;
+ guc_fw->minor_ver_wanted = BXT_FW_MINOR;
} else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) {
- guc->fw.path = I915_KBL_GUC_UCODE;
- guc->fw.major_ver_wanted = KBL_FW_MAJOR;
- guc->fw.minor_ver_wanted = KBL_FW_MINOR;
+ guc_fw->path = I915_KBL_GUC_UCODE;
+ guc_fw->major_ver_wanted = KBL_FW_MAJOR;
+ guc_fw->minor_ver_wanted = KBL_FW_MINOR;
} else if (IS_GEMINILAKE(dev_priv)) {
- guc->fw.path = I915_GLK_GUC_UCODE;
- guc->fw.major_ver_wanted = GLK_FW_MAJOR;
- guc->fw.minor_ver_wanted = GLK_FW_MINOR;
+ guc_fw->path = I915_GLK_GUC_UCODE;
+ guc_fw->major_ver_wanted = GLK_FW_MAJOR;
+ guc_fw->minor_ver_wanted = GLK_FW_MINOR;
} else {
- DRM_ERROR("No GuC firmware known for platform with GuC!\n");
- return -ENOENT;
+ DRM_WARN("%s: No firmware known for this platform!\n",
+ intel_uc_fw_type_repr(guc_fw->type));
}
+}
- return 0;
+/**
+ * intel_guc_fw_init_early() - initializes GuC firmware struct
+ * @guc: intel_guc struct
+ *
+ * On platforms with GuC, this selects the firmware to be uploaded.
+ */
+void intel_guc_fw_init_early(struct intel_guc *guc)
+{
+ struct intel_uc_fw *guc_fw = &guc->fw;
+
+ intel_uc_fw_init(guc_fw, INTEL_UC_FW_TYPE_GUC);
+ guc_fw_select(guc_fw);
}
static void guc_prepare_xfer(struct intel_guc *guc)
diff --git a/drivers/gpu/drm/i915/intel_guc_fw.h b/drivers/gpu/drm/i915/intel_guc_fw.h
index 023f5baa9dd6..4ec5d3d9e2b0 100644
--- a/drivers/gpu/drm/i915/intel_guc_fw.h
+++ b/drivers/gpu/drm/i915/intel_guc_fw.h
@@ -27,7 +27,7 @@
struct intel_guc;
-int intel_guc_fw_select(struct intel_guc *guc);
+void intel_guc_fw_init_early(struct intel_guc *guc);
int intel_guc_fw_upload(struct intel_guc *guc);
#endif
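
How a missing firmware is now reported changes with this split; a short summary, inferred from the hunks in this diff rather than stated by the patch itself:

/*
 * guc_fw_select() can no longer fail: on a platform with no known firmware
 * it simply leaves guc_fw->path == NULL and emits a DRM_WARN. Whether a
 * firmware was actually selected is answered later by
 * intel_uc_fw_is_selected() and reported by intel_uc_sanitize_options()
 * (both appear further down in this diff).
 */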
diff --git a/drivers/gpu/drm/i915/intel_guc_log.c b/drivers/gpu/drm/i915/intel_guc_log.c
index 76d3eb1e4614..eaedd63e3819 100644
--- a/drivers/gpu/drm/i915/intel_guc_log.c
+++ b/drivers/gpu/drm/i915/intel_guc_log.c
@@ -411,30 +411,8 @@ static int guc_log_runtime_create(struct intel_guc *guc)
guc->log.runtime.relay_chan = guc_log_relay_chan;
INIT_WORK(&guc->log.runtime.flush_work, capture_logs_work);
-
- /*
- * GuC log buffer flush work item has to do register access to
- * send the ack to GuC and this work item, if not synced before
- * suspend, can potentially get executed after the GFX device is
- * suspended.
- * By marking the WQ as freezable, we don't have to bother about
- * flushing of this work item from the suspend hooks, the pending
- * work item if any will be either executed before the suspend
- * or scheduled later on resume. This way the handling of work
- * item can be kept same between system suspend & rpm suspend.
- */
- guc->log.runtime.flush_wq = alloc_ordered_workqueue("i915-guc_log",
- WQ_HIGHPRI | WQ_FREEZABLE);
- if (!guc->log.runtime.flush_wq) {
- DRM_ERROR("Couldn't allocate the wq for GuC logging\n");
- ret = -ENOMEM;
- goto err_relaychan;
- }
-
return 0;
-err_relaychan:
- relay_close(guc->log.runtime.relay_chan);
err_vaddr:
i915_gem_object_unpin_map(guc->log.vma->obj);
guc->log.runtime.buf_addr = NULL;
@@ -450,7 +428,6 @@ static void guc_log_runtime_destroy(struct intel_guc *guc)
if (!guc_log_has_runtime(guc))
return;
- destroy_workqueue(guc->log.runtime.flush_wq);
relay_close(guc->log.runtime.relay_chan);
i915_gem_object_unpin_map(guc->log.vma->obj);
guc->log.runtime.buf_addr = NULL;
@@ -505,7 +482,7 @@ static void guc_flush_logs(struct intel_guc *guc)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
- if (!i915_modparams.enable_guc_submission ||
+ if (!USES_GUC_SUBMISSION(dev_priv) ||
(i915_modparams.guc_log_level < 0))
return;
@@ -646,7 +623,7 @@ int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val)
void i915_guc_log_register(struct drm_i915_private *dev_priv)
{
- if (!i915_modparams.enable_guc_submission ||
+ if (!USES_GUC_SUBMISSION(dev_priv) ||
(i915_modparams.guc_log_level < 0))
return;
@@ -657,7 +634,7 @@ void i915_guc_log_register(struct drm_i915_private *dev_priv)
void i915_guc_log_unregister(struct drm_i915_private *dev_priv)
{
- if (!i915_modparams.enable_guc_submission)
+ if (!USES_GUC_SUBMISSION(dev_priv))
return;
mutex_lock(&dev_priv->drm.struct_mutex);
diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c
index 912ff143d531..4d2409466a3a 100644
--- a/drivers/gpu/drm/i915/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/intel_guc_submission.c
@@ -88,7 +88,7 @@ static inline bool is_high_priority(struct intel_guc_client *client)
client->priority == GUC_CLIENT_PRIORITY_HIGH);
}
-static int __reserve_doorbell(struct intel_guc_client *client)
+static int reserve_doorbell(struct intel_guc_client *client)
{
unsigned long offset;
unsigned long end;
@@ -120,7 +120,7 @@ static int __reserve_doorbell(struct intel_guc_client *client)
return 0;
}
-static void __unreserve_doorbell(struct intel_guc_client *client)
+static void unreserve_doorbell(struct intel_guc_client *client)
{
GEM_BUG_ON(client->doorbell_id == GUC_DOORBELL_INVALID);
@@ -188,32 +188,21 @@ static bool has_doorbell(struct intel_guc_client *client)
return test_bit(client->doorbell_id, client->guc->doorbell_bitmap);
}
-static int __create_doorbell(struct intel_guc_client *client)
+static void __create_doorbell(struct intel_guc_client *client)
{
struct guc_doorbell_info *doorbell;
- int err;
doorbell = __get_doorbell(client);
doorbell->db_status = GUC_DOORBELL_ENABLED;
doorbell->cookie = 0;
-
- err = __guc_allocate_doorbell(client->guc, client->stage_id);
- if (err) {
- doorbell->db_status = GUC_DOORBELL_DISABLED;
- DRM_ERROR("Couldn't create client %u doorbell: %d\n",
- client->stage_id, err);
- }
-
- return err;
}
-static int __destroy_doorbell(struct intel_guc_client *client)
+static void __destroy_doorbell(struct intel_guc_client *client)
{
struct drm_i915_private *dev_priv = guc_to_i915(client->guc);
struct guc_doorbell_info *doorbell;
u16 db_id = client->doorbell_id;
- GEM_BUG_ON(db_id >= GUC_DOORBELL_INVALID);
doorbell = __get_doorbell(client);
doorbell->db_status = GUC_DOORBELL_DISABLED;
@@ -225,50 +214,42 @@ static int __destroy_doorbell(struct intel_guc_client *client)
*/
if (wait_for_us(!(I915_READ(GEN8_DRBREGL(db_id)) & GEN8_DRB_VALID), 10))
WARN_ONCE(true, "Doorbell never became invalid after disable\n");
-
- return __guc_deallocate_doorbell(client->guc, client->stage_id);
}
static int create_doorbell(struct intel_guc_client *client)
{
int ret;
- ret = __reserve_doorbell(client);
- if (ret)
- return ret;
-
__update_doorbell_desc(client, client->doorbell_id);
+ __create_doorbell(client);
- ret = __create_doorbell(client);
- if (ret)
- goto err;
+ ret = __guc_allocate_doorbell(client->guc, client->stage_id);
+ if (ret) {
+ __destroy_doorbell(client);
+ __update_doorbell_desc(client, GUC_DOORBELL_INVALID);
+ DRM_ERROR("Couldn't create client %u doorbell: %d\n",
+ client->stage_id, ret);
+ return ret;
+ }
return 0;
-
-err:
- __update_doorbell_desc(client, GUC_DOORBELL_INVALID);
- __unreserve_doorbell(client);
- return ret;
}
static int destroy_doorbell(struct intel_guc_client *client)
{
- int err;
+ int ret;
GEM_BUG_ON(!has_doorbell(client));
- /* XXX: wait for any interrupts */
- /* XXX: wait for workqueue to drain */
-
- err = __destroy_doorbell(client);
- if (err)
- return err;
+ __destroy_doorbell(client);
+ ret = __guc_deallocate_doorbell(client->guc, client->stage_id);
+ if (ret)
+ DRM_ERROR("Couldn't destroy client %u doorbell: %d\n",
+ client->stage_id, ret);
__update_doorbell_desc(client, GUC_DOORBELL_INVALID);
- __unreserve_doorbell(client);
-
- return 0;
+ return ret;
}
static unsigned long __select_cacheline(struct intel_guc *guc)
@@ -447,33 +428,6 @@ static void guc_stage_desc_fini(struct intel_guc *guc,
memset(desc, 0, sizeof(*desc));
}
-static int guc_shared_data_create(struct intel_guc *guc)
-{
- struct i915_vma *vma;
- void *vaddr;
-
- vma = intel_guc_allocate_vma(guc, PAGE_SIZE);
- if (IS_ERR(vma))
- return PTR_ERR(vma);
-
- vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
- if (IS_ERR(vaddr)) {
- i915_vma_unpin_and_release(&vma);
- return PTR_ERR(vaddr);
- }
-
- guc->shared_data = vma;
- guc->shared_data_vaddr = vaddr;
-
- return 0;
-}
-
-static void guc_shared_data_destroy(struct intel_guc *guc)
-{
- i915_gem_object_unpin_map(guc->shared_data->obj);
- i915_vma_unpin_and_release(&guc->shared_data);
-}
-
/* Construct a Work Item and append it to the GuC's Work Queue */
static void guc_wq_item_append(struct intel_guc_client *client,
u32 target_engine, u32 context_desc,
@@ -866,83 +820,47 @@ static bool doorbell_ok(struct intel_guc *guc, u16 db_id)
return false;
}
-/*
- * If the GuC thinks that the doorbell is unassigned (e.g. because we reset and
- * reloaded the GuC FW) we can use this function to tell the GuC to reassign the
- * doorbell to the rightful owner.
- */
-static int __reset_doorbell(struct intel_guc_client *client, u16 db_id)
+static bool guc_verify_doorbells(struct intel_guc *guc)
{
- int err;
+ u16 db_id;
- __update_doorbell_desc(client, db_id);
- err = __create_doorbell(client);
- if (!err)
- err = __destroy_doorbell(client);
+ for (db_id = 0; db_id < GUC_NUM_DOORBELLS; ++db_id)
+ if (!doorbell_ok(guc, db_id))
+ return false;
- return err;
+ return true;
}
-/*
- * Set up & tear down each unused doorbell in turn, to ensure that all doorbell
- * HW is (re)initialised. For that end, we might have to borrow the first
- * client. Also, tell GuC about all the doorbells in use by all clients.
- * We do this because the KMD, the GuC and the doorbell HW can easily go out of
- * sync (e.g. we can reset the GuC, but not the doorbel HW).
- */
-static int guc_init_doorbell_hw(struct intel_guc *guc)
+static int guc_clients_doorbell_init(struct intel_guc *guc)
{
- struct intel_guc_client *client = guc->execbuf_client;
- bool recreate_first_client = false;
- u16 db_id;
int ret;
- /* For unused doorbells, make sure they are disabled */
- for_each_clear_bit(db_id, guc->doorbell_bitmap, GUC_NUM_DOORBELLS) {
- if (doorbell_ok(guc, db_id))
- continue;
-
- if (has_doorbell(client)) {
- /* Borrow execbuf_client (we will recreate it later) */
- destroy_doorbell(client);
- recreate_first_client = true;
- }
-
- ret = __reset_doorbell(client, db_id);
- WARN(ret, "Doorbell %u reset failed, err %d\n", db_id, ret);
- }
-
- if (recreate_first_client) {
- ret = __reserve_doorbell(client);
- if (unlikely(ret)) {
- DRM_ERROR("Couldn't re-reserve first client db: %d\n",
- ret);
- return ret;
- }
-
- __update_doorbell_desc(client, client->doorbell_id);
- }
-
- /* Now for every client (and not only execbuf_client) make sure their
- * doorbells are known by the GuC
- */
- ret = __create_doorbell(guc->execbuf_client);
+ ret = create_doorbell(guc->execbuf_client);
if (ret)
return ret;
- ret = __create_doorbell(guc->preempt_client);
+ ret = create_doorbell(guc->preempt_client);
if (ret) {
- __destroy_doorbell(guc->execbuf_client);
+ destroy_doorbell(guc->execbuf_client);
return ret;
}
- /* Read back & verify all (used & unused) doorbell registers */
- for (db_id = 0; db_id < GUC_NUM_DOORBELLS; ++db_id)
- WARN_ON(!doorbell_ok(guc, db_id));
-
return 0;
}
+static void guc_clients_doorbell_fini(struct intel_guc *guc)
+{
+ /*
+ * By the time we're here, GuC has already been reset.
+ * Instead of trying (in vain) to communicate with it, let's just
+ * clean up the doorbell HW and our internal state.
+ */
+ __destroy_doorbell(guc->preempt_client);
+ __update_doorbell_desc(guc->preempt_client, GUC_DOORBELL_INVALID);
+ __destroy_doorbell(guc->execbuf_client);
+ __update_doorbell_desc(guc->execbuf_client, GUC_DOORBELL_INVALID);
+}
+
/**
* guc_client_alloc() - Allocate an intel_guc_client
* @dev_priv: driver private data structure
@@ -1018,7 +936,7 @@ guc_client_alloc(struct drm_i915_private *dev_priv,
guc_proc_desc_init(guc, client);
guc_stage_desc_init(guc, client);
- ret = create_doorbell(client);
+ ret = reserve_doorbell(client);
if (ret)
goto err_vaddr;
@@ -1042,16 +960,7 @@ err_client:
static void guc_client_free(struct intel_guc_client *client)
{
- /*
- * XXX: wait for any outstanding submissions before freeing memory.
- * Be sure to drop any locks
- */
-
- /* FIXME: in many cases, by the time we get here the GuC has been
- * reset, so we cannot destroy the doorbell properly. Ignore the
- * error message for now
- */
- destroy_doorbell(client);
+ unreserve_doorbell(client);
guc_stage_desc_fini(client->guc, client);
i915_gem_object_unpin_map(client->vma->obj);
i915_vma_unpin_and_release(&client->vma);
@@ -1214,57 +1123,15 @@ static void guc_ads_destroy(struct intel_guc *guc)
i915_vma_unpin_and_release(&guc->ads_vma);
}
-static int guc_preempt_work_create(struct intel_guc *guc)
-{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- /*
- * Even though both sending GuC action, and adding a new workitem to
- * GuC workqueue are serialized (each with its own locking), since
- * we're using mutliple engines, it's possible that we're going to
- * issue a preempt request with two (or more - each for different
- * engine) workitems in GuC queue. In this situation, GuC may submit
- * all of them, which will make us very confused.
- * Our preemption contexts may even already be complete - before we
- * even had the chance to sent the preempt action to GuC!. Rather
- * than introducing yet another lock, we can just use ordered workqueue
- * to make sure we're always sending a single preemption request with a
- * single workitem.
- */
- guc->preempt_wq = alloc_ordered_workqueue("i915-guc_preempt",
- WQ_HIGHPRI);
- if (!guc->preempt_wq)
- return -ENOMEM;
-
- for_each_engine(engine, dev_priv, id) {
- guc->preempt_work[id].engine = engine;
- INIT_WORK(&guc->preempt_work[id].work, inject_preempt_context);
- }
-
- return 0;
-}
-
-static void guc_preempt_work_destroy(struct intel_guc *guc)
-{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- for_each_engine(engine, dev_priv, id)
- cancel_work_sync(&guc->preempt_work[id].work);
-
- destroy_workqueue(guc->preempt_wq);
- guc->preempt_wq = NULL;
-}
-
/*
* Set up the memory resources to be shared with the GuC (via the GGTT)
* at firmware loading time.
*/
int intel_guc_submission_init(struct intel_guc *guc)
{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
int ret;
if (guc->stage_desc_pool)
@@ -1279,33 +1146,29 @@ int intel_guc_submission_init(struct intel_guc *guc)
*/
GEM_BUG_ON(!guc->stage_desc_pool);
- ret = guc_shared_data_create(guc);
- if (ret)
- goto err_stage_desc_pool;
- GEM_BUG_ON(!guc->shared_data);
-
ret = intel_guc_log_create(guc);
if (ret < 0)
- goto err_shared_data;
-
- ret = guc_preempt_work_create(guc);
- if (ret)
- goto err_log;
- GEM_BUG_ON(!guc->preempt_wq);
+ goto err_stage_desc_pool;
ret = guc_ads_create(guc);
if (ret < 0)
- goto err_wq;
+ goto err_log;
GEM_BUG_ON(!guc->ads_vma);
+ WARN_ON(!guc_verify_doorbells(guc));
+ ret = guc_clients_create(guc);
+ if (ret)
+ return ret;
+
+ for_each_engine(engine, dev_priv, id) {
+ guc->preempt_work[id].engine = engine;
+ INIT_WORK(&guc->preempt_work[id].work, inject_preempt_context);
+ }
+
return 0;
-err_wq:
- guc_preempt_work_destroy(guc);
err_log:
intel_guc_log_destroy(guc);
-err_shared_data:
- guc_shared_data_destroy(guc);
err_stage_desc_pool:
guc_stage_desc_pool_destroy(guc);
return ret;
@@ -1313,10 +1176,18 @@ err_stage_desc_pool:
void intel_guc_submission_fini(struct intel_guc *guc)
{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ for_each_engine(engine, dev_priv, id)
+ cancel_work_sync(&guc->preempt_work[id].work);
+
+ guc_clients_destroy(guc);
+ WARN_ON(!guc_verify_doorbells(guc));
+
guc_ads_destroy(guc);
- guc_preempt_work_destroy(guc);
intel_guc_log_destroy(guc);
- guc_shared_data_destroy(guc);
guc_stage_desc_pool_destroy(guc);
}
@@ -1420,28 +1291,18 @@ int intel_guc_submission_enable(struct intel_guc *guc)
sizeof(struct guc_wq_item) *
I915_NUM_ENGINES > GUC_WQ_SIZE);
- /*
- * We're being called on both module initialization and on reset,
- * until this flow is changed, we're using regular client presence to
- * determine which case are we in, and whether we should allocate new
- * clients or just reset their workqueues.
- */
- if (!guc->execbuf_client) {
- err = guc_clients_create(guc);
- if (err)
- return err;
- } else {
- guc_reset_wq(guc->execbuf_client);
- guc_reset_wq(guc->preempt_client);
- }
+ GEM_BUG_ON(!guc->execbuf_client);
+
+ guc_reset_wq(guc->execbuf_client);
+ guc_reset_wq(guc->preempt_client);
err = intel_guc_sample_forcewake(guc);
if (err)
- goto err_free_clients;
+ return err;
- err = guc_init_doorbell_hw(guc);
+ err = guc_clients_doorbell_init(guc);
if (err)
- goto err_free_clients;
+ return err;
/* Take over from manual control of ELSP (execlists) */
guc_interrupts_capture(dev_priv);
@@ -1458,10 +1319,6 @@ int intel_guc_submission_enable(struct intel_guc *guc)
}
return 0;
-
-err_free_clients:
- guc_clients_destroy(guc);
- return err;
}
void intel_guc_submission_disable(struct intel_guc *guc)
@@ -1471,11 +1328,10 @@ void intel_guc_submission_disable(struct intel_guc *guc)
GEM_BUG_ON(dev_priv->gt.awake); /* GT should be parked first */
guc_interrupts_release(dev_priv);
+ guc_clients_doorbell_fini(guc);
/* Revert back to manual ELSP submission */
intel_engines_reset_default_submission(dev_priv);
-
- guc_clients_destroy(guc);
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
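
To make the reshuffled doorbell handling easier to follow, here is a rough lifecycle sketch pieced together from the hunks above (a reading aid, not part of the patch):

/*
 *   guc_client_alloc()             -> reserve_doorbell()
 *                                       (reserve a doorbell id in the bitmap)
 *   intel_guc_submission_enable()  -> guc_clients_doorbell_init()
 *                                       -> create_doorbell()
 *                                          (program the doorbell, notify GuC)
 *   intel_guc_submission_disable() -> guc_clients_doorbell_fini()
 *                                       -> __destroy_doorbell()
 *                                          (HW/state only, GuC already reset)
 *   guc_client_free()              -> unreserve_doorbell()
 */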
diff --git a/drivers/gpu/drm/i915/intel_guc_submission.h b/drivers/gpu/drm/i915/intel_guc_submission.h
index 021fe85c8f71..fb081cefef93 100644
--- a/drivers/gpu/drm/i915/intel_guc_submission.h
+++ b/drivers/gpu/drm/i915/intel_guc_submission.h
@@ -77,5 +77,7 @@ int intel_guc_submission_init(struct intel_guc *guc);
int intel_guc_submission_enable(struct intel_guc *guc);
void intel_guc_submission_disable(struct intel_guc *guc);
void intel_guc_submission_fini(struct intel_guc *guc);
+int intel_guc_preempt_work_create(struct intel_guc *guc);
+void intel_guc_preempt_work_destroy(struct intel_guc *guc);
#endif
diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c
index 126f7c769c69..a2fe7c8d4477 100644
--- a/drivers/gpu/drm/i915/intel_gvt.c
+++ b/drivers/gpu/drm/i915/intel_gvt.c
@@ -95,7 +95,7 @@ int intel_gvt_init(struct drm_i915_private *dev_priv)
return 0;
}
- if (i915_modparams.enable_guc_submission) {
+ if (USES_GUC_SUBMISSION(dev_priv)) {
DRM_ERROR("i915 GVT-g loading failed due to Graphics virtualization is not yet supported with GuC submission\n");
return -EIO;
}
diff --git a/drivers/gpu/drm/i915/intel_hangcheck.c b/drivers/gpu/drm/i915/intel_hangcheck.c
index 95bbb5a79c4f..31f01d64c021 100644
--- a/drivers/gpu/drm/i915/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/intel_hangcheck.c
@@ -349,13 +349,18 @@ static void hangcheck_accumulate_sample(struct intel_engine_cs *engine,
case ENGINE_ACTIVE_HEAD:
case ENGINE_ACTIVE_SUBUNITS:
- /* Seqno stuck with still active engine gets leeway,
+ /*
+ * Seqno stuck with still active engine gets leeway,
* in hopes that it is just a long shader.
*/
timeout = I915_SEQNO_DEAD_TIMEOUT;
break;
case ENGINE_DEAD:
+ if (drm_debug & DRM_UT_DRIVER) {
+ struct drm_printer p = drm_debug_printer("hangcheck");
+ intel_engine_dump(engine, &p, "%s", engine->name);
+ }
break;
default:
@@ -424,18 +429,18 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
for_each_engine(engine, dev_priv, id) {
- struct intel_engine_hangcheck cur_state, *hc = &cur_state;
const bool busy = intel_engine_has_waiter(engine);
+ struct intel_engine_hangcheck hc;
semaphore_clear_deadlocks(dev_priv);
- hangcheck_load_sample(engine, hc);
- hangcheck_accumulate_sample(engine, hc);
- hangcheck_store_sample(engine, hc);
+ hangcheck_load_sample(engine, &hc);
+ hangcheck_accumulate_sample(engine, &hc);
+ hangcheck_store_sample(engine, &hc);
if (engine->hangcheck.stalled) {
hung |= intel_engine_flag(engine);
- if (hc->action != ENGINE_DEAD)
+ if (hc.action != ENGINE_DEAD)
stuck |= intel_engine_flag(engine);
}
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index a40f35af225c..bced7b954d93 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -1383,7 +1383,7 @@ static bool hdmi_12bpc_possible(const struct intel_crtc_state *crtc_state)
}
}
- /* Display Wa #1139 */
+ /* Display WA #1139: glk */
if (IS_GLK_REVID(dev_priv, 0, GLK_REVID_A1) &&
crtc_state->base.adjusted_mode.htotal > 5460)
return false;
diff --git a/drivers/gpu/drm/i915/intel_huc.c b/drivers/gpu/drm/i915/intel_huc.c
index 98d17254593c..974be3defa70 100644
--- a/drivers/gpu/drm/i915/intel_huc.c
+++ b/drivers/gpu/drm/i915/intel_huc.c
@@ -77,43 +77,57 @@ MODULE_FIRMWARE(I915_KBL_HUC_UCODE);
#define I915_GLK_HUC_UCODE HUC_FW_PATH(glk, GLK_HUC_FW_MAJOR, \
GLK_HUC_FW_MINOR, GLK_BLD_NUM)
-/**
- * intel_huc_select_fw() - selects HuC firmware for loading
- * @huc: intel_huc struct
- */
-void intel_huc_select_fw(struct intel_huc *huc)
+static void huc_fw_select(struct intel_uc_fw *huc_fw)
{
+ struct intel_huc *huc = container_of(huc_fw, struct intel_huc, fw);
struct drm_i915_private *dev_priv = huc_to_i915(huc);
- intel_uc_fw_init(&huc->fw, INTEL_UC_FW_TYPE_HUC);
+ GEM_BUG_ON(huc_fw->type != INTEL_UC_FW_TYPE_HUC);
+
+ if (!HAS_HUC(dev_priv))
+ return;
if (i915_modparams.huc_firmware_path) {
- huc->fw.path = i915_modparams.huc_firmware_path;
- huc->fw.major_ver_wanted = 0;
- huc->fw.minor_ver_wanted = 0;
+ huc_fw->path = i915_modparams.huc_firmware_path;
+ huc_fw->major_ver_wanted = 0;
+ huc_fw->minor_ver_wanted = 0;
} else if (IS_SKYLAKE(dev_priv)) {
- huc->fw.path = I915_SKL_HUC_UCODE;
- huc->fw.major_ver_wanted = SKL_HUC_FW_MAJOR;
- huc->fw.minor_ver_wanted = SKL_HUC_FW_MINOR;
+ huc_fw->path = I915_SKL_HUC_UCODE;
+ huc_fw->major_ver_wanted = SKL_HUC_FW_MAJOR;
+ huc_fw->minor_ver_wanted = SKL_HUC_FW_MINOR;
} else if (IS_BROXTON(dev_priv)) {
- huc->fw.path = I915_BXT_HUC_UCODE;
- huc->fw.major_ver_wanted = BXT_HUC_FW_MAJOR;
- huc->fw.minor_ver_wanted = BXT_HUC_FW_MINOR;
+ huc_fw->path = I915_BXT_HUC_UCODE;
+ huc_fw->major_ver_wanted = BXT_HUC_FW_MAJOR;
+ huc_fw->minor_ver_wanted = BXT_HUC_FW_MINOR;
} else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) {
- huc->fw.path = I915_KBL_HUC_UCODE;
- huc->fw.major_ver_wanted = KBL_HUC_FW_MAJOR;
- huc->fw.minor_ver_wanted = KBL_HUC_FW_MINOR;
+ huc_fw->path = I915_KBL_HUC_UCODE;
+ huc_fw->major_ver_wanted = KBL_HUC_FW_MAJOR;
+ huc_fw->minor_ver_wanted = KBL_HUC_FW_MINOR;
} else if (IS_GEMINILAKE(dev_priv)) {
- huc->fw.path = I915_GLK_HUC_UCODE;
- huc->fw.major_ver_wanted = GLK_HUC_FW_MAJOR;
- huc->fw.minor_ver_wanted = GLK_HUC_FW_MINOR;
+ huc_fw->path = I915_GLK_HUC_UCODE;
+ huc_fw->major_ver_wanted = GLK_HUC_FW_MAJOR;
+ huc_fw->minor_ver_wanted = GLK_HUC_FW_MINOR;
} else {
- DRM_ERROR("No HuC firmware known for platform with HuC!\n");
- return;
+ DRM_WARN("%s: No firmware known for this platform!\n",
+ intel_uc_fw_type_repr(huc_fw->type));
}
}
/**
+ * intel_huc_init_early() - initializes HuC struct
+ * @huc: intel_huc struct
+ *
+ * On platforms with HuC, this selects the firmware to be uploaded.
+ */
+void intel_huc_init_early(struct intel_huc *huc)
+{
+ struct intel_uc_fw *huc_fw = &huc->fw;
+
+ intel_uc_fw_init(huc_fw, INTEL_UC_FW_TYPE_HUC);
+ huc_fw_select(huc_fw);
+}
+
+/**
* huc_ucode_xfer() - DMA's the firmware
* @dev_priv: the drm_i915_private device
*
@@ -167,17 +181,17 @@ static int huc_ucode_xfer(struct intel_uc_fw *huc_fw, struct i915_vma *vma)
* intel_huc_init_hw() - load HuC uCode to device
* @huc: intel_huc structure
*
- * Called from guc_setup() during driver loading and also after a GPU reset.
- * Be note that HuC loading must be done before GuC loading.
+ * Called from intel_uc_init_hw() during driver loading and also after a GPU
+ * reset. Note that HuC loading must be done before GuC loading.
*
* The firmware image should have already been fetched into memory by the
- * earlier call to intel_huc_init(), so here we need only check that
+ * earlier call to intel_uc_init_fw(), so here we need only check that
 * it succeeded, and then transfer the image to the h/w.
*
*/
-void intel_huc_init_hw(struct intel_huc *huc)
+int intel_huc_init_hw(struct intel_huc *huc)
{
- intel_uc_fw_upload(&huc->fw, huc_ucode_xfer);
+ return intel_uc_fw_upload(&huc->fw, huc_ucode_xfer);
}
/**
@@ -191,7 +205,7 @@ void intel_huc_init_hw(struct intel_huc *huc)
* signature through intel_guc_auth_huc(). It then waits for 50ms for
* firmware verification ACK and unpins the object.
*/
-void intel_huc_auth(struct intel_huc *huc)
+int intel_huc_auth(struct intel_huc *huc)
{
struct drm_i915_private *i915 = huc_to_i915(huc);
struct intel_guc *guc = &i915->guc;
@@ -199,14 +213,14 @@ void intel_huc_auth(struct intel_huc *huc)
int ret;
if (huc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS)
- return;
+ return -ENOEXEC;
vma = i915_gem_object_ggtt_pin(huc->fw.obj, NULL, 0, 0,
PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
if (IS_ERR(vma)) {
- DRM_ERROR("failed to pin huc fw object %d\n",
- (int)PTR_ERR(vma));
- return;
+ ret = PTR_ERR(vma);
+ DRM_ERROR("HuC: Failed to pin huc fw object %d\n", ret);
+ return ret;
}
ret = intel_guc_auth_huc(guc,
@@ -229,4 +243,5 @@ void intel_huc_auth(struct intel_huc *huc)
out:
i915_vma_unpin(vma);
+ return ret;
}
diff --git a/drivers/gpu/drm/i915/intel_huc.h b/drivers/gpu/drm/i915/intel_huc.h
index aaa38b9e5817..40039db59e04 100644
--- a/drivers/gpu/drm/i915/intel_huc.h
+++ b/drivers/gpu/drm/i915/intel_huc.h
@@ -34,8 +34,8 @@ struct intel_huc {
/* HuC-specific additions */
};
-void intel_huc_select_fw(struct intel_huc *huc);
-void intel_huc_init_hw(struct intel_huc *huc);
-void intel_huc_auth(struct intel_huc *huc);
+void intel_huc_init_early(struct intel_huc *huc);
+int intel_huc_init_hw(struct intel_huc *huc);
+int intel_huc_auth(struct intel_huc *huc);
#endif
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 49fdf09f9919..ef9f91a0b0c9 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -128,22 +128,46 @@ intel_i2c_reset(struct drm_i915_private *dev_priv)
I915_WRITE(GMBUS4, 0);
}
-static void intel_i2c_quirk_set(struct drm_i915_private *dev_priv, bool enable)
+static void pnv_gmbus_clock_gating(struct drm_i915_private *dev_priv,
+ bool enable)
{
u32 val;
/* When using bit bashing for I2C, this bit needs to be set to 1 */
- if (!IS_PINEVIEW(dev_priv))
- return;
-
val = I915_READ(DSPCLK_GATE_D);
- if (enable)
- val |= DPCUNIT_CLOCK_GATE_DISABLE;
+ if (!enable)
+ val |= PNV_GMBUSUNIT_CLOCK_GATE_DISABLE;
else
- val &= ~DPCUNIT_CLOCK_GATE_DISABLE;
+ val &= ~PNV_GMBUSUNIT_CLOCK_GATE_DISABLE;
I915_WRITE(DSPCLK_GATE_D, val);
}
+static void pch_gmbus_clock_gating(struct drm_i915_private *dev_priv,
+ bool enable)
+{
+ u32 val;
+
+ val = I915_READ(SOUTH_DSPCLK_GATE_D);
+ if (!enable)
+ val |= PCH_GMBUSUNIT_CLOCK_GATE_DISABLE;
+ else
+ val &= ~PCH_GMBUSUNIT_CLOCK_GATE_DISABLE;
+ I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
+}
+
+static void bxt_gmbus_clock_gating(struct drm_i915_private *dev_priv,
+ bool enable)
+{
+ u32 val;
+
+ val = I915_READ(GEN9_CLKGATE_DIS_4);
+ if (!enable)
+ val |= BXT_GMBUS_GATING_DIS;
+ else
+ val &= ~BXT_GMBUS_GATING_DIS;
+ I915_WRITE(GEN9_CLKGATE_DIS_4, val);
+}
+
static u32 get_reserved(struct intel_gmbus *bus)
{
struct drm_i915_private *dev_priv = bus->dev_priv;
@@ -221,7 +245,10 @@ intel_gpio_pre_xfer(struct i2c_adapter *adapter)
struct drm_i915_private *dev_priv = bus->dev_priv;
intel_i2c_reset(dev_priv);
- intel_i2c_quirk_set(dev_priv, true);
+
+ if (IS_PINEVIEW(dev_priv))
+ pnv_gmbus_clock_gating(dev_priv, false);
+
set_data(bus, 1);
set_clock(bus, 1);
udelay(I2C_RISEFALL_TIME);
@@ -238,7 +265,9 @@ intel_gpio_post_xfer(struct i2c_adapter *adapter)
set_data(bus, 1);
set_clock(bus, 1);
- intel_i2c_quirk_set(dev_priv, false);
+
+ if (IS_PINEVIEW(dev_priv))
+ pnv_gmbus_clock_gating(dev_priv, true);
}
static void
@@ -481,6 +510,13 @@ do_gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
int i = 0, inc, try = 0;
int ret = 0;
+ /* Display WA #0868: skl,bxt,kbl,cfl,glk,cnl */
+ if (IS_GEN9_LP(dev_priv))
+ bxt_gmbus_clock_gating(dev_priv, false);
+ else if (HAS_PCH_SPT(dev_priv) ||
+ HAS_PCH_KBP(dev_priv) || HAS_PCH_CNP(dev_priv))
+ pch_gmbus_clock_gating(dev_priv, false);
+
retry:
I915_WRITE_FW(GMBUS0, bus->reg0);
@@ -582,6 +618,13 @@ timeout:
ret = -EAGAIN;
out:
+ /* Display WA #0868: skl,bxt,kbl,cfl,glk,cnl */
+ if (IS_GEN9_LP(dev_priv))
+ bxt_gmbus_clock_gating(dev_priv, true);
+ else if (HAS_PCH_SPT(dev_priv) ||
+ HAS_PCH_KBP(dev_priv) || HAS_PCH_CNP(dev_priv))
+ pch_gmbus_clock_gating(dev_priv, true);
+
return ret;
}
diff --git a/drivers/gpu/drm/i915/intel_lpe_audio.c b/drivers/gpu/drm/i915/intel_lpe_audio.c
index 3bf65288ffff..5809b29044fc 100644
--- a/drivers/gpu/drm/i915/intel_lpe_audio.c
+++ b/drivers/gpu/drm/i915/intel_lpe_audio.c
@@ -193,7 +193,7 @@ static bool lpe_audio_detect(struct drm_i915_private *dev_priv)
};
if (!pci_dev_present(atom_hdaudio_ids)) {
- DRM_INFO("%s\n", "HDaudio controller not detected, using LPE audio instead\n");
+ DRM_INFO("HDaudio controller not detected, using LPE audio instead\n");
lpe_present = true;
}
}
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 2a8160f603ab..739c33b07c59 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -431,8 +431,6 @@ static inline void elsp_write(u64 desc, u32 __iomem *elsp)
static void execlists_submit_ports(struct intel_engine_cs *engine)
{
struct execlist_port *port = engine->execlists.port;
- u32 __iomem *elsp =
- engine->i915->regs + i915_mmio_reg_offset(RING_ELSP(engine));
unsigned int n;
for (n = execlists_num_ports(&engine->execlists); n--; ) {
@@ -451,14 +449,14 @@ static void execlists_submit_ports(struct intel_engine_cs *engine)
GEM_TRACE("%s in[%d]: ctx=%d.%d, seqno=%x\n",
engine->name, n,
- rq->ctx->hw_id, count,
+ port[n].context_id, count,
rq->global_seqno);
} else {
GEM_BUG_ON(!n);
desc = 0;
}
- elsp_write(desc, elsp);
+ elsp_write(desc, engine->execlists.elsp);
}
execlists_clear_active(&engine->execlists, EXECLISTS_ACTIVE_HWACK);
}
@@ -496,8 +494,6 @@ static void inject_preempt_context(struct intel_engine_cs *engine)
{
struct intel_context *ce =
&engine->i915->preempt_context->engine[engine->id];
- u32 __iomem *elsp =
- engine->i915->regs + i915_mmio_reg_offset(RING_ELSP(engine));
unsigned int n;
GEM_BUG_ON(engine->i915->preempt_context->hw_id != PREEMPT_ID);
@@ -508,11 +504,11 @@ static void inject_preempt_context(struct intel_engine_cs *engine)
ce->ring->tail &= (ce->ring->size - 1);
ce->lrc_reg_state[CTX_RING_TAIL+1] = ce->ring->tail;
- GEM_TRACE("\n");
+ GEM_TRACE("%s\n", engine->name);
for (n = execlists_num_ports(&engine->execlists); --n; )
- elsp_write(0, elsp);
+ elsp_write(0, engine->execlists.elsp);
- elsp_write(ce->lrc_desc, elsp);
+ elsp_write(ce->lrc_desc, engine->execlists.elsp);
execlists_clear_active(&engine->execlists, EXECLISTS_ACTIVE_HWACK);
}
@@ -865,9 +861,10 @@ static void execlists_submission_tasklet(unsigned long data)
*/
status = READ_ONCE(buf[2 * head]); /* maybe mmio! */
- GEM_TRACE("%s csb[%dd]: status=0x%08x:0x%08x\n",
+ GEM_TRACE("%s csb[%d]: status=0x%08x:0x%08x, active=0x%x\n",
engine->name, head,
- status, buf[2*head + 1]);
+ status, buf[2*head + 1],
+ execlists->active);
if (status & (GEN8_CTX_STATUS_IDLE_ACTIVE |
GEN8_CTX_STATUS_PREEMPTED))
@@ -885,6 +882,8 @@ static void execlists_submission_tasklet(unsigned long data)
if (status & GEN8_CTX_STATUS_COMPLETE &&
buf[2*head + 1] == PREEMPT_ID) {
+ GEM_TRACE("%s preempt-idle\n", engine->name);
+
execlists_cancel_port_requests(execlists);
execlists_unwind_incomplete_requests(execlists);
@@ -909,8 +908,8 @@ static void execlists_submission_tasklet(unsigned long data)
rq = port_unpack(port, &count);
GEM_TRACE("%s out[0]: ctx=%d.%d, seqno=%x\n",
engine->name,
- rq->ctx->hw_id, count,
- rq->global_seqno);
+ port->context_id, count,
+ rq ? rq->global_seqno : 0);
GEM_BUG_ON(count == 0);
if (--count == 0) {
GEM_BUG_ON(status & GEN8_CTX_STATUS_PREEMPTED);
@@ -1509,6 +1508,9 @@ static int gen8_init_common_ring(struct intel_engine_cs *engine)
execlists->csb_head = -1;
execlists->active = 0;
+ execlists->elsp =
+ dev_priv->regs + i915_mmio_reg_offset(RING_ELSP(engine));
+
/* After a GPU reset, we may have requests to replay */
if (execlists->first)
tasklet_schedule(&execlists->tasklet);
@@ -1556,6 +1558,8 @@ static void reset_common_ring(struct intel_engine_cs *engine,
struct intel_context *ce;
unsigned long flags;
+ GEM_TRACE("%s seqno=%x\n",
+ engine->name, request ? request->global_seqno : 0);
spin_lock_irqsave(&engine->timeline->lock, flags);
/*
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index fc65f5e451dd..c58e5f53bab0 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -32,6 +32,8 @@
#include <drm/drmP.h>
#include <drm/i915_drm.h>
+
+#include "intel_opregion.h"
#include "i915_drv.h"
#include "intel_drv.h"
diff --git a/drivers/gpu/drm/i915/intel_opregion.h b/drivers/gpu/drm/i915/intel_opregion.h
new file mode 100644
index 000000000000..e0e437ba9e51
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_opregion.h
@@ -0,0 +1,106 @@
+/*
+ * Copyright © 2008-2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _INTEL_OPREGION_H_
+#define _INTEL_OPREGION_H_
+
+#include <linux/workqueue.h>
+#include <linux/pci.h>
+
+struct drm_i915_private;
+struct intel_encoder;
+
+struct opregion_header;
+struct opregion_acpi;
+struct opregion_swsci;
+struct opregion_asle;
+
+struct intel_opregion {
+ struct opregion_header *header;
+ struct opregion_acpi *acpi;
+ struct opregion_swsci *swsci;
+ u32 swsci_gbda_sub_functions;
+ u32 swsci_sbcb_sub_functions;
+ struct opregion_asle *asle;
+ void *rvda;
+ void *vbt_firmware;
+ const void *vbt;
+ u32 vbt_size;
+ u32 *lid_state;
+ struct work_struct asle_work;
+};
+
+#define OPREGION_SIZE (8 * 1024)
+
+#ifdef CONFIG_ACPI
+
+int intel_opregion_setup(struct drm_i915_private *dev_priv);
+void intel_opregion_register(struct drm_i915_private *dev_priv);
+void intel_opregion_unregister(struct drm_i915_private *dev_priv);
+void intel_opregion_asle_intr(struct drm_i915_private *dev_priv);
+int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
+ bool enable);
+int intel_opregion_notify_adapter(struct drm_i915_private *dev_priv,
+ pci_power_t state);
+int intel_opregion_get_panel_type(struct drm_i915_private *dev_priv);
+
+#else /* CONFIG_ACPI*/
+
+static inline int intel_opregion_setup(struct drm_i915_private *dev_priv)
+{
+ return 0;
+}
+
+static inline void intel_opregion_register(struct drm_i915_private *dev_priv)
+{
+}
+
+static inline void intel_opregion_unregister(struct drm_i915_private *dev_priv)
+{
+}
+
+static inline void intel_opregion_asle_intr(struct drm_i915_private *dev_priv)
+{
+}
+
+static inline int
+intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, bool enable)
+{
+ return 0;
+}
+
+static inline int
+intel_opregion_notify_adapter(struct drm_i915_private *dev, pci_power_t state)
+{
+ return 0;
+}
+
+static inline int intel_opregion_get_panel_type(struct drm_i915_private *dev)
+{
+ return -ENODEV;
+}
+
+#endif /* CONFIG_ACPI */
+
+#endif
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 1b397b41cb4f..41e9465d44a8 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -219,7 +219,7 @@ intel_overlay_map_regs(struct intel_overlay *overlay)
if (OVERLAY_NEEDS_PHYSICAL(dev_priv))
regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_handle->vaddr;
else
- regs = io_mapping_map_wc(&dev_priv->ggtt.mappable,
+ regs = io_mapping_map_wc(&dev_priv->ggtt.iomap,
overlay->flip_addr,
PAGE_SIZE);
@@ -1508,7 +1508,7 @@ intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
regs = (struct overlay_registers __iomem *)
overlay->reg_bo->phys_handle->vaddr;
else
- regs = io_mapping_map_atomic_wc(&dev_priv->ggtt.mappable,
+ regs = io_mapping_map_atomic_wc(&dev_priv->ggtt.iomap,
overlay->flip_addr);
return regs;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 67f326230a7e..1db79a860b96 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -58,7 +58,7 @@ static void gen9_init_clock_gating(struct drm_i915_private *dev_priv)
if (HAS_LLC(dev_priv)) {
/*
* WaCompressedResourceDisplayNewHashMode:skl,kbl
- * Display WA#0390: skl,kbl
+ * Display WA #0390: skl,kbl
*
* Must match Sampler, Pixel Back End, and Media. See
* WaCompressedResourceSamplerPbeMediaNewHashMode.
@@ -6416,7 +6416,6 @@ static void valleyview_disable_rps(struct drm_i915_private *dev_priv)
static bool bxt_check_bios_rc6_setup(struct drm_i915_private *dev_priv)
{
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
bool enable_rc6 = true;
unsigned long rc6_ctx_base;
u32 rc_ctl;
@@ -6441,9 +6440,8 @@ static bool bxt_check_bios_rc6_setup(struct drm_i915_private *dev_priv)
* for this check.
*/
rc6_ctx_base = I915_READ(RC6_CTX_BASE) & RC6_CTX_BASE_MASK;
- if (!((rc6_ctx_base >= ggtt->stolen_reserved_base) &&
- (rc6_ctx_base + PAGE_SIZE <= ggtt->stolen_reserved_base +
- ggtt->stolen_reserved_size))) {
+ if (!((rc6_ctx_base >= dev_priv->dsm_reserved.start) &&
+ (rc6_ctx_base + PAGE_SIZE < dev_priv->dsm_reserved.end))) {
DRM_DEBUG_DRIVER("RC6 Base address not as expected.\n");
enable_rc6 = false;
}
@@ -7020,7 +7018,7 @@ static void valleyview_check_pctx(struct drm_i915_private *dev_priv)
{
unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;
- WARN_ON(pctx_addr != dev_priv->mm.stolen_base +
+ WARN_ON(pctx_addr != dev_priv->dsm.start +
dev_priv->vlv_pctx->stolen->start);
}
@@ -7035,16 +7033,15 @@ static void cherryview_check_pctx(struct drm_i915_private *dev_priv)
static void cherryview_setup_pctx(struct drm_i915_private *dev_priv)
{
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
- unsigned long pctx_paddr, paddr;
+ resource_size_t pctx_paddr, paddr;
+ resource_size_t pctx_size = 32*1024;
u32 pcbr;
- int pctx_size = 32*1024;
pcbr = I915_READ(VLV_PCBR);
if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
- paddr = (dev_priv->mm.stolen_base +
- (ggtt->stolen_size - pctx_size));
+ paddr = dev_priv->dsm.end + 1 - pctx_size;
+ GEM_BUG_ON(paddr > U32_MAX);
pctx_paddr = (paddr & (~4095));
I915_WRITE(VLV_PCBR, pctx_paddr);
@@ -7056,16 +7053,16 @@ static void cherryview_setup_pctx(struct drm_i915_private *dev_priv)
static void valleyview_setup_pctx(struct drm_i915_private *dev_priv)
{
struct drm_i915_gem_object *pctx;
- unsigned long pctx_paddr;
+ resource_size_t pctx_paddr;
+ resource_size_t pctx_size = 24*1024;
u32 pcbr;
- int pctx_size = 24*1024;
pcbr = I915_READ(VLV_PCBR);
if (pcbr) {
/* BIOS set it up already, grab the pre-alloc'd space */
- int pcbr_offset;
+ resource_size_t pcbr_offset;
- pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base;
+ pcbr_offset = (pcbr & (~4095)) - dev_priv->dsm.start;
pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv,
pcbr_offset,
I915_GTT_OFFSET_NONE,
@@ -7089,7 +7086,11 @@ static void valleyview_setup_pctx(struct drm_i915_private *dev_priv)
goto out;
}
- pctx_paddr = dev_priv->mm.stolen_base + pctx->stolen->start;
+ GEM_BUG_ON(range_overflows_t(u64,
+ dev_priv->dsm.start,
+ pctx->stolen->start,
+ U32_MAX));
+ pctx_paddr = dev_priv->dsm.start + pctx->stolen->start;
I915_WRITE(VLV_PCBR, pctx_paddr);
out:
@@ -8417,7 +8418,7 @@ static void cnp_init_clock_gating(struct drm_i915_private *dev_priv)
if (!HAS_PCH_CNP(dev_priv))
return;
- /* Wa #1181 */
+ /* Display WA #1181: cnp */
I915_WRITE(SOUTH_DSPCLK_GATE_D, I915_READ(SOUTH_DSPCLK_GATE_D) |
CNP_PWM_CGE_GATING_DISABLE);
}
@@ -8446,6 +8447,11 @@ static void cnl_init_clock_gating(struct drm_i915_private *dev_priv)
if (IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_B0))
val |= SARBUNIT_CLKGATE_DIS;
I915_WRITE(SLICE_UNIT_LEVEL_CLKGATE, val);
+
+ /* WaDisableVFclkgate:cnl */
+ val = I915_READ(UNSLICE_UNIT_LEVEL_CLKGATE);
+ val |= VFUNIT_CLKGATE_DIS;
+ I915_WRITE(UNSLICE_UNIT_LEVEL_CLKGATE, val);
}
static void cfl_init_clock_gating(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index a1ad85fa5c1a..2e32615eeada 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -590,7 +590,7 @@ static void hsw_psr_disable(struct intel_dp *intel_dp,
struct drm_i915_private *dev_priv = to_i915(dev);
if (dev_priv->psr.active) {
- i915_reg_t psr_ctl;
+ i915_reg_t psr_status;
u32 psr_status_mask;
if (dev_priv->psr.aux_frame_sync)
@@ -599,24 +599,24 @@ static void hsw_psr_disable(struct intel_dp *intel_dp,
0);
if (dev_priv->psr.psr2_support) {
- psr_ctl = EDP_PSR2_CTL;
+ psr_status = EDP_PSR2_STATUS_CTL;
psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
- I915_WRITE(psr_ctl,
- I915_READ(psr_ctl) &
+ I915_WRITE(EDP_PSR2_CTL,
+ I915_READ(EDP_PSR2_CTL) &
~(EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE));
} else {
- psr_ctl = EDP_PSR_STATUS_CTL;
+ psr_status = EDP_PSR_STATUS_CTL;
psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
- I915_WRITE(psr_ctl,
- I915_READ(psr_ctl) & ~EDP_PSR_ENABLE);
+ I915_WRITE(EDP_PSR_CTL,
+ I915_READ(EDP_PSR_CTL) & ~EDP_PSR_ENABLE);
}
/* Wait till PSR is idle */
if (intel_wait_for_register(dev_priv,
- psr_ctl, psr_status_mask, 0,
+ psr_status, psr_status_mask, 0,
2000))
DRM_ERROR("Timed out waiting for PSR Idle State\n");
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index c68ab3ead83c..c5ff203e42d6 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -200,6 +200,11 @@ struct intel_engine_execlists {
bool no_priolist;
/**
+ * @elsp: the ExecList Submission Port register
+ */
+ u32 __iomem *elsp;
+
+ /**
* @port: execlist port states
*
* For each hardware ELSP (ExecList Submission Port) we keep
@@ -1008,7 +1013,10 @@ unsigned int intel_engines_has_context_isolation(struct drm_i915_private *i915);
bool intel_engine_can_store_dword(struct intel_engine_cs *engine);
-void intel_engine_dump(struct intel_engine_cs *engine, struct drm_printer *p);
+__printf(3, 4)
+void intel_engine_dump(struct intel_engine_cs *engine,
+ struct drm_printer *m,
+ const char *header, ...);
struct intel_engine_cs *
intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance);
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 8315499452dc..db9d57f39534 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -130,6 +130,8 @@ intel_display_power_domain_str(enum intel_display_power_domain domain)
return "INIT";
case POWER_DOMAIN_MODESET:
return "MODESET";
+ case POWER_DOMAIN_GT_IRQ:
+ return "GT_IRQ";
default:
MISSING_CASE(domain);
return "?";
@@ -1705,6 +1707,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
+ BIT_ULL(POWER_DOMAIN_GT_IRQ) | \
BIT_ULL(POWER_DOMAIN_MODESET) | \
BIT_ULL(POWER_DOMAIN_AUX_A) | \
BIT_ULL(POWER_DOMAIN_INIT))
@@ -1723,12 +1726,13 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
BIT_ULL(POWER_DOMAIN_AUX_C) | \
BIT_ULL(POWER_DOMAIN_AUDIO) | \
BIT_ULL(POWER_DOMAIN_VGA) | \
- BIT_ULL(POWER_DOMAIN_GMBUS) | \
BIT_ULL(POWER_DOMAIN_INIT))
#define BXT_DISPLAY_DC_OFF_POWER_DOMAINS ( \
BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
+ BIT_ULL(POWER_DOMAIN_GT_IRQ) | \
BIT_ULL(POWER_DOMAIN_MODESET) | \
BIT_ULL(POWER_DOMAIN_AUX_A) | \
+ BIT_ULL(POWER_DOMAIN_GMBUS) | \
BIT_ULL(POWER_DOMAIN_INIT))
#define BXT_DPIO_CMN_A_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) | \
@@ -1785,8 +1789,10 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_DC_OFF_POWER_DOMAINS ( \
GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
+ BIT_ULL(POWER_DOMAIN_GT_IRQ) | \
BIT_ULL(POWER_DOMAIN_MODESET) | \
BIT_ULL(POWER_DOMAIN_AUX_A) | \
+ BIT_ULL(POWER_DOMAIN_GMBUS) | \
BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
diff --git a/drivers/gpu/drm/i915/intel_uc.c b/drivers/gpu/drm/i915/intel_uc.c
index 1e2a30a40ede..907deac6e3fa 100644
--- a/drivers/gpu/drm/i915/intel_uc.c
+++ b/drivers/gpu/drm/i915/intel_uc.c
@@ -24,6 +24,7 @@
#include "intel_uc.h"
#include "intel_guc_submission.h"
+#include "intel_guc.h"
#include "i915_drv.h"
/* Reset GuC providing us with fresh state for both GuC and HuC.
@@ -47,55 +48,93 @@ static int __intel_uc_reset_hw(struct drm_i915_private *dev_priv)
return ret;
}
-void intel_uc_sanitize_options(struct drm_i915_private *dev_priv)
+static int __get_platform_enable_guc(struct drm_i915_private *dev_priv)
{
- if (!HAS_GUC(dev_priv)) {
- if (i915_modparams.enable_guc_loading > 0 ||
- i915_modparams.enable_guc_submission > 0)
- DRM_INFO("Ignoring GuC options, no hardware\n");
+ struct intel_uc_fw *guc_fw = &dev_priv->guc.fw;
+ struct intel_uc_fw *huc_fw = &dev_priv->huc.fw;
+ int enable_guc = 0;
- i915_modparams.enable_guc_loading = 0;
- i915_modparams.enable_guc_submission = 0;
- return;
- }
+ /* Default is to enable GuC/HuC if we know their firmwares */
+ if (intel_uc_fw_is_selected(guc_fw))
+ enable_guc |= ENABLE_GUC_SUBMISSION;
+ if (intel_uc_fw_is_selected(huc_fw))
+ enable_guc |= ENABLE_GUC_LOAD_HUC;
- /* A negative value means "use platform default" */
- if (i915_modparams.enable_guc_loading < 0)
- i915_modparams.enable_guc_loading = HAS_GUC_UCODE(dev_priv);
+ /* Any platform specific fine-tuning can be done here */
+
+ return enable_guc;
+}
- /* Verify firmware version */
- if (i915_modparams.enable_guc_loading) {
- if (HAS_HUC_UCODE(dev_priv))
- intel_huc_select_fw(&dev_priv->huc);
+/**
+ * intel_uc_sanitize_options - sanitize uC related modparam options
+ * @dev_priv: device private
+ *
+ * This function will modify the "enable_guc" option only if it was
+ * initially set to "auto" (-1). The default value for this modparam varies
+ * between platforms and is hardcoded in the driver code. Any other modparam
+ * value is only checked against the availability of the related hardware
+ * or firmware definitions.
+ */
+void intel_uc_sanitize_options(struct drm_i915_private *dev_priv)
+{
+ struct intel_uc_fw *guc_fw = &dev_priv->guc.fw;
+ struct intel_uc_fw *huc_fw = &dev_priv->huc.fw;
- if (intel_guc_fw_select(&dev_priv->guc))
- i915_modparams.enable_guc_loading = 0;
+ /* A negative value means "use platform default" */
+ if (i915_modparams.enable_guc < 0)
+ i915_modparams.enable_guc = __get_platform_enable_guc(dev_priv);
+
+ DRM_DEBUG_DRIVER("enable_guc=%d (submission:%s huc:%s)\n",
+ i915_modparams.enable_guc,
+ yesno(intel_uc_is_using_guc_submission()),
+ yesno(intel_uc_is_using_huc()));
+
+ /* Verify GuC firmware availability */
+ if (intel_uc_is_using_guc() && !intel_uc_fw_is_selected(guc_fw)) {
+ DRM_WARN("Incompatible option detected: enable_guc=%d, %s!\n",
+ i915_modparams.enable_guc,
+ !HAS_GUC(dev_priv) ? "no GuC hardware" :
+ "no GuC firmware");
}
- /* Can't enable guc submission without guc loaded */
- if (!i915_modparams.enable_guc_loading)
- i915_modparams.enable_guc_submission = 0;
+ /* Verify HuC firmware availability */
+ if (intel_uc_is_using_huc() && !intel_uc_fw_is_selected(huc_fw)) {
+ DRM_WARN("Incompatible option detected: enable_guc=%d, %s!\n",
+ i915_modparams.enable_guc,
+ !HAS_HUC(dev_priv) ? "no HuC hardware" :
+ "no HuC firmware");
+ }
- /* A negative value means "use platform default" */
- if (i915_modparams.enable_guc_submission < 0)
- i915_modparams.enable_guc_submission = HAS_GUC_SCHED(dev_priv);
+ /* Make sure that sanitization was done */
+ GEM_BUG_ON(i915_modparams.enable_guc < 0);
}
void intel_uc_init_early(struct drm_i915_private *dev_priv)
{
intel_guc_init_early(&dev_priv->guc);
+ intel_huc_init_early(&dev_priv->huc);
}
void intel_uc_init_fw(struct drm_i915_private *dev_priv)
{
- intel_uc_fw_fetch(dev_priv, &dev_priv->huc.fw);
+ if (!USES_GUC(dev_priv))
+ return;
+
+ if (USES_HUC(dev_priv))
+ intel_uc_fw_fetch(dev_priv, &dev_priv->huc.fw);
+
intel_uc_fw_fetch(dev_priv, &dev_priv->guc.fw);
}
void intel_uc_fini_fw(struct drm_i915_private *dev_priv)
{
+ if (!USES_GUC(dev_priv))
+ return;
+
intel_uc_fw_fini(&dev_priv->guc.fw);
- intel_uc_fw_fini(&dev_priv->huc.fw);
+
+ if (USES_HUC(dev_priv))
+ intel_uc_fw_fini(&dev_priv->huc.fw);
}
/**
@@ -149,30 +188,91 @@ static void guc_disable_communication(struct intel_guc *guc)
guc->send = intel_guc_send_nop;
}
-int intel_uc_init_hw(struct drm_i915_private *dev_priv)
+int intel_uc_init_wq(struct drm_i915_private *dev_priv)
+{
+ int ret;
+
+ if (!USES_GUC(dev_priv))
+ return 0;
+
+ ret = intel_guc_init_wq(&dev_priv->guc);
+ if (ret) {
+ DRM_ERROR("Couldn't allocate workqueues for GuC\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+void intel_uc_fini_wq(struct drm_i915_private *dev_priv)
+{
+ if (!USES_GUC(dev_priv))
+ return;
+
+ GEM_BUG_ON(!HAS_GUC(dev_priv));
+
+ intel_guc_fini_wq(&dev_priv->guc);
+}
+
+int intel_uc_init(struct drm_i915_private *dev_priv)
{
struct intel_guc *guc = &dev_priv->guc;
- int ret, attempts;
+ int ret;
- if (!i915_modparams.enable_guc_loading)
+ if (!USES_GUC(dev_priv))
return 0;
- guc_disable_communication(guc);
- gen9_reset_guc_interrupts(dev_priv);
+ if (!HAS_GUC(dev_priv))
+ return -ENODEV;
- /* We need to notify the guc whenever we change the GGTT */
- i915_ggtt_enable_guc(dev_priv);
+ ret = intel_guc_init(guc);
+ if (ret)
+ return ret;
- if (i915_modparams.enable_guc_submission) {
+ if (USES_GUC_SUBMISSION(dev_priv)) {
/*
* This is stuff we need to have available at fw load time
* if we are planning to enable submission later
*/
ret = intel_guc_submission_init(guc);
- if (ret)
- goto err_guc;
+ if (ret) {
+ intel_guc_fini(guc);
+ return ret;
+ }
}
+ return 0;
+}
+
+void intel_uc_fini(struct drm_i915_private *dev_priv)
+{
+ struct intel_guc *guc = &dev_priv->guc;
+
+ if (!USES_GUC(dev_priv))
+ return;
+
+ GEM_BUG_ON(!HAS_GUC(dev_priv));
+
+ if (USES_GUC_SUBMISSION(dev_priv))
+ intel_guc_submission_fini(guc);
+
+ intel_guc_fini(guc);
+}
+
+int intel_uc_init_hw(struct drm_i915_private *dev_priv)
+{
+ struct intel_guc *guc = &dev_priv->guc;
+ struct intel_huc *huc = &dev_priv->huc;
+ int ret, attempts;
+
+ if (!USES_GUC(dev_priv))
+ return 0;
+
+ GEM_BUG_ON(!HAS_GUC(dev_priv));
+
+ guc_disable_communication(guc);
+ gen9_reset_guc_interrupts(dev_priv);
+
/* init WOPCM */
I915_WRITE(GUC_WOPCM_SIZE, intel_guc_wopcm_size(dev_priv));
I915_WRITE(DMA_GUC_WOPCM_OFFSET,
@@ -192,9 +292,14 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv)
*/
ret = __intel_uc_reset_hw(dev_priv);
if (ret)
- goto err_submission;
+ goto err_out;
+
+ if (USES_HUC(dev_priv)) {
+ ret = intel_huc_init_hw(huc);
+ if (ret)
+ goto err_out;
+ }
- intel_huc_init_hw(&dev_priv->huc);
intel_guc_init_params(guc);
ret = intel_guc_fw_upload(guc);
if (ret == 0 || ret != -EAGAIN)
@@ -212,8 +317,13 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv)
if (ret)
goto err_log_capture;
- intel_huc_auth(&dev_priv->huc);
- if (i915_modparams.enable_guc_submission) {
+ if (USES_HUC(dev_priv)) {
+ ret = intel_huc_auth(huc);
+ if (ret)
+ goto err_communication;
+ }
+
+ if (USES_GUC_SUBMISSION(dev_priv)) {
if (i915_modparams.guc_log_level >= 0)
gen9_enable_guc_interrupts(dev_priv);
@@ -222,50 +332,33 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv)
goto err_interrupts;
}
- dev_info(dev_priv->drm.dev, "GuC %s (firmware %s [version %u.%u])\n",
- i915_modparams.enable_guc_submission ? "submission enabled" :
- "loaded",
- guc->fw.path,
+ dev_info(dev_priv->drm.dev, "GuC firmware version %u.%u\n",
guc->fw.major_ver_found, guc->fw.minor_ver_found);
+ dev_info(dev_priv->drm.dev, "GuC submission %s\n",
+ enableddisabled(USES_GUC_SUBMISSION(dev_priv)));
+ dev_info(dev_priv->drm.dev, "HuC %s\n",
+ enableddisabled(USES_HUC(dev_priv)));
return 0;
/*
* We've failed to load the firmware :(
- *
- * Decide whether to disable GuC submission and fall back to
- * execlist mode, and whether to hide the error by returning
- * zero or to return -EIO, which the caller will treat as a
- * nonfatal error (i.e. it doesn't prevent driver load, but
- * marks the GPU as wedged until reset).
*/
err_interrupts:
- guc_disable_communication(guc);
gen9_disable_guc_interrupts(dev_priv);
+err_communication:
+ guc_disable_communication(guc);
err_log_capture:
guc_capture_load_err_log(guc);
-err_submission:
- if (i915_modparams.enable_guc_submission)
- intel_guc_submission_fini(guc);
-err_guc:
- i915_ggtt_disable_guc(dev_priv);
-
- if (i915_modparams.enable_guc_loading > 1 ||
- i915_modparams.enable_guc_submission > 1) {
- DRM_ERROR("GuC init failed. Firmware loading disabled.\n");
- ret = -EIO;
- } else {
- DRM_NOTE("GuC init failed. Firmware loading disabled.\n");
- ret = 0;
- }
-
- if (i915_modparams.enable_guc_submission) {
- i915_modparams.enable_guc_submission = 0;
- DRM_NOTE("Falling back from GuC submission to execlist mode\n");
- }
-
- i915_modparams.enable_guc_loading = 0;
+err_out:
+ /*
+ * Note that there is no fallback: either the user explicitly asked for
+ * the GuC, or the driver default was to run with the GuC enabled.
+ */
+ if (GEM_WARN_ON(ret == -EIO))
+ ret = -EINVAL;
+ dev_err(dev_priv->drm.dev, "GuC initialization failed %d\n", ret);
return ret;
}
@@ -275,18 +368,16 @@ void intel_uc_fini_hw(struct drm_i915_private *dev_priv)
guc_free_load_err_log(guc);
- if (!i915_modparams.enable_guc_loading)
+ if (!USES_GUC(dev_priv))
return;
- if (i915_modparams.enable_guc_submission)
+ GEM_BUG_ON(!HAS_GUC(dev_priv));
+
+ if (USES_GUC_SUBMISSION(dev_priv))
intel_guc_submission_disable(guc);
guc_disable_communication(guc);
- if (i915_modparams.enable_guc_submission) {
+ if (USES_GUC_SUBMISSION(dev_priv))
gen9_disable_guc_interrupts(dev_priv);
- intel_guc_submission_fini(guc);
- }
-
- i915_ggtt_disable_guc(dev_priv);
}
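
As a reading aid for the new option handling (the ENABLE_GUC_* bit values live in i915_params.h and are not shown in this diff, so the exact numbers below are an assumption):

/*
 * enable_guc = -1 : auto -- __get_platform_enable_guc() picks the bits for
 *                   whichever firmware is known on this platform
 * enable_guc =  0 : no GuC at all (USES_GUC() is false)
 * enable_guc =  1 : GuC submission (assuming ENABLE_GUC_SUBMISSION == BIT(0))
 * enable_guc =  2 : HuC load via GuC (assuming ENABLE_GUC_LOAD_HUC == BIT(1))
 * enable_guc =  3 : both
 */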
diff --git a/drivers/gpu/drm/i915/intel_uc.h b/drivers/gpu/drm/i915/intel_uc.h
index e18d3bb02088..8a7249722ef1 100644
--- a/drivers/gpu/drm/i915/intel_uc.h
+++ b/drivers/gpu/drm/i915/intel_uc.h
@@ -26,13 +26,36 @@
#include "intel_guc.h"
#include "intel_huc.h"
+#include "i915_params.h"
void intel_uc_sanitize_options(struct drm_i915_private *dev_priv);
void intel_uc_init_early(struct drm_i915_private *dev_priv);
void intel_uc_init_mmio(struct drm_i915_private *dev_priv);
void intel_uc_init_fw(struct drm_i915_private *dev_priv);
void intel_uc_fini_fw(struct drm_i915_private *dev_priv);
+int intel_uc_init_wq(struct drm_i915_private *dev_priv);
+void intel_uc_fini_wq(struct drm_i915_private *dev_priv);
int intel_uc_init_hw(struct drm_i915_private *dev_priv);
void intel_uc_fini_hw(struct drm_i915_private *dev_priv);
+int intel_uc_init(struct drm_i915_private *dev_priv);
+void intel_uc_fini(struct drm_i915_private *dev_priv);
+
+static inline bool intel_uc_is_using_guc(void)
+{
+ GEM_BUG_ON(i915_modparams.enable_guc < 0);
+ return i915_modparams.enable_guc > 0;
+}
+
+static inline bool intel_uc_is_using_guc_submission(void)
+{
+ GEM_BUG_ON(i915_modparams.enable_guc < 0);
+ return i915_modparams.enable_guc & ENABLE_GUC_SUBMISSION;
+}
+
+static inline bool intel_uc_is_using_huc(void)
+{
+ GEM_BUG_ON(i915_modparams.enable_guc < 0);
+ return i915_modparams.enable_guc & ENABLE_GUC_LOAD_HUC;
+}
#endif
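
The USES_GUC()/USES_GUC_SUBMISSION()/USES_HUC() predicates used throughout this series are expected to be thin wrappers around the inline helpers above, roughly as below (a sketch of the likely i915_drv.h definitions, which are not part of this hunk):

	#define USES_GUC(dev_priv)		intel_uc_is_using_guc()
	#define USES_GUC_SUBMISSION(dev_priv)	intel_uc_is_using_guc_submission()
	#define USES_HUC(dev_priv)		intel_uc_is_using_huc()

The dev_priv argument is kept for symmetry with the HAS_GUC()/HAS_HUC() capability checks even though these helpers only consult the enable_guc modparam.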
diff --git a/drivers/gpu/drm/i915/intel_uc_fw.c b/drivers/gpu/drm/i915/intel_uc_fw.c
index b376dd3b28cc..784eff9cdfc8 100644
--- a/drivers/gpu/drm/i915/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/intel_uc_fw.c
@@ -214,7 +214,7 @@ int intel_uc_fw_upload(struct intel_uc_fw *uc_fw,
intel_uc_fw_type_repr(uc_fw->type), uc_fw->path);
if (uc_fw->fetch_status != INTEL_UC_FIRMWARE_SUCCESS)
- return -EIO;
+ return -ENOEXEC;
uc_fw->load_status = INTEL_UC_FIRMWARE_PENDING;
DRM_DEBUG_DRIVER("%s fw load %s\n",
diff --git a/drivers/gpu/drm/i915/intel_uc_fw.h b/drivers/gpu/drm/i915/intel_uc_fw.h
index 5394d9d1e683..d5fd4609c785 100644
--- a/drivers/gpu/drm/i915/intel_uc_fw.h
+++ b/drivers/gpu/drm/i915/intel_uc_fw.h
@@ -110,6 +110,11 @@ void intel_uc_fw_init(struct intel_uc_fw *uc_fw, enum intel_uc_fw_type type)
uc_fw->type = type;
}
+static inline bool intel_uc_fw_is_selected(struct intel_uc_fw *uc_fw)
+{
+ return uc_fw->path != NULL;
+}
+
void intel_uc_fw_fetch(struct drm_i915_private *dev_priv,
struct intel_uc_fw *uc_fw);
int intel_uc_fw_upload(struct intel_uc_fw *uc_fw,
diff --git a/drivers/gpu/drm/i915/selftests/huge_pages.c b/drivers/gpu/drm/i915/selftests/huge_pages.c
index e6b31041cc88..2ea69394f428 100644
--- a/drivers/gpu/drm/i915/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/selftests/huge_pages.c
@@ -1637,7 +1637,7 @@ static int igt_shrink_thp(void *arg)
* shmem to truncate our pages.
*/
i915_gem_shrink_all(i915);
- if (!IS_ERR_OR_NULL(obj->mm.pages)) {
+ if (i915_gem_object_has_pages(obj)) {
pr_err("shrink-all didn't truncate the pages\n");
err = -EINVAL;
goto out_close;
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index 6491cf0a4f46..4a28d713a7d8 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -1074,7 +1074,7 @@ static int igt_ggtt_page(void *arg)
i915_gem_object_get_dma_address(obj, 0),
offset, I915_CACHE_NONE, 0);
- vaddr = io_mapping_map_atomic_wc(&ggtt->mappable, offset);
+ vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
iowrite32(n, vaddr + n);
io_mapping_unmap_atomic(vaddr);
@@ -1092,7 +1092,7 @@ static int igt_ggtt_page(void *arg)
i915_gem_object_get_dma_address(obj, 0),
offset, I915_CACHE_NONE, 0);
- vaddr = io_mapping_map_atomic_wc(&ggtt->mappable, offset);
+ vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
val = ioread32(vaddr + n);
io_mapping_unmap_atomic(vaddr);
diff --git a/drivers/gpu/drm/i915/selftests/intel_guc.c b/drivers/gpu/drm/i915/selftests/intel_guc.c
index 7b23597858bb..3f9016466dea 100644
--- a/drivers/gpu/drm/i915/selftests/intel_guc.c
+++ b/drivers/gpu/drm/i915/selftests/intel_guc.c
@@ -85,21 +85,26 @@ static int validate_client(struct intel_guc_client *client,
return 0;
}
+static bool client_doorbell_in_sync(struct intel_guc_client *client)
+{
+ return doorbell_ok(client->guc, client->doorbell_id);
+}
+
/*
- * Check that guc_init_doorbell_hw is doing what it should.
+ * Check that we're able to synchronize guc_clients with their doorbells
*
- * During GuC submission enable, we create GuC clients and their doorbells,
- * but after resetting the microcontroller (resume & gpu reset), these
- * GuC clients are still around, but the status of their doorbells may be
- * incorrect. This is the reason behind validating that the doorbells status
- * expected by the driver matches what the GuC/HW have.
+ * We're creating clients and reserving doorbells once, at module load. During
+ * module lifetime, GuC, doorbell HW, and i915 state may go out of sync due to
+ * GuC being reset. In other words, GuC clients are still around, but the
+ * status of their doorbells may be incorrect. This is the reason behind
+ * validating that the doorbell status expected by the driver matches what the
+ * GuC/HW have.
*/
-static int igt_guc_init_doorbell_hw(void *args)
+static int igt_guc_clients(void *args)
{
struct drm_i915_private *dev_priv = args;
struct intel_guc *guc;
- DECLARE_BITMAP(db_bitmap_bk, GUC_NUM_DOORBELLS);
- int i, err = 0;
+ int err = 0;
GEM_BUG_ON(!HAS_GUC(dev_priv));
mutex_lock(&dev_priv->drm.struct_mutex);
@@ -148,10 +153,21 @@ static int igt_guc_init_doorbell_hw(void *args)
goto out;
}
- /* each client should have received a doorbell during alloc */
+ /* each client should now have reserved a doorbell */
if (!has_doorbell(guc->execbuf_client) ||
!has_doorbell(guc->preempt_client)) {
- pr_err("guc_clients_create didn't create doorbells\n");
+ pr_err("guc_clients_create didn't reserve doorbells\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ /* Now create the doorbells */
+ guc_clients_doorbell_init(guc);
+
+ /* each client should now have received a doorbell */
+ if (!client_doorbell_in_sync(guc->execbuf_client) ||
+ !client_doorbell_in_sync(guc->preempt_client)) {
+ pr_err("failed to initialize the doorbells\n");
err = -EINVAL;
goto out;
}
@@ -160,25 +176,26 @@ static int igt_guc_init_doorbell_hw(void *args)
* Basic test - an attempt to reallocate a valid doorbell to the
* client it is currently assigned should not cause a failure.
*/
- err = guc_init_doorbell_hw(guc);
+ err = guc_clients_doorbell_init(guc);
if (err)
goto out;
/*
* Negative test - a client with no doorbell (invalid db id).
- * Each client gets a doorbell when it is created, after destroying
- * the doorbell, the db id is changed to GUC_DOORBELL_INVALID and the
- * firmware will reject any attempt to allocate a doorbell with an
- * invalid id (db has to be reserved before allocation).
+ * After destroying the doorbell, the db id is changed to
+ * GUC_DOORBELL_INVALID and the firmware will reject any attempt to
+ * allocate a doorbell with an invalid id (db has to be reserved before
+ * allocation).
*/
destroy_doorbell(guc->execbuf_client);
- if (has_doorbell(guc->execbuf_client)) {
+ if (client_doorbell_in_sync(guc->execbuf_client)) {
pr_err("destroy db did not work\n");
err = -EINVAL;
goto out;
}
- err = guc_init_doorbell_hw(guc);
+ unreserve_doorbell(guc->execbuf_client);
+ err = guc_clients_doorbell_init(guc);
if (err != -EIO) {
pr_err("unexpected (err = %d)", err);
goto out;
@@ -191,33 +208,13 @@ static int igt_guc_init_doorbell_hw(void *args)
}
/* clean after test */
- err = create_doorbell(guc->execbuf_client);
- if (err) {
- pr_err("recreate doorbell failed\n");
- goto out;
- }
-
- /*
- * Negative test - doorbell_bitmap out of sync, will trigger a few of
- * WARN_ON(!doorbell_ok(guc, db_id)) but that's ok as long as the
- * doorbells from our clients don't fail.
- */
- bitmap_copy(db_bitmap_bk, guc->doorbell_bitmap, GUC_NUM_DOORBELLS);
- for (i = 0; i < GUC_NUM_DOORBELLS; i++)
- if (i % 2)
- test_and_change_bit(i, guc->doorbell_bitmap);
-
- err = guc_init_doorbell_hw(guc);
+ err = reserve_doorbell(guc->execbuf_client);
if (err) {
- pr_err("out of sync doorbell caused an error\n");
- goto out;
+ pr_err("failed to reserve back the doorbell back\n");
}
-
- /* restore 'correct' db bitmap */
- bitmap_copy(guc->doorbell_bitmap, db_bitmap_bk, GUC_NUM_DOORBELLS);
- err = guc_init_doorbell_hw(guc);
+ err = create_doorbell(guc->execbuf_client);
if (err) {
- pr_err("restored doorbell caused an error\n");
+ pr_err("recreate doorbell failed\n");
goto out;
}
@@ -226,8 +223,11 @@ out:
* Leave clean state for other tests, plus the driver always destroys the
* clients during unload.
*/
+ destroy_doorbell(guc->execbuf_client);
+ destroy_doorbell(guc->preempt_client);
guc_clients_destroy(guc);
guc_clients_create(guc);
+ guc_clients_doorbell_init(guc);
unlock:
mutex_unlock(&dev_priv->drm.struct_mutex);
return err;
@@ -309,25 +309,7 @@ static int igt_guc_doorbells(void *arg)
db_id = clients[i]->doorbell_id;
- /*
- * Client alloc gives us a doorbell, but we want to exercise
- * this ourselves (this resembles guc_init_doorbell_hw)
- */
- destroy_doorbell(clients[i]);
- if (clients[i]->doorbell_id != GUC_DOORBELL_INVALID) {
- pr_err("[%d] destroy db did not work!\n", i);
- err = -EINVAL;
- goto out;
- }
-
- err = __reserve_doorbell(clients[i]);
- if (err) {
- pr_err("[%d] Failed to reserve a doorbell\n", i);
- goto out;
- }
-
- __update_doorbell_desc(clients[i], clients[i]->doorbell_id);
- err = __create_doorbell(clients[i]);
+ err = create_doorbell(clients[i]);
if (err) {
pr_err("[%d] Failed to create a doorbell\n", i);
goto out;
@@ -348,8 +330,10 @@ static int igt_guc_doorbells(void *arg)
out:
for (i = 0; i < ATTEMPTS; i++)
- if (!IS_ERR_OR_NULL(clients[i]))
+ if (!IS_ERR_OR_NULL(clients[i])) {
+ destroy_doorbell(clients[i]);
guc_client_free(clients[i]);
+ }
unlock:
mutex_unlock(&dev_priv->drm.struct_mutex);
return err;
@@ -358,11 +342,11 @@ unlock:
int intel_guc_live_selftest(struct drm_i915_private *dev_priv)
{
static const struct i915_subtest tests[] = {
- SUBTEST(igt_guc_init_doorbell_hw),
+ SUBTEST(igt_guc_clients),
SUBTEST(igt_guc_doorbells),
};
- if (!i915_modparams.enable_guc_submission)
+ if (!USES_GUC_SUBMISSION(dev_priv))
return 0;
return i915_subtests(tests, dev_priv);
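
The doorbell consistency idea exercised by igt_guc_clients can be summarized in a small sketch (illustrative only; has_doorbell() and client_doorbell_in_sync() are the helpers from the submission/selftest code, the wrapper itself is hypothetical):

	/* Driver state says the client owns a doorbell and the GuC/HW agrees. */
	static bool __maybe_unused clients_fully_synced(struct intel_guc *guc)
	{
		return has_doorbell(guc->execbuf_client) &&
		       has_doorbell(guc->preempt_client) &&
		       client_doorbell_in_sync(guc->execbuf_client) &&
		       client_doorbell_in_sync(guc->preempt_client);
	}

After a GuC reset the first two conditions can still hold (the driver bookkeeping survives) while the last two fail, which is exactly the situation the selftest recreates.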
diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
index 1bbb8c46e2d9..d1f91a533afa 100644
--- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
@@ -132,6 +132,12 @@ static int emit_recurse_batch(struct hang *h,
*batch++ = lower_32_bits(hws_address(hws, rq));
*batch++ = upper_32_bits(hws_address(hws, rq));
*batch++ = rq->fence.seqno;
+ *batch++ = MI_ARB_CHECK;
+
+ memset(batch, 0, 1024);
+ batch += 1024 / sizeof(*batch);
+
+ *batch++ = MI_ARB_CHECK;
*batch++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
*batch++ = lower_32_bits(vma->node.start);
*batch++ = upper_32_bits(vma->node.start);
@@ -140,6 +146,12 @@ static int emit_recurse_batch(struct hang *h,
*batch++ = 0;
*batch++ = lower_32_bits(hws_address(hws, rq));
*batch++ = rq->fence.seqno;
+ *batch++ = MI_ARB_CHECK;
+
+ memset(batch, 0, 1024);
+ batch += 1024 / sizeof(*batch);
+
+ *batch++ = MI_ARB_CHECK;
*batch++ = MI_BATCH_BUFFER_START | 1 << 8;
*batch++ = lower_32_bits(vma->node.start);
} else if (INTEL_GEN(i915) >= 4) {
@@ -147,12 +159,24 @@ static int emit_recurse_batch(struct hang *h,
*batch++ = 0;
*batch++ = lower_32_bits(hws_address(hws, rq));
*batch++ = rq->fence.seqno;
+ *batch++ = MI_ARB_CHECK;
+
+ memset(batch, 0, 1024);
+ batch += 1024 / sizeof(*batch);
+
+ *batch++ = MI_ARB_CHECK;
*batch++ = MI_BATCH_BUFFER_START | 2 << 6;
*batch++ = lower_32_bits(vma->node.start);
} else {
*batch++ = MI_STORE_DWORD_IMM;
*batch++ = lower_32_bits(hws_address(hws, rq));
*batch++ = rq->fence.seqno;
+ *batch++ = MI_ARB_CHECK;
+
+ memset(batch, 0, 1024);
+ batch += 1024 / sizeof(*batch);
+
+ *batch++ = MI_ARB_CHECK;
*batch++ = MI_BATCH_BUFFER_START | 2 << 6 | 1;
*batch++ = lower_32_bits(vma->node.start);
}
@@ -234,6 +258,16 @@ static void hang_fini(struct hang *h)
i915_gem_wait_for_idle(h->i915, I915_WAIT_LOCKED);
}
+static bool wait_for_hang(struct hang *h, struct drm_i915_gem_request *rq)
+{
+ return !(wait_for_us(i915_seqno_passed(hws_seqno(h, rq),
+ rq->fence.seqno),
+ 10) &&
+ wait_for(i915_seqno_passed(hws_seqno(h, rq),
+ rq->fence.seqno),
+ 1000));
+}
+
static int igt_hang_sanitycheck(void *arg)
{
struct drm_i915_private *i915 = arg;
@@ -296,6 +330,9 @@ static void global_reset_lock(struct drm_i915_private *i915)
struct intel_engine_cs *engine;
enum intel_engine_id id;
+ pr_debug("%s: current gpu_error=%08lx\n",
+ __func__, i915->gpu_error.flags);
+
while (test_and_set_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags))
wait_event(i915->gpu_error.reset_queue,
!test_bit(I915_RESET_BACKOFF,
@@ -353,54 +390,128 @@ static int igt_global_reset(void *arg)
return err;
}
-static int igt_reset_engine(void *arg)
+static int __igt_reset_engine(struct drm_i915_private *i915, bool active)
{
- struct drm_i915_private *i915 = arg;
struct intel_engine_cs *engine;
enum intel_engine_id id;
- unsigned int reset_count, reset_engine_count;
+ struct hang h;
int err = 0;
- /* Check that we can issue a global GPU and engine reset */
+ /* Check that we can issue an engine reset on an idle engine (no-op) */
if (!intel_has_reset_engine(i915))
return 0;
+ if (active) {
+ mutex_lock(&i915->drm.struct_mutex);
+ err = hang_init(&h, i915);
+ mutex_unlock(&i915->drm.struct_mutex);
+ if (err)
+ return err;
+ }
+
for_each_engine(engine, i915, id) {
- set_bit(I915_RESET_ENGINE + engine->id, &i915->gpu_error.flags);
+ unsigned int reset_count, reset_engine_count;
+ IGT_TIMEOUT(end_time);
+
+ if (active && !intel_engine_can_store_dword(engine))
+ continue;
+
reset_count = i915_reset_count(&i915->gpu_error);
reset_engine_count = i915_reset_engine_count(&i915->gpu_error,
engine);
- err = i915_reset_engine(engine, I915_RESET_QUIET);
- if (err) {
- pr_err("i915_reset_engine failed\n");
- break;
- }
+ set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
+ do {
+ if (active) {
+ struct drm_i915_gem_request *rq;
+
+ mutex_lock(&i915->drm.struct_mutex);
+ rq = hang_create_request(&h, engine,
+ i915->kernel_context);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ mutex_unlock(&i915->drm.struct_mutex);
+ break;
+ }
+
+ i915_gem_request_get(rq);
+ __i915_add_request(rq, true);
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ if (!wait_for_hang(&h, rq)) {
+ struct drm_printer p = drm_info_printer(i915->drm.dev);
+
+ pr_err("%s: Failed to start request %x, at %x\n",
+ __func__, rq->fence.seqno, hws_seqno(&h, rq));
+ intel_engine_dump(engine, &p,
+ "%s\n", engine->name);
+
+ i915_gem_request_put(rq);
+ err = -EIO;
+ break;
+ }
- if (i915_reset_count(&i915->gpu_error) != reset_count) {
- pr_err("Full GPU reset recorded! (engine reset expected)\n");
- err = -EINVAL;
- break;
- }
+ i915_gem_request_put(rq);
+ }
+
+ engine->hangcheck.stalled = true;
+ engine->hangcheck.seqno =
+ intel_engine_get_seqno(engine);
+
+ err = i915_reset_engine(engine, I915_RESET_QUIET);
+ if (err) {
+ pr_err("i915_reset_engine failed\n");
+ break;
+ }
+
+ if (i915_reset_count(&i915->gpu_error) != reset_count) {
+ pr_err("Full GPU reset recorded! (engine reset expected)\n");
+ err = -EINVAL;
+ break;
+ }
+
+ reset_engine_count += active;
+ if (i915_reset_engine_count(&i915->gpu_error, engine) !=
+ reset_engine_count) {
+ pr_err("%s engine reset %srecorded!\n",
+ engine->name, active ? "not " : "");
+ err = -EINVAL;
+ break;
+ }
+
+ engine->hangcheck.stalled = false;
+ } while (time_before(jiffies, end_time));
+ clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
- if (i915_reset_engine_count(&i915->gpu_error, engine) ==
- reset_engine_count) {
- pr_err("No %s engine reset recorded!\n", engine->name);
- err = -EINVAL;
+ if (err)
break;
- }
- clear_bit(I915_RESET_ENGINE + engine->id,
- &i915->gpu_error.flags);
+ cond_resched();
}
if (i915_terminally_wedged(&i915->gpu_error))
err = -EIO;
+ if (active) {
+ mutex_lock(&i915->drm.struct_mutex);
+ hang_fini(&h);
+ mutex_unlock(&i915->drm.struct_mutex);
+ }
+
return err;
}
+static int igt_reset_idle_engine(void *arg)
+{
+ return __igt_reset_engine(arg, false);
+}
+
+static int igt_reset_active_engine(void *arg)
+{
+ return __igt_reset_engine(arg, true);
+}
+
static int active_engine(void *data)
{
struct intel_engine_cs *engine = data;
@@ -462,11 +573,12 @@ err_file:
return err;
}
-static int igt_reset_active_engines(void *arg)
+static int __igt_reset_engine_others(struct drm_i915_private *i915,
+ bool active)
{
- struct drm_i915_private *i915 = arg;
- struct intel_engine_cs *engine, *active;
+ struct intel_engine_cs *engine, *other;
enum intel_engine_id id, tmp;
+ struct hang h;
int err = 0;
/* Check that issuing a reset on one engine does not interfere
@@ -476,24 +588,36 @@ static int igt_reset_active_engines(void *arg)
if (!intel_has_reset_engine(i915))
return 0;
+ if (active) {
+ mutex_lock(&i915->drm.struct_mutex);
+ err = hang_init(&h, i915);
+ mutex_unlock(&i915->drm.struct_mutex);
+ if (err)
+ return err;
+ }
+
for_each_engine(engine, i915, id) {
- struct task_struct *threads[I915_NUM_ENGINES];
+ struct task_struct *threads[I915_NUM_ENGINES] = {};
unsigned long resets[I915_NUM_ENGINES];
unsigned long global = i915_reset_count(&i915->gpu_error);
+ unsigned long count = 0;
IGT_TIMEOUT(end_time);
+ if (active && !intel_engine_can_store_dword(engine))
+ continue;
+
memset(threads, 0, sizeof(threads));
- for_each_engine(active, i915, tmp) {
+ for_each_engine(other, i915, tmp) {
struct task_struct *tsk;
- if (active == engine)
- continue;
-
resets[tmp] = i915_reset_engine_count(&i915->gpu_error,
- active);
+ other);
- tsk = kthread_run(active_engine, active,
- "igt/%s", active->name);
+ if (other == engine)
+ continue;
+
+ tsk = kthread_run(active_engine, other,
+ "igt/%s", other->name);
if (IS_ERR(tsk)) {
err = PTR_ERR(tsk);
goto unwind;
@@ -503,20 +627,70 @@ static int igt_reset_active_engines(void *arg)
get_task_struct(tsk);
}
- set_bit(I915_RESET_ENGINE + engine->id, &i915->gpu_error.flags);
+ set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
do {
+ if (active) {
+ struct drm_i915_gem_request *rq;
+
+ mutex_lock(&i915->drm.struct_mutex);
+ rq = hang_create_request(&h, engine,
+ i915->kernel_context);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ mutex_unlock(&i915->drm.struct_mutex);
+ break;
+ }
+
+ i915_gem_request_get(rq);
+ __i915_add_request(rq, true);
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ if (!wait_for_hang(&h, rq)) {
+ struct drm_printer p = drm_info_printer(i915->drm.dev);
+
+ pr_err("%s: Failed to start request %x, at %x\n",
+ __func__, rq->fence.seqno, hws_seqno(&h, rq));
+ intel_engine_dump(engine, &p,
+ "%s\n", engine->name);
+
+ i915_gem_request_put(rq);
+ err = -EIO;
+ break;
+ }
+
+ i915_gem_request_put(rq);
+ }
+
+ engine->hangcheck.stalled = true;
+ engine->hangcheck.seqno =
+ intel_engine_get_seqno(engine);
+
err = i915_reset_engine(engine, I915_RESET_QUIET);
if (err) {
- pr_err("i915_reset_engine(%s) failed, err=%d\n",
- engine->name, err);
+ pr_err("i915_reset_engine(%s:%s) failed, err=%d\n",
+ engine->name, active ? "active" : "idle", err);
break;
}
+
+ engine->hangcheck.stalled = false;
+ count++;
} while (time_before(jiffies, end_time));
- clear_bit(I915_RESET_ENGINE + engine->id,
- &i915->gpu_error.flags);
+ clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
+ pr_info("i915_reset_engine(%s:%s): %lu resets\n",
+ engine->name, active ? "active" : "idle", count);
+
+ if (i915_reset_engine_count(&i915->gpu_error, engine) -
+ resets[engine->id] != (active ? count : 0)) {
+ pr_err("i915_reset_engine(%s:%s): reset %lu times, but reported %lu\n",
+ engine->name, active ? "active" : "idle", count,
+ i915_reset_engine_count(&i915->gpu_error,
+ engine) - resets[engine->id]);
+ if (!err)
+ err = -EINVAL;
+ }
unwind:
- for_each_engine(active, i915, tmp) {
+ for_each_engine(other, i915, tmp) {
int ret;
if (!threads[tmp])
@@ -524,27 +698,29 @@ unwind:
ret = kthread_stop(threads[tmp]);
if (ret) {
- pr_err("kthread for active engine %s failed, err=%d\n",
- active->name, ret);
+ pr_err("kthread for other engine %s failed, err=%d\n",
+ other->name, ret);
if (!err)
err = ret;
}
put_task_struct(threads[tmp]);
if (resets[tmp] != i915_reset_engine_count(&i915->gpu_error,
- active)) {
+ other)) {
pr_err("Innocent engine %s was reset (count=%ld)\n",
- active->name,
+ other->name,
i915_reset_engine_count(&i915->gpu_error,
- active) - resets[tmp]);
- err = -EIO;
+ other) - resets[tmp]);
+ if (!err)
+ err = -EINVAL;
}
}
if (global != i915_reset_count(&i915->gpu_error)) {
pr_err("Global reset (count=%ld)!\n",
i915_reset_count(&i915->gpu_error) - global);
- err = -EIO;
+ if (!err)
+ err = -EINVAL;
}
if (err)
@@ -556,9 +732,25 @@ unwind:
if (i915_terminally_wedged(&i915->gpu_error))
err = -EIO;
+ if (active) {
+ mutex_lock(&i915->drm.struct_mutex);
+ hang_fini(&h);
+ mutex_unlock(&i915->drm.struct_mutex);
+ }
+
return err;
}
+static int igt_reset_idle_engine_others(void *arg)
+{
+ return __igt_reset_engine_others(arg, false);
+}
+
+static int igt_reset_active_engine_others(void *arg)
+{
+ return __igt_reset_engine_others(arg, true);
+}
+
static u32 fake_hangcheck(struct drm_i915_gem_request *rq)
{
u32 reset_count;
@@ -574,16 +766,6 @@ static u32 fake_hangcheck(struct drm_i915_gem_request *rq)
return reset_count;
}
-static bool wait_for_hang(struct hang *h, struct drm_i915_gem_request *rq)
-{
- return !(wait_for_us(i915_seqno_passed(hws_seqno(h, rq),
- rq->fence.seqno),
- 10) &&
- wait_for(i915_seqno_passed(hws_seqno(h, rq),
- rq->fence.seqno),
- 1000));
-}
-
static int igt_wait_reset(void *arg)
{
struct drm_i915_private *i915 = arg;
@@ -617,9 +799,9 @@ static int igt_wait_reset(void *arg)
if (!wait_for_hang(&h, rq)) {
struct drm_printer p = drm_info_printer(i915->drm.dev);
- pr_err("Failed to start request %x, at %x\n",
- rq->fence.seqno, hws_seqno(&h, rq));
- intel_engine_dump(rq->engine, &p);
+ pr_err("%s: Failed to start request %x, at %x\n",
+ __func__, rq->fence.seqno, hws_seqno(&h, rq));
+ intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name);
i915_reset(i915, 0);
i915_gem_set_wedged(i915);
@@ -712,9 +894,10 @@ static int igt_reset_queue(void *arg)
if (!wait_for_hang(&h, prev)) {
struct drm_printer p = drm_info_printer(i915->drm.dev);
- pr_err("Failed to start request %x, at %x\n",
- prev->fence.seqno, hws_seqno(&h, prev));
- intel_engine_dump(rq->engine, &p);
+ pr_err("%s: Failed to start request %x, at %x\n",
+ __func__, prev->fence.seqno, hws_seqno(&h, prev));
+ intel_engine_dump(prev->engine, &p,
+ "%s\n", prev->engine->name);
i915_gem_request_put(rq);
i915_gem_request_put(prev);
@@ -818,9 +1001,9 @@ static int igt_handle_error(void *arg)
if (!wait_for_hang(&h, rq)) {
struct drm_printer p = drm_info_printer(i915->drm.dev);
- pr_err("Failed to start request %x, at %x\n",
- rq->fence.seqno, hws_seqno(&h, rq));
- intel_engine_dump(rq->engine, &p);
+ pr_err("%s: Failed to start request %x, at %x\n",
+ __func__, rq->fence.seqno, hws_seqno(&h, rq));
+ intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name);
i915_reset(i915, 0);
i915_gem_set_wedged(i915);
@@ -863,21 +1046,26 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
static const struct i915_subtest tests[] = {
SUBTEST(igt_global_reset), /* attempt to recover GPU first */
SUBTEST(igt_hang_sanitycheck),
- SUBTEST(igt_reset_engine),
- SUBTEST(igt_reset_active_engines),
+ SUBTEST(igt_reset_idle_engine),
+ SUBTEST(igt_reset_active_engine),
+ SUBTEST(igt_reset_idle_engine_others),
+ SUBTEST(igt_reset_active_engine_others),
SUBTEST(igt_wait_reset),
SUBTEST(igt_reset_queue),
SUBTEST(igt_handle_error),
};
+ bool saved_hangcheck;
int err;
if (!intel_has_gpu_reset(i915))
return 0;
intel_runtime_pm_get(i915);
+ saved_hangcheck = fetch_and_zero(&i915_modparams.enable_hangcheck);
err = i915_subtests(tests, i915);
+ i915_modparams.enable_hangcheck = saved_hangcheck;
intel_runtime_pm_put(i915);
return err;
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 80f152aaedf9..1bc61f3f76fc 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -85,6 +85,8 @@ static void mock_device_release(struct drm_device *dev)
i915_gemfs_fini(i915);
+ drm_mode_config_cleanup(&i915->drm);
+
drm_dev_fini(&i915->drm);
put_device(&i915->drm.pdev->dev);
}
@@ -187,7 +189,7 @@ struct drm_i915_private *mock_gem_device(void)
i915->wq = alloc_ordered_workqueue("mock", 0);
if (!i915->wq)
- goto put_device;
+ goto err_drv;
mock_init_contexts(i915);
@@ -266,6 +268,9 @@ err_objects:
kmem_cache_destroy(i915->objects);
err_wq:
destroy_workqueue(i915->wq);
+err_drv:
+ drm_mode_config_cleanup(&i915->drm);
+ drm_dev_fini(&i915->drm);
put_device:
put_device(&pdev->dev);
err:
diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.c b/drivers/gpu/drm/i915/selftests/mock_gtt.c
index 336e1afb250f..e96873f96116 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gtt.c
@@ -110,8 +110,8 @@ void mock_init_ggtt(struct drm_i915_private *i915)
ggtt->base.i915 = i915;
- ggtt->mappable_base = 0;
- ggtt->mappable_end = 2048 * PAGE_SIZE;
+ ggtt->gmadr = (struct resource) DEFINE_RES_MEM(0, 2048 * PAGE_SIZE);
+ ggtt->mappable_end = resource_size(&ggtt->gmadr);
ggtt->base.total = 4096 * PAGE_SIZE;
ggtt->base.clear_range = nop_clear_range;
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
index 17d2f3a1c562..1d053bbefc02 100644
--- a/drivers/gpu/drm/imx/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/imx-drm-core.c
@@ -38,7 +38,6 @@
struct imx_drm_device {
struct drm_device *drm;
unsigned int pipes;
- struct drm_fbdev_cma *fbhelper;
struct drm_atomic_state *state;
};
@@ -47,13 +46,6 @@ static int legacyfb_depth = 16;
module_param(legacyfb_depth, int, 0444);
#endif
-static void imx_drm_driver_lastclose(struct drm_device *drm)
-{
- struct imx_drm_device *imxdrm = drm->dev_private;
-
- drm_fbdev_cma_restore_mode(imxdrm->fbhelper);
-}
-
DEFINE_DRM_GEM_CMA_FOPS(imx_drm_driver_fops);
void imx_drm_connector_destroy(struct drm_connector *connector)
@@ -69,13 +61,6 @@ void imx_drm_encoder_destroy(struct drm_encoder *encoder)
}
EXPORT_SYMBOL_GPL(imx_drm_encoder_destroy);
-static void imx_drm_output_poll_changed(struct drm_device *drm)
-{
- struct imx_drm_device *imxdrm = drm->dev_private;
-
- drm_fbdev_cma_hotplug_event(imxdrm->fbhelper);
-}
-
static int imx_drm_atomic_check(struct drm_device *dev,
struct drm_atomic_state *state)
{
@@ -107,7 +92,7 @@ static int imx_drm_atomic_check(struct drm_device *dev,
static const struct drm_mode_config_funcs imx_drm_mode_config_funcs = {
.fb_create = drm_gem_fb_create,
- .output_poll_changed = imx_drm_output_poll_changed,
+ .output_poll_changed = drm_fb_helper_output_poll_changed,
.atomic_check = imx_drm_atomic_check,
.atomic_commit = drm_atomic_helper_commit,
};
@@ -186,7 +171,7 @@ static const struct drm_ioctl_desc imx_drm_ioctls[] = {
static struct drm_driver imx_drm_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME |
DRIVER_ATOMIC,
- .lastclose = imx_drm_driver_lastclose,
+ .lastclose = drm_fb_helper_lastclose,
.gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.dumb_create = drm_gem_cma_dumb_create,
@@ -272,6 +257,7 @@ static int imx_drm_bind(struct device *dev)
drm->mode_config.max_height = 4096;
drm->mode_config.funcs = &imx_drm_mode_config_funcs;
drm->mode_config.helper_private = &imx_drm_mode_config_helpers;
+ drm->mode_config.allow_fb_modifiers = true;
drm_mode_config_init(drm);
@@ -298,12 +284,9 @@ static int imx_drm_bind(struct device *dev)
dev_warn(dev, "Invalid legacyfb_depth. Defaulting to 16bpp\n");
legacyfb_depth = 16;
}
- imxdrm->fbhelper = drm_fbdev_cma_init(drm, legacyfb_depth, MAX_CRTC);
- if (IS_ERR(imxdrm->fbhelper)) {
- ret = PTR_ERR(imxdrm->fbhelper);
- imxdrm->fbhelper = NULL;
+ ret = drm_fb_cma_fbdev_init(drm, legacyfb_depth, MAX_CRTC);
+ if (ret)
goto err_unbind;
- }
#endif
drm_kms_helper_poll_init(drm);
@@ -317,8 +300,7 @@ static int imx_drm_bind(struct device *dev)
err_fbhelper:
drm_kms_helper_poll_fini(drm);
#if IS_ENABLED(CONFIG_DRM_FBDEV_EMULATION)
- if (imxdrm->fbhelper)
- drm_fbdev_cma_fini(imxdrm->fbhelper);
+ drm_fb_cma_fbdev_fini(drm);
err_unbind:
#endif
component_unbind_all(drm->dev, drm);
@@ -333,14 +315,12 @@ err_unref:
static void imx_drm_unbind(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
- struct imx_drm_device *imxdrm = drm->dev_private;
drm_dev_unregister(drm);
drm_kms_helper_poll_fini(drm);
- if (imxdrm->fbhelper)
- drm_fbdev_cma_fini(imxdrm->fbhelper);
+ drm_fb_cma_fbdev_fini(drm);
drm_mode_config_cleanup(drm);
diff --git a/drivers/gpu/drm/imx/imx-drm.h b/drivers/gpu/drm/imx/imx-drm.h
index f0b7556c0857..15c2bec47a04 100644
--- a/drivers/gpu/drm/imx/imx-drm.h
+++ b/drivers/gpu/drm/imx/imx-drm.h
@@ -8,7 +8,6 @@ struct drm_connector;
struct drm_device;
struct drm_display_mode;
struct drm_encoder;
-struct drm_fbdev_cma;
struct drm_framebuffer;
struct drm_plane;
struct imx_drm_crtc;
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
index 5a67daedcf4d..57ed56d8623f 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
@@ -77,6 +77,18 @@ static const uint32_t ipu_plane_formats[] = {
DRM_FORMAT_BGRX8888_A8,
};
+static const uint64_t ipu_format_modifiers[] = {
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
+
+static const uint64_t pre_format_modifiers[] = {
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_VIVANTE_TILED,
+ DRM_FORMAT_MOD_VIVANTE_SUPER_TILED,
+ DRM_FORMAT_MOD_INVALID
+};
+
int ipu_plane_irq(struct ipu_plane *ipu_plane)
{
return ipu_idmac_channel_irq(ipu_plane->ipu, ipu_plane->ipu_ch,
@@ -303,6 +315,22 @@ void ipu_plane_destroy_state(struct drm_plane *plane,
kfree(ipu_state);
}
+static bool ipu_plane_format_mod_supported(struct drm_plane *plane,
+ uint32_t format, uint64_t modifier)
+{
+ struct ipu_soc *ipu = to_ipu_plane(plane)->ipu;
+
+ /* linear is supported for all planes and formats */
+ if (modifier == DRM_FORMAT_MOD_LINEAR)
+ return true;
+
+ /* without a PRG there are no supported modifiers */
+ if (!ipu_prg_present(ipu))
+ return false;
+
+ return ipu_prg_format_supported(ipu, format, modifier);
+}
+
static const struct drm_plane_funcs ipu_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
@@ -310,6 +338,7 @@ static const struct drm_plane_funcs ipu_plane_funcs = {
.reset = ipu_plane_state_reset,
.atomic_duplicate_state = ipu_plane_duplicate_state,
.atomic_destroy_state = ipu_plane_destroy_state,
+ .format_mod_supported = ipu_plane_format_mod_supported,
};
static int ipu_plane_atomic_check(struct drm_plane *plane,
@@ -550,8 +579,8 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
ipu_prg_channel_configure(ipu_plane->ipu_ch, axi_id,
drm_rect_width(&state->src) >> 16,
drm_rect_height(&state->src) >> 16,
- fb->pitches[0],
- fb->format->format, &eba);
+ fb->pitches[0], fb->format->format,
+ fb->modifier, &eba);
}
if (old_state->fb && !drm_atomic_crtc_needs_modeset(crtc_state)) {
@@ -700,18 +729,71 @@ static const struct drm_plane_helper_funcs ipu_plane_helper_funcs = {
int ipu_planes_assign_pre(struct drm_device *dev,
struct drm_atomic_state *state)
{
+ struct drm_crtc_state *old_crtc_state, *crtc_state;
struct drm_plane_state *plane_state;
+ struct ipu_plane_state *ipu_state;
+ struct ipu_plane *ipu_plane;
struct drm_plane *plane;
+ struct drm_crtc *crtc;
int available_pres = ipu_prg_max_active_channels();
- int i;
+ int ret, i;
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, crtc_state, i) {
+ ret = drm_atomic_add_affected_planes(state, crtc);
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * We are going over the planes in 2 passes: first we assign PREs to
+ * planes with a tiling modifier, which need the PREs to resolve into
+ * linear. Any failure to assign a PRE there is fatal. In the second
+ * pass we try to assign PREs to linear FBs, to improve memory access
+ * patterns for them. Failure at this point is non-fatal, as we can
+ * scan out linear FBs without a PRE.
+ */
for_each_new_plane_in_state(state, plane, plane_state, i) {
- struct ipu_plane_state *ipu_state =
- to_ipu_plane_state(plane_state);
- struct ipu_plane *ipu_plane = to_ipu_plane(plane);
+ ipu_state = to_ipu_plane_state(plane_state);
+ ipu_plane = to_ipu_plane(plane);
+
+ if (!plane_state->fb) {
+ ipu_state->use_pre = false;
+ continue;
+ }
+
+ if (!(plane_state->fb->flags & DRM_MODE_FB_MODIFIERS) ||
+ plane_state->fb->modifier == DRM_FORMAT_MOD_LINEAR)
+ continue;
+
+ if (!ipu_prg_present(ipu_plane->ipu) || !available_pres)
+ return -EINVAL;
+
+ if (!ipu_prg_format_supported(ipu_plane->ipu,
+ plane_state->fb->format->format,
+ plane_state->fb->modifier))
+ return -EINVAL;
+
+ ipu_state->use_pre = true;
+ available_pres--;
+ }
+
+ for_each_new_plane_in_state(state, plane, plane_state, i) {
+ ipu_state = to_ipu_plane_state(plane_state);
+ ipu_plane = to_ipu_plane(plane);
+
+ if (!plane_state->fb) {
+ ipu_state->use_pre = false;
+ continue;
+ }
+
+ if ((plane_state->fb->flags & DRM_MODE_FB_MODIFIERS) &&
+ plane_state->fb->modifier != DRM_FORMAT_MOD_LINEAR)
+ continue;
+
+ /* make sure that modifier is initialized */
+ plane_state->fb->modifier = DRM_FORMAT_MOD_LINEAR;
if (ipu_prg_present(ipu_plane->ipu) && available_pres &&
- plane_state->fb &&
ipu_prg_format_supported(ipu_plane->ipu,
plane_state->fb->format->format,
plane_state->fb->modifier)) {
@@ -731,6 +813,7 @@ struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu,
enum drm_plane_type type)
{
struct ipu_plane *ipu_plane;
+ const uint64_t *modifiers = ipu_format_modifiers;
int ret;
DRM_DEBUG_KMS("channel %d, dp flow %d, possible_crtcs=0x%x\n",
@@ -746,10 +829,13 @@ struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu,
ipu_plane->dma = dma;
ipu_plane->dp_flow = dp;
+ if (ipu_prg_present(ipu))
+ modifiers = pre_format_modifiers;
+
ret = drm_universal_plane_init(dev, &ipu_plane->base, possible_crtcs,
&ipu_plane_funcs, ipu_plane_formats,
ARRAY_SIZE(ipu_plane_formats),
- NULL, type, NULL);
+ modifiers, type, NULL);
if (ret) {
DRM_ERROR("failed to initialize plane\n");
kfree(ipu_plane);
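
The two-pass PRE assignment described in the ipu_planes_assign_pre() comment boils down to the following shape (condensed sketch under the same assumptions as the code above, not a drop-in replacement; fb_is_tiled()/fb_is_linear() stand in for the modifier checks spelled out in the real loops):

	/* Pass 1: tiled FBs must get a PRE; running out is fatal. */
	for_each_new_plane_in_state(state, plane, plane_state, i) {
		if (!fb_is_tiled(plane_state->fb))
			continue;
		if (available_pres == 0)
			return -EINVAL;
		available_pres--;
		to_ipu_plane_state(plane_state)->use_pre = true;
	}

	/* Pass 2: hand the remaining PREs to linear FBs opportunistically. */
	for_each_new_plane_in_state(state, plane, plane_state, i) {
		if (fb_is_linear(plane_state->fb) && available_pres > 0) {
			available_pres--;
			to_ipu_plane_state(plane_state)->use_pre = true;
		}
	}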
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
index 3b804fdaf7a0..f9ad0e960263 100644
--- a/drivers/gpu/drm/meson/meson_drv.c
+++ b/drivers/gpu/drm/meson/meson_drv.c
@@ -151,6 +151,14 @@ static struct regmap_config meson_regmap_config = {
.max_register = 0x1000,
};
+static void meson_vpu_init(struct meson_drm *priv)
+{
+ writel_relaxed(0x210000, priv->io_base + _REG(VPU_RDARB_MODE_L1C1));
+ writel_relaxed(0x10000, priv->io_base + _REG(VPU_RDARB_MODE_L1C2));
+ writel_relaxed(0x900000, priv->io_base + _REG(VPU_RDARB_MODE_L2C1));
+ writel_relaxed(0x20000, priv->io_base + _REG(VPU_WRARB_MODE_L2C1));
+}
+
static int meson_drv_bind_master(struct device *dev, bool has_components)
{
struct platform_device *pdev = to_platform_device(dev);
@@ -222,6 +230,7 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
/* Hardware Initialization */
+ meson_vpu_init(priv);
meson_venc_init(priv);
meson_vpp_init(priv);
meson_viu_init(priv);
diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.c b/drivers/gpu/drm/meson/meson_dw_hdmi.c
index cef414466f9f..17de3afd98f6 100644
--- a/drivers/gpu/drm/meson/meson_dw_hdmi.c
+++ b/drivers/gpu/drm/meson/meson_dw_hdmi.c
@@ -23,6 +23,7 @@
#include <linux/of_graph.h>
#include <linux/reset.h>
#include <linux/clk.h>
+#include <linux/regulator/consumer.h>
#include <drm/drmP.h>
#include <drm/drm_edid.h>
@@ -137,6 +138,7 @@ struct meson_dw_hdmi {
struct reset_control *hdmitx_phy;
struct clk *hdmi_pclk;
struct clk *venci_clk;
+ struct regulator *hdmi_supply;
u32 irq_stat;
};
#define encoder_to_meson_dw_hdmi(x) \
@@ -751,6 +753,17 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master,
dw_plat_data = &meson_dw_hdmi->dw_plat_data;
encoder = &meson_dw_hdmi->encoder;
+ meson_dw_hdmi->hdmi_supply = devm_regulator_get_optional(dev, "hdmi");
+ if (IS_ERR(meson_dw_hdmi->hdmi_supply)) {
+ if (PTR_ERR(meson_dw_hdmi->hdmi_supply) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ meson_dw_hdmi->hdmi_supply = NULL;
+ } else {
+ ret = regulator_enable(meson_dw_hdmi->hdmi_supply);
+ if (ret)
+ return ret;
+ }
+
meson_dw_hdmi->hdmitx_apb = devm_reset_control_get_exclusive(dev,
"hdmitx_apb");
if (IS_ERR(meson_dw_hdmi->hdmitx_apb)) {
diff --git a/drivers/gpu/drm/meson/meson_registers.h b/drivers/gpu/drm/meson/meson_registers.h
index 284738196af9..bca87143e548 100644
--- a/drivers/gpu/drm/meson/meson_registers.h
+++ b/drivers/gpu/drm/meson/meson_registers.h
@@ -1363,6 +1363,10 @@
#define VPU_PROT3_STAT_1 0x277a
#define VPU_PROT3_STAT_2 0x277b
#define VPU_PROT3_REQ_ONOFF 0x277c
+#define VPU_RDARB_MODE_L1C1 0x2790
+#define VPU_RDARB_MODE_L1C2 0x2799
+#define VPU_RDARB_MODE_L2C1 0x279d
+#define VPU_WRARB_MODE_L2C1 0x27a2
/* osd super scale */
#define OSDSR_HV_SIZEIN 0x3130
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
index f03da63abc7b..c97009bb77dd 100644
--- a/drivers/gpu/drm/mgag200/mgag200_ttm.c
+++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
@@ -216,9 +216,10 @@ static struct ttm_tt *mgag200_ttm_tt_create(struct ttm_bo_device *bdev,
return tt;
}
-static int mgag200_ttm_tt_populate(struct ttm_tt *ttm)
+static int mgag200_ttm_tt_populate(struct ttm_tt *ttm,
+ struct ttm_operation_ctx *ctx)
{
- return ttm_pool_populate(ttm);
+ return ttm_pool_populate(ttm, ctx);
}
static void mgag200_ttm_tt_unpopulate(struct ttm_tt *ttm)
@@ -237,7 +238,6 @@ struct ttm_bo_driver mgag200_bo_driver = {
.verify_access = mgag200_bo_verify_access,
.io_mem_reserve = &mgag200_ttm_io_mem_reserve,
.io_mem_free = &mgag200_ttm_io_mem_free,
- .io_mem_pfn = ttm_bo_default_io_mem_pfn,
};
int mgag200_mm_init(struct mga_device *mdev)
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index a1f4eeeb73e2..7e09d44e4a15 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -17,6 +17,8 @@
#include <linux/dma-mapping.h>
#include <linux/of_address.h>
#include <linux/soc/qcom/mdt_loader.h>
+#include <linux/pm_opp.h>
+#include <linux/nvmem-consumer.h>
#include "msm_gem.h"
#include "msm_mmu.h"
#include "a5xx_gpu.h"
@@ -595,6 +597,12 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
/* Turn on performance counters */
gpu_write(gpu, REG_A5XX_RBBM_PERFCTR_CNTL, 0x01);
+ /* Select CP0 to always count cycles */
+ gpu_write(gpu, REG_A5XX_CP_PERFCTR_CP_SEL_0, PERF_CP_ALWAYS_COUNT);
+
+ /* Select RBBM0 to countable 6 to get the busy status for devfreq */
+ gpu_write(gpu, REG_A5XX_RBBM_PERFCTR_RBBM_SEL_0, 6);
+
/* Increase VFD cache access so LRZ and other data gets evicted less */
gpu_write(gpu, REG_A5XX_UCHE_CACHE_WAYS, 0x02);
@@ -1165,6 +1173,14 @@ static struct msm_ringbuffer *a5xx_active_ring(struct msm_gpu *gpu)
return a5xx_gpu->cur_ring;
}
+static int a5xx_gpu_busy(struct msm_gpu *gpu, uint64_t *value)
+{
+ *value = gpu_read64(gpu, REG_A5XX_RBBM_PERFCTR_RBBM_0_LO,
+ REG_A5XX_RBBM_PERFCTR_RBBM_0_HI);
+
+ return 0;
+}
+
static const struct adreno_gpu_funcs funcs = {
.base = {
.get_param = adreno_get_param,
@@ -1180,10 +1196,30 @@ static const struct adreno_gpu_funcs funcs = {
#ifdef CONFIG_DEBUG_FS
.show = a5xx_show,
#endif
+ .gpu_busy = a5xx_gpu_busy,
},
.get_timestamp = a5xx_get_timestamp,
};
+static void check_speed_bin(struct device *dev)
+{
+ struct nvmem_cell *cell;
+ u32 bin, val;
+
+ cell = nvmem_cell_get(dev, "speed_bin");
+
+ /* If an nvmem cell isn't defined, nothing to do */
+ if (IS_ERR(cell))
+ return;
+
+ bin = *((u32 *) nvmem_cell_read(cell, NULL));
+ nvmem_cell_put(cell);
+
+ val = (1 << bin);
+
+ dev_pm_opp_set_supported_hw(dev, &val, 1);
+}
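+
+/*
+ * For illustration only (values made up): check_speed_bin() feeds the fused
+ * speed bin into the OPP core. dev_pm_opp_set_supported_hw() takes a bitmask,
+ * and each OPP node in the device tree may carry an opp-supported-hw value;
+ * only OPPs whose mask intersects (1 << bin) remain enabled.
+ *
+ *	u32 supported = 1 << bin;            // bin = 2  ->  0x4
+ *	// An OPP tagged opp-supported-hw = <0x6> (bins 1 and 2) stays enabled
+ *	// because 0x6 & 0x4 != 0; one tagged <0x1> is disabled.
+ *	dev_pm_opp_set_supported_hw(dev, &supported, 1);
+ */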
+
struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
{
struct msm_drm_private *priv = dev->dev_private;
@@ -1210,6 +1246,8 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
a5xx_gpu->lm_leakage = 0x4E001A;
+ check_speed_bin(&pdev->dev);
+
ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 4);
if (ret) {
a5xx_destroy(&(a5xx_gpu->base.base));
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_power.c b/drivers/gpu/drm/msm/adreno/a5xx_power.c
index e5700bbf09dd..4e4d965fd9ab 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_power.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_power.c
@@ -103,10 +103,16 @@ static inline uint32_t _get_mvolts(struct msm_gpu *gpu, uint32_t freq)
struct msm_drm_private *priv = dev->dev_private;
struct platform_device *pdev = priv->gpu_pdev;
struct dev_pm_opp *opp;
+ u32 ret = 0;
opp = dev_pm_opp_find_freq_exact(&pdev->dev, freq, true);
- return (!IS_ERR(opp)) ? dev_pm_opp_get_voltage(opp) / 1000 : 0;
+ if (!IS_ERR(opp)) {
+ ret = dev_pm_opp_get_voltage(opp) / 1000;
+ dev_pm_opp_put(opp);
+ }
+
+ return ret;
}
/* Setup thermal limit management */
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index 05022ea2a007..62bdb7316da1 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -17,7 +17,6 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#include <linux/pm_opp.h>
#include "adreno_gpu.h"
#define ANY_ID 0xff
@@ -90,14 +89,19 @@ static const struct adreno_info gpulist[] = {
},
};
-MODULE_FIRMWARE("a300_pm4.fw");
-MODULE_FIRMWARE("a300_pfp.fw");
-MODULE_FIRMWARE("a330_pm4.fw");
-MODULE_FIRMWARE("a330_pfp.fw");
-MODULE_FIRMWARE("a420_pm4.fw");
-MODULE_FIRMWARE("a420_pfp.fw");
-MODULE_FIRMWARE("a530_fm4.fw");
-MODULE_FIRMWARE("a530_pfp.fw");
+MODULE_FIRMWARE("qcom/a300_pm4.fw");
+MODULE_FIRMWARE("qcom/a300_pfp.fw");
+MODULE_FIRMWARE("qcom/a330_pm4.fw");
+MODULE_FIRMWARE("qcom/a330_pfp.fw");
+MODULE_FIRMWARE("qcom/a420_pm4.fw");
+MODULE_FIRMWARE("qcom/a420_pfp.fw");
+MODULE_FIRMWARE("qcom/a530_pm4.fw");
+MODULE_FIRMWARE("qcom/a530_pfp.fw");
+MODULE_FIRMWARE("qcom/a530v3_gpmu.fw2");
+MODULE_FIRMWARE("qcom/a530_zap.mdt");
+MODULE_FIRMWARE("qcom/a530_zap.b00");
+MODULE_FIRMWARE("qcom/a530_zap.b01");
+MODULE_FIRMWARE("qcom/a530_zap.b02");
static inline bool _rev_match(uint8_t entry, uint8_t id)
{
@@ -125,11 +129,14 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev)
{
struct msm_drm_private *priv = dev->dev_private;
struct platform_device *pdev = priv->gpu_pdev;
- struct msm_gpu *gpu = platform_get_drvdata(priv->gpu_pdev);
+ struct msm_gpu *gpu = NULL;
int ret;
+ if (pdev)
+ gpu = platform_get_drvdata(pdev);
+
if (!gpu) {
- dev_err(dev->dev, "no adreno device\n");
+ dev_err_once(dev->dev, "no GPU device was found\n");
return NULL;
}
@@ -153,101 +160,45 @@ static void set_gpu_pdev(struct drm_device *dev,
priv->gpu_pdev = pdev;
}
-static int find_chipid(struct device *dev, u32 *chipid)
+static int find_chipid(struct device *dev, struct adreno_rev *rev)
{
struct device_node *node = dev->of_node;
const char *compat;
int ret;
+ u32 chipid;
/* first search the compat strings for qcom,adreno-XYZ.W: */
ret = of_property_read_string_index(node, "compatible", 0, &compat);
if (ret == 0) {
- unsigned rev, patch;
+ unsigned int r, patch;
- if (sscanf(compat, "qcom,adreno-%u.%u", &rev, &patch) == 2) {
- *chipid = 0;
- *chipid |= (rev / 100) << 24; /* core */
- rev %= 100;
- *chipid |= (rev / 10) << 16; /* major */
- rev %= 10;
- *chipid |= rev << 8; /* minor */
- *chipid |= patch;
+ if (sscanf(compat, "qcom,adreno-%u.%u", &r, &patch) == 2) {
+ rev->core = r / 100;
+ r %= 100;
+ rev->major = r / 10;
+ r %= 10;
+ rev->minor = r;
+ rev->patchid = patch;
return 0;
}
}
/* and if that fails, fall back to legacy "qcom,chipid" property: */
- ret = of_property_read_u32(node, "qcom,chipid", chipid);
- if (ret)
+ ret = of_property_read_u32(node, "qcom,chipid", &chipid);
+ if (ret) {
+ dev_err(dev, "could not parse qcom,chipid: %d\n", ret);
return ret;
-
- dev_warn(dev, "Using legacy qcom,chipid binding!\n");
- dev_warn(dev, "Use compatible qcom,adreno-%u%u%u.%u instead.\n",
- (*chipid >> 24) & 0xff, (*chipid >> 16) & 0xff,
- (*chipid >> 8) & 0xff, *chipid & 0xff);
-
- return 0;
-}
-
-/* Get legacy powerlevels from qcom,gpu-pwrlevels and populate the opp table */
-static int adreno_get_legacy_pwrlevels(struct device *dev)
-{
- struct device_node *child, *node;
- int ret;
-
- node = of_find_compatible_node(dev->of_node, NULL,
- "qcom,gpu-pwrlevels");
- if (!node) {
- dev_err(dev, "Could not find the GPU powerlevels\n");
- return -ENXIO;
}
- for_each_child_of_node(node, child) {
- unsigned int val;
-
- ret = of_property_read_u32(child, "qcom,gpu-freq", &val);
- if (ret)
- continue;
+ rev->core = (chipid >> 24) & 0xff;
+ rev->major = (chipid >> 16) & 0xff;
+ rev->minor = (chipid >> 8) & 0xff;
+ rev->patchid = (chipid & 0xff);
- /*
- * Skip the intentionally bogus clock value found at the bottom
- * of most legacy frequency tables
- */
- if (val != 27000000)
- dev_pm_opp_add(dev, val, 0);
- }
-
- return 0;
-}
-
-static int adreno_get_pwrlevels(struct device *dev,
- struct adreno_platform_config *config)
-{
- unsigned long freq = ULONG_MAX;
- struct dev_pm_opp *opp;
- int ret;
-
- /* You down with OPP? */
- if (!of_find_property(dev->of_node, "operating-points-v2", NULL))
- ret = adreno_get_legacy_pwrlevels(dev);
- else
- ret = dev_pm_opp_of_add_table(dev);
-
- if (ret)
- return ret;
-
- /* Find the fastest defined rate */
- opp = dev_pm_opp_find_freq_floor(dev, &freq);
- if (!IS_ERR(opp))
- config->fast_rate = dev_pm_opp_get_freq(opp);
-
- if (!config->fast_rate) {
- DRM_DEV_INFO(dev,
- "Could not find clock rate. Using default\n");
- /* Pick a suitably safe clock speed for any target */
- config->fast_rate = 200000000;
- }
+ dev_warn(dev, "Using legacy qcom,chipid binding!\n");
+ dev_warn(dev, "Use compatible qcom,adreno-%u%u%u.%u instead.\n",
+ rev->core, rev->major, rev->minor, rev->patchid);
return 0;
}
@@ -258,22 +209,9 @@ static int adreno_bind(struct device *dev, struct device *master, void *data)
const struct adreno_info *info;
struct drm_device *drm = dev_get_drvdata(master);
struct msm_gpu *gpu;
- u32 val;
int ret;
- ret = find_chipid(dev, &val);
- if (ret) {
- dev_err(dev, "could not find chipid: %d\n", ret);
- return ret;
- }
-
- config.rev = ADRENO_REV((val >> 24) & 0xff,
- (val >> 16) & 0xff, (val >> 8) & 0xff, val & 0xff);
-
- /* find clock rates: */
- config.fast_rate = 0;
-
- ret = adreno_get_pwrlevels(dev, &config);
+ ret = find_chipid(dev, &config.rev);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index e2ffecce59a3..de63ff26a062 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -17,11 +17,11 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/pm_opp.h>
#include "adreno_gpu.h"
#include "msm_gem.h"
#include "msm_mmu.h"
-
int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@@ -461,10 +461,80 @@ void adreno_wait_ring(struct msm_ringbuffer *ring, uint32_t ndwords)
{
if (spin_until(ring_freewords(ring) >= ndwords))
DRM_DEV_ERROR(ring->gpu->dev->dev,
- "timeout waiting for space in ringubffer %d\n",
+ "timeout waiting for space in ringbuffer %d\n",
ring->id);
}
+/* Get legacy powerlevels from qcom,gpu-pwrlevels and populate the opp table */
+static int adreno_get_legacy_pwrlevels(struct device *dev)
+{
+ struct device_node *child, *node;
+ int ret;
+
+ node = of_find_compatible_node(dev->of_node, NULL,
+ "qcom,gpu-pwrlevels");
+ if (!node) {
+ dev_err(dev, "Could not find the GPU powerlevels\n");
+ return -ENXIO;
+ }
+
+ for_each_child_of_node(node, child) {
+ unsigned int val;
+
+ ret = of_property_read_u32(child, "qcom,gpu-freq", &val);
+ if (ret)
+ continue;
+
+ /*
+ * Skip the intentionally bogus clock value found at the bottom
+ * of most legacy frequency tables
+ */
+ if (val != 27000000)
+ dev_pm_opp_add(dev, val, 0);
+ }
+
+ return 0;
+}
+
+static int adreno_get_pwrlevels(struct device *dev,
+ struct msm_gpu *gpu)
+{
+ unsigned long freq = ULONG_MAX;
+ struct dev_pm_opp *opp;
+ int ret;
+
+ gpu->fast_rate = 0;
+
+ /* You down with OPP? */
+ if (!of_find_property(dev->of_node, "operating-points-v2", NULL))
+ ret = adreno_get_legacy_pwrlevels(dev);
+ else {
+ ret = dev_pm_opp_of_add_table(dev);
+ if (ret)
+ dev_err(dev, "Unable to set the OPP table\n");
+ }
+
+ if (!ret) {
+ /* Find the fastest defined rate */
+ opp = dev_pm_opp_find_freq_floor(dev, &freq);
+ if (!IS_ERR(opp)) {
+ gpu->fast_rate = freq;
+ dev_pm_opp_put(opp);
+ }
+ }
+
+ if (!gpu->fast_rate) {
+ dev_warn(dev,
+ "Could not find a clock rate. Using a reasonable default\n");
+ /* Pick a suitably safe clock speed for any target */
+ gpu->fast_rate = 200000000;
+ }
+
+ DBG("fast_rate=%u, slow_rate=27000000", gpu->fast_rate);
+
+ return 0;
+}
+
int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
struct adreno_gpu *adreno_gpu,
const struct adreno_gpu_funcs *funcs, int nr_rings)
@@ -479,15 +549,6 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
adreno_gpu->revn = adreno_gpu->info->revn;
adreno_gpu->rev = config->rev;
- gpu->fast_rate = config->fast_rate;
- gpu->bus_freq = config->bus_freq;
-#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
- gpu->bus_scale_table = config->bus_scale_table;
-#endif
-
- DBG("fast_rate=%u, slow_rate=27000000, bus_freq=%u",
- gpu->fast_rate, gpu->bus_freq);
-
adreno_gpu_config.ioname = "kgsl_3d0_reg_memory";
adreno_gpu_config.irqname = "kgsl_3d0_irq";
@@ -496,6 +557,8 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
adreno_gpu_config.nr_rings = nr_rings;
+ adreno_get_pwrlevels(&pdev->dev, gpu);
+
pm_runtime_set_autosuspend_delay(&pdev->dev, DRM_MSM_INACTIVE_PERIOD);
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_enable(&pdev->dev);
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index 28e3de6e5f94..8d3d0a924908 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -129,10 +129,6 @@ struct adreno_gpu {
/* platform config data (ie. from DT, or pdata) */
struct adreno_platform_config {
struct adreno_rev rev;
- uint32_t fast_rate, bus_freq;
-#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
- struct msm_bus_scale_pdata *bus_scale_table;
-#endif
};
#define ADRENO_IDLE_TIMEOUT msecs_to_jiffies(1000)
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
index 940de51ac5cd..a1b3e31e959e 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
@@ -234,10 +234,6 @@ static inline struct clk *mpd4_lvds_pll_init(struct drm_device *dev)
#endif
#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
-static inline int match_dev_name(struct device *dev, void *data)
-{
- return !strcmp(dev_name(dev), data);
-}
/* bus scaling data is associated with extra pointless platform devices,
* "dtv", etc.. this is a bit of a hack, but we need a way for encoders
* to find their pdata to make the bus-scaling stuff work.
@@ -245,8 +241,7 @@ static inline int match_dev_name(struct device *dev, void *data)
static inline void *mdp4_find_pdata(const char *devname)
{
struct device *dev;
- dev = bus_find_device(&platform_bus_type, NULL,
- (void *)devname, match_dev_name);
+ dev = bus_find_device_by_name(&platform_bus_type, NULL, devname);
return dev ? dev->platform_data : NULL;
}
#endif
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
index ee41423baeb7..29678876fc09 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
@@ -966,8 +966,6 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
uint32_t src_x, src_y;
uint32_t src_w, src_h;
uint32_t src_img_w, src_img_h;
- uint32_t src_x_r;
- int crtc_x_r;
int ret;
nplanes = fb->format->num_planes;
@@ -1012,9 +1010,6 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
crtc_w /= 2;
src_w /= 2;
src_img_w /= 2;
-
- crtc_x_r = crtc_x + crtc_w;
- src_x_r = src_x + src_w;
}
ret = calc_scalex_steps(plane, pix_format, src_w, crtc_w, step.x);
@@ -1054,9 +1049,9 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
if (right_hwpipe)
mdp5_hwpipe_mode_set(mdp5_kms, right_hwpipe, fb, &step, &pe,
config, hdecm, vdecm, hflip, vflip,
- crtc_x_r, crtc_y, crtc_w, crtc_h,
+ crtc_x + crtc_w, crtc_y, crtc_w, crtc_h,
src_img_w, src_img_h,
- src_x_r, src_y, src_w, src_h);
+ src_x + src_w, src_y, src_w, src_h);
plane->fb = fb;
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 0a3ea3034e39..d90ef1d78a1b 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -37,16 +37,9 @@
#define MSM_VERSION_MINOR 3
#define MSM_VERSION_PATCHLEVEL 0
-static void msm_fb_output_poll_changed(struct drm_device *dev)
-{
- struct msm_drm_private *priv = dev->dev_private;
- if (priv->fbdev)
- drm_fb_helper_hotplug_event(priv->fbdev);
-}
-
static const struct drm_mode_config_funcs mode_config_funcs = {
.fb_create = msm_framebuffer_create,
- .output_poll_changed = msm_fb_output_poll_changed,
+ .output_poll_changed = drm_fb_helper_output_poll_changed,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = msm_atomic_commit,
.atomic_state_alloc = msm_atomic_state_alloc,
@@ -551,13 +544,6 @@ static void msm_postclose(struct drm_device *dev, struct drm_file *file)
context_close(ctx);
}
-static void msm_lastclose(struct drm_device *dev)
-{
- struct msm_drm_private *priv = dev->dev_private;
- if (priv->fbdev)
- drm_fb_helper_restore_fbdev_mode_unlocked(priv->fbdev);
-}
-
static irqreturn_t msm_irq(int irq, void *arg)
{
struct drm_device *dev = arg;
@@ -866,7 +852,7 @@ static struct drm_driver msm_driver = {
DRIVER_MODESET,
.open = msm_open,
.postclose = msm_postclose,
- .lastclose = msm_lastclose,
+ .lastclose = drm_fb_helper_lastclose,
.irq_handler = msm_irq,
.irq_preinstall = msm_irq_preinstall,
.irq_postinstall = msm_irq_postinstall,
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index c646843d8822..0a653dd2e618 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -303,7 +303,8 @@ int msm_perf_debugfs_init(struct drm_minor *minor);
void msm_perf_debugfs_cleanup(struct msm_drm_private *priv);
#else
static inline int msm_debugfs_late_init(struct drm_device *dev) { return 0; }
-static inline void msm_rd_dump_submit(struct msm_gem_submit *submit) {}
+static inline void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit,
+ const char *fmt, ...) {}
static inline void msm_rd_debugfs_cleanup(struct msm_drm_private *priv) {}
static inline void msm_perf_debugfs_cleanup(struct msm_drm_private *priv) {}
#endif
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 81fe6d6740ce..07376de9ff4c 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -93,14 +93,17 @@ static struct page **get_pages(struct drm_gem_object *obj)
return p;
}
+ msm_obj->pages = p;
+
msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
if (IS_ERR(msm_obj->sgt)) {
+ void *ptr = ERR_CAST(msm_obj->sgt);
+
dev_err(dev->dev, "failed to allocate sgt\n");
- return ERR_CAST(msm_obj->sgt);
+ msm_obj->sgt = NULL;
+ return ptr;
}
- msm_obj->pages = p;
-
/* For non-cached buffers, ensure the new pages are clean
* because display controller, GPU, etc. are not coherent:
*/
@@ -135,7 +138,10 @@ static void put_pages(struct drm_gem_object *obj)
if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
- sg_free_table(msm_obj->sgt);
+
+ if (msm_obj->sgt)
+ sg_free_table(msm_obj->sgt);
+
kfree(msm_obj->sgt);
if (use_pages(obj))
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 232201403439..bd376f9e18a7 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -21,42 +21,90 @@
#include "msm_fence.h"
#include <linux/string_helpers.h>
+#include <linux/pm_opp.h>
+#include <linux/devfreq.h>
/*
* Power Management:
*/
-#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
-#include <mach/board.h>
-static void bs_init(struct msm_gpu *gpu)
+static int msm_devfreq_target(struct device *dev, unsigned long *freq,
+ u32 flags)
{
- if (gpu->bus_scale_table) {
- gpu->bsc = msm_bus_scale_register_client(gpu->bus_scale_table);
- DBG("bus scale client: %08x", gpu->bsc);
- }
+ struct msm_gpu *gpu = platform_get_drvdata(to_platform_device(dev));
+ struct dev_pm_opp *opp;
+
+ opp = devfreq_recommended_opp(dev, freq, flags);
+
+ if (IS_ERR(opp))
+ return PTR_ERR(opp);
+
+ clk_set_rate(gpu->core_clk, *freq);
+ dev_pm_opp_put(opp);
+
+ return 0;
}
-static void bs_fini(struct msm_gpu *gpu)
+static int msm_devfreq_get_dev_status(struct device *dev,
+ struct devfreq_dev_status *status)
{
- if (gpu->bsc) {
- msm_bus_scale_unregister_client(gpu->bsc);
- gpu->bsc = 0;
- }
+ struct msm_gpu *gpu = platform_get_drvdata(to_platform_device(dev));
+ u64 cycles;
+ u32 freq = ((u32) status->current_frequency) / 1000000;
+ ktime_t time;
+
+ status->current_frequency = (unsigned long) clk_get_rate(gpu->core_clk);
+ gpu->funcs->gpu_busy(gpu, &cycles);
+
+ status->busy_time = ((u32) (cycles - gpu->devfreq.busy_cycles)) / freq;
+
+ gpu->devfreq.busy_cycles = cycles;
+
+ time = ktime_get();
+ status->total_time = ktime_us_delta(time, gpu->devfreq.time);
+ gpu->devfreq.time = time;
+
+ return 0;
+}
+
+static int msm_devfreq_get_cur_freq(struct device *dev, unsigned long *freq)
+{
+ struct msm_gpu *gpu = platform_get_drvdata(to_platform_device(dev));
+
+ *freq = (unsigned long) clk_get_rate(gpu->core_clk);
+
+ return 0;
}
-static void bs_set(struct msm_gpu *gpu, int idx)
+static struct devfreq_dev_profile msm_devfreq_profile = {
+ .polling_ms = 10,
+ .target = msm_devfreq_target,
+ .get_dev_status = msm_devfreq_get_dev_status,
+ .get_cur_freq = msm_devfreq_get_cur_freq,
+};
+
+static void msm_devfreq_init(struct msm_gpu *gpu)
{
- if (gpu->bsc) {
- DBG("set bus scaling: %d", idx);
- msm_bus_scale_client_update_request(gpu->bsc, idx);
+ /* We need target support to do devfreq */
+ if (!gpu->funcs->gpu_busy)
+ return;
+
+ msm_devfreq_profile.initial_freq = gpu->fast_rate;
+
+ /*
+ * Don't set the freq_table or max_state and let devfreq build the table
+ * from OPP
+ */
+
+ gpu->devfreq.devfreq = devm_devfreq_add_device(&gpu->pdev->dev,
+ &msm_devfreq_profile, "simple_ondemand", NULL);
+
+ if (IS_ERR(gpu->devfreq.devfreq)) {
+ dev_err(&gpu->pdev->dev, "Couldn't initialize GPU devfreq\n");
+ gpu->devfreq.devfreq = NULL;
}
}
-#else
-static void bs_init(struct msm_gpu *gpu) {}
-static void bs_fini(struct msm_gpu *gpu) {}
-static void bs_set(struct msm_gpu *gpu, int idx) {}
-#endif
static int enable_pwrrail(struct msm_gpu *gpu)
{
@@ -143,8 +191,6 @@ static int enable_axi(struct msm_gpu *gpu)
{
if (gpu->ebi1_clk)
clk_prepare_enable(gpu->ebi1_clk);
- if (gpu->bus_freq)
- bs_set(gpu, gpu->bus_freq);
return 0;
}
@@ -152,8 +198,6 @@ static int disable_axi(struct msm_gpu *gpu)
{
if (gpu->ebi1_clk)
clk_disable_unprepare(gpu->ebi1_clk);
- if (gpu->bus_freq)
- bs_set(gpu, 0);
return 0;
}
@@ -175,6 +219,13 @@ int msm_gpu_pm_resume(struct msm_gpu *gpu)
if (ret)
return ret;
+ if (gpu->devfreq.devfreq) {
+ gpu->devfreq.busy_cycles = 0;
+ gpu->devfreq.time = ktime_get();
+
+ devfreq_resume_device(gpu->devfreq.devfreq);
+ }
+
gpu->needs_hw_init = true;
return 0;
@@ -186,6 +237,9 @@ int msm_gpu_pm_suspend(struct msm_gpu *gpu)
DBG("%s", gpu->name);
+ if (gpu->devfreq.devfreq)
+ devfreq_suspend_device(gpu->devfreq.devfreq);
+
ret = disable_axi(gpu);
if (ret)
return ret;
@@ -294,6 +348,8 @@ static void recover_worker(struct work_struct *work)
msm_rd_dump_submit(priv->hangrd, submit,
"offending task: %s (%s)", task->comm, cmd);
+
+ kfree(cmd);
} else {
msm_rd_dump_submit(priv->hangrd, submit, NULL);
}
@@ -306,7 +362,7 @@ static void recover_worker(struct work_struct *work)
* needs to happen after msm_rd_dump_submit() to ensure that the
* bo's referenced by the offending submit are still around.
*/
- for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) {
+ for (i = 0; i < gpu->nr_rings; i++) {
struct msm_ringbuffer *ring = gpu->rb[i];
uint32_t fence = ring->memptrs->fence;
@@ -753,7 +809,7 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
gpu->pdev = pdev;
platform_set_drvdata(pdev, gpu);
- bs_init(gpu);
+ msm_devfreq_init(gpu);
gpu->aspace = msm_gpu_create_address_space(gpu, pdev,
config->va_start, config->va_end);
@@ -824,8 +880,6 @@ void msm_gpu_cleanup(struct msm_gpu *gpu)
WARN_ON(!list_empty(&gpu->active_list));
- bs_fini(gpu);
-
for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) {
msm_ringbuffer_destroy(gpu->rb[i]);
gpu->rb[i] = NULL;
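
The devfreq hookup above reports load as two durations in the same unit: total_time comes from ktime_us_delta(), and busy_time is derived by dividing the busy-cycle delta by the core clock expressed in MHz (cycles per microsecond). A standalone arithmetic sketch with made-up numbers, not kernel code:

/* Sketch of the load calculation behind msm_devfreq_get_dev_status(). */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t prev_cycles = 1000000000ULL;	/* devfreq.busy_cycles from the last sample */
	uint64_t cur_cycles  = 1004200000ULL;	/* value read via the gpu_busy() callback */
	uint32_t freq_mhz    = 600;		/* core_clk rate / 1000000 */
	uint64_t total_us    = 10000;		/* ktime_us_delta() over the sample window */

	/* cycles / (cycles per microsecond) -> busy time in microseconds */
	uint64_t busy_us = (uint32_t)(cur_cycles - prev_cycles) / freq_mhz;

	printf("busy %llu us of %llu us -> load %.1f%%\n",
	       (unsigned long long)busy_us, (unsigned long long)total_us,
	       100.0 * busy_us / total_us);
	return 0;
}
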
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index e113d64574d3..fccfccd303af 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -66,6 +66,7 @@ struct msm_gpu_funcs {
/* show GPU status in debugfs: */
void (*show)(struct msm_gpu *gpu, struct seq_file *m);
#endif
+ int (*gpu_busy)(struct msm_gpu *gpu, uint64_t *value);
};
struct msm_gpu {
@@ -108,12 +109,7 @@ struct msm_gpu {
struct clk **grp_clks;
int nr_clocks;
struct clk *ebi1_clk, *core_clk, *rbbmtimer_clk;
- uint32_t fast_rate, bus_freq;
-
-#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
- struct msm_bus_scale_pdata *bus_scale_table;
- uint32_t bsc;
-#endif
+ uint32_t fast_rate;
/* Hang and Inactivity Detection:
*/
@@ -125,6 +121,12 @@ struct msm_gpu {
struct work_struct recover_work;
struct drm_gem_object *memptrs_bo;
+
+ struct {
+ struct devfreq *devfreq;
+ u64 busy_cycles;
+ ktime_t time;
+ } devfreq;
};
/* It turns out that all targets use the same ringbuffer size */
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 949bf6b3feab..ce328edee7a1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -1218,7 +1218,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
if (ret)
return ret;
- ret = ttm_tt_bind(bo->ttm, &tmp_reg);
+ ret = ttm_tt_bind(bo->ttm, &tmp_reg, &ctx);
if (ret)
goto out;
@@ -1226,7 +1226,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
if (ret)
goto out;
- ret = ttm_bo_move_ttm(bo, intr, no_wait_gpu, new_reg);
+ ret = ttm_bo_move_ttm(bo, &ctx, new_reg);
out:
ttm_bo_mem_put(bo, &tmp_reg);
return ret;
@@ -1255,7 +1255,7 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
if (ret)
return ret;
- ret = ttm_bo_move_ttm(bo, intr, no_wait_gpu, &tmp_reg);
+ ret = ttm_bo_move_ttm(bo, &ctx, &tmp_reg);
if (ret)
goto out;
@@ -1380,8 +1380,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict,
/* Fallback to software copy. */
ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
if (ret == 0)
- ret = ttm_bo_move_memcpy(bo, ctx->interruptible,
- ctx->no_wait_gpu, new_reg);
+ ret = ttm_bo_move_memcpy(bo, ctx, new_reg);
out:
if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
@@ -1548,7 +1547,7 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
}
static int
-nouveau_ttm_tt_populate(struct ttm_tt *ttm)
+nouveau_ttm_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
struct ttm_dma_tt *ttm_dma = (void *)ttm;
struct nouveau_drm *drm;
@@ -1573,17 +1572,17 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
#if IS_ENABLED(CONFIG_AGP)
if (drm->agp.bridge) {
- return ttm_agp_tt_populate(ttm);
+ return ttm_agp_tt_populate(ttm, ctx);
}
#endif
#if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86)
if (swiotlb_nr_tbl()) {
- return ttm_dma_populate((void *)ttm, dev);
+ return ttm_dma_populate((void *)ttm, dev, ctx);
}
#endif
- r = ttm_pool_populate(ttm);
+ r = ttm_pool_populate(ttm, ctx);
if (r) {
return r;
}
@@ -1673,5 +1672,4 @@ struct ttm_bo_driver nouveau_bo_driver = {
.fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
.io_mem_free = &nouveau_ttm_io_mem_free,
- .io_mem_pfn = ttm_bo_default_io_mem_pfn,
};
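
The nouveau changes above follow a TTM interface rework in which the separate interruptible/no_wait_gpu booleans are carried in a single operation context that is threaded through the move and populate paths. A minimal standalone mock of that pattern; the struct below is illustrative, not the real struct ttm_operation_ctx definition:

#include <stdbool.h>
#include <stdio.h>

/* Mock context bundling what used to be two loose boolean parameters. */
struct mock_operation_ctx {
	bool interruptible;
	bool no_wait_gpu;
};

/* old-style signature: flags passed individually */
static int move_old(bool intr, bool no_wait_gpu)
{
	printf("old: intr=%d no_wait=%d\n", intr, no_wait_gpu);
	return 0;
}

/* new-style signature: one context argument that can grow new fields */
static int move_new(const struct mock_operation_ctx *ctx)
{
	printf("new: intr=%d no_wait=%d\n", ctx->interruptible, ctx->no_wait_gpu);
	return 0;
}

int main(void)
{
	struct mock_operation_ctx ctx = { .interruptible = true, .no_wait_gpu = false };

	move_old(true, false);
	move_new(&ctx);
	return 0;
}
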
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 2e7785f49e6d..009713404cc4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -29,6 +29,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
#include <nvif/class.h>
@@ -292,7 +293,7 @@ nouveau_user_framebuffer_create(struct drm_device *dev,
static const struct drm_mode_config_funcs nouveau_mode_config_funcs = {
.fb_create = nouveau_user_framebuffer_create,
- .output_poll_changed = nouveau_fbcon_output_poll_changed,
+ .output_poll_changed = drm_fb_helper_output_poll_changed,
};
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index c533d8e04afc..45a4572cd2fb 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -413,14 +413,6 @@ out:
return ret;
}
-void
-nouveau_fbcon_output_poll_changed(struct drm_device *dev)
-{
- struct nouveau_drm *drm = nouveau_drm(dev);
- if (drm->fbcon)
- drm_fb_helper_hotplug_event(&drm->fbcon->helper);
-}
-
static int
nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *fbcon)
{
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
index e2bca729721e..a6f192ea3fa6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
@@ -68,8 +68,6 @@ void nouveau_fbcon_set_suspend(struct drm_device *dev, int state);
void nouveau_fbcon_accel_save_disable(struct drm_device *dev);
void nouveau_fbcon_accel_restore(struct drm_device *dev);
-void nouveau_fbcon_output_poll_changed(struct drm_device *dev);
-
extern int nouveau_nofbaccel;
#endif /* __NV50_FBCON_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
index 52e52a360fb1..3da5a4305aa4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
@@ -4,6 +4,7 @@
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
#include "nouveau_drv.h"
#include "nouveau_acpi.h"
@@ -61,7 +62,7 @@ static void
nouveau_switcheroo_reprobe(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
- nouveau_fbcon_output_poll_changed(dev);
+ drm_fb_helper_output_poll_changed(dev);
}
static bool
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 65336948e807..b22c37bde13f 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -4311,7 +4311,7 @@ nv50_disp_atomic_state_alloc(struct drm_device *dev)
static const struct drm_mode_config_funcs
nv50_disp_func = {
.fb_create = nouveau_user_framebuffer_create,
- .output_poll_changed = nouveau_fbcon_output_poll_changed,
+ .output_poll_changed = drm_fb_helper_output_poll_changed,
.atomic_check = nv50_disp_atomic_check,
.atomic_commit = nv50_disp_atomic_commit,
.atomic_state_alloc = nv50_disp_atomic_state_alloc,
diff --git a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c
index 542a76503fbd..95ea6abae914 100644
--- a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c
+++ b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c
@@ -1,7 +1,7 @@
/*
* Analog TV Connector driver
*
- * Copyright (C) 2013 Texas Instruments
+ * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/
* Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
*
* This program is free software; you can redistribute it and/or modify it
diff --git a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c
index 05fa24a518c8..10b4b97d3595 100644
--- a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c
+++ b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c
@@ -1,7 +1,7 @@
/*
* Generic DVI Connector driver
*
- * Copyright (C) 2013 Texas Instruments
+ * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/
* Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
*
* This program is free software; you can redistribute it and/or modify it
diff --git a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c
index 4600d3841c25..2867476419dc 100644
--- a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c
+++ b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c
@@ -1,7 +1,7 @@
/*
* HDMI Connector driver
*
- * Copyright (C) 2013 Texas Instruments
+ * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/
* Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
*
* This program is free software; you can redistribute it and/or modify it
diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c
index b1f6aa09f699..d523c67a3ae3 100644
--- a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c
+++ b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c
@@ -6,7 +6,7 @@
*
* based on encoder-tfp410
*
- * Copyright (C) 2013 Texas Instruments
+ * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/
* Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
*
* This program is free software; you can redistribute it and/or modify it
diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c
index 947295f9e30f..e01ab3db6d86 100644
--- a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c
+++ b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c
@@ -1,7 +1,7 @@
/*
* TFP410 DPI-to-DVI encoder driver
*
- * Copyright (C) 2013 Texas Instruments
+ * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/
* Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
*
* This program is free software; you can redistribute it and/or modify it
@@ -173,7 +173,8 @@ static int tfp410_probe_of(struct platform_device *pdev)
if (gpio_is_valid(gpio) || gpio == -ENOENT) {
ddata->pd_gpio = gpio;
} else {
- dev_err(&pdev->dev, "failed to parse PD gpio\n");
+ if (gpio != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "failed to parse PD gpio\n");
return gpio;
}
diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c
index e3d98d78fc40..1fd493e5fa3d 100644
--- a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c
+++ b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c
@@ -1,7 +1,7 @@
/*
* TPD12S015 HDMI ESD protection & level shifter chip driver
*
- * Copyright (C) 2013 Texas Instruments
+ * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/
* Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
*
* This program is free software; you can redistribute it and/or modify it
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c
index e065f7e10cca..efff6dbbb86f 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c
@@ -1,7 +1,7 @@
/*
* Generic MIPI DPI Panel Driver
*
- * Copyright (C) 2013 Texas Instruments
+ * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/
* Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
*
* This program is free software; you can redistribute it and/or modify it
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
index 92c556ac22c7..15399a1a666b 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
@@ -1,7 +1,7 @@
/*
* Generic DSI Command Mode panel driver
*
- * Copyright (C) 2013 Texas Instruments
+ * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/
* Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
*
* This program is free software; you can redistribute it and/or modify it
@@ -22,9 +22,10 @@
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/of_device.h>
-#include <linux/of_gpio.h>
+#include <linux/regulator/consumer.h>
#include <video/mipi_display.h>
+#include <video/of_display_timing.h>
#include "../dss/omapdss.h"
@@ -49,6 +50,7 @@ struct panel_drv_data {
struct mutex lock;
struct backlight_device *bldev;
+ struct backlight_device *extbldev;
unsigned long hw_guard_end; /* next value of jiffies when we can
* issue the next sleep in/out command
@@ -56,11 +58,17 @@ struct panel_drv_data {
unsigned long hw_guard_wait; /* max guard time in jiffies */
/* panel HW configuration from DT or platform data */
- int reset_gpio;
- int ext_te_gpio;
+ struct gpio_desc *reset_gpio;
+ struct gpio_desc *ext_te_gpio;
+
+ struct regulator *vpnl;
+ struct regulator *vddi;
bool use_dsi_backlight;
+ int width_mm;
+ int height_mm;
+
struct omap_dsi_pin_config pin_config;
/* runtime variables */
@@ -92,6 +100,30 @@ static int dsicm_panel_reset(struct panel_drv_data *ddata);
static void dsicm_ulps_work(struct work_struct *work);
+static void dsicm_bl_power(struct panel_drv_data *ddata, bool enable)
+{
+ struct backlight_device *backlight;
+
+ if (ddata->bldev)
+ backlight = ddata->bldev;
+ else if (ddata->extbldev)
+ backlight = ddata->extbldev;
+ else
+ return;
+
+ if (enable) {
+ backlight->props.fb_blank = FB_BLANK_UNBLANK;
+ backlight->props.state = ~(BL_CORE_FBBLANK | BL_CORE_SUSPENDED);
+ backlight->props.power = FB_BLANK_UNBLANK;
+ } else {
+ backlight->props.fb_blank = FB_BLANK_NORMAL;
+ backlight->props.power = FB_BLANK_POWERDOWN;
+ backlight->props.state |= BL_CORE_FBBLANK | BL_CORE_SUSPENDED;
+ }
+
+ backlight_update_status(backlight);
+}
+
static void hw_guard_start(struct panel_drv_data *ddata, int guard_msec)
{
ddata->hw_guard_wait = msecs_to_jiffies(guard_msec);
@@ -255,8 +287,8 @@ static int dsicm_enter_ulps(struct panel_drv_data *ddata)
if (r)
goto err;
- if (gpio_is_valid(ddata->ext_te_gpio))
- disable_irq(gpio_to_irq(ddata->ext_te_gpio));
+ if (ddata->ext_te_gpio)
+ disable_irq(gpiod_to_irq(ddata->ext_te_gpio));
in->ops.dsi->disable(in, false, true);
@@ -297,8 +329,8 @@ static int dsicm_exit_ulps(struct panel_drv_data *ddata)
goto err2;
}
- if (gpio_is_valid(ddata->ext_te_gpio))
- enable_irq(gpio_to_irq(ddata->ext_te_gpio));
+ if (ddata->ext_te_gpio)
+ enable_irq(gpiod_to_irq(ddata->ext_te_gpio));
dsicm_queue_ulps_work(ddata);
@@ -311,8 +343,8 @@ err2:
r = dsicm_panel_reset(ddata);
if (!r) {
- if (gpio_is_valid(ddata->ext_te_gpio))
- enable_irq(gpio_to_irq(ddata->ext_te_gpio));
+ if (ddata->ext_te_gpio)
+ enable_irq(gpiod_to_irq(ddata->ext_te_gpio));
ddata->ulps_enabled = false;
}
err1:
@@ -335,7 +367,7 @@ static int dsicm_bl_update_status(struct backlight_device *dev)
{
struct panel_drv_data *ddata = dev_get_drvdata(&dev->dev);
struct omap_dss_device *in = ddata->in;
- int r;
+ int r = 0;
int level;
if (dev->props.fb_blank == FB_BLANK_UNBLANK &&
@@ -356,8 +388,6 @@ static int dsicm_bl_update_status(struct backlight_device *dev)
r = dsicm_dcs_write_1(ddata, DCS_BRIGHTNESS, level);
in->ops.dsi->bus_unlock(in);
- } else {
- r = 0;
}
mutex_unlock(&ddata->lock);
@@ -560,16 +590,13 @@ static const struct attribute_group dsicm_attr_group = {
static void dsicm_hw_reset(struct panel_drv_data *ddata)
{
- if (!gpio_is_valid(ddata->reset_gpio))
- return;
-
- gpio_set_value(ddata->reset_gpio, 1);
+ gpiod_set_value(ddata->reset_gpio, 1);
udelay(10);
/* reset the panel */
- gpio_set_value(ddata->reset_gpio, 0);
+ gpiod_set_value(ddata->reset_gpio, 0);
/* assert reset */
udelay(10);
- gpio_set_value(ddata->reset_gpio, 1);
+ gpiod_set_value(ddata->reset_gpio, 1);
/* wait after releasing reset */
usleep_range(5000, 10000);
}
@@ -589,25 +616,43 @@ static int dsicm_power_on(struct panel_drv_data *ddata)
.lp_clk_max = 10000000,
};
+ if (ddata->vpnl) {
+ r = regulator_enable(ddata->vpnl);
+ if (r) {
+ dev_err(&ddata->pdev->dev,
+ "failed to enable VPNL: %d\n", r);
+ return r;
+ }
+ }
+
+ if (ddata->vddi) {
+ r = regulator_enable(ddata->vddi);
+ if (r) {
+ dev_err(&ddata->pdev->dev,
+ "failed to enable VDDI: %d\n", r);
+ goto err_vpnl;
+ }
+ }
+
if (ddata->pin_config.num_pins > 0) {
r = in->ops.dsi->configure_pins(in, &ddata->pin_config);
if (r) {
dev_err(&ddata->pdev->dev,
"failed to configure DSI pins\n");
- goto err0;
+ goto err_vddi;
}
}
r = in->ops.dsi->set_config(in, &dsi_config);
if (r) {
dev_err(&ddata->pdev->dev, "failed to configure DSI\n");
- goto err0;
+ goto err_vddi;
}
r = in->ops.dsi->enable(in);
if (r) {
dev_err(&ddata->pdev->dev, "failed to enable DSI\n");
- goto err0;
+ goto err_vddi;
}
dsicm_hw_reset(ddata);
@@ -665,7 +710,13 @@ err:
dsicm_hw_reset(ddata);
in->ops.dsi->disable(in, true, false);
-err0:
+err_vddi:
+ if (ddata->vddi)
+ regulator_disable(ddata->vddi);
+err_vpnl:
+ if (ddata->vpnl)
+ regulator_disable(ddata->vpnl);
+
return r;
}
@@ -688,6 +739,11 @@ static void dsicm_power_off(struct panel_drv_data *ddata)
in->ops.dsi->disable(in, true, false);
+ if (ddata->vddi)
+ regulator_disable(ddata->vddi);
+ if (ddata->vpnl)
+ regulator_disable(ddata->vpnl);
+
ddata->enabled = 0;
}
@@ -782,6 +838,8 @@ static int dsicm_enable(struct omap_dss_device *dssdev)
mutex_unlock(&ddata->lock);
+ dsicm_bl_power(ddata, true);
+
return 0;
err:
dev_dbg(&ddata->pdev->dev, "enable failed\n");
@@ -797,6 +855,8 @@ static void dsicm_disable(struct omap_dss_device *dssdev)
dev_dbg(&ddata->pdev->dev, "disable\n");
+ dsicm_bl_power(ddata, false);
+
mutex_lock(&ddata->lock);
dsicm_cancel_ulps_work(ddata);
@@ -890,7 +950,7 @@ static int dsicm_update(struct omap_dss_device *dssdev,
if (r)
goto err;
- if (ddata->te_enabled && gpio_is_valid(ddata->ext_te_gpio)) {
+ if (ddata->te_enabled && ddata->ext_te_gpio) {
schedule_delayed_work(&ddata->te_timeout_work,
msecs_to_jiffies(250));
atomic_set(&ddata->do_update, 1);
@@ -937,7 +997,7 @@ static int _dsicm_enable_te(struct panel_drv_data *ddata, bool enable)
else
r = dsicm_dcs_write_0(ddata, MIPI_DCS_SET_TEAR_OFF);
- if (!gpio_is_valid(ddata->ext_te_gpio))
+ if (!ddata->ext_te_gpio)
in->ops.dsi->enable_te(in, enable);
/* possible panel bug */
@@ -1099,6 +1159,45 @@ static void dsicm_ulps_work(struct work_struct *work)
mutex_unlock(&ddata->lock);
}
+static void dsicm_get_timings(struct omap_dss_device *dssdev,
+ struct videomode *vm)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+
+ *vm = ddata->vm;
+}
+
+static int dsicm_check_timings(struct omap_dss_device *dssdev,
+ struct videomode *vm)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ int ret = 0;
+
+ if (vm->hactive != ddata->vm.hactive)
+ ret = -EINVAL;
+
+ if (vm->vactive != ddata->vm.vactive)
+ ret = -EINVAL;
+
+ if (ret) {
+ dev_warn(dssdev->dev, "wrong resolution: %d x %d",
+ vm->hactive, vm->vactive);
+ dev_warn(dssdev->dev, "panel resolution: %d x %d",
+ ddata->vm.hactive, ddata->vm.vactive);
+ }
+
+ return ret;
+}
+
+static void dsicm_get_size(struct omap_dss_device *dssdev,
+ unsigned int *width, unsigned int *height)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+
+ *width = ddata->width_mm;
+ *height = ddata->height_mm;
+}
+
static struct omap_dss_driver dsicm_ops = {
.connect = dsicm_connect,
.disconnect = dsicm_disconnect,
@@ -1109,6 +1208,10 @@ static struct omap_dss_driver dsicm_ops = {
.update = dsicm_update,
.sync = dsicm_sync,
+ .get_timings = dsicm_get_timings,
+ .check_timings = dsicm_check_timings,
+ .get_size = dsicm_get_size,
+
.enable_te = dsicm_enable_te,
.get_te = dsicm_get_te,
@@ -1118,41 +1221,87 @@ static struct omap_dss_driver dsicm_ops = {
static int dsicm_probe_of(struct platform_device *pdev)
{
struct device_node *node = pdev->dev.of_node;
+ struct device_node *backlight;
struct panel_drv_data *ddata = platform_get_drvdata(pdev);
struct omap_dss_device *in;
- int gpio;
+ struct display_timing timing;
+ int err;
+
+ ddata->reset_gpio = devm_gpiod_get(&pdev->dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(ddata->reset_gpio)) {
+ err = PTR_ERR(ddata->reset_gpio);
+ dev_err(&pdev->dev, "reset gpio request failed: %d", err);
+ return err;
+ }
- gpio = of_get_named_gpio(node, "reset-gpios", 0);
- if (!gpio_is_valid(gpio)) {
- dev_err(&pdev->dev, "failed to parse reset gpio\n");
- return gpio;
+ ddata->ext_te_gpio = devm_gpiod_get_optional(&pdev->dev, "te",
+ GPIOD_IN);
+ if (IS_ERR(ddata->ext_te_gpio)) {
+ err = PTR_ERR(ddata->ext_te_gpio);
+ dev_err(&pdev->dev, "TE gpio request failed: %d", err);
+ return err;
}
- ddata->reset_gpio = gpio;
- gpio = of_get_named_gpio(node, "te-gpios", 0);
- if (gpio_is_valid(gpio) || gpio == -ENOENT) {
- ddata->ext_te_gpio = gpio;
+ err = of_get_display_timing(node, "panel-timing", &timing);
+ if (!err) {
+ videomode_from_timing(&timing, &ddata->vm);
+ if (!ddata->vm.pixelclock)
+ ddata->vm.pixelclock =
+ ddata->vm.hactive * ddata->vm.vactive * 60;
} else {
- dev_err(&pdev->dev, "failed to parse TE gpio\n");
- return gpio;
+ dev_warn(&pdev->dev,
+ "failed to get video timing, using defaults\n");
}
+ ddata->width_mm = 0;
+ of_property_read_u32(node, "width-mm", &ddata->width_mm);
+
+ ddata->height_mm = 0;
+ of_property_read_u32(node, "height-mm", &ddata->height_mm);
+
in = omapdss_of_find_source_for_first_ep(node);
if (IS_ERR(in)) {
dev_err(&pdev->dev, "failed to find video source\n");
return PTR_ERR(in);
}
+ ddata->vpnl = devm_regulator_get_optional(&pdev->dev, "vpnl");
+ if (IS_ERR(ddata->vpnl)) {
+ err = PTR_ERR(ddata->vpnl);
+ if (err == -EPROBE_DEFER)
+ return err;
+ ddata->vpnl = NULL;
+ }
+
+ ddata->vddi = devm_regulator_get_optional(&pdev->dev, "vddi");
+ if (IS_ERR(ddata->vddi)) {
+ err = PTR_ERR(ddata->vddi);
+ if (err == -EPROBE_DEFER)
+ return err;
+ ddata->vddi = NULL;
+ }
+
ddata->in = in;
- /* TODO: ulps, backlight */
+ backlight = of_parse_phandle(node, "backlight", 0);
+ if (backlight) {
+ ddata->extbldev = of_find_backlight_by_node(backlight);
+ of_node_put(backlight);
+
+ if (!ddata->extbldev)
+ return -EPROBE_DEFER;
+ } else {
+ /* assume native backlight support */
+ ddata->use_dsi_backlight = true;
+ }
+
+ /* TODO: ulps */
return 0;
}
static int dsicm_probe(struct platform_device *pdev)
{
- struct backlight_properties props;
struct panel_drv_data *ddata;
struct backlight_device *bldev = NULL;
struct device *dev = &pdev->dev;
@@ -1171,14 +1320,14 @@ static int dsicm_probe(struct platform_device *pdev)
if (!pdev->dev.of_node)
return -ENODEV;
- r = dsicm_probe_of(pdev);
- if (r)
- return r;
-
ddata->vm.hactive = 864;
ddata->vm.vactive = 480;
ddata->vm.pixelclock = 864 * 480 * 60;
+ r = dsicm_probe_of(pdev);
+ if (r)
+ return r;
+
dssdev = &ddata->dssdev;
dssdev->dev = dev;
dssdev->driver = &dsicm_ops;
@@ -1200,31 +1349,15 @@ static int dsicm_probe(struct platform_device *pdev)
atomic_set(&ddata->do_update, 0);
- if (gpio_is_valid(ddata->reset_gpio)) {
- r = devm_gpio_request_one(dev, ddata->reset_gpio,
- GPIOF_OUT_INIT_LOW, "taal rst");
- if (r) {
- dev_err(dev, "failed to request reset gpio\n");
- return r;
- }
- }
-
- if (gpio_is_valid(ddata->ext_te_gpio)) {
- r = devm_gpio_request_one(dev, ddata->ext_te_gpio,
- GPIOF_IN, "taal irq");
- if (r) {
- dev_err(dev, "GPIO request failed\n");
- return r;
- }
-
- r = devm_request_irq(dev, gpio_to_irq(ddata->ext_te_gpio),
+ if (ddata->ext_te_gpio) {
+ r = devm_request_irq(dev, gpiod_to_irq(ddata->ext_te_gpio),
dsicm_te_isr,
IRQF_TRIGGER_RISING,
"taal vsync", ddata);
if (r) {
dev_err(dev, "IRQ request failed\n");
- return r;
+ goto err_reg;
}
INIT_DEFERRABLE_WORK(&ddata->te_timeout_work,
@@ -1234,48 +1367,43 @@ static int dsicm_probe(struct platform_device *pdev)
}
ddata->workqueue = create_singlethread_workqueue("dsicm_wq");
- if (ddata->workqueue == NULL) {
- dev_err(dev, "can't create workqueue\n");
- return -ENOMEM;
+ if (!ddata->workqueue) {
+ r = -ENOMEM;
+ goto err_reg;
}
INIT_DELAYED_WORK(&ddata->ulps_work, dsicm_ulps_work);
dsicm_hw_reset(ddata);
if (ddata->use_dsi_backlight) {
- memset(&props, 0, sizeof(props));
+ struct backlight_properties props = { 0 };
props.max_brightness = 255;
-
props.type = BACKLIGHT_RAW;
- bldev = backlight_device_register(dev_name(dev),
- dev, ddata, &dsicm_bl_ops, &props);
+
+ bldev = devm_backlight_device_register(dev, dev_name(dev),
+ dev, ddata, &dsicm_bl_ops, &props);
if (IS_ERR(bldev)) {
r = PTR_ERR(bldev);
goto err_bl;
}
ddata->bldev = bldev;
-
- bldev->props.fb_blank = FB_BLANK_UNBLANK;
- bldev->props.power = FB_BLANK_UNBLANK;
- bldev->props.brightness = 255;
-
- dsicm_bl_update_status(bldev);
}
r = sysfs_create_group(&dev->kobj, &dsicm_attr_group);
if (r) {
dev_err(dev, "failed to create sysfs files\n");
- goto err_sysfs_create;
+ goto err_bl;
}
return 0;
-err_sysfs_create:
- backlight_device_unregister(bldev);
err_bl:
destroy_workqueue(ddata->workqueue);
err_reg:
+ if (ddata->extbldev)
+ put_device(&ddata->extbldev->dev);
+
return r;
}
@@ -1283,7 +1411,6 @@ static int __exit dsicm_remove(struct platform_device *pdev)
{
struct panel_drv_data *ddata = platform_get_drvdata(pdev);
struct omap_dss_device *dssdev = &ddata->dssdev;
- struct backlight_device *bldev;
dev_dbg(&pdev->dev, "remove\n");
@@ -1294,12 +1421,8 @@ static int __exit dsicm_remove(struct platform_device *pdev)
sysfs_remove_group(&pdev->dev.kobj, &dsicm_attr_group);
- bldev = ddata->bldev;
- if (bldev != NULL) {
- bldev->props.power = FB_BLANK_POWERDOWN;
- dsicm_bl_update_status(bldev);
- backlight_device_unregister(bldev);
- }
+ if (ddata->extbldev)
+ put_device(&ddata->extbldev->dev);
omap_dss_put_device(ddata->in);
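
panel-dsi-cm.c above moves from integer GPIO numbers to GPIO descriptors: devm_gpiod_get_optional() returns NULL when the property is absent, so a plain pointer test replaces gpio_is_valid(), and request plus direction setup happen in one call. A minimal probe sketch of the same pattern; the driver and property names here are illustrative, not from the panel driver:

#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct gpio_desc *te;
	int irq;

	/* "te" matches a "te-gpios" DT property; absent property yields NULL */
	te = devm_gpiod_get_optional(&pdev->dev, "te", GPIOD_IN);
	if (IS_ERR(te))
		return PTR_ERR(te);	/* includes -EPROBE_DEFER */

	if (te) {			/* property present: wire up the IRQ */
		irq = gpiod_to_irq(te);
		if (irq < 0)
			return irq;
	}

	return 0;
}
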
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c
index 74d13969b9ca..57af22ce87c5 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c
@@ -1,7 +1,7 @@
/*
* LG.Philips LB035Q02 LCD Panel driver
*
- * Copyright (C) 2013 Texas Instruments
+ * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/
* Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
* Based on a driver by: Steve Sakoman <steve@sakoman.com>
*
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c
index df8132d3b9c6..bf53676263ad 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c
@@ -1,7 +1,7 @@
/*
* NEC NL8048HL11 Panel driver
*
- * Copyright (C) 2010 Texas Instruments Inc.
+ * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/
* Author: Erik Gilling <konkers@android.com>
* Converted to new DSS device model: Tomi Valkeinen <tomi.valkeinen@ti.com>
*
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c
index 98d170aecaba..34555801fa4c 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c
@@ -1,7 +1,7 @@
/*
* LCD panel driver for Sharp LS037V7DW01
*
- * Copyright (C) 2013 Texas Instruments
+ * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/
* Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
*
* This program is free software; you can redistribute it and/or modify it
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c
index 0a38a0e8c925..2721a86ac5e7 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c
@@ -452,15 +452,27 @@ static int td028ttec1_panel_remove(struct spi_device *spi)
}
static const struct of_device_id td028ttec1_of_match[] = {
+ { .compatible = "omapdss,tpo,td028ttec1", },
+ /* keep to not break older DTB */
{ .compatible = "omapdss,toppoly,td028ttec1", },
{},
};
MODULE_DEVICE_TABLE(of, td028ttec1_of_match);
+static const struct spi_device_id td028ttec1_ids[] = {
+ { "toppoly,td028ttec1", 0 },
+ { "tpo,td028ttec1", 0},
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(spi, td028ttec1_ids);
+
+
static struct spi_driver td028ttec1_spi_driver = {
.probe = td028ttec1_panel_probe,
.remove = td028ttec1_panel_remove,
+ .id_table = td028ttec1_ids,
.driver = {
.name = "panel-tpo-td028ttec1",
@@ -471,7 +483,6 @@ static struct spi_driver td028ttec1_spi_driver = {
module_spi_driver(td028ttec1_spi_driver);
-MODULE_ALIAS("spi:toppoly,td028ttec1");
MODULE_AUTHOR("H. Nikolaus Schaller <hns@goldelico.com>");
MODULE_DESCRIPTION("Toppoly TD028TTEC1 panel driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/omapdrm/dss/base.c b/drivers/gpu/drm/omapdrm/dss/base.c
index 13e91faaf7a6..67cc87a4f1f6 100644
--- a/drivers/gpu/drm/omapdrm/dss/base.c
+++ b/drivers/gpu/drm/omapdrm/dss/base.c
@@ -1,3 +1,18 @@
+/*
+ * OMAP Display Subsystem Base
+ *
+ * Copyright (C) 2015-2017 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
diff --git a/drivers/gpu/drm/omapdrm/dss/core.c b/drivers/gpu/drm/omapdrm/dss/core.c
index 197ddbc1512b..acef7ece5783 100644
--- a/drivers/gpu/drm/omapdrm/dss/core.c
+++ b/drivers/gpu/drm/omapdrm/dss/core.c
@@ -1,6 +1,4 @@
/*
- * linux/drivers/video/omap2/dss/core.c
- *
* Copyright (C) 2009 Nokia Corporation
* Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
*
@@ -30,38 +28,21 @@
#include "dss.h"
/* INIT */
-static int (*dss_output_drv_reg_funcs[])(void) __initdata = {
- dss_init_platform_driver,
- dispc_init_platform_driver,
+static struct platform_driver * const omap_dss_drivers[] = {
+ &omap_dsshw_driver,
+ &omap_dispchw_driver,
#ifdef CONFIG_OMAP2_DSS_DSI
- dsi_init_platform_driver,
+ &omap_dsihw_driver,
#endif
#ifdef CONFIG_OMAP2_DSS_VENC
- venc_init_platform_driver,
+ &omap_venchw_driver,
#endif
#ifdef CONFIG_OMAP4_DSS_HDMI
- hdmi4_init_platform_driver,
+ &omapdss_hdmi4hw_driver,
#endif
#ifdef CONFIG_OMAP5_DSS_HDMI
- hdmi5_init_platform_driver,
-#endif
-};
-
-static void (*dss_output_drv_unreg_funcs[])(void) = {
-#ifdef CONFIG_OMAP5_DSS_HDMI
- hdmi5_uninit_platform_driver,
-#endif
-#ifdef CONFIG_OMAP4_DSS_HDMI
- hdmi4_uninit_platform_driver,
+ &omapdss_hdmi5hw_driver,
#endif
-#ifdef CONFIG_OMAP2_DSS_VENC
- venc_uninit_platform_driver,
-#endif
-#ifdef CONFIG_OMAP2_DSS_DSI
- dsi_uninit_platform_driver,
-#endif
- dispc_uninit_platform_driver,
- dss_uninit_platform_driver,
};
static struct platform_device *omap_drm_device;
@@ -69,13 +50,11 @@ static struct platform_device *omap_drm_device;
static int __init omap_dss_init(void)
{
int r;
- int i;
- for (i = 0; i < ARRAY_SIZE(dss_output_drv_reg_funcs); ++i) {
- r = dss_output_drv_reg_funcs[i]();
- if (r)
- goto err_reg;
- }
+ r = platform_register_drivers(omap_dss_drivers,
+ ARRAY_SIZE(omap_dss_drivers));
+ if (r)
+ goto err_reg;
omap_drm_device = platform_device_register_simple("omapdrm", 0, NULL, 0);
if (IS_ERR(omap_drm_device)) {
@@ -86,22 +65,18 @@ static int __init omap_dss_init(void)
return 0;
err_reg:
- for (i = ARRAY_SIZE(dss_output_drv_reg_funcs) - i;
- i < ARRAY_SIZE(dss_output_drv_reg_funcs);
- ++i)
- dss_output_drv_unreg_funcs[i]();
+ platform_unregister_drivers(omap_dss_drivers,
+ ARRAY_SIZE(omap_dss_drivers));
return r;
}
static void __exit omap_dss_exit(void)
{
- int i;
-
platform_device_unregister(omap_drm_device);
- for (i = 0; i < ARRAY_SIZE(dss_output_drv_unreg_funcs); ++i)
- dss_output_drv_unreg_funcs[i]();
+ platform_unregister_drivers(omap_dss_drivers,
+ ARRAY_SIZE(omap_dss_drivers));
}
module_init(omap_dss_init);
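
The core.c change above replaces the hand-rolled arrays of *_init_platform_driver()/*_uninit_platform_driver() helpers with platform_register_drivers()/platform_unregister_drivers(), which register an array of drivers in order and unwind automatically if one of them fails. A minimal module sketch of the same pattern; foo_driver and bar_driver are hypothetical drivers, not part of omapdss:

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static struct platform_driver foo_driver = {
	.driver = { .name = "example-foo" },	/* hypothetical */
};

static struct platform_driver bar_driver = {
	.driver = { .name = "example-bar" },	/* hypothetical */
};

static struct platform_driver * const example_drivers[] = {
	&foo_driver,
	&bar_driver,
};

static int __init example_init(void)
{
	/* registers each driver in order, unregistering on partial failure */
	return platform_register_drivers(example_drivers,
					 ARRAY_SIZE(example_drivers));
}

static void __exit example_exit(void)
{
	platform_unregister_drivers(example_drivers,
				    ARRAY_SIZE(example_drivers));
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
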
diff --git a/drivers/gpu/drm/omapdrm/dss/dispc.c b/drivers/gpu/drm/omapdrm/dss/dispc.c
index 0f4fdb221498..4e8f68efd169 100644
--- a/drivers/gpu/drm/omapdrm/dss/dispc.c
+++ b/drivers/gpu/drm/omapdrm/dss/dispc.c
@@ -1,6 +1,4 @@
/*
- * linux/drivers/video/omap2/dss/dispc.c
- *
* Copyright (C) 2009 Nokia Corporation
* Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
*
@@ -4325,6 +4323,17 @@ static void dispc_free_irq(void *dev_id)
dispc.user_data = NULL;
}
+static u32 dispc_get_memory_bandwidth_limit(void)
+{
+ u32 limit = 0;
+
+ /* Optional maximum memory bandwidth */
+ of_property_read_u32(dispc.pdev->dev.of_node, "max-memory-bandwidth",
+ &limit);
+
+ return limit;
+}
+
/*
* Workaround for errata i734 in DSS dispc
* - LCD1 Gamma Correction Is Not Working When GFX Pipe Is Disabled
@@ -4497,6 +4506,8 @@ static const struct dispc_ops dispc_ops = {
.get_num_ovls = dispc_get_num_ovls,
.get_num_mgrs = dispc_get_num_mgrs,
+ .get_memory_bandwidth_limit = dispc_get_memory_bandwidth_limit,
+
.mgr_enable = dispc_mgr_enable,
.mgr_is_enabled = dispc_mgr_is_enabled,
.mgr_get_vsync_irq = dispc_mgr_get_vsync_irq,
@@ -4685,7 +4696,7 @@ static const struct dev_pm_ops dispc_pm_ops = {
.runtime_resume = dispc_runtime_resume,
};
-static struct platform_driver omap_dispchw_driver = {
+struct platform_driver omap_dispchw_driver = {
.probe = dispc_probe,
.remove = dispc_remove,
.driver = {
@@ -4695,13 +4706,3 @@ static struct platform_driver omap_dispchw_driver = {
.suppress_bind_attrs = true,
},
};
-
-int __init dispc_init_platform_driver(void)
-{
- return platform_driver_register(&omap_dispchw_driver);
-}
-
-void dispc_uninit_platform_driver(void)
-{
- platform_driver_unregister(&omap_dispchw_driver);
-}
diff --git a/drivers/gpu/drm/omapdrm/dss/dispc.h b/drivers/gpu/drm/omapdrm/dss/dispc.h
index 003adce532f4..e901dd1e4365 100644
--- a/drivers/gpu/drm/omapdrm/dss/dispc.h
+++ b/drivers/gpu/drm/omapdrm/dss/dispc.h
@@ -1,10 +1,7 @@
/*
- * linux/drivers/video/omap2/dss/dispc.h
- *
- * Copyright (C) 2011 Texas Instruments
+ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
* Author: Archit Taneja <archit@ti.com>
*
- *
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
diff --git a/drivers/gpu/drm/omapdrm/dss/dispc_coefs.c b/drivers/gpu/drm/omapdrm/dss/dispc_coefs.c
index 34fad2376f8d..44804c8c8777 100644
--- a/drivers/gpu/drm/omapdrm/dss/dispc_coefs.c
+++ b/drivers/gpu/drm/omapdrm/dss/dispc_coefs.c
@@ -1,7 +1,5 @@
/*
- * linux/drivers/video/omap2/dss/dispc_coefs.c
- *
- * Copyright (C) 2011 Texas Instruments
+ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
* Author: Chandrabhanu Mahapatra <cmahapatra@ti.com>
*
* This program is free software; you can redistribute it and/or modify it
diff --git a/drivers/gpu/drm/omapdrm/dss/display.c b/drivers/gpu/drm/omapdrm/dss/display.c
index 42279933790e..0c9480ba85c0 100644
--- a/drivers/gpu/drm/omapdrm/dss/display.c
+++ b/drivers/gpu/drm/omapdrm/dss/display.c
@@ -1,6 +1,4 @@
/*
- * linux/drivers/video/omap2/dss/display.c
- *
* Copyright (C) 2009 Nokia Corporation
* Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
*
@@ -175,17 +173,3 @@ out:
return dssdev;
}
EXPORT_SYMBOL(omap_dss_get_next_device);
-
-struct omap_dss_device *omap_dss_find_device(void *data,
- int (*match)(struct omap_dss_device *dssdev, void *data))
-{
- struct omap_dss_device *dssdev = NULL;
-
- while ((dssdev = omap_dss_get_next_device(dssdev)) != NULL) {
- if (match(dssdev, data))
- return dssdev;
- }
-
- return NULL;
-}
-EXPORT_SYMBOL(omap_dss_find_device);
diff --git a/drivers/gpu/drm/omapdrm/dss/dpi.c b/drivers/gpu/drm/omapdrm/dss/dpi.c
index ca1e3b489540..ea44137ed08c 100644
--- a/drivers/gpu/drm/omapdrm/dss/dpi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dpi.c
@@ -1,6 +1,4 @@
/*
- * linux/drivers/video/omap2/dss/dpi.c
- *
* Copyright (C) 2009 Nokia Corporation
* Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
*
@@ -52,8 +50,6 @@ struct dpi_data {
int data_lines;
struct omap_dss_device output;
-
- bool port_initialized;
};
static struct dpi_data *dpi_get_data_from_dssdev(struct omap_dss_device *dssdev)
@@ -786,8 +782,6 @@ int dpi_init_port(struct platform_device *pdev, struct device_node *port,
dpi_init_output_port(dpi, port);
- dpi->port_initialized = true;
-
return 0;
err_datalines:
@@ -800,7 +794,7 @@ void dpi_uninit_port(struct device_node *port)
{
struct dpi_data *dpi = port->data;
- if (!dpi->port_initialized)
+ if (!dpi)
return;
dpi_uninit_output_port(port);
diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c
index c2cf6d98e577..80f1f3679a3c 100644
--- a/drivers/gpu/drm/omapdrm/dss/dsi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dsi.c
@@ -1,6 +1,4 @@
/*
- * linux/drivers/video/omap2/dss/dsi.c
- *
* Copyright (C) 2009 Nokia Corporation
* Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
*
@@ -5660,7 +5658,7 @@ static const struct dev_pm_ops dsi_pm_ops = {
.runtime_resume = dsi_runtime_resume,
};
-static struct platform_driver omap_dsihw_driver = {
+struct platform_driver omap_dsihw_driver = {
.probe = dsi_probe,
.remove = dsi_remove,
.driver = {
@@ -5670,13 +5668,3 @@ static struct platform_driver omap_dsihw_driver = {
.suppress_bind_attrs = true,
},
};
-
-int __init dsi_init_platform_driver(void)
-{
- return platform_driver_register(&omap_dsihw_driver);
-}
-
-void dsi_uninit_platform_driver(void)
-{
- platform_driver_unregister(&omap_dsihw_driver);
-}
diff --git a/drivers/gpu/drm/omapdrm/dss/dss-of.c b/drivers/gpu/drm/omapdrm/dss/dss-of.c
index c6b86f348a5c..967d9e1b34e5 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss-of.c
+++ b/drivers/gpu/drm/omapdrm/dss/dss-of.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 Texas Instruments
+ * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/
* Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
*
* This program is free software; you can redistribute it and/or modify it
diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c
index d1755f12236b..04300b2da1b1 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss.c
+++ b/drivers/gpu/drm/omapdrm/dss/dss.c
@@ -1,6 +1,4 @@
/*
- * linux/drivers/video/omap2/dss/dss.c
- *
* Copyright (C) 2009 Nokia Corporation
* Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
*
@@ -23,6 +21,7 @@
#define DSS_SUBSYS_NAME "DSS"
#include <linux/debugfs.h>
+#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/io.h>
@@ -367,7 +366,8 @@ const char *dss_get_clk_source_name(enum dss_clk_source clk_src)
return dss_generic_clk_source_names[clk_src];
}
-void dss_dump_clocks(struct seq_file *s)
+#if defined(CONFIG_OMAP2_DSS_DEBUGFS)
+static void dss_dump_clocks(struct seq_file *s)
{
const char *fclk_name;
unsigned long fclk_rate;
@@ -386,6 +386,7 @@ void dss_dump_clocks(struct seq_file *s)
dss_runtime_put();
}
+#endif
static void dss_dump_regs(struct seq_file *s)
{
@@ -1441,6 +1442,12 @@ static int dss_probe(struct platform_device *pdev)
dss.pdev = pdev;
+ r = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (r) {
+ dev_err(&pdev->dev, "Failed to set the DMA mask\n");
+ return r;
+ }
+
/*
* The various OMAP3-based SoCs can't be told apart using the compatible
* string, use SoC device matching.
@@ -1527,7 +1534,7 @@ static const struct dev_pm_ops dss_pm_ops = {
.runtime_resume = dss_runtime_resume,
};
-static struct platform_driver omap_dsshw_driver = {
+struct platform_driver omap_dsshw_driver = {
.probe = dss_probe,
.remove = dss_remove,
.shutdown = dss_shutdown,
@@ -1538,13 +1545,3 @@ static struct platform_driver omap_dsshw_driver = {
.suppress_bind_attrs = true,
},
};
-
-int __init dss_init_platform_driver(void)
-{
- return platform_driver_register(&omap_dsshw_driver);
-}
-
-void dss_uninit_platform_driver(void)
-{
- platform_driver_unregister(&omap_dsshw_driver);
-}
diff --git a/drivers/gpu/drm/omapdrm/dss/dss.h b/drivers/gpu/drm/omapdrm/dss/dss.h
index ed465572491e..6374e57ed9da 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss.h
+++ b/drivers/gpu/drm/omapdrm/dss/dss.h
@@ -1,6 +1,4 @@
/*
- * linux/drivers/video/omap2/dss/dss.h
- *
* Copyright (C) 2009 Nokia Corporation
* Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
*
@@ -264,9 +262,6 @@ static inline int dss_debugfs_create_file(const char *name,
}
#endif /* CONFIG_OMAP2_DSS_DEBUGFS */
-int dss_init_platform_driver(void) __init;
-void dss_uninit_platform_driver(void);
-
int dss_runtime_get(void);
void dss_runtime_put(void);
@@ -277,7 +272,6 @@ int dss_dpi_select_source(int port, enum omap_channel channel);
void dss_select_hdmi_venc_clk_source(enum dss_hdmi_venc_clk_source_select);
enum dss_hdmi_venc_clk_source_select dss_get_hdmi_venc_clk_source(void);
const char *dss_get_clk_source_name(enum dss_clk_source clk_src);
-void dss_dump_clocks(struct seq_file *s);
/* DSS VIDEO PLL */
struct dss_pll *dss_video_pll_init(struct platform_device *pdev, int id,
@@ -329,9 +323,6 @@ static inline void sdi_uninit_port(struct device_node *port)
struct dentry;
struct file_operations;
-int dsi_init_platform_driver(void) __init;
-void dsi_uninit_platform_driver(void);
-
void dsi_dump_clocks(struct seq_file *s);
void dsi_irq_handler(void);
@@ -355,8 +346,6 @@ static inline void dpi_uninit_port(struct device_node *port)
#endif
/* DISPC */
-int dispc_init_platform_driver(void) __init;
-void dispc_uninit_platform_driver(void);
void dispc_dump_clocks(struct seq_file *s);
int dispc_runtime_get(void);
@@ -400,18 +389,6 @@ void dispc_wb_set_channel_in(enum dss_writeback_channel channel);
int dispc_wb_setup(const struct omap_dss_writeback_info *wi,
bool mem_to_mem, const struct videomode *vm);
-/* VENC */
-int venc_init_platform_driver(void) __init;
-void venc_uninit_platform_driver(void);
-
-/* HDMI */
-int hdmi4_init_platform_driver(void) __init;
-void hdmi4_uninit_platform_driver(void);
-
-int hdmi5_init_platform_driver(void) __init;
-void hdmi5_uninit_platform_driver(void);
-
-
#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
static inline void dss_collect_irq_stats(u32 irqstatus, unsigned *irq_arr)
{
@@ -455,4 +432,19 @@ int dss_pll_write_config_type_b(struct dss_pll *pll,
const struct dss_pll_clock_info *cinfo);
int dss_pll_wait_reset_done(struct dss_pll *pll);
+extern struct platform_driver omap_dsshw_driver;
+extern struct platform_driver omap_dispchw_driver;
+#ifdef CONFIG_OMAP2_DSS_DSI
+extern struct platform_driver omap_dsihw_driver;
+#endif
+#ifdef CONFIG_OMAP2_DSS_VENC
+extern struct platform_driver omap_venchw_driver;
+#endif
+#ifdef CONFIG_OMAP4_DSS_HDMI
+extern struct platform_driver omapdss_hdmi4hw_driver;
+#endif
+#ifdef CONFIG_OMAP5_DSS_HDMI
+extern struct platform_driver omapdss_hdmi5hw_driver;
+#endif
+
#endif
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
index a598dfdeb585..bf914f2ac99e 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
@@ -1,5 +1,6 @@
/*
* HDMI interface DSS driver for TI's OMAP4 family of SoCs.
+ *
* Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com/
* Authors: Yong Zhi
* Mythri pk <mythripk@ti.com>
@@ -844,7 +845,7 @@ static const struct of_device_id hdmi_of_match[] = {
{},
};
-static struct platform_driver omapdss_hdmihw_driver = {
+struct platform_driver omapdss_hdmi4hw_driver = {
.probe = hdmi4_probe,
.remove = hdmi4_remove,
.driver = {
@@ -854,13 +855,3 @@ static struct platform_driver omapdss_hdmihw_driver = {
.suppress_bind_attrs = true,
},
};
-
-int __init hdmi4_init_platform_driver(void)
-{
- return platform_driver_register(&omapdss_hdmihw_driver);
-}
-
-void hdmi4_uninit_platform_driver(void)
-{
- platform_driver_unregister(&omapdss_hdmihw_driver);
-}
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
index b06f9956e733..35ed2add6189 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
@@ -1,7 +1,6 @@
/*
- * ti_hdmi_4xxx_ip.c
- *
* HDMI TI81xx, TI38xx, TI OMAP4 etc IP driver Library
+ *
* Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com/
* Authors: Yong Zhi
* Mythri pk <mythripk@ti.com>
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
index b3221ca5bcd8..689cda41858b 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
@@ -1,7 +1,7 @@
/*
* HDMI driver for OMAP5
*
- * Copyright (C) 2014 Texas Instruments Incorporated
+ * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com/
*
* Authors:
* Yong Zhi
@@ -841,7 +841,7 @@ static const struct of_device_id hdmi_of_match[] = {
{},
};
-static struct platform_driver omapdss_hdmihw_driver = {
+struct platform_driver omapdss_hdmi5hw_driver = {
.probe = hdmi5_probe,
.remove = hdmi5_remove,
.driver = {
@@ -851,13 +851,3 @@ static struct platform_driver omapdss_hdmihw_driver = {
.suppress_bind_attrs = true,
},
};
-
-int __init hdmi5_init_platform_driver(void)
-{
- return platform_driver_register(&omapdss_hdmihw_driver);
-}
-
-void hdmi5_uninit_platform_driver(void)
-{
- platform_driver_unregister(&omapdss_hdmihw_driver);
-}
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c
index ab179ec133c0..09759f8ea7bc 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c
@@ -1,8 +1,7 @@
/*
* OMAP5 HDMI CORE IP driver library
*
- * Copyright (C) 2014 Texas Instruments Incorporated
- *
+ * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com/
* Authors:
* Yong Zhi
* Mythri pk
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi_phy.c b/drivers/gpu/drm/omapdrm/dss/hdmi_phy.c
index a156292b1820..5c14ed851609 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi_phy.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi_phy.c
@@ -1,7 +1,7 @@
/*
* HDMI PHY
*
- * Copyright (C) 2013 Texas Instruments Incorporated
+ * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi_pll.c b/drivers/gpu/drm/omapdrm/dss/hdmi_pll.c
index 55bee81f4dd5..08885d7de1e8 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi_pll.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi_pll.c
@@ -1,7 +1,7 @@
/*
* HDMI PLL
*
- * Copyright (C) 2013 Texas Instruments Incorporated
+ * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c b/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c
index 88034fbe0e9f..806e5fdcfe52 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c
@@ -1,7 +1,7 @@
/*
* HDMI wrapper
*
- * Copyright (C) 2013 Texas Instruments Incorporated
+ * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c b/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c
index bf626acae271..3bfb95d230e0 100644
--- a/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c
+++ b/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2014 Texas Instruments
+ * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com/
* Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
*
* This program is free software; you can redistribute it and/or modify it
diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h
index 990422b35784..f8f83e826a56 100644
--- a/drivers/gpu/drm/omapdrm/dss/omapdss.h
+++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2016 Texas Instruments
+ * Copyright (C) 2016 Texas Instruments Incorporated - http://www.ti.com/
* Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
*
* This program is free software; you can redistribute it and/or modify it
@@ -563,6 +563,8 @@ struct omap_dss_driver {
struct videomode *vm);
void (*get_timings)(struct omap_dss_device *dssdev,
struct videomode *vm);
+ void (*get_size)(struct omap_dss_device *dssdev,
+ unsigned int *width, unsigned int *height);
int (*set_wss)(struct omap_dss_device *dssdev, u32 wss);
u32 (*get_wss)(struct omap_dss_device *dssdev);
@@ -585,9 +587,6 @@ struct omap_dss_driver {
bool omapdss_is_initialized(void);
-int omap_dss_register_driver(struct omap_dss_driver *);
-void omap_dss_unregister_driver(struct omap_dss_driver *);
-
int omapdss_register_display(struct omap_dss_device *dssdev);
void omapdss_unregister_display(struct omap_dss_device *dssdev);
@@ -595,9 +594,6 @@ struct omap_dss_device *omap_dss_get_device(struct omap_dss_device *dssdev);
void omap_dss_put_device(struct omap_dss_device *dssdev);
#define for_each_dss_dev(d) while ((d = omap_dss_get_next_device(d)) != NULL)
struct omap_dss_device *omap_dss_get_next_device(struct omap_dss_device *from);
-struct omap_dss_device *omap_dss_find_device(void *data,
- int (*match)(struct omap_dss_device *dssdev, void *data));
-
int omap_dss_get_num_overlay_managers(void);
@@ -695,6 +691,8 @@ struct dispc_ops {
int (*get_num_ovls)(void);
int (*get_num_mgrs)(void);
+ u32 (*get_memory_bandwidth_limit)(void);
+
void (*mgr_enable)(enum omap_channel channel, bool enable);
bool (*mgr_is_enabled)(enum omap_channel channel);
u32 (*mgr_get_vsync_irq)(enum omap_channel channel);
diff --git a/drivers/gpu/drm/omapdrm/dss/output.c b/drivers/gpu/drm/omapdrm/dss/output.c
index 3c572b699ed3..b9afd80ae385 100644
--- a/drivers/gpu/drm/omapdrm/dss/output.c
+++ b/drivers/gpu/drm/omapdrm/dss/output.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012 Texas Instruments Ltd
+ * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
* Author: Archit Taneja <archit@ti.com>
*
* This program is free software; you can redistribute it and/or modify it
diff --git a/drivers/gpu/drm/omapdrm/dss/pll.c b/drivers/gpu/drm/omapdrm/dss/pll.c
index 9d9d9d42009b..058714b1eb56 100644
--- a/drivers/gpu/drm/omapdrm/dss/pll.c
+++ b/drivers/gpu/drm/omapdrm/dss/pll.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2014 Texas Instruments Incorporated
+ * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com/
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
diff --git a/drivers/gpu/drm/omapdrm/dss/sdi.c b/drivers/gpu/drm/omapdrm/dss/sdi.c
index d18ad58c5a19..d8ab31f3a813 100644
--- a/drivers/gpu/drm/omapdrm/dss/sdi.c
+++ b/drivers/gpu/drm/omapdrm/dss/sdi.c
@@ -1,6 +1,4 @@
/*
- * linux/drivers/video/omap2/dss/sdi.c
- *
* Copyright (C) 2009 Nokia Corporation
* Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
*
diff --git a/drivers/gpu/drm/omapdrm/dss/venc.c b/drivers/gpu/drm/omapdrm/dss/venc.c
index d58da6f32693..6de9d734ddb9 100644
--- a/drivers/gpu/drm/omapdrm/dss/venc.c
+++ b/drivers/gpu/drm/omapdrm/dss/venc.c
@@ -1,6 +1,4 @@
/*
- * linux/drivers/video/omap2/dss/venc.c
- *
* Copyright (C) 2009 Nokia Corporation
* Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
*
@@ -857,10 +855,10 @@ static int venc_probe_of(struct platform_device *pdev)
of_node_put(ep);
return 0;
+
err:
of_node_put(ep);
-
- return 0;
+ return r;
}
/* VENC HW IP initialisation */
@@ -986,7 +984,7 @@ static const struct of_device_id venc_of_match[] = {
{},
};
-static struct platform_driver omap_venchw_driver = {
+struct platform_driver omap_venchw_driver = {
.probe = venc_probe,
.remove = venc_remove,
.driver = {
@@ -996,13 +994,3 @@ static struct platform_driver omap_venchw_driver = {
.suppress_bind_attrs = true,
},
};
-
-int __init venc_init_platform_driver(void)
-{
- return platform_driver_register(&omap_venchw_driver);
-}
-
-void venc_uninit_platform_driver(void)
-{
- platform_driver_unregister(&omap_venchw_driver);
-}
diff --git a/drivers/gpu/drm/omapdrm/dss/video-pll.c b/drivers/gpu/drm/omapdrm/dss/video-pll.c
index 38a239cc5e04..bbedac797927 100644
--- a/drivers/gpu/drm/omapdrm/dss/video-pll.c
+++ b/drivers/gpu/drm/omapdrm/dss/video-pll.c
@@ -1,13 +1,15 @@
/*
-* Copyright (C) 2014 Texas Instruments Ltd
-*
-* This program is free software; you can redistribute it and/or modify it
-* under the terms of the GNU General Public License version 2 as published by
-* the Free Software Foundation.
-*
-* You should have received a copy of the GNU General Public License along with
-* this program. If not, see <http://www.gnu.org/licenses/>.
-*/
+ * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
#include <linux/clk.h>
#include <linux/delay.h>
diff --git a/drivers/gpu/drm/omapdrm/omap_connector.c b/drivers/gpu/drm/omapdrm/omap_connector.c
index aa5ba9ae2191..a0d7b1d905e8 100644
--- a/drivers/gpu/drm/omapdrm/omap_connector.c
+++ b/drivers/gpu/drm/omapdrm/omap_connector.c
@@ -1,7 +1,5 @@
/*
- * drivers/gpu/drm/omapdrm/omap_connector.c
- *
- * Copyright (C) 2011 Texas Instruments
+ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
* Author: Rob Clark <rob@ti.com>
*
* This program is free software; you can redistribute it and/or modify it
@@ -149,6 +147,12 @@ static int omap_connector_get_modes(struct drm_connector *connector)
drm_mode_set_name(mode);
drm_mode_probed_add(connector, mode);
+ if (dssdrv->get_size) {
+ dssdrv->get_size(dssdev,
+ &connector->display_info.width_mm,
+ &connector->display_info.height_mm);
+ }
+
n = 1;
}
diff --git a/drivers/gpu/drm/omapdrm/omap_connector.h b/drivers/gpu/drm/omapdrm/omap_connector.h
new file mode 100644
index 000000000000..98bbc779b302
--- /dev/null
+++ b/drivers/gpu/drm/omapdrm/omap_connector.h
@@ -0,0 +1,37 @@
+/*
+ * omap_connector.h -- OMAP DRM Connector
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Rob Clark <rob@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __OMAPDRM_CONNECTOR_H__
+#define __OMAPDRM_CONNECTOR_H__
+
+#include <linux/types.h>
+
+struct drm_connector;
+struct drm_device;
+struct drm_encoder;
+struct omap_dss_device;
+
+struct drm_connector *omap_connector_init(struct drm_device *dev,
+ int connector_type, struct omap_dss_device *dssdev,
+ struct drm_encoder *encoder);
+struct drm_encoder *omap_connector_attached_encoder(
+ struct drm_connector *connector);
+bool omap_connector_get_hdmi_mode(struct drm_connector *connector);
+
+#endif /* __OMAPDRM_CONNECTOR_H__ */
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c
index cc85c16cbc2a..1b8154e58d18 100644
--- a/drivers/gpu/drm/omapdrm/omap_crtc.c
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.c
@@ -1,7 +1,5 @@
/*
- * drivers/gpu/drm/omapdrm/omap_crtc.c
- *
- * Copyright (C) 2011 Texas Instruments
+ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
* Author: Rob Clark <rob@ti.com>
*
* This program is free software; you can redistribute it and/or modify it
@@ -23,6 +21,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_mode.h>
#include <drm/drm_plane_helper.h>
+#include <linux/math64.h>
#include "omap_drv.h"
@@ -400,6 +399,41 @@ static void omap_crtc_atomic_disable(struct drm_crtc *crtc,
drm_crtc_vblank_off(crtc);
}
+static enum drm_mode_status omap_crtc_mode_valid(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode)
+{
+ struct omap_drm_private *priv = crtc->dev->dev_private;
+
+ /* Check for bandwidth limit */
+ if (priv->max_bandwidth) {
+ /*
+ * Estimate of the bandwidth needed by a given mode with one
+ * full-screen plane:
+ * bandwidth = resolution * 32bpp * (pclk / (vtotal * htotal))
+ * ^^ Refresh rate ^^
+ *
+ * The interlaced mode is taken into account by using the
+ * pixelclock in the calculation.
+ *
+ * The equation is rearranged for 64bit arithmetic.
+ */
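+
+ /*
+ * Illustrative example (numbers assumed, not from any datasheet):
+ * a 1920x1080 mode with htotal/vtotal of 2200x1125 and a 148.5 MHz
+ * pixel clock refreshes at 60 Hz, so one full-screen 32bpp plane
+ * needs roughly 1920 * 1080 * 4 * 60 = ~498 MB/s.
+ */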
+ uint64_t bandwidth = mode->clock * 1000;
+ unsigned int bpp = 4;
+
+ bandwidth = bandwidth * mode->hdisplay * mode->vdisplay * bpp;
+ bandwidth = div_u64(bandwidth, mode->htotal * mode->vtotal);
+
+ /*
+ * Reject modes which would need more bandwidth than the limit if
+ * used with one full-resolution plane (the most common use case).
+ */
+ if (priv->max_bandwidth < bandwidth)
+ return MODE_BAD;
+ }
+
+ return MODE_OK;
+}
+
static void omap_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
@@ -621,6 +655,7 @@ static const struct drm_crtc_helper_funcs omap_crtc_helper_funcs = {
.atomic_flush = omap_crtc_atomic_flush,
.atomic_enable = omap_crtc_atomic_enable,
.atomic_disable = omap_crtc_atomic_disable,
+ .mode_valid = omap_crtc_mode_valid,
};
/* -----------------------------------------------------------------------------
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.h b/drivers/gpu/drm/omapdrm/omap_crtc.h
new file mode 100644
index 000000000000..ad7b007c6174
--- /dev/null
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.h
@@ -0,0 +1,43 @@
+/*
+ * omap_crtc.h -- OMAP DRM CRTC
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Rob Clark <rob@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __OMAPDRM_CRTC_H__
+#define __OMAPDRM_CRTC_H__
+
+#include <linux/types.h>
+
+enum omap_channel;
+
+struct drm_crtc;
+struct drm_device;
+struct drm_plane;
+struct omap_dss_device;
+struct videomode;
+
+struct videomode *omap_crtc_timings(struct drm_crtc *crtc);
+enum omap_channel omap_crtc_channel(struct drm_crtc *crtc);
+void omap_crtc_pre_init(void);
+void omap_crtc_pre_uninit(void);
+struct drm_crtc *omap_crtc_init(struct drm_device *dev,
+ struct drm_plane *plane, struct omap_dss_device *dssdev);
+int omap_crtc_wait_pending(struct drm_crtc *crtc);
+void omap_crtc_error_irq(struct drm_crtc *crtc, uint32_t irqstatus);
+void omap_crtc_vblank_irq(struct drm_crtc *crtc);
+
+#endif /* __OMAPDRM_CRTC_H__ */
diff --git a/drivers/gpu/drm/omapdrm/omap_debugfs.c b/drivers/gpu/drm/omapdrm/omap_debugfs.c
index 19b716745623..b42e286616b0 100644
--- a/drivers/gpu/drm/omapdrm/omap_debugfs.c
+++ b/drivers/gpu/drm/omapdrm/omap_debugfs.c
@@ -1,7 +1,5 @@
/*
- * drivers/gpu/drm/omapdrm/omap_debugfs.c
- *
- * Copyright (C) 2011 Texas Instruments
+ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
* Author: Rob Clark <rob.clark@linaro.org>
*
* This program is free software; you can redistribute it and/or modify it
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_priv.h b/drivers/gpu/drm/omapdrm/omap_dmm_priv.h
index 9f32a83ca507..600064d5c25b 100644
--- a/drivers/gpu/drm/omapdrm/omap_dmm_priv.h
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_priv.h
@@ -1,5 +1,4 @@
/*
- *
* Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
* Author: Rob Clark <rob@ti.com>
* Andy Gross <andy.gross@ti.com>
@@ -13,6 +12,7 @@
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
+
#ifndef OMAP_DMM_PRIV_H
#define OMAP_DMM_PRIV_H
@@ -59,12 +59,12 @@
#define DMM_IRQSTAT_ERR_UPD_DATA (1<<6)
#define DMM_IRQSTAT_ERR_LUT_MISS (1<<7)
-#define DMM_IRQSTAT_ERR_MASK (DMM_IRQ_STAT_ERR_INV_DSC | \
- DMM_IRQ_STAT_ERR_INV_DATA | \
- DMM_IRQ_STAT_ERR_UPD_AREA | \
- DMM_IRQ_STAT_ERR_UPD_CTRL | \
- DMM_IRQ_STAT_ERR_UPD_DATA | \
- DMM_IRQ_STAT_ERR_LUT_MISS)
+#define DMM_IRQSTAT_ERR_MASK (DMM_IRQSTAT_ERR_INV_DSC | \
+ DMM_IRQSTAT_ERR_INV_DATA | \
+ DMM_IRQSTAT_ERR_UPD_AREA | \
+ DMM_IRQSTAT_ERR_UPD_CTRL | \
+ DMM_IRQSTAT_ERR_UPD_DATA | \
+ DMM_IRQSTAT_ERR_LUT_MISS)
#define DMM_PATSTATUS_READY (1<<0)
#define DMM_PATSTATUS_VALID (1<<1)
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
index c60a85e82c6d..4be0c94673f5 100644
--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
@@ -1,11 +1,10 @@
/*
* DMM IOMMU driver support functions for TI OMAP processors.
*
+ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
* Author: Rob Clark <rob@ti.com>
* Andy Gross <andy.gross@ti.com>
*
- * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
- *
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation version 2.
@@ -121,14 +120,22 @@ static int wait_status(struct refill_engine *engine, uint32_t wait_mask)
while (true) {
r = dmm_read(dmm, reg[PAT_STATUS][engine->id]);
err = r & DMM_PATSTATUS_ERR;
- if (err)
+ if (err) {
+ dev_err(dmm->dev,
+ "%s: error (engine%d). PAT_STATUS: 0x%08x\n",
+ __func__, engine->id, r);
return -EFAULT;
+ }
if ((r & wait_mask) == wait_mask)
break;
- if (--i == 0)
+ if (--i == 0) {
+ dev_err(dmm->dev,
+ "%s: timeout (engine%d). PAT_STATUS: 0x%08x\n",
+ __func__, engine->id, r);
return -ETIMEDOUT;
+ }
udelay(1);
}
@@ -158,6 +165,11 @@ static irqreturn_t omap_dmm_irq_handler(int irq, void *arg)
dmm_write(dmm, status, DMM_PAT_IRQSTATUS);
for (i = 0; i < dmm->num_engines; i++) {
+ if (status & DMM_IRQSTAT_ERR_MASK)
+ dev_err(dmm->dev,
+ "irq error(engine%d): IRQSTAT 0x%02x\n",
+ i, status & 0xff);
+
if (status & DMM_IRQSTAT_LST) {
if (dmm->engines[i].async)
release_engine(&dmm->engines[i]);
@@ -298,7 +310,12 @@ static int dmm_txn_commit(struct dmm_txn *txn, bool wait)
msecs_to_jiffies(100))) {
dev_err(dmm->dev, "timed out waiting for done\n");
ret = -ETIMEDOUT;
+ goto cleanup;
}
+
+ /* Check the engine status before continuing */
+ ret = wait_status(engine, DMM_PATSTATUS_READY |
+ DMM_PATSTATUS_VALID | DMM_PATSTATUS_DONE);
}
cleanup:
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.h b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.h
index e83c78372db8..cc78ba4fe6ab 100644
--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.h
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.h
@@ -1,5 +1,4 @@
/*
- *
* Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
* Author: Rob Clark <rob@ti.com>
* Andy Gross <andy.gross@ti.com>
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index cdf5b0601eba..dd68b2556f5b 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -1,7 +1,5 @@
/*
- * drivers/gpu/drm/omapdrm/omap_drv.c
- *
- * Copyright (C) 2011 Texas Instruments
+ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
* Author: Rob Clark <rob@ti.com>
*
* This program is free software; you can redistribute it and/or modify it
@@ -46,14 +44,6 @@
* devices
*/
-static void omap_fb_output_poll_changed(struct drm_device *dev)
-{
- struct omap_drm_private *priv = dev->dev_private;
- DBG("dev=%p", dev);
- if (priv->fbdev)
- drm_fb_helper_hotplug_event(priv->fbdev);
-}
-
static void omap_atomic_wait_for_completion(struct drm_device *dev,
struct drm_atomic_state *old_state)
{
@@ -132,7 +122,7 @@ static const struct drm_mode_config_helper_funcs omap_mode_config_helper_funcs =
static const struct drm_mode_config_funcs omap_mode_config_funcs = {
.fb_create = omap_framebuffer_create,
- .output_poll_changed = omap_fb_output_poll_changed,
+ .output_poll_changed = drm_fb_helper_output_poll_changed,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
};
@@ -467,28 +457,6 @@ static int dev_open(struct drm_device *dev, struct drm_file *file)
return 0;
}
-/**
- * lastclose - clean up after all DRM clients have exited
- * @dev: DRM device
- *
- * Take care of cleaning up after all DRM clients have exited. In the
- * mode setting case, we want to restore the kernel's initial mode (just
- * in case the last client left us in a bad state).
- */
-static void dev_lastclose(struct drm_device *dev)
-{
- struct omap_drm_private *priv = dev->dev_private;
- int ret;
-
- DBG("lastclose: dev=%p", dev);
-
- if (priv->fbdev) {
- ret = drm_fb_helper_restore_fbdev_mode_unlocked(priv->fbdev);
- if (ret)
- DBG("failed to restore crtc mode");
- }
-}
-
static const struct vm_operations_struct omap_gem_vm_ops = {
.fault = omap_gem_fault,
.open = drm_gem_vm_open,
@@ -511,7 +479,7 @@ static struct drm_driver omap_drm_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME |
DRIVER_ATOMIC | DRIVER_RENDER,
.open = dev_open,
- .lastclose = dev_lastclose,
+ .lastclose = drm_fb_helper_lastclose,
#ifdef CONFIG_DEBUG_FS
.debugfs_init = omap_debugfs_init,
#endif
@@ -593,6 +561,11 @@ static int pdev_probe(struct platform_device *pdev)
ddev->dev_private = priv;
platform_set_drvdata(pdev, ddev);
+ /* Get memory bandwidth limits */
+ if (priv->dispc_ops->get_memory_bandwidth_limit)
+ priv->max_bandwidth =
+ priv->dispc_ops->get_memory_bandwidth_limit();
+
omap_gem_init(ddev);
ret = omap_modeset_init(ddev);
@@ -740,7 +713,7 @@ static int omap_drm_resume(struct device *dev)
drm_kms_helper_poll_enable(drm_dev);
- return omap_gem_resume(dev);
+ return omap_gem_resume(drm_dev);
}
#endif
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h
index 4bd1e9070b31..0ac97fe09f9b 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.h
+++ b/drivers/gpu/drm/omapdrm/omap_drv.h
@@ -1,7 +1,5 @@
/*
- * drivers/gpu/drm/omapdrm/omap_drv.h
- *
- * Copyright (C) 2011 Texas Instruments
+ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
* Author: Rob Clark <rob@ti.com>
*
* This program is free software; you can redistribute it and/or modify it
@@ -17,8 +15,8 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#ifndef __OMAP_DRV_H__
-#define __OMAP_DRV_H__
+#ifndef __OMAPDRM_DRV_H__
+#define __OMAPDRM_DRV_H__
#include <linux/module.h>
#include <linux/types.h>
@@ -31,6 +29,15 @@
#include "dss/omapdss.h"
+#include "omap_connector.h"
+#include "omap_crtc.h"
+#include "omap_encoder.h"
+#include "omap_fb.h"
+#include "omap_fbdev.h"
+#include "omap_gem.h"
+#include "omap_irq.h"
+#include "omap_plane.h"
+
#define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
#define VERB(fmt, ...) if (0) DRM_DEBUG(fmt, ##__VA_ARGS__) /* verbose debug */
@@ -38,14 +45,6 @@
struct omap_drm_usergart;
-/* For KMS code that needs to wait for a certain # of IRQs:
- */
-struct omap_irq_wait;
-struct omap_irq_wait * omap_irq_wait_init(struct drm_device *dev,
- uint32_t irqmask, int count);
-int omap_irq_wait(struct drm_device *dev, struct omap_irq_wait *wait,
- unsigned long timeout);
-
struct omap_drm_private {
uint32_t omaprev;
@@ -83,117 +82,12 @@ struct omap_drm_private {
spinlock_t wait_lock; /* protects the wait_list */
struct list_head wait_list; /* list of omap_irq_wait */
uint32_t irq_mask; /* enabled irqs in addition to wait_list */
+
+ /* memory bandwidth limit if it is needed on the platform */
+ unsigned int max_bandwidth;
};
-#ifdef CONFIG_DEBUG_FS
int omap_debugfs_init(struct drm_minor *minor);
-void omap_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m);
-void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m);
-void omap_gem_describe_objects(struct list_head *list, struct seq_file *m);
-#endif
-
-#ifdef CONFIG_PM
-int omap_gem_resume(struct device *dev);
-#endif
-
-int omap_irq_enable_vblank(struct drm_crtc *crtc);
-void omap_irq_disable_vblank(struct drm_crtc *crtc);
-void omap_drm_irq_uninstall(struct drm_device *dev);
-int omap_drm_irq_install(struct drm_device *dev);
-
-#ifdef CONFIG_DRM_FBDEV_EMULATION
-struct drm_fb_helper *omap_fbdev_init(struct drm_device *dev);
-void omap_fbdev_free(struct drm_device *dev);
-#else
-static inline struct drm_fb_helper *omap_fbdev_init(struct drm_device *dev)
-{
- return NULL;
-}
-static inline void omap_fbdev_free(struct drm_device *dev)
-{
-}
-#endif
-
-struct videomode *omap_crtc_timings(struct drm_crtc *crtc);
-enum omap_channel omap_crtc_channel(struct drm_crtc *crtc);
-void omap_crtc_pre_init(void);
-void omap_crtc_pre_uninit(void);
-struct drm_crtc *omap_crtc_init(struct drm_device *dev,
- struct drm_plane *plane, struct omap_dss_device *dssdev);
-int omap_crtc_wait_pending(struct drm_crtc *crtc);
-void omap_crtc_error_irq(struct drm_crtc *crtc, uint32_t irqstatus);
-void omap_crtc_vblank_irq(struct drm_crtc *crtc);
-
-struct drm_plane *omap_plane_init(struct drm_device *dev,
- int idx, enum drm_plane_type type,
- u32 possible_crtcs);
-void omap_plane_install_properties(struct drm_plane *plane,
- struct drm_mode_object *obj);
-
-struct drm_encoder *omap_encoder_init(struct drm_device *dev,
- struct omap_dss_device *dssdev);
-
-struct drm_connector *omap_connector_init(struct drm_device *dev,
- int connector_type, struct omap_dss_device *dssdev,
- struct drm_encoder *encoder);
-struct drm_encoder *omap_connector_attached_encoder(
- struct drm_connector *connector);
-bool omap_connector_get_hdmi_mode(struct drm_connector *connector);
-
-struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev,
- struct drm_file *file, const struct drm_mode_fb_cmd2 *mode_cmd);
-struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
- const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos);
-int omap_framebuffer_pin(struct drm_framebuffer *fb);
-void omap_framebuffer_unpin(struct drm_framebuffer *fb);
-void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
- struct drm_plane_state *state, struct omap_overlay_info *info);
-struct drm_connector *omap_framebuffer_get_next_connector(
- struct drm_framebuffer *fb, struct drm_connector *from);
-bool omap_framebuffer_supports_rotation(struct drm_framebuffer *fb);
-
-void omap_gem_init(struct drm_device *dev);
-void omap_gem_deinit(struct drm_device *dev);
-
-struct drm_gem_object *omap_gem_new(struct drm_device *dev,
- union omap_gem_size gsize, uint32_t flags);
-struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size,
- struct sg_table *sgt);
-int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
- union omap_gem_size gsize, uint32_t flags, uint32_t *handle);
-void omap_gem_free_object(struct drm_gem_object *obj);
-void *omap_gem_vaddr(struct drm_gem_object *obj);
-int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
- uint32_t handle, uint64_t *offset);
-int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
- struct drm_mode_create_dumb *args);
-int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma);
-int omap_gem_mmap_obj(struct drm_gem_object *obj,
- struct vm_area_struct *vma);
-int omap_gem_fault(struct vm_fault *vmf);
-int omap_gem_roll(struct drm_gem_object *obj, uint32_t roll);
-void omap_gem_cpu_sync_page(struct drm_gem_object *obj, int pgoff);
-void omap_gem_dma_sync_buffer(struct drm_gem_object *obj,
- enum dma_data_direction dir);
-int omap_gem_pin(struct drm_gem_object *obj, dma_addr_t *dma_addr);
-void omap_gem_unpin(struct drm_gem_object *obj);
-int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages,
- bool remap);
-int omap_gem_put_pages(struct drm_gem_object *obj);
-uint32_t omap_gem_flags(struct drm_gem_object *obj);
-int omap_gem_rotated_dma_addr(struct drm_gem_object *obj, uint32_t orient,
- int x, int y, dma_addr_t *dma_addr);
-uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj);
-size_t omap_gem_mmap_size(struct drm_gem_object *obj);
-int omap_gem_tiled_stride(struct drm_gem_object *obj, uint32_t orient);
-
-struct dma_buf *omap_gem_prime_export(struct drm_device *dev,
- struct drm_gem_object *obj, int flags);
-struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev,
- struct dma_buf *buffer);
-
-/* map crtc to vblank mask */
-struct omap_dss_device *omap_encoder_get_dssdev(struct drm_encoder *encoder);
-
-#endif /* __OMAP_DRV_H__ */
+
+#endif /* __OMAPDRM_DRV_H__ */
diff --git a/drivers/gpu/drm/omapdrm/omap_encoder.c b/drivers/gpu/drm/omapdrm/omap_encoder.c
index 624f5b50b755..fcdf4b0a8eec 100644
--- a/drivers/gpu/drm/omapdrm/omap_encoder.c
+++ b/drivers/gpu/drm/omapdrm/omap_encoder.c
@@ -1,7 +1,5 @@
/*
- * drivers/gpu/drm/omapdrm/omap_encoder.c
- *
- * Copyright (C) 2011 Texas Instruments
+ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
* Author: Rob Clark <rob@ti.com>
*
* This program is free software; you can redistribute it and/or modify it
diff --git a/drivers/gpu/drm/omapdrm/omap_encoder.h b/drivers/gpu/drm/omapdrm/omap_encoder.h
new file mode 100644
index 000000000000..d2f308bec494
--- /dev/null
+++ b/drivers/gpu/drm/omapdrm/omap_encoder.h
@@ -0,0 +1,33 @@
+/*
+ * omap_encoder.h -- OMAP DRM Encoder
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Rob Clark <rob@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __OMAPDRM_ENCODER_H__
+#define __OMAPDRM_ENCODER_H__
+
+struct drm_device;
+struct drm_encoder;
+struct omap_dss_device;
+
+struct drm_encoder *omap_encoder_init(struct drm_device *dev,
+ struct omap_dss_device *dssdev);
+
+/* map crtc to vblank mask */
+struct omap_dss_device *omap_encoder_get_dssdev(struct drm_encoder *encoder);
+
+#endif /* __OMAPDRM_ENCODER_H__ */
diff --git a/drivers/gpu/drm/omapdrm/omap_fb.c b/drivers/gpu/drm/omapdrm/omap_fb.c
index b1a762b70cbf..b2539a90e1a4 100644
--- a/drivers/gpu/drm/omapdrm/omap_fb.c
+++ b/drivers/gpu/drm/omapdrm/omap_fb.c
@@ -1,7 +1,5 @@
/*
- * drivers/gpu/drm/omapdrm/omap_fb.c
- *
- * Copyright (C) 2011 Texas Instruments
+ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
* Author: Rob Clark <rob@ti.com>
*
* This program is free software; you can redistribute it and/or modify it
diff --git a/drivers/gpu/drm/omapdrm/omap_fb.h b/drivers/gpu/drm/omapdrm/omap_fb.h
new file mode 100644
index 000000000000..94ad5f9e4404
--- /dev/null
+++ b/drivers/gpu/drm/omapdrm/omap_fb.h
@@ -0,0 +1,46 @@
+/*
+ * omap_fb.h -- OMAP DRM Framebuffer
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Rob Clark <rob@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __OMAPDRM_FB_H__
+#define __OMAPDRM_FB_H__
+
+struct drm_connector;
+struct drm_device;
+struct drm_file;
+struct drm_framebuffer;
+struct drm_gem_object;
+struct drm_mode_fb_cmd2;
+struct drm_plane_state;
+struct omap_overlay_info;
+struct seq_file;
+
+struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev,
+ struct drm_file *file, const struct drm_mode_fb_cmd2 *mode_cmd);
+struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
+ const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos);
+int omap_framebuffer_pin(struct drm_framebuffer *fb);
+void omap_framebuffer_unpin(struct drm_framebuffer *fb);
+void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
+ struct drm_plane_state *state, struct omap_overlay_info *info);
+struct drm_connector *omap_framebuffer_get_next_connector(
+ struct drm_framebuffer *fb, struct drm_connector *from);
+bool omap_framebuffer_supports_rotation(struct drm_framebuffer *fb);
+void omap_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m);
+
+#endif /* __OMAPDRM_FB_H__ */
diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c
index 9273118040b7..fb309d19ca1b 100644
--- a/drivers/gpu/drm/omapdrm/omap_fbdev.c
+++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c
@@ -1,7 +1,5 @@
/*
- * drivers/gpu/drm/omapdrm/omap_fbdev.c
- *
- * Copyright (C) 2011 Texas Instruments
+ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
* Author: Rob Clark <rob@ti.com>
*
* This program is free software; you can redistribute it and/or modify it
@@ -84,9 +82,6 @@ static struct fb_ops omap_fb_ops = {
.owner = THIS_MODULE,
DRM_FB_HELPER_DEFAULT_OPS,
- /* Note: to properly handle manual update displays, we wrap the
- * basic fbdev ops which write to the framebuffer
- */
.fb_read = drm_fb_helper_sys_read,
.fb_write = drm_fb_helper_sys_write,
.fb_fillrect = drm_fb_helper_sys_fillrect,
diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.h b/drivers/gpu/drm/omapdrm/omap_fbdev.h
new file mode 100644
index 000000000000..1f5ba0996a1a
--- /dev/null
+++ b/drivers/gpu/drm/omapdrm/omap_fbdev.h
@@ -0,0 +1,39 @@
+/*
+ * omap_fbdev.h -- OMAP DRM FBDEV Compatibility
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Rob Clark <rob@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __OMAPDRM_FBDEV_H__
+#define __OMAPDRM_FBDEV_H__
+
+struct drm_device;
+struct drm_fb_helper;
+
+#ifdef CONFIG_DRM_FBDEV_EMULATION
+struct drm_fb_helper *omap_fbdev_init(struct drm_device *dev);
+void omap_fbdev_free(struct drm_device *dev);
+#else
+static inline struct drm_fb_helper *omap_fbdev_init(struct drm_device *dev)
+{
+ return NULL;
+}
+static inline void omap_fbdev_free(struct drm_device *dev)
+{
+}
+#endif
+
+#endif /* __OMAPDRM_FBDEV_H__ */
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index 5c5c86ddd6f4..443469d4fa46 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -1,7 +1,5 @@
/*
- * drivers/gpu/drm/omapdrm/omap_gem.c
- *
- * Copyright (C) 2011 Texas Instruments
+ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
* Author: Rob Clark <rob.clark@linaro.org>
*
* This program is free software; you can redistribute it and/or modify it
@@ -996,10 +994,9 @@ void *omap_gem_vaddr(struct drm_gem_object *obj)
#ifdef CONFIG_PM
/* re-pin objects in DMM in resume path: */
-int omap_gem_resume(struct device *dev)
+int omap_gem_resume(struct drm_device *dev)
{
- struct drm_device *drm_dev = dev_get_drvdata(dev);
- struct omap_drm_private *priv = drm_dev->dev_private;
+ struct omap_drm_private *priv = dev->dev_private;
struct omap_gem_object *omap_obj;
int ret = 0;
@@ -1012,7 +1009,7 @@ int omap_gem_resume(struct device *dev)
omap_obj->pages, npages,
omap_obj->roll, true);
if (ret) {
- dev_err(dev, "could not repin: %d\n", ret);
+ dev_err(dev->dev, "could not repin: %d\n", ret);
return ret;
}
}
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.h b/drivers/gpu/drm/omapdrm/omap_gem.h
new file mode 100644
index 000000000000..35fa690b3d90
--- /dev/null
+++ b/drivers/gpu/drm/omapdrm/omap_gem.h
@@ -0,0 +1,99 @@
+/*
+ * omap_gem.h -- OMAP DRM GEM Object Management
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Rob Clark <rob@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __OMAPDRM_GEM_H__
+#define __OMAPDRM_GEM_H__
+
+#include <linux/types.h>
+
+enum dma_data_direction;
+
+struct dma_buf;
+struct drm_device;
+struct drm_file;
+struct drm_gem_object;
+struct drm_mode_create_dumb;
+struct file;
+struct list_head;
+struct page;
+struct seq_file;
+struct vm_area_struct;
+struct vm_fault;
+
+union omap_gem_size;
+
+/* Initialization and Cleanup */
+void omap_gem_init(struct drm_device *dev);
+void omap_gem_deinit(struct drm_device *dev);
+
+#ifdef CONFIG_PM
+int omap_gem_resume(struct drm_device *dev);
+#endif
+
+#ifdef CONFIG_DEBUG_FS
+void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m);
+void omap_gem_describe_objects(struct list_head *list, struct seq_file *m);
+#endif
+
+/* GEM Object Creation and Deletion */
+struct drm_gem_object *omap_gem_new(struct drm_device *dev,
+ union omap_gem_size gsize, uint32_t flags);
+struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size,
+ struct sg_table *sgt);
+int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
+ union omap_gem_size gsize, uint32_t flags, uint32_t *handle);
+void omap_gem_free_object(struct drm_gem_object *obj);
+void *omap_gem_vaddr(struct drm_gem_object *obj);
+
+/* Dumb Buffers Interface */
+int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
+ uint32_t handle, uint64_t *offset);
+int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
+
+/* mmap() Interface */
+int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma);
+int omap_gem_mmap_obj(struct drm_gem_object *obj,
+ struct vm_area_struct *vma);
+uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj);
+size_t omap_gem_mmap_size(struct drm_gem_object *obj);
+
+/* PRIME Interface */
+struct dma_buf *omap_gem_prime_export(struct drm_device *dev,
+ struct drm_gem_object *obj, int flags);
+struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev,
+ struct dma_buf *buffer);
+
+int omap_gem_fault(struct vm_fault *vmf);
+int omap_gem_roll(struct drm_gem_object *obj, uint32_t roll);
+void omap_gem_cpu_sync_page(struct drm_gem_object *obj, int pgoff);
+void omap_gem_dma_sync_buffer(struct drm_gem_object *obj,
+ enum dma_data_direction dir);
+int omap_gem_pin(struct drm_gem_object *obj, dma_addr_t *dma_addr);
+void omap_gem_unpin(struct drm_gem_object *obj);
+int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages,
+ bool remap);
+int omap_gem_put_pages(struct drm_gem_object *obj);
+
+uint32_t omap_gem_flags(struct drm_gem_object *obj);
+int omap_gem_rotated_dma_addr(struct drm_gem_object *obj, uint32_t orient,
+ int x, int y, dma_addr_t *dma_addr);
+int omap_gem_tiled_stride(struct drm_gem_object *obj, uint32_t orient);
+
+#endif /* __OMAPDRM_GEM_H__ */
diff --git a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
index afdbad5c866a..8e41d649e248 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
@@ -1,7 +1,5 @@
/*
- * drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
- *
- * Copyright (C) 2011 Texas Instruments
+ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
* Author: Rob Clark <rob.clark@linaro.org>
*
* This program is free software; you can redistribute it and/or modify it
diff --git a/drivers/gpu/drm/omapdrm/omap_irq.c b/drivers/gpu/drm/omapdrm/omap_irq.c
index 013b0bba712f..53ba424823b2 100644
--- a/drivers/gpu/drm/omapdrm/omap_irq.c
+++ b/drivers/gpu/drm/omapdrm/omap_irq.c
@@ -1,7 +1,5 @@
/*
- * drivers/gpu/drm/omapdrm/omap_irq.c
- *
- * Copyright (C) 2012 Texas Instruments
+ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
* Author: Rob Clark <rob.clark@linaro.org>
*
* This program is free software; you can redistribute it and/or modify it
diff --git a/drivers/gpu/drm/omapdrm/omap_irq.h b/drivers/gpu/drm/omapdrm/omap_irq.h
new file mode 100644
index 000000000000..606c09932bc0
--- /dev/null
+++ b/drivers/gpu/drm/omapdrm/omap_irq.h
@@ -0,0 +1,39 @@
+/*
+ * omap_irq.h -- OMAP DRM IRQ Handling
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Rob Clark <rob@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __OMAPDRM_IRQ_H__
+#define __OMAPDRM_IRQ_H__
+
+#include <linux/types.h>
+
+struct drm_crtc;
+struct drm_device;
+struct omap_irq_wait;
+
+int omap_irq_enable_vblank(struct drm_crtc *crtc);
+void omap_irq_disable_vblank(struct drm_crtc *crtc);
+void omap_drm_irq_uninstall(struct drm_device *dev);
+int omap_drm_irq_install(struct drm_device *dev);
+
+struct omap_irq_wait *omap_irq_wait_init(struct drm_device *dev,
+ uint32_t irqmask, int count);
+int omap_irq_wait(struct drm_device *dev, struct omap_irq_wait *wait,
+ unsigned long timeout);
+
+#endif /* __OMAPDRM_IRQ_H__ */
diff --git a/drivers/gpu/drm/omapdrm/omap_plane.c b/drivers/gpu/drm/omapdrm/omap_plane.c
index 15e5d5d325c6..7d789d1551a1 100644
--- a/drivers/gpu/drm/omapdrm/omap_plane.c
+++ b/drivers/gpu/drm/omapdrm/omap_plane.c
@@ -1,7 +1,5 @@
/*
- * drivers/gpu/drm/omapdrm/omap_plane.c
- *
- * Copyright (C) 2011 Texas Instruments
+ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
* Author: Rob Clark <rob.clark@linaro.org>
*
* This program is free software; you can redistribute it and/or modify it
diff --git a/drivers/gpu/drm/omapdrm/omap_plane.h b/drivers/gpu/drm/omapdrm/omap_plane.h
new file mode 100644
index 000000000000..dc5e82ad061d
--- /dev/null
+++ b/drivers/gpu/drm/omapdrm/omap_plane.h
@@ -0,0 +1,37 @@
+/*
+ * omap_plane.h -- OMAP DRM Plane
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Rob Clark <rob@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __OMAPDRM_PLANE_H__
+#define __OMAPDRM_PLANE_H__
+
+#include <linux/types.h>
+
+enum drm_plane_type;
+
+struct drm_device;
+struct drm_mode_object;
+struct drm_plane;
+
+struct drm_plane *omap_plane_init(struct drm_device *dev,
+ int idx, enum drm_plane_type type,
+ u32 possible_crtcs);
+void omap_plane_install_properties(struct drm_plane *plane,
+ struct drm_mode_object *obj);
+
+#endif /* __OMAPDRM_PLANE_H__ */
diff --git a/drivers/gpu/drm/omapdrm/tcm-sita.c b/drivers/gpu/drm/omapdrm/tcm-sita.c
index c10fdfc0930f..661362d072f7 100644
--- a/drivers/gpu/drm/omapdrm/tcm-sita.c
+++ b/drivers/gpu/drm/omapdrm/tcm-sita.c
@@ -1,13 +1,11 @@
/*
- * tcm-sita.c
- *
* SImple Tiler Allocator (SiTA): 2D and 1D allocation(reservation) algorithm
*
* Authors: Ravi Ramachandra <r.ramachandra@ti.com>,
* Lajos Molnar <molnar@ti.com>
* Andy Gross <andy.gross@ti.com>
*
- * Copyright (C) 2012 Texas Instruments, Inc.
+ * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
*
* This package is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
diff --git a/drivers/gpu/drm/omapdrm/tcm-sita.h b/drivers/gpu/drm/omapdrm/tcm-sita.h
index 0444f868671c..460e63dbf825 100644
--- a/drivers/gpu/drm/omapdrm/tcm-sita.h
+++ b/drivers/gpu/drm/omapdrm/tcm-sita.h
@@ -1,11 +1,9 @@
/*
- * tcm_sita.h
- *
* SImple Tiler Allocator (SiTA) private structures.
*
+ * Copyright (C) 2009-2011 Texas Instruments Incorporated - http://www.ti.com/
* Author: Ravi Ramachandra <r.ramachandra@ti.com>
*
- * Copyright (C) 2009-2011 Texas Instruments, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/gpu/drm/omapdrm/tcm.h b/drivers/gpu/drm/omapdrm/tcm.h
index ef7df7d6fc84..d8a369a4f269 100644
--- a/drivers/gpu/drm/omapdrm/tcm.h
+++ b/drivers/gpu/drm/omapdrm/tcm.h
@@ -1,6 +1,4 @@
/*
- * tcm.h
- *
* TILER container manager specification and support functions for TI
* TILER driver.
*
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index 726f3fb3312d..6ba4031f3919 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -28,6 +28,14 @@ config DRM_PANEL_SIMPLE
that it can be automatically turned off when the panel goes into a
low power state.
+config DRM_PANEL_ILITEK_IL9322
+ tristate "Ilitek ILI9322 320x240 QVGA panels"
+ depends on OF && SPI
+ select REGMAP
+ help
+ Say Y here if you want to enable support for the Ilitek ILI9322
+ QVGA (320x240) RGB, YUV and ITU-R BT.656 panels.
+
config DRM_PANEL_INNOLUX_P079ZCA
tristate "Innolux P079ZCA panel"
depends on OF
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile
index 2c4e1a93e05f..6d251ebc568c 100644
--- a/drivers/gpu/drm/panel/Makefile
+++ b/drivers/gpu/drm/panel/Makefile
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_DRM_PANEL_LVDS) += panel-lvds.o
obj-$(CONFIG_DRM_PANEL_SIMPLE) += panel-simple.o
+obj-$(CONFIG_DRM_PANEL_ILITEK_IL9322) += panel-ilitek-ili9322.o
obj-$(CONFIG_DRM_PANEL_INNOLUX_P079ZCA) += panel-innolux-p079zca.o
obj-$(CONFIG_DRM_PANEL_JDI_LT070ME05000) += panel-jdi-lt070me05000.o
obj-$(CONFIG_DRM_PANEL_LG_LG4573) += panel-lg-lg4573.o
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9322.c b/drivers/gpu/drm/panel/panel-ilitek-ili9322.c
new file mode 100644
index 000000000000..b4ec0ecff807
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9322.c
@@ -0,0 +1,962 @@
+/*
+ * Ilitek ILI9322 TFT LCD drm_panel driver.
+ *
+ * This panel can be configured to support:
+ * - 8-bit serial RGB interface
+ * - 24-bit parallel RGB interface
+ * - 8-bit ITU-R BT.601 interface
+ * - 8-bit ITU-R BT.656 interface
+ * - Up to 320RGBx240 dots resolution TFT LCD displays
+ * - Scaling, brightness and contrast
+ *
+ * The scaling means that the display accepts a 640x480 or 720x480
+ * input and rescales it to fit the 320x240 display. So what we
+ * present to the system is something other than what comes out on the
+ * actual display.
+ *
+ * Copyright (C) 2017 Linus Walleij <linus.walleij@linaro.org>
+ * Derived from drivers/gpu/drm/panel/panel-samsung-ld9040.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_panel.h>
+
+#include <linux/of_device.h>
+#include <linux/bitops.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spi/spi.h>
+
+#include <video/mipi_display.h>
+#include <video/of_videomode.h>
+#include <video/videomode.h>
+
+#define ILI9322_CHIP_ID 0x00
+#define ILI9322_CHIP_ID_MAGIC 0x96
+
+/*
+ * Voltage on the communication interface, from 0.7 (0x00)
+ * to 1.32 (0x1f) times the VREG1OUT voltage in 2% increments.
+ * 1.00 (0x0f) is the default.
+ */
+#define ILI9322_VCOM_AMP 0x01
+
+/*
+ * High voltage on the communication signals, from 0.37 (0x00) to
+ * 1.0 (0x3f) times the VREG1OUT voltage in 1% increments.
+ * 0.83 (0x2e) is the default.
+ */
+#define ILI9322_VCOM_HIGH 0x02
+
+/*
+ * VREG1 voltage regulator from 3.6V (0x00) to 6.0V (0x18) in 0.1V
+ * increments. 5.4V (0x12) is the default. This is the reference
+ * voltage for the VCOM levels and the greyscale level.
+ */
+#define ILI9322_VREG1_VOLTAGE 0x03
+
+/* Describes the incoming signal */
+#define ILI9322_ENTRY 0x06
+/* 0 = right-to-left, 1 = left-to-right (default), horizontal flip */
+#define ILI9322_ENTRY_HDIR BIT(0)
+/* 0 = down-to-up, 1 = up-to-down (default), vertical flip */
+#define ILI9322_ENTRY_VDIR BIT(1)
+/* NTSC, PAL or autodetect */
+#define ILI9322_ENTRY_NTSC (0 << 2)
+#define ILI9322_ENTRY_PAL (1 << 2)
+#define ILI9322_ENTRY_AUTODETECT (3 << 2)
+/* Input format */
+#define ILI9322_ENTRY_SERIAL_RGB_THROUGH (0 << 4)
+#define ILI9322_ENTRY_SERIAL_RGB_ALIGNED (1 << 4)
+#define ILI9322_ENTRY_SERIAL_RGB_DUMMY_320X240 (2 << 4)
+#define ILI9322_ENTRY_SERIAL_RGB_DUMMY_360X240 (3 << 4)
+#define ILI9322_ENTRY_DISABLE_1 (4 << 4)
+#define ILI9322_ENTRY_PARALLEL_RGB_THROUGH (5 << 4)
+#define ILI9322_ENTRY_PARALLEL_RGB_ALIGNED (6 << 4)
+#define ILI9322_ENTRY_YUV_640Y_320CBCR_25_54_MHZ (7 << 4)
+#define ILI9322_ENTRY_YUV_720Y_360CBCR_27_MHZ (8 << 4)
+#define ILI9322_ENTRY_DISABLE_2 (9 << 4)
+#define ILI9322_ENTRY_ITU_R_BT_656_720X360 (10 << 4)
+#define ILI9322_ENTRY_ITU_R_BT_656_640X320 (11 << 4)
+
+/* Power control */
+#define ILI9322_POW_CTRL 0x07
+#define ILI9322_POW_CTRL_STB BIT(0) /* 0 = standby, 1 = normal */
+#define ILI9322_POW_CTRL_VGL BIT(1) /* 0 = off, 1 = on */
+#define ILI9322_POW_CTRL_VGH BIT(2) /* 0 = off, 1 = on */
+#define ILI9322_POW_CTRL_DDVDH BIT(3) /* 0 = off, 1 = on */
+#define ILI9322_POW_CTRL_VCOM BIT(4) /* 0 = off, 1 = on */
+#define ILI9322_POW_CTRL_VCL BIT(5) /* 0 = off, 1 = on */
+#define ILI9322_POW_CTRL_AUTO BIT(6) /* 0 = interactive, 1 = auto */
+#define ILI9322_POW_CTRL_STANDBY (ILI9322_POW_CTRL_VGL | \
+ ILI9322_POW_CTRL_VGH | \
+ ILI9322_POW_CTRL_DDVDH | \
+ ILI9322_POW_CTRL_VCL | \
+ ILI9322_POW_CTRL_AUTO | \
+ BIT(7))
+#define ILI9322_POW_CTRL_DEFAULT (ILI9322_POW_CTRL_STANDBY | \
+ ILI9322_POW_CTRL_STB)
+
+/* Vertical back porch bits 0..5 */
+#define ILI9322_VBP 0x08
+
+/* Horizontal back porch, 8 bits */
+#define ILI9322_HBP 0x09
+
+/*
+ * Polarity settings:
+ * 1 = positive polarity
+ * 0 = negative polarity
+ */
+#define ILI9322_POL 0x0a
+#define ILI9322_POL_DCLK BIT(0) /* 1 default */
+#define ILI9322_POL_HSYNC BIT(1) /* 0 default */
+#define ILI9322_POL_VSYNC BIT(2) /* 0 default */
+#define ILI9322_POL_DE BIT(3) /* 1 default */
+/*
+ * 0 means YCBCR are ordered Cb0,Y0,Cr0,Y1,Cb2,Y2,Cr2,Y3 (default)
+ * in RGB mode this means RGB comes in RGBRGB
+ * 1 means YCBCR are ordered Cr0,Y0,Cb0,Y1,Cr2,Y2,Cb2,Y3
+ * in RGB mode this means RGB comes in BGRBGR
+ */
+#define ILI9322_POL_YCBCR_MODE BIT(4)
+/* Formula A for YCbCR->RGB = 0, Formula B = 1 */
+#define ILI9322_POL_FORMULA BIT(5)
+/* Reverse polarity: 0 = 0..255, 1 = 255..0 */
+#define ILI9322_POL_REV BIT(6)
+
+#define ILI9322_IF_CTRL 0x0b
+#define ILI9322_IF_CTRL_HSYNC_VSYNC 0x00
+#define ILI9322_IF_CTRL_HSYNC_VSYNC_DE BIT(2)
+#define ILI9322_IF_CTRL_DE_ONLY BIT(3)
+#define ILI9322_IF_CTRL_SYNC_DISABLED (BIT(2) | BIT(3))
+#define ILI9322_IF_CTRL_LINE_INVERSION BIT(0) /* Not set means frame inv */
+
+#define ILI9322_GLOBAL_RESET 0x04
+#define ILI9322_GLOBAL_RESET_ASSERT 0x00 /* bit 0 = 0 -> reset */
+
+/*
+ * 4+4 bits of negative and positive gamma correction
+ * Upper nybble, bits 4-7 are negative gamma
+ * Lower nybble, bits 0-3 are positive gamma
+ */
+#define ILI9322_GAMMA_1 0x10
+#define ILI9322_GAMMA_2 0x11
+#define ILI9322_GAMMA_3 0x12
+#define ILI9322_GAMMA_4 0x13
+#define ILI9322_GAMMA_5 0x14
+#define ILI9322_GAMMA_6 0x15
+#define ILI9322_GAMMA_7 0x16
+#define ILI9322_GAMMA_8 0x17
+
+/**
+ * enum ili9322_input - the format of the incoming signal to the panel
+ *
+ * The panel can be connected to various input streams and four of them can
+ * be selected by electronic straps on the display. However it is possible
+ * to select another mode or override the electronic default with this
+ * setting.
+ */
+enum ili9322_input {
+ ILI9322_INPUT_SRGB_THROUGH = 0x0,
+ ILI9322_INPUT_SRGB_ALIGNED = 0x1,
+ ILI9322_INPUT_SRGB_DUMMY_320X240 = 0x2,
+ ILI9322_INPUT_SRGB_DUMMY_360X240 = 0x3,
+ ILI9322_INPUT_DISABLED_1 = 0x4,
+ ILI9322_INPUT_PRGB_THROUGH = 0x5,
+ ILI9322_INPUT_PRGB_ALIGNED = 0x6,
+ ILI9322_INPUT_YUV_640X320_YCBCR = 0x7,
+ ILI9322_INPUT_YUV_720X360_YCBCR = 0x8,
+ ILI9322_INPUT_DISABLED_2 = 0x9,
+ ILI9322_INPUT_ITU_R_BT656_720X360_YCBCR = 0xa,
+ ILI9322_INPUT_ITU_R_BT656_640X320_YCBCR = 0xb,
+ ILI9322_INPUT_UNKNOWN = 0xc,
+};
+
+static const char * const ili9322_inputs[] = {
+ "8 bit serial RGB through",
+ "8 bit serial RGB aligned",
+ "8 bit serial RGB dummy 320x240",
+ "8 bit serial RGB dummy 360x240",
+ "disabled 1",
+ "24 bit parallel RGB through",
+ "24 bit parallel RGB aligned",
+ "24 bit YUV 640Y 320CbCr",
+ "24 bit YUV 720Y 360CbCr",
+ "disabled 2",
+ "8 bit ITU-R BT.656 720Y 360CbCr",
+ "8 bit ITU-R BT.656 640Y 320CbCr",
+};
+
+/**
+ * struct ili9322_config - the system specific ILI9322 configuration
+ * @width_mm: physical panel width [mm]
+ * @height_mm: physical panel height [mm]
+ * @flip_horizontal: flip the image horizontally (right-to-left scan)
+ * (only in RGB and YUV modes)
+ * @flip_vertical: flip the image vertically (down-to-up scan)
+ * (only in RGB and YUV modes)
+ * @input: the input/entry type used in this system, if this is set to
+ * ILI9322_INPUT_UNKNOWN the driver will try to figure it out by probing
+ * the hardware
+ * @vreg1out_mv: the output in millivolts for the VREG1OUT regulator used
+ * to drive the physical display. Valid range is 3600 thru 6000 in 100
+ * millivolt increments. If not specified, hardware defaults will be
+ * used (4.5V).
+ * @vcom_high_percent: the percentage of VREG1OUT used for the peak
+ * voltage on the communications link. Valid range is 37 thru 100
+ * percent. If not specified, hardware defaults will be used (91%).
+ * @vcom_amplitude_percent: the percentage of VREG1OUT used for the
+ * peak-to-peak amplitude of the communication signals to the physical
+ * display. Valid range is 70 thru 132 percent in increments of two
+ * percent. Odd percentages will be truncated. If not specified, hardware
+ * defaults will be used (114%).
+ * @dclk_active_high: data/pixel clock active high, data will be clocked
+ * in on the rising edge of the DCLK (this is usually the case).
+ * @syncmode: The synchronization mode, what sync signals are emitted.
+ * See the enum for details.
+ * @de_active_high: DE (data enable) is active high
+ * @hsync_active_high: HSYNC is active high
+ * @vsync_active_high: VSYNC is active high
+ * @gamma_corr_pos: a set of 8 nybbles describing positive
+ * gamma correction for voltages V1 thru V8. Valid range 0..15
+ * @gamma_corr_neg: a set of 8 nybbles describing negative
+ * gamma correction for voltages V1 thru V8. Valid range 0..15
+ *
+ * These adjust what grayscale voltage will be output for input data V1 = 0,
+ * V2 = 16, V3 = 48, V4 = 96, V5 = 160, V6 = 208, V7 = 240 and V8 = 255.
+ * The curve is shaped like this:
+ *
+ * ^
+ * | V8
+ * | V7
+ * | V6
+ * | V5
+ * | V4
+ * | V3
+ * | V2
+ * | V1
+ * +----------------------------------------------------------->
+ * 0 16 48 96 160 208 240 255
+ *
+ * The negative and positive gamma values adjust the V1 thru V8 up/down
+ * according to the datasheet specifications. This is a property of the
+ * physical display connected to the display controller and may vary.
+ * If defined, both arrays must be supplied in full. If the properties
+ * are not supplied, hardware defaults will be used.
+ */
+struct ili9322_config {
+ u32 width_mm;
+ u32 height_mm;
+ bool flip_horizontal;
+ bool flip_vertical;
+ enum ili9322_input input;
+ u32 vreg1out_mv;
+ u32 vcom_high_percent;
+ u32 vcom_amplitude_percent;
+ bool dclk_active_high;
+ bool de_active_high;
+ bool hsync_active_high;
+ bool vsync_active_high;
+ u8 syncmode;
+ u8 gamma_corr_pos[8];
+ u8 gamma_corr_neg[8];
+};
+
+struct ili9322 {
+ struct device *dev;
+ const struct ili9322_config *conf;
+ struct drm_panel panel;
+ struct regmap *regmap;
+ struct regulator_bulk_data supplies[3];
+ struct gpio_desc *reset_gpio;
+ enum ili9322_input input;
+ struct videomode vm;
+ u8 gamma[8];
+ u8 vreg1out;
+ u8 vcom_high;
+ u8 vcom_amplitude;
+};
+
+static inline struct ili9322 *panel_to_ili9322(struct drm_panel *panel)
+{
+ return container_of(panel, struct ili9322, panel);
+}
+
+static int ili9322_regmap_spi_write(void *context, const void *data,
+ size_t count)
+{
+ struct device *dev = context;
+ struct spi_device *spi = to_spi_device(dev);
+ u8 buf[2];
+
+ /* Clear bit 7 to write */
+ memcpy(buf, data, 2);
+ buf[0] &= ~0x80;
+
+ dev_dbg(dev, "WRITE: %02x %02x\n", buf[0], buf[1]);
+ return spi_write_then_read(spi, buf, 2, NULL, 0);
+}
+
+static int ili9322_regmap_spi_read(void *context, const void *reg,
+ size_t reg_size, void *val, size_t val_size)
+{
+ struct device *dev = context;
+ struct spi_device *spi = to_spi_device(dev);
+ u8 buf[1];
+
+ /* Set bit 7 to 1 to read */
+ memcpy(buf, reg, 1);
+ dev_dbg(dev, "READ: %02x reg size = %zu, val size = %zu\n",
+ buf[0], reg_size, val_size);
+ buf[0] |= 0x80;
+
+ return spi_write_then_read(spi, buf, 1, val, 1);
+}
+
+static struct regmap_bus ili9322_regmap_bus = {
+ .write = ili9322_regmap_spi_write,
+ .read = ili9322_regmap_spi_read,
+ .reg_format_endian_default = REGMAP_ENDIAN_BIG,
+ .val_format_endian_default = REGMAP_ENDIAN_BIG,
+};
+
+static bool ili9322_volatile_reg(struct device *dev, unsigned int reg)
+{
+ return false;
+}
+
+static bool ili9322_writeable_reg(struct device *dev, unsigned int reg)
+{
+ /* Only register 0 is read-only */
+ if (reg == 0x00)
+ return false;
+ return true;
+}
+
+static const struct regmap_config ili9322_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0x44,
+ .cache_type = REGCACHE_RBTREE,
+ .volatile_reg = ili9322_volatile_reg,
+ .writeable_reg = ili9322_writeable_reg,
+};
+
+static int ili9322_init(struct drm_panel *panel, struct ili9322 *ili)
+{
+ struct drm_connector *connector = panel->connector;
+ u8 reg;
+ int ret;
+ int i;
+
+ /* Reset display */
+ ret = regmap_write(ili->regmap, ILI9322_GLOBAL_RESET,
+ ILI9322_GLOBAL_RESET_ASSERT);
+ if (ret) {
+ dev_err(ili->dev, "can't issue GRESET (%d)\n", ret);
+ return ret;
+ }
+
+ /* Set up the main voltage regulator */
+ if (ili->vreg1out != U8_MAX) {
+ ret = regmap_write(ili->regmap, ILI9322_VREG1_VOLTAGE,
+ ili->vreg1out);
+ if (ret) {
+ dev_err(ili->dev, "can't set up VREG1OUT (%d)\n", ret);
+ return ret;
+ }
+ }
+
+ if (ili->vcom_amplitude != U8_MAX) {
+ ret = regmap_write(ili->regmap, ILI9322_VCOM_AMP,
+ ili->vcom_amplitude);
+ if (ret) {
+ dev_err(ili->dev,
+ "can't set up VCOM amplitude (%d)\n", ret);
+ return ret;
+ }
+ }
+
+ if (ili->vcom_high != U8_MAX) {
+ ret = regmap_write(ili->regmap, ILI9322_VCOM_HIGH,
+ ili->vcom_high);
+ if (ret) {
+ dev_err(ili->dev, "can't set up VCOM high (%d)\n", ret);
+ return ret;
+ }
+ }
+
+ /* Set up gamma correction */
+ for (i = 0; i < ARRAY_SIZE(ili->gamma); i++) {
+ ret = regmap_write(ili->regmap, ILI9322_GAMMA_1 + i,
+ ili->gamma[i]);
+ if (ret) {
+ dev_err(ili->dev,
+ "can't write gamma V%d to 0x%02x (%d)\n",
+ i + 1, ILI9322_GAMMA_1 + i, ret);
+ return ret;
+ }
+ }
+
+ /*
+ * Polarity and inverted color order for RGB input.
+ * None of this applies in the BT.656 mode.
+ */
+ if (ili->conf->dclk_active_high) {
+ reg = ILI9322_POL_DCLK;
+ connector->display_info.bus_flags |=
+ DRM_BUS_FLAG_PIXDATA_POSEDGE;
+ } else {
+ reg = 0;
+ connector->display_info.bus_flags |=
+ DRM_BUS_FLAG_PIXDATA_NEGEDGE;
+ }
+ if (ili->conf->de_active_high) {
+ reg |= ILI9322_POL_DE;
+ connector->display_info.bus_flags |=
+ DRM_BUS_FLAG_DE_HIGH;
+ } else {
+ connector->display_info.bus_flags |=
+ DRM_BUS_FLAG_DE_LOW;
+ }
+ if (ili->conf->hsync_active_high)
+ reg |= ILI9322_POL_HSYNC;
+ if (ili->conf->vsync_active_high)
+ reg |= ILI9322_POL_VSYNC;
+ ret = regmap_write(ili->regmap, ILI9322_POL, reg);
+ if (ret) {
+ dev_err(ili->dev, "can't write POL register (%d)\n", ret);
+ return ret;
+ }
+
+ /*
+ * Set up interface control.
+ * This is not used in the BT.656 mode (no H/Vsync or DE signals).
+ */
+ reg = ili->conf->syncmode;
+ reg |= ILI9322_IF_CTRL_LINE_INVERSION;
+ ret = regmap_write(ili->regmap, ILI9322_IF_CTRL, reg);
+ if (ret) {
+ dev_err(ili->dev, "can't write IF CTRL register (%d)\n", ret);
+ return ret;
+ }
+
+ /* Set up the input mode */
+ reg = (ili->input << 4);
+ /* These are inverted, setting to 1 is the default, clearing flips */
+ if (!ili->conf->flip_horizontal)
+ reg |= ILI9322_ENTRY_HDIR;
+ if (!ili->conf->flip_vertical)
+ reg |= ILI9322_ENTRY_VDIR;
+ reg |= ILI9322_ENTRY_AUTODETECT;
+ ret = regmap_write(ili->regmap, ILI9322_ENTRY, reg);
+ if (ret) {
+ dev_err(ili->dev, "can't write ENTRY reg (%d)\n", ret);
+ return ret;
+ }
+ dev_info(ili->dev, "display is in %s mode, syncmode %02x\n",
+ ili9322_inputs[ili->input],
+ ili->conf->syncmode);
+
+ dev_info(ili->dev, "initialized display\n");
+
+ return 0;
+}
+
+/*
+ * This power-on sequence is from the datasheet, page 57.
+ */
+static int ili9322_power_on(struct ili9322 *ili)
+{
+ int ret;
+
+ /* Assert RESET */
+ gpiod_set_value(ili->reset_gpio, 1);
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(ili->supplies), ili->supplies);
+ if (ret < 0) {
+ dev_err(ili->dev, "unable to enable regulators\n");
+ return ret;
+ }
+ msleep(20);
+
+ /* De-assert RESET */
+ gpiod_set_value(ili->reset_gpio, 0);
+
+ msleep(10);
+
+ return 0;
+}
+
+static int ili9322_power_off(struct ili9322 *ili)
+{
+ return regulator_bulk_disable(ARRAY_SIZE(ili->supplies), ili->supplies);
+}
+
+static int ili9322_disable(struct drm_panel *panel)
+{
+ struct ili9322 *ili = panel_to_ili9322(panel);
+ int ret;
+
+ ret = regmap_write(ili->regmap, ILI9322_POW_CTRL,
+ ILI9322_POW_CTRL_STANDBY);
+ if (ret) {
+ dev_err(ili->dev, "unable to go to standby mode\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ili9322_unprepare(struct drm_panel *panel)
+{
+ struct ili9322 *ili = panel_to_ili9322(panel);
+
+ return ili9322_power_off(ili);
+}
+
+static int ili9322_prepare(struct drm_panel *panel)
+{
+ struct ili9322 *ili = panel_to_ili9322(panel);
+ int ret;
+
+ ret = ili9322_power_on(ili);
+ if (ret < 0)
+ return ret;
+
+ ret = ili9322_init(panel, ili);
+ if (ret < 0)
+ ili9322_unprepare(panel);
+
+ return ret;
+}
+
+static int ili9322_enable(struct drm_panel *panel)
+{
+ struct ili9322 *ili = panel_to_ili9322(panel);
+ int ret;
+
+ ret = regmap_write(ili->regmap, ILI9322_POW_CTRL,
+ ILI9322_POW_CTRL_DEFAULT);
+ if (ret) {
+ dev_err(ili->dev, "unable to enable panel\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+/* Serial RGB modes */
+static const struct drm_display_mode srgb_320x240_mode = {
+ .clock = 2453500,
+ .hdisplay = 320,
+ .hsync_start = 320 + 359,
+ .hsync_end = 320 + 359 + 1,
+ .htotal = 320 + 359 + 1 + 241,
+ .vdisplay = 240,
+ .vsync_start = 240 + 4,
+ .vsync_end = 240 + 4 + 1,
+ .vtotal = 262,
+ .vrefresh = 60,
+ .flags = 0,
+};
+
+static const struct drm_display_mode srgb_360x240_mode = {
+ .clock = 2700000,
+ .hdisplay = 360,
+ .hsync_start = 360 + 35,
+ .hsync_end = 360 + 35 + 1,
+ .htotal = 360 + 35 + 1 + 241,
+ .vdisplay = 240,
+ .vsync_start = 240 + 21,
+ .vsync_end = 240 + 21 + 1,
+ .vtotal = 262,
+ .vrefresh = 60,
+ .flags = 0,
+};
+
+/* This is the only mode listed for parallel RGB in the datasheet */
+static const struct drm_display_mode prgb_320x240_mode = {
+ .clock = 6400000,
+ .hdisplay = 320,
+ .hsync_start = 320 + 38,
+ .hsync_end = 320 + 38 + 1,
+ .htotal = 320 + 38 + 1 + 50,
+ .vdisplay = 240,
+ .vsync_start = 240 + 4,
+ .vsync_end = 240 + 4 + 1,
+ .vtotal = 262,
+ .vrefresh = 60,
+ .flags = 0,
+};
+
+/* YUV modes */
+static const struct drm_display_mode yuv_640x320_mode = {
+ .clock = 2454000,
+ .hdisplay = 640,
+ .hsync_start = 640 + 252,
+ .hsync_end = 640 + 252 + 1,
+ .htotal = 640 + 252 + 1 + 28,
+ .vdisplay = 320,
+ .vsync_start = 320 + 4,
+ .vsync_end = 320 + 4 + 1,
+ .vtotal = 320 + 4 + 1 + 18,
+ .vrefresh = 60,
+ .flags = 0,
+};
+
+static const struct drm_display_mode yuv_720x360_mode = {
+ .clock = 2700000,
+ .hdisplay = 720,
+ .hsync_start = 720 + 252,
+ .hsync_end = 720 + 252 + 1,
+ .htotal = 720 + 252 + 1 + 24,
+ .vdisplay = 360,
+ .vsync_start = 360 + 4,
+ .vsync_end = 360 + 4 + 1,
+ .vtotal = 360 + 4 + 1 + 18,
+ .vrefresh = 60,
+ .flags = 0,
+};
+
+/* BT.656 VGA mode, 640x480 */
+static const struct drm_display_mode itu_r_bt_656_640_mode = {
+ .clock = 2454000,
+ .hdisplay = 640,
+ .hsync_start = 640 + 3,
+ .hsync_end = 640 + 3 + 1,
+ .htotal = 640 + 3 + 1 + 272,
+ .vdisplay = 480,
+ .vsync_start = 480 + 4,
+ .vsync_end = 480 + 4 + 1,
+ .vtotal = 500,
+ .vrefresh = 60,
+ .flags = 0,
+};
+
+/* BT.656 D1 mode 720x480 */
+static const struct drm_display_mode itu_r_bt_656_720_mode = {
+ .clock = 2700000,
+ .hdisplay = 720,
+ .hsync_start = 720 + 3,
+ .hsync_end = 720 + 3 + 1,
+ .htotal = 720 + 3 + 1 + 272,
+ .vdisplay = 480,
+ .vsync_start = 480 + 4,
+ .vsync_end = 480 + 4 + 1,
+ .vtotal = 500,
+ .vrefresh = 60,
+ .flags = 0,
+};
+
+static int ili9322_get_modes(struct drm_panel *panel)
+{
+ struct drm_connector *connector = panel->connector;
+ struct ili9322 *ili = panel_to_ili9322(panel);
+ struct drm_display_mode *mode;
+
+ strncpy(connector->display_info.name, "ILI9322 TFT LCD driver\0",
+ DRM_DISPLAY_INFO_LEN);
+ connector->display_info.width_mm = ili->conf->width_mm;
+ connector->display_info.height_mm = ili->conf->height_mm;
+
+ switch (ili->input) {
+ case ILI9322_INPUT_SRGB_DUMMY_320X240:
+ mode = drm_mode_duplicate(panel->drm, &srgb_320x240_mode);
+ break;
+ case ILI9322_INPUT_SRGB_DUMMY_360X240:
+ mode = drm_mode_duplicate(panel->drm, &srgb_360x240_mode);
+ break;
+ case ILI9322_INPUT_PRGB_THROUGH:
+ case ILI9322_INPUT_PRGB_ALIGNED:
+ mode = drm_mode_duplicate(panel->drm, &prgb_320x240_mode);
+ break;
+ case ILI9322_INPUT_YUV_640X320_YCBCR:
+ mode = drm_mode_duplicate(panel->drm, &yuv_640x320_mode);
+ break;
+ case ILI9322_INPUT_YUV_720X360_YCBCR:
+ mode = drm_mode_duplicate(panel->drm, &yuv_720x360_mode);
+ break;
+ case ILI9322_INPUT_ITU_R_BT656_720X360_YCBCR:
+ mode = drm_mode_duplicate(panel->drm, &itu_r_bt_656_720_mode);
+ break;
+ case ILI9322_INPUT_ITU_R_BT656_640X320_YCBCR:
+ mode = drm_mode_duplicate(panel->drm, &itu_r_bt_656_640_mode);
+ break;
+ default:
+ mode = NULL;
+ break;
+ }
+ if (!mode) {
+ DRM_ERROR("bad mode or failed to add mode\n");
+ return -EINVAL;
+ }
+ drm_mode_set_name(mode);
+ /*
+ * This is the preferred mode because most people are going
+ * to want to use the display with VGA type graphics.
+ */
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+
+ /* Set up the polarity */
+ if (ili->conf->hsync_active_high)
+ mode->flags |= DRM_MODE_FLAG_PHSYNC;
+ else
+ mode->flags |= DRM_MODE_FLAG_NHSYNC;
+ if (ili->conf->vsync_active_high)
+ mode->flags |= DRM_MODE_FLAG_PVSYNC;
+ else
+ mode->flags |= DRM_MODE_FLAG_NVSYNC;
+
+ mode->width_mm = ili->conf->width_mm;
+ mode->height_mm = ili->conf->height_mm;
+ drm_mode_probed_add(connector, mode);
+
+ return 1; /* Number of modes */
+}
+
+static const struct drm_panel_funcs ili9322_drm_funcs = {
+ .disable = ili9322_disable,
+ .unprepare = ili9322_unprepare,
+ .prepare = ili9322_prepare,
+ .enable = ili9322_enable,
+ .get_modes = ili9322_get_modes,
+};
+
+static int ili9322_probe(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ struct ili9322 *ili;
+ const struct regmap_config *regmap_config;
+ u8 gamma;
+ u32 val;
+ int ret;
+ int i;
+
+ ili = devm_kzalloc(dev, sizeof(struct ili9322), GFP_KERNEL);
+ if (!ili)
+ return -ENOMEM;
+
+ spi_set_drvdata(spi, ili);
+
+ ili->dev = dev;
+
+ /*
+ * Every new incarnation of this display must have a unique
+ * data entry for the system in this driver.
+ */
+ ili->conf = of_device_get_match_data(dev);
+ if (!ili->conf) {
+ dev_err(dev, "missing device configuration\n");
+ return -ENODEV;
+ }
+
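+ /*
+ * VREG1OUT is configurable from 3.6 V to 6.0 V in 100 mV steps; the
+ * value cached in ili->vreg1out is the number of 100 mV steps above
+ * 3.6 V, as computed below (U8_MAX means "keep the hardware default").
+ */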
+ val = ili->conf->vreg1out_mv;
+ if (!val) {
+ /* Default HW value, do not touch (should be 4.5V) */
+ ili->vreg1out = U8_MAX;
+ } else {
+ if (val < 3600) {
+ dev_err(dev, "too low VREG1OUT\n");
+ return -EINVAL;
+ }
+ if (val > 6000) {
+ dev_err(dev, "too high VREG1OUT\n");
+ return -EINVAL;
+ }
+ if ((val % 100) != 0) {
+ dev_err(dev, "VREG1OUT is no even 100 microvolt\n");
+ return -EINVAL;
+ }
+ val -= 3600;
+ val /= 100;
+ dev_dbg(dev, "VREG1OUT = 0x%02x\n", val);
+ ili->vreg1out = val;
+ }
+
+ val = ili->conf->vcom_high_percent;
+ if (!val) {
+ /* Default HW value, do not touch (should be 91%) */
+ ili->vcom_high = U8_MAX;
+ } else {
+ if (val < 37) {
+ dev_err(dev, "too low VCOM high\n");
+ return -EINVAL;
+ }
+ if (val > 100) {
+ dev_err(dev, "too high VCOM high\n");
+ return -EINVAL;
+ }
+ val -= 37;
+ dev_dbg(dev, "VCOM high = 0x%02x\n", val);
+ ili->vcom_high = val;
+ }
+
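+ /*
+ * The VCOM amplitude is specified as a percentage between 70% and 132%
+ * in steps of 2%; ili->vcom_amplitude stores the number of 2% steps
+ * above 70%, with U8_MAX again meaning "keep the hardware default".
+ */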
+ val = ili->conf->vcom_amplitude_percent;
+ if (!val) {
+ /* Default HW value, do not touch (should be 114%) */
+ ili->vcom_amplitude = U8_MAX;
+ } else {
+ if (val < 70) {
+ dev_err(dev, "too low VCOM amplitude\n");
+ return -EINVAL;
+ }
+ if (val > 132) {
+ dev_err(dev, "too high VCOM amplitude\n");
+ return -EINVAL;
+ }
+ val -= 70;
+ val >>= 1; /* Increments of 2% */
+ dev_dbg(dev, "VCOM amplitude = 0x%02x\n", val);
+ ili->vcom_amplitude = val;
+ }
+
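+ /*
+ * Each gamma correction byte packs the negative correction value for
+ * V1..V8 into the high nibble and the positive correction value into
+ * the low nibble; values above 15 are capped since only 4 bits are
+ * available per correction.
+ */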
+ for (i = 0; i < ARRAY_SIZE(ili->gamma); i++) {
+ val = ili->conf->gamma_corr_neg[i];
+ if (val > 15) {
+ dev_err(dev, "negative gamma %u > 15, capping\n", val);
+ val = 15;
+ }
+ gamma = val << 4;
+ val = ili->conf->gamma_corr_pos[i];
+ if (val > 15) {
+ dev_err(dev, "positive gamma %u > 15, capping\n", val);
+ val = 15;
+ }
+ gamma |= val;
+ ili->gamma[i] = gamma;
+ dev_dbg(dev, "gamma V%d: 0x%02x\n", i + 1, gamma);
+ }
+
+ ili->supplies[0].supply = "vcc"; /* 2.7-3.6 V */
+ ili->supplies[1].supply = "iovcc"; /* 1.65-3.6V */
+ ili->supplies[2].supply = "vci"; /* 2.7-3.6V */
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ili->supplies),
+ ili->supplies);
+ if (ret < 0)
+ return ret;
+ ret = regulator_set_voltage(ili->supplies[0].consumer,
+ 2700000, 3600000);
+ if (ret)
+ return ret;
+ ret = regulator_set_voltage(ili->supplies[1].consumer,
+ 1650000, 3600000);
+ if (ret)
+ return ret;
+ ret = regulator_set_voltage(ili->supplies[2].consumer,
+ 2700000, 3600000);
+ if (ret)
+ return ret;
+
+ ili->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(ili->reset_gpio)) {
+ dev_err(dev, "failed to get RESET GPIO\n");
+ return PTR_ERR(ili->reset_gpio);
+ }
+
+ spi->bits_per_word = 8;
+ ret = spi_setup(spi);
+ if (ret < 0) {
+ dev_err(dev, "spi setup failed.\n");
+ return ret;
+ }
+ regmap_config = &ili9322_regmap_config;
+ ili->regmap = devm_regmap_init(dev, &ili9322_regmap_bus, dev,
+ regmap_config);
+ if (IS_ERR(ili->regmap)) {
+ dev_err(dev, "failed to allocate register map\n");
+ return PTR_ERR(ili->regmap);
+ }
+
+ ret = regmap_read(ili->regmap, ILI9322_CHIP_ID, &val);
+ if (ret) {
+ dev_err(dev, "can't get chip ID (%d)\n", ret);
+ return ret;
+ }
+ if (val != ILI9322_CHIP_ID_MAGIC) {
+ dev_err(dev, "chip ID 0x%0x2, expected 0x%02x\n", val,
+ ILI9322_CHIP_ID_MAGIC);
+ return -ENODEV;
+ }
+
+ /* Probe the system to find the display setting */
+ if (ili->conf->input == ILI9322_INPUT_UNKNOWN) {
+ ret = regmap_read(ili->regmap, ILI9322_ENTRY, &val);
+ if (ret) {
+ dev_err(dev, "can't get entry setting (%d)\n", ret);
+ return ret;
+ }
+ /* Input enum corresponds to HW setting */
+ ili->input = (val >> 4) & 0x0f;
+ if (ili->input >= ILI9322_INPUT_UNKNOWN)
+ ili->input = ILI9322_INPUT_UNKNOWN;
+ } else {
+ ili->input = ili->conf->input;
+ }
+
+ drm_panel_init(&ili->panel);
+ ili->panel.dev = dev;
+ ili->panel.funcs = &ili9322_drm_funcs;
+
+ return drm_panel_add(&ili->panel);
+}
+
+static int ili9322_remove(struct spi_device *spi)
+{
+ struct ili9322 *ili = spi_get_drvdata(spi);
+
+ ili9322_power_off(ili);
+ drm_panel_remove(&ili->panel);
+
+ return 0;
+}
+
+/*
+ * The D-Link DIR-685 panel is marked LM918A01-1A SY-B4-091116-E0199
+ */
+static const struct ili9322_config ili9322_dir_685 = {
+ .width_mm = 65,
+ .height_mm = 50,
+ .input = ILI9322_INPUT_ITU_R_BT656_640X320_YCBCR,
+ .vreg1out_mv = 4600,
+ .vcom_high_percent = 91,
+ .vcom_amplitude_percent = 114,
+ .syncmode = ILI9322_IF_CTRL_SYNC_DISABLED,
+ .dclk_active_high = true,
+ .gamma_corr_neg = { 0xa, 0x5, 0x7, 0x7, 0x7, 0x5, 0x1, 0x6 },
+ .gamma_corr_pos = { 0x7, 0x7, 0x3, 0x2, 0x3, 0x5, 0x7, 0x2 },
+};
+
+static const struct of_device_id ili9322_of_match[] = {
+ {
+ .compatible = "dlink,dir-685-panel",
+ .data = &ili9322_dir_685,
+ },
+ {
+ .compatible = "ilitek,ili9322",
+ .data = NULL,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ili9322_of_match);
+
+static struct spi_driver ili9322_driver = {
+ .probe = ili9322_probe,
+ .remove = ili9322_remove,
+ .driver = {
+ .name = "panel-ilitek-ili9322",
+ .of_match_table = ili9322_of_match,
+ },
+};
+module_spi_driver(ili9322_driver);
+
+MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
+MODULE_DESCRIPTION("ILI9322 LCD panel driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-lvds.c b/drivers/gpu/drm/panel/panel-lvds.c
index e2d57c01200b..57e38a9e7ab4 100644
--- a/drivers/gpu/drm/panel/panel-lvds.c
+++ b/drivers/gpu/drm/panel/panel-lvds.c
@@ -17,6 +17,7 @@
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <drm/drmP.h>
@@ -39,6 +40,7 @@ struct panel_lvds {
bool data_mirror;
struct backlight_device *backlight;
+ struct regulator *supply;
struct gpio_desc *enable_gpio;
struct gpio_desc *reset_gpio;
@@ -69,6 +71,9 @@ static int panel_lvds_unprepare(struct drm_panel *panel)
if (lvds->enable_gpio)
gpiod_set_value_cansleep(lvds->enable_gpio, 0);
+ if (lvds->supply)
+ regulator_disable(lvds->supply);
+
return 0;
}
@@ -76,6 +81,17 @@ static int panel_lvds_prepare(struct drm_panel *panel)
{
struct panel_lvds *lvds = to_panel_lvds(panel);
+ if (lvds->supply) {
+ int err;
+
+ err = regulator_enable(lvds->supply);
+ if (err < 0) {
+ dev_err(lvds->dev, "failed to enable supply: %d\n",
+ err);
+ return err;
+ }
+ }
+
if (lvds->enable_gpio)
gpiod_set_value_cansleep(lvds->enable_gpio, 1);
@@ -196,6 +212,13 @@ static int panel_lvds_probe(struct platform_device *pdev)
if (ret < 0)
return ret;
+ lvds->supply = devm_regulator_get_optional(lvds->dev, "power");
+ if (IS_ERR(lvds->supply)) {
+ ret = PTR_ERR(lvds->supply);
+ dev_err(lvds->dev, "failed to request regulator: %d\n", ret);
+ return ret;
+ }
+
/* Get GPIOs and backlight controller. */
lvds->enable_gpio = devm_gpiod_get_optional(lvds->dev, "enable",
GPIOD_OUT_LOW);
diff --git a/drivers/gpu/drm/pl111/pl111_drm.h b/drivers/gpu/drm/pl111/pl111_drm.h
index 440f53ebee8c..07fa2cdb364a 100644
--- a/drivers/gpu/drm/pl111/pl111_drm.h
+++ b/drivers/gpu/drm/pl111/pl111_drm.h
@@ -53,7 +53,6 @@ struct pl111_drm_dev_private {
struct drm_panel *panel;
struct drm_bridge *bridge;
struct drm_simple_display_pipe pipe;
- struct drm_fbdev_cma *fbdev;
void *regs;
u32 ienb;
diff --git a/drivers/gpu/drm/pl111/pl111_drv.c b/drivers/gpu/drm/pl111/pl111_drv.c
index 201d57d5cb54..acb738c69873 100644
--- a/drivers/gpu/drm/pl111/pl111_drv.c
+++ b/drivers/gpu/drm/pl111/pl111_drv.c
@@ -64,6 +64,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_fb_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_of.h>
#include <drm/drm_bridge.h>
@@ -137,8 +138,7 @@ static int pl111_modeset_init(struct drm_device *dev)
drm_mode_config_reset(dev);
- priv->fbdev = drm_fbdev_cma_init(dev, 32,
- dev->mode_config.num_connector);
+ drm_fb_cma_fbdev_init(dev, 32, 0);
drm_kms_helper_poll_init(dev);
@@ -155,17 +155,10 @@ finish:
DEFINE_DRM_GEM_CMA_FOPS(drm_fops);
-static void pl111_lastclose(struct drm_device *dev)
-{
- struct pl111_drm_dev_private *priv = dev->dev_private;
-
- drm_fbdev_cma_restore_mode(priv->fbdev);
-}
-
static struct drm_driver pl111_drm_driver = {
.driver_features =
DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME | DRIVER_ATOMIC,
- .lastclose = pl111_lastclose,
+ .lastclose = drm_fb_helper_lastclose,
.ioctls = NULL,
.fops = &drm_fops,
.name = "pl111",
@@ -281,8 +274,7 @@ static int pl111_amba_remove(struct amba_device *amba_dev)
struct pl111_drm_dev_private *priv = drm->dev_private;
drm_dev_unregister(drm);
- if (priv->fbdev)
- drm_fbdev_cma_fini(priv->fbdev);
+ drm_fb_cma_fbdev_fini(drm);
if (priv->panel)
drm_panel_bridge_remove(priv->bridge);
drm_mode_config_cleanup(drm);
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index d866f329e7d8..59cd74c3f3af 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -291,14 +291,15 @@ static struct ttm_backend_func qxl_backend_func = {
.destroy = &qxl_ttm_backend_destroy,
};
-static int qxl_ttm_tt_populate(struct ttm_tt *ttm)
+static int qxl_ttm_tt_populate(struct ttm_tt *ttm,
+ struct ttm_operation_ctx *ctx)
{
int r;
if (ttm->state != tt_unpopulated)
return 0;
- r = ttm_pool_populate(ttm);
+ r = ttm_pool_populate(ttm, ctx);
if (r)
return r;
@@ -357,8 +358,7 @@ static int qxl_bo_move(struct ttm_buffer_object *bo, bool evict,
qxl_move_null(bo, new_mem);
return 0;
}
- return ttm_bo_move_memcpy(bo, ctx->interruptible, ctx->no_wait_gpu,
- new_mem);
+ return ttm_bo_move_memcpy(bo, ctx, new_mem);
}
static void qxl_bo_move_notify(struct ttm_buffer_object *bo,
@@ -389,7 +389,6 @@ static struct ttm_bo_driver qxl_bo_driver = {
.verify_access = &qxl_verify_access,
.io_mem_reserve = &qxl_ttm_io_mem_reserve,
.io_mem_free = &qxl_ttm_io_mem_free,
- .io_mem_pfn = ttm_bo_default_io_mem_pfn,
.move_notify = &qxl_bo_move_notify,
};
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index a6511918f632..d3045a371a55 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -1627,8 +1627,6 @@ static const u32 godavari_golden_registers[] =
static void cik_init_golden_registers(struct radeon_device *rdev)
{
- /* Some of the registers might be dependent on GRBM_GFX_INDEX */
- mutex_lock(&rdev->grbm_idx_mutex);
switch (rdev->family) {
case CHIP_BONAIRE:
radeon_program_register_sequence(rdev,
@@ -1703,7 +1701,6 @@ static void cik_init_golden_registers(struct radeon_device *rdev)
default:
break;
}
- mutex_unlock(&rdev->grbm_idx_mutex);
}
/**
@@ -3120,7 +3117,6 @@ static void cik_setup_rb(struct radeon_device *rdev,
u32 disabled_rbs = 0;
u32 enabled_rbs = 0;
- mutex_lock(&rdev->grbm_idx_mutex);
for (i = 0; i < se_num; i++) {
for (j = 0; j < sh_per_se; j++) {
cik_select_se_sh(rdev, i, j);
@@ -3132,7 +3128,6 @@ static void cik_setup_rb(struct radeon_device *rdev,
}
}
cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
- mutex_unlock(&rdev->grbm_idx_mutex);
mask = 1;
for (i = 0; i < max_rb_num_per_se * se_num; i++) {
@@ -3143,7 +3138,6 @@ static void cik_setup_rb(struct radeon_device *rdev,
rdev->config.cik.backend_enable_mask = enabled_rbs;
- mutex_lock(&rdev->grbm_idx_mutex);
for (i = 0; i < se_num; i++) {
cik_select_se_sh(rdev, i, 0xffffffff);
data = 0;
@@ -3171,7 +3165,6 @@ static void cik_setup_rb(struct radeon_device *rdev,
WREG32(PA_SC_RASTER_CONFIG, data);
}
cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
- mutex_unlock(&rdev->grbm_idx_mutex);
}
/**
@@ -3391,12 +3384,6 @@ static void cik_gpu_init(struct radeon_device *rdev)
/* set HW defaults for 3D engine */
WREG32(CP_MEQ_THRESHOLDS, MEQ1_START(0x30) | MEQ2_START(0x60));
- mutex_lock(&rdev->grbm_idx_mutex);
- /*
- * making sure that the following register writes will be broadcasted
- * to all the shaders
- */
- cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
WREG32(SX_DEBUG_1, 0x20);
WREG32(TA_CNTL_AUX, 0x00010000);
@@ -3452,7 +3439,6 @@ static void cik_gpu_init(struct radeon_device *rdev)
WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));
WREG32(PA_SC_ENHANCE, ENABLE_PA_SC_OUT_OF_ORDER);
- mutex_unlock(&rdev->grbm_idx_mutex);
udelay(50);
}
@@ -4432,11 +4418,12 @@ static int cik_mec_init(struct radeon_device *rdev)
/*
* KV: 2 MEC, 4 Pipes/MEC, 8 Queues/Pipe - 64 Queues total
* CI/KB: 1 MEC, 4 Pipes/MEC, 8 Queues/Pipe - 32 Queues total
- * Nonetheless, we assign only 1 pipe because all other pipes will
- * be handled by KFD
*/
- rdev->mec.num_mec = 1;
- rdev->mec.num_pipe = 1;
+ if (rdev->family == CHIP_KAVERI)
+ rdev->mec.num_mec = 2;
+ else
+ rdev->mec.num_mec = 1;
+ rdev->mec.num_pipe = 4;
rdev->mec.num_queue = rdev->mec.num_mec * rdev->mec.num_pipe * 8;
if (rdev->mec.hpd_eop_obj == NULL) {
@@ -4579,8 +4566,11 @@ static int cik_cp_compute_resume(struct radeon_device *rdev)
/* init the pipes */
mutex_lock(&rdev->srbm_mutex);
- for (i = 0; i < rdev->mec.num_pipe; ++i) {
- cik_srbm_select(rdev, 0, i, 0, 0);
+ for (i = 0; i < (rdev->mec.num_pipe * rdev->mec.num_mec); ++i) {
+ int me = (i < 4) ? 1 : 2;
+ int pipe = (i < 4) ? i : (i - 4);
+
+ cik_srbm_select(rdev, me, pipe, 0, 0);
eop_gpu_addr = rdev->mec.hpd_eop_gpu_addr + (i * MEC_HPD_SIZE * 2) ;
/* write the EOP addr */
@@ -4597,6 +4587,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev)
WREG32(CP_HPD_EOP_CONTROL, tmp);
}
+ cik_srbm_select(rdev, 0, 0, 0, 0);
mutex_unlock(&rdev->srbm_mutex);
/* init the queues. Just two for now. */
@@ -5830,7 +5821,6 @@ static void cik_wait_for_rlc_serdes(struct radeon_device *rdev)
u32 i, j, k;
u32 mask;
- mutex_lock(&rdev->grbm_idx_mutex);
for (i = 0; i < rdev->config.cik.max_shader_engines; i++) {
for (j = 0; j < rdev->config.cik.max_sh_per_se; j++) {
cik_select_se_sh(rdev, i, j);
@@ -5842,7 +5832,6 @@ static void cik_wait_for_rlc_serdes(struct radeon_device *rdev)
}
}
cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
- mutex_unlock(&rdev->grbm_idx_mutex);
mask = SE_MASTER_BUSY_MASK | GC_MASTER_BUSY | TC0_MASTER_BUSY | TC1_MASTER_BUSY;
for (k = 0; k < rdev->usec_timeout; k++) {
@@ -5977,12 +5966,10 @@ static int cik_rlc_resume(struct radeon_device *rdev)
WREG32(RLC_LB_CNTR_INIT, 0);
WREG32(RLC_LB_CNTR_MAX, 0x00008000);
- mutex_lock(&rdev->grbm_idx_mutex);
cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
WREG32(RLC_LB_INIT_CU_MASK, 0xffffffff);
WREG32(RLC_LB_PARAMS, 0x00600408);
WREG32(RLC_LB_CNTL, 0x80000004);
- mutex_unlock(&rdev->grbm_idx_mutex);
WREG32(RLC_MC_CNTL, 0);
WREG32(RLC_UCODE_CNTL, 0);
@@ -6049,13 +6036,11 @@ static void cik_enable_cgcg(struct radeon_device *rdev, bool enable)
tmp = cik_halt_rlc(rdev);
- mutex_lock(&rdev->grbm_idx_mutex);
cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
WREG32(RLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
WREG32(RLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
tmp2 = BPM_ADDR_MASK | CGCG_OVERRIDE_0 | CGLS_ENABLE;
WREG32(RLC_SERDES_WR_CTRL, tmp2);
- mutex_unlock(&rdev->grbm_idx_mutex);
cik_update_rlc(rdev, tmp);
@@ -6098,13 +6083,11 @@ static void cik_enable_mgcg(struct radeon_device *rdev, bool enable)
tmp = cik_halt_rlc(rdev);
- mutex_lock(&rdev->grbm_idx_mutex);
cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
WREG32(RLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
WREG32(RLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
data = BPM_ADDR_MASK | MGCG_OVERRIDE_0;
WREG32(RLC_SERDES_WR_CTRL, data);
- mutex_unlock(&rdev->grbm_idx_mutex);
cik_update_rlc(rdev, tmp);
@@ -6148,13 +6131,11 @@ static void cik_enable_mgcg(struct radeon_device *rdev, bool enable)
tmp = cik_halt_rlc(rdev);
- mutex_lock(&rdev->grbm_idx_mutex);
cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
WREG32(RLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
WREG32(RLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
data = BPM_ADDR_MASK | MGCG_OVERRIDE_1;
WREG32(RLC_SERDES_WR_CTRL, data);
- mutex_unlock(&rdev->grbm_idx_mutex);
cik_update_rlc(rdev, tmp);
}
@@ -6583,12 +6564,10 @@ static u32 cik_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh)
u32 mask = 0, tmp, tmp1;
int i;
- mutex_lock(&rdev->grbm_idx_mutex);
cik_select_se_sh(rdev, se, sh);
tmp = RREG32(CC_GC_SHADER_ARRAY_CONFIG);
tmp1 = RREG32(GC_USER_SHADER_ARRAY_CONFIG);
cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
- mutex_unlock(&rdev->grbm_idx_mutex);
tmp &= 0xffff0000;
@@ -7074,7 +7053,8 @@ static int cik_irq_init(struct radeon_device *rdev)
int cik_irq_set(struct radeon_device *rdev)
{
u32 cp_int_cntl;
- u32 cp_m1p0;
+ u32 cp_m1p0, cp_m1p1, cp_m1p2, cp_m1p3;
+ u32 cp_m2p0, cp_m2p1, cp_m2p2, cp_m2p3;
u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
u32 grbm_int_cntl = 0;
@@ -7107,6 +7087,13 @@ int cik_irq_set(struct radeon_device *rdev)
dma_cntl1 = RREG32(SDMA0_CNTL + SDMA1_REGISTER_OFFSET) & ~TRAP_ENABLE;
cp_m1p0 = RREG32(CP_ME1_PIPE0_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
+ cp_m1p1 = RREG32(CP_ME1_PIPE1_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
+ cp_m1p2 = RREG32(CP_ME1_PIPE2_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
+ cp_m1p3 = RREG32(CP_ME1_PIPE3_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
+ cp_m2p0 = RREG32(CP_ME2_PIPE0_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
+ cp_m2p1 = RREG32(CP_ME2_PIPE1_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
+ cp_m2p2 = RREG32(CP_ME2_PIPE2_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
+ cp_m2p3 = RREG32(CP_ME2_PIPE3_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
/* enable CP interrupts on all rings */
if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
@@ -7121,6 +7108,33 @@ int cik_irq_set(struct radeon_device *rdev)
case 0:
cp_m1p0 |= TIME_STAMP_INT_ENABLE;
break;
+ case 1:
+ cp_m1p1 |= TIME_STAMP_INT_ENABLE;
+ break;
+ case 2:
+ cp_m1p2 |= TIME_STAMP_INT_ENABLE;
+ break;
+ case 3:
+ cp_m1p3 |= TIME_STAMP_INT_ENABLE;
+ break;
+ default:
+ DRM_DEBUG("si_irq_set: sw int cp1 invalid pipe %d\n", ring->pipe);
+ break;
+ }
+ } else if (ring->me == 2) {
+ switch (ring->pipe) {
+ case 0:
+ cp_m2p0 |= TIME_STAMP_INT_ENABLE;
+ break;
+ case 1:
+ cp_m2p1 |= TIME_STAMP_INT_ENABLE;
+ break;
+ case 2:
+ cp_m2p2 |= TIME_STAMP_INT_ENABLE;
+ break;
+ case 3:
+ cp_m2p3 |= TIME_STAMP_INT_ENABLE;
+ break;
default:
DRM_DEBUG("si_irq_set: sw int cp1 invalid pipe %d\n", ring->pipe);
break;
@@ -7137,6 +7151,33 @@ int cik_irq_set(struct radeon_device *rdev)
case 0:
cp_m1p0 |= TIME_STAMP_INT_ENABLE;
break;
+ case 1:
+ cp_m1p1 |= TIME_STAMP_INT_ENABLE;
+ break;
+ case 2:
+ cp_m1p2 |= TIME_STAMP_INT_ENABLE;
+ break;
+ case 3:
+ cp_m1p3 |= TIME_STAMP_INT_ENABLE;
+ break;
+ default:
+ DRM_DEBUG("si_irq_set: sw int cp2 invalid pipe %d\n", ring->pipe);
+ break;
+ }
+ } else if (ring->me == 2) {
+ switch (ring->pipe) {
+ case 0:
+ cp_m2p0 |= TIME_STAMP_INT_ENABLE;
+ break;
+ case 1:
+ cp_m2p1 |= TIME_STAMP_INT_ENABLE;
+ break;
+ case 2:
+ cp_m2p2 |= TIME_STAMP_INT_ENABLE;
+ break;
+ case 3:
+ cp_m2p3 |= TIME_STAMP_INT_ENABLE;
+ break;
default:
DRM_DEBUG("si_irq_set: sw int cp2 invalid pipe %d\n", ring->pipe);
break;
@@ -7217,6 +7258,13 @@ int cik_irq_set(struct radeon_device *rdev)
WREG32(SDMA0_CNTL + SDMA1_REGISTER_OFFSET, dma_cntl1);
WREG32(CP_ME1_PIPE0_INT_CNTL, cp_m1p0);
+ WREG32(CP_ME1_PIPE1_INT_CNTL, cp_m1p1);
+ WREG32(CP_ME1_PIPE2_INT_CNTL, cp_m1p2);
+ WREG32(CP_ME1_PIPE3_INT_CNTL, cp_m1p3);
+ WREG32(CP_ME2_PIPE0_INT_CNTL, cp_m2p0);
+ WREG32(CP_ME2_PIPE1_INT_CNTL, cp_m2p1);
+ WREG32(CP_ME2_PIPE2_INT_CNTL, cp_m2p2);
+ WREG32(CP_ME2_PIPE3_INT_CNTL, cp_m2p3);
WREG32(GRBM_INT_CNTL, grbm_int_cntl);
diff --git a/drivers/gpu/drm/radeon/cik_reg.h b/drivers/gpu/drm/radeon/cik_reg.h
index 4e883fdc59d8..318377df09ef 100644
--- a/drivers/gpu/drm/radeon/cik_reg.h
+++ b/drivers/gpu/drm/radeon/cik_reg.h
@@ -147,8 +147,6 @@
#define CIK_LB_DESKTOP_HEIGHT 0x6b0c
-#define KFD_CIK_SDMA_QUEUE_OFFSET 0x200
-
#define SQ_IND_INDEX 0x8DE0
#define SQ_CMD 0x8DEC
#define SQ_IND_DATA 0x8DE4
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index a8e546569858..d34887873dea 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -731,10 +731,6 @@ struct radeon_doorbell {
int radeon_doorbell_get(struct radeon_device *rdev, u32 *page);
void radeon_doorbell_free(struct radeon_device *rdev, u32 doorbell);
-void radeon_doorbell_get_kfd_info(struct radeon_device *rdev,
- phys_addr_t *aperture_base,
- size_t *aperture_size,
- size_t *start_offset);
/*
* IRQS.
@@ -2442,8 +2438,6 @@ struct radeon_device {
struct radeon_atcs atcs;
/* srbm instance registers */
struct mutex srbm_mutex;
- /* GRBM index mutex. Protects concurrents access to GRBM index */
- struct mutex grbm_idx_mutex;
/* clock, powergating flags */
u32 cg_flags;
u32 pg_flags;
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index ffc10cadcf34..8d3e3d2e0090 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -392,37 +392,6 @@ void radeon_doorbell_free(struct radeon_device *rdev, u32 doorbell)
__clear_bit(doorbell, rdev->doorbell.used);
}
-/**
- * radeon_doorbell_get_kfd_info - Report doorbell configuration required to
- * setup KFD
- *
- * @rdev: radeon_device pointer
- * @aperture_base: output returning doorbell aperture base physical address
- * @aperture_size: output returning doorbell aperture size in bytes
- * @start_offset: output returning # of doorbell bytes reserved for radeon.
- *
- * Radeon and the KFD share the doorbell aperture. Radeon sets it up,
- * takes doorbells required for its own rings and reports the setup to KFD.
- * Radeon reserved doorbells are at the start of the doorbell aperture.
- */
-void radeon_doorbell_get_kfd_info(struct radeon_device *rdev,
- phys_addr_t *aperture_base,
- size_t *aperture_size,
- size_t *start_offset)
-{
- /* The first num_doorbells are used by radeon.
- * KFD takes whatever's left in the aperture. */
- if (rdev->doorbell.size > rdev->doorbell.num_doorbells * sizeof(u32)) {
- *aperture_base = rdev->doorbell.base;
- *aperture_size = rdev->doorbell.size;
- *start_offset = rdev->doorbell.num_doorbells * sizeof(u32);
- } else {
- *aperture_base = 0;
- *aperture_size = 0;
- *start_offset = 0;
- }
-}
-
/*
* radeon_wb_*()
 * Writeback is the method by which the GPU updates special pages
@@ -1341,7 +1310,6 @@ int radeon_device_init(struct radeon_device *rdev,
mutex_init(&rdev->pm.mutex);
mutex_init(&rdev->gpu_clock_mutex);
mutex_init(&rdev->srbm_mutex);
- mutex_init(&rdev->grbm_idx_mutex);
init_rwsem(&rdev->pm.mclk_lock);
init_rwsem(&rdev->exclusive_lock);
init_waitqueue_head(&rdev->irq.vblank_queue);
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 98e30d71d9e0..a0a839bc39bf 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -339,7 +339,7 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
goto out_cleanup;
}
- r = ttm_tt_bind(bo->ttm, &tmp_mem);
+ r = ttm_tt_bind(bo->ttm, &tmp_mem, &ctx);
if (unlikely(r)) {
goto out_cleanup;
}
@@ -347,7 +347,7 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
if (unlikely(r)) {
goto out_cleanup;
}
- r = ttm_bo_move_ttm(bo, interruptible, no_wait_gpu, new_mem);
+ r = ttm_bo_move_ttm(bo, &ctx, new_mem);
out_cleanup:
ttm_bo_mem_put(bo, &tmp_mem);
return r;
@@ -380,7 +380,7 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
if (unlikely(r)) {
return r;
}
- r = ttm_bo_move_ttm(bo, interruptible, no_wait_gpu, &tmp_mem);
+ r = ttm_bo_move_ttm(bo, &ctx, &tmp_mem);
if (unlikely(r)) {
goto out_cleanup;
}
@@ -445,8 +445,7 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict,
if (r) {
memcpy:
- r = ttm_bo_move_memcpy(bo, ctx->interruptible,
- ctx->no_wait_gpu, new_mem);
+ r = ttm_bo_move_memcpy(bo, ctx, new_mem);
if (r) {
return r;
}
@@ -722,7 +721,8 @@ static struct radeon_ttm_tt *radeon_ttm_tt_to_gtt(struct ttm_tt *ttm)
return (struct radeon_ttm_tt *)ttm;
}
-static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
+static int radeon_ttm_tt_populate(struct ttm_tt *ttm,
+ struct ttm_operation_ctx *ctx)
{
struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm);
struct radeon_device *rdev;
@@ -751,17 +751,17 @@ static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
rdev = radeon_get_rdev(ttm->bdev);
#if IS_ENABLED(CONFIG_AGP)
if (rdev->flags & RADEON_IS_AGP) {
- return ttm_agp_tt_populate(ttm);
+ return ttm_agp_tt_populate(ttm, ctx);
}
#endif
#ifdef CONFIG_SWIOTLB
if (swiotlb_nr_tbl()) {
- return ttm_dma_populate(&gtt->ttm, rdev->dev);
+ return ttm_dma_populate(&gtt->ttm, rdev->dev, ctx);
}
#endif
- return ttm_populate_and_map_pages(rdev->dev, &gtt->ttm);
+ return ttm_populate_and_map_pages(rdev->dev, &gtt->ttm, ctx);
}
static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm)
@@ -845,7 +845,6 @@ static struct ttm_bo_driver radeon_bo_driver = {
.fault_reserve_notify = &radeon_bo_fault_reserve_notify,
.io_mem_reserve = &radeon_ttm_io_mem_reserve,
.io_mem_free = &radeon_ttm_io_mem_free,
- .io_mem_pfn = ttm_bo_default_io_mem_pfn,
};
int radeon_ttm_init(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
index 76d63de5921d..d85431400a0d 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -207,13 +207,6 @@ static void rockchip_drm_unbind(struct device *dev)
drm_dev_unref(drm_dev);
}
-static void rockchip_drm_lastclose(struct drm_device *dev)
-{
- struct rockchip_drm_private *priv = dev->dev_private;
-
- drm_fb_helper_restore_fbdev_mode_unlocked(&priv->fbdev_helper);
-}
-
static const struct file_operations rockchip_drm_driver_fops = {
.owner = THIS_MODULE,
.open = drm_open,
@@ -228,7 +221,7 @@ static const struct file_operations rockchip_drm_driver_fops = {
static struct drm_driver rockchip_drm_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM |
DRIVER_PRIME | DRIVER_ATOMIC,
- .lastclose = rockchip_drm_lastclose,
+ .lastclose = drm_fb_helper_lastclose,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.gem_free_object_unlocked = rockchip_gem_free_object,
.dumb_create = rockchip_gem_dumb_create,
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
index cd2ace0c3caa..e266539e04e5 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
@@ -167,20 +167,13 @@ err_gem_object_unreference:
return ERR_PTR(ret);
}
-static void rockchip_drm_output_poll_changed(struct drm_device *dev)
-{
- struct rockchip_drm_private *private = dev->dev_private;
-
- drm_fb_helper_hotplug_event(&private->fbdev_helper);
-}
-
static const struct drm_mode_config_helper_funcs rockchip_mode_config_helpers = {
.atomic_commit_tail = drm_atomic_helper_commit_tail_rpm,
};
static const struct drm_mode_config_funcs rockchip_drm_mode_config_funcs = {
.fb_create = rockchip_user_fb_create,
- .output_poll_changed = rockchip_drm_output_poll_changed,
+ .output_poll_changed = drm_fb_helper_output_poll_changed,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
};
diff --git a/drivers/gpu/drm/scheduler/Makefile b/drivers/gpu/drm/scheduler/Makefile
new file mode 100644
index 000000000000..bd0377c0d2ee
--- /dev/null
+++ b/drivers/gpu/drm/scheduler/Makefile
@@ -0,0 +1,26 @@
+#
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
+ccflags-y := -Iinclude/drm
+gpu-sched-y := gpu_scheduler.o sched_fence.o
+
+obj-$(CONFIG_DRM_SCHED) += gpu-sched.o
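+#
+# A driver using the shared scheduler is expected to select CONFIG_DRM_SCHED
+# from its Kconfig and include <drm/gpu_scheduler.h> instead of the old
+# amdgpu-local gpu_scheduler.h.
+#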
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c
index dcb987e6d94a..2c18996d59c5 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
@@ -19,37 +19,36 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
- *
*/
+
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <uapi/linux/sched/types.h>
#include <drm/drmP.h>
-#include "gpu_scheduler.h"
-
-#include "spsc_queue.h"
+#include <drm/gpu_scheduler.h>
+#include <drm/spsc_queue.h>
#define CREATE_TRACE_POINTS
-#include "gpu_sched_trace.h"
+#include <drm/gpu_scheduler_trace.h>
-#define to_amd_sched_job(sched_job) \
- container_of((sched_job), struct amd_sched_job, queue_node)
+#define to_drm_sched_job(sched_job) \
+ container_of((sched_job), struct drm_sched_job, queue_node)
-static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity);
-static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);
-static void amd_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb);
+static bool drm_sched_entity_is_ready(struct drm_sched_entity *entity);
+static void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
+static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb);
/* Initialize a given run queue struct */
-static void amd_sched_rq_init(struct amd_sched_rq *rq)
+static void drm_sched_rq_init(struct drm_sched_rq *rq)
{
spin_lock_init(&rq->lock);
INIT_LIST_HEAD(&rq->entities);
rq->current_entity = NULL;
}
-static void amd_sched_rq_add_entity(struct amd_sched_rq *rq,
- struct amd_sched_entity *entity)
+static void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
+ struct drm_sched_entity *entity)
{
if (!list_empty(&entity->list))
return;
@@ -58,8 +57,8 @@ static void amd_sched_rq_add_entity(struct amd_sched_rq *rq,
spin_unlock(&rq->lock);
}
-static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq,
- struct amd_sched_entity *entity)
+static void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
+ struct drm_sched_entity *entity)
{
if (list_empty(&entity->list))
return;
@@ -77,17 +76,17 @@ static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq,
*
* Try to find a ready entity, returns NULL if none found.
*/
-static struct amd_sched_entity *
-amd_sched_rq_select_entity(struct amd_sched_rq *rq)
+static struct drm_sched_entity *
+drm_sched_rq_select_entity(struct drm_sched_rq *rq)
{
- struct amd_sched_entity *entity;
+ struct drm_sched_entity *entity;
spin_lock(&rq->lock);
entity = rq->current_entity;
if (entity) {
list_for_each_entry_continue(entity, &rq->entities, list) {
- if (amd_sched_entity_is_ready(entity)) {
+ if (drm_sched_entity_is_ready(entity)) {
rq->current_entity = entity;
spin_unlock(&rq->lock);
return entity;
@@ -97,7 +96,7 @@ amd_sched_rq_select_entity(struct amd_sched_rq *rq)
list_for_each_entry(entity, &rq->entities, list) {
- if (amd_sched_entity_is_ready(entity)) {
+ if (drm_sched_entity_is_ready(entity)) {
rq->current_entity = entity;
spin_unlock(&rq->lock);
return entity;
@@ -116,22 +115,22 @@ amd_sched_rq_select_entity(struct amd_sched_rq *rq)
* Init a context entity used by scheduler when submit to HW ring.
*
* @sched The pointer to the scheduler
- * @entity The pointer to a valid amd_sched_entity
+ * @entity The pointer to a valid drm_sched_entity
 * @rq The run queue this entity belongs to
* @kernel If this is an entity for the kernel
* @jobs The max number of jobs in the job queue
*
 * return 0 on success, negative error code on failure
*/
-int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
- struct amd_sched_entity *entity,
- struct amd_sched_rq *rq,
+int drm_sched_entity_init(struct drm_gpu_scheduler *sched,
+ struct drm_sched_entity *entity,
+ struct drm_sched_rq *rq,
uint32_t jobs, atomic_t *guilty)
{
if (!(sched && entity && rq))
return -EINVAL;
- memset(entity, 0, sizeof(struct amd_sched_entity));
+ memset(entity, 0, sizeof(struct drm_sched_entity));
INIT_LIST_HEAD(&entity->list);
entity->rq = rq;
entity->sched = sched;
@@ -146,6 +145,7 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
return 0;
}
+EXPORT_SYMBOL(drm_sched_entity_init);
/**
* Query if entity is initialized
@@ -155,8 +155,8 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
*
* return true if entity is initialized, false otherwise
*/
-static bool amd_sched_entity_is_initialized(struct amd_gpu_scheduler *sched,
- struct amd_sched_entity *entity)
+static bool drm_sched_entity_is_initialized(struct drm_gpu_scheduler *sched,
+ struct drm_sched_entity *entity)
{
return entity->sched == sched &&
entity->rq != NULL;
@@ -169,7 +169,7 @@ static bool amd_sched_entity_is_initialized(struct amd_gpu_scheduler *sched,
*
 * Return true if the entity doesn't have any unscheduled jobs.
*/
-static bool amd_sched_entity_is_idle(struct amd_sched_entity *entity)
+static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
{
rmb();
if (spsc_queue_peek(&entity->job_queue) == NULL)
@@ -185,7 +185,7 @@ static bool amd_sched_entity_is_idle(struct amd_sched_entity *entity)
*
* Return true if entity could provide a job.
*/
-static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity)
+static bool drm_sched_entity_is_ready(struct drm_sched_entity *entity)
{
if (spsc_queue_peek(&entity->job_queue) == NULL)
return false;
@@ -204,12 +204,12 @@ static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity)
*
* Cleanup and free the allocated resources.
*/
-void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
- struct amd_sched_entity *entity)
+void drm_sched_entity_fini(struct drm_gpu_scheduler *sched,
+ struct drm_sched_entity *entity)
{
int r;
- if (!amd_sched_entity_is_initialized(sched, entity))
+ if (!drm_sched_entity_is_initialized(sched, entity))
return;
/**
* The client will not queue more IBs during this fini, consume existing
@@ -219,10 +219,10 @@ void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
r = -ERESTARTSYS;
else
r = wait_event_killable(sched->job_scheduled,
- amd_sched_entity_is_idle(entity));
- amd_sched_entity_set_rq(entity, NULL);
+ drm_sched_entity_is_idle(entity));
+ drm_sched_entity_set_rq(entity, NULL);
if (r) {
- struct amd_sched_job *job;
+ struct drm_sched_job *job;
/* Park the kernel for a moment to make sure it isn't processing
 * our entity.
@@ -236,37 +236,38 @@ void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
entity->dependency = NULL;
}
- while ((job = to_amd_sched_job(spsc_queue_pop(&entity->job_queue)))) {
- struct amd_sched_fence *s_fence = job->s_fence;
- amd_sched_fence_scheduled(s_fence);
+ while ((job = to_drm_sched_job(spsc_queue_pop(&entity->job_queue)))) {
+ struct drm_sched_fence *s_fence = job->s_fence;
+ drm_sched_fence_scheduled(s_fence);
dma_fence_set_error(&s_fence->finished, -ESRCH);
- amd_sched_fence_finished(s_fence);
+ drm_sched_fence_finished(s_fence);
WARN_ON(s_fence->parent);
dma_fence_put(&s_fence->finished);
sched->ops->free_job(job);
}
}
}
+EXPORT_SYMBOL(drm_sched_entity_fini);
-static void amd_sched_entity_wakeup(struct dma_fence *f, struct dma_fence_cb *cb)
+static void drm_sched_entity_wakeup(struct dma_fence *f, struct dma_fence_cb *cb)
{
- struct amd_sched_entity *entity =
- container_of(cb, struct amd_sched_entity, cb);
+ struct drm_sched_entity *entity =
+ container_of(cb, struct drm_sched_entity, cb);
entity->dependency = NULL;
dma_fence_put(f);
- amd_sched_wakeup(entity->sched);
+ drm_sched_wakeup(entity->sched);
}
-static void amd_sched_entity_clear_dep(struct dma_fence *f, struct dma_fence_cb *cb)
+static void drm_sched_entity_clear_dep(struct dma_fence *f, struct dma_fence_cb *cb)
{
- struct amd_sched_entity *entity =
- container_of(cb, struct amd_sched_entity, cb);
+ struct drm_sched_entity *entity =
+ container_of(cb, struct drm_sched_entity, cb);
entity->dependency = NULL;
dma_fence_put(f);
}
-void amd_sched_entity_set_rq(struct amd_sched_entity *entity,
- struct amd_sched_rq *rq)
+void drm_sched_entity_set_rq(struct drm_sched_entity *entity,
+ struct drm_sched_rq *rq)
{
if (entity->rq == rq)
return;
@@ -274,37 +275,39 @@ void amd_sched_entity_set_rq(struct amd_sched_entity *entity,
spin_lock(&entity->rq_lock);
if (entity->rq)
- amd_sched_rq_remove_entity(entity->rq, entity);
+ drm_sched_rq_remove_entity(entity->rq, entity);
entity->rq = rq;
if (rq)
- amd_sched_rq_add_entity(rq, entity);
+ drm_sched_rq_add_entity(rq, entity);
spin_unlock(&entity->rq_lock);
}
+EXPORT_SYMBOL(drm_sched_entity_set_rq);
-bool amd_sched_dependency_optimized(struct dma_fence* fence,
- struct amd_sched_entity *entity)
+bool drm_sched_dependency_optimized(struct dma_fence* fence,
+ struct drm_sched_entity *entity)
{
- struct amd_gpu_scheduler *sched = entity->sched;
- struct amd_sched_fence *s_fence;
+ struct drm_gpu_scheduler *sched = entity->sched;
+ struct drm_sched_fence *s_fence;
if (!fence || dma_fence_is_signaled(fence))
return false;
if (fence->context == entity->fence_context)
return true;
- s_fence = to_amd_sched_fence(fence);
+ s_fence = to_drm_sched_fence(fence);
if (s_fence && s_fence->sched == sched)
return true;
return false;
}
+EXPORT_SYMBOL(drm_sched_dependency_optimized);
-static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity)
+static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
{
- struct amd_gpu_scheduler *sched = entity->sched;
+ struct drm_gpu_scheduler *sched = entity->sched;
struct dma_fence * fence = entity->dependency;
- struct amd_sched_fence *s_fence;
+ struct drm_sched_fence *s_fence;
if (fence->context == entity->fence_context) {
/* We can ignore fences from ourself */
@@ -312,7 +315,7 @@ static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity)
return false;
}
- s_fence = to_amd_sched_fence(fence);
+ s_fence = to_drm_sched_fence(fence);
if (s_fence && s_fence->sched == sched) {
/*
@@ -323,7 +326,7 @@ static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity)
dma_fence_put(entity->dependency);
entity->dependency = fence;
if (!dma_fence_add_callback(fence, &entity->cb,
- amd_sched_entity_clear_dep))
+ drm_sched_entity_clear_dep))
return true;
/* Ignore it when it is already scheduled */
@@ -332,25 +335,25 @@ static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity)
}
if (!dma_fence_add_callback(entity->dependency, &entity->cb,
- amd_sched_entity_wakeup))
+ drm_sched_entity_wakeup))
return true;
dma_fence_put(entity->dependency);
return false;
}
-static struct amd_sched_job *
-amd_sched_entity_pop_job(struct amd_sched_entity *entity)
+static struct drm_sched_job *
+drm_sched_entity_pop_job(struct drm_sched_entity *entity)
{
- struct amd_gpu_scheduler *sched = entity->sched;
- struct amd_sched_job *sched_job = to_amd_sched_job(
+ struct drm_gpu_scheduler *sched = entity->sched;
+ struct drm_sched_job *sched_job = to_drm_sched_job(
spsc_queue_peek(&entity->job_queue));
if (!sched_job)
return NULL;
while ((entity->dependency = sched->ops->dependency(sched_job, entity)))
- if (amd_sched_entity_add_dependency_cb(entity))
+ if (drm_sched_entity_add_dependency_cb(entity))
return NULL;
/* skip jobs from entity that marked guilty */
@@ -368,13 +371,13 @@ amd_sched_entity_pop_job(struct amd_sched_entity *entity)
*
* Returns 0 for success, negative error code otherwise.
*/
-void amd_sched_entity_push_job(struct amd_sched_job *sched_job,
- struct amd_sched_entity *entity)
+void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
+ struct drm_sched_entity *entity)
{
- struct amd_gpu_scheduler *sched = sched_job->sched;
+ struct drm_gpu_scheduler *sched = sched_job->sched;
bool first = false;
- trace_amd_sched_job(sched_job, entity);
+ trace_drm_sched_job(sched_job, entity);
spin_lock(&entity->queue_lock);
first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);
@@ -385,25 +388,26 @@ void amd_sched_entity_push_job(struct amd_sched_job *sched_job,
if (first) {
/* Add the entity to the run queue */
spin_lock(&entity->rq_lock);
- amd_sched_rq_add_entity(entity->rq, entity);
+ drm_sched_rq_add_entity(entity->rq, entity);
spin_unlock(&entity->rq_lock);
- amd_sched_wakeup(sched);
+ drm_sched_wakeup(sched);
}
}
+EXPORT_SYMBOL(drm_sched_entity_push_job);
/* job_finish is called after hw fence signaled
*/
-static void amd_sched_job_finish(struct work_struct *work)
+static void drm_sched_job_finish(struct work_struct *work)
{
- struct amd_sched_job *s_job = container_of(work, struct amd_sched_job,
+ struct drm_sched_job *s_job = container_of(work, struct drm_sched_job,
finish_work);
- struct amd_gpu_scheduler *sched = s_job->sched;
+ struct drm_gpu_scheduler *sched = s_job->sched;
/* remove job from ring_mirror_list */
spin_lock(&sched->job_list_lock);
list_del_init(&s_job->node);
if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
- struct amd_sched_job *next;
+ struct drm_sched_job *next;
spin_unlock(&sched->job_list_lock);
cancel_delayed_work_sync(&s_job->work_tdr);
@@ -411,7 +415,7 @@ static void amd_sched_job_finish(struct work_struct *work)
/* queue TDR for next job */
next = list_first_entry_or_null(&sched->ring_mirror_list,
- struct amd_sched_job, node);
+ struct drm_sched_job, node);
if (next)
schedule_delayed_work(&next->work_tdr, sched->timeout);
@@ -421,42 +425,42 @@ static void amd_sched_job_finish(struct work_struct *work)
sched->ops->free_job(s_job);
}
-static void amd_sched_job_finish_cb(struct dma_fence *f,
+static void drm_sched_job_finish_cb(struct dma_fence *f,
struct dma_fence_cb *cb)
{
- struct amd_sched_job *job = container_of(cb, struct amd_sched_job,
+ struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
finish_cb);
schedule_work(&job->finish_work);
}
-static void amd_sched_job_begin(struct amd_sched_job *s_job)
+static void drm_sched_job_begin(struct drm_sched_job *s_job)
{
- struct amd_gpu_scheduler *sched = s_job->sched;
+ struct drm_gpu_scheduler *sched = s_job->sched;
dma_fence_add_callback(&s_job->s_fence->finished, &s_job->finish_cb,
- amd_sched_job_finish_cb);
+ drm_sched_job_finish_cb);
spin_lock(&sched->job_list_lock);
list_add_tail(&s_job->node, &sched->ring_mirror_list);
if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
list_first_entry_or_null(&sched->ring_mirror_list,
- struct amd_sched_job, node) == s_job)
+ struct drm_sched_job, node) == s_job)
schedule_delayed_work(&s_job->work_tdr, sched->timeout);
spin_unlock(&sched->job_list_lock);
}
-static void amd_sched_job_timedout(struct work_struct *work)
+static void drm_sched_job_timedout(struct work_struct *work)
{
- struct amd_sched_job *job = container_of(work, struct amd_sched_job,
+ struct drm_sched_job *job = container_of(work, struct drm_sched_job,
work_tdr.work);
job->sched->ops->timedout_job(job);
}
-void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched, struct amd_sched_job *bad)
+void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
{
- struct amd_sched_job *s_job;
- struct amd_sched_entity *entity, *tmp;
+ struct drm_sched_job *s_job;
+ struct drm_sched_entity *entity, *tmp;
 int i;
spin_lock(&sched->job_list_lock);
@@ -471,14 +475,14 @@ void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched, struct amd_sched_jo
}
spin_unlock(&sched->job_list_lock);
- if (bad && bad->s_priority != AMD_SCHED_PRIORITY_KERNEL) {
+ if (bad && bad->s_priority != DRM_SCHED_PRIORITY_KERNEL) {
atomic_inc(&bad->karma);
/* don't increase @bad's karma if it's from KERNEL RQ,
 * because sometimes a GPU hang would cause kernel jobs (like VM updating jobs)
 * to be corrupted; keep in mind that kernel jobs are always considered good.
*/
- for (i = AMD_SCHED_PRIORITY_MIN; i < AMD_SCHED_PRIORITY_KERNEL; i++ ) {
- struct amd_sched_rq *rq = &sched->sched_rq[i];
+ for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_KERNEL; i++ ) {
+ struct drm_sched_rq *rq = &sched->sched_rq[i];
spin_lock(&rq->lock);
list_for_each_entry_safe(entity, tmp, &rq->entities, list) {
@@ -495,30 +499,22 @@ void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched, struct amd_sched_jo
}
}
}
+EXPORT_SYMBOL(drm_sched_hw_job_reset);
-void amd_sched_job_kickout(struct amd_sched_job *s_job)
-{
- struct amd_gpu_scheduler *sched = s_job->sched;
-
- spin_lock(&sched->job_list_lock);
- list_del_init(&s_job->node);
- spin_unlock(&sched->job_list_lock);
-}
-
-void amd_sched_job_recovery(struct amd_gpu_scheduler *sched)
+void drm_sched_job_recovery(struct drm_gpu_scheduler *sched)
{
- struct amd_sched_job *s_job, *tmp;
+ struct drm_sched_job *s_job, *tmp;
bool found_guilty = false;
int r;
spin_lock(&sched->job_list_lock);
s_job = list_first_entry_or_null(&sched->ring_mirror_list,
- struct amd_sched_job, node);
+ struct drm_sched_job, node);
if (s_job && sched->timeout != MAX_SCHEDULE_TIMEOUT)
schedule_delayed_work(&s_job->work_tdr, sched->timeout);
list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
- struct amd_sched_fence *s_fence = s_job->s_fence;
+ struct drm_sched_fence *s_fence = s_job->s_fence;
struct dma_fence *fence;
uint64_t guilty_context;
@@ -536,45 +532,47 @@ void amd_sched_job_recovery(struct amd_gpu_scheduler *sched)
if (fence) {
s_fence->parent = dma_fence_get(fence);
r = dma_fence_add_callback(fence, &s_fence->cb,
- amd_sched_process_job);
+ drm_sched_process_job);
if (r == -ENOENT)
- amd_sched_process_job(fence, &s_fence->cb);
+ drm_sched_process_job(fence, &s_fence->cb);
else if (r)
DRM_ERROR("fence add callback failed (%d)\n",
r);
dma_fence_put(fence);
} else {
- amd_sched_process_job(NULL, &s_fence->cb);
+ drm_sched_process_job(NULL, &s_fence->cb);
}
spin_lock(&sched->job_list_lock);
}
spin_unlock(&sched->job_list_lock);
}
+EXPORT_SYMBOL(drm_sched_job_recovery);
/* init a sched_job with basic field */
-int amd_sched_job_init(struct amd_sched_job *job,
- struct amd_gpu_scheduler *sched,
- struct amd_sched_entity *entity,
+int drm_sched_job_init(struct drm_sched_job *job,
+ struct drm_gpu_scheduler *sched,
+ struct drm_sched_entity *entity,
void *owner)
{
job->sched = sched;
job->s_priority = entity->rq - sched->sched_rq;
- job->s_fence = amd_sched_fence_create(entity, owner);
+ job->s_fence = drm_sched_fence_create(entity, owner);
if (!job->s_fence)
return -ENOMEM;
job->id = atomic64_inc_return(&sched->job_id_count);
- INIT_WORK(&job->finish_work, amd_sched_job_finish);
+ INIT_WORK(&job->finish_work, drm_sched_job_finish);
INIT_LIST_HEAD(&job->node);
- INIT_DELAYED_WORK(&job->work_tdr, amd_sched_job_timedout);
+ INIT_DELAYED_WORK(&job->work_tdr, drm_sched_job_timedout);
return 0;
}
+EXPORT_SYMBOL(drm_sched_job_init);
/**
 * Return true if we can push more jobs to the hw.
*/
-static bool amd_sched_ready(struct amd_gpu_scheduler *sched)
+static bool drm_sched_ready(struct drm_gpu_scheduler *sched)
{
return atomic_read(&sched->hw_rq_count) <
sched->hw_submission_limit;
@@ -583,27 +581,27 @@ static bool amd_sched_ready(struct amd_gpu_scheduler *sched)
/**
* Wake up the scheduler when it is ready
*/
-static void amd_sched_wakeup(struct amd_gpu_scheduler *sched)
+static void drm_sched_wakeup(struct drm_gpu_scheduler *sched)
{
- if (amd_sched_ready(sched))
+ if (drm_sched_ready(sched))
wake_up_interruptible(&sched->wake_up_worker);
}
/**
* Select next entity to process
*/
-static struct amd_sched_entity *
-amd_sched_select_entity(struct amd_gpu_scheduler *sched)
+static struct drm_sched_entity *
+drm_sched_select_entity(struct drm_gpu_scheduler *sched)
{
- struct amd_sched_entity *entity;
+ struct drm_sched_entity *entity;
int i;
- if (!amd_sched_ready(sched))
+ if (!drm_sched_ready(sched))
return NULL;
 /* Kernel run queue has higher priority than normal run queue */
- for (i = AMD_SCHED_PRIORITY_MAX - 1; i >= AMD_SCHED_PRIORITY_MIN; i--) {
- entity = amd_sched_rq_select_entity(&sched->sched_rq[i]);
+ for (i = DRM_SCHED_PRIORITY_MAX - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
+ entity = drm_sched_rq_select_entity(&sched->sched_rq[i]);
if (entity)
break;
}
@@ -611,22 +609,22 @@ amd_sched_select_entity(struct amd_gpu_scheduler *sched)
return entity;
}
-static void amd_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
+static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
{
- struct amd_sched_fence *s_fence =
- container_of(cb, struct amd_sched_fence, cb);
- struct amd_gpu_scheduler *sched = s_fence->sched;
+ struct drm_sched_fence *s_fence =
+ container_of(cb, struct drm_sched_fence, cb);
+ struct drm_gpu_scheduler *sched = s_fence->sched;
dma_fence_get(&s_fence->finished);
atomic_dec(&sched->hw_rq_count);
- amd_sched_fence_finished(s_fence);
+ drm_sched_fence_finished(s_fence);
- trace_amd_sched_process_job(s_fence);
+ trace_drm_sched_process_job(s_fence);
dma_fence_put(&s_fence->finished);
wake_up_interruptible(&sched->wake_up_worker);
}
-static bool amd_sched_blocked(struct amd_gpu_scheduler *sched)
+static bool drm_sched_blocked(struct drm_gpu_scheduler *sched)
{
if (kthread_should_park()) {
kthread_parkme();
@@ -636,52 +634,52 @@ static bool amd_sched_blocked(struct amd_gpu_scheduler *sched)
return false;
}
-static int amd_sched_main(void *param)
+static int drm_sched_main(void *param)
{
struct sched_param sparam = {.sched_priority = 1};
- struct amd_gpu_scheduler *sched = (struct amd_gpu_scheduler *)param;
+ struct drm_gpu_scheduler *sched = (struct drm_gpu_scheduler *)param;
int r;
sched_setscheduler(current, SCHED_FIFO, &sparam);
while (!kthread_should_stop()) {
- struct amd_sched_entity *entity = NULL;
- struct amd_sched_fence *s_fence;
- struct amd_sched_job *sched_job;
+ struct drm_sched_entity *entity = NULL;
+ struct drm_sched_fence *s_fence;
+ struct drm_sched_job *sched_job;
struct dma_fence *fence;
wait_event_interruptible(sched->wake_up_worker,
- (!amd_sched_blocked(sched) &&
- (entity = amd_sched_select_entity(sched))) ||
+ (!drm_sched_blocked(sched) &&
+ (entity = drm_sched_select_entity(sched))) ||
kthread_should_stop());
if (!entity)
continue;
- sched_job = amd_sched_entity_pop_job(entity);
+ sched_job = drm_sched_entity_pop_job(entity);
if (!sched_job)
continue;
s_fence = sched_job->s_fence;
atomic_inc(&sched->hw_rq_count);
- amd_sched_job_begin(sched_job);
+ drm_sched_job_begin(sched_job);
fence = sched->ops->run_job(sched_job);
- amd_sched_fence_scheduled(s_fence);
+ drm_sched_fence_scheduled(s_fence);
if (fence) {
s_fence->parent = dma_fence_get(fence);
r = dma_fence_add_callback(fence, &s_fence->cb,
- amd_sched_process_job);
+ drm_sched_process_job);
if (r == -ENOENT)
- amd_sched_process_job(fence, &s_fence->cb);
+ drm_sched_process_job(fence, &s_fence->cb);
else if (r)
DRM_ERROR("fence add callback failed (%d)\n",
r);
dma_fence_put(fence);
} else {
- amd_sched_process_job(NULL, &s_fence->cb);
+ drm_sched_process_job(NULL, &s_fence->cb);
}
wake_up(&sched->job_scheduled);
@@ -699,8 +697,8 @@ static int amd_sched_main(void *param)
*
* Return 0 on success, otherwise error code.
*/
-int amd_sched_init(struct amd_gpu_scheduler *sched,
- const struct amd_sched_backend_ops *ops,
+int drm_sched_init(struct drm_gpu_scheduler *sched,
+ const struct drm_sched_backend_ops *ops,
unsigned hw_submission,
unsigned hang_limit,
long timeout,
@@ -712,8 +710,8 @@ int amd_sched_init(struct amd_gpu_scheduler *sched,
sched->name = name;
sched->timeout = timeout;
sched->hang_limit = hang_limit;
- for (i = AMD_SCHED_PRIORITY_MIN; i < AMD_SCHED_PRIORITY_MAX; i++)
- amd_sched_rq_init(&sched->sched_rq[i]);
+ for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_MAX; i++)
+ drm_sched_rq_init(&sched->sched_rq[i]);
init_waitqueue_head(&sched->wake_up_worker);
init_waitqueue_head(&sched->job_scheduled);
@@ -723,7 +721,7 @@ int amd_sched_init(struct amd_gpu_scheduler *sched,
atomic64_set(&sched->job_id_count, 0);
 /* Each scheduler will run on a separate kernel thread */
- sched->thread = kthread_run(amd_sched_main, sched, sched->name);
+ sched->thread = kthread_run(drm_sched_main, sched, sched->name);
if (IS_ERR(sched->thread)) {
DRM_ERROR("Failed to create scheduler for %s.\n", name);
return PTR_ERR(sched->thread);
@@ -731,14 +729,16 @@ int amd_sched_init(struct amd_gpu_scheduler *sched,
return 0;
}
+EXPORT_SYMBOL(drm_sched_init);
/**
* Destroy a gpu scheduler
*
* @sched The pointer to the scheduler
*/
-void amd_sched_fini(struct amd_gpu_scheduler *sched)
+void drm_sched_fini(struct drm_gpu_scheduler *sched)
{
if (sched->thread)
kthread_stop(sched->thread);
}
+EXPORT_SYMBOL(drm_sched_fini);
diff --git a/drivers/gpu/drm/amd/scheduler/sched_fence.c b/drivers/gpu/drm/scheduler/sched_fence.c
index 33f54d0a5c4f..69aab086b913 100644
--- a/drivers/gpu/drm/amd/scheduler/sched_fence.c
+++ b/drivers/gpu/drm/scheduler/sched_fence.c
@@ -19,20 +19,20 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
- *
*/
+
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <drm/drmP.h>
-#include "gpu_scheduler.h"
+#include <drm/gpu_scheduler.h>
static struct kmem_cache *sched_fence_slab;
-int amd_sched_fence_slab_init(void)
+static int __init drm_sched_fence_slab_init(void)
{
sched_fence_slab = kmem_cache_create(
- "amd_sched_fence", sizeof(struct amd_sched_fence), 0,
+ "drm_sched_fence", sizeof(struct drm_sched_fence), 0,
SLAB_HWCACHE_ALIGN, NULL);
if (!sched_fence_slab)
return -ENOMEM;
@@ -40,36 +40,13 @@ int amd_sched_fence_slab_init(void)
return 0;
}
-void amd_sched_fence_slab_fini(void)
+static void __exit drm_sched_fence_slab_fini(void)
{
rcu_barrier();
kmem_cache_destroy(sched_fence_slab);
}
-struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *entity,
- void *owner)
-{
- struct amd_sched_fence *fence = NULL;
- unsigned seq;
-
- fence = kmem_cache_zalloc(sched_fence_slab, GFP_KERNEL);
- if (fence == NULL)
- return NULL;
-
- fence->owner = owner;
- fence->sched = entity->sched;
- spin_lock_init(&fence->lock);
-
- seq = atomic_inc_return(&entity->fence_seq);
- dma_fence_init(&fence->scheduled, &amd_sched_fence_ops_scheduled,
- &fence->lock, entity->fence_context, seq);
- dma_fence_init(&fence->finished, &amd_sched_fence_ops_finished,
- &fence->lock, entity->fence_context + 1, seq);
-
- return fence;
-}
-
-void amd_sched_fence_scheduled(struct amd_sched_fence *fence)
+void drm_sched_fence_scheduled(struct drm_sched_fence *fence)
{
int ret = dma_fence_signal(&fence->scheduled);
@@ -81,7 +58,7 @@ void amd_sched_fence_scheduled(struct amd_sched_fence *fence)
"was already signaled\n");
}
-void amd_sched_fence_finished(struct amd_sched_fence *fence)
+void drm_sched_fence_finished(struct drm_sched_fence *fence)
{
int ret = dma_fence_signal(&fence->finished);
@@ -93,18 +70,18 @@ void amd_sched_fence_finished(struct amd_sched_fence *fence)
"was already signaled\n");
}
-static const char *amd_sched_fence_get_driver_name(struct dma_fence *fence)
+static const char *drm_sched_fence_get_driver_name(struct dma_fence *fence)
{
- return "amd_sched";
+ return "drm_sched";
}
-static const char *amd_sched_fence_get_timeline_name(struct dma_fence *f)
+static const char *drm_sched_fence_get_timeline_name(struct dma_fence *f)
{
- struct amd_sched_fence *fence = to_amd_sched_fence(f);
+ struct drm_sched_fence *fence = to_drm_sched_fence(f);
return (const char *)fence->sched->name;
}
-static bool amd_sched_fence_enable_signaling(struct dma_fence *f)
+static bool drm_sched_fence_enable_signaling(struct dma_fence *f)
{
return true;
}
@@ -116,10 +93,10 @@ static bool amd_sched_fence_enable_signaling(struct dma_fence *f)
*
* Free up the fence memory after the RCU grace period.
*/
-static void amd_sched_fence_free(struct rcu_head *rcu)
+static void drm_sched_fence_free(struct rcu_head *rcu)
{
struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
- struct amd_sched_fence *fence = to_amd_sched_fence(f);
+ struct drm_sched_fence *fence = to_drm_sched_fence(f);
dma_fence_put(fence->parent);
kmem_cache_free(sched_fence_slab, fence);
@@ -133,11 +110,11 @@ static void amd_sched_fence_free(struct rcu_head *rcu)
* This function is called when the reference count becomes zero.
* It just RCU schedules freeing up the fence.
*/
-static void amd_sched_fence_release_scheduled(struct dma_fence *f)
+static void drm_sched_fence_release_scheduled(struct dma_fence *f)
{
- struct amd_sched_fence *fence = to_amd_sched_fence(f);
+ struct drm_sched_fence *fence = to_drm_sched_fence(f);
- call_rcu(&fence->finished.rcu, amd_sched_fence_free);
+ call_rcu(&fence->finished.rcu, drm_sched_fence_free);
}
/**
@@ -147,27 +124,68 @@ static void amd_sched_fence_release_scheduled(struct dma_fence *f)
*
* Drop the extra reference from the scheduled fence to the base fence.
*/
-static void amd_sched_fence_release_finished(struct dma_fence *f)
+static void drm_sched_fence_release_finished(struct dma_fence *f)
{
- struct amd_sched_fence *fence = to_amd_sched_fence(f);
+ struct drm_sched_fence *fence = to_drm_sched_fence(f);
dma_fence_put(&fence->scheduled);
}
-const struct dma_fence_ops amd_sched_fence_ops_scheduled = {
- .get_driver_name = amd_sched_fence_get_driver_name,
- .get_timeline_name = amd_sched_fence_get_timeline_name,
- .enable_signaling = amd_sched_fence_enable_signaling,
+const struct dma_fence_ops drm_sched_fence_ops_scheduled = {
+ .get_driver_name = drm_sched_fence_get_driver_name,
+ .get_timeline_name = drm_sched_fence_get_timeline_name,
+ .enable_signaling = drm_sched_fence_enable_signaling,
.signaled = NULL,
.wait = dma_fence_default_wait,
- .release = amd_sched_fence_release_scheduled,
+ .release = drm_sched_fence_release_scheduled,
};
-const struct dma_fence_ops amd_sched_fence_ops_finished = {
- .get_driver_name = amd_sched_fence_get_driver_name,
- .get_timeline_name = amd_sched_fence_get_timeline_name,
- .enable_signaling = amd_sched_fence_enable_signaling,
+const struct dma_fence_ops drm_sched_fence_ops_finished = {
+ .get_driver_name = drm_sched_fence_get_driver_name,
+ .get_timeline_name = drm_sched_fence_get_timeline_name,
+ .enable_signaling = drm_sched_fence_enable_signaling,
.signaled = NULL,
.wait = dma_fence_default_wait,
- .release = amd_sched_fence_release_finished,
+ .release = drm_sched_fence_release_finished,
};
+
+struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f)
+{
+ if (f->ops == &drm_sched_fence_ops_scheduled)
+ return container_of(f, struct drm_sched_fence, scheduled);
+
+ if (f->ops == &drm_sched_fence_ops_finished)
+ return container_of(f, struct drm_sched_fence, finished);
+
+ return NULL;
+}
+EXPORT_SYMBOL(to_drm_sched_fence);
+
+struct drm_sched_fence *drm_sched_fence_create(struct drm_sched_entity *entity,
+ void *owner)
+{
+ struct drm_sched_fence *fence = NULL;
+ unsigned seq;
+
+ fence = kmem_cache_zalloc(sched_fence_slab, GFP_KERNEL);
+ if (fence == NULL)
+ return NULL;
+
+ fence->owner = owner;
+ fence->sched = entity->sched;
+ spin_lock_init(&fence->lock);
+
+ seq = atomic_inc_return(&entity->fence_seq);
+ dma_fence_init(&fence->scheduled, &drm_sched_fence_ops_scheduled,
+ &fence->lock, entity->fence_context, seq);
+ dma_fence_init(&fence->finished, &drm_sched_fence_ops_finished,
+ &fence->lock, entity->fence_context + 1, seq);
+
+ return fence;
+}
+
+module_init(drm_sched_fence_slab_init);
+module_exit(drm_sched_fence_slab_fini);
+
+MODULE_DESCRIPTION("DRM GPU scheduler");
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/sti/sti_drv.c b/drivers/gpu/drm/sti/sti_drv.c
index 88d1dc6408af..55b6967d27e1 100644
--- a/drivers/gpu/drm/sti/sti_drv.c
+++ b/drivers/gpu/drm/sti/sti_drv.c
@@ -17,6 +17,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_fb_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_of.h>
@@ -138,16 +139,9 @@ static int sti_atomic_check(struct drm_device *dev,
return ret;
}
-static void sti_output_poll_changed(struct drm_device *ddev)
-{
- struct sti_private *private = ddev->dev_private;
-
- drm_fbdev_cma_hotplug_event(private->fbdev);
-}
-
static const struct drm_mode_config_funcs sti_mode_config_funcs = {
.fb_create = drm_gem_fb_create,
- .output_poll_changed = sti_output_poll_changed,
+ .output_poll_changed = drm_fb_helper_output_poll_changed,
.atomic_check = sti_atomic_check,
.atomic_commit = drm_atomic_helper_commit,
};
@@ -230,11 +224,7 @@ static void sti_cleanup(struct drm_device *ddev)
{
struct sti_private *private = ddev->dev_private;
- if (private->fbdev) {
- drm_fbdev_cma_fini(private->fbdev);
- private->fbdev = NULL;
- }
-
+ drm_fb_cma_fbdev_fini(ddev);
drm_kms_helper_poll_fini(ddev);
component_unbind_all(ddev->dev, ddev);
kfree(private);
@@ -244,8 +234,6 @@ static void sti_cleanup(struct drm_device *ddev)
static int sti_bind(struct device *dev)
{
struct drm_device *ddev;
- struct sti_private *private;
- struct drm_fbdev_cma *fbdev;
int ret;
ddev = drm_dev_alloc(&sti_driver, dev);
@@ -266,15 +254,10 @@ static int sti_bind(struct device *dev)
drm_mode_config_reset(ddev);
- private = ddev->dev_private;
if (ddev->mode_config.num_connector) {
- fbdev = drm_fbdev_cma_init(ddev, 32,
- ddev->mode_config.num_connector);
- if (IS_ERR(fbdev)) {
+ ret = drm_fb_cma_fbdev_init(ddev, 32, 0);
+ if (ret)
DRM_DEBUG_DRIVER("Warning: fails to create fbdev\n");
- fbdev = NULL;
- }
- private->fbdev = fbdev;
}
return 0;
diff --git a/drivers/gpu/drm/sti/sti_drv.h b/drivers/gpu/drm/sti/sti_drv.h
index abc49b43566e..4b41142a22e4 100644
--- a/drivers/gpu/drm/sti/sti_drv.h
+++ b/drivers/gpu/drm/sti/sti_drv.h
@@ -24,7 +24,6 @@ struct sti_private {
struct sti_compositor *compo;
struct drm_property *plane_zorder_property;
struct drm_device *drm_dev;
- struct drm_fbdev_cma *fbdev;
};
extern struct platform_driver sti_tvout_driver;
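The same conversion pattern repeats for the stm, sun4i, tilcdc and tinydrm drivers below: the driver-private drm_fbdev_cma bookkeeping is dropped in favour of the generic fb helpers. A condensed sketch of the resulting shape, with hypothetical foo_* names; the bpp value and the zero connector count simply mirror the conventions used in these hunks.

#include <drm/drm_atomic_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>

static const struct drm_mode_config_funcs foo_mode_config_funcs = {
	.fb_create = drm_gem_fb_create,
	/* generic hotplug handling, no private fbdev pointer needed */
	.output_poll_changed = drm_fb_helper_output_poll_changed,
	.atomic_check = drm_atomic_helper_check,
	.atomic_commit = drm_atomic_helper_commit,
};

static int foo_bind(struct drm_device *ddev)
{
	int ret;

	/* 32 bpp preferred; 0 lets the helper pick the connector count */
	ret = drm_fb_cma_fbdev_init(ddev, 32, 0);
	if (ret)
		DRM_DEBUG_DRIVER("Warning: failed to create fbdev\n");

	return 0;
}

static void foo_unbind(struct drm_device *ddev)
{
	drm_fb_cma_fbdev_fini(ddev);
}

/* In the drm_driver, .lastclose = drm_fb_helper_lastclose restores the
 * fbdev mode without any driver-private state. */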
diff --git a/drivers/gpu/drm/stm/drv.c b/drivers/gpu/drm/stm/drv.c
index 2d6e9ca0450b..8fe954c27fba 100644
--- a/drivers/gpu/drm/stm/drv.c
+++ b/drivers/gpu/drm/stm/drv.c
@@ -14,6 +14,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
@@ -23,35 +24,19 @@
#define STM_MAX_FB_WIDTH 2048
#define STM_MAX_FB_HEIGHT 2048 /* same as width to handle orientation */
-static void drv_output_poll_changed(struct drm_device *ddev)
-{
- struct ltdc_device *ldev = ddev->dev_private;
-
- drm_fbdev_cma_hotplug_event(ldev->fbdev);
-}
-
static const struct drm_mode_config_funcs drv_mode_config_funcs = {
.fb_create = drm_gem_fb_create,
- .output_poll_changed = drv_output_poll_changed,
+ .output_poll_changed = drm_fb_helper_output_poll_changed,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
};
-static void drv_lastclose(struct drm_device *ddev)
-{
- struct ltdc_device *ldev = ddev->dev_private;
-
- DRM_DEBUG("%s\n", __func__);
-
- drm_fbdev_cma_restore_mode(ldev->fbdev);
-}
-
DEFINE_DRM_GEM_CMA_FOPS(drv_driver_fops);
static struct drm_driver drv_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME |
DRIVER_ATOMIC,
- .lastclose = drv_lastclose,
+ .lastclose = drm_fb_helper_lastclose,
.name = "stm",
.desc = "STMicroelectronics SoC DRM",
.date = "20170330",
@@ -78,7 +63,6 @@ static struct drm_driver drv_driver = {
static int drv_load(struct drm_device *ddev)
{
struct platform_device *pdev = to_platform_device(ddev->dev);
- struct drm_fbdev_cma *fbdev;
struct ltdc_device *ldev;
int ret;
@@ -111,14 +95,9 @@ static int drv_load(struct drm_device *ddev)
drm_kms_helper_poll_init(ddev);
if (ddev->mode_config.num_connector) {
- ldev = ddev->dev_private;
- fbdev = drm_fbdev_cma_init(ddev, 16,
- ddev->mode_config.num_connector);
- if (IS_ERR(fbdev)) {
+ ret = drm_fb_cma_fbdev_init(ddev, 16, 0);
+ if (ret)
DRM_DEBUG("Warning: fails to create fbdev\n");
- fbdev = NULL;
- }
- ldev->fbdev = fbdev;
}
platform_set_drvdata(pdev, ddev);
@@ -131,14 +110,9 @@ err:
static void drv_unload(struct drm_device *ddev)
{
- struct ltdc_device *ldev = ddev->dev_private;
-
DRM_DEBUG("%s\n", __func__);
- if (ldev->fbdev) {
- drm_fbdev_cma_fini(ldev->fbdev);
- ldev->fbdev = NULL;
- }
+ drm_fb_cma_fbdev_fini(ddev);
drm_kms_helper_poll_fini(ddev);
ltdc_unload(ddev);
drm_mode_config_cleanup(ddev);
diff --git a/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c b/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c
index 82dcb20cdaa3..fd02506274da 100644
--- a/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c
+++ b/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c
@@ -290,11 +290,6 @@ static int dw_mipi_dsi_stm_probe(struct platform_device *pdev)
return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- DRM_ERROR("Unable to get resource\n");
- return -ENODEV;
- }
-
dsi->base = devm_ioremap_resource(dev, res);
if (IS_ERR(dsi->base)) {
DRM_ERROR("Unable to get dsi registers\n");
diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c
index 394613b0fd46..6dc5d4ec4e17 100644
--- a/drivers/gpu/drm/stm/ltdc.c
+++ b/drivers/gpu/drm/stm/ltdc.c
@@ -901,12 +901,6 @@ int ltdc_load(struct drm_device *ddev)
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- DRM_ERROR("Unable to get resource\n");
- ret = -ENODEV;
- goto err;
- }
-
ldev->regs = devm_ioremap_resource(dev, res);
if (IS_ERR(ldev->regs)) {
DRM_ERROR("Unable to get ltdc registers\n");
diff --git a/drivers/gpu/drm/stm/ltdc.h b/drivers/gpu/drm/stm/ltdc.h
index d5da74d24995..edd1c0a446d1 100644
--- a/drivers/gpu/drm/stm/ltdc.h
+++ b/drivers/gpu/drm/stm/ltdc.h
@@ -20,7 +20,6 @@ struct ltdc_caps {
};
struct ltdc_device {
- struct drm_fbdev_cma *fbdev;
void __iomem *regs;
struct clk *pixel_clk; /* lcd pixel clock */
struct mutex err_lock; /* protecting error_status */
diff --git a/drivers/gpu/drm/sun4i/Makefile b/drivers/gpu/drm/sun4i/Makefile
index 82a6ac57fbe3..2b37a6abbb1d 100644
--- a/drivers/gpu/drm/sun4i/Makefile
+++ b/drivers/gpu/drm/sun4i/Makefile
@@ -15,6 +15,7 @@ sun8i-mixer-y += sun8i_mixer.o sun8i_ui_layer.o \
sun4i-tcon-y += sun4i_crtc.o
sun4i-tcon-y += sun4i_dotclock.o
+sun4i-tcon-y += sun4i_lvds.o
sun4i-tcon-y += sun4i_tcon.o
sun4i-tcon-y += sun4i_rgb.o
diff --git a/drivers/gpu/drm/sun4i/sun4i_dotclock.c b/drivers/gpu/drm/sun4i/sun4i_dotclock.c
index d401156490f3..023f39bda633 100644
--- a/drivers/gpu/drm/sun4i/sun4i_dotclock.c
+++ b/drivers/gpu/drm/sun4i/sun4i_dotclock.c
@@ -17,8 +17,9 @@
#include "sun4i_dotclock.h"
struct sun4i_dclk {
- struct clk_hw hw;
- struct regmap *regmap;
+ struct clk_hw hw;
+ struct regmap *regmap;
+ struct sun4i_tcon *tcon;
};
static inline struct sun4i_dclk *hw_to_dclk(struct clk_hw *hw)
@@ -73,11 +74,13 @@ static unsigned long sun4i_dclk_recalc_rate(struct clk_hw *hw,
static long sun4i_dclk_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *parent_rate)
{
+ struct sun4i_dclk *dclk = hw_to_dclk(hw);
+ struct sun4i_tcon *tcon = dclk->tcon;
unsigned long best_parent = 0;
u8 best_div = 1;
int i;
- for (i = 6; i <= 127; i++) {
+ for (i = tcon->dclk_min_div; i <= tcon->dclk_max_div; i++) {
unsigned long ideal = rate * i;
unsigned long rounded;
@@ -167,6 +170,7 @@ int sun4i_dclk_create(struct device *dev, struct sun4i_tcon *tcon)
dclk = devm_kzalloc(dev, sizeof(*dclk), GFP_KERNEL);
if (!dclk)
return -ENOMEM;
+ dclk->tcon = tcon;
init.name = clk_name;
init.ops = &sun4i_dclk_ops;
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
index 49215d91c853..4570da0227b4 100644
--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
@@ -26,20 +26,13 @@
#include "sun4i_framebuffer.h"
#include "sun4i_tcon.h"
-static void sun4i_drv_lastclose(struct drm_device *dev)
-{
- struct sun4i_drv *drv = dev->dev_private;
-
- drm_fbdev_cma_restore_mode(drv->fbdev);
-}
-
DEFINE_DRM_GEM_CMA_FOPS(sun4i_drv_fops);
static struct drm_driver sun4i_drv_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_ATOMIC,
/* Generic Operations */
- .lastclose = sun4i_drv_lastclose,
+ .lastclose = drm_fb_helper_lastclose,
.fops = &sun4i_drv_fops,
.name = "sun4i-drm",
.desc = "Allwinner sun4i Display Engine",
@@ -126,10 +119,9 @@ static int sun4i_drv_bind(struct device *dev)
sun4i_remove_framebuffers();
/* Create our framebuffer */
- drv->fbdev = sun4i_framebuffer_init(drm);
- if (IS_ERR(drv->fbdev)) {
+ ret = sun4i_framebuffer_init(drm);
+ if (ret) {
dev_err(drm->dev, "Couldn't create our framebuffer\n");
- ret = PTR_ERR(drv->fbdev);
goto cleanup_mode_config;
}
@@ -347,6 +339,7 @@ static const struct of_device_id sun4i_drv_of_table[] = {
{ .compatible = "allwinner,sun6i-a31s-display-engine" },
{ .compatible = "allwinner,sun7i-a20-display-engine" },
{ .compatible = "allwinner,sun8i-a33-display-engine" },
+ { .compatible = "allwinner,sun8i-a83t-display-engine" },
{ .compatible = "allwinner,sun8i-v3s-display-engine" },
{ }
};
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.h b/drivers/gpu/drm/sun4i/sun4i_drv.h
index a960c89270cc..2825f140da54 100644
--- a/drivers/gpu/drm/sun4i/sun4i_drv.h
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.h
@@ -20,8 +20,6 @@
struct sun4i_drv {
struct list_head engine_list;
struct list_head tcon_list;
-
- struct drm_fbdev_cma *fbdev;
};
#endif /* _SUN4I_DRV_H_ */
diff --git a/drivers/gpu/drm/sun4i/sun4i_framebuffer.c b/drivers/gpu/drm/sun4i/sun4i_framebuffer.c
index 2992f0a6b349..38a36c0dfa2f 100644
--- a/drivers/gpu/drm/sun4i/sun4i_framebuffer.c
+++ b/drivers/gpu/drm/sun4i/sun4i_framebuffer.c
@@ -11,6 +11,7 @@
*/
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_fb_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drmP.h>
@@ -18,21 +19,14 @@
#include "sun4i_drv.h"
#include "sun4i_framebuffer.h"
-static void sun4i_de_output_poll_changed(struct drm_device *drm)
-{
- struct sun4i_drv *drv = drm->dev_private;
-
- drm_fbdev_cma_hotplug_event(drv->fbdev);
-}
-
static const struct drm_mode_config_funcs sun4i_de_mode_config_funcs = {
- .output_poll_changed = sun4i_de_output_poll_changed,
+ .output_poll_changed = drm_fb_helper_output_poll_changed,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
.fb_create = drm_gem_fb_create,
};
-struct drm_fbdev_cma *sun4i_framebuffer_init(struct drm_device *drm)
+int sun4i_framebuffer_init(struct drm_device *drm)
{
drm_mode_config_reset(drm);
@@ -41,12 +35,10 @@ struct drm_fbdev_cma *sun4i_framebuffer_init(struct drm_device *drm)
drm->mode_config.funcs = &sun4i_de_mode_config_funcs;
- return drm_fbdev_cma_init(drm, 32, drm->mode_config.num_connector);
+ return drm_fb_cma_fbdev_init(drm, 32, 0);
}
void sun4i_framebuffer_free(struct drm_device *drm)
{
- struct sun4i_drv *drv = drm->dev_private;
-
- drm_fbdev_cma_fini(drv->fbdev);
+ drm_fb_cma_fbdev_fini(drm);
}
diff --git a/drivers/gpu/drm/sun4i/sun4i_framebuffer.h b/drivers/gpu/drm/sun4i/sun4i_framebuffer.h
index 3afd65252ee0..7ef0aed8384c 100644
--- a/drivers/gpu/drm/sun4i/sun4i_framebuffer.h
+++ b/drivers/gpu/drm/sun4i/sun4i_framebuffer.h
@@ -13,7 +13,7 @@
#ifndef _SUN4I_FRAMEBUFFER_H_
#define _SUN4I_FRAMEBUFFER_H_
-struct drm_fbdev_cma *sun4i_framebuffer_init(struct drm_device *drm);
+int sun4i_framebuffer_init(struct drm_device *drm);
void sun4i_framebuffer_free(struct drm_device *drm);
#endif /* _SUN4I_FRAMEBUFFER_H_ */
diff --git a/drivers/gpu/drm/sun4i/sun4i_lvds.c b/drivers/gpu/drm/sun4i/sun4i_lvds.c
new file mode 100644
index 000000000000..be3f14d7746d
--- /dev/null
+++ b/drivers/gpu/drm/sun4i/sun4i_lvds.c
@@ -0,0 +1,177 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2017 Free Electrons
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ */
+
+#include <linux/clk.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_of.h>
+#include <drm/drm_panel.h>
+
+#include "sun4i_crtc.h"
+#include "sun4i_tcon.h"
+#include "sun4i_lvds.h"
+
+struct sun4i_lvds {
+ struct drm_connector connector;
+ struct drm_encoder encoder;
+
+ struct sun4i_tcon *tcon;
+};
+
+static inline struct sun4i_lvds *
+drm_connector_to_sun4i_lvds(struct drm_connector *connector)
+{
+ return container_of(connector, struct sun4i_lvds,
+ connector);
+}
+
+static inline struct sun4i_lvds *
+drm_encoder_to_sun4i_lvds(struct drm_encoder *encoder)
+{
+ return container_of(encoder, struct sun4i_lvds,
+ encoder);
+}
+
+static int sun4i_lvds_get_modes(struct drm_connector *connector)
+{
+ struct sun4i_lvds *lvds =
+ drm_connector_to_sun4i_lvds(connector);
+ struct sun4i_tcon *tcon = lvds->tcon;
+
+ return drm_panel_get_modes(tcon->panel);
+}
+
+static struct drm_connector_helper_funcs sun4i_lvds_con_helper_funcs = {
+ .get_modes = sun4i_lvds_get_modes,
+};
+
+static void
+sun4i_lvds_connector_destroy(struct drm_connector *connector)
+{
+ struct sun4i_lvds *lvds = drm_connector_to_sun4i_lvds(connector);
+ struct sun4i_tcon *tcon = lvds->tcon;
+
+ drm_panel_detach(tcon->panel);
+ drm_connector_cleanup(connector);
+}
+
+static const struct drm_connector_funcs sun4i_lvds_con_funcs = {
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = sun4i_lvds_connector_destroy,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static void sun4i_lvds_encoder_enable(struct drm_encoder *encoder)
+{
+ struct sun4i_lvds *lvds = drm_encoder_to_sun4i_lvds(encoder);
+ struct sun4i_tcon *tcon = lvds->tcon;
+
+ DRM_DEBUG_DRIVER("Enabling LVDS output\n");
+
+ if (!IS_ERR(tcon->panel)) {
+ drm_panel_prepare(tcon->panel);
+ drm_panel_enable(tcon->panel);
+ }
+}
+
+static void sun4i_lvds_encoder_disable(struct drm_encoder *encoder)
+{
+ struct sun4i_lvds *lvds = drm_encoder_to_sun4i_lvds(encoder);
+ struct sun4i_tcon *tcon = lvds->tcon;
+
+ DRM_DEBUG_DRIVER("Disabling LVDS output\n");
+
+ if (!IS_ERR(tcon->panel)) {
+ drm_panel_disable(tcon->panel);
+ drm_panel_unprepare(tcon->panel);
+ }
+}
+
+static const struct drm_encoder_helper_funcs sun4i_lvds_enc_helper_funcs = {
+ .disable = sun4i_lvds_encoder_disable,
+ .enable = sun4i_lvds_encoder_enable,
+};
+
+static const struct drm_encoder_funcs sun4i_lvds_enc_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+int sun4i_lvds_init(struct drm_device *drm, struct sun4i_tcon *tcon)
+{
+ struct drm_encoder *encoder;
+ struct drm_bridge *bridge;
+ struct sun4i_lvds *lvds;
+ int ret;
+
+ lvds = devm_kzalloc(drm->dev, sizeof(*lvds), GFP_KERNEL);
+ if (!lvds)
+ return -ENOMEM;
+ lvds->tcon = tcon;
+ encoder = &lvds->encoder;
+
+ ret = drm_of_find_panel_or_bridge(tcon->dev->of_node, 1, 0,
+ &tcon->panel, &bridge);
+ if (ret) {
+ dev_info(drm->dev, "No panel or bridge found... LVDS output disabled\n");
+ return 0;
+ }
+
+ drm_encoder_helper_add(&lvds->encoder,
+ &sun4i_lvds_enc_helper_funcs);
+ ret = drm_encoder_init(drm,
+ &lvds->encoder,
+ &sun4i_lvds_enc_funcs,
+ DRM_MODE_ENCODER_LVDS,
+ NULL);
+ if (ret) {
+ dev_err(drm->dev, "Couldn't initialise the lvds encoder\n");
+ goto err_out;
+ }
+
+ /* The LVDS encoder can only work with the TCON channel 0 */
+ lvds->encoder.possible_crtcs = BIT(drm_crtc_index(&tcon->crtc->crtc));
+
+ if (tcon->panel) {
+ drm_connector_helper_add(&lvds->connector,
+ &sun4i_lvds_con_helper_funcs);
+ ret = drm_connector_init(drm, &lvds->connector,
+ &sun4i_lvds_con_funcs,
+ DRM_MODE_CONNECTOR_LVDS);
+ if (ret) {
+ dev_err(drm->dev, "Couldn't initialise the lvds connector\n");
+ goto err_cleanup_connector;
+ }
+
+ drm_mode_connector_attach_encoder(&lvds->connector,
+ &lvds->encoder);
+
+ ret = drm_panel_attach(tcon->panel, &lvds->connector);
+ if (ret) {
+ dev_err(drm->dev, "Couldn't attach our panel\n");
+ goto err_cleanup_connector;
+ }
+ }
+
+ if (bridge) {
+ ret = drm_bridge_attach(encoder, bridge, NULL);
+ if (ret) {
+ dev_err(drm->dev, "Couldn't attach our bridge\n");
+ goto err_cleanup_connector;
+ }
+ }
+
+ return 0;
+
+err_cleanup_connector:
+ drm_encoder_cleanup(&lvds->encoder);
+err_out:
+ return ret;
+}
+EXPORT_SYMBOL(sun4i_lvds_init);
diff --git a/drivers/gpu/drm/sun4i/sun4i_lvds.h b/drivers/gpu/drm/sun4i/sun4i_lvds.h
new file mode 100644
index 000000000000..f3e90faa3082
--- /dev/null
+++ b/drivers/gpu/drm/sun4i/sun4i_lvds.h
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2017 Free Electrons
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ */
+
+#ifndef _SUN4I_LVDS_H_
+#define _SUN4I_LVDS_H_
+
+int sun4i_lvds_init(struct drm_device *drm, struct sun4i_tcon *tcon);
+
+#endif /* _SUN4I_LVDS_H_ */
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index a1ed462c2430..a897f82d9e66 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -31,10 +31,52 @@
#include "sun4i_crtc.h"
#include "sun4i_dotclock.h"
#include "sun4i_drv.h"
+#include "sun4i_lvds.h"
#include "sun4i_rgb.h"
#include "sun4i_tcon.h"
#include "sunxi_engine.h"
+static struct drm_connector *sun4i_tcon_get_connector(const struct drm_encoder *encoder)
+{
+ struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
+
+ drm_connector_list_iter_begin(encoder->dev, &iter);
+ drm_for_each_connector_iter(connector, &iter)
+ if (connector->encoder == encoder) {
+ drm_connector_list_iter_end(&iter);
+ return connector;
+ }
+ drm_connector_list_iter_end(&iter);
+
+ return NULL;
+}
+
+static int sun4i_tcon_get_pixel_depth(const struct drm_encoder *encoder)
+{
+ struct drm_connector *connector;
+ struct drm_display_info *info;
+
+ connector = sun4i_tcon_get_connector(encoder);
+ if (!connector)
+ return -EINVAL;
+
+ info = &connector->display_info;
+ if (info->num_bus_formats != 1)
+ return -EINVAL;
+
+ switch (info->bus_formats[0]) {
+ case MEDIA_BUS_FMT_RGB666_1X7X3_SPWG:
+ return 18;
+
+ case MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA:
+ case MEDIA_BUS_FMT_RGB888_1X7X4_SPWG:
+ return 24;
+ }
+
+ return -EINVAL;
+}
+
static void sun4i_tcon_channel_set_status(struct sun4i_tcon *tcon, int channel,
bool enabled)
{
@@ -65,13 +107,63 @@ static void sun4i_tcon_channel_set_status(struct sun4i_tcon *tcon, int channel,
clk_disable_unprepare(clk);
}
+static void sun4i_tcon_lvds_set_status(struct sun4i_tcon *tcon,
+ const struct drm_encoder *encoder,
+ bool enabled)
+{
+ if (enabled) {
+ u8 val;
+
+ regmap_update_bits(tcon->regs, SUN4I_TCON0_LVDS_IF_REG,
+ SUN4I_TCON0_LVDS_IF_EN,
+ SUN4I_TCON0_LVDS_IF_EN);
+
+ /*
+ * As their names suggest, these values only apply to the A31
+ * and later SoCs. We'll have to rework this when merging
+ * support for the older SoCs.
+ */
+ regmap_write(tcon->regs, SUN4I_TCON0_LVDS_ANA0_REG,
+ SUN6I_TCON0_LVDS_ANA0_C(2) |
+ SUN6I_TCON0_LVDS_ANA0_V(3) |
+ SUN6I_TCON0_LVDS_ANA0_PD(2) |
+ SUN6I_TCON0_LVDS_ANA0_EN_LDO);
+ udelay(2);
+
+ regmap_update_bits(tcon->regs, SUN4I_TCON0_LVDS_ANA0_REG,
+ SUN6I_TCON0_LVDS_ANA0_EN_MB,
+ SUN6I_TCON0_LVDS_ANA0_EN_MB);
+ udelay(2);
+
+ regmap_update_bits(tcon->regs, SUN4I_TCON0_LVDS_ANA0_REG,
+ SUN6I_TCON0_LVDS_ANA0_EN_DRVC,
+ SUN6I_TCON0_LVDS_ANA0_EN_DRVC);
+
+ if (sun4i_tcon_get_pixel_depth(encoder) == 18)
+ val = 7;
+ else
+ val = 0xf;
+
+ regmap_write_bits(tcon->regs, SUN4I_TCON0_LVDS_ANA0_REG,
+ SUN6I_TCON0_LVDS_ANA0_EN_DRVD(0xf),
+ SUN6I_TCON0_LVDS_ANA0_EN_DRVD(val));
+ } else {
+ regmap_update_bits(tcon->regs, SUN4I_TCON0_LVDS_IF_REG,
+ SUN4I_TCON0_LVDS_IF_EN, 0);
+ }
+}
+
void sun4i_tcon_set_status(struct sun4i_tcon *tcon,
const struct drm_encoder *encoder,
bool enabled)
{
+ bool is_lvds = false;
int channel;
switch (encoder->encoder_type) {
+ case DRM_MODE_ENCODER_LVDS:
+ is_lvds = true;
+ /* Fallthrough */
case DRM_MODE_ENCODER_NONE:
channel = 0;
break;
@@ -84,10 +176,16 @@ void sun4i_tcon_set_status(struct sun4i_tcon *tcon,
return;
}
+ if (is_lvds && !enabled)
+ sun4i_tcon_lvds_set_status(tcon, encoder, false);
+
regmap_update_bits(tcon->regs, SUN4I_TCON_GCTL_REG,
SUN4I_TCON_GCTL_TCON_ENABLE,
enabled ? SUN4I_TCON_GCTL_TCON_ENABLE : 0);
+ if (is_lvds && enabled)
+ sun4i_tcon_lvds_set_status(tcon, encoder, true);
+
sun4i_tcon_channel_set_status(tcon, channel, enabled);
}
@@ -170,6 +268,75 @@ static void sun4i_tcon0_mode_set_common(struct sun4i_tcon *tcon,
SUN4I_TCON0_BASIC0_Y(mode->crtc_vdisplay));
}
+static void sun4i_tcon0_mode_set_lvds(struct sun4i_tcon *tcon,
+ const struct drm_encoder *encoder,
+ const struct drm_display_mode *mode)
+{
+ unsigned int bp;
+ u8 clk_delay;
+ u32 reg, val = 0;
+
+ tcon->dclk_min_div = 7;
+ tcon->dclk_max_div = 7;
+ sun4i_tcon0_mode_set_common(tcon, mode);
+
+ /* Adjust clock delay */
+ clk_delay = sun4i_tcon_get_clk_delay(mode, 0);
+ regmap_update_bits(tcon->regs, SUN4I_TCON0_CTL_REG,
+ SUN4I_TCON0_CTL_CLK_DELAY_MASK,
+ SUN4I_TCON0_CTL_CLK_DELAY(clk_delay));
+
+ /*
+ * This is called a backporch in the register documentation,
+ * but it really is the back porch + hsync
+ */
+ bp = mode->crtc_htotal - mode->crtc_hsync_start;
+ DRM_DEBUG_DRIVER("Setting horizontal total %d, backporch %d\n",
+ mode->crtc_htotal, bp);
+
+ /* Set horizontal display timings */
+ regmap_write(tcon->regs, SUN4I_TCON0_BASIC1_REG,
+ SUN4I_TCON0_BASIC1_H_TOTAL(mode->htotal) |
+ SUN4I_TCON0_BASIC1_H_BACKPORCH(bp));
+
+ /*
+ * This is called a backporch in the register documentation,
+ * but it really is the back porch + hsync
+ */
+ bp = mode->crtc_vtotal - mode->crtc_vsync_start;
+ DRM_DEBUG_DRIVER("Setting vertical total %d, backporch %d\n",
+ mode->crtc_vtotal, bp);
+
+ /* Set vertical display timings */
+ regmap_write(tcon->regs, SUN4I_TCON0_BASIC2_REG,
+ SUN4I_TCON0_BASIC2_V_TOTAL(mode->crtc_vtotal * 2) |
+ SUN4I_TCON0_BASIC2_V_BACKPORCH(bp));
+
+ reg = SUN4I_TCON0_LVDS_IF_CLK_SEL_TCON0 |
+ SUN4I_TCON0_LVDS_IF_DATA_POL_NORMAL |
+ SUN4I_TCON0_LVDS_IF_CLK_POL_NORMAL;
+ if (sun4i_tcon_get_pixel_depth(encoder) == 24)
+ reg |= SUN4I_TCON0_LVDS_IF_BITWIDTH_24BITS;
+ else
+ reg |= SUN4I_TCON0_LVDS_IF_BITWIDTH_18BITS;
+
+ regmap_write(tcon->regs, SUN4I_TCON0_LVDS_IF_REG, reg);
+
+ /* Setup the polarity of the various signals */
+ if (!(mode->flags & DRM_MODE_FLAG_PHSYNC))
+ val |= SUN4I_TCON0_IO_POL_HSYNC_POSITIVE;
+
+ if (!(mode->flags & DRM_MODE_FLAG_PVSYNC))
+ val |= SUN4I_TCON0_IO_POL_VSYNC_POSITIVE;
+
+ regmap_write(tcon->regs, SUN4I_TCON0_IO_POL_REG, val);
+
+ /* Map output pins to channel 0 */
+ regmap_update_bits(tcon->regs, SUN4I_TCON_GCTL_REG,
+ SUN4I_TCON_GCTL_IOMAP_MASK,
+ SUN4I_TCON_GCTL_IOMAP_TCON0);
+}
+
static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon,
const struct drm_display_mode *mode)
{
@@ -177,6 +344,8 @@ static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon,
u8 clk_delay;
u32 val = 0;
+ tcon->dclk_min_div = 6;
+ tcon->dclk_max_div = 127;
sun4i_tcon0_mode_set_common(tcon, mode);
/* Adjust clock delay */
@@ -334,6 +503,9 @@ void sun4i_tcon_mode_set(struct sun4i_tcon *tcon,
const struct drm_display_mode *mode)
{
switch (encoder->encoder_type) {
+ case DRM_MODE_ENCODER_LVDS:
+ sun4i_tcon0_mode_set_lvds(tcon, encoder, mode);
+ break;
case DRM_MODE_ENCODER_NONE:
sun4i_tcon0_mode_set_rgb(tcon, mode);
sun4i_tcon_set_mux(tcon, 0, encoder);
@@ -665,7 +837,9 @@ static int sun4i_tcon_bind(struct device *dev, struct device *master,
struct drm_device *drm = data;
struct sun4i_drv *drv = drm->dev_private;
struct sunxi_engine *engine;
+ struct device_node *remote;
struct sun4i_tcon *tcon;
+ bool has_lvds_rst, has_lvds_alt, can_lvds;
int ret;
engine = sun4i_tcon_find_engine(drv, dev->of_node);
@@ -696,6 +870,54 @@ static int sun4i_tcon_bind(struct device *dev, struct device *master,
return ret;
}
+ /*
+ * This can only be made optional since we've had DT nodes
+ * without the LVDS reset properties.
+ *
+ * If the property is missing, just disable LVDS, and print a
+ * warning.
+ */
+ tcon->lvds_rst = devm_reset_control_get_optional(dev, "lvds");
+ if (IS_ERR(tcon->lvds_rst)) {
+ dev_err(dev, "Couldn't get our reset line\n");
+ return PTR_ERR(tcon->lvds_rst);
+ } else if (tcon->lvds_rst) {
+ has_lvds_rst = true;
+ reset_control_reset(tcon->lvds_rst);
+ } else {
+ has_lvds_rst = false;
+ }
+
+ /*
+ * This can only be made optional since we've had DT nodes
+ * without the LVDS PLL clock properties.
+ *
+ * If the property is missing, just disable LVDS, and print a
+ * warning.
+ */
+ if (tcon->quirks->has_lvds_alt) {
+ tcon->lvds_pll = devm_clk_get(dev, "lvds-alt");
+ if (IS_ERR(tcon->lvds_pll)) {
+ if (PTR_ERR(tcon->lvds_pll) == -ENOENT) {
+ has_lvds_alt = false;
+ } else {
+ dev_err(dev, "Couldn't get the LVDS PLL\n");
+ return PTR_ERR(tcon->lvds_pll);
+ }
+ } else {
+ has_lvds_alt = true;
+ }
+ }
+
+ if (!has_lvds_rst || (tcon->quirks->has_lvds_alt && !has_lvds_alt)) {
+ dev_warn(dev,
+ "Missing LVDS properties, Please upgrade your DT\n");
+ dev_warn(dev, "LVDS output disabled\n");
+ can_lvds = false;
+ } else {
+ can_lvds = true;
+ }
+
ret = sun4i_tcon_init_clocks(dev, tcon);
if (ret) {
dev_err(dev, "Couldn't init our TCON clocks\n");
@@ -727,7 +949,21 @@ static int sun4i_tcon_bind(struct device *dev, struct device *master,
goto err_free_clocks;
}
- ret = sun4i_rgb_init(drm, tcon);
+ /*
+ * If we have an LVDS panel connected to the TCON, we should
+ * just probe the LVDS connector. Otherwise, just probe RGB as
+ * we used to.
+ */
+ remote = of_graph_get_remote_node(dev->of_node, 1, 0);
+ if (of_device_is_compatible(remote, "panel-lvds"))
+ if (can_lvds)
+ ret = sun4i_lvds_init(drm, tcon);
+ else
+ ret = -EINVAL;
+ else
+ ret = sun4i_rgb_init(drm, tcon);
+ of_node_put(remote);
+
if (ret < 0)
goto err_free_clocks;
@@ -877,6 +1113,7 @@ static const struct sun4i_tcon_quirks sun5i_a13_quirks = {
static const struct sun4i_tcon_quirks sun6i_a31_quirks = {
.has_channel_1 = true,
+ .has_lvds_alt = true,
.needs_de_be_mux = true,
.set_mux = sun6i_tcon_set_mux,
};
@@ -893,6 +1130,10 @@ static const struct sun4i_tcon_quirks sun7i_a20_quirks = {
};
static const struct sun4i_tcon_quirks sun8i_a33_quirks = {
+ .has_lvds_alt = true,
+};
+
+static const struct sun4i_tcon_quirks sun8i_a83t_lcd_quirks = {
/* nothing is supported */
};
@@ -908,6 +1149,7 @@ const struct of_device_id sun4i_tcon_of_table[] = {
{ .compatible = "allwinner,sun6i-a31s-tcon", .data = &sun6i_a31s_quirks },
{ .compatible = "allwinner,sun7i-a20-tcon", .data = &sun7i_a20_quirks },
{ .compatible = "allwinner,sun8i-a33-tcon", .data = &sun8i_a33_quirks },
+ { .compatible = "allwinner,sun8i-a83t-tcon-lcd", .data = &sun8i_a83t_lcd_quirks },
{ .compatible = "allwinner,sun8i-v3s-tcon", .data = &sun8i_v3s_quirks },
{ }
};
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.h b/drivers/gpu/drm/sun4i/sun4i_tcon.h
index 839266a38505..b761c7b823c5 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.h
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.h
@@ -70,7 +70,21 @@
#define SUN4I_TCON0_TTL2_REG 0x78
#define SUN4I_TCON0_TTL3_REG 0x7c
#define SUN4I_TCON0_TTL4_REG 0x80
+
#define SUN4I_TCON0_LVDS_IF_REG 0x84
+#define SUN4I_TCON0_LVDS_IF_EN BIT(31)
+#define SUN4I_TCON0_LVDS_IF_BITWIDTH_MASK BIT(26)
+#define SUN4I_TCON0_LVDS_IF_BITWIDTH_18BITS (1 << 26)
+#define SUN4I_TCON0_LVDS_IF_BITWIDTH_24BITS (0 << 26)
+#define SUN4I_TCON0_LVDS_IF_CLK_SEL_MASK BIT(20)
+#define SUN4I_TCON0_LVDS_IF_CLK_SEL_TCON0 (1 << 20)
+#define SUN4I_TCON0_LVDS_IF_CLK_POL_MASK BIT(4)
+#define SUN4I_TCON0_LVDS_IF_CLK_POL_NORMAL (1 << 4)
+#define SUN4I_TCON0_LVDS_IF_CLK_POL_INV (0 << 4)
+#define SUN4I_TCON0_LVDS_IF_DATA_POL_MASK GENMASK(3, 0)
+#define SUN4I_TCON0_LVDS_IF_DATA_POL_NORMAL (0xf)
+#define SUN4I_TCON0_LVDS_IF_DATA_POL_INV (0)
+
#define SUN4I_TCON0_IO_POL_REG 0x88
#define SUN4I_TCON0_IO_POL_DCLK_PHASE(phase) ((phase & 3) << 28)
#define SUN4I_TCON0_IO_POL_HSYNC_POSITIVE BIT(25)
@@ -131,6 +145,16 @@
#define SUN4I_TCON_CEU_RANGE_G_REG 0x144
#define SUN4I_TCON_CEU_RANGE_B_REG 0x148
#define SUN4I_TCON_MUX_CTRL_REG 0x200
+
+#define SUN4I_TCON0_LVDS_ANA0_REG 0x220
+#define SUN6I_TCON0_LVDS_ANA0_EN_MB BIT(31)
+#define SUN6I_TCON0_LVDS_ANA0_EN_LDO BIT(30)
+#define SUN6I_TCON0_LVDS_ANA0_EN_DRVC BIT(24)
+#define SUN6I_TCON0_LVDS_ANA0_EN_DRVD(x) (((x) & 0xf) << 20)
+#define SUN6I_TCON0_LVDS_ANA0_C(x) (((x) & 3) << 17)
+#define SUN6I_TCON0_LVDS_ANA0_V(x) (((x) & 3) << 8)
+#define SUN6I_TCON0_LVDS_ANA0_PD(x) (((x) & 3) << 4)
+
#define SUN4I_TCON1_FILL_CTL_REG 0x300
#define SUN4I_TCON1_FILL_BEG0_REG 0x304
#define SUN4I_TCON1_FILL_END0_REG 0x308
@@ -149,6 +173,7 @@ struct sun4i_tcon;
struct sun4i_tcon_quirks {
bool has_channel_1; /* a33 does not have channel 1 */
+ bool has_lvds_alt; /* Does the LVDS clock have a parent other than the TCON clock? */
bool needs_de_be_mux; /* sun6i needs mux to select backend */
/* callback to handle tcon muxing options */
@@ -167,11 +192,17 @@ struct sun4i_tcon {
struct clk *sclk0;
struct clk *sclk1;
+ /* Possible mux for the LVDS clock */
+ struct clk *lvds_pll;
+
/* Pixel clock */
struct clk *dclk;
+ u8 dclk_max_div;
+ u8 dclk_min_div;
/* Reset control */
struct reset_control *lcd_rst;
+ struct reset_control *lvds_rst;
struct drm_panel *panel;
diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.c b/drivers/gpu/drm/sun4i/sun8i_mixer.c
index 29ceeb016d72..2cbb2de6d39c 100644
--- a/drivers/gpu/drm/sun4i/sun8i_mixer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_mixer.c
@@ -398,6 +398,15 @@ static int sun8i_mixer_bind(struct device *dev, struct device *master,
ret = PTR_ERR(mixer->mod_clk);
goto err_disable_bus_clk;
}
+
+ /*
+ * It seems we need to enforce that rate for the mixer to be
+ * functional, though the reason is unclear. Make sure it is
+ * the case.
+ */
+ if (mixer->cfg->mod_rate)
+ clk_set_rate(mixer->mod_clk, mixer->cfg->mod_rate);
+
clk_prepare_enable(mixer->mod_clk);
list_add_tail(&mixer->engine.list, &drv->engine_list);
@@ -469,15 +478,27 @@ static int sun8i_mixer_remove(struct platform_device *pdev)
return 0;
}
+static const struct sun8i_mixer_cfg sun8i_a83t_mixer0_cfg = {
+ .ccsc = 0,
+ .scaler_mask = 0xf,
+ .ui_num = 3,
+ .vi_num = 1,
+};
+
static const struct sun8i_mixer_cfg sun8i_v3s_mixer_cfg = {
.vi_num = 2,
.ui_num = 1,
.scaler_mask = 0x3,
.ccsc = 0,
+ .mod_rate = 150000000,
};
static const struct of_device_id sun8i_mixer_of_table[] = {
{
+ .compatible = "allwinner,sun8i-a83t-de2-mixer-0",
+ .data = &sun8i_a83t_mixer0_cfg,
+ },
+ {
.compatible = "allwinner,sun8i-v3s-de2-mixer",
.data = &sun8i_v3s_mixer_cfg,
},
diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.h b/drivers/gpu/drm/sun4i/sun8i_mixer.h
index bc58040a88f9..f34e70c42adf 100644
--- a/drivers/gpu/drm/sun4i/sun8i_mixer.h
+++ b/drivers/gpu/drm/sun4i/sun8i_mixer.h
@@ -121,12 +121,15 @@ struct de2_fmt_info {
* Set value to 0 if this is first mixer or second mixer with VEP support.
* Set value to 1 if this is second mixer without VEP support. Other values
* are invalid.
+ * @mod_rate: module clock rate that needs to be set in order to have
+ * a functional block.
*/
struct sun8i_mixer_cfg {
int vi_num;
int ui_num;
int scaler_mask;
int ccsc;
+ unsigned long mod_rate;
};
struct sun8i_mixer {
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index ff931d3508a9..d50bddb2e447 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -88,7 +88,7 @@ static void tegra_atomic_state_free(struct drm_atomic_state *state)
static const struct drm_mode_config_funcs tegra_drm_mode_config_funcs = {
.fb_create = tegra_fb_create,
#ifdef CONFIG_DRM_FBDEV_EMULATION
- .output_poll_changed = tegra_fb_output_poll_changed,
+ .output_poll_changed = drm_fb_helper_output_poll_changed,
#endif
.atomic_check = tegra_atomic_check,
.atomic_commit = drm_atomic_helper_commit,
@@ -287,15 +287,6 @@ static void tegra_drm_context_free(struct tegra_drm_context *context)
kfree(context);
}
-static void tegra_drm_lastclose(struct drm_device *drm)
-{
-#ifdef CONFIG_DRM_FBDEV_EMULATION
- struct tegra_drm *tegra = drm->dev_private;
-
- tegra_fbdev_restore_mode(tegra->fbdev);
-#endif
-}
-
static struct host1x_bo *
host1x_bo_lookup(struct drm_file *file, u32 handle)
{
@@ -1102,7 +1093,7 @@ static struct drm_driver tegra_drm_driver = {
.unload = tegra_drm_unload,
.open = tegra_drm_open,
.postclose = tegra_drm_postclose,
- .lastclose = tegra_drm_lastclose,
+ .lastclose = drm_fb_helper_lastclose,
#if defined(CONFIG_DEBUG_FS)
.debugfs_init = tegra_debugfs_init,
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
index da3d8c141aee..73b661ce7086 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -203,10 +203,6 @@ int tegra_drm_fb_init(struct drm_device *drm);
void tegra_drm_fb_exit(struct drm_device *drm);
void tegra_drm_fb_suspend(struct drm_device *drm);
void tegra_drm_fb_resume(struct drm_device *drm);
-#ifdef CONFIG_DRM_FBDEV_EMULATION
-void tegra_fbdev_restore_mode(struct tegra_fbdev *fbdev);
-void tegra_fb_output_poll_changed(struct drm_device *drm);
-#endif
extern struct platform_driver tegra_display_hub_driver;
extern struct platform_driver tegra_dc_driver;
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
index e05fde7172f8..001cb77e2f59 100644
--- a/drivers/gpu/drm/tegra/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -385,20 +385,6 @@ static void tegra_fbdev_exit(struct tegra_fbdev *fbdev)
drm_fb_helper_fini(&fbdev->base);
tegra_fbdev_free(fbdev);
}
-
-void tegra_fbdev_restore_mode(struct tegra_fbdev *fbdev)
-{
- if (fbdev)
- drm_fb_helper_restore_fbdev_mode_unlocked(&fbdev->base);
-}
-
-void tegra_fb_output_poll_changed(struct drm_device *drm)
-{
- struct tegra_drm *tegra = drm->dev_private;
-
- if (tegra->fbdev)
- drm_fb_helper_hotplug_event(&tegra->fbdev->base);
-}
#endif
int tegra_drm_fb_prepare(struct drm_device *drm)
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
index 6ef4d1a1e3a9..8bf6bb93dc79 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
@@ -51,12 +51,8 @@ struct tilcdc_crtc {
ktime_t last_vblank;
unsigned int hvtotal_us;
- struct drm_framebuffer *curr_fb;
struct drm_framebuffer *next_fb;
- /* for deferred fb unref's: */
- struct drm_flip_work unref_work;
-
/* Only set if an external encoder is connected */
bool simulate_vesa_sync;
@@ -70,20 +66,8 @@ struct tilcdc_crtc {
};
#define to_tilcdc_crtc(x) container_of(x, struct tilcdc_crtc, base)
-static void unref_worker(struct drm_flip_work *work, void *val)
-{
- struct tilcdc_crtc *tilcdc_crtc =
- container_of(work, struct tilcdc_crtc, unref_work);
- struct drm_device *dev = tilcdc_crtc->base.dev;
-
- mutex_lock(&dev->mode_config.mutex);
- drm_framebuffer_put(val);
- mutex_unlock(&dev->mode_config.mutex);
-}
-
static void set_scanout(struct drm_crtc *crtc, struct drm_framebuffer *fb)
{
- struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct tilcdc_drm_private *priv = dev->dev_private;
struct drm_gem_cma_object *gem;
@@ -108,12 +92,6 @@ static void set_scanout(struct drm_crtc *crtc, struct drm_framebuffer *fb)
dma_base_and_ceiling = (u64)end << 32 | start;
tilcdc_write64(dev, LCDC_DMA_FB_BASE_ADDR_0_REG, dma_base_and_ceiling);
-
- if (tilcdc_crtc->curr_fb)
- drm_flip_work_queue(&tilcdc_crtc->unref_work,
- tilcdc_crtc->curr_fb);
-
- tilcdc_crtc->curr_fb = fb;
}
/*
@@ -294,7 +272,7 @@ static void tilcdc_crtc_set_clk(struct drm_crtc *crtc)
LCDC_V2_CORE_CLK_EN);
}
-uint tilcdc_mode_hvtotal(const struct drm_display_mode *mode)
+static uint tilcdc_mode_hvtotal(const struct drm_display_mode *mode)
{
return (uint) div_u64(1000llu * mode->htotal * mode->vtotal,
mode->clock);
@@ -464,8 +442,6 @@ static void tilcdc_crtc_set_mode(struct drm_crtc *crtc)
set_scanout(crtc, fb);
- drm_framebuffer_get(fb);
-
crtc->hwmode = crtc->state->adjusted_mode;
tilcdc_crtc->hvtotal_us =
@@ -524,7 +500,6 @@ static void tilcdc_crtc_off(struct drm_crtc *crtc, bool shutdown)
{
struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct tilcdc_drm_private *priv = dev->dev_private;
int ret;
mutex_lock(&tilcdc_crtc->enable_lock);
@@ -554,20 +529,6 @@ static void tilcdc_crtc_off(struct drm_crtc *crtc, bool shutdown)
pm_runtime_put_sync(dev->dev);
- if (tilcdc_crtc->next_fb) {
- drm_flip_work_queue(&tilcdc_crtc->unref_work,
- tilcdc_crtc->next_fb);
- tilcdc_crtc->next_fb = NULL;
- }
-
- if (tilcdc_crtc->curr_fb) {
- drm_flip_work_queue(&tilcdc_crtc->unref_work,
- tilcdc_crtc->curr_fb);
- tilcdc_crtc->curr_fb = NULL;
- }
-
- drm_flip_work_commit(&tilcdc_crtc->unref_work, priv->wq);
-
tilcdc_crtc->enabled = false;
mutex_unlock(&tilcdc_crtc->enable_lock);
}
@@ -614,7 +575,6 @@ out:
static void tilcdc_crtc_destroy(struct drm_crtc *crtc)
{
- struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
struct tilcdc_drm_private *priv = crtc->dev->dev_private;
tilcdc_crtc_shutdown(crtc);
@@ -623,7 +583,6 @@ static void tilcdc_crtc_destroy(struct drm_crtc *crtc)
of_node_put(crtc->port);
drm_crtc_cleanup(crtc);
- drm_flip_work_cleanup(&tilcdc_crtc->unref_work);
}
int tilcdc_crtc_update_fb(struct drm_crtc *crtc,
@@ -638,9 +597,6 @@ int tilcdc_crtc_update_fb(struct drm_crtc *crtc,
return -EBUSY;
}
- drm_framebuffer_get(fb);
-
- crtc->primary->fb = fb;
tilcdc_crtc->event = event;
mutex_lock(&tilcdc_crtc->enable_lock);
@@ -936,8 +892,6 @@ irqreturn_t tilcdc_crtc_irq(struct drm_crtc *crtc)
now = ktime_get();
- drm_flip_work_commit(&tilcdc_crtc->unref_work, priv->wq);
-
spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags);
tilcdc_crtc->last_vblank = now;
@@ -1064,9 +1018,6 @@ int tilcdc_crtc_create(struct drm_device *dev)
init_waitqueue_head(&tilcdc_crtc->frame_done_wq);
- drm_flip_work_init(&tilcdc_crtc->unref_work,
- "unref", unref_worker);
-
spin_lock_init(&tilcdc_crtc->irq_lock);
INIT_WORK(&tilcdc_crtc->recover_work, tilcdc_crtc_recover_work);
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
index bc4feb3a84b9..1afde61f1247 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -69,12 +69,6 @@ static struct drm_framebuffer *tilcdc_fb_create(struct drm_device *dev,
return drm_gem_fb_create(dev, file_priv, mode_cmd);
}
-static void tilcdc_fb_output_poll_changed(struct drm_device *dev)
-{
- struct tilcdc_drm_private *priv = dev->dev_private;
- drm_fbdev_cma_hotplug_event(priv->fbdev);
-}
-
static int tilcdc_atomic_check(struct drm_device *dev,
struct drm_atomic_state *state)
{
@@ -146,7 +140,7 @@ static int tilcdc_commit(struct drm_device *dev,
static const struct drm_mode_config_funcs mode_config_funcs = {
.fb_create = tilcdc_fb_create,
- .output_poll_changed = tilcdc_fb_output_poll_changed,
+ .output_poll_changed = drm_fb_helper_output_poll_changed,
.atomic_check = tilcdc_atomic_check,
.atomic_commit = tilcdc_commit,
};
@@ -198,8 +192,7 @@ static void tilcdc_fini(struct drm_device *dev)
drm_kms_helper_poll_fini(dev);
- if (priv->fbdev)
- drm_fbdev_cma_fini(priv->fbdev);
+ drm_fb_cma_fbdev_fini(dev);
drm_irq_uninstall(dev);
drm_mode_config_cleanup(dev);
@@ -405,12 +398,9 @@ static int tilcdc_init(struct drm_driver *ddrv, struct device *dev)
drm_mode_config_reset(ddev);
- priv->fbdev = drm_fbdev_cma_init(ddev, bpp,
- ddev->mode_config.num_connector);
- if (IS_ERR(priv->fbdev)) {
- ret = PTR_ERR(priv->fbdev);
+ ret = drm_fb_cma_fbdev_init(ddev, bpp, 0);
+ if (ret)
goto init_failed;
- }
drm_kms_helper_poll_init(ddev);
@@ -427,12 +417,6 @@ init_failed:
return ret;
}
-static void tilcdc_lastclose(struct drm_device *dev)
-{
- struct tilcdc_drm_private *priv = dev->dev_private;
- drm_fbdev_cma_restore_mode(priv->fbdev);
-}
-
static irqreturn_t tilcdc_irq(int irq, void *arg)
{
struct drm_device *dev = arg;
@@ -537,7 +521,7 @@ DEFINE_DRM_GEM_CMA_FOPS(fops);
static struct drm_driver tilcdc_driver = {
.driver_features = (DRIVER_HAVE_IRQ | DRIVER_GEM | DRIVER_MODESET |
DRIVER_PRIME | DRIVER_ATOMIC),
- .lastclose = tilcdc_lastclose,
+ .lastclose = drm_fb_helper_lastclose,
.irq_handler = tilcdc_irq,
.gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_print_info = drm_gem_cma_print_info,
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.h b/drivers/gpu/drm/tilcdc/tilcdc_drv.h
index 8caa11bc7aec..ead512216669 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.h
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.h
@@ -79,8 +79,6 @@ struct tilcdc_drm_private {
struct workqueue_struct *wq;
- struct drm_fbdev_cma *fbdev;
-
struct drm_crtc *crtc;
unsigned int num_encoders;
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_regs.h b/drivers/gpu/drm/tilcdc/tilcdc_regs.h
index 9d528c0a67a4..5048ebb86835 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_regs.h
+++ b/drivers/gpu/drm/tilcdc/tilcdc_regs.h
@@ -133,7 +133,7 @@ static inline void tilcdc_write64(struct drm_device *dev, u32 reg, u64 data)
struct tilcdc_drm_private *priv = dev->dev_private;
volatile void __iomem *addr = priv->mmio + reg;
-#ifdef iowrite64
+#if defined(iowrite64) && !defined(iowrite64_is_nonatomic)
iowrite64(data, addr);
#else
__iowmb();
diff --git a/drivers/gpu/drm/tinydrm/Kconfig b/drivers/gpu/drm/tinydrm/Kconfig
index 90c5bd5ef81b..b0e567d416b3 100644
--- a/drivers/gpu/drm/tinydrm/Kconfig
+++ b/drivers/gpu/drm/tinydrm/Kconfig
@@ -52,3 +52,13 @@ config TINYDRM_ST7586
* LEGO MINDSTORMS EV3
If M is selected the module will be called st7586.
+
+config TINYDRM_ST7735R
+ tristate "DRM support for Sitronix ST7735R display panels"
+ depends on DRM_TINYDRM && SPI
+ select TINYDRM_MIPI_DBI
+ help
+ DRM driver for Sitronix ST7735R with one of the following LCDs:
+ * JD-T18003-T01 1.8" 128x160 TFT
+
+ If M is selected the module will be called st7735r.
diff --git a/drivers/gpu/drm/tinydrm/Makefile b/drivers/gpu/drm/tinydrm/Makefile
index 8aeee532474f..49a111929724 100644
--- a/drivers/gpu/drm/tinydrm/Makefile
+++ b/drivers/gpu/drm/tinydrm/Makefile
@@ -8,3 +8,4 @@ obj-$(CONFIG_TINYDRM_ILI9225) += ili9225.o
obj-$(CONFIG_TINYDRM_MI0283QT) += mi0283qt.o
obj-$(CONFIG_TINYDRM_REPAPER) += repaper.o
obj-$(CONFIG_TINYDRM_ST7586) += st7586.o
+obj-$(CONFIG_TINYDRM_ST7735R) += st7735r.o
diff --git a/drivers/gpu/drm/tinydrm/core/tinydrm-core.c b/drivers/gpu/drm/tinydrm/core/tinydrm-core.c
index bd7b82824a34..4c6616278c48 100644
--- a/drivers/gpu/drm/tinydrm/core/tinydrm-core.c
+++ b/drivers/gpu/drm/tinydrm/core/tinydrm-core.c
@@ -10,6 +10,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/tinydrm/tinydrm.h>
#include <linux/device.h>
@@ -36,23 +37,6 @@
*/
/**
- * tinydrm_lastclose - DRM lastclose helper
- * @drm: DRM device
- *
- * This function ensures that fbdev is restored when drm_lastclose() is called
- * on the last drm_release(). Drivers can use this as their
- * &drm_driver->lastclose callback.
- */
-void tinydrm_lastclose(struct drm_device *drm)
-{
- struct tinydrm_device *tdev = drm->dev_private;
-
- DRM_DEBUG_KMS("\n");
- drm_fbdev_cma_restore_mode(tdev->fbdev_cma);
-}
-EXPORT_SYMBOL(tinydrm_lastclose);
-
-/**
* tinydrm_gem_cma_prime_import_sg_table - Produce a CMA GEM object from
* another driver's scatter/gather table of pinned pages
* @drm: DRM device to import into
@@ -214,35 +198,24 @@ EXPORT_SYMBOL(devm_tinydrm_init);
static int tinydrm_register(struct tinydrm_device *tdev)
{
struct drm_device *drm = tdev->drm;
- int bpp = drm->mode_config.preferred_depth;
- struct drm_fbdev_cma *fbdev;
int ret;
ret = drm_dev_register(tdev->drm, 0);
if (ret)
return ret;
- fbdev = drm_fbdev_cma_init_with_funcs(drm, bpp ? bpp : 32,
- drm->mode_config.num_connector,
- tdev->fb_funcs);
- if (IS_ERR(fbdev))
- DRM_ERROR("Failed to initialize fbdev: %ld\n", PTR_ERR(fbdev));
- else
- tdev->fbdev_cma = fbdev;
+ ret = drm_fb_cma_fbdev_init_with_funcs(drm, 0, 0, tdev->fb_funcs);
+ if (ret)
+ DRM_ERROR("Failed to initialize fbdev: %d\n", ret);
return 0;
}
static void tinydrm_unregister(struct tinydrm_device *tdev)
{
- struct drm_fbdev_cma *fbdev_cma = tdev->fbdev_cma;
-
drm_atomic_helper_shutdown(tdev->drm);
- /* don't restore fbdev in lastclose, keep pipeline disabled */
- tdev->fbdev_cma = NULL;
+ drm_fb_cma_fbdev_fini(tdev->drm);
drm_dev_unregister(tdev->drm);
- if (fbdev_cma)
- drm_fbdev_cma_fini(fbdev_cma);
}
static void devm_tinydrm_register_release(void *data)
diff --git a/drivers/gpu/drm/tinydrm/ili9225.c b/drivers/gpu/drm/tinydrm/ili9225.c
index 3b766a26aa61..c0cf49849302 100644
--- a/drivers/gpu/drm/tinydrm/ili9225.c
+++ b/drivers/gpu/drm/tinydrm/ili9225.c
@@ -20,6 +20,7 @@
#include <linux/spi/spi.h>
#include <video/mipi_display.h>
+#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/tinydrm/mipi-dbi.h>
#include <drm/tinydrm/tinydrm-helpers.h>
@@ -381,7 +382,7 @@ static struct drm_driver ili9225_driver = {
DRIVER_ATOMIC,
.fops = &ili9225_fops,
TINYDRM_GEM_DRIVER_OPS,
- .lastclose = tinydrm_lastclose,
+ .lastclose = drm_fb_helper_lastclose,
.name = "ili9225",
.desc = "Ilitek ILI9225",
.date = "20171106",
@@ -390,13 +391,13 @@ static struct drm_driver ili9225_driver = {
};
static const struct of_device_id ili9225_of_match[] = {
- { .compatible = "ilitek,ili9225-2.2in-176x220" },
+ { .compatible = "vot,v220hf01a-t" },
{},
};
MODULE_DEVICE_TABLE(of, ili9225_of_match);
static const struct spi_device_id ili9225_id[] = {
- { "ili9225-2.2in-176x220", 0 },
+ { "v220hf01a-t", 0 },
{ },
};
MODULE_DEVICE_TABLE(spi, ili9225_id);
diff --git a/drivers/gpu/drm/tinydrm/mi0283qt.c b/drivers/gpu/drm/tinydrm/mi0283qt.c
index 70ae4f76f455..674d407640be 100644
--- a/drivers/gpu/drm/tinydrm/mi0283qt.c
+++ b/drivers/gpu/drm/tinydrm/mi0283qt.c
@@ -9,6 +9,7 @@
* (at your option) any later version.
*/
+#include <drm/drm_fb_helper.h>
#include <drm/drm_modeset_helper.h>
#include <drm/tinydrm/ili9341.h>
#include <drm/tinydrm/mipi-dbi.h>
@@ -140,7 +141,7 @@ static struct drm_driver mi0283qt_driver = {
DRIVER_ATOMIC,
.fops = &mi0283qt_fops,
TINYDRM_GEM_DRIVER_OPS,
- .lastclose = tinydrm_lastclose,
+ .lastclose = drm_fb_helper_lastclose,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "mi0283qt",
.desc = "Multi-Inno MI0283QT",
diff --git a/drivers/gpu/drm/tinydrm/st7586.c b/drivers/gpu/drm/tinydrm/st7586.c
index 0a2c60da5c0e..5aebfceb740e 100644
--- a/drivers/gpu/drm/tinydrm/st7586.c
+++ b/drivers/gpu/drm/tinydrm/st7586.c
@@ -17,6 +17,7 @@
#include <linux/spi/spi.h>
#include <video/mipi_display.h>
+#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/tinydrm/mipi-dbi.h>
#include <drm/tinydrm/tinydrm-helpers.h>
@@ -320,7 +321,7 @@ static struct drm_driver st7586_driver = {
DRIVER_ATOMIC,
.fops = &st7586_fops,
TINYDRM_GEM_DRIVER_OPS,
- .lastclose = tinydrm_lastclose,
+ .lastclose = drm_fb_helper_lastclose,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "st7586",
.desc = "Sitronix ST7586",
diff --git a/drivers/gpu/drm/tinydrm/st7735r.c b/drivers/gpu/drm/tinydrm/st7735r.c
new file mode 100644
index 000000000000..98ff447f40b4
--- /dev/null
+++ b/drivers/gpu/drm/tinydrm/st7735r.c
@@ -0,0 +1,215 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * DRM driver for Sitronix ST7735R panels
+ *
+ * Copyright 2017 David Lechner <david@lechnology.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/dma-buf.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/property.h>
+#include <linux/spi/spi.h>
+#include <video/mipi_display.h>
+
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/tinydrm/mipi-dbi.h>
+#include <drm/tinydrm/tinydrm-helpers.h>
+
+#define ST7735R_FRMCTR1 0xb1
+#define ST7735R_FRMCTR2 0xb2
+#define ST7735R_FRMCTR3 0xb3
+#define ST7735R_INVCTR 0xb4
+#define ST7735R_PWCTR1 0xc0
+#define ST7735R_PWCTR2 0xc1
+#define ST7735R_PWCTR3 0xc2
+#define ST7735R_PWCTR4 0xc3
+#define ST7735R_PWCTR5 0xc4
+#define ST7735R_VMCTR1 0xc5
+#define ST7735R_GAMCTRP1 0xe0
+#define ST7735R_GAMCTRN1 0xe1
+
+#define ST7735R_MY BIT(7)
+#define ST7735R_MX BIT(6)
+#define ST7735R_MV BIT(5)
+
+static void jd_t18003_t01_pipe_enable(struct drm_simple_display_pipe *pipe,
+ struct drm_crtc_state *crtc_state)
+{
+ struct tinydrm_device *tdev = pipe_to_tinydrm(pipe);
+ struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev);
+ struct device *dev = tdev->drm->dev;
+ int ret;
+ u8 addr_mode;
+
+ DRM_DEBUG_KMS("\n");
+
+ mipi_dbi_hw_reset(mipi);
+
+ ret = mipi_dbi_command(mipi, MIPI_DCS_SOFT_RESET);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "Error sending command %d\n", ret);
+ return;
+ }
+
+ msleep(150);
+
+ mipi_dbi_command(mipi, MIPI_DCS_EXIT_SLEEP_MODE);
+ msleep(500);
+
+ mipi_dbi_command(mipi, ST7735R_FRMCTR1, 0x01, 0x2c, 0x2d);
+ mipi_dbi_command(mipi, ST7735R_FRMCTR2, 0x01, 0x2c, 0x2d);
+ mipi_dbi_command(mipi, ST7735R_FRMCTR3, 0x01, 0x2c, 0x2d, 0x01, 0x2c,
+ 0x2d);
+ mipi_dbi_command(mipi, ST7735R_INVCTR, 0x07);
+ mipi_dbi_command(mipi, ST7735R_PWCTR1, 0xa2, 0x02, 0x84);
+ mipi_dbi_command(mipi, ST7735R_PWCTR2, 0xc5);
+ mipi_dbi_command(mipi, ST7735R_PWCTR3, 0x0a, 0x00);
+ mipi_dbi_command(mipi, ST7735R_PWCTR4, 0x8a, 0x2a);
+ mipi_dbi_command(mipi, ST7735R_PWCTR5, 0x8a, 0xee);
+ mipi_dbi_command(mipi, ST7735R_VMCTR1, 0x0e);
+ mipi_dbi_command(mipi, MIPI_DCS_EXIT_INVERT_MODE);
+ switch (mipi->rotation) {
+ default:
+ addr_mode = ST7735R_MX | ST7735R_MY;
+ break;
+ case 90:
+ addr_mode = ST7735R_MX | ST7735R_MV;
+ break;
+ case 180:
+ addr_mode = 0;
+ break;
+ case 270:
+ addr_mode = ST7735R_MY | ST7735R_MV;
+ break;
+ }
+ mipi_dbi_command(mipi, MIPI_DCS_SET_ADDRESS_MODE, addr_mode);
+ mipi_dbi_command(mipi, MIPI_DCS_SET_PIXEL_FORMAT,
+ MIPI_DCS_PIXEL_FMT_16BIT);
+ mipi_dbi_command(mipi, ST7735R_GAMCTRP1, 0x02, 0x1c, 0x07, 0x12, 0x37,
+ 0x32, 0x29, 0x2d, 0x29, 0x25, 0x2b, 0x39, 0x00, 0x01,
+ 0x03, 0x10);
+ mipi_dbi_command(mipi, ST7735R_GAMCTRN1, 0x03, 0x1d, 0x07, 0x06, 0x2e,
+ 0x2c, 0x29, 0x2d, 0x2e, 0x2e, 0x37, 0x3f, 0x00, 0x00,
+ 0x02, 0x10);
+ mipi_dbi_command(mipi, MIPI_DCS_SET_DISPLAY_ON);
+
+ msleep(100);
+
+ mipi_dbi_command(mipi, MIPI_DCS_ENTER_NORMAL_MODE);
+
+ msleep(20);
+
+ mipi_dbi_pipe_enable(pipe, crtc_state);
+}
+
+static const struct drm_simple_display_pipe_funcs jd_t18003_t01_pipe_funcs = {
+ .enable = jd_t18003_t01_pipe_enable,
+ .disable = mipi_dbi_pipe_disable,
+ .update = tinydrm_display_pipe_update,
+ .prepare_fb = tinydrm_display_pipe_prepare_fb,
+};
+
+static const struct drm_display_mode jd_t18003_t01_mode = {
+ TINYDRM_MODE(128, 160, 28, 35),
+};
+
+DEFINE_DRM_GEM_CMA_FOPS(st7735r_fops);
+
+static struct drm_driver st7735r_driver = {
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME |
+ DRIVER_ATOMIC,
+ .fops = &st7735r_fops,
+ TINYDRM_GEM_DRIVER_OPS,
+ .lastclose = drm_fb_helper_lastclose,
+ .debugfs_init = mipi_dbi_debugfs_init,
+ .name = "st7735r",
+ .desc = "Sitronix ST7735R",
+ .date = "20171128",
+ .major = 1,
+ .minor = 0,
+};
+
+static const struct of_device_id st7735r_of_match[] = {
+ { .compatible = "jianda,jd-t18003-t01" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, st7735r_of_match);
+
+static const struct spi_device_id st7735r_id[] = {
+ { "jd-t18003-t01", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(spi, st7735r_id);
+
+static int st7735r_probe(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ struct mipi_dbi *mipi;
+ struct gpio_desc *dc;
+ u32 rotation = 0;
+ int ret;
+
+ mipi = devm_kzalloc(dev, sizeof(*mipi), GFP_KERNEL);
+ if (!mipi)
+ return -ENOMEM;
+
+ mipi->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(mipi->reset)) {
+ DRM_DEV_ERROR(dev, "Failed to get gpio 'reset'\n");
+ return PTR_ERR(mipi->reset);
+ }
+
+ dc = devm_gpiod_get(dev, "dc", GPIOD_OUT_LOW);
+ if (IS_ERR(dc)) {
+ DRM_DEV_ERROR(dev, "Failed to get gpio 'dc'\n");
+ return PTR_ERR(dc);
+ }
+
+ mipi->backlight = tinydrm_of_find_backlight(dev);
+ if (IS_ERR(mipi->backlight))
+ return PTR_ERR(mipi->backlight);
+
+ device_property_read_u32(dev, "rotation", &rotation);
+
+ ret = mipi_dbi_spi_init(spi, mipi, dc);
+ if (ret)
+ return ret;
+
+ /* Cannot read from Adafruit 1.8" display via SPI */
+ mipi->read_commands = NULL;
+
+ ret = mipi_dbi_init(&spi->dev, mipi, &jd_t18003_t01_pipe_funcs,
+ &st7735r_driver, &jd_t18003_t01_mode, rotation);
+ if (ret)
+ return ret;
+
+ spi_set_drvdata(spi, mipi);
+
+ return devm_tinydrm_register(&mipi->tinydrm);
+}
+
+static void st7735r_shutdown(struct spi_device *spi)
+{
+ struct mipi_dbi *mipi = spi_get_drvdata(spi);
+
+ tinydrm_shutdown(&mipi->tinydrm);
+}
+
+static struct spi_driver st7735r_spi_driver = {
+ .driver = {
+ .name = "st7735r",
+ .owner = THIS_MODULE,
+ .of_match_table = st7735r_of_match,
+ },
+ .id_table = st7735r_id,
+ .probe = st7735r_probe,
+ .shutdown = st7735r_shutdown,
+};
+module_spi_driver(st7735r_spi_driver);
+
+MODULE_DESCRIPTION("Sitronix ST7735R DRM driver");
+MODULE_AUTHOR("David Lechner <david@lechnology.com>");
+MODULE_LICENSE("GPL");
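The long init sequence above relies on the variadic mipi_dbi_command() helper.
As a rough illustration (not part of the patch), each such call wraps the
parameter bytes into a stack buffer and hands them to mipi_dbi_command_buf(),
so the FRMCTR1 setup is roughly equivalent to:

	/* Sketch only: what mipi_dbi_command(mipi, ST7735R_FRMCTR1, 0x01, 0x2c,
	 * 0x2d) boils down to. The DC gpio requested in probe is what lets the
	 * helper send the command byte and the parameter bytes with the right
	 * data/command signalling.
	 */
	u8 frmctr1_data[] = { 0x01, 0x2c, 0x2d };

	ret = mipi_dbi_command_buf(mipi, ST7735R_FRMCTR1, frmctr1_data,
				   sizeof(frmctr1_data));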
diff --git a/drivers/gpu/drm/ttm/ttm_agp_backend.c b/drivers/gpu/drm/ttm/ttm_agp_backend.c
index 028ab6007873..3e795a099d06 100644
--- a/drivers/gpu/drm/ttm/ttm_agp_backend.c
+++ b/drivers/gpu/drm/ttm/ttm_agp_backend.c
@@ -133,12 +133,12 @@ struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev,
}
EXPORT_SYMBOL(ttm_agp_tt_create);
-int ttm_agp_tt_populate(struct ttm_tt *ttm)
+int ttm_agp_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
if (ttm->state != tt_unpopulated)
return 0;
- return ttm_pool_populate(ttm);
+ return ttm_pool_populate(ttm, ctx);
}
EXPORT_SYMBOL(ttm_agp_tt_populate);
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 97c3da6d5f17..2eb71ffe95a6 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -42,11 +42,6 @@
#include <linux/atomic.h>
#include <linux/reservation.h>
-#define TTM_ASSERT_LOCKED(param)
-#define TTM_DEBUG(fmt, arg...)
-#define TTM_BO_HASH_ORDER 13
-
-static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
static void ttm_bo_global_kobj_release(struct kobject *kobj);
static struct attribute ttm_bo_count = {
@@ -165,7 +160,7 @@ void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_mem_type_manager *man;
- lockdep_assert_held(&bo->resv->lock.base);
+ reservation_object_assert_held(bo->resv);
if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
@@ -216,7 +211,7 @@ EXPORT_SYMBOL(ttm_bo_del_sub_from_lru);
void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo)
{
- lockdep_assert_held(&bo->resv->lock.base);
+ reservation_object_assert_held(bo->resv);
ttm_bo_del_from_lru(bo);
ttm_bo_add_to_lru(bo);
@@ -233,7 +228,7 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
int ret = 0;
uint32_t page_flags = 0;
- TTM_ASSERT_LOCKED(&bo->mutex);
+ reservation_object_assert_held(bo->resv);
bo->ttm = NULL;
if (bdev->need_dma32)
@@ -305,7 +300,7 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
goto out_err;
if (mem->mem_type != TTM_PL_SYSTEM) {
- ret = ttm_tt_bind(bo->ttm, mem);
+ ret = ttm_tt_bind(bo->ttm, mem, ctx);
if (ret)
goto out_err;
}
@@ -324,13 +319,11 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
- ret = ttm_bo_move_ttm(bo, ctx->interruptible,
- ctx->no_wait_gpu, mem);
+ ret = ttm_bo_move_ttm(bo, ctx, mem);
else if (bdev->driver->move)
ret = bdev->driver->move(bo, evict, ctx, mem);
else
- ret = ttm_bo_move_memcpy(bo, ctx->interruptible,
- ctx->no_wait_gpu, mem);
+ ret = ttm_bo_move_memcpy(bo, ctx, mem);
if (ret) {
if (bdev->driver->move_notify) {
@@ -588,12 +581,19 @@ static bool ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
ddestroy);
kref_get(&bo->list_kref);
list_move_tail(&bo->ddestroy, &removed);
- spin_unlock(&glob->lru_lock);
- reservation_object_lock(bo->resv, NULL);
+ if (remove_all || bo->resv != &bo->ttm_resv) {
+ spin_unlock(&glob->lru_lock);
+ reservation_object_lock(bo->resv, NULL);
- spin_lock(&glob->lru_lock);
- ttm_bo_cleanup_refs(bo, false, !remove_all, true);
+ spin_lock(&glob->lru_lock);
+ ttm_bo_cleanup_refs(bo, false, !remove_all, true);
+
+ } else if (reservation_object_trylock(bo->resv)) {
+ ttm_bo_cleanup_refs(bo, false, !remove_all, true);
+ } else {
+ spin_unlock(&glob->lru_lock);
+ }
kref_put(&bo->list_kref, ttm_bo_release_list);
spin_lock(&glob->lru_lock);
@@ -662,7 +662,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo,
struct ttm_placement placement;
int ret = 0;
- lockdep_assert_held(&bo->resv->lock.base);
+ reservation_object_assert_held(bo->resv);
evict_mem = bo->mem;
evict_mem.mm_node = NULL;
@@ -708,8 +708,35 @@ bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
}
EXPORT_SYMBOL(ttm_bo_eviction_valuable);
+/**
+ * Check whether the target bo is allowed to be evicted or swapped out,
+ * covering two cases:
+ *
+ * a. if the bo shares a reservation object with ctx->resv, that object is
+ * assumed to be locked already, so it is not locked again; return true
+ * directly when either the operation allows reserved eviction
+ * (allow_reserved_eviction) or the bo is already on the delayed-free list;
+ *
+ * b. otherwise, trylock it.
+ */
+static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo,
+ struct ttm_operation_ctx *ctx, bool *locked)
+{
+ bool ret = false;
+
+ *locked = false;
+ if (bo->resv == ctx->resv) {
+ reservation_object_assert_held(bo->resv);
+ if (ctx->allow_reserved_eviction || !list_empty(&bo->ddestroy))
+ ret = true;
+ } else {
+ *locked = reservation_object_trylock(bo->resv);
+ ret = *locked;
+ }
+
+ return ret;
+}
+
static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
- struct reservation_object *resv,
uint32_t mem_type,
const struct ttm_place *place,
struct ttm_operation_ctx *ctx)
@@ -724,14 +751,8 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
spin_lock(&glob->lru_lock);
for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
list_for_each_entry(bo, &man->lru[i], lru) {
- if (bo->resv == resv) {
- if (list_empty(&bo->ddestroy))
- continue;
- } else {
- locked = reservation_object_trylock(bo->resv);
- if (!locked)
- continue;
- }
+ if (!ttm_bo_evict_swapout_allowable(bo, ctx, &locked))
+ continue;
if (place && !bdev->driver->eviction_valuable(bo,
place)) {
@@ -836,7 +857,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
return ret;
if (mem->mm_node)
break;
- ret = ttm_mem_evict_first(bdev, bo->resv, mem_type, place, ctx);
+ ret = ttm_mem_evict_first(bdev, mem_type, place, ctx);
if (unlikely(ret != 0))
return ret;
} while (1);
@@ -1018,7 +1039,7 @@ static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
int ret = 0;
struct ttm_mem_reg mem;
- lockdep_assert_held(&bo->resv->lock.base);
+ reservation_object_assert_held(bo->resv);
mem.num_pages = bo->num_pages;
mem.size = mem.num_pages << PAGE_SHIFT;
@@ -1088,7 +1109,7 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
int ret;
uint32_t new_flags;
- lockdep_assert_held(&bo->resv->lock.base);
+ reservation_object_assert_held(bo->resv);
/*
* Check whether we need to move buffer.
*/
@@ -1134,7 +1155,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
bool locked;
- ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
+ ret = ttm_mem_global_alloc(mem_glob, acc_size, ctx);
if (ret) {
pr_err("Out of kernel memory\n");
if (destroy)
@@ -1182,7 +1203,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
bo->sg = sg;
if (resv) {
bo->resv = resv;
- lockdep_assert_held(&bo->resv->lock.base);
+ reservation_object_assert_held(bo->resv);
} else {
bo->resv = &bo->ttm_resv;
}
@@ -1204,7 +1225,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
* since otherwise lockdep will be angered in radeon.
*/
if (!resv) {
- locked = ww_mutex_trylock(&bo->resv->lock);
+ locked = reservation_object_trylock(bo->resv);
WARN_ON(!locked);
}
@@ -1333,8 +1354,7 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
while (!list_empty(&man->lru[i])) {
spin_unlock(&glob->lru_lock);
- ret = ttm_mem_evict_first(bdev, NULL, mem_type,
- NULL, &ctx);
+ ret = ttm_mem_evict_first(bdev, mem_type, NULL, &ctx);
if (ret)
return ret;
spin_lock(&glob->lru_lock);
@@ -1455,7 +1475,6 @@ static void ttm_bo_global_kobj_release(struct kobject *kobj)
struct ttm_bo_global *glob =
container_of(kobj, struct ttm_bo_global, kobj);
- ttm_mem_unregister_shrink(glob->mem_glob, &glob->shrink);
__free_page(glob->dummy_read_page);
kfree(glob);
}
@@ -1480,6 +1499,7 @@ int ttm_bo_global_init(struct drm_global_reference *ref)
mutex_init(&glob->device_list_mutex);
spin_lock_init(&glob->lru_lock);
glob->mem_glob = bo_ref->mem_glob;
+ glob->mem_glob->bo_glob = glob;
glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
if (unlikely(glob->dummy_read_page == NULL)) {
@@ -1490,14 +1510,6 @@ int ttm_bo_global_init(struct drm_global_reference *ref)
for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
INIT_LIST_HEAD(&glob->swap_lru[i]);
INIT_LIST_HEAD(&glob->device_list);
-
- ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout);
- ret = ttm_mem_register_shrink(glob->mem_glob, &glob->shrink);
- if (unlikely(ret != 0)) {
- pr_err("Could not register buffer object swapout\n");
- goto out_no_shrink;
- }
-
atomic_set(&glob->bo_count, 0);
ret = kobject_init_and_add(
@@ -1505,8 +1517,6 @@ int ttm_bo_global_init(struct drm_global_reference *ref)
if (unlikely(ret != 0))
kobject_put(&glob->kobj);
return ret;
-out_no_shrink:
- __free_page(glob->dummy_read_page);
out_no_drp:
kfree(glob);
return ret;
@@ -1541,12 +1551,12 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev)
cancel_delayed_work_sync(&bdev->wq);
if (ttm_bo_delayed_delete(bdev, true))
- TTM_DEBUG("Delayed destroy list was clean\n");
+ pr_debug("Delayed destroy list was clean\n");
spin_lock(&glob->lru_lock);
for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
if (list_empty(&bdev->man[0].lru[0]))
- TTM_DEBUG("Swap list %d was clean\n", i);
+ pr_debug("Swap list %d was clean\n", i);
spin_unlock(&glob->lru_lock);
drm_vma_offset_manager_destroy(&bdev->vma_manager);
@@ -1689,21 +1699,20 @@ EXPORT_SYMBOL(ttm_bo_synccpu_write_release);
* A buffer object shrink method that tries to swap out the first
* buffer object on the bo_global::swap_lru list.
*/
-
-static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
+int ttm_bo_swapout(struct ttm_bo_global *glob, struct ttm_operation_ctx *ctx)
{
- struct ttm_bo_global *glob =
- container_of(shrink, struct ttm_bo_global, shrink);
struct ttm_buffer_object *bo;
int ret = -EBUSY;
+ bool locked;
unsigned i;
spin_lock(&glob->lru_lock);
for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
list_for_each_entry(bo, &glob->swap_lru[i], swap) {
- ret = reservation_object_trylock(bo->resv) ? 0 : -EBUSY;
- if (!ret)
+ if (ttm_bo_evict_swapout_allowable(bo, ctx, &locked)) {
+ ret = 0;
break;
+ }
}
if (!ret)
break;
@@ -1775,10 +1784,16 @@ out:
kref_put(&bo->list_kref, ttm_bo_release_list);
return ret;
}
+EXPORT_SYMBOL(ttm_bo_swapout);
void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
{
- while (ttm_bo_swapout(&bdev->glob->shrink) == 0)
+ struct ttm_operation_ctx ctx = {
+ .interruptible = false,
+ .no_wait_gpu = false
+ };
+
+ while (ttm_bo_swapout(bdev->glob, &ctx) == 0)
;
}
EXPORT_SYMBOL(ttm_bo_swapout_all);
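The reworked helper also changes what callers may expect: when the operation
context carries the reservation object the caller already holds, eviction and
swapout are allowed to touch buffers sharing that object without locking them
again. A minimal sketch of a driver-side validation using that facility
(illustrative only; the bo and placement names are not taken from this patch):

	/* Per-VM style eviction: BOs sharing the already-held reservation
	 * object pass ttm_bo_evict_swapout_allowable() because bo->resv ==
	 * ctx->resv and allow_reserved_eviction is set.
	 */
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false,
		.resv = bo->resv,
		.allow_reserved_eviction = true,
	};

	ret = ttm_bo_validate(bo, &placement, &ctx);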
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index e7a519f1849b..153de1bf0232 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -45,7 +45,7 @@ void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
}
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
- bool interruptible, bool no_wait_gpu,
+ struct ttm_operation_ctx *ctx,
struct ttm_mem_reg *new_mem)
{
struct ttm_tt *ttm = bo->ttm;
@@ -53,7 +53,7 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
int ret;
if (old_mem->mem_type != TTM_PL_SYSTEM) {
- ret = ttm_bo_wait(bo, interruptible, no_wait_gpu);
+ ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
if (unlikely(ret != 0)) {
if (ret != -ERESTARTSYS)
@@ -73,7 +73,7 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
return ret;
if (new_mem->mem_type != TTM_PL_SYSTEM) {
- ret = ttm_tt_bind(ttm, new_mem);
+ ret = ttm_tt_bind(ttm, new_mem, ctx);
if (unlikely(ret != 0))
return ret;
}
@@ -329,7 +329,7 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
}
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
- bool interruptible, bool no_wait_gpu,
+ struct ttm_operation_ctx *ctx,
struct ttm_mem_reg *new_mem)
{
struct ttm_bo_device *bdev = bo->bdev;
@@ -345,7 +345,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
unsigned long add = 0;
int dir;
- ret = ttm_bo_wait(bo, interruptible, no_wait_gpu);
+ ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
if (ret)
return ret;
@@ -376,7 +376,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
* TTM might be null for moves within the same region.
*/
if (ttm && ttm->state == tt_unpopulated) {
- ret = ttm->bdev->driver->ttm_tt_populate(ttm);
+ ret = ttm->bdev->driver->ttm_tt_populate(ttm, ctx);
if (ret)
goto out1;
}
@@ -485,7 +485,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
fbo->acc_size = 0;
fbo->resv = &fbo->ttm_resv;
reservation_object_init(fbo->resv);
- ret = ww_mutex_trylock(&fbo->resv->lock);
+ ret = reservation_object_trylock(fbo->resv);
WARN_ON(!ret);
*new_obj = fbo;
@@ -545,14 +545,19 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
unsigned long num_pages,
struct ttm_bo_kmap_obj *map)
{
- struct ttm_mem_reg *mem = &bo->mem; pgprot_t prot;
+ struct ttm_mem_reg *mem = &bo->mem;
+ struct ttm_operation_ctx ctx = {
+ .interruptible = false,
+ .no_wait_gpu = false
+ };
struct ttm_tt *ttm = bo->ttm;
+ pgprot_t prot;
int ret;
BUG_ON(!ttm);
if (ttm->state == tt_unpopulated) {
- ret = ttm->bdev->driver->ttm_tt_populate(ttm);
+ ret = ttm->bdev->driver->ttm_tt_populate(ttm, &ctx);
if (ret)
return ret;
}
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index c8ebb757e36b..8e68e70b02b9 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -92,6 +92,17 @@ out_unlock:
return ret;
}
+static unsigned long ttm_bo_io_mem_pfn(struct ttm_buffer_object *bo,
+ unsigned long page_offset)
+{
+ struct ttm_bo_device *bdev = bo->bdev;
+
+ if (bdev->driver->io_mem_pfn)
+ return bdev->driver->io_mem_pfn(bo, page_offset);
+
+ return ttm_bo_default_io_mem_pfn(bo, page_offset);
+}
+
static int ttm_bo_vm_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
@@ -215,12 +226,17 @@ static int ttm_bo_vm_fault(struct vm_fault *vmf)
cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
cvma.vm_page_prot);
} else {
+ struct ttm_operation_ctx ctx = {
+ .interruptible = false,
+ .no_wait_gpu = false
+ };
+
ttm = bo->ttm;
cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
cvma.vm_page_prot);
/* Allocate all pages at once, most common usage */
- if (ttm->bdev->driver->ttm_tt_populate(ttm)) {
+ if (ttm->bdev->driver->ttm_tt_populate(ttm, &ctx)) {
retval = VM_FAULT_OOM;
goto out_io_unlock;
}
@@ -234,7 +250,7 @@ static int ttm_bo_vm_fault(struct vm_fault *vmf)
if (bo->mem.bus.is_iomem) {
/* Iomem should not be marked encrypted */
cvma.vm_page_prot = pgprot_decrypted(cvma.vm_page_prot);
- pfn = bdev->driver->io_mem_pfn(bo, page_offset);
+ pfn = ttm_bo_io_mem_pfn(bo, page_offset);
} else {
page = ttm->pages[page_offset];
if (unlikely(!page && i == 0)) {
diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
index e96374990398..aa0c38136958 100644
--- a/drivers/gpu/drm/ttm/ttm_memory.c
+++ b/drivers/gpu/drm/ttm/ttm_memory.c
@@ -211,35 +211,33 @@ static bool ttm_zones_above_swap_target(struct ttm_mem_global *glob,
*/
static void ttm_shrink(struct ttm_mem_global *glob, bool from_wq,
- uint64_t extra)
+ uint64_t extra, struct ttm_operation_ctx *ctx)
{
int ret;
- struct ttm_mem_shrink *shrink;
spin_lock(&glob->lock);
- if (glob->shrink == NULL)
- goto out;
while (ttm_zones_above_swap_target(glob, from_wq, extra)) {
- shrink = glob->shrink;
spin_unlock(&glob->lock);
- ret = shrink->do_shrink(shrink);
+ ret = ttm_bo_swapout(glob->bo_glob, ctx);
spin_lock(&glob->lock);
if (unlikely(ret != 0))
- goto out;
+ break;
}
-out:
+
spin_unlock(&glob->lock);
}
-
-
static void ttm_shrink_work(struct work_struct *work)
{
+ struct ttm_operation_ctx ctx = {
+ .interruptible = false,
+ .no_wait_gpu = false
+ };
struct ttm_mem_global *glob =
container_of(work, struct ttm_mem_global, work);
- ttm_shrink(glob, true, 0ULL);
+ ttm_shrink(glob, true, 0ULL, &ctx);
}
static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
@@ -514,7 +512,7 @@ out_unlock:
static int ttm_mem_global_alloc_zone(struct ttm_mem_global *glob,
struct ttm_mem_zone *single_zone,
uint64_t memory,
- bool no_wait, bool interruptible)
+ struct ttm_operation_ctx *ctx)
{
int count = TTM_MEMORY_ALLOC_RETRIES;
@@ -522,33 +520,32 @@ static int ttm_mem_global_alloc_zone(struct ttm_mem_global *glob,
single_zone,
memory, true)
!= 0)) {
- if (no_wait)
+ if (ctx->no_wait_gpu)
return -ENOMEM;
if (unlikely(count-- == 0))
return -ENOMEM;
- ttm_shrink(glob, false, memory + (memory >> 2) + 16);
+ ttm_shrink(glob, false, memory + (memory >> 2) + 16, ctx);
}
return 0;
}
int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
- bool no_wait, bool interruptible)
+ struct ttm_operation_ctx *ctx)
{
/**
* Normal allocations of kernel memory are registered in
* all zones.
*/
- return ttm_mem_global_alloc_zone(glob, NULL, memory, no_wait,
- interruptible);
+ return ttm_mem_global_alloc_zone(glob, NULL, memory, ctx);
}
EXPORT_SYMBOL(ttm_mem_global_alloc);
int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
- struct page *page, uint64_t size)
+ struct page *page, uint64_t size,
+ struct ttm_operation_ctx *ctx)
{
-
struct ttm_mem_zone *zone = NULL;
/**
@@ -563,7 +560,7 @@ int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
zone = glob->zone_kernel;
#endif
- return ttm_mem_global_alloc_zone(glob, zone, size, false, false);
+ return ttm_mem_global_alloc_zone(glob, zone, size, ctx);
}
void ttm_mem_global_free_page(struct ttm_mem_global *glob, struct page *page,
diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c
index 26a7ad0f4789..1aa2baa83959 100644
--- a/drivers/gpu/drm/ttm/ttm_object.c
+++ b/drivers/gpu/drm/ttm/ttm_object.c
@@ -325,6 +325,10 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
struct ttm_ref_object *ref;
struct drm_hash_item *hash;
struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
+ struct ttm_operation_ctx ctx = {
+ .interruptible = false,
+ .no_wait_gpu = false
+ };
int ret = -EINVAL;
if (base->tfile != tfile && !base->shareable)
@@ -350,7 +354,7 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
return -EPERM;
ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref),
- false, false);
+ &ctx);
if (unlikely(ret != 0))
return ret;
ref = kmalloc(sizeof(*ref), GFP_KERNEL);
@@ -686,7 +690,10 @@ int ttm_prime_handle_to_fd(struct ttm_object_file *tfile,
dma_buf = prime->dma_buf;
if (!dma_buf || !get_dma_buf_unless_doomed(dma_buf)) {
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
-
+ struct ttm_operation_ctx ctx = {
+ .interruptible = true,
+ .no_wait_gpu = false
+ };
exp_info.ops = &tdev->ops;
exp_info.size = prime->size;
exp_info.flags = flags;
@@ -696,7 +703,7 @@ int ttm_prime_handle_to_fd(struct ttm_object_file *tfile,
* Need to create a new dma_buf, with memory accounting.
*/
ret = ttm_mem_global_alloc(tdev->mem_glob, tdev->dma_buf_size,
- false, true);
+ &ctx);
if (unlikely(ret != 0)) {
mutex_unlock(&prime->mutex);
goto out_unref;
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 44343a2bf55c..f1a3d55ead83 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -455,6 +455,7 @@ ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
freed += (nr_free_pool - shrink_pages) << pool->order;
if (freed >= sc->nr_to_scan)
break;
+ shrink_pages <<= pool->order;
}
mutex_unlock(&lock);
return freed;
@@ -543,7 +544,7 @@ static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
int r = 0;
unsigned i, j, cpages;
unsigned npages = 1 << order;
- unsigned max_cpages = min(count, (unsigned)NUM_PAGES_TO_ALLOC);
+ unsigned max_cpages = min(count << order, (unsigned)NUM_PAGES_TO_ALLOC);
/* allocate array for page caching change */
caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);
@@ -1057,7 +1058,7 @@ void ttm_page_alloc_fini(void)
_manager = NULL;
}
-int ttm_pool_populate(struct ttm_tt *ttm)
+int ttm_pool_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
unsigned i;
@@ -1075,7 +1076,7 @@ int ttm_pool_populate(struct ttm_tt *ttm)
for (i = 0; i < ttm->num_pages; ++i) {
ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
- PAGE_SIZE);
+ PAGE_SIZE, ctx);
if (unlikely(ret != 0)) {
ttm_pool_unpopulate(ttm);
return -ENOMEM;
@@ -1112,12 +1113,13 @@ void ttm_pool_unpopulate(struct ttm_tt *ttm)
}
EXPORT_SYMBOL(ttm_pool_unpopulate);
-int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt)
+int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt,
+ struct ttm_operation_ctx *ctx)
{
unsigned i, j;
int r;
- r = ttm_pool_populate(&tt->ttm);
+ r = ttm_pool_populate(&tt->ttm, ctx);
if (r)
return r;
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
index bda00b2ab51c..3ac53918881e 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -923,7 +923,8 @@ static gfp_t ttm_dma_pool_gfp_flags(struct ttm_dma_tt *ttm_dma, bool huge)
* On success pages list will hold count number of correctly
* cached pages. On failure will hold the negative return value (-ENOMEM, etc).
*/
-int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev)
+int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
+ struct ttm_operation_ctx *ctx)
{
struct ttm_tt *ttm = &ttm_dma->ttm;
struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
@@ -962,7 +963,7 @@ int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev)
break;
ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
- pool->size);
+ pool->size, ctx);
if (unlikely(ret != 0)) {
ttm_dma_unpopulate(ttm_dma, dev);
return -ENOMEM;
@@ -998,7 +999,7 @@ skip_huge:
}
ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
- pool->size);
+ pool->size, ctx);
if (unlikely(ret != 0)) {
ttm_dma_unpopulate(ttm_dma, dev);
return -ENOMEM;
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 8ebc8d3560c3..5a046a3c543a 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -261,7 +261,8 @@ void ttm_tt_unbind(struct ttm_tt *ttm)
}
}
-int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
+int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem,
+ struct ttm_operation_ctx *ctx)
{
int ret = 0;
@@ -271,7 +272,7 @@ int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
if (ttm->state == tt_bound)
return 0;
- ret = ttm->bdev->driver->ttm_tt_populate(ttm);
+ ret = ttm->bdev->driver->ttm_tt_populate(ttm, ctx);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/tve200/tve200_drm.h b/drivers/gpu/drm/tve200/tve200_drm.h
index 628b79324c48..5c270055bd58 100644
--- a/drivers/gpu/drm/tve200/tve200_drm.h
+++ b/drivers/gpu/drm/tve200/tve200_drm.h
@@ -103,7 +103,6 @@ struct tve200_drm_dev_private {
struct drm_panel *panel;
struct drm_bridge *bridge;
struct drm_simple_display_pipe pipe;
- struct drm_fbdev_cma *fbdev;
void *regs;
struct clk *pclk;
diff --git a/drivers/gpu/drm/tve200/tve200_drv.c b/drivers/gpu/drm/tve200/tve200_drv.c
index bd6c9454d767..44911d921864 100644
--- a/drivers/gpu/drm/tve200/tve200_drv.c
+++ b/drivers/gpu/drm/tve200/tve200_drv.c
@@ -46,6 +46,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_fb_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_panel.h>
#include <drm/drm_of.h>
@@ -130,8 +131,7 @@ static int tve200_modeset_init(struct drm_device *dev)
* Passing in 16 here will make the RGB565 mode the default
* Passing in 32 will use XRGB8888 mode
*/
- priv->fbdev = drm_fbdev_cma_init(dev, 16,
- dev->mode_config.num_connector);
+ drm_fb_cma_fbdev_init(dev, 16, 0);
drm_kms_helper_poll_init(dev);
goto finish;
@@ -146,17 +146,10 @@ finish:
DEFINE_DRM_GEM_CMA_FOPS(drm_fops);
-static void tve200_lastclose(struct drm_device *dev)
-{
- struct tve200_drm_dev_private *priv = dev->dev_private;
-
- drm_fbdev_cma_restore_mode(priv->fbdev);
-}
-
static struct drm_driver tve200_drm_driver = {
.driver_features =
DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME | DRIVER_ATOMIC,
- .lastclose = tve200_lastclose,
+ .lastclose = drm_fb_helper_lastclose,
.ioctls = NULL,
.fops = &drm_fops,
.name = "tve200",
@@ -270,8 +263,7 @@ static int tve200_remove(struct platform_device *pdev)
struct tve200_drm_dev_private *priv = drm->dev_private;
drm_dev_unregister(drm);
- if (priv->fbdev)
- drm_fbdev_cma_fini(priv->fbdev);
+ drm_fb_cma_fbdev_fini(drm);
if (priv->panel)
drm_panel_bridge_remove(priv->bridge);
drm_mode_config_cleanup(drm);
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
index 4ae45d7dac42..2decc8e2c79f 100644
--- a/drivers/gpu/drm/vc4/vc4_bo.c
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
@@ -637,7 +637,8 @@ int vc4_bo_inc_usecnt(struct vc4_bo *bo)
mutex_lock(&bo->madv_lock);
switch (bo->madv) {
case VC4_MADV_WILLNEED:
- refcount_inc(&bo->usecnt);
+ if (!refcount_inc_not_zero(&bo->usecnt))
+ refcount_set(&bo->usecnt, 1);
ret = 0;
break;
case VC4_MADV_DONTNEED:
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index e3c29729da2e..ceb385fd69c5 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -111,13 +111,6 @@ static int vc4_get_param_ioctl(struct drm_device *dev, void *data,
return 0;
}
-static void vc4_lastclose(struct drm_device *dev)
-{
- struct vc4_dev *vc4 = to_vc4_dev(dev);
-
- drm_fbdev_cma_restore_mode(vc4->fbdev);
-}
-
static const struct vm_operations_struct vc4_vm_ops = {
.fault = vc4_fault,
.open = drm_gem_vm_open,
@@ -159,7 +152,7 @@ static struct drm_driver vc4_drm_driver = {
DRIVER_HAVE_IRQ |
DRIVER_RENDER |
DRIVER_PRIME),
- .lastclose = vc4_lastclose,
+ .lastclose = drm_fb_helper_lastclose,
.irq_handler = vc4_irq,
.irq_preinstall = vc4_irq_preinstall,
.irq_postinstall = vc4_irq_postinstall,
@@ -301,12 +294,10 @@ static void vc4_drm_unbind(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct drm_device *drm = platform_get_drvdata(pdev);
- struct vc4_dev *vc4 = to_vc4_dev(drm);
drm_dev_unregister(drm);
- if (vc4->fbdev)
- drm_fbdev_cma_fini(vc4->fbdev);
+ drm_fb_cma_fbdev_fini(drm);
drm_mode_config_cleanup(drm);
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index 9c0d380c96f2..3af22936d9b3 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -39,8 +39,6 @@ struct vc4_dev {
struct vc4_dsi *dsi1;
struct vc4_vec *vec;
- struct drm_fbdev_cma *fbdev;
-
struct vc4_hang_state *hang_state;
/* The kernel-space BO cache. Tracks buffers that have been
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index 6c32c89a83a9..638540943c61 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -888,8 +888,10 @@ vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
/* If we got force-completed because of GPU reset rather than
* through our IRQ handler, signal the fence now.
*/
- if (exec->fence)
+ if (exec->fence) {
dma_fence_signal(exec->fence);
+ dma_fence_put(exec->fence);
+ }
if (exec->bo) {
for (i = 0; i < exec->bo_count; i++) {
diff --git a/drivers/gpu/drm/vc4/vc4_irq.c b/drivers/gpu/drm/vc4/vc4_irq.c
index 61b2e5377993..26eddbb62893 100644
--- a/drivers/gpu/drm/vc4/vc4_irq.c
+++ b/drivers/gpu/drm/vc4/vc4_irq.c
@@ -139,6 +139,7 @@ vc4_irq_finish_render_job(struct drm_device *dev)
list_move_tail(&exec->head, &vc4->job_done_list);
if (exec->fence) {
dma_fence_signal_locked(exec->fence);
+ dma_fence_put(exec->fence);
exec->fence = NULL;
}
vc4_submit_next_render_job(dev);
diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c
index 50c4959b5bd3..4256f294c346 100644
--- a/drivers/gpu/drm/vc4/vc4_kms.c
+++ b/drivers/gpu/drm/vc4/vc4_kms.c
@@ -19,17 +19,11 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_plane_helper.h>
+#include <drm/drm_fb_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include "vc4_drv.h"
-static void vc4_output_poll_changed(struct drm_device *dev)
-{
- struct vc4_dev *vc4 = to_vc4_dev(dev);
-
- drm_fbdev_cma_hotplug_event(vc4->fbdev);
-}
-
static void
vc4_atomic_complete_commit(struct drm_atomic_state *state)
{
@@ -194,7 +188,7 @@ static struct drm_framebuffer *vc4_fb_create(struct drm_device *dev,
}
static const struct drm_mode_config_funcs vc4_mode_funcs = {
- .output_poll_changed = vc4_output_poll_changed,
+ .output_poll_changed = drm_fb_helper_output_poll_changed,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = vc4_atomic_commit,
.fb_create = vc4_fb_create,
@@ -224,12 +218,8 @@ int vc4_kms_load(struct drm_device *dev)
drm_mode_config_reset(dev);
- if (dev->mode_config.num_connector) {
- vc4->fbdev = drm_fbdev_cma_init(dev, 32,
- dev->mode_config.num_connector);
- if (IS_ERR(vc4->fbdev))
- vc4->fbdev = NULL;
- }
+ if (dev->mode_config.num_connector)
+ drm_fb_cma_fbdev_init(dev, 32, 0);
drm_kms_helper_poll_init(dev);
diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
index 6f66b7347cd0..0b90cdb3d9fe 100644
--- a/drivers/gpu/drm/virtio/virtgpu_object.c
+++ b/drivers/gpu/drm/virtio/virtgpu_object.c
@@ -124,13 +124,17 @@ int virtio_gpu_object_get_sg_table(struct virtio_gpu_device *qdev,
int ret;
struct page **pages = bo->tbo.ttm->pages;
int nr_pages = bo->tbo.num_pages;
+ struct ttm_operation_ctx ctx = {
+ .interruptible = false,
+ .no_wait_gpu = false
+ };
/* wtf swapping */
if (bo->pages)
return 0;
if (bo->tbo.ttm->state == tt_unpopulated)
- bo->tbo.ttm->bdev->driver->ttm_tt_populate(bo->tbo.ttm);
+ bo->tbo.ttm->bdev->driver->ttm_tt_populate(bo->tbo.ttm, &ctx);
bo->pages = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
if (!bo->pages)
goto out;
diff --git a/drivers/gpu/drm/virtio/virtgpu_ttm.c b/drivers/gpu/drm/virtio/virtgpu_ttm.c
index 488c6bd032fc..36655b709eb2 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ttm.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ttm.c
@@ -324,12 +324,13 @@ static struct ttm_backend_func virtio_gpu_backend_func = {
.destroy = &virtio_gpu_ttm_backend_destroy,
};
-static int virtio_gpu_ttm_tt_populate(struct ttm_tt *ttm)
+static int virtio_gpu_ttm_tt_populate(struct ttm_tt *ttm,
+ struct ttm_operation_ctx *ctx)
{
if (ttm->state != tt_unpopulated)
return 0;
- return ttm_pool_populate(ttm);
+ return ttm_pool_populate(ttm, ctx);
}
static void virtio_gpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
@@ -430,7 +431,6 @@ static struct ttm_bo_driver virtio_gpu_bo_driver = {
.verify_access = &virtio_gpu_verify_access,
.io_mem_reserve = &virtio_gpu_ttm_io_mem_reserve,
.io_mem_free = &virtio_gpu_ttm_io_mem_free,
- .io_mem_pfn = ttm_bo_default_io_mem_pfn,
.move_notify = &virtio_gpu_bo_move_notify,
.swap_notify = &virtio_gpu_bo_swap_notify,
};
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
index 9c42e96da510..55d32ae43aa4 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
@@ -1202,10 +1202,14 @@ struct vmw_ctx_binding_state *
vmw_binding_state_alloc(struct vmw_private *dev_priv)
{
struct vmw_ctx_binding_state *cbs;
+ struct ttm_operation_ctx ctx = {
+ .interruptible = false,
+ .no_wait_gpu = false
+ };
int ret;
ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), sizeof(*cbs),
- false, false);
+ &ctx);
if (ret)
return ERR_PTR(ret);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index c7056322211c..22231bc9e845 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -394,6 +394,10 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
struct vmw_private *dev_priv = vmw_tt->dev_priv;
struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
struct vmw_sg_table *vsgt = &vmw_tt->vsgt;
+ struct ttm_operation_ctx ctx = {
+ .interruptible = true,
+ .no_wait_gpu = false
+ };
struct vmw_piter iter;
dma_addr_t old;
int ret = 0;
@@ -417,8 +421,7 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
sgt_size = ttm_round_pot(sizeof(struct sg_table));
}
vmw_tt->sg_alloc_size = sgt_size + sgl_size * vsgt->num_pages;
- ret = ttm_mem_global_alloc(glob, vmw_tt->sg_alloc_size, false,
- true);
+ ret = ttm_mem_global_alloc(glob, vmw_tt->sg_alloc_size, &ctx);
if (unlikely(ret != 0))
return ret;
@@ -632,7 +635,7 @@ static void vmw_ttm_destroy(struct ttm_tt *ttm)
}
-static int vmw_ttm_populate(struct ttm_tt *ttm)
+static int vmw_ttm_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
struct vmw_ttm_tt *vmw_tt =
container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
@@ -646,15 +649,16 @@ static int vmw_ttm_populate(struct ttm_tt *ttm)
if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
size_t size =
ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));
- ret = ttm_mem_global_alloc(glob, size, false, true);
+ ret = ttm_mem_global_alloc(glob, size, ctx);
if (unlikely(ret != 0))
return ret;
- ret = ttm_dma_populate(&vmw_tt->dma_ttm, dev_priv->dev->dev);
+ ret = ttm_dma_populate(&vmw_tt->dma_ttm, dev_priv->dev->dev,
+ ctx);
if (unlikely(ret != 0))
ttm_mem_global_free(glob, size);
} else
- ret = ttm_pool_populate(ttm);
+ ret = ttm_pool_populate(ttm, ctx);
return ret;
}
@@ -859,5 +863,4 @@ struct ttm_bo_driver vmw_bo_driver = {
.fault_reserve_notify = &vmw_ttm_fault_reserve_notify,
.io_mem_reserve = &vmw_ttm_io_mem_reserve,
.io_mem_free = &vmw_ttm_io_mem_free,
- .io_mem_pfn = ttm_bo_default_io_mem_pfn,
};
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
index 4212b3e673bc..3767ac335aca 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
@@ -746,6 +746,10 @@ static int vmw_context_define(struct drm_device *dev, void *data,
struct vmw_resource *tmp;
struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ struct ttm_operation_ctx ttm_opt_ctx = {
+ .interruptible = true,
+ .no_wait_gpu = false
+ };
int ret;
if (!dev_priv->has_dx && dx) {
@@ -768,7 +772,7 @@ static int vmw_context_define(struct drm_device *dev, void *data,
ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
vmw_user_context_size,
- false, true);
+ &ttm_opt_ctx);
if (unlikely(ret != 0)) {
if (ret != -ERESTARTSYS)
DRM_ERROR("Out of graphics memory for context"
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
index 92df0b08c194..cbf54ea7b4c0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
@@ -573,6 +573,10 @@ struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv,
u32 type)
{
struct vmw_cotable *vcotbl;
+ struct ttm_operation_ctx ttm_opt_ctx = {
+ .interruptible = true,
+ .no_wait_gpu = false
+ };
int ret;
u32 num_entries;
@@ -580,7 +584,7 @@ struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv,
cotable_acc_size = ttm_round_pot(sizeof(struct vmw_cotable));
ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
- cotable_acc_size, false, true);
+ cotable_acc_size, &ttm_opt_ctx);
if (unlikely(ret))
return ERR_PTR(ret);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index d6b1c509ae01..6c5c75cf5e6c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -588,6 +588,10 @@ int vmw_user_fence_create(struct drm_file *file_priv,
struct vmw_user_fence *ufence;
struct vmw_fence_obj *tmp;
struct ttm_mem_global *mem_glob = vmw_mem_glob(fman->dev_priv);
+ struct ttm_operation_ctx ctx = {
+ .interruptible = false,
+ .no_wait_gpu = false
+ };
int ret;
/*
@@ -596,7 +600,7 @@ int vmw_user_fence_create(struct drm_file *file_priv,
*/
ret = ttm_mem_global_alloc(mem_glob, fman->user_fence_size,
- false, false);
+ &ctx);
if (unlikely(ret != 0))
return ret;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
index b17f08fc50d3..736ca47e28ea 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
@@ -240,6 +240,10 @@ static int vmw_otable_batch_setup(struct vmw_private *dev_priv,
unsigned long offset;
unsigned long bo_size;
struct vmw_otable *otables = batch->otables;
+ struct ttm_operation_ctx ctx = {
+ .interruptible = false,
+ .no_wait_gpu = false
+ };
SVGAOTableType i;
int ret;
@@ -264,7 +268,7 @@ static int vmw_otable_batch_setup(struct vmw_private *dev_priv,
ret = ttm_bo_reserve(batch->otable_bo, false, true, NULL);
BUG_ON(ret != 0);
- ret = vmw_bo_driver.ttm_tt_populate(batch->otable_bo->ttm);
+ ret = vmw_bo_driver.ttm_tt_populate(batch->otable_bo->ttm, &ctx);
if (unlikely(ret != 0))
goto out_unreserve;
ret = vmw_bo_map_dma(batch->otable_bo);
@@ -430,6 +434,11 @@ static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
struct vmw_mob *mob)
{
int ret;
+ struct ttm_operation_ctx ctx = {
+ .interruptible = false,
+ .no_wait_gpu = false
+ };
+
BUG_ON(mob->pt_bo != NULL);
ret = ttm_bo_create(&dev_priv->bdev, mob->num_pages * PAGE_SIZE,
@@ -442,7 +451,7 @@ static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
ret = ttm_bo_reserve(mob->pt_bo, false, true, NULL);
BUG_ON(ret != 0);
- ret = vmw_bo_driver.ttm_tt_populate(mob->pt_bo->ttm);
+ ret = vmw_bo_driver.ttm_tt_populate(mob->pt_bo->ttm, &ctx);
if (unlikely(ret != 0))
goto out_unreserve;
ret = vmw_bo_map_dma(mob->pt_bo);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index 004e18b8832c..73b8e9a16368 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -607,6 +607,10 @@ int vmw_dx_shader_add(struct vmw_cmdbuf_res_manager *man,
struct vmw_dx_shader *shader;
struct vmw_resource *res;
struct vmw_private *dev_priv = ctx->dev_priv;
+ struct ttm_operation_ctx ttm_opt_ctx = {
+ .interruptible = true,
+ .no_wait_gpu = false
+ };
int ret;
if (!vmw_shader_dx_size)
@@ -616,7 +620,7 @@ int vmw_dx_shader_add(struct vmw_cmdbuf_res_manager *man,
return -EINVAL;
ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), vmw_shader_dx_size,
- false, true);
+ &ttm_opt_ctx);
if (ret) {
if (ret != -ERESTARTSYS)
DRM_ERROR("Out of graphics memory for shader "
@@ -730,6 +734,10 @@ static int vmw_user_shader_alloc(struct vmw_private *dev_priv,
{
struct vmw_user_shader *ushader;
struct vmw_resource *res, *tmp;
+ struct ttm_operation_ctx ctx = {
+ .interruptible = true,
+ .no_wait_gpu = false
+ };
int ret;
/*
@@ -742,7 +750,7 @@ static int vmw_user_shader_alloc(struct vmw_private *dev_priv,
ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
vmw_user_shader_size,
- false, true);
+ &ctx);
if (unlikely(ret != 0)) {
if (ret != -ERESTARTSYS)
DRM_ERROR("Out of graphics memory for shader "
@@ -800,6 +808,10 @@ static struct vmw_resource *vmw_shader_alloc(struct vmw_private *dev_priv,
{
struct vmw_shader *shader;
struct vmw_resource *res;
+ struct ttm_operation_ctx ctx = {
+ .interruptible = true,
+ .no_wait_gpu = false
+ };
int ret;
/*
@@ -812,7 +824,7 @@ static struct vmw_resource *vmw_shader_alloc(struct vmw_private *dev_priv,
ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
vmw_shader_size,
- false, true);
+ &ctx);
if (unlikely(ret != 0)) {
if (ret != -ERESTARTSYS)
DRM_ERROR("Out of graphics memory for shader "
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c
index 051d3b39b0ea..a0cb310665cc 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c
@@ -149,6 +149,10 @@ vmw_simple_resource_create_ioctl(struct drm_device *dev, void *data,
struct vmw_resource *res;
struct vmw_resource *tmp;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ struct ttm_operation_ctx ctx = {
+ .interruptible = true,
+ .no_wait_gpu = false
+ };
size_t alloc_size;
size_t account_size;
int ret;
@@ -162,7 +166,7 @@ vmw_simple_resource_create_ioctl(struct drm_device *dev, void *data,
return ret;
ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), account_size,
- false, true);
+ &ctx);
ttm_read_unlock(&dev_priv->reservation_sem);
if (ret) {
if (ret != -ERESTARTSYS)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_so.c b/drivers/gpu/drm/vmwgfx/vmwgfx_so.c
index 5a73eebd0f35..d3573c37c436 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_so.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_so.c
@@ -329,6 +329,10 @@ int vmw_view_add(struct vmw_cmdbuf_res_manager *man,
struct vmw_private *dev_priv = ctx->dev_priv;
struct vmw_resource *res;
struct vmw_view *view;
+ struct ttm_operation_ctx ttm_opt_ctx = {
+ .interruptible = true,
+ .no_wait_gpu = false
+ };
size_t size;
int ret;
@@ -345,7 +349,7 @@ int vmw_view_add(struct vmw_cmdbuf_res_manager *man,
size = offsetof(struct vmw_view, cmd) + cmd_size;
- ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), size, false, true);
+ ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), size, &ttm_opt_ctx);
if (ret) {
if (ret != -ERESTARTSYS)
DRM_ERROR("Out of graphics memory for view"
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 6ac094ee8983..db1bb166845e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -700,6 +700,10 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
struct drm_vmw_surface_create_req *req = &arg->req;
struct drm_vmw_surface_arg *rep = &arg->rep;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ struct ttm_operation_ctx ctx = {
+ .interruptible = true,
+ .no_wait_gpu = false
+ };
int ret;
int i, j;
uint32_t cur_bo_offset;
@@ -741,7 +745,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
return ret;
ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
- size, false, true);
+ size, &ctx);
if (unlikely(ret != 0)) {
if (ret != -ERESTARTSYS)
DRM_ERROR("Out of graphics memory for surface"
@@ -1479,6 +1483,10 @@ int vmw_surface_gb_priv_define(struct drm_device *dev,
{
struct vmw_private *dev_priv = vmw_priv(dev);
struct vmw_user_surface *user_srf;
+ struct ttm_operation_ctx ctx = {
+ .interruptible = true,
+ .no_wait_gpu = false
+ };
struct vmw_surface *srf;
int ret;
u32 num_layers;
@@ -1525,7 +1533,7 @@ int vmw_surface_gb_priv_define(struct drm_device *dev,
return ret;
ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
- user_accounting_size, false, true);
+ user_accounting_size, &ctx);
if (unlikely(ret != 0)) {
if (ret != -ERESTARTSYS)
DRM_ERROR("Out of graphics memory for surface"
diff --git a/drivers/gpu/drm/zte/zx_drm_drv.c b/drivers/gpu/drm/zte/zx_drm_drv.c
index e8b8266c0cde..6f4205e80378 100644
--- a/drivers/gpu/drm/zte/zx_drm_drv.c
+++ b/drivers/gpu/drm/zte/zx_drm_drv.c
@@ -29,37 +29,19 @@
#include "zx_drm_drv.h"
#include "zx_vou.h"
-struct zx_drm_private {
- struct drm_fbdev_cma *fbdev;
-};
-
-static void zx_drm_fb_output_poll_changed(struct drm_device *drm)
-{
- struct zx_drm_private *priv = drm->dev_private;
-
- drm_fbdev_cma_hotplug_event(priv->fbdev);
-}
-
static const struct drm_mode_config_funcs zx_drm_mode_config_funcs = {
.fb_create = drm_gem_fb_create,
- .output_poll_changed = zx_drm_fb_output_poll_changed,
+ .output_poll_changed = drm_fb_helper_output_poll_changed,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
};
-static void zx_drm_lastclose(struct drm_device *drm)
-{
- struct zx_drm_private *priv = drm->dev_private;
-
- drm_fbdev_cma_restore_mode(priv->fbdev);
-}
-
DEFINE_DRM_GEM_CMA_FOPS(zx_drm_fops);
static struct drm_driver zx_drm_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME |
DRIVER_ATOMIC,
- .lastclose = zx_drm_lastclose,
+ .lastclose = drm_fb_helper_lastclose,
.gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.dumb_create = drm_gem_cma_dumb_create,
@@ -83,18 +65,12 @@ static struct drm_driver zx_drm_driver = {
static int zx_drm_bind(struct device *dev)
{
struct drm_device *drm;
- struct zx_drm_private *priv;
int ret;
- priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
drm = drm_dev_alloc(&zx_drm_driver, dev);
if (IS_ERR(drm))
return PTR_ERR(drm);
- drm->dev_private = priv;
dev_set_drvdata(dev, drm);
drm_mode_config_init(drm);
@@ -125,12 +101,9 @@ static int zx_drm_bind(struct device *dev)
drm_mode_config_reset(drm);
drm_kms_helper_poll_init(drm);
- priv->fbdev = drm_fbdev_cma_init(drm, 32,
- drm->mode_config.num_connector);
- if (IS_ERR(priv->fbdev)) {
- ret = PTR_ERR(priv->fbdev);
+ ret = drm_fb_cma_fbdev_init(drm, 32, 0);
+ if (ret) {
DRM_DEV_ERROR(dev, "failed to init cma fbdev: %d\n", ret);
- priv->fbdev = NULL;
goto out_poll_fini;
}
@@ -141,10 +114,7 @@ static int zx_drm_bind(struct device *dev)
return 0;
out_fbdev_fini:
- if (priv->fbdev) {
- drm_fbdev_cma_fini(priv->fbdev);
- priv->fbdev = NULL;
- }
+ drm_fb_cma_fbdev_fini(drm);
out_poll_fini:
drm_kms_helper_poll_fini(drm);
drm_mode_config_cleanup(drm);
@@ -152,7 +122,6 @@ out_unbind:
component_unbind_all(dev, drm);
out_unregister:
dev_set_drvdata(dev, NULL);
- drm->dev_private = NULL;
drm_dev_unref(drm);
return ret;
}
@@ -160,18 +129,13 @@ out_unregister:
static void zx_drm_unbind(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
- struct zx_drm_private *priv = drm->dev_private;
drm_dev_unregister(drm);
- if (priv->fbdev) {
- drm_fbdev_cma_fini(priv->fbdev);
- priv->fbdev = NULL;
- }
+ drm_fb_cma_fbdev_fini(drm);
drm_kms_helper_poll_fini(drm);
drm_mode_config_cleanup(drm);
component_unbind_all(dev, drm);
dev_set_drvdata(dev, NULL);
- drm->dev_private = NULL;
drm_dev_unref(drm);
}
diff --git a/drivers/gpu/ipu-v3/Kconfig b/drivers/gpu/ipu-v3/Kconfig
index 87a20b3dcf7a..fe6f8c5b4445 100644
--- a/drivers/gpu/ipu-v3/Kconfig
+++ b/drivers/gpu/ipu-v3/Kconfig
@@ -1,7 +1,9 @@
config IMX_IPUV3_CORE
tristate "IPUv3 core support"
- depends on SOC_IMX5 || SOC_IMX6Q || ARCH_MULTIPLATFORM
+ depends on SOC_IMX5 || SOC_IMX6Q || ARCH_MULTIPLATFORM || COMPILE_TEST
depends on DRM || !DRM # if DRM=m, this can't be 'y'
+ select BITREVERSE
+ select GENERIC_ALLOCATOR if DRM
select GENERIC_IRQ_CHIP
help
Choose this if you have an i.MX5/6 system and want to use the Image
diff --git a/drivers/gpu/ipu-v3/ipu-cpmem.c b/drivers/gpu/ipu-v3/ipu-cpmem.c
index 1cb82f445f91..bb9c087e6c0d 100644
--- a/drivers/gpu/ipu-v3/ipu-cpmem.c
+++ b/drivers/gpu/ipu-v3/ipu-cpmem.c
@@ -12,6 +12,7 @@
#include <linux/types.h>
#include <linux/bitrev.h>
#include <linux/io.h>
+#include <linux/sizes.h>
#include <drm/drm_fourcc.h>
#include "ipu-prv.h"
diff --git a/drivers/gpu/ipu-v3/ipu-ic.c b/drivers/gpu/ipu-v3/ipu-ic.c
index 321eb983c2f5..67cc820253a9 100644
--- a/drivers/gpu/ipu-v3/ipu-ic.c
+++ b/drivers/gpu/ipu-v3/ipu-ic.c
@@ -17,6 +17,7 @@
#include <linux/bitrev.h>
#include <linux/io.h>
#include <linux/err.h>
+#include <linux/sizes.h>
#include "ipu-prv.h"
/* IC Register Offsets */
diff --git a/drivers/gpu/ipu-v3/ipu-pre.c b/drivers/gpu/ipu-v3/ipu-pre.c
index c860a7997cb5..f1cec3d70498 100644
--- a/drivers/gpu/ipu-v3/ipu-pre.c
+++ b/drivers/gpu/ipu-v3/ipu-pre.c
@@ -49,6 +49,10 @@
#define IPU_PRE_TPR_CTRL 0x070
#define IPU_PRE_TPR_CTRL_TILE_FORMAT(v) ((v & 0xff) << 0)
#define IPU_PRE_TPR_CTRL_TILE_FORMAT_MASK 0xff
+#define IPU_PRE_TPR_CTRL_TILE_FORMAT_16_BIT (1 << 0)
+#define IPU_PRE_TPR_CTRL_TILE_FORMAT_SPLIT_BUF (1 << 4)
+#define IPU_PRE_TPR_CTRL_TILE_FORMAT_SINGLE_BUF (1 << 5)
+#define IPU_PRE_TPR_CTRL_TILE_FORMAT_SUPER_TILED (1 << 6)
#define IPU_PRE_PREFETCH_ENG_CTRL 0x080
#define IPU_PRE_PREF_ENG_CTRL_PREFETCH_EN (1 << 0)
@@ -147,7 +151,7 @@ int ipu_pre_get(struct ipu_pre *pre)
val = IPU_PRE_CTRL_HANDSHAKE_ABORT_SKIP_EN |
IPU_PRE_CTRL_HANDSHAKE_EN |
IPU_PRE_CTRL_TPR_REST_SEL |
- IPU_PRE_CTRL_BLOCK_16 | IPU_PRE_CTRL_SDW_UPDATE;
+ IPU_PRE_CTRL_SDW_UPDATE;
writel(val, pre->regs + IPU_PRE_CTRL);
pre->in_use = true;
@@ -163,14 +167,17 @@ void ipu_pre_put(struct ipu_pre *pre)
void ipu_pre_configure(struct ipu_pre *pre, unsigned int width,
unsigned int height, unsigned int stride, u32 format,
- unsigned int bufaddr)
+ uint64_t modifier, unsigned int bufaddr)
{
const struct drm_format_info *info = drm_format_info(format);
u32 active_bpp = info->cpp[0] >> 1;
u32 val;
/* calculate safe window for ctrl register updates */
- pre->safe_window_end = height - 2;
+ if (modifier == DRM_FORMAT_MOD_LINEAR)
+ pre->safe_window_end = height - 2;
+ else
+ pre->safe_window_end = DIV_ROUND_UP(height, 4) - 1;
writel(bufaddr, pre->regs + IPU_PRE_CUR_BUF);
writel(bufaddr, pre->regs + IPU_PRE_NEXT_BUF);
@@ -203,9 +210,25 @@ void ipu_pre_configure(struct ipu_pre *pre, unsigned int width,
writel(pre->buffer_paddr, pre->regs + IPU_PRE_STORE_ENG_ADDR);
+ val = readl(pre->regs + IPU_PRE_TPR_CTRL);
+ val &= ~IPU_PRE_TPR_CTRL_TILE_FORMAT_MASK;
+ if (modifier != DRM_FORMAT_MOD_LINEAR) {
+ /* only support single buffer formats for now */
+ val |= IPU_PRE_TPR_CTRL_TILE_FORMAT_SINGLE_BUF;
+ if (modifier == DRM_FORMAT_MOD_VIVANTE_SUPER_TILED)
+ val |= IPU_PRE_TPR_CTRL_TILE_FORMAT_SUPER_TILED;
+ if (info->cpp[0] == 2)
+ val |= IPU_PRE_TPR_CTRL_TILE_FORMAT_16_BIT;
+ }
+ writel(val, pre->regs + IPU_PRE_TPR_CTRL);
+
val = readl(pre->regs + IPU_PRE_CTRL);
val |= IPU_PRE_CTRL_EN_REPEAT | IPU_PRE_CTRL_ENABLE |
IPU_PRE_CTRL_SDW_UPDATE;
+ if (modifier == DRM_FORMAT_MOD_LINEAR)
+ val &= ~IPU_PRE_CTRL_BLOCK_EN;
+ else
+ val |= IPU_PRE_CTRL_BLOCK_EN;
writel(val, pre->regs + IPU_PRE_CTRL);
}
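A quick worked example of the new PRE programming: for a linear buffer the
safe window still ends two lines before the bottom (height - 2), but for a
1080-line Vivante super-tiled buffer it becomes DIV_ROUND_UP(1080, 4) - 1 =
269, presumably because the engine walks tiled input in four-line tile rows.
In that same case the TPR control register gets the SINGLE_BUF and
SUPER_TILED bits (plus 16_BIT for a two-byte-per-pixel format), and
IPU_PRE_CTRL_BLOCK_EN is set only for non-linear modifiers.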
diff --git a/drivers/gpu/ipu-v3/ipu-prg.c b/drivers/gpu/ipu-v3/ipu-prg.c
index 0013ca9f72c8..067365c733c6 100644
--- a/drivers/gpu/ipu-v3/ipu-prg.c
+++ b/drivers/gpu/ipu-v3/ipu-prg.c
@@ -20,6 +20,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <video/imx-ipu-v3.h>
@@ -132,28 +133,25 @@ bool ipu_prg_format_supported(struct ipu_soc *ipu, uint32_t format,
if (info->num_planes != 1)
return false;
- return true;
+ switch (modifier) {
+ case DRM_FORMAT_MOD_LINEAR:
+ case DRM_FORMAT_MOD_VIVANTE_TILED:
+ case DRM_FORMAT_MOD_VIVANTE_SUPER_TILED:
+ return true;
+ default:
+ return false;
+ }
}
EXPORT_SYMBOL_GPL(ipu_prg_format_supported);
int ipu_prg_enable(struct ipu_soc *ipu)
{
struct ipu_prg *prg = ipu->prg_priv;
- int ret;
if (!prg)
return 0;
- ret = clk_prepare_enable(prg->clk_axi);
- if (ret)
- goto fail_disable_ipg;
-
- return 0;
-
-fail_disable_ipg:
- clk_disable_unprepare(prg->clk_ipg);
-
- return ret;
+ return pm_runtime_get_sync(prg->dev);
}
EXPORT_SYMBOL_GPL(ipu_prg_enable);
@@ -164,7 +162,7 @@ void ipu_prg_disable(struct ipu_soc *ipu)
if (!prg)
return;
- clk_disable_unprepare(prg->clk_axi);
+ pm_runtime_put(prg->dev);
}
EXPORT_SYMBOL_GPL(ipu_prg_disable);
@@ -255,7 +253,7 @@ void ipu_prg_channel_disable(struct ipuv3_channel *ipu_chan)
if (!chan->enabled || prg_chan < 0)
return;
- clk_prepare_enable(prg->clk_ipg);
+ pm_runtime_get_sync(prg->dev);
val = readl(prg->regs + IPU_PRG_CTL);
val |= IPU_PRG_CTL_BYPASS(prg_chan);
@@ -264,7 +262,7 @@ void ipu_prg_channel_disable(struct ipuv3_channel *ipu_chan)
val = IPU_PRG_REG_UPDATE_REG_UPDATE;
writel(val, prg->regs + IPU_PRG_REG_UPDATE);
- clk_disable_unprepare(prg->clk_ipg);
+ pm_runtime_put(prg->dev);
ipu_prg_put_pre(prg, prg_chan);
@@ -275,7 +273,7 @@ EXPORT_SYMBOL_GPL(ipu_prg_channel_disable);
int ipu_prg_channel_configure(struct ipuv3_channel *ipu_chan,
unsigned int axi_id, unsigned int width,
unsigned int height, unsigned int stride,
- u32 format, unsigned long *eba)
+ u32 format, uint64_t modifier, unsigned long *eba)
{
int prg_chan = ipu_prg_ipu_to_prg_chan(ipu_chan->num);
struct ipu_prg *prg = ipu_chan->ipu->prg_priv;
@@ -296,14 +294,10 @@ int ipu_prg_channel_configure(struct ipuv3_channel *ipu_chan,
return ret;
ipu_pre_configure(prg->pres[chan->used_pre],
- width, height, stride, format, *eba);
+ width, height, stride, format, modifier, *eba);
- ret = clk_prepare_enable(prg->clk_ipg);
- if (ret) {
- ipu_prg_put_pre(prg, prg_chan);
- return ret;
- }
+ pm_runtime_get_sync(prg->dev);
val = (stride - 1) & IPU_PRG_STRIDE_STRIDE_MASK;
writel(val, prg->regs + IPU_PRG_STRIDE(prg_chan));
@@ -336,7 +330,7 @@ int ipu_prg_channel_configure(struct ipuv3_channel *ipu_chan,
(val & IPU_PRG_STATUS_BUFFER1_READY(prg_chan)),
5, 1000);
- clk_disable_unprepare(prg->clk_ipg);
+ pm_runtime_put(prg->dev);
chan->enabled = true;
return 0;
@@ -384,6 +378,12 @@ static int ipu_prg_probe(struct platform_device *pdev)
if (ret)
return ret;
+ ret = clk_prepare_enable(prg->clk_axi);
+ if (ret) {
+ clk_disable_unprepare(prg->clk_ipg);
+ return ret;
+ }
+
/* init to free running mode */
val = readl(prg->regs + IPU_PRG_CTL);
val |= IPU_PRG_CTL_SHADOW_EN;
@@ -392,7 +392,8 @@ static int ipu_prg_probe(struct platform_device *pdev)
/* disable address threshold */
writel(0xffffffff, prg->regs + IPU_PRG_THD);
- clk_disable_unprepare(prg->clk_ipg);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
prg->dev = dev;
platform_set_drvdata(pdev, prg);
@@ -414,6 +415,40 @@ static int ipu_prg_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM
+static int prg_suspend(struct device *dev)
+{
+ struct ipu_prg *prg = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(prg->clk_axi);
+ clk_disable_unprepare(prg->clk_ipg);
+
+ return 0;
+}
+
+static int prg_resume(struct device *dev)
+{
+ struct ipu_prg *prg = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(prg->clk_ipg);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(prg->clk_axi);
+ if (ret) {
+ clk_disable_unprepare(prg->clk_ipg);
+ return ret;
+ }
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops prg_pm_ops = {
+ SET_RUNTIME_PM_OPS(prg_suspend, prg_resume, NULL)
+};
+
static const struct of_device_id ipu_prg_dt_ids[] = {
{ .compatible = "fsl,imx6qp-prg", },
{ /* sentinel */ },
@@ -424,6 +459,7 @@ struct platform_driver ipu_prg_drv = {
.remove = ipu_prg_remove,
.driver = {
.name = "imx-ipu-prg",
+ .pm = &prg_pm_ops,
.of_match_table = ipu_prg_dt_ids,
},
};
diff --git a/drivers/gpu/ipu-v3/ipu-prv.h b/drivers/gpu/ipu-v3/ipu-prv.h
index ac4b8d658500..d6beee99b6b8 100644
--- a/drivers/gpu/ipu-v3/ipu-prv.h
+++ b/drivers/gpu/ipu-v3/ipu-prv.h
@@ -269,8 +269,8 @@ int ipu_pre_get(struct ipu_pre *pre);
void ipu_pre_put(struct ipu_pre *pre);
u32 ipu_pre_get_baddr(struct ipu_pre *pre);
void ipu_pre_configure(struct ipu_pre *pre, unsigned int width,
- unsigned int height,
- unsigned int stride, u32 format, unsigned int bufaddr);
+ unsigned int height, unsigned int stride, u32 format,
+ uint64_t modifier, unsigned int bufaddr);
void ipu_pre_update(struct ipu_pre *pre, unsigned int bufaddr);
struct ipu_prg *ipu_prg_lookup_by_phandle(struct device *dev, const char *name,