| field | value | date |
|---|---|---|
| author | Dmitry Torokhov <dmitry.torokhov@gmail.com> | 2017-01-24 09:57:18 -0800 |
| committer | Dmitry Torokhov <dmitry.torokhov@gmail.com> | 2017-01-24 09:57:18 -0800 |
| commit | 62ed8ceda1699acae01b666497f004bfd3d67a6f (patch) | |
| tree | fe38c83c49dfd568b540666948ef78cb9d082c38 /drivers/gpu/drm/amd/amdgpu/vi.c | |
| parent | 1c3415a06b1016a596bfe59e0cfee56c773aa958 (diff) | |
| parent | 7a308bb3016f57e5be11a677d15b821536419d36 (diff) | |
Merge tag 'v4.10-rc5' into for-linus
Sync up with mainline to apply a fixup to a commit that came in through the power supply tree.
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/vi.c')
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | drivers/gpu/drm/amd/amdgpu/vi.c | 863 |
1 file changed, 442 insertions, 421 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index 03a31c53aec3..c2ac54f11341 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c @@ -77,11 +77,18 @@  #if defined(CONFIG_DRM_AMD_ACP)  #include "amdgpu_acp.h"  #endif +#include "dce_virtual.h" +MODULE_FIRMWARE("amdgpu/topaz_smc.bin"); +MODULE_FIRMWARE("amdgpu/topaz_k_smc.bin"); +MODULE_FIRMWARE("amdgpu/tonga_smc.bin"); +MODULE_FIRMWARE("amdgpu/tonga_k_smc.bin"); +MODULE_FIRMWARE("amdgpu/fiji_smc.bin");  MODULE_FIRMWARE("amdgpu/polaris10_smc.bin");  MODULE_FIRMWARE("amdgpu/polaris10_smc_sk.bin");  MODULE_FIRMWARE("amdgpu/polaris11_smc.bin");  MODULE_FIRMWARE("amdgpu/polaris11_smc_sk.bin"); +MODULE_FIRMWARE("amdgpu/polaris12_smc.bin");  /*   * Indirect registers accessor @@ -117,8 +124,8 @@ static u32 vi_smc_rreg(struct amdgpu_device *adev, u32 reg)  	u32 r;  	spin_lock_irqsave(&adev->smc_idx_lock, flags); -	WREG32(mmSMC_IND_INDEX_0, (reg)); -	r = RREG32(mmSMC_IND_DATA_0); +	WREG32(mmSMC_IND_INDEX_11, (reg)); +	r = RREG32(mmSMC_IND_DATA_11);  	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);  	return r;  } @@ -128,8 +135,8 @@ static void vi_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)  	unsigned long flags;  	spin_lock_irqsave(&adev->smc_idx_lock, flags); -	WREG32(mmSMC_IND_INDEX_0, (reg)); -	WREG32(mmSMC_IND_DATA_0, (v)); +	WREG32(mmSMC_IND_INDEX_11, (reg)); +	WREG32(mmSMC_IND_DATA_11, (v));  	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);  } @@ -306,6 +313,7 @@ static void vi_init_golden_registers(struct amdgpu_device *adev)  		break;  	case CHIP_POLARIS11:  	case CHIP_POLARIS10: +	case CHIP_POLARIS12:  	default:  		break;  	} @@ -433,29 +441,32 @@ static bool vi_read_bios_from_rom(struct amdgpu_device *adev,  	/* take the smc lock since we are using the smc index */  	spin_lock_irqsave(&adev->smc_idx_lock, flags);  	/* set rom index to 0 */ -	WREG32(mmSMC_IND_INDEX_0, ixROM_INDEX); -	WREG32(mmSMC_IND_DATA_0, 0); +	WREG32(mmSMC_IND_INDEX_11, ixROM_INDEX); +	WREG32(mmSMC_IND_DATA_11, 0);  	/* set index to data for continous read */ -	WREG32(mmSMC_IND_INDEX_0, ixROM_DATA); +	WREG32(mmSMC_IND_INDEX_11, ixROM_DATA);  	for (i = 0; i < length_dw; i++) -		dw_ptr[i] = RREG32(mmSMC_IND_DATA_0); +		dw_ptr[i] = RREG32(mmSMC_IND_DATA_11);  	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);  	return true;  } -static u32 vi_get_virtual_caps(struct amdgpu_device *adev) +static void vi_detect_hw_virtualization(struct amdgpu_device *adev)  { -	u32 caps = 0; -	u32 reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER); +	uint32_t reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER); +	/* bit0: 0 means pf and 1 means vf */ +	/* bit31: 0 means disable IOV and 1 means enable */ +	if (reg & 1) +		adev->virtualization.virtual_caps |= AMDGPU_SRIOV_CAPS_IS_VF; -	if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, IOV_ENABLE)) -		caps |= AMDGPU_VIRT_CAPS_SRIOV_EN; +	if (reg & 0x80000000) +		adev->virtualization.virtual_caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV; -	if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, FUNC_IDENTIFIER)) -		caps |= AMDGPU_VIRT_CAPS_IS_VF; - -	return caps; +	if (reg == 0) { +		if (is_virtual_machine()) /* passthrough mode exclus sr-iov mode */ +			adev->virtualization.virtual_caps |= AMDGPU_PASSTHROUGH_MODE; +	}  }  static const struct amdgpu_allowed_register_entry tonga_allowed_read_registers[] = { @@ -549,21 +560,100 @@ static const struct amdgpu_allowed_register_entry vi_allowed_read_registers[] =  	{mmPA_SC_RASTER_CONFIG_1, false, true},  }; -static uint32_t vi_read_indexed_register(struct 
amdgpu_device *adev, u32 se_num, -					 u32 sh_num, u32 reg_offset) +static uint32_t vi_get_register_value(struct amdgpu_device *adev, +				      bool indexed, u32 se_num, +				      u32 sh_num, u32 reg_offset)  { -	uint32_t val; +	if (indexed) { +		uint32_t val; +		unsigned se_idx = (se_num == 0xffffffff) ? 0 : se_num; +		unsigned sh_idx = (sh_num == 0xffffffff) ? 0 : sh_num; -	mutex_lock(&adev->grbm_idx_mutex); -	if (se_num != 0xffffffff || sh_num != 0xffffffff) -		amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff); +		switch (reg_offset) { +		case mmCC_RB_BACKEND_DISABLE: +			return adev->gfx.config.rb_config[se_idx][sh_idx].rb_backend_disable; +		case mmGC_USER_RB_BACKEND_DISABLE: +			return adev->gfx.config.rb_config[se_idx][sh_idx].user_rb_backend_disable; +		case mmPA_SC_RASTER_CONFIG: +			return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config; +		case mmPA_SC_RASTER_CONFIG_1: +			return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config_1; +		} -	val = RREG32(reg_offset); +		mutex_lock(&adev->grbm_idx_mutex); +		if (se_num != 0xffffffff || sh_num != 0xffffffff) +			amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff); -	if (se_num != 0xffffffff || sh_num != 0xffffffff) -		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); -	mutex_unlock(&adev->grbm_idx_mutex); -	return val; +		val = RREG32(reg_offset); + +		if (se_num != 0xffffffff || sh_num != 0xffffffff) +			amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); +		mutex_unlock(&adev->grbm_idx_mutex); +		return val; +	} else { +		unsigned idx; + +		switch (reg_offset) { +		case mmGB_ADDR_CONFIG: +			return adev->gfx.config.gb_addr_config; +		case mmMC_ARB_RAMCFG: +			return adev->gfx.config.mc_arb_ramcfg; +		case mmGB_TILE_MODE0: +		case mmGB_TILE_MODE1: +		case mmGB_TILE_MODE2: +		case mmGB_TILE_MODE3: +		case mmGB_TILE_MODE4: +		case mmGB_TILE_MODE5: +		case mmGB_TILE_MODE6: +		case mmGB_TILE_MODE7: +		case mmGB_TILE_MODE8: +		case mmGB_TILE_MODE9: +		case mmGB_TILE_MODE10: +		case mmGB_TILE_MODE11: +		case mmGB_TILE_MODE12: +		case mmGB_TILE_MODE13: +		case mmGB_TILE_MODE14: +		case mmGB_TILE_MODE15: +		case mmGB_TILE_MODE16: +		case mmGB_TILE_MODE17: +		case mmGB_TILE_MODE18: +		case mmGB_TILE_MODE19: +		case mmGB_TILE_MODE20: +		case mmGB_TILE_MODE21: +		case mmGB_TILE_MODE22: +		case mmGB_TILE_MODE23: +		case mmGB_TILE_MODE24: +		case mmGB_TILE_MODE25: +		case mmGB_TILE_MODE26: +		case mmGB_TILE_MODE27: +		case mmGB_TILE_MODE28: +		case mmGB_TILE_MODE29: +		case mmGB_TILE_MODE30: +		case mmGB_TILE_MODE31: +			idx = (reg_offset - mmGB_TILE_MODE0); +			return adev->gfx.config.tile_mode_array[idx]; +		case mmGB_MACROTILE_MODE0: +		case mmGB_MACROTILE_MODE1: +		case mmGB_MACROTILE_MODE2: +		case mmGB_MACROTILE_MODE3: +		case mmGB_MACROTILE_MODE4: +		case mmGB_MACROTILE_MODE5: +		case mmGB_MACROTILE_MODE6: +		case mmGB_MACROTILE_MODE7: +		case mmGB_MACROTILE_MODE8: +		case mmGB_MACROTILE_MODE9: +		case mmGB_MACROTILE_MODE10: +		case mmGB_MACROTILE_MODE11: +		case mmGB_MACROTILE_MODE12: +		case mmGB_MACROTILE_MODE13: +		case mmGB_MACROTILE_MODE14: +		case mmGB_MACROTILE_MODE15: +			idx = (reg_offset - mmGB_MACROTILE_MODE0); +			return adev->gfx.config.macrotile_mode_array[idx]; +		default: +			return RREG32(reg_offset); +		} +	}  }  static int vi_read_register(struct amdgpu_device *adev, u32 se_num, @@ -583,6 +673,7 @@ static int vi_read_register(struct amdgpu_device *adev, u32 se_num,  	case CHIP_TONGA:  	case CHIP_POLARIS11:  	case CHIP_POLARIS10: +	case CHIP_POLARIS12:  	
case CHIP_CARRIZO:  	case CHIP_STONEY:  		asic_register_table = cz_allowed_read_registers; @@ -598,10 +689,9 @@ static int vi_read_register(struct amdgpu_device *adev, u32 se_num,  			if (reg_offset != asic_register_entry->reg_offset)  				continue;  			if (!asic_register_entry->untouched) -				*value = asic_register_entry->grbm_indexed ? -					vi_read_indexed_register(adev, se_num, -								 sh_num, reg_offset) : -					RREG32(reg_offset); +				*value = vi_get_register_value(adev, +							       asic_register_entry->grbm_indexed, +							       se_num, sh_num, reg_offset);  			return 0;  		}  	} @@ -611,10 +701,9 @@ static int vi_read_register(struct amdgpu_device *adev, u32 se_num,  			continue;  		if (!vi_allowed_read_registers[i].untouched) -			*value = vi_allowed_read_registers[i].grbm_indexed ? -				vi_read_indexed_register(adev, se_num, -							 sh_num, reg_offset) : -				RREG32(reg_offset); +			*value = vi_get_register_value(adev, +						       vi_allowed_read_registers[i].grbm_indexed, +						       se_num, sh_num, reg_offset);  		return 0;  	}  	return -EINVAL; @@ -645,18 +734,6 @@ static int vi_gpu_pci_config_reset(struct amdgpu_device *adev)  	return -EINVAL;  } -static void vi_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hung) -{ -	u32 tmp = RREG32(mmBIOS_SCRATCH_3); - -	if (hung) -		tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG; -	else -		tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG; - -	WREG32(mmBIOS_SCRATCH_3, tmp); -} -  /**   * vi_asic_reset - soft reset GPU   * @@ -670,11 +747,11 @@ static int vi_asic_reset(struct amdgpu_device *adev)  {  	int r; -	vi_set_bios_scratch_engine_hung(adev, true); +	amdgpu_atombios_scratch_regs_engine_hung(adev, true);  	r = vi_gpu_pci_config_reset(adev); -	vi_set_bios_scratch_engine_hung(adev, false); +	amdgpu_atombios_scratch_regs_engine_hung(adev, false);  	return r;  } @@ -774,368 +851,6 @@ static void vi_enable_doorbell_aperture(struct amdgpu_device *adev,  	WREG32(mmBIF_DOORBELL_APER_EN, tmp);  } -/* topaz has no DCE, UVD, VCE */ -static const struct amdgpu_ip_block_version topaz_ip_blocks[] = -{ -	/* ORDER MATTERS! */ -	{ -		.type = AMD_IP_BLOCK_TYPE_COMMON, -		.major = 2, -		.minor = 0, -		.rev = 0, -		.funcs = &vi_common_ip_funcs, -	}, -	{ -		.type = AMD_IP_BLOCK_TYPE_GMC, -		.major = 7, -		.minor = 4, -		.rev = 0, -		.funcs = &gmc_v7_0_ip_funcs, -	}, -	{ -		.type = AMD_IP_BLOCK_TYPE_IH, -		.major = 2, -		.minor = 4, -		.rev = 0, -		.funcs = &iceland_ih_ip_funcs, -	}, -	{ -		.type = AMD_IP_BLOCK_TYPE_SMC, -		.major = 7, -		.minor = 1, -		.rev = 0, -		.funcs = &amdgpu_pp_ip_funcs, -	}, -	{ -		.type = AMD_IP_BLOCK_TYPE_GFX, -		.major = 8, -		.minor = 0, -		.rev = 0, -		.funcs = &gfx_v8_0_ip_funcs, -	}, -	{ -		.type = AMD_IP_BLOCK_TYPE_SDMA, -		.major = 2, -		.minor = 4, -		.rev = 0, -		.funcs = &sdma_v2_4_ip_funcs, -	}, -}; - -static const struct amdgpu_ip_block_version tonga_ip_blocks[] = -{ -	/* ORDER MATTERS! 
*/ -	{ -		.type = AMD_IP_BLOCK_TYPE_COMMON, -		.major = 2, -		.minor = 0, -		.rev = 0, -		.funcs = &vi_common_ip_funcs, -	}, -	{ -		.type = AMD_IP_BLOCK_TYPE_GMC, -		.major = 8, -		.minor = 0, -		.rev = 0, -		.funcs = &gmc_v8_0_ip_funcs, -	}, -	{ -		.type = AMD_IP_BLOCK_TYPE_IH, -		.major = 3, -		.minor = 0, -		.rev = 0, -		.funcs = &tonga_ih_ip_funcs, -	}, -	{ -		.type = AMD_IP_BLOCK_TYPE_SMC, -		.major = 7, -		.minor = 1, -		.rev = 0, -		.funcs = &amdgpu_pp_ip_funcs, -	}, -	{ -		.type = AMD_IP_BLOCK_TYPE_DCE, -		.major = 10, -		.minor = 0, -		.rev = 0, -		.funcs = &dce_v10_0_ip_funcs, -	}, -	{ -		.type = AMD_IP_BLOCK_TYPE_GFX, -		.major = 8, -		.minor = 0, -		.rev = 0, -		.funcs = &gfx_v8_0_ip_funcs, -	}, -	{ -		.type = AMD_IP_BLOCK_TYPE_SDMA, -		.major = 3, -		.minor = 0, -		.rev = 0, -		.funcs = &sdma_v3_0_ip_funcs, -	}, -	{ -		.type = AMD_IP_BLOCK_TYPE_UVD, -		.major = 5, -		.minor = 0, -		.rev = 0, -		.funcs = &uvd_v5_0_ip_funcs, -	}, -	{ -		.type = AMD_IP_BLOCK_TYPE_VCE, -		.major = 3, -		.minor = 0, -		.rev = 0, -		.funcs = &vce_v3_0_ip_funcs, -	}, -}; - -static const struct amdgpu_ip_block_version fiji_ip_blocks[] = -{ -	/* ORDER MATTERS! */ -	{ -		.type = AMD_IP_BLOCK_TYPE_COMMON, -		.major = 2, -		.minor = 0, -		.rev = 0, -		.funcs = &vi_common_ip_funcs, -	}, -	{ -		.type = AMD_IP_BLOCK_TYPE_GMC, -		.major = 8, -		.minor = 5, -		.rev = 0, -		.funcs = &gmc_v8_0_ip_funcs, -	}, -	{ -		.type = AMD_IP_BLOCK_TYPE_IH, -		.major = 3, -		.minor = 0, -		.rev = 0, -		.funcs = &tonga_ih_ip_funcs, -	}, -	{ -		.type = AMD_IP_BLOCK_TYPE_SMC, -		.major = 7, -		.minor = 1, -		.rev = 0, -		.funcs = &amdgpu_pp_ip_funcs, -	}, -	{ -		.type = AMD_IP_BLOCK_TYPE_DCE, -		.major = 10, -		.minor = 1, -		.rev = 0, -		.funcs = &dce_v10_0_ip_funcs, -	}, -	{ -		.type = AMD_IP_BLOCK_TYPE_GFX, -		.major = 8, -		.minor = 0, -		.rev = 0, -		.funcs = &gfx_v8_0_ip_funcs, -	}, -	{ -		.type = AMD_IP_BLOCK_TYPE_SDMA, -		.major = 3, -		.minor = 0, -		.rev = 0, -		.funcs = &sdma_v3_0_ip_funcs, -	}, -	{ -		.type = AMD_IP_BLOCK_TYPE_UVD, -		.major = 6, -		.minor = 0, -		.rev = 0, -		.funcs = &uvd_v6_0_ip_funcs, -	}, -	{ -		.type = AMD_IP_BLOCK_TYPE_VCE, -		.major = 3, -		.minor = 0, -		.rev = 0, -		.funcs = &vce_v3_0_ip_funcs, -	}, -}; - -static const struct amdgpu_ip_block_version polaris11_ip_blocks[] = -{ -	/* ORDER MATTERS! */ -	{ -		.type = AMD_IP_BLOCK_TYPE_COMMON, -		.major = 2, -		.minor = 0, -		.rev = 0, -		.funcs = &vi_common_ip_funcs, -	}, -	{ -		.type = AMD_IP_BLOCK_TYPE_GMC, -		.major = 8, -		.minor = 1, -		.rev = 0, -		.funcs = &gmc_v8_0_ip_funcs, -	}, -	{ -		.type = AMD_IP_BLOCK_TYPE_IH, -		.major = 3, -		.minor = 1, -		.rev = 0, -		.funcs = &tonga_ih_ip_funcs, -	}, -	{ -		.type = AMD_IP_BLOCK_TYPE_SMC, -		.major = 7, -		.minor = 2, -		.rev = 0, -		.funcs = &amdgpu_pp_ip_funcs, -	}, -	{ -		.type = AMD_IP_BLOCK_TYPE_DCE, -		.major = 11, -		.minor = 2, -		.rev = 0, -		.funcs = &dce_v11_0_ip_funcs, -	}, -	{ -		.type = AMD_IP_BLOCK_TYPE_GFX, -		.major = 8, -		.minor = 0, -		.rev = 0, -		.funcs = &gfx_v8_0_ip_funcs, -	}, -	{ -		.type = AMD_IP_BLOCK_TYPE_SDMA, -		.major = 3, -		.minor = 1, -		.rev = 0, -		.funcs = &sdma_v3_0_ip_funcs, -	}, -	{ -		.type = AMD_IP_BLOCK_TYPE_UVD, -		.major = 6, -		.minor = 3, -		.rev = 0, -		.funcs = &uvd_v6_0_ip_funcs, -	}, -	{ -		.type = AMD_IP_BLOCK_TYPE_VCE, -		.major = 3, -		.minor = 4, -		.rev = 0, -		.funcs = &vce_v3_0_ip_funcs, -	}, -}; - -static const struct amdgpu_ip_block_version cz_ip_blocks[] = -{ -	/* ORDER MATTERS! 
*/ -	{ -		.type = AMD_IP_BLOCK_TYPE_COMMON, -		.major = 2, -		.minor = 0, -		.rev = 0, -		.funcs = &vi_common_ip_funcs, -	}, -	{ -		.type = AMD_IP_BLOCK_TYPE_GMC, -		.major = 8, -		.minor = 0, -		.rev = 0, -		.funcs = &gmc_v8_0_ip_funcs, -	}, -	{ -		.type = AMD_IP_BLOCK_TYPE_IH, -		.major = 3, -		.minor = 0, -		.rev = 0, -		.funcs = &cz_ih_ip_funcs, -	}, -	{ -		.type = AMD_IP_BLOCK_TYPE_SMC, -		.major = 8, -		.minor = 0, -		.rev = 0, -		.funcs = &amdgpu_pp_ip_funcs -	}, -	{ -		.type = AMD_IP_BLOCK_TYPE_DCE, -		.major = 11, -		.minor = 0, -		.rev = 0, -		.funcs = &dce_v11_0_ip_funcs, -	}, -	{ -		.type = AMD_IP_BLOCK_TYPE_GFX, -		.major = 8, -		.minor = 0, -		.rev = 0, -		.funcs = &gfx_v8_0_ip_funcs, -	}, -	{ -		.type = AMD_IP_BLOCK_TYPE_SDMA, -		.major = 3, -		.minor = 0, -		.rev = 0, -		.funcs = &sdma_v3_0_ip_funcs, -	}, -	{ -		.type = AMD_IP_BLOCK_TYPE_UVD, -		.major = 6, -		.minor = 0, -		.rev = 0, -		.funcs = &uvd_v6_0_ip_funcs, -	}, -	{ -		.type = AMD_IP_BLOCK_TYPE_VCE, -		.major = 3, -		.minor = 0, -		.rev = 0, -		.funcs = &vce_v3_0_ip_funcs, -	}, -#if defined(CONFIG_DRM_AMD_ACP) -	{ -		.type = AMD_IP_BLOCK_TYPE_ACP, -		.major = 2, -		.minor = 2, -		.rev = 0, -		.funcs = &acp_ip_funcs, -	}, -#endif -}; - -int vi_set_ip_blocks(struct amdgpu_device *adev) -{ -	switch (adev->asic_type) { -	case CHIP_TOPAZ: -		adev->ip_blocks = topaz_ip_blocks; -		adev->num_ip_blocks = ARRAY_SIZE(topaz_ip_blocks); -		break; -	case CHIP_FIJI: -		adev->ip_blocks = fiji_ip_blocks; -		adev->num_ip_blocks = ARRAY_SIZE(fiji_ip_blocks); -		break; -	case CHIP_TONGA: -		adev->ip_blocks = tonga_ip_blocks; -		adev->num_ip_blocks = ARRAY_SIZE(tonga_ip_blocks); -		break; -	case CHIP_POLARIS11: -	case CHIP_POLARIS10: -		adev->ip_blocks = polaris11_ip_blocks; -		adev->num_ip_blocks = ARRAY_SIZE(polaris11_ip_blocks); -		break; -	case CHIP_CARRIZO: -	case CHIP_STONEY: -		adev->ip_blocks = cz_ip_blocks; -		adev->num_ip_blocks = ARRAY_SIZE(cz_ip_blocks); -		break; -	default: -		/* FIXME: not supported yet */ -		return -EINVAL; -	} - -	return 0; -} -  #define ATI_REV_ID_FUSE_MACRO__ADDRESS      0xC0014044  #define ATI_REV_ID_FUSE_MACRO__SHIFT        9  #define ATI_REV_ID_FUSE_MACRO__MASK         0x00001E00 @@ -1154,13 +869,13 @@ static const struct amdgpu_asic_funcs vi_asic_funcs =  {  	.read_disabled_bios = &vi_read_disabled_bios,  	.read_bios_from_rom = &vi_read_bios_from_rom, +	.detect_hw_virtualization = vi_detect_hw_virtualization,  	.read_register = &vi_read_register,  	.reset = &vi_asic_reset,  	.set_vga_state = &vi_vga_set_state,  	.get_xclk = &vi_get_xclk,  	.set_uvd_clocks = &vi_set_uvd_clocks,  	.set_vce_clocks = &vi_set_vce_clocks, -	.get_virtual_caps = &vi_get_virtual_caps,  };  static int vi_common_early_init(void *handle) @@ -1214,25 +929,79 @@ static int vi_common_early_init(void *handle)  			AMD_CG_SUPPORT_HDP_LS |  			AMD_CG_SUPPORT_ROM_MGCG |  			AMD_CG_SUPPORT_MC_MGCG | -			AMD_CG_SUPPORT_MC_LS; +			AMD_CG_SUPPORT_MC_LS | +			AMD_CG_SUPPORT_UVD_MGCG;  		adev->pg_flags = 0;  		adev->external_rev_id = adev->rev_id + 0x3c;  		break;  	case CHIP_TONGA: -		adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG; +		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | +			AMD_CG_SUPPORT_GFX_CGCG | +			AMD_CG_SUPPORT_GFX_CGLS | +			AMD_CG_SUPPORT_SDMA_MGCG | +			AMD_CG_SUPPORT_SDMA_LS | +			AMD_CG_SUPPORT_BIF_LS | +			AMD_CG_SUPPORT_HDP_MGCG | +			AMD_CG_SUPPORT_HDP_LS | +			AMD_CG_SUPPORT_ROM_MGCG | +			AMD_CG_SUPPORT_MC_MGCG | +			AMD_CG_SUPPORT_MC_LS | +			AMD_CG_SUPPORT_DRM_LS | +			AMD_CG_SUPPORT_UVD_MGCG;  		adev->pg_flags = 0;  		
adev->external_rev_id = adev->rev_id + 0x14;  		break;  	case CHIP_POLARIS11: -		adev->cg_flags = 0; +		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | +			AMD_CG_SUPPORT_GFX_RLC_LS | +			AMD_CG_SUPPORT_GFX_CP_LS | +			AMD_CG_SUPPORT_GFX_CGCG | +			AMD_CG_SUPPORT_GFX_CGLS | +			AMD_CG_SUPPORT_GFX_3D_CGCG | +			AMD_CG_SUPPORT_GFX_3D_CGLS | +			AMD_CG_SUPPORT_SDMA_MGCG | +			AMD_CG_SUPPORT_SDMA_LS | +			AMD_CG_SUPPORT_BIF_MGCG | +			AMD_CG_SUPPORT_BIF_LS | +			AMD_CG_SUPPORT_HDP_MGCG | +			AMD_CG_SUPPORT_HDP_LS | +			AMD_CG_SUPPORT_ROM_MGCG | +			AMD_CG_SUPPORT_MC_MGCG | +			AMD_CG_SUPPORT_MC_LS | +			AMD_CG_SUPPORT_DRM_LS | +			AMD_CG_SUPPORT_UVD_MGCG | +			AMD_CG_SUPPORT_VCE_MGCG;  		adev->pg_flags = 0;  		adev->external_rev_id = adev->rev_id + 0x5A;  		break;  	case CHIP_POLARIS10: -		adev->cg_flags = 0; +		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | +			AMD_CG_SUPPORT_GFX_RLC_LS | +			AMD_CG_SUPPORT_GFX_CP_LS | +			AMD_CG_SUPPORT_GFX_CGCG | +			AMD_CG_SUPPORT_GFX_CGLS | +			AMD_CG_SUPPORT_GFX_3D_CGCG | +			AMD_CG_SUPPORT_GFX_3D_CGLS | +			AMD_CG_SUPPORT_SDMA_MGCG | +			AMD_CG_SUPPORT_SDMA_LS | +			AMD_CG_SUPPORT_BIF_MGCG | +			AMD_CG_SUPPORT_BIF_LS | +			AMD_CG_SUPPORT_HDP_MGCG | +			AMD_CG_SUPPORT_HDP_LS | +			AMD_CG_SUPPORT_ROM_MGCG | +			AMD_CG_SUPPORT_MC_MGCG | +			AMD_CG_SUPPORT_MC_LS | +			AMD_CG_SUPPORT_DRM_LS | +			AMD_CG_SUPPORT_UVD_MGCG | +			AMD_CG_SUPPORT_VCE_MGCG;  		adev->pg_flags = 0;  		adev->external_rev_id = adev->rev_id + 0x50;  		break; +	case CHIP_POLARIS12: +		adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG; +		adev->pg_flags = 0; +		adev->external_rev_id = adev->rev_id + 0x64; +		break;  	case CHIP_CARRIZO:  		adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |  			AMD_CG_SUPPORT_GFX_MGCG | @@ -1248,8 +1017,18 @@ static int vi_common_early_init(void *handle)  			AMD_CG_SUPPORT_HDP_MGCG |  			AMD_CG_SUPPORT_HDP_LS |  			AMD_CG_SUPPORT_SDMA_MGCG | -			AMD_CG_SUPPORT_SDMA_LS; +			AMD_CG_SUPPORT_SDMA_LS | +			AMD_CG_SUPPORT_VCE_MGCG; +		/* rev0 hardware requires workarounds to support PG */  		adev->pg_flags = 0; +		if (adev->rev_id != 0x00) { +			adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG | +				AMD_PG_SUPPORT_GFX_SMG | +				AMD_PG_SUPPORT_GFX_PIPELINE | +				AMD_PG_SUPPORT_CP | +				AMD_PG_SUPPORT_UVD | +				AMD_PG_SUPPORT_VCE; +		}  		adev->external_rev_id = adev->rev_id + 0x1;  		break;  	case CHIP_STONEY: @@ -1267,14 +1046,25 @@ static int vi_common_early_init(void *handle)  			AMD_CG_SUPPORT_HDP_MGCG |  			AMD_CG_SUPPORT_HDP_LS |  			AMD_CG_SUPPORT_SDMA_MGCG | -			AMD_CG_SUPPORT_SDMA_LS; -		adev->external_rev_id = adev->rev_id + 0x1; +			AMD_CG_SUPPORT_SDMA_LS | +			AMD_CG_SUPPORT_VCE_MGCG; +		adev->pg_flags = AMD_PG_SUPPORT_GFX_PG | +			AMD_PG_SUPPORT_GFX_SMG | +			AMD_PG_SUPPORT_GFX_PIPELINE | +			AMD_PG_SUPPORT_CP | +			AMD_PG_SUPPORT_UVD | +			AMD_PG_SUPPORT_VCE; +		adev->external_rev_id = adev->rev_id + 0x61;  		break;  	default:  		/* FIXME: not supported yet */  		return -EINVAL;  	} +	/* in early init stage, vbios code won't work */ +	if (adev->asic_funcs->detect_hw_virtualization) +		amdgpu_asic_detect_hw_virtualization(adev); +  	if (amdgpu_smc_load_fw && smc_enabled)  		adev->firmware.smu_load = true; @@ -1418,6 +1208,124 @@ static void vi_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev,  		WREG32_SMC(ixCGTT_ROM_CLK_CTRL0, data);  } +static int vi_common_set_clockgating_state_by_smu(void *handle, +					   enum amd_clockgating_state state) +{ +	uint32_t msg_id, pp_state = 0; +	uint32_t pp_support_state = 0; +	struct amdgpu_device *adev = (struct amdgpu_device 
*)handle; +	void *pp_handle = adev->powerplay.pp_handle; + +	if (adev->cg_flags & (AMD_CG_SUPPORT_MC_LS | AMD_CG_SUPPORT_MC_MGCG)) { +		if (adev->cg_flags & AMD_CG_SUPPORT_MC_LS) { +			pp_support_state = AMD_CG_SUPPORT_MC_LS; +			pp_state = PP_STATE_LS; +		} +		if (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG) { +			pp_support_state |= AMD_CG_SUPPORT_MC_MGCG; +			pp_state |= PP_STATE_CG; +		} +		if (state == AMD_CG_STATE_UNGATE) +			pp_state = 0; +		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS, +			       PP_BLOCK_SYS_MC, +			       pp_support_state, +			       pp_state); +		amd_set_clockgating_by_smu(pp_handle, msg_id); +	} + +	if (adev->cg_flags & (AMD_CG_SUPPORT_SDMA_LS | AMD_CG_SUPPORT_SDMA_MGCG)) { +		if (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS) { +			pp_support_state = AMD_CG_SUPPORT_SDMA_LS; +			pp_state = PP_STATE_LS; +		} +		if (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG) { +			pp_support_state |= AMD_CG_SUPPORT_SDMA_MGCG; +			pp_state |= PP_STATE_CG; +		} +		if (state == AMD_CG_STATE_UNGATE) +			pp_state = 0; +		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS, +			       PP_BLOCK_SYS_SDMA, +			       pp_support_state, +			       pp_state); +		amd_set_clockgating_by_smu(pp_handle, msg_id); +	} + +	if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_MGCG)) { +		if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) { +			pp_support_state = AMD_CG_SUPPORT_HDP_LS; +			pp_state = PP_STATE_LS; +		} +		if (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG) { +			pp_support_state |= AMD_CG_SUPPORT_HDP_MGCG; +			pp_state |= PP_STATE_CG; +		} +		if (state == AMD_CG_STATE_UNGATE) +			pp_state = 0; +		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS, +			       PP_BLOCK_SYS_HDP, +			       pp_support_state, +			       pp_state); +		amd_set_clockgating_by_smu(pp_handle, msg_id); +	} + + +	if (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS) { +		if (state == AMD_CG_STATE_UNGATE) +			pp_state = 0; +		else +			pp_state = PP_STATE_LS; + +		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS, +			       PP_BLOCK_SYS_BIF, +			       PP_STATE_SUPPORT_LS, +			        pp_state); +		amd_set_clockgating_by_smu(pp_handle, msg_id); +	} +	if (adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG) { +		if (state == AMD_CG_STATE_UNGATE) +			pp_state = 0; +		else +			pp_state = PP_STATE_CG; + +		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS, +			       PP_BLOCK_SYS_BIF, +			       PP_STATE_SUPPORT_CG, +			       pp_state); +		amd_set_clockgating_by_smu(pp_handle, msg_id); +	} + +	if (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS) { + +		if (state == AMD_CG_STATE_UNGATE) +			pp_state = 0; +		else +			pp_state = PP_STATE_LS; + +		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS, +			       PP_BLOCK_SYS_DRM, +			       PP_STATE_SUPPORT_LS, +			       pp_state); +		amd_set_clockgating_by_smu(pp_handle, msg_id); +	} + +	if (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG) { + +		if (state == AMD_CG_STATE_UNGATE) +			pp_state = 0; +		else +			pp_state = PP_STATE_CG; + +		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS, +			       PP_BLOCK_SYS_ROM, +			       PP_STATE_SUPPORT_CG, +			       pp_state); +		amd_set_clockgating_by_smu(pp_handle, msg_id); +	} +	return 0; +} +  static int vi_common_set_clockgating_state(void *handle,  					   enum amd_clockgating_state state)  { @@ -1443,6 +1351,11 @@ static int vi_common_set_clockgating_state(void *handle,  		vi_update_hdp_light_sleep(adev,  				state == AMD_CG_STATE_GATE ? 
true : false);  		break; +	case CHIP_TONGA: +	case CHIP_POLARIS10: +	case CHIP_POLARIS11: +	case CHIP_POLARIS12: +		vi_common_set_clockgating_state_by_smu(adev, state);  	default:  		break;  	} @@ -1455,7 +1368,7 @@ static int vi_common_set_powergating_state(void *handle,  	return 0;  } -const struct amd_ip_funcs vi_common_ip_funcs = { +static const struct amd_ip_funcs vi_common_ip_funcs = {  	.name = "vi_common",  	.early_init = vi_common_early_init,  	.late_init = NULL, @@ -1472,3 +1385,111 @@ const struct amd_ip_funcs vi_common_ip_funcs = {  	.set_powergating_state = vi_common_set_powergating_state,  }; +static const struct amdgpu_ip_block_version vi_common_ip_block = +{ +	.type = AMD_IP_BLOCK_TYPE_COMMON, +	.major = 1, +	.minor = 0, +	.rev = 0, +	.funcs = &vi_common_ip_funcs, +}; + +int vi_set_ip_blocks(struct amdgpu_device *adev) +{ +	switch (adev->asic_type) { +	case CHIP_TOPAZ: +		/* topaz has no DCE, UVD, VCE */ +		amdgpu_ip_block_add(adev, &vi_common_ip_block); +		amdgpu_ip_block_add(adev, &gmc_v7_4_ip_block); +		amdgpu_ip_block_add(adev, &iceland_ih_ip_block); +		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block); +		if (adev->enable_virtual_display) +			amdgpu_ip_block_add(adev, &dce_virtual_ip_block); +		amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block); +		amdgpu_ip_block_add(adev, &sdma_v2_4_ip_block); +		break; +	case CHIP_FIJI: +		amdgpu_ip_block_add(adev, &vi_common_ip_block); +		amdgpu_ip_block_add(adev, &gmc_v8_5_ip_block); +		amdgpu_ip_block_add(adev, &tonga_ih_ip_block); +		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block); +		if (adev->enable_virtual_display) +			amdgpu_ip_block_add(adev, &dce_virtual_ip_block); +		else +			amdgpu_ip_block_add(adev, &dce_v10_1_ip_block); +		amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block); +		amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block); +		amdgpu_ip_block_add(adev, &uvd_v6_0_ip_block); +		amdgpu_ip_block_add(adev, &vce_v3_0_ip_block); +		break; +	case CHIP_TONGA: +		amdgpu_ip_block_add(adev, &vi_common_ip_block); +		amdgpu_ip_block_add(adev, &gmc_v8_0_ip_block); +		amdgpu_ip_block_add(adev, &tonga_ih_ip_block); +		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block); +		if (adev->enable_virtual_display) +			amdgpu_ip_block_add(adev, &dce_virtual_ip_block); +		else +			amdgpu_ip_block_add(adev, &dce_v10_0_ip_block); +		amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block); +		amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block); +		amdgpu_ip_block_add(adev, &uvd_v5_0_ip_block); +		amdgpu_ip_block_add(adev, &vce_v3_0_ip_block); +		break; +	case CHIP_POLARIS11: +	case CHIP_POLARIS10: +	case CHIP_POLARIS12: +		amdgpu_ip_block_add(adev, &vi_common_ip_block); +		amdgpu_ip_block_add(adev, &gmc_v8_1_ip_block); +		amdgpu_ip_block_add(adev, &tonga_ih_ip_block); +		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block); +		if (adev->enable_virtual_display) +			amdgpu_ip_block_add(adev, &dce_virtual_ip_block); +		else +			amdgpu_ip_block_add(adev, &dce_v11_2_ip_block); +		amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block); +		amdgpu_ip_block_add(adev, &sdma_v3_1_ip_block); +		amdgpu_ip_block_add(adev, &uvd_v6_3_ip_block); +		amdgpu_ip_block_add(adev, &vce_v3_4_ip_block); +		break; +	case CHIP_CARRIZO: +		amdgpu_ip_block_add(adev, &vi_common_ip_block); +		amdgpu_ip_block_add(adev, &gmc_v8_0_ip_block); +		amdgpu_ip_block_add(adev, &cz_ih_ip_block); +		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block); +		if (adev->enable_virtual_display) +			amdgpu_ip_block_add(adev, &dce_virtual_ip_block); +		else +			amdgpu_ip_block_add(adev, &dce_v11_0_ip_block); +		amdgpu_ip_block_add(adev, 
&gfx_v8_0_ip_block); +		amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block); +		amdgpu_ip_block_add(adev, &uvd_v6_0_ip_block); +		amdgpu_ip_block_add(adev, &vce_v3_1_ip_block); +#if defined(CONFIG_DRM_AMD_ACP) +		amdgpu_ip_block_add(adev, &acp_ip_block); +#endif +		break; +	case CHIP_STONEY: +		amdgpu_ip_block_add(adev, &vi_common_ip_block); +		amdgpu_ip_block_add(adev, &gmc_v8_0_ip_block); +		amdgpu_ip_block_add(adev, &cz_ih_ip_block); +		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block); +		if (adev->enable_virtual_display) +			amdgpu_ip_block_add(adev, &dce_virtual_ip_block); +		else +			amdgpu_ip_block_add(adev, &dce_v11_0_ip_block); +		amdgpu_ip_block_add(adev, &gfx_v8_1_ip_block); +		amdgpu_ip_block_add(adev, &sdma_v3_0_ip_block); +		amdgpu_ip_block_add(adev, &uvd_v6_2_ip_block); +		amdgpu_ip_block_add(adev, &vce_v3_4_ip_block); +#if defined(CONFIG_DRM_AMD_ACP) +		amdgpu_ip_block_add(adev, &acp_ip_block); +#endif +		break; +	default: +		/* FIXME: not supported yet */ +		return -EINVAL; +	} + +	return 0; +}  | 
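
For reference, one of the more visible behavioural changes in this diff is the replacement of `vi_get_virtual_caps()` with `vi_detect_hw_virtualization()`, which decodes `mmBIF_IOV_FUNC_IDENTIFIER` by raw bit tests (bit 0 = VF, bit 31 = IOV enabled, value 0 plus a hypervisor hint = passthrough) instead of `REG_GET_FIELD()`. Below is a minimal user-space sketch of that decoding, assuming stand-in macro values; `decode_iov_identifier()` and `in_virtual_machine` are illustrative names, not kernel API.

```c
#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

/* Hypothetical stand-ins for the capability bits the patch ORs into
 * adev->virtualization.virtual_caps; the numeric values are illustrative. */
#define SRIOV_CAPS_IS_VF       (1u << 0)
#define SRIOV_CAPS_ENABLE_IOV  (1u << 1)
#define PASSTHROUGH_MODE       (1u << 2)

/* Decode a raw BIF_IOV_FUNC_IDENTIFIER value the way the new
 * vi_detect_hw_virtualization() does:
 *   bit 0  - 0 = physical function (PF), 1 = virtual function (VF)
 *   bit 31 - 0 = IOV disabled,           1 = IOV enabled
 *   0      - no SR-IOV; a hypervisor hint then means passthrough mode
 */
static uint32_t decode_iov_identifier(uint32_t reg, bool in_virtual_machine)
{
	uint32_t caps = 0;

	if (reg & 1)
		caps |= SRIOV_CAPS_IS_VF;
	if (reg & 0x80000000u)
		caps |= SRIOV_CAPS_ENABLE_IOV;
	if (reg == 0 && in_virtual_machine)
		caps |= PASSTHROUGH_MODE;	/* passthrough excludes SR-IOV */

	return caps;
}

int main(void)
{
	/* Example register value: IOV enabled and running as a VF. */
	uint32_t caps = decode_iov_identifier(0x80000001u, false);

	printf("is_vf=%d iov_enabled=%d passthrough=%d\n",
	       !!(caps & SRIOV_CAPS_IS_VF),
	       !!(caps & SRIOV_CAPS_ENABLE_IOV),
	       !!(caps & PASSTHROUGH_MODE));
	return 0;
}
```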
