Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_device.c')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_device.c	163
1 file changed, 146 insertions, 17 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index d5b421330145..65531463f88e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -38,6 +38,7 @@
 #include "amdgpu_i2c.h"
 #include "atom.h"
 #include "amdgpu_atombios.h"
+#include "amd_pcie.h"
 #ifdef CONFIG_DRM_AMDGPU_CIK
 #include "cik.h"
 #endif
@@ -949,6 +950,15 @@ static bool amdgpu_check_pot_argument(int arg)
  */
 static void amdgpu_check_arguments(struct amdgpu_device *adev)
 {
+	if (amdgpu_sched_jobs < 4) {
+		dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
+			 amdgpu_sched_jobs);
+		amdgpu_sched_jobs = 4;
+	} else if (!amdgpu_check_pot_argument(amdgpu_sched_jobs)){
+		dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
+			 amdgpu_sched_jobs);
+		amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
+	}
 	/* vramlimit must be a power of two */
 	if (!amdgpu_check_pot_argument(amdgpu_vram_limit)) {
 		dev_warn(adev->dev, "vram limit (%d) must be a power of 2\n",
@@ -1214,12 +1224,14 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
 		} else {
 			if (adev->ip_blocks[i].funcs->early_init) {
 				r = adev->ip_blocks[i].funcs->early_init((void *)adev);
-				if (r == -ENOENT)
+				if (r == -ENOENT) {
 					adev->ip_block_status[i].valid = false;
-				else if (r)
+				} else if (r) {
+					DRM_ERROR("early_init %d failed %d\n", i, r);
 					return r;
-				else
+				} else {
 					adev->ip_block_status[i].valid = true;
+				}
 			} else {
 				adev->ip_block_status[i].valid = true;
 			}
@@ -1237,20 +1249,28 @@ static int amdgpu_init(struct amdgpu_device *adev)
 		if (!adev->ip_block_status[i].valid)
 			continue;
 		r = adev->ip_blocks[i].funcs->sw_init((void *)adev);
-		if (r)
+		if (r) {
+			DRM_ERROR("sw_init %d failed %d\n", i, r);
 			return r;
+		}
 		adev->ip_block_status[i].sw = true;
 		/* need to do gmc hw init early so we can allocate gpu mem */
 		if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) {
 			r = amdgpu_vram_scratch_init(adev);
-			if (r)
+			if (r) {
+				DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
 				return r;
+			}
 			r = adev->ip_blocks[i].funcs->hw_init((void *)adev);
-			if (r)
+			if (r) {
+				DRM_ERROR("hw_init %d failed %d\n", i, r);
 				return r;
+			}
 			r = amdgpu_wb_init(adev);
-			if (r)
+			if (r) {
+				DRM_ERROR("amdgpu_wb_init failed %d\n", r);
 				return r;
+			}
 			adev->ip_block_status[i].hw = true;
 		}
 	}
@@ -1262,8 +1282,10 @@ static int amdgpu_init(struct amdgpu_device *adev)
 		if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC)
 			continue;
 		r = adev->ip_blocks[i].funcs->hw_init((void *)adev);
-		if (r)
+		if (r) {
+			DRM_ERROR("hw_init %d failed %d\n", i, r);
 			return r;
+		}
 		adev->ip_block_status[i].hw = true;
 	}
@@ -1280,12 +1302,16 @@ static int amdgpu_late_init(struct amdgpu_device *adev)
 		/* enable clockgating to save power */
 		r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
								    AMD_CG_STATE_GATE);
-		if (r)
+		if (r) {
+			DRM_ERROR("set_clockgating_state(gate) %d failed %d\n", i, r);
 			return r;
+		}
 		if (adev->ip_blocks[i].funcs->late_init) {
 			r = adev->ip_blocks[i].funcs->late_init((void *)adev);
-			if (r)
+			if (r) {
+				DRM_ERROR("late_init %d failed %d\n", i, r);
 				return r;
+			}
 		}
 	}
@@ -1306,10 +1332,15 @@ static int amdgpu_fini(struct amdgpu_device *adev)
 		/* ungate blocks before hw fini so that we can shutdown the blocks safely */
 		r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
								    AMD_CG_STATE_UNGATE);
-		if (r)
+		if (r) {
+			DRM_ERROR("set_clockgating_state(ungate) %d failed %d\n", i, r);
 			return r;
+		}
 		r = adev->ip_blocks[i].funcs->hw_fini((void *)adev);
 		/* XXX handle errors */
+		if (r) {
+			DRM_DEBUG("hw_fini %d failed %d\n", i, r);
+		}
 		adev->ip_block_status[i].hw = false;
 	}
@@ -1318,6 +1349,9 @@ static int amdgpu_fini(struct amdgpu_device *adev)
 			continue;
 		r = adev->ip_blocks[i].funcs->sw_fini((void *)adev);
 		/* XXX handle errors */
+		if (r) {
+			DRM_DEBUG("sw_fini %d failed %d\n", i, r);
+		}
 		adev->ip_block_status[i].sw = false;
 		adev->ip_block_status[i].valid = false;
 	}
@@ -1335,9 +1369,15 @@ static int amdgpu_suspend(struct amdgpu_device *adev)
 		/* ungate blocks so that suspend can properly shut them down */
 		r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
								    AMD_CG_STATE_UNGATE);
+		if (r) {
+			DRM_ERROR("set_clockgating_state(ungate) %d failed %d\n", i, r);
+		}
 		/* XXX handle errors */
 		r = adev->ip_blocks[i].funcs->suspend(adev);
 		/* XXX handle errors */
+		if (r) {
+			DRM_ERROR("suspend %d failed %d\n", i, r);
+		}
 	}

 	return 0;
@@ -1351,8 +1391,10 @@ static int amdgpu_resume(struct amdgpu_device *adev)
 		if (!adev->ip_block_status[i].valid)
 			continue;
 		r = adev->ip_blocks[i].funcs->resume(adev);
-		if (r)
+		if (r) {
+			DRM_ERROR("resume %d failed %d\n", i, r);
 			return r;
+		}
 	}

 	return 0;
@@ -1484,8 +1526,10 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 		return -EINVAL;
 	}
 	r = amdgpu_atombios_init(adev);
-	if (r)
+	if (r) {
+		dev_err(adev->dev, "amdgpu_atombios_init failed\n");
 		return r;
+	}

 	/* Post card if necessary */
 	if (!amdgpu_card_posted(adev)) {
@@ -1499,21 +1543,26 @@ int amdgpu_device_init(struct amdgpu_device *adev,

 	/* Initialize clocks */
 	r = amdgpu_atombios_get_clock_info(adev);
-	if (r)
+	if (r) {
+		dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
 		return r;
+	}
 	/* init i2c buses */
 	amdgpu_atombios_i2c_init(adev);

 	/* Fence driver */
 	r = amdgpu_fence_driver_init(adev);
-	if (r)
+	if (r) {
+		dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
 		return r;
+	}

 	/* init the mode config */
 	drm_mode_config_init(adev->ddev);

 	r = amdgpu_init(adev);
 	if (r) {
+		dev_err(adev->dev, "amdgpu_init failed\n");
 		amdgpu_fini(adev);
 		return r;
 	}
@@ -1528,7 +1577,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 		return r;
 	}

-	r = amdgpu_ctx_init(adev, true, &adev->kernel_ctx);
+	r = amdgpu_ctx_init(adev, AMD_SCHED_PRIORITY_KERNEL, &adev->kernel_ctx);
 	if (r) {
 		dev_err(adev->dev, "failed to create kernel context (%d).\n", r);
 		return r;
@@ -1570,8 +1619,10 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	 * explicit gating rather than handling it automatically.
 	 */
 	r = amdgpu_late_init(adev);
-	if (r)
+	if (r) {
+		dev_err(adev->dev, "amdgpu_late_init failed\n");
 		return r;
+	}

 	return 0;
 }
@@ -1788,6 +1839,7 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
 	}

 	drm_kms_helper_poll_enable(dev);
+	drm_helper_hpd_irq_event(dev);

 	if (fbcon) {
 		amdgpu_fbdev_set_suspend(adev, 0);
@@ -1881,6 +1933,83 @@ retry:
 	return r;
 }

+void amdgpu_get_pcie_info(struct amdgpu_device *adev)
+{
+	u32 mask;
+	int ret;
+
+	if (pci_is_root_bus(adev->pdev->bus))
+		return;
+
+	if (amdgpu_pcie_gen2 == 0)
+		return;
+
+	if (adev->flags & AMD_IS_APU)
+		return;
+
+	ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
+	if (!ret) {
+		adev->pm.pcie_gen_mask = (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
+					  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
+					  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
+
+		if (mask & DRM_PCIE_SPEED_25)
+			adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
+		if (mask & DRM_PCIE_SPEED_50)
+			adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2;
+		if (mask & DRM_PCIE_SPEED_80)
+			adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3;
+	}
+	ret = drm_pcie_get_max_link_width(adev->ddev, &mask);
+	if (!ret) {
+		switch (mask) {
+		case 32:
+			adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
+						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
+						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
+						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
+						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
+						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
+						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
+			break;
+		case 16:
+			adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
+						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
+						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
+						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
+						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
+						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
+			break;
+		case 12:
+			adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
+						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
+						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
+						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
+						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
+			break;
+		case 8:
+			adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
+						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
+						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
+						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
+			break;
+		case 4:
+			adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
+						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
+						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
+			break;
+		case 2:
+			adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
+						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
+			break;
+		case 1:
+			adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
+			break;
+		default:
+			break;
+		}
+	}
+}
+
 /*
  * Debugfs
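A note on the amdgpu_sched_jobs change above: the patch clamps the module parameter to a floor of 4 and rounds any non-power-of-two value up with roundup_pow_of_two(). The following is a minimal standalone C sketch of that validation logic, not kernel code; validate_sched_jobs() and round_up_pow2() are hypothetical stand-ins for the in-kernel path.

#include <stdio.h>

/* Round v up to the next power of two; a userspace stand-in for the
 * kernel's roundup_pow_of_two(). */
static unsigned int round_up_pow2(unsigned int v)
{
	unsigned int p = 1;

	while (p < v)
		p <<= 1;
	return p;
}

/* Hypothetical helper mirroring the checks added to amdgpu_check_arguments(). */
static int validate_sched_jobs(int jobs)
{
	if (jobs < 4) {
		fprintf(stderr, "sched jobs (%d) must be at least 4\n", jobs);
		return 4;
	}
	if (jobs & (jobs - 1)) {	/* not a power of two */
		fprintf(stderr, "sched jobs (%d) must be a power of 2\n", jobs);
		return (int)round_up_pow2((unsigned int)jobs);
	}
	return jobs;
}

int main(void)
{
	/* 3 is clamped to 4, 5 rounds up to 8, 32 passes through. */
	printf("%d %d %d\n", validate_sched_jobs(3),
	       validate_sched_jobs(5), validate_sched_jobs(32));
	return 0;
}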

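Similarly, the pcie_mlw_mask switch in the new amdgpu_get_pcie_info() encodes a cumulative rule: a device whose maximum link width is N advertises every defined width up to and including N. Below is a table-driven sketch of that rule, assuming illustrative WIDTH_X* bit values rather than the real CAIL_PCIE_LINK_WIDTH_SUPPORT_* flags; the kernel's switch simply unrolls the same table.

#include <stdio.h>

enum {
	WIDTH_X1  = 1 << 0,
	WIDTH_X2  = 1 << 1,
	WIDTH_X4  = 1 << 2,
	WIDTH_X8  = 1 << 3,
	WIDTH_X12 = 1 << 4,
	WIDTH_X16 = 1 << 5,
	WIDTH_X32 = 1 << 6,
};

static unsigned int mlw_mask_for(unsigned int max_width)
{
	static const struct { unsigned int width, bit; } caps[] = {
		{ 1, WIDTH_X1 }, { 2, WIDTH_X2 }, { 4, WIDTH_X4 },
		{ 8, WIDTH_X8 }, { 12, WIDTH_X12 }, { 16, WIDTH_X16 },
		{ 32, WIDTH_X32 },
	};
	unsigned int mask = 0;
	size_t i;

	/* OR in every width capability up to and including the maximum,
	 * matching the cumulative cases of the kernel's switch. */
	for (i = 0; i < sizeof(caps) / sizeof(caps[0]); i++)
		if (caps[i].width <= max_width)
			mask |= caps[i].bit;
	return mask;
}

int main(void)
{
	printf("x16 mask: 0x%x\n", mlw_mask_for(16)); /* bits X1..X16 set */
	return 0;
}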