Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_device.c')
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 156 ++++++++++++++++++---------
 1 file changed, 107 insertions(+), 49 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 39cd545976b7..faa3e7102156 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -183,20 +183,51 @@ bool amdgpu_device_supports_baco(struct drm_device *dev)
 void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
 			       uint32_t *buf, size_t size, bool write)
 {
-	uint64_t last;
 	unsigned long flags;
+	uint32_t hi = ~0;
+	uint64_t last;
+
+
+#ifdef CONFIG_64BIT
+	last = min(pos + size, adev->gmc.visible_vram_size);
+	if (last > pos) {
+		void __iomem *addr = adev->mman.aper_base_kaddr + pos;
+		size_t count = last - pos;
+
+		if (write) {
+			memcpy_toio(addr, buf, count);
+			mb();
+			amdgpu_asic_flush_hdp(adev, NULL);
+		} else {
+			amdgpu_asic_invalidate_hdp(adev, NULL);
+			mb();
+			memcpy_fromio(buf, addr, count);
+		}
+
+		if (count == size)
+			return;
+
+		pos += count;
+		buf += count / 4;
+		size -= count;
+	}
+#endif
+
+	spin_lock_irqsave(&adev->mmio_idx_lock, flags);
+	for (last = pos + size; pos < last; pos += 4) {
+		uint32_t tmp = pos >> 31;
 
-	last = size - 4;
-	for (last += pos; pos <= last; pos += 4) {
-		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
 		WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
-		WREG32_NO_KIQ(mmMM_INDEX_HI, pos >> 31);
+		if (tmp != hi) {
+			WREG32_NO_KIQ(mmMM_INDEX_HI, tmp);
+			hi = tmp;
+		}
 		if (write)
 			WREG32_NO_KIQ(mmMM_DATA, *buf++);
 		else
 			*buf++ = RREG32_NO_KIQ(mmMM_DATA);
-		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
 	}
+	spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
 }
 
 /*
@@ -275,6 +306,26 @@ void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
 		BUG();
 }
 
+void static inline amdgpu_mm_wreg_mmio(struct amdgpu_device *adev, uint32_t reg, uint32_t v, uint32_t acc_flags)
+{
+	trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);
+
+	if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
+		writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
+	else {
+		unsigned long flags;
+
+		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
+		writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
+		writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
+		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
+	}
+
+	if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
+		udelay(500);
+	}
+}
+
 /**
  * amdgpu_mm_wreg - write to a memory mapped IO register
  *
@@ -288,8 +339,6 @@ void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
 void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
 		    uint32_t acc_flags)
 {
-	trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);
-
 	if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
 		adev->last_mm_index = v;
 	}
@@ -297,20 +346,26 @@ void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
 	if ((acc_flags & AMDGPU_REGS_KIQ) || (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)))
 		return amdgpu_kiq_wreg(adev, reg, v);
 
-	if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
-		writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
-	else {
-		unsigned long flags;
+	amdgpu_mm_wreg_mmio(adev, reg, v, acc_flags);
+}
 
-		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
-		writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
-		writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
-		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
-	}
+/*
+ * amdgpu_mm_wreg_mmio_rlc -  write register either with mmio or with RLC path if in range
+ *
+ * this function is invoked only the debugfs register access
+ * */
+void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
+		    uint32_t acc_flags)
+{
+	if (amdgpu_sriov_fullaccess(adev) &&
+		adev->gfx.rlc.funcs &&
+		adev->gfx.rlc.funcs->is_rlcg_access_range) {
 
-	if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
-		udelay(500);
+		if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
+			return adev->gfx.rlc.funcs->rlcg_wreg(adev, reg, v);
 	}
+
+	amdgpu_mm_wreg_mmio(adev, reg, v, acc_flags);
 }
 
 /**
@@ -1136,7 +1191,7 @@ static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
 	* locking inversion with the driver load path. And the access here is
 	* completely racy anyway. So don't bother with locking for now.
 	*/
-	return dev->open_count == 0;
+	return atomic_read(&dev->open_count) == 0;
 }
 
 static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
@@ -2344,15 +2399,16 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
 		}
 		adev->ip_blocks[i].status.hw = false;
 		/* handle putting the SMC in the appropriate state */
-		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
-			r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
-			if (r) {
-				DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
-					  adev->mp1_state, r);
-				return r;
+		if(!amdgpu_sriov_vf(adev)){
+			if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
+				r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
+				if (r) {
+					DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
							adev->mp1_state, r);
+					return r;
+				}
 			}
 		}
-
 		adev->ip_blocks[i].status.hw = false;
 	}
 
@@ -2686,6 +2742,9 @@ static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
 
 		if (adev->asic_reset_res)
 			goto fail;
+
+		if (adev->mmhub.funcs && adev->mmhub.funcs->reset_ras_error_count)
+			adev->mmhub.funcs->reset_ras_error_count(adev);
 	} else {
 
 		task_barrier_full(&hive->tb);
@@ -2800,7 +2859,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
 	if (amdgpu_emu_mode == 1)
-		adev->usec_timeout *= 2;
+		adev->usec_timeout *= 10;
 	adev->gmc.gart_size = 512 * 1024 * 1024;
 	adev->accel_working = false;
 	adev->num_rings = 0;
@@ -3088,22 +3147,6 @@ fence_driver_init:
 	} else
 		adev->ucode_sysfs_en = true;
 
-	r = amdgpu_debugfs_gem_init(adev);
-	if (r)
-		DRM_ERROR("registering gem debugfs failed (%d).\n", r);
-
-	r = amdgpu_debugfs_regs_init(adev);
-	if (r)
-		DRM_ERROR("registering register debugfs failed (%d).\n", r);
-
-	r = amdgpu_debugfs_firmware_init(adev);
-	if (r)
-		DRM_ERROR("registering firmware debugfs failed (%d).\n", r);
-
-	r = amdgpu_debugfs_init(adev);
-	if (r)
-		DRM_ERROR("Creating debugfs files failed (%d).\n", r);
-
 	if ((amdgpu_testing & 1)) {
 		if (adev->accel_working)
 			amdgpu_test_moves(adev);
@@ -3177,6 +3220,12 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
 	flush_delayed_work(&adev->delayed_init_work);
 	adev->shutdown = true;
 
+	/* make sure IB test finished before entering exclusive mode
+	 * to avoid preemption on IB test
+	 * */
+	if (amdgpu_sriov_vf(adev))
+		amdgpu_virt_request_full_gpu(adev, false);
+
 	/* disable all interrupts */
 	amdgpu_irq_disable_all(adev);
 	if (adev->mode_info.mode_config_initialized){
@@ -3219,13 +3268,11 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
 	adev->rmmio = NULL;
 	amdgpu_device_doorbell_fini(adev);
 
-	amdgpu_debugfs_regs_cleanup(adev);
 	device_remove_file(adev->dev, &dev_attr_pcie_replay_count);
 	if (adev->ucode_sysfs_en)
 		amdgpu_ucode_sysfs_fini(adev);
 	if (IS_ENABLED(CONFIG_PERF_EVENTS))
 		amdgpu_pmu_fini(adev);
-	amdgpu_debugfs_preempt_cleanup(adev);
 	if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10)
 		amdgpu_discovery_fini(adev);
 }
@@ -3309,7 +3356,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
 		}
 	}
 
-	amdgpu_amdkfd_suspend(adev);
+	amdgpu_amdkfd_suspend(adev, !fbcon);
 
 	amdgpu_ras_suspend(adev);
 
@@ -3393,7 +3440,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
 			}
 		}
 	}
-	r = amdgpu_amdkfd_resume(adev);
+	r = amdgpu_amdkfd_resume(adev, !fbcon);
 	if (r)
 		return r;
 
@@ -3866,8 +3913,15 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
 		}
 	}
 
-	if (!r && amdgpu_ras_intr_triggered())
+	if (!r && amdgpu_ras_intr_triggered()) {
+		list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
+			if (tmp_adev->mmhub.funcs &&
+			    tmp_adev->mmhub.funcs->reset_ras_error_count)
+				tmp_adev->mmhub.funcs->reset_ras_error_count(tmp_adev);
+		}
+
 		amdgpu_ras_intr_cleared();
+	}
 
 	list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
 		if (need_full_reset) {
@@ -3913,6 +3967,8 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
 				if (r)
 					goto out;
 
+				amdgpu_fbdev_set_suspend(tmp_adev, 0);
+
 				/* must succeed. */
 				amdgpu_ras_resume(tmp_adev);
 
@@ -4086,6 +4142,8 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 		 */
 		amdgpu_unregister_gpu_instance(tmp_adev);
 
+		amdgpu_fbdev_set_suspend(adev, 1);
+
		/* disable ras on ALL IPs */
 		if (!(in_ras_intr && !use_baco) &&
 		      amdgpu_device_ip_need_full_reset(tmp_adev))
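The largest hunk above reworks amdgpu_device_vram_access(): on 64-bit kernels, any part of the request that falls inside the CPU-visible VRAM aperture is now served by a single memcpy_toio()/memcpy_fromio() through adev->mman.aper_base_kaddr, and only the remainder takes the MM_INDEX/MM_DATA slow path, with the spinlock hoisted out of the loop and MM_INDEX_HI rewritten only when pos crosses a 2 GiB boundary (i.e. pos >> 31 changes). Below is a minimal standalone model of that split, assuming nothing from the driver: vram[], APER_SIZE and indexed_rw() are inventions of the sketch, with plain memcpy() standing in for the io accessors and the index/data register pair.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define VRAM_SIZE 4096u
#define APER_SIZE 1024u	/* pretend only the first 1 KiB is CPU-visible */

static uint8_t vram[VRAM_SIZE];	/* stands in for VRAM behind the BAR */

/* stand-in for the MM_INDEX/MM_DATA slow path: one dword per access */
static void indexed_rw(uint64_t pos, uint32_t *buf, int write)
{
	if (write)
		memcpy(&vram[pos], buf, 4);
	else
		memcpy(buf, &vram[pos], 4);
}

static void vram_access(uint64_t pos, uint32_t *buf, size_t size, int write)
{
	/* fast path: bulk copy for the part of the request inside the aperture */
	uint64_t last = pos + size < APER_SIZE ? pos + size : APER_SIZE;

	if (last > pos) {
		size_t count = last - pos;

		if (write)
			memcpy(&vram[pos], buf, count);
		else
			memcpy(buf, &vram[pos], count);

		if (count == size)
			return;

		pos += count;
		buf += count / 4;
		size -= count;
	}

	/* slow path for the remainder, one dword at a time as in the hunk */
	for (last = pos + size; pos < last; pos += 4)
		indexed_rw(pos, buf++, write);
}

int main(void)
{
	uint32_t out[512], in[512];
	size_t i;

	for (i = 0; i < 512; i++)
		out[i] = (uint32_t)i;

	vram_access(512, out, sizeof(out), 1);	/* straddles the aperture end */
	vram_access(512, in, sizeof(in), 0);
	printf("%s\n", memcmp(in, out, sizeof(in)) ? "mismatch" : "ok");
	return 0;
}

Compiled with a plain cc, this prints "ok": the access at offset 512 straddles the pretend aperture end at 1024, so the first half goes through the bulk path and the rest through the indexed loop, which is exactly the boundary handling the hunk implements with count, pos, buf and size.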
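The other notable addition is amdgpu_mm_wreg_mmio_rlc(), used only for debugfs register access: when running as an SR-IOV VF with full access and the target register lies in an RLC-protected range, the write is routed through the RLC callbacks instead of direct MMIO, and it falls back to amdgpu_mm_wreg_mmio() otherwise. A minimal model of that dispatch is sketched below; the register range, struct layout and printf() output are invented for illustration, and only the control flow mirrors the helper.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* stand-in for adev->gfx.rlc.funcs; names echo the kernel, layout is invented */
struct rlc_funcs {
	bool (*is_rlcg_access_range)(uint32_t reg);
	void (*rlcg_wreg)(uint32_t reg, uint32_t v);
};

static bool in_rlcg_range(uint32_t reg)
{
	return reg >= 0x4000 && reg < 0x5000;	/* invented protected range */
}

static void rlcg_wreg(uint32_t reg, uint32_t v)
{
	printf("RLC  write reg 0x%04x = 0x%08x\n", reg, v);
}

static void mmio_wreg(uint32_t reg, uint32_t v)
{
	printf("MMIO write reg 0x%04x = 0x%08x\n", reg, v);
}

static const struct rlc_funcs funcs = { in_rlcg_range, rlcg_wreg };

/* control flow of amdgpu_mm_wreg_mmio_rlc(): RLC path only if every check passes */
static void wreg_mmio_rlc(bool sriov_fullaccess, uint32_t reg, uint32_t v)
{
	if (sriov_fullaccess && funcs.is_rlcg_access_range &&
	    funcs.is_rlcg_access_range(reg)) {
		funcs.rlcg_wreg(reg, v);
		return;
	}

	mmio_wreg(reg, v);
}

int main(void)
{
	wreg_mmio_rlc(true, 0x4100, 0x1);	/* VF, protected reg -> RLC  */
	wreg_mmio_rlc(true, 0x0100, 0x2);	/* VF, normal reg    -> MMIO */
	wreg_mmio_rlc(false, 0x4100, 0x3);	/* bare metal        -> MMIO */
	return 0;
}

Keeping the MMIO fallback at the end mirrors the helper: the RLC path is taken only when every precondition holds, so bare-metal and out-of-range writes behave exactly as before.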
