diff options
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c')
| -rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 169 | 
1 file changed, 120 insertions, 49 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c index 94a6c42f29ea..6ed36a2c5f73 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c @@ -61,12 +61,24 @@ static int amdgpu_ctx_priority_permit(struct drm_file *filp,  	return -EACCES;  } +static enum gfx_pipe_priority amdgpu_ctx_sched_prio_to_compute_prio(enum drm_sched_priority prio) +{ +	switch (prio) { +	case DRM_SCHED_PRIORITY_HIGH_HW: +	case DRM_SCHED_PRIORITY_KERNEL: +		return AMDGPU_GFX_PIPE_PRIO_HIGH; +	default: +		return AMDGPU_GFX_PIPE_PRIO_NORMAL; +	} +} +  static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, const u32 hw_ip, const u32 ring)  {  	struct amdgpu_device *adev = ctx->adev;  	struct amdgpu_ctx_entity *entity;  	struct drm_gpu_scheduler **scheds = NULL, *sched = NULL;  	unsigned num_scheds = 0; +	enum gfx_pipe_priority hw_prio;  	enum drm_sched_priority priority;  	int r; @@ -79,46 +91,51 @@ static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, const u32 hw_ip, const  	priority = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ?  				
ctx->init_priority : ctx->override_priority;  	switch (hw_ip) { -		case AMDGPU_HW_IP_GFX: -			sched = &adev->gfx.gfx_ring[0].sched; -			scheds = &sched; -			num_scheds = 1; -			break; -		case AMDGPU_HW_IP_COMPUTE: -			scheds = adev->gfx.compute_sched; -			num_scheds = adev->gfx.num_compute_sched; -			break; -		case AMDGPU_HW_IP_DMA: -			scheds = adev->sdma.sdma_sched; -			num_scheds = adev->sdma.num_sdma_sched; -			break; -		case AMDGPU_HW_IP_UVD: -			sched = &adev->uvd.inst[0].ring.sched; -			scheds = &sched; -			num_scheds = 1; -			break; -		case AMDGPU_HW_IP_VCE: -			sched = &adev->vce.ring[0].sched; -			scheds = &sched; -			num_scheds = 1; -			break; -		case AMDGPU_HW_IP_UVD_ENC: -			sched = &adev->uvd.inst[0].ring_enc[0].sched; -			scheds = &sched; -			num_scheds = 1; -			break; -		case AMDGPU_HW_IP_VCN_DEC: -			scheds = adev->vcn.vcn_dec_sched; -			num_scheds =  adev->vcn.num_vcn_dec_sched; -			break; -		case AMDGPU_HW_IP_VCN_ENC: -			scheds = adev->vcn.vcn_enc_sched; -			num_scheds =  adev->vcn.num_vcn_enc_sched; -			break; -		case AMDGPU_HW_IP_VCN_JPEG: -			scheds = adev->jpeg.jpeg_sched; -			num_scheds =  adev->jpeg.num_jpeg_sched; -			break; +	case AMDGPU_HW_IP_GFX: +		sched = &adev->gfx.gfx_ring[0].sched; +		scheds = &sched; +		num_scheds = 1; +		break; +	case AMDGPU_HW_IP_COMPUTE: +		hw_prio = amdgpu_ctx_sched_prio_to_compute_prio(priority); +		scheds = adev->gfx.compute_prio_sched[hw_prio]; +		num_scheds = adev->gfx.num_compute_sched[hw_prio]; +		break; +	case AMDGPU_HW_IP_DMA: +		scheds = adev->sdma.sdma_sched; +		num_scheds = adev->sdma.num_sdma_sched; +		break; +	case AMDGPU_HW_IP_UVD: +		sched = &adev->uvd.inst[0].ring.sched; +		scheds = &sched; +		num_scheds = 1; +		break; +	case AMDGPU_HW_IP_VCE: +		sched = &adev->vce.ring[0].sched; +		scheds = &sched; +		num_scheds = 1; +		break; +	case AMDGPU_HW_IP_UVD_ENC: +		sched = &adev->uvd.inst[0].ring_enc[0].sched; +		scheds = &sched; +		num_scheds = 1; +		break; +	case AMDGPU_HW_IP_VCN_DEC: +		sched = 
drm_sched_pick_best(adev->vcn.vcn_dec_sched, +					    adev->vcn.num_vcn_dec_sched); +		scheds = &sched; +		num_scheds = 1; +		break; +	case AMDGPU_HW_IP_VCN_ENC: +		sched = drm_sched_pick_best(adev->vcn.vcn_enc_sched, +					    adev->vcn.num_vcn_enc_sched); +		scheds = &sched; +		num_scheds = 1; +		break; +	case AMDGPU_HW_IP_VCN_JPEG: +		scheds = adev->jpeg.jpeg_sched; +		num_scheds =  adev->jpeg.num_jpeg_sched; +		break;  	}  	r = drm_sched_entity_init(&entity->entity, priority, scheds, num_scheds, @@ -502,6 +519,29 @@ struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,  	return fence;  } +static void amdgpu_ctx_set_entity_priority(struct amdgpu_ctx *ctx, +					    struct amdgpu_ctx_entity *aentity, +					    int hw_ip, +					    enum drm_sched_priority priority) +{ +	struct amdgpu_device *adev = ctx->adev; +	enum gfx_pipe_priority hw_prio; +	struct drm_gpu_scheduler **scheds = NULL; +	unsigned num_scheds; + +	/* set sw priority */ +	drm_sched_entity_set_priority(&aentity->entity, priority); + +	/* set hw priority */ +	if (hw_ip == AMDGPU_HW_IP_COMPUTE) { +		hw_prio = amdgpu_ctx_sched_prio_to_compute_prio(priority); +		scheds = adev->gfx.compute_prio_sched[hw_prio]; +		num_scheds = adev->gfx.num_compute_sched[hw_prio]; +		drm_sched_entity_modify_sched(&aentity->entity, scheds, +					      num_scheds); +	} +} +  void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,  				  enum drm_sched_priority priority)  { @@ -514,13 +554,11 @@ void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,  			ctx->init_priority : ctx->override_priority;  	for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {  		for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) { -			struct drm_sched_entity *entity; -  			if (!ctx->entities[i][j])  				continue; -			entity = &ctx->entities[i][j]->entity; -			drm_sched_entity_set_priority(entity, ctx_prio); +			amdgpu_ctx_set_entity_priority(ctx, ctx->entities[i][j], +						       i, ctx_prio);  		}  	}  } @@ -628,20 +666,53 @@ void 
amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)  	mutex_destroy(&mgr->lock);  } + +static void amdgpu_ctx_init_compute_sched(struct amdgpu_device *adev) +{ +	int num_compute_sched_normal = 0; +	int num_compute_sched_high = AMDGPU_MAX_COMPUTE_RINGS - 1; +	int i; + +	/* use one drm sched array, gfx.compute_sched to store both high and +	 * normal priority drm compute schedulers */ +	for (i = 0; i < adev->gfx.num_compute_rings; i++) { +		if (!adev->gfx.compute_ring[i].has_high_prio) +			adev->gfx.compute_sched[num_compute_sched_normal++] = +				&adev->gfx.compute_ring[i].sched; +		else +			adev->gfx.compute_sched[num_compute_sched_high--] = +				&adev->gfx.compute_ring[i].sched; +	} + +	/* compute ring only has two priority for now */ +	i = AMDGPU_GFX_PIPE_PRIO_NORMAL; +	adev->gfx.compute_prio_sched[i] = &adev->gfx.compute_sched[0]; +	adev->gfx.num_compute_sched[i] = num_compute_sched_normal; + +	i = AMDGPU_GFX_PIPE_PRIO_HIGH; +	if (num_compute_sched_high == (AMDGPU_MAX_COMPUTE_RINGS - 1)) { +		/* When compute has no high priority rings then use */ +		/* normal priority sched array */ +		adev->gfx.compute_prio_sched[i] = &adev->gfx.compute_sched[0]; +		adev->gfx.num_compute_sched[i] = num_compute_sched_normal; +	} else { +		adev->gfx.compute_prio_sched[i] = +			&adev->gfx.compute_sched[num_compute_sched_high - 1]; +		adev->gfx.num_compute_sched[i] = +			adev->gfx.num_compute_rings - num_compute_sched_normal; +	} +} +  void amdgpu_ctx_init_sched(struct amdgpu_device *adev)  {  	int i, j; +	amdgpu_ctx_init_compute_sched(adev);  	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {  		adev->gfx.gfx_sched[i] = &adev->gfx.gfx_ring[i].sched;  		adev->gfx.num_gfx_sched++;  	} -	for (i = 0; i < adev->gfx.num_compute_rings; i++) { -		adev->gfx.compute_sched[i] = &adev->gfx.compute_ring[i].sched; -		adev->gfx.num_compute_sched++; -	} -  	for (i = 0; i < adev->sdma.num_instances; i++) {  		adev->sdma.sdma_sched[i] = &adev->sdma.instance[i].ring.sched;  		adev->sdma.num_sdma_sched++;  
| 
