Diffstat (limited to 'drivers/gpu')
196 files changed, 4714 insertions, 1573 deletions
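Note on the recurring amdgpu change in this diff: job dependency handling moves from the driver's own amdgpu_sync objects into the DRM scheduler, so amdgpu_job_alloc()/amdgpu_job_alloc_with_ib() now take the scheduler entity and fence owner up front, and amdgpu_job_submit() returns the finished fence instead of an error code. Below is a condensed before/after sketch of that calling convention, reconstructed from the amdgpu_ttm.c hunks further down; the IB setup and error paths are trimmed, so it is illustrative rather than a complete, buildable function.

    /* Before: entity and owner are passed at submit time, and submit can fail. */
    r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes,
                                 AMDGPU_IB_POOL_DELAYED, &job);
    if (r)
            return r;
    /* ... build the IB ... */
    r = amdgpu_job_submit(job, &adev->mman.entity,
                          AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
    if (r) {
            amdgpu_job_free(job);
            return r;
    }
    dma_fence_put(fence);

    /* After: entity and owner are bound at allocation time; submit always
     * succeeds and hands back the scheduler's finished fence. */
    r = amdgpu_job_alloc_with_ib(adev, &adev->mman.entity,
                                 AMDGPU_FENCE_OWNER_UNDEFINED,
                                 num_dw * 4 + num_bytes,
                                 AMDGPU_IB_POOL_DELAYED, &job);
    if (r)
            return r;
    /* ... build the IB ... */
    dma_fence_put(amdgpu_job_submit(job));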
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index 9c2d9495cb3c..315cbdf61979 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -8,7 +8,6 @@ menuconfig DRM tristate "Direct Rendering Manager (XFree86 4.1.0 and higher DRI support)" depends on (AGP || AGP=n) && !EMULATED_CMPXCHG && HAS_DMA - select DRM_NOMODESET select DRM_PANEL_ORIENTATION_QUIRKS select HDMI select FB_CMDLINE @@ -19,6 +18,7 @@ menuconfig DRM # gallium uses SYS_kcmp for os_same_file_description() to de-duplicate # device and dmabuf fd. Let's make sure that is available for our userspace. select KCMP + select VIDEO_NOMODESET help Kernel-level support for the Direct Rendering Infrastructure (DRI) introduced in XFree86 4.0. If you say Y here, you need to select @@ -458,11 +458,6 @@ config DRM_EXPORT_FOR_TESTS config DRM_PANEL_ORIENTATION_QUIRKS tristate -# Separate option because nomodeset parameter is global and expected built-in -config DRM_NOMODESET - bool - default n - config DRM_LIB_RANDOM bool default n diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index 6e55c47288e4..f92cd7892711 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -72,7 +72,6 @@ drm-$(CONFIG_DRM_PRIVACY_SCREEN) += \ drm_privacy_screen_x86.o obj-$(CONFIG_DRM) += drm.o -obj-$(CONFIG_DRM_NOMODESET) += drm_nomodeset.o obj-$(CONFIG_DRM_PANEL_ORIENTATION_QUIRKS) += drm_panel_orientation_quirks.o # @@ -117,7 +116,9 @@ drm_kms_helper-y := \ drm_self_refresh_helper.o \ drm_simple_kms_helper.o drm_kms_helper-$(CONFIG_DRM_PANEL_BRIDGE) += bridge/panel.o -drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fb_helper.o +drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += \ + drm_fbdev_generic.o \ + drm_fb_helper.o obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o # diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index 8816853e50c0..f99d4873bf22 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -673,7 +673,7 @@ int amdgpu_amdkfd_submit_ib(struct amdgpu_device *adev, goto err; } - ret = amdgpu_job_alloc(adev, 1, &job, NULL); + ret = amdgpu_job_alloc(adev, NULL, NULL, NULL, 1, &job); if (ret) goto err; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c index 491d4846fc02..e1320edfc527 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c @@ -26,7 +26,6 @@ #include <drm/display/drm_dp_helper.h> #include <drm/drm_edid.h> -#include <drm/drm_fb_helper.h> #include <drm/drm_probe_helper.h> #include <drm/amdgpu_drm.h> #include "amdgpu.h" diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 275da612cd87..459150460815 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -293,12 +293,8 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p, } for (i = 0; i < p->gang_size; ++i) { - ret = amdgpu_job_alloc(p->adev, num_ibs[i], &p->jobs[i], vm); - if (ret) - goto free_all_kdata; - - ret = drm_sched_job_init(&p->jobs[i]->base, p->entities[i], - &fpriv->vm); + ret = amdgpu_job_alloc(p->adev, vm, p->entities[i], vm, + num_ibs[i], &p->jobs[i]); if (ret) goto free_all_kdata; } @@ -432,7 +428,7 @@ static int amdgpu_cs_p2_dependencies(struct amdgpu_cs_parser *p, dma_fence_put(old); } - r = amdgpu_sync_fence(&p->gang_leader->sync, fence); + r = amdgpu_sync_fence(&p->sync, fence); dma_fence_put(fence); if 
(r) return r; @@ -454,9 +450,20 @@ static int amdgpu_syncobj_lookup_and_add(struct amdgpu_cs_parser *p, return r; } - r = amdgpu_sync_fence(&p->gang_leader->sync, fence); - dma_fence_put(fence); + r = amdgpu_sync_fence(&p->sync, fence); + if (r) + goto error; + /* + * When we have an explicit dependency it might be necessary to insert a + * pipeline sync to make sure that all caches etc are flushed and the + * next job actually sees the results from the previous one. + */ + if (fence->context == p->gang_leader->base.entity->fence_context) + r = amdgpu_sync_fence(&p->gang_leader->explicit_sync, fence); + +error: + dma_fence_put(fence); return r; } @@ -1105,7 +1112,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p) if (r) return r; - r = amdgpu_sync_fence(&job->sync, fpriv->prt_va->last_pt_update); + r = amdgpu_sync_fence(&p->sync, fpriv->prt_va->last_pt_update); if (r) return r; @@ -1116,7 +1123,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p) if (r) return r; - r = amdgpu_sync_fence(&job->sync, bo_va->last_pt_update); + r = amdgpu_sync_fence(&p->sync, bo_va->last_pt_update); if (r) return r; } @@ -1135,7 +1142,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p) if (r) return r; - r = amdgpu_sync_fence(&job->sync, bo_va->last_pt_update); + r = amdgpu_sync_fence(&p->sync, bo_va->last_pt_update); if (r) return r; } @@ -1148,7 +1155,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p) if (r) return r; - r = amdgpu_sync_fence(&job->sync, vm->last_update); + r = amdgpu_sync_fence(&p->sync, vm->last_update); if (r) return r; @@ -1180,7 +1187,6 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p) static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p) { struct amdgpu_fpriv *fpriv = p->filp->driver_priv; - struct amdgpu_job *leader = p->gang_leader; struct amdgpu_bo_list_entry *e; unsigned int i; int r; @@ -1192,14 +1198,14 @@ static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p) sync_mode = amdgpu_bo_explicit_sync(bo) ? 
AMDGPU_SYNC_EXPLICIT : AMDGPU_SYNC_NE_OWNER; - r = amdgpu_sync_resv(p->adev, &leader->sync, resv, sync_mode, + r = amdgpu_sync_resv(p->adev, &p->sync, resv, sync_mode, &fpriv->vm); if (r) return r; } - for (i = 0; i < p->gang_size - 1; ++i) { - r = amdgpu_sync_clone(&leader->sync, &p->jobs[i]->sync); + for (i = 0; i < p->gang_size; ++i) { + r = amdgpu_sync_push_to_job(&p->sync, p->jobs[i]); if (r) return r; } @@ -1245,7 +1251,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, struct dma_fence *fence; fence = &p->jobs[i]->base.s_fence->scheduled; - r = amdgpu_sync_fence(&leader->sync, fence); + r = drm_sched_job_add_dependency(&leader->base, fence); if (r) goto error_cleanup; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.h index cbaa19b2b8a3..207e801c24ed 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.h @@ -75,6 +75,8 @@ struct amdgpu_cs_parser { unsigned num_post_deps; struct amdgpu_cs_post_dep *post_deps; + + struct amdgpu_sync sync; }; int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 3d5d5d49cfab..b2b1c66bfe39 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -37,6 +37,7 @@ #include <linux/pci-p2pdma.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_fb_helper.h> #include <drm/drm_probe_helper.h> #include <drm/amdgpu_drm.h> #include <linux/vgaarb.h> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index 83a271578265..b22471b3bd63 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c @@ -39,8 +39,8 @@ #include <linux/pm_runtime.h> #include <drm/drm_crtc_helper.h> #include <drm/drm_edid.h> -#include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_fb_helper.h> +#include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_fourcc.h> #include <drm/drm_vblank.h> @@ -1249,7 +1249,6 @@ amdgpu_display_user_framebuffer_create(struct drm_device *dev, const struct drm_mode_config_funcs amdgpu_mode_funcs = { .fb_create = amdgpu_display_user_framebuffer_create, - .output_poll_changed = drm_fb_helper_output_poll_changed, }; static const struct drm_prop_enum_list amdgpu_underscan_enum_list[] = diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 72b77b3ac4fb..5697d456d7d0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -25,6 +25,7 @@ #include <drm/amdgpu_drm.h> #include <drm/drm_aperture.h> #include <drm/drm_drv.h> +#include <drm/drm_fbdev_generic.h> #include <drm/drm_gem.h> #include <drm/drm_vblank.h> #include <drm/drm_managed.h> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c index 258cffe3c06a..774c77bb8f4e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c @@ -182,7 +182,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, need_ctx_switch = ring->current_ctx != fence_ctx; if (ring->funcs->emit_pipeline_sync && job && - ((tmp = amdgpu_sync_get_fence(&job->sched_sync)) || + ((tmp = amdgpu_sync_get_fence(&job->explicit_sync)) || (amdgpu_sriov_vf(adev) && need_ctx_switch) || amdgpu_vm_need_pipeline_sync(ring, job))) { need_pipe_sync = true; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c index 03d115d2b5ed..2a9a2593dc18 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c @@ -170,26 +170,27 @@ bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev, * * @vm: vm to allocate id for * @ring: ring we want to submit job to - * @sync: sync object where we add dependencies * @idle: resulting idle VMID + * @fence: fence to wait for if no id could be grabbed * * Try to find an idle VMID, if none is idle add a fence to wait to the sync * object. Returns -ENOMEM when we are out of memory. */ static int amdgpu_vmid_grab_idle(struct amdgpu_vm *vm, struct amdgpu_ring *ring, - struct amdgpu_sync *sync, - struct amdgpu_vmid **idle) + struct amdgpu_vmid **idle, + struct dma_fence **fence) { struct amdgpu_device *adev = ring->adev; unsigned vmhub = ring->funcs->vmhub; struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub]; struct dma_fence **fences; unsigned i; - int r; - if (!dma_fence_is_signaled(ring->vmid_wait)) - return amdgpu_sync_fence(sync, ring->vmid_wait); + if (!dma_fence_is_signaled(ring->vmid_wait)) { + *fence = dma_fence_get(ring->vmid_wait); + return 0; + } fences = kmalloc_array(id_mgr->num_ids, sizeof(void *), GFP_KERNEL); if (!fences) @@ -228,10 +229,10 @@ static int amdgpu_vmid_grab_idle(struct amdgpu_vm *vm, return -ENOMEM; } - r = amdgpu_sync_fence(sync, &array->base); + *fence = dma_fence_get(&array->base); dma_fence_put(ring->vmid_wait); ring->vmid_wait = &array->base; - return r; + return 0; } kfree(fences); @@ -243,19 +244,17 @@ static int amdgpu_vmid_grab_idle(struct amdgpu_vm *vm, * * @vm: vm to allocate id for * @ring: ring we want to submit job to - * @sync: sync object where we add dependencies - * @fence: fence protecting ID from reuse * @job: job who wants to use the VMID * @id: resulting VMID + * @fence: fence to wait for if no id could be grabbed * * Try to assign a reserved VMID. */ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm, struct amdgpu_ring *ring, - struct amdgpu_sync *sync, - struct dma_fence *fence, struct amdgpu_job *job, - struct amdgpu_vmid **id) + struct amdgpu_vmid **id, + struct dma_fence **fence) { struct amdgpu_device *adev = ring->adev; unsigned vmhub = ring->funcs->vmhub; @@ -282,7 +281,8 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm, tmp = amdgpu_sync_peek_fence(&(*id)->active, ring); if (tmp) { *id = NULL; - return amdgpu_sync_fence(sync, tmp); + *fence = dma_fence_get(tmp); + return 0; } needs_flush = true; } @@ -290,7 +290,7 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm, /* Good we can use this VMID. Remember this submission as * user of the VMID. */ - r = amdgpu_sync_fence(&(*id)->active, fence); + r = amdgpu_sync_fence(&(*id)->active, &job->base.s_fence->finished); if (r) return r; @@ -304,19 +304,17 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm, * * @vm: vm to allocate id for * @ring: ring we want to submit job to - * @sync: sync object where we add dependencies - * @fence: fence protecting ID from reuse * @job: job who wants to use the VMID * @id: resulting VMID + * @fence: fence to wait for if no id could be grabbed * * Try to reuse a VMID for this submission. 
*/ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm, struct amdgpu_ring *ring, - struct amdgpu_sync *sync, - struct dma_fence *fence, struct amdgpu_job *job, - struct amdgpu_vmid **id) + struct amdgpu_vmid **id, + struct dma_fence **fence) { struct amdgpu_device *adev = ring->adev; unsigned vmhub = ring->funcs->vmhub; @@ -352,7 +350,8 @@ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm, /* Good, we can use this VMID. Remember this submission as * user of the VMID. */ - r = amdgpu_sync_fence(&(*id)->active, fence); + r = amdgpu_sync_fence(&(*id)->active, + &job->base.s_fence->finished); if (r) return r; @@ -370,15 +369,13 @@ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm, * * @vm: vm to allocate id for * @ring: ring we want to submit job to - * @sync: sync object where we add dependencies - * @fence: fence protecting ID from reuse * @job: job who wants to use the VMID + * @fence: fence to wait for if no id could be grabbed * * Allocate an id for the vm, adding fences to the sync obj as necessary. */ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring, - struct amdgpu_sync *sync, struct dma_fence *fence, - struct amdgpu_job *job) + struct amdgpu_job *job, struct dma_fence **fence) { struct amdgpu_device *adev = ring->adev; unsigned vmhub = ring->funcs->vmhub; @@ -388,16 +385,16 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring, int r = 0; mutex_lock(&id_mgr->lock); - r = amdgpu_vmid_grab_idle(vm, ring, sync, &idle); + r = amdgpu_vmid_grab_idle(vm, ring, &idle, fence); if (r || !idle) goto error; if (vm->reserved_vmid[vmhub]) { - r = amdgpu_vmid_grab_reserved(vm, ring, sync, fence, job, &id); + r = amdgpu_vmid_grab_reserved(vm, ring, job, &id, fence); if (r || !id) goto error; } else { - r = amdgpu_vmid_grab_used(vm, ring, sync, fence, job, &id); + r = amdgpu_vmid_grab_used(vm, ring, job, &id, fence); if (r) goto error; @@ -406,7 +403,8 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring, id = idle; /* Remember this submission as user of the VMID */ - r = amdgpu_sync_fence(&id->active, fence); + r = amdgpu_sync_fence(&id->active, + &job->base.s_fence->finished); if (r) goto error; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h index 06c8a0034fa5..57efe61dceed 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h @@ -84,8 +84,7 @@ void amdgpu_vmid_free_reserved(struct amdgpu_device *adev, struct amdgpu_vm *vm, unsigned vmhub); int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring, - struct amdgpu_sync *sync, struct dma_fence *fence, - struct amdgpu_job *job); + struct amdgpu_job *job, struct dma_fence **fence); void amdgpu_vmid_reset(struct amdgpu_device *adev, unsigned vmhub, unsigned vmid); void amdgpu_vmid_reset_all(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index cd968e781077..032651a655f0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -88,8 +88,9 @@ exit: return DRM_GPU_SCHED_STAT_NOMINAL; } -int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs, - struct amdgpu_job **job, struct amdgpu_vm *vm) +int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm, + struct drm_sched_entity *entity, void *owner, + unsigned int num_ibs, struct amdgpu_job **job) { if (num_ibs == 0) return -EINVAL; @@ -105,28 +106,34 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, 
unsigned num_ibs, (*job)->base.sched = &adev->rings[0]->sched; (*job)->vm = vm; - amdgpu_sync_create(&(*job)->sync); - amdgpu_sync_create(&(*job)->sched_sync); + amdgpu_sync_create(&(*job)->explicit_sync); (*job)->vram_lost_counter = atomic_read(&adev->vram_lost_counter); (*job)->vm_pd_addr = AMDGPU_BO_INVALID_OFFSET; - return 0; + if (!entity) + return 0; + + return drm_sched_job_init(&(*job)->base, entity, owner); } -int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size, - enum amdgpu_ib_pool_type pool_type, - struct amdgpu_job **job) +int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, + struct drm_sched_entity *entity, void *owner, + size_t size, enum amdgpu_ib_pool_type pool_type, + struct amdgpu_job **job) { int r; - r = amdgpu_job_alloc(adev, 1, job, NULL); + r = amdgpu_job_alloc(adev, NULL, entity, owner, 1, job); if (r) return r; (*job)->num_ibs = 1; r = amdgpu_ib_get(adev, NULL, size, pool_type, &(*job)->ibs[0]); - if (r) + if (r) { + if (entity) + drm_sched_job_cleanup(&(*job)->base); kfree(*job); + } return r; } @@ -166,9 +173,7 @@ static void amdgpu_job_free_cb(struct drm_sched_job *s_job) drm_sched_job_cleanup(s_job); - amdgpu_sync_free(&job->sync); - amdgpu_sync_free(&job->sched_sync); - + amdgpu_sync_free(&job->explicit_sync); dma_fence_put(&job->hw_fence); } @@ -190,9 +195,11 @@ void amdgpu_job_set_gang_leader(struct amdgpu_job *job, void amdgpu_job_free(struct amdgpu_job *job) { + if (job->base.entity) + drm_sched_job_cleanup(&job->base); + amdgpu_job_free_resources(job); - amdgpu_sync_free(&job->sync); - amdgpu_sync_free(&job->sched_sync); + amdgpu_sync_free(&job->explicit_sync); if (job->gang_submit != &job->base.s_fence->scheduled) dma_fence_put(job->gang_submit); @@ -202,25 +209,16 @@ void amdgpu_job_free(struct amdgpu_job *job) dma_fence_put(&job->hw_fence); } -int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity, - void *owner, struct dma_fence **f) +struct dma_fence *amdgpu_job_submit(struct amdgpu_job *job) { - int r; - - if (!f) - return -EINVAL; - - r = drm_sched_job_init(&job->base, entity, owner); - if (r) - return r; + struct dma_fence *f; drm_sched_job_arm(&job->base); - - *f = dma_fence_get(&job->base.s_fence->finished); + f = dma_fence_get(&job->base.s_fence->finished); amdgpu_job_free_resources(job); drm_sched_entity_push_job(&job->base); - return 0; + return f; } int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring, @@ -238,30 +236,19 @@ int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring, return 0; } -static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job, - struct drm_sched_entity *s_entity) +static struct dma_fence * +amdgpu_job_prepare_job(struct drm_sched_job *sched_job, + struct drm_sched_entity *s_entity) { struct amdgpu_ring *ring = to_amdgpu_ring(s_entity->rq->sched); struct amdgpu_job *job = to_amdgpu_job(sched_job); - struct amdgpu_vm *vm = job->vm; - struct dma_fence *fence; + struct dma_fence *fence = NULL; int r; - fence = amdgpu_sync_get_fence(&job->sync); - if (fence && drm_sched_dependency_optimized(fence, s_entity)) { - r = amdgpu_sync_fence(&job->sched_sync, fence); - if (r) - DRM_ERROR("Error adding fence (%d)\n", r); - } - - while (fence == NULL && vm && !job->vmid) { - r = amdgpu_vmid_grab(vm, ring, &job->sync, - &job->base.s_fence->finished, - job); + while (!fence && job->vm && !job->vmid) { + r = amdgpu_vmid_grab(job->vm, ring, job, &fence); if (r) DRM_ERROR("Error getting VM ID (%d)\n", r); - - fence = 
amdgpu_sync_get_fence(&job->sync); } if (!fence && job->gang_submit) @@ -281,8 +268,6 @@ static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job) job = to_amdgpu_job(sched_job); finished = &job->base.s_fence->finished; - BUG_ON(amdgpu_sync_peek_fence(&job->sync, NULL)); - trace_amdgpu_sched_run_job(job); /* Skip job if VRAM is lost and never resubmit gangs */ @@ -341,7 +326,7 @@ void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched) } const struct drm_sched_backend_ops amdgpu_sched_ops = { - .dependency = amdgpu_job_dependency, + .prepare_job = amdgpu_job_prepare_job, .run_job = amdgpu_job_run, .timedout_job = amdgpu_job_timedout, .free_job = amdgpu_job_free_cb diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h index ab7b150e5d50..a372802ea4e0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h @@ -47,8 +47,7 @@ enum amdgpu_ib_pool_type; struct amdgpu_job { struct drm_sched_job base; struct amdgpu_vm *vm; - struct amdgpu_sync sync; - struct amdgpu_sync sched_sync; + struct amdgpu_sync explicit_sync; struct dma_fence hw_fence; struct dma_fence *gang_submit; uint32_t preamble_status; @@ -78,18 +77,20 @@ static inline struct amdgpu_ring *amdgpu_job_ring(struct amdgpu_job *job) return to_amdgpu_ring(job->base.entity->rq->sched); } -int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs, - struct amdgpu_job **job, struct amdgpu_vm *vm); -int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size, - enum amdgpu_ib_pool_type pool, struct amdgpu_job **job); +int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm, + struct drm_sched_entity *entity, void *owner, + unsigned int num_ibs, struct amdgpu_job **job); +int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, + struct drm_sched_entity *entity, void *owner, + size_t size, enum amdgpu_ib_pool_type pool_type, + struct amdgpu_job **job); void amdgpu_job_set_resources(struct amdgpu_job *job, struct amdgpu_bo *gds, struct amdgpu_bo *gws, struct amdgpu_bo *oa); void amdgpu_job_free_resources(struct amdgpu_job *job); void amdgpu_job_set_gang_leader(struct amdgpu_job *job, struct amdgpu_job *leader); void amdgpu_job_free(struct amdgpu_job *job); -int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity, - void *owner, struct dma_fence **f); +struct dma_fence *amdgpu_job_submit(struct amdgpu_job *job); int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring, struct dma_fence **fence); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c index 8f472517d181..6f81ed4fb0d9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c @@ -150,14 +150,15 @@ static int amdgpu_jpeg_dec_set_reg(struct amdgpu_ring *ring, uint32_t handle, const unsigned ib_size_dw = 16; int i, r; - r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, - AMDGPU_IB_POOL_DIRECT, &job); + r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4, + AMDGPU_IB_POOL_DIRECT, &job); if (r) return r; ib = &job->ibs[0]; - ib->ptr[0] = PACKETJ(adev->jpeg.internal.jpeg_pitch, 0, 0, PACKETJ_TYPE0); + ib->ptr[0] = PACKETJ(adev->jpeg.internal.jpeg_pitch, 0, 0, + PACKETJ_TYPE0); ib->ptr[1] = 0xDEADBEEF; for (i = 2; i < 16; i += 2) { ib->ptr[i] = PACKETJ(0, 0, 0, PACKETJ_TYPE6); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 
d75e0370a074..ba348046fcec 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -29,6 +29,7 @@ #include "amdgpu.h" #include <drm/amdgpu_drm.h> #include <drm/drm_drv.h> +#include <drm/drm_fb_helper.h> #include "amdgpu_uvd.h" #include "amdgpu_vce.h" #include "atom.h" diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h index 37322550d750..8a39300b1a84 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h @@ -36,7 +36,6 @@ #include <drm/drm_encoder.h> #include <drm/drm_fixed.h> #include <drm/drm_crtc_helper.h> -#include <drm/drm_fb_helper.h> #include <drm/drm_framebuffer.h> #include <drm/drm_probe_helper.h> #include <linux/i2c.h> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c index 090e66a1b284..bac7976975bd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c @@ -259,6 +259,14 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync, return 0; } +/* Free the entry back to the slab */ +static void amdgpu_sync_entry_free(struct amdgpu_sync_entry *e) +{ + hash_del(&e->node); + dma_fence_put(e->fence); + kmem_cache_free(amdgpu_sync_slab, e); +} + /** * amdgpu_sync_peek_fence - get the next fence not signaled yet * @@ -280,9 +288,7 @@ struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync, struct drm_sched_fence *s_fence = to_drm_sched_fence(f); if (dma_fence_is_signaled(f)) { - hash_del(&e->node); - dma_fence_put(f); - kmem_cache_free(amdgpu_sync_slab, e); + amdgpu_sync_entry_free(e); continue; } if (ring && s_fence) { @@ -355,15 +361,42 @@ int amdgpu_sync_clone(struct amdgpu_sync *source, struct amdgpu_sync *clone) if (r) return r; } else { - hash_del(&e->node); - dma_fence_put(f); - kmem_cache_free(amdgpu_sync_slab, e); + amdgpu_sync_entry_free(e); } } return 0; } +/** + * amdgpu_sync_push_to_job - push fences into job + * @sync: sync object to get the fences from + * @job: job to push the fences into + * + * Add all unsignaled fences from sync to job. 
+ */ +int amdgpu_sync_push_to_job(struct amdgpu_sync *sync, struct amdgpu_job *job) +{ + struct amdgpu_sync_entry *e; + struct hlist_node *tmp; + struct dma_fence *f; + int i, r; + + hash_for_each_safe(sync->fences, i, tmp, e, node) { + f = e->fence; + if (dma_fence_is_signaled(f)) { + amdgpu_sync_entry_free(e); + continue; + } + + dma_fence_get(f); + r = drm_sched_job_add_dependency(&job->base, f); + if (r) + return r; + } + return 0; +} + int amdgpu_sync_wait(struct amdgpu_sync *sync, bool intr) { struct amdgpu_sync_entry *e; @@ -375,9 +408,7 @@ int amdgpu_sync_wait(struct amdgpu_sync *sync, bool intr) if (r) return r; - hash_del(&e->node); - dma_fence_put(e->fence); - kmem_cache_free(amdgpu_sync_slab, e); + amdgpu_sync_entry_free(e); } return 0; @@ -396,11 +427,8 @@ void amdgpu_sync_free(struct amdgpu_sync *sync) struct hlist_node *tmp; unsigned int i; - hash_for_each_safe(sync->fences, i, tmp, e, node) { - hash_del(&e->node); - dma_fence_put(e->fence); - kmem_cache_free(amdgpu_sync_slab, e); - } + hash_for_each_safe(sync->fences, i, tmp, e, node) + amdgpu_sync_entry_free(e); } /** diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h index 2d5c613cda10..cf1e9e858efd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h @@ -30,6 +30,7 @@ struct dma_fence; struct dma_resv; struct amdgpu_device; struct amdgpu_ring; +struct amdgpu_job; enum amdgpu_sync_mode { AMDGPU_SYNC_ALWAYS, @@ -54,6 +55,7 @@ struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync, struct amdgpu_ring *ring); struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync); int amdgpu_sync_clone(struct amdgpu_sync *source, struct amdgpu_sync *clone); +int amdgpu_sync_push_to_job(struct amdgpu_sync *sync, struct amdgpu_job *job); int amdgpu_sync_wait(struct amdgpu_sync *sync, bool intr); void amdgpu_sync_free(struct amdgpu_sync *sync); int amdgpu_sync_init(void); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index c8169cea3032..7b5074e776f4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -190,7 +190,6 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo, struct amdgpu_device *adev = ring->adev; unsigned offset, num_pages, num_dw, num_bytes; uint64_t src_addr, dst_addr; - struct dma_fence *fence; struct amdgpu_job *job; void *cpu_addr; uint64_t flags; @@ -230,7 +229,9 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo, num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8); num_bytes = num_pages * 8 * AMDGPU_GPU_PAGES_IN_CPU_PAGE; - r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes, + r = amdgpu_job_alloc_with_ib(adev, &adev->mman.entity, + AMDGPU_FENCE_OWNER_UNDEFINED, + num_dw * 4 + num_bytes, AMDGPU_IB_POOL_DELAYED, &job); if (r) return r; @@ -270,18 +271,8 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo, } } - r = amdgpu_job_submit(job, &adev->mman.entity, - AMDGPU_FENCE_OWNER_UNDEFINED, &fence); - if (r) - goto error_free; - - dma_fence_put(fence); - - return r; - -error_free: - amdgpu_job_free(job); - return r; + dma_fence_put(amdgpu_job_submit(job)); + return 0; } /** @@ -1395,7 +1386,8 @@ static void amdgpu_ttm_vram_mm_access(struct amdgpu_device *adev, loff_t pos, } static int amdgpu_ttm_access_memory_sdma(struct ttm_buffer_object *bo, - unsigned long offset, void *buf, int len, int write) + unsigned long offset, void *buf, + int len, int write) { struct 
amdgpu_bo *abo = ttm_to_amdgpu_bo(bo); struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev); @@ -1419,26 +1411,27 @@ static int amdgpu_ttm_access_memory_sdma(struct ttm_buffer_object *bo, memcpy(adev->mman.sdma_access_ptr, buf, len); num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8); - r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, AMDGPU_IB_POOL_DELAYED, &job); + r = amdgpu_job_alloc_with_ib(adev, &adev->mman.entity, + AMDGPU_FENCE_OWNER_UNDEFINED, + num_dw * 4, AMDGPU_IB_POOL_DELAYED, + &job); if (r) goto out; amdgpu_res_first(abo->tbo.resource, offset, len, &src_mm); - src_addr = amdgpu_ttm_domain_start(adev, bo->resource->mem_type) + src_mm.start; + src_addr = amdgpu_ttm_domain_start(adev, bo->resource->mem_type) + + src_mm.start; dst_addr = amdgpu_bo_gpu_offset(adev->mman.sdma_access_bo); if (write) swap(src_addr, dst_addr); - amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr, dst_addr, PAGE_SIZE, false); + amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr, dst_addr, + PAGE_SIZE, false); amdgpu_ring_pad_ib(adev->mman.buffer_funcs_ring, &job->ibs[0]); WARN_ON(job->ibs[0].length_dw > num_dw); - r = amdgpu_job_submit(job, &adev->mman.entity, AMDGPU_FENCE_OWNER_UNDEFINED, &fence); - if (r) { - amdgpu_job_free(job); - goto out; - } + fence = amdgpu_job_submit(job); if (!dma_fence_wait_timeout(fence, false, adev->sdma_timeout)) r = -ETIMEDOUT; @@ -1988,7 +1981,9 @@ static int amdgpu_ttm_prepare_job(struct amdgpu_device *adev, AMDGPU_IB_POOL_DELAYED; int r; - r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, pool, job); + r = amdgpu_job_alloc_with_ib(adev, &adev->mman.entity, + AMDGPU_FENCE_OWNER_UNDEFINED, + num_dw * 4, pool, job); if (r) return r; @@ -1998,17 +1993,11 @@ static int amdgpu_ttm_prepare_job(struct amdgpu_device *adev, adev->gart.bo); (*job)->vm_needs_flush = true; } - if (resv) { - r = amdgpu_sync_resv(adev, &(*job)->sync, resv, - AMDGPU_SYNC_ALWAYS, - AMDGPU_FENCE_OWNER_UNDEFINED); - if (r) { - DRM_ERROR("sync failed (%d).\n", r); - amdgpu_job_free(*job); - return r; - } - } - return 0; + if (!resv) + return 0; + + return drm_sched_job_add_resv_dependencies(&(*job)->base, resv, + DMA_RESV_USAGE_BOOKKEEP); } int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset, @@ -2053,8 +2042,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset, if (direct_submit) r = amdgpu_job_submit_direct(job, ring, fence); else - r = amdgpu_job_submit(job, &adev->mman.entity, - AMDGPU_FENCE_OWNER_UNDEFINED, fence); + *fence = amdgpu_job_submit(job); if (r) goto error_free; @@ -2099,16 +2087,8 @@ static int amdgpu_ttm_fill_mem(struct amdgpu_ring *ring, uint32_t src_data, amdgpu_ring_pad_ib(ring, &job->ibs[0]); WARN_ON(job->ibs[0].length_dw > num_dw); - r = amdgpu_job_submit(job, &adev->mman.entity, - AMDGPU_FENCE_OWNER_UNDEFINED, fence); - if (r) - goto error_free; - + *fence = amdgpu_job_submit(job); return 0; - -error_free: - amdgpu_job_free(job); - return r; } int amdgpu_fill_buffer(struct amdgpu_bo *bo, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index 6eac649499d3..e00bb654e24b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -1132,7 +1132,9 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo, unsigned offset_idx = 0; unsigned offset[3] = { UVD_BASE_SI, 0, 0 }; - r = amdgpu_job_alloc_with_ib(adev, 64, direct ? 
AMDGPU_IB_POOL_DIRECT : + r = amdgpu_job_alloc_with_ib(ring->adev, &adev->uvd.entity, + AMDGPU_FENCE_OWNER_UNDEFINED, + 64, direct ? AMDGPU_IB_POOL_DIRECT : AMDGPU_IB_POOL_DELAYED, &job); if (r) return r; @@ -1175,16 +1177,13 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo, if (r) goto err_free; } else { - r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.base.resv, - AMDGPU_SYNC_ALWAYS, - AMDGPU_FENCE_OWNER_UNDEFINED); + r = drm_sched_job_add_resv_dependencies(&job->base, + bo->tbo.base.resv, + DMA_RESV_USAGE_KERNEL); if (r) goto err_free; - r = amdgpu_job_submit(job, &adev->uvd.entity, - AMDGPU_FENCE_OWNER_UNDEFINED, &f); - if (r) - goto err_free; + f = amdgpu_job_submit(job); } amdgpu_bo_reserve(bo, true); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c index 02cb3a12dd76..b239e874f2d5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c @@ -450,8 +450,10 @@ static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, uint64_t addr; int i, r; - r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, - AMDGPU_IB_POOL_DIRECT, &job); + r = amdgpu_job_alloc_with_ib(ring->adev, &ring->adev->vce.entity, + AMDGPU_FENCE_OWNER_UNDEFINED, + ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT, + &job); if (r) return r; @@ -538,7 +540,9 @@ static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, struct dma_fence *f = NULL; int i, r; - r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, + r = amdgpu_job_alloc_with_ib(ring->adev, &ring->adev->vce.entity, + AMDGPU_FENCE_OWNER_UNDEFINED, + ib_size_dw * 4, direct ? AMDGPU_IB_POOL_DIRECT : AMDGPU_IB_POOL_DELAYED, &job); if (r) @@ -570,8 +574,7 @@ static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, if (direct) r = amdgpu_job_submit_direct(job, ring, &f); else - r = amdgpu_job_submit(job, &ring->adev->vce.entity, - AMDGPU_FENCE_OWNER_UNDEFINED, &f); + f = amdgpu_job_submit(job); if (r) goto err; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c index c448c1bdf84d..33f3415096f7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c @@ -600,15 +600,16 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring, struct amdgpu_ib *ib_msg, struct dma_fence **fence) { + u64 addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr); struct amdgpu_device *adev = ring->adev; struct dma_fence *f = NULL; struct amdgpu_job *job; struct amdgpu_ib *ib; - uint64_t addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr); int i, r; - r = amdgpu_job_alloc_with_ib(adev, 64, - AMDGPU_IB_POOL_DIRECT, &job); + r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, + 64, AMDGPU_IB_POOL_DIRECT, + &job); if (r) goto err; @@ -787,8 +788,9 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring, if (sq) ib_size_dw += 8; - r = amdgpu_job_alloc_with_ib(adev, ib_size_dw * 4, - AMDGPU_IB_POOL_DIRECT, &job); + r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, + ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT, + &job); if (r) goto err; @@ -916,8 +918,9 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand if (sq) ib_size_dw += 8; - r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, - AMDGPU_IB_POOL_DIRECT, &job); + r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, + ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT, + &job); if (r) return r; @@ -982,8 +985,9 @@ static int 
amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han if (sq) ib_size_dw += 8; - r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, - AMDGPU_IB_POOL_DIRECT, &job); + r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, + ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT, + &job); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c index 69e105fa41f6..59cf64216fbb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c @@ -47,6 +47,32 @@ static int amdgpu_vm_sdma_map_table(struct amdgpu_bo_vm *table) return r; } +/* Allocate a new job for @count PTE updates */ +static int amdgpu_vm_sdma_alloc_job(struct amdgpu_vm_update_params *p, + unsigned int count) +{ + enum amdgpu_ib_pool_type pool = p->immediate ? AMDGPU_IB_POOL_IMMEDIATE + : AMDGPU_IB_POOL_DELAYED; + struct drm_sched_entity *entity = p->immediate ? &p->vm->immediate + : &p->vm->delayed; + unsigned int ndw; + int r; + + /* estimate how many dw we need */ + ndw = AMDGPU_VM_SDMA_MIN_NUM_DW; + if (p->pages_addr) + ndw += count * 2; + ndw = min(ndw, AMDGPU_VM_SDMA_MAX_NUM_DW); + + r = amdgpu_job_alloc_with_ib(p->adev, entity, AMDGPU_FENCE_OWNER_VM, + ndw * 4, pool, &p->job); + if (r) + return r; + + p->num_dw_left = ndw; + return 0; +} + /** * amdgpu_vm_sdma_prepare - prepare SDMA command submission * @@ -61,21 +87,22 @@ static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p, struct dma_resv *resv, enum amdgpu_sync_mode sync_mode) { - enum amdgpu_ib_pool_type pool = p->immediate ? AMDGPU_IB_POOL_IMMEDIATE - : AMDGPU_IB_POOL_DELAYED; - unsigned int ndw = AMDGPU_VM_SDMA_MIN_NUM_DW; + struct amdgpu_sync sync; int r; - r = amdgpu_job_alloc_with_ib(p->adev, ndw * 4, pool, &p->job); + r = amdgpu_vm_sdma_alloc_job(p, 0); if (r) return r; - p->num_dw_left = ndw; - if (!resv) return 0; - return amdgpu_sync_resv(p->adev, &p->job->sync, resv, sync_mode, p->vm); + amdgpu_sync_create(&sync); + r = amdgpu_sync_resv(p->adev, &sync, resv, sync_mode, p->vm); + if (!r) + r = amdgpu_sync_push_to_job(&sync, p->job); + amdgpu_sync_free(&sync); + return r; } /** @@ -91,20 +118,16 @@ static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p, struct dma_fence **fence) { struct amdgpu_ib *ib = p->job->ibs; - struct drm_sched_entity *entity; struct amdgpu_ring *ring; struct dma_fence *f; - int r; - entity = p->immediate ? &p->vm->immediate : &p->vm->delayed; - ring = container_of(entity->rq->sched, struct amdgpu_ring, sched); + ring = container_of(p->vm->delayed.rq->sched, struct amdgpu_ring, + sched); WARN_ON(ib->length_dw == 0); amdgpu_ring_pad_ib(ring, ib); WARN_ON(ib->length_dw > p->num_dw_left); - r = amdgpu_job_submit(p->job, entity, AMDGPU_FENCE_OWNER_VM, &f); - if (r) - goto error; + f = amdgpu_job_submit(p->job); if (p->unlocked) { struct dma_fence *tmp = dma_fence_get(f); @@ -127,10 +150,6 @@ static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p, } dma_fence_put(f); return 0; - -error: - amdgpu_job_free(p->job); - return r; } /** @@ -210,8 +229,6 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p, uint64_t flags) { struct amdgpu_bo *bo = &vmbo->bo; - enum amdgpu_ib_pool_type pool = p->immediate ? 
AMDGPU_IB_POOL_IMMEDIATE - : AMDGPU_IB_POOL_DELAYED; struct dma_resv_iter cursor; unsigned int i, ndw, nptes; struct dma_fence *fence; @@ -221,7 +238,7 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p, /* Wait for PD/PT moves to be completed */ dma_resv_iter_begin(&cursor, bo->tbo.base.resv, DMA_RESV_USAGE_KERNEL); dma_resv_for_each_fence_unlocked(&cursor, fence) { - r = amdgpu_sync_fence(&p->job->sync, fence); + r = drm_sched_job_add_dependency(&p->job->base, fence); if (r) { dma_resv_iter_end(&cursor); return r; @@ -238,19 +255,9 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p, if (r) return r; - /* estimate how many dw we need */ - ndw = 32; - if (p->pages_addr) - ndw += count * 2; - ndw = max(ndw, AMDGPU_VM_SDMA_MIN_NUM_DW); - ndw = min(ndw, AMDGPU_VM_SDMA_MAX_NUM_DW); - - r = amdgpu_job_alloc_with_ib(p->adev, ndw * 4, pool, - &p->job); + r = amdgpu_vm_sdma_alloc_job(p, count); if (r) return r; - - p->num_dw_left = ndw; } if (!p->pages_addr) { diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c index a2452fc304c5..248f1a4e915f 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c @@ -21,6 +21,7 @@ * */ +#include <drm/drm_fb_helper.h> #include <drm/drm_fourcc.h> #include <drm/drm_vblank.h> diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c index 6ac680c4c6e2..cd9c19060d89 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c @@ -21,6 +21,7 @@ * */ +#include <drm/drm_fb_helper.h> #include <drm/drm_fourcc.h> #include <drm/drm_vblank.h> diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c index 354ae09cc9a2..76323deecc58 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c @@ -23,6 +23,7 @@ #include <linux/pci.h> +#include <drm/drm_fb_helper.h> #include <drm/drm_fourcc.h> #include <drm/drm_vblank.h> diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c index 33977b0ba19d..01cf3ab111cb 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c @@ -21,6 +21,7 @@ * */ +#include <drm/drm_fb_helper.h> #include <drm/drm_fourcc.h> #include <drm/drm_vblank.h> diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c index a83efdc8aa0c..21e46817d82d 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c @@ -371,7 +371,9 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, * translation. Avoid this by doing the invalidation from the SDMA * itself. 
*/ - r = amdgpu_job_alloc_with_ib(adev, 16 * 4, AMDGPU_IB_POOL_IMMEDIATE, + r = amdgpu_job_alloc_with_ib(ring->adev, &adev->mman.entity, + AMDGPU_FENCE_OWNER_UNDEFINED, + 16 * 4, AMDGPU_IB_POOL_IMMEDIATE, &job); if (r) goto error_alloc; @@ -380,10 +382,7 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, job->vm_needs_flush = true; job->ibs->ptr[job->ibs->length_dw++] = ring->funcs->nop; amdgpu_ring_pad_ib(ring, &job->ibs[0]); - r = amdgpu_job_submit(job, &adev->mman.entity, - AMDGPU_FENCE_OWNER_UNDEFINED, &fence); - if (r) - goto error_submit; + fence = amdgpu_job_submit(job); mutex_unlock(&adev->mman.gtt_window_lock); @@ -392,9 +391,6 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, return; -error_submit: - amdgpu_job_free(job); - error_alloc: mutex_unlock(&adev->mman.gtt_window_lock); DRM_ERROR("Error flushing GPU TLB using the SDMA (%d)!\n", r); diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c index 375c440957dc..5fe872f4bea7 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c @@ -216,8 +216,8 @@ static int uvd_v6_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle uint64_t addr; int i, r; - r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, - AMDGPU_IB_POOL_DIRECT, &job); + r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4, + AMDGPU_IB_POOL_DIRECT, &job); if (r) return r; @@ -280,8 +280,8 @@ static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint64_t addr; int i, r; - r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, - AMDGPU_IB_POOL_DIRECT, &job); + r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4, + AMDGPU_IB_POOL_DIRECT, &job); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c index e668b3baa8c6..e407be6cb63c 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c @@ -213,7 +213,7 @@ static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring) * * Open up a stream for HW test */ -static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, +static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, u32 handle, struct amdgpu_bo *bo, struct dma_fence **fence) { @@ -224,8 +224,8 @@ static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle uint64_t addr; int i, r; - r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, - AMDGPU_IB_POOL_DIRECT, &job); + r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4, + AMDGPU_IB_POOL_DIRECT, &job); if (r) return r; @@ -276,7 +276,7 @@ err: * * Close up a stream for HW test or if userspace failed to do so */ -static int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, +static int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, u32 handle, struct amdgpu_bo *bo, struct dma_fence **fence) { @@ -287,8 +287,8 @@ static int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handl uint64_t addr; int i, r; - r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, - AMDGPU_IB_POOL_DIRECT, &job); + r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4, + AMDGPU_IB_POOL_DIRECT, &job); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c index 77227761e669..10048ce16aea 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c +++ 
b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c @@ -64,8 +64,11 @@ svm_migrate_gart_map(struct amdgpu_ring *ring, uint64_t npages, num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8); num_bytes = npages * 8; - r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes, - AMDGPU_IB_POOL_DELAYED, &job); + r = amdgpu_job_alloc_with_ib(adev, &adev->mman.entity, + AMDGPU_FENCE_OWNER_UNDEFINED, + num_dw * 4 + num_bytes, + AMDGPU_IB_POOL_DELAYED, + &job); if (r) return r; @@ -88,18 +91,10 @@ svm_migrate_gart_map(struct amdgpu_ring *ring, uint64_t npages, cpu_addr = &job->ibs[0].ptr[num_dw]; amdgpu_gart_map(adev, 0, npages, addr, pte_flags, cpu_addr); - r = amdgpu_job_submit(job, &adev->mman.entity, - AMDGPU_FENCE_OWNER_UNDEFINED, &fence); - if (r) - goto error_free; - + fence = amdgpu_job_submit(job); dma_fence_put(fence); return r; - -error_free: - amdgpu_job_free(job); - return r; } /** diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index ef4f788edabe..a8772082883e 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -82,7 +82,6 @@ #include <drm/drm_atomic_uapi.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_blend.h> -#include <drm/drm_fb_helper.h> #include <drm/drm_fourcc.h> #include <drm/drm_edid.h> #include <drm/drm_vblank.h> @@ -2820,7 +2819,6 @@ const struct amdgpu_ip_block_version dm_ip_block = static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = { .fb_create = amdgpu_display_user_framebuffer_create, .get_format_info = amd_get_format_info, - .output_poll_changed = drm_fb_helper_output_poll_changed, .atomic_check = amdgpu_dm_atomic_check, .atomic_commit = drm_atomic_helper_commit, }; diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_drv.c b/drivers/gpu/drm/arm/display/komeda/komeda_drv.c index 9fce4239d4ad..3f4e719eebd8 100644 --- a/drivers/gpu/drm/arm/display/komeda/komeda_drv.c +++ b/drivers/gpu/drm/arm/display/komeda/komeda_drv.c @@ -9,7 +9,7 @@ #include <linux/platform_device.h> #include <linux/component.h> #include <linux/pm_runtime.h> -#include <drm/drm_fb_helper.h> +#include <drm/drm_fbdev_generic.h> #include <drm/drm_module.h> #include <drm/drm_of.h> #include "komeda_dev.h" diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c index 451746ebbe71..62dc64550793 100644 --- a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c +++ b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c @@ -10,7 +10,6 @@ #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_drv.h> -#include <drm/drm_fb_helper.h> #include <drm/drm_gem_dma_helper.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_managed.h> @@ -59,7 +58,6 @@ static irqreturn_t komeda_kms_irq_handler(int irq, void *data) static const struct drm_driver komeda_kms_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, - .lastclose = drm_fb_helper_lastclose, DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(komeda_gem_dma_dumb_create), .fops = &komeda_cma_fops, .name = "komeda", diff --git a/drivers/gpu/drm/arm/hdlcd_crtc.c b/drivers/gpu/drm/arm/hdlcd_crtc.c index 7030339fa232..3cfefadc7c9d 100644 --- a/drivers/gpu/drm/arm/hdlcd_crtc.c +++ b/drivers/gpu/drm/arm/hdlcd_crtc.c @@ -19,7 +19,6 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_crtc.h> #include <drm/drm_fb_dma_helper.h> -#include <drm/drm_fb_helper.h> #include <drm/drm_framebuffer.h> #include <drm/drm_gem_dma_helper.h> 
#include <drm/drm_of.h> @@ -275,7 +274,7 @@ static void hdlcd_plane_atomic_update(struct drm_plane *plane, dest_h = drm_rect_height(&new_plane_state->dst); scanout_start = drm_fb_dma_get_gem_addr(fb, new_plane_state, 0); - hdlcd = plane->dev->dev_private; + hdlcd = drm_to_hdlcd_priv(plane->dev); hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_LENGTH, fb->pitches[0]); hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_PITCH, fb->pitches[0]); hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_COUNT, dest_h - 1); @@ -290,7 +289,6 @@ static const struct drm_plane_helper_funcs hdlcd_plane_helper_funcs = { static const struct drm_plane_funcs hdlcd_plane_funcs = { .update_plane = drm_atomic_helper_update_plane, .disable_plane = drm_atomic_helper_disable_plane, - .destroy = drm_plane_cleanup, .reset = drm_atomic_helper_plane_reset, .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state, .atomic_destroy_state = drm_atomic_helper_plane_destroy_state, @@ -298,24 +296,19 @@ static const struct drm_plane_funcs hdlcd_plane_funcs = { static struct drm_plane *hdlcd_plane_init(struct drm_device *drm) { - struct hdlcd_drm_private *hdlcd = drm->dev_private; + struct hdlcd_drm_private *hdlcd = drm_to_hdlcd_priv(drm); struct drm_plane *plane = NULL; u32 formats[ARRAY_SIZE(supported_formats)], i; - int ret; - - plane = devm_kzalloc(drm->dev, sizeof(*plane), GFP_KERNEL); - if (!plane) - return ERR_PTR(-ENOMEM); for (i = 0; i < ARRAY_SIZE(supported_formats); i++) formats[i] = supported_formats[i].fourcc; - ret = drm_universal_plane_init(drm, plane, 0xff, &hdlcd_plane_funcs, - formats, ARRAY_SIZE(formats), - NULL, - DRM_PLANE_TYPE_PRIMARY, NULL); - if (ret) - return ERR_PTR(ret); + plane = drmm_universal_plane_alloc(drm, struct drm_plane, dev, 0xff, + &hdlcd_plane_funcs, + formats, ARRAY_SIZE(formats), + NULL, DRM_PLANE_TYPE_PRIMARY, NULL); + if (IS_ERR(plane)) + return plane; drm_plane_helper_add(plane, &hdlcd_plane_helper_funcs); hdlcd->plane = plane; @@ -325,7 +318,7 @@ static struct drm_plane *hdlcd_plane_init(struct drm_device *drm) int hdlcd_setup_crtc(struct drm_device *drm) { - struct hdlcd_drm_private *hdlcd = drm->dev_private; + struct hdlcd_drm_private *hdlcd = drm_to_hdlcd_priv(drm); struct drm_plane *primary; int ret; diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c index a032003c340c..7043d1c9ed8f 100644 --- a/drivers/gpu/drm/arm/hdlcd_drv.c +++ b/drivers/gpu/drm/arm/hdlcd_drv.c @@ -26,7 +26,7 @@ #include <drm/drm_crtc.h> #include <drm/drm_debugfs.h> #include <drm/drm_drv.h> -#include <drm/drm_fb_helper.h> +#include <drm/drm_fbdev_generic.h> #include <drm/drm_gem_dma_helper.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_modeset_helper.h> @@ -98,7 +98,7 @@ static void hdlcd_irq_uninstall(struct hdlcd_drm_private *hdlcd) static int hdlcd_load(struct drm_device *drm, unsigned long flags) { - struct hdlcd_drm_private *hdlcd = drm->dev_private; + struct hdlcd_drm_private *hdlcd = drm_to_hdlcd_priv(drm); struct platform_device *pdev = to_platform_device(drm->dev); struct resource *res; u32 version; @@ -175,14 +175,21 @@ static const struct drm_mode_config_funcs hdlcd_mode_config_funcs = { .atomic_commit = drm_atomic_helper_commit, }; -static void hdlcd_setup_mode_config(struct drm_device *drm) +static int hdlcd_setup_mode_config(struct drm_device *drm) { - drm_mode_config_init(drm); + int ret; + + ret = drmm_mode_config_init(drm); + if (ret) + return ret; + drm->mode_config.min_width = 0; drm->mode_config.min_height = 0; drm->mode_config.max_width = HDLCD_MAX_XRES; 
drm->mode_config.max_height = HDLCD_MAX_YRES; drm->mode_config.funcs = &hdlcd_mode_config_funcs; + + return 0; } #ifdef CONFIG_DEBUG_FS @@ -190,7 +197,7 @@ static int hdlcd_show_underrun_count(struct seq_file *m, void *arg) { struct drm_info_node *node = (struct drm_info_node *)m->private; struct drm_device *drm = node->minor->dev; - struct hdlcd_drm_private *hdlcd = drm->dev_private; + struct hdlcd_drm_private *hdlcd = drm_to_hdlcd_priv(drm); seq_printf(m, "underrun : %d\n", atomic_read(&hdlcd->buffer_underrun_count)); seq_printf(m, "dma_end : %d\n", atomic_read(&hdlcd->dma_end_count)); @@ -203,7 +210,7 @@ static int hdlcd_show_pxlclock(struct seq_file *m, void *arg) { struct drm_info_node *node = (struct drm_info_node *)m->private; struct drm_device *drm = node->minor->dev; - struct hdlcd_drm_private *hdlcd = drm->dev_private; + struct hdlcd_drm_private *hdlcd = drm_to_hdlcd_priv(drm); unsigned long clkrate = clk_get_rate(hdlcd->clk); unsigned long mode_clock = hdlcd->crtc.mode.crtc_clock * 1000; @@ -247,18 +254,18 @@ static int hdlcd_drm_bind(struct device *dev) struct hdlcd_drm_private *hdlcd; int ret; - hdlcd = devm_kzalloc(dev, sizeof(*hdlcd), GFP_KERNEL); - if (!hdlcd) - return -ENOMEM; + hdlcd = devm_drm_dev_alloc(dev, &hdlcd_driver, typeof(*hdlcd), base); + if (IS_ERR(hdlcd)) + return PTR_ERR(hdlcd); - drm = drm_dev_alloc(&hdlcd_driver, dev); - if (IS_ERR(drm)) - return PTR_ERR(drm); + drm = &hdlcd->base; - drm->dev_private = hdlcd; dev_set_drvdata(dev, drm); - hdlcd_setup_mode_config(drm); + ret = hdlcd_setup_mode_config(drm); + if (ret) + goto err_free; + ret = hdlcd_load(drm, 0); if (ret) goto err_free; @@ -317,17 +324,14 @@ err_unload: hdlcd_irq_uninstall(hdlcd); of_reserved_mem_device_release(drm->dev); err_free: - drm_mode_config_cleanup(drm); dev_set_drvdata(dev, NULL); - drm_dev_put(drm); - return ret; } static void hdlcd_drm_unbind(struct device *dev) { struct drm_device *drm = dev_get_drvdata(dev); - struct hdlcd_drm_private *hdlcd = drm->dev_private; + struct hdlcd_drm_private *hdlcd = drm_to_hdlcd_priv(drm); drm_dev_unregister(drm); drm_kms_helper_poll_fini(drm); @@ -341,10 +345,7 @@ static void hdlcd_drm_unbind(struct device *dev) if (pm_runtime_enabled(dev)) pm_runtime_disable(dev); of_reserved_mem_device_release(dev); - drm_mode_config_cleanup(drm); - drm->dev_private = NULL; dev_set_drvdata(dev, NULL); - drm_dev_put(drm); } static const struct component_master_ops hdlcd_master_ops = { diff --git a/drivers/gpu/drm/arm/hdlcd_drv.h b/drivers/gpu/drm/arm/hdlcd_drv.h index 909c39c28487..f1c1da2ac2db 100644 --- a/drivers/gpu/drm/arm/hdlcd_drv.h +++ b/drivers/gpu/drm/arm/hdlcd_drv.h @@ -7,6 +7,7 @@ #define __HDLCD_DRV_H__ struct hdlcd_drm_private { + struct drm_device base; void __iomem *mmio; struct clk *clk; struct drm_crtc crtc; @@ -20,6 +21,7 @@ struct hdlcd_drm_private { #endif }; +#define drm_to_hdlcd_priv(x) container_of(x, struct hdlcd_drm_private, base) #define crtc_to_hdlcd_priv(x) container_of(x, struct hdlcd_drm_private, crtc) static inline void hdlcd_write(struct hdlcd_drm_private *hdlcd, diff --git a/drivers/gpu/drm/arm/malidp_crtc.c b/drivers/gpu/drm/arm/malidp_crtc.c index 962730772b2f..dc01c43f6193 100644 --- a/drivers/gpu/drm/arm/malidp_crtc.c +++ b/drivers/gpu/drm/arm/malidp_crtc.c @@ -514,7 +514,6 @@ static void malidp_crtc_disable_vblank(struct drm_crtc *crtc) } static const struct drm_crtc_funcs malidp_crtc_funcs = { - .destroy = drm_crtc_cleanup, .set_config = drm_atomic_helper_set_config, .page_flip = drm_atomic_helper_page_flip, .reset = 
malidp_crtc_reset, @@ -526,7 +525,7 @@ static const struct drm_crtc_funcs malidp_crtc_funcs = { int malidp_crtc_init(struct drm_device *drm) { - struct malidp_drm *malidp = drm->dev_private; + struct malidp_drm *malidp = drm_to_malidp(drm); struct drm_plane *primary = NULL, *plane; int ret; @@ -548,8 +547,8 @@ int malidp_crtc_init(struct drm_device *drm) return -EINVAL; } - ret = drm_crtc_init_with_planes(drm, &malidp->crtc, primary, NULL, - &malidp_crtc_funcs, NULL); + ret = drmm_crtc_init_with_planes(drm, &malidp->crtc, primary, NULL, + &malidp_crtc_funcs, NULL); if (ret) return ret; diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c index 1d0b0c54ccc7..589c1c66a6dc 100644 --- a/drivers/gpu/drm/arm/malidp_drv.c +++ b/drivers/gpu/drm/arm/malidp_drv.c @@ -19,10 +19,11 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_crtc.h> #include <drm/drm_drv.h> -#include <drm/drm_fb_helper.h> +#include <drm/drm_fbdev_generic.h> #include <drm/drm_fourcc.h> #include <drm/drm_gem_dma_helper.h> #include <drm/drm_gem_framebuffer_helper.h> +#include <drm/drm_managed.h> #include <drm/drm_modeset_helper.h> #include <drm/drm_module.h> #include <drm/drm_of.h> @@ -168,7 +169,7 @@ static void malidp_atomic_commit_se_config(struct drm_crtc *crtc, */ static int malidp_set_and_wait_config_valid(struct drm_device *drm) { - struct malidp_drm *malidp = drm->dev_private; + struct malidp_drm *malidp = drm_to_malidp(drm); struct malidp_hw_device *hwdev = malidp->dev; int ret; @@ -189,7 +190,7 @@ static int malidp_set_and_wait_config_valid(struct drm_device *drm) static void malidp_atomic_commit_hw_done(struct drm_atomic_state *state) { struct drm_device *drm = state->dev; - struct malidp_drm *malidp = drm->dev_private; + struct malidp_drm *malidp = drm_to_malidp(drm); int loop = 5; malidp->event = malidp->crtc.state->event; @@ -230,7 +231,7 @@ static void malidp_atomic_commit_hw_done(struct drm_atomic_state *state) static void malidp_atomic_commit_tail(struct drm_atomic_state *state) { struct drm_device *drm = state->dev; - struct malidp_drm *malidp = drm->dev_private; + struct malidp_drm *malidp = drm_to_malidp(drm); struct drm_crtc *crtc; struct drm_crtc_state *old_crtc_state; int i; @@ -392,10 +393,12 @@ static const struct drm_mode_config_funcs malidp_mode_config_funcs = { static int malidp_init(struct drm_device *drm) { int ret; - struct malidp_drm *malidp = drm->dev_private; + struct malidp_drm *malidp = drm_to_malidp(drm); struct malidp_hw_device *hwdev = malidp->dev; - drm_mode_config_init(drm); + ret = drmm_mode_config_init(drm); + if (ret) + goto out; drm->mode_config.min_width = hwdev->min_line_size; drm->mode_config.min_height = hwdev->min_line_size; @@ -406,29 +409,21 @@ static int malidp_init(struct drm_device *drm) ret = malidp_crtc_init(drm); if (ret) - goto crtc_fail; + goto out; ret = malidp_mw_connector_init(drm); if (ret) - goto crtc_fail; - - return 0; + goto out; -crtc_fail: - drm_mode_config_cleanup(drm); +out: return ret; } -static void malidp_fini(struct drm_device *drm) -{ - drm_mode_config_cleanup(drm); -} - static int malidp_irq_init(struct platform_device *pdev) { int irq_de, irq_se, ret = 0; struct drm_device *drm = dev_get_drvdata(&pdev->dev); - struct malidp_drm *malidp = drm->dev_private; + struct malidp_drm *malidp = drm_to_malidp(drm); struct malidp_hw_device *hwdev = malidp->dev; /* fetch the interrupts from DT */ @@ -462,7 +457,7 @@ static int malidp_dumb_create(struct drm_file *file_priv, struct drm_device *drm, struct drm_mode_create_dumb *args) { 
- struct malidp_drm *malidp = drm->dev_private; + struct malidp_drm *malidp = drm_to_malidp(drm); /* allocate for the worst case scenario, i.e. rotated buffers */ u8 alignment = malidp_hw_get_pitch_align(malidp->dev, 1); @@ -508,7 +503,7 @@ static void malidp_error_stats_dump(const char *prefix, static int malidp_show_stats(struct seq_file *m, void *arg) { struct drm_device *drm = m->private; - struct malidp_drm *malidp = drm->dev_private; + struct malidp_drm *malidp = drm_to_malidp(drm); unsigned long irqflags; struct malidp_error_stats de_errors, se_errors; @@ -531,7 +526,7 @@ static ssize_t malidp_debugfs_write(struct file *file, const char __user *ubuf, { struct seq_file *m = file->private_data; struct drm_device *drm = m->private; - struct malidp_drm *malidp = drm->dev_private; + struct malidp_drm *malidp = drm_to_malidp(drm); unsigned long irqflags; spin_lock_irqsave(&malidp->errors_lock, irqflags); @@ -552,7 +547,7 @@ static const struct file_operations malidp_debugfs_fops = { static void malidp_debugfs_init(struct drm_minor *minor) { - struct malidp_drm *malidp = minor->dev->dev_private; + struct malidp_drm *malidp = drm_to_malidp(minor->dev); malidp_error_stats_init(&malidp->de_errors); malidp_error_stats_init(&malidp->se_errors); @@ -652,7 +647,7 @@ static ssize_t core_id_show(struct device *dev, struct device_attribute *attr, char *buf) { struct drm_device *drm = dev_get_drvdata(dev); - struct malidp_drm *malidp = drm->dev_private; + struct malidp_drm *malidp = drm_to_malidp(drm); return snprintf(buf, PAGE_SIZE, "%08x\n", malidp->core_id); } @@ -670,7 +665,7 @@ ATTRIBUTE_GROUPS(mali_dp); static int malidp_runtime_pm_suspend(struct device *dev) { struct drm_device *drm = dev_get_drvdata(dev); - struct malidp_drm *malidp = drm->dev_private; + struct malidp_drm *malidp = drm_to_malidp(drm); struct malidp_hw_device *hwdev = malidp->dev; /* we can only suspend if the hardware is in config mode */ @@ -689,7 +684,7 @@ static int malidp_runtime_pm_suspend(struct device *dev) static int malidp_runtime_pm_resume(struct device *dev) { struct drm_device *drm = dev_get_drvdata(dev); - struct malidp_drm *malidp = drm->dev_private; + struct malidp_drm *malidp = drm_to_malidp(drm); struct malidp_hw_device *hwdev = malidp->dev; clk_prepare_enable(hwdev->pclk); @@ -716,11 +711,13 @@ static int malidp_bind(struct device *dev) int ret = 0, i; u32 version, out_depth = 0; - malidp = devm_kzalloc(dev, sizeof(*malidp), GFP_KERNEL); - if (!malidp) - return -ENOMEM; + malidp = devm_drm_dev_alloc(dev, &malidp_driver, typeof(*malidp), base); + if (IS_ERR(malidp)) + return PTR_ERR(malidp); + + drm = &malidp->base; - hwdev = devm_kzalloc(dev, sizeof(*hwdev), GFP_KERNEL); + hwdev = drmm_kzalloc(drm, sizeof(*hwdev), GFP_KERNEL); if (!hwdev) return -ENOMEM; @@ -753,13 +750,6 @@ static int malidp_bind(struct device *dev) if (ret && ret != -ENODEV) return ret; - drm = drm_dev_alloc(&malidp_driver, dev); - if (IS_ERR(drm)) { - ret = PTR_ERR(drm); - goto alloc_fail; - } - - drm->dev_private = malidp; dev_set_drvdata(dev, drm); /* Enable power management */ @@ -878,17 +868,13 @@ irq_init_fail: bind_fail: of_node_put(malidp->crtc.port); malidp->crtc.port = NULL; - malidp_fini(drm); query_hw_fail: pm_runtime_put(dev); if (pm_runtime_enabled(dev)) pm_runtime_disable(dev); else malidp_runtime_pm_suspend(dev); - drm->dev_private = NULL; dev_set_drvdata(dev, NULL); - drm_dev_put(drm); -alloc_fail: of_reserved_mem_device_release(dev); return ret; @@ -897,7 +883,7 @@ alloc_fail: static void malidp_unbind(struct device *dev) 
{ struct drm_device *drm = dev_get_drvdata(dev); - struct malidp_drm *malidp = drm->dev_private; + struct malidp_drm *malidp = drm_to_malidp(drm); struct malidp_hw_device *hwdev = malidp->dev; drm_dev_unregister(drm); @@ -909,15 +895,12 @@ static void malidp_unbind(struct device *dev) component_unbind_all(dev, drm); of_node_put(malidp->crtc.port); malidp->crtc.port = NULL; - malidp_fini(drm); pm_runtime_put(dev); if (pm_runtime_enabled(dev)) pm_runtime_disable(dev); else malidp_runtime_pm_suspend(dev); - drm->dev_private = NULL; dev_set_drvdata(dev, NULL); - drm_dev_put(drm); of_reserved_mem_device_release(dev); } diff --git a/drivers/gpu/drm/arm/malidp_drv.h b/drivers/gpu/drm/arm/malidp_drv.h index cdfddfabf2d1..bc0387876dea 100644 --- a/drivers/gpu/drm/arm/malidp_drv.h +++ b/drivers/gpu/drm/arm/malidp_drv.h @@ -29,6 +29,7 @@ struct malidp_error_stats { }; struct malidp_drm { + struct drm_device base; struct malidp_hw_device *dev; struct drm_crtc crtc; struct drm_writeback_connector mw_connector; @@ -44,6 +45,7 @@ struct malidp_drm { #endif }; +#define drm_to_malidp(x) container_of(x, struct malidp_drm, base) #define crtc_to_malidp_device(x) container_of(x, struct malidp_drm, crtc) struct malidp_plane { diff --git a/drivers/gpu/drm/arm/malidp_hw.c b/drivers/gpu/drm/arm/malidp_hw.c index e9de542f9b7c..9b845d3f34e1 100644 --- a/drivers/gpu/drm/arm/malidp_hw.c +++ b/drivers/gpu/drm/arm/malidp_hw.c @@ -1168,7 +1168,7 @@ static void malidp_hw_clear_irq(struct malidp_hw_device *hwdev, u8 block, u32 ir static irqreturn_t malidp_de_irq(int irq, void *arg) { struct drm_device *drm = arg; - struct malidp_drm *malidp = drm->dev_private; + struct malidp_drm *malidp = drm_to_malidp(drm); struct malidp_hw_device *hwdev; struct malidp_hw *hw; const struct malidp_irq_map *de; @@ -1226,7 +1226,7 @@ static irqreturn_t malidp_de_irq(int irq, void *arg) static irqreturn_t malidp_de_irq_thread_handler(int irq, void *arg) { struct drm_device *drm = arg; - struct malidp_drm *malidp = drm->dev_private; + struct malidp_drm *malidp = drm_to_malidp(drm); wake_up(&malidp->wq); @@ -1252,7 +1252,7 @@ void malidp_de_irq_hw_init(struct malidp_hw_device *hwdev) int malidp_de_irq_init(struct drm_device *drm, int irq) { - struct malidp_drm *malidp = drm->dev_private; + struct malidp_drm *malidp = drm_to_malidp(drm); struct malidp_hw_device *hwdev = malidp->dev; int ret; @@ -1286,7 +1286,7 @@ void malidp_de_irq_fini(struct malidp_hw_device *hwdev) static irqreturn_t malidp_se_irq(int irq, void *arg) { struct drm_device *drm = arg; - struct malidp_drm *malidp = drm->dev_private; + struct malidp_drm *malidp = drm_to_malidp(drm); struct malidp_hw_device *hwdev = malidp->dev; struct malidp_hw *hw = hwdev->hw; const struct malidp_irq_map *se = &hw->map.se_irq_map; @@ -1363,7 +1363,7 @@ static irqreturn_t malidp_se_irq_thread_handler(int irq, void *arg) int malidp_se_irq_init(struct drm_device *drm, int irq) { - struct malidp_drm *malidp = drm->dev_private; + struct malidp_drm *malidp = drm_to_malidp(drm); struct malidp_hw_device *hwdev = malidp->dev; int ret; diff --git a/drivers/gpu/drm/arm/malidp_mw.c b/drivers/gpu/drm/arm/malidp_mw.c index ef76d0e6ee2f..626709bec6f5 100644 --- a/drivers/gpu/drm/arm/malidp_mw.c +++ b/drivers/gpu/drm/arm/malidp_mw.c @@ -129,7 +129,7 @@ malidp_mw_encoder_atomic_check(struct drm_encoder *encoder, struct drm_connector_state *conn_state) { struct malidp_mw_connector_state *mw_state = to_mw_state(conn_state); - struct malidp_drm *malidp = encoder->dev->dev_private; + struct malidp_drm *malidp = 
drm_to_malidp(encoder->dev); struct drm_framebuffer *fb; int i, n_planes; @@ -207,7 +207,7 @@ static u32 *get_writeback_formats(struct malidp_drm *malidp, int *n_formats) int malidp_mw_connector_init(struct drm_device *drm) { - struct malidp_drm *malidp = drm->dev_private; + struct malidp_drm *malidp = drm_to_malidp(drm); u32 *formats; int ret, n_formats; @@ -236,7 +236,7 @@ int malidp_mw_connector_init(struct drm_device *drm) void malidp_mw_atomic_commit(struct drm_device *drm, struct drm_atomic_state *old_state) { - struct malidp_drm *malidp = drm->dev_private; + struct malidp_drm *malidp = drm_to_malidp(drm); struct drm_writeback_connector *mw_conn = &malidp->mw_connector; struct drm_connector_state *conn_state = mw_conn->base.state; struct malidp_hw_device *hwdev = malidp->dev; diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c index 45f5e35e7f24..34547edf1ee3 100644 --- a/drivers/gpu/drm/arm/malidp_planes.c +++ b/drivers/gpu/drm/arm/malidp_planes.c @@ -68,14 +68,6 @@ /* readahead for partial-frame prefetch */ #define MALIDP_MMU_PREFETCH_READAHEAD 8 -static void malidp_de_plane_destroy(struct drm_plane *plane) -{ - struct malidp_plane *mp = to_malidp_plane(plane); - - drm_plane_cleanup(plane); - kfree(mp); -} - /* * Replicate what the default ->reset hook does: free the state pointer and * allocate a new empty object. We just need enough space to store @@ -151,7 +143,7 @@ bool malidp_format_mod_supported(struct drm_device *drm, { const struct drm_format_info *info; const u64 *modifiers; - struct malidp_drm *malidp = drm->dev_private; + struct malidp_drm *malidp = drm_to_malidp(drm); const struct malidp_hw_regmap *map = &malidp->dev->hw->map; if (WARN_ON(modifier == DRM_FORMAT_MOD_INVALID)) @@ -260,7 +252,6 @@ static bool malidp_format_mod_supported_per_plane(struct drm_plane *plane, static const struct drm_plane_funcs malidp_de_plane_funcs = { .update_plane = drm_atomic_helper_update_plane, .disable_plane = drm_atomic_helper_disable_plane, - .destroy = malidp_de_plane_destroy, .reset = malidp_plane_reset, .atomic_duplicate_state = malidp_duplicate_plane_state, .atomic_destroy_state = malidp_destroy_plane_state, @@ -931,7 +922,7 @@ static const uint64_t linear_only_modifiers[] = { int malidp_de_planes_init(struct drm_device *drm) { - struct malidp_drm *malidp = drm->dev_private; + struct malidp_drm *malidp = drm_to_malidp(drm); const struct malidp_hw_regmap *map = &malidp->dev->hw->map; struct malidp_plane *plane = NULL; enum drm_plane_type plane_type; @@ -972,12 +963,6 @@ int malidp_de_planes_init(struct drm_device *drm) for (i = 0; i < map->n_layers; i++) { u8 id = map->layers[i].id; - plane = kzalloc(sizeof(*plane), GFP_KERNEL); - if (!plane) { - ret = -ENOMEM; - goto cleanup; - } - /* build the list of DRM supported formats based on the map */ for (n = 0, j = 0; j < map->n_pixel_formats; j++) { if ((map->pixel_formats[j].layer & id) == id) @@ -990,13 +975,14 @@ int malidp_de_planes_init(struct drm_device *drm) /* * All the layers except smart layer supports AFBC modifiers. */ - ret = drm_universal_plane_init(drm, &plane->base, crtcs, - &malidp_de_plane_funcs, formats, n, - (id == DE_SMART) ? linear_only_modifiers : modifiers, - plane_type, NULL); - - if (ret < 0) + plane = drmm_universal_plane_alloc(drm, struct malidp_plane, base, + crtcs, &malidp_de_plane_funcs, formats, n, + (id == DE_SMART) ? 
linear_only_modifiers : + modifiers, plane_type, NULL); + if (IS_ERR(plane)) { + ret = PTR_ERR(plane); goto cleanup; + } drm_plane_helper_add(&plane->base, &malidp_de_plane_helper_funcs); diff --git a/drivers/gpu/drm/armada/armada_fbdev.c b/drivers/gpu/drm/armada/armada_fbdev.c index 38f5170c0fea..584cee123bd8 100644 --- a/drivers/gpu/drm/armada/armada_fbdev.c +++ b/drivers/gpu/drm/armada/armada_fbdev.c @@ -19,6 +19,8 @@ static const struct fb_ops armada_fb_ops = { .owner = THIS_MODULE, DRM_FB_HELPER_DEFAULT_OPS, + .fb_read = drm_fb_helper_cfb_read, + .fb_write = drm_fb_helper_cfb_write, .fb_fillrect = drm_fb_helper_cfb_fillrect, .fb_copyarea = drm_fb_helper_cfb_copyarea, .fb_imageblit = drm_fb_helper_cfb_imageblit, @@ -72,7 +74,7 @@ static int armada_fbdev_create(struct drm_fb_helper *fbh, if (IS_ERR(dfb)) return PTR_ERR(dfb); - info = drm_fb_helper_alloc_fbi(fbh); + info = drm_fb_helper_alloc_info(fbh); if (IS_ERR(info)) { ret = PTR_ERR(info); goto err_fballoc; @@ -155,7 +157,7 @@ void armada_fbdev_fini(struct drm_device *dev) struct drm_fb_helper *fbh = priv->fbdev; if (fbh) { - drm_fb_helper_unregister_fbi(fbh); + drm_fb_helper_unregister_info(fbh); drm_fb_helper_fini(fbh); diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c index a94f1a9e8f40..718119e168a6 100644 --- a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c +++ b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c @@ -16,7 +16,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_crtc_helper.h> #include <drm/drm_device.h> -#include <drm/drm_fb_helper.h> +#include <drm/drm_fbdev_generic.h> #include <drm/drm_gem_dma_helper.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_module.h> diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c index bbeb5defc8f5..420fc75c240e 100644 --- a/drivers/gpu/drm/ast/ast_drv.c +++ b/drivers/gpu/drm/ast/ast_drv.c @@ -33,6 +33,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_crtc_helper.h> #include <drm/drm_drv.h> +#include <drm/drm_fbdev_generic.h> #include <drm/drm_gem_shmem_helper.h> #include <drm/drm_module.h> #include <drm/drm_probe_helper.h> diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h index 74f41282444f..d51b81fea9c8 100644 --- a/drivers/gpu/drm/ast/ast_drv.h +++ b/drivers/gpu/drm/ast/ast_drv.h @@ -38,7 +38,6 @@ #include <drm/drm_encoder.h> #include <drm/drm_mode.h> #include <drm/drm_framebuffer.h> -#include <drm/drm_fb_helper.h> #define DRIVER_AUTHOR "Dave Airlie" diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c index f7e7f4e919c7..a2bb5b916235 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c @@ -19,7 +19,7 @@ #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_drv.h> -#include <drm/drm_fb_helper.h> +#include <drm/drm_fbdev_generic.h> #include <drm/drm_gem_dma_helper.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_module.h> diff --git a/drivers/gpu/drm/bridge/tc358762.c b/drivers/gpu/drm/bridge/tc358762.c index 7f4fce1aa998..0b6a28436885 100644 --- a/drivers/gpu/drm/bridge/tc358762.c +++ b/drivers/gpu/drm/bridge/tc358762.c @@ -11,6 +11,7 @@ */ #include <linux/delay.h> +#include <linux/mod_devicetable.h> #include <linux/module.h> #include <linux/of_graph.h> #include <linux/regulator/consumer.h> @@ -19,7 +20,6 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_crtc.h> -#include <drm/drm_fb_helper.h> #include 
<drm/drm_mipi_dsi.h> #include <drm/drm_of.h> #include <drm/drm_panel.h> diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 1a586b3c454b..d579fd8f7cb8 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -2536,7 +2536,7 @@ int drm_atomic_helper_prepare_planes(struct drm_device *dev, if (funcs->prepare_fb) { ret = funcs->prepare_fb(plane, new_plane_state); if (ret) - goto fail; + goto fail_prepare_fb; } else { WARN_ON_ONCE(funcs->cleanup_fb); @@ -2545,13 +2545,34 @@ int drm_atomic_helper_prepare_planes(struct drm_device *dev, ret = drm_gem_plane_helper_prepare_fb(plane, new_plane_state); if (ret) - goto fail; + goto fail_prepare_fb; + } + } + + for_each_new_plane_in_state(state, plane, new_plane_state, i) { + const struct drm_plane_helper_funcs *funcs = plane->helper_private; + + if (funcs->begin_fb_access) { + ret = funcs->begin_fb_access(plane, new_plane_state); + if (ret) + goto fail_begin_fb_access; } } return 0; -fail: +fail_begin_fb_access: + for_each_new_plane_in_state(state, plane, new_plane_state, j) { + const struct drm_plane_helper_funcs *funcs = plane->helper_private; + + if (j >= i) + continue; + + if (funcs->end_fb_access) + funcs->end_fb_access(plane, new_plane_state); + } + i = j; /* set i to upper limit to cleanup all planes */ +fail_prepare_fb: for_each_new_plane_in_state(state, plane, new_plane_state, j) { const struct drm_plane_helper_funcs *funcs; @@ -2828,6 +2849,13 @@ void drm_atomic_helper_cleanup_planes(struct drm_device *dev, int i; for_each_oldnew_plane_in_state(old_state, plane, old_plane_state, new_plane_state, i) { + const struct drm_plane_helper_funcs *funcs = plane->helper_private; + + if (funcs->end_fb_access) + funcs->end_fb_access(plane, new_plane_state); + } + + for_each_oldnew_plane_in_state(old_state, plane, old_plane_state, new_plane_state, i) { const struct drm_plane_helper_funcs *funcs; struct drm_plane_state *plane_state; diff --git a/drivers/gpu/drm/drm_client_modeset.c b/drivers/gpu/drm/drm_client_modeset.c index bbc535cc50dd..d553e793e673 100644 --- a/drivers/gpu/drm/drm_client_modeset.c +++ b/drivers/gpu/drm/drm_client_modeset.c @@ -1237,3 +1237,7 @@ int drm_client_modeset_dpms(struct drm_client_dev *client, int mode) return ret; } EXPORT_SYMBOL(drm_client_modeset_dpms); + +#ifdef CONFIG_DRM_KUNIT_TEST +#include "tests/drm_client_modeset_test.c" +#endif diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c index f5fb22e0d033..a209659a996c 100644 --- a/drivers/gpu/drm/drm_crtc_helper.c +++ b/drivers/gpu/drm/drm_crtc_helper.c @@ -43,7 +43,6 @@ #include <drm/drm_drv.h> #include <drm/drm_edid.h> #include <drm/drm_encoder.h> -#include <drm/drm_fb_helper.h> #include <drm/drm_fourcc.h> #include <drm/drm_framebuffer.h> #include <drm/drm_print.h> diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 4671dc23abe0..3841aba17abd 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -2807,6 +2807,8 @@ u32 drm_edid_get_panel_id(struct i2c_adapter *adapter) if (edid_block_status_valid(status, edid_block_tag(base_block))) panel_id = edid_extract_panel_id(base_block); + else + edid_block_dump(KERN_NOTICE, base_block, 0); kfree(base_block); diff --git a/drivers/gpu/drm/drm_edid_load.c b/drivers/gpu/drm/drm_edid_load.c index ef4ab59d6935..5d9ef267ebb3 100644 --- a/drivers/gpu/drm/drm_edid_load.c +++ b/drivers/gpu/drm/drm_edid_load.c @@ -172,20 +172,9 @@ static const struct drm_edid *edid_load(struct drm_connector 
*connector, const c fwdata = generic_edid[builtin]; fwsize = sizeof(generic_edid[builtin]); } else { - struct platform_device *pdev; int err; - pdev = platform_device_register_simple(connector->name, -1, NULL, 0); - if (IS_ERR(pdev)) { - drm_err(connector->dev, - "[CONNECTOR:%d:%s] Failed to register EDID firmware platform device for connector \"%s\"\n", - connector->base.id, connector->name, - connector->name); - return ERR_CAST(pdev); - } - - err = request_firmware(&fw, name, &pdev->dev); - platform_device_unregister(pdev); + err = request_firmware(&fw, name, connector->dev->dev); if (err) { drm_err(connector->dev, "[CONNECTOR:%d:%s] Requesting EDID firmware \"%s\" failed (err=%d)\n", diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 71edb80fe0fb..a1f86e436ae8 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -30,24 +30,17 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/console.h> -#include <linux/dma-buf.h> -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/slab.h> #include <linux/sysrq.h> -#include <linux/vmalloc.h> #include <drm/drm_atomic.h> -#include <drm/drm_crtc.h> -#include <drm/drm_crtc_helper.h> #include <drm/drm_drv.h> #include <drm/drm_fb_helper.h> #include <drm/drm_fourcc.h> #include <drm/drm_framebuffer.h> +#include <drm/drm_modeset_helper_vtables.h> #include <drm/drm_print.h> #include <drm/drm_vblank.h> -#include "drm_crtc_helper_internal.h" #include "drm_internal.h" static bool drm_fbdev_emulation = true; @@ -74,7 +67,7 @@ MODULE_PARM_DESC(drm_fbdev_overalloc, * considered as a broken and legacy behaviour from a modern fbdev device. */ #if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM) -static bool drm_leak_fbdev_smem = false; +static bool drm_leak_fbdev_smem; module_param_unsafe(drm_leak_fbdev_smem, bool, 0600); MODULE_PARM_DESC(drm_leak_fbdev_smem, "Allow unsafe leaking fbdev physical smem address [default=false]"); @@ -96,11 +89,13 @@ static DEFINE_MUTEX(kernel_fb_helper_lock); * It will automatically set up deferred I/O if the driver requires a shadow * buffer. * - * At runtime drivers should restore the fbdev console by using + * Existing fbdev implementations should restore the fbdev console by using * drm_fb_helper_lastclose() as their &drm_driver.lastclose callback. * They should also notify the fb helper code from updates to the output * configuration by using drm_fb_helper_output_poll_changed() as their - * &drm_mode_config_funcs.output_poll_changed callback. + * &drm_mode_config_funcs.output_poll_changed callback. New implementations + * of fbdev should be build on top of struct &drm_client_funcs, which handles + * this automatically. Setting the old callbacks should be avoided. * * For suspend/resume consider using drm_mode_config_helper_suspend() and * drm_mode_config_helper_resume() which takes care of fbdev as well. 
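
The fb-helper documentation updated above steers new fbdev implementations toward struct drm_client_funcs, which is exactly the interface the new drm_fbdev_generic.c added later in this series uses. As a rough editorial sketch only (the demo_client names are illustrative and not part of the patch), a minimal in-kernel client registered through that interface could look like this:

	#include <linux/container_of.h>
	#include <linux/module.h>
	#include <linux/slab.h>

	#include <drm/drm_client.h>
	#include <drm/drm_device.h>

	struct demo_client {
		struct drm_client_dev client;
	};

	static void demo_client_unregister(struct drm_client_dev *client)
	{
		struct demo_client *demo = container_of(client, struct demo_client, client);

		drm_client_release(client);
		kfree(demo);
	}

	static int demo_client_hotplug(struct drm_client_dev *client)
	{
		/* Probe outputs and (re)create the client's framebuffer here. */
		return 0;
	}

	static const struct drm_client_funcs demo_client_funcs = {
		.owner		= THIS_MODULE,
		.unregister	= demo_client_unregister,
		.hotplug	= demo_client_hotplug,
	};

	static int demo_client_setup(struct drm_device *dev)
	{
		struct demo_client *demo;
		int ret;

		demo = kzalloc(sizeof(*demo), GFP_KERNEL);
		if (!demo)
			return -ENOMEM;

		ret = drm_client_init(dev, &demo->client, "demo-client", &demo_client_funcs);
		if (ret) {
			kfree(demo);
			return ret;
		}

		drm_client_register(&demo->client);

		return 0;
	}

Restore, hotplug and teardown then arrive through the registered callbacks, which is why the old output_poll_changed/lastclose hooks are described above as legacy behaviour.
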
@@ -368,115 +363,30 @@ static void drm_fb_helper_resume_worker(struct work_struct *work) resume_work); console_lock(); - fb_set_suspend(helper->fbdev, 0); + fb_set_suspend(helper->info, 0); console_unlock(); } -static void drm_fb_helper_damage_blit_real(struct drm_fb_helper *fb_helper, - struct drm_clip_rect *clip, - struct iosys_map *dst) +static void drm_fb_helper_fb_dirty(struct drm_fb_helper *helper) { - struct drm_framebuffer *fb = fb_helper->fb; - size_t offset = clip->y1 * fb->pitches[0]; - size_t len = clip->x2 - clip->x1; - unsigned int y; - void *src; - - switch (drm_format_info_bpp(fb->format, 0)) { - case 1: - offset += clip->x1 / 8; - len = DIV_ROUND_UP(len + clip->x1 % 8, 8); - break; - case 2: - offset += clip->x1 / 4; - len = DIV_ROUND_UP(len + clip->x1 % 4, 4); - break; - case 4: - offset += clip->x1 / 2; - len = DIV_ROUND_UP(len + clip->x1 % 2, 2); - break; - default: - offset += clip->x1 * fb->format->cpp[0]; - len *= fb->format->cpp[0]; - break; - } - - src = fb_helper->fbdev->screen_buffer + offset; - iosys_map_incr(dst, offset); /* go to first pixel within clip rect */ - - for (y = clip->y1; y < clip->y2; y++) { - iosys_map_memcpy_to(dst, 0, src, len); - iosys_map_incr(dst, fb->pitches[0]); - src += fb->pitches[0]; - } -} - -static int drm_fb_helper_damage_blit(struct drm_fb_helper *fb_helper, - struct drm_clip_rect *clip) -{ - struct drm_client_buffer *buffer = fb_helper->buffer; - struct iosys_map map, dst; - int ret; - - /* - * We have to pin the client buffer to its current location while - * flushing the shadow buffer. In the general case, concurrent - * modesetting operations could try to move the buffer and would - * fail. The modeset has to be serialized by acquiring the reservation - * object of the underlying BO here. - * - * For fbdev emulation, we only have to protect against fbdev modeset - * operations. Nothing else will involve the client buffer's BO. So it - * is sufficient to acquire struct drm_fb_helper.lock here. 
- */ - mutex_lock(&fb_helper->lock); - - ret = drm_client_buffer_vmap(buffer, &map); - if (ret) - goto out; - - dst = map; - drm_fb_helper_damage_blit_real(fb_helper, clip, &dst); - - drm_client_buffer_vunmap(buffer); - -out: - mutex_unlock(&fb_helper->lock); - - return ret; -} - -static void drm_fb_helper_damage_work(struct work_struct *work) -{ - struct drm_fb_helper *helper = container_of(work, struct drm_fb_helper, - damage_work); struct drm_device *dev = helper->dev; struct drm_clip_rect *clip = &helper->damage_clip; struct drm_clip_rect clip_copy; unsigned long flags; int ret; + if (drm_WARN_ON_ONCE(dev, !helper->funcs->fb_dirty)) + return; + spin_lock_irqsave(&helper->damage_lock, flags); clip_copy = *clip; clip->x1 = clip->y1 = ~0; clip->x2 = clip->y2 = 0; spin_unlock_irqrestore(&helper->damage_lock, flags); - /* Call damage handlers only if necessary */ - if (!(clip_copy.x1 < clip_copy.x2 && clip_copy.y1 < clip_copy.y2)) - return; - - if (helper->buffer) { - ret = drm_fb_helper_damage_blit(helper, &clip_copy); - if (drm_WARN_ONCE(dev, ret, "Damage blitter failed: ret=%d\n", ret)) - goto err; - } - - if (helper->fb->funcs->dirty) { - ret = helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, &clip_copy, 1); - if (drm_WARN_ONCE(dev, ret, "Dirty helper failed: ret=%d\n", ret)) - goto err; - } + ret = helper->funcs->fb_dirty(helper, &clip_copy); + if (ret) + goto err; return; @@ -508,7 +418,6 @@ void drm_fb_helper_prepare(struct drm_device *dev, struct drm_fb_helper *helper, INIT_LIST_HEAD(&helper->kernel_fb_list); spin_lock_init(&helper->damage_lock); INIT_WORK(&helper->resume_work, drm_fb_helper_resume_worker); - INIT_WORK(&helper->damage_work, drm_fb_helper_damage_work); helper->damage_clip.x1 = helper->damage_clip.y1 = ~0; mutex_init(&helper->lock); helper->funcs = funcs; @@ -536,11 +445,6 @@ int drm_fb_helper_init(struct drm_device *dev, { int ret; - if (!drm_fbdev_emulation) { - dev->fb_helper = fb_helper; - return 0; - } - /* * If this is not the generic fbdev client, initialize a drm_client * without callbacks so we can use the modesets. @@ -558,7 +462,7 @@ int drm_fb_helper_init(struct drm_device *dev, EXPORT_SYMBOL(drm_fb_helper_init); /** - * drm_fb_helper_alloc_fbi - allocate fb_info and some of its members + * drm_fb_helper_alloc_info - allocate fb_info and some of its members * @fb_helper: driver-allocated fbdev helper * * A helper to alloc fb_info and the members cmap and apertures. Called @@ -570,7 +474,7 @@ EXPORT_SYMBOL(drm_fb_helper_init); * fb_info pointer if things went okay, pointer containing error code * otherwise */ -struct fb_info *drm_fb_helper_alloc_fbi(struct drm_fb_helper *fb_helper) +struct fb_info *drm_fb_helper_alloc_info(struct drm_fb_helper *fb_helper) { struct device *dev = fb_helper->dev->dev; struct fb_info *info; @@ -598,7 +502,7 @@ struct fb_info *drm_fb_helper_alloc_fbi(struct drm_fb_helper *fb_helper) goto err_free_cmap; } - fb_helper->fbdev = info; + fb_helper->info = info; info->skip_vt_switch = true; return info; @@ -609,22 +513,22 @@ err_release: framebuffer_release(info); return ERR_PTR(ret); } -EXPORT_SYMBOL(drm_fb_helper_alloc_fbi); +EXPORT_SYMBOL(drm_fb_helper_alloc_info); /** - * drm_fb_helper_unregister_fbi - unregister fb_info framebuffer device + * drm_fb_helper_unregister_info - unregister fb_info framebuffer device * @fb_helper: driver-allocated fbdev helper, can be NULL * * A wrapper around unregister_framebuffer, to release the fb_info * framebuffer device. 
This must be called before releasing all resources for * @fb_helper by calling drm_fb_helper_fini(). */ -void drm_fb_helper_unregister_fbi(struct drm_fb_helper *fb_helper) +void drm_fb_helper_unregister_info(struct drm_fb_helper *fb_helper) { - if (fb_helper && fb_helper->fbdev) - unregister_framebuffer(fb_helper->fbdev); + if (fb_helper && fb_helper->info) + unregister_framebuffer(fb_helper->info); } -EXPORT_SYMBOL(drm_fb_helper_unregister_fbi); +EXPORT_SYMBOL(drm_fb_helper_unregister_info); /** * drm_fb_helper_fini - finialize a &struct drm_fb_helper @@ -645,15 +549,14 @@ void drm_fb_helper_fini(struct drm_fb_helper *fb_helper) return; cancel_work_sync(&fb_helper->resume_work); - cancel_work_sync(&fb_helper->damage_work); - info = fb_helper->fbdev; + info = fb_helper->info; if (info) { if (info->cmap.len) fb_dealloc_cmap(&info->cmap); framebuffer_release(info); } - fb_helper->fbdev = NULL; + fb_helper->info = NULL; mutex_lock(&kernel_fb_helper_lock); if (!list_empty(&fb_helper->kernel_fb_list)) { @@ -670,34 +573,33 @@ void drm_fb_helper_fini(struct drm_fb_helper *fb_helper) } EXPORT_SYMBOL(drm_fb_helper_fini); -static bool drm_fbdev_use_shadow_fb(struct drm_fb_helper *fb_helper) -{ - struct drm_device *dev = fb_helper->dev; - struct drm_framebuffer *fb = fb_helper->fb; - - return dev->mode_config.prefer_shadow_fbdev || - dev->mode_config.prefer_shadow || - fb->funcs->dirty; -} - -static void drm_fb_helper_damage(struct fb_info *info, u32 x, u32 y, - u32 width, u32 height) +static void drm_fb_helper_add_damage_clip(struct drm_fb_helper *helper, u32 x, u32 y, + u32 width, u32 height) { - struct drm_fb_helper *helper = info->par; struct drm_clip_rect *clip = &helper->damage_clip; unsigned long flags; - if (!drm_fbdev_use_shadow_fb(helper)) - return; - spin_lock_irqsave(&helper->damage_lock, flags); clip->x1 = min_t(u32, clip->x1, x); clip->y1 = min_t(u32, clip->y1, y); clip->x2 = max_t(u32, clip->x2, x + width); clip->y2 = max_t(u32, clip->y2, y + height); spin_unlock_irqrestore(&helper->damage_lock, flags); +} - schedule_work(&helper->damage_work); +static void drm_fb_helper_damage(struct drm_fb_helper *helper, u32 x, u32 y, + u32 width, u32 height) +{ + struct fb_info *info = helper->info; + + drm_fb_helper_add_damage_clip(helper, x, y, width, height); + + /* + * The current fbdev emulation only flushes buffers if a damage + * update is necessary. And we can assume that deferred I/O has + * been enabled as damage updates require deferred I/O for mmap. 
+ */ + fb_deferred_io_schedule_flush(info); } /* @@ -739,6 +641,7 @@ static void drm_fb_helper_memory_range_to_clip(struct fb_info *info, off_t off, */ void drm_fb_helper_deferred_io(struct fb_info *info, struct list_head *pagereflist) { + struct drm_fb_helper *helper = info->par; unsigned long start, end, min_off, max_off; struct fb_deferred_io_pageref *pageref; struct drm_rect damage_area; @@ -751,8 +654,6 @@ void drm_fb_helper_deferred_io(struct fb_info *info, struct list_head *pagerefli min_off = min(min_off, start); max_off = max(max_off, end); } - if (min_off >= max_off) - return; /* * As we can only track pages, we might reach beyond the end @@ -761,53 +662,166 @@ void drm_fb_helper_deferred_io(struct fb_info *info, struct list_head *pagerefli */ max_off = min(max_off, info->screen_size); - drm_fb_helper_memory_range_to_clip(info, min_off, max_off - min_off, &damage_area); - drm_fb_helper_damage(info, damage_area.x1, damage_area.y1, - drm_rect_width(&damage_area), - drm_rect_height(&damage_area)); + if (min_off < max_off) { + drm_fb_helper_memory_range_to_clip(info, min_off, max_off - min_off, &damage_area); + drm_fb_helper_add_damage_clip(helper, damage_area.x1, damage_area.y1, + drm_rect_width(&damage_area), + drm_rect_height(&damage_area)); + } + + /* + * Flushes all dirty pages from mmap's pageref list and the + * areas that have been written by struct fb_ops callbacks. + */ + drm_fb_helper_fb_dirty(helper); } EXPORT_SYMBOL(drm_fb_helper_deferred_io); +typedef ssize_t (*drm_fb_helper_read_screen)(struct fb_info *info, char __user *buf, + size_t count, loff_t pos); + +static ssize_t __drm_fb_helper_read(struct fb_info *info, char __user *buf, size_t count, + loff_t *ppos, drm_fb_helper_read_screen read_screen) +{ + loff_t pos = *ppos; + size_t total_size; + ssize_t ret; + + if (info->screen_size) + total_size = info->screen_size; + else + total_size = info->fix.smem_len; + + if (pos >= total_size) + return 0; + if (count >= total_size) + count = total_size; + if (total_size - count < pos) + count = total_size - pos; + + if (info->fbops->fb_sync) + info->fbops->fb_sync(info); + + ret = read_screen(info, buf, count, pos); + if (ret > 0) + *ppos += ret; + + return ret; +} + +typedef ssize_t (*drm_fb_helper_write_screen)(struct fb_info *info, const char __user *buf, + size_t count, loff_t pos); + +static ssize_t __drm_fb_helper_write(struct fb_info *info, const char __user *buf, size_t count, + loff_t *ppos, drm_fb_helper_write_screen write_screen) +{ + loff_t pos = *ppos; + size_t total_size; + ssize_t ret; + int err = 0; + + if (info->screen_size) + total_size = info->screen_size; + else + total_size = info->fix.smem_len; + + if (pos > total_size) + return -EFBIG; + if (count > total_size) { + err = -EFBIG; + count = total_size; + } + if (total_size - count < pos) { + if (!err) + err = -ENOSPC; + count = total_size - pos; + } + + if (info->fbops->fb_sync) + info->fbops->fb_sync(info); + + /* + * Copy to framebuffer even if we already logged an error. Emulates + * the behavior of the original fbdev implementation. 
+ */ + ret = write_screen(info, buf, count, pos); + if (ret < 0) + return ret; /* return last error, if any */ + else if (!ret) + return err; /* return previous error, if any */ + + *ppos += ret; + + return ret; +} + +static ssize_t drm_fb_helper_read_screen_buffer(struct fb_info *info, char __user *buf, + size_t count, loff_t pos) +{ + const char *src = info->screen_buffer + pos; + + if (copy_to_user(buf, src, count)) + return -EFAULT; + + return count; +} + /** - * drm_fb_helper_sys_read - wrapper around fb_sys_read + * drm_fb_helper_sys_read - Implements struct &fb_ops.fb_read for system memory * @info: fb_info struct pointer * @buf: userspace buffer to read from framebuffer memory * @count: number of bytes to read from framebuffer memory * @ppos: read offset within framebuffer memory * - * A wrapper around fb_sys_read implemented by fbdev core + * Returns: + * The number of bytes read on success, or an error code otherwise. */ ssize_t drm_fb_helper_sys_read(struct fb_info *info, char __user *buf, size_t count, loff_t *ppos) { - return fb_sys_read(info, buf, count, ppos); + return __drm_fb_helper_read(info, buf, count, ppos, drm_fb_helper_read_screen_buffer); } EXPORT_SYMBOL(drm_fb_helper_sys_read); +static ssize_t drm_fb_helper_write_screen_buffer(struct fb_info *info, const char __user *buf, + size_t count, loff_t pos) +{ + char *dst = info->screen_buffer + pos; + + if (copy_from_user(dst, buf, count)) + return -EFAULT; + + return count; +} + /** - * drm_fb_helper_sys_write - wrapper around fb_sys_write + * drm_fb_helper_sys_write - Implements struct &fb_ops.fb_write for system memory * @info: fb_info struct pointer * @buf: userspace buffer to write to framebuffer memory * @count: number of bytes to write to framebuffer memory * @ppos: write offset within framebuffer memory * - * A wrapper around fb_sys_write implemented by fbdev core + * Returns: + * The number of bytes written on success, or an error code otherwise. 
*/ ssize_t drm_fb_helper_sys_write(struct fb_info *info, const char __user *buf, size_t count, loff_t *ppos) { + struct drm_fb_helper *helper = info->par; loff_t pos = *ppos; ssize_t ret; struct drm_rect damage_area; - ret = fb_sys_write(info, buf, count, ppos); + ret = __drm_fb_helper_write(info, buf, count, ppos, drm_fb_helper_write_screen_buffer); if (ret <= 0) return ret; - drm_fb_helper_memory_range_to_clip(info, pos, ret, &damage_area); - drm_fb_helper_damage(info, damage_area.x1, damage_area.y1, - drm_rect_width(&damage_area), - drm_rect_height(&damage_area)); + if (helper->funcs->fb_dirty) { + drm_fb_helper_memory_range_to_clip(info, pos, ret, &damage_area); + drm_fb_helper_damage(helper, damage_area.x1, damage_area.y1, + drm_rect_width(&damage_area), + drm_rect_height(&damage_area)); + } return ret; } @@ -823,8 +837,12 @@ EXPORT_SYMBOL(drm_fb_helper_sys_write); void drm_fb_helper_sys_fillrect(struct fb_info *info, const struct fb_fillrect *rect) { + struct drm_fb_helper *helper = info->par; + sys_fillrect(info, rect); - drm_fb_helper_damage(info, rect->dx, rect->dy, rect->width, rect->height); + + if (helper->funcs->fb_dirty) + drm_fb_helper_damage(helper, rect->dx, rect->dy, rect->width, rect->height); } EXPORT_SYMBOL(drm_fb_helper_sys_fillrect); @@ -838,8 +856,12 @@ EXPORT_SYMBOL(drm_fb_helper_sys_fillrect); void drm_fb_helper_sys_copyarea(struct fb_info *info, const struct fb_copyarea *area) { + struct drm_fb_helper *helper = info->par; + sys_copyarea(info, area); - drm_fb_helper_damage(info, area->dx, area->dy, area->width, area->height); + + if (helper->funcs->fb_dirty) + drm_fb_helper_damage(helper, area->dx, area->dy, area->width, area->height); } EXPORT_SYMBOL(drm_fb_helper_sys_copyarea); @@ -853,11 +875,131 @@ EXPORT_SYMBOL(drm_fb_helper_sys_copyarea); void drm_fb_helper_sys_imageblit(struct fb_info *info, const struct fb_image *image) { + struct drm_fb_helper *helper = info->par; + sys_imageblit(info, image); - drm_fb_helper_damage(info, image->dx, image->dy, image->width, image->height); + + if (helper->funcs->fb_dirty) + drm_fb_helper_damage(helper, image->dx, image->dy, image->width, image->height); } EXPORT_SYMBOL(drm_fb_helper_sys_imageblit); +static ssize_t fb_read_screen_base(struct fb_info *info, char __user *buf, size_t count, + loff_t pos) +{ + const char __iomem *src = info->screen_base + pos; + size_t alloc_size = min_t(size_t, count, PAGE_SIZE); + ssize_t ret = 0; + int err = 0; + char *tmp; + + tmp = kmalloc(alloc_size, GFP_KERNEL); + if (!tmp) + return -ENOMEM; + + while (count) { + size_t c = min_t(size_t, count, alloc_size); + + memcpy_fromio(tmp, src, c); + if (copy_to_user(buf, tmp, c)) { + err = -EFAULT; + break; + } + + src += c; + buf += c; + ret += c; + count -= c; + } + + kfree(tmp); + + return ret ? ret : err; +} + +/** + * drm_fb_helper_cfb_read - Implements struct &fb_ops.fb_read for I/O memory + * @info: fb_info struct pointer + * @buf: userspace buffer to read from framebuffer memory + * @count: number of bytes to read from framebuffer memory + * @ppos: read offset within framebuffer memory + * + * Returns: + * The number of bytes read on success, or an error code otherwise. 
+ */ +ssize_t drm_fb_helper_cfb_read(struct fb_info *info, char __user *buf, + size_t count, loff_t *ppos) +{ + return __drm_fb_helper_read(info, buf, count, ppos, fb_read_screen_base); +} +EXPORT_SYMBOL(drm_fb_helper_cfb_read); + +static ssize_t fb_write_screen_base(struct fb_info *info, const char __user *buf, size_t count, + loff_t pos) +{ + char __iomem *dst = info->screen_base + pos; + size_t alloc_size = min_t(size_t, count, PAGE_SIZE); + ssize_t ret = 0; + int err = 0; + u8 *tmp; + + tmp = kmalloc(alloc_size, GFP_KERNEL); + if (!tmp) + return -ENOMEM; + + while (count) { + size_t c = min_t(size_t, count, alloc_size); + + if (copy_from_user(tmp, buf, c)) { + err = -EFAULT; + break; + } + memcpy_toio(dst, tmp, c); + + dst += c; + buf += c; + ret += c; + count -= c; + } + + kfree(tmp); + + return ret ? ret : err; +} + +/** + * drm_fb_helper_cfb_write - Implements struct &fb_ops.fb_write for I/O memory + * @info: fb_info struct pointer + * @buf: userspace buffer to write to framebuffer memory + * @count: number of bytes to write to framebuffer memory + * @ppos: write offset within framebuffer memory + * + * Returns: + * The number of bytes written on success, or an error code otherwise. + */ +ssize_t drm_fb_helper_cfb_write(struct fb_info *info, const char __user *buf, + size_t count, loff_t *ppos) +{ + struct drm_fb_helper *helper = info->par; + loff_t pos = *ppos; + ssize_t ret; + struct drm_rect damage_area; + + ret = __drm_fb_helper_write(info, buf, count, ppos, fb_write_screen_base); + if (ret <= 0) + return ret; + + if (helper->funcs->fb_dirty) { + drm_fb_helper_memory_range_to_clip(info, pos, ret, &damage_area); + drm_fb_helper_damage(helper, damage_area.x1, damage_area.y1, + drm_rect_width(&damage_area), + drm_rect_height(&damage_area)); + } + + return ret; +} +EXPORT_SYMBOL(drm_fb_helper_cfb_write); + /** * drm_fb_helper_cfb_fillrect - wrapper around cfb_fillrect * @info: fbdev registered by the helper @@ -868,8 +1010,12 @@ EXPORT_SYMBOL(drm_fb_helper_sys_imageblit); void drm_fb_helper_cfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect) { + struct drm_fb_helper *helper = info->par; + cfb_fillrect(info, rect); - drm_fb_helper_damage(info, rect->dx, rect->dy, rect->width, rect->height); + + if (helper->funcs->fb_dirty) + drm_fb_helper_damage(helper, rect->dx, rect->dy, rect->width, rect->height); } EXPORT_SYMBOL(drm_fb_helper_cfb_fillrect); @@ -883,8 +1029,12 @@ EXPORT_SYMBOL(drm_fb_helper_cfb_fillrect); void drm_fb_helper_cfb_copyarea(struct fb_info *info, const struct fb_copyarea *area) { + struct drm_fb_helper *helper = info->par; + cfb_copyarea(info, area); - drm_fb_helper_damage(info, area->dx, area->dy, area->width, area->height); + + if (helper->funcs->fb_dirty) + drm_fb_helper_damage(helper, area->dx, area->dy, area->width, area->height); } EXPORT_SYMBOL(drm_fb_helper_cfb_copyarea); @@ -898,8 +1048,12 @@ EXPORT_SYMBOL(drm_fb_helper_cfb_copyarea); void drm_fb_helper_cfb_imageblit(struct fb_info *info, const struct fb_image *image) { + struct drm_fb_helper *helper = info->par; + cfb_imageblit(info, image); - drm_fb_helper_damage(info, image->dx, image->dy, image->width, image->height); + + if (helper->funcs->fb_dirty) + drm_fb_helper_damage(helper, image->dx, image->dy, image->width, image->height); } EXPORT_SYMBOL(drm_fb_helper_cfb_imageblit); @@ -914,8 +1068,8 @@ EXPORT_SYMBOL(drm_fb_helper_cfb_imageblit); */ void drm_fb_helper_set_suspend(struct drm_fb_helper *fb_helper, bool suspend) { - if (fb_helper && fb_helper->fbdev) - 
fb_set_suspend(fb_helper->fbdev, suspend); + if (fb_helper && fb_helper->info) + fb_set_suspend(fb_helper->info, suspend); } EXPORT_SYMBOL(drm_fb_helper_set_suspend); @@ -938,20 +1092,20 @@ EXPORT_SYMBOL(drm_fb_helper_set_suspend); void drm_fb_helper_set_suspend_unlocked(struct drm_fb_helper *fb_helper, bool suspend) { - if (!fb_helper || !fb_helper->fbdev) + if (!fb_helper || !fb_helper->info) return; /* make sure there's no pending/ongoing resume */ flush_work(&fb_helper->resume_work); if (suspend) { - if (fb_helper->fbdev->state != FBINFO_STATE_RUNNING) + if (fb_helper->info->state != FBINFO_STATE_RUNNING) return; console_lock(); } else { - if (fb_helper->fbdev->state == FBINFO_STATE_RUNNING) + if (fb_helper->info->state == FBINFO_STATE_RUNNING) return; if (!console_trylock()) { @@ -960,7 +1114,7 @@ void drm_fb_helper_set_suspend_unlocked(struct drm_fb_helper *fb_helper, } } - fb_set_suspend(fb_helper->fbdev, suspend); + fb_set_suspend(fb_helper->info, suspend); console_unlock(); } EXPORT_SYMBOL(drm_fb_helper_set_suspend_unlocked); @@ -1749,6 +1903,10 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper, sizes.surface_height = config->max_height; } +#if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM) + fb_helper->hint_leak_smem_start = drm_leak_fbdev_smem; +#endif + /* push down into drivers */ ret = (*fb_helper->funcs->fb_probe)(fb_helper, &sizes); if (ret < 0) @@ -1850,7 +2008,7 @@ EXPORT_SYMBOL(drm_fb_helper_fill_info); /* * This is a continuation of drm_setup_crtcs() that sets up anything related * to the framebuffer. During initialization, drm_setup_crtcs() is called before - * the framebuffer has been allocated (fb_helper->fb and fb_helper->fbdev). + * the framebuffer has been allocated (fb_helper->fb and fb_helper->info). * So, any setup that touches those fields needs to be done here instead of in * drm_setup_crtcs(). */ @@ -1858,7 +2016,7 @@ static void drm_setup_crtcs_fb(struct drm_fb_helper *fb_helper) { struct drm_client_dev *client = &fb_helper->client; struct drm_connector_list_iter conn_iter; - struct fb_info *info = fb_helper->fbdev; + struct fb_info *info = fb_helper->info; unsigned int rotation, sw_rotations = 0; struct drm_connector *connector; struct drm_mode_set *modeset; @@ -1942,11 +2100,11 @@ __drm_fb_helper_initial_config_and_unlock(struct drm_fb_helper *fb_helper, fb_helper->deferred_setup = false; - info = fb_helper->fbdev; + info = fb_helper->info; info->var.pixclock = 0; /* Shamelessly allow physical address leaking to userspace */ #if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM) - if (!drm_leak_fbdev_smem) + if (!fb_helper->hint_leak_smem_start) #endif /* don't leak any physical addresses to userspace */ info->flags |= FBINFO_HIDE_SMEM_START; @@ -2077,7 +2235,7 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper) drm_setup_crtcs_fb(fb_helper); mutex_unlock(&fb_helper->lock); - drm_fb_helper_set_par(fb_helper->fbdev); + drm_fb_helper_set_par(fb_helper->info); return 0; } @@ -2103,530 +2261,10 @@ EXPORT_SYMBOL(drm_fb_helper_lastclose); * * This function can be used as the * &drm_mode_config_funcs.output_poll_changed callback for drivers that only - * need to call drm_fb_helper_hotplug_event(). + * need to call drm_fbdev.hotplug_event(). 
*/ void drm_fb_helper_output_poll_changed(struct drm_device *dev) { drm_fb_helper_hotplug_event(dev->fb_helper); } EXPORT_SYMBOL(drm_fb_helper_output_poll_changed); - -/* @user: 1=userspace, 0=fbcon */ -static int drm_fbdev_fb_open(struct fb_info *info, int user) -{ - struct drm_fb_helper *fb_helper = info->par; - - /* No need to take a ref for fbcon because it unbinds on unregister */ - if (user && !try_module_get(fb_helper->dev->driver->fops->owner)) - return -ENODEV; - - return 0; -} - -static int drm_fbdev_fb_release(struct fb_info *info, int user) -{ - struct drm_fb_helper *fb_helper = info->par; - - if (user) - module_put(fb_helper->dev->driver->fops->owner); - - return 0; -} - -static void drm_fbdev_cleanup(struct drm_fb_helper *fb_helper) -{ - struct fb_info *fbi = fb_helper->fbdev; - void *shadow = NULL; - - if (!fb_helper->dev) - return; - - if (fbi) { - if (fbi->fbdefio) - fb_deferred_io_cleanup(fbi); - if (drm_fbdev_use_shadow_fb(fb_helper)) - shadow = fbi->screen_buffer; - } - - drm_fb_helper_fini(fb_helper); - - if (shadow) - vfree(shadow); - else if (fb_helper->buffer) - drm_client_buffer_vunmap(fb_helper->buffer); - - drm_client_framebuffer_delete(fb_helper->buffer); -} - -static void drm_fbdev_release(struct drm_fb_helper *fb_helper) -{ - drm_fbdev_cleanup(fb_helper); - drm_client_release(&fb_helper->client); - kfree(fb_helper); -} - -/* - * fb_ops.fb_destroy is called by the last put_fb_info() call at the end of - * unregister_framebuffer() or fb_release(). - */ -static void drm_fbdev_fb_destroy(struct fb_info *info) -{ - drm_fbdev_release(info->par); -} - -static int drm_fbdev_fb_mmap(struct fb_info *info, struct vm_area_struct *vma) -{ - struct drm_fb_helper *fb_helper = info->par; - - if (drm_fbdev_use_shadow_fb(fb_helper)) - return fb_deferred_io_mmap(info, vma); - else if (fb_helper->dev->driver->gem_prime_mmap) - return fb_helper->dev->driver->gem_prime_mmap(fb_helper->buffer->gem, vma); - else - return -ENODEV; -} - -static bool drm_fbdev_use_iomem(struct fb_info *info) -{ - struct drm_fb_helper *fb_helper = info->par; - struct drm_client_buffer *buffer = fb_helper->buffer; - - return !drm_fbdev_use_shadow_fb(fb_helper) && buffer->map.is_iomem; -} - -static ssize_t fb_read_screen_base(struct fb_info *info, char __user *buf, size_t count, - loff_t pos) -{ - const char __iomem *src = info->screen_base + pos; - size_t alloc_size = min_t(size_t, count, PAGE_SIZE); - ssize_t ret = 0; - int err = 0; - char *tmp; - - tmp = kmalloc(alloc_size, GFP_KERNEL); - if (!tmp) - return -ENOMEM; - - while (count) { - size_t c = min_t(size_t, count, alloc_size); - - memcpy_fromio(tmp, src, c); - if (copy_to_user(buf, tmp, c)) { - err = -EFAULT; - break; - } - - src += c; - buf += c; - ret += c; - count -= c; - } - - kfree(tmp); - - return ret ? 
ret : err; -} - -static ssize_t fb_read_screen_buffer(struct fb_info *info, char __user *buf, size_t count, - loff_t pos) -{ - const char *src = info->screen_buffer + pos; - - if (copy_to_user(buf, src, count)) - return -EFAULT; - - return count; -} - -static ssize_t drm_fbdev_fb_read(struct fb_info *info, char __user *buf, - size_t count, loff_t *ppos) -{ - loff_t pos = *ppos; - size_t total_size; - ssize_t ret; - - if (info->screen_size) - total_size = info->screen_size; - else - total_size = info->fix.smem_len; - - if (pos >= total_size) - return 0; - if (count >= total_size) - count = total_size; - if (total_size - count < pos) - count = total_size - pos; - - if (drm_fbdev_use_iomem(info)) - ret = fb_read_screen_base(info, buf, count, pos); - else - ret = fb_read_screen_buffer(info, buf, count, pos); - - if (ret > 0) - *ppos += ret; - - return ret; -} - -static ssize_t fb_write_screen_base(struct fb_info *info, const char __user *buf, size_t count, - loff_t pos) -{ - char __iomem *dst = info->screen_base + pos; - size_t alloc_size = min_t(size_t, count, PAGE_SIZE); - ssize_t ret = 0; - int err = 0; - u8 *tmp; - - tmp = kmalloc(alloc_size, GFP_KERNEL); - if (!tmp) - return -ENOMEM; - - while (count) { - size_t c = min_t(size_t, count, alloc_size); - - if (copy_from_user(tmp, buf, c)) { - err = -EFAULT; - break; - } - memcpy_toio(dst, tmp, c); - - dst += c; - buf += c; - ret += c; - count -= c; - } - - kfree(tmp); - - return ret ? ret : err; -} - -static ssize_t fb_write_screen_buffer(struct fb_info *info, const char __user *buf, size_t count, - loff_t pos) -{ - char *dst = info->screen_buffer + pos; - - if (copy_from_user(dst, buf, count)) - return -EFAULT; - - return count; -} - -static ssize_t drm_fbdev_fb_write(struct fb_info *info, const char __user *buf, - size_t count, loff_t *ppos) -{ - loff_t pos = *ppos; - size_t total_size; - ssize_t ret; - struct drm_rect damage_area; - int err = 0; - - if (info->screen_size) - total_size = info->screen_size; - else - total_size = info->fix.smem_len; - - if (pos > total_size) - return -EFBIG; - if (count > total_size) { - err = -EFBIG; - count = total_size; - } - if (total_size - count < pos) { - if (!err) - err = -ENOSPC; - count = total_size - pos; - } - - /* - * Copy to framebuffer even if we already logged an error. Emulates - * the behavior of the original fbdev implementation. 
- */ - if (drm_fbdev_use_iomem(info)) - ret = fb_write_screen_base(info, buf, count, pos); - else - ret = fb_write_screen_buffer(info, buf, count, pos); - - if (ret < 0) - return ret; /* return last error, if any */ - else if (!ret) - return err; /* return previous error, if any */ - - *ppos += ret; - - drm_fb_helper_memory_range_to_clip(info, pos, ret, &damage_area); - drm_fb_helper_damage(info, damage_area.x1, damage_area.y1, - drm_rect_width(&damage_area), - drm_rect_height(&damage_area)); - - return ret; -} - -static void drm_fbdev_fb_fillrect(struct fb_info *info, - const struct fb_fillrect *rect) -{ - if (drm_fbdev_use_iomem(info)) - drm_fb_helper_cfb_fillrect(info, rect); - else - drm_fb_helper_sys_fillrect(info, rect); -} - -static void drm_fbdev_fb_copyarea(struct fb_info *info, - const struct fb_copyarea *area) -{ - if (drm_fbdev_use_iomem(info)) - drm_fb_helper_cfb_copyarea(info, area); - else - drm_fb_helper_sys_copyarea(info, area); -} - -static void drm_fbdev_fb_imageblit(struct fb_info *info, - const struct fb_image *image) -{ - if (drm_fbdev_use_iomem(info)) - drm_fb_helper_cfb_imageblit(info, image); - else - drm_fb_helper_sys_imageblit(info, image); -} - -static const struct fb_ops drm_fbdev_fb_ops = { - .owner = THIS_MODULE, - DRM_FB_HELPER_DEFAULT_OPS, - .fb_open = drm_fbdev_fb_open, - .fb_release = drm_fbdev_fb_release, - .fb_destroy = drm_fbdev_fb_destroy, - .fb_mmap = drm_fbdev_fb_mmap, - .fb_read = drm_fbdev_fb_read, - .fb_write = drm_fbdev_fb_write, - .fb_fillrect = drm_fbdev_fb_fillrect, - .fb_copyarea = drm_fbdev_fb_copyarea, - .fb_imageblit = drm_fbdev_fb_imageblit, -}; - -static struct fb_deferred_io drm_fbdev_defio = { - .delay = HZ / 20, - .deferred_io = drm_fb_helper_deferred_io, -}; - -/* - * This function uses the client API to create a framebuffer backed by a dumb buffer. - * - * The _sys_ versions are used for &fb_ops.fb_read, fb_write, fb_fillrect, - * fb_copyarea, fb_imageblit. 
- */ -static int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper, - struct drm_fb_helper_surface_size *sizes) -{ - struct drm_client_dev *client = &fb_helper->client; - struct drm_device *dev = fb_helper->dev; - struct drm_client_buffer *buffer; - struct drm_framebuffer *fb; - struct fb_info *fbi; - u32 format; - struct iosys_map map; - int ret; - - drm_dbg_kms(dev, "surface width(%d), height(%d) and bpp(%d)\n", - sizes->surface_width, sizes->surface_height, - sizes->surface_bpp); - - format = drm_mode_legacy_fb_format(sizes->surface_bpp, sizes->surface_depth); - buffer = drm_client_framebuffer_create(client, sizes->surface_width, - sizes->surface_height, format); - if (IS_ERR(buffer)) - return PTR_ERR(buffer); - - fb_helper->buffer = buffer; - fb_helper->fb = buffer->fb; - fb = buffer->fb; - - fbi = drm_fb_helper_alloc_fbi(fb_helper); - if (IS_ERR(fbi)) - return PTR_ERR(fbi); - - fbi->fbops = &drm_fbdev_fb_ops; - fbi->screen_size = sizes->surface_height * fb->pitches[0]; - fbi->fix.smem_len = fbi->screen_size; - fbi->flags = FBINFO_DEFAULT; - - drm_fb_helper_fill_info(fbi, fb_helper, sizes); - - if (drm_fbdev_use_shadow_fb(fb_helper)) { - fbi->screen_buffer = vzalloc(fbi->screen_size); - if (!fbi->screen_buffer) - return -ENOMEM; - fbi->flags |= FBINFO_VIRTFB | FBINFO_READS_FAST; - - fbi->fbdefio = &drm_fbdev_defio; - fb_deferred_io_init(fbi); - } else { - /* buffer is mapped for HW framebuffer */ - ret = drm_client_buffer_vmap(fb_helper->buffer, &map); - if (ret) - return ret; - if (map.is_iomem) { - fbi->screen_base = map.vaddr_iomem; - } else { - fbi->screen_buffer = map.vaddr; - fbi->flags |= FBINFO_VIRTFB; - } - - /* - * Shamelessly leak the physical address to user-space. As - * page_to_phys() is undefined for I/O memory, warn in this - * case. 
- */ -#if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM) - if (drm_leak_fbdev_smem && fbi->fix.smem_start == 0 && - !drm_WARN_ON_ONCE(dev, map.is_iomem)) - fbi->fix.smem_start = - page_to_phys(virt_to_page(fbi->screen_buffer)); -#endif - } - - return 0; -} - -static const struct drm_fb_helper_funcs drm_fb_helper_generic_funcs = { - .fb_probe = drm_fb_helper_generic_probe, -}; - -static void drm_fbdev_client_unregister(struct drm_client_dev *client) -{ - struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client); - - if (fb_helper->fbdev) - /* drm_fbdev_fb_destroy() takes care of cleanup */ - drm_fb_helper_unregister_fbi(fb_helper); - else - drm_fbdev_release(fb_helper); -} - -static int drm_fbdev_client_restore(struct drm_client_dev *client) -{ - drm_fb_helper_lastclose(client->dev); - - return 0; -} - -static int drm_fbdev_client_hotplug(struct drm_client_dev *client) -{ - struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client); - struct drm_device *dev = client->dev; - int ret; - - /* Setup is not retried if it has failed */ - if (!fb_helper->dev && fb_helper->funcs) - return 0; - - if (dev->fb_helper) - return drm_fb_helper_hotplug_event(dev->fb_helper); - - if (!dev->mode_config.num_connector) { - drm_dbg_kms(dev, "No connectors found, will not create framebuffer!\n"); - return 0; - } - - drm_fb_helper_prepare(dev, fb_helper, &drm_fb_helper_generic_funcs); - - ret = drm_fb_helper_init(dev, fb_helper); - if (ret) - goto err; - - if (!drm_drv_uses_atomic_modeset(dev)) - drm_helper_disable_unused_functions(dev); - - ret = drm_fb_helper_initial_config(fb_helper, fb_helper->preferred_bpp); - if (ret) - goto err_cleanup; - - return 0; - -err_cleanup: - drm_fbdev_cleanup(fb_helper); -err: - fb_helper->dev = NULL; - fb_helper->fbdev = NULL; - - drm_err(dev, "fbdev: Failed to setup generic emulation (ret=%d)\n", ret); - - return ret; -} - -static const struct drm_client_funcs drm_fbdev_client_funcs = { - .owner = THIS_MODULE, - .unregister = drm_fbdev_client_unregister, - .restore = drm_fbdev_client_restore, - .hotplug = drm_fbdev_client_hotplug, -}; - -/** - * drm_fbdev_generic_setup() - Setup generic fbdev emulation - * @dev: DRM device - * @preferred_bpp: Preferred bits per pixel for the device. - * @dev->mode_config.preferred_depth is used if this is zero. - * - * This function sets up generic fbdev emulation for drivers that supports - * dumb buffers with a virtual address and that can be mmap'ed. - * drm_fbdev_generic_setup() shall be called after the DRM driver registered - * the new DRM device with drm_dev_register(). - * - * Restore, hotplug events and teardown are all taken care of. Drivers that do - * suspend/resume need to call drm_fb_helper_set_suspend_unlocked() themselves. - * Simple drivers might use drm_mode_config_helper_suspend(). - * - * Drivers that set the dirty callback on their framebuffer will get a shadow - * fbdev buffer that is blitted onto the real buffer. This is done in order to - * make deferred I/O work with all kinds of buffers. A shadow buffer can be - * requested explicitly by setting struct drm_mode_config.prefer_shadow or - * struct drm_mode_config.prefer_shadow_fbdev to true beforehand. This is - * required to use generic fbdev emulation with SHMEM helpers. - * - * This function is safe to call even when there are no connectors present. - * Setup will be retried on the next hotplug event. - * - * The fbdev is destroyed by drm_dev_unregister(). 
- */ -void drm_fbdev_generic_setup(struct drm_device *dev, - unsigned int preferred_bpp) -{ - struct drm_fb_helper *fb_helper; - int ret; - - drm_WARN(dev, !dev->registered, "Device has not been registered.\n"); - drm_WARN(dev, dev->fb_helper, "fb_helper is already set!\n"); - - if (!drm_fbdev_emulation) - return; - - fb_helper = kzalloc(sizeof(*fb_helper), GFP_KERNEL); - if (!fb_helper) { - drm_err(dev, "Failed to allocate fb_helper\n"); - return; - } - - ret = drm_client_init(dev, &fb_helper->client, "fbdev", &drm_fbdev_client_funcs); - if (ret) { - kfree(fb_helper); - drm_err(dev, "Failed to register client: %d\n", ret); - return; - } - - /* - * FIXME: This mixes up depth with bpp, which results in a glorious - * mess, resulting in some drivers picking wrong fbdev defaults and - * others wrong preferred_depth defaults. - */ - if (!preferred_bpp) - preferred_bpp = dev->mode_config.preferred_depth; - if (!preferred_bpp) - preferred_bpp = 32; - fb_helper->preferred_bpp = preferred_bpp; - - ret = drm_fbdev_client_hotplug(&fb_helper->client); - if (ret) - drm_dbg_kms(dev, "client hotplug ret=%d\n", ret); - - drm_client_register(&fb_helper->client); -} -EXPORT_SYMBOL(drm_fbdev_generic_setup); diff --git a/drivers/gpu/drm/drm_fbdev_generic.c b/drivers/gpu/drm/drm_fbdev_generic.c new file mode 100644 index 000000000000..ab8695669279 --- /dev/null +++ b/drivers/gpu/drm/drm_fbdev_generic.c @@ -0,0 +1,494 @@ +// SPDX-License-Identifier: MIT + +#include <linux/moduleparam.h> +#include <linux/vmalloc.h> + +#include <drm/drm_crtc_helper.h> +#include <drm/drm_drv.h> +#include <drm/drm_fb_helper.h> +#include <drm/drm_framebuffer.h> +#include <drm/drm_print.h> + +#include <drm/drm_fbdev_generic.h> + +static bool drm_fbdev_use_shadow_fb(struct drm_fb_helper *fb_helper) +{ + struct drm_device *dev = fb_helper->dev; + struct drm_framebuffer *fb = fb_helper->fb; + + return dev->mode_config.prefer_shadow_fbdev || + dev->mode_config.prefer_shadow || + fb->funcs->dirty; +} + +/* @user: 1=userspace, 0=fbcon */ +static int drm_fbdev_fb_open(struct fb_info *info, int user) +{ + struct drm_fb_helper *fb_helper = info->par; + + /* No need to take a ref for fbcon because it unbinds on unregister */ + if (user && !try_module_get(fb_helper->dev->driver->fops->owner)) + return -ENODEV; + + return 0; +} + +static int drm_fbdev_fb_release(struct fb_info *info, int user) +{ + struct drm_fb_helper *fb_helper = info->par; + + if (user) + module_put(fb_helper->dev->driver->fops->owner); + + return 0; +} + +static void drm_fbdev_cleanup(struct drm_fb_helper *fb_helper) +{ + struct fb_info *fbi = fb_helper->info; + void *shadow = NULL; + + if (!fb_helper->dev) + return; + + if (fbi) { + if (fbi->fbdefio) + fb_deferred_io_cleanup(fbi); + if (drm_fbdev_use_shadow_fb(fb_helper)) + shadow = fbi->screen_buffer; + } + + drm_fb_helper_fini(fb_helper); + + if (shadow) + vfree(shadow); + else if (fb_helper->buffer) + drm_client_buffer_vunmap(fb_helper->buffer); + + drm_client_framebuffer_delete(fb_helper->buffer); +} + +static void drm_fbdev_release(struct drm_fb_helper *fb_helper) +{ + drm_fbdev_cleanup(fb_helper); + drm_client_release(&fb_helper->client); + kfree(fb_helper); +} + +/* + * fb_ops.fb_destroy is called by the last put_fb_info() call at the end of + * unregister_framebuffer() or fb_release(). 
+ */ +static void drm_fbdev_fb_destroy(struct fb_info *info) +{ + drm_fbdev_release(info->par); +} + +static int drm_fbdev_fb_mmap(struct fb_info *info, struct vm_area_struct *vma) +{ + struct drm_fb_helper *fb_helper = info->par; + + if (drm_fbdev_use_shadow_fb(fb_helper)) + return fb_deferred_io_mmap(info, vma); + else if (fb_helper->dev->driver->gem_prime_mmap) + return fb_helper->dev->driver->gem_prime_mmap(fb_helper->buffer->gem, vma); + else + return -ENODEV; +} + +static bool drm_fbdev_use_iomem(struct fb_info *info) +{ + struct drm_fb_helper *fb_helper = info->par; + struct drm_client_buffer *buffer = fb_helper->buffer; + + return !drm_fbdev_use_shadow_fb(fb_helper) && buffer->map.is_iomem; +} + +static ssize_t drm_fbdev_fb_read(struct fb_info *info, char __user *buf, + size_t count, loff_t *ppos) +{ + ssize_t ret; + + if (drm_fbdev_use_iomem(info)) + ret = drm_fb_helper_cfb_read(info, buf, count, ppos); + else + ret = drm_fb_helper_sys_read(info, buf, count, ppos); + + return ret; +} + +static ssize_t drm_fbdev_fb_write(struct fb_info *info, const char __user *buf, + size_t count, loff_t *ppos) +{ + ssize_t ret; + + if (drm_fbdev_use_iomem(info)) + ret = drm_fb_helper_cfb_write(info, buf, count, ppos); + else + ret = drm_fb_helper_sys_write(info, buf, count, ppos); + + return ret; +} + +static void drm_fbdev_fb_fillrect(struct fb_info *info, + const struct fb_fillrect *rect) +{ + if (drm_fbdev_use_iomem(info)) + drm_fb_helper_cfb_fillrect(info, rect); + else + drm_fb_helper_sys_fillrect(info, rect); +} + +static void drm_fbdev_fb_copyarea(struct fb_info *info, + const struct fb_copyarea *area) +{ + if (drm_fbdev_use_iomem(info)) + drm_fb_helper_cfb_copyarea(info, area); + else + drm_fb_helper_sys_copyarea(info, area); +} + +static void drm_fbdev_fb_imageblit(struct fb_info *info, + const struct fb_image *image) +{ + if (drm_fbdev_use_iomem(info)) + drm_fb_helper_cfb_imageblit(info, image); + else + drm_fb_helper_sys_imageblit(info, image); +} + +static const struct fb_ops drm_fbdev_fb_ops = { + .owner = THIS_MODULE, + DRM_FB_HELPER_DEFAULT_OPS, + .fb_open = drm_fbdev_fb_open, + .fb_release = drm_fbdev_fb_release, + .fb_destroy = drm_fbdev_fb_destroy, + .fb_mmap = drm_fbdev_fb_mmap, + .fb_read = drm_fbdev_fb_read, + .fb_write = drm_fbdev_fb_write, + .fb_fillrect = drm_fbdev_fb_fillrect, + .fb_copyarea = drm_fbdev_fb_copyarea, + .fb_imageblit = drm_fbdev_fb_imageblit, +}; + +static struct fb_deferred_io drm_fbdev_defio = { + .delay = HZ / 20, + .deferred_io = drm_fb_helper_deferred_io, +}; + +/* + * This function uses the client API to create a framebuffer backed by a dumb buffer. 
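
Whether drm_fbdev_fb_probe() below exposes that dumb buffer to fbdev directly (via drm_client_buffer_vmap()) or through a vmalloc'ed shadow copy is decided by drm_fbdev_use_shadow_fb() above: the shadow path is taken automatically when the framebuffer has a dirty callback, and a driver can also request it explicitly before the fbdev client is set up. A minimal sketch of the explicit opt-in; the function name is purely illustrative:

static void foo_mode_config_setup(struct drm_device *drm)
{
	/*
	 * Illustrative only: ask the generic fbdev emulation for a
	 * vmalloc'ed shadow buffer. This is required e.g. for SHMEM-backed
	 * buffers, see the drm_fbdev_generic_setup() documentation below.
	 */
	drm->mode_config.prefer_shadow_fbdev = true;
}

With a shadow buffer in place, all fbdev drawing lands in system memory first and drm_fbdev_fb_dirty() later blits only the damaged region into the real buffer through drm_fbdev_damage_blit().
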
+ */ +static int drm_fbdev_fb_probe(struct drm_fb_helper *fb_helper, + struct drm_fb_helper_surface_size *sizes) +{ + struct drm_client_dev *client = &fb_helper->client; + struct drm_device *dev = fb_helper->dev; + struct drm_client_buffer *buffer; + struct drm_framebuffer *fb; + struct fb_info *fbi; + u32 format; + struct iosys_map map; + int ret; + + drm_dbg_kms(dev, "surface width(%d), height(%d) and bpp(%d)\n", + sizes->surface_width, sizes->surface_height, + sizes->surface_bpp); + + format = drm_mode_legacy_fb_format(sizes->surface_bpp, sizes->surface_depth); + buffer = drm_client_framebuffer_create(client, sizes->surface_width, + sizes->surface_height, format); + if (IS_ERR(buffer)) + return PTR_ERR(buffer); + + fb_helper->buffer = buffer; + fb_helper->fb = buffer->fb; + fb = buffer->fb; + + fbi = drm_fb_helper_alloc_info(fb_helper); + if (IS_ERR(fbi)) + return PTR_ERR(fbi); + + fbi->fbops = &drm_fbdev_fb_ops; + fbi->screen_size = sizes->surface_height * fb->pitches[0]; + fbi->fix.smem_len = fbi->screen_size; + fbi->flags = FBINFO_DEFAULT; + + drm_fb_helper_fill_info(fbi, fb_helper, sizes); + + if (drm_fbdev_use_shadow_fb(fb_helper)) { + fbi->screen_buffer = vzalloc(fbi->screen_size); + if (!fbi->screen_buffer) + return -ENOMEM; + fbi->flags |= FBINFO_VIRTFB | FBINFO_READS_FAST; + + fbi->fbdefio = &drm_fbdev_defio; + fb_deferred_io_init(fbi); + } else { + /* buffer is mapped for HW framebuffer */ + ret = drm_client_buffer_vmap(fb_helper->buffer, &map); + if (ret) + return ret; + if (map.is_iomem) { + fbi->screen_base = map.vaddr_iomem; + } else { + fbi->screen_buffer = map.vaddr; + fbi->flags |= FBINFO_VIRTFB; + } + + /* + * Shamelessly leak the physical address to user-space. As + * page_to_phys() is undefined for I/O memory, warn in this + * case. + */ +#if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM) + if (fb_helper->hint_leak_smem_start && fbi->fix.smem_start == 0 && + !drm_WARN_ON_ONCE(dev, map.is_iomem)) + fbi->fix.smem_start = + page_to_phys(virt_to_page(fbi->screen_buffer)); +#endif + } + + return 0; +} + +static void drm_fbdev_damage_blit_real(struct drm_fb_helper *fb_helper, + struct drm_clip_rect *clip, + struct iosys_map *dst) +{ + struct drm_framebuffer *fb = fb_helper->fb; + size_t offset = clip->y1 * fb->pitches[0]; + size_t len = clip->x2 - clip->x1; + unsigned int y; + void *src; + + switch (drm_format_info_bpp(fb->format, 0)) { + case 1: + offset += clip->x1 / 8; + len = DIV_ROUND_UP(len + clip->x1 % 8, 8); + break; + case 2: + offset += clip->x1 / 4; + len = DIV_ROUND_UP(len + clip->x1 % 4, 4); + break; + case 4: + offset += clip->x1 / 2; + len = DIV_ROUND_UP(len + clip->x1 % 2, 2); + break; + default: + offset += clip->x1 * fb->format->cpp[0]; + len *= fb->format->cpp[0]; + break; + } + + src = fb_helper->info->screen_buffer + offset; + iosys_map_incr(dst, offset); /* go to first pixel within clip rect */ + + for (y = clip->y1; y < clip->y2; y++) { + iosys_map_memcpy_to(dst, 0, src, len); + iosys_map_incr(dst, fb->pitches[0]); + src += fb->pitches[0]; + } +} + +static int drm_fbdev_damage_blit(struct drm_fb_helper *fb_helper, + struct drm_clip_rect *clip) +{ + struct drm_client_buffer *buffer = fb_helper->buffer; + struct iosys_map map, dst; + int ret; + + /* + * We have to pin the client buffer to its current location while + * flushing the shadow buffer. In the general case, concurrent + * modesetting operations could try to move the buffer and would + * fail. The modeset has to be serialized by acquiring the reservation + * object of the underlying BO here. 
+ * + * For fbdev emulation, we only have to protect against fbdev modeset + * operations. Nothing else will involve the client buffer's BO. So it + * is sufficient to acquire struct drm_fb_helper.lock here. + */ + mutex_lock(&fb_helper->lock); + + ret = drm_client_buffer_vmap(buffer, &map); + if (ret) + goto out; + + dst = map; + drm_fbdev_damage_blit_real(fb_helper, clip, &dst); + + drm_client_buffer_vunmap(buffer); + +out: + mutex_unlock(&fb_helper->lock); + + return ret; +} + +static int drm_fbdev_fb_dirty(struct drm_fb_helper *helper, struct drm_clip_rect *clip) +{ + struct drm_device *dev = helper->dev; + int ret; + + if (!drm_fbdev_use_shadow_fb(helper)) + return 0; + + /* Call damage handlers only if necessary */ + if (!(clip->x1 < clip->x2 && clip->y1 < clip->y2)) + return 0; + + if (helper->buffer) { + ret = drm_fbdev_damage_blit(helper, clip); + if (drm_WARN_ONCE(dev, ret, "Damage blitter failed: ret=%d\n", ret)) + return ret; + } + + if (helper->fb->funcs->dirty) { + ret = helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, clip, 1); + if (drm_WARN_ONCE(dev, ret, "Dirty helper failed: ret=%d\n", ret)) + return ret; + } + + return 0; +} + +static const struct drm_fb_helper_funcs drm_fb_helper_generic_funcs = { + .fb_probe = drm_fbdev_fb_probe, + .fb_dirty = drm_fbdev_fb_dirty, +}; + +static void drm_fbdev_client_unregister(struct drm_client_dev *client) +{ + struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client); + + if (fb_helper->info) + /* drm_fbdev_fb_destroy() takes care of cleanup */ + drm_fb_helper_unregister_info(fb_helper); + else + drm_fbdev_release(fb_helper); +} + +static int drm_fbdev_client_restore(struct drm_client_dev *client) +{ + drm_fb_helper_lastclose(client->dev); + + return 0; +} + +static int drm_fbdev_client_hotplug(struct drm_client_dev *client) +{ + struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client); + struct drm_device *dev = client->dev; + int ret; + + /* Setup is not retried if it has failed */ + if (!fb_helper->dev && fb_helper->funcs) + return 0; + + if (dev->fb_helper) + return drm_fb_helper_hotplug_event(dev->fb_helper); + + if (!dev->mode_config.num_connector) { + drm_dbg_kms(dev, "No connectors found, will not create framebuffer!\n"); + return 0; + } + + drm_fb_helper_prepare(dev, fb_helper, &drm_fb_helper_generic_funcs); + + ret = drm_fb_helper_init(dev, fb_helper); + if (ret) + goto err; + + if (!drm_drv_uses_atomic_modeset(dev)) + drm_helper_disable_unused_functions(dev); + + ret = drm_fb_helper_initial_config(fb_helper, fb_helper->preferred_bpp); + if (ret) + goto err_cleanup; + + return 0; + +err_cleanup: + drm_fbdev_cleanup(fb_helper); +err: + fb_helper->dev = NULL; + fb_helper->info = NULL; + + drm_err(dev, "fbdev: Failed to setup generic emulation (ret=%d)\n", ret); + + return ret; +} + +static const struct drm_client_funcs drm_fbdev_client_funcs = { + .owner = THIS_MODULE, + .unregister = drm_fbdev_client_unregister, + .restore = drm_fbdev_client_restore, + .hotplug = drm_fbdev_client_hotplug, +}; + +/** + * drm_fbdev_generic_setup() - Setup generic fbdev emulation + * @dev: DRM device + * @preferred_bpp: Preferred bits per pixel for the device. + * @dev->mode_config.preferred_depth is used if this is zero. + * + * This function sets up generic fbdev emulation for drivers that supports + * dumb buffers with a virtual address and that can be mmap'ed. + * drm_fbdev_generic_setup() shall be called after the DRM driver registered + * the new DRM device with drm_dev_register(). 
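
In practice the call sits at the very end of a driver's probe path, and the declaration now lives in <drm/drm_fbdev_generic.h>, the header the driver hunks further down in this diff switch their includes to. A minimal sketch, assuming a PCI driver; foo_device_create() and the foo_ prefix are illustrative only:

static int foo_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct drm_device *drm;
	int ret;

	drm = foo_device_create(pdev);	/* device and modeset init elided */
	if (IS_ERR(drm))
		return PTR_ERR(drm);

	ret = drm_dev_register(drm, 0);
	if (ret)
		return ret;

	/* passing 0 instead falls back to mode_config.preferred_depth */
	drm_fbdev_generic_setup(drm, 32);

	return 0;
}

Note that drm_fbdev_generic_setup() returns void; a failure to set up the emulation is logged but is not fatal for driver probe.
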
+ * + * Restore, hotplug events and teardown are all taken care of. Drivers that do + * suspend/resume need to call drm_fb_helper_set_suspend_unlocked() themselves. + * Simple drivers might use drm_mode_config_helper_suspend(). + * + * Drivers that set the dirty callback on their framebuffer will get a shadow + * fbdev buffer that is blitted onto the real buffer. This is done in order to + * make deferred I/O work with all kinds of buffers. A shadow buffer can be + * requested explicitly by setting struct drm_mode_config.prefer_shadow or + * struct drm_mode_config.prefer_shadow_fbdev to true beforehand. This is + * required to use generic fbdev emulation with SHMEM helpers. + * + * This function is safe to call even when there are no connectors present. + * Setup will be retried on the next hotplug event. + * + * The fbdev is destroyed by drm_dev_unregister(). + */ +void drm_fbdev_generic_setup(struct drm_device *dev, + unsigned int preferred_bpp) +{ + struct drm_fb_helper *fb_helper; + int ret; + + drm_WARN(dev, !dev->registered, "Device has not been registered.\n"); + drm_WARN(dev, dev->fb_helper, "fb_helper is already set!\n"); + + fb_helper = kzalloc(sizeof(*fb_helper), GFP_KERNEL); + if (!fb_helper) + return; + + ret = drm_client_init(dev, &fb_helper->client, "fbdev", &drm_fbdev_client_funcs); + if (ret) { + kfree(fb_helper); + drm_err(dev, "Failed to register client: %d\n", ret); + return; + } + + /* + * FIXME: This mixes up depth with bpp, which results in a glorious + * mess, resulting in some drivers picking wrong fbdev defaults and + * others wrong preferred_depth defaults. + */ + if (!preferred_bpp) + preferred_bpp = dev->mode_config.preferred_depth; + if (!preferred_bpp) + preferred_bpp = 32; + fb_helper->preferred_bpp = preferred_bpp; + + ret = drm_fbdev_client_hotplug(&fb_helper->client); + if (ret) + drm_dbg_kms(dev, "client hotplug ret=%d\n", ret); + + drm_client_register(&fb_helper->client); +} +EXPORT_SYMBOL(drm_fbdev_generic_setup); diff --git a/drivers/gpu/drm/drm_fourcc.c b/drivers/gpu/drm/drm_fourcc.c index e09331bb3bc7..6242dfbe9240 100644 --- a/drivers/gpu/drm/drm_fourcc.c +++ b/drivers/gpu/drm/drm_fourcc.c @@ -297,12 +297,12 @@ const struct drm_format_info *__drm_format_info(u32 format) .vsub = 2, .is_yuv = true }, { .format = DRM_FORMAT_Q410, .depth = 0, .num_planes = 3, .char_per_block = { 2, 2, 2 }, - .block_w = { 1, 1, 1 }, .block_h = { 1, 1, 1 }, .hsub = 0, - .vsub = 0, .is_yuv = true }, + .block_w = { 1, 1, 1 }, .block_h = { 1, 1, 1 }, .hsub = 1, + .vsub = 1, .is_yuv = true }, { .format = DRM_FORMAT_Q401, .depth = 0, .num_planes = 3, .char_per_block = { 2, 2, 2 }, - .block_w = { 1, 1, 1 }, .block_h = { 1, 1, 1 }, .hsub = 0, - .vsub = 0, .is_yuv = true }, + .block_w = { 1, 1, 1 }, .block_h = { 1, 1, 1 }, .hsub = 1, + .vsub = 1, .is_yuv = true }, { .format = DRM_FORMAT_P030, .depth = 0, .num_planes = 2, .char_per_block = { 4, 8, 0 }, .block_w = { 3, 3, 0 }, .block_h = { 1, 1, 0 }, .hsub = 2, .vsub = 2, .is_yuv = true}, diff --git a/drivers/gpu/drm/drm_gem_atomic_helper.c b/drivers/gpu/drm/drm_gem_atomic_helper.c index b6a0110eb64a..e42800718f51 100644 --- a/drivers/gpu/drm/drm_gem_atomic_helper.c +++ b/drivers/gpu/drm/drm_gem_atomic_helper.c @@ -360,48 +360,43 @@ void drm_gem_reset_shadow_plane(struct drm_plane *plane) EXPORT_SYMBOL(drm_gem_reset_shadow_plane); /** - * drm_gem_prepare_shadow_fb - prepares shadow framebuffers + * drm_gem_begin_shadow_fb_access - prepares shadow framebuffers for CPU access * @plane: the plane * @plane_state: the plane state of 
type struct drm_shadow_plane_state * - * This function implements struct &drm_plane_helper_funcs.prepare_fb. It + * This function implements struct &drm_plane_helper_funcs.begin_fb_access. It * maps all buffer objects of the plane's framebuffer into kernel address - * space and stores them in &struct drm_shadow_plane_state.map. The - * framebuffer will be synchronized as part of the atomic commit. + * space and stores them in struct &drm_shadow_plane_state.map. The first data + * bytes are available in struct &drm_shadow_plane_state.data. * - * See drm_gem_cleanup_shadow_fb() for cleanup. + * See drm_gem_end_shadow_fb_access() for cleanup. * * Returns: * 0 on success, or a negative errno code otherwise. */ -int drm_gem_prepare_shadow_fb(struct drm_plane *plane, struct drm_plane_state *plane_state) +int drm_gem_begin_shadow_fb_access(struct drm_plane *plane, struct drm_plane_state *plane_state) { struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state); struct drm_framebuffer *fb = plane_state->fb; - int ret; if (!fb) return 0; - ret = drm_gem_plane_helper_prepare_fb(plane, plane_state); - if (ret) - return ret; - return drm_gem_fb_vmap(fb, shadow_plane_state->map, shadow_plane_state->data); } -EXPORT_SYMBOL(drm_gem_prepare_shadow_fb); +EXPORT_SYMBOL(drm_gem_begin_shadow_fb_access); /** - * drm_gem_cleanup_shadow_fb - releases shadow framebuffers + * drm_gem_end_shadow_fb_access - releases shadow framebuffers from CPU access * @plane: the plane * @plane_state: the plane state of type struct drm_shadow_plane_state * - * This function implements struct &drm_plane_helper_funcs.cleanup_fb. - * This function unmaps all buffer objects of the plane's framebuffer. + * This function implements struct &drm_plane_helper_funcs.end_fb_access. It + * undoes all effects of drm_gem_begin_shadow_fb_access() in reverse order. * - * See drm_gem_prepare_shadow_fb() for more information. + * See drm_gem_begin_shadow_fb_access() for more information. */ -void drm_gem_cleanup_shadow_fb(struct drm_plane *plane, struct drm_plane_state *plane_state) +void drm_gem_end_shadow_fb_access(struct drm_plane *plane, struct drm_plane_state *plane_state) { struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state); struct drm_framebuffer *fb = plane_state->fb; @@ -411,46 +406,45 @@ void drm_gem_cleanup_shadow_fb(struct drm_plane *plane, struct drm_plane_state * drm_gem_fb_vunmap(fb, shadow_plane_state->map); } -EXPORT_SYMBOL(drm_gem_cleanup_shadow_fb); +EXPORT_SYMBOL(drm_gem_end_shadow_fb_access); /** - * drm_gem_simple_kms_prepare_shadow_fb - prepares shadow framebuffers + * drm_gem_simple_kms_begin_shadow_fb_access - prepares shadow framebuffers for CPU access * @pipe: the simple display pipe * @plane_state: the plane state of type struct drm_shadow_plane_state * - * This function implements struct drm_simple_display_funcs.prepare_fb. It - * maps all buffer objects of the plane's framebuffer into kernel address - * space and stores them in struct drm_shadow_plane_state.map. The - * framebuffer will be synchronized as part of the atomic commit. + * This function implements struct drm_simple_display_funcs.begin_fb_access. * - * See drm_gem_simple_kms_cleanup_shadow_fb() for cleanup. + * See drm_gem_begin_shadow_fb_access() for details and + * drm_gem_simple_kms_cleanup_shadow_fb() for cleanup. * * Returns: * 0 on success, or a negative errno code otherwise. 
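
For drivers that use regular planes rather than the simple display pipe, the non-pipe variants above are hooked up through struct drm_plane_helper_funcs, and the plane must carry shadow-plane state (drm_gem_reset_shadow_plane() and its duplicate/destroy counterparts) so that the cast to struct drm_shadow_plane_state is valid. A minimal sketch; the foo_ prefix is illustrative and the remaining callbacks are driver-specific:

static const struct drm_plane_helper_funcs foo_plane_helper_funcs = {
	/* atomic_check, atomic_update, etc. elided */
	.begin_fb_access = drm_gem_begin_shadow_fb_access,
	.end_fb_access = drm_gem_end_shadow_fb_access,
};

If the tree provides the DRM_GEM_SHADOW_PLANE_HELPER_FUNCS macro in <drm/drm_gem_atomic_helper.h>, it is meant to expand to exactly this wiring.
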
*/ -int drm_gem_simple_kms_prepare_shadow_fb(struct drm_simple_display_pipe *pipe, - struct drm_plane_state *plane_state) +int drm_gem_simple_kms_begin_shadow_fb_access(struct drm_simple_display_pipe *pipe, + struct drm_plane_state *plane_state) { - return drm_gem_prepare_shadow_fb(&pipe->plane, plane_state); + return drm_gem_begin_shadow_fb_access(&pipe->plane, plane_state); } -EXPORT_SYMBOL(drm_gem_simple_kms_prepare_shadow_fb); +EXPORT_SYMBOL(drm_gem_simple_kms_begin_shadow_fb_access); /** - * drm_gem_simple_kms_cleanup_shadow_fb - releases shadow framebuffers + * drm_gem_simple_kms_end_shadow_fb_access - releases shadow framebuffers from CPU access * @pipe: the simple display pipe * @plane_state: the plane state of type struct drm_shadow_plane_state * - * This function implements struct drm_simple_display_funcs.cleanup_fb. - * This function unmaps all buffer objects of the plane's framebuffer. + * This function implements struct drm_simple_display_funcs.end_fb_access. + * It undoes all effects of drm_gem_simple_kms_begin_shadow_fb_access() in + * reverse order. * - * See drm_gem_simple_kms_prepare_shadow_fb(). + * See drm_gem_simple_kms_begin_shadow_fb_access(). */ -void drm_gem_simple_kms_cleanup_shadow_fb(struct drm_simple_display_pipe *pipe, - struct drm_plane_state *plane_state) +void drm_gem_simple_kms_end_shadow_fb_access(struct drm_simple_display_pipe *pipe, + struct drm_plane_state *plane_state) { - drm_gem_cleanup_shadow_fb(&pipe->plane, plane_state); + drm_gem_end_shadow_fb_access(&pipe->plane, plane_state); } -EXPORT_SYMBOL(drm_gem_simple_kms_cleanup_shadow_fb); +EXPORT_SYMBOL(drm_gem_simple_kms_end_shadow_fb_access); /** * drm_gem_simple_kms_reset_shadow_plane - resets a shadow-buffered plane diff --git a/drivers/gpu/drm/drm_gem_framebuffer_helper.c b/drivers/gpu/drm/drm_gem_framebuffer_helper.c index e35e224e6303..e93533b86037 100644 --- a/drivers/gpu/drm/drm_gem_framebuffer_helper.c +++ b/drivers/gpu/drm/drm_gem_framebuffer_helper.c @@ -9,7 +9,6 @@ #include <linux/module.h> #include <drm/drm_damage_helper.h> -#include <drm/drm_fb_helper.h> #include <drm/drm_fourcc.h> #include <drm/drm_framebuffer.h> #include <drm/drm_gem.h> diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c index 125160b534be..b6c7e3803bb3 100644 --- a/drivers/gpu/drm/drm_gem_vram_helper.c +++ b/drivers/gpu/drm/drm_gem_vram_helper.c @@ -433,25 +433,19 @@ int drm_gem_vram_vmap(struct drm_gem_vram_object *gbo, struct iosys_map *map) { int ret; - ret = ttm_bo_reserve(&gbo->bo, true, false, NULL); - if (ret) - return ret; + dma_resv_assert_held(gbo->bo.base.resv); ret = drm_gem_vram_pin_locked(gbo, 0); if (ret) - goto err_ttm_bo_unreserve; + return ret; ret = drm_gem_vram_kmap_locked(gbo, map); if (ret) goto err_drm_gem_vram_unpin_locked; - ttm_bo_unreserve(&gbo->bo); - return 0; err_drm_gem_vram_unpin_locked: drm_gem_vram_unpin_locked(gbo); -err_ttm_bo_unreserve: - ttm_bo_unreserve(&gbo->bo); return ret; } EXPORT_SYMBOL(drm_gem_vram_vmap); @@ -467,16 +461,10 @@ EXPORT_SYMBOL(drm_gem_vram_vmap); void drm_gem_vram_vunmap(struct drm_gem_vram_object *gbo, struct iosys_map *map) { - int ret; - - ret = ttm_bo_reserve(&gbo->bo, false, false, NULL); - if (WARN_ONCE(ret, "ttm_bo_reserve_failed(): ret=%d\n", ret)) - return; + dma_resv_assert_held(gbo->bo.base.resv); drm_gem_vram_kunmap_locked(gbo, map); drm_gem_vram_unpin_locked(gbo); - - ttm_bo_unreserve(&gbo->bo); } EXPORT_SYMBOL(drm_gem_vram_vunmap); diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c index 
5d4ac79381c4..3c8034a8c27b 100644 --- a/drivers/gpu/drm/drm_modes.c +++ b/drivers/gpu/drm/drm_modes.c @@ -1750,11 +1750,78 @@ static int drm_mode_parse_cmdline_options(const char *str, return 0; } -static const char * const drm_named_modes_whitelist[] = { - "NTSC", - "PAL", +struct drm_named_mode { + const char *name; + unsigned int pixel_clock_khz; + unsigned int xres; + unsigned int yres; + unsigned int flags; +}; + +#define NAMED_MODE(_name, _pclk, _x, _y, _flags) \ + { \ + .name = _name, \ + .pixel_clock_khz = _pclk, \ + .xres = _x, \ + .yres = _y, \ + .flags = _flags, \ + } + +static const struct drm_named_mode drm_named_modes[] = { + NAMED_MODE("NTSC", 13500, 720, 480, DRM_MODE_FLAG_INTERLACE), + NAMED_MODE("PAL", 13500, 720, 576, DRM_MODE_FLAG_INTERLACE), }; +static int drm_mode_parse_cmdline_named_mode(const char *name, + unsigned int name_end, + struct drm_cmdline_mode *cmdline_mode) +{ + unsigned int i; + + if (!name_end) + return 0; + + /* If the name starts with a digit, it's not a named mode */ + if (isdigit(name[0])) + return 0; + + /* + * If there's an equal sign in the name, the command-line + * contains only an option and no mode. + */ + if (strnchr(name, name_end, '=')) + return 0; + + /* The connection status extras can be set without a mode. */ + if (name_end == 1 && + (name[0] == 'd' || name[0] == 'D' || name[0] == 'e')) + return 0; + + /* + * We're sure we're a named mode at this point, iterate over the + * list of modes we're aware of. + */ + for (i = 0; i < ARRAY_SIZE(drm_named_modes); i++) { + const struct drm_named_mode *mode = &drm_named_modes[i]; + int ret; + + ret = str_has_prefix(name, mode->name); + if (ret != name_end) + continue; + + strcpy(cmdline_mode->name, mode->name); + cmdline_mode->pixel_clock = mode->pixel_clock_khz; + cmdline_mode->xres = mode->xres; + cmdline_mode->yres = mode->yres; + cmdline_mode->interlace = !!(mode->flags & DRM_MODE_FLAG_INTERLACE); + cmdline_mode->specified = true; + + return 1; + } + + return -EINVAL; +} + /** * drm_mode_parse_command_line_for_connector - parse command line modeline for connector * @mode_option: optional per connector mode option @@ -1791,7 +1858,7 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option, const char *bpp_ptr = NULL, *refresh_ptr = NULL, *extra_ptr = NULL; const char *options_ptr = NULL; char *bpp_end_ptr = NULL, *refresh_end_ptr = NULL; - int i, len, ret; + int len, ret; memset(mode, 0, sizeof(*mode)); mode->panel_orientation = DRM_MODE_PANEL_ORIENTATION_UNKNOWN; @@ -1832,18 +1899,19 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option, parse_extras = true; } - /* First check for a named mode */ - for (i = 0; i < ARRAY_SIZE(drm_named_modes_whitelist); i++) { - ret = str_has_prefix(name, drm_named_modes_whitelist[i]); - if (ret == mode_end) { - if (refresh_ptr) - return false; /* named + refresh is invalid */ + if (!mode_end) + return false; + + ret = drm_mode_parse_cmdline_named_mode(name, mode_end, mode); + if (ret < 0) + return false; - strcpy(mode->name, drm_named_modes_whitelist[i]); - mode->specified = true; - break; - } - } + /* + * Having a mode that starts by a letter (and thus is named) and + * an at-sign (used to specify a refresh rate) is disallowed. + */ + if (ret && refresh_ptr) + return false; /* No named mode? Check for a normal mode argument, e.g. 
1024x768 */ if (!mode->specified && isdigit(name[0])) { diff --git a/drivers/gpu/drm/drm_nomodeset.c b/drivers/gpu/drm/drm_nomodeset.c deleted file mode 100644 index f3978d5bd3a1..000000000000 --- a/drivers/gpu/drm/drm_nomodeset.c +++ /dev/null @@ -1,24 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 - -#include <linux/module.h> -#include <linux/types.h> - -static bool drm_nomodeset; - -bool drm_firmware_drivers_only(void) -{ - return drm_nomodeset; -} -EXPORT_SYMBOL(drm_firmware_drivers_only); - -static int __init disable_modeset(char *str) -{ - drm_nomodeset = true; - - pr_warn("Booted with the nomodeset parameter. Only the system framebuffer will be available\n"); - - return 1; -} - -/* Disable kernel modesetting */ -__setup("nomodeset", disable_modeset); diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c index 20e109a802ae..f924b8b4ab6b 100644 --- a/drivers/gpu/drm/drm_prime.c +++ b/drivers/gpu/drm/drm_prime.c @@ -781,6 +781,8 @@ int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma) struct drm_gem_object *obj = dma_buf->priv; struct drm_device *dev = obj->dev; + dma_resv_assert_held(dma_buf->resv); + if (!dev->driver->gem_prime_mmap) return -ENOSYS; diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c index 2fc21df709bc..bcd9611dabfd 100644 --- a/drivers/gpu/drm/drm_probe_helper.c +++ b/drivers/gpu/drm/drm_probe_helper.c @@ -36,7 +36,6 @@ #include <drm/drm_client.h> #include <drm/drm_crtc.h> #include <drm/drm_edid.h> -#include <drm/drm_fb_helper.h> #include <drm/drm_fourcc.h> #include <drm/drm_modeset_helper_vtables.h> #include <drm/drm_print.h> diff --git a/drivers/gpu/drm/drm_simple_kms_helper.c b/drivers/gpu/drm/drm_simple_kms_helper.c index 31233c6ae3c4..3ef420ec4534 100644 --- a/drivers/gpu/drm/drm_simple_kms_helper.c +++ b/drivers/gpu/drm/drm_simple_kms_helper.c @@ -285,6 +285,30 @@ static void drm_simple_kms_plane_cleanup_fb(struct drm_plane *plane, pipe->funcs->cleanup_fb(pipe, state); } +static int drm_simple_kms_plane_begin_fb_access(struct drm_plane *plane, + struct drm_plane_state *new_plane_state) +{ + struct drm_simple_display_pipe *pipe; + + pipe = container_of(plane, struct drm_simple_display_pipe, plane); + if (!pipe->funcs || !pipe->funcs->begin_fb_access) + return 0; + + return pipe->funcs->begin_fb_access(pipe, new_plane_state); +} + +static void drm_simple_kms_plane_end_fb_access(struct drm_plane *plane, + struct drm_plane_state *new_plane_state) +{ + struct drm_simple_display_pipe *pipe; + + pipe = container_of(plane, struct drm_simple_display_pipe, plane); + if (!pipe->funcs || !pipe->funcs->end_fb_access) + return; + + pipe->funcs->end_fb_access(pipe, new_plane_state); +} + static bool drm_simple_kms_format_mod_supported(struct drm_plane *plane, uint32_t format, uint64_t modifier) @@ -295,6 +319,8 @@ static bool drm_simple_kms_format_mod_supported(struct drm_plane *plane, static const struct drm_plane_helper_funcs drm_simple_kms_plane_helper_funcs = { .prepare_fb = drm_simple_kms_plane_prepare_fb, .cleanup_fb = drm_simple_kms_plane_cleanup_fb, + .begin_fb_access = drm_simple_kms_plane_begin_fb_access, + .end_fb_access = drm_simple_kms_plane_end_fb_access, .atomic_check = drm_simple_kms_plane_atomic_check, .atomic_update = drm_simple_kms_plane_atomic_update, }; diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.h b/drivers/gpu/drm/etnaviv/etnaviv_drv.h index f32f4771dada..2bb4c25565dc 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_drv.h +++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.h @@ -6,13 +6,14 
@@ #ifndef __ETNAVIV_DRV_H__ #define __ETNAVIV_DRV_H__ +#include <linux/io.h> #include <linux/list.h> #include <linux/mm_types.h> #include <linux/sizes.h> #include <linux/time64.h> #include <linux/types.h> -#include <drm/drm_fb_helper.h> +#include <drm/drm_drv.h> #include <drm/drm_gem.h> #include <drm/etnaviv_drm.h> #include <drm/gpu_scheduler.h> diff --git a/drivers/gpu/drm/etnaviv/etnaviv_dump.c b/drivers/gpu/drm/etnaviv/etnaviv_dump.c index f418e0b75772..44b5f3c35aab 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_dump.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_dump.c @@ -83,10 +83,15 @@ static void etnaviv_core_dump_registers(struct core_dump_iterator *iter, { struct etnaviv_dump_registers *reg = iter->data; unsigned int i; + u32 read_addr; for (i = 0; i < ARRAY_SIZE(etnaviv_dump_registers); i++, reg++) { + read_addr = etnaviv_dump_registers[i]; + if (read_addr >= VIVS_PM_POWER_CONTROLS && + read_addr <= VIVS_PM_PULSE_EATER) + read_addr = gpu_fix_power_address(gpu, read_addr); reg->reg = cpu_to_le32(etnaviv_dump_registers[i]); - reg->value = cpu_to_le32(gpu_read(gpu, etnaviv_dump_registers[i])); + reg->value = cpu_to_le32(gpu_read(gpu, read_addr)); } etnaviv_core_dump_header(iter, ETDUMP_BUF_REG, reg); diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c index cc386f8a7116..68e4446a94ad 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c @@ -130,7 +130,7 @@ static int etnaviv_gem_mmap_obj(struct etnaviv_gem_object *etnaviv_obj, { pgprot_t vm_page_prot; - vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP; + vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP; vm_page_prot = vm_get_page_prot(vma->vm_flags); @@ -165,7 +165,8 @@ static vm_fault_t etnaviv_gem_fault(struct vm_fault *vmf) struct vm_area_struct *vma = vmf->vma; struct drm_gem_object *obj = vma->vm_private_data; struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj); - struct page **pages, *page; + struct page **pages; + unsigned long pfn; pgoff_t pgoff; int err; @@ -189,12 +190,12 @@ static vm_fault_t etnaviv_gem_fault(struct vm_fault *vmf) /* We don't use vmf->pgoff since that has the fake offset: */ pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT; - page = pages[pgoff]; + pfn = page_to_pfn(pages[pgoff]); VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address, - page_to_pfn(page), page_to_pfn(page) << PAGE_SHIFT); + pfn, pfn << PAGE_SHIFT); - return vmf_insert_page(vma, vmf->address, page); + return vmf_insert_pfn(vma, vmf->address, pfn); } int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset) @@ -258,7 +259,12 @@ struct etnaviv_vram_mapping *etnaviv_gem_mapping_get( if (mapping->use == 0) { mutex_lock(&mmu_context->lock); if (mapping->context == mmu_context) - mapping->use += 1; + if (va && mapping->iova != va) { + etnaviv_iommu_reap_mapping(mapping); + mapping = NULL; + } else { + mapping->use += 1; + } else mapping = NULL; mutex_unlock(&mmu_context->lock); @@ -504,7 +510,6 @@ void etnaviv_gem_free_object(struct drm_gem_object *obj) kfree(mapping); } - drm_gem_free_mmap_offset(obj); etnaviv_obj->ops->release(etnaviv_obj); drm_gem_object_release(obj); diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.h b/drivers/gpu/drm/etnaviv/etnaviv_gem.h index 63688e6e4580..baa81cbf701a 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.h +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.h @@ -96,6 +96,7 @@ struct etnaviv_gem_submit { int out_fence_id; struct list_head node; /* GPU active submit list */ struct etnaviv_cmdbuf 
cmdbuf; + struct pid *pid; /* submitting process */ bool runtime_resumed; u32 exec_state; u32 flags; diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c index 1ac916b24891..1491159d0d20 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c @@ -399,6 +399,9 @@ static void submit_cleanup(struct kref *kref) mutex_unlock(&submit->gpu->fence_lock); dma_fence_put(submit->out_fence); } + + put_pid(submit->pid); + kfree(submit->pmrs); kfree(submit); } @@ -422,6 +425,7 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data, struct sync_file *sync_file = NULL; struct ww_acquire_ctx ticket; int out_fence_fd = -1; + struct pid *pid = get_pid(task_pid(current)); void *stream; int ret; @@ -519,6 +523,8 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data, goto err_submit_ww_acquire; } + submit->pid = pid; + ret = etnaviv_cmdbuf_init(priv->cmdbuf_suballoc, &submit->cmdbuf, ALIGN(args->stream_size, 8) + 8); if (ret) diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c index 37018bc55810..51320eeebfcf 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c @@ -416,6 +416,12 @@ static void etnaviv_hw_identify(struct etnaviv_gpu *gpu) if (gpu->identity.model == chipModel_GC700) gpu->identity.features &= ~chipFeatures_FAST_CLEAR; + /* These models/revisions don't have the 2D pipe bit */ + if ((gpu->identity.model == chipModel_GC500 && + gpu->identity.revision <= 2) || + gpu->identity.model == chipModel_GC300) + gpu->identity.features |= chipFeatures_PIPE_2D; + if ((gpu->identity.model == chipModel_GC500 && gpu->identity.revision < 2) || (gpu->identity.model == chipModel_GC300 && @@ -449,8 +455,9 @@ static void etnaviv_hw_identify(struct etnaviv_gpu *gpu) gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_5); } - /* GC600 idle register reports zero bits where modules aren't present */ - if (gpu->identity.model == chipModel_GC600) + /* GC600/300 idle register reports zero bits where modules aren't present */ + if (gpu->identity.model == chipModel_GC600 || + gpu->identity.model == chipModel_GC300) gpu->idle_mask = VIVS_HI_IDLE_STATE_TX | VIVS_HI_IDLE_STATE_RA | VIVS_HI_IDLE_STATE_SE | @@ -583,7 +590,7 @@ static void etnaviv_gpu_enable_mlcg(struct etnaviv_gpu *gpu) u32 pmc, ppc; /* enable clock gating */ - ppc = gpu_read(gpu, VIVS_PM_POWER_CONTROLS); + ppc = gpu_read_power(gpu, VIVS_PM_POWER_CONTROLS); ppc |= VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING; /* Disable stall module clock gating for 4.3.0.1 and 4.3.0.2 revs */ @@ -591,9 +598,9 @@ static void etnaviv_gpu_enable_mlcg(struct etnaviv_gpu *gpu) gpu->identity.revision == 0x4302) ppc |= VIVS_PM_POWER_CONTROLS_DISABLE_STALL_MODULE_CLOCK_GATING; - gpu_write(gpu, VIVS_PM_POWER_CONTROLS, ppc); + gpu_write_power(gpu, VIVS_PM_POWER_CONTROLS, ppc); - pmc = gpu_read(gpu, VIVS_PM_MODULE_CONTROLS); + pmc = gpu_read_power(gpu, VIVS_PM_MODULE_CONTROLS); /* Disable PA clock gating for GC400+ without bugfix except for GC420 */ if (gpu->identity.model >= chipModel_GC400 && @@ -616,19 +623,20 @@ static void etnaviv_gpu_enable_mlcg(struct etnaviv_gpu *gpu) /* Disable TX clock gating on affected core revisions. 
*/ if (etnaviv_is_model_rev(gpu, GC4000, 0x5222) || - etnaviv_is_model_rev(gpu, GC2000, 0x5108)) + etnaviv_is_model_rev(gpu, GC2000, 0x5108) || + etnaviv_is_model_rev(gpu, GC2000, 0x6202) || + etnaviv_is_model_rev(gpu, GC2000, 0x6203)) pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_TX; - /* Disable SE, RA and TX clock gating on affected core revisions. */ + /* Disable SE and RA clock gating on affected core revisions. */ if (etnaviv_is_model_rev(gpu, GC7000, 0x6202)) pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_SE | - VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA | - VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_TX; + VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA; pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA_HZ; pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA_EZ; - gpu_write(gpu, VIVS_PM_MODULE_CONTROLS, pmc); + gpu_write_power(gpu, VIVS_PM_MODULE_CONTROLS, pmc); } void etnaviv_gpu_start_fe(struct etnaviv_gpu *gpu, u32 address, u16 prefetch) @@ -688,11 +696,11 @@ static void etnaviv_gpu_setup_pulse_eater(struct etnaviv_gpu *gpu) (gpu->identity.features & chipFeatures_PIPE_3D)) { /* Performance fix: disable internal DFS */ - pulse_eater = gpu_read(gpu, VIVS_PM_PULSE_EATER); + pulse_eater = gpu_read_power(gpu, VIVS_PM_PULSE_EATER); pulse_eater |= BIT(18); } - gpu_write(gpu, VIVS_PM_PULSE_EATER, pulse_eater); + gpu_write_power(gpu, VIVS_PM_PULSE_EATER, pulse_eater); } static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu) @@ -1045,12 +1053,28 @@ pm_put: } #endif -void etnaviv_gpu_recover_hang(struct etnaviv_gpu *gpu) +void etnaviv_gpu_recover_hang(struct etnaviv_gem_submit *submit) { + struct etnaviv_gpu *gpu = submit->gpu; + char *comm = NULL, *cmd = NULL; + struct task_struct *task; unsigned int i; dev_err(gpu->dev, "recover hung GPU!\n"); + task = get_pid_task(submit->pid, PIDTYPE_PID); + if (task) { + comm = kstrdup(task->comm, GFP_KERNEL); + cmd = kstrdup_quotable_cmdline(task, GFP_KERNEL); + put_task_struct(task); + } + + if (comm && cmd) + dev_err(gpu->dev, "offending task: %s (%s)\n", comm, cmd); + + kfree(cmd); + kfree(comm); + if (pm_runtime_get_sync(gpu->dev) < 0) goto pm_put; @@ -1294,9 +1318,9 @@ static void sync_point_perfmon_sample_pre(struct etnaviv_gpu *gpu, u32 val; /* disable clock gating */ - val = gpu_read(gpu, VIVS_PM_POWER_CONTROLS); + val = gpu_read_power(gpu, VIVS_PM_POWER_CONTROLS); val &= ~VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING; - gpu_write(gpu, VIVS_PM_POWER_CONTROLS, val); + gpu_write_power(gpu, VIVS_PM_POWER_CONTROLS, val); /* enable debug register */ val = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL); @@ -1327,9 +1351,9 @@ static void sync_point_perfmon_sample_post(struct etnaviv_gpu *gpu, gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, val); /* enable clock gating */ - val = gpu_read(gpu, VIVS_PM_POWER_CONTROLS); + val = gpu_read_power(gpu, VIVS_PM_POWER_CONTROLS); val |= VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING; - gpu_write(gpu, VIVS_PM_POWER_CONTROLS, val); + gpu_write_power(gpu, VIVS_PM_POWER_CONTROLS, val); } diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h index 85eddd492774..f1204b070fb8 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h +++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h @@ -10,6 +10,7 @@ #include "etnaviv_gem.h" #include "etnaviv_mmu.h" #include "etnaviv_drv.h" +#include "common.xml.h" struct etnaviv_gem_submit; struct etnaviv_vram_mapping; @@ -159,6 +160,26 @@ static inline u32 gpu_read(struct etnaviv_gpu *gpu, u32 
reg) return readl(gpu->mmio + reg); } +static inline u32 gpu_fix_power_address(struct etnaviv_gpu *gpu, u32 reg) +{ + /* Power registers in GC300 < 2.0 are offset by 0x100 */ + if (gpu->identity.model == chipModel_GC300 && + gpu->identity.revision < 0x2000) + reg += 0x100; + + return reg; +} + +static inline void gpu_write_power(struct etnaviv_gpu *gpu, u32 reg, u32 data) +{ + writel(data, gpu->mmio + gpu_fix_power_address(gpu, reg)); +} + +static inline u32 gpu_read_power(struct etnaviv_gpu *gpu, u32 reg) +{ + return readl(gpu->mmio + gpu_fix_power_address(gpu, reg)); +} + int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value); int etnaviv_gpu_init(struct etnaviv_gpu *gpu); @@ -168,7 +189,7 @@ bool etnaviv_fill_identity_from_hwdb(struct etnaviv_gpu *gpu); int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m); #endif -void etnaviv_gpu_recover_hang(struct etnaviv_gpu *gpu); +void etnaviv_gpu_recover_hang(struct etnaviv_gem_submit *submit); void etnaviv_gpu_retire(struct etnaviv_gpu *gpu); int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu, u32 fence, struct drm_etnaviv_timespec *timeout); diff --git a/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c b/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c index f2fc645c7956..57f334e24189 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c @@ -70,6 +70,37 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = { }, { .model = 0x7000, + .revision = 0x6203, + .product_id = 0x70003, + .customer_id = 0x4, + .eco_id = 0, + .stream_count = 16, + .register_max = 64, + .thread_count = 512, + .shader_core_count = 2, + .vertex_cache_size = 16, + .vertex_output_buffer_size = 1024, + .pixel_pipes = 1, + .instruction_count = 512, + .num_constants = 320, + .buffer_size = 0, + .varyings_count = 16, + .features = 0xe0287c8d, + .minor_features0 = 0xc1589eff, + .minor_features1 = 0xfefbfad9, + .minor_features2 = 0xeb9d4fbf, + .minor_features3 = 0xedfffced, + .minor_features4 = 0xdb0dafc7, + .minor_features5 = 0x3b5ac333, + .minor_features6 = 0xfcce6000, + .minor_features7 = 0xfffbfa6f, + .minor_features8 = 0x00e10ef3, + .minor_features9 = 0x00c8003c, + .minor_features10 = 0x00004040, + .minor_features11 = 0x00000024, + }, + { + .model = 0x7000, .revision = 0x6204, .product_id = ~0U, .customer_id = ~0U, diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c index dc1aa738c4f1..67bdce5326c6 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c @@ -80,10 +80,10 @@ static int etnaviv_iommu_map(struct etnaviv_iommu_context *context, u32 iova, return -EINVAL; for_each_sgtable_dma_sg(sgt, sg, i) { - u32 pa = sg_dma_address(sg) - sg->offset; + phys_addr_t pa = sg_dma_address(sg) - sg->offset; size_t bytes = sg_dma_len(sg) + sg->offset; - VERB("map[%d]: %08x %08x(%zx)", i, iova, pa, bytes); + VERB("map[%d]: %08x %pap(%zx)", i, iova, &pa, bytes); ret = etnaviv_context_map(context, da, pa, bytes, prot); if (ret) @@ -135,6 +135,19 @@ static void etnaviv_iommu_remove_mapping(struct etnaviv_iommu_context *context, drm_mm_remove_node(&mapping->vram_node); } +void etnaviv_iommu_reap_mapping(struct etnaviv_vram_mapping *mapping) +{ + struct etnaviv_iommu_context *context = mapping->context; + + lockdep_assert_held(&context->lock); + WARN_ON(mapping->use); + + etnaviv_iommu_remove_mapping(context, mapping); + etnaviv_iommu_context_put(mapping->context); + mapping->context = NULL; + list_del_init(&mapping->mmu_node); +} 
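
The helper just added is the common tail of every path that drops a mapping which is no longer in use: the two eviction loops below and the new relocation case in etnaviv_gem_mapping_get() all call it with the context lock held and the use count at zero, exactly as its asserts demand. A condensed sketch of such a caller; the function name is illustrative:

static void example_reap_idle_mapping(struct etnaviv_iommu_context *context,
				      struct etnaviv_vram_mapping *mapping)
{
	mutex_lock(&context->lock);
	if (!mapping->use)	/* only idle mappings may be reaped */
		etnaviv_iommu_reap_mapping(mapping);
	mutex_unlock(&context->lock);
}

Factoring this out keeps the unmap, context-put and list-removal steps in one place for all three callers instead of being open-coded at each site.
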
+ static int etnaviv_iommu_find_iova(struct etnaviv_iommu_context *context, struct drm_mm_node *node, size_t size) { @@ -202,10 +215,7 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu_context *context, * this mapping. */ list_for_each_entry_safe(m, n, &list, scan_node) { - etnaviv_iommu_remove_mapping(context, m); - etnaviv_iommu_context_put(m->context); - m->context = NULL; - list_del_init(&m->mmu_node); + etnaviv_iommu_reap_mapping(m); list_del_init(&m->scan_node); } @@ -257,10 +267,7 @@ static int etnaviv_iommu_insert_exact(struct etnaviv_iommu_context *context, } list_for_each_entry_safe(m, n, &scan_list, scan_node) { - etnaviv_iommu_remove_mapping(context, m); - etnaviv_iommu_context_put(m->context); - m->context = NULL; - list_del_init(&m->mmu_node); + etnaviv_iommu_reap_mapping(m); list_del_init(&m->scan_node); } diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.h b/drivers/gpu/drm/etnaviv/etnaviv_mmu.h index e4a0b7d09c2e..c01a147f0dfd 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.h +++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.h @@ -91,6 +91,7 @@ int etnaviv_iommu_map_gem(struct etnaviv_iommu_context *context, struct etnaviv_vram_mapping *mapping, u64 va); void etnaviv_iommu_unmap_gem(struct etnaviv_iommu_context *context, struct etnaviv_vram_mapping *mapping); +void etnaviv_iommu_reap_mapping(struct etnaviv_vram_mapping *mapping); int etnaviv_iommu_get_suballoc_va(struct etnaviv_iommu_context *ctx, struct etnaviv_vram_mapping *mapping, diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c index 72e2553fbc98..d29f467eee13 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c @@ -67,7 +67,7 @@ static enum drm_gpu_sched_stat etnaviv_sched_timedout_job(struct drm_sched_job /* get the GPU back into the init state */ etnaviv_core_dump(submit); - etnaviv_gpu_recover_hang(gpu); + etnaviv_gpu_recover_hang(submit); drm_sched_resubmit_jobs(&gpu->sched); diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c index 767afd2bfa82..55c92372fca0 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c @@ -49,6 +49,8 @@ static const struct fb_ops exynos_drm_fb_ops = { .owner = THIS_MODULE, DRM_FB_HELPER_DEFAULT_OPS, .fb_mmap = exynos_drm_fb_mmap, + .fb_read = drm_fb_helper_cfb_read, + .fb_write = drm_fb_helper_cfb_write, .fb_fillrect = drm_fb_helper_cfb_fillrect, .fb_copyarea = drm_fb_helper_cfb_copyarea, .fb_imageblit = drm_fb_helper_cfb_imageblit, @@ -63,7 +65,7 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper, unsigned int size = fb->width * fb->height * fb->format->cpp[0]; unsigned long offset; - fbi = drm_fb_helper_alloc_fbi(helper); + fbi = drm_fb_helper_alloc_info(helper); if (IS_ERR(fbi)) { DRM_DEV_ERROR(to_dma_dev(helper->dev), "failed to allocate fb info.\n"); @@ -201,7 +203,7 @@ static void exynos_drm_fbdev_destroy(struct drm_device *dev, drm_framebuffer_remove(fb); } - drm_fb_helper_unregister_fbi(fb_helper); + drm_fb_helper_unregister_info(fb_helper); drm_fb_helper_fini(fb_helper); } diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c index b4acc3422ba4..8579c7629f5e 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c @@ -20,7 +20,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_drv.h> -#include <drm/drm_fb_helper.h> +#include <drm/drm_fbdev_generic.h> #include 
<drm/drm_gem_dma_helper.h> #include <drm/drm_modeset_helper.h> #include <drm/drm_module.h> diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c index 5f502a0048ab..8d5a37b8f110 100644 --- a/drivers/gpu/drm/gma500/framebuffer.c +++ b/drivers/gpu/drm/gma500/framebuffer.c @@ -147,6 +147,8 @@ static const struct fb_ops psbfb_unaccel_ops = { .owner = THIS_MODULE, DRM_FB_HELPER_DEFAULT_OPS, .fb_setcolreg = psbfb_setcolreg, + .fb_read = drm_fb_helper_cfb_read, + .fb_write = drm_fb_helper_cfb_write, .fb_fillrect = drm_fb_helper_cfb_fillrect, .fb_copyarea = drm_fb_helper_cfb_copyarea, .fb_imageblit = drm_fb_helper_cfb_imageblit, @@ -268,7 +270,7 @@ static int psbfb_create(struct drm_fb_helper *fb_helper, memset(dev_priv->vram_addr + backing->offset, 0, size); - info = drm_fb_helper_alloc_fbi(fb_helper); + info = drm_fb_helper_alloc_info(fb_helper); if (IS_ERR(info)) { ret = PTR_ERR(info); goto err_drm_gem_object_put; @@ -383,7 +385,7 @@ static int psb_fbdev_destroy(struct drm_device *dev, { struct drm_framebuffer *fb = fb_helper->fb; - drm_fb_helper_unregister_fbi(fb_helper); + drm_fb_helper_unregister_info(fb_helper); drm_fb_helper_fini(fb_helper); drm_framebuffer_unregister_private(fb); diff --git a/drivers/gpu/drm/gud/gud_drv.c b/drivers/gpu/drm/gud/gud_drv.c index 8d1630b8edac..d57dab104358 100644 --- a/drivers/gpu/drm/gud/gud_drv.c +++ b/drivers/gpu/drm/gud/gud_drv.c @@ -18,7 +18,7 @@ #include <drm/drm_damage_helper.h> #include <drm/drm_debugfs.h> #include <drm/drm_drv.h> -#include <drm/drm_fb_helper.h> +#include <drm/drm_fbdev_generic.h> #include <drm/drm_fourcc.h> #include <drm/drm_gem_atomic_helper.h> #include <drm/drm_gem_framebuffer_helper.h> diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c index 5a2e1cac06b2..22053c613644 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c @@ -17,6 +17,7 @@ #include <drm/drm_aperture.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_drv.h> +#include <drm/drm_fbdev_generic.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_gem_vram_helper.h> #include <drm/drm_managed.h> diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h index 4a0cd22c10e2..f957552c6c50 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h @@ -19,7 +19,6 @@ #include <linux/i2c.h> #include <drm/drm_edid.h> -#include <drm/drm_fb_helper.h> #include <drm/drm_framebuffer.h> struct hibmc_connector { diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c index c228091fb0e6..8c6d2ea2a472 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c @@ -11,6 +11,8 @@ * Jianhua Li <lijianhua@huawei.com> */ +#include <linux/io.h> + #include <drm/drm_atomic_helper.h> #include <drm/drm_probe_helper.h> #include <drm/drm_print.h> diff --git a/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c b/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c index a0d5aa727d58..d9978b79828c 100644 --- a/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c +++ b/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c @@ -658,7 +658,7 @@ static enum drm_mode_status dsi_encoder_mode_valid(struct drm_encoder *encoder, * reset adj_mode to the mode value each time, * so we don't adjust the mode twice */ - drm_mode_copy(&adj_mode, mode); + 
drm_mode_init(&adj_mode, mode); crtc_funcs = crtc->helper_private; if (crtc_funcs && crtc_funcs->mode_fixup) diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c index 73ee7f25f734..9c5d49bf40c9 100644 --- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c +++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c @@ -19,7 +19,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_drv.h> -#include <drm/drm_fb_helper.h> +#include <drm/drm_fbdev_generic.h> #include <drm/drm_gem_dma_helper.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_module.h> diff --git a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c index ca127ff797f7..427c20ba3404 100644 --- a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c +++ b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c @@ -11,7 +11,7 @@ #include <drm/drm_aperture.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_drv.h> -#include <drm/drm_fb_helper.h> +#include <drm/drm_fbdev_generic.h> #include <drm/drm_gem_shmem_helper.h> #include <drm/drm_simple_kms_helper.h> diff --git a/drivers/gpu/drm/hyperv/hyperv_drm_modeset.c b/drivers/gpu/drm/hyperv/hyperv_drm_modeset.c index 28e732f94bf2..6c6b57298797 100644 --- a/drivers/gpu/drm/hyperv/hyperv_drm_modeset.c +++ b/drivers/gpu/drm/hyperv/hyperv_drm_modeset.c @@ -8,7 +8,6 @@ #include <drm/drm_damage_helper.h> #include <drm/drm_drv.h> #include <drm/drm_edid.h> -#include <drm/drm_fb_helper.h> #include <drm/drm_format_helper.h> #include <drm/drm_fourcc.h> #include <drm/drm_framebuffer.h> diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c index ab385d18ddcc..5575d7abdc09 100644 --- a/drivers/gpu/drm/i915/display/intel_fbdev.c +++ b/drivers/gpu/drm/i915/display/intel_fbdev.c @@ -124,6 +124,8 @@ static const struct fb_ops intelfb_ops = { .owner = THIS_MODULE, DRM_FB_HELPER_DEFAULT_OPS, .fb_set_par = intel_fbdev_set_par, + .fb_read = drm_fb_helper_cfb_read, + .fb_write = drm_fb_helper_cfb_write, .fb_fillrect = drm_fb_helper_cfb_fillrect, .fb_copyarea = drm_fb_helper_cfb_copyarea, .fb_imageblit = drm_fb_helper_cfb_imageblit, @@ -254,7 +256,7 @@ static int intelfb_create(struct drm_fb_helper *helper, goto out_unlock; } - info = drm_fb_helper_alloc_fbi(helper); + info = drm_fb_helper_alloc_info(helper); if (IS_ERR(info)) { drm_err(&dev_priv->drm, "Failed to allocate fb_info (%pe)\n", info); ret = PTR_ERR(info); @@ -584,7 +586,7 @@ void intel_fbdev_unregister(struct drm_i915_private *dev_priv) if (!current_is_async()) intel_fbdev_sync(ifbdev); - drm_fb_helper_unregister_fbi(&ifbdev->helper); + drm_fb_helper_unregister_info(&ifbdev->helper); } void intel_fbdev_fini(struct drm_i915_private *dev_priv) @@ -627,7 +629,7 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous if (!ifbdev || !ifbdev->vma) goto set_suspend; - info = ifbdev->helper.fbdev; + info = ifbdev->helper.info; if (synchronous) { /* Flush any pending work to turn the console on, and then diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c index ec6f7ae47783..9322ac29008b 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c @@ -97,6 +97,8 @@ static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct * struct drm_i915_private *i915 = to_i915(obj->base.dev); int ret; + dma_resv_assert_held(dma_buf->resv); + if (obj->base.size < vma->vm_end - vma->vm_start) return -EINVAL; diff 
--git a/drivers/gpu/drm/imx/dcss/dcss-kms.c b/drivers/gpu/drm/imx/dcss/dcss-kms.c index b4f82ebca532..18df3888b7f9 100644 --- a/drivers/gpu/drm/imx/dcss/dcss-kms.c +++ b/drivers/gpu/drm/imx/dcss/dcss-kms.c @@ -7,7 +7,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_bridge_connector.h> #include <drm/drm_drv.h> -#include <drm/drm_fb_helper.h> +#include <drm/drm_fbdev_generic.h> #include <drm/drm_gem_dma_helper.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_of.h> @@ -21,7 +21,6 @@ DEFINE_DRM_GEM_DMA_FOPS(dcss_cma_fops); static const struct drm_mode_config_funcs dcss_drm_mode_config_funcs = { .fb_create = drm_gem_fb_create, - .output_poll_changed = drm_fb_helper_output_poll_changed, .atomic_check = drm_atomic_helper_check, .atomic_commit = drm_atomic_helper_commit, }; diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c index 8dd8b0f912af..e060fa6cbcb9 100644 --- a/drivers/gpu/drm/imx/imx-drm-core.c +++ b/drivers/gpu/drm/imx/imx-drm-core.c @@ -16,7 +16,7 @@ #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_drv.h> -#include <drm/drm_fb_helper.h> +#include <drm/drm_fbdev_generic.h> #include <drm/drm_gem_dma_helper.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_managed.h> diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c index 41799011f73b..c45fc8f4744d 100644 --- a/drivers/gpu/drm/imx/imx-ldb.c +++ b/drivers/gpu/drm/imx/imx-ldb.c @@ -7,6 +7,7 @@ #include <linux/clk.h> #include <linux/component.h> +#include <linux/i2c.h> #include <linux/media-bus-format.h> #include <linux/mfd/syscon.h> #include <linux/mfd/syscon/imx6q-iomuxc-gpr.h> @@ -23,7 +24,6 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_bridge.h> #include <drm/drm_edid.h> -#include <drm/drm_fb_helper.h> #include <drm/drm_managed.h> #include <drm/drm_of.h> #include <drm/drm_panel.h> diff --git a/drivers/gpu/drm/imx/imx-tve.c b/drivers/gpu/drm/imx/imx-tve.c index 6b34fac3f73a..d64ebd2cf15e 100644 --- a/drivers/gpu/drm/imx/imx-tve.c +++ b/drivers/gpu/drm/imx/imx-tve.c @@ -19,7 +19,6 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_edid.h> -#include <drm/drm_fb_helper.h> #include <drm/drm_managed.h> #include <drm/drm_probe_helper.h> #include <drm/drm_simple_kms_helper.h> diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c index 06723b2e9b84..0fa0b590830b 100644 --- a/drivers/gpu/drm/imx/parallel-display.c +++ b/drivers/gpu/drm/imx/parallel-display.c @@ -8,6 +8,7 @@ #include <linux/component.h> #include <linux/media-bus-format.h> #include <linux/module.h> +#include <linux/of.h> #include <linux/platform_device.h> #include <linux/videodev2.h> @@ -16,7 +17,6 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_bridge.h> #include <drm/drm_edid.h> -#include <drm/drm_fb_helper.h> #include <drm/drm_managed.h> #include <drm/drm_of.h> #include <drm/drm_panel.h> diff --git a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c index ab0515d2c420..3d5af44bf92d 100644 --- a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c +++ b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c @@ -32,7 +32,7 @@ #include <drm/drm_encoder.h> #include <drm/drm_gem_dma_helper.h> #include <drm/drm_fb_dma_helper.h> -#include <drm/drm_fb_helper.h> +#include <drm/drm_fbdev_generic.h> #include <drm/drm_fourcc.h> #include <drm/drm_framebuffer.h> #include <drm/drm_gem_atomic_helper.h> @@ -1018,7 +1018,6 @@ static const struct drm_bridge_funcs ingenic_drm_bridge_funcs = { static 
const struct drm_mode_config_funcs ingenic_drm_mode_config_funcs = { .fb_create = ingenic_drm_gem_fb_create, - .output_poll_changed = drm_fb_helper_output_poll_changed, .atomic_check = drm_atomic_helper_check, .atomic_commit = drm_atomic_helper_commit, }; @@ -1629,7 +1628,11 @@ static int ingenic_drm_init(void) return err; } - return platform_driver_register(&ingenic_drm_driver); + err = platform_driver_register(&ingenic_drm_driver); + if (IS_ENABLED(CONFIG_DRM_INGENIC_IPU) && err) + platform_driver_unregister(ingenic_ipu_driver_ptr); + + return err; } module_init(ingenic_drm_init); diff --git a/drivers/gpu/drm/kmb/kmb_drv.c b/drivers/gpu/drm/kmb/kmb_drv.c index 2382ccb3ee99..d29c678f6c91 100644 --- a/drivers/gpu/drm/kmb/kmb_drv.c +++ b/drivers/gpu/drm/kmb/kmb_drv.c @@ -15,7 +15,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_drv.h> -#include <drm/drm_fb_helper.h> +#include <drm/drm_fbdev_generic.h> #include <drm/drm_gem_dma_helper.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_module.h> diff --git a/drivers/gpu/drm/kmb/kmb_plane.c b/drivers/gpu/drm/kmb/kmb_plane.c index a42f63f6f957..d172a302f902 100644 --- a/drivers/gpu/drm/kmb/kmb_plane.c +++ b/drivers/gpu/drm/kmb/kmb_plane.c @@ -9,7 +9,6 @@ #include <drm/drm_crtc.h> #include <drm/drm_crtc_helper.h> #include <drm/drm_fb_dma_helper.h> -#include <drm/drm_fb_helper.h> #include <drm/drm_fourcc.h> #include <drm/drm_framebuffer.h> #include <drm/drm_gem_dma_helper.h> diff --git a/drivers/gpu/drm/logicvc/logicvc_drm.c b/drivers/gpu/drm/logicvc/logicvc_drm.c index cc9a4e965f77..9de24d9f0c96 100644 --- a/drivers/gpu/drm/logicvc/logicvc_drm.c +++ b/drivers/gpu/drm/logicvc/logicvc_drm.c @@ -17,7 +17,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_drv.h> -#include <drm/drm_fb_helper.h> +#include <drm/drm_fbdev_generic.h> #include <drm/drm_gem_dma_helper.h> #include <drm/drm_print.h> diff --git a/drivers/gpu/drm/logicvc/logicvc_mode.c b/drivers/gpu/drm/logicvc/logicvc_mode.c index d8207ffda1af..9971950ebd4e 100644 --- a/drivers/gpu/drm/logicvc/logicvc_mode.c +++ b/drivers/gpu/drm/logicvc/logicvc_mode.c @@ -10,7 +10,6 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_crtc_helper.h> #include <drm/drm_drv.h> -#include <drm/drm_fb_helper.h> #include <drm/drm_gem_dma_helper.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_mode_config.h> @@ -26,7 +25,6 @@ static const struct drm_mode_config_funcs logicvc_mode_config_funcs = { .fb_create = drm_gem_fb_create, - .output_poll_changed = drm_fb_helper_output_poll_changed, .atomic_check = drm_atomic_helper_check, .atomic_commit = drm_atomic_helper_commit, }; diff --git a/drivers/gpu/drm/mcde/mcde_drv.c b/drivers/gpu/drm/mcde/mcde_drv.c index 1c4482ad507d..4aedb050d2a5 100644 --- a/drivers/gpu/drm/mcde/mcde_drv.c +++ b/drivers/gpu/drm/mcde/mcde_drv.c @@ -69,7 +69,7 @@ #include <drm/drm_bridge.h> #include <drm/drm_drv.h> #include <drm/drm_fb_dma_helper.h> -#include <drm/drm_fb_helper.h> +#include <drm/drm_fbdev_generic.h> #include <drm/drm_gem.h> #include <drm/drm_gem_dma_helper.h> #include <drm/drm_gem_framebuffer_helper.h> @@ -203,7 +203,6 @@ DEFINE_DRM_GEM_DMA_FOPS(drm_fops); static const struct drm_driver mcde_drm_driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC, - .lastclose = drm_fb_helper_lastclose, .ioctls = NULL, .fops = &drm_fops, .name = "mcde", diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c index 91f58db5915f..39a42dc8fb85 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c 
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c @@ -17,7 +17,7 @@ #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_drv.h> -#include <drm/drm_fb_helper.h> +#include <drm/drm_fbdev_generic.h> #include <drm/drm_fourcc.h> #include <drm/drm_gem.h> #include <drm/drm_gem_dma_helper.h> diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c index 4c80b6896dc3..12fa78f286ff 100644 --- a/drivers/gpu/drm/mediatek/mtk_hdmi.c +++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c @@ -1217,7 +1217,7 @@ static int mtk_hdmi_bridge_mode_valid(struct drm_bridge *bridge, if (next_bridge) { struct drm_display_mode adjusted_mode; - drm_mode_copy(&adjusted_mode, mode); + drm_mode_init(&adjusted_mode, mode); if (!drm_bridge_chain_mode_fixup(next_bridge, mode, &adjusted_mode)) return MODE_BAD; diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c index 3b24a924b7b9..79bfe3938d3c 100644 --- a/drivers/gpu/drm/meson/meson_drv.c +++ b/drivers/gpu/drm/meson/meson_drv.c @@ -18,7 +18,7 @@ #include <drm/drm_aperture.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_drv.h> -#include <drm/drm_fb_helper.h> +#include <drm/drm_fbdev_generic.h> #include <drm/drm_gem_dma_helper.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_modeset_helper_vtables.h> diff --git a/drivers/gpu/drm/meson/meson_encoder_cvbs.c b/drivers/gpu/drm/meson/meson_encoder_cvbs.c index 5675bc2a92cf..3f73b211fa8e 100644 --- a/drivers/gpu/drm/meson/meson_encoder_cvbs.c +++ b/drivers/gpu/drm/meson/meson_encoder_cvbs.c @@ -116,9 +116,10 @@ static int meson_encoder_cvbs_get_modes(struct drm_bridge *bridge, return i; } -static int meson_encoder_cvbs_mode_valid(struct drm_bridge *bridge, - const struct drm_display_info *display_info, - const struct drm_display_mode *mode) +static enum drm_mode_status +meson_encoder_cvbs_mode_valid(struct drm_bridge *bridge, + const struct drm_display_info *display_info, + const struct drm_display_mode *mode) { if (meson_cvbs_get_mode(mode)) return MODE_OK; diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c index ece6cd102dbb..976f0ab2006b 100644 --- a/drivers/gpu/drm/mgag200/mgag200_drv.c +++ b/drivers/gpu/drm/mgag200/mgag200_drv.c @@ -11,6 +11,7 @@ #include <drm/drm_aperture.h> #include <drm/drm_drv.h> +#include <drm/drm_fbdev_generic.h> #include <drm/drm_file.h> #include <drm/drm_ioctl.h> #include <drm/drm_managed.h> diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h index f0c2349404b4..9e604dbb8e44 100644 --- a/drivers/gpu/drm/mgag200/mgag200_drv.h +++ b/drivers/gpu/drm/mgag200/mgag200_drv.h @@ -18,7 +18,6 @@ #include <drm/drm_connector.h> #include <drm/drm_crtc.h> #include <drm/drm_encoder.h> -#include <drm/drm_fb_helper.h> #include <drm/drm_gem.h> #include <drm/drm_gem_shmem_helper.h> #include <drm/drm_plane.h> diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c index 2c14646661b7..0f71e8fe7be7 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c @@ -237,12 +237,13 @@ static void dpu_encoder_phys_vid_setup_timing_engine( unsigned long lock_flags; struct dpu_hw_intf_cfg intf_cfg = { 0 }; + drm_mode_init(&mode, &phys_enc->cached_mode); + if (!phys_enc->hw_ctl->ops.setup_intf_cfg) { DPU_ERROR("invalid encoder %d\n", phys_enc != NULL); return; } - mode = phys_enc->cached_mode; if 
(!phys_enc->hw_intf->ops.setup_timing_gen) { DPU_ERROR("timing engine setup is not supported\n"); return; @@ -634,7 +635,9 @@ static int dpu_encoder_phys_vid_get_frame_count( { struct intf_status s = {0}; u32 fetch_start = 0; - struct drm_display_mode mode = phys_enc->cached_mode; + struct drm_display_mode mode; + + drm_mode_init(&mode, &phys_enc->cached_mode); if (!dpu_encoder_phys_vid_is_master(phys_enc)) return -EINVAL; diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c index bfd0aeff3f0d..39c9e55f07cc 100644 --- a/drivers/gpu/drm/msm/dp/dp_display.c +++ b/drivers/gpu/drm/msm/dp/dp_display.c @@ -857,7 +857,7 @@ static int dp_display_set_mode(struct msm_dp *dp_display, dp = container_of(dp_display, struct dp_display_private, dp_display); - dp->panel->dp_mode.drm_mode = mode->drm_mode; + drm_mode_copy(&dp->panel->dp_mode.drm_mode, &mode->drm_mode); dp->panel->dp_mode.bpp = mode->bpp; dp->panel->dp_mode.capabilities = mode->capabilities; dp_panel_init_panel_info(dp->panel); diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c index b373e3000320..31e1e30cb52a 100644 --- a/drivers/gpu/drm/msm/msm_fbdev.c +++ b/drivers/gpu/drm/msm/msm_fbdev.c @@ -93,7 +93,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper, goto fail; } - fbi = drm_fb_helper_alloc_fbi(helper); + fbi = drm_fb_helper_alloc_info(helper); if (IS_ERR(fbi)) { DRM_DEV_ERROR(dev->dev, "failed to allocate fb info\n"); ret = PTR_ERR(fbi); @@ -182,7 +182,7 @@ void msm_fbdev_free(struct drm_device *dev) DBG(); - drm_fb_helper_unregister_fbi(helper); + drm_fb_helper_unregister_info(helper); drm_fb_helper_fini(helper); diff --git a/drivers/gpu/drm/mxsfb/lcdif_drv.c b/drivers/gpu/drm/mxsfb/lcdif_drv.c index 075002ed6fb0..cc2ceb301b96 100644 --- a/drivers/gpu/drm/mxsfb/lcdif_drv.c +++ b/drivers/gpu/drm/mxsfb/lcdif_drv.c @@ -16,7 +16,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_bridge.h> #include <drm/drm_drv.h> -#include <drm/drm_fb_helper.h> +#include <drm/drm_fbdev_generic.h> #include <drm/drm_gem_dma_helper.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_mode_config.h> diff --git a/drivers/gpu/drm/mxsfb/lcdif_kms.c b/drivers/gpu/drm/mxsfb/lcdif_kms.c index f0ad6e2a9352..262bc43b1079 100644 --- a/drivers/gpu/drm/mxsfb/lcdif_kms.c +++ b/drivers/gpu/drm/mxsfb/lcdif_kms.c @@ -5,6 +5,7 @@ * This code is based on drivers/gpu/drm/mxsfb/mxsfb* */ +#include <linux/bitfield.h> #include <linux/clk.h> #include <linux/io.h> #include <linux/iopoll.h> @@ -332,6 +333,18 @@ static void lcdif_enable_controller(struct lcdif_drm_private *lcdif) { u32 reg; + /* Set FIFO Panic watermarks, low 1/3, high 2/3 . */ + writel(FIELD_PREP(PANIC0_THRES_LOW_MASK, 1 * PANIC0_THRES_MAX / 3) | + FIELD_PREP(PANIC0_THRES_HIGH_MASK, 2 * PANIC0_THRES_MAX / 3), + lcdif->base + LCDC_V8_PANIC0_THRES); + + /* + * Enable FIFO Panic, this does not generate interrupt, but + * boosts NoC priority based on FIFO Panic watermarks. + */ + writel(INT_ENABLE_D1_PLANE_PANIC_EN, + lcdif->base + LCDC_V8_INT_ENABLE_D1); + reg = readl(lcdif->base + LCDC_V8_DISP_PARA); reg |= DISP_PARA_DISP_ON; writel(reg, lcdif->base + LCDC_V8_DISP_PARA); @@ -359,6 +372,9 @@ static void lcdif_disable_controller(struct lcdif_drm_private *lcdif) reg = readl(lcdif->base + LCDC_V8_DISP_PARA); reg &= ~DISP_PARA_DISP_ON; writel(reg, lcdif->base + LCDC_V8_DISP_PARA); + + /* Disable FIFO Panic NoC priority booster. 
*/ + writel(0, lcdif->base + LCDC_V8_INT_ENABLE_D1); } static void lcdif_reset_block(struct lcdif_drm_private *lcdif) diff --git a/drivers/gpu/drm/mxsfb/lcdif_regs.h b/drivers/gpu/drm/mxsfb/lcdif_regs.h index fb74eb5ccbf1..c55dfb236c1d 100644 --- a/drivers/gpu/drm/mxsfb/lcdif_regs.h +++ b/drivers/gpu/drm/mxsfb/lcdif_regs.h @@ -255,6 +255,7 @@ #define PANIC0_THRES_LOW_MASK GENMASK(24, 16) #define PANIC0_THRES_HIGH_MASK GENMASK(8, 0) +#define PANIC0_THRES_MAX 511 #define LCDIF_MIN_XRES 120 #define LCDIF_MIN_YRES 120 diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c index b29b332ed381..810edea0a31e 100644 --- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c +++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c @@ -20,7 +20,7 @@ #include <drm/drm_bridge.h> #include <drm/drm_connector.h> #include <drm/drm_drv.h> -#include <drm/drm_fb_helper.h> +#include <drm/drm_fbdev_generic.h> #include <drm/drm_fourcc.h> #include <drm/drm_gem_dma_helper.h> #include <drm/drm_gem_framebuffer_helper.h> diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c index b7084c17f9c1..edcb2529b402 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c @@ -739,14 +739,14 @@ nv50_hdmi_enable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc, struct nouveau_drm *drm = nouveau_drm(encoder->dev); struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); struct drm_hdmi_info *hdmi = &nv_connector->base.display_info.hdmi; - union hdmi_infoframe infoframe; + union hdmi_infoframe infoframe = { 0 }; const u8 rekey = 56; /* binary driver, and tegra, constant */ - u8 config, scdc = 0; + u8 scdc = 0; u32 max_ac_packet; struct { struct nvif_outp_infoframe_v0 infoframe; u8 data[17]; - } args; + } args = { 0 }; int ret, size; max_ac_packet = mode->htotal - mode->hdisplay; @@ -757,27 +757,22 @@ nv50_hdmi_enable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc, if (hdmi->scdc.scrambling.supported) { const bool high_tmds_clock_ratio = mode->clock > 340000; - ret = drm_scdc_readb(nv_encoder->i2c, SCDC_TMDS_CONFIG, &config); + ret = drm_scdc_readb(nv_encoder->i2c, SCDC_TMDS_CONFIG, &scdc); if (ret < 0) { NV_ERROR(drm, "Failure to read SCDC_TMDS_CONFIG: %d\n", ret); return; } - config &= ~(SCDC_TMDS_BIT_CLOCK_RATIO_BY_40 | SCDC_SCRAMBLING_ENABLE); + scdc &= ~(SCDC_TMDS_BIT_CLOCK_RATIO_BY_40 | SCDC_SCRAMBLING_ENABLE); if (high_tmds_clock_ratio || hdmi->scdc.scrambling.low_rates) - config |= SCDC_SCRAMBLING_ENABLE; + scdc |= SCDC_SCRAMBLING_ENABLE; if (high_tmds_clock_ratio) - config |= SCDC_TMDS_BIT_CLOCK_RATIO_BY_40; + scdc |= SCDC_TMDS_BIT_CLOCK_RATIO_BY_40; - ret = drm_scdc_writeb(nv_encoder->i2c, SCDC_TMDS_CONFIG, config); + ret = drm_scdc_writeb(nv_encoder->i2c, SCDC_TMDS_CONFIG, scdc); if (ret < 0) NV_ERROR(drm, "Failure to write SCDC_TMDS_CONFIG = 0x%02x: %d\n", - config, ret); - - if (high_tmds_clock_ratio || hdmi->scdc.scrambling.low_rates) - scdc |= NVIF_OUTP_ACQUIRE_V0_TMDS_HDMI_SCDC_SCRAMBLE; - if (high_tmds_clock_ratio) - scdc |= NVIF_OUTP_ACQUIRE_V0_TMDS_HDMI_SCDC_DIV_BY_4; + scdc, ret); } ret = nvif_outp_acquire_tmds(&nv_encoder->outp, nv_crtc->index, true, @@ -793,7 +788,7 @@ nv50_hdmi_enable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc, drm_hdmi_avi_infoframe_quant_range(&infoframe.avi, &nv_connector->base, mode, HDMI_QUANTIZATION_RANGE_FULL); - size = hdmi_infoframe_pack(&infoframe, args.data, 17); + size = hdmi_infoframe_pack(&infoframe, args.data, ARRAY_SIZE(args.data)); } else { size = 0; } @@ -801,9 
+796,10 @@ nv50_hdmi_enable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc, nvif_outp_infoframe(&nv_encoder->outp, NVIF_OUTP_INFOFRAME_V0_AVI, &args.infoframe, size); /* Vendor InfoFrame. */ + memset(&args.data, 0, sizeof(args.data)); if (!drm_hdmi_vendor_infoframe_from_display_mode(&infoframe.vendor.hdmi, &nv_connector->base, mode)) - size = hdmi_infoframe_pack(&infoframe, args.data, 17); + size = hdmi_infoframe_pack(&infoframe, args.data, ARRAY_SIZE(args.data)); else size = 0; diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c index a614582779ca..40409a29f5b6 100644 --- a/drivers/gpu/drm/nouveau/nouveau_backlight.c +++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c @@ -264,7 +264,11 @@ nva3_set_intensity(struct backlight_device *bd) u32 div, val; div = nvif_rd32(device, NV50_PDISP_SOR_PWM_DIV(or)); - val = (bd->props.brightness * div) / 100; + + val = backlight_get_brightness(bd); + if (val) + val = (val * div) / 100; + if (div) { nvif_wr32(device, NV50_PDISP_SOR_PWM_CTL(or), val | diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index a19f18b251f3..80f154b6adab 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -34,6 +34,7 @@ #include <drm/drm_crtc_helper.h> #include <drm/drm_drv.h> #include <drm/drm_fb_helper.h> +#include <drm/drm_fbdev_generic.h> #include <drm/drm_gem_ttm_helper.h> #include <drm/drm_ioctl.h> #include <drm/drm_vblank.h> diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c new file mode 100644 index 000000000000..e87de7906f78 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c @@ -0,0 +1,613 @@ +/* + * Copyright © 2007 David Airlie + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ * + * Authors: + * David Airlie + */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/errno.h> +#include <linux/string.h> +#include <linux/mm.h> +#include <linux/tty.h> +#include <linux/sysrq.h> +#include <linux/delay.h> +#include <linux/init.h> +#include <linux/screen_info.h> +#include <linux/vga_switcheroo.h> +#include <linux/console.h> + +#include <drm/drm_crtc.h> +#include <drm/drm_crtc_helper.h> +#include <drm/drm_probe_helper.h> +#include <drm/drm_fb_helper.h> +#include <drm/drm_fourcc.h> +#include <drm/drm_atomic.h> + +#include "nouveau_drv.h" +#include "nouveau_gem.h" +#include "nouveau_bo.h" +#include "nouveau_fbcon.h" +#include "nouveau_chan.h" +#include "nouveau_vmm.h" + +#include "nouveau_crtc.h" + +MODULE_PARM_DESC(nofbaccel, "Disable fbcon acceleration"); +int nouveau_nofbaccel = 0; +module_param_named(nofbaccel, nouveau_nofbaccel, int, 0400); + +MODULE_PARM_DESC(fbcon_bpp, "fbcon bits-per-pixel (default: auto)"); +static int nouveau_fbcon_bpp; +module_param_named(fbcon_bpp, nouveau_fbcon_bpp, int, 0400); + +static void +nouveau_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) +{ + struct nouveau_fbdev *fbcon = info->par; + struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev); + struct nvif_device *device = &drm->client.device; + int ret; + + if (info->state != FBINFO_STATE_RUNNING) + return; + + ret = -ENODEV; + if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) && + mutex_trylock(&drm->client.mutex)) { + if (device->info.family < NV_DEVICE_INFO_V0_TESLA) + ret = nv04_fbcon_fillrect(info, rect); + else + if (device->info.family < NV_DEVICE_INFO_V0_FERMI) + ret = nv50_fbcon_fillrect(info, rect); + else + ret = nvc0_fbcon_fillrect(info, rect); + mutex_unlock(&drm->client.mutex); + } + + if (ret == 0) + return; + + if (ret != -ENODEV) + nouveau_fbcon_gpu_lockup(info); + drm_fb_helper_cfb_fillrect(info, rect); +} + +static void +nouveau_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *image) +{ + struct nouveau_fbdev *fbcon = info->par; + struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev); + struct nvif_device *device = &drm->client.device; + int ret; + + if (info->state != FBINFO_STATE_RUNNING) + return; + + ret = -ENODEV; + if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) && + mutex_trylock(&drm->client.mutex)) { + if (device->info.family < NV_DEVICE_INFO_V0_TESLA) + ret = nv04_fbcon_copyarea(info, image); + else + if (device->info.family < NV_DEVICE_INFO_V0_FERMI) + ret = nv50_fbcon_copyarea(info, image); + else + ret = nvc0_fbcon_copyarea(info, image); + mutex_unlock(&drm->client.mutex); + } + + if (ret == 0) + return; + + if (ret != -ENODEV) + nouveau_fbcon_gpu_lockup(info); + drm_fb_helper_cfb_copyarea(info, image); +} + +static void +nouveau_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) +{ + struct nouveau_fbdev *fbcon = info->par; + struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev); + struct nvif_device *device = &drm->client.device; + int ret; + + if (info->state != FBINFO_STATE_RUNNING) + return; + + ret = -ENODEV; + if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) && + mutex_trylock(&drm->client.mutex)) { + if (device->info.family < NV_DEVICE_INFO_V0_TESLA) + ret = nv04_fbcon_imageblit(info, image); + else + if (device->info.family < NV_DEVICE_INFO_V0_FERMI) + ret = nv50_fbcon_imageblit(info, image); + else + ret = nvc0_fbcon_imageblit(info, image); + mutex_unlock(&drm->client.mutex); + } + + if (ret == 0) + return; + + if (ret 
!= -ENODEV) + nouveau_fbcon_gpu_lockup(info); + drm_fb_helper_cfb_imageblit(info, image); +} + +static int +nouveau_fbcon_sync(struct fb_info *info) +{ + struct nouveau_fbdev *fbcon = info->par; + struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev); + struct nouveau_channel *chan = drm->channel; + int ret; + + if (!chan || !chan->accel_done || in_interrupt() || + info->state != FBINFO_STATE_RUNNING || + info->flags & FBINFO_HWACCEL_DISABLED) + return 0; + + if (!mutex_trylock(&drm->client.mutex)) + return 0; + + ret = nouveau_channel_idle(chan); + mutex_unlock(&drm->client.mutex); + if (ret) { + nouveau_fbcon_gpu_lockup(info); + return 0; + } + + chan->accel_done = false; + return 0; +} + +static int +nouveau_fbcon_open(struct fb_info *info, int user) +{ + struct nouveau_fbdev *fbcon = info->par; + struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev); + int ret = pm_runtime_get_sync(drm->dev->dev); + if (ret < 0 && ret != -EACCES) { + pm_runtime_put(drm->dev->dev); + return ret; + } + return 0; +} + +static int +nouveau_fbcon_release(struct fb_info *info, int user) +{ + struct nouveau_fbdev *fbcon = info->par; + struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev); + pm_runtime_put(drm->dev->dev); + return 0; +} + +static const struct fb_ops nouveau_fbcon_ops = { + .owner = THIS_MODULE, + DRM_FB_HELPER_DEFAULT_OPS, + .fb_open = nouveau_fbcon_open, + .fb_release = nouveau_fbcon_release, + .fb_fillrect = nouveau_fbcon_fillrect, + .fb_copyarea = nouveau_fbcon_copyarea, + .fb_imageblit = nouveau_fbcon_imageblit, + .fb_sync = nouveau_fbcon_sync, +}; + +static const struct fb_ops nouveau_fbcon_sw_ops = { + .owner = THIS_MODULE, + DRM_FB_HELPER_DEFAULT_OPS, + .fb_open = nouveau_fbcon_open, + .fb_release = nouveau_fbcon_release, + .fb_fillrect = drm_fb_helper_cfb_fillrect, + .fb_copyarea = drm_fb_helper_cfb_copyarea, + .fb_imageblit = drm_fb_helper_cfb_imageblit, +}; + +void +nouveau_fbcon_accel_save_disable(struct drm_device *dev) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + if (drm->fbcon && drm->fbcon->helper.info) { + drm->fbcon->saved_flags = drm->fbcon->helper.info->flags; + drm->fbcon->helper.info->flags |= FBINFO_HWACCEL_DISABLED; + } +} + +void +nouveau_fbcon_accel_restore(struct drm_device *dev) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + if (drm->fbcon && drm->fbcon->helper.info) + drm->fbcon->helper.info->flags = drm->fbcon->saved_flags; +} + +static void +nouveau_fbcon_accel_fini(struct drm_device *dev) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_fbdev *fbcon = drm->fbcon; + if (fbcon && drm->channel) { + console_lock(); + if (fbcon->helper.info) + fbcon->helper.info->flags |= FBINFO_HWACCEL_DISABLED; + console_unlock(); + nouveau_channel_idle(drm->channel); + nvif_object_dtor(&fbcon->twod); + nvif_object_dtor(&fbcon->blit); + nvif_object_dtor(&fbcon->gdi); + nvif_object_dtor(&fbcon->patt); + nvif_object_dtor(&fbcon->rop); + nvif_object_dtor(&fbcon->clip); + nvif_object_dtor(&fbcon->surf2d); + } +} + +static void +nouveau_fbcon_accel_init(struct drm_device *dev) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_fbdev *fbcon = drm->fbcon; + struct fb_info *info = fbcon->helper.info; + int ret; + + if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) + ret = nv04_fbcon_accel_init(info); + else + if (drm->client.device.info.family < NV_DEVICE_INFO_V0_FERMI) + ret = nv50_fbcon_accel_init(info); + else + ret = nvc0_fbcon_accel_init(info); + + if (ret == 0) + info->fbops = &nouveau_fbcon_ops; +} + +static void 
+nouveau_fbcon_zfill(struct drm_device *dev, struct nouveau_fbdev *fbcon) +{ + struct fb_info *info = fbcon->helper.info; + struct fb_fillrect rect; + + /* Clear the entire fbcon. The drm will program every connector + * with it's preferred mode. If the sizes differ, one display will + * quite likely have garbage around the console. + */ + rect.dx = rect.dy = 0; + rect.width = info->var.xres_virtual; + rect.height = info->var.yres_virtual; + rect.color = 0; + rect.rop = ROP_COPY; + info->fbops->fb_fillrect(info, &rect); +} + +static int +nouveau_fbcon_create(struct drm_fb_helper *helper, + struct drm_fb_helper_surface_size *sizes) +{ + struct nouveau_fbdev *fbcon = + container_of(helper, struct nouveau_fbdev, helper); + struct drm_device *dev = fbcon->helper.dev; + struct nouveau_drm *drm = nouveau_drm(dev); + struct nvif_device *device = &drm->client.device; + struct fb_info *info; + struct drm_framebuffer *fb; + struct nouveau_channel *chan; + struct nouveau_bo *nvbo; + struct drm_mode_fb_cmd2 mode_cmd = {}; + int ret; + + mode_cmd.width = sizes->surface_width; + mode_cmd.height = sizes->surface_height; + + mode_cmd.pitches[0] = mode_cmd.width * (sizes->surface_bpp >> 3); + mode_cmd.pitches[0] = roundup(mode_cmd.pitches[0], 256); + + mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, + sizes->surface_depth); + + ret = nouveau_gem_new(&drm->client, mode_cmd.pitches[0] * + mode_cmd.height, 0, NOUVEAU_GEM_DOMAIN_VRAM, + 0, 0x0000, &nvbo); + if (ret) { + NV_ERROR(drm, "failed to allocate framebuffer\n"); + goto out; + } + + ret = nouveau_framebuffer_new(dev, &mode_cmd, &nvbo->bo.base, &fb); + if (ret) + goto out_unref; + + ret = nouveau_bo_pin(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, false); + if (ret) { + NV_ERROR(drm, "failed to pin fb: %d\n", ret); + goto out_unref; + } + + ret = nouveau_bo_map(nvbo); + if (ret) { + NV_ERROR(drm, "failed to map fb: %d\n", ret); + goto out_unpin; + } + + chan = nouveau_nofbaccel ? 
NULL : drm->channel; + if (chan && device->info.family >= NV_DEVICE_INFO_V0_TESLA) { + ret = nouveau_vma_new(nvbo, chan->vmm, &fbcon->vma); + if (ret) { + NV_ERROR(drm, "failed to map fb into chan: %d\n", ret); + chan = NULL; + } + } + + info = drm_fb_helper_alloc_info(helper); + if (IS_ERR(info)) { + ret = PTR_ERR(info); + goto out_unlock; + } + + /* setup helper */ + fbcon->helper.fb = fb; + + if (!chan) + info->flags = FBINFO_HWACCEL_DISABLED; + else + info->flags = FBINFO_HWACCEL_COPYAREA | + FBINFO_HWACCEL_FILLRECT | + FBINFO_HWACCEL_IMAGEBLIT; + info->fbops = &nouveau_fbcon_sw_ops; + info->fix.smem_start = nvbo->bo.resource->bus.offset; + info->fix.smem_len = nvbo->bo.base.size; + + info->screen_base = nvbo_kmap_obj_iovirtual(nvbo); + info->screen_size = nvbo->bo.base.size; + + drm_fb_helper_fill_info(info, &fbcon->helper, sizes); + + /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */ + + if (chan) + nouveau_fbcon_accel_init(dev); + nouveau_fbcon_zfill(dev, fbcon); + + /* To allow resizeing without swapping buffers */ + NV_INFO(drm, "allocated %dx%d fb: 0x%llx, bo %p\n", + fb->width, fb->height, nvbo->offset, nvbo); + + if (dev_is_pci(dev->dev)) + vga_switcheroo_client_fb_set(to_pci_dev(dev->dev), info); + + return 0; + +out_unlock: + if (chan) + nouveau_vma_del(&fbcon->vma); + nouveau_bo_unmap(nvbo); +out_unpin: + nouveau_bo_unpin(nvbo); +out_unref: + nouveau_bo_ref(NULL, &nvbo); +out: + return ret; +} + +static int +nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *fbcon) +{ + struct drm_framebuffer *fb = fbcon->helper.fb; + struct nouveau_bo *nvbo; + + drm_fb_helper_unregister_info(&fbcon->helper); + drm_fb_helper_fini(&fbcon->helper); + + if (fb && fb->obj[0]) { + nvbo = nouveau_gem_object(fb->obj[0]); + nouveau_vma_del(&fbcon->vma); + nouveau_bo_unmap(nvbo); + nouveau_bo_unpin(nvbo); + drm_framebuffer_put(fb); + } + + return 0; +} + +void nouveau_fbcon_gpu_lockup(struct fb_info *info) +{ + struct nouveau_fbdev *fbcon = info->par; + struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev); + + NV_ERROR(drm, "GPU lockup - switching to software fbcon\n"); + info->flags |= FBINFO_HWACCEL_DISABLED; +} + +static const struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = { + .fb_probe = nouveau_fbcon_create, +}; + +static void +nouveau_fbcon_set_suspend_work(struct work_struct *work) +{ + struct nouveau_drm *drm = container_of(work, typeof(*drm), fbcon_work); + int state = READ_ONCE(drm->fbcon_new_state); + + if (state == FBINFO_STATE_RUNNING) + pm_runtime_get_sync(drm->dev->dev); + + console_lock(); + if (state == FBINFO_STATE_RUNNING) + nouveau_fbcon_accel_restore(drm->dev); + drm_fb_helper_set_suspend(&drm->fbcon->helper, state); + if (state != FBINFO_STATE_RUNNING) + nouveau_fbcon_accel_save_disable(drm->dev); + console_unlock(); + + if (state == FBINFO_STATE_RUNNING) { + nouveau_fbcon_hotplug_resume(drm->fbcon); + pm_runtime_mark_last_busy(drm->dev->dev); + pm_runtime_put_autosuspend(drm->dev->dev); + } +} + +void +nouveau_fbcon_set_suspend(struct drm_device *dev, int state) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + + if (!drm->fbcon) + return; + + drm->fbcon_new_state = state; + /* Since runtime resume can happen as a result of a sysfs operation, + * it's possible we already have the console locked. 
So handle fbcon + * init/deinit from a seperate work thread + */ + schedule_work(&drm->fbcon_work); +} + +void +nouveau_fbcon_output_poll_changed(struct drm_device *dev) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_fbdev *fbcon = drm->fbcon; + int ret; + + if (!fbcon) + return; + + mutex_lock(&fbcon->hotplug_lock); + + ret = pm_runtime_get(dev->dev); + if (ret == 1 || ret == -EACCES) { + drm_fb_helper_hotplug_event(&fbcon->helper); + + pm_runtime_mark_last_busy(dev->dev); + pm_runtime_put_autosuspend(dev->dev); + } else if (ret == 0) { + /* If the GPU was already in the process of suspending before + * this event happened, then we can't block here as we'll + * deadlock the runtime pmops since they wait for us to + * finish. So, just defer this event for when we runtime + * resume again. It will be handled by fbcon_work. + */ + NV_DEBUG(drm, "fbcon HPD event deferred until runtime resume\n"); + fbcon->hotplug_waiting = true; + pm_runtime_put_noidle(drm->dev->dev); + } else { + DRM_WARN("fbcon HPD event lost due to RPM failure: %d\n", + ret); + } + + mutex_unlock(&fbcon->hotplug_lock); +} + +void +nouveau_fbcon_hotplug_resume(struct nouveau_fbdev *fbcon) +{ + struct nouveau_drm *drm; + + if (!fbcon) + return; + drm = nouveau_drm(fbcon->helper.dev); + + mutex_lock(&fbcon->hotplug_lock); + if (fbcon->hotplug_waiting) { + fbcon->hotplug_waiting = false; + + NV_DEBUG(drm, "Handling deferred fbcon HPD events\n"); + drm_fb_helper_hotplug_event(&fbcon->helper); + } + mutex_unlock(&fbcon->hotplug_lock); +} + +int +nouveau_fbcon_init(struct drm_device *dev) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_fbdev *fbcon; + int preferred_bpp = nouveau_fbcon_bpp; + int ret; + + if (!dev->mode_config.num_crtc || + (to_pci_dev(dev->dev)->class >> 8) != PCI_CLASS_DISPLAY_VGA) + return 0; + + fbcon = kzalloc(sizeof(struct nouveau_fbdev), GFP_KERNEL); + if (!fbcon) + return -ENOMEM; + + drm->fbcon = fbcon; + INIT_WORK(&drm->fbcon_work, nouveau_fbcon_set_suspend_work); + mutex_init(&fbcon->hotplug_lock); + + drm_fb_helper_prepare(dev, &fbcon->helper, &nouveau_fbcon_helper_funcs); + + ret = drm_fb_helper_init(dev, &fbcon->helper); + if (ret) + goto free; + + if (preferred_bpp != 8 && preferred_bpp != 16 && preferred_bpp != 32) { + if (drm->client.device.info.ram_size <= 32 * 1024 * 1024) + preferred_bpp = 8; + else + if (drm->client.device.info.ram_size <= 64 * 1024 * 1024) + preferred_bpp = 16; + else + preferred_bpp = 32; + } + + /* disable all the possible outputs/crtcs before entering KMS mode */ + if (!drm_drv_uses_atomic_modeset(dev)) + drm_helper_disable_unused_functions(dev); + + ret = drm_fb_helper_initial_config(&fbcon->helper, preferred_bpp); + if (ret) + goto fini; + + if (fbcon->helper.info) + fbcon->helper.info->pixmap.buf_align = 4; + return 0; + +fini: + drm_fb_helper_fini(&fbcon->helper); +free: + kfree(fbcon); + drm->fbcon = NULL; + return ret; +} + +void +nouveau_fbcon_fini(struct drm_device *dev) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + + if (!drm->fbcon) + return; + + drm_kms_helper_poll_fini(dev); + nouveau_fbcon_accel_fini(dev); + nouveau_fbcon_destroy(dev, drm->fbcon); + kfree(drm->fbcon); + drm->fbcon = NULL; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/uoutp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/uoutp.c index d10ce1e04d32..4f0ca709c85a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/uoutp.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/uoutp.c @@ -104,6 +104,7 @@ nvkm_uoutp_mthd_infoframe(struct nvkm_outp *outp, 
void *argv, u32 argc) { struct nvkm_ior *ior = outp->ior; union nvif_outp_infoframe_args *args = argv; + ssize_t size = argc - sizeof(*args); if (argc < sizeof(args->v0) || args->v0.version != 0) return -ENOSYS; @@ -112,10 +113,10 @@ nvkm_uoutp_mthd_infoframe(struct nvkm_outp *outp, void *argv, u32 argc) switch (ior->func->hdmi ? args->v0.type : 0xff) { case NVIF_OUTP_INFOFRAME_V0_AVI: - ior->func->hdmi->infoframe_avi(ior, args->v0.head, argv, argc); + ior->func->hdmi->infoframe_avi(ior, args->v0.head, &args->v0.data, size); return 0; case NVIF_OUTP_INFOFRAME_V0_VSI: - ior->func->hdmi->infoframe_vsi(ior, args->v0.head, argv, argc); + ior->func->hdmi->infoframe_vsi(ior, args->v0.head, &args->v0.data, size); return 0; default: break; diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c index ed67dd25794c..98d8758048fc 100644 --- a/drivers/gpu/drm/omapdrm/omap_fbdev.c +++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c @@ -38,7 +38,7 @@ static struct drm_fb_helper *get_fb(struct fb_info *fbi); static void pan_worker(struct work_struct *work) { struct omap_fbdev *fbdev = container_of(work, struct omap_fbdev, work); - struct fb_info *fbi = fbdev->base.fbdev; + struct fb_info *fbi = fbdev->base.info; int npages; /* DMM roll shifts in 4K pages: */ @@ -161,7 +161,7 @@ static int omap_fbdev_create(struct drm_fb_helper *helper, goto fail; } - fbi = drm_fb_helper_alloc_fbi(helper); + fbi = drm_fb_helper_alloc_info(helper); if (IS_ERR(fbi)) { dev_err(dev->dev, "failed to allocate fb info\n"); ret = PTR_ERR(fbi); @@ -272,7 +272,7 @@ void omap_fbdev_fini(struct drm_device *dev) if (!helper) return; - drm_fb_helper_unregister_fbi(helper); + drm_fb_helper_unregister_info(helper); drm_fb_helper_fini(helper); diff --git a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c index 8e194dbc9506..3abc47521b2c 100644 --- a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c +++ b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c @@ -66,6 +66,8 @@ static int omap_gem_dmabuf_mmap(struct dma_buf *buffer, struct drm_gem_object *obj = buffer->priv; int ret = 0; + dma_resv_assert_held(buffer->resv); + ret = drm_gem_mmap_obj(obj, omap_gem_mmap_size(obj), vma); if (ret < 0) return ret; diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig index a582ddd583c2..737edcdf9eef 100644 --- a/drivers/gpu/drm/panel/Kconfig +++ b/drivers/gpu/drm/panel/Kconfig @@ -203,6 +203,16 @@ config DRM_PANEL_INNOLUX_P079ZCA 24 bit RGB per pixel. It provides a MIPI DSI interface to the host and has a built-in LED backlight. +config DRM_PANEL_JADARD_JD9365DA_H3 + tristate "Jadard JD9365DA-H3 WXGA DSI panel" + depends on OF + depends on DRM_MIPI_DSI + depends on BACKLIGHT_CLASS_DEVICE + help + Say Y here if you want to enable support for Jadard JD9365DA-H3 + WXGA MIPI DSI panel. The panel support TFT dot matrix LCD with + 800RGBx1280 dots at maximum. + config DRM_PANEL_JDI_LT070ME05000 tristate "JDI LT070ME05000 WUXGA DSI panel" depends on OF @@ -296,6 +306,15 @@ config DRM_PANEL_NEC_NL8048HL11 panel (found on the Zoom2/3/3630 SDP boards). To compile this driver as a module, choose M here. +config DRM_PANEL_NEWVISION_NV3051D + tristate "NewVision NV3051D DSI panel" + depends on OF + depends on DRM_MIPI_DSI + depends on BACKLIGHT_CLASS_DEVICE + help + This driver supports the NV3051D based panel found on the Anbernic + RG353P and RG353V. 
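[Editorial aside, not part of this series: the two new panel entries above (Jadard JD9365DA-H3 and NewVision NV3051D) are not bound to a display controller directly; a DSI host encoder resolves them through the OF graph at attach time. The sketch below is a hedged illustration of that consumer side — my_dsi_host_attach_panel and the port/endpoint numbers are placeholders, while drm_of_find_panel_or_bridge(), devm_drm_panel_bridge_add() and drm_bridge_attach() are the standard helpers used for this.]

#include <linux/device.h>
#include <linux/err.h>

#include <drm/drm_bridge.h>
#include <drm/drm_encoder.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>

/* Hypothetical DSI host helper; only the DRM calls are real API. */
static int my_dsi_host_attach_panel(struct device *dev,
				    struct drm_encoder *encoder)
{
	struct drm_panel *panel;
	struct drm_bridge *bridge;
	int ret;

	/* Port 1, endpoint 0: a common binding for a DSI output node. */
	ret = drm_of_find_panel_or_bridge(dev->of_node, 1, 0, &panel, &bridge);
	if (ret)
		return ret;	/* -EPROBE_DEFER until the panel driver probes */

	/* Wrap a bare panel in a panel-bridge so both cases attach alike. */
	if (panel) {
		bridge = devm_drm_panel_bridge_add(dev, panel);
		if (IS_ERR(bridge))
			return PTR_ERR(bridge);
	}

	return drm_bridge_attach(encoder, bridge, NULL, 0);
}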
+ config DRM_PANEL_NEWVISION_NV3052C tristate "NewVision NV3052C RGB/SPI panel" depends on OF && SPI diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile index 34e717382dbb..f8f9d9f6a307 100644 --- a/drivers/gpu/drm/panel/Makefile +++ b/drivers/gpu/drm/panel/Makefile @@ -18,6 +18,7 @@ obj-$(CONFIG_DRM_PANEL_ILITEK_ILI9341) += panel-ilitek-ili9341.o obj-$(CONFIG_DRM_PANEL_ILITEK_ILI9881C) += panel-ilitek-ili9881c.o obj-$(CONFIG_DRM_PANEL_INNOLUX_EJ030NA) += panel-innolux-ej030na.o obj-$(CONFIG_DRM_PANEL_INNOLUX_P079ZCA) += panel-innolux-p079zca.o +obj-$(CONFIG_DRM_PANEL_JADARD_JD9365DA_H3) += panel-jadard-jd9365da-h3.o obj-$(CONFIG_DRM_PANEL_JDI_LT070ME05000) += panel-jdi-lt070me05000.o obj-$(CONFIG_DRM_PANEL_JDI_R63452) += panel-jdi-fhd-r63452.o obj-$(CONFIG_DRM_PANEL_KHADAS_TS050) += panel-khadas-ts050.o @@ -27,6 +28,7 @@ obj-$(CONFIG_DRM_PANEL_LEADTEK_LTK500HD1829) += panel-leadtek-ltk500hd1829.o obj-$(CONFIG_DRM_PANEL_LG_LB035Q02) += panel-lg-lb035q02.o obj-$(CONFIG_DRM_PANEL_LG_LG4573) += panel-lg-lg4573.o obj-$(CONFIG_DRM_PANEL_NEC_NL8048HL11) += panel-nec-nl8048hl11.o +obj-$(CONFIG_DRM_PANEL_NEWVISION_NV3051D) += panel-newvision-nv3051d.o obj-$(CONFIG_DRM_PANEL_NEWVISION_NV3052C) += panel-newvision-nv3052c.o obj-$(CONFIG_DRM_PANEL_NOVATEK_NT35510) += panel-novatek-nt35510.o obj-$(CONFIG_DRM_PANEL_NOVATEK_NT35560) += panel-novatek-nt35560.o diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9341.c b/drivers/gpu/drm/panel/panel-ilitek-ili9341.c index 39dc40cf681f..384a724f2822 100644 --- a/drivers/gpu/drm/panel/panel-ilitek-ili9341.c +++ b/drivers/gpu/drm/panel/panel-ilitek-ili9341.c @@ -18,6 +18,7 @@ * Copyright 2018 David Lechner <david@lechnology.com> */ +#include <linux/backlight.h> #include <linux/bitops.h> #include <linux/delay.h> #include <linux/gpio/consumer.h> @@ -30,7 +31,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_drv.h> -#include <drm/drm_fb_helper.h> +#include <drm/drm_fbdev_generic.h> #include <drm/drm_gem_atomic_helper.h> #include <drm/drm_gem_dma_helper.h> #include <drm/drm_gem_framebuffer_helper.h> diff --git a/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c b/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c new file mode 100644 index 000000000000..48c1702a863b --- /dev/null +++ b/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c @@ -0,0 +1,473 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (c) 2019 Radxa Limited + * Copyright (c) 2022 Edgeble AI Technologies Pvt. Ltd. 
+ * + * Author: + * - Jagan Teki <jagan@amarulasolutions.com> + * - Stephen Chen <stephen@radxa.com> + */ + +#include <drm/drm_mipi_dsi.h> +#include <drm/drm_modes.h> +#include <drm/drm_panel.h> +#include <drm/drm_print.h> + +#include <linux/gpio/consumer.h> +#include <linux/delay.h> +#include <linux/module.h> +#include <linux/of_device.h> +#include <linux/regulator/consumer.h> + +#define JD9365DA_INIT_CMD_LEN 2 + +struct jadard_init_cmd { + u8 data[JD9365DA_INIT_CMD_LEN]; +}; + +struct jadard_panel_desc { + const struct drm_display_mode mode; + unsigned int lanes; + enum mipi_dsi_pixel_format format; + const struct jadard_init_cmd *init_cmds; + u32 num_init_cmds; +}; + +struct jadard { + struct drm_panel panel; + struct mipi_dsi_device *dsi; + const struct jadard_panel_desc *desc; + + struct regulator *vdd; + struct regulator *vccio; + struct gpio_desc *reset; +}; + +static inline struct jadard *panel_to_jadard(struct drm_panel *panel) +{ + return container_of(panel, struct jadard, panel); +} + +static int jadard_enable(struct drm_panel *panel) +{ + struct device *dev = panel->dev; + struct jadard *jadard = panel_to_jadard(panel); + const struct jadard_panel_desc *desc = jadard->desc; + struct mipi_dsi_device *dsi = jadard->dsi; + unsigned int i; + int err; + + msleep(10); + + for (i = 0; i < desc->num_init_cmds; i++) { + const struct jadard_init_cmd *cmd = &desc->init_cmds[i]; + + err = mipi_dsi_dcs_write_buffer(dsi, cmd->data, JD9365DA_INIT_CMD_LEN); + if (err < 0) + return err; + } + + msleep(120); + + err = mipi_dsi_dcs_exit_sleep_mode(dsi); + if (err < 0) + DRM_DEV_ERROR(dev, "failed to exit sleep mode ret = %d\n", err); + + err = mipi_dsi_dcs_set_display_on(dsi); + if (err < 0) + DRM_DEV_ERROR(dev, "failed to set display on ret = %d\n", err); + + return 0; +} + +static int jadard_disable(struct drm_panel *panel) +{ + struct device *dev = panel->dev; + struct jadard *jadard = panel_to_jadard(panel); + int ret; + + ret = mipi_dsi_dcs_set_display_off(jadard->dsi); + if (ret < 0) + DRM_DEV_ERROR(dev, "failed to set display off: %d\n", ret); + + ret = mipi_dsi_dcs_enter_sleep_mode(jadard->dsi); + if (ret < 0) + DRM_DEV_ERROR(dev, "failed to enter sleep mode: %d\n", ret); + + return 0; +} + +static int jadard_prepare(struct drm_panel *panel) +{ + struct jadard *jadard = panel_to_jadard(panel); + int ret; + + ret = regulator_enable(jadard->vccio); + if (ret) + return ret; + + ret = regulator_enable(jadard->vdd); + if (ret) + return ret; + + gpiod_set_value(jadard->reset, 1); + msleep(5); + + gpiod_set_value(jadard->reset, 0); + msleep(10); + + gpiod_set_value(jadard->reset, 1); + msleep(120); + + return 0; +} + +static int jadard_unprepare(struct drm_panel *panel) +{ + struct jadard *jadard = panel_to_jadard(panel); + + gpiod_set_value(jadard->reset, 1); + msleep(120); + + regulator_disable(jadard->vdd); + regulator_disable(jadard->vccio); + + return 0; +} + +static int jadard_get_modes(struct drm_panel *panel, + struct drm_connector *connector) +{ + struct jadard *jadard = panel_to_jadard(panel); + const struct drm_display_mode *desc_mode = &jadard->desc->mode; + struct drm_display_mode *mode; + + mode = drm_mode_duplicate(connector->dev, desc_mode); + if (!mode) { + DRM_DEV_ERROR(&jadard->dsi->dev, "failed to add mode %ux%ux@%u\n", + desc_mode->hdisplay, desc_mode->vdisplay, + drm_mode_vrefresh(desc_mode)); + return -ENOMEM; + } + + drm_mode_set_name(mode); + drm_mode_probed_add(connector, mode); + + connector->display_info.width_mm = mode->width_mm; + connector->display_info.height_mm = 
mode->height_mm; + + return 1; +} + +static const struct drm_panel_funcs jadard_funcs = { + .disable = jadard_disable, + .unprepare = jadard_unprepare, + .prepare = jadard_prepare, + .enable = jadard_enable, + .get_modes = jadard_get_modes, +}; + +static const struct jadard_init_cmd cz101b4001_init_cmds[] = { + { .data = { 0xE0, 0x00 } }, + { .data = { 0xE1, 0x93 } }, + { .data = { 0xE2, 0x65 } }, + { .data = { 0xE3, 0xF8 } }, + { .data = { 0x80, 0x03 } }, + { .data = { 0xE0, 0x01 } }, + { .data = { 0x00, 0x00 } }, + { .data = { 0x01, 0x3B } }, + { .data = { 0x0C, 0x74 } }, + { .data = { 0x17, 0x00 } }, + { .data = { 0x18, 0xAF } }, + { .data = { 0x19, 0x00 } }, + { .data = { 0x1A, 0x00 } }, + { .data = { 0x1B, 0xAF } }, + { .data = { 0x1C, 0x00 } }, + { .data = { 0x35, 0x26 } }, + { .data = { 0x37, 0x09 } }, + { .data = { 0x38, 0x04 } }, + { .data = { 0x39, 0x00 } }, + { .data = { 0x3A, 0x01 } }, + { .data = { 0x3C, 0x78 } }, + { .data = { 0x3D, 0xFF } }, + { .data = { 0x3E, 0xFF } }, + { .data = { 0x3F, 0x7F } }, + { .data = { 0x40, 0x06 } }, + { .data = { 0x41, 0xA0 } }, + { .data = { 0x42, 0x81 } }, + { .data = { 0x43, 0x14 } }, + { .data = { 0x44, 0x23 } }, + { .data = { 0x45, 0x28 } }, + { .data = { 0x55, 0x02 } }, + { .data = { 0x57, 0x69 } }, + { .data = { 0x59, 0x0A } }, + { .data = { 0x5A, 0x2A } }, + { .data = { 0x5B, 0x17 } }, + { .data = { 0x5D, 0x7F } }, + { .data = { 0x5E, 0x6B } }, + { .data = { 0x5F, 0x5C } }, + { .data = { 0x60, 0x4F } }, + { .data = { 0x61, 0x4D } }, + { .data = { 0x62, 0x3F } }, + { .data = { 0x63, 0x42 } }, + { .data = { 0x64, 0x2B } }, + { .data = { 0x65, 0x44 } }, + { .data = { 0x66, 0x43 } }, + { .data = { 0x67, 0x43 } }, + { .data = { 0x68, 0x63 } }, + { .data = { 0x69, 0x52 } }, + { .data = { 0x6A, 0x5A } }, + { .data = { 0x6B, 0x4F } }, + { .data = { 0x6C, 0x4E } }, + { .data = { 0x6D, 0x20 } }, + { .data = { 0x6E, 0x0F } }, + { .data = { 0x6F, 0x00 } }, + { .data = { 0x70, 0x7F } }, + { .data = { 0x71, 0x6B } }, + { .data = { 0x72, 0x5C } }, + { .data = { 0x73, 0x4F } }, + { .data = { 0x74, 0x4D } }, + { .data = { 0x75, 0x3F } }, + { .data = { 0x76, 0x42 } }, + { .data = { 0x77, 0x2B } }, + { .data = { 0x78, 0x44 } }, + { .data = { 0x79, 0x43 } }, + { .data = { 0x7A, 0x43 } }, + { .data = { 0x7B, 0x63 } }, + { .data = { 0x7C, 0x52 } }, + { .data = { 0x7D, 0x5A } }, + { .data = { 0x7E, 0x4F } }, + { .data = { 0x7F, 0x4E } }, + { .data = { 0x80, 0x20 } }, + { .data = { 0x81, 0x0F } }, + { .data = { 0x82, 0x00 } }, + { .data = { 0xE0, 0x02 } }, + { .data = { 0x00, 0x02 } }, + { .data = { 0x01, 0x02 } }, + { .data = { 0x02, 0x00 } }, + { .data = { 0x03, 0x00 } }, + { .data = { 0x04, 0x1E } }, + { .data = { 0x05, 0x1E } }, + { .data = { 0x06, 0x1F } }, + { .data = { 0x07, 0x1F } }, + { .data = { 0x08, 0x1F } }, + { .data = { 0x09, 0x17 } }, + { .data = { 0x0A, 0x17 } }, + { .data = { 0x0B, 0x37 } }, + { .data = { 0x0C, 0x37 } }, + { .data = { 0x0D, 0x47 } }, + { .data = { 0x0E, 0x47 } }, + { .data = { 0x0F, 0x45 } }, + { .data = { 0x10, 0x45 } }, + { .data = { 0x11, 0x4B } }, + { .data = { 0x12, 0x4B } }, + { .data = { 0x13, 0x49 } }, + { .data = { 0x14, 0x49 } }, + { .data = { 0x15, 0x1F } }, + { .data = { 0x16, 0x01 } }, + { .data = { 0x17, 0x01 } }, + { .data = { 0x18, 0x00 } }, + { .data = { 0x19, 0x00 } }, + { .data = { 0x1A, 0x1E } }, + { .data = { 0x1B, 0x1E } }, + { .data = { 0x1C, 0x1F } }, + { .data = { 0x1D, 0x1F } }, + { .data = { 0x1E, 0x1F } }, + { .data = { 0x1F, 0x17 } }, + { .data = { 0x20, 0x17 } }, + { .data = { 0x21, 0x37 } }, + { 
.data = { 0x22, 0x37 } }, + { .data = { 0x23, 0x46 } }, + { .data = { 0x24, 0x46 } }, + { .data = { 0x25, 0x44 } }, + { .data = { 0x26, 0x44 } }, + { .data = { 0x27, 0x4A } }, + { .data = { 0x28, 0x4A } }, + { .data = { 0x29, 0x48 } }, + { .data = { 0x2A, 0x48 } }, + { .data = { 0x2B, 0x1F } }, + { .data = { 0x2C, 0x01 } }, + { .data = { 0x2D, 0x01 } }, + { .data = { 0x2E, 0x00 } }, + { .data = { 0x2F, 0x00 } }, + { .data = { 0x30, 0x1F } }, + { .data = { 0x31, 0x1F } }, + { .data = { 0x32, 0x1E } }, + { .data = { 0x33, 0x1E } }, + { .data = { 0x34, 0x1F } }, + { .data = { 0x35, 0x17 } }, + { .data = { 0x36, 0x17 } }, + { .data = { 0x37, 0x37 } }, + { .data = { 0x38, 0x37 } }, + { .data = { 0x39, 0x08 } }, + { .data = { 0x3A, 0x08 } }, + { .data = { 0x3B, 0x0A } }, + { .data = { 0x3C, 0x0A } }, + { .data = { 0x3D, 0x04 } }, + { .data = { 0x3E, 0x04 } }, + { .data = { 0x3F, 0x06 } }, + { .data = { 0x40, 0x06 } }, + { .data = { 0x41, 0x1F } }, + { .data = { 0x42, 0x02 } }, + { .data = { 0x43, 0x02 } }, + { .data = { 0x44, 0x00 } }, + { .data = { 0x45, 0x00 } }, + { .data = { 0x46, 0x1F } }, + { .data = { 0x47, 0x1F } }, + { .data = { 0x48, 0x1E } }, + { .data = { 0x49, 0x1E } }, + { .data = { 0x4A, 0x1F } }, + { .data = { 0x4B, 0x17 } }, + { .data = { 0x4C, 0x17 } }, + { .data = { 0x4D, 0x37 } }, + { .data = { 0x4E, 0x37 } }, + { .data = { 0x4F, 0x09 } }, + { .data = { 0x50, 0x09 } }, + { .data = { 0x51, 0x0B } }, + { .data = { 0x52, 0x0B } }, + { .data = { 0x53, 0x05 } }, + { .data = { 0x54, 0x05 } }, + { .data = { 0x55, 0x07 } }, + { .data = { 0x56, 0x07 } }, + { .data = { 0x57, 0x1F } }, + { .data = { 0x58, 0x40 } }, + { .data = { 0x5B, 0x30 } }, + { .data = { 0x5C, 0x16 } }, + { .data = { 0x5D, 0x34 } }, + { .data = { 0x5E, 0x05 } }, + { .data = { 0x5F, 0x02 } }, + { .data = { 0x63, 0x00 } }, + { .data = { 0x64, 0x6A } }, + { .data = { 0x67, 0x73 } }, + { .data = { 0x68, 0x1D } }, + { .data = { 0x69, 0x08 } }, + { .data = { 0x6A, 0x6A } }, + { .data = { 0x6B, 0x08 } }, + { .data = { 0x6C, 0x00 } }, + { .data = { 0x6D, 0x00 } }, + { .data = { 0x6E, 0x00 } }, + { .data = { 0x6F, 0x88 } }, + { .data = { 0x75, 0xFF } }, + { .data = { 0x77, 0xDD } }, + { .data = { 0x78, 0x3F } }, + { .data = { 0x79, 0x15 } }, + { .data = { 0x7A, 0x17 } }, + { .data = { 0x7D, 0x14 } }, + { .data = { 0x7E, 0x82 } }, + { .data = { 0xE0, 0x04 } }, + { .data = { 0x00, 0x0E } }, + { .data = { 0x02, 0xB3 } }, + { .data = { 0x09, 0x61 } }, + { .data = { 0x0E, 0x48 } }, + { .data = { 0xE0, 0x00 } }, + { .data = { 0xE6, 0x02 } }, + { .data = { 0xE7, 0x0C } }, +}; + +static const struct jadard_panel_desc cz101b4001_desc = { + .mode = { + .clock = 70000, + + .hdisplay = 800, + .hsync_start = 800 + 40, + .hsync_end = 800 + 40 + 18, + .htotal = 800 + 40 + 18 + 20, + + .vdisplay = 1280, + .vsync_start = 1280 + 20, + .vsync_end = 1280 + 20 + 4, + .vtotal = 1280 + 20 + 4 + 20, + + .width_mm = 62, + .height_mm = 110, + .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED, + }, + .lanes = 4, + .format = MIPI_DSI_FMT_RGB888, + .init_cmds = cz101b4001_init_cmds, + .num_init_cmds = ARRAY_SIZE(cz101b4001_init_cmds), +}; + +static int jadard_dsi_probe(struct mipi_dsi_device *dsi) +{ + struct device *dev = &dsi->dev; + const struct jadard_panel_desc *desc; + struct jadard *jadard; + int ret; + + jadard = devm_kzalloc(&dsi->dev, sizeof(*jadard), GFP_KERNEL); + if (!jadard) + return -ENOMEM; + + desc = of_device_get_match_data(dev); + dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST | + MIPI_DSI_MODE_NO_EOT_PACKET; + 
dsi->format = desc->format; + dsi->lanes = desc->lanes; + + jadard->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW); + if (IS_ERR(jadard->reset)) { + DRM_DEV_ERROR(&dsi->dev, "failed to get our reset GPIO\n"); + return PTR_ERR(jadard->reset); + } + + jadard->vdd = devm_regulator_get(dev, "vdd"); + if (IS_ERR(jadard->vdd)) { + DRM_DEV_ERROR(&dsi->dev, "failed to get vdd regulator\n"); + return PTR_ERR(jadard->vdd); + } + + jadard->vccio = devm_regulator_get(dev, "vccio"); + if (IS_ERR(jadard->vccio)) { + DRM_DEV_ERROR(&dsi->dev, "failed to get vccio regulator\n"); + return PTR_ERR(jadard->vccio); + } + + drm_panel_init(&jadard->panel, dev, &jadard_funcs, + DRM_MODE_CONNECTOR_DSI); + + ret = drm_panel_of_backlight(&jadard->panel); + if (ret) + return ret; + + drm_panel_add(&jadard->panel); + + mipi_dsi_set_drvdata(dsi, jadard); + jadard->dsi = dsi; + jadard->desc = desc; + + ret = mipi_dsi_attach(dsi); + if (ret < 0) + drm_panel_remove(&jadard->panel); + + return ret; +} + +static void jadard_dsi_remove(struct mipi_dsi_device *dsi) +{ + struct jadard *jadard = mipi_dsi_get_drvdata(dsi); + + mipi_dsi_detach(dsi); + drm_panel_remove(&jadard->panel); +} + +static const struct of_device_id jadard_of_match[] = { + { .compatible = "chongzhou,cz101b4001", .data = &cz101b4001_desc }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, jadard_of_match); + +static struct mipi_dsi_driver jadard_driver = { + .probe = jadard_dsi_probe, + .remove = jadard_dsi_remove, + .driver = { + .name = "jadard-jd9365da", + .of_match_table = jadard_of_match, + }, +}; +module_mipi_dsi_driver(jadard_driver); + +MODULE_AUTHOR("Jagan Teki <jagan@edgeble.ai>"); +MODULE_AUTHOR("Stephen Chen <stephen@radxa.com>"); +MODULE_DESCRIPTION("Jadard JD9365DA-H3 WXGA DSI panel"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/panel/panel-newvision-nv3051d.c b/drivers/gpu/drm/panel/panel-newvision-nv3051d.c new file mode 100644 index 000000000000..a07958038ffd --- /dev/null +++ b/drivers/gpu/drm/panel/panel-newvision-nv3051d.c @@ -0,0 +1,504 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * NV3051D MIPI-DSI panel driver for Anbernic RG353x + * Copyright (C) 2022 Chris Morgan + * + * based on + * + * Elida kd35t133 3.5" MIPI-DSI panel driver + * Copyright (C) Theobroma Systems 2020 + */ + +#include <linux/delay.h> +#include <linux/gpio/consumer.h> +#include <linux/media-bus-format.h> +#include <linux/module.h> +#include <linux/of_device.h> +#include <linux/regulator/consumer.h> + +#include <video/display_timing.h> +#include <video/mipi_display.h> + +#include <drm/drm_mipi_dsi.h> +#include <drm/drm_modes.h> +#include <drm/drm_panel.h> + +struct nv3051d_panel_info { + const struct drm_display_mode *display_modes; + unsigned int num_modes; + u16 width_mm, height_mm; + u32 bus_flags; +}; + +struct panel_nv3051d { + struct device *dev; + struct drm_panel panel; + struct gpio_desc *reset_gpio; + const struct nv3051d_panel_info *panel_info; + struct regulator *vdd; +}; + +static inline struct panel_nv3051d *panel_to_panelnv3051d(struct drm_panel *panel) +{ + return container_of(panel, struct panel_nv3051d, panel); +} + +static int panel_nv3051d_init_sequence(struct panel_nv3051d *ctx) +{ + struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev); + + /* + * Init sequence was supplied by device vendor with no + * documentation. 
+ */ + + mipi_dsi_dcs_write_seq(dsi, 0xFF, 0x30); + mipi_dsi_dcs_write_seq(dsi, 0xFF, 0x52); + mipi_dsi_dcs_write_seq(dsi, 0xFF, 0x01); + mipi_dsi_dcs_write_seq(dsi, 0xE3, 0x00); + mipi_dsi_dcs_write_seq(dsi, 0x03, 0x40); + mipi_dsi_dcs_write_seq(dsi, 0x04, 0x00); + mipi_dsi_dcs_write_seq(dsi, 0x05, 0x03); + mipi_dsi_dcs_write_seq(dsi, 0x24, 0x12); + mipi_dsi_dcs_write_seq(dsi, 0x25, 0x1E); + mipi_dsi_dcs_write_seq(dsi, 0x26, 0x28); + mipi_dsi_dcs_write_seq(dsi, 0x27, 0x52); + mipi_dsi_dcs_write_seq(dsi, 0x28, 0x57); + mipi_dsi_dcs_write_seq(dsi, 0x29, 0x01); + mipi_dsi_dcs_write_seq(dsi, 0x2A, 0xDF); + mipi_dsi_dcs_write_seq(dsi, 0x38, 0x9C); + mipi_dsi_dcs_write_seq(dsi, 0x39, 0xA7); + mipi_dsi_dcs_write_seq(dsi, 0x3A, 0x53); + mipi_dsi_dcs_write_seq(dsi, 0x44, 0x00); + mipi_dsi_dcs_write_seq(dsi, 0x49, 0x3C); + mipi_dsi_dcs_write_seq(dsi, 0x59, 0xFE); + mipi_dsi_dcs_write_seq(dsi, 0x5C, 0x00); + mipi_dsi_dcs_write_seq(dsi, 0x91, 0x77); + mipi_dsi_dcs_write_seq(dsi, 0x92, 0x77); + mipi_dsi_dcs_write_seq(dsi, 0xA0, 0x55); + mipi_dsi_dcs_write_seq(dsi, 0xA1, 0x50); + mipi_dsi_dcs_write_seq(dsi, 0xA4, 0x9C); + mipi_dsi_dcs_write_seq(dsi, 0xA7, 0x02); + mipi_dsi_dcs_write_seq(dsi, 0xA8, 0x01); + mipi_dsi_dcs_write_seq(dsi, 0xA9, 0x01); + mipi_dsi_dcs_write_seq(dsi, 0xAA, 0xFC); + mipi_dsi_dcs_write_seq(dsi, 0xAB, 0x28); + mipi_dsi_dcs_write_seq(dsi, 0xAC, 0x06); + mipi_dsi_dcs_write_seq(dsi, 0xAD, 0x06); + mipi_dsi_dcs_write_seq(dsi, 0xAE, 0x06); + mipi_dsi_dcs_write_seq(dsi, 0xAF, 0x03); + mipi_dsi_dcs_write_seq(dsi, 0xB0, 0x08); + mipi_dsi_dcs_write_seq(dsi, 0xB1, 0x26); + mipi_dsi_dcs_write_seq(dsi, 0xB2, 0x28); + mipi_dsi_dcs_write_seq(dsi, 0xB3, 0x28); + mipi_dsi_dcs_write_seq(dsi, 0xB4, 0x33); + mipi_dsi_dcs_write_seq(dsi, 0xB5, 0x08); + mipi_dsi_dcs_write_seq(dsi, 0xB6, 0x26); + mipi_dsi_dcs_write_seq(dsi, 0xB7, 0x08); + mipi_dsi_dcs_write_seq(dsi, 0xB8, 0x26); + mipi_dsi_dcs_write_seq(dsi, 0xFF, 0x30); + mipi_dsi_dcs_write_seq(dsi, 0xFF, 0x52); + mipi_dsi_dcs_write_seq(dsi, 0xFF, 0x02); + mipi_dsi_dcs_write_seq(dsi, 0xB1, 0x0E); + mipi_dsi_dcs_write_seq(dsi, 0xD1, 0x0E); + mipi_dsi_dcs_write_seq(dsi, 0xB4, 0x29); + mipi_dsi_dcs_write_seq(dsi, 0xD4, 0x2B); + mipi_dsi_dcs_write_seq(dsi, 0xB2, 0x0C); + mipi_dsi_dcs_write_seq(dsi, 0xD2, 0x0A); + mipi_dsi_dcs_write_seq(dsi, 0xB3, 0x28); + mipi_dsi_dcs_write_seq(dsi, 0xD3, 0x28); + mipi_dsi_dcs_write_seq(dsi, 0xB6, 0x11); + mipi_dsi_dcs_write_seq(dsi, 0xD6, 0x0D); + mipi_dsi_dcs_write_seq(dsi, 0xB7, 0x32); + mipi_dsi_dcs_write_seq(dsi, 0xD7, 0x30); + mipi_dsi_dcs_write_seq(dsi, 0xC1, 0x04); + mipi_dsi_dcs_write_seq(dsi, 0xE1, 0x06); + mipi_dsi_dcs_write_seq(dsi, 0xB8, 0x0A); + mipi_dsi_dcs_write_seq(dsi, 0xD8, 0x0A); + mipi_dsi_dcs_write_seq(dsi, 0xB9, 0x01); + mipi_dsi_dcs_write_seq(dsi, 0xD9, 0x01); + mipi_dsi_dcs_write_seq(dsi, 0xBD, 0x13); + mipi_dsi_dcs_write_seq(dsi, 0xDD, 0x13); + mipi_dsi_dcs_write_seq(dsi, 0xBC, 0x11); + mipi_dsi_dcs_write_seq(dsi, 0xDC, 0x11); + mipi_dsi_dcs_write_seq(dsi, 0xBB, 0x0F); + mipi_dsi_dcs_write_seq(dsi, 0xDB, 0x0F); + mipi_dsi_dcs_write_seq(dsi, 0xBA, 0x0F); + mipi_dsi_dcs_write_seq(dsi, 0xDA, 0x0F); + mipi_dsi_dcs_write_seq(dsi, 0xBE, 0x18); + mipi_dsi_dcs_write_seq(dsi, 0xDE, 0x18); + mipi_dsi_dcs_write_seq(dsi, 0xBF, 0x0F); + mipi_dsi_dcs_write_seq(dsi, 0xDF, 0x0F); + mipi_dsi_dcs_write_seq(dsi, 0xC0, 0x17); + mipi_dsi_dcs_write_seq(dsi, 0xE0, 0x17); + mipi_dsi_dcs_write_seq(dsi, 0xB5, 0x3B); + mipi_dsi_dcs_write_seq(dsi, 0xD5, 0x3C); + mipi_dsi_dcs_write_seq(dsi, 0xB0, 0x0B); + 
mipi_dsi_dcs_write_seq(dsi, 0xD0, 0x0C); + mipi_dsi_dcs_write_seq(dsi, 0xFF, 0x30); + mipi_dsi_dcs_write_seq(dsi, 0xFF, 0x52); + mipi_dsi_dcs_write_seq(dsi, 0xFF, 0x03); + mipi_dsi_dcs_write_seq(dsi, 0x00, 0x2A); + mipi_dsi_dcs_write_seq(dsi, 0x01, 0x2A); + mipi_dsi_dcs_write_seq(dsi, 0x02, 0x2A); + mipi_dsi_dcs_write_seq(dsi, 0x03, 0x2A); + mipi_dsi_dcs_write_seq(dsi, 0x04, 0x61); + mipi_dsi_dcs_write_seq(dsi, 0x05, 0x80); + mipi_dsi_dcs_write_seq(dsi, 0x06, 0xC7); + mipi_dsi_dcs_write_seq(dsi, 0x07, 0x01); + mipi_dsi_dcs_write_seq(dsi, 0x08, 0x82); + mipi_dsi_dcs_write_seq(dsi, 0x09, 0x83); + mipi_dsi_dcs_write_seq(dsi, 0x30, 0x2A); + mipi_dsi_dcs_write_seq(dsi, 0x31, 0x2A); + mipi_dsi_dcs_write_seq(dsi, 0x32, 0x2A); + mipi_dsi_dcs_write_seq(dsi, 0x33, 0x2A); + mipi_dsi_dcs_write_seq(dsi, 0x34, 0x61); + mipi_dsi_dcs_write_seq(dsi, 0x35, 0xC5); + mipi_dsi_dcs_write_seq(dsi, 0x36, 0x80); + mipi_dsi_dcs_write_seq(dsi, 0x37, 0x23); + mipi_dsi_dcs_write_seq(dsi, 0x40, 0x82); + mipi_dsi_dcs_write_seq(dsi, 0x41, 0x83); + mipi_dsi_dcs_write_seq(dsi, 0x42, 0x80); + mipi_dsi_dcs_write_seq(dsi, 0x43, 0x81); + mipi_dsi_dcs_write_seq(dsi, 0x44, 0x11); + mipi_dsi_dcs_write_seq(dsi, 0x45, 0xF2); + mipi_dsi_dcs_write_seq(dsi, 0x46, 0xF1); + mipi_dsi_dcs_write_seq(dsi, 0x47, 0x11); + mipi_dsi_dcs_write_seq(dsi, 0x48, 0xF4); + mipi_dsi_dcs_write_seq(dsi, 0x49, 0xF3); + mipi_dsi_dcs_write_seq(dsi, 0x50, 0x02); + mipi_dsi_dcs_write_seq(dsi, 0x51, 0x01); + mipi_dsi_dcs_write_seq(dsi, 0x52, 0x04); + mipi_dsi_dcs_write_seq(dsi, 0x53, 0x03); + mipi_dsi_dcs_write_seq(dsi, 0x54, 0x11); + mipi_dsi_dcs_write_seq(dsi, 0x55, 0xF6); + mipi_dsi_dcs_write_seq(dsi, 0x56, 0xF5); + mipi_dsi_dcs_write_seq(dsi, 0x57, 0x11); + mipi_dsi_dcs_write_seq(dsi, 0x58, 0xF8); + mipi_dsi_dcs_write_seq(dsi, 0x59, 0xF7); + mipi_dsi_dcs_write_seq(dsi, 0x7E, 0x02); + mipi_dsi_dcs_write_seq(dsi, 0x7F, 0x80); + mipi_dsi_dcs_write_seq(dsi, 0xE0, 0x5A); + mipi_dsi_dcs_write_seq(dsi, 0xB1, 0x00); + mipi_dsi_dcs_write_seq(dsi, 0xB4, 0x0E); + mipi_dsi_dcs_write_seq(dsi, 0xB5, 0x0F); + mipi_dsi_dcs_write_seq(dsi, 0xB6, 0x04); + mipi_dsi_dcs_write_seq(dsi, 0xB7, 0x07); + mipi_dsi_dcs_write_seq(dsi, 0xB8, 0x06); + mipi_dsi_dcs_write_seq(dsi, 0xB9, 0x05); + mipi_dsi_dcs_write_seq(dsi, 0xBA, 0x0F); + mipi_dsi_dcs_write_seq(dsi, 0xC7, 0x00); + mipi_dsi_dcs_write_seq(dsi, 0xCA, 0x0E); + mipi_dsi_dcs_write_seq(dsi, 0xCB, 0x0F); + mipi_dsi_dcs_write_seq(dsi, 0xCC, 0x04); + mipi_dsi_dcs_write_seq(dsi, 0xCD, 0x07); + mipi_dsi_dcs_write_seq(dsi, 0xCE, 0x06); + mipi_dsi_dcs_write_seq(dsi, 0xCF, 0x05); + mipi_dsi_dcs_write_seq(dsi, 0xD0, 0x0F); + mipi_dsi_dcs_write_seq(dsi, 0x81, 0x0F); + mipi_dsi_dcs_write_seq(dsi, 0x84, 0x0E); + mipi_dsi_dcs_write_seq(dsi, 0x85, 0x0F); + mipi_dsi_dcs_write_seq(dsi, 0x86, 0x07); + mipi_dsi_dcs_write_seq(dsi, 0x87, 0x04); + mipi_dsi_dcs_write_seq(dsi, 0x88, 0x05); + mipi_dsi_dcs_write_seq(dsi, 0x89, 0x06); + mipi_dsi_dcs_write_seq(dsi, 0x8A, 0x00); + mipi_dsi_dcs_write_seq(dsi, 0x97, 0x0F); + mipi_dsi_dcs_write_seq(dsi, 0x9A, 0x0E); + mipi_dsi_dcs_write_seq(dsi, 0x9B, 0x0F); + mipi_dsi_dcs_write_seq(dsi, 0x9C, 0x07); + mipi_dsi_dcs_write_seq(dsi, 0x9D, 0x04); + mipi_dsi_dcs_write_seq(dsi, 0x9E, 0x05); + mipi_dsi_dcs_write_seq(dsi, 0x9F, 0x06); + mipi_dsi_dcs_write_seq(dsi, 0xA0, 0x00); + mipi_dsi_dcs_write_seq(dsi, 0xFF, 0x30); + mipi_dsi_dcs_write_seq(dsi, 0xFF, 0x52); + mipi_dsi_dcs_write_seq(dsi, 0xFF, 0x02); + mipi_dsi_dcs_write_seq(dsi, 0x01, 0x01); + mipi_dsi_dcs_write_seq(dsi, 0x02, 0xDA); + mipi_dsi_dcs_write_seq(dsi, 
0x03, 0xBA); + mipi_dsi_dcs_write_seq(dsi, 0x04, 0xA8); + mipi_dsi_dcs_write_seq(dsi, 0x05, 0x9A); + mipi_dsi_dcs_write_seq(dsi, 0x06, 0x70); + mipi_dsi_dcs_write_seq(dsi, 0x07, 0xFF); + mipi_dsi_dcs_write_seq(dsi, 0x08, 0x91); + mipi_dsi_dcs_write_seq(dsi, 0x09, 0x90); + mipi_dsi_dcs_write_seq(dsi, 0x0A, 0xFF); + mipi_dsi_dcs_write_seq(dsi, 0x0B, 0x8F); + mipi_dsi_dcs_write_seq(dsi, 0x0C, 0x60); + mipi_dsi_dcs_write_seq(dsi, 0x0D, 0x58); + mipi_dsi_dcs_write_seq(dsi, 0x0E, 0x48); + mipi_dsi_dcs_write_seq(dsi, 0x0F, 0x38); + mipi_dsi_dcs_write_seq(dsi, 0x10, 0x2B); + mipi_dsi_dcs_write_seq(dsi, 0xFF, 0x30); + mipi_dsi_dcs_write_seq(dsi, 0xFF, 0x52); + mipi_dsi_dcs_write_seq(dsi, 0xFF, 0x00); + mipi_dsi_dcs_write_seq(dsi, 0x36, 0x02); + mipi_dsi_dcs_write_seq(dsi, 0x3A, 0x70); + + dev_dbg(ctx->dev, "Panel init sequence done\n"); + + return 0; +} + +static int panel_nv3051d_unprepare(struct drm_panel *panel) +{ + struct panel_nv3051d *ctx = panel_to_panelnv3051d(panel); + struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev); + int ret; + + ret = mipi_dsi_dcs_set_display_off(dsi); + if (ret < 0) + dev_err(ctx->dev, "failed to set display off: %d\n", ret); + + msleep(20); + + ret = mipi_dsi_dcs_enter_sleep_mode(dsi); + if (ret < 0) { + dev_err(ctx->dev, "failed to enter sleep mode: %d\n", ret); + return ret; + } + + usleep_range(10000, 15000); + + regulator_disable(ctx->vdd); + + return 0; +} + +static int panel_nv3051d_prepare(struct drm_panel *panel) +{ + struct panel_nv3051d *ctx = panel_to_panelnv3051d(panel); + struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev); + int ret; + + dev_dbg(ctx->dev, "Resetting the panel\n"); + ret = regulator_enable(ctx->vdd); + if (ret < 0) { + dev_err(ctx->dev, "Failed to enable vdd supply: %d\n", ret); + return ret; + } + + usleep_range(2000, 3000); + gpiod_set_value_cansleep(ctx->reset_gpio, 1); + msleep(150); + gpiod_set_value_cansleep(ctx->reset_gpio, 0); + msleep(20); + + ret = panel_nv3051d_init_sequence(ctx); + if (ret < 0) { + dev_err(ctx->dev, "Panel init sequence failed: %d\n", ret); + goto disable_vdd; + } + + ret = mipi_dsi_dcs_exit_sleep_mode(dsi); + if (ret < 0) { + dev_err(ctx->dev, "Failed to exit sleep mode: %d\n", ret); + goto disable_vdd; + } + + msleep(200); + + ret = mipi_dsi_dcs_set_display_on(dsi); + if (ret < 0) { + dev_err(ctx->dev, "Failed to set display on: %d\n", ret); + goto disable_vdd; + } + + usleep_range(10000, 15000); + + return 0; + +disable_vdd: + regulator_disable(ctx->vdd); + return ret; +} + +static int panel_nv3051d_get_modes(struct drm_panel *panel, + struct drm_connector *connector) +{ + struct panel_nv3051d *ctx = panel_to_panelnv3051d(panel); + const struct nv3051d_panel_info *panel_info = ctx->panel_info; + struct drm_display_mode *mode; + unsigned int i; + + for (i = 0; i < panel_info->num_modes; i++) { + mode = drm_mode_duplicate(connector->dev, + &panel_info->display_modes[i]); + if (!mode) + return -ENOMEM; + + drm_mode_set_name(mode); + + mode->type = DRM_MODE_TYPE_DRIVER; + if (panel_info->num_modes == 1) + mode->type |= DRM_MODE_TYPE_PREFERRED; + + drm_mode_probed_add(connector, mode); + } + + connector->display_info.bpc = 8; + connector->display_info.width_mm = panel_info->width_mm; + connector->display_info.height_mm = panel_info->height_mm; + connector->display_info.bus_flags = panel_info->bus_flags; + + return panel_info->num_modes; +} + +static const struct drm_panel_funcs panel_nv3051d_funcs = { + .unprepare = panel_nv3051d_unprepare, + .prepare = panel_nv3051d_prepare, + .get_modes = 
panel_nv3051d_get_modes, +}; + +static int panel_nv3051d_probe(struct mipi_dsi_device *dsi) +{ + struct device *dev = &dsi->dev; + struct panel_nv3051d *ctx; + int ret; + + ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + + ctx->dev = dev; + + ctx->panel_info = of_device_get_match_data(dev); + if (!ctx->panel_info) + return -EINVAL; + + ctx->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH); + if (IS_ERR(ctx->reset_gpio)) { + dev_err(dev, "cannot get reset gpio\n"); + return PTR_ERR(ctx->reset_gpio); + } + + ctx->vdd = devm_regulator_get(dev, "vdd"); + if (IS_ERR(ctx->vdd)) { + ret = PTR_ERR(ctx->vdd); + if (ret != -EPROBE_DEFER) + dev_err(dev, "Failed to request vdd regulator: %d\n", ret); + return ret; + } + + mipi_dsi_set_drvdata(dsi, ctx); + + dsi->lanes = 4; + dsi->format = MIPI_DSI_FMT_RGB888; + dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST | + MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET; + + drm_panel_init(&ctx->panel, &dsi->dev, &panel_nv3051d_funcs, + DRM_MODE_CONNECTOR_DSI); + + ret = drm_panel_of_backlight(&ctx->panel); + if (ret) + return ret; + + drm_panel_add(&ctx->panel); + + ret = mipi_dsi_attach(dsi); + if (ret < 0) { + dev_err(dev, "mipi_dsi_attach failed: %d\n", ret); + drm_panel_remove(&ctx->panel); + return ret; + } + + return 0; +} + +static void panel_nv3051d_shutdown(struct mipi_dsi_device *dsi) +{ + struct panel_nv3051d *ctx = mipi_dsi_get_drvdata(dsi); + int ret; + + ret = drm_panel_unprepare(&ctx->panel); + if (ret < 0) + dev_err(&dsi->dev, "Failed to unprepare panel: %d\n", ret); + + ret = drm_panel_disable(&ctx->panel); + if (ret < 0) + dev_err(&dsi->dev, "Failed to disable panel: %d\n", ret); +} + +static void panel_nv3051d_remove(struct mipi_dsi_device *dsi) +{ + struct panel_nv3051d *ctx = mipi_dsi_get_drvdata(dsi); + int ret; + + panel_nv3051d_shutdown(dsi); + + ret = mipi_dsi_detach(dsi); + if (ret < 0) + dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret); + + drm_panel_remove(&ctx->panel); +} + +static const struct drm_display_mode nv3051d_rgxx3_modes[] = { + { /* 120hz */ + .hdisplay = 640, + .hsync_start = 640 + 40, + .hsync_end = 640 + 40 + 2, + .htotal = 640 + 40 + 2 + 80, + .vdisplay = 480, + .vsync_start = 480 + 18, + .vsync_end = 480 + 18 + 2, + .vtotal = 480 + 18 + 2 + 28, + .clock = 48300, + .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, + }, + { /* 100hz */ + .hdisplay = 640, + .hsync_start = 640 + 40, + .hsync_end = 640 + 40 + 2, + .htotal = 640 + 40 + 2 + 80, + .vdisplay = 480, + .vsync_start = 480 + 18, + .vsync_end = 480 + 18 + 2, + .vtotal = 480 + 18 + 2 + 28, + .clock = 40250, + .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, + }, + { /* 60hz */ + .hdisplay = 640, + .hsync_start = 640 + 40, + .hsync_end = 640 + 40 + 2, + .htotal = 640 + 40 + 2 + 80, + .vdisplay = 480, + .vsync_start = 480 + 18, + .vsync_end = 480 + 18 + 2, + .vtotal = 480 + 18 + 2 + 28, + .clock = 24150, + .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, + }, +}; + +static const struct nv3051d_panel_info nv3051d_rgxx3_info = { + .display_modes = nv3051d_rgxx3_modes, + .num_modes = ARRAY_SIZE(nv3051d_rgxx3_modes), + .width_mm = 70, + .height_mm = 57, + .bus_flags = DRM_BUS_FLAG_DE_LOW | DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE, +}; + +static const struct of_device_id newvision_nv3051d_of_match[] = { + { .compatible = "newvision,nv3051d", .data = &nv3051d_rgxx3_info }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, newvision_nv3051d_of_match); + +static struct 
mipi_dsi_driver newvision_nv3051d_driver = { + .driver = { + .name = "panel-newvision-nv3051d", + .of_match_table = newvision_nv3051d_of_match, + }, + .probe = panel_nv3051d_probe, + .remove = panel_nv3051d_remove, + .shutdown = panel_nv3051d_shutdown, +}; +module_mipi_dsi_driver(newvision_nv3051d_driver); + +MODULE_AUTHOR("Chris Morgan <macromorgan@hotmail.com>"); +MODULE_DESCRIPTION("DRM driver for Newvision NV3051D based MIPI DSI panels"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/pl111/pl111_drv.c b/drivers/gpu/drm/pl111/pl111_drv.c index eb25eedb5ee0..00deba0b7271 100644 --- a/drivers/gpu/drm/pl111/pl111_drv.c +++ b/drivers/gpu/drm/pl111/pl111_drv.c @@ -48,7 +48,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_bridge.h> #include <drm/drm_drv.h> -#include <drm/drm_fb_helper.h> +#include <drm/drm_fbdev_generic.h> #include <drm/drm_fourcc.h> #include <drm/drm_gem_dma_helper.h> #include <drm/drm_gem_framebuffer_helper.h> diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c index 3044ca948ce2..a3b83f89e061 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.c +++ b/drivers/gpu/drm/qxl/qxl_drv.c @@ -37,6 +37,7 @@ #include <drm/drm_aperture.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_drv.h> +#include <drm/drm_fbdev_generic.h> #include <drm/drm_file.h> #include <drm/drm_gem_ttm_helper.h> #include <drm/drm_module.h> diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h index 432758ad39a3..76f060810f63 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.h +++ b/drivers/gpu/drm/qxl/qxl_drv.h @@ -38,7 +38,6 @@ #include <drm/drm_crtc.h> #include <drm/drm_encoder.h> -#include <drm/drm_fb_helper.h> #include <drm/drm_gem_ttm_helper.h> #include <drm/drm_ioctl.h> #include <drm/drm_gem.h> diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c index cc6754d88b81..c1710ed1cab8 100644 --- a/drivers/gpu/drm/radeon/radeon_fb.c +++ b/drivers/gpu/drm/radeon/radeon_fb.c @@ -80,6 +80,8 @@ static const struct fb_ops radeonfb_ops = { DRM_FB_HELPER_DEFAULT_OPS, .fb_open = radeonfb_open, .fb_release = radeonfb_release, + .fb_read = drm_fb_helper_cfb_read, + .fb_write = drm_fb_helper_cfb_write, .fb_fillrect = drm_fb_helper_cfb_fillrect, .fb_copyarea = drm_fb_helper_cfb_copyarea, .fb_imageblit = drm_fb_helper_cfb_imageblit, @@ -243,7 +245,7 @@ static int radeonfb_create(struct drm_fb_helper *helper, rbo = gem_to_radeon_bo(gobj); /* okay we have an object now allocate the framebuffer */ - info = drm_fb_helper_alloc_fbi(helper); + info = drm_fb_helper_alloc_info(helper); if (IS_ERR(info)) { ret = PTR_ERR(info); goto out; @@ -309,7 +311,7 @@ static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfb { struct drm_framebuffer *fb = &rfbdev->fb; - drm_fb_helper_unregister_fbi(&rfbdev->helper); + drm_fb_helper_unregister_info(&rfbdev->helper); if (fb->obj[0]) { radeonfb_destroy_pinned_object(fb->obj[0]); diff --git a/drivers/gpu/drm/rcar-du/Kconfig b/drivers/gpu/drm/rcar-du/Kconfig index c959e8c6be7d..f14686549cbe 100644 --- a/drivers/gpu/drm/rcar-du/Kconfig +++ b/drivers/gpu/drm/rcar-du/Kconfig @@ -41,8 +41,6 @@ config DRM_RCAR_LVDS depends on DRM_RCAR_USE_LVDS select DRM_KMS_HELPER select DRM_PANEL - select OF_FLATTREE - select OF_OVERLAY config DRM_RCAR_MIPI_DSI tristate "R-Car DU MIPI DSI Encoder Support" @@ -51,6 +49,14 @@ config DRM_RCAR_MIPI_DSI help Enable support for the R-Car Display Unit embedded MIPI DSI encoders. 
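A note on the recurring header change in the hunks above and below: several KMS drivers (pl111 and qxl above, and rockchip, solomon, sti, stm and sun4i further down) stop including <drm/drm_fb_helper.h> and include <drm/drm_fbdev_generic.h> instead, because the only fbdev symbol they call, drm_fbdev_generic_setup(), is now provided by the new header alongside the drm_fbdev_generic.o object added in the Makefile hunk. Drivers that keep a hand-rolled fbdev path (radeon above, tegra below) take the other route: explicit .fb_read/.fb_write entries using the cfb/sys helpers and the renamed drm_fb_helper_alloc_info()/drm_fb_helper_unregister_info() calls. The following is a minimal sketch, not part of the patch, of the call site a generic-fbdev driver keeps after this series; the function name is a placeholder and a fully initialised drm_device is assumed:

#include <drm/drm_drv.h>
#include <drm/drm_fbdev_generic.h>

/*
 * Illustrative only: register an already set-up DRM device, then spawn
 * the generic fbdev client on top of it.
 */
static int example_register_with_fbdev(struct drm_device *drm)
{
	int ret;

	ret = drm_dev_register(drm, 0);
	if (ret)
		return ret;

	/* 32 is the preferred bits per pixel of the emulated framebuffer. */
	drm_fbdev_generic_setup(drm, 32);

	return 0;
}

Because drm_fbdev_generic_setup() is the whole fbdev surface such drivers touch, dropping the drm_fb_helper.h include keeps them decoupled from the lower-level helper API that radeon and tegra still use directly.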
+config DRM_RZG2L_MIPI_DSI + tristate "RZ/G2L MIPI DSI Encoder Support" + depends on DRM_BRIDGE && OF + depends on ARCH_RENESAS || COMPILE_TEST + select DRM_MIPI_DSI + help + Enable support for the RZ/G2L Display Unit embedded MIPI DSI encoders. + config DRM_RCAR_VSP bool "R-Car DU VSP Compositor Support" if ARM default y if ARM64 diff --git a/drivers/gpu/drm/rcar-du/Makefile b/drivers/gpu/drm/rcar-du/Makefile index 6f132325c8b7..b8f2c82651d9 100644 --- a/drivers/gpu/drm/rcar-du/Makefile +++ b/drivers/gpu/drm/rcar-du/Makefile @@ -14,3 +14,5 @@ obj-$(CONFIG_DRM_RCAR_DU) += rcar-du-drm.o obj-$(CONFIG_DRM_RCAR_DW_HDMI) += rcar_dw_hdmi.o obj-$(CONFIG_DRM_RCAR_LVDS) += rcar_lvds.o obj-$(CONFIG_DRM_RCAR_MIPI_DSI) += rcar_mipi_dsi.o + +obj-$(CONFIG_DRM_RZG2L_MIPI_DSI) += rzg2l_mipi_dsi.o diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c index a2776f1d6f2c..d003e8d9e7a2 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c @@ -20,7 +20,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_drv.h> -#include <drm/drm_fb_helper.h> +#include <drm/drm_fbdev_generic.h> #include <drm/drm_gem_dma_helper.h> #include <drm/drm_managed.h> #include <drm/drm_probe_helper.h> diff --git a/drivers/gpu/drm/rcar-du/rzg2l_mipi_dsi.c b/drivers/gpu/drm/rcar-du/rzg2l_mipi_dsi.c new file mode 100644 index 000000000000..aa95b85a2964 --- /dev/null +++ b/drivers/gpu/drm/rcar-du/rzg2l_mipi_dsi.c @@ -0,0 +1,816 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * RZ/G2L MIPI DSI Encoder Driver + * + * Copyright (C) 2022 Renesas Electronics Corporation + */ +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/io.h> +#include <linux/iopoll.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/of_graph.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> +#include <linux/reset.h> +#include <linux/slab.h> + +#include <drm/drm_atomic.h> +#include <drm/drm_atomic_helper.h> +#include <drm/drm_bridge.h> +#include <drm/drm_mipi_dsi.h> +#include <drm/drm_of.h> +#include <drm/drm_panel.h> +#include <drm/drm_probe_helper.h> + +#include "rzg2l_mipi_dsi_regs.h" + +struct rzg2l_mipi_dsi { + struct device *dev; + void __iomem *mmio; + + struct reset_control *rstc; + struct reset_control *arstc; + struct reset_control *prstc; + + struct mipi_dsi_host host; + struct drm_bridge bridge; + struct drm_bridge *next_bridge; + + struct clk *vclk; + + enum mipi_dsi_pixel_format format; + unsigned int num_data_lanes; + unsigned int lanes; + unsigned long mode_flags; +}; + +static inline struct rzg2l_mipi_dsi * +bridge_to_rzg2l_mipi_dsi(struct drm_bridge *bridge) +{ + return container_of(bridge, struct rzg2l_mipi_dsi, bridge); +} + +static inline struct rzg2l_mipi_dsi * +host_to_rzg2l_mipi_dsi(struct mipi_dsi_host *host) +{ + return container_of(host, struct rzg2l_mipi_dsi, host); +} + +struct rzg2l_mipi_dsi_timings { + unsigned long hsfreq_max; + u32 t_init; + u32 tclk_prepare; + u32 ths_prepare; + u32 tclk_zero; + u32 tclk_pre; + u32 tclk_post; + u32 tclk_trail; + u32 ths_zero; + u32 ths_trail; + u32 ths_exit; + u32 tlpx; +}; + +static const struct rzg2l_mipi_dsi_timings rzg2l_mipi_dsi_global_timings[] = { + { + .hsfreq_max = 80000, + .t_init = 79801, + .tclk_prepare = 8, + .ths_prepare = 13, + .tclk_zero = 33, + .tclk_pre = 24, + .tclk_post = 94, + .tclk_trail = 10, + .ths_zero = 23, + .ths_trail = 17, + .ths_exit = 13, + .tlpx = 6, + }, + { + .hsfreq_max = 125000, + .t_init = 79801, + 
.tclk_prepare = 8, + .ths_prepare = 12, + .tclk_zero = 33, + .tclk_pre = 15, + .tclk_post = 94, + .tclk_trail = 10, + .ths_zero = 23, + .ths_trail = 17, + .ths_exit = 13, + .tlpx = 6, + }, + { + .hsfreq_max = 250000, + .t_init = 79801, + .tclk_prepare = 8, + .ths_prepare = 12, + .tclk_zero = 33, + .tclk_pre = 13, + .tclk_post = 94, + .tclk_trail = 10, + .ths_zero = 23, + .ths_trail = 16, + .ths_exit = 13, + .tlpx = 6, + }, + { + .hsfreq_max = 360000, + .t_init = 79801, + .tclk_prepare = 8, + .ths_prepare = 10, + .tclk_zero = 33, + .tclk_pre = 4, + .tclk_post = 35, + .tclk_trail = 7, + .ths_zero = 16, + .ths_trail = 9, + .ths_exit = 13, + .tlpx = 6, + }, + { + .hsfreq_max = 720000, + .t_init = 79801, + .tclk_prepare = 8, + .ths_prepare = 9, + .tclk_zero = 33, + .tclk_pre = 4, + .tclk_post = 35, + .tclk_trail = 7, + .ths_zero = 16, + .ths_trail = 9, + .ths_exit = 13, + .tlpx = 6, + }, + { + .hsfreq_max = 1500000, + .t_init = 79801, + .tclk_prepare = 8, + .ths_prepare = 9, + .tclk_zero = 33, + .tclk_pre = 4, + .tclk_post = 35, + .tclk_trail = 7, + .ths_zero = 16, + .ths_trail = 9, + .ths_exit = 13, + .tlpx = 6, + }, +}; + +static void rzg2l_mipi_dsi_phy_write(struct rzg2l_mipi_dsi *dsi, u32 reg, u32 data) +{ + iowrite32(data, dsi->mmio + reg); +} + +static void rzg2l_mipi_dsi_link_write(struct rzg2l_mipi_dsi *dsi, u32 reg, u32 data) +{ + iowrite32(data, dsi->mmio + LINK_REG_OFFSET + reg); +} + +static u32 rzg2l_mipi_dsi_phy_read(struct rzg2l_mipi_dsi *dsi, u32 reg) +{ + return ioread32(dsi->mmio + reg); +} + +static u32 rzg2l_mipi_dsi_link_read(struct rzg2l_mipi_dsi *dsi, u32 reg) +{ + return ioread32(dsi->mmio + LINK_REG_OFFSET + reg); +} + +/* ----------------------------------------------------------------------------- + * Hardware Setup + */ + +static int rzg2l_mipi_dsi_dphy_init(struct rzg2l_mipi_dsi *dsi, + unsigned long hsfreq) +{ + const struct rzg2l_mipi_dsi_timings *dphy_timings; + unsigned int i; + u32 dphyctrl0; + u32 dphytim0; + u32 dphytim1; + u32 dphytim2; + u32 dphytim3; + int ret; + + /* All DSI global operation timings are set with recommended setting */ + for (i = 0; i < ARRAY_SIZE(rzg2l_mipi_dsi_global_timings); ++i) { + dphy_timings = &rzg2l_mipi_dsi_global_timings[i]; + if (hsfreq <= dphy_timings->hsfreq_max) + break; + } + + /* Initializing DPHY before accessing LINK */ + dphyctrl0 = DSIDPHYCTRL0_CAL_EN_HSRX_OFS | DSIDPHYCTRL0_CMN_MASTER_EN | + DSIDPHYCTRL0_RE_VDD_DETVCCQLV18 | DSIDPHYCTRL0_EN_BGR; + + rzg2l_mipi_dsi_phy_write(dsi, DSIDPHYCTRL0, dphyctrl0); + usleep_range(20, 30); + + dphyctrl0 |= DSIDPHYCTRL0_EN_LDO1200; + rzg2l_mipi_dsi_phy_write(dsi, DSIDPHYCTRL0, dphyctrl0); + usleep_range(10, 20); + + dphytim0 = DSIDPHYTIM0_TCLK_MISS(0) | + DSIDPHYTIM0_T_INIT(dphy_timings->t_init); + dphytim1 = DSIDPHYTIM1_THS_PREPARE(dphy_timings->ths_prepare) | + DSIDPHYTIM1_TCLK_PREPARE(dphy_timings->tclk_prepare) | + DSIDPHYTIM1_THS_SETTLE(0) | + DSIDPHYTIM1_TCLK_SETTLE(0); + dphytim2 = DSIDPHYTIM2_TCLK_TRAIL(dphy_timings->tclk_trail) | + DSIDPHYTIM2_TCLK_POST(dphy_timings->tclk_post) | + DSIDPHYTIM2_TCLK_PRE(dphy_timings->tclk_pre) | + DSIDPHYTIM2_TCLK_ZERO(dphy_timings->tclk_zero); + dphytim3 = DSIDPHYTIM3_TLPX(dphy_timings->tlpx) | + DSIDPHYTIM3_THS_EXIT(dphy_timings->ths_exit) | + DSIDPHYTIM3_THS_TRAIL(dphy_timings->ths_trail) | + DSIDPHYTIM3_THS_ZERO(dphy_timings->ths_zero); + + rzg2l_mipi_dsi_phy_write(dsi, DSIDPHYTIM0, dphytim0); + rzg2l_mipi_dsi_phy_write(dsi, DSIDPHYTIM1, dphytim1); + rzg2l_mipi_dsi_phy_write(dsi, DSIDPHYTIM2, dphytim2); + rzg2l_mipi_dsi_phy_write(dsi, 
DSIDPHYTIM3, dphytim3); + + ret = reset_control_deassert(dsi->rstc); + if (ret < 0) + return ret; + + udelay(1); + + return 0; +} + +static void rzg2l_mipi_dsi_dphy_exit(struct rzg2l_mipi_dsi *dsi) +{ + u32 dphyctrl0; + + dphyctrl0 = rzg2l_mipi_dsi_phy_read(dsi, DSIDPHYCTRL0); + + dphyctrl0 &= ~(DSIDPHYCTRL0_EN_LDO1200 | DSIDPHYCTRL0_EN_BGR); + rzg2l_mipi_dsi_phy_write(dsi, DSIDPHYCTRL0, dphyctrl0); + + reset_control_assert(dsi->rstc); +} + +static int rzg2l_mipi_dsi_startup(struct rzg2l_mipi_dsi *dsi, + const struct drm_display_mode *mode) +{ + unsigned long hsfreq; + unsigned int bpp; + u32 txsetr; + u32 clstptsetr; + u32 lptrnstsetr; + u32 clkkpt; + u32 clkbfht; + u32 clkstpt; + u32 golpbkt; + int ret; + + /* + * Relationship between hsclk and vclk must follow + * vclk * bpp = hsclk * 8 * lanes + * where vclk: video clock (Hz) + * bpp: video pixel bit depth + * hsclk: DSI HS Byte clock frequency (Hz) + * lanes: number of data lanes + * + * hsclk(bit) = hsclk(byte) * 8 + */ + bpp = mipi_dsi_pixel_format_to_bpp(dsi->format); + hsfreq = (mode->clock * bpp * 8) / (8 * dsi->lanes); + + ret = pm_runtime_resume_and_get(dsi->dev); + if (ret < 0) + return ret; + + clk_set_rate(dsi->vclk, mode->clock * 1000); + + ret = rzg2l_mipi_dsi_dphy_init(dsi, hsfreq); + if (ret < 0) + goto err_phy; + + /* Enable Data lanes and Clock lanes */ + txsetr = TXSETR_DLEN | TXSETR_NUMLANEUSE(dsi->lanes - 1) | TXSETR_CLEN; + rzg2l_mipi_dsi_link_write(dsi, TXSETR, txsetr); + + /* + * Global timings characteristic depends on high speed Clock Frequency + * Currently MIPI DSI-IF just supports maximum FHD@60 with: + * - videoclock = 148.5 (MHz) + * - bpp: maximum 24bpp + * - data lanes: maximum 4 lanes + * Therefore maximum hsclk will be 891 Mbps. + */ + if (hsfreq > 445500) { + clkkpt = 12; + clkbfht = 15; + clkstpt = 48; + golpbkt = 75; + } else if (hsfreq > 250000) { + clkkpt = 7; + clkbfht = 8; + clkstpt = 27; + golpbkt = 40; + } else { + clkkpt = 8; + clkbfht = 6; + clkstpt = 24; + golpbkt = 29; + } + + clstptsetr = CLSTPTSETR_CLKKPT(clkkpt) | CLSTPTSETR_CLKBFHT(clkbfht) | + CLSTPTSETR_CLKSTPT(clkstpt); + rzg2l_mipi_dsi_link_write(dsi, CLSTPTSETR, clstptsetr); + + lptrnstsetr = LPTRNSTSETR_GOLPBKT(golpbkt); + rzg2l_mipi_dsi_link_write(dsi, LPTRNSTSETR, lptrnstsetr); + + return 0; + +err_phy: + rzg2l_mipi_dsi_dphy_exit(dsi); + pm_runtime_put(dsi->dev); + + return ret; +} + +static void rzg2l_mipi_dsi_stop(struct rzg2l_mipi_dsi *dsi) +{ + rzg2l_mipi_dsi_dphy_exit(dsi); + pm_runtime_put(dsi->dev); +} + +static void rzg2l_mipi_dsi_set_display_timing(struct rzg2l_mipi_dsi *dsi, + const struct drm_display_mode *mode) +{ + u32 vich1ppsetr; + u32 vich1vssetr; + u32 vich1vpsetr; + u32 vich1hssetr; + u32 vich1hpsetr; + int dsi_format; + u32 delay[2]; + u8 index; + + /* Configuration for Pixel Packet */ + dsi_format = mipi_dsi_pixel_format_to_bpp(dsi->format); + switch (dsi_format) { + case 24: + vich1ppsetr = VICH1PPSETR_DT_RGB24; + break; + case 18: + vich1ppsetr = VICH1PPSETR_DT_RGB18; + break; + } + + if ((dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) && + !(dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST)) + vich1ppsetr |= VICH1PPSETR_TXESYNC_PULSE; + + rzg2l_mipi_dsi_link_write(dsi, VICH1PPSETR, vich1ppsetr); + + /* Configuration for Video Parameters */ + vich1vssetr = VICH1VSSETR_VACTIVE(mode->vdisplay) | + VICH1VSSETR_VSA(mode->vsync_end - mode->vsync_start); + vich1vssetr |= (mode->flags & DRM_MODE_FLAG_PVSYNC) ? 
+ VICH1VSSETR_VSPOL_HIGH : VICH1VSSETR_VSPOL_LOW; + + vich1vpsetr = VICH1VPSETR_VFP(mode->vsync_start - mode->vdisplay) | + VICH1VPSETR_VBP(mode->vtotal - mode->vsync_end); + + vich1hssetr = VICH1HSSETR_HACTIVE(mode->hdisplay) | + VICH1HSSETR_HSA(mode->hsync_end - mode->hsync_start); + vich1hssetr |= (mode->flags & DRM_MODE_FLAG_PHSYNC) ? + VICH1HSSETR_HSPOL_HIGH : VICH1HSSETR_HSPOL_LOW; + + vich1hpsetr = VICH1HPSETR_HFP(mode->hsync_start - mode->hdisplay) | + VICH1HPSETR_HBP(mode->htotal - mode->hsync_end); + + rzg2l_mipi_dsi_link_write(dsi, VICH1VSSETR, vich1vssetr); + rzg2l_mipi_dsi_link_write(dsi, VICH1VPSETR, vich1vpsetr); + rzg2l_mipi_dsi_link_write(dsi, VICH1HSSETR, vich1hssetr); + rzg2l_mipi_dsi_link_write(dsi, VICH1HPSETR, vich1hpsetr); + + /* + * Configuration for Delay Value + * Delay value based on 2 ranges of video clock. + * 74.25MHz is videoclock of HD@60p or FHD@30p + */ + if (mode->clock > 74250) { + delay[0] = 231; + delay[1] = 216; + } else { + delay[0] = 220; + delay[1] = 212; + } + + if (dsi->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS) + index = 0; + else + index = 1; + + rzg2l_mipi_dsi_link_write(dsi, VICH1SET1R, + VICH1SET1R_DLY(delay[index])); +} + +static int rzg2l_mipi_dsi_start_hs_clock(struct rzg2l_mipi_dsi *dsi) +{ + bool is_clk_cont; + u32 hsclksetr; + u32 status; + int ret; + + is_clk_cont = !(dsi->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS); + + /* Start HS clock */ + hsclksetr = HSCLKSETR_HSCLKRUN_HS | (is_clk_cont ? + HSCLKSETR_HSCLKMODE_CONT : + HSCLKSETR_HSCLKMODE_NON_CONT); + rzg2l_mipi_dsi_link_write(dsi, HSCLKSETR, hsclksetr); + + if (is_clk_cont) { + ret = read_poll_timeout(rzg2l_mipi_dsi_link_read, status, + status & PLSR_CLLP2HS, + 2000, 20000, false, dsi, PLSR); + if (ret < 0) { + dev_err(dsi->dev, "failed to start HS clock\n"); + return ret; + } + } + + dev_dbg(dsi->dev, "Start High Speed Clock with %s clock mode", + is_clk_cont ? "continuous" : "non-continuous"); + + return 0; +} + +static int rzg2l_mipi_dsi_stop_hs_clock(struct rzg2l_mipi_dsi *dsi) +{ + bool is_clk_cont; + u32 status; + int ret; + + is_clk_cont = !(dsi->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS); + + /* Stop HS clock */ + rzg2l_mipi_dsi_link_write(dsi, HSCLKSETR, + is_clk_cont ? 
HSCLKSETR_HSCLKMODE_CONT : + HSCLKSETR_HSCLKMODE_NON_CONT); + + if (is_clk_cont) { + ret = read_poll_timeout(rzg2l_mipi_dsi_link_read, status, + status & PLSR_CLHS2LP, + 2000, 20000, false, dsi, PLSR); + if (ret < 0) { + dev_err(dsi->dev, "failed to stop HS clock\n"); + return ret; + } + } + + return 0; +} + +static int rzg2l_mipi_dsi_start_video(struct rzg2l_mipi_dsi *dsi) +{ + u32 vich1set0r; + u32 status; + int ret; + + /* Configuration for Blanking sequence and start video input*/ + vich1set0r = VICH1SET0R_HFPNOLP | VICH1SET0R_HBPNOLP | + VICH1SET0R_HSANOLP | VICH1SET0R_VSTART; + rzg2l_mipi_dsi_link_write(dsi, VICH1SET0R, vich1set0r); + + ret = read_poll_timeout(rzg2l_mipi_dsi_link_read, status, + status & VICH1SR_VIRDY, + 2000, 20000, false, dsi, VICH1SR); + if (ret < 0) + dev_err(dsi->dev, "Failed to start video signal input\n"); + + return ret; +} + +static int rzg2l_mipi_dsi_stop_video(struct rzg2l_mipi_dsi *dsi) +{ + u32 status; + int ret; + + rzg2l_mipi_dsi_link_write(dsi, VICH1SET0R, VICH1SET0R_VSTPAFT); + ret = read_poll_timeout(rzg2l_mipi_dsi_link_read, status, + (status & VICH1SR_STOP) && (!(status & VICH1SR_RUNNING)), + 2000, 20000, false, dsi, VICH1SR); + if (ret < 0) + goto err; + + ret = read_poll_timeout(rzg2l_mipi_dsi_link_read, status, + !(status & LINKSR_HSBUSY), + 2000, 20000, false, dsi, LINKSR); + if (ret < 0) + goto err; + + return 0; + +err: + dev_err(dsi->dev, "Failed to stop video signal input\n"); + return ret; +} + +/* ----------------------------------------------------------------------------- + * Bridge + */ + +static int rzg2l_mipi_dsi_attach(struct drm_bridge *bridge, + enum drm_bridge_attach_flags flags) +{ + struct rzg2l_mipi_dsi *dsi = bridge_to_rzg2l_mipi_dsi(bridge); + + return drm_bridge_attach(bridge->encoder, dsi->next_bridge, bridge, + flags); +} + +static void rzg2l_mipi_dsi_atomic_enable(struct drm_bridge *bridge, + struct drm_bridge_state *old_bridge_state) +{ + struct drm_atomic_state *state = old_bridge_state->base.state; + struct rzg2l_mipi_dsi *dsi = bridge_to_rzg2l_mipi_dsi(bridge); + const struct drm_display_mode *mode; + struct drm_connector *connector; + struct drm_crtc *crtc; + int ret; + + connector = drm_atomic_get_new_connector_for_encoder(state, bridge->encoder); + crtc = drm_atomic_get_new_connector_state(state, connector)->crtc; + mode = &drm_atomic_get_new_crtc_state(state, crtc)->adjusted_mode; + + ret = rzg2l_mipi_dsi_startup(dsi, mode); + if (ret < 0) + return; + + rzg2l_mipi_dsi_set_display_timing(dsi, mode); + + ret = rzg2l_mipi_dsi_start_hs_clock(dsi); + if (ret < 0) + goto err_stop; + + ret = rzg2l_mipi_dsi_start_video(dsi); + if (ret < 0) + goto err_stop_clock; + + return; + +err_stop_clock: + rzg2l_mipi_dsi_stop_hs_clock(dsi); +err_stop: + rzg2l_mipi_dsi_stop(dsi); +} + +static void rzg2l_mipi_dsi_atomic_disable(struct drm_bridge *bridge, + struct drm_bridge_state *old_bridge_state) +{ + struct rzg2l_mipi_dsi *dsi = bridge_to_rzg2l_mipi_dsi(bridge); + + rzg2l_mipi_dsi_stop_video(dsi); + rzg2l_mipi_dsi_stop_hs_clock(dsi); + rzg2l_mipi_dsi_stop(dsi); +} + +static enum drm_mode_status +rzg2l_mipi_dsi_bridge_mode_valid(struct drm_bridge *bridge, + const struct drm_display_info *info, + const struct drm_display_mode *mode) +{ + if (mode->clock > 148500) + return MODE_CLOCK_HIGH; + + return MODE_OK; +} + +static const struct drm_bridge_funcs rzg2l_mipi_dsi_bridge_ops = { + .attach = rzg2l_mipi_dsi_attach, + .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, + .atomic_destroy_state = 
drm_atomic_helper_bridge_destroy_state, + .atomic_reset = drm_atomic_helper_bridge_reset, + .atomic_enable = rzg2l_mipi_dsi_atomic_enable, + .atomic_disable = rzg2l_mipi_dsi_atomic_disable, + .mode_valid = rzg2l_mipi_dsi_bridge_mode_valid, +}; + +/* ----------------------------------------------------------------------------- + * Host setting + */ + +static int rzg2l_mipi_dsi_host_attach(struct mipi_dsi_host *host, + struct mipi_dsi_device *device) +{ + struct rzg2l_mipi_dsi *dsi = host_to_rzg2l_mipi_dsi(host); + int ret; + + if (device->lanes > dsi->num_data_lanes) { + dev_err(dsi->dev, + "Number of lines of device (%u) exceeds host (%u)\n", + device->lanes, dsi->num_data_lanes); + return -EINVAL; + } + + switch (mipi_dsi_pixel_format_to_bpp(device->format)) { + case 24: + case 18: + break; + default: + dev_err(dsi->dev, "Unsupported format 0x%04x\n", device->format); + return -EINVAL; + } + + dsi->lanes = device->lanes; + dsi->format = device->format; + dsi->mode_flags = device->mode_flags; + + dsi->next_bridge = devm_drm_of_get_bridge(dsi->dev, dsi->dev->of_node, + 1, 0); + if (IS_ERR(dsi->next_bridge)) { + ret = PTR_ERR(dsi->next_bridge); + dev_err(dsi->dev, "failed to get next bridge: %d\n", ret); + return ret; + } + + drm_bridge_add(&dsi->bridge); + + return 0; +} + +static int rzg2l_mipi_dsi_host_detach(struct mipi_dsi_host *host, + struct mipi_dsi_device *device) +{ + struct rzg2l_mipi_dsi *dsi = host_to_rzg2l_mipi_dsi(host); + + drm_bridge_remove(&dsi->bridge); + + return 0; +} + +static const struct mipi_dsi_host_ops rzg2l_mipi_dsi_host_ops = { + .attach = rzg2l_mipi_dsi_host_attach, + .detach = rzg2l_mipi_dsi_host_detach, +}; + +/* ----------------------------------------------------------------------------- + * Power Management + */ + +static int __maybe_unused rzg2l_mipi_pm_runtime_suspend(struct device *dev) +{ + struct rzg2l_mipi_dsi *dsi = dev_get_drvdata(dev); + + reset_control_assert(dsi->prstc); + reset_control_assert(dsi->arstc); + + return 0; +} + +static int __maybe_unused rzg2l_mipi_pm_runtime_resume(struct device *dev) +{ + struct rzg2l_mipi_dsi *dsi = dev_get_drvdata(dev); + int ret; + + ret = reset_control_deassert(dsi->arstc); + if (ret < 0) + return ret; + + ret = reset_control_deassert(dsi->prstc); + if (ret < 0) + reset_control_assert(dsi->arstc); + + return ret; +} + +static const struct dev_pm_ops rzg2l_mipi_pm_ops = { + SET_RUNTIME_PM_OPS(rzg2l_mipi_pm_runtime_suspend, rzg2l_mipi_pm_runtime_resume, NULL) +}; + +/* ----------------------------------------------------------------------------- + * Probe & Remove + */ + +static int rzg2l_mipi_dsi_probe(struct platform_device *pdev) +{ + unsigned int num_data_lanes; + struct rzg2l_mipi_dsi *dsi; + u32 txsetr; + int ret; + + dsi = devm_kzalloc(&pdev->dev, sizeof(*dsi), GFP_KERNEL); + if (!dsi) + return -ENOMEM; + + platform_set_drvdata(pdev, dsi); + dsi->dev = &pdev->dev; + + ret = drm_of_get_data_lanes_count_ep(dsi->dev->of_node, 1, 0, 1, 4); + if (ret < 0) + return dev_err_probe(dsi->dev, ret, + "missing or invalid data-lanes property\n"); + + num_data_lanes = ret; + + dsi->mmio = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(dsi->mmio)) + return PTR_ERR(dsi->mmio); + + dsi->vclk = devm_clk_get(dsi->dev, "vclk"); + if (IS_ERR(dsi->vclk)) + return PTR_ERR(dsi->vclk); + + dsi->rstc = devm_reset_control_get_exclusive(dsi->dev, "rst"); + if (IS_ERR(dsi->rstc)) + return dev_err_probe(dsi->dev, PTR_ERR(dsi->rstc), + "failed to get rst\n"); + + dsi->arstc = devm_reset_control_get_exclusive(dsi->dev, "arst"); + 
if (IS_ERR(dsi->arstc)) + return dev_err_probe(&pdev->dev, PTR_ERR(dsi->arstc), + "failed to get arst\n"); + + dsi->prstc = devm_reset_control_get_exclusive(dsi->dev, "prst"); + if (IS_ERR(dsi->prstc)) + return dev_err_probe(dsi->dev, PTR_ERR(dsi->prstc), + "failed to get prst\n"); + + platform_set_drvdata(pdev, dsi); + + pm_runtime_enable(dsi->dev); + + ret = pm_runtime_resume_and_get(dsi->dev); + if (ret < 0) + goto err_pm_disable; + + /* + * TXSETR register can be read only after DPHY init. But during probe + * mode->clock and format are not available. So initialize DPHY with + * timing parameters for 80Mbps. + */ + ret = rzg2l_mipi_dsi_dphy_init(dsi, 80000); + if (ret < 0) + goto err_phy; + + txsetr = rzg2l_mipi_dsi_link_read(dsi, TXSETR); + dsi->num_data_lanes = min(((txsetr >> 16) & 3) + 1, num_data_lanes); + rzg2l_mipi_dsi_dphy_exit(dsi); + pm_runtime_put(dsi->dev); + + /* Initialize the DRM bridge. */ + dsi->bridge.funcs = &rzg2l_mipi_dsi_bridge_ops; + dsi->bridge.of_node = dsi->dev->of_node; + + /* Init host device */ + dsi->host.dev = dsi->dev; + dsi->host.ops = &rzg2l_mipi_dsi_host_ops; + ret = mipi_dsi_host_register(&dsi->host); + if (ret < 0) + goto err_pm_disable; + + return 0; + +err_phy: + rzg2l_mipi_dsi_dphy_exit(dsi); + pm_runtime_put(dsi->dev); +err_pm_disable: + pm_runtime_disable(dsi->dev); + return ret; +} + +static int rzg2l_mipi_dsi_remove(struct platform_device *pdev) +{ + struct rzg2l_mipi_dsi *dsi = platform_get_drvdata(pdev); + + mipi_dsi_host_unregister(&dsi->host); + pm_runtime_disable(&pdev->dev); + + return 0; +} + +static const struct of_device_id rzg2l_mipi_dsi_of_table[] = { + { .compatible = "renesas,rzg2l-mipi-dsi" }, + { /* sentinel */ } +}; + +MODULE_DEVICE_TABLE(of, rzg2l_mipi_dsi_of_table); + +static struct platform_driver rzg2l_mipi_dsi_platform_driver = { + .probe = rzg2l_mipi_dsi_probe, + .remove = rzg2l_mipi_dsi_remove, + .driver = { + .name = "rzg2l-mipi-dsi", + .pm = &rzg2l_mipi_pm_ops, + .of_match_table = rzg2l_mipi_dsi_of_table, + }, +}; + +module_platform_driver(rzg2l_mipi_dsi_platform_driver); + +MODULE_AUTHOR("Biju Das <biju.das.jz@bp.renesas.com>"); +MODULE_DESCRIPTION("Renesas RZ/G2L MIPI DSI Encoder Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/rcar-du/rzg2l_mipi_dsi_regs.h b/drivers/gpu/drm/rcar-du/rzg2l_mipi_dsi_regs.h new file mode 100644 index 000000000000..1dbc16ec64a4 --- /dev/null +++ b/drivers/gpu/drm/rcar-du/rzg2l_mipi_dsi_regs.h @@ -0,0 +1,151 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * RZ/G2L MIPI DSI Interface Registers Definitions + * + * Copyright (C) 2022 Renesas Electronics Corporation + */ + +#ifndef __RZG2L_MIPI_DSI_REGS_H__ +#define __RZG2L_MIPI_DSI_REGS_H__ + +#include <linux/bits.h> + +/* DPHY Registers */ +#define DSIDPHYCTRL0 0x00 +#define DSIDPHYCTRL0_CAL_EN_HSRX_OFS BIT(16) +#define DSIDPHYCTRL0_CMN_MASTER_EN BIT(8) +#define DSIDPHYCTRL0_RE_VDD_DETVCCQLV18 BIT(2) +#define DSIDPHYCTRL0_EN_LDO1200 BIT(1) +#define DSIDPHYCTRL0_EN_BGR BIT(0) + +#define DSIDPHYTIM0 0x04 +#define DSIDPHYTIM0_TCLK_MISS(x) ((x) << 24) +#define DSIDPHYTIM0_T_INIT(x) ((x) << 0) + +#define DSIDPHYTIM1 0x08 +#define DSIDPHYTIM1_THS_PREPARE(x) ((x) << 24) +#define DSIDPHYTIM1_TCLK_PREPARE(x) ((x) << 16) +#define DSIDPHYTIM1_THS_SETTLE(x) ((x) << 8) +#define DSIDPHYTIM1_TCLK_SETTLE(x) ((x) << 0) + +#define DSIDPHYTIM2 0x0c +#define DSIDPHYTIM2_TCLK_TRAIL(x) ((x) << 24) +#define DSIDPHYTIM2_TCLK_POST(x) ((x) << 16) +#define DSIDPHYTIM2_TCLK_PRE(x) ((x) << 8) +#define DSIDPHYTIM2_TCLK_ZERO(x) ((x) << 0) + +#define 
DSIDPHYTIM3 0x10 +#define DSIDPHYTIM3_TLPX(x) ((x) << 24) +#define DSIDPHYTIM3_THS_EXIT(x) ((x) << 16) +#define DSIDPHYTIM3_THS_TRAIL(x) ((x) << 8) +#define DSIDPHYTIM3_THS_ZERO(x) ((x) << 0) + +/* --------------------------------------------------------*/ +/* Link Registers */ +#define LINK_REG_OFFSET 0x10000 + +/* Link Status Register */ +#define LINKSR 0x10 +#define LINKSR_LPBUSY BIT(13) +#define LINKSR_HSBUSY BIT(12) +#define LINKSR_VICHRUN1 BIT(8) +#define LINKSR_SQCHRUN1 BIT(4) +#define LINKSR_SQCHRUN0 BIT(0) + +/* Tx Set Register */ +#define TXSETR 0x100 +#define TXSETR_NUMLANECAP (0x3 << 16) +#define TXSETR_DLEN (1 << 9) +#define TXSETR_CLEN (1 << 8) +#define TXSETR_NUMLANEUSE(x) (((x) & 0x3) << 0) + +/* HS Clock Set Register */ +#define HSCLKSETR 0x104 +#define HSCLKSETR_HSCLKMODE_CONT (1 << 1) +#define HSCLKSETR_HSCLKMODE_NON_CONT (0 << 1) +#define HSCLKSETR_HSCLKRUN_HS (1 << 0) +#define HSCLKSETR_HSCLKRUN_LP (0 << 0) + +/* Reset Control Register */ +#define RSTCR 0x110 +#define RSTCR_SWRST BIT(0) +#define RSTCR_FCETXSTP BIT(16) + +/* Reset Status Register */ +#define RSTSR 0x114 +#define RSTSR_DL0DIR (1 << 15) +#define RSTSR_DLSTPST (0xf << 8) +#define RSTSR_SWRSTV1 (1 << 4) +#define RSTSR_SWRSTIB (1 << 3) +#define RSTSR_SWRSTAPB (1 << 2) +#define RSTSR_SWRSTLP (1 << 1) +#define RSTSR_SWRSTHS (1 << 0) + +/* Clock Lane Stop Time Set Register */ +#define CLSTPTSETR 0x314 +#define CLSTPTSETR_CLKKPT(x) ((x) << 24) +#define CLSTPTSETR_CLKBFHT(x) ((x) << 16) +#define CLSTPTSETR_CLKSTPT(x) ((x) << 2) + +/* LP Transition Time Set Register */ +#define LPTRNSTSETR 0x318 +#define LPTRNSTSETR_GOLPBKT(x) ((x) << 0) + +/* Physical Lane Status Register */ +#define PLSR 0x320 +#define PLSR_CLHS2LP BIT(27) +#define PLSR_CLLP2HS BIT(26) + +/* Video-Input Channel 1 Set 0 Register */ +#define VICH1SET0R 0x400 +#define VICH1SET0R_VSEN BIT(12) +#define VICH1SET0R_HFPNOLP BIT(10) +#define VICH1SET0R_HBPNOLP BIT(9) +#define VICH1SET0R_HSANOLP BIT(8) +#define VICH1SET0R_VSTPAFT BIT(1) +#define VICH1SET0R_VSTART BIT(0) + +/* Video-Input Channel 1 Set 1 Register */ +#define VICH1SET1R 0x404 +#define VICH1SET1R_DLY(x) (((x) & 0xfff) << 2) + +/* Video-Input Channel 1 Status Register */ +#define VICH1SR 0x410 +#define VICH1SR_VIRDY BIT(3) +#define VICH1SR_RUNNING BIT(2) +#define VICH1SR_STOP BIT(1) +#define VICH1SR_START BIT(0) + +/* Video-Input Channel 1 Pixel Packet Set Register */ +#define VICH1PPSETR 0x420 +#define VICH1PPSETR_DT_RGB18 (0x1e << 16) +#define VICH1PPSETR_DT_RGB18_LS (0x2e << 16) +#define VICH1PPSETR_DT_RGB24 (0x3e << 16) +#define VICH1PPSETR_TXESYNC_PULSE (1 << 15) +#define VICH1PPSETR_VC(x) ((x) << 22) + +/* Video-Input Channel 1 Vertical Size Set Register */ +#define VICH1VSSETR 0x428 +#define VICH1VSSETR_VACTIVE(x) (((x) & 0x7fff) << 16) +#define VICH1VSSETR_VSPOL_LOW (1 << 15) +#define VICH1VSSETR_VSPOL_HIGH (0 << 15) +#define VICH1VSSETR_VSA(x) (((x) & 0xfff) << 0) + +/* Video-Input Channel 1 Vertical Porch Set Register */ +#define VICH1VPSETR 0x42c +#define VICH1VPSETR_VFP(x) (((x) & 0x1fff) << 16) +#define VICH1VPSETR_VBP(x) (((x) & 0x1fff) << 0) + +/* Video-Input Channel 1 Horizontal Size Set Register */ +#define VICH1HSSETR 0x430 +#define VICH1HSSETR_HACTIVE(x) (((x) & 0x7fff) << 16) +#define VICH1HSSETR_HSPOL_LOW (1 << 15) +#define VICH1HSSETR_HSPOL_HIGH (0 << 15) +#define VICH1HSSETR_HSA(x) (((x) & 0xfff) << 0) + +/* Video-Input Channel 1 Horizontal Porch Set Register */ +#define VICH1HPSETR 0x434 +#define VICH1HPSETR_HFP(x) (((x) & 0x1fff) << 16) +#define VICH1HPSETR_HBP(x) 
(((x) & 0x1fff) << 0) + +#endif /* __RZG2L_MIPI_DSI_REGS_H__ */ diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c index 518ee13b1d6f..8526dda91931 100644 --- a/drivers/gpu/drm/rockchip/cdn-dp-core.c +++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c @@ -571,7 +571,7 @@ static void cdn_dp_encoder_mode_set(struct drm_encoder *encoder, video->v_sync_polarity = !!(mode->flags & DRM_MODE_FLAG_NVSYNC); video->h_sync_polarity = !!(mode->flags & DRM_MODE_FLAG_NHSYNC); - memcpy(&dp->mode, adjusted, sizeof(*mode)); + drm_mode_copy(&dp->mode, adjusted); } static bool cdn_dp_check_link_status(struct cdn_dp_device *dp) diff --git a/drivers/gpu/drm/rockchip/inno_hdmi.c b/drivers/gpu/drm/rockchip/inno_hdmi.c index 87b2243ea23e..f51774866f41 100644 --- a/drivers/gpu/drm/rockchip/inno_hdmi.c +++ b/drivers/gpu/drm/rockchip/inno_hdmi.c @@ -499,7 +499,7 @@ static void inno_hdmi_encoder_mode_set(struct drm_encoder *encoder, inno_hdmi_setup(hdmi, adj_mode); /* Store the display mode for plugin/DPMS poweron events */ - memcpy(&hdmi->previous_mode, adj_mode, sizeof(hdmi->previous_mode)); + drm_mode_copy(&hdmi->previous_mode, adj_mode); } static void inno_hdmi_encoder_enable(struct drm_encoder *encoder) diff --git a/drivers/gpu/drm/rockchip/rk3066_hdmi.c b/drivers/gpu/drm/rockchip/rk3066_hdmi.c index cf2cf51091a3..90145ad96984 100644 --- a/drivers/gpu/drm/rockchip/rk3066_hdmi.c +++ b/drivers/gpu/drm/rockchip/rk3066_hdmi.c @@ -395,7 +395,7 @@ rk3066_hdmi_encoder_mode_set(struct drm_encoder *encoder, struct rk3066_hdmi *hdmi = encoder_to_rk3066_hdmi(encoder); /* Store the display mode for plugin/DPMS poweron events. */ - memcpy(&hdmi->previous_mode, adj_mode, sizeof(hdmi->previous_mode)); + drm_mode_copy(&hdmi->previous_mode, adj_mode); } static void rk3066_hdmi_encoder_enable(struct drm_encoder *encoder) diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c index 813f9f8c8698..6e0788d14c10 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c @@ -17,7 +17,7 @@ #include <drm/drm_aperture.h> #include <drm/drm_drv.h> -#include <drm/drm_fb_helper.h> +#include <drm/drm_fbdev_generic.h> #include <drm/drm_gem_dma_helper.h> #include <drm/drm_of.h> #include <drm/drm_probe_helper.h> diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h index 1641440837af..aeb03a57240f 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h +++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h @@ -9,10 +9,10 @@ #ifndef _ROCKCHIP_DRM_DRV_H #define _ROCKCHIP_DRM_DRV_H -#include <drm/drm_fb_helper.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_gem.h> +#include <linux/i2c.h> #include <linux/module.h> #include <linux/component.h> diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c index 409eaa1bf092..cfe8b793d344 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c @@ -9,7 +9,6 @@ #include <drm/drm.h> #include <drm/drm_atomic.h> #include <drm/drm_damage_helper.h> -#include <drm/drm_fb_helper.h> #include <drm/drm_fourcc.h> #include <drm/drm_framebuffer.h> #include <drm/drm_gem_framebuffer_helper.h> @@ -72,7 +71,6 @@ rockchip_fb_create(struct drm_device *dev, struct drm_file *file, static const struct drm_mode_config_funcs rockchip_drm_mode_config_funcs = { .fb_create = rockchip_fb_create, - .output_poll_changed = drm_fb_helper_output_poll_changed, 
.atomic_check = drm_atomic_helper_check, .atomic_commit = drm_atomic_helper_commit, }; diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c index 614e97aaac80..bf1120e0f573 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c @@ -9,6 +9,7 @@ #include <linux/vmalloc.h> #include <drm/drm.h> +#include <drm/drm_fb_helper.h> #include <drm/drm_gem.h> #include <drm/drm_gem_dma_helper.h> #include <drm/drm_prime.h> diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c index c97bc1149663..fe09e5be79bd 100644 --- a/drivers/gpu/drm/scheduler/sched_entity.c +++ b/drivers/gpu/drm/scheduler/sched_entity.c @@ -140,6 +140,73 @@ bool drm_sched_entity_is_ready(struct drm_sched_entity *entity) return true; } +static void drm_sched_entity_kill_jobs_work(struct work_struct *wrk) +{ + struct drm_sched_job *job = container_of(wrk, typeof(*job), work); + + drm_sched_fence_finished(job->s_fence); + WARN_ON(job->s_fence->parent); + job->sched->ops->free_job(job); +} + +/* Signal the scheduler finished fence when the entity in question is killed. */ +static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f, + struct dma_fence_cb *cb) +{ + struct drm_sched_job *job = container_of(cb, struct drm_sched_job, + finish_cb); + int r; + + dma_fence_put(f); + + /* Wait for all dependencies to avoid data corruptions */ + while (!xa_empty(&job->dependencies)) { + f = xa_erase(&job->dependencies, job->last_dependency++); + r = dma_fence_add_callback(f, &job->finish_cb, + drm_sched_entity_kill_jobs_cb); + if (!r) + return; + + dma_fence_put(f); + } + + INIT_WORK(&job->work, drm_sched_entity_kill_jobs_work); + schedule_work(&job->work); +} + +/* Remove the entity from the scheduler and kill all pending jobs */ +static void drm_sched_entity_kill(struct drm_sched_entity *entity) +{ + struct drm_sched_job *job; + struct dma_fence *prev; + + if (!entity->rq) + return; + + spin_lock(&entity->rq_lock); + entity->stopped = true; + drm_sched_rq_remove_entity(entity->rq, entity); + spin_unlock(&entity->rq_lock); + + /* Make sure this entity is not used by the scheduler at the moment */ + wait_for_completion(&entity->entity_idle); + + prev = dma_fence_get(entity->last_scheduled); + while ((job = to_drm_sched_job(spsc_queue_pop(&entity->job_queue)))) { + struct drm_sched_fence *s_fence = job->s_fence; + + dma_fence_set_error(&s_fence->finished, -ESRCH); + + dma_fence_get(&s_fence->finished); + if (!prev || dma_fence_add_callback(prev, &job->finish_cb, + drm_sched_entity_kill_jobs_cb)) + drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb); + + prev = &s_fence->finished; + } + dma_fence_put(prev); +} + /** * drm_sched_entity_flush - Flush a context entity * @@ -180,91 +247,13 @@ long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout) /* For killed process disable any more IBs enqueue right now */ last_user = cmpxchg(&entity->last_user, current->group_leader, NULL); if ((!last_user || last_user == current->group_leader) && - (current->flags & PF_EXITING) && (current->exit_code == SIGKILL)) { - spin_lock(&entity->rq_lock); - entity->stopped = true; - drm_sched_rq_remove_entity(entity->rq, entity); - spin_unlock(&entity->rq_lock); - } + (current->flags & PF_EXITING) && (current->exit_code == SIGKILL)) + drm_sched_entity_kill(entity); return ret; } EXPORT_SYMBOL(drm_sched_entity_flush); -static void drm_sched_entity_kill_jobs_work(struct work_struct *wrk) -{ - struct 
drm_sched_job *job = container_of(wrk, typeof(*job), work); - - drm_sched_fence_finished(job->s_fence); - WARN_ON(job->s_fence->parent); - job->sched->ops->free_job(job); -} - - -/* Signal the scheduler finished fence when the entity in question is killed. */ -static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f, - struct dma_fence_cb *cb) -{ - struct drm_sched_job *job = container_of(cb, struct drm_sched_job, - finish_cb); - - dma_fence_put(f); - INIT_WORK(&job->work, drm_sched_entity_kill_jobs_work); - schedule_work(&job->work); -} - -static struct dma_fence * -drm_sched_job_dependency(struct drm_sched_job *job, - struct drm_sched_entity *entity) -{ - if (!xa_empty(&job->dependencies)) - return xa_erase(&job->dependencies, job->last_dependency++); - - if (job->sched->ops->dependency) - return job->sched->ops->dependency(job, entity); - - return NULL; -} - -static void drm_sched_entity_kill_jobs(struct drm_sched_entity *entity) -{ - struct drm_sched_job *job; - struct dma_fence *f; - int r; - - while ((job = to_drm_sched_job(spsc_queue_pop(&entity->job_queue)))) { - struct drm_sched_fence *s_fence = job->s_fence; - - /* Wait for all dependencies to avoid data corruptions */ - while ((f = drm_sched_job_dependency(job, entity))) { - dma_fence_wait(f, false); - dma_fence_put(f); - } - - drm_sched_fence_scheduled(s_fence); - dma_fence_set_error(&s_fence->finished, -ESRCH); - - /* - * When pipe is hanged by older entity, new entity might - * not even have chance to submit it's first job to HW - * and so entity->last_scheduled will remain NULL - */ - if (!entity->last_scheduled) { - drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb); - continue; - } - - dma_fence_get(entity->last_scheduled); - r = dma_fence_add_callback(entity->last_scheduled, - &job->finish_cb, - drm_sched_entity_kill_jobs_cb); - if (r == -ENOENT) - drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb); - else if (r) - DRM_ERROR("fence add callback failed (%d)\n", r); - } -} - /** * drm_sched_entity_fini - Destroy a context entity * @@ -278,33 +267,17 @@ static void drm_sched_entity_kill_jobs(struct drm_sched_entity *entity) */ void drm_sched_entity_fini(struct drm_sched_entity *entity) { - struct drm_gpu_scheduler *sched = NULL; - - if (entity->rq) { - sched = entity->rq->sched; - drm_sched_rq_remove_entity(entity->rq, entity); - } - - /* Consumption of existing IBs wasn't completed. Forcefully - * remove them here. + /* + * If consumption of existing IBs wasn't completed. Forcefully remove + * them here. Also makes sure that the scheduler won't touch this entity + * any more. */ - if (spsc_queue_count(&entity->job_queue)) { - if (sched) { - /* - * Wait for thread to idle to make sure it isn't processing - * this entity. 
- */ - wait_for_completion(&entity->entity_idle); + drm_sched_entity_kill(entity); - } - if (entity->dependency) { - dma_fence_remove_callback(entity->dependency, - &entity->cb); - dma_fence_put(entity->dependency); - entity->dependency = NULL; - } - - drm_sched_entity_kill_jobs(entity); + if (entity->dependency) { + dma_fence_remove_callback(entity->dependency, &entity->cb); + dma_fence_put(entity->dependency); + entity->dependency = NULL; } dma_fence_put(entity->last_scheduled); @@ -417,6 +390,19 @@ static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity) return false; } +static struct dma_fence * +drm_sched_job_dependency(struct drm_sched_job *job, + struct drm_sched_entity *entity) +{ + if (!xa_empty(&job->dependencies)) + return xa_erase(&job->dependencies, job->last_dependency++); + + if (job->sched->ops->prepare_job) + return job->sched->ops->prepare_job(job, entity); + + return NULL; +} + struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity) { struct drm_sched_job *sched_job; diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c index 0150ac7fbf40..31f3a1267be4 100644 --- a/drivers/gpu/drm/scheduler/sched_main.c +++ b/drivers/gpu/drm/scheduler/sched_main.c @@ -286,32 +286,6 @@ static void drm_sched_job_done_cb(struct dma_fence *f, struct dma_fence_cb *cb) } /** - * drm_sched_dependency_optimized - test if the dependency can be optimized - * - * @fence: the dependency fence - * @entity: the entity which depends on the above fence - * - * Returns true if the dependency can be optimized and false otherwise - */ -bool drm_sched_dependency_optimized(struct dma_fence* fence, - struct drm_sched_entity *entity) -{ - struct drm_gpu_scheduler *sched = entity->rq->sched; - struct drm_sched_fence *s_fence; - - if (!fence || dma_fence_is_signaled(fence)) - return false; - if (fence->context == entity->fence_context) - return true; - s_fence = to_drm_sched_fence(fence); - if (s_fence && s_fence->sched == sched) - return true; - - return false; -} -EXPORT_SYMBOL(drm_sched_dependency_optimized); - -/** * drm_sched_start_timeout - start timeout for reset worker * * @sched: scheduler instance to start the worker for @@ -734,32 +708,28 @@ int drm_sched_job_add_dependency(struct drm_sched_job *job, EXPORT_SYMBOL(drm_sched_job_add_dependency); /** - * drm_sched_job_add_implicit_dependencies - adds implicit dependencies as job - * dependencies + * drm_sched_job_add_resv_dependencies - add all fences from the resv to the job * @job: scheduler job to add the dependencies to - * @obj: the gem object to add new dependencies from. - * @write: whether the job might write the object (so we need to depend on - * shared fences in the reservation object). + * @resv: the dma_resv object to get the fences from + * @usage: the dma_resv_usage to use to filter the fences * - * This should be called after drm_gem_lock_reservations() on your array of - * GEM objects used in the job but before updating the reservations with your - * own fences. + * This adds all fences matching the given usage from @resv to @job. + * Must be called with the @resv lock held. * * Returns: * 0 on success, or an error on failing to expand the array. 
*/ -int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job, - struct drm_gem_object *obj, - bool write) +int drm_sched_job_add_resv_dependencies(struct drm_sched_job *job, + struct dma_resv *resv, + enum dma_resv_usage usage) { struct dma_resv_iter cursor; struct dma_fence *fence; int ret; - dma_resv_assert_held(obj->resv); + dma_resv_assert_held(resv); - dma_resv_for_each_fence(&cursor, obj->resv, dma_resv_usage_rw(write), - fence) { + dma_resv_for_each_fence(&cursor, resv, usage, fence) { /* Make sure to grab an additional ref on the added fence */ dma_fence_get(fence); ret = drm_sched_job_add_dependency(job, fence); @@ -770,8 +740,31 @@ int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job, } return 0; } -EXPORT_SYMBOL(drm_sched_job_add_implicit_dependencies); +EXPORT_SYMBOL(drm_sched_job_add_resv_dependencies); +/** + * drm_sched_job_add_implicit_dependencies - adds implicit dependencies as job + * dependencies + * @job: scheduler job to add the dependencies to + * @obj: the gem object to add new dependencies from. + * @write: whether the job might write the object (so we need to depend on + * shared fences in the reservation object). + * + * This should be called after drm_gem_lock_reservations() on your array of + * GEM objects used in the job but before updating the reservations with your + * own fences. + * + * Returns: + * 0 on success, or an error on failing to expand the array. + */ +int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job, + struct drm_gem_object *obj, + bool write) +{ + return drm_sched_job_add_resv_dependencies(job, obj->resv, + dma_resv_usage_rw(write)); +} +EXPORT_SYMBOL(drm_sched_job_add_implicit_dependencies); /** * drm_sched_job_cleanup - clean up scheduler job resources diff --git a/drivers/gpu/drm/solomon/ssd130x.c b/drivers/gpu/drm/solomon/ssd130x.c index f2795f90ea69..53464afc2b9a 100644 --- a/drivers/gpu/drm/solomon/ssd130x.c +++ b/drivers/gpu/drm/solomon/ssd130x.c @@ -23,7 +23,7 @@ #include <drm/drm_crtc_helper.h> #include <drm/drm_damage_helper.h> #include <drm/drm_edid.h> -#include <drm/drm_fb_helper.h> +#include <drm/drm_fbdev_generic.h> #include <drm/drm_format_helper.h> #include <drm/drm_framebuffer.h> #include <drm/drm_gem_atomic_helper.h> diff --git a/drivers/gpu/drm/sti/sti_drv.c b/drivers/gpu/drm/sti/sti_drv.c index 7abf010a3293..ef6a4e63198f 100644 --- a/drivers/gpu/drm/sti/sti_drv.c +++ b/drivers/gpu/drm/sti/sti_drv.c @@ -14,7 +14,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_debugfs.h> #include <drm/drm_drv.h> -#include <drm/drm_fb_helper.h> +#include <drm/drm_fbdev_generic.h> #include <drm/drm_gem_dma_helper.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_of.h> diff --git a/drivers/gpu/drm/sti/sti_dvo.c b/drivers/gpu/drm/sti/sti_dvo.c index b6ee8a82e656..f3a5616b7daf 100644 --- a/drivers/gpu/drm/sti/sti_dvo.c +++ b/drivers/gpu/drm/sti/sti_dvo.c @@ -288,7 +288,7 @@ static void sti_dvo_set_mode(struct drm_bridge *bridge, DRM_DEBUG_DRIVER("\n"); - memcpy(&dvo->mode, mode, sizeof(struct drm_display_mode)); + drm_mode_copy(&dvo->mode, mode); /* According to the path used (main or aux), the dvo clocks should * have a different parent clock. 
*/ diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c index 03cc401ed593..ec6656b9ee7c 100644 --- a/drivers/gpu/drm/sti/sti_hda.c +++ b/drivers/gpu/drm/sti/sti_hda.c @@ -524,7 +524,7 @@ static void sti_hda_set_mode(struct drm_bridge *bridge, DRM_DEBUG_DRIVER("\n"); - memcpy(&hda->mode, mode, sizeof(struct drm_display_mode)); + drm_mode_copy(&hda->mode, mode); if (!hda_get_mode_idx(hda->mode, &mode_idx)) { DRM_ERROR("Undefined mode\n"); diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c index cb82622877d2..fcc2194869d6 100644 --- a/drivers/gpu/drm/sti/sti_hdmi.c +++ b/drivers/gpu/drm/sti/sti_hdmi.c @@ -941,7 +941,7 @@ static void sti_hdmi_set_mode(struct drm_bridge *bridge, DRM_DEBUG_DRIVER("\n"); /* Copy the drm display mode in the connector local structure */ - memcpy(&hdmi->mode, mode, sizeof(struct drm_display_mode)); + drm_mode_copy(&hdmi->mode, mode); /* Update clock framerate according to the selected mode */ ret = clk_set_rate(hdmi->clk_pix, mode->clock * 1000); diff --git a/drivers/gpu/drm/stm/drv.c b/drivers/gpu/drm/stm/drv.c index d7914f5122df..50410bd99dfe 100644 --- a/drivers/gpu/drm/stm/drv.c +++ b/drivers/gpu/drm/stm/drv.c @@ -18,7 +18,7 @@ #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_drv.h> -#include <drm/drm_fb_helper.h> +#include <drm/drm_fbdev_generic.h> #include <drm/drm_gem_dma_helper.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_module.h> diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c index d06ffd99d86e..cc94efbbf2d4 100644 --- a/drivers/gpu/drm/sun4i/sun4i_drv.c +++ b/drivers/gpu/drm/sun4i/sun4i_drv.c @@ -17,7 +17,7 @@ #include <drm/drm_aperture.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_drv.h> -#include <drm/drm_fb_helper.h> +#include <drm/drm_fbdev_generic.h> #include <drm/drm_gem_dma_helper.h> #include <drm/drm_module.h> #include <drm/drm_of.h> diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c index 34234a144e87..760ff05eabf4 100644 --- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c +++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c @@ -1101,12 +1101,16 @@ static const struct component_ops sun6i_dsi_ops = { static int sun6i_dsi_probe(struct platform_device *pdev) { + const struct sun6i_dsi_variant *variant; struct device *dev = &pdev->dev; - const char *bus_clk_name = NULL; struct sun6i_dsi *dsi; void __iomem *base; int ret; + variant = device_get_match_data(dev); + if (!variant) + return -EINVAL; + dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL); if (!dsi) return -ENOMEM; @@ -1114,10 +1118,7 @@ static int sun6i_dsi_probe(struct platform_device *pdev) dsi->dev = dev; dsi->host.ops = &sun6i_dsi_host_ops; dsi->host.dev = dev; - - if (of_device_is_compatible(dev->of_node, - "allwinner,sun6i-a31-mipi-dsi")) - bus_clk_name = "bus"; + dsi->variant = variant; base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(base)) { @@ -1142,7 +1143,7 @@ static int sun6i_dsi_probe(struct platform_device *pdev) return PTR_ERR(dsi->regs); } - dsi->bus_clk = devm_clk_get(dev, bus_clk_name); + dsi->bus_clk = devm_clk_get(dev, variant->has_mod_clk ? 
"bus" : NULL); if (IS_ERR(dsi->bus_clk)) return dev_err_probe(dev, PTR_ERR(dsi->bus_clk), "Couldn't get the DSI bus clock\n"); @@ -1151,21 +1152,21 @@ static int sun6i_dsi_probe(struct platform_device *pdev) if (ret) return ret; - if (of_device_is_compatible(dev->of_node, - "allwinner,sun6i-a31-mipi-dsi")) { + if (variant->has_mod_clk) { dsi->mod_clk = devm_clk_get(dev, "mod"); if (IS_ERR(dsi->mod_clk)) { dev_err(dev, "Couldn't get the DSI mod clock\n"); ret = PTR_ERR(dsi->mod_clk); goto err_attach_clk; } - } - /* - * In order to operate properly, that clock seems to be always - * set to 297MHz. - */ - clk_set_rate_exclusive(dsi->mod_clk, 297000000); + /* + * In order to operate properly, the module clock on the + * A31 variant always seems to be set to 297MHz. + */ + if (variant->set_mod_clk) + clk_set_rate_exclusive(dsi->mod_clk, 297000000); + } dsi->dphy = devm_phy_get(dev, "dphy"); if (IS_ERR(dsi->dphy)) { @@ -1191,7 +1192,8 @@ static int sun6i_dsi_probe(struct platform_device *pdev) err_remove_dsi_host: mipi_dsi_host_unregister(&dsi->host); err_unprotect_clk: - clk_rate_exclusive_put(dsi->mod_clk); + if (dsi->variant->has_mod_clk && dsi->variant->set_mod_clk) + clk_rate_exclusive_put(dsi->mod_clk); err_attach_clk: regmap_mmio_detach_clk(dsi->regs); @@ -1205,16 +1207,39 @@ static int sun6i_dsi_remove(struct platform_device *pdev) component_del(&pdev->dev, &sun6i_dsi_ops); mipi_dsi_host_unregister(&dsi->host); - clk_rate_exclusive_put(dsi->mod_clk); + if (dsi->variant->has_mod_clk && dsi->variant->set_mod_clk) + clk_rate_exclusive_put(dsi->mod_clk); regmap_mmio_detach_clk(dsi->regs); return 0; } +static const struct sun6i_dsi_variant sun6i_a31_mipi_dsi_variant = { + .has_mod_clk = true, + .set_mod_clk = true, +}; + +static const struct sun6i_dsi_variant sun50i_a64_mipi_dsi_variant = { +}; + +static const struct sun6i_dsi_variant sun50i_a100_mipi_dsi_variant = { + .has_mod_clk = true, +}; + static const struct of_device_id sun6i_dsi_of_table[] = { - { .compatible = "allwinner,sun6i-a31-mipi-dsi" }, - { .compatible = "allwinner,sun50i-a64-mipi-dsi" }, + { + .compatible = "allwinner,sun6i-a31-mipi-dsi", + .data = &sun6i_a31_mipi_dsi_variant, + }, + { + .compatible = "allwinner,sun50i-a64-mipi-dsi", + .data = &sun50i_a64_mipi_dsi_variant, + }, + { + .compatible = "allwinner,sun50i-a100-mipi-dsi", + .data = &sun50i_a100_mipi_dsi_variant, + }, { } }; MODULE_DEVICE_TABLE(of, sun6i_dsi_of_table); diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h index c863900ae3b4..f1ddefe0f554 100644 --- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h +++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h @@ -15,6 +15,11 @@ #define SUN6I_DSI_TCON_DIV 4 +struct sun6i_dsi_variant { + bool has_mod_clk; + bool set_mod_clk; +}; + struct sun6i_dsi { struct drm_connector connector; struct drm_encoder encoder; @@ -31,6 +36,8 @@ struct sun6i_dsi { struct mipi_dsi_device *device; struct drm_device *drm; struct drm_panel *panel; + + const struct sun6i_dsi_variant *variant; }; static inline struct sun6i_dsi *host_to_sun6i_dsi(struct mipi_dsi_host *host) diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c index bce71c0ccc9e..a900300ae5bd 100644 --- a/drivers/gpu/drm/tegra/fb.c +++ b/drivers/gpu/drm/tegra/fb.c @@ -206,6 +206,8 @@ static int tegra_fb_mmap(struct fb_info *info, struct vm_area_struct *vma) static const struct fb_ops tegra_fb_ops = { .owner = THIS_MODULE, DRM_FB_HELPER_DEFAULT_OPS, + .fb_read = drm_fb_helper_sys_read, + .fb_write = drm_fb_helper_sys_write, .fb_fillrect 
= drm_fb_helper_sys_fillrect, .fb_copyarea = drm_fb_helper_sys_copyarea, .fb_imageblit = drm_fb_helper_sys_imageblit, @@ -243,7 +245,7 @@ static int tegra_fbdev_probe(struct drm_fb_helper *helper, if (IS_ERR(bo)) return PTR_ERR(bo); - info = drm_fb_helper_alloc_fbi(helper); + info = drm_fb_helper_alloc_info(helper); if (IS_ERR(info)) { dev_err(drm->dev, "failed to allocate framebuffer info\n"); drm_gem_object_put(&bo->gem); @@ -261,7 +263,7 @@ static int tegra_fbdev_probe(struct drm_fb_helper *helper, fb = fbdev->fb; helper->fb = fb; - helper->fbdev = info; + helper->info = info; info->fbops = &tegra_fb_ops; @@ -347,7 +349,7 @@ fini: static void tegra_fbdev_exit(struct tegra_fbdev *fbdev) { - drm_fb_helper_unregister_fbi(&fbdev->base); + drm_fb_helper_unregister_info(&fbdev->base); if (fbdev->fb) { struct tegra_bo *bo = tegra_fb_get_plane(fbdev->fb, 0); diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c index b09b8ab40ae4..979e7bc902f6 100644 --- a/drivers/gpu/drm/tegra/gem.c +++ b/drivers/gpu/drm/tegra/gem.c @@ -694,6 +694,8 @@ static int tegra_gem_prime_mmap(struct dma_buf *buf, struct vm_area_struct *vma) struct drm_gem_object *gem = buf->priv; int err; + dma_resv_assert_held(buf->resv); + err = drm_gem_mmap_obj(gem, gem->size, vma); if (err < 0) return err; diff --git a/drivers/gpu/drm/tests/Makefile b/drivers/gpu/drm/tests/Makefile index 2d9f49b62ecb..b29ef1085cad 100644 --- a/drivers/gpu/drm/tests/Makefile +++ b/drivers/gpu/drm/tests/Makefile @@ -8,6 +8,7 @@ obj-$(CONFIG_DRM_KUNIT_TEST) += \ drm_format_helper_test.o \ drm_format_test.o \ drm_framebuffer_test.o \ + drm_kunit_helpers.o \ drm_mm_test.o \ drm_plane_helper_test.o \ drm_rect_test.o diff --git a/drivers/gpu/drm/tests/drm_client_modeset_test.c b/drivers/gpu/drm/tests/drm_client_modeset_test.c new file mode 100644 index 000000000000..362a5fbd82f5 --- /dev/null +++ b/drivers/gpu/drm/tests/drm_client_modeset_test.c @@ -0,0 +1,98 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2022 Maxime Ripard <mripard@kernel.org> + */ + +#include <kunit/test.h> + +#include <drm/drm_connector.h> +#include <drm/drm_edid.h> +#include <drm/drm_drv.h> +#include <drm/drm_modes.h> +#include <drm/drm_modeset_helper_vtables.h> +#include <drm/drm_probe_helper.h> + +#include "drm_kunit_helpers.h" + +struct drm_client_modeset_test_priv { + struct drm_device *drm; + struct drm_connector connector; +}; + +static int drm_client_modeset_connector_get_modes(struct drm_connector *connector) +{ + return drm_add_modes_noedid(connector, 1920, 1200); +} + +static const struct drm_connector_helper_funcs drm_client_modeset_connector_helper_funcs = { + .get_modes = drm_client_modeset_connector_get_modes, +}; + +static const struct drm_connector_funcs drm_client_modeset_connector_funcs = { +}; + +static int drm_client_modeset_test_init(struct kunit *test) +{ + struct drm_client_modeset_test_priv *priv; + int ret; + + priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL); + KUNIT_ASSERT_NOT_NULL(test, priv); + + test->priv = priv; + + priv->drm = drm_kunit_device_init(test, DRIVER_MODESET, "drm-client-modeset-test"); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv->drm); + + ret = drmm_connector_init(priv->drm, &priv->connector, + &drm_client_modeset_connector_funcs, + DRM_MODE_CONNECTOR_Unknown, + NULL); + KUNIT_ASSERT_EQ(test, ret, 0); + + drm_connector_helper_add(&priv->connector, &drm_client_modeset_connector_helper_funcs); + + return 0; +} + +static void drm_test_pick_cmdline_res_1920_1080_60(struct kunit *test) +{ + struct 
drm_client_modeset_test_priv *priv = test->priv; + struct drm_device *drm = priv->drm; + struct drm_connector *connector = &priv->connector; + struct drm_cmdline_mode *cmdline_mode = &connector->cmdline_mode; + struct drm_display_mode *expected_mode, *mode; + const char *cmdline = "1920x1080@60"; + int ret; + + expected_mode = drm_mode_find_dmt(priv->drm, 1920, 1080, 60, false); + KUNIT_ASSERT_NOT_NULL(test, expected_mode); + + KUNIT_ASSERT_TRUE(test, + drm_mode_parse_command_line_for_connector(cmdline, + connector, + cmdline_mode)); + + mutex_lock(&drm->mode_config.mutex); + ret = drm_helper_probe_single_connector_modes(connector, 1920, 1080); + mutex_unlock(&drm->mode_config.mutex); + KUNIT_ASSERT_GT(test, ret, 0); + + mode = drm_connector_pick_cmdline_mode(connector); + KUNIT_ASSERT_NOT_NULL(test, mode); + + KUNIT_EXPECT_TRUE(test, drm_mode_equal(expected_mode, mode)); +} + +static struct kunit_case drm_test_pick_cmdline_tests[] = { + KUNIT_CASE(drm_test_pick_cmdline_res_1920_1080_60), + {} +}; + +static struct kunit_suite drm_test_pick_cmdline_test_suite = { + .name = "drm_test_pick_cmdline", + .init = drm_client_modeset_test_init, + .test_cases = drm_test_pick_cmdline_tests +}; + +kunit_test_suite(drm_test_pick_cmdline_test_suite); diff --git a/drivers/gpu/drm/tests/drm_kunit_helpers.c b/drivers/gpu/drm/tests/drm_kunit_helpers.c new file mode 100644 index 000000000000..f1662091f250 --- /dev/null +++ b/drivers/gpu/drm/tests/drm_kunit_helpers.c @@ -0,0 +1,71 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <drm/drm_drv.h> +#include <drm/drm_managed.h> + +#include <kunit/resource.h> + +#include <linux/device.h> + +#include "drm_kunit_helpers.h" + +struct kunit_dev { + struct drm_device base; +}; + +static const struct drm_mode_config_funcs drm_mode_config_funcs = { +}; + +static int dev_init(struct kunit_resource *res, void *ptr) +{ + char *name = ptr; + struct device *dev; + + dev = root_device_register(name); + if (IS_ERR(dev)) + return PTR_ERR(dev); + + res->data = dev; + return 0; +} + +static void dev_free(struct kunit_resource *res) +{ + struct device *dev = res->data; + + root_device_unregister(dev); +} + +struct drm_device *drm_kunit_device_init(struct kunit *test, u32 features, char *name) +{ + struct kunit_dev *kdev; + struct drm_device *drm; + struct drm_driver *driver; + struct device *dev; + int ret; + + dev = kunit_alloc_resource(test, dev_init, dev_free, GFP_KERNEL, name); + if (!dev) + return ERR_PTR(-ENOMEM); + + driver = kunit_kzalloc(test, sizeof(*driver), GFP_KERNEL); + if (!driver) + return ERR_PTR(-ENOMEM); + + driver->driver_features = features; + kdev = devm_drm_dev_alloc(dev, driver, struct kunit_dev, base); + if (IS_ERR(kdev)) + return ERR_CAST(kdev); + + drm = &kdev->base; + drm->mode_config.funcs = &drm_mode_config_funcs; + + ret = drmm_mode_config_init(drm); + if (ret) + return ERR_PTR(ret); + + return drm; +} + +MODULE_AUTHOR("Maxime Ripard <maxime@cerno.tech>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/tests/drm_kunit_helpers.h b/drivers/gpu/drm/tests/drm_kunit_helpers.h new file mode 100644 index 000000000000..20ab6eec4c89 --- /dev/null +++ b/drivers/gpu/drm/tests/drm_kunit_helpers.h @@ -0,0 +1,11 @@ +// SPDX-License-Identifier: GPL-2.0 + +#ifndef DRM_KUNIT_HELPERS_H_ +#define DRM_KUNIT_HELPERS_H_ + +struct drm_device; +struct kunit; + +struct drm_device *drm_kunit_device_init(struct kunit *test, u32 features, char *name); + +#endif // DRM_KUNIT_HELPERS_H_ diff --git a/drivers/gpu/drm/tidss/tidss_drv.c b/drivers/gpu/drm/tidss/tidss_drv.c 
index 15cd9b91b7e2..07d94b1e8089 100644 --- a/drivers/gpu/drm/tidss/tidss_drv.c +++ b/drivers/gpu/drm/tidss/tidss_drv.c @@ -14,7 +14,7 @@ #include <drm/drm_crtc.h> #include <drm/drm_crtc_helper.h> #include <drm/drm_drv.h> -#include <drm/drm_fb_helper.h> +#include <drm/drm_fbdev_generic.h> #include <drm/drm_gem_dma_helper.h> #include <drm/drm_managed.h> #include <drm/drm_module.h> diff --git a/drivers/gpu/drm/tidss/tidss_kms.c b/drivers/gpu/drm/tidss/tidss_kms.c index afb2879980c6..345bcc3011e4 100644 --- a/drivers/gpu/drm/tidss/tidss_kms.c +++ b/drivers/gpu/drm/tidss/tidss_kms.c @@ -10,7 +10,6 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_bridge.h> #include <drm/drm_crtc_helper.h> -#include <drm/drm_fb_helper.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_of.h> #include <drm/drm_panel.h> diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c index f72755b8ea14..80615ecdae0b 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c @@ -16,7 +16,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_debugfs.h> #include <drm/drm_drv.h> -#include <drm/drm_fb_helper.h> +#include <drm/drm_fbdev_generic.h> #include <drm/drm_fourcc.h> #include <drm/drm_gem_dma_helper.h> #include <drm/drm_gem_framebuffer_helper.h> diff --git a/drivers/gpu/drm/tiny/Kconfig b/drivers/gpu/drm/tiny/Kconfig index a300b03a3c7a..f6889f649bc1 100644 --- a/drivers/gpu/drm/tiny/Kconfig +++ b/drivers/gpu/drm/tiny/Kconfig @@ -53,7 +53,7 @@ config DRM_GM12U320 config DRM_OFDRM tristate "Open Firmware display driver" - depends on DRM && OF && (PPC || COMPILE_TEST) + depends on DRM && MMU && OF && (PPC || COMPILE_TEST) select APERTURE_HELPERS select DRM_GEM_SHMEM_HELPER select DRM_KMS_HELPER diff --git a/drivers/gpu/drm/tiny/arcpgu.c b/drivers/gpu/drm/tiny/arcpgu.c index bb302a3fd6b5..611bbee15071 100644 --- a/drivers/gpu/drm/tiny/arcpgu.c +++ b/drivers/gpu/drm/tiny/arcpgu.c @@ -12,7 +12,7 @@ #include <drm/drm_drv.h> #include <drm/drm_edid.h> #include <drm/drm_fb_dma_helper.h> -#include <drm/drm_fb_helper.h> +#include <drm/drm_fbdev_generic.h> #include <drm/drm_fourcc.h> #include <drm/drm_framebuffer.h> #include <drm/drm_gem_dma_helper.h> diff --git a/drivers/gpu/drm/tiny/bochs.c b/drivers/gpu/drm/tiny/bochs.c index 04682f831544..024346054c70 100644 --- a/drivers/gpu/drm/tiny/bochs.c +++ b/drivers/gpu/drm/tiny/bochs.c @@ -7,7 +7,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_drv.h> #include <drm/drm_edid.h> -#include <drm/drm_fb_helper.h> +#include <drm/drm_fbdev_generic.h> #include <drm/drm_fourcc.h> #include <drm/drm_framebuffer.h> #include <drm/drm_gem_framebuffer_helper.h> diff --git a/drivers/gpu/drm/tiny/cirrus.c b/drivers/gpu/drm/tiny/cirrus.c index 354d5e854a6f..678c2ef1cae7 100644 --- a/drivers/gpu/drm/tiny/cirrus.c +++ b/drivers/gpu/drm/tiny/cirrus.c @@ -30,7 +30,7 @@ #include <drm/drm_damage_helper.h> #include <drm/drm_drv.h> #include <drm/drm_edid.h> -#include <drm/drm_fb_helper.h> +#include <drm/drm_fbdev_generic.h> #include <drm/drm_file.h> #include <drm/drm_format_helper.h> #include <drm/drm_fourcc.h> diff --git a/drivers/gpu/drm/tiny/gm12u320.c b/drivers/gpu/drm/tiny/gm12u320.c index 7441d992a5d7..130fd07a967d 100644 --- a/drivers/gpu/drm/tiny/gm12u320.c +++ b/drivers/gpu/drm/tiny/gm12u320.c @@ -12,7 +12,7 @@ #include <drm/drm_damage_helper.h> #include <drm/drm_drv.h> #include <drm/drm_edid.h> -#include <drm/drm_fb_helper.h> +#include <drm/drm_fbdev_generic.h> #include <drm/drm_file.h> #include 
<drm/drm_format_helper.h> #include <drm/drm_fourcc.h> diff --git a/drivers/gpu/drm/tiny/hx8357d.c b/drivers/gpu/drm/tiny/hx8357d.c index 48c24aa8c28a..9f634f720817 100644 --- a/drivers/gpu/drm/tiny/hx8357d.c +++ b/drivers/gpu/drm/tiny/hx8357d.c @@ -18,7 +18,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_drv.h> -#include <drm/drm_fb_helper.h> +#include <drm/drm_fbdev_generic.h> #include <drm/drm_gem_atomic_helper.h> #include <drm/drm_gem_dma_helper.h> #include <drm/drm_managed.h> diff --git a/drivers/gpu/drm/tiny/ili9163.c b/drivers/gpu/drm/tiny/ili9163.c index 9a1a5943bee0..ca0451f79962 100644 --- a/drivers/gpu/drm/tiny/ili9163.c +++ b/drivers/gpu/drm/tiny/ili9163.c @@ -9,7 +9,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_drv.h> -#include <drm/drm_fb_helper.h> +#include <drm/drm_fbdev_generic.h> #include <drm/drm_gem_atomic_helper.h> #include <drm/drm_gem_dma_helper.h> #include <drm/drm_mipi_dbi.h> diff --git a/drivers/gpu/drm/tiny/ili9225.c b/drivers/gpu/drm/tiny/ili9225.c index a79da2b4af64..815bab285823 100644 --- a/drivers/gpu/drm/tiny/ili9225.c +++ b/drivers/gpu/drm/tiny/ili9225.c @@ -20,7 +20,7 @@ #include <drm/drm_damage_helper.h> #include <drm/drm_drv.h> #include <drm/drm_fb_dma_helper.h> -#include <drm/drm_fb_helper.h> +#include <drm/drm_fbdev_generic.h> #include <drm/drm_fourcc.h> #include <drm/drm_framebuffer.h> #include <drm/drm_gem_atomic_helper.h> diff --git a/drivers/gpu/drm/tiny/ili9341.c b/drivers/gpu/drm/tiny/ili9341.c index 69b265e78096..420f6005a956 100644 --- a/drivers/gpu/drm/tiny/ili9341.c +++ b/drivers/gpu/drm/tiny/ili9341.c @@ -17,7 +17,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_drv.h> -#include <drm/drm_fb_helper.h> +#include <drm/drm_fbdev_generic.h> #include <drm/drm_gem_atomic_helper.h> #include <drm/drm_gem_dma_helper.h> #include <drm/drm_managed.h> diff --git a/drivers/gpu/drm/tiny/ili9486.c b/drivers/gpu/drm/tiny/ili9486.c index c80028bb1d11..1bb847466b10 100644 --- a/drivers/gpu/drm/tiny/ili9486.c +++ b/drivers/gpu/drm/tiny/ili9486.c @@ -16,7 +16,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_drv.h> -#include <drm/drm_fb_helper.h> +#include <drm/drm_fbdev_generic.h> #include <drm/drm_gem_atomic_helper.h> #include <drm/drm_gem_dma_helper.h> #include <drm/drm_managed.h> diff --git a/drivers/gpu/drm/tiny/mi0283qt.c b/drivers/gpu/drm/tiny/mi0283qt.c index bc522fb3d94d..47df2b5a3048 100644 --- a/drivers/gpu/drm/tiny/mi0283qt.c +++ b/drivers/gpu/drm/tiny/mi0283qt.c @@ -15,7 +15,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_drv.h> -#include <drm/drm_fb_helper.h> +#include <drm/drm_fbdev_generic.h> #include <drm/drm_gem_atomic_helper.h> #include <drm/drm_gem_dma_helper.h> #include <drm/drm_managed.h> diff --git a/drivers/gpu/drm/tiny/ofdrm.c b/drivers/gpu/drm/tiny/ofdrm.c index 0e1cc2369afc..dc9e4d71b12a 100644 --- a/drivers/gpu/drm/tiny/ofdrm.c +++ b/drivers/gpu/drm/tiny/ofdrm.c @@ -11,7 +11,7 @@ #include <drm/drm_damage_helper.h> #include <drm/drm_device.h> #include <drm/drm_drv.h> -#include <drm/drm_fb_helper.h> +#include <drm/drm_fbdev_generic.h> #include <drm/drm_format_helper.h> #include <drm/drm_framebuffer.h> #include <drm/drm_gem_atomic_helper.h> @@ -231,7 +231,7 @@ static u64 display_get_address_of(struct drm_device *dev, struct device_node *of return address; } -static bool is_avivo(__be32 vendor, __be32 device) +static bool is_avivo(u32 vendor, u32 device) { /* This will match most R5xx */ return (vendor == PCI_VENDOR_ID_ATI) && @@ -265,8 +265,13 @@ static enum ofdrm_model 
display_get_model_of(struct drm_device *dev, struct devi of_parent = of_get_parent(of_node); vendor_p = of_get_property(of_parent, "vendor-id", NULL); device_p = of_get_property(of_parent, "device-id", NULL); - if (vendor_p && device_p && is_avivo(*vendor_p, *device_p)) - model = OFDRM_MODEL_AVIVO; + if (vendor_p && device_p) { + u32 vendor = be32_to_cpup(vendor_p); + u32 device = be32_to_cpup(device_p); + + if (is_avivo(vendor, device)) + model = OFDRM_MODEL_AVIVO; + } of_node_put(of_parent); } else if (of_device_is_compatible(of_node, "qemu,std-vga")) { model = OFDRM_MODEL_QEMU; @@ -433,21 +438,21 @@ static void __iomem *get_cmap_address_of(struct ofdrm_device *odev, struct devic if (!addr_p) addr_p = of_get_address(of_node, bar_no, &max_size, &flags); if (!addr_p) - return ERR_PTR(-ENODEV); + return IOMEM_ERR_PTR(-ENODEV); if ((flags & (IORESOURCE_IO | IORESOURCE_MEM)) == 0) - return ERR_PTR(-ENODEV); + return IOMEM_ERR_PTR(-ENODEV); if ((offset + size) >= max_size) - return ERR_PTR(-ENODEV); + return IOMEM_ERR_PTR(-ENODEV); address = of_translate_address(of_node, addr_p); if (address == OF_BAD_ADDR) - return ERR_PTR(-ENODEV); + return IOMEM_ERR_PTR(-ENODEV); mem = devm_ioremap(dev->dev, address + offset, size); if (!mem) - return ERR_PTR(-ENOMEM); + return IOMEM_ERR_PTR(-ENOMEM); return mem; } @@ -465,7 +470,7 @@ static void __iomem *ofdrm_mach64_cmap_ioremap(struct ofdrm_device *odev, cmap_base = devm_ioremap(dev->dev, address, 0x1000); if (!cmap_base) - return ERR_PTR(-ENOMEM); + return IOMEM_ERR_PTR(-ENOMEM); return cmap_base; } @@ -624,11 +629,11 @@ static void __iomem *ofdrm_qemu_cmap_ioremap(struct ofdrm_device *odev, address = of_translate_address(of_node, io_of_addr); if (address == OF_BAD_ADDR) - return ERR_PTR(-ENODEV); + return IOMEM_ERR_PTR(-ENODEV); cmap_base = devm_ioremap(dev->dev, address + 0x3c8, 2); if (!cmap_base) - return ERR_PTR(-ENOMEM); + return IOMEM_ERR_PTR(-ENOMEM); return cmap_base; } diff --git a/drivers/gpu/drm/tiny/panel-mipi-dbi.c b/drivers/gpu/drm/tiny/panel-mipi-dbi.c index 955a61d628e7..03a7d569cd56 100644 --- a/drivers/gpu/drm/tiny/panel-mipi-dbi.c +++ b/drivers/gpu/drm/tiny/panel-mipi-dbi.c @@ -16,7 +16,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_drv.h> -#include <drm/drm_fb_helper.h> +#include <drm/drm_fbdev_generic.h> #include <drm/drm_gem_atomic_helper.h> #include <drm/drm_gem_dma_helper.h> #include <drm/drm_managed.h> diff --git a/drivers/gpu/drm/tiny/repaper.c b/drivers/gpu/drm/tiny/repaper.c index e62f4d16b2c6..c2677d081a7b 100644 --- a/drivers/gpu/drm/tiny/repaper.c +++ b/drivers/gpu/drm/tiny/repaper.c @@ -26,7 +26,7 @@ #include <drm/drm_damage_helper.h> #include <drm/drm_drv.h> #include <drm/drm_fb_dma_helper.h> -#include <drm/drm_fb_helper.h> +#include <drm/drm_fbdev_generic.h> #include <drm/drm_format_helper.h> #include <drm/drm_framebuffer.h> #include <drm/drm_gem_atomic_helper.h> diff --git a/drivers/gpu/drm/tiny/simpledrm.c b/drivers/gpu/drm/tiny/simpledrm.c index cbb100753154..162eb44dcba8 100644 --- a/drivers/gpu/drm/tiny/simpledrm.c +++ b/drivers/gpu/drm/tiny/simpledrm.c @@ -15,7 +15,7 @@ #include <drm/drm_damage_helper.h> #include <drm/drm_device.h> #include <drm/drm_drv.h> -#include <drm/drm_fb_helper.h> +#include <drm/drm_fbdev_generic.h> #include <drm/drm_format_helper.h> #include <drm/drm_gem_atomic_helper.h> #include <drm/drm_gem_framebuffer_helper.h> diff --git a/drivers/gpu/drm/tiny/st7586.c b/drivers/gpu/drm/tiny/st7586.c index b6f620b902e6..ce57fa9917e5 100644 --- a/drivers/gpu/drm/tiny/st7586.c +++ 
b/drivers/gpu/drm/tiny/st7586.c @@ -16,7 +16,7 @@ #include <drm/drm_damage_helper.h> #include <drm/drm_drv.h> #include <drm/drm_fb_dma_helper.h> -#include <drm/drm_fb_helper.h> +#include <drm/drm_fbdev_generic.h> #include <drm/drm_format_helper.h> #include <drm/drm_framebuffer.h> #include <drm/drm_gem_atomic_helper.h> diff --git a/drivers/gpu/drm/tiny/st7735r.c b/drivers/gpu/drm/tiny/st7735r.c index c36ba08acda1..15d9cf283c66 100644 --- a/drivers/gpu/drm/tiny/st7735r.c +++ b/drivers/gpu/drm/tiny/st7735r.c @@ -18,7 +18,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_drv.h> -#include <drm/drm_fb_helper.h> +#include <drm/drm_fbdev_generic.h> #include <drm/drm_gem_atomic_helper.h> #include <drm/drm_gem_dma_helper.h> #include <drm/drm_managed.h> diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c index 21b61631f73a..9f6764bf3b15 100644 --- a/drivers/gpu/drm/ttm/ttm_pool.c +++ b/drivers/gpu/drm/ttm/ttm_pool.c @@ -344,6 +344,28 @@ static unsigned int ttm_pool_page_order(struct ttm_pool *pool, struct page *p) return p->private; } +/* Called when we got a page, either from a pool or newly allocated */ +static int ttm_pool_page_allocated(struct ttm_pool *pool, unsigned int order, + struct page *p, dma_addr_t **dma_addr, + unsigned long *num_pages, + struct page ***pages) +{ + unsigned int i; + int r; + + if (*dma_addr) { + r = ttm_pool_map(pool, order, p, dma_addr); + if (r) + return r; + } + + *num_pages -= 1 << order; + for (i = 1 << order; i; --i, ++(*pages), ++p) + **pages = p; + + return 0; +} + /** * ttm_pool_alloc - Fill a ttm_tt object * @@ -385,45 +407,57 @@ int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt, for (order = min_t(unsigned int, MAX_ORDER - 1, __fls(num_pages)); num_pages; order = min_t(unsigned int, order, __fls(num_pages))) { - bool apply_caching = false; struct ttm_pool_type *pt; pt = ttm_pool_select_type(pool, tt->caching, order); p = pt ? 
ttm_pool_type_take(pt) : NULL; if (p) { - apply_caching = true; - } else { - p = ttm_pool_alloc_page(pool, gfp_flags, order); - if (p && PageHighMem(p)) - apply_caching = true; - } - - if (!p) { - if (order) { - --order; - continue; - } - r = -ENOMEM; - goto error_free_all; - } - - if (apply_caching) { r = ttm_pool_apply_caching(caching, pages, tt->caching); if (r) goto error_free_page; - caching = pages + (1 << order); + + do { + r = ttm_pool_page_allocated(pool, order, p, + &dma_addr, + &num_pages, + &pages); + if (r) + goto error_free_page; + + if (num_pages < (1 << order)) + break; + + p = ttm_pool_type_take(pt); + } while (p); + caching = pages; } - if (dma_addr) { - r = ttm_pool_map(pool, order, p, &dma_addr); + while (num_pages >= (1 << order) && + (p = ttm_pool_alloc_page(pool, gfp_flags, order))) { + + if (PageHighMem(p)) { + r = ttm_pool_apply_caching(caching, pages, + tt->caching); + if (r) + goto error_free_page; + } + r = ttm_pool_page_allocated(pool, order, p, &dma_addr, + &num_pages, &pages); if (r) goto error_free_page; + if (PageHighMem(p)) + caching = pages; } - num_pages -= 1 << order; - for (i = 1 << order; i; --i) - *(pages++) = p++; + if (!p) { + if (order) { + --order; + continue; + } + r = -ENOMEM; + goto error_free_all; + } } r = ttm_pool_apply_caching(caching, pages, tt->caching); diff --git a/drivers/gpu/drm/tve200/tve200_drv.c b/drivers/gpu/drm/tve200/tve200_drv.c index 04db72e3fa9c..0d05c386d303 100644 --- a/drivers/gpu/drm/tve200/tve200_drv.c +++ b/drivers/gpu/drm/tve200/tve200_drv.c @@ -32,6 +32,7 @@ #include <linux/irq.h> #include <linux/io.h> #include <linux/module.h> +#include <linux/of.h> #include <linux/platform_device.h> #include <linux/shmem_fs.h> #include <linux/slab.h> @@ -39,7 +40,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_bridge.h> #include <drm/drm_drv.h> -#include <drm/drm_fb_helper.h> +#include <drm/drm_fbdev_generic.h> #include <drm/drm_gem_dma_helper.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_module.h> diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c index 91effdcefb6d..e81352126a0f 100644 --- a/drivers/gpu/drm/udl/udl_drv.c +++ b/drivers/gpu/drm/udl/udl_drv.c @@ -7,7 +7,7 @@ #include <drm/drm_crtc_helper.h> #include <drm/drm_drv.h> -#include <drm/drm_fb_helper.h> +#include <drm/drm_fbdev_generic.h> #include <drm/drm_file.h> #include <drm/drm_gem_shmem_helper.h> #include <drm/drm_managed.h> diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c index e8c975b81585..478f1f0f60de 100644 --- a/drivers/gpu/drm/v3d/v3d_drv.c +++ b/drivers/gpu/drm/v3d/v3d_drv.c @@ -22,7 +22,6 @@ #include <linux/reset.h> #include <drm/drm_drv.h> -#include <drm/drm_fb_helper.h> #include <drm/drm_managed.h> #include <uapi/drm/v3d_drm.h> diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c index b8980440d137..96af1cb5202a 100644 --- a/drivers/gpu/drm/v3d/v3d_gem.c +++ b/drivers/gpu/drm/v3d/v3d_gem.c @@ -10,6 +10,7 @@ #include <linux/sched/signal.h> #include <linux/uaccess.h> +#include <drm/drm_managed.h> #include <drm/drm_syncobj.h> #include <uapi/drm/v3d_drm.h> @@ -1075,10 +1076,18 @@ v3d_gem_init(struct drm_device *dev) spin_lock_init(&v3d->mm_lock); spin_lock_init(&v3d->job_lock); - mutex_init(&v3d->bo_lock); - mutex_init(&v3d->reset_lock); - mutex_init(&v3d->sched_lock); - mutex_init(&v3d->cache_clean_lock); + ret = drmm_mutex_init(dev, &v3d->bo_lock); + if (ret) + return ret; + ret = drmm_mutex_init(dev, &v3d->reset_lock); + if (ret) + return ret; + ret = 
drmm_mutex_init(dev, &v3d->sched_lock); + if (ret) + return ret; + ret = drmm_mutex_init(dev, &v3d->cache_clean_lock); + if (ret) + return ret; /* Note: We don't allocate address 0. Various bits of HW * treat 0 as special, such as the occlusion query counters diff --git a/drivers/gpu/drm/v3d/v3d_perfmon.c b/drivers/gpu/drm/v3d/v3d_perfmon.c index 48aaaa972c49..e1be7368b87d 100644 --- a/drivers/gpu/drm/v3d/v3d_perfmon.c +++ b/drivers/gpu/drm/v3d/v3d_perfmon.c @@ -17,8 +17,10 @@ void v3d_perfmon_get(struct v3d_perfmon *perfmon) void v3d_perfmon_put(struct v3d_perfmon *perfmon) { - if (perfmon && refcount_dec_and_test(&perfmon->refcnt)) + if (perfmon && refcount_dec_and_test(&perfmon->refcnt)) { + mutex_destroy(&perfmon->lock); kfree(perfmon); + } } void v3d_perfmon_start(struct v3d_dev *v3d, struct v3d_perfmon *perfmon) @@ -113,6 +115,7 @@ void v3d_perfmon_close_file(struct v3d_file_priv *v3d_priv) idr_for_each(&v3d_priv->perfmon.idr, v3d_perfmon_idr_del, NULL); idr_destroy(&v3d_priv->perfmon.idr); mutex_unlock(&v3d_priv->perfmon.lock); + mutex_destroy(&v3d_priv->perfmon.lock); } int v3d_perfmon_create_ioctl(struct drm_device *dev, void *data, @@ -154,6 +157,7 @@ int v3d_perfmon_create_ioctl(struct drm_device *dev, void *data, mutex_unlock(&v3d_priv->perfmon.lock); if (ret < 0) { + mutex_destroy(&perfmon->lock); kfree(perfmon); return ret; } diff --git a/drivers/gpu/drm/vboxvideo/vbox_drv.c b/drivers/gpu/drm/vboxvideo/vbox_drv.c index f4f2bd79a7cb..b450f449a3ab 100644 --- a/drivers/gpu/drm/vboxvideo/vbox_drv.c +++ b/drivers/gpu/drm/vboxvideo/vbox_drv.c @@ -14,7 +14,7 @@ #include <drm/drm_aperture.h> #include <drm/drm_crtc_helper.h> #include <drm/drm_drv.h> -#include <drm/drm_fb_helper.h> +#include <drm/drm_fbdev_generic.h> #include <drm/drm_file.h> #include <drm/drm_ioctl.h> #include <drm/drm_managed.h> @@ -178,8 +178,6 @@ static const struct drm_driver driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC, - .lastclose = drm_fb_helper_lastclose, - .fops = &vbox_fops, .name = DRIVER_NAME, .desc = DRIVER_DESC, diff --git a/drivers/gpu/drm/vboxvideo/vbox_main.c b/drivers/gpu/drm/vboxvideo/vbox_main.c index c9e8b3a63c62..3b83e550f4df 100644 --- a/drivers/gpu/drm/vboxvideo/vbox_main.c +++ b/drivers/gpu/drm/vboxvideo/vbox_main.c @@ -11,7 +11,6 @@ #include <linux/pci.h> #include <linux/vbox_err.h> -#include <drm/drm_fb_helper.h> #include <drm/drm_crtc_helper.h> #include <drm/drm_damage_helper.h> diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c index 2027063fdc30..b66bf7aea632 100644 --- a/drivers/gpu/drm/vc4/vc4_drv.c +++ b/drivers/gpu/drm/vc4/vc4_drv.c @@ -33,7 +33,7 @@ #include <drm/drm_aperture.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_drv.h> -#include <drm/drm_fb_helper.h> +#include <drm/drm_fbdev_generic.h> #include <drm/drm_vblank.h> #include <soc/bcm2835/raspberrypi-firmware.h> diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c index 0d78c800ed51..6b223a5fcf6f 100644 --- a/drivers/gpu/drm/vc4/vc4_hdmi.c +++ b/drivers/gpu/drm/vc4/vc4_hdmi.c @@ -124,9 +124,8 @@ static unsigned long long vc4_hdmi_encoder_compute_mode_clock(const struct drm_display_mode *mode, unsigned int bpc, enum vc4_hdmi_output_format fmt); -static bool vc4_hdmi_supports_scrambling(struct drm_encoder *encoder) +static bool vc4_hdmi_supports_scrambling(struct vc4_hdmi *vc4_hdmi) { - struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder); struct drm_display_info *display = &vc4_hdmi->connector.display_info; 
lockdep_assert_held(&vc4_hdmi->mutex); @@ -319,9 +318,8 @@ out: static int vc4_hdmi_reset_link(struct drm_connector *connector, struct drm_modeset_acquire_ctx *ctx) { - struct drm_device *drm = connector->dev; - struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector); - struct drm_encoder *encoder = &vc4_hdmi->encoder.base; + struct drm_device *drm; + struct vc4_hdmi *vc4_hdmi; struct drm_connector_state *conn_state; struct drm_crtc_state *crtc_state; struct drm_crtc *crtc; @@ -332,6 +330,7 @@ static int vc4_hdmi_reset_link(struct drm_connector *connector, if (!connector) return 0; + drm = connector->dev; ret = drm_modeset_lock(&drm->mode_config.connection_mutex, ctx); if (ret) return ret; @@ -349,7 +348,8 @@ static int vc4_hdmi_reset_link(struct drm_connector *connector, if (!crtc_state->active) return 0; - if (!vc4_hdmi_supports_scrambling(encoder)) + vc4_hdmi = connector_to_vc4_hdmi(connector); + if (!vc4_hdmi_supports_scrambling(vc4_hdmi)) return 0; scrambling_needed = vc4_hdmi_mode_needs_scrambling(&vc4_hdmi->saved_adjusted_mode, @@ -867,7 +867,7 @@ static void vc4_hdmi_enable_scrambling(struct drm_encoder *encoder) lockdep_assert_held(&vc4_hdmi->mutex); - if (!vc4_hdmi_supports_scrambling(encoder)) + if (!vc4_hdmi_supports_scrambling(vc4_hdmi)) return; if (!vc4_hdmi_mode_needs_scrambling(mode, diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c index 0035affc3e59..ae97b98750b6 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drv.c +++ b/drivers/gpu/drm/virtio/virtgpu_drv.c @@ -35,6 +35,7 @@ #include <drm/drm_aperture.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_drv.h> +#include <drm/drm_fbdev_generic.h> #include <drm/drm_file.h> #include "virtgpu_drv.h" diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h index 9b98470593b0..b7a64c7dcc2c 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drv.h +++ b/drivers/gpu/drm/virtio/virtgpu_drv.h @@ -35,7 +35,6 @@ #include <drm/drm_atomic.h> #include <drm/drm_drv.h> #include <drm/drm_encoder.h> -#include <drm/drm_fb_helper.h> #include <drm/drm_fourcc.h> #include <drm/drm_framebuffer.h> #include <drm/drm_gem.h> diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c index 0ffe5f0e33f7..293dbca50c31 100644 --- a/drivers/gpu/drm/vkms/vkms_drv.c +++ b/drivers/gpu/drm/vkms/vkms_drv.c @@ -17,7 +17,7 @@ #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_drv.h> -#include <drm/drm_fb_helper.h> +#include <drm/drm_fbdev_generic.h> #include <drm/drm_file.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_ioctl.h> diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 63496773f714..bd02cb0e6837 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -35,7 +35,7 @@ #include <drm/drm_aperture.h> #include <drm/drm_drv.h> -#include <drm/drm_fb_helper.h> +#include <drm/drm_fbdev_generic.h> #include <drm/drm_gem_ttm_helper.h> #include <drm/drm_ioctl.h> #include <drm/drm_module.h> diff --git a/drivers/gpu/drm/xen/xen_drm_front_gem.c b/drivers/gpu/drm/xen/xen_drm_front_gem.c index e31554d7139f..4c95ebcdcc2d 100644 --- a/drivers/gpu/drm/xen/xen_drm_front_gem.c +++ b/drivers/gpu/drm/xen/xen_drm_front_gem.c @@ -12,7 +12,6 @@ #include <linux/scatterlist.h> #include <linux/shmem_fs.h> -#include <drm/drm_fb_helper.h> #include <drm/drm_gem.h> #include <drm/drm_prime.h> #include <drm/drm_probe_helper.h> diff --git a/drivers/gpu/drm/xlnx/zynqmp_dp.c 
b/drivers/gpu/drm/xlnx/zynqmp_dp.c index 7c9ae167eac7..0a7b466446fb 100644 --- a/drivers/gpu/drm/xlnx/zynqmp_dp.c +++ b/drivers/gpu/drm/xlnx/zynqmp_dp.c @@ -1362,9 +1362,10 @@ static void zynqmp_dp_bridge_detach(struct drm_bridge *bridge) zynqmp_dp_aux_cleanup(dp); } -static int zynqmp_dp_bridge_mode_valid(struct drm_bridge *bridge, - const struct drm_display_info *info, - const struct drm_display_mode *mode) +static enum drm_mode_status +zynqmp_dp_bridge_mode_valid(struct drm_bridge *bridge, + const struct drm_display_info *info, + const struct drm_display_mode *mode) { struct zynqmp_dp *dp = bridge_to_dp(bridge); int rate; diff --git a/drivers/gpu/drm/xlnx/zynqmp_kms.c b/drivers/gpu/drm/xlnx/zynqmp_kms.c index 1847792cf13d..776ef5480206 100644 --- a/drivers/gpu/drm/xlnx/zynqmp_kms.c +++ b/drivers/gpu/drm/xlnx/zynqmp_kms.c @@ -19,7 +19,7 @@ #include <drm/drm_device.h> #include <drm/drm_drv.h> #include <drm/drm_encoder.h> -#include <drm/drm_fb_helper.h> +#include <drm/drm_fbdev_generic.h> #include <drm/drm_fourcc.h> #include <drm/drm_framebuffer.h> #include <drm/drm_gem_dma_helper.h>
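A large share of the hunks above only swap the <drm/drm_fb_helper.h> include for <drm/drm_fbdev_generic.h> in drivers that rely on the generic fbdev emulation. A minimal sketch of the pattern those drivers end up with is shown below; the example_ names are placeholders and the surrounding driver code is assumed rather than taken from any file in this diff.

#include <drm/drm_drv.h>
#include <drm/drm_fbdev_generic.h>

/*
 * Sketch only: register the device, then let the generic fbdev client
 * provide the emulated console. 32 is the preferred bits per pixel.
 */
static int example_register_with_fbdev(struct drm_device *drm)
{
	int ret;

	ret = drm_dev_register(drm, 0);
	if (ret)
		return ret;

	drm_fbdev_generic_setup(drm, 32);

	return 0;
}

The sun6i DSI hunks replace of_device_is_compatible() checks with a per-compatible struct sun6i_dsi_variant carried in the .data pointer of the of_device_id table. The hunk that stores the matched variant in dsi->variant is not part of this excerpt; a plausible sketch, assuming the usual of_device_get_match_data() lookup in probe, is given below (only the struct layout comes from the diff, the rest is an assumption).

#include <linux/of_device.h>
#include <linux/platform_device.h>

/* Mirrors the structure added to sun6i_mipi_dsi.h above. */
struct sun6i_dsi_variant {
	bool has_mod_clk;
	bool set_mod_clk;
};

/* Sketch only: fetch the variant matched through the of_device_id table. */
static const struct sun6i_dsi_variant *
example_get_variant(struct platform_device *pdev)
{
	return of_device_get_match_data(&pdev->dev);
}

With the variant in hand, the mod clock is requested only when has_mod_clk is set, and the exclusive 297 MHz rate is pinned (and later released) only when set_mod_clk is also set, as the probe and remove hunks above show.

The v3d hunk converts plain mutex_init() calls to drmm_mutex_init(), which ties each mutex to the drm_device lifetime so teardown no longer needs explicit mutex_destroy() calls. A minimal sketch of the managed pattern, with illustrative names:

#include <drm/drm_managed.h>
#include <linux/mutex.h>

/* Sketch only: the mutex is destroyed when the drm_device is released. */
static int example_init_lock(struct drm_device *drm, struct mutex *lock)
{
	return drmm_mutex_init(drm, lock);
}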